From 5cbc93d4bb093d951b0dd92e3677fc3aab910adb Mon Sep 17 00:00:00 2001 From: Oscar Dowson Date: Wed, 6 Sep 2023 16:12:06 +1200 Subject: [PATCH] [docs] test removing search_index.js (#661) --- v0.3.13/search_index.js | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/v0.3.13/search_index.js b/v0.3.13/search_index.js index ada59c35f..b522e9c72 100644 --- a/v0.3.13/search_index.js +++ b/v0.3.13/search_index.js @@ -1,3 +1,2 @@ -var documenterSearchIndex = {"docs": -[{"location":"tutorial/01_first_steps/","page":"Basic I: first steps","title":"Basic I: first steps","text":"EditURL = \"https://github.com/odow/SDDP.jl/blob/master/docs/src/tutorial/01_first_steps.jl\"","category":"page"},{"location":"tutorial/01_first_steps/#Basic-I:-first-steps","page":"Basic I: first steps","title":"Basic I: first steps","text":"","category":"section"},{"location":"tutorial/01_first_steps/","page":"Basic I: first steps","title":"Basic I: first steps","text":"SDDP.jl is a solver for multistage stochastic optimization problems. By multistage, we mean problems in which the agent makes a sequence of decisions over time. By stochastic, we mean that our agent is making decisions in the presence of uncertainty that is gradually revealed over the multiple stages.","category":"page"},{"location":"tutorial/01_first_steps/#Background-theory","page":"Basic I: first steps","title":"Background theory","text":"","category":"section"},{"location":"tutorial/01_first_steps/","page":"Basic I: first steps","title":"Basic I: first steps","text":"Multistage stochastic programming is complicated, and the literature has not settled upon standard naming conventions, so we must begin with some unavoidable theory and notation.","category":"page"},{"location":"tutorial/01_first_steps/#Policy-graphs","page":"Basic I: first steps","title":"Policy graphs","text":"","category":"section"},{"location":"tutorial/01_first_steps/","page":"Basic I: first steps","title":"Basic I: first steps","text":"A multistage stochastic program can be modeled by a policy graph. A policy graph is a graph with nodes and arcs. The simplest type of policy graph is a linear graph. Here's a linear graph with three nodes:","category":"page"},{"location":"tutorial/01_first_steps/","page":"Basic I: first steps","title":"Basic I: first steps","text":"(Image: Linear policy graph)","category":"page"},{"location":"tutorial/01_first_steps/","page":"Basic I: first steps","title":"Basic I: first steps","text":"In addition to nodes 1, 2, and 3, there is also a root node (the circle), and three arcs. Each arc has an origin node and a destination node, like 1 => 2, and a corresponding probability of transitioning from the origin to the destination. Unless specified, we assume that the arc probabilities are uniform over the number of outgoing arcs. Thus, in this picture the arc probabilities are all 1.0. The squiggly lines denote random variables that we will discuss shortly.","category":"page"},{"location":"tutorial/01_first_steps/","page":"Basic I: first steps","title":"Basic I: first steps","text":"We denote the set of nodes by mathcalN, the root node by R, and the probability of transitioning from node i to node j by p_ij. (If no arc exists, then p_ij = 0.) 
We define the set of successors of node i as i^+ = j in mathcalN p_ij 0.","category":"page"},{"location":"tutorial/01_first_steps/","page":"Basic I: first steps","title":"Basic I: first steps","text":"Each square node in the graph corresponds to a place at which the agent makes a decision, and we call moments in time at which the agent makes a decision stages. By convention, we try to draw policy graphs from left-to-right, with the stages as columns. There can be more than one node in a stage! Here's an example, taken from the paper Dowson (2020):","category":"page"},{"location":"tutorial/01_first_steps/","page":"Basic I: first steps","title":"Basic I: first steps","text":"(Image: Markovian policy graph)","category":"page"},{"location":"tutorial/01_first_steps/","page":"Basic I: first steps","title":"Basic I: first steps","text":"The columns represent time, and the rows represent different states of the world. In this case, the rows represent different prices that milk can be sold for at the end of each year. The squiggly lines denote a multivariate random variable that models the weekly amount of rainfall that occurs. You can think of the nodes as forming a Markov chain, therefore, we call problems with a structure like this Markovian policy graphs. Moreover, note that policy graphs can have cycles! This allows them to model infinite horizon problems.","category":"page"},{"location":"tutorial/01_first_steps/","page":"Basic I: first steps","title":"Basic I: first steps","text":"note: Note\nThe sum of probabilities on the outgoing arcs of node i can be less than 1, i.e., sumlimits_jin i^+ p_ij le 1. What does this mean? One interpretation is that the probability is a discount factor. Another interpretation is that there is an implicit \"zero\" node that we have not modeled, with p_i0 = 1 - sumlimits_jin i^+ p_ij. This zero node has C_0(x u omega) = 0, and 0^+ = varnothing.","category":"page"},{"location":"tutorial/01_first_steps/#Problem-notation","page":"Basic I: first steps","title":"Problem notation","text":"","category":"section"},{"location":"tutorial/01_first_steps/","page":"Basic I: first steps","title":"Basic I: first steps","text":"A common feature of multistage stochastic optimization problems is that they model an agent controlling a system over time. This system can be described by three types of variables.","category":"page"},{"location":"tutorial/01_first_steps/","page":"Basic I: first steps","title":"Basic I: first steps","text":"State variables track a property of the system over time.\nEach node has an associated incoming state variable (the value of the state at the start of the node), and an outgoing state variable (the value of the state at the end of the node).\nExamples of state variables include the volume of water in a reservoir, the number of units of inventory in a warehouse, or the spatial position of a moving vehicle.\nBecause state variables track the system over time, each node must have the same set of state variables.\nWe denote state variables by the letter x for the incoming state variable and x^prime for the outgoing state variable.\nControl variables are actions taken (implicitly or explicitly) by the agent within a node which modify the state variables.\nExamples of control variables include releases of water from the reservoir, sales or purchasing decisions, and acceleration or braking of the vehicle.\nControl variables are local to a node i, and they can differ between nodes. 
For example, some control variables may be available within certain nodes.\nWe denote control variables by the letter u.\nRandom variables are finite, discrete, exogenous random variables that the agent observes at the start of a node, before the control variables are decided.\nExamples of random variables include rainfall inflow into a reservoir, probalistic perishing of inventory, and steering errors in a vehicle.\nRandom variables are local to a node i, and they can differ between nodes. For example, some nodes may have random variables, and some nodes may not.\nWe denote random variables by the Greek letter omega and the sample space from which they are drawn by Omega_i. The probability of sampling omega is denoted p_omega for simplicity.\nImportantly, the random variable associated with node i is independent of the random variables in all other nodes.","category":"page"},{"location":"tutorial/01_first_steps/","page":"Basic I: first steps","title":"Basic I: first steps","text":"In a node i, the three variables are related by a transition function, which maps the incoming state, the controls, and the random variables to the outgoing state as follows: x^prime = T_i(x u omega).","category":"page"},{"location":"tutorial/01_first_steps/","page":"Basic I: first steps","title":"Basic I: first steps","text":"As a result of entering a node i with the incoming state x, observing random variable omega, and choosing control u, the agent incurs a cost C_i(x u omega). (If the agent is a maximizer, this can be a profit, or a negative cost.) We call C_i the stage objective.","category":"page"},{"location":"tutorial/01_first_steps/","page":"Basic I: first steps","title":"Basic I: first steps","text":"To choose their control variables in node i, the agent uses a decision rule u = pi_i(x omega), which is a function that maps the incoming state variable and observation of the random variable to a control u. 
This control must satisfy some feasibilty requirements u in U_i(x omega).","category":"page"},{"location":"tutorial/01_first_steps/","page":"Basic I: first steps","title":"Basic I: first steps","text":"The set of decision rules, with one element for each node in the policy graph, is called a policy.","category":"page"},{"location":"tutorial/01_first_steps/","page":"Basic I: first steps","title":"Basic I: first steps","text":"The goal of the agent is to find a policy that minimizes the expected cost of starting at the root node with some initial condition x_R, and proceeding from node to node along the probabilistic arcs until they reach a node with no outgoing arcs (or it reaches an implicit \"zero\" node).","category":"page"},{"location":"tutorial/01_first_steps/","page":"Basic I: first steps","title":"Basic I: first steps","text":"min_pi mathbbE_i in R^+ omega in Omega_iV_i^pi(x_R omega)","category":"page"},{"location":"tutorial/01_first_steps/","page":"Basic I: first steps","title":"Basic I: first steps","text":"where","category":"page"},{"location":"tutorial/01_first_steps/","page":"Basic I: first steps","title":"Basic I: first steps","text":"V_i^pi(x omega) = C_i(x u omega) + mathbbE_j in i^+ varphi in Omega_jV_j(x^prime varphi)","category":"page"},{"location":"tutorial/01_first_steps/","page":"Basic I: first steps","title":"Basic I: first steps","text":"where u = pi_i(x omega) in U_i(x omega), and x^prime = T_i(x u omega).","category":"page"},{"location":"tutorial/01_first_steps/","page":"Basic I: first steps","title":"Basic I: first steps","text":"The expectations are a bit complicated, but they are equivalent to:","category":"page"},{"location":"tutorial/01_first_steps/","page":"Basic I: first steps","title":"Basic I: first steps","text":"mathbbE_j in i^+ varphi in Omega_jV_j(x^prime varphi) = sumlimits_j in i^+ p_ij sumlimits_varphi in Omega_j p_varphiV_j(x^prime varphi)","category":"page"},{"location":"tutorial/01_first_steps/","page":"Basic I: first steps","title":"Basic I: first steps","text":"An optimal policy is the set of decision rules that the agent can use to make decisions and achieve the smallest expected cost.","category":"page"},{"location":"tutorial/01_first_steps/#Assumptions","page":"Basic I: first steps","title":"Assumptions","text":"","category":"section"},{"location":"tutorial/01_first_steps/","page":"Basic I: first steps","title":"Basic I: first steps","text":"warning: Warning\nThis section is important!","category":"page"},{"location":"tutorial/01_first_steps/","page":"Basic I: first steps","title":"Basic I: first steps","text":"The space of problems you can model with this framework is very large. Too large, in fact, for us to form tractable solution algorithms for! 
Stochastic dual dynamic programming requires the following assumptions in order to work:","category":"page"},{"location":"tutorial/01_first_steps/","page":"Basic I: first steps","title":"Basic I: first steps","text":"Assumption 1: finite nodes","category":"page"},{"location":"tutorial/01_first_steps/","page":"Basic I: first steps","title":"Basic I: first steps","text":"There is a finite number of nodes in mathcalN.","category":"page"},{"location":"tutorial/01_first_steps/","page":"Basic I: first steps","title":"Basic I: first steps","text":"Assumption 2: finite random variables","category":"page"},{"location":"tutorial/01_first_steps/","page":"Basic I: first steps","title":"Basic I: first steps","text":"The sample space Omega_i is finite and discrete for each node iinmathcalN.","category":"page"},{"location":"tutorial/01_first_steps/","page":"Basic I: first steps","title":"Basic I: first steps","text":"Assumption 3: convex problems","category":"page"},{"location":"tutorial/01_first_steps/","page":"Basic I: first steps","title":"Basic I: first steps","text":"Given fixed omega, C_i(x u omega) is a convex function, T_i(x u omega) is linear, and U_i(x u omega) is a non-empty, bounded convex set with respect to x and u.","category":"page"},{"location":"tutorial/01_first_steps/","page":"Basic I: first steps","title":"Basic I: first steps","text":"Assumption 4: no infinite loops","category":"page"},{"location":"tutorial/01_first_steps/","page":"Basic I: first steps","title":"Basic I: first steps","text":"For all loops in the policy graph, the product of the arc transition probabilities around the loop is strictly less than 1.","category":"page"},{"location":"tutorial/01_first_steps/","page":"Basic I: first steps","title":"Basic I: first steps","text":"note: Note\nSDDP.jl relaxes assumption (3) to allow for integer state and control variables, but we won't go into the details here. Assumption (4) essentially means that we obtain a discounted-cost solution for infinite-horizon problems, instead of an average-cost solution; see Dowson (2020) for details.","category":"page"},{"location":"tutorial/01_first_steps/#Dynamic-programming-and-subproblems","page":"Basic I: first steps","title":"Dynamic programming and subproblems","text":"","category":"section"},{"location":"tutorial/01_first_steps/","page":"Basic I: first steps","title":"Basic I: first steps","text":"Now that we have formulated our problem, we need some ways of computing optimal decision rules. 
One way is to just use a heuristic like \"choose a control randomally from the set of feasible controls.\" However, such a policy is unlikely to be optimal.","category":"page"},{"location":"tutorial/01_first_steps/","page":"Basic I: first steps","title":"Basic I: first steps","text":"A better way of obtaining an optimal policy is to use Bellman's principle of optimality, a.k.a Dynamic Programming, and define a recursive subproblem as follows:","category":"page"},{"location":"tutorial/01_first_steps/","page":"Basic I: first steps","title":"Basic I: first steps","text":"beginaligned\nV_i(x omega) = minlimits_barx x^prime u C_i(barx u omega) + mathbbE_j in i^+ varphi in Omega_jV_j(x^prime varphi)\n x^prime = T_i(barx u omega) \n u in U_i(barx omega) \n barx = x\nendaligned","category":"page"},{"location":"tutorial/01_first_steps/","page":"Basic I: first steps","title":"Basic I: first steps","text":"Our decision rule, pi_i(x omega), solves this optimization problem and returns a u^* corresponding to an optimal solution.","category":"page"},{"location":"tutorial/01_first_steps/","page":"Basic I: first steps","title":"Basic I: first steps","text":"note: Note\nWe add barx as a decision variable, along with the fishing constraint barx = x for two reasons: it makes it obvious that formulating a problem with x times u results in a bilinear program instead of a linear program (see Assumption 3), and it simplifies the implementation of the SDDP algorithm.","category":"page"},{"location":"tutorial/01_first_steps/","page":"Basic I: first steps","title":"Basic I: first steps","text":"These subproblems are very difficult to solve exactly, because they involve recursive optimization problems with lots of nested expectations.","category":"page"},{"location":"tutorial/01_first_steps/","page":"Basic I: first steps","title":"Basic I: first steps","text":"Therefore, instead of solving them exactly, SDDP.jl works by iteratively approximating the expectation term of each subproblem, which is also called the cost-to-go term. For now, you don't need to understand the details, other than that there is a nasty cost-to-go term that we deal with behind-the-scenes.","category":"page"},{"location":"tutorial/01_first_steps/","page":"Basic I: first steps","title":"Basic I: first steps","text":"The subproblem view of a multistage stochastic program is also important, because it provides a convienient way of communicating the different parts of the broader problem, and it is how we will communicate the problem to SDDP.jl. All we need to do is drop the cost-to-go term and fishing constraint, and define a new subproblem SP as:","category":"page"},{"location":"tutorial/01_first_steps/","page":"Basic I: first steps","title":"Basic I: first steps","text":"beginaligned\ntextttSP_i(x omega) minlimits_barx x^prime u C_i(barx u omega) \n x^prime = T_i(barx u omega) \n u in U_i(barx omega)\nendaligned","category":"page"},{"location":"tutorial/01_first_steps/","page":"Basic I: first steps","title":"Basic I: first steps","text":"note: Note\nWhen we talk about formulating a subproblem with SDDP.jl, this is the formulation we mean.","category":"page"},{"location":"tutorial/01_first_steps/","page":"Basic I: first steps","title":"Basic I: first steps","text":"We've retained the transition function and uncertainty set because they help to motivate the different components of the subproblem. However, in general, the subproblem can be more general. 
A better (less restrictive) representation might be:","category":"page"},{"location":"tutorial/01_first_steps/","page":"Basic I: first steps","title":"Basic I: first steps","text":"beginaligned\ntextttSP_i(x omega) minlimits_barx x^prime u C_i(barx x^prime u omega) \n (barx x^prime u) in mathcalX_i(omega)\nendaligned","category":"page"},{"location":"tutorial/01_first_steps/","page":"Basic I: first steps","title":"Basic I: first steps","text":"Note that the outgoing state variable can appear in the objective, and we can add constraints involving the incoming and outgoing state variables. It should be obvious how to map between the two representations.","category":"page"},{"location":"tutorial/01_first_steps/#Example:-hydro-thermal-scheduling","page":"Basic I: first steps","title":"Example: hydro-thermal scheduling","text":"","category":"section"},{"location":"tutorial/01_first_steps/","page":"Basic I: first steps","title":"Basic I: first steps","text":"Hydrothermal scheduling is the most common application of stochastic dual dynamic programming. To illustrate some of the basic functionality of SDDP.jl, we implement a very simple model of the hydrothermal scheduling problem. To make things even simpler to start with, we're not going to include any uncertainty; that will come in the next tutorial.","category":"page"},{"location":"tutorial/01_first_steps/#Problem-statement","page":"Basic I: first steps","title":"Problem statement","text":"","category":"section"},{"location":"tutorial/01_first_steps/","page":"Basic I: first steps","title":"Basic I: first steps","text":"We consider the problem of scheduling electrical generation over three weeks in order to meet a known demand of 150 MWh in each week.","category":"page"},{"location":"tutorial/01_first_steps/","page":"Basic I: first steps","title":"Basic I: first steps","text":"There are two generators: a thermal generator, and a hydro generator. In each week, the agent needs to decide how much energy to generate from thermal, and how much energy to generate from hydro.","category":"page"},{"location":"tutorial/01_first_steps/","page":"Basic I: first steps","title":"Basic I: first steps","text":"The thermal generator has a short-run marginal cost of \\$50/MWh in the first stage, \\$100/MWh in the second stage, and \\$150/MWh in the third stage.","category":"page"},{"location":"tutorial/01_first_steps/","page":"Basic I: first steps","title":"Basic I: first steps","text":"The hydro generator has a short-run marginal cost of \\$0/MWh.","category":"page"},{"location":"tutorial/01_first_steps/","page":"Basic I: first steps","title":"Basic I: first steps","text":"The hydro generator draws water from a reservoir which has a maximum capacity of 200 MWh. (Although water is usually measured in m³, we measure it in the energy-equivalent MWh to simplify things. In practice, there is a conversion function between m³ flowing throw the turbine and MWh.) At the start of the first time period, the reservoir is full.","category":"page"},{"location":"tutorial/01_first_steps/","page":"Basic I: first steps","title":"Basic I: first steps","text":"In addition to the ability to generate electricity by passing water through the hydroelectric turbine, the hydro generator can also spill water down a spillway (bypassing the turbine) in order to prevent the water from over-topping the dam. 
We assume that there is no cost of spillage.","category":"page"},{"location":"tutorial/01_first_steps/","page":"Basic I: first steps","title":"Basic I: first steps","text":"The goal of the agent is to minimize the expected cost of generation over the three weeks.","category":"page"},{"location":"tutorial/01_first_steps/#Formulating-the-problem","page":"Basic I: first steps","title":"Formulating the problem","text":"","category":"section"},{"location":"tutorial/01_first_steps/","page":"Basic I: first steps","title":"Basic I: first steps","text":"Before going further, we need to load SDDP.jl:","category":"page"},{"location":"tutorial/01_first_steps/","page":"Basic I: first steps","title":"Basic I: first steps","text":"using SDDP","category":"page"},{"location":"tutorial/01_first_steps/#Graph-structure","page":"Basic I: first steps","title":"Graph structure","text":"","category":"section"},{"location":"tutorial/01_first_steps/","page":"Basic I: first steps","title":"Basic I: first steps","text":"First, we need to identify the structre of the policy graph. From the problem statement, we want to model the problem over three weeks in weekly stages. Therefore, the policy graph is a linear graph with three stages:","category":"page"},{"location":"tutorial/01_first_steps/","page":"Basic I: first steps","title":"Basic I: first steps","text":"graph = SDDP.LinearGraph(3)","category":"page"},{"location":"tutorial/01_first_steps/","page":"Basic I: first steps","title":"Basic I: first steps","text":"Root\n 0\nNodes\n 1\n 2\n 3\nArcs\n 0 => 1 w.p. 1.0\n 1 => 2 w.p. 1.0\n 2 => 3 w.p. 1.0\n","category":"page"},{"location":"tutorial/01_first_steps/#Building-the-subproblem","page":"Basic I: first steps","title":"Building the subproblem","text":"","category":"section"},{"location":"tutorial/01_first_steps/","page":"Basic I: first steps","title":"Basic I: first steps","text":"Next, we need to construct the associated subproblem for each node in graph. To do so, we need to provide SDDP.jl a function which takes two arguments. The first is subproblem::Model, which is an empty JuMP model. The second is node, which is the name of each node in the policy graph. If the graph is linear, SDDP defaults to naming the nodes using the integers in 1:T. Here's an example that we are going to flesh out over the next few paragraphs:","category":"page"},{"location":"tutorial/01_first_steps/","page":"Basic I: first steps","title":"Basic I: first steps","text":"function subproblem_builder(subproblem::Model, node::Int)\n # ... stuff to go here ...\n return subproblem\nend","category":"page"},{"location":"tutorial/01_first_steps/","page":"Basic I: first steps","title":"Basic I: first steps","text":"subproblem_builder (generic function with 1 method)","category":"page"},{"location":"tutorial/01_first_steps/","page":"Basic I: first steps","title":"Basic I: first steps","text":"note: Note\nWe don't need to add the fishing constraint barx = x; SDDP.jl does this automatically.","category":"page"},{"location":"tutorial/01_first_steps/#State-variables","page":"Basic I: first steps","title":"State variables","text":"","category":"section"},{"location":"tutorial/01_first_steps/","page":"Basic I: first steps","title":"Basic I: first steps","text":"The first part of the subproblem we need to identify are the state variables. 
Since we only have one reservoir, there is only one state variable, volume, the volume of water in the reservoir [MWh].","category":"page"},{"location":"tutorial/01_first_steps/","page":"Basic I: first steps","title":"Basic I: first steps","text":"The volume had bounds of [0, 200], and the reservoir was full at the start of time, so x_R = 200.","category":"page"},{"location":"tutorial/01_first_steps/","page":"Basic I: first steps","title":"Basic I: first steps","text":"We add state variables to our subproblem using JuMP's @variable macro. However, in addition to the usual syntax, we also pass SDDP.State, and we need to provide the initial value (x_R) using the initial_value keyword.","category":"page"},{"location":"tutorial/01_first_steps/","page":"Basic I: first steps","title":"Basic I: first steps","text":"function subproblem_builder(subproblem::Model, node::Int)\n # State variables\n @variable(subproblem, 0 <= volume <= 200, SDDP.State, initial_value = 200)\n return subproblem\nend","category":"page"},{"location":"tutorial/01_first_steps/","page":"Basic I: first steps","title":"Basic I: first steps","text":"subproblem_builder (generic function with 1 method)","category":"page"},{"location":"tutorial/01_first_steps/","page":"Basic I: first steps","title":"Basic I: first steps","text":"The syntax for adding a state variable is a little obtuse, because volume is not single JuMP variable. Instead, volume is a struct with two fields, .in and .out, corresponding to the incoming and outgoing state variables respectively.","category":"page"},{"location":"tutorial/01_first_steps/#Control-variables","page":"Basic I: first steps","title":"Control variables","text":"","category":"section"},{"location":"tutorial/01_first_steps/","page":"Basic I: first steps","title":"Basic I: first steps","text":"The next part of the subproblem we need to identiy are the control variables. 
The control variables for our problem are:","category":"page"},{"location":"tutorial/01_first_steps/","page":"Basic I: first steps","title":"Basic I: first steps","text":"thermal_generation: the quantity of energy generated from thermal [MWh/week]\nhydro_generation: the quantity of energy generated from hydro [MWh/week]\nhydro_spill: the volume of water spilled from the reservoir in each week [MWh/week]","category":"page"},{"location":"tutorial/01_first_steps/","page":"Basic I: first steps","title":"Basic I: first steps","text":"Each of these variables is non-negative.","category":"page"},{"location":"tutorial/01_first_steps/","page":"Basic I: first steps","title":"Basic I: first steps","text":"We add control variables to our subproblem as normal JuMP variables, using @variable or @variables:","category":"page"},{"location":"tutorial/01_first_steps/","page":"Basic I: first steps","title":"Basic I: first steps","text":"function subproblem_builder(subproblem::Model, node::Int)\n # State variables\n @variable(subproblem, 0 <= volume <= 200, SDDP.State, initial_value = 200)\n # Control variables\n @variables(subproblem, begin\n thermal_generation >= 0\n hydro_generation >= 0\n hydro_spill >= 0\n end)\n return subproblem\nend","category":"page"},{"location":"tutorial/01_first_steps/","page":"Basic I: first steps","title":"Basic I: first steps","text":"subproblem_builder (generic function with 1 method)","category":"page"},{"location":"tutorial/01_first_steps/","page":"Basic I: first steps","title":"Basic I: first steps","text":"tip: Tip\nModeling is an art, and a tricky part of that art is figuring out which variables are state variables, and which are control variables. A good rule is: if you need a value of a control variable in some future node to make a decision, it is a state variable instead.","category":"page"},{"location":"tutorial/01_first_steps/#Random-variables","page":"Basic I: first steps","title":"Random variables","text":"","category":"section"},{"location":"tutorial/01_first_steps/","page":"Basic I: first steps","title":"Basic I: first steps","text":"The next step is to identify any random variables. In this simple example, there are none, so we can skip it.","category":"page"},{"location":"tutorial/01_first_steps/#Transition-function-and-contraints","page":"Basic I: first steps","title":"Transition function and contraints","text":"","category":"section"},{"location":"tutorial/01_first_steps/","page":"Basic I: first steps","title":"Basic I: first steps","text":"Now that we've identified our variables, we can define the transition function and the constraints.","category":"page"},{"location":"tutorial/01_first_steps/","page":"Basic I: first steps","title":"Basic I: first steps","text":"For our problem, the state variable is the volume of water in the reservoir. The volume of water decreases in response to water being used for hydro generation and spillage. So the transition function is: volume.out = volume.in - hydro_generation - hydro_spill. 
(Note how we use volume.in and volume.out to refer to the incoming and outgoing state variables.)","category":"page"},{"location":"tutorial/01_first_steps/","page":"Basic I: first steps","title":"Basic I: first steps","text":"There is also a constraint that the total generation must sum to 150 MWh.","category":"page"},{"location":"tutorial/01_first_steps/","page":"Basic I: first steps","title":"Basic I: first steps","text":"Both the transition function and any additional constraint are added using JuMP's @constraint and @constraints macro.","category":"page"},{"location":"tutorial/01_first_steps/","page":"Basic I: first steps","title":"Basic I: first steps","text":"function subproblem_builder(subproblem::Model, node::Int)\n # State variables\n @variable(subproblem, 0 <= volume <= 200, SDDP.State, initial_value = 200)\n # Control variables\n @variables(subproblem, begin\n thermal_generation >= 0\n hydro_generation >= 0\n hydro_spill >= 0\n end)\n # Transition function and constraints\n @constraints(subproblem, begin\n volume.out == volume.in - hydro_generation - hydro_spill\n hydro_generation + thermal_generation == 150\n end)\n return subproblem\nend","category":"page"},{"location":"tutorial/01_first_steps/","page":"Basic I: first steps","title":"Basic I: first steps","text":"subproblem_builder (generic function with 1 method)","category":"page"},{"location":"tutorial/01_first_steps/#Objective-function","page":"Basic I: first steps","title":"Objective function","text":"","category":"section"},{"location":"tutorial/01_first_steps/","page":"Basic I: first steps","title":"Basic I: first steps","text":"Finally, we need to add an objective function using @stageobjective. The objective of the agent is to minimize the cost of thermal generation. This is complicated by a fuel cost that depends on the node.","category":"page"},{"location":"tutorial/01_first_steps/","page":"Basic I: first steps","title":"Basic I: first steps","text":"One possibility is to use an if statement on node to define the correct objective:","category":"page"},{"location":"tutorial/01_first_steps/","page":"Basic I: first steps","title":"Basic I: first steps","text":"function subproblem_builder(subproblem::Model, node::Int)\n # State variables\n @variable(subproblem, 0 <= volume <= 200, SDDP.State, initial_value = 200)\n # Control variables\n @variables(subproblem, begin\n thermal_generation >= 0\n hydro_generation >= 0\n hydro_spill >= 0\n end)\n # Transition function and constraints\n @constraints(subproblem, begin\n volume.out == volume.in - hydro_generation - hydro_spill\n hydro_generation + thermal_generation == 150\n end)\n # Stage-objective\n if node == 1\n @stageobjective(subproblem, 50 * thermal_generation)\n elseif node == 2\n @stageobjective(subproblem, 100 * thermal_generation)\n else\n @assert node == 3\n @stageobjective(subproblem, 150 * thermal_generation)\n end\n return subproblem\nend","category":"page"},{"location":"tutorial/01_first_steps/","page":"Basic I: first steps","title":"Basic I: first steps","text":"subproblem_builder (generic function with 1 method)","category":"page"},{"location":"tutorial/01_first_steps/","page":"Basic I: first steps","title":"Basic I: first steps","text":"A second possibility is to use an array of fuel costs, and use node to index the correct value:","category":"page"},{"location":"tutorial/01_first_steps/","page":"Basic I: first steps","title":"Basic I: first steps","text":"function subproblem_builder(subproblem::Model, node::Int)\n # State variables\n @variable(subproblem, 0 
<= volume <= 200, SDDP.State, initial_value = 200)\n # Control variables\n @variables(subproblem, begin\n thermal_generation >= 0\n hydro_generation >= 0\n hydro_spill >= 0\n end)\n # Transition function and constraints\n @constraints(subproblem, begin\n volume.out == volume.in - hydro_generation - hydro_spill\n hydro_generation + thermal_generation == 150\n end)\n # Stage-objective\n fuel_cost = [50, 100, 150]\n @stageobjective(subproblem, fuel_cost[node] * thermal_generation)\n return subproblem\nend","category":"page"},{"location":"tutorial/01_first_steps/","page":"Basic I: first steps","title":"Basic I: first steps","text":"subproblem_builder (generic function with 1 method)","category":"page"},{"location":"tutorial/01_first_steps/#Contructing-the-model","page":"Basic I: first steps","title":"Contructing the model","text":"","category":"section"},{"location":"tutorial/01_first_steps/","page":"Basic I: first steps","title":"Basic I: first steps","text":"Now that we've written our subproblem, we need to contruct the full model. For that, we're going to need a linear solver. Let's choose GLPK:","category":"page"},{"location":"tutorial/01_first_steps/","page":"Basic I: first steps","title":"Basic I: first steps","text":"using GLPK","category":"page"},{"location":"tutorial/01_first_steps/","page":"Basic I: first steps","title":"Basic I: first steps","text":"Then, we can create a full model using SDDP.PolicyGraph, passing our subproblem_builder function as the first argument, and our graph as the second:","category":"page"},{"location":"tutorial/01_first_steps/","page":"Basic I: first steps","title":"Basic I: first steps","text":"model = SDDP.PolicyGraph(\n subproblem_builder,\n graph;\n sense = :Min,\n lower_bound = 0.0,\n optimizer = GLPK.Optimizer,\n)","category":"page"},{"location":"tutorial/01_first_steps/","page":"Basic I: first steps","title":"Basic I: first steps","text":"A policy graph with 3 nodes.\n Node indices: 1, 2, 3\n","category":"page"},{"location":"tutorial/01_first_steps/","page":"Basic I: first steps","title":"Basic I: first steps","text":"sense: the optimization sense. Must be :Min or :Max.\nlower_bound: you must supply a valid bound on the objective. 
For our problem, we know that we cannot incur a negative cost so \\$0 is a valid lower bound.\noptimizer: This is borrowed directly from JuMP's Model constructor: Model(GLPK.Optimizer)","category":"page"},{"location":"tutorial/01_first_steps/","page":"Basic I: first steps","title":"Basic I: first steps","text":"Because linear policy graphs are the most commonly used structure, we can use SDDP.LinearPolicyGraph(; stages) instead of passing SDDP.LinearGraph(3) to SDDP.PolicyGraph.","category":"page"},{"location":"tutorial/01_first_steps/","page":"Basic I: first steps","title":"Basic I: first steps","text":"model = SDDP.LinearPolicyGraph(\n subproblem_builder;\n stages = 3,\n sense = :Min,\n lower_bound = 0.0,\n optimizer = GLPK.Optimizer,\n)","category":"page"},{"location":"tutorial/01_first_steps/","page":"Basic I: first steps","title":"Basic I: first steps","text":"A policy graph with 3 nodes.\n Node indices: 1, 2, 3\n","category":"page"},{"location":"tutorial/01_first_steps/","page":"Basic I: first steps","title":"Basic I: first steps","text":"There is also the option is to use Julia's do syntax to avoid needing to define a subproblem_builder function separately:","category":"page"},{"location":"tutorial/01_first_steps/","page":"Basic I: first steps","title":"Basic I: first steps","text":"model = SDDP.LinearPolicyGraph(\n stages = 3,\n sense = :Min,\n lower_bound = 0.0,\n optimizer = GLPK.Optimizer,\n) do subproblem, node\n # State variables\n @variable(subproblem, 0 <= volume <= 200, SDDP.State, initial_value = 200)\n # Control variables\n @variables(subproblem, begin\n thermal_generation >= 0\n hydro_generation >= 0\n hydro_spill >= 0\n end)\n # Transition function and constraints\n @constraints(subproblem, begin\n volume.out == volume.in - hydro_generation - hydro_spill\n hydro_generation + thermal_generation == 150\n end)\n # Stage-objective\n if node == 1\n @stageobjective(subproblem, 50 * thermal_generation)\n elseif node == 2\n @stageobjective(subproblem, 100 * thermal_generation)\n else\n @assert node == 3\n @stageobjective(subproblem, 150 * thermal_generation)\n end\nend","category":"page"},{"location":"tutorial/01_first_steps/","page":"Basic I: first steps","title":"Basic I: first steps","text":"A policy graph with 3 nodes.\n Node indices: 1, 2, 3\n","category":"page"},{"location":"tutorial/01_first_steps/","page":"Basic I: first steps","title":"Basic I: first steps","text":"info: Info\nJulia's do syntax is just a different way of passing an anonymous function inner to some function outer which takes inner as the first argument. For example, given:outer(inner::Function, x, y) = inner(x, y)thenouter(1, 2) do x, y\n return x^2 + y^2\nendis equivalent to:outer((x, y) -> x^2 + y^2, 1, 2)For our purpose, inner is subproblem_builder, and outer is SDDP.PolicyGraph.","category":"page"},{"location":"tutorial/01_first_steps/#Training-a-policy","page":"Basic I: first steps","title":"Training a policy","text":"","category":"section"},{"location":"tutorial/01_first_steps/","page":"Basic I: first steps","title":"Basic I: first steps","text":"Now we have a model, which is a description of the policy graph, we need to train a policy. Models can be trained using the SDDP.train function. It accepts a number of keyword arguments. 
iteration_limit terminates the training after the provided number of iterations.","category":"page"},{"location":"tutorial/01_first_steps/","page":"Basic I: first steps","title":"Basic I: first steps","text":"SDDP.train(model; iteration_limit = 3)","category":"page"},{"location":"tutorial/01_first_steps/","page":"Basic I: first steps","title":"Basic I: first steps","text":"------------------------------------------------------------------------------\n SDDP.jl (c) Oscar Dowson, 2017-21\n\nProblem\n Nodes : 3\n State variables : 1\n Scenarios : 1.00000e+00\n Solver : serial mode\n\nNumerical stability report\n Non-zero Matrix range [1e+00, 1e+00]\n Non-zero Objective range [1e+00, 2e+02]\n Non-zero Bounds range [2e+02, 2e+02]\n Non-zero RHS range [2e+02, 2e+02]\nNo problems detected\n\n Iteration Simulation Bound Time (s) Proc. ID # Solves\n 1 3.250000e+04 1.500000e+04 1.666069e-03 1 6\n 2 1.750000e+04 1.750000e+04 2.079010e-03 1 12\n 3 1.750000e+04 1.750000e+04 2.485991e-03 1 18\n\nTerminating training with status: iteration_limit\n------------------------------------------------------------------------------\n","category":"page"},{"location":"tutorial/01_first_steps/","page":"Basic I: first steps","title":"Basic I: first steps","text":"tip: Tip\nFor more information on the numerical stability report, read the Numerical stability report section.","category":"page"},{"location":"tutorial/01_first_steps/#Obtaining-the-decision-rule","page":"Basic I: first steps","title":"Obtaining the decision rule","text":"","category":"section"},{"location":"tutorial/01_first_steps/","page":"Basic I: first steps","title":"Basic I: first steps","text":"After training a policy, we can create a decision rule using SDDP.DecisionRule:","category":"page"},{"location":"tutorial/01_first_steps/","page":"Basic I: first steps","title":"Basic I: first steps","text":"rule = SDDP.DecisionRule(model; node = 1)","category":"page"},{"location":"tutorial/01_first_steps/","page":"Basic I: first steps","title":"Basic I: first steps","text":"A decision rule for node 1","category":"page"},{"location":"tutorial/01_first_steps/","page":"Basic I: first steps","title":"Basic I: first steps","text":"Then, to evalute the decision rule, we use SDDP.evaluate:","category":"page"},{"location":"tutorial/01_first_steps/","page":"Basic I: first steps","title":"Basic I: first steps","text":"solution = SDDP.evaluate(\n rule;\n incoming_state = Dict(:volume => 150.0),\n controls_to_record = [:hydro_generation, :thermal_generation],\n)","category":"page"},{"location":"tutorial/01_first_steps/","page":"Basic I: first steps","title":"Basic I: first steps","text":"(stage_objective = 7500.0, outgoing_state = Dict(:volume=>150.0), controls = Dict(:thermal_generation=>150.0,:hydro_generation=>0.0))","category":"page"},{"location":"tutorial/01_first_steps/#Simulating-the-policy","page":"Basic I: first steps","title":"Simulating the policy","text":"","category":"section"},{"location":"tutorial/01_first_steps/","page":"Basic I: first steps","title":"Basic I: first steps","text":"Once you have a trained policy, you can also simulate it using SDDP.simulate. The return value from simulate is a vector with one element for each replication. Each element is itself a vector, with one element for each stage. 
Each element, corresponding to a particular stage in a particular replication, is a dictionary that records information from the simulation.","category":"page"},{"location":"tutorial/01_first_steps/","page":"Basic I: first steps","title":"Basic I: first steps","text":"simulations = SDDP.simulate(\n # The trained model to simulate.\n model,\n # The number of replications.\n 1,\n # A list of names to record the values of.\n [:volume, :thermal_generation, :hydro_generation, :hydro_spill]\n)\n\nreplication = 1\nstage = 2\nsimulations[replication][stage]","category":"page"},{"location":"tutorial/01_first_steps/","page":"Basic I: first steps","title":"Basic I: first steps","text":"Dict{Symbol,Any} with 10 entries:\n :volume => State{Float64}(200.0, 150.0)\n :hydro_spill => 0.0\n :bellman_term => 0.0\n :noise_term => nothing\n :node_index => 2\n :stage_objective => 10000.0\n :objective_state => nothing\n :thermal_generation => 100.0\n :hydro_generation => 50.0\n :belief => Dict(2=>1.0)","category":"page"},{"location":"tutorial/01_first_steps/","page":"Basic I: first steps","title":"Basic I: first steps","text":"Ignore many of the entries for now; they will be relevant later.","category":"page"},{"location":"tutorial/01_first_steps/","page":"Basic I: first steps","title":"Basic I: first steps","text":"One element of iterest is :volume.","category":"page"},{"location":"tutorial/01_first_steps/","page":"Basic I: first steps","title":"Basic I: first steps","text":"outgoing_volume = [stage[:volume].out for stage in simulations[1]]","category":"page"},{"location":"tutorial/01_first_steps/","page":"Basic I: first steps","title":"Basic I: first steps","text":"3-element Array{Float64,1}:\n 200.0\n 150.0\n 0.0","category":"page"},{"location":"tutorial/01_first_steps/","page":"Basic I: first steps","title":"Basic I: first steps","text":"Another is :thermal_generation.","category":"page"},{"location":"tutorial/01_first_steps/","page":"Basic I: first steps","title":"Basic I: first steps","text":"thermal_generation = [stage[:thermal_generation] for stage in simulations[1]]","category":"page"},{"location":"tutorial/01_first_steps/","page":"Basic I: first steps","title":"Basic I: first steps","text":"3-element Array{Float64,1}:\n 150.0\n 100.0\n 0.0","category":"page"},{"location":"tutorial/01_first_steps/","page":"Basic I: first steps","title":"Basic I: first steps","text":"From this, we can see the optimal policy: in the first stage, use 150 MWh of thermal generation and 0 MWh of hydro generation. In the second stage, use 100 MWh of thermal and 50 MWh of hydro. 
In the third and final stage, use 0 MWh of thermal and 150 MWh of hydro.","category":"page"},{"location":"tutorial/01_first_steps/#Extracting-the-water-values","page":"Basic I: first steps","title":"Extracting the water values","text":"","category":"section"},{"location":"tutorial/01_first_steps/","page":"Basic I: first steps","title":"Basic I: first steps","text":"Finally, we can use SDDP.ValueFunction and SDDP.evaluate to obtain and evaluate the value function at different points in the state-space.","category":"page"},{"location":"tutorial/01_first_steps/","page":"Basic I: first steps","title":"Basic I: first steps","text":"First, we construct a value function from the first subproblem:","category":"page"},{"location":"tutorial/01_first_steps/","page":"Basic I: first steps","title":"Basic I: first steps","text":"V = SDDP.ValueFunction(model; node = 1)","category":"page"},{"location":"tutorial/01_first_steps/","page":"Basic I: first steps","title":"Basic I: first steps","text":"A value function for node 1","category":"page"},{"location":"tutorial/01_first_steps/","page":"Basic I: first steps","title":"Basic I: first steps","text":"Then we can evaluate V at a point:","category":"page"},{"location":"tutorial/01_first_steps/","page":"Basic I: first steps","title":"Basic I: first steps","text":"cost, price = SDDP.evaluate(V; volume = 10)","category":"page"},{"location":"tutorial/01_first_steps/","page":"Basic I: first steps","title":"Basic I: first steps","text":"(36000.0, Dict(:volume=>-150.0))","category":"page"},{"location":"tutorial/01_first_steps/","page":"Basic I: first steps","title":"Basic I: first steps","text":"This returns the cost-to-go (cost), and the gradient of the cost-to-go function with resspect to each state variable. Note that since we are minimizing, the price has a negative sign: each additional unit of water leads to a decrease in the the expected long-run cost.","category":"page"},{"location":"tutorial/01_first_steps/","page":"Basic I: first steps","title":"Basic I: first steps","text":"For our example, the value of water at the end of the first stage is \\$150, because each additional unit of water can displace a unit of thermal generation in the final stage when the price is \\$150/MWh.","category":"page"},{"location":"tutorial/01_first_steps/","page":"Basic I: first steps","title":"Basic I: first steps","text":"This concludes our first tutorial for SDDP.jl. In the next tutorial, Basic II: adding uncertainty, we will extend this problem by adding uncertainty.","category":"page"},{"location":"tutorial/01_first_steps/","page":"Basic I: first steps","title":"Basic I: first steps","text":"","category":"page"},{"location":"tutorial/01_first_steps/","page":"Basic I: first steps","title":"Basic I: first steps","text":"This page was generated using Literate.jl.","category":"page"},{"location":"guides/create_a_general_policy_graph/#Create-a-general-policy-graph","page":"Create a general policy graph","title":"Create a general policy graph","text":"","category":"section"},{"location":"guides/create_a_general_policy_graph/","page":"Create a general policy graph","title":"Create a general policy graph","text":"DocTestSetup = quote\n using SDDP, GLPK\nend","category":"page"},{"location":"guides/create_a_general_policy_graph/","page":"Create a general policy graph","title":"Create a general policy graph","text":"SDDP.jl uses the concept of a policy graph to formulate multistage stochastic programming problems. 
We highly recommend that you read the following paper before continuing with this tutorial.","category":"page"},{"location":"guides/create_a_general_policy_graph/","page":"Create a general policy graph","title":"Create a general policy graph","text":"Dowson, O. (2018). The policy graph decomposition of multistage stochastic optimization problems. Optimization Online. link","category":"page"},{"location":"guides/create_a_general_policy_graph/#Creating-a-[SDDP.Graph](@ref)","page":"Create a general policy graph","title":"Creating a SDDP.Graph","text":"","category":"section"},{"location":"guides/create_a_general_policy_graph/#Linear-graphs","page":"Create a general policy graph","title":"Linear graphs","text":"","category":"section"},{"location":"guides/create_a_general_policy_graph/","page":"Create a general policy graph","title":"Create a general policy graph","text":"Linear policy graphs can be created using the SDDP.LinearGraph function.","category":"page"},{"location":"guides/create_a_general_policy_graph/","page":"Create a general policy graph","title":"Create a general policy graph","text":"julia> graph = SDDP.LinearGraph(3)\nRoot\n 0\nNodes\n 1\n 2\n 3\nArcs\n 0 => 1 w.p. 1.0\n 1 => 2 w.p. 1.0\n 2 => 3 w.p. 1.0","category":"page"},{"location":"guides/create_a_general_policy_graph/","page":"Create a general policy graph","title":"Create a general policy graph","text":"We can add nodes to a graph using SDDP.add_node and edges using SDDP.add_edge.","category":"page"},{"location":"guides/create_a_general_policy_graph/","page":"Create a general policy graph","title":"Create a general policy graph","text":"julia> SDDP.add_node(graph, 4)\n\njulia> SDDP.add_edge(graph, 3 => 4, 1.0)\n\njulia> SDDP.add_edge(graph, 4 => 1, 0.9)\n\njulia> graph\nRoot\n 0\nNodes\n 1\n 2\n 3\n 4\nArcs\n 0 => 1 w.p. 1.0\n 1 => 2 w.p. 1.0\n 2 => 3 w.p. 1.0\n 3 => 4 w.p. 1.0\n 4 => 1 w.p. 0.9","category":"page"},{"location":"guides/create_a_general_policy_graph/","page":"Create a general policy graph","title":"Create a general policy graph","text":"Look! We just made a cyclic graph! SDDP.jl can solve infinite horizon problems. The probability on the arc that completes a cycle should be interpreted as a discount factor.","category":"page"},{"location":"guides/create_a_general_policy_graph/#Markovian-policy-graphs","page":"Create a general policy graph","title":"Markovian policy graphs","text":"","category":"section"},{"location":"guides/create_a_general_policy_graph/","page":"Create a general policy graph","title":"Create a general policy graph","text":"Markovian policy graphs can be created using the SDDP.MarkovianGraph function.","category":"page"},{"location":"guides/create_a_general_policy_graph/","page":"Create a general policy graph","title":"Create a general policy graph","text":"julia> SDDP.MarkovianGraph(Matrix{Float64}[[1.0]', [0.4 0.6]])\nRoot\n (0, 1)\nNodes\n (1, 1)\n (2, 1)\n (2, 2)\nArcs\n (0, 1) => (1, 1) w.p. 1.0\n (1, 1) => (2, 1) w.p. 0.4\n (1, 1) => (2, 2) w.p. 0.6","category":"page"},{"location":"guides/create_a_general_policy_graph/#General-graphs","page":"Create a general policy graph","title":"General graphs","text":"","category":"section"},{"location":"guides/create_a_general_policy_graph/","page":"Create a general policy graph","title":"Create a general policy graph","text":"Arbitrarily complicated graphs can be constructed using SDDP.Graph, SDDP.add_node and SDDP.add_edge. 
For example","category":"page"},{"location":"guides/create_a_general_policy_graph/","page":"Create a general policy graph","title":"Create a general policy graph","text":"julia> graph = SDDP.Graph(:root_node)\nRoot\n root_node\nNodes\nArcs\n\njulia> SDDP.add_node(graph, :decision_node)\n\njulia> SDDP.add_edge(graph, :root_node => :decision_node, 1.0)\n\njulia> SDDP.add_edge(graph, :decision_node => :decision_node, 0.9)\n\njulia> graph\nRoot\n root_node\nNodes\n decision_node\nArcs\n root_node => decision_node w.p. 1.0\n decision_node => decision_node w.p. 0.9","category":"page"},{"location":"guides/create_a_general_policy_graph/#Creating-a-policy-graph","page":"Create a general policy graph","title":"Creating a policy graph","text":"","category":"section"},{"location":"guides/create_a_general_policy_graph/","page":"Create a general policy graph","title":"Create a general policy graph","text":"Once you have constructed an instance of [SDDP.Graph], you can create a policy graph by passing the graph as the first argument.","category":"page"},{"location":"guides/create_a_general_policy_graph/","page":"Create a general policy graph","title":"Create a general policy graph","text":"julia> graph = SDDP.Graph(\n :root_node,\n [:decision_node],\n [\n (:root_node => :decision_node, 1.0),\n (:decision_node => :decision_node, 0.9)\n ]);\n\njulia> model = SDDP.PolicyGraph(\n graph,\n lower_bound = 0,\n optimizer = GLPK.Optimizer) do subproblem, node\n println(\"Called from node: \", node)\n end;\nCalled from node: decision_node","category":"page"},{"location":"guides/create_a_general_policy_graph/#Special-cases","page":"Create a general policy graph","title":"Special cases","text":"","category":"section"},{"location":"guides/create_a_general_policy_graph/","page":"Create a general policy graph","title":"Create a general policy graph","text":"There are two special cases which cover the majority of models in the literature.","category":"page"},{"location":"guides/create_a_general_policy_graph/","page":"Create a general policy graph","title":"Create a general policy graph","text":"SDDP.LinearPolicyGraph is a special case where a SDDP.LinearGraph is passed as the first argument.\nSDDP.MarkovianPolicyGraph is a special case where a SDDP.MarkovianGraph is passed as the first argument.","category":"page"},{"location":"guides/create_a_general_policy_graph/","page":"Create a general policy graph","title":"Create a general policy graph","text":"Note that the type of the names of all nodes (including the root node) must be the same. In this case, they are Symbols.","category":"page"},{"location":"guides/create_a_general_policy_graph/#Simulating-non-standard-policy-graphs","page":"Create a general policy graph","title":"Simulating non-standard policy graphs","text":"","category":"section"},{"location":"guides/create_a_general_policy_graph/","page":"Create a general policy graph","title":"Create a general policy graph","text":"If you simulate a policy graph with a node that has outgoing arcs that sum to less than one, you will end up with simulations of different lengths. 
(The most common case is an infinite horizon stochastic program, aka a linear policy graph with a single cycle.)","category":"page"},{"location":"guides/create_a_general_policy_graph/","page":"Create a general policy graph","title":"Create a general policy graph","text":"To simulate a fixed number of stages, use:","category":"page"},{"location":"guides/create_a_general_policy_graph/","page":"Create a general policy graph","title":"Create a general policy graph","text":"simulations = SDDP.simulate(\n model,\n 1,\n sampling_scheme = SDDP.InSampleMonteCarlo(\n max_depth = 10,\n terminate_on_dummy_leaf = false\n )\n)","category":"page"},{"location":"guides/create_a_general_policy_graph/","page":"Create a general policy graph","title":"Create a general policy graph","text":"Here, max_depth controls the number of stages, and terminate_on_dummy_leaf = false stops us from terminating early.","category":"page"},{"location":"guides/create_a_general_policy_graph/","page":"Create a general policy graph","title":"Create a general policy graph","text":"See also Simulate using a different sampling scheme.","category":"page"},{"location":"guides/create_a_general_policy_graph/#Creating-a-Markovian-graph-automatically","page":"Create a general policy graph","title":"Creating a Markovian graph automatically","text":"","category":"section"},{"location":"guides/create_a_general_policy_graph/","page":"Create a general policy graph","title":"Create a general policy graph","text":"SDDP.jl can create a Markovian graph by automatically discretizing a one-dimensional stochastic process and fitting a Markov chain.","category":"page"},{"location":"guides/create_a_general_policy_graph/","page":"Create a general policy graph","title":"Create a general policy graph","text":"To access this functionality, pass a function that takes no arguments and returns a Vector{Float64} to SDDP.MarkovianGraph. To keyword arguments also need to be provided: budget is the total number of nodes in the Markovian graph, and scenarios is the number of realizations of the simulator function used to approximate the graph.","category":"page"},{"location":"guides/create_a_general_policy_graph/","page":"Create a general policy graph","title":"Create a general policy graph","text":"In some cases, scenarios may be too small to provide a reasonable fit of the stochastic process. If so, SDDP.jl will automatically try to re-fit the Markov chain using more scenarios.","category":"page"},{"location":"guides/create_a_general_policy_graph/","page":"Create a general policy graph","title":"Create a general policy graph","text":"function simulator()\n scenario = zeros(5)\n for i = 2:5\n scenario[i] = scenario[i - 1] + rand() - 0.5\n end\n return scenario\nend\n\nmodel = SDDP.PolicyGraph(\n SDDP.MarkovianGraph(simulator; budget = 10, scenarios = 100),\n sense = :Max,\n upper_bound = 1e3\n) do subproblem, node\n (stage, price) = node\n @variable(subproblem, x >= 0, SDDP.State, initial_value = 1)\n @constraint(subproblem, x.out <= x.in)\n @stageobjective(subproblem, price * x.out)\nend","category":"page"},{"location":"guides/debug_a_model/#Debug-a-model","page":"Debug a model","title":"Debug a model","text":"","category":"section"},{"location":"guides/debug_a_model/","page":"Debug a model","title":"Debug a model","text":"Building multistage stochastic programming models is hard. There are a lot of different pieces that need to be put together, and we typically have no idea of the optimal policy, so it can be hard (impossible?) 
to validate the solution.","category":"page"},{"location":"guides/debug_a_model/","page":"Debug a model","title":"Debug a model","text":"That said, here are a few tips to verify and validate models built using SDDP.jl.","category":"page"},{"location":"guides/debug_a_model/#Writing-subproblems-to-file","page":"Debug a model","title":"Writing subproblems to file","text":"","category":"section"},{"location":"guides/debug_a_model/","page":"Debug a model","title":"Debug a model","text":"The first step to debug a model is to write out the subproblems to a file in order to check that you are actually building what you think you are building.","category":"page"},{"location":"guides/debug_a_model/","page":"Debug a model","title":"Debug a model","text":"This can be achieved with the help of two functions: SDDP.parameterize and SDDP.write_subproblem_to_file. The first lets you parameterize a node given a noise, and the second writes out the subproblem to a file.","category":"page"},{"location":"guides/debug_a_model/","page":"Debug a model","title":"Debug a model","text":"Here is an example model:","category":"page"},{"location":"guides/debug_a_model/","page":"Debug a model","title":"Debug a model","text":"using SDDP, GLPK\n\nmodel = SDDP.LinearPolicyGraph(\n stages = 2,\n lower_bound = 0.0,\n optimizer = GLPK.Optimizer,\n direct_mode = false\n ) do subproblem, t\n @variable(subproblem, x, SDDP.State, initial_value = 1)\n @variable(subproblem, y)\n @constraint(subproblem, balance, x.in == x.out + y)\n SDDP.parameterize(subproblem, [1.1, 2.2]) do ω\n @stageobjective(subproblem, ω * x.out)\n JuMP.fix(y, ω)\n end\nend\n\n# output\n\nA policy graph with 2 nodes.\n Node indices: 1, 2","category":"page"},{"location":"guides/debug_a_model/","page":"Debug a model","title":"Debug a model","text":"Initially, model hasn't been parameterized with a concrete realization of ω. Let's do so now by parameterizing the first subproblem with ω=1.1.","category":"page"},{"location":"guides/debug_a_model/","page":"Debug a model","title":"Debug a model","text":"julia> SDDP.parameterize(model[1], 1.1)","category":"page"},{"location":"guides/debug_a_model/","page":"Debug a model","title":"Debug a model","text":"Easy! To parameterize the second stage problem, we would have used model[2].","category":"page"},{"location":"guides/debug_a_model/","page":"Debug a model","title":"Debug a model","text":"Now to write out the problem to a file. We'll get a few warnings because some variables and constraints don't have names. 
They don't matter, so ignore them.","category":"page"},{"location":"guides/debug_a_model/","page":"Debug a model","title":"Debug a model","text":"julia> SDDP.write_subproblem_to_file(model[1], \"subproblem.lp\")\n\njulia> read(\"subproblem.lp\") |> String |> print\nminimize\nobj: 1.1 x_out + 1 x2\nsubject to\nbalance: -1 y + 1 x_in - 1 x_out = 0\nBounds\nx2 >= 0\ny = 1.1\nx_in free\nx_out free\nEnd","category":"page"},{"location":"guides/debug_a_model/","page":"Debug a model","title":"Debug a model","text":"It is easy to see that ω has been set in the objective, and as the fixed value for y.","category":"page"},{"location":"guides/debug_a_model/","page":"Debug a model","title":"Debug a model","text":"It is also possible to parameterize the subproblems using values for ω that are not in the original problem formulation.","category":"page"},{"location":"guides/debug_a_model/","page":"Debug a model","title":"Debug a model","text":"julia> SDDP.parameterize(model[1], 3.3)\n\njulia> SDDP.write_subproblem_to_file(model[1], \"subproblem.lp\")\n\njulia> read(\"subproblem.lp\") |> String |> print\nminimize\nobj: 3.3 x_out + 1 x2\nsubject to\nbalance: -1 y + 1 x_in - 1 x_out = 0\nBounds\nx2 >= 0\ny = 3.3\nx_in free\nx_out free\nEnd\n\njulia> rm(\"subproblem.lp\") # Clean up.","category":"page"},{"location":"guides/debug_a_model/#Solve-the-determinstic-equivalent","page":"Debug a model","title":"Solve the deterministic equivalent","text":"","category":"section"},{"location":"guides/debug_a_model/","page":"Debug a model","title":"Debug a model","text":"Sometimes, it can be helpful to solve the deterministic equivalent of a problem in order to obtain an exact solution to the problem. To obtain a JuMP model that represents the deterministic equivalent, use SDDP.deterministic_equivalent. The returned model is just a normal JuMP model. Use JuMP to optimize it and query the solution.","category":"page"},{"location":"guides/debug_a_model/","page":"Debug a model","title":"Debug a model","text":"julia> det_equiv = SDDP.deterministic_equivalent(model, GLPK.Optimizer)\nA JuMP Model\nFeasibility problem with:\nVariables: 24\n`GenericAffExpr{Float64,VariableRef}`-in-`MathOptInterface.EqualTo{Float64}`: 10 constraints\n`VariableRef`-in-`MathOptInterface.EqualTo{Float64}`: 8 constraints\n`VariableRef`-in-`MathOptInterface.GreaterThan{Float64}`: 6 constraints\n`VariableRef`-in-`MathOptInterface.LessThan{Float64}`: 4 constraints\nModel mode: AUTOMATIC\nCachingOptimizer state: EMPTY_OPTIMIZER\nSolver name: GLPK\n\njulia> optimize!(det_equiv)\n\njulia> objective_value(det_equiv)\n-5.472500000000001","category":"page"},{"location":"guides/debug_a_model/","page":"Debug a model","title":"Debug a model","text":"warning: Warning\nThe deterministic equivalent scales poorly with problem size. 
Only use this on small problems!","category":"page"},{"location":"examples/booking_management/","page":"Booking management","title":"Booking management","text":"EditURL = \"https://github.com/odow/SDDP.jl/blob/master/docs/src/examples/booking_management.jl\"","category":"page"},{"location":"examples/booking_management/#Booking-management","page":"Booking management","title":"Booking management","text":"","category":"section"},{"location":"examples/booking_management/","page":"Booking management","title":"Booking management","text":"This example concerns the acceptance of booking requests for rooms in a hotel in the leadup to a large event.","category":"page"},{"location":"examples/booking_management/","page":"Booking management","title":"Booking management","text":"Each stage, we receive a booking request and can choose to accept or decline it. Once accepted, bookings cannot be terminated.","category":"page"},{"location":"examples/booking_management/","page":"Booking management","title":"Booking management","text":"using SDDP, GLPK, Test\n\nfunction booking_management_model(\n num_days,\n num_rooms,\n num_requests,\n integrality_handler,\n)\n # maximum revenue that could be accrued.\n max_revenue = (num_rooms + num_requests) * num_days * num_rooms\n # booking_requests is a vector of {0,1} arrays of size\n # (num_rooms x num_days), with a 1 if the room is requested on that day.\n booking_requests = Array{Int,2}[]\n for room = 1:num_rooms\n for day = 1:num_days\n # note: length_of_stay is 0 indexed to avoid unnecessary +/- 1\n # on the indexing\n for length_of_stay = 0:(num_days-day)\n req = zeros(Int, (num_rooms, num_days))\n req[room:room, day.+(0:length_of_stay)] .= 1\n push!(booking_requests, req)\n end\n end\n end\n\n model = SDDP.LinearPolicyGraph(\n stages = num_requests,\n upper_bound = max_revenue,\n sense = :Max,\n optimizer = GLPK.Optimizer,\n integrality_handler = integrality_handler,\n ) do sp, stage\n\n @variable(\n sp,\n 0 <= vacancy[room = 1:num_rooms, day = 1:num_days] <= 1,\n SDDP.State,\n Bin,\n initial_value = 1\n )\n @variables(sp, begin\n # Accept request for booking of room for length of time.\n 0 <= accept_request <= 1, Bin\n # Accept a booking for an individual room on an individual day.\n 0 <= room_request_accepted[1:num_rooms, 1:num_days] <= 1, Bin\n # Helper for JuMP.fix\n req[1:num_rooms, 1:num_days]\n end)\n for room = 1:num_rooms, day = 1:num_days\n @constraints(\n sp,\n begin\n # Update vacancy if we accept a room request\n vacancy[room, day].out ==\n vacancy[room, day].in - room_request_accepted[room, day]\n # Can't accept a request of a filled room\n room_request_accepted[room, day] <= vacancy[room, day].in\n # Can't accept individual room request if entire request is declined\n room_request_accepted[room, day] <= accept_request\n # Can't accept request if room not requested\n room_request_accepted[room, day] <= req[room, day]\n # Accept all individual rooms if entire request is accepted\n room_request_accepted[room, day] + (1 - accept_request) >=\n req[room, day]\n end\n )\n end\n SDDP.parameterize(sp, booking_requests) do request\n JuMP.fix.(req, request)\n end\n @stageobjective(\n sp,\n sum(\n (room + stage - 1) * room_request_accepted[room, day]\n for room = 1:num_rooms for day = 1:num_days\n )\n )\n end\nend\n\nfunction booking_management(integrality_handler)\n m_1_2_5 = booking_management_model(1, 2, 5, integrality_handler)\n SDDP.train(m_1_2_5, iteration_limit = 10, log_frequency = 5)\n if integrality_handler == SDDP.ContinuousRelaxation()\n @test 
SDDP.calculate_bound(m_1_2_5) >= 7.25 - 1e-4\n else\n @test isapprox(SDDP.calculate_bound(m_1_2_5), 7.25, atol = 0.02)\n end\n\n m_2_2_3 = booking_management_model(2, 2, 3, integrality_handler)\n SDDP.train(m_2_2_3, iteration_limit = 50, log_frequency = 10)\n if integrality_handler == SDDP.ContinuousRelaxation()\n @test SDDP.calculate_bound(m_1_2_5) > 6.13\n else\n @test isapprox(SDDP.calculate_bound(m_2_2_3), 6.13, atol = 0.02)\n end\nend\n\nfor integrality_handler in [SDDP.SDDiP(), SDDP.ContinuousRelaxation()]\n booking_management(integrality_handler)\nend","category":"page"},{"location":"examples/booking_management/","page":"Booking management","title":"Booking management","text":"------------------------------------------------------------------------------\n SDDP.jl (c) Oscar Dowson, 2017-21\n\nProblem\n Nodes : 5\n State variables : 2\n Scenarios : 3.20000e+01\n Solver : serial mode\n\nNumerical stability report\n Non-zero Matrix range [1e+00, 1e+00]\n Non-zero Objective range [1e+00, 6e+00]\n Non-zero Bounds range [1e+00, 1e+01]\n Non-zero RHS range [1e+00, 1e+00]\nNo problems detected\n\n Iteration Simulation Bound Time (s) Proc. ID # Solves\n 5 6.000000e+00 7.250000e+00 3.588071e-01 1 75\n 10 1.000000e+01 7.250000e+00 4.025459e-01 1 150\n\nTerminating training with status: iteration_limit\n------------------------------------------------------------------------------\n------------------------------------------------------------------------------\n SDDP.jl (c) Oscar Dowson, 2017-21\n\nProblem\n Nodes : 3\n State variables : 4\n Scenarios : 2.16000e+02\n Solver : serial mode\n\nNumerical stability report\n Non-zero Matrix range [1e+00, 1e+00]\n Non-zero Objective range [1e+00, 4e+00]\n Non-zero Bounds range [1e+00, 2e+01]\n Non-zero RHS range [1e+00, 1e+00]\nNo problems detected\n\n Iteration Simulation Bound Time (s) Proc. ID # Solves\n┌ Warning: Found a cut with a mix of small and large coefficients.\n│ The order of magnitude difference is 16.56115026274564.\n│ The smallest cofficient is -7.401486830834375e-17.\n│ The largest coefficient is 2.6944444444444446.\n│ \n│ You can ignore this warning, but it may be an indication of numerical issues.\n│ \n│ Consider rescaling your model by using different units, e.g, kilometers instead\n│ of meters. You should also consider reducing the accuracy of your input data (if\n│ you haven't already). For example, it probably doesn't make sense to measure the\n│ inflow into a reservoir to 10 decimal places.\n└ @ SDDP /home/runner/work/SDDP.jl/SDDP.jl/src/plugins/bellman_functions.jl:68\n 10 4.000000e+00 6.310185e+00 2.075539e-01 1 210\n 20 5.000000e+00 6.143519e+00 3.278999e-01 1 420\n 30 6.000000e+00 6.143519e+00 4.355509e-01 1 630\n 40 1.200000e+01 6.143519e+00 5.526760e-01 1 840\n 50 2.000000e+00 6.143519e+00 6.691539e-01 1 1050\n\nTerminating training with status: iteration_limit\n------------------------------------------------------------------------------\n------------------------------------------------------------------------------\n SDDP.jl (c) Oscar Dowson, 2017-21\n\nProblem\n Nodes : 5\n State variables : 2\n Scenarios : 3.20000e+01\n Solver : serial mode\n\nNumerical stability report\n Non-zero Matrix range [1e+00, 1e+00]\n Non-zero Objective range [1e+00, 6e+00]\n Non-zero Bounds range [1e+00, 1e+01]\n Non-zero RHS range [1e+00, 1e+00]\nNo problems detected\n\n Iteration Simulation Bound Time (s) Proc. 
ID # Solves\n 5 9.956790e+00 7.500000e+00 6.516218e-03 1 75\n 10 1.000000e+01 7.250000e+00 1.060414e-02 1 150\n\nTerminating training with status: iteration_limit\n------------------------------------------------------------------------------\n------------------------------------------------------------------------------\n SDDP.jl (c) Oscar Dowson, 2017-21\n\nProblem\n Nodes : 3\n State variables : 4\n Scenarios : 2.16000e+02\n Solver : serial mode\n\nNumerical stability report\n Non-zero Matrix range [1e+00, 1e+00]\n Non-zero Objective range [1e+00, 4e+00]\n Non-zero Bounds range [1e+00, 2e+01]\n Non-zero RHS range [1e+00, 1e+00]\nNo problems detected\n\n Iteration Simulation Bound Time (s) Proc. ID # Solves\n 10 6.000000e+00 6.870675e+00 1.397204e-02 1 210\n 20 5.686275e+00 6.262097e+00 2.921009e-02 1 420\n 30 6.000000e+00 6.232831e+00 4.677701e-02 1 630\n 40 8.333333e+00 6.180109e+00 8.184195e-02 1 840\n 50 6.000000e+00 6.176311e+00 1.037240e-01 1 1050\n\nTerminating training with status: iteration_limit\n------------------------------------------------------------------------------\n","category":"page"},{"location":"examples/booking_management/","page":"Booking management","title":"Booking management","text":"","category":"page"},{"location":"examples/booking_management/","page":"Booking management","title":"Booking management","text":"This page was generated using Literate.jl.","category":"page"},{"location":"examples/no_strong_duality/","page":"No strong duality","title":"No strong duality","text":"EditURL = \"https://github.com/odow/SDDP.jl/blob/master/docs/src/examples/no_strong_duality.jl\"","category":"page"},{"location":"examples/no_strong_duality/#No-strong-duality","page":"No strong duality","title":"No strong duality","text":"","category":"section"},{"location":"examples/no_strong_duality/","page":"No strong duality","title":"No strong duality","text":"This example is interesting, because strong duality doesn't hold for the extensive form (see if you can show why!), but we still converge.","category":"page"},{"location":"examples/no_strong_duality/","page":"No strong duality","title":"No strong duality","text":"using SDDP, GLPK, Test\n\nfunction no_strong_duality()\n model = SDDP.PolicyGraph(\n SDDP.Graph(:root, [:node], [(:root => :node, 1.0), (:node => :node, 0.5)]),\n optimizer = GLPK.Optimizer,\n lower_bound = 0.0,\n ) do sp, t\n @variable(sp, x, SDDP.State, initial_value = 1.0)\n @stageobjective(sp, x.out)\n @constraint(sp, x.in == x.out)\n end\n SDDP.train(model, iteration_limit = 20, log_frequency = 10)\n @test SDDP.calculate_bound(model) ≈ 2.0 atol = 1e-8\n return\nend\n\nno_strong_duality()","category":"page"},{"location":"examples/no_strong_duality/","page":"No strong duality","title":"No strong duality","text":"------------------------------------------------------------------------------\n SDDP.jl (c) Oscar Dowson, 2017-21\n\nProblem\n Nodes : 1\n State variables : 1\n Scenarios : Inf\n Solver : serial mode\n\nNumerical stability report\n Non-zero Matrix range [1e+00, 1e+00]\n Non-zero Objective range [1e+00, 1e+00]\n Non-zero Bounds range [0e+00, 0e+00]\n Non-zero RHS range [0e+00, 0e+00]\nNo problems detected\n\n Iteration Simulation Bound Time (s) Proc. 
ID # Solves\n┌ Warning: Found a cut with a mix of small and large coefficients.\n│ The order of magnitude difference is 15.896597823213316.\n│ The smallest cofficient is 1.1102230246251565e-16.\n│ The largest coefficient is 0.8749999999999999.\n│ \n│ You can ignore this warning, but it may be an indication of numerical issues.\n│ \n│ Consider rescaling your model by using different units, e.g, kilometers instead\n│ of meters. You should also consider reducing the accuracy of your input data (if\n│ you haven't already). For example, it probably doesn't make sense to measure the\n│ inflow into a reservoir to 10 decimal places.\n└ @ SDDP /home/runner/work/SDDP.jl/SDDP.jl/src/plugins/bellman_functions.jl:68\n 10 2.000000e+00 2.000000e+00 3.363132e-03 1 52\n 20 1.000000e+00 2.000000e+00 6.284952e-03 1 104\n\nTerminating training with status: iteration_limit\n------------------------------------------------------------------------------\n","category":"page"},{"location":"examples/no_strong_duality/","page":"No strong duality","title":"No strong duality","text":"","category":"page"},{"location":"examples/no_strong_duality/","page":"No strong duality","title":"No strong duality","text":"This page was generated using Literate.jl.","category":"page"},{"location":"examples/StructDualDynProg.jl_prob5.2_2stages/","page":"StructDualDynProg: Problem 5.2, 2 stages","title":"StructDualDynProg: Problem 5.2, 2 stages","text":"EditURL = \"https://github.com/odow/SDDP.jl/blob/master/docs/src/examples/StructDualDynProg.jl_prob5.2_2stages.jl\"","category":"page"},{"location":"examples/StructDualDynProg.jl_prob5.2_2stages/#StructDualDynProg:-Problem-5.2,-2-stages","page":"StructDualDynProg: Problem 5.2, 2 stages","title":"StructDualDynProg: Problem 5.2, 2 stages","text":"","category":"section"},{"location":"examples/StructDualDynProg.jl_prob5.2_2stages/","page":"StructDualDynProg: Problem 5.2, 2 stages","title":"StructDualDynProg: Problem 5.2, 2 stages","text":"This example comes from StochasticDualDynamicProgramming.jl","category":"page"},{"location":"examples/StructDualDynProg.jl_prob5.2_2stages/","page":"StructDualDynProg: Problem 5.2, 2 stages","title":"StructDualDynProg: Problem 5.2, 2 stages","text":"using SDDP, GLPK, Test\n\nfunction test_prob52_2stages()\n model = SDDP.LinearPolicyGraph(\n stages = 2,\n lower_bound = 0.0,\n optimizer = GLPK.Optimizer,\n direct_mode = true,\n ) do subproblem, stage\n # ========== Problem data ==========\n n = 4\n m = 3\n ic = [16, 5, 32, 2]\n C = [25, 80, 6.5, 160]\n T = [8760, 7000, 1500] / 8760\n D2 = [diff([0, 3919, 7329, 10315]) diff([0, 7086, 9004, 11169])]\n p2 = [0.9, 0.1]\n # ========== State Variables ==========\n @variable(subproblem, x[i = 1:n] >= 0, SDDP.State, initial_value = 0.0)\n # ========== Variables ==========\n @variables(subproblem, begin\n y[1:n, 1:m] >= 0\n v[1:n] >= 0\n penalty >= 0\n rhs_noise[1:m] # Dummy variable for RHS noise term.\n end)\n # ========== Constraints ==========\n @constraints(subproblem, begin\n [i = 1:n], x[i].out == x[i].in + v[i]\n [i = 1:n], sum(y[i, :]) <= x[i].in\n [j = 1:m], sum(y[:, j]) + penalty >= rhs_noise[j]\n end)\n if stage == 2\n # No investment in last stage.\n @constraint(subproblem, sum(v) == 0)\n end\n # ========== Uncertainty ==========\n if stage != 1 # no uncertainty in first stage\n SDDP.parameterize(subproblem, 1:size(D2, 2), p2) do ω\n for j = 1:m\n JuMP.fix(rhs_noise[j], D2[j, ω])\n end\n end\n end\n # ========== Stage objective ==========\n @stageobjective(subproblem, ic' * v + C' * y * T + 1e6 * 
penalty)\n return\n end\n SDDP.train(model, iteration_limit = 50, log_frequency = 10)\n @test SDDP.calculate_bound(model) ≈ 340315.52 atol = 0.1\n return\nend\n\ntest_prob52_2stages()","category":"page"},{"location":"examples/StructDualDynProg.jl_prob5.2_2stages/","page":"StructDualDynProg: Problem 5.2, 2 stages","title":"StructDualDynProg: Problem 5.2, 2 stages","text":"------------------------------------------------------------------------------\n SDDP.jl (c) Oscar Dowson, 2017-21\n\nProblem\n Nodes : 2\n State variables : 4\n Scenarios : 2.00000e+00\n Solver : serial mode\n\nNumerical stability report\n Non-zero Matrix range [1e+00, 1e+00]\n Non-zero Objective range [1e+00, 1e+06]\n Non-zero Bounds range [0e+00, 0e+00]\n Non-zero RHS range [0e+00, 0e+00]\nNo problems detected\n\n Iteration Simulation Bound Time (s) Proc. ID # Solves\n 10 3.719972e+05 3.092989e+05 5.707312e-02 1 50\n 20 3.403500e+05 3.392359e+05 6.058598e-02 1 100\n 30 3.336456e+05 3.402384e+05 6.494093e-02 1 150\n 40 3.337559e+05 3.403155e+05 6.879115e-02 1 200\n 50 3.337559e+05 3.403155e+05 7.208705e-02 1 250\n\nTerminating training with status: iteration_limit\n------------------------------------------------------------------------------\n","category":"page"},{"location":"examples/StructDualDynProg.jl_prob5.2_2stages/","page":"StructDualDynProg: Problem 5.2, 2 stages","title":"StructDualDynProg: Problem 5.2, 2 stages","text":"","category":"page"},{"location":"examples/StructDualDynProg.jl_prob5.2_2stages/","page":"StructDualDynProg: Problem 5.2, 2 stages","title":"StructDualDynProg: Problem 5.2, 2 stages","text":"This page was generated using Literate.jl.","category":"page"},{"location":"examples/stochastic_all_blacks/","page":"Stochastic All Blacks","title":"Stochastic All Blacks","text":"EditURL = \"https://github.com/odow/SDDP.jl/blob/master/docs/src/examples/stochastic_all_blacks.jl\"","category":"page"},{"location":"examples/stochastic_all_blacks/#Stochastic-All-Blacks","page":"Stochastic All Blacks","title":"Stochastic All Blacks","text":"","category":"section"},{"location":"examples/stochastic_all_blacks/","page":"Stochastic All Blacks","title":"Stochastic All Blacks","text":"using SDDP, GLPK, Test\n\nfunction stochastic_all_blacks()\n # Number of time periods\n T = 3\n # Number of seats\n N = 2\n # R_ij = price of seat i at time j\n R = [3 3 6; 3 3 6]\n # Number of noises\n s = 3\n offers = [[[1, 1], [0, 0], [1, 1]], [[1, 0], [0, 0], [0, 0]], [[0, 1], [1, 0], [1, 1]]]\n\n model = SDDP.LinearPolicyGraph(\n stages = T,\n sense = :Max,\n upper_bound = 100.0,\n optimizer = GLPK.Optimizer,\n integrality_handler = SDDP.SDDiP(),\n ) do sp, stage\n # Seat remaining?\n @variable(sp, 0 <= x[1:N] <= 1, SDDP.State, Bin, initial_value = 1)\n # Action: accept offer, or don't accept offer\n # We are allowed to accept some of the seats offered but not others\n @variable(sp, accept_offer[1:N], Bin)\n @variable(sp, offers_made[1:N])\n # Balance on seats\n @constraint(sp, balance[i in 1:N], x[i].in - x[i].out == accept_offer[i])\n @stageobjective(sp, sum(R[i, stage] * accept_offer[i] for i = 1:N))\n SDDP.parameterize(sp, offers[stage]) do o\n JuMP.fix.(offers_made, o)\n end\n @constraint(sp, accept_offer .<= offers_made)\n end\n\n SDDP.train(model, iteration_limit = 10)\n @test SDDP.calculate_bound(model) ≈ 8.0\n return\nend\n\nstochastic_all_blacks()","category":"page"},{"location":"examples/stochastic_all_blacks/","page":"Stochastic All Blacks","title":"Stochastic All 
Blacks","text":"------------------------------------------------------------------------------\n SDDP.jl (c) Oscar Dowson, 2017-21\n\nProblem\n Nodes : 3\n State variables : 2\n Scenarios : 2.70000e+01\n Solver : serial mode\n\nNumerical stability report\n Non-zero Matrix range [1e+00, 1e+00]\n Non-zero Objective range [1e+00, 6e+00]\n Non-zero Bounds range [1e+00, 1e+02]\n Non-zero RHS range [0e+00, 0e+00]\nNo problems detected\n\n Iteration Simulation Bound Time (s) Proc. ID # Solves\n 1 6.000000e+00 8.000000e+00 2.002358e-01 1 11\n 2 6.000000e+00 8.000000e+00 2.039330e-01 1 22\n 3 6.000000e+00 8.000000e+00 2.074678e-01 1 33\n 4 1.200000e+01 8.000000e+00 2.106240e-01 1 44\n 5 1.200000e+01 8.000000e+00 2.140930e-01 1 55\n 6 1.200000e+01 8.000000e+00 2.172358e-01 1 66\n 7 6.000000e+00 8.000000e+00 2.206459e-01 1 77\n 8 6.000000e+00 8.000000e+00 2.237828e-01 1 88\n 9 6.000000e+00 8.000000e+00 2.434540e-01 1 99\n 10 1.200000e+01 8.000000e+00 2.473798e-01 1 110\n\nTerminating training with status: iteration_limit\n------------------------------------------------------------------------------\n","category":"page"},{"location":"examples/stochastic_all_blacks/","page":"Stochastic All Blacks","title":"Stochastic All Blacks","text":"","category":"page"},{"location":"examples/stochastic_all_blacks/","page":"Stochastic All Blacks","title":"Stochastic All Blacks","text":"This page was generated using Literate.jl.","category":"page"},{"location":"examples/FAST_production_management/","page":"FAST: the production management problem","title":"FAST: the production management problem","text":"EditURL = \"https://github.com/odow/SDDP.jl/blob/master/docs/src/examples/FAST_production_management.jl\"","category":"page"},{"location":"examples/FAST_production_management/#FAST:-the-production-management-problem","page":"FAST: the production management problem","title":"FAST: the production management problem","text":"","category":"section"},{"location":"examples/FAST_production_management/","page":"FAST: the production management problem","title":"FAST: the production management problem","text":"An implementation of the Production Management example from FAST","category":"page"},{"location":"examples/FAST_production_management/","page":"FAST: the production management problem","title":"FAST: the production management problem","text":"using SDDP, GLPK, Test\n\nfunction fast_production_management(; cut_type)\n DEMAND = [2, 10]\n H = 3\n N = 2\n C = [0.2, 0.7]\n S = 2 .+ [0.33, 0.54]\n model = SDDP.PolicyGraph(\n SDDP.LinearGraph(H),\n bellman_function = SDDP.BellmanFunction(lower_bound = -50.0, cut_type = cut_type),\n optimizer = GLPK.Optimizer,\n ) do sp, t\n @variable(sp, x[1:N] >= 0, SDDP.State, initial_value = 0.0)\n @variables(sp, begin\n s[i = 1:N] >= 0\n d\n end)\n @constraints(sp, begin\n [i = 1:N], s[i] <= x[i].in\n sum(s) <= d\n end)\n SDDP.parameterize(sp, t == 1 ? 
[0] : DEMAND) do ω\n JuMP.fix(d, ω)\n end\n @stageobjective(sp, sum(C[i] * x[i].out for i = 1:N) - S's)\n end\n SDDP.train(model, iteration_limit = 10, print_level = 2, log_frequency = 5)\n @test SDDP.calculate_bound(model) ≈ -23.96 atol = 1e-2\nend\n\nfast_production_management(cut_type = SDDP.SINGLE_CUT)\nfast_production_management(cut_type = SDDP.MULTI_CUT)","category":"page"},{"location":"examples/FAST_production_management/","page":"FAST: the production management problem","title":"FAST: the production management problem","text":"Test Passed","category":"page"},{"location":"examples/FAST_production_management/","page":"FAST: the production management problem","title":"FAST: the production management problem","text":"","category":"page"},{"location":"examples/FAST_production_management/","page":"FAST: the production management problem","title":"FAST: the production management problem","text":"This page was generated using Literate.jl.","category":"page"},{"location":"examples/generation_expansion/","page":"Generation expansion","title":"Generation expansion","text":"EditURL = \"https://github.com/odow/SDDP.jl/blob/master/docs/src/examples/generation_expansion.jl\"","category":"page"},{"location":"examples/generation_expansion/#Generation-expansion","page":"Generation expansion","title":"Generation expansion","text":"","category":"section"},{"location":"examples/generation_expansion/","page":"Generation expansion","title":"Generation expansion","text":"using SDDP, LinearAlgebra, GLPK, Test\n\nfunction generation_expansion(integrality_handler)\n build_cost = 1e4\n use_cost = 4\n num_units = 5\n capacities = ones(num_units)\n demand_vals =\n 0.5 * [\n 5 5 5 5 5 5 5 5\n 4 3 1 3 0 9 8 17\n 0 9 4 2 19 19 13 7\n 25 11 4 14 4 6 15 12\n 6 7 5 3 8 4 17 13\n ]\n # Cost of unmet demand\n penalty = 5e5\n # Discounting rate\n rho = 0.99\n model = SDDP.LinearPolicyGraph(\n stages = 5,\n lower_bound = 0.0,\n optimizer = GLPK.Optimizer,\n integrality_handler = integrality_handler,\n ) do sp, stage\n\n @variable(sp, 0 <= invested[1:num_units] <= 1, SDDP.State, Int, initial_value = 0)\n @variables(sp, begin\n generation >= 0\n unmet >= 0\n demand\n end)\n\n @constraints(\n sp,\n begin\n # Can't un-invest\n investment[i in 1:num_units], invested[i].out >= invested[i].in\n # Generation capacity\n sum(capacities[i] * invested[i].out for i = 1:num_units) >= generation\n # Meet demand or pay a penalty\n unmet >= demand - sum(generation)\n # For fewer iterations order the units to break symmetry, units are identical (tougher numerically)\n # [j in 1:(num_units - 1)], invested[j].out <= invested[j + 1].out\n end\n )\n # Demand is uncertain\n SDDP.parameterize(ω -> JuMP.fix(demand, ω), sp, demand_vals[stage, :])\n\n @expression(\n sp,\n investment_cost,\n build_cost * sum(invested[i].out - invested[i].in for i = 1:num_units)\n )\n @stageobjective(\n sp,\n (investment_cost + generation * use_cost) * rho^(stage - 1) + penalty * unmet\n )\n end\n SDDP.train(model, iteration_limit = 50, log_frequency = 10)\n @test SDDP.calculate_bound(model) ≈ 2.078860e6 atol = 1e3\n return\nend\n\n# Solve a continuous relaxation only, tough for SDDiP.\ngeneration_expansion(SDDP.ContinuousRelaxation())","category":"page"},{"location":"examples/generation_expansion/","page":"Generation expansion","title":"Generation expansion","text":"------------------------------------------------------------------------------\n SDDP.jl (c) Oscar Dowson, 2017-21\n\nProblem\n Nodes : 5\n State variables : 5\n Scenarios : 3.27680e+04\n Solver : 
serial mode\n\nNumerical stability report\n Non-zero Matrix range [1e+00, 1e+00]\n Non-zero Objective range [1e+00, 5e+05]\n Non-zero Bounds range [1e+00, 1e+00]\n Non-zero RHS range [0e+00, 0e+00]\nNo problems detected\n\n Iteration Simulation Bound Time (s) Proc. ID # Solves\n 10 7.799834e+06 2.078398e+06 3.310895e-02 1 420\n 20 2.299771e+06 2.078649e+06 6.032395e-02 1 840\n 30 3.465095e+04 2.078649e+06 9.135604e-02 1 1260\n 40 3.299731e+06 2.078825e+06 1.355970e-01 1 1680\n 50 1.299312e+06 2.078825e+06 1.637139e-01 1 2100\n\nTerminating training with status: iteration_limit\n------------------------------------------------------------------------------\n","category":"page"},{"location":"examples/generation_expansion/","page":"Generation expansion","title":"Generation expansion","text":"","category":"page"},{"location":"examples/generation_expansion/","page":"Generation expansion","title":"Generation expansion","text":"This page was generated using Literate.jl.","category":"page"},{"location":"examples/biobjective_hydro/","page":"Biobjective hydro-thermal","title":"Biobjective hydro-thermal","text":"EditURL = \"https://github.com/odow/SDDP.jl/blob/master/docs/src/examples/biobjective_hydro.jl\"","category":"page"},{"location":"examples/biobjective_hydro/#Biobjective-hydro-thermal","page":"Biobjective hydro-thermal","title":"Biobjective hydro-thermal","text":"","category":"section"},{"location":"examples/biobjective_hydro/","page":"Biobjective hydro-thermal","title":"Biobjective hydro-thermal","text":"using SDDP, GLPK, Statistics, Test\n\nfunction biobjective_hydro()\n model = SDDP.PolicyGraph(\n SDDP.LinearGraph(3),\n bellman_function = SDDP.BellmanFunction(lower_bound = 0.0),\n optimizer = GLPK.Optimizer,\n ) do subproblem, stage\n @variable(subproblem, 0 <= v <= 200, SDDP.State, initial_value = 50)\n\n SDDP.add_objective_state(\n subproblem,\n initial_value = 0.0,\n lower_bound = 0.0,\n upper_bound = 1.0,\n lipschitz = 1e6,\n ) do y, ω\n return y + ω.λ\n end\n\n @variables(subproblem, begin\n 0 <= g[i = 1:2] <= 100\n 0 <= u <= 150\n s >= 0\n a\n shortage_cost >= 0\n objective_1\n objective_2\n end)\n @constraints(subproblem, begin\n v.out == v.in - u - s + a\n demand_eq, g[1] + g[2] + u == 150\n objective_1 == g[1] + 10 * g[2]\n shortage_cost >= 40 - v.out\n shortage_cost >= 60 - 2 * v.out\n shortage_cost >= 80 - 4 * v.out\n objective_2 == shortage_cost\n end)\n price_noise_terms = (stage == 1) ? 
[0.04, 0.12, 0.22, 0.64] : [0.0]\n Ω = [(a = i, λ = j) for i = 0.0:5:50.0 for j in price_noise_terms]\n SDDP.parameterize(subproblem, Ω) do ω\n JuMP.fix(a, ω.a)\n # This *has* to be called from inside `SDDP.parameterize`,\n # otherwise it doesn't make sense.\n λ = SDDP.objective_state(subproblem)\n @stageobjective(subproblem, λ * objective_1 + (1 - λ) * objective_2)\n end\n end\n SDDP.train(model, iteration_limit = 50, log_frequency = 10)\n\n results = SDDP.simulate(model, 500)\n objectives = [sum(s[:stage_objective] for s in simulation) for simulation in results]\n sample_mean = round(Statistics.mean(objectives); digits = 2)\n sample_ci = round(1.96 * Statistics.std(objectives) / sqrt(500); digits = 2)\n println(\"Confidence_interval = $(sample_mean) ± $(sample_ci)\")\n @test SDDP.calculate_bound(model) ≈ sample_mean atol = sample_ci\n return\nend\n\nbiobjective_hydro()","category":"page"},{"location":"examples/biobjective_hydro/","page":"Biobjective hydro-thermal","title":"Biobjective hydro-thermal","text":"------------------------------------------------------------------------------\n SDDP.jl (c) Oscar Dowson, 2017-21\n\nProblem\n Nodes : 3\n State variables : 1\n Scenarios : 5.32400e+03\n Solver : serial mode\n\nNumerical stability report\n Non-zero Matrix range [1e+00, 1e+01]\n Non-zero Objective range [1e+00, 1e+00]\n Non-zero Bounds range [1e+02, 1e+06]\n Non-zero RHS range [4e+01, 2e+02]\nNo problems detected\n\n Iteration Simulation Bound Time (s) Proc. ID # Solves\n 10 8.840000e+01 1.183555e+02 7.197459e-01 1 690\n 20 3.000000e+01 1.849490e+02 7.625339e-01 1 1380\n 30 1.302000e+02 1.851810e+02 8.242009e-01 1 2070\n 40 4.965964e+02 1.864233e+02 8.752019e-01 1 2760\n 50 1.136000e+02 1.864322e+02 9.303849e-01 1 3450\n\nTerminating training with status: iteration_limit\n------------------------------------------------------------------------------\nConfidence_interval = 189.59 ± 14.64\n","category":"page"},{"location":"examples/biobjective_hydro/","page":"Biobjective hydro-thermal","title":"Biobjective hydro-thermal","text":"","category":"page"},{"location":"examples/biobjective_hydro/","page":"Biobjective hydro-thermal","title":"Biobjective hydro-thermal","text":"This page was generated using Literate.jl.","category":"page"},{"location":"tutorial/11_objective_states/","page":"Advanced I: objective states","title":"Advanced I: objective states","text":"EditURL = \"https://github.com/odow/SDDP.jl/blob/master/docs/src/tutorial/11_objective_states.jl\"","category":"page"},{"location":"tutorial/11_objective_states/#Advanced-I:-objective-states","page":"Advanced I: objective states","title":"Advanced I: objective states","text":"","category":"section"},{"location":"tutorial/11_objective_states/","page":"Advanced I: objective states","title":"Advanced I: objective states","text":"There are many applications in which we want to model a price process that follows some auto-regressive process. 
Common examples include stock prices on financial exchanges and spot-prices in energy markets.","category":"page"},{"location":"tutorial/11_objective_states/","page":"Advanced I: objective states","title":"Advanced I: objective states","text":"However, it is well known that these cannot be incorporated into SDDP because they result in cost-to-go functions that are convex with respect to some state variables (e.g., the reservoir levels) and concave with respect to other state variables (e.g., the spot price in the current stage).","category":"page"},{"location":"tutorial/11_objective_states/","page":"Advanced I: objective states","title":"Advanced I: objective states","text":"To overcome this problem, the approach in the literature has been to discretize the price process in order to model it using a Markovian policy graph like those discussed in Basic IV: Markov uncertainty.","category":"page"},{"location":"tutorial/11_objective_states/","page":"Advanced I: objective states","title":"Advanced I: objective states","text":"However, recent work offers a way to include stagewise-dependent objective uncertainty into the objective function of SDDP subproblems. Readers are directed to the following works for an introduction:","category":"page"},{"location":"tutorial/11_objective_states/","page":"Advanced I: objective states","title":"Advanced I: objective states","text":"Downward, A., Dowson, O., and Baucke, R. (2017). Stochastic dual dynamic programming with stagewise dependent objective uncertainty. Optimization Online. link\nDowson, O. PhD Thesis. University of Auckland, 2018. link","category":"page"},{"location":"tutorial/11_objective_states/","page":"Advanced I: objective states","title":"Advanced I: objective states","text":"The method discussed in the above works introduces the concept of an objective state into SDDP. Unlike normal state variables in SDDP (e.g., the volume of water in the reservoir), the cost-to-go function is concave with respect to the objective states. Thus, the method builds an outer approximation of the cost-to-go function in the normal state-space, and an inner approximation of the cost-to-go function in the objective state-space.","category":"page"},{"location":"tutorial/11_objective_states/","page":"Advanced I: objective states","title":"Advanced I: objective states","text":"warning: Warning\nSupport for objective states in SDDP.jl is experimental. Models are considerably more computationally intensive, the interface is less user-friendly, and there are subtle gotchas to be aware of. Only use this if you have read and understood the theory behind the method.","category":"page"},{"location":"tutorial/11_objective_states/#One-dimensional-objective-states","page":"Advanced I: objective states","title":"One-dimensional objective states","text":"","category":"section"},{"location":"tutorial/11_objective_states/","page":"Advanced I: objective states","title":"Advanced I: objective states","text":"Let's assume that the fuel cost is not fixed, but instead evolves according to a multiplicative auto-regressive process: fuel_cost[t] = ω * fuel_cost[t-1], where ω is drawn from the sample space [0.75, 0.9, 1.1, 1.25] with equal probability.","category":"page"},{"location":"tutorial/11_objective_states/","page":"Advanced I: objective states","title":"Advanced I: objective states","text":"An objective state can be added to a subproblem using the SDDP.add_objective_state function. This can only be called once per subproblem. 
If you want to add a multi-dimensional objective state, read Multi-dimensional objective states. SDDP.add_objective_state takes a number of keyword arguments. The two required ones are","category":"page"},{"location":"tutorial/11_objective_states/","page":"Advanced I: objective states","title":"Advanced I: objective states","text":"initial_value: the value of the objective state at the root node of the policy graph (i.e., identical to the initial_value when defining normal state variables).\nlipschitz: the Lipschitz constant of the cost-to-go function with respect to the objective state. In other words, this value is the maximum change in the cost-to-go function at any point in the state space, given a one-unit change in the objective state.","category":"page"},{"location":"tutorial/11_objective_states/","page":"Advanced I: objective states","title":"Advanced I: objective states","text":"There are also two optional keyword arguments: lower_bound and upper_bound, which give SDDP.jl hints (importantly, not constraints) about the domain of the objective state. Setting these bounds appropriately can improve the speed of convergence.","category":"page"},{"location":"tutorial/11_objective_states/","page":"Advanced I: objective states","title":"Advanced I: objective states","text":"Finally, SDDP.add_objective_state requires an update function. This function takes two arguments. The first is the incoming value of the objective state, and the second is the realization of the stagewise-independent noise term (set using SDDP.parameterize). The function should return the value of the objective state to be used in the current subproblem.","category":"page"},{"location":"tutorial/11_objective_states/","page":"Advanced I: objective states","title":"Advanced I: objective states","text":"This connection with the stagewise-independent noise term means that SDDP.parameterize must be called in a subproblem that defines an objective state. Inside SDDP.parameterize, the value of the objective state to be used in the current subproblem (i.e., after the update function) can be queried using SDDP.objective_state.","category":"page"},{"location":"tutorial/11_objective_states/","page":"Advanced I: objective states","title":"Advanced I: objective states","text":"Here is the full model with the objective state.","category":"page"},{"location":"tutorial/11_objective_states/","page":"Advanced I: objective states","title":"Advanced I: objective states","text":"using SDDP, GLPK\n\nmodel = SDDP.LinearPolicyGraph(\n stages = 3,\n sense = :Min,\n lower_bound = 0.0,\n optimizer = GLPK.Optimizer\n) do subproblem, t\n @variable(subproblem, 0 <= volume <= 200, SDDP.State, initial_value = 200)\n @variables(subproblem, begin\n thermal_generation >= 0\n hydro_generation >= 0\n hydro_spill >= 0\n inflow\n end)\n @constraints(subproblem, begin\n volume.out == volume.in + inflow - hydro_generation - hydro_spill\n demand_constraint, thermal_generation + hydro_generation == 150.0\n end)\n\n # Add an objective state. 
ω will be the same value that is called in\n # `SDDP.parameterize`.\n\n SDDP.add_objective_state(\n subproblem,\n initial_value = 50.0,\n lipschitz = 10_000.0,\n lower_bound = 50.0,\n upper_bound = 150.0\n ) do fuel_cost, ω\n return ω.fuel * fuel_cost\n end\n\n # Create the cartesian product of a multi-dimensional random variable.\n\n Ω = [\n (fuel = f, inflow = w)\n for f in [0.75, 0.9, 1.1, 1.25] for w in [0.0, 50.0, 100.0]\n ]\n\n SDDP.parameterize(subproblem, Ω) do ω\n # Query the current fuel cost.\n fuel_cost = SDDP.objective_state(subproblem)\n @stageobjective(subproblem, fuel_cost * thermal_generation)\n JuMP.fix(inflow, ω.inflow)\n end\nend","category":"page"},{"location":"tutorial/11_objective_states/","page":"Advanced I: objective states","title":"Advanced I: objective states","text":"A policy graph with 3 nodes.\n Node indices: 1, 2, 3\n","category":"page"},{"location":"tutorial/11_objective_states/","page":"Advanced I: objective states","title":"Advanced I: objective states","text":"After creating our model, we can train and simulate as usual.","category":"page"},{"location":"tutorial/11_objective_states/","page":"Advanced I: objective states","title":"Advanced I: objective states","text":"SDDP.train(model, iteration_limit = 10, run_numerical_stability_report=false)\n\nsimulations = SDDP.simulate(model, 1)\n\nprint(\"Finished training and simulating.\")","category":"page"},{"location":"tutorial/11_objective_states/","page":"Advanced I: objective states","title":"Advanced I: objective states","text":"------------------------------------------------------------------------------\n SDDP.jl (c) Oscar Dowson, 2017-21\n\nProblem\n Nodes : 3\n State variables : 1\n Scenarios : 1.72800e+03\n Solver : serial mode\n\n Iteration Simulation Bound Time (s) Proc. ID # Solves\n 1 7.593750e+03 4.083333e+03 1.867681e-01 1 39\n 2 6.187500e+03 4.427083e+03 1.887791e-01 1 78\n 3 7.734375e+03 4.740588e+03 1.908011e-01 1 117\n 4 5.625000e+03 4.740588e+03 1.928391e-01 1 156\n 5 0.000000e+00 4.805741e+03 1.948841e-01 1 195\n 6 1.890625e+04 4.943695e+03 1.968002e-01 1 234\n 7 5.591330e+03 4.943695e+03 1.988392e-01 1 273\n 8 2.109375e+03 4.950572e+03 2.011392e-01 1 312\n 9 2.227500e+03 4.951302e+03 2.034142e-01 1 351\n 10 0.000000e+00 4.951302e+03 2.055631e-01 1 390\n\nTerminating training with status: iteration_limit\n------------------------------------------------------------------------------\nFinished training and simulating.","category":"page"},{"location":"tutorial/11_objective_states/","page":"Advanced I: objective states","title":"Advanced I: objective states","text":"To demonstrate how the objective states are updated, consider the sequence of noise observations:","category":"page"},{"location":"tutorial/11_objective_states/","page":"Advanced I: objective states","title":"Advanced I: objective states","text":"[stage[:noise_term] for stage in simulations[1]]","category":"page"},{"location":"tutorial/11_objective_states/","page":"Advanced I: objective states","title":"Advanced I: objective states","text":"3-element Array{NamedTuple{(:fuel, :inflow),Tuple{Float64,Float64}},1}:\n (fuel = 1.1, inflow = 0.0) \n (fuel = 1.25, inflow = 50.0) \n (fuel = 0.75, inflow = 100.0)","category":"page"},{"location":"tutorial/11_objective_states/","page":"Advanced I: objective states","title":"Advanced I: objective states","text":"Thus, the fuel cost in the first stage should be 1.1 * 50 = 55. The fuel cost in the second stage should be 1.25 * 55 = 68.75. 
The fuel cost in the third stage should be 0.75 * 68.75 = 51.5625.","category":"page"},{"location":"tutorial/11_objective_states/","page":"Advanced I: objective states","title":"Advanced I: objective states","text":"To confirm this, the values of the objective state in a simulation can be queried using the :objective_state key.","category":"page"},{"location":"tutorial/11_objective_states/","page":"Advanced I: objective states","title":"Advanced I: objective states","text":"[stage[:objective_state] for stage in simulations[1]]","category":"page"},{"location":"tutorial/11_objective_states/","page":"Advanced I: objective states","title":"Advanced I: objective states","text":"3-element Array{Float64,1}:\n 55.00000000000001 \n 68.75000000000001 \n 51.562500000000014","category":"page"},{"location":"tutorial/11_objective_states/#Multi-dimensional-objective-states","page":"Advanced I: objective states","title":"Multi-dimensional objective states","text":"","category":"section"},{"location":"tutorial/11_objective_states/","page":"Advanced I: objective states","title":"Advanced I: objective states","text":"You can construct multi-dimensional price processes using NTuples. Just replace every scalar value associated with the objective state by a tuple. For example, initial_value = 1.0 becomes initial_value = (1.0, 2.0).","category":"page"},{"location":"tutorial/11_objective_states/","page":"Advanced I: objective states","title":"Advanced I: objective states","text":"Here is an example:","category":"page"},{"location":"tutorial/11_objective_states/","page":"Advanced I: objective states","title":"Advanced I: objective states","text":"model = SDDP.LinearPolicyGraph(\n stages = 3,\n sense = :Min,\n lower_bound = 0.0,\n optimizer = GLPK.Optimizer\n) do subproblem, t\n @variable(subproblem, 0 <= volume <= 200, SDDP.State, initial_value = 200)\n @variables(subproblem, begin\n thermal_generation >= 0\n hydro_generation >= 0\n hydro_spill >= 0\n inflow\n end)\n @constraints(subproblem, begin\n volume.out == volume.in + inflow - hydro_generation - hydro_spill\n demand_constraint, thermal_generation + hydro_generation == 150.0\n end)\n\n SDDP.add_objective_state(\n subproblem,\n initial_value = (50.0, 50.0),\n lipschitz = (10_000.0, 10_000.0),\n lower_bound = (50.0, 50.0),\n upper_bound = (150.0, 150.0)\n ) do fuel_cost, ω\n fuel_cost′ = fuel_cost[1] + 0.5 * (fuel_cost[1] - fuel_cost[2]) + ω.fuel\n return (fuel_cost′, fuel_cost[1])\n end\n\n Ω = [\n (fuel = f, inflow = w)\n for f in [-10.0, -5.0, 5.0, 10.0] for w in [0.0, 50.0, 100.0]\n ]\n\n SDDP.parameterize(subproblem, Ω) do ω\n (fuel_cost, fuel_cost_old) = SDDP.objective_state(subproblem)\n @stageobjective(subproblem, fuel_cost * thermal_generation)\n JuMP.fix(inflow, ω.inflow)\n end\nend\n\nSDDP.train(model, iteration_limit = 10, run_numerical_stability_report=false)\n\nsimulations = SDDP.simulate(model, 1)\n\nprint(\"Finished training and simulating.\")","category":"page"},{"location":"tutorial/11_objective_states/","page":"Advanced I: objective states","title":"Advanced I: objective states","text":"------------------------------------------------------------------------------\n SDDP.jl (c) Oscar Dowson, 2017-21\n\nProblem\n Nodes : 3\n State variables : 1\n Scenarios : 1.72800e+03\n Solver : serial mode\n\n Iteration Simulation Bound Time (s) Proc. 
ID # Solves\n 1 1.937500e+03 3.438976e+03 5.226650e-01 1 39\n 2 3.062500e+03 4.085467e+03 5.247340e-01 1 78\n 3 0.000000e+00 4.111110e+03 5.267580e-01 1 117\n 4 2.812500e+03 4.166941e+03 5.289860e-01 1 156\n 5 6.625000e+03 4.176758e+03 5.311201e-01 1 195\n 6 1.075000e+04 4.517928e+03 5.332129e-01 1 234\n 7 6.125282e+03 4.517928e+03 5.355599e-01 1 273\n 8 9.750000e+03 4.585852e+03 5.376499e-01 1 312\n 9 9.140998e+03 4.692845e+03 5.399508e-01 1 351\n 10 3.437500e+03 4.729854e+03 5.422060e-01 1 390\n\nTerminating training with status: iteration_limit\n------------------------------------------------------------------------------\nFinished training and simulating.","category":"page"},{"location":"tutorial/11_objective_states/","page":"Advanced I: objective states","title":"Advanced I: objective states","text":"This time, since our objective state is two-dimensional, the objective states are tuples with two elements:","category":"page"},{"location":"tutorial/11_objective_states/","page":"Advanced I: objective states","title":"Advanced I: objective states","text":"[stage[:objective_state] for stage in simulations[1]]","category":"page"},{"location":"tutorial/11_objective_states/","page":"Advanced I: objective states","title":"Advanced I: objective states","text":"3-element Array{Tuple{Float64,Float64},1}:\n (40.0, 50.0)\n (25.0, 40.0)\n (27.5, 25.0)","category":"page"},{"location":"tutorial/11_objective_states/#objective_state_warnings","page":"Advanced I: objective states","title":"Warnings","text":"","category":"section"},{"location":"tutorial/11_objective_states/","page":"Advanced I: objective states","title":"Advanced I: objective states","text":"There are a number of things to be aware of when using objective states.","category":"page"},{"location":"tutorial/11_objective_states/","page":"Advanced I: objective states","title":"Advanced I: objective states","text":"The key assumption is that price is independent of the states and actions in the model.\nThat means that the price cannot appear in any @constraints. Nor can you use any @variables in the update function.\nChoosing an appropriate Lipschitz constant is difficult.\nThe points discussed in Choosing an initial bound are relevant. The Lipschitz constant should not be chosen as large as possible (a smaller value will help with convergence and the numerical issues discussed above), but if chosen too small, it may cut off the feasible region and lead to a sub-optimal solution.\nYou need to ensure that the cost-to-go function is concave with respect to the objective state before the update.\nIf the update function is linear, this is always the case. In some situations, the update function can be nonlinear (e.g., multiplicative as we have above). In general, placing constraints on the price (e.g., clamp(price, 0, 1)) will destroy concavity. Caveat emptor. It's up to you if this is a problem. 
If it isn't you'll get a good heuristic with no guarantee of global optimality.","category":"page"},{"location":"tutorial/11_objective_states/","page":"Advanced I: objective states","title":"Advanced I: objective states","text":"","category":"page"},{"location":"tutorial/11_objective_states/","page":"Advanced I: objective states","title":"Advanced I: objective states","text":"This page was generated using Literate.jl.","category":"page"},{"location":"tutorial/06_warnings/","page":"Basic VI: words of warning","title":"Basic VI: words of warning","text":"EditURL = \"https://github.com/odow/SDDP.jl/blob/master/docs/src/tutorial/06_warnings.jl\"","category":"page"},{"location":"tutorial/06_warnings/#Basic-VI:-words-of-warning","page":"Basic VI: words of warning","title":"Basic VI: words of warning","text":"","category":"section"},{"location":"tutorial/06_warnings/","page":"Basic VI: words of warning","title":"Basic VI: words of warning","text":"SDDP is a powerful solution technique for multistage stochastic programming. However, there are a number of subtle things to be aware of before creating your own models.","category":"page"},{"location":"tutorial/06_warnings/#Numerical-stability","page":"Basic VI: words of warning","title":"Numerical stability","text":"","category":"section"},{"location":"tutorial/06_warnings/","page":"Basic VI: words of warning","title":"Basic VI: words of warning","text":"If you aren't aware, SDDP builds an outer-approximation to a convex function using cutting planes. This results in a formulation that is particularly hard for solvers like GLPK, Gurobi, and CPLEX to deal with. As a result, you may run into weird behavior. This behavior could include:","category":"page"},{"location":"tutorial/06_warnings/","page":"Basic VI: words of warning","title":"Basic VI: words of warning","text":"Iterations suddenly taking a long time (the solver stalled)\nSubproblems turning infeasible or unbounded after many iterations\nSolvers returning \"Numerical Error\" statuses","category":"page"},{"location":"tutorial/06_warnings/#Attempting-to-recover-from-serious-numerical-issues...","page":"Basic VI: words of warning","title":"Attempting to recover from serious numerical issues...","text":"","category":"section"},{"location":"tutorial/06_warnings/","page":"Basic VI: words of warning","title":"Basic VI: words of warning","text":"SDDP.jl will try hard to avoid and overcome numerical issues. If it encounters issues, you may see the warning Attempting to recover from serious numerical issues.... If you see this warning multiple times, you should try to follow the suggestions in this tutorial to improve the stability of your model.","category":"page"},{"location":"tutorial/06_warnings/#Problem-scaling","page":"Basic VI: words of warning","title":"Problem scaling","text":"","category":"section"},{"location":"tutorial/06_warnings/","page":"Basic VI: words of warning","title":"Basic VI: words of warning","text":"In almost all cases, the cause of this is poor problem scaling. 
For our purpose, poor problem scaling means having variables with very large numbers and variables with very small numbers in the same model.","category":"page"},{"location":"tutorial/06_warnings/","page":"Basic VI: words of warning","title":"Basic VI: words of warning","text":"tip: Tip\nGurobi has an excellent set of articles on numerical issues and how to avoid them.","category":"page"},{"location":"tutorial/06_warnings/","page":"Basic VI: words of warning","title":"Basic VI: words of warning","text":"Consider, for example, the hydro-thermal scheduling problem we have been discussing in previous tutorials.","category":"page"},{"location":"tutorial/06_warnings/","page":"Basic VI: words of warning","title":"Basic VI: words of warning","text":"If we define the volume of the reservoir in terms of m³, then a lake might have a capacity of 10^10 m³: @variable(subproblem, 0 <= volume <= 10^10). Moreover, the cost per cubic meter might be around \\$0.05/m³. To calculate the value of water in our reservoir, we need to multiply a variable on the order of 10^10 by one on the order of 10⁻²! That is twelve orders of magnitude!","category":"page"},{"location":"tutorial/06_warnings/","page":"Basic VI: words of warning","title":"Basic VI: words of warning","text":"To improve the performance of the SDDP algorithm (and reduce the chance of weird behavior), try to re-scale the units of the problem in order to reduce the largest difference in magnitude. For example, if we talk in terms of million m³, then we have a capacity of 10⁴ million m³, and a price of \\$50,000 per million m³. Now things are only one order of magnitude apart.","category":"page"},{"location":"tutorial/06_warnings/#Numerical-stability-report","page":"Basic VI: words of warning","title":"Numerical stability report","text":"","category":"section"},{"location":"tutorial/06_warnings/","page":"Basic VI: words of warning","title":"Basic VI: words of warning","text":"To aid in the diagnosis of numerical issues, you can call SDDP.numerical_stability_report. By default, this aggregates all of the nodes into a single report. You can produce a stability report for each node by passing by_node=true.","category":"page"},{"location":"tutorial/06_warnings/","page":"Basic VI: words of warning","title":"Basic VI: words of warning","text":"using SDDP\n\nmodel = SDDP.LinearPolicyGraph(\n stages = 2, lower_bound = -1e10, direct_mode = false\n) do subproblem, t\n @variable(subproblem, x >= -1e7, SDDP.State, initial_value=1e-5)\n @constraint(subproblem, 1e9 * x.out >= 1e-6 * x.in + 1e-8)\n @stageobjective(subproblem, 1e9 * x.out)\nend\n\nSDDP.numerical_stability_report(model)","category":"page"},{"location":"tutorial/06_warnings/","page":"Basic VI: words of warning","title":"Basic VI: words of warning","text":"Numerical stability report\n Non-zero Matrix range [1e-06, 1e+09]\n Non-zero Objective range [1e+00, 1e+09]\n Non-zero Bounds range [1e+07, 1e+10]\n Non-zero RHS range [1e-08, 1e-08]\nWARNING: numerical stability issues detected\n - Matrix range contains small coefficients\n - Matrix range contains large coefficients\n - Objective range contains large coefficients\n - Bounds range contains large coefficients\n - RHS range contains small coefficients\nVery large or small absolute values of coefficients\ncan cause numerical stability issues. 
Consider\nreformulating the model.\n\n","category":"page"},{"location":"tutorial/06_warnings/","page":"Basic VI: words of warning","title":"Basic VI: words of warning","text":"The report analyses the magnitude (in absolute terms) of the coefficients in the constraint matrix, the objective function, any variable bounds, and the RHS of the constraints. A warning will be thrown if SDDP.jl detects very large or small values. As discussed in Problem scaling, this is an indication that you should reformulate your model.","category":"page"},{"location":"tutorial/06_warnings/","page":"Basic VI: words of warning","title":"Basic VI: words of warning","text":"By default, a numerical stability check is run when you call SDDP.train, although it can be turned off by passing run_numerical_stability_report = false.","category":"page"},{"location":"tutorial/06_warnings/#Solver-specific-options","page":"Basic VI: words of warning","title":"Solver-specific options","text":"","category":"section"},{"location":"tutorial/06_warnings/","page":"Basic VI: words of warning","title":"Basic VI: words of warning","text":"If you have a particularly troublesome model, you should investigate setting solver-specific options to improve the numerical stability of each solver. For example, Gurobi has a NumericFocus option.","category":"page"},{"location":"tutorial/06_warnings/#Choosing-an-initial-bound","page":"Basic VI: words of warning","title":"Choosing an initial bound","text":"","category":"section"},{"location":"tutorial/06_warnings/","page":"Basic VI: words of warning","title":"Basic VI: words of warning","text":"One of the important requirements when building an SDDP model is to choose an appropriate bound on the objective (lower if minimizing, upper if maximizing). However, it can be hard to choose a bound if you don't know the solution! (Which is very likely.)","category":"page"},{"location":"tutorial/06_warnings/","page":"Basic VI: words of warning","title":"Basic VI: words of warning","text":"The bound should be as tight as possible, since this helps with convergence and the numerical issues discussed above. However, if the bound is chosen too tight (for example, a lower bound that is larger than the optimal cost when minimizing), it will cut off part of the true cost-to-go function and lead to a sub-optimal policy.","category":"page"},{"location":"tutorial/06_warnings/","page":"Basic VI: words of warning","title":"Basic VI: words of warning","text":"Consider the following simple model, where we first set lower_bound to 0.0.","category":"page"},{"location":"tutorial/06_warnings/","page":"Basic VI: words of warning","title":"Basic VI: words of warning","text":"using SDDP, GLPK\n\nmodel = SDDP.LinearPolicyGraph(\n stages = 3,\n sense = :Min,\n lower_bound = 0.0,\n optimizer = GLPK.Optimizer\n) do subproblem, t\n @variable(subproblem, x >= 0, SDDP.State, initial_value = 2)\n @variable(subproblem, u >= 0)\n @variable(subproblem, v >= 0)\n @constraint(subproblem, x.out == x.in - u)\n @constraint(subproblem, u + v == 1.5)\n @stageobjective(subproblem, t * v)\nend\n\nSDDP.train(model, iteration_limit = 5, run_numerical_stability_report = false)","category":"page"},{"location":"tutorial/06_warnings/","page":"Basic VI: words of warning","title":"Basic VI: words of warning","text":"------------------------------------------------------------------------------\n SDDP.jl (c) Oscar Dowson, 2017-21\n\nProblem\n Nodes : 3\n State variables : 1\n Scenarios : 1.00000e+00\n Solver : serial mode\n\n Iteration Simulation Bound Time (s) Proc. 
ID # Solves\n 1 6.500000e+00 3.000000e+00 2.010107e-03 1 6\n 2 3.500000e+00 3.500000e+00 2.368212e-03 1 12\n 3 3.500000e+00 3.500000e+00 2.759218e-03 1 18\n 4 3.500000e+00 3.500000e+00 3.114223e-03 1 24\n 5 3.500000e+00 3.500000e+00 3.410101e-03 1 30\n\nTerminating training with status: iteration_limit\n------------------------------------------------------------------------------\n","category":"page"},{"location":"tutorial/06_warnings/","page":"Basic VI: words of warning","title":"Basic VI: words of warning","text":"Now consider the case when we set the lower_bound to 10.0:","category":"page"},{"location":"tutorial/06_warnings/","page":"Basic VI: words of warning","title":"Basic VI: words of warning","text":"using SDDP, GLPK\n\nmodel = SDDP.LinearPolicyGraph(\n stages = 3,\n sense = :Min,\n lower_bound = 10.0,\n optimizer = GLPK.Optimizer\n) do subproblem, t\n @variable(subproblem, x >= 0, SDDP.State, initial_value = 2)\n @variable(subproblem, u >= 0)\n @variable(subproblem, v >= 0)\n @constraint(subproblem, x.out == x.in - u)\n @constraint(subproblem, u + v == 1.5)\n @stageobjective(subproblem, t * v)\nend\n\nSDDP.train(model, iteration_limit = 5, run_numerical_stability_report = false)","category":"page"},{"location":"tutorial/06_warnings/","page":"Basic VI: words of warning","title":"Basic VI: words of warning","text":"------------------------------------------------------------------------------\n SDDP.jl (c) Oscar Dowson, 2017-21\n\nProblem\n Nodes : 3\n State variables : 1\n Scenarios : 1.00000e+00\n Solver : serial mode\n\n Iteration Simulation Bound Time (s) Proc. ID # Solves\n 1 6.500000e+00 1.100000e+01 1.834154e-03 1 6\n 2 5.500000e+00 1.100000e+01 2.156973e-03 1 12\n 3 5.500000e+00 1.100000e+01 2.457142e-03 1 18\n 4 5.500000e+00 1.100000e+01 2.753973e-03 1 24\n 5 5.500000e+00 1.100000e+01 3.041983e-03 1 30\n\nTerminating training with status: iteration_limit\n------------------------------------------------------------------------------\n","category":"page"},{"location":"tutorial/06_warnings/","page":"Basic VI: words of warning","title":"Basic VI: words of warning","text":"How do we tell which is more appropriate? There are a few clues that you should look out for.","category":"page"},{"location":"tutorial/06_warnings/","page":"Basic VI: words of warning","title":"Basic VI: words of warning","text":"The bound converges to a value above (if minimizing) the simulated cost of the policy. In this case, the problem is deterministic, so it is easy to tell. But you can also check by performing a Monte Carlo simulation like we did in Basic II: adding uncertainty.\nThe bound converges to different values when we change the bound. This is another clear give-away. The bound provided by the user is only used in the initial iterations. It should not change the value of the converged policy. Thus, if you don't know an appropriate value for the bound, choose an initial value, and then increase (or decrease) the value of the bound to confirm that the value of the policy doesn't change.\nThe bound converges to a value close to the bound provided by the user. This varies between models, but notice that 11.0 is quite close to 10.0 compared with 3.5 and 0.0.","category":"page"},{"location":"tutorial/06_warnings/","page":"Basic VI: words of warning","title":"Basic VI: words of warning","text":"This concludes our sixth tutorial for SDDP.jl. 
You're now ready to start solving multistage stochastic programs with SDDP.jl!","category":"page"},{"location":"tutorial/06_warnings/","page":"Basic VI: words of warning","title":"Basic VI: words of warning","text":"","category":"page"},{"location":"tutorial/06_warnings/","page":"Basic VI: words of warning","title":"Basic VI: words of warning","text":"This page was generated using Literate.jl.","category":"page"},{"location":"examples/asset_management_simple/","page":"Asset management","title":"Asset management","text":"EditURL = \"https://github.com/odow/SDDP.jl/blob/master/docs/src/examples/asset_management_simple.jl\"","category":"page"},{"location":"examples/asset_management_simple/#Asset-management","page":"Asset management","title":"Asset management","text":"","category":"section"},{"location":"examples/asset_management_simple/","page":"Asset management","title":"Asset management","text":"Taken from the book J.R. Birge, F. Louveaux, Introduction to Stochastic Programming, Springer Series in Operations Research and Financial Engineering, Springer New York, New York, NY, 2011","category":"page"},{"location":"examples/asset_management_simple/","page":"Asset management","title":"Asset management","text":"using SDDP, GLPK, Test\n\nfunction asset_management_simple()\n model = SDDP.PolicyGraph(\n SDDP.MarkovianGraph(Array{Float64,2}[\n [1.0]',\n [0.5 0.5],\n [0.5 0.5; 0.5 0.5],\n [0.5 0.5; 0.5 0.5],\n ]),\n bellman_function = SDDP.BellmanFunction(lower_bound = -1_000.0),\n optimizer = GLPK.Optimizer,\n ) do subproblem, index\n (stage, markov_state) = index\n rstock = [1.25, 1.06]\n rbonds = [1.14, 1.12]\n @variable(subproblem, stocks >= 0, SDDP.State, initial_value = 0.0)\n @variable(subproblem, bonds >= 0, SDDP.State, initial_value = 0.0)\n if stage == 1\n @constraint(subproblem, stocks.out + bonds.out == 55)\n @stageobjective(subproblem, 0)\n elseif 1 < stage < 4\n @constraint(\n subproblem,\n rstock[markov_state] * stocks.in + rbonds[markov_state] * bonds.in ==\n stocks.out + bonds.out\n )\n @stageobjective(subproblem, 0)\n else\n @variable(subproblem, over >= 0)\n @variable(subproblem, short >= 0)\n @constraint(\n subproblem,\n rstock[markov_state] * stocks.in + rbonds[markov_state] * bonds.in - over + short == 80\n )\n @stageobjective(subproblem, -over + 4 * short)\n end\n end\n SDDP.train(model, iteration_limit = 25, log_frequency = 5)\n @test SDDP.termination_status(model) == :iteration_limit\n @test SDDP.calculate_bound(model) ≈ 1.514 atol = 1e-4\n return\nend\n\nasset_management_simple()","category":"page"},{"location":"examples/asset_management_simple/","page":"Asset management","title":"Asset management","text":"------------------------------------------------------------------------------\n SDDP.jl (c) Oscar Dowson, 2017-21\n\nProblem\n Nodes : 7\n State variables : 2\n Scenarios : 8.00000e+00\n Solver : serial mode\n\nNumerical stability report\n Non-zero Matrix range [1e+00, 1e+00]\n Non-zero Objective range [1e+00, 4e+00]\n Non-zero Bounds range [1e+03, 1e+03]\n Non-zero RHS range [6e+01, 8e+01]\nNo problems detected\n\n Iteration Simulation Bound Time (s) Proc. 
ID # Solves\n 5 -2.239977e-01 1.232831e+00 1.284909e-02 1 55\n 10 -2.496563e+01 1.514085e+00 2.135515e-02 1 110\n 15 1.534772e-12 1.514085e+00 2.504110e-02 1 165\n 20 -2.479988e+01 1.514085e+00 2.833414e-02 1 220\n 25 4.864000e+01 1.514085e+00 3.256702e-02 1 275\n\nTerminating training with status: iteration_limit\n------------------------------------------------------------------------------\n","category":"page"},{"location":"examples/asset_management_simple/","page":"Asset management","title":"Asset management","text":"","category":"page"},{"location":"examples/asset_management_simple/","page":"Asset management","title":"Asset management","text":"This page was generated using Literate.jl.","category":"page"},{"location":"guides/choose_a_stopping_rule/#Choose-a-stopping-rule","page":"Choose a stopping rule","title":"Choose a stopping rule","text":"","category":"section"},{"location":"guides/choose_a_stopping_rule/","page":"Choose a stopping rule","title":"Choose a stopping rule","text":"The theory of SDDP tells us that the algorithm converges to an optimal policy almost surely in a finite number of iterations. In practice, this number is very large. Therefore, we need some way of pre-emptively terminating SDDP when the solution is “good enough.” We call heuristics for pre-emptively terminating SDDP stopping rules.","category":"page"},{"location":"guides/choose_a_stopping_rule/#Basic-limits","page":"Choose a stopping rule","title":"Basic limits","text":"","category":"section"},{"location":"guides/choose_a_stopping_rule/","page":"Choose a stopping rule","title":"Choose a stopping rule","text":"The training of an SDDP policy can be terminated after a fixed number of iterations using the iteration_limit keyword.","category":"page"},{"location":"guides/choose_a_stopping_rule/","page":"Choose a stopping rule","title":"Choose a stopping rule","text":"SDDP.train(model, iteration_limit = 10)","category":"page"},{"location":"guides/choose_a_stopping_rule/","page":"Choose a stopping rule","title":"Choose a stopping rule","text":"The training of an SDDP policy can be terminated after a fixed number of seconds using the time_limit keyword.","category":"page"},{"location":"guides/choose_a_stopping_rule/","page":"Choose a stopping rule","title":"Choose a stopping rule","text":"SDDP.train(model, time_limit = 2.0)","category":"page"},{"location":"guides/choose_a_stopping_rule/#Stopping-rules","page":"Choose a stopping rule","title":"Stopping rules","text":"","category":"section"},{"location":"guides/choose_a_stopping_rule/","page":"Choose a stopping rule","title":"Choose a stopping rule","text":"In addition to the limits provided as keyword arguments, a variety of other stopping rules are available. These can be passed to SDDP.train as a vector to the stopping_rules keyword. 
For example:","category":"page"},{"location":"guides/choose_a_stopping_rule/","page":"Choose a stopping rule","title":"Choose a stopping rule","text":"SDDP.train(model, stopping_rules = [SDDP.BoundStalling(10, 1e-4)])","category":"page"},{"location":"guides/choose_a_stopping_rule/","page":"Choose a stopping rule","title":"Choose a stopping rule","text":"Here are the stopping rules implemented in SDDP.jl:","category":"page"},{"location":"guides/choose_a_stopping_rule/","page":"Choose a stopping rule","title":"Choose a stopping rule","text":"SDDP.IterationLimit\nSDDP.TimeLimit\nSDDP.Statistical\nSDDP.BoundStalling","category":"page"},{"location":"guides/choose_a_stopping_rule/#SDDP.IterationLimit","page":"Choose a stopping rule","title":"SDDP.IterationLimit","text":"IterationLimit(limit::Int)\n\nTerminate the algorithm after limit iterations.\n\n\n\n\n\n","category":"type"},{"location":"guides/choose_a_stopping_rule/#SDDP.TimeLimit","page":"Choose a stopping rule","title":"SDDP.TimeLimit","text":"TimeLimit(limit::Float64)\n\nTerminate the algorithm after limit seconds of computation.\n\n\n\n\n\n","category":"type"},{"location":"guides/choose_a_stopping_rule/#SDDP.Statistical","page":"Choose a stopping rule","title":"SDDP.Statistical","text":"Statistical(; num_replications, iteration_period = 1, z_score = 1.96,\n verbose = true)\n\nPerform an in-sample Monte Carlo simulation of the policy with num_replications replications every iteration_period iterations. Terminate if the deterministic bound (lower if minimizing) falls into the confidence interval for the mean of the simulated cost. If verbose = true, print the confidence interval.\n\nNote that this test assumes that the simulated values are normally distributed. In infinite horizon models, this is almost never the case. The distribution is usually closer to exponential or log-normal.\n\n\n\n\n\n","category":"type"},{"location":"guides/choose_a_stopping_rule/#SDDP.BoundStalling","page":"Choose a stopping rule","title":"SDDP.BoundStalling","text":"BoundStalling(num_previous_iterations::Int, tolerance::Float64)\n\nTerminate the algorithm once the deterministic bound (lower if minimizing, upper if maximizing) fails to improve by more than tolerance in absolute terms for more than num_previous_iterations consecutive iterations.\n\n\n\n\n\n","category":"type"},{"location":"examples/asset_management_stagewise/","page":"Asset management with modifications","title":"Asset management with modifications","text":"EditURL = \"https://github.com/odow/SDDP.jl/blob/master/docs/src/examples/asset_management_stagewise.jl\"","category":"page"},{"location":"examples/asset_management_stagewise/#Asset-management-with-modifications","page":"Asset management with modifications","title":"Asset management with modifications","text":"","category":"section"},{"location":"examples/asset_management_stagewise/","page":"Asset management with modifications","title":"Asset management with modifications","text":"A modified version of the Asset Management Problem, taken from the book J.R. Birge, F. 
Louveaux, Introduction to Stochastic Programming, Springer Series in Operations Research and Financial Engineering, Springer New York, New York, NY, 2011","category":"page"},{"location":"examples/asset_management_stagewise/","page":"Asset management with modifications","title":"Asset management with modifications","text":"using SDDP, GLPK, Test\n\nfunction asset_management_stagewise(; cut_type)\n ws = [1.25, 1.06]\n wb = [1.14, 1.12]\n Phi = [-1, 5]\n Psi = [0.02, 0.0]\n\n model = SDDP.MarkovianPolicyGraph(\n sense = :Max,\n transition_matrices = Array{Float64,2}[\n [1.0]',\n [0.5 0.5],\n [0.5 0.5; 0.5 0.5],\n [0.5 0.5; 0.5 0.5],\n ],\n bellman_function = SDDP.BellmanFunction(\n upper_bound = 1000.0,\n cut_type = cut_type,\n ),\n optimizer = GLPK.Optimizer,\n ) do subproblem, node\n t, i = node\n @variable(subproblem, xs >= 0, SDDP.State, initial_value = 0)\n @variable(subproblem, xb >= 0, SDDP.State, initial_value = 0)\n if t == 1\n @constraint(subproblem, xs.out + xb.out == 55 + xs.in + xb.in)\n @stageobjective(subproblem, 0)\n elseif t == 2 || t == 3\n @variable(subproblem, phi)\n @constraint(subproblem, ws[i] * xs.in + wb[i] * xb.in + phi == xs.out + xb.out)\n SDDP.parameterize(subproblem, [1, 2], [0.6, 0.4]) do ω\n JuMP.fix(phi, Phi[ω])\n @stageobjective(subproblem, Psi[ω] * xs.out)\n end\n else\n @variable(subproblem, u >= 0)\n @variable(subproblem, v >= 0)\n @constraint(subproblem, ws[i] * xs.in + wb[i] * xb.in + u - v == 80)\n @stageobjective(subproblem, -4u + v)\n end\n end\n SDDP.train(\n model;\n iteration_limit = 100,\n log_frequency = 10,\n risk_measure = (node) -> begin\n if node[1] != 3\n SDDP.Expectation()\n else\n SDDP.EAVaR(lambda = 0.5, beta = 0.5)\n end\n end,\n )\n @test SDDP.calculate_bound(model) ≈ 1.278 atol = 1e-3\n return\nend\n\nasset_management_stagewise(cut_type = SDDP.SINGLE_CUT)\n\nasset_management_stagewise(cut_type = SDDP.MULTI_CUT)","category":"page"},{"location":"examples/asset_management_stagewise/","page":"Asset management with modifications","title":"Asset management with modifications","text":"------------------------------------------------------------------------------\n SDDP.jl (c) Oscar Dowson, 2017-21\n\nProblem\n Nodes : 7\n State variables : 2\n Scenarios : 3.20000e+01\n Solver : serial mode\n\nNumerical stability report\n Non-zero Matrix range [1e+00, 1e+00]\n Non-zero Objective range [2e-02, 4e+00]\n Non-zero Bounds range [1e+03, 1e+03]\n Non-zero RHS range [6e+01, 8e+01]\nNo problems detected\n\n Iteration Simulation Bound Time (s) Proc. 
ID # Solves\n 10 2.154550e+01 1.451485e+00 6.547780e-01 1 150\n 20 3.014193e+01 1.278410e+00 6.649311e-01 1 300\n 30 1.522212e+00 1.278410e+00 6.758761e-01 1 450\n 40 2.825352e+00 1.278410e+00 6.876349e-01 1 600\n 50 -2.315795e+01 1.278410e+00 6.996570e-01 1 750\n 60 -4.523775e+01 1.278410e+00 7.128801e-01 1 900\n 70 2.115547e+01 1.278410e+00 7.263260e-01 1 1050\n 80 1.426676e+01 1.278410e+00 7.536850e-01 1 1200\n 90 1.261296e+01 1.278410e+00 7.681620e-01 1 1350\n 100 -1.051817e+00 1.278410e+00 7.832279e-01 1 1500\n\nTerminating training with status: iteration_limit\n------------------------------------------------------------------------------\n------------------------------------------------------------------------------\n SDDP.jl (c) Oscar Dowson, 2017-21\n\nProblem\n Nodes : 7\n State variables : 2\n Scenarios : 3.20000e+01\n Solver : serial mode\n\nNumerical stability report\n Non-zero Matrix range [1e+00, 1e+00]\n Non-zero Objective range [2e-02, 4e+00]\n Non-zero Bounds range [1e+03, 1e+03]\n Non-zero RHS range [6e+01, 8e+01]\nNo problems detected\n\n Iteration Simulation Bound Time (s) Proc. ID # Solves\n 10 9.097326e+00 1.426792e+00 1.188612e-02 1 150\n 20 1.302014e+00 1.348579e+00 2.194905e-02 1 300\n 30 1.788011e+01 1.278659e+00 3.267002e-02 1 450\n 40 5.070682e+00 1.281643e+00 4.490113e-02 1 600\n 50 -5.003795e+01 1.278410e+00 5.667114e-02 1 750\n 60 -4.523775e+01 1.278410e+00 7.009411e-02 1 900\n 70 1.111084e+01 1.278410e+00 8.331013e-02 1 1050\n 80 -3.575118e+00 1.278410e+00 9.702802e-02 1 1200\n 90 8.288929e+00 1.278410e+00 1.157219e-01 1 1350\n 100 -1.051817e+00 1.278410e+00 1.327829e-01 1 1500\n\nTerminating training with status: iteration_limit\n------------------------------------------------------------------------------\n","category":"page"},{"location":"examples/asset_management_stagewise/","page":"Asset management with modifications","title":"Asset management with modifications","text":"","category":"page"},{"location":"examples/asset_management_stagewise/","page":"Asset management with modifications","title":"Asset management with modifications","text":"This page was generated using Literate.jl.","category":"page"},{"location":"examples/belief/","page":"Partially observable inventory management","title":"Partially observable inventory management","text":"EditURL = \"https://github.com/odow/SDDP.jl/blob/master/docs/src/examples/belief.jl\"","category":"page"},{"location":"examples/belief/#Partially-observable-inventory-management","page":"Partially observable inventory management","title":"Partially observable inventory management","text":"","category":"section"},{"location":"examples/belief/","page":"Partially observable inventory management","title":"Partially observable inventory management","text":"using SDDP, GLPK, Random, Statistics, Test\n\nfunction inventory_management_problem()\n demand_values = [1.0, 2.0]\n demand_prob = Dict(:Ah => [0.2, 0.8], :Bh => [0.8, 0.2])\n graph = SDDP.Graph(\n :root_node,\n [:Ad, :Ah, :Bd, :Bh],\n [\n (:root_node => :Ad, 0.5),\n (:root_node => :Bd, 0.5),\n (:Ad => :Ah, 1.0),\n (:Ah => :Ad, 0.8),\n (:Ah => :Bd, 0.1),\n (:Bd => :Bh, 1.0),\n (:Bh => :Bd, 0.8),\n (:Bh => :Ad, 0.1),\n ],\n )\n SDDP.add_ambiguity_set(graph, [:Ad, :Bd], 1e2)\n SDDP.add_ambiguity_set(graph, [:Ah, :Bh], 1e2)\n\n model = SDDP.PolicyGraph(\n graph,\n lower_bound = 0.0,\n optimizer = GLPK.Optimizer,\n ) do subproblem, node\n @variables(subproblem, begin\n 0 <= inventory <= 2, (SDDP.State, initial_value = 0.0)\n buy >= 0\n demand\n end)\n 
@constraint(subproblem, demand == inventory.in - inventory.out + buy)\n if node == :Ad || node == :Bd || node == :D\n JuMP.fix(demand, 0)\n @stageobjective(subproblem, buy)\n else\n SDDP.parameterize(subproblem, demand_values, demand_prob[node]) do ω\n JuMP.fix(demand, ω)\n end\n @stageobjective(subproblem, 2 * buy + inventory.out)\n end\n end\n # Train the policy.\n Random.seed!(123)\n SDDP.train(\n model;\n iteration_limit = 100,\n cut_type = SDDP.SINGLE_CUT,\n log_frequency = 10,\n )\n results = SDDP.simulate(model, 500)\n objectives = [sum(s[:stage_objective] for s in simulation) for simulation in results]\n sample_mean = round(Statistics.mean(objectives); digits = 2)\n sample_ci = round(1.96 * Statistics.std(objectives) / sqrt(500); digits = 2)\n @test SDDP.calculate_bound(model) ≈ sample_mean atol = sample_ci\n return\nend\n\ninventory_management_problem()","category":"page"},{"location":"examples/belief/","page":"Partially observable inventory management","title":"Partially observable inventory management","text":"------------------------------------------------------------------------------\n SDDP.jl (c) Oscar Dowson, 2017-21\n\nProblem\n Nodes : 4\n State variables : 1\n Scenarios : Inf\n Solver : serial mode\n\nNumerical stability report\n Non-zero Matrix range [1e+00, 1e+00]\n Non-zero Objective range [1e+00, 2e+00]\n Non-zero Bounds range [2e+00, 1e+02]\n Non-zero RHS range [0e+00, 0e+00]\nNo problems detected\n\n Iteration Simulation Bound Time (s) Proc. ID # Solves\n 10 1.643844e+01 1.182080e+01 1.803198e+00 1 804\n 20 5.345296e+01 1.634754e+01 2.042317e+00 1 2112\n 30 3.966457e+01 1.798392e+01 2.315569e+00 1 3260\n 40 2.997291e+01 1.848047e+01 2.625775e+00 1 4200\n 50 2.408579e+01 1.859564e+01 2.955907e+00 1 4980\n 60 7.024441e+00 1.865058e+01 3.191740e+00 1 5432\n 70 2.693996e+01 1.869724e+01 3.984609e+00 1 6580\n 80 1.402047e+01 1.874705e+01 4.800237e+00 1 7656\n 90 2.401105e+01 1.877282e+01 5.634025e+00 1 8628\n 100 1.800013e+01 1.879286e+01 7.267354e+00 1 9632\n\nTerminating training with status: iteration_limit\n------------------------------------------------------------------------------\n","category":"page"},{"location":"examples/belief/","page":"Partially observable inventory management","title":"Partially observable inventory management","text":"","category":"page"},{"location":"examples/belief/","page":"Partially observable inventory management","title":"Partially observable inventory management","text":"This page was generated using Literate.jl.","category":"page"},{"location":"","page":"Home","title":"Home","text":"CurrentModule = SDDP","category":"page"},{"location":"#SDDP.jl","page":"Home","title":"SDDP.jl","text":"","category":"section"},{"location":"","page":"Home","title":"Home","text":"SDDP.jl is a package for solving large multistage convex stochastic programming problems using stochastic dual dynamic programming. 
In this manual, we're going to assume a reasonable amount of background knowledge about stochastic optimization, the SDDP algorithm, Julia, and JuMP.","category":"page"},{"location":"","page":"Home","title":"Home","text":"tip: Tip\nIf you haven't used JuMP before, we recommend that you read the JuMP documentation and try building and solving JuMP models before trying SDDP.jl.","category":"page"},{"location":"#Installation","page":"Home","title":"Installation","text":"","category":"section"},{"location":"","page":"Home","title":"Home","text":"You can install SDDP.jl as follows:","category":"page"},{"location":"","page":"Home","title":"Home","text":"julia> ] add SDDP","category":"page"},{"location":"#Tutorials","page":"Home","title":"Tutorials","text":"","category":"section"},{"location":"","page":"Home","title":"Home","text":"Once you've got SDDP.jl installed, you should read some tutorials, beginning with Basic I: first steps.","category":"page"},{"location":"","page":"Home","title":"Home","text":"If you want a basic understanding of the algorithm behind SDDP.jl, start with Theory I: an intro to SDDP.","category":"page"},{"location":"#How-to-guides","page":"Home","title":"How-to guides","text":"","category":"section"},{"location":"","page":"Home","title":"Home","text":"If you just want help on a specific topic, check out one of the how-to guides. A good one to get started on is Debug a model.","category":"page"},{"location":"#Examples","page":"Home","title":"Examples","text":"","category":"section"},{"location":"","page":"Home","title":"Home","text":"SDDP.jl also contains a number of examples. A good one to get started on is the Hydro-thermal scheduling problem. In particular, it shows how to solve an infinite horizon problem.","category":"page"},{"location":"#API-Reference","page":"Home","title":"API Reference","text":"","category":"section"},{"location":"","page":"Home","title":"Home","text":"If you just want help on a specific function, see the API Reference page.","category":"page"},{"location":"#Citing-SDDP.jl","page":"Home","title":"Citing SDDP.jl","text":"","category":"section"},{"location":"","page":"Home","title":"Home","text":"If you use SDDP.jl, we ask that you please cite the following:","category":"page"},{"location":"","page":"Home","title":"Home","text":"@article{dowson_sddp.jl,\n\ttitle = {{SDDP}.jl: a {Julia} package for stochastic dual dynamic programming},\n\tjournal = {INFORMS Journal on Computing},\n\tauthor = {Dowson, O. 
and Kapelevich, L.},\n\tdoi = {https://doi.org/10.1287/ijoc.2020.0987},\n\tnote = {Articles in Advance},\n\tyear = {2020}\n}","category":"page"},{"location":"","page":"Home","title":"Home","text":"Here is an earlier preprint.","category":"page"},{"location":"","page":"Home","title":"Home","text":"If you use the infinite horizon functionality, we ask that you please cite the following:","category":"page"},{"location":"","page":"Home","title":"Home","text":"@article{dowson_policy_graph,\n\ttitle = {The policy graph decomposition of multistage stochastic\n optimization problems},\n\tdoi = {https://doi.org/10.1002/net.21932},\n\tjournal = {Networks},\n\tauthor = {Dowson, O.},\n\tvolume = {76},\n\tissue = {1},\n\tpages = {3-23},\n\tyear = {2020}\n}","category":"page"},{"location":"","page":"Home","title":"Home","text":"Here is an earlier preprint.","category":"page"},{"location":"","page":"Home","title":"Home","text":"If you use the partially observable functionality, we ask that you please cite the following:","category":"page"},{"location":"","page":"Home","title":"Home","text":"@article{dowson_pomsp,\n\ttitle = {Partially observable multistage stochastic programming},\n\tdoi = {https://doi.org/10.1016/j.orl.2020.06.005},\n\tjournal = {Operations Research Letters},\n\tauthor = {Dowson, O., Morton, D.P., and Pagnoncelli, B.K.},\n\tvolume = {48},\n\tissue = {4},\n\tpages = {505-512},\n\tyear = {2020}\n}","category":"page"},{"location":"","page":"Home","title":"Home","text":"Here is an earlier preprint.","category":"page"},{"location":"","page":"Home","title":"Home","text":"If you use the entropic risk measure, we ask that you please cite the following:","category":"page"},{"location":"","page":"Home","title":"Home","text":"@article{dowson_entropic,\n\ttitle = {Multistage stochastic programs with the entropic risk measure},\n\tjournal = {Optimization Online},\n\tauthor = {Dowson, O., Morton, D.P., and Pagnoncelli, B.K.},\n\turl = {http://www.optimization-online.org/DB_HTML/2020/08/7984.html}\n\tyear = {2020}\n}","category":"page"},{"location":"","page":"Home","title":"Home","text":"Here is a preprint.","category":"page"},{"location":"tutorial/21_theory_intro/","page":"Theory I: an intro to SDDP","title":"Theory I: an intro to SDDP","text":"EditURL = \"https://github.com/odow/SDDP.jl/blob/master/docs/src/tutorial/21_theory_intro.jl\"","category":"page"},{"location":"tutorial/21_theory_intro/#Theory-I:-an-intro-to-SDDP","page":"Theory I: an intro to SDDP","title":"Theory I: an intro to SDDP","text":"","category":"section"},{"location":"tutorial/21_theory_intro/","page":"Theory I: an intro to SDDP","title":"Theory I: an intro to SDDP","text":"note: Note\nThis tutorial is aimed at advanced undergraduates or early-stage graduate students. You don't need prior exposure to stochastic programming! (Indeed, it may be better if you don't, because our approach is non-standard in the literature.)This tutorial is also a living document. 
If parts are unclear, please open an issue so it can be improved!","category":"page"},{"location":"tutorial/21_theory_intro/","page":"Theory I: an intro to SDDP","title":"Theory I: an intro to SDDP","text":"This tutorial will teach you how the stochastic dual dynamic programming algorithm works by implementing a simplified version of the algorithm.","category":"page"},{"location":"tutorial/21_theory_intro/","page":"Theory I: an intro to SDDP","title":"Theory I: an intro to SDDP","text":"Our implementation is very much a \"vanilla\" version of SDDP; it doesn't have (m)any fancy computational tricks (e.g., the ones included in SDDP.jl) that you need to code a performant or stable version that will work on realistic instances. However, our simplified implementation will work on arbitrary policy graphs, including those with cycles such as infinite horizon problems!","category":"page"},{"location":"tutorial/21_theory_intro/","page":"Theory I: an intro to SDDP","title":"Theory I: an intro to SDDP","text":"Packages","category":"page"},{"location":"tutorial/21_theory_intro/","page":"Theory I: an intro to SDDP","title":"Theory I: an intro to SDDP","text":"This tutorial uses the following packages. For clarity, we call import PackageName so that we must prefix PackageName. to all functions and structs provided by that package. Everything not prefixed is either part of base Julia, or we wrote it.","category":"page"},{"location":"tutorial/21_theory_intro/","page":"Theory I: an intro to SDDP","title":"Theory I: an intro to SDDP","text":"import ForwardDiff\nimport GLPK\nimport JuMP\nimport Statistics","category":"page"},{"location":"tutorial/21_theory_intro/","page":"Theory I: an intro to SDDP","title":"Theory I: an intro to SDDP","text":"tip: Tip\nYou can follow along by installing the above packages, and copy-pasting the code we will write into a Julia REPL. Alternatively, you can download the Julia .jl file which created this tutorial from Github.","category":"page"},{"location":"tutorial/21_theory_intro/#Preliminaries:-background-theory","page":"Theory I: an intro to SDDP","title":"Preliminaries: background theory","text":"","category":"section"},{"location":"tutorial/21_theory_intro/","page":"Theory I: an intro to SDDP","title":"Theory I: an intro to SDDP","text":"tip: Tip\nThis section is copied verbatim from Basic I: first steps. If it's familiar, skip to Preliminaries: Kelley's cutting plane algorithm.","category":"page"},{"location":"tutorial/21_theory_intro/","page":"Theory I: an intro to SDDP","title":"Theory I: an intro to SDDP","text":"Multistage stochastic programming is complicated, and the literature has not settled upon standard naming conventions, so we must begin with some unavoidable theory and notation.","category":"page"},{"location":"tutorial/21_theory_intro/#Policy-graphs","page":"Theory I: an intro to SDDP","title":"Policy graphs","text":"","category":"section"},{"location":"tutorial/21_theory_intro/","page":"Theory I: an intro to SDDP","title":"Theory I: an intro to SDDP","text":"A multistage stochastic program can be modeled by a policy graph. A policy graph is a graph with nodes and arcs. The simplest type of policy graph is a linear graph. 
Here's a linear graph with three nodes:","category":"page"},{"location":"tutorial/21_theory_intro/","page":"Theory I: an intro to SDDP","title":"Theory I: an intro to SDDP","text":"(Image: Linear policy graph)","category":"page"},{"location":"tutorial/21_theory_intro/","page":"Theory I: an intro to SDDP","title":"Theory I: an intro to SDDP","text":"In addition to nodes 1, 2, and 3, there is also a root node (the circle), and three arcs. Each arc has an origin node and a destination node, like 1 => 2, and a corresponding probability of transitioning from the origin to the destination. Unless specified, we assume that the arc probabilities are uniform over the number of outgoing arcs. Thus, in this picture the arc probabilities are all 1.0. The squiggly lines denote random variables that we will discuss shortly.","category":"page"},{"location":"tutorial/21_theory_intro/","page":"Theory I: an intro to SDDP","title":"Theory I: an intro to SDDP","text":"We denote the set of nodes by mathcalN, the root node by R, and the probability of transitioning from node i to node j by p_ij. (If no arc exists, then p_ij = 0.) We define the set of successors of node i as i^+ = j in mathcalN p_ij 0.","category":"page"},{"location":"tutorial/21_theory_intro/","page":"Theory I: an intro to SDDP","title":"Theory I: an intro to SDDP","text":"Each square node in the graph corresponds to a place at which the agent makes a decision, and we call moments in time at which the agent makes a decision stages. By convention, we try to draw policy graphs from left-to-right, with the stages as columns. There can be more than one node in a stage! Here's an example, taken from the paper Dowson (2020):","category":"page"},{"location":"tutorial/21_theory_intro/","page":"Theory I: an intro to SDDP","title":"Theory I: an intro to SDDP","text":"(Image: Markovian policy graph)","category":"page"},{"location":"tutorial/21_theory_intro/","page":"Theory I: an intro to SDDP","title":"Theory I: an intro to SDDP","text":"The columns represent time, and the rows represent different states of the world. In this case, the rows represent different prices that milk can be sold for at the end of each year. The squiggly lines denote a multivariate random variable that models the weekly amount of rainfall that occurs. You can think of the nodes as forming a Markov chain, therefore, we call problems with a structure like this Markovian policy graphs. Moreover, note that policy graphs can have cycles! This allows them to model infinite horizon problems.","category":"page"},{"location":"tutorial/21_theory_intro/","page":"Theory I: an intro to SDDP","title":"Theory I: an intro to SDDP","text":"note: Note\nThe sum of probabilities on the outgoing arcs of node i can be less than 1, i.e., sumlimits_jin i^+ p_ij le 1. What does this mean? One interpretation is that the probability is a discount factor. Another interpretation is that there is an implicit \"zero\" node that we have not modeled, with p_i0 = 1 - sumlimits_jin i^+ p_ij. This zero node has C_0(x u omega) = 0, and 0^+ = varnothing.","category":"page"},{"location":"tutorial/21_theory_intro/#Problem-notation","page":"Theory I: an intro to SDDP","title":"Problem notation","text":"","category":"section"},{"location":"tutorial/21_theory_intro/","page":"Theory I: an intro to SDDP","title":"Theory I: an intro to SDDP","text":"A common feature of multistage stochastic optimization problems is that they model an agent controlling a system over time. 
This system can be described by three types of variables.","category":"page"},{"location":"tutorial/21_theory_intro/","page":"Theory I: an intro to SDDP","title":"Theory I: an intro to SDDP","text":"State variables track a property of the system over time.\nEach node has an associated incoming state variable (the value of the state at the start of the node), and an outgoing state variable (the value of the state at the end of the node).\nExamples of state variables include the volume of water in a reservoir, the number of units of inventory in a warehouse, or the spatial position of a moving vehicle.\nBecause state variables track the system over time, each node must have the same set of state variables.\nWe denote state variables by the letter x for the incoming state variable and x^prime for the outgoing state variable.\nControl variables are actions taken (implicitly or explicitly) by the agent within a node which modify the state variables.\nExamples of control variables include releases of water from the reservoir, sales or purchasing decisions, and acceleration or braking of the vehicle.\nControl variables are local to a node i, and they can differ between nodes. For example, some control variables may only be available within certain nodes.\nWe denote control variables by the letter u.\nRandom variables are finite, discrete, exogenous random variables that the agent observes at the start of a node, before the control variables are decided.\nExamples of random variables include rainfall inflow into a reservoir, probabilistic perishing of inventory, and steering errors in a vehicle.\nRandom variables are local to a node i, and they can differ between nodes. For example, some nodes may have random variables, and some nodes may not.\nWe denote random variables by the Greek letter omega and the sample space from which they are drawn by Omega_i. The probability of sampling omega is denoted p_omega for simplicity.\nImportantly, the random variable associated with node i is independent of the random variables in all other nodes.","category":"page"},{"location":"tutorial/21_theory_intro/","page":"Theory I: an intro to SDDP","title":"Theory I: an intro to SDDP","text":"In a node i, the three variables are related by a transition function, which maps the incoming state, the controls, and the random variables to the outgoing state as follows: x^prime = T_i(x u omega).","category":"page"},{"location":"tutorial/21_theory_intro/","page":"Theory I: an intro to SDDP","title":"Theory I: an intro to SDDP","text":"As a result of entering a node i with the incoming state x, observing random variable omega, and choosing control u, the agent incurs a cost C_i(x u omega). (If the agent is a maximizer, this can be a profit, or a negative cost.) We call C_i the stage objective.","category":"page"},{"location":"tutorial/21_theory_intro/","page":"Theory I: an intro to SDDP","title":"Theory I: an intro to SDDP","text":"To choose their control variables in node i, the agent uses a decision rule u = pi_i(x omega), which is a function that maps the incoming state variable and observation of the random variable to a control u. 
This control must satisfy some feasibility requirements u in U_i(x omega).","category":"page"},{"location":"tutorial/21_theory_intro/","page":"Theory I: an intro to SDDP","title":"Theory I: an intro to SDDP","text":"The set of decision rules, with one element for each node in the policy graph, is called a policy.","category":"page"},{"location":"tutorial/21_theory_intro/","page":"Theory I: an intro to SDDP","title":"Theory I: an intro to SDDP","text":"The goal of the agent is to find a policy that minimizes the expected cost of starting at the root node with some initial condition x_R, and proceeding from node to node along the probabilistic arcs until they reach a node with no outgoing arcs (or they reach an implicit \"zero\" node).","category":"page"},{"location":"tutorial/21_theory_intro/","page":"Theory I: an intro to SDDP","title":"Theory I: an intro to SDDP","text":"min_pi mathbbE_i in R^+ omega in Omega_iV_i^pi(x_R omega)","category":"page"},{"location":"tutorial/21_theory_intro/","page":"Theory I: an intro to SDDP","title":"Theory I: an intro to SDDP","text":"where","category":"page"},{"location":"tutorial/21_theory_intro/","page":"Theory I: an intro to SDDP","title":"Theory I: an intro to SDDP","text":"V_i^pi(x omega) = C_i(x u omega) + mathbbE_j in i^+ varphi in Omega_jV_j(x^prime varphi)","category":"page"},{"location":"tutorial/21_theory_intro/","page":"Theory I: an intro to SDDP","title":"Theory I: an intro to SDDP","text":"where u = pi_i(x omega) in U_i(x omega), and x^prime = T_i(x u omega).","category":"page"},{"location":"tutorial/21_theory_intro/","page":"Theory I: an intro to SDDP","title":"Theory I: an intro to SDDP","text":"The expectations are a bit complicated, but they are equivalent to:","category":"page"},{"location":"tutorial/21_theory_intro/","page":"Theory I: an intro to SDDP","title":"Theory I: an intro to SDDP","text":"mathbbE_j in i^+ varphi in Omega_jV_j(x^prime varphi) = sumlimits_j in i^+ p_ij sumlimits_varphi in Omega_j p_varphiV_j(x^prime varphi)","category":"page"},{"location":"tutorial/21_theory_intro/","page":"Theory I: an intro to SDDP","title":"Theory I: an intro to SDDP","text":"An optimal policy is the set of decision rules that the agent can use to make decisions and achieve the smallest expected cost.","category":"page"},{"location":"tutorial/21_theory_intro/#Assumptions","page":"Theory I: an intro to SDDP","title":"Assumptions","text":"","category":"section"},{"location":"tutorial/21_theory_intro/","page":"Theory I: an intro to SDDP","title":"Theory I: an intro to SDDP","text":"warning: Warning\nThis section is important!","category":"page"},{"location":"tutorial/21_theory_intro/","page":"Theory I: an intro to SDDP","title":"Theory I: an intro to SDDP","text":"The space of problems you can model with this framework is very large. Too large, in fact, for us to form tractable solution algorithms for! 
Stochastic dual dynamic programming requires the following assumptions in order to work:","category":"page"},{"location":"tutorial/21_theory_intro/","page":"Theory I: an intro to SDDP","title":"Theory I: an intro to SDDP","text":"Assumption 1: finite nodes","category":"page"},{"location":"tutorial/21_theory_intro/","page":"Theory I: an intro to SDDP","title":"Theory I: an intro to SDDP","text":"There is a finite number of nodes in mathcalN.","category":"page"},{"location":"tutorial/21_theory_intro/","page":"Theory I: an intro to SDDP","title":"Theory I: an intro to SDDP","text":"Assumption 2: finite random variables","category":"page"},{"location":"tutorial/21_theory_intro/","page":"Theory I: an intro to SDDP","title":"Theory I: an intro to SDDP","text":"The sample space Omega_i is finite and discrete for each node iinmathcalN.","category":"page"},{"location":"tutorial/21_theory_intro/","page":"Theory I: an intro to SDDP","title":"Theory I: an intro to SDDP","text":"Assumption 3: convex problems","category":"page"},{"location":"tutorial/21_theory_intro/","page":"Theory I: an intro to SDDP","title":"Theory I: an intro to SDDP","text":"Given fixed omega, C_i(x u omega) is a convex function, T_i(x u omega) is linear, and U_i(x u omega) is a non-empty, bounded convex set with respect to x and u.","category":"page"},{"location":"tutorial/21_theory_intro/","page":"Theory I: an intro to SDDP","title":"Theory I: an intro to SDDP","text":"Assumption 4: no infinite loops","category":"page"},{"location":"tutorial/21_theory_intro/","page":"Theory I: an intro to SDDP","title":"Theory I: an intro to SDDP","text":"For all loops in the policy graph, the product of the arc transition probabilities around the loop is strictly less than 1.","category":"page"},{"location":"tutorial/21_theory_intro/","page":"Theory I: an intro to SDDP","title":"Theory I: an intro to SDDP","text":"note: Note\nSDDP.jl relaxes assumption (3) to allow for integer state and control variables, but we won't go into the details here. Assumption (4) essentially means that we obtain a discounted-cost solution for infinite-horizon problems, instead of an average-cost solution; see Dowson (2020) for details.","category":"page"},{"location":"tutorial/21_theory_intro/#Dynamic-programming-and-subproblems","page":"Theory I: an intro to SDDP","title":"Dynamic programming and subproblems","text":"","category":"section"},{"location":"tutorial/21_theory_intro/","page":"Theory I: an intro to SDDP","title":"Theory I: an intro to SDDP","text":"Now that we have formulated our problem, we need some ways of computing optimal decision rules. 
One way is to just use a heuristic like \"choose a control randomly from the set of feasible controls.\" However, such a policy is unlikely to be optimal.","category":"page"},{"location":"tutorial/21_theory_intro/","page":"Theory I: an intro to SDDP","title":"Theory I: an intro to SDDP","text":"A better way of obtaining an optimal policy is to use Bellman's principle of optimality, a.k.a. Dynamic Programming, and define a recursive subproblem as follows:","category":"page"},{"location":"tutorial/21_theory_intro/","page":"Theory I: an intro to SDDP","title":"Theory I: an intro to SDDP","text":"beginaligned\nV_i(x omega) = minlimits_barx x^prime u C_i(barx u omega) + mathbbE_j in i^+ varphi in Omega_jV_j(x^prime varphi)\n x^prime = T_i(barx u omega) \n u in U_i(barx omega) \n barx = x\nendaligned","category":"page"},{"location":"tutorial/21_theory_intro/","page":"Theory I: an intro to SDDP","title":"Theory I: an intro to SDDP","text":"Our decision rule, pi_i(x omega), solves this optimization problem and returns a u^* corresponding to an optimal solution.","category":"page"},{"location":"tutorial/21_theory_intro/","page":"Theory I: an intro to SDDP","title":"Theory I: an intro to SDDP","text":"note: Note\nWe add barx as a decision variable, along with the fishing constraint barx = x for two reasons: it makes it obvious that formulating a problem with x times u results in a bilinear program instead of a linear program (see Assumption 3), and it simplifies the implementation of the SDDP algorithm.","category":"page"},{"location":"tutorial/21_theory_intro/","page":"Theory I: an intro to SDDP","title":"Theory I: an intro to SDDP","text":"These subproblems are very difficult to solve exactly, because they involve recursive optimization problems with lots of nested expectations.","category":"page"},{"location":"tutorial/21_theory_intro/","page":"Theory I: an intro to SDDP","title":"Theory I: an intro to SDDP","text":"Therefore, instead of solving them exactly, SDDP works by iteratively approximating the expectation term of each subproblem, which is also called the cost-to-go term. For now, you don't need to understand the details; we will explain how in Preliminaries: approximating the cost-to-go term.","category":"page"},{"location":"tutorial/21_theory_intro/","page":"Theory I: an intro to SDDP","title":"Theory I: an intro to SDDP","text":"The subproblem view of a multistage stochastic program is also important, because it provides a convenient way of communicating the different parts of the broader problem, and it is how we will communicate the problem to SDDP.jl. All we need to do is drop the cost-to-go term and fishing constraint, and define a new subproblem SP as:","category":"page"},{"location":"tutorial/21_theory_intro/","page":"Theory I: an intro to SDDP","title":"Theory I: an intro to SDDP","text":"beginaligned\ntextttSP_i(x omega) minlimits_barx x^prime u C_i(barx u omega) \n x^prime = T_i(barx u omega) \n u in U_i(barx omega)\nendaligned","category":"page"},{"location":"tutorial/21_theory_intro/","page":"Theory I: an intro to SDDP","title":"Theory I: an intro to SDDP","text":"note: Note\nWhen we talk about formulating a subproblem with SDDP.jl, this is the formulation we mean.","category":"page"},{"location":"tutorial/21_theory_intro/","page":"Theory I: an intro to SDDP","title":"Theory I: an intro to SDDP","text":"We've retained the transition function and uncertainty set because they help to motivate the different components of the subproblem. 
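For example, a minimal SDDP.jl sketch of such a subproblem might look like the following, where volume.in and volume.out play the roles of barx and x^prime, the pair (hydro, thermal) is the control u, the inflow is the random variable omega, the water-balance constraint is the transition function, and the cost of thermal generation is the stage objective. (The reservoir data here are purely illustrative and are not a model used elsewhere in this tutorial.)","category":"page"},{"location":"tutorial/21_theory_intro/","page":"Theory I: an intro to SDDP","title":"Theory I: an intro to SDDP","text":"# Illustrative sketch only: the capacity, demand, and costs are made up.\nusing SDDP, GLPK\n\nmodel = SDDP.LinearPolicyGraph(\n    stages = 3, lower_bound = 0.0, optimizer = GLPK.Optimizer\n) do subproblem, t\n    @variable(subproblem, 0 <= volume <= 200, SDDP.State, initial_value = 200)\n    @variable(subproblem, hydro >= 0)\n    @variable(subproblem, thermal >= 0)\n    @variable(subproblem, inflow)\n    # u in U_i(x, omega): generation must meet a demand of 150 units.\n    @constraint(subproblem, hydro + thermal == 150)\n    # x' = T_i(x, u, omega): the water balance.\n    @constraint(subproblem, volume.out == volume.in - hydro + inflow)\n    SDDP.parameterize(subproblem, [0.0, 50.0, 100.0]) do ω\n        return JuMP.fix(inflow, ω)\n    end\n    # C_i(x, u, omega): the cost of thermal generation.\n    @stageobjective(subproblem, 50 * thermal)\nend","category":"page"},{"location":"tutorial/21_theory_intro/","page":"Theory I: an intro to SDDP","title":"Theory I: an intro to SDDP","text":"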
However, in general, the subproblem can be more general. A better (less restrictive) representation might be:","category":"page"},{"location":"tutorial/21_theory_intro/","page":"Theory I: an intro to SDDP","title":"Theory I: an intro to SDDP","text":"beginaligned\ntextttSP_i(x omega) minlimits_barx x^prime u C_i(barx x^prime u omega) \n (barx x^prime u) in mathcalX_i(omega)\nendaligned","category":"page"},{"location":"tutorial/21_theory_intro/","page":"Theory I: an intro to SDDP","title":"Theory I: an intro to SDDP","text":"Note that the outgoing state variable can appear in the objective, and we can add constraints involving the incoming and outgoing state variables. It should be obvious how to map between the two representations.","category":"page"},{"location":"tutorial/21_theory_intro/#Preliminaries:-Kelley's-cutting-plane-algorithm","page":"Theory I: an intro to SDDP","title":"Preliminaries: Kelley's cutting plane algorithm","text":"","category":"section"},{"location":"tutorial/21_theory_intro/","page":"Theory I: an intro to SDDP","title":"Theory I: an intro to SDDP","text":"Kelley's cutting plane algorithm is an iterative method for minimizing convex functions. Given a convex function f(x), Kelley's constructs an under-approximation of the function at the minimum by a set of first-order Taylor series approximations (called cuts) constructed at a set of points k = 1ldotsK:","category":"page"},{"location":"tutorial/21_theory_intro/","page":"Theory I: an intro to SDDP","title":"Theory I: an intro to SDDP","text":"beginaligned\nf^K = minlimits_theta in mathbbR x in mathbbR^N theta\n theta ge f(x_k) + fracddxf(x_k)^top (x - x_k)quad k=1ldotsK\n theta ge M\nendaligned","category":"page"},{"location":"tutorial/21_theory_intro/","page":"Theory I: an intro to SDDP","title":"Theory I: an intro to SDDP","text":"where M is a sufficiently large negative number that is a lower bound for f over the domain of x.","category":"page"},{"location":"tutorial/21_theory_intro/","page":"Theory I: an intro to SDDP","title":"Theory I: an intro to SDDP","text":"Kelley's cutting plane algorithm is a structured way of choosing points x_k to visit, so that as more cuts are added:","category":"page"},{"location":"tutorial/21_theory_intro/","page":"Theory I: an intro to SDDP","title":"Theory I: an intro to SDDP","text":"lim_K rightarrow infty f^K = minlimits_x in mathbbR^N f(x)","category":"page"},{"location":"tutorial/21_theory_intro/","page":"Theory I: an intro to SDDP","title":"Theory I: an intro to SDDP","text":"However, before we introduce the algorithm, we need to introduce some bounds.","category":"page"},{"location":"tutorial/21_theory_intro/#Bounds","page":"Theory I: an intro to SDDP","title":"Bounds","text":"","category":"section"},{"location":"tutorial/21_theory_intro/","page":"Theory I: an intro to SDDP","title":"Theory I: an intro to SDDP","text":"By convexity, f^K le f(x) for all x. 
Thus, if x^* is a minimizer of f, then at any point in time we can construct a lower bound for f(x^*) by solving f^K.","category":"page"},{"location":"tutorial/21_theory_intro/","page":"Theory I: an intro to SDDP","title":"Theory I: an intro to SDDP","text":"Moreover, we can use the primal solutions x_k^* returned by solving f^k to evaluate f(x_k^*) to generate an upper bound.","category":"page"},{"location":"tutorial/21_theory_intro/","page":"Theory I: an intro to SDDP","title":"Theory I: an intro to SDDP","text":"Therefore, f^K le f(x^*) le minlimits_k=1ldotsK f(x_k^*).","category":"page"},{"location":"tutorial/21_theory_intro/","page":"Theory I: an intro to SDDP","title":"Theory I: an intro to SDDP","text":"When the lower bound is sufficiently close to the upper bound, we can terminate the algorithm and declare that we have found a solution that is close to optimal.","category":"page"},{"location":"tutorial/21_theory_intro/#Implementation","page":"Theory I: an intro to SDDP","title":"Implementation","text":"","category":"section"},{"location":"tutorial/21_theory_intro/","page":"Theory I: an intro to SDDP","title":"Theory I: an intro to SDDP","text":"Here is pseudo-code for Kelley's algorithm:","category":"page"},{"location":"tutorial/21_theory_intro/","page":"Theory I: an intro to SDDP","title":"Theory I: an intro to SDDP","text":"Take as input a convex function f(x) and an iteration limit K_max. Set K = 0, and initialize f^K. Set lb = -infty and ub = infty.\nSolve f^K to obtain a candidate solution x_K+1.\nUpdate lb = f^K and ub = minub f(x_K+1).\nAdd a cut theta ge f(x_K+1) + fracddxfleft(x_K+1right)^top (x - x_K+1) to form f^K+1.\nIncrement K.\nIf K = K_max or ub - lb epsilon, STOP; otherwise, go to step 2.","category":"page"},{"location":"tutorial/21_theory_intro/","page":"Theory I: an intro to SDDP","title":"Theory I: an intro to SDDP","text":"And here's a complete implementation:","category":"page"},{"location":"tutorial/21_theory_intro/","page":"Theory I: an intro to SDDP","title":"Theory I: an intro to SDDP","text":"function kelleys_cutting_plane(\n # The function to be minimized.\n f::Function,\n # The gradient of `f`. 
By default, we use automatic differentiation to\n # compute the gradient of f so the user doesn't have to!\n dfdx::Function = x -> ForwardDiff.gradient(f, x);\n # The number of arguments to `f`.\n input_dimension::Int,\n # A lower bound for the function `f` over its domain.\n lower_bound::Float64,\n # The number of iterations to run Kelley's algorithm for before stopping.\n iteration_limit::Int,\n # The absolute tolerance ϵ to use for convergence.\n tolerance::Float64 = 1e-6,\n)\n # Step (1):\n K = 0\n model = JuMP.Model(GLPK.Optimizer)\n JuMP.@variable(model, θ >= lower_bound)\n JuMP.@variable(model, x[1:input_dimension])\n JuMP.@objective(model, Min, θ)\n lower_bound, upper_bound = -Inf, Inf\n while true\n # Step (2):\n JuMP.optimize!(model)\n x_k = JuMP.value.(x)\n # Step (3):\n lower_bound = JuMP.objective_value(model)\n upper_bound = min(upper_bound, f(x_k))\n println(\"K = $K : $(lower_bound) <= f(x*) <= $(upper_bound)\")\n # Step (4):\n JuMP.@constraint(model, θ >= f(x_k) + dfdx(x_k)' * (x .- x_k))\n # Step (5):\n K = K + 1\n # Step (6):\n if K == iteration_limit\n println(\"-- Termination status: iteration limit --\")\n break\n elseif abs(upper_bound - lower_bound) < tolerance\n println(\"-- Termination status: converged --\")\n break\n end\n end\n println(\"Found solution: x_K = \", JuMP.value.(x))\n return\nend","category":"page"},{"location":"tutorial/21_theory_intro/","page":"Theory I: an intro to SDDP","title":"Theory I: an intro to SDDP","text":"kelleys_cutting_plane (generic function with 2 methods)","category":"page"},{"location":"tutorial/21_theory_intro/","page":"Theory I: an intro to SDDP","title":"Theory I: an intro to SDDP","text":"Let's run our algorithm to see what happens:","category":"page"},{"location":"tutorial/21_theory_intro/","page":"Theory I: an intro to SDDP","title":"Theory I: an intro to SDDP","text":"kelleys_cutting_plane(\n input_dimension = 2,\n lower_bound = 0.0,\n iteration_limit = 20,\n) do x\n return (x[1] - 1)^2 + (x[2] + 2)^2 + 1.0\nend","category":"page"},{"location":"tutorial/21_theory_intro/","page":"Theory I: an intro to SDDP","title":"Theory I: an intro to SDDP","text":"K = 0 : 0.0 <= f(x*) <= 6.0\nK = 1 : 0.0 <= f(x*) <= 2.25\nK = 2 : 0.0 <= f(x*) <= 2.25\nK = 3 : 0.0 <= f(x*) <= 1.0986328125\nK = 4 : 0.0 <= f(x*) <= 1.0986328125\nK = 5 : 0.0 <= f(x*) <= 1.0986328125\nK = 6 : 0.31451789700255095 <= f(x*) <= 1.0986328125\nK = 7 : 0.5251698041461692 <= f(x*) <= 1.0986328125\nK = 8 : 0.7300616982038292 <= f(x*) <= 1.0986328125\nK = 9 : 0.8034849495130736 <= f(x*) <= 1.0283101871093756\nK = 10 : 0.928700455583042 <= f(x*) <= 1.0283101871093756\nK = 11 : 0.9487595478289129 <= f(x*) <= 1.005714217279734\nK = 12 : 0.9823564684073586 <= f(x*) <= 1.005714217279734\nK = 13 : 0.9878994829249163 <= f(x*) <= 1.0023277313143473\nK = 14 : 0.9948297447399878 <= f(x*) <= 1.0023277313143473\nK = 15 : 0.9966035252317598 <= f(x*) <= 1.0002586136309095\nK = 16 : 0.9988344662887894 <= f(x*) <= 1.0002586136309095\nK = 17 : 0.999400719780466 <= f(x*) <= 1.0002586136309095\nK = 18 : 0.9995654191110327 <= f(x*) <= 1.0000883284156266\nK = 19 : 0.9998024915692464 <= f(x*) <= 1.0000370583103833\n-- Termination status: iteration limit --\nFound solution: x_K = [0.99392, -1.9997]\n","category":"page"},{"location":"tutorial/21_theory_intro/","page":"Theory I: an intro to SDDP","title":"Theory I: an intro to SDDP","text":"warning: Warning\nIt's hard to choose a valid lower bound! If you choose one too loose, the algorithm can take a long time to converge. 
However, if you choose one so tight that M f(x^*), then you can obtain a suboptimal solution. For a deeper discussion of the implications for SDDP.jl, see Choosing an initial bound.","category":"page"},{"location":"tutorial/21_theory_intro/#Preliminaries:-approximating-the-cost-to-go-term","page":"Theory I: an intro to SDDP","title":"Preliminaries: approximating the cost-to-go term","text":"","category":"section"},{"location":"tutorial/21_theory_intro/","page":"Theory I: an intro to SDDP","title":"Theory I: an intro to SDDP","text":"In the background theory section, we discussed how you could formulate an optimal policy to a multistage stochastic program using the dynamic programming recursion:","category":"page"},{"location":"tutorial/21_theory_intro/","page":"Theory I: an intro to SDDP","title":"Theory I: an intro to SDDP","text":"beginaligned\nV_i(x omega) = minlimits_barx x^prime u C_i(barx u omega) + mathbbE_j in i^+ varphi in Omega_jV_j(x^prime varphi)\n x^prime = T_i(barx u omega) \n u in U_i(barx omega) \n barx = x\nendaligned","category":"page"},{"location":"tutorial/21_theory_intro/","page":"Theory I: an intro to SDDP","title":"Theory I: an intro to SDDP","text":"where our decision rule, pi_i(x omega), solves this optimization problem and returns a u^* corresponding to an optimal solution. Moreover, we alluded to the fact that the cost-to-go term (the nasty recursive expectation) makes this problem intractable to solve.","category":"page"},{"location":"tutorial/21_theory_intro/","page":"Theory I: an intro to SDDP","title":"Theory I: an intro to SDDP","text":"However, if, excluding the cost-to-go term (i.e., the SP formulation), V_i(x omega) can be formulated as a linear program (this also works for convex programs, but the math is more involved), then we can make some progress by noticing that x only appears as a right-hand side term of the fishing constraint barx = x.","category":"page"},{"location":"tutorial/21_theory_intro/","page":"Theory I: an intro to SDDP","title":"Theory I: an intro to SDDP","text":"Therefore, V_i(x cdot) is convex with respect to x for fixed omega. Moreover, if we implement the constraint barx = x by setting the lower- and upper bounds of barx to x, then the reduced cost of the decision variable barx is a subgradient of the function V_i with respect to x! (This is the algorithmic simplification that leads us to add barx and the fishing constraint barx = x.)","category":"page"},{"location":"tutorial/21_theory_intro/","page":"Theory I: an intro to SDDP","title":"Theory I: an intro to SDDP","text":"Stochastic dual dynamic programming converts this problem into a tractable form by applying Kelley's cutting plane algorithm to the V_j functions in the cost-to-go term:","category":"page"},{"location":"tutorial/21_theory_intro/","page":"Theory I: an intro to SDDP","title":"Theory I: an intro to SDDP","text":"beginaligned\nV_i^K(x omega) = minlimits_barx x^prime u C_i(barx u omega) + theta\n x^prime = T_i(barx u omega) \n u in U_i(barx omega) \n barx = x \n theta ge mathbbE_j in i^+ varphi in Omega_jleftV_j^k(x^prime_k varphi) + fracddx^primeV_j^k(x^prime_k varphi)^top (x^prime - x^prime_k)rightquad k=1ldotsK \n theta ge M\nendaligned","category":"page"},{"location":"tutorial/21_theory_intro/","page":"Theory I: an intro to SDDP","title":"Theory I: an intro to SDDP","text":"All we need now is a way of generating these cutting planes in an iterative manner. 
Before we get to that though, let's start writing some code.","category":"page"},{"location":"tutorial/21_theory_intro/#Implementation:-modeling","page":"Theory I: an intro to SDDP","title":"Implementation: modeling","text":"","category":"section"},{"location":"tutorial/21_theory_intro/","page":"Theory I: an intro to SDDP","title":"Theory I: an intro to SDDP","text":"Let's make a start by defining the problem structure. Like SDDP.jl, we need a few things:","category":"page"},{"location":"tutorial/21_theory_intro/","page":"Theory I: an intro to SDDP","title":"Theory I: an intro to SDDP","text":"A description of the structure of the policy graph: how many nodes there are, and the arcs linking the nodes together with their corresponding probabilities.\nA JuMP model for each node in the policy graph.\nA way to identify the incoming and outgoing state variables of each node.\nA description of the random variable, as well as a function that we can call that will modify the JuMP model to reflect the realization of the random variable.\nA decision variable to act as the approximated cost-to-go term.","category":"page"},{"location":"tutorial/21_theory_intro/","page":"Theory I: an intro to SDDP","title":"Theory I: an intro to SDDP","text":"warning: Warning\nIn the interests of brevity, there is minimal error checking. Think about all the different ways you could break the code!","category":"page"},{"location":"tutorial/21_theory_intro/#Structs","page":"Theory I: an intro to SDDP","title":"Structs","text":"","category":"section"},{"location":"tutorial/21_theory_intro/","page":"Theory I: an intro to SDDP","title":"Theory I: an intro to SDDP","text":"The first struct we are going to use is a State struct that will wrap an incoming and outgoing state variable:","category":"page"},{"location":"tutorial/21_theory_intro/","page":"Theory I: an intro to SDDP","title":"Theory I: an intro to SDDP","text":"struct State\n in::JuMP.VariableRef\n out::JuMP.VariableRef\nend","category":"page"},{"location":"tutorial/21_theory_intro/","page":"Theory I: an intro to SDDP","title":"Theory I: an intro to SDDP","text":"Next, we need a struct to wrap all of the uncertainty within a node:","category":"page"},{"location":"tutorial/21_theory_intro/","page":"Theory I: an intro to SDDP","title":"Theory I: an intro to SDDP","text":"struct Uncertainty\n parameterize::Function\n Ω::Vector{Any}\n P::Vector{Float64}\nend","category":"page"},{"location":"tutorial/21_theory_intro/","page":"Theory I: an intro to SDDP","title":"Theory I: an intro to SDDP","text":"parameterize is a function which takes a realization of the random variable omegainOmega and updates the subproblem accordingly. The finite discrete random variable is defined by the vectors Ω and P, so that the random variable takes the value Ω[i] with probability P[i]. As such, P should sum to 1. 
(We don't check this here, but we should; we do in SDDP.jl.)","category":"page"},{"location":"tutorial/21_theory_intro/","page":"Theory I: an intro to SDDP","title":"Theory I: an intro to SDDP","text":"Now we have two building blocks, we can declare the structure of each node:","category":"page"},{"location":"tutorial/21_theory_intro/","page":"Theory I: an intro to SDDP","title":"Theory I: an intro to SDDP","text":"struct Node\n subproblem::JuMP.Model\n states::Dict{Symbol,State}\n uncertainty::Uncertainty\n cost_to_go::JuMP.VariableRef\nend","category":"page"},{"location":"tutorial/21_theory_intro/","page":"Theory I: an intro to SDDP","title":"Theory I: an intro to SDDP","text":"subproblem is going to be the JuMP model that we build at each node.\nstates is a dictionary that maps a symbolic name of a state variable to a State object wrapping the incoming and outgoing state variables in subproblem.\nuncertainty is an Uncertainty object described above.\ncost_to_go is a JuMP variable that approximates the cost-to-go term.","category":"page"},{"location":"tutorial/21_theory_intro/","page":"Theory I: an intro to SDDP","title":"Theory I: an intro to SDDP","text":"Finally, we define a simplified policy graph as follows:","category":"page"},{"location":"tutorial/21_theory_intro/","page":"Theory I: an intro to SDDP","title":"Theory I: an intro to SDDP","text":"struct PolicyGraph\n nodes::Vector{Node}\n arcs::Vector{Dict{Int,Float64}}\nend","category":"page"},{"location":"tutorial/21_theory_intro/","page":"Theory I: an intro to SDDP","title":"Theory I: an intro to SDDP","text":"There is a vector of nodes, as well as a data structure for the arcs. arcs is a vector of dictionaries, where arcs[i][j] gives the probabiltiy of transitioning from node i to node j, if an arc exists.","category":"page"},{"location":"tutorial/21_theory_intro/","page":"Theory I: an intro to SDDP","title":"Theory I: an intro to SDDP","text":"To simplify things, we will assume that the root node transitions to node 1 with probability 1, and there are no other incoming arcs to node 1. Notably, we can still define cyclic graphs though!","category":"page"},{"location":"tutorial/21_theory_intro/","page":"Theory I: an intro to SDDP","title":"Theory I: an intro to SDDP","text":"We also define a nice show method so that we don't accidentally print a large amount of information to the screen when creating a model:","category":"page"},{"location":"tutorial/21_theory_intro/","page":"Theory I: an intro to SDDP","title":"Theory I: an intro to SDDP","text":"function Base.show(io::IO, model::PolicyGraph)\n println(io, \"A policy graph with $(length(model.nodes)) nodes\")\n println(io, \"Arcs:\")\n for (from, arcs) in enumerate(model.arcs)\n for (to, probability) in arcs\n println(io, \" $(from) => $(to) w.p. $(probability)\")\n end\n end\n return\nend","category":"page"},{"location":"tutorial/21_theory_intro/#Functions","page":"Theory I: an intro to SDDP","title":"Functions","text":"","category":"section"},{"location":"tutorial/21_theory_intro/","page":"Theory I: an intro to SDDP","title":"Theory I: an intro to SDDP","text":"Now we have some basic types, let's implement some functions so that the user can create a model.","category":"page"},{"location":"tutorial/21_theory_intro/","page":"Theory I: an intro to SDDP","title":"Theory I: an intro to SDDP","text":"First, we need an example of a function that the user will provide. Like SDDP.jl, this takes an empty subproblem, and a node index, in this case t::Int. 
You could change this function to change the model, or define a new one later in the code.","category":"page"},{"location":"tutorial/21_theory_intro/","page":"Theory I: an intro to SDDP","title":"Theory I: an intro to SDDP","text":"We're going to copy the example from Basic II: adding uncertainty, with some minor adjustments for the fact we don't have many of the bells and whistles of SDDP.jl. You can probably see how some of the SDDP.jl functionality like @stageobjective and SDDP.parameterize help smooth some of the usability issues like needing to construct both the incoming and outgoing state variables, or needing to explicitly declare return states, uncertainty.","category":"page"},{"location":"tutorial/21_theory_intro/","page":"Theory I: an intro to SDDP","title":"Theory I: an intro to SDDP","text":"function subproblem_builder(subproblem::JuMP.Model, t::Int)\n # Define the state variables. Note how we fix the incoming state to the\n # initial state variable regardless of `t`! This isn't strictly necessary;\n # it only matters that we do it for the first node.\n JuMP.@variable(subproblem, volume_in == 200)\n JuMP.@variable(subproblem, 0 <= volume_out <= 200)\n states = Dict(:volume => State(volume_in, volume_out))\n # Define the control variables.\n JuMP.@variables(subproblem, begin\n thermal_generation >= 0\n hydro_generation >= 0\n hydro_spill >= 0\n inflow\n end)\n # Define the constraints\n JuMP.@constraints(subproblem, begin\n volume_out == volume_in + inflow - hydro_generation - hydro_spill\n demand_constraint, thermal_generation + hydro_generation == 150.0\n end)\n # Define the objective for each stage `t`. Note that we can use `t` as an\n # index for t = 1, 2, 3.\n fuel_cost = [50.0, 100.0, 150.0]\n JuMP.@objective(subproblem, Min, fuel_cost[t] * thermal_generation)\n # Finally, we define the uncertainty object. Because this is a simplified\n # implementation of SDDP, we shall politely ask the user to only modify the\n # constraints, and not the objective function! (Not that it changes the\n # algorithm, we just have to add more information to keep track of things.)\n uncertainty = Uncertainty([0.0, 50.0, 100.0], [1 / 3, 1 / 3, 1 / 3]) do ω\n JuMP.fix(inflow, ω)\n end\n return states, uncertainty\nend","category":"page"},{"location":"tutorial/21_theory_intro/","page":"Theory I: an intro to SDDP","title":"Theory I: an intro to SDDP","text":"subproblem_builder (generic function with 1 method)","category":"page"},{"location":"tutorial/21_theory_intro/","page":"Theory I: an intro to SDDP","title":"Theory I: an intro to SDDP","text":"The next function we need to define is the analog of SDDP.PolicyGraph. It should be pretty readable.","category":"page"},{"location":"tutorial/21_theory_intro/","page":"Theory I: an intro to SDDP","title":"Theory I: an intro to SDDP","text":"function PolicyGraph(\n subproblem_builder::Function;\n graph::Vector{Dict{Int,Float64}},\n lower_bound::Float64,\n optimizer,\n)\n nodes = Node[]\n for t = 1:length(graph)\n # Create a model.\n model = JuMP.Model(optimizer)\n # Use the provided function to build out each subproblem. 
The user's\n # function returns a dictionary mapping `Symbol`s to `State` objects,\n # and an `Uncertainty` object.\n states, uncertainty = subproblem_builder(model, t)\n # Now add the cost-to-go terms:\n JuMP.@variable(model, cost_to_go >= lower_bound)\n obj = JuMP.objective_function(model)\n JuMP.@objective(model, Min, obj + cost_to_go)\n # If there are no outgoing arcs, the cost-to-go is 0.0.\n if length(graph[t]) == 0\n JuMP.fix(cost_to_go, 0.0; force = true)\n end\n push!(nodes, Node(model, states, uncertainty, cost_to_go))\n end\n return PolicyGraph(nodes, graph)\nend","category":"page"},{"location":"tutorial/21_theory_intro/","page":"Theory I: an intro to SDDP","title":"Theory I: an intro to SDDP","text":"Main.##1527.PolicyGraph","category":"page"},{"location":"tutorial/21_theory_intro/","page":"Theory I: an intro to SDDP","title":"Theory I: an intro to SDDP","text":"Then, we can create a model using the subproblem_builder function we defined earlier:","category":"page"},{"location":"tutorial/21_theory_intro/","page":"Theory I: an intro to SDDP","title":"Theory I: an intro to SDDP","text":"model = PolicyGraph(\n subproblem_builder;\n graph = [\n Dict(2 => 1.0),\n Dict(3 => 1.0),\n Dict{Int,Float64}(),\n ],\n lower_bound = 0.0,\n optimizer = GLPK.Optimizer,\n)","category":"page"},{"location":"tutorial/21_theory_intro/","page":"Theory I: an intro to SDDP","title":"Theory I: an intro to SDDP","text":"A policy graph with 3 nodes\nArcs:\n 1 => 2 w.p. 1.0\n 2 => 3 w.p. 1.0\n","category":"page"},{"location":"tutorial/21_theory_intro/#Implementation:-helpful-samplers","page":"Theory I: an intro to SDDP","title":"Implementation: helpful samplers","text":"","category":"section"},{"location":"tutorial/21_theory_intro/","page":"Theory I: an intro to SDDP","title":"Theory I: an intro to SDDP","text":"Before we get properly coding the solution algorithm, it's also going to be useful to have a function that samples a realization of the random variable defined by Ω and P.","category":"page"},{"location":"tutorial/21_theory_intro/","page":"Theory I: an intro to SDDP","title":"Theory I: an intro to SDDP","text":"function sample_uncertainty(uncertainty::Uncertainty)\n r = rand()\n for (p, ω) in zip(uncertainty.P, uncertainty.Ω)\n r -= p\n if r < 0.0\n return ω\n end\n end\n error(\"We should never get here because P should sum to 1.0.\")\nend","category":"page"},{"location":"tutorial/21_theory_intro/","page":"Theory I: an intro to SDDP","title":"Theory I: an intro to SDDP","text":"sample_uncertainty (generic function with 1 method)","category":"page"},{"location":"tutorial/21_theory_intro/","page":"Theory I: an intro to SDDP","title":"Theory I: an intro to SDDP","text":"note: Note\nrand() samples a uniform random variable in [0, 1).","category":"page"},{"location":"tutorial/21_theory_intro/","page":"Theory I: an intro to SDDP","title":"Theory I: an intro to SDDP","text":"For example:","category":"page"},{"location":"tutorial/21_theory_intro/","page":"Theory I: an intro to SDDP","title":"Theory I: an intro to SDDP","text":"for i = 1:3\n println(\"ω = \", sample_uncertainty(model.nodes[1].uncertainty))\nend","category":"page"},{"location":"tutorial/21_theory_intro/","page":"Theory I: an intro to SDDP","title":"Theory I: an intro to SDDP","text":"ω = 50.0\nω = 100.0\nω = 50.0\n","category":"page"},{"location":"tutorial/21_theory_intro/","page":"Theory I: an intro to SDDP","title":"Theory I: an intro to SDDP","text":"It's also going to be useful to define a function that generates a random walk through 
the nodes of the graph:","category":"page"},{"location":"tutorial/21_theory_intro/","page":"Theory I: an intro to SDDP","title":"Theory I: an intro to SDDP","text":"function sample_next_node(model::PolicyGraph, current::Int)\n if length(model.arcs[current]) == 0\n # No outgoing arcs!\n return nothing\n else\n r = rand()\n for (to, probability) in model.arcs[current]\n r -= probability\n if r < 0.0\n return to\n end\n end\n # We looped through the outgoing arcs and still have probability left\n # over! This means we've hit an implicit \"zero\" node.\n return nothing\n end\nend","category":"page"},{"location":"tutorial/21_theory_intro/","page":"Theory I: an intro to SDDP","title":"Theory I: an intro to SDDP","text":"sample_next_node (generic function with 1 method)","category":"page"},{"location":"tutorial/21_theory_intro/","page":"Theory I: an intro to SDDP","title":"Theory I: an intro to SDDP","text":"For example:","category":"page"},{"location":"tutorial/21_theory_intro/","page":"Theory I: an intro to SDDP","title":"Theory I: an intro to SDDP","text":"for i = 1:3\n # We use `repr` to print the next node, because `sample_next_node` can\n # return `nothing`.\n println(\"Next node from $(i) = \", repr(sample_next_node(model, i)))\nend","category":"page"},{"location":"tutorial/21_theory_intro/","page":"Theory I: an intro to SDDP","title":"Theory I: an intro to SDDP","text":"Next node from 1 = 2\nNext node from 2 = 3\nNext node from 3 = nothing\n","category":"page"},{"location":"tutorial/21_theory_intro/","page":"Theory I: an intro to SDDP","title":"Theory I: an intro to SDDP","text":"This is a little boring, because our graph is simple. However, more complicated graphs will generate more interesting trajectories!","category":"page"},{"location":"tutorial/21_theory_intro/#Implementation:-the-forward-pass","page":"Theory I: an intro to SDDP","title":"Implementation: the forward pass","text":"","category":"section"},{"location":"tutorial/21_theory_intro/","page":"Theory I: an intro to SDDP","title":"Theory I: an intro to SDDP","text":"Recall that, after approximating the cost-to-go term, we need a way of generating the cuts. As the first step, we need a way of generating candidate solutions x_k^prime. However, unlike the Kelley's example, our functions V_j^k(x^prime varphi) need two inputs: an outgoing state variable and a realization of the random variable.","category":"page"},{"location":"tutorial/21_theory_intro/","page":"Theory I: an intro to SDDP","title":"Theory I: an intro to SDDP","text":"One way of getting these inputs is just to pick a random (feasible) value. However, in doing so, we might pick outgoing state variables that we will never see in practice, or we might infrequently pick outgoing state variables that we will often see in practice. Therefore, a better way of generating the inputs is to use a simulation of the policy, which we call the forward pass.","category":"page"},{"location":"tutorial/21_theory_intro/","page":"Theory I: an intro to SDDP","title":"Theory I: an intro to SDDP","text":"The forward pass walks the policy graph from start to end, transitioning randomly along the arcs. At each node, it observes a realization of the random variable and solves the approximated subproblem to generate a candidate outgoing state variable x_k^prime. 
The outgoing state variable is passed as the incoming state variable to the next node in the trajectory.","category":"page"},{"location":"tutorial/21_theory_intro/","page":"Theory I: an intro to SDDP","title":"Theory I: an intro to SDDP","text":"function forward_pass(model::PolicyGraph, io::IO = stdout)\n println(io, \"| Forward Pass\")\n # First, get the value of the state at the root node (e.g., x_R).\n incoming_state = Dict(\n k => JuMP.fix_value(v.in) for (k, v) in model.nodes[1].states\n )\n # `simulation_cost` is an accumulator that is going to sum the stage-costs\n # incurred over the forward pass.\n simulation_cost = 0.0\n # We also need to record the nodes visited and resultant outgoing state\n # variables so we can pass them to the backward pass.\n trajectory = Tuple{Int,Dict{Symbol,Float64}}[]\n # Now's the meat of the forward pass: beginning at the first node:\n t = 1\n while t !== nothing\n node = model.nodes[t]\n println(io, \"| | Visiting node $(t)\")\n # Sample the uncertainty:\n ω = sample_uncertainty(node.uncertainty)\n println(io, \"| | | ω = \", ω)\n # Parameterize the subproblem using the user-provided function:\n node.uncertainty.parameterize(ω)\n println(io, \"| | | x = \", incoming_state)\n # Update the incoming state variable:\n for (k, v) in incoming_state\n JuMP.fix(node.states[k].in, v; force = true)\n end\n # Now solve the subproblem and check we found an optimal solution:\n JuMP.optimize!(node.subproblem)\n if JuMP.termination_status(node.subproblem) != JuMP.MOI.OPTIMAL\n error(\"Something went terribly wrong!\")\n end\n # Compute the outgoing state variables:\n outgoing_state = Dict(k => JuMP.value(v.out) for (k, v) in node.states)\n println(io, \"| | | x′ = \", outgoing_state)\n # We also need to compute the stage cost to add to our\n # `simulation_cost` accumulator:\n stage_cost = JuMP.objective_value(node.subproblem) - JuMP.value(node.cost_to_go)\n simulation_cost += stage_cost\n println(io, \"| | | C(x, u, ω) = \", stage_cost)\n # As a penultimate step, set the outgoing state of stage t and the\n # incoming state of stage t + 1, and add the node to the trajectory.\n incoming_state = outgoing_state\n push!(trajectory, (t, outgoing_state))\n # Finally, sample a new node to step to. 
If `t === nothing`, the\n # `while` loop will break.\n t = sample_next_node(model, t)\n end\n return trajectory, simulation_cost\nend","category":"page"},{"location":"tutorial/21_theory_intro/","page":"Theory I: an intro to SDDP","title":"Theory I: an intro to SDDP","text":"forward_pass (generic function with 2 methods)","category":"page"},{"location":"tutorial/21_theory_intro/","page":"Theory I: an intro to SDDP","title":"Theory I: an intro to SDDP","text":"Let's take a look at one forward pass:","category":"page"},{"location":"tutorial/21_theory_intro/","page":"Theory I: an intro to SDDP","title":"Theory I: an intro to SDDP","text":"trajectory, simulation_cost = forward_pass(model);\n","category":"page"},{"location":"tutorial/21_theory_intro/","page":"Theory I: an intro to SDDP","title":"Theory I: an intro to SDDP","text":"| Forward Pass\n| | Visiting node 1\n| | | ω = 50.0\n| | | x = Dict(:volume=>200.0)\n| | | x′ = Dict(:volume=>100.0)\n| | | C(x, u, ω) = 0.0\n| | Visiting node 2\n| | | ω = 0.0\n| | | x = Dict(:volume=>100.0)\n| | | x′ = Dict(:volume=>0.0)\n| | | C(x, u, ω) = 5000.0\n| | Visiting node 3\n| | | ω = 0.0\n| | | x = Dict(:volume=>0.0)\n| | | x′ = Dict(:volume=>0.0)\n| | | C(x, u, ω) = 22500.0\n","category":"page"},{"location":"tutorial/21_theory_intro/#Implementation:-the-backward-pass","page":"Theory I: an intro to SDDP","title":"Implementation: the backward pass","text":"","category":"section"},{"location":"tutorial/21_theory_intro/","page":"Theory I: an intro to SDDP","title":"Theory I: an intro to SDDP","text":"From the forward pass, we obtained a vector of nodes visited and their corresponding outgoing state variables. Now we need to refine the approximation for each node at the candidate solution for the outgoing state variable. That is, we need to add a new cut:","category":"page"},{"location":"tutorial/21_theory_intro/","page":"Theory I: an intro to SDDP","title":"Theory I: an intro to SDDP","text":"theta ge mathbbE_j in i^+ varphi in Omega_jleftV_j^k(x^prime_k varphi) + fracddx^primeV_j^k(x^prime_k varphi)^top (x^prime - x^prime_k)right","category":"page"},{"location":"tutorial/21_theory_intro/","page":"Theory I: an intro to SDDP","title":"Theory I: an intro to SDDP","text":"or alternatively:","category":"page"},{"location":"tutorial/21_theory_intro/","page":"Theory I: an intro to SDDP","title":"Theory I: an intro to SDDP","text":"theta ge sumlimits_j in i^+ sumlimits_varphi in Omega_j p_ij p_varphileftV_j^k(x^prime_k varphi) + fracddx^primeV_j^k(x^prime_k varphi)^top (x^prime - x^prime_k)right","category":"page"},{"location":"tutorial/21_theory_intro/","page":"Theory I: an intro to SDDP","title":"Theory I: an intro to SDDP","text":"It doesn't matter in which order we visit the nodes to generate these cuts. For example, we could compute them all in parallel, using the current approximations of V^K_i.","category":"page"},{"location":"tutorial/21_theory_intro/","page":"Theory I: an intro to SDDP","title":"Theory I: an intro to SDDP","text":"However, we can be smarter than that.","category":"page"},{"location":"tutorial/21_theory_intro/","page":"Theory I: an intro to SDDP","title":"Theory I: an intro to SDDP","text":"If we traverse the list of nodes visited in the forward pass in reverse, then when we come to refine the ith node in the trajectory, we will already have improved the approximation of the (i+1)th node in the trajectory as well! 
Therefore, our refinement of the ith node will be better than if we improved node i first, and then refined node (i+1).","category":"page"},{"location":"tutorial/21_theory_intro/","page":"Theory I: an intro to SDDP","title":"Theory I: an intro to SDDP","text":"Because we walk the nodes in reverse, we call this the backward pass.","category":"page"},{"location":"tutorial/21_theory_intro/","page":"Theory I: an intro to SDDP","title":"Theory I: an intro to SDDP","text":"info: Info\nIf you're into deep learning, you could view this as the equivalent of back-propagation: the forward pass pushes primal information through the graph (outgoing state variables), and the backward pass pulls dual information (cuts) back through the graph to improve our decisions on the next forward pass.","category":"page"},{"location":"tutorial/21_theory_intro/","page":"Theory I: an intro to SDDP","title":"Theory I: an intro to SDDP","text":"function backward_pass(\n model::PolicyGraph,\n trajectory::Vector{Tuple{Int,Dict{Symbol,Float64}}},\n io::IO = stdout,\n)\n println(io, \"| Backward pass\")\n # For the backward pass, we walk back up the nodes.\n for i = reverse(1:length(trajectory))\n index, outgoing_states = trajectory[i]\n node = model.nodes[index]\n println(io, \"| | Visiting node $(index)\")\n if length(model.arcs[index]) == 0\n # If there are no children, the cost-to-go is 0.\n println(io, \"| | | Skipping node because the cost-to-go is 0\")\n continue\n end\n # Create an empty affine expression that we will use to build up the\n # right-hand side of the cut expression.\n cut_expression = JuMP.AffExpr(0.0)\n # For each node j ∈ i⁺\n for (j, P_ij) in model.arcs[index]\n next_node = model.nodes[j]\n # Set the incoming state variables of node j to the outgoing state\n # variables of node i\n for (k, v) in outgoing_states\n JuMP.fix(next_node.states[k].in, v; force = true)\n end\n # Then for each realization of φ ∈ Ωⱼ\n for (pφ, φ) in zip(next_node.uncertainty.P, next_node.uncertainty.Ω)\n # Setup and solve for the realization of φ\n println(io, \"| | | Solving φ = \", φ)\n next_node.uncertainty.parameterize(φ)\n JuMP.optimize!(next_node.subproblem)\n # Then prepare the cut `P_ij * pφ * [V + dVdxᵀ(x - x_k)]``\n V = JuMP.objective_value(next_node.subproblem)\n println(io, \"| | | | V = \", V)\n dVdx = Dict(\n k => JuMP.reduced_cost(v.in) for (k, v) in next_node.states\n )\n println(io, \"| | | | dVdx′ = \", dVdx)\n cut_expression += JuMP.@expression(\n node.subproblem,\n P_ij * pφ * (\n V + sum(\n dVdx[k] * (x.out - outgoing_states[k])\n for (k, x) in node.states\n )\n ),\n )\n end\n end\n # And then refine the cost-to-go variable by adding the cut:\n c = JuMP.@constraint(node.subproblem, node.cost_to_go >= cut_expression)\n println(io, \"| | | Adding cut : \", c)\n end\n return nothing\nend","category":"page"},{"location":"tutorial/21_theory_intro/","page":"Theory I: an intro to SDDP","title":"Theory I: an intro to SDDP","text":"backward_pass (generic function with 2 methods)","category":"page"},{"location":"tutorial/21_theory_intro/#Implementation:-bounds","page":"Theory I: an intro to SDDP","title":"Implementation: bounds","text":"","category":"section"},{"location":"tutorial/21_theory_intro/#Lower-bounds","page":"Theory I: an intro to SDDP","title":"Lower bounds","text":"","category":"section"},{"location":"tutorial/21_theory_intro/","page":"Theory I: an intro to SDDP","title":"Theory I: an intro to SDDP","text":"Recall from Kelley's that we can obtain a lower bound for f(x^*) be evaluating f^K. 
The analogous lower bound for a multistage stochastic program is:","category":"page"},{"location":"tutorial/21_theory_intro/","page":"Theory I: an intro to SDDP","title":"Theory I: an intro to SDDP","text":"mathbbE_i in R^+ omega in Omega_iV_i^K(x_R omega) le min_pi mathbbE_i in R^+ omega in Omega_iV_i^pi(x_R omega)","category":"page"},{"location":"tutorial/21_theory_intro/","page":"Theory I: an intro to SDDP","title":"Theory I: an intro to SDDP","text":"Here's how we compute the lower bound:","category":"page"},{"location":"tutorial/21_theory_intro/","page":"Theory I: an intro to SDDP","title":"Theory I: an intro to SDDP","text":"function lower_bound(model::PolicyGraph)\n node = model.nodes[1]\n bound = 0.0\n for (p, ω) in zip(node.uncertainty.P, node.uncertainty.Ω)\n node.uncertainty.parameterize(ω)\n JuMP.optimize!(node.subproblem)\n bound += p * JuMP.objective_value(node.subproblem)\n end\n return bound\nend","category":"page"},{"location":"tutorial/21_theory_intro/","page":"Theory I: an intro to SDDP","title":"Theory I: an intro to SDDP","text":"lower_bound (generic function with 1 method)","category":"page"},{"location":"tutorial/21_theory_intro/","page":"Theory I: an intro to SDDP","title":"Theory I: an intro to SDDP","text":"note: Note\nThe implementation is simplified because we assumed that there is only one arc from the root node, and that it pointed to the first node in the vector.","category":"page"},{"location":"tutorial/21_theory_intro/","page":"Theory I: an intro to SDDP","title":"Theory I: an intro to SDDP","text":"Because we haven't trained a policy yet, the lower bound is going to be very bad:","category":"page"},{"location":"tutorial/21_theory_intro/","page":"Theory I: an intro to SDDP","title":"Theory I: an intro to SDDP","text":"lower_bound(model)","category":"page"},{"location":"tutorial/21_theory_intro/","page":"Theory I: an intro to SDDP","title":"Theory I: an intro to SDDP","text":"0.0","category":"page"},{"location":"tutorial/21_theory_intro/#Upper-bounds","page":"Theory I: an intro to SDDP","title":"Upper bounds","text":"","category":"section"},{"location":"tutorial/21_theory_intro/","page":"Theory I: an intro to SDDP","title":"Theory I: an intro to SDDP","text":"With Kelley's algorithm, we could easily construct an upper bound by evaluating f(x_K). However, it is almost always intractable to evaluate an upper bound for multistage stochastic programs due to the large number of nodes and the nested expectations. 
Instead, we can perform a Monte Carlo simulation of the policy to build a statistical estimate for the value of mathbbE_i in R^+ omega in Omega_iV_i^pi(x_R omega), where pi is the policy defined by the current approximations V^K_i.","category":"page"},{"location":"tutorial/21_theory_intro/","page":"Theory I: an intro to SDDP","title":"Theory I: an intro to SDDP","text":"function upper_bound(model::PolicyGraph; replications::Int)\n # Pipe the output to `devnull` so we don't print too much!\n simulations = [forward_pass(model, devnull) for i = 1:replications]\n z = [s[2] for s in simulations]\n μ = Statistics.mean(z)\n tσ = 1.96 * Statistics.std(z) / sqrt(replications)\n return μ, tσ\nend","category":"page"},{"location":"tutorial/21_theory_intro/","page":"Theory I: an intro to SDDP","title":"Theory I: an intro to SDDP","text":"upper_bound (generic function with 1 method)","category":"page"},{"location":"tutorial/21_theory_intro/","page":"Theory I: an intro to SDDP","title":"Theory I: an intro to SDDP","text":"note: Note\nThe width of the confidence interval is incorrect if there are cycles in the graph, because the distribution of simulation costs z is not symmetric. The mean is correct, however.","category":"page"},{"location":"tutorial/21_theory_intro/#Termination-criteria","page":"Theory I: an intro to SDDP","title":"Termination criteria","text":"","category":"section"},{"location":"tutorial/21_theory_intro/","page":"Theory I: an intro to SDDP","title":"Theory I: an intro to SDDP","text":"In Kelley's algorithm, the upper bound was deterministic. Therefore, we could terminate the algorithm when the lower bound was sufficiently close to the upper bound. However, our upper bound for SDDP is not deterministic; it is a confidence interval!","category":"page"},{"location":"tutorial/21_theory_intro/","page":"Theory I: an intro to SDDP","title":"Theory I: an intro to SDDP","text":"Some people suggest terminating SDDP when the lower bound is contained within the confidence interval. However, this is a poor choice because it is too easy to generate a false positive. For example, if we use a small number of replications then the width of the confidence will be large, and we are more likely to terminate!","category":"page"},{"location":"tutorial/21_theory_intro/","page":"Theory I: an intro to SDDP","title":"Theory I: an intro to SDDP","text":"In a future tutorial (not yet written...) we will discuss termination criteria in more depth. 
For now, pick a large number of iterations and train for as long as possible.","category":"page"},{"location":"tutorial/21_theory_intro/","page":"Theory I: an intro to SDDP","title":"Theory I: an intro to SDDP","text":"tip: Tip\nFor a rule of thumb, pick a large number of iterations to train the policy for (e.g., 10 times mathcalN times maxlimits_iinmathcalN Omega_i)","category":"page"},{"location":"tutorial/21_theory_intro/#Implementation:-the-training-loop","page":"Theory I: an intro to SDDP","title":"Implementation: the training loop","text":"","category":"section"},{"location":"tutorial/21_theory_intro/","page":"Theory I: an intro to SDDP","title":"Theory I: an intro to SDDP","text":"The train loop of SDDP just applies the forward and backward passes iteratively, followed by a final simulation to compute the upper bound confidence interval:","category":"page"},{"location":"tutorial/21_theory_intro/","page":"Theory I: an intro to SDDP","title":"Theory I: an intro to SDDP","text":"function train(\n model::PolicyGraph;\n iteration_limit::Int,\n replications::Int,\n io::IO = stdout,\n)\n for i = 1:iteration_limit\n println(io, \"Starting iteration $(i)\")\n outgoing_states, _ = forward_pass(model, io)\n backward_pass(model, outgoing_states, io)\n println(io, \"| Finished iteration\")\n println(io, \"| | lower_bound = \", lower_bound(model))\n end\n println(io, \"Termination status: iteration limit\")\n μ, tσ = upper_bound(model; replications = replications)\n println(io, \"Upper bound = $(μ) ± $(tσ)\")\n return\nend","category":"page"},{"location":"tutorial/21_theory_intro/","page":"Theory I: an intro to SDDP","title":"Theory I: an intro to SDDP","text":"train (generic function with 1 method)","category":"page"},{"location":"tutorial/21_theory_intro/","page":"Theory I: an intro to SDDP","title":"Theory I: an intro to SDDP","text":"Using our model we defined earlier, we can go:","category":"page"},{"location":"tutorial/21_theory_intro/","page":"Theory I: an intro to SDDP","title":"Theory I: an intro to SDDP","text":"train(model; iteration_limit = 3, replications = 100)","category":"page"},{"location":"tutorial/21_theory_intro/","page":"Theory I: an intro to SDDP","title":"Theory I: an intro to SDDP","text":"Starting iteration 1\n| Forward Pass\n| | Visiting node 1\n| | | ω = 50.0\n| | | x = Dict(:volume=>200.0)\n| | | x′ = Dict(:volume=>100.0)\n| | | C(x, u, ω) = 0.0\n| | Visiting node 2\n| | | ω = 50.0\n| | | x = Dict(:volume=>100.0)\n| | | x′ = Dict(:volume=>0.0)\n| | | C(x, u, ω) = 0.0\n| | Visiting node 3\n| | | ω = 50.0\n| | | x = Dict(:volume=>0.0)\n| | | x′ = Dict(:volume=>0.0)\n| | | C(x, u, ω) = 15000.0\n| Backward pass\n| | Visiting node 3\n| | | Skipping node because the cost-to-go is 0\n| | Visiting node 2\n| | | Solving φ = 0.0\n| | | | V = 22500.0\n| | | | dVdx′ = Dict(:volume=>-150.0)\n| | | Solving φ = 50.0\n| | | | V = 15000.0\n| | | | dVdx′ = Dict(:volume=>-150.0)\n| | | Solving φ = 100.0\n| | | | V = 7500.0\n| | | | dVdx′ = Dict(:volume=>-150.0)\n| | | Adding cut : 150 volume_out + cost_to_go ≥ 15000.0\n| | Visiting node 1\n| | | Solving φ = 0.0\n| | | | V = 15000.0\n| | | | dVdx′ = Dict(:volume=>-100.0)\n| | | Solving φ = 50.0\n| | | | V = 10000.0\n| | | | dVdx′ = Dict(:volume=>-100.0)\n| | | Solving φ = 100.0\n| | | | V = 5000.0\n| | | | dVdx′ = Dict(:volume=>-100.0)\n| | | Adding cut : 99.99999999999999 volume_out + cost_to_go ≥ 20000.0\n| Finished iteration\n| | lower_bound = 5000.000000000002\nStarting iteration 2\n| Forward Pass\n| | Visiting node 1\n| | | ω = 
50.0\n| | | x = Dict(:volume=>200.0)\n| | | x′ = Dict(:volume=>200.0)\n| | | C(x, u, ω) = 5000.000000000002\n| | Visiting node 2\n| | | ω = 50.0\n| | | x = Dict(:volume=>200.0)\n| | | x′ = Dict(:volume=>100.0)\n| | | C(x, u, ω) = -2.8421709430404007e-12\n| | Visiting node 3\n| | | ω = 50.0\n| | | x = Dict(:volume=>100.0)\n| | | x′ = Dict(:volume=>0.0)\n| | | C(x, u, ω) = 0.0\n| Backward pass\n| | Visiting node 3\n| | | Skipping node because the cost-to-go is 0\n| | Visiting node 2\n| | | Solving φ = 0.0\n| | | | V = 7500.0\n| | | | dVdx′ = Dict(:volume=>-150.0)\n| | | Solving φ = 50.0\n| | | | V = 0.0\n| | | | dVdx′ = Dict(:volume=>-150.0)\n| | | Solving φ = 100.0\n| | | | V = 0.0\n| | | | dVdx′ = Dict(:volume=>0.0)\n| | | Adding cut : 100 volume_out + cost_to_go ≥ 12500.0\n| | Visiting node 1\n| | | Solving φ = 0.0\n| | | | V = 7499.999999999997\n| | | | dVdx′ = Dict(:volume=>-100.0)\n| | | Solving φ = 50.0\n| | | | V = 2499.9999999999973\n| | | | dVdx′ = Dict(:volume=>-100.0)\n| | | Solving φ = 100.0\n| | | | V = 0.0\n| | | | dVdx′ = Dict(:volume=>0.0)\n| | | Adding cut : 66.66666666666666 volume_out + cost_to_go ≥ 16666.666666666664\n| Finished iteration\n| | lower_bound = 8333.333333333332\nStarting iteration 3\n| Forward Pass\n| | Visiting node 1\n| | | ω = 100.0\n| | | x = Dict(:volume=>200.0)\n| | | x′ = Dict(:volume=>200.0)\n| | | C(x, u, ω) = 2500.0\n| | Visiting node 2\n| | | ω = 100.0\n| | | x = Dict(:volume=>200.0)\n| | | x′ = Dict(:volume=>125.0)\n| | | C(x, u, ω) = 0.0\n| | Visiting node 3\n| | | ω = 0.0\n| | | x = Dict(:volume=>125.0)\n| | | x′ = Dict(:volume=>0.0)\n| | | C(x, u, ω) = 3750.0\n| Backward pass\n| | Visiting node 3\n| | | Skipping node because the cost-to-go is 0\n| | Visiting node 2\n| | | Solving φ = 0.0\n| | | | V = 3750.0\n| | | | dVdx′ = Dict(:volume=>-150.0)\n| | | Solving φ = 50.0\n| | | | V = 0.0\n| | | | dVdx′ = Dict(:volume=>0.0)\n| | | Solving φ = 100.0\n| | | | V = 0.0\n| | | | dVdx′ = Dict(:volume=>0.0)\n| | | Adding cut : 50 volume_out + cost_to_go ≥ 7500.0\n| | Visiting node 1\n| | | Solving φ = 0.0\n| | | | V = 7500.0\n| | | | dVdx′ = Dict(:volume=>-100.0)\n| | | Solving φ = 50.0\n| | | | V = 2500.0\n| | | | dVdx′ = Dict(:volume=>-100.0)\n| | | Solving φ = 100.0\n| | | | V = 0.0\n| | | | dVdx′ = Dict(:volume=>0.0)\n| | | Adding cut : 66.66666666666666 volume_out + cost_to_go ≥ 16666.666666666664\n| Finished iteration\n| | lower_bound = 8333.333333333332\nTermination status: iteration limit\nUpper bound = 8475.0 ± 858.3515973343701\n","category":"page"},{"location":"tutorial/21_theory_intro/","page":"Theory I: an intro to SDDP","title":"Theory I: an intro to SDDP","text":"Success! 
We trained a policy for a finite horizon multistage stochastic program using stochastic dual dynamic programming.","category":"page"},{"location":"tutorial/21_theory_intro/#Implementation:-evaluating-the-policy","page":"Theory I: an intro to SDDP","title":"Implementation: evaluating the policy","text":"","category":"section"},{"location":"tutorial/21_theory_intro/","page":"Theory I: an intro to SDDP","title":"Theory I: an intro to SDDP","text":"A final step is the ability to evaluate the policy at a given point.","category":"page"},{"location":"tutorial/21_theory_intro/","page":"Theory I: an intro to SDDP","title":"Theory I: an intro to SDDP","text":"function evaluate_policy(\n model::PolicyGraph;\n node::Int,\n incoming_state::Dict{Symbol,Float64},\n random_variable,\n)\n the_node = model.nodes[node]\n the_node.uncertainty.parameterize(random_variable)\n for (k, v) in incoming_state\n JuMP.fix(the_node.states[k].in, v; force = true)\n end\n JuMP.optimize!(the_node.subproblem)\n return Dict(\n k => JuMP.value.(v)\n for (k, v) in JuMP.object_dictionary(the_node.subproblem)\n )\nend\n\nevaluate_policy(\n model;\n node = 1,\n incoming_state = Dict(:volume => 150.0),\n random_variable = 75,\n)","category":"page"},{"location":"tutorial/21_theory_intro/","page":"Theory I: an intro to SDDP","title":"Theory I: an intro to SDDP","text":"Dict{Symbol,Float64} with 8 entries:\n :volume_out => 200.0\n :demand_constraint => 150.0\n :hydro_spill => 0.0\n :inflow => 75.0\n :volume_in => 150.0\n :thermal_generation => 125.0\n :hydro_generation => 25.0\n :cost_to_go => 3333.33","category":"page"},{"location":"tutorial/21_theory_intro/","page":"Theory I: an intro to SDDP","title":"Theory I: an intro to SDDP","text":"note: Note\nThe random variable can be out-of-sample, i.e., it doesn't have to be in the vector Omega we created when defining the model! This is a notable difference to other multistage stochastic solution methods like progressive hedging or using the deterministic equivalent.","category":"page"},{"location":"tutorial/21_theory_intro/#Example:-infinite-horizon","page":"Theory I: an intro to SDDP","title":"Example: infinite horizon","text":"","category":"section"},{"location":"tutorial/21_theory_intro/","page":"Theory I: an intro to SDDP","title":"Theory I: an intro to SDDP","text":"As promised earlier, our implementation is actually pretty general. It can solve any multistage stochastic (linear) program defined by a policy graph, including infinite horizon problems!","category":"page"},{"location":"tutorial/21_theory_intro/","page":"Theory I: an intro to SDDP","title":"Theory I: an intro to SDDP","text":"Here's an example, where we have extended our earlier problem with an arc from node 3 to node 2 with probability 0.5. You can interpret the 0.5 as a discount factor.","category":"page"},{"location":"tutorial/21_theory_intro/","page":"Theory I: an intro to SDDP","title":"Theory I: an intro to SDDP","text":"model = PolicyGraph(\n subproblem_builder;\n graph = [\n Dict(2 => 1.0),\n Dict(3 => 1.0),\n Dict(2 => 0.5),\n ],\n lower_bound = 0.0,\n optimizer = GLPK.Optimizer,\n)","category":"page"},{"location":"tutorial/21_theory_intro/","page":"Theory I: an intro to SDDP","title":"Theory I: an intro to SDDP","text":"A policy graph with 3 nodes\nArcs:\n 1 => 2 w.p. 1.0\n 2 => 3 w.p. 1.0\n 3 => 2 w.p. 
0.5\n","category":"page"},{"location":"tutorial/21_theory_intro/","page":"Theory I: an intro to SDDP","title":"Theory I: an intro to SDDP","text":"Then, train a policy:","category":"page"},{"location":"tutorial/21_theory_intro/","page":"Theory I: an intro to SDDP","title":"Theory I: an intro to SDDP","text":"train(model; iteration_limit = 3, replications = 100)","category":"page"},{"location":"tutorial/21_theory_intro/","page":"Theory I: an intro to SDDP","title":"Theory I: an intro to SDDP","text":"Starting iteration 1\n| Forward Pass\n| | Visiting node 1\n| | | ω = 0.0\n| | | x = Dict(:volume=>200.0)\n| | | x′ = Dict(:volume=>50.0)\n| | | C(x, u, ω) = 0.0\n| | Visiting node 2\n| | | ω = 50.0\n| | | x = Dict(:volume=>50.0)\n| | | x′ = Dict(:volume=>0.0)\n| | | C(x, u, ω) = 5000.0\n| | Visiting node 3\n| | | ω = 100.0\n| | | x = Dict(:volume=>0.0)\n| | | x′ = Dict(:volume=>0.0)\n| | | C(x, u, ω) = 7500.0\n| Backward pass\n| | Visiting node 3\n| | | Solving φ = 0.0\n| | | | V = 15000.0\n| | | | dVdx′ = Dict(:volume=>-100.0)\n| | | Solving φ = 50.0\n| | | | V = 10000.0\n| | | | dVdx′ = Dict(:volume=>-100.0)\n| | | Solving φ = 100.0\n| | | | V = 5000.0\n| | | | dVdx′ = Dict(:volume=>-100.0)\n| | | Adding cut : 49.99999999999999 volume_out + cost_to_go ≥ 4999.999999999999\n| | Visiting node 2\n| | | Solving φ = 0.0\n| | | | V = 27500.0\n| | | | dVdx′ = Dict(:volume=>-150.0)\n| | | Solving φ = 50.0\n| | | | V = 20000.0\n| | | | dVdx′ = Dict(:volume=>-150.0)\n| | | Solving φ = 100.0\n| | | | V = 12500.0\n| | | | dVdx′ = Dict(:volume=>-150.0)\n| | | Adding cut : 150 volume_out + cost_to_go ≥ 20000.0\n| | Visiting node 1\n| | | Solving φ = 0.0\n| | | | V = 27500.0\n| | | | dVdx′ = Dict(:volume=>-150.0)\n| | | Solving φ = 50.0\n| | | | V = 20000.0\n| | | | dVdx′ = Dict(:volume=>-150.0)\n| | | Solving φ = 100.0\n| | | | V = 13333.333333333334\n| | | | dVdx′ = Dict(:volume=>-100.0)\n| | | Adding cut : 133.33333333333331 volume_out + cost_to_go ≥ 26944.444444444445\n| Finished iteration\n| | lower_bound = 5277.777777777781\nStarting iteration 2\n| Forward Pass\n| | Visiting node 1\n| | | ω = 100.0\n| | | x = Dict(:volume=>200.0)\n| | | x′ = Dict(:volume=>200.0)\n| | | C(x, u, ω) = 2500.0\n| | Visiting node 2\n| | | ω = 100.0\n| | | x = Dict(:volume=>200.0)\n| | | x′ = Dict(:volume=>133.333)\n| | | C(x, u, ω) = 0.0\n| | Visiting node 3\n| | | ω = 100.0\n| | | x = Dict(:volume=>133.333)\n| | | x′ = Dict(:volume=>83.3333)\n| | | C(x, u, ω) = 0.0\n| | Visiting node 2\n| | | ω = 100.0\n| | | x = Dict(:volume=>83.3333)\n| | | x′ = Dict(:volume=>133.333)\n| | | C(x, u, ω) = 10000.0\n| | Visiting node 3\n| | | ω = 50.0\n| | | x = Dict(:volume=>133.333)\n| | | x′ = Dict(:volume=>33.3333)\n| | | C(x, u, ω) = 0.0\n| Backward pass\n| | Visiting node 3\n| | | Solving φ = 0.0\n| | | | V = 30000.0\n| | | | dVdx′ = Dict(:volume=>-150.0)\n| | | Solving φ = 50.0\n| | | | V = 22500.0\n| | | | dVdx′ = Dict(:volume=>-150.0)\n| | | Solving φ = 100.0\n| | | | V = 15000.0\n| | | | dVdx′ = Dict(:volume=>-150.0)\n| | | Adding cut : 75 volume_out + cost_to_go ≥ 13750.000000000002\n| | Visiting node 2\n| | | Solving φ = 0.0\n| | | | V = 16249.999999999998\n| | | | dVdx′ = Dict(:volume=>-150.0)\n| | | Solving φ = 50.0\n| | | | V = 11250.0\n| | | | dVdx′ = Dict(:volume=>-75.0)\n| | | Solving φ = 100.0\n| | | | V = 7500.0\n| | | | dVdx′ = Dict(:volume=>-75.0)\n| | | Adding cut : 100 volume_out + cost_to_go ≥ 25000.0\n| | Visiting node 3\n| | | Solving φ = 0.0\n| | | | V = 31666.666666666664\n| | | | dVdx′ = Dict(:volume=>-100.0)\n| 
| | Solving φ = 50.0\n| | | | V = 26666.666666666664\n| | | | dVdx′ = Dict(:volume=>-100.0)\n| | | Solving φ = 100.0\n| | | | V = 21666.666666666664\n| | | | dVdx′ = Dict(:volume=>-100.0)\n| | | Adding cut : 49.99999999999999 volume_out + cost_to_go ≥ 17499.999999999996\n| | Visiting node 2\n| | | Solving φ = 0.0\n| | | | V = 19999.999999999996\n| | | | dVdx′ = Dict(:volume=>-150.0)\n| | | Solving φ = 50.0\n| | | | V = 15833.333333333328\n| | | | dVdx′ = Dict(:volume=>-50.0)\n| | | Solving φ = 100.0\n| | | | V = 13333.333333333328\n| | | | dVdx′ = Dict(:volume=>-50.0)\n| | | Adding cut : 83.33333333333331 volume_out + cost_to_go ≥ 27499.999999999993\n| | Visiting node 1\n| | | Solving φ = 0.0\n| | | | V = 23333.33333333333\n| | | | dVdx′ = Dict(:volume=>-83.3333)\n| | | Solving φ = 50.0\n| | | | V = 19166.66666666666\n| | | | dVdx′ = Dict(:volume=>-83.3333)\n| | | Solving φ = 100.0\n| | | | V = 14999.999999999996\n| | | | dVdx′ = Dict(:volume=>-83.3333)\n| | | Adding cut : 83.33333333333331 volume_out + cost_to_go ≥ 35833.33333333333\n| Finished iteration\n| | lower_bound = 24166.666666666664\nStarting iteration 3\n| Forward Pass\n| | Visiting node 1\n| | | ω = 0.0\n| | | x = Dict(:volume=>200.0)\n| | | x′ = Dict(:volume=>200.0)\n| | | C(x, u, ω) = 7500.0\n| | Visiting node 2\n| | | ω = 0.0\n| | | x = Dict(:volume=>200.0)\n| | | x′ = Dict(:volume=>50.0)\n| | | C(x, u, ω) = 0.0\n| | Visiting node 3\n| | | ω = 100.0\n| | | x = Dict(:volume=>50.0)\n| | | x′ = Dict(:volume=>0.0)\n| | | C(x, u, ω) = 0.0\n| Backward pass\n| | Visiting node 3\n| | | Solving φ = 0.0\n| | | | V = 42499.99999999999\n| | | | dVdx′ = Dict(:volume=>-100.0)\n| | | Solving φ = 50.0\n| | | | V = 37499.99999999999\n| | | | dVdx′ = Dict(:volume=>-100.0)\n| | | Solving φ = 100.0\n| | | | V = 32499.999999999993\n| | | | dVdx′ = Dict(:volume=>-100.0)\n| | | Adding cut : 49.99999999999999 volume_out + cost_to_go ≥ 18749.999999999996\n| | Visiting node 2\n| | | Solving φ = 0.0\n| | | | V = 33750.0\n| | | | dVdx′ = Dict(:volume=>-150.0)\n| | | Solving φ = 50.0\n| | | | V = 26249.999999999996\n| | | | dVdx′ = Dict(:volume=>-150.0)\n| | | Solving φ = 100.0\n| | | | V = 18749.999999999996\n| | | | dVdx′ = Dict(:volume=>-150.0)\n| | | Adding cut : 150 volume_out + cost_to_go ≥ 33750.0\n| | Visiting node 1\n| | | Solving φ = 0.0\n| | | | V = 24062.499999999993\n| | | | dVdx′ = Dict(:volume=>-100.0)\n| | | Solving φ = 50.0\n| | | | V = 19166.66666666666\n| | | | dVdx′ = Dict(:volume=>-83.3333)\n| | | Solving φ = 100.0\n| | | | V = 14999.999999999996\n| | | | dVdx′ = Dict(:volume=>-83.3333)\n| | | Adding cut : 88.88888888888887 volume_out + cost_to_go ≥ 37187.49999999999\n| Finished iteration\n| | lower_bound = 24409.72222222222\nTermination status: iteration limit\nUpper bound = 26581.25 ± 4608.667643704225\n","category":"page"},{"location":"tutorial/21_theory_intro/","page":"Theory I: an intro to SDDP","title":"Theory I: an intro to SDDP","text":"Success! We trained a policy for an infinite horizon multistage stochastic program using stochastic dual dynamic programming. 
Note how some of the forward passes are different lengths!","category":"page"},{"location":"tutorial/21_theory_intro/","page":"Theory I: an intro to SDDP","title":"Theory I: an intro to SDDP","text":"evaluate_policy(\n model;\n node = 3,\n incoming_state = Dict(:volume => 100.0),\n random_variable = 10.0,\n)","category":"page"},{"location":"tutorial/21_theory_intro/","page":"Theory I: an intro to SDDP","title":"Theory I: an intro to SDDP","text":"Dict{Symbol,Float64} with 8 entries:\n :volume_out => 0.0\n :demand_constraint => 150.0\n :hydro_spill => 0.0\n :inflow => 10.0\n :volume_in => 100.0\n :thermal_generation => 40.0\n :hydro_generation => 110.0\n :cost_to_go => 18750.0","category":"page"},{"location":"tutorial/21_theory_intro/","page":"Theory I: an intro to SDDP","title":"Theory I: an intro to SDDP","text":"","category":"page"},{"location":"tutorial/21_theory_intro/","page":"Theory I: an intro to SDDP","title":"Theory I: an intro to SDDP","text":"This page was generated using Literate.jl.","category":"page"},{"location":"examples/all_blacks/","page":"Deterministic All Blacks","title":"Deterministic All Blacks","text":"EditURL = \"https://github.com/odow/SDDP.jl/blob/master/docs/src/examples/all_blacks.jl\"","category":"page"},{"location":"examples/all_blacks/#Deterministic-All-Blacks","page":"Deterministic All Blacks","title":"Deterministic All Blacks","text":"","category":"section"},{"location":"examples/all_blacks/","page":"Deterministic All Blacks","title":"Deterministic All Blacks","text":"using SDDP, GLPK, Test\n\nfunction all_blacks()\n # Number of time periods, number of seats, R_ij = evenue from selling seat\n # i at time j, offer_ij = whether an offer for seat i will come at time j\n (T, N, R, offer) = (3, 2, [3 3 6; 3 3 6], [1 1 0; 1 0 1])\n model = SDDP.LinearPolicyGraph(\n stages = T,\n sense = :Max,\n upper_bound = 100.0,\n optimizer = GLPK.Optimizer,\n integrality_handler = SDDP.SDDiP(),\n ) do sp, stage\n # Seat remaining?\n @variable(sp, 0 <= x[1:N] <= 1, SDDP.State, Bin, initial_value = 1)\n # Action: accept offer, or don't accept offer\n @variable(sp, accept_offer, Bin)\n # Balance on seats\n @constraint(sp, [i in 1:N], x[i].out == x[i].in - offer[i, stage] * accept_offer)\n @stageobjective(sp, sum(R[i, stage] * offer[i, stage] * accept_offer for i = 1:N))\n end\n SDDP.train(model, iteration_limit = 10, log_frequency = 5)\n @test SDDP.calculate_bound(model) ≈ 9.0\n return\nend\n\nall_blacks()","category":"page"},{"location":"examples/all_blacks/","page":"Deterministic All Blacks","title":"Deterministic All Blacks","text":"------------------------------------------------------------------------------\n SDDP.jl (c) Oscar Dowson, 2017-21\n\nProblem\n Nodes : 3\n State variables : 2\n Scenarios : 1.00000e+00\n Solver : serial mode\n\nNumerical stability report\n Non-zero Matrix range [1e+00, 1e+00]\n Non-zero Objective range [1e+00, 6e+00]\n Non-zero Bounds range [1e+00, 1e+02]\n Non-zero RHS range [0e+00, 0e+00]\nNo problems detected\n\n Iteration Simulation Bound Time (s) Proc. 
ID # Solves\n 5 9.000000e+00 9.000000e+00 7.949615e-02 1 30\n 10 9.000000e+00 9.000000e+00 9.128213e-02 1 60\n\nTerminating training with status: iteration_limit\n------------------------------------------------------------------------------\n","category":"page"},{"location":"examples/all_blacks/","page":"Deterministic All Blacks","title":"Deterministic All Blacks","text":"","category":"page"},{"location":"examples/all_blacks/","page":"Deterministic All Blacks","title":"Deterministic All Blacks","text":"This page was generated using Literate.jl.","category":"page"},{"location":"tutorial/03_objective_uncertainty/","page":"Basic III: objective uncertainty","title":"Basic III: objective uncertainty","text":"EditURL = \"https://github.com/odow/SDDP.jl/blob/master/docs/src/tutorial/03_objective_uncertainty.jl\"","category":"page"},{"location":"tutorial/03_objective_uncertainty/#Basic-III:-objective-uncertainty","page":"Basic III: objective uncertainty","title":"Basic III: objective uncertainty","text":"","category":"section"},{"location":"tutorial/03_objective_uncertainty/","page":"Basic III: objective uncertainty","title":"Basic III: objective uncertainty","text":"In the previous tutorial, Basic II: adding uncertainty, we created a stochastic hydro-thermal scheduling model. In this tutorial, we extend the problem by adding uncertainty to the fuel costs.","category":"page"},{"location":"tutorial/03_objective_uncertainty/","page":"Basic III: objective uncertainty","title":"Basic III: objective uncertainty","text":"Previously, we assumed that the fuel cost was deterministic: \\$50/MWh in the first stage, \\$100/MWh in the second stage, and \\$150/MWh in the third stage. For this tutorial, we assume that in addition to these base costs, the actual fuel cost is correlated with the inflows.","category":"page"},{"location":"tutorial/03_objective_uncertainty/","page":"Basic III: objective uncertainty","title":"Basic III: objective uncertainty","text":"Our new model for the uncertainty is given by the following table:","category":"page"},{"location":"tutorial/03_objective_uncertainty/","page":"Basic III: objective uncertainty","title":"Basic III: objective uncertainty","text":"ω 1 2 3\nP(ω) 1/3 1/3 1/3\ninflow 0 50 100\nfuel_multiplier 1.5 1.0 0.75","category":"page"},{"location":"tutorial/03_objective_uncertainty/","page":"Basic III: objective uncertainty","title":"Basic III: objective uncertainty","text":"In stage t, the objective is now to minimize:","category":"page"},{"location":"tutorial/03_objective_uncertainty/","page":"Basic III: objective uncertainty","title":"Basic III: objective uncertainty","text":"fuel_multiplier * fuel_cost[t] * thermal_generation","category":"page"},{"location":"tutorial/03_objective_uncertainty/#Creating-a-model","page":"Basic III: objective uncertainty","title":"Creating a model","text":"","category":"section"},{"location":"tutorial/03_objective_uncertainty/","page":"Basic III: objective uncertainty","title":"Basic III: objective uncertainty","text":"To add an uncertain objective, we can simply call @stageobjective from inside the SDDP.parameterize function.","category":"page"},{"location":"tutorial/03_objective_uncertainty/","page":"Basic III: objective uncertainty","title":"Basic III: objective uncertainty","text":"using SDDP, GLPK\n\nmodel = SDDP.LinearPolicyGraph(\n stages = 3,\n sense = :Min,\n lower_bound = 0.0,\n optimizer = GLPK.Optimizer\n) do subproblem, t\n # Define the state variable.\n @variable(subproblem, 0 <= volume <= 200, SDDP.State, initial_value = 
200)\n # Define the control variables.\n @variables(subproblem, begin\n thermal_generation >= 0\n hydro_generation >= 0\n hydro_spill >= 0\n inflow\n end)\n # Define the constraints\n @constraints(subproblem, begin\n volume.out == volume.in + inflow - hydro_generation - hydro_spill\n thermal_generation + hydro_generation == 150.0\n end)\n fuel_cost = [50.0, 100.0, 150.0]\n # Parameterize the subproblem.\n Ω = [\n (inflow = 0.0, fuel_multiplier = 1.5),\n (inflow = 50.0, fuel_multiplier = 1.0),\n (inflow = 100.0, fuel_multiplier = 0.75)\n ]\n SDDP.parameterize(subproblem, Ω, [1/3, 1/3, 1/3]) do ω\n JuMP.fix(inflow, ω.inflow)\n @stageobjective(subproblem,\n ω.fuel_multiplier * fuel_cost[t] * thermal_generation\n )\n end\nend","category":"page"},{"location":"tutorial/03_objective_uncertainty/","page":"Basic III: objective uncertainty","title":"Basic III: objective uncertainty","text":"A policy graph with 3 nodes.\n Node indices: 1, 2, 3\n","category":"page"},{"location":"tutorial/03_objective_uncertainty/#Training-and-simulating-the-policy","page":"Basic III: objective uncertainty","title":"Training and simulating the policy","text":"","category":"section"},{"location":"tutorial/03_objective_uncertainty/","page":"Basic III: objective uncertainty","title":"Basic III: objective uncertainty","text":"As in the previous two tutorials, we train and simulate the policy:","category":"page"},{"location":"tutorial/03_objective_uncertainty/","page":"Basic III: objective uncertainty","title":"Basic III: objective uncertainty","text":"SDDP.train(model; iteration_limit = 10)\n\nsimulations = SDDP.simulate(model, 500)\n\nobjective_values = [\n sum(stage[:stage_objective] for stage in sim) for sim in simulations\n]\n\nusing Statistics\n\nμ = round(mean(objective_values), digits = 2)\nci = round(1.96 * std(objective_values) / sqrt(500), digits = 2)\n\nprintln(\"Confidence interval: \", μ, \" ± \", ci)\nprintln(\"Lower bound: \", round(SDDP.calculate_bound(model), digits = 2))","category":"page"},{"location":"tutorial/03_objective_uncertainty/","page":"Basic III: objective uncertainty","title":"Basic III: objective uncertainty","text":"------------------------------------------------------------------------------\n SDDP.jl (c) Oscar Dowson, 2017-21\n\nProblem\n Nodes : 3\n State variables : 1\n Scenarios : 2.70000e+01\n Solver : serial mode\n\nNumerical stability report\n Non-zero Matrix range [1e+00, 1e+00]\n Non-zero Objective range [1e+00, 2e+02]\n Non-zero Bounds range [2e+02, 2e+02]\n Non-zero RHS range [2e+02, 2e+02]\nNo problems detected\n\n Iteration Simulation Bound Time (s) Proc. ID # Solves\n 1 2.250000e+04 8.541667e+03 1.477411e-01 1 12\n 2 1.240385e+04 1.050595e+04 1.485231e-01 1 24\n 3 5.000000e+03 1.050595e+04 1.492431e-01 1 36\n 4 1.125000e+04 1.050595e+04 1.499290e-01 1 48\n 5 6.160714e+03 1.062500e+04 1.507161e-01 1 60\n 6 1.875000e+03 1.062500e+04 1.514301e-01 1 72\n 7 5.000000e+03 1.062500e+04 1.522050e-01 1 84\n 8 3.375000e+04 1.062500e+04 1.529970e-01 1 96\n 9 5.000000e+03 1.062500e+04 1.537821e-01 1 108\n 10 5.000000e+03 1.062500e+04 1.545711e-01 1 120\n\nTerminating training with status: iteration_limit\n------------------------------------------------------------------------------\nConfidence interval: 10522.5 ± 730.4\nLower bound: 10625.0\n","category":"page"},{"location":"tutorial/03_objective_uncertainty/","page":"Basic III: objective uncertainty","title":"Basic III: objective uncertainty","text":"This concludes our third tutorial for SDDP.jl. 
In the next tutorial, Basic IV: Markov uncertainty, we add stagewise-dependence to the inflows using a Markov chain.","category":"page"},{"location":"tutorial/03_objective_uncertainty/","page":"Basic III: objective uncertainty","title":"Basic III: objective uncertainty","text":"","category":"page"},{"location":"tutorial/03_objective_uncertainty/","page":"Basic III: objective uncertainty","title":"Basic III: objective uncertainty","text":"This page was generated using Literate.jl.","category":"page"},{"location":"tutorial/22_risk/","page":"Theory II: risk aversion","title":"Theory II: risk aversion","text":"EditURL = \"https://github.com/odow/SDDP.jl/blob/master/docs/src/tutorial/22_risk.jl\"","category":"page"},{"location":"tutorial/22_risk/#Theory-II:-risk-aversion","page":"Theory II: risk aversion","title":"Theory II: risk aversion","text":"","category":"section"},{"location":"tutorial/22_risk/","page":"Theory II: risk aversion","title":"Theory II: risk aversion","text":"In Theory I: an intro to SDDP, we implemented a basic version of the SDDP algorithm. This tutorial extends that implementation to add risk-aversion.","category":"page"},{"location":"tutorial/22_risk/","page":"Theory II: risk aversion","title":"Theory II: risk aversion","text":"Packages","category":"page"},{"location":"tutorial/22_risk/","page":"Theory II: risk aversion","title":"Theory II: risk aversion","text":"This tutorial uses the following packages. For clarity, we call import PackageName so that we must prefix PackageName. to all functions and structs provided by that package. Everything not prefixed is either part of base Julia, or we wrote it.","category":"page"},{"location":"tutorial/22_risk/","page":"Theory II: risk aversion","title":"Theory II: risk aversion","text":"import ForwardDiff\nimport GLPK\nimport Ipopt\nimport JuMP\nimport Statistics","category":"page"},{"location":"tutorial/22_risk/#Risk-aversion:-what-and-why?","page":"Theory II: risk aversion","title":"Risk aversion: what and why?","text":"","category":"section"},{"location":"tutorial/22_risk/","page":"Theory II: risk aversion","title":"Theory II: risk aversion","text":"Often, the agents making decisions in complex systems are risk-averse, that is, they care more about avoiding very bad outcomes, than they do about having a good average outcome.","category":"page"},{"location":"tutorial/22_risk/","page":"Theory II: risk aversion","title":"Theory II: risk aversion","text":"As an example, consumers in a hydro-thermal problem may be willing to pay a slightly higher electricity price on average, if it means that there is a lower probability of blackouts.","category":"page"},{"location":"tutorial/22_risk/","page":"Theory II: risk aversion","title":"Theory II: risk aversion","text":"Risk aversion in multistage stochastic programming has been well studied in the academic literature, and is widely used in production implementations around the world.","category":"page"},{"location":"tutorial/22_risk/#Risk-measures","page":"Theory II: risk aversion","title":"Risk measures","text":"","category":"section"},{"location":"tutorial/22_risk/","page":"Theory II: risk aversion","title":"Theory II: risk aversion","text":"One way to add risk aversion to models is to use a risk measure. A risk measure is a function that maps a random variable to a real number.","category":"page"},{"location":"tutorial/22_risk/","page":"Theory II: risk aversion","title":"Theory II: risk aversion","text":"You are probably already familiar with lots of different risk measures. 
For example, the mean, median, mode, and maximum are all risk measures.","category":"page"},{"location":"tutorial/22_risk/","page":"Theory II: risk aversion","title":"Theory II: risk aversion","text":"We call the act of applying a risk measure to a random variable \"computing the risk\" of a random variable.","category":"page"},{"location":"tutorial/22_risk/","page":"Theory II: risk aversion","title":"Theory II: risk aversion","text":"To keep things simple, and because we need it for SDDP, we restrict our attention to random variables Z with a finite sample space Omega and positive probabilities p_omega for all omega in Omega. We denote the realizations of Z by Z(omega) = z_omega.","category":"page"},{"location":"tutorial/22_risk/","page":"Theory II: risk aversion","title":"Theory II: risk aversion","text":"A risk measure, mathbbFZ, is a convex risk measure if it satisfies the following axioms:","category":"page"},{"location":"tutorial/22_risk/","page":"Theory II: risk aversion","title":"Theory II: risk aversion","text":"Axiom 1: monotonicity","category":"page"},{"location":"tutorial/22_risk/","page":"Theory II: risk aversion","title":"Theory II: risk aversion","text":"Given two random variables Z_1 and Z_2, with Z_1 le Z_2 almost surely, then mathbbFZ_1 le FZ_2.","category":"page"},{"location":"tutorial/22_risk/","page":"Theory II: risk aversion","title":"Theory II: risk aversion","text":"Axiom 2: translation equivariance","category":"page"},{"location":"tutorial/22_risk/","page":"Theory II: risk aversion","title":"Theory II: risk aversion","text":"Given two random variables Z_1 and Z_2, then for all a in mathbbR, mathbbFZ + a = mathbbFZ + a.","category":"page"},{"location":"tutorial/22_risk/","page":"Theory II: risk aversion","title":"Theory II: risk aversion","text":"Axiom 3: convexity","category":"page"},{"location":"tutorial/22_risk/","page":"Theory II: risk aversion","title":"Theory II: risk aversion","text":"Given two random variables Z_1 and Z_2, then for all a in 0 1,","category":"page"},{"location":"tutorial/22_risk/","page":"Theory II: risk aversion","title":"Theory II: risk aversion","text":"mathbbFa Z_1 + (1 - a) Z_2 le a mathbbFZ_1 + (1-a)mathbbFZ_2","category":"page"},{"location":"tutorial/22_risk/","page":"Theory II: risk aversion","title":"Theory II: risk aversion","text":"Now we know what a risk measure is, let's see how we can use them to form risk-averse decision rules.","category":"page"},{"location":"tutorial/22_risk/#Risk-averse-decision-rules:-Part-I","page":"Theory II: risk aversion","title":"Risk-averse decision rules: Part I","text":"","category":"section"},{"location":"tutorial/22_risk/","page":"Theory II: risk aversion","title":"Theory II: risk aversion","text":"We started this tutorial by explaining that we are interested in risk aversion because some agents are risk-averse. What that really means, is that they want a policy that is also risk-averse. 
The question then becomes, how do we create risk-averse decision rules and policies?","category":"page"},{"location":"tutorial/22_risk/","page":"Theory II: risk aversion","title":"Theory II: risk aversion","text":"Recall from Theory I: an intro to SDDP that we can form an optimal decision rule using the recursive formulation:","category":"page"},{"location":"tutorial/22_risk/","page":"Theory II: risk aversion","title":"Theory II: risk aversion","text":"beginaligned\nV_i(x omega) = minlimits_barx x^prime u C_i(barx u omega) + mathbbE_j in i^+ varphi in Omega_jV_j(x^prime varphi)\n x^prime = T_i(barx u omega) \n u in U_i(barx omega) \n barx = x\nendaligned","category":"page"},{"location":"tutorial/22_risk/","page":"Theory II: risk aversion","title":"Theory II: risk aversion","text":"where our decision rule, pi_i(x omega), solves this optimization problem and returns a u^* corresponding to an optimal solution.","category":"page"},{"location":"tutorial/22_risk/","page":"Theory II: risk aversion","title":"Theory II: risk aversion","text":"If we can replace the expectation operator mathbbE with another (more risk-averse) risk measure mathbbF, then our decision rule will attempt to choose a control decision now that minimizes the risk of the future costs, as opposed to the expectation of the future costs. This makes our decisions more risk-averse, because we care more about the worst outcomes than we do about the average.","category":"page"},{"location":"tutorial/22_risk/","page":"Theory II: risk aversion","title":"Theory II: risk aversion","text":"Therefore, we can form a risk-averse decision rule using the formulation:","category":"page"},{"location":"tutorial/22_risk/","page":"Theory II: risk aversion","title":"Theory II: risk aversion","text":"beginaligned\nV_i(x omega) = minlimits_barx x^prime u C_i(barx u omega) + mathbbF_j in i^+ varphi in Omega_jV_j(x^prime varphi)\n x^prime = T_i(barx u omega) \n u in U_i(barx omega) \n barx = x\nendaligned","category":"page"},{"location":"tutorial/22_risk/","page":"Theory II: risk aversion","title":"Theory II: risk aversion","text":"To convert this problem into a tractable equivalent, we apply Kelley's algorithm to the risk-averse cost-to-go term mathbbF_j in i^+ varphi in Omega_jV_j(x^prime varphi), to obtain the approximated problem:","category":"page"},{"location":"tutorial/22_risk/","page":"Theory II: risk aversion","title":"Theory II: risk aversion","text":"beginaligned\nV_i^K(x omega) = minlimits_barx x^prime u C_i(barx u omega) + theta\n x^prime = T_i(barx u omega) \n u in U_i(barx omega) \n barx = x \n theta ge mathbbF_j in i^+ varphi in Omega_jleftV_j^k(x^prime_k varphi)right + fracddx^primemathbbF_j in i^+ varphi in Omega_jleftV_j^k(x^prime_k varphi)right^top (x^prime - x^prime_k)quad k=1ldotsK\nendaligned","category":"page"},{"location":"tutorial/22_risk/","page":"Theory II: risk aversion","title":"Theory II: risk aversion","text":"warning: Warning\nNote how we need to explicitly compute a risk-averse subgradient! (We need a subgradient because the function might not be differentiable.) 
When constructing cuts with the expectation operator in Theory I: an intro to SDDP, we implicitly used the law of total expectation to combine the two expectations; we can't do that for a general risk measure.","category":"page"},{"location":"tutorial/22_risk/","page":"Theory II: risk aversion","title":"Theory II: risk aversion","text":"tip: Homework challenge\nIf it's not obvious why we can use Kelley's here, try to use the axioms of a convex risk measure to show that mathbbF_j in i^+ varphi in Omega_jV_j(x^prime varphi) is a convex function w.r.t. x^prime if V_j is also a convex function.","category":"page"},{"location":"tutorial/22_risk/","page":"Theory II: risk aversion","title":"Theory II: risk aversion","text":"Our challenge is now to find a way to compute the risk-averse cost-to-go function mathbbF_j in i^+ varphi in Omega_jleftV_j^k(x^prime_k varphi)right, and a way to compute a subgradient of the risk-averse cost-to-go function with respect to x^prime.","category":"page"},{"location":"tutorial/22_risk/#Primal-risk-measures","page":"Theory II: risk aversion","title":"Primal risk measures","text":"","category":"section"},{"location":"tutorial/22_risk/","page":"Theory II: risk aversion","title":"Theory II: risk aversion","text":"Now we know what a risk measure is, and how we will use it, let's implement some code to see how we can compute the risk of some random variables.","category":"page"},{"location":"tutorial/22_risk/","page":"Theory II: risk aversion","title":"Theory II: risk aversion","text":"note: Note\nWe're going to start by implementing the primal version of each risk measure. We implement the dual version in the next section.","category":"page"},{"location":"tutorial/22_risk/","page":"Theory II: risk aversion","title":"Theory II: risk aversion","text":"First, we need some data:","category":"page"},{"location":"tutorial/22_risk/","page":"Theory II: risk aversion","title":"Theory II: risk aversion","text":"Z = [1.0, 2.0, 3.0, 4.0]","category":"page"},{"location":"tutorial/22_risk/","page":"Theory II: risk aversion","title":"Theory II: risk aversion","text":"4-element Array{Float64,1}:\n 1.0\n 2.0\n 3.0\n 4.0","category":"page"},{"location":"tutorial/22_risk/","page":"Theory II: risk aversion","title":"Theory II: risk aversion","text":"with probabilities:","category":"page"},{"location":"tutorial/22_risk/","page":"Theory II: risk aversion","title":"Theory II: risk aversion","text":"p = [0.1, 0.2, 0.4, 0.3]","category":"page"},{"location":"tutorial/22_risk/","page":"Theory II: risk aversion","title":"Theory II: risk aversion","text":"4-element Array{Float64,1}:\n 0.1\n 0.2\n 0.4\n 0.3","category":"page"},{"location":"tutorial/22_risk/","page":"Theory II: risk aversion","title":"Theory II: risk aversion","text":"We're going to implement a number of different risk measures, so to leverage Julia's multiple dispatch, we create an abstract type:","category":"page"},{"location":"tutorial/22_risk/","page":"Theory II: risk aversion","title":"Theory II: risk aversion","text":"abstract type AbstractRiskMeasure end","category":"page"},{"location":"tutorial/22_risk/","page":"Theory II: risk aversion","title":"Theory II: risk aversion","text":"and function to overload:","category":"page"},{"location":"tutorial/22_risk/","page":"Theory II: risk aversion","title":"Theory II: risk aversion","text":"\"\"\"\n primal_risk(F::AbstractRiskMeasure, Z::Vector{<:Real}, p::Vector{Float64})\n\nUse `F` to compute the risk of the random variable defined by a vector of costs\n`Z` and non-zero 
probabilities `p`.\n\"\"\"\nfunction primal_risk end","category":"page"},{"location":"tutorial/22_risk/","page":"Theory II: risk aversion","title":"Theory II: risk aversion","text":"Main.##1589.primal_risk","category":"page"},{"location":"tutorial/22_risk/","page":"Theory II: risk aversion","title":"Theory II: risk aversion","text":"note: Note\nWe want Vector{<:Real} instead of Vector{Float64} because we're going to automatically differentiate this function in the next section.","category":"page"},{"location":"tutorial/22_risk/#Expectation","page":"Theory II: risk aversion","title":"Expectation","text":"","category":"section"},{"location":"tutorial/22_risk/","page":"Theory II: risk aversion","title":"Theory II: risk aversion","text":"The expectation, mathbbE, also called the mean or the average, is the most widely used convex risk measure. The expectation of a random variable is just the sum of Z weighted by the probability:","category":"page"},{"location":"tutorial/22_risk/","page":"Theory II: risk aversion","title":"Theory II: risk aversion","text":"mathbbFZ = mathbbE_pZ = sumlimits_omegainOmega p_omega z_omega","category":"page"},{"location":"tutorial/22_risk/","page":"Theory II: risk aversion","title":"Theory II: risk aversion","text":"struct Expectation <: AbstractRiskMeasure end\n\nfunction primal_risk(::Expectation, Z::Vector{<:Real}, p::Vector{Float64})\n return sum(p[i] * Z[i] for i = 1:length(p))\nend","category":"page"},{"location":"tutorial/22_risk/","page":"Theory II: risk aversion","title":"Theory II: risk aversion","text":"primal_risk (generic function with 1 method)","category":"page"},{"location":"tutorial/22_risk/","page":"Theory II: risk aversion","title":"Theory II: risk aversion","text":"Let's try it out:","category":"page"},{"location":"tutorial/22_risk/","page":"Theory II: risk aversion","title":"Theory II: risk aversion","text":"primal_risk(Expectation(), Z, p)","category":"page"},{"location":"tutorial/22_risk/","page":"Theory II: risk aversion","title":"Theory II: risk aversion","text":"2.9000000000000004","category":"page"},{"location":"tutorial/22_risk/#WorstCase","page":"Theory II: risk aversion","title":"WorstCase","text":"","category":"section"},{"location":"tutorial/22_risk/","page":"Theory II: risk aversion","title":"Theory II: risk aversion","text":"The worst-case risk measure, also called the maximum, is another widely used convex risk measure. 
This risk measure doesn't care about the probability vector p, only the cost vector Z:","category":"page"},{"location":"tutorial/22_risk/","page":"Theory II: risk aversion","title":"Theory II: risk aversion","text":"mathbbFZ = maxZ = maxlimits_omegainOmega z_omega","category":"page"},{"location":"tutorial/22_risk/","page":"Theory II: risk aversion","title":"Theory II: risk aversion","text":"struct WorstCase <: AbstractRiskMeasure end\n\nfunction primal_risk(::WorstCase, Z::Vector{<:Real}, ::Vector{Float64})\n return maximum(Z)\nend","category":"page"},{"location":"tutorial/22_risk/","page":"Theory II: risk aversion","title":"Theory II: risk aversion","text":"primal_risk (generic function with 2 methods)","category":"page"},{"location":"tutorial/22_risk/","page":"Theory II: risk aversion","title":"Theory II: risk aversion","text":"Let's try it out:","category":"page"},{"location":"tutorial/22_risk/","page":"Theory II: risk aversion","title":"Theory II: risk aversion","text":"primal_risk(WorstCase(), Z, p)","category":"page"},{"location":"tutorial/22_risk/","page":"Theory II: risk aversion","title":"Theory II: risk aversion","text":"4.0","category":"page"},{"location":"tutorial/22_risk/#Entropic","page":"Theory II: risk aversion","title":"Entropic","text":"","category":"section"},{"location":"tutorial/22_risk/","page":"Theory II: risk aversion","title":"Theory II: risk aversion","text":"A more interesting, and less widely used risk measure is the entropic risk measure. The entropic risk measure is parameterized by a value gamma 0, and computes the risk of a random variable as:","category":"page"},{"location":"tutorial/22_risk/","page":"Theory II: risk aversion","title":"Theory II: risk aversion","text":"mathbbF_gammaZ = frac1gammalogleft(mathbbE_pe^gamma Zright) = frac1gammalogleft(sumlimits_omegainOmegap_omega e^gamma z_omegaright)","category":"page"},{"location":"tutorial/22_risk/","page":"Theory II: risk aversion","title":"Theory II: risk aversion","text":"tip: Homework challenge\nProve that the entropic risk measure satisfies the three axioms of a convex risk measure.","category":"page"},{"location":"tutorial/22_risk/","page":"Theory II: risk aversion","title":"Theory II: risk aversion","text":"struct Entropic <: AbstractRiskMeasure\n γ::Float64\n function Entropic(γ)\n if !(γ > 0)\n throw(DomainError(γ, \"Entropic risk measure must have γ > 0.\"))\n end\n return new(γ)\n end\nend\n\nfunction primal_risk(F::Entropic, Z::Vector{<:Real}, p::Vector{Float64})\n return 1 / F.γ * log(sum(p[i] * exp(F.γ * Z[i]) for i = 1:length(p)))\nend","category":"page"},{"location":"tutorial/22_risk/","page":"Theory II: risk aversion","title":"Theory II: risk aversion","text":"primal_risk (generic function with 3 methods)","category":"page"},{"location":"tutorial/22_risk/","page":"Theory II: risk aversion","title":"Theory II: risk aversion","text":"warning: Warning\nexp(x) overflows when x 709. 
Therefore, if we are passed a vector of Float64, use arbitrary precision arithmetic with big.(Z).","category":"page"},{"location":"tutorial/22_risk/","page":"Theory II: risk aversion","title":"Theory II: risk aversion","text":"function primal_risk(F::Entropic, Z::Vector{Float64}, p::Vector{Float64})\n return Float64(primal_risk(F, big.(Z), p))\nend","category":"page"},{"location":"tutorial/22_risk/","page":"Theory II: risk aversion","title":"Theory II: risk aversion","text":"primal_risk (generic function with 4 methods)","category":"page"},{"location":"tutorial/22_risk/","page":"Theory II: risk aversion","title":"Theory II: risk aversion","text":"Let's try it out for different values of gamma:","category":"page"},{"location":"tutorial/22_risk/","page":"Theory II: risk aversion","title":"Theory II: risk aversion","text":"for γ in [0.001, 0.01, 0.1, 1.0, 10.0, 100.0, 1_000.0]\n println(\"γ = $(γ), F[Z] = \", primal_risk(Entropic(γ), Z, p))\nend","category":"page"},{"location":"tutorial/22_risk/","page":"Theory II: risk aversion","title":"Theory II: risk aversion","text":"γ = 0.001, F[Z] = 2.9004449279791005\nγ = 0.01, F[Z] = 2.9044427792027596\nγ = 0.1, F[Z] = 2.9437604953310674\nγ = 1.0, F[Z] = 3.264357634151263\nγ = 10.0, F[Z] = 3.879608772845574\nγ = 100.0, F[Z] = 3.987960271956741\nγ = 1000.0, F[Z] = 3.998796027195674\n","category":"page"},{"location":"tutorial/22_risk/","page":"Theory II: risk aversion","title":"Theory II: risk aversion","text":"info: Info\nThe entropic has two extremes. As gamma rightarrow 0, the entropic acts like the expectation risk measure, and as gamma rightarrow infty, the entropic acts like the worst-case risk measure.","category":"page"},{"location":"tutorial/22_risk/","page":"Theory II: risk aversion","title":"Theory II: risk aversion","text":"Computing risk measures this way works well for computing the primal value. 
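As a quick extra check of the two extremes noted in the info box, we can compare the entropic risk directly against the expectation and the worst case. This is a small sanity check, assuming the primal_risk methods and the data Z and p defined earlier in this tutorial:

primal_risk(Entropic(1e-6), Z, p)  # ≈ primal_risk(Expectation(), Z, p) ≈ 2.9
primal_risk(Entropic(1e4), Z, p)   # ≈ primal_risk(WorstCase(), Z, p) = 4.0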
However, there isn't an obvious way to compute a subgradient of the risk-averse cost-to-go function, which we need for our cut calculation.","category":"page"},{"location":"tutorial/22_risk/","page":"Theory II: risk aversion","title":"Theory II: risk aversion","text":"There is a nice solution to this problem, and that is to use the dual representation of a risk measure, instead of the primal.","category":"page"},{"location":"tutorial/22_risk/#Dual-risk-measures","page":"Theory II: risk aversion","title":"Dual risk measures","text":"","category":"section"},{"location":"tutorial/22_risk/","page":"Theory II: risk aversion","title":"Theory II: risk aversion","text":"Convex risk measures have a dual representation as follows:","category":"page"},{"location":"tutorial/22_risk/","page":"Theory II: risk aversion","title":"Theory II: risk aversion","text":"mathbbFZ = suplimits_q inmathcalM(p) mathbbE_qZ - alpha(p q)","category":"page"},{"location":"tutorial/22_risk/","page":"Theory II: risk aversion","title":"Theory II: risk aversion","text":"where alpha is a concave function that maps the probability vectors p and q to a real number, and mathcalM(p) subseteq mathcalP is a convex subset of the probability simplex:","category":"page"},{"location":"tutorial/22_risk/","page":"Theory II: risk aversion","title":"Theory II: risk aversion","text":"mathcalP = p ge 0sumlimits_omegainOmegap_omega = 1","category":"page"},{"location":"tutorial/22_risk/","page":"Theory II: risk aversion","title":"Theory II: risk aversion","text":"The dual of a convex risk measure can be interpreted as taking the expectation of the random variable Z with respect to the worst probability vector q that lies within the set mathcalM, less some concave penalty term alpha(p q).","category":"page"},{"location":"tutorial/22_risk/","page":"Theory II: risk aversion","title":"Theory II: risk aversion","text":"If we define a function dual_risk_inner that computes q and α:","category":"page"},{"location":"tutorial/22_risk/","page":"Theory II: risk aversion","title":"Theory II: risk aversion","text":"\"\"\"\n dual_risk_inner(\n F::AbstractRiskMeasure, Z::Vector{Float64}, p::Vector{Float64}\n )::Tuple{Vector{Float64},Float64}\n\nReturn a tuple formed by the worst-case probability vector `q` and the\ncorresponding evaluation `α(p, q)`.\n\"\"\"\nfunction dual_risk_inner end","category":"page"},{"location":"tutorial/22_risk/","page":"Theory II: risk aversion","title":"Theory II: risk aversion","text":"Main.##1589.dual_risk_inner","category":"page"},{"location":"tutorial/22_risk/","page":"Theory II: risk aversion","title":"Theory II: risk aversion","text":"then we can write a generic dual_risk function as:","category":"page"},{"location":"tutorial/22_risk/","page":"Theory II: risk aversion","title":"Theory II: risk aversion","text":"function dual_risk(\n F::AbstractRiskMeasure,\n Z::Vector{Float64},\n p::Vector{Float64},\n)\n q, α = dual_risk_inner(F, Z, p)\n return sum(q[i] * Z[i] for i = 1:length(q)) - α\nend","category":"page"},{"location":"tutorial/22_risk/","page":"Theory II: risk aversion","title":"Theory II: risk aversion","text":"dual_risk (generic function with 1 method)","category":"page"},{"location":"tutorial/22_risk/#Expectation-2","page":"Theory II: risk aversion","title":"Expectation","text":"","category":"section"},{"location":"tutorial/22_risk/","page":"Theory II: risk aversion","title":"Theory II: risk aversion","text":"For the expectation risk measure, mathcalM(p) = p, and alpha(cdot cdot) = 0. 
Therefore:","category":"page"},{"location":"tutorial/22_risk/","page":"Theory II: risk aversion","title":"Theory II: risk aversion","text":"function dual_risk_inner(::Expectation, ::Vector{Float64}, p::Vector{Float64})\n return p, 0.0\nend","category":"page"},{"location":"tutorial/22_risk/","page":"Theory II: risk aversion","title":"Theory II: risk aversion","text":"dual_risk_inner (generic function with 1 method)","category":"page"},{"location":"tutorial/22_risk/","page":"Theory II: risk aversion","title":"Theory II: risk aversion","text":"We can check we get the same result as the primal version:","category":"page"},{"location":"tutorial/22_risk/","page":"Theory II: risk aversion","title":"Theory II: risk aversion","text":"dual_risk(Expectation(), Z, p) == primal_risk(Expectation(), Z, p)","category":"page"},{"location":"tutorial/22_risk/","page":"Theory II: risk aversion","title":"Theory II: risk aversion","text":"true","category":"page"},{"location":"tutorial/22_risk/#Worst-case","page":"Theory II: risk aversion","title":"Worst-case","text":"","category":"section"},{"location":"tutorial/22_risk/","page":"Theory II: risk aversion","title":"Theory II: risk aversion","text":"For the worst-case risk measure, mathcalM(p) = mathcalP, and alpha(cdot cdot) = 0. Therefore, the dual representation just puts all of the probability weight on the maximum outcome:","category":"page"},{"location":"tutorial/22_risk/","page":"Theory II: risk aversion","title":"Theory II: risk aversion","text":"function dual_risk_inner(::WorstCase, Z::Vector{Float64}, ::Vector{Float64})\n q = zeros(length(Z))\n _, index = findmax(Z)\n q[index] = 1.0\n return q, 0.0\nend","category":"page"},{"location":"tutorial/22_risk/","page":"Theory II: risk aversion","title":"Theory II: risk aversion","text":"dual_risk_inner (generic function with 2 methods)","category":"page"},{"location":"tutorial/22_risk/","page":"Theory II: risk aversion","title":"Theory II: risk aversion","text":"We can check we get the same result as the primal version:","category":"page"},{"location":"tutorial/22_risk/","page":"Theory II: risk aversion","title":"Theory II: risk aversion","text":"dual_risk(WorstCase(), Z, p) == primal_risk(WorstCase(), Z, p)","category":"page"},{"location":"tutorial/22_risk/","page":"Theory II: risk aversion","title":"Theory II: risk aversion","text":"true","category":"page"},{"location":"tutorial/22_risk/#Entropic-2","page":"Theory II: risk aversion","title":"Entropic","text":"","category":"section"},{"location":"tutorial/22_risk/","page":"Theory II: risk aversion","title":"Theory II: risk aversion","text":"For the entropic risk measure, mathcalM(p) = mathcalP, and:","category":"page"},{"location":"tutorial/22_risk/","page":"Theory II: risk aversion","title":"Theory II: risk aversion","text":"alpha(p q) = frac1gammasumlimits_omegainOmega q_omega logleft(fracq_omegap_omegaright)","category":"page"},{"location":"tutorial/22_risk/","page":"Theory II: risk aversion","title":"Theory II: risk aversion","text":"One way to solve the dual problem is to explicitly solve a nonlinear optimization problem:","category":"page"},{"location":"tutorial/22_risk/","page":"Theory II: risk aversion","title":"Theory II: risk aversion","text":"function dual_risk_inner(F::Entropic, Z::Vector{Float64}, p::Vector{Float64})\n N = length(p)\n model = JuMP.Model(Ipopt.Optimizer)\n JuMP.set_silent(model)\n # For this problem, the solve is more accurate if we turn off problem\n # scaling.\n JuMP.set_optimizer_attribute(model, \"nlp_scaling_method\", 
\"none\")\n JuMP.@variable(model, 0 <= q[1:N] <= 1)\n JuMP.@constraint(model, sum(q) == 1)\n JuMP.@NLexpression(\n model,\n α,\n 1 / F.γ * sum(q[i] * log(q[i] / p[i]) for i = 1:N),\n )\n JuMP.@NLobjective(model, Max, sum(q[i] * Z[i] for i = 1:N) - α)\n JuMP.optimize!(model)\n return JuMP.value.(q), JuMP.value(α)\nend","category":"page"},{"location":"tutorial/22_risk/","page":"Theory II: risk aversion","title":"Theory II: risk aversion","text":"dual_risk_inner (generic function with 3 methods)","category":"page"},{"location":"tutorial/22_risk/","page":"Theory II: risk aversion","title":"Theory II: risk aversion","text":"We can check we get the same result as the primal version:","category":"page"},{"location":"tutorial/22_risk/","page":"Theory II: risk aversion","title":"Theory II: risk aversion","text":"for γ in [0.001, 0.01, 0.1, 1.0, 10.0, 100.0]\n primal = primal_risk(Entropic(γ), Z, p)\n dual = dual_risk(Entropic(γ), Z, p)\n success = primal ≈ dual ? \"✓\" : \"×\"\n println(\"$(success) γ = $(γ), primal = $(primal), dual = $(dual)\")\nend","category":"page"},{"location":"tutorial/22_risk/","page":"Theory II: risk aversion","title":"Theory II: risk aversion","text":"\n******************************************************************************\nThis program contains Ipopt, a library for large-scale nonlinear optimization.\n Ipopt is released as open source code under the Eclipse Public License (EPL).\n For more information visit http://projects.coin-or.org/Ipopt\n******************************************************************************\n\n✓ γ = 0.001, primal = 2.9004449279791005, dual = 2.9004449279790396\n✓ γ = 0.01, primal = 2.9044427792027596, dual = 2.9044427792027503\n✓ γ = 0.1, primal = 2.9437604953310674, dual = 2.9437604953310665\n✓ γ = 1.0, primal = 3.264357634151263, dual = 3.264357634151263\n✓ γ = 10.0, primal = 3.879608772845574, dual = 3.8796087723675954\n✓ γ = 100.0, primal = 3.987960271956741, dual = 3.987960271956741\n","category":"page"},{"location":"tutorial/22_risk/","page":"Theory II: risk aversion","title":"Theory II: risk aversion","text":"info: Info\nThis method of solving the dual problem \"on-the-side\" is used by SDDP.jl for a number of risk measures, including a distributionally robust risk measure with the Wasserstein distance. Check out all the risk measures that SDDP.jl supports in Add a risk measure.","category":"page"},{"location":"tutorial/22_risk/","page":"Theory II: risk aversion","title":"Theory II: risk aversion","text":"The \"on-the-side\" method is very general, and it lets us incorporate any convex risk measure into SDDP. 
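For context, in SDDP.jl itself all of this sits behind a single risk_measure keyword argument of SDDP.train. The snippet below is only a rough usage sketch: it assumes the hydro-thermal model built in the earlier Basic tutorials is bound to model, and that the built-in risk measures it names (SDDP.Entropic, SDDP.EAVaR) are available in your version of SDDP.jl.

using SDDP, GLPK

# Rough sketch: train a risk-averse policy by passing a risk measure to
# SDDP.train. `model` is assumed to be an SDDP.PolicyGraph, for example the
# hydro-thermal model from the earlier tutorials.
SDDP.train(
    model;
    risk_measure = SDDP.Entropic(0.1),  # entropic risk measure with γ = 0.1
    iteration_limit = 10,
)

# A convex combination of the expectation and the average value-at-risk is
# another common choice:
# SDDP.train(model; risk_measure = SDDP.EAVaR(lambda = 0.5, beta = 0.1), iteration_limit = 10)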
However, this comes at an increased computational cost and potential numerical issues (e.g., not converging to the exact solution).","category":"page"},{"location":"tutorial/22_risk/","page":"Theory II: risk aversion","title":"Theory II: risk aversion","text":"However, for the entropic risk measure, Dowson, Morton, and Pagnoncelli (2020) derive the following closed form solution for q^*:","category":"page"},{"location":"tutorial/22_risk/","page":"Theory II: risk aversion","title":"Theory II: risk aversion","text":"q_omega^* = fracp_omega e^gamma z_omegasumlimits_varphi in Omega p_varphi e^gamma z_varphi","category":"page"},{"location":"tutorial/22_risk/","page":"Theory II: risk aversion","title":"Theory II: risk aversion","text":"This is faster because we don't need to use Ipopt, and it avoids some of the numerical issues associated with solving a nonlinear program.","category":"page"},{"location":"tutorial/22_risk/","page":"Theory II: risk aversion","title":"Theory II: risk aversion","text":"function dual_risk_inner(F::Entropic, Z::Vector{Float64}, p::Vector{Float64})\n q, α = zeros(length(p)), big(0.0)\n peγz = p .* exp.(F.γ .* big.(Z))\n sum_peγz = sum(peγz)\n for i = 1:length(q)\n big_q = peγz[i] / sum_peγz\n α += big_q * log(big_q / p[i])\n q[i] = Float64(big_q)\n end\n return q, Float64(α / F.γ)\nend","category":"page"},{"location":"tutorial/22_risk/","page":"Theory II: risk aversion","title":"Theory II: risk aversion","text":"dual_risk_inner (generic function with 3 methods)","category":"page"},{"location":"tutorial/22_risk/","page":"Theory II: risk aversion","title":"Theory II: risk aversion","text":"warning: Warning\nAgain, note that we use big to avoid introducing overflow errors, before explicitly casting back to Float64 for the values we return.","category":"page"},{"location":"tutorial/22_risk/","page":"Theory II: risk aversion","title":"Theory II: risk aversion","text":"We can check we get the same result as the primal version:","category":"page"},{"location":"tutorial/22_risk/","page":"Theory II: risk aversion","title":"Theory II: risk aversion","text":"for γ in [0.001, 0.01, 0.1, 1.0, 10.0, 100.0]\n primal = primal_risk(Entropic(γ), Z, p)\n dual = dual_risk(Entropic(γ), Z, p)\n success = primal ≈ dual ? 
\"✓\" : \"×\"\n println(\"$(success) γ = $(γ), primal = $(primal), dual = $(dual)\")\nend","category":"page"},{"location":"tutorial/22_risk/","page":"Theory II: risk aversion","title":"Theory II: risk aversion","text":"✓ γ = 0.001, primal = 2.9004449279791005, dual = 2.9004449279790396\n✓ γ = 0.01, primal = 2.9044427792027596, dual = 2.9044427792027503\n✓ γ = 0.1, primal = 2.9437604953310674, dual = 2.9437604953310665\n✓ γ = 1.0, primal = 3.264357634151263, dual = 3.264357634151263\n✓ γ = 10.0, primal = 3.879608772845574, dual = 3.879608772845574\n✓ γ = 100.0, primal = 3.987960271956741, dual = 3.987960271956741\n","category":"page"},{"location":"tutorial/22_risk/#Risk-averse-subgradients","page":"Theory II: risk aversion","title":"Risk-averse subgradients","text":"","category":"section"},{"location":"tutorial/22_risk/","page":"Theory II: risk aversion","title":"Theory II: risk aversion","text":"We ended the section on primal risk measures by explaining how we couldn't use the primal risk measure in the cut calculation because we needed some way of computing a risk-averse subgradient:","category":"page"},{"location":"tutorial/22_risk/","page":"Theory II: risk aversion","title":"Theory II: risk aversion","text":"theta ge mathbbF_j in i^+ varphi in Omega_jleftV_j^k(x^prime_k varphi)right + fracddx^primemathbbF_j in i^+ varphi in Omega_jleftV_j^k(x^prime_k varphi)right^top (x^prime - x^prime_k)","category":"page"},{"location":"tutorial/22_risk/","page":"Theory II: risk aversion","title":"Theory II: risk aversion","text":"We use the dual representation because of the following theorem, which explains how to compute a risk-averse subgradient.","category":"page"},{"location":"tutorial/22_risk/","page":"Theory II: risk aversion","title":"Theory II: risk aversion","text":"info: The risk-averse subgradient theorem\nLet omega in Omega index a random vector with finite support and with nominal probability mass function, p in mathcalP, which satisfies p 0. Consider a convex risk measure, mathbbF, with a convex risk set, mathcalM(p), so that mathbbF can be expressed as the dual form. Let V(xomega) be convex with respect to x for all fixed omegainOmega, and let lambda(tildex omega) be a subgradient of V(xomega) with respect to x at x = tildex for each omega in Omega. Then, sum_omegainOmegaq^*_omega lambda(tildexomega) is a subgradient of mathbbFV(xomega) at tildex, where q^* in argmax_q in mathcalM(p)leftmathbbE_qV(tildexomega) - alpha(p q)right","category":"page"},{"location":"tutorial/22_risk/","page":"Theory II: risk aversion","title":"Theory II: risk aversion","text":"This theorem can be a little hard to unpack, so let's see an example:","category":"page"},{"location":"tutorial/22_risk/","page":"Theory II: risk aversion","title":"Theory II: risk aversion","text":"function dual_risk_averse_subgradient(\n V::Function,\n # Use automatic differentiation to compute the gradient of V w.r.t. x,\n # given a fixed ω.\n λ::Function = (x, ω) -> ForwardDiff.gradient(x -> V(x, ω), x);\n F::AbstractRiskMeasure,\n Ω::Vector,\n p::Vector{Float64},\n x̃::Vector{Float64},\n)\n # Evaluate the function at x=x̃ for all ω ∈ Ω.\n V_ω = [V(x̃, ω) for ω in Ω]\n # Solve the dual problem to obtain an optimal q^*.\n q, α = dual_risk_inner(F, V_ω, p)\n # Compute the risk-averse subgradient by taking the expectation of the\n # subgradients w.r.t. 
q^*.\n dVdx = sum(q[i] * λ(x̃, ω) for (i, ω) in enumerate(Ω))\n return dVdx\nend","category":"page"},{"location":"tutorial/22_risk/","page":"Theory II: risk aversion","title":"Theory II: risk aversion","text":"dual_risk_averse_subgradient (generic function with 2 methods)","category":"page"},{"location":"tutorial/22_risk/","page":"Theory II: risk aversion","title":"Theory II: risk aversion","text":"We can compare the subgradient obtained with the dual form against the automatic differentiation of the primal_risk function.","category":"page"},{"location":"tutorial/22_risk/","page":"Theory II: risk aversion","title":"Theory II: risk aversion","text":"function primal_risk_averse_subgradient(\n V::Function;\n F::AbstractRiskMeasure,\n Ω::Vector,\n p::Vector{Float64},\n x̃::Vector{Float64},\n)\n inner(x) = primal_risk(F, [V(x, ω) for ω in Ω], p)\n return ForwardDiff.gradient(inner, x̃)\nend","category":"page"},{"location":"tutorial/22_risk/","page":"Theory II: risk aversion","title":"Theory II: risk aversion","text":"primal_risk_averse_subgradient (generic function with 1 method)","category":"page"},{"location":"tutorial/22_risk/","page":"Theory II: risk aversion","title":"Theory II: risk aversion","text":"As our example function, we use:","category":"page"},{"location":"tutorial/22_risk/","page":"Theory II: risk aversion","title":"Theory II: risk aversion","text":"V(x, ω) = ω * x[1]^2","category":"page"},{"location":"tutorial/22_risk/","page":"Theory II: risk aversion","title":"Theory II: risk aversion","text":"V (generic function with 1 method)","category":"page"},{"location":"tutorial/22_risk/","page":"Theory II: risk aversion","title":"Theory II: risk aversion","text":"with:","category":"page"},{"location":"tutorial/22_risk/","page":"Theory II: risk aversion","title":"Theory II: risk aversion","text":"Ω = [1.0, 2.0, 3.0]","category":"page"},{"location":"tutorial/22_risk/","page":"Theory II: risk aversion","title":"Theory II: risk aversion","text":"3-element Array{Float64,1}:\n 1.0\n 2.0\n 3.0","category":"page"},{"location":"tutorial/22_risk/","page":"Theory II: risk aversion","title":"Theory II: risk aversion","text":"and:","category":"page"},{"location":"tutorial/22_risk/","page":"Theory II: risk aversion","title":"Theory II: risk aversion","text":"p = [0.3, 0.4, 0.3]","category":"page"},{"location":"tutorial/22_risk/","page":"Theory II: risk aversion","title":"Theory II: risk aversion","text":"3-element Array{Float64,1}:\n 0.3\n 0.4\n 0.3","category":"page"},{"location":"tutorial/22_risk/","page":"Theory II: risk aversion","title":"Theory II: risk aversion","text":"at the point:","category":"page"},{"location":"tutorial/22_risk/","page":"Theory II: risk aversion","title":"Theory II: risk aversion","text":"x̃ = [3.0]","category":"page"},{"location":"tutorial/22_risk/","page":"Theory II: risk aversion","title":"Theory II: risk aversion","text":"1-element Array{Float64,1}:\n 3.0","category":"page"},{"location":"tutorial/22_risk/","page":"Theory II: risk aversion","title":"Theory II: risk aversion","text":"If mathbbF is the expectation risk-measure, then:","category":"page"},{"location":"tutorial/22_risk/","page":"Theory II: risk aversion","title":"Theory II: risk aversion","text":"mathbbFV(x omega) = 2 x^2","category":"page"},{"location":"tutorial/22_risk/","page":"Theory II: risk aversion","title":"Theory II: risk aversion","text":"The function evaluation x=3 is 18 and the subgradient is 12. 
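To spell out where those numbers come from: the expected noise is E_p[ω] = 0.3 × 1 + 0.4 × 2 + 0.3 × 3 = 2, so the risk-adjusted function is 2x^2, which at x̃ = 3 evaluates to 2 × 9 = 18, with derivative 4x = 12. A two-line check, assuming the data Ω, p, and x̃ defined above:

sum(p .* Ω) * x̃[1]^2    # E_p[ω] * x² = 2.0 * 9.0 = 18.0
2 * sum(p .* Ω) * x̃[1]  # derivative 4x at x = 3 gives 12.0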
Let's check we get it right with the dual form:","category":"page"},{"location":"tutorial/22_risk/","page":"Theory II: risk aversion","title":"Theory II: risk aversion","text":"dual_risk_averse_subgradient(V; F = Expectation(), Ω = Ω, p = p, x̃ = x̃)","category":"page"},{"location":"tutorial/22_risk/","page":"Theory II: risk aversion","title":"Theory II: risk aversion","text":"1-element Array{Float64,1}:\n 12.0","category":"page"},{"location":"tutorial/22_risk/","page":"Theory II: risk aversion","title":"Theory II: risk aversion","text":"and the primal form:","category":"page"},{"location":"tutorial/22_risk/","page":"Theory II: risk aversion","title":"Theory II: risk aversion","text":"primal_risk_averse_subgradient(V; F = Expectation(), Ω = Ω, p = p, x̃ = x̃)","category":"page"},{"location":"tutorial/22_risk/","page":"Theory II: risk aversion","title":"Theory II: risk aversion","text":"1-element Array{Float64,1}:\n 12.0","category":"page"},{"location":"tutorial/22_risk/","page":"Theory II: risk aversion","title":"Theory II: risk aversion","text":"If mathbbF is the worst-case risk measure, then:","category":"page"},{"location":"tutorial/22_risk/","page":"Theory II: risk aversion","title":"Theory II: risk aversion","text":"mathbbFV(x omega) = 3 x^2","category":"page"},{"location":"tutorial/22_risk/","page":"Theory II: risk aversion","title":"Theory II: risk aversion","text":"The function evaluation at x=3 is 27, and the subgradient is 18. Let's check we get it right with the dual form:","category":"page"},{"location":"tutorial/22_risk/","page":"Theory II: risk aversion","title":"Theory II: risk aversion","text":"dual_risk_averse_subgradient(V; F = WorstCase(), Ω = Ω, p = p, x̃ = x̃)","category":"page"},{"location":"tutorial/22_risk/","page":"Theory II: risk aversion","title":"Theory II: risk aversion","text":"1-element Array{Float64,1}:\n 18.0","category":"page"},{"location":"tutorial/22_risk/","page":"Theory II: risk aversion","title":"Theory II: risk aversion","text":"and the primal form:","category":"page"},{"location":"tutorial/22_risk/","page":"Theory II: risk aversion","title":"Theory II: risk aversion","text":"primal_risk_averse_subgradient(V; F = WorstCase(), Ω = Ω, p = p, x̃ = x̃)","category":"page"},{"location":"tutorial/22_risk/","page":"Theory II: risk aversion","title":"Theory II: risk aversion","text":"1-element Array{Float64,1}:\n 18.0","category":"page"},{"location":"tutorial/22_risk/","page":"Theory II: risk aversion","title":"Theory II: risk aversion","text":"If mathbbF is the entropic risk measure, the math is a little more difficult to derive analytically. However, we can check against our primal version:","category":"page"},{"location":"tutorial/22_risk/","page":"Theory II: risk aversion","title":"Theory II: risk aversion","text":"for γ in [0.001, 0.01, 0.1, 1.0, 10.0, 100.0]\n dual = dual_risk_averse_subgradient(\n V; F = Entropic(γ), Ω = Ω, p = p, x̃ = x̃\n )\n primal = primal_risk_averse_subgradient(\n V; F = Entropic(γ), Ω = Ω, p = p, x̃ = x̃\n )\n success = primal ≈ dual ? 
\"✓\" : \"×\"\n println(\"$(success) γ = $(γ), primal = $(primal), dual = $(dual)\")\nend","category":"page"},{"location":"tutorial/22_risk/","page":"Theory II: risk aversion","title":"Theory II: risk aversion","text":"✓ γ = 0.001, primal = [12.0324], dual = [12.0324]\n✓ γ = 0.01, primal = [12.3237], dual = [12.3237]\n✓ γ = 0.1, primal = [14.9332], dual = [14.9332]\n✓ γ = 1.0, primal = [17.999], dual = [17.999]\n✓ γ = 10.0, primal = [18.0], dual = [18.0]\n× γ = 100.0, primal = [NaN], dual = [18.0]\n","category":"page"},{"location":"tutorial/22_risk/","page":"Theory II: risk aversion","title":"Theory II: risk aversion","text":"Uh oh! What happened with the last line? It looks like our primal_risk_averse_subgradient encountered an error and returned a subgradient of NaN. This is because of the overflow issue with exp(x). However, we can be confident that our dual method of computing the risk-averse subgradient is both correct and more numerically robust than the primal version.","category":"page"},{"location":"tutorial/22_risk/","page":"Theory II: risk aversion","title":"Theory II: risk aversion","text":"info: Info\nAs another sanity check, notice how as gamma rightarrow 0, we tend toward the solution of the expectation risk-measure [12], and as gamma rightarrow infty, we tend toward the solution of the worst-case risk measure [18].","category":"page"},{"location":"tutorial/22_risk/#Risk-averse-decision-rules:-Part-II","page":"Theory II: risk aversion","title":"Risk-averse decision rules: Part II","text":"","category":"section"},{"location":"tutorial/22_risk/","page":"Theory II: risk aversion","title":"Theory II: risk aversion","text":"Why is the risk-averse subgradient theorem helpful? Using the dual representation of a convex risk measure, we can re-write the cut:","category":"page"},{"location":"tutorial/22_risk/","page":"Theory II: risk aversion","title":"Theory II: risk aversion","text":"theta ge mathbbF_j in i^+ varphi in Omega_jleftV_j^k(x^prime_k varphi)right + fracddx^primemathbbF_j in i^+ varphi in Omega_jleftV_j^k(x^prime_k varphi)right^top (x^prime - x^prime_k)quad k=1ldotsK","category":"page"},{"location":"tutorial/22_risk/","page":"Theory II: risk aversion","title":"Theory II: risk aversion","text":"as:","category":"page"},{"location":"tutorial/22_risk/","page":"Theory II: risk aversion","title":"Theory II: risk aversion","text":"theta ge mathbbE_q_kleftV_j^k(x^prime_k varphi) + fracddx^primeV_j^k(x^prime_k varphi)^top (x^prime - x^prime_k)right - alpha(p q_k)quad k=1ldotsK","category":"page"},{"location":"tutorial/22_risk/","page":"Theory II: risk aversion","title":"Theory II: risk aversion","text":"where q_k = mathrmargsuplimits_q inmathcalM(p) mathbbE_qV_j^k(x_k^prime varphi) - alpha(p q).","category":"page"},{"location":"tutorial/22_risk/","page":"Theory II: risk aversion","title":"Theory II: risk aversion","text":"Therefore, we can formulate a risk-averse decision rule as:","category":"page"},{"location":"tutorial/22_risk/","page":"Theory II: risk aversion","title":"Theory II: risk aversion","text":"beginaligned\nV_i^K(x omega) = minlimits_barx x^prime u C_i(barx u omega) + theta\n x^prime = T_i(barx u omega) \n u in U_i(barx omega) \n barx = x \n theta ge mathbbE_q_kleftV_j^k(x^prime_k varphi) + fracddx^primeV_j^k(x^prime_k varphi)^top (x^prime - x^prime_k)right - alpha(p q_k)quad k=1ldotsK \n theta ge M\nendaligned","category":"page"},{"location":"tutorial/22_risk/","page":"Theory II: risk aversion","title":"Theory II: risk aversion","text":"where q_k = mathrmargsuplimits_q 
inmathcalM(p) mathbbE_qV_j^k(x_k^prime varphi) - alpha(p q).","category":"page"},{"location":"tutorial/22_risk/","page":"Theory II: risk aversion","title":"Theory II: risk aversion","text":"Thus, to implement risk-averse SDDP, all we need to do is modify the backward pass to include this calculation of q_k, form the cut using q_k instead of p, and subtract the penalty term alpha(p q_k).","category":"page"},{"location":"tutorial/22_risk/#Implementation","page":"Theory II: risk aversion","title":"Implementation","text":"","category":"section"},{"location":"tutorial/22_risk/","page":"Theory II: risk aversion","title":"Theory II: risk aversion","text":"Now we're ready to implement our risk-averse version of SDDP.","category":"page"},{"location":"tutorial/22_risk/","page":"Theory II: risk aversion","title":"Theory II: risk aversion","text":"As a prerequisite, we need most of the code from Theory I: an intro to SDDP.","category":"page"},{"location":"tutorial/22_risk/","page":"Theory II: risk aversion","title":"Theory II: risk aversion","text":"

\nClick to view code from the tutorial \"Theory I: an intro to SDDP\".","category":"page"},{"location":"tutorial/22_risk/","page":"Theory II: risk aversion","title":"Theory II: risk aversion","text":"struct State\n in::JuMP.VariableRef\n out::JuMP.VariableRef\nend\n\nstruct Uncertainty\n parameterize::Function\n Ω::Vector{Any}\n P::Vector{Float64}\nend\n\nstruct Node\n subproblem::JuMP.Model\n states::Dict{Symbol,State}\n uncertainty::Uncertainty\n cost_to_go::JuMP.VariableRef\nend\n\nstruct PolicyGraph\n nodes::Vector{Node}\n arcs::Vector{Dict{Int,Float64}}\nend\n\nfunction Base.show(io::IO, model::PolicyGraph)\n println(io, \"A policy graph with $(length(model.nodes)) nodes\")\n println(io, \"Arcs:\")\n for (from, arcs) in enumerate(model.arcs)\n for (to, probability) in arcs\n println(io, \" $(from) => $(to) w.p. $(probability)\")\n end\n end\n return\nend\n\nfunction PolicyGraph(\n subproblem_builder::Function;\n graph::Vector{Dict{Int,Float64}},\n lower_bound::Float64,\n optimizer,\n)\n nodes = Node[]\n for t = 1:length(graph)\n model = JuMP.Model(optimizer)\n states, uncertainty = subproblem_builder(model, t)\n JuMP.@variable(model, cost_to_go >= lower_bound)\n obj = JuMP.objective_function(model)\n JuMP.@objective(model, Min, obj + cost_to_go)\n if length(graph[t]) == 0\n JuMP.fix(cost_to_go, 0.0; force = true)\n end\n push!(nodes, Node(model, states, uncertainty, cost_to_go))\n end\n return PolicyGraph(nodes, graph)\nend\n\nfunction sample_uncertainty(uncertainty::Uncertainty)\n r = rand()\n for (p, ω) in zip(uncertainty.P, uncertainty.Ω)\n r -= p\n if r < 0.0\n return ω\n end\n end\n error(\"We should never get here because P should sum to 1.0.\")\nend\n\nfunction sample_next_node(model::PolicyGraph, current::Int)\n if length(model.arcs[current]) == 0\n return nothing\n else\n r = rand()\n for (to, probability) in model.arcs[current]\n r -= probability\n if r < 0.0\n return to\n end\n end\n return nothing\n end\nend\n\nfunction forward_pass(model::PolicyGraph, io::IO = stdout)\n incoming_state = Dict(\n k => JuMP.fix_value(v.in) for (k, v) in model.nodes[1].states\n )\n simulation_cost = 0.0\n trajectory = Tuple{Int,Dict{Symbol,Float64}}[]\n t = 1\n while t !== nothing\n node = model.nodes[t]\n ω = sample_uncertainty(node.uncertainty)\n node.uncertainty.parameterize(ω)\n for (k, v) in incoming_state\n JuMP.fix(node.states[k].in, v; force = true)\n end\n JuMP.optimize!(node.subproblem)\n if JuMP.termination_status(node.subproblem) != JuMP.MOI.OPTIMAL\n error(\"Something went terribly wrong!\")\n end\n outgoing_state = Dict(k => JuMP.value(v.out) for (k, v) in node.states)\n stage_cost = JuMP.objective_value(node.subproblem) - JuMP.value(node.cost_to_go)\n simulation_cost += stage_cost\n incoming_state = outgoing_state\n push!(trajectory, (t, outgoing_state))\n t = sample_next_node(model, t)\n end\n return trajectory, simulation_cost\nend\n\nfunction upper_bound(model::PolicyGraph; replications::Int)\n simulations = [forward_pass(model, devnull) for i = 1:replications]\n z = [s[2] for s in simulations]\n μ = Statistics.mean(z)\n tσ = 1.96 * Statistics.std(z) / sqrt(replications)\n return μ, tσ\nend\n\nfunction lower_bound(model::PolicyGraph)\n node = model.nodes[1]\n bound = 0.0\n for (p, ω) in zip(node.uncertainty.P, node.uncertainty.Ω)\n node.uncertainty.parameterize(ω)\n JuMP.optimize!(node.subproblem)\n bound += p * JuMP.objective_value(node.subproblem)\n end\n return bound\nend\n\nfunction evaluate_policy(\n model::PolicyGraph;\n node::Int,\n 
incoming_state::Dict{Symbol,Float64},\n random_variable,\n)\n the_node = model.nodes[node]\n the_node.uncertainty.parameterize(random_variable)\n for (k, v) in incoming_state\n JuMP.fix(the_node.states[k].in, v; force = true)\n end\n JuMP.optimize!(the_node.subproblem)\n return Dict(\n k => JuMP.value.(v)\n for (k, v) in JuMP.object_dictionary(the_node.subproblem)\n )\nend","category":"page"},{"location":"tutorial/22_risk/","page":"Theory II: risk aversion","title":"Theory II: risk aversion","text":"evaluate_policy (generic function with 1 method)","category":"page"},{"location":"tutorial/22_risk/","page":"Theory II: risk aversion","title":"Theory II: risk aversion","text":"

","category":"page"},{"location":"tutorial/22_risk/","page":"Theory II: risk aversion","title":"Theory II: risk aversion","text":"First, we need to modify the backward pass to compute the cuts using the risk-averse subgradient theorem:","category":"page"},{"location":"tutorial/22_risk/","page":"Theory II: risk aversion","title":"Theory II: risk aversion","text":"function backward_pass(\n model::PolicyGraph,\n trajectory::Vector{Tuple{Int,Dict{Symbol,Float64}}},\n io::IO = stdout;\n risk_measure::AbstractRiskMeasure,\n)\n println(io, \"| Backward pass\")\n for i = reverse(1:length(trajectory))\n index, outgoing_states = trajectory[i]\n node = model.nodes[index]\n println(io, \"| | Visiting node $(index)\")\n if length(model.arcs[index]) == 0\n continue\n end\n # =====================================================================\n # New! Create vectors to store the cut expressions, V(x,ω) and p:\n cut_expressions, V_ω, p = JuMP.AffExpr[], Float64[], Float64[]\n # =====================================================================\n for (j, P_ij) in model.arcs[index]\n next_node = model.nodes[j]\n for (k, v) in outgoing_states\n JuMP.fix(next_node.states[k].in, v; force = true)\n end\n for (pφ, φ) in zip(next_node.uncertainty.P, next_node.uncertainty.Ω)\n next_node.uncertainty.parameterize(φ)\n JuMP.optimize!(next_node.subproblem)\n V = JuMP.objective_value(next_node.subproblem)\n dVdx = Dict(\n k => JuMP.reduced_cost(v.in) for (k, v) in next_node.states\n )\n # =============================================================\n # New! Construct and append the expression\n # `V_j^K(x_k, φ) + dVdx_j^K(x'_k, φ)ᵀ(x - x_k)` to the list of\n # cut expressions.\n push!(\n cut_expressions,\n JuMP.@expression(\n node.subproblem,\n V + sum(\n dVdx[k] * (x.out - outgoing_states[k])\n for (k, x) in node.states\n ),\n )\n )\n # Add the objective value to Z:\n push!(V_ω, V)\n # Add the probability to p:\n push!(p, P_ij * pφ)\n # =============================================================\n end\n end\n # =====================================================================\n # New! Using the solutions in V_ω, compute q and α:\n q, α = dual_risk_inner(risk_measure, V_ω, p)\n println(io, \"| | | Z = \", Z)\n println(io, \"| | | p = \", p)\n println(io, \"| | | q = \", q)\n println(io, \"| | | α = \", α)\n # Then add the cut:\n c = JuMP.@constraint(\n node.subproblem,\n node.cost_to_go >=\n sum(q[i] * cut_expressions[i] for i = 1:length(q)) - α\n )\n # =====================================================================\n println(io, \"| | | Adding cut : \", c)\n end\n return nothing\nend","category":"page"},{"location":"tutorial/22_risk/","page":"Theory II: risk aversion","title":"Theory II: risk aversion","text":"backward_pass (generic function with 2 methods)","category":"page"},{"location":"tutorial/22_risk/","page":"Theory II: risk aversion","title":"Theory II: risk aversion","text":"We also need to update the train loop of SDDP to pass a risk measure to the backward pass:","category":"page"},{"location":"tutorial/22_risk/","page":"Theory II: risk aversion","title":"Theory II: risk aversion","text":"function train(\n model::PolicyGraph;\n iteration_limit::Int,\n replications::Int,\n # =========================================================================\n # New! 
Add a risk_measure argument\n risk_measure::AbstractRiskMeasure,\n # =========================================================================\n io::IO = stdout,\n)\n for i = 1:iteration_limit\n println(io, \"Starting iteration $(i)\")\n outgoing_states, _ = forward_pass(model, io)\n # =====================================================================\n # New! Pass the risk measure to the backward pass.\n backward_pass(model, outgoing_states, io; risk_measure = risk_measure)\n # =====================================================================\n println(io, \"| Finished iteration\")\n println(io, \"| | lower_bound = \", lower_bound(model))\n end\n μ, tσ = upper_bound(model; replications = replications)\n println(io, \"Upper bound = $(μ) ± $(tσ)\")\n return\nend","category":"page"},{"location":"tutorial/22_risk/","page":"Theory II: risk aversion","title":"Theory II: risk aversion","text":"train (generic function with 1 method)","category":"page"},{"location":"tutorial/22_risk/#Risk-averse-bounds","page":"Theory II: risk aversion","title":"Risk-averse bounds","text":"","category":"section"},{"location":"tutorial/22_risk/","page":"Theory II: risk aversion","title":"Theory II: risk aversion","text":"warning: Warning\nThis section is important.","category":"page"},{"location":"tutorial/22_risk/","page":"Theory II: risk aversion","title":"Theory II: risk aversion","text":"When we had a risk-neutral policy (i.e., we only used the expectation risk measure), we discussed how we could form valid lower and upper bounds.","category":"page"},{"location":"tutorial/22_risk/","page":"Theory II: risk aversion","title":"Theory II: risk aversion","text":"The upper bound is still valid as a Monte Carlo simulation of the expected cost of the policy. (Although this upper bound doesn't capture the change in the policy we wanted to achieve, namely that the impact of the worst outcomes were reduced.)","category":"page"},{"location":"tutorial/22_risk/","page":"Theory II: risk aversion","title":"Theory II: risk aversion","text":"However, if we use a different risk measure, the lower bound is no longer valid!","category":"page"},{"location":"tutorial/22_risk/","page":"Theory II: risk aversion","title":"Theory II: risk aversion","text":"We can still calculate a \"lower bound\" as the objective of the first-stage approximated subproblem, and this will converge to a finite value. However, we can't meaningfully interpret it as a bound with respect to the optimal policy. Therefore, it's best to just ignore the lower bound when training a risk-averse policy.","category":"page"},{"location":"tutorial/22_risk/#Example:-risk-averse-hydro-thermal-scheduling","page":"Theory II: risk aversion","title":"Example: risk-averse hydro-thermal scheduling","text":"","category":"section"},{"location":"tutorial/22_risk/","page":"Theory II: risk aversion","title":"Theory II: risk aversion","text":"Now it's time for an example. 
We create the same problem as Theory I: an intro to SDDP:","category":"page"},{"location":"tutorial/22_risk/","page":"Theory II: risk aversion","title":"Theory II: risk aversion","text":"model = PolicyGraph(\n graph = [\n Dict(2 => 1.0),\n Dict(3 => 1.0),\n Dict{Int,Float64}(),\n ],\n lower_bound = 0.0,\n optimizer = GLPK.Optimizer,\n) do subproblem, t\n JuMP.@variable(subproblem, volume_in == 200)\n JuMP.@variable(subproblem, 0 <= volume_out <= 200)\n states = Dict(:volume => State(volume_in, volume_out))\n JuMP.@variables(subproblem, begin\n thermal_generation >= 0\n hydro_generation >= 0\n hydro_spill >= 0\n inflow\n end)\n JuMP.@constraints(subproblem, begin\n volume_out == volume_in + inflow - hydro_generation - hydro_spill\n demand_constraint, thermal_generation + hydro_generation == 150.0\n end)\n fuel_cost = [50.0, 100.0, 150.0]\n JuMP.@objective(subproblem, Min, fuel_cost[t] * thermal_generation)\n uncertainty = Uncertainty([0.0, 50.0, 100.0], [1 / 3, 1 / 3, 1 / 3]) do ω\n JuMP.fix(inflow, ω)\n end\n return states, uncertainty\nend","category":"page"},{"location":"tutorial/22_risk/","page":"Theory II: risk aversion","title":"Theory II: risk aversion","text":"A policy graph with 3 nodes\nArcs:\n 1 => 2 w.p. 1.0\n 2 => 3 w.p. 1.0\n","category":"page"},{"location":"tutorial/22_risk/","page":"Theory II: risk aversion","title":"Theory II: risk aversion","text":"Then we train a risk-averse policy, passing a risk measure to train:","category":"page"},{"location":"tutorial/22_risk/","page":"Theory II: risk aversion","title":"Theory II: risk aversion","text":"train(\n model;\n iteration_limit = 3,\n replications = 100,\n risk_measure = Entropic(1.0),\n)","category":"page"},{"location":"tutorial/22_risk/","page":"Theory II: risk aversion","title":"Theory II: risk aversion","text":"Starting iteration 1\n| Backward pass\n| | Visiting node 3\n| | Visiting node 2\n| | | Z = [1.0, 2.0, 3.0, 4.0]\n| | | p = [0.333333, 0.333333, 0.333333]\n| | | q = [1.0, 0.0, 0.0]\n| | | α = 1.0986122886681098\n| | | Adding cut : 150 volume_out + cost_to_go ≥ 22498.901387711332\n| | Visiting node 1\n| | | Z = [1.0, 2.0, 3.0, 4.0]\n| | | p = [0.333333, 0.333333, 0.333333]\n| | | q = [1.0, 0.0, 0.0]\n| | | α = 1.0986122886681098\n| | | Adding cut : 150 volume_out + cost_to_go ≥ 37497.802775422664\n| Finished iteration\n| | lower_bound = 12497.802775422664\nStarting iteration 2\n| Backward pass\n| | Visiting node 3\n| | Visiting node 2\n| | | Z = [1.0, 2.0, 3.0, 4.0]\n| | | p = [0.333333, 0.333333, 0.333333]\n| | | q = [0.6, 0.2, 0.2]\n| | | α = 0.14834174943478465\n| | | Adding cut : 89.99999999998764 volume_out + cost_to_go ≥ 13499.851658248712\n| | Visiting node 1\n| | | Z = [1.0, 2.0, 3.0, 4.0]\n| | | p = [0.333333, 0.333333, 0.333333]\n| | | q = [1.0, 0.0, 0.0]\n| | | α = 1.0986122886681098\n| | | Adding cut : 100 volume_out + cost_to_go ≥ 29998.594667538695\n| Finished iteration\n| | lower_bound = 14998.594667538693\nStarting iteration 3\n| Backward pass\n| | Visiting node 3\n| | Visiting node 2\n| | | Z = [1.0, 2.0, 3.0, 4.0]\n| | | p = [0.333333, 0.333333, 0.333333]\n| | | q = [0.843239, 0.0783804, 0.0783804]\n| | | α = 0.5556945523744657\n| | | Adding cut : 126.48587163090163 volume_out + cost_to_go ≥ 18972.32505008287\n| | Visiting node 1\n| | | Z = [1.0, 2.0, 3.0, 4.0]\n| | | p = [0.333333, 0.333333, 0.333333]\n| | | q = [1.0, 0.0, 0.0]\n| | | α = 1.0986122886681098\n| | | Adding cut : 100 volume_out + cost_to_go ≥ 29998.641399238186\n| Finished iteration\n| | lower_bound = 14998.641399238184\nUpper bound 
= 10724.47892990112 ± 929.8897316014472\n","category":"page"},{"location":"tutorial/22_risk/","page":"Theory II: risk aversion","title":"Theory II: risk aversion","text":"Finally, evaluate the decision rule:","category":"page"},{"location":"tutorial/22_risk/","page":"Theory II: risk aversion","title":"Theory II: risk aversion","text":"evaluate_policy(\n model;\n node = 1,\n incoming_state = Dict(:volume => 150.0),\n random_variable = 75,\n)","category":"page"},{"location":"tutorial/22_risk/","page":"Theory II: risk aversion","title":"Theory II: risk aversion","text":"Dict{Symbol,Float64} with 8 entries:\n :volume_out => 200.0\n :demand_constraint => 150.0\n :hydro_spill => 0.0\n :inflow => 75.0\n :volume_in => 150.0\n :thermal_generation => 125.0\n :hydro_generation => 25.0\n :cost_to_go => 9998.64","category":"page"},{"location":"tutorial/22_risk/","page":"Theory II: risk aversion","title":"Theory II: risk aversion","text":"info: Info\nFor this trivial example, the risk-averse policy isn't very different from the policy obtained using the expectation risk-measure. If you try it on some bigger/more interesting problems, you should see the expected cost increase, and the upper tail of the policy decrease.","category":"page"},{"location":"tutorial/22_risk/","page":"Theory II: risk aversion","title":"Theory II: risk aversion","text":"","category":"page"},{"location":"tutorial/22_risk/","page":"Theory II: risk aversion","title":"Theory II: risk aversion","text":"This page was generated using Literate.jl.","category":"page"},{"location":"examples/sldp_example_one/","page":"SLDP: example 1","title":"SLDP: example 1","text":"EditURL = \"https://github.com/odow/SDDP.jl/blob/master/docs/src/examples/sldp_example_one.jl\"","category":"page"},{"location":"examples/sldp_example_one/#SLDP:-example-1","page":"SLDP: example 1","title":"SLDP: example 1","text":"","category":"section"},{"location":"examples/sldp_example_one/","page":"SLDP: example 1","title":"SLDP: example 1","text":"This example is derived from Section 4.2 of the paper: Ahmed, S., Cabral, F. G., & da Costa, B. F. P. (2019). Stochastic Lipschitz Dynamic Programming. Optimization Online. 
PDF","category":"page"},{"location":"examples/sldp_example_one/","page":"SLDP: example 1","title":"SLDP: example 1","text":"using SDDP, GLPK, Test\n\nfunction sldp_example_one()\n model = SDDP.LinearPolicyGraph(\n stages = 8,\n lower_bound = 0.0,\n optimizer = GLPK.Optimizer,\n ) do sp, t\n @variable(sp, x, SDDP.State, initial_value = 2.0)\n @variables(sp, begin\n x⁺ >= 0\n x⁻ >= 0\n 0 <= u <= 1, Bin\n ω\n end)\n @stageobjective(sp, 0.9^(t - 1) * (x⁺ + x⁻))\n @constraints(sp, begin\n x.out == x.in + 2 * u - 1 + ω\n x⁺ >= x.out\n x⁻ >= -x.out\n end)\n points = [\n -0.3089653673606697,\n -0.2718277412744214,\n -0.09611178608243474,\n 0.24645863921577763,\n 0.5204224537256875,\n ]\n SDDP.parameterize(φ -> JuMP.fix(ω, φ), sp, [points; -points])\n end\n SDDP.train(model, iteration_limit = 100, log_frequency = 10)\n @test SDDP.calculate_bound(model) <= 1.1675\n return\nend\n\nsldp_example_one()","category":"page"},{"location":"examples/sldp_example_one/","page":"SLDP: example 1","title":"SLDP: example 1","text":"------------------------------------------------------------------------------\n SDDP.jl (c) Oscar Dowson, 2017-21\n\nProblem\n Nodes : 8\n State variables : 1\n Scenarios : 1.00000e+08\n Solver : serial mode\n\nNumerical stability report\n Non-zero Matrix range [1e+00, 2e+00]\n Non-zero Objective range [5e-01, 1e+00]\n Non-zero Bounds range [1e+00, 1e+00]\n Non-zero RHS range [1e+00, 1e+00]\nNo problems detected\n\n Iteration Simulation Bound Time (s) Proc. ID # Solves\n 10 1.271828e+00 1.166743e+00 2.570701e-02 1 880\n 20 1.271828e+00 1.167077e+00 4.770803e-02 1 1760\n 30 1.831679e+00 1.167188e+00 8.757496e-02 1 2640\n 40 1.673535e+00 1.167299e+00 1.118541e-01 1 3520\n 50 7.281723e-01 1.167299e+00 1.371360e-01 1 4400\n 60 4.795775e-01 1.167299e+00 1.656449e-01 1 5280\n 70 1.554772e+00 1.167299e+00 1.976199e-01 1 6160\n 80 4.795775e-01 1.167299e+00 2.454250e-01 1 7040\n 90 6.910346e-01 1.167299e+00 2.785001e-01 1 7920\n 100 2.348856e+00 1.167299e+00 3.136001e-01 1 8800\n\nTerminating training with status: iteration_limit\n------------------------------------------------------------------------------\n","category":"page"},{"location":"examples/sldp_example_one/","page":"SLDP: example 1","title":"SLDP: example 1","text":"","category":"page"},{"location":"examples/sldp_example_one/","page":"SLDP: example 1","title":"SLDP: example 1","text":"This page was generated using Literate.jl.","category":"page"},{"location":"guides/simulate_using_a_different_sampling_scheme/#Simulate-using-a-different-sampling-scheme","page":"Simulate using a different sampling scheme","title":"Simulate using a different sampling scheme","text":"","category":"section"},{"location":"guides/simulate_using_a_different_sampling_scheme/","page":"Simulate using a different sampling scheme","title":"Simulate using a different sampling scheme","text":"DocTestSetup = quote\n using SDDP, GLPK\nend","category":"page"},{"location":"guides/simulate_using_a_different_sampling_scheme/","page":"Simulate using a different sampling scheme","title":"Simulate using a different sampling scheme","text":"By default, SDDP.simulate will simulate the policy using the distributions of noise terms that were defined when the model was created. We call these in-sample simulations. However, in general the in-sample distributions are an approximation of some underlying probabiltiy model which we term the true process. 
Therefore, SDDP.jl makes it easy to simulate the policy using different probability distributions.","category":"page"},{"location":"guides/simulate_using_a_different_sampling_scheme/","page":"Simulate using a different sampling scheme","title":"Simulate using a different sampling scheme","text":"To demonstrate the different ways of simulating the policy, we're going to use the model from the tutorial Basic IV: Markov uncertainty.","category":"page"},{"location":"guides/simulate_using_a_different_sampling_scheme/","page":"Simulate using a different sampling scheme","title":"Simulate using a different sampling scheme","text":"using SDDP, GLPK\n\nΩ = [\n (inflow = 0.0, fuel_multiplier = 1.5),\n (inflow = 50.0, fuel_multiplier = 1.0),\n (inflow = 100.0, fuel_multiplier = 0.75)\n]\n\nmodel = SDDP.MarkovianPolicyGraph(\n transition_matrices = Array{Float64, 2}[\n [ 1.0 ]',\n [ 0.75 0.25 ],\n [ 0.75 0.25 ; 0.25 0.75 ]\n ],\n sense = :Min,\n lower_bound = 0.0,\n optimizer = GLPK.Optimizer\n) do subproblem, node\n # Unpack the stage and Markov index.\n t, markov_state = node\n # Define the state variable.\n @variable(subproblem, 0 <= volume <= 200, SDDP.State, initial_value = 200)\n # Define the control variables.\n @variables(subproblem, begin\n thermal_generation >= 0\n hydro_generation >= 0\n hydro_spill >= 0\n inflow\n end)\n # Define the constraints\n @constraints(subproblem, begin\n volume.out == volume.in + inflow - hydro_generation - hydro_spill\n thermal_generation + hydro_generation == 150.0\n end)\n # Note how we can use `markov_state` to dispatch an `if` statement.\n probability = if markov_state == 1 # wet climate state\n [1/6, 1/3, 1/2]\n else # dry climate state\n [1/2, 1/3, 1/6]\n end\n\n fuel_cost = [50.0, 100.0, 150.0]\n SDDP.parameterize(subproblem, Ω, probability) do ω\n JuMP.fix(inflow, ω.inflow)\n @stageobjective(subproblem,\n ω.fuel_multiplier * fuel_cost[t] * thermal_generation)\n end\nend\n\nSDDP.train(model; iteration_limit = 10, print_level = 0);\n\nmodel\n\n# output\n\nA policy graph with 5 nodes.\n Node indices: (1, 1), (2, 1), (2, 2), (3, 1), (3, 2)","category":"page"},{"location":"guides/simulate_using_a_different_sampling_scheme/#In-sample-Monte-Carlo-simulation","page":"Simulate using a different sampling scheme","title":"In-sample Monte Carlo simulation","text":"","category":"section"},{"location":"guides/simulate_using_a_different_sampling_scheme/","page":"Simulate using a different sampling scheme","title":"Simulate using a different sampling scheme","text":"To simulate the policy using the data defined when model was created, use SDDP.InSampleMonteCarlo.","category":"page"},{"location":"guides/simulate_using_a_different_sampling_scheme/","page":"Simulate using a different sampling scheme","title":"Simulate using a different sampling scheme","text":"simulations = SDDP.simulate(\n model, 20, sampling_scheme = SDDP.InSampleMonteCarlo()\n)\n\nsort(unique(\n [node[:noise_term] for simulation in simulations for node in simulation]\n))\n\n# output\n\n3-element Array{NamedTuple{(:inflow, :fuel_multiplier),Tuple{Float64,Float64}},1}:\n (inflow = 0.0, fuel_multiplier = 1.5)\n (inflow = 50.0, fuel_multiplier = 1.0)\n (inflow = 100.0, fuel_multiplier = 0.75)","category":"page"},{"location":"guides/simulate_using_a_different_sampling_scheme/#Out-of-sample-Monte-Carlo-simulation","page":"Simulate using a different sampling scheme","title":"Out-of-sample Monte Carlo 
simulation","text":"","category":"section"},{"location":"guides/simulate_using_a_different_sampling_scheme/","page":"Simulate using a different sampling scheme","title":"Simulate using a different sampling scheme","text":"Instead of using the in-sample data, we can perform an out-of-sample simulation of the policy using the SDDP.OutOfSampleMonteCarlo sampling scheme.","category":"page"},{"location":"guides/simulate_using_a_different_sampling_scheme/","page":"Simulate using a different sampling scheme","title":"Simulate using a different sampling scheme","text":"For each node, the SDDP.OutOfSampleMonteCarlo needs to define a new distribution for the transition probabilities between nodes in the policy graph, and a new distribution for the stagewise independent noise terms.","category":"page"},{"location":"guides/simulate_using_a_different_sampling_scheme/","page":"Simulate using a different sampling scheme","title":"Simulate using a different sampling scheme","text":"note: Note\nThe support of the distribution for the stagewise independent noise terms does not have to be the same as the in-sample distributions.","category":"page"},{"location":"guides/simulate_using_a_different_sampling_scheme/","page":"Simulate using a different sampling scheme","title":"Simulate using a different sampling scheme","text":"sampling_scheme = SDDP.OutOfSampleMonteCarlo(model) do node\n stage, markov_state = node\n if stage == 0\n # Called from the root node. Transition to (1, 1) with probability 1.0.\n # Only return the list of children, _not_ a list of noise terms.\n return [SDDP.Noise((1, 1), 1.0)]\n elseif stage == 3\n # Called from the final node. Return an empty list for the children,\n # and a single, deterministic realization for the noise terms.\n children = SDDP.Noise[]\n noise_terms = [SDDP.Noise((inflow = 75.0, fuel_multiplier = 1.2), 1.0)]\n return children, noise_terms\n else\n # Called from a normal node. Return the in-sample distribution for the\n # noise terms, but modify the transition probabilities so that the\n # Markov switching probability is now 50%.\n probability = markov_state == 1 ? [1/6, 1/3, 1/2] : [1/2, 1/3, 1/6]\n noise_terms = [SDDP.Noise(ω, p) for (ω, p) in zip(Ω, probability)]\n children = [\n SDDP.Noise((stage + 1, 1), 0.5), SDDP.Noise((stage + 1, 2), 0.5)\n ]\n return children, noise_terms\n end\nend\n\nsimulations = SDDP.simulate(model, 1, sampling_scheme = sampling_scheme)\n\nsimulations[1][3][:noise_term]\n\n# output\n\n(inflow = 75.0, fuel_multiplier = 1.2)","category":"page"},{"location":"guides/simulate_using_a_different_sampling_scheme/","page":"Simulate using a different sampling scheme","title":"Simulate using a different sampling scheme","text":"Alternatively, if you only want to modify the stagewise independent noise terms, pass use_insample_transition = true.","category":"page"},{"location":"guides/simulate_using_a_different_sampling_scheme/","page":"Simulate using a different sampling scheme","title":"Simulate using a different sampling scheme","text":"sampling_scheme = SDDP.OutOfSampleMonteCarlo(\n model, use_insample_transition = true\n) do node\nstage, markov_state = node\n if stage == 3\n # Called from the final node. Return a single, deterministic\n # realization for the noise terms. Don't return the children because we\n # use the in-sample data.\n return [SDDP.Noise((inflow = 65.0, fuel_multiplier = 1.1), 1.0)]\n else\n # Called from a normal node. Return the in-sample distribution for the\n # noise terms. 
Don't return the children because we use the in-sample\n # data.\n probability = markov_state == 1 ? [1/6, 1/3, 1/2] : [1/2, 1/3, 1/6]\n return [SDDP.Noise(ω, p) for (ω, p) in zip(Ω, probability)]\n end\nend\n\nsimulations = SDDP.simulate(model, 1, sampling_scheme = sampling_scheme)\n\nsimulations[1][3][:noise_term]\n\n# output\n\n(inflow = 65.0, fuel_multiplier = 1.1)","category":"page"},{"location":"guides/simulate_using_a_different_sampling_scheme/#Historical-simulation","page":"Simulate using a different sampling scheme","title":"Historical simulation","text":"","category":"section"},{"location":"guides/simulate_using_a_different_sampling_scheme/","page":"Simulate using a different sampling scheme","title":"Simulate using a different sampling scheme","text":"Instead of performing a Monte Carlo simulation like the previous tutorials, we may want to simulate one particular sequence of noise realizations. This historical simulation can also be conducted by passing a SDDP.Historical sampling scheme to the sampling_scheme keyword of the SDDP.simulate function.","category":"page"},{"location":"guides/simulate_using_a_different_sampling_scheme/","page":"Simulate using a different sampling scheme","title":"Simulate using a different sampling scheme","text":"We can confirm that the historical sequence of nodes was visited by querying the :node_index key of the simulation results.","category":"page"},{"location":"guides/simulate_using_a_different_sampling_scheme/","page":"Simulate using a different sampling scheme","title":"Simulate using a different sampling scheme","text":"simulations = SDDP.simulate(\n model,\n sampling_scheme = SDDP.Historical([\n ((1, 1), Ω[1]),\n ((2, 2), Ω[3]),\n ((3, 1), Ω[2])\n ])\n)\n\n[stage[:node_index] for stage in simulations[1]]\n\n# output\n\n3-element Array{Tuple{Int64,Int64},1}:\n (1, 1)\n (2, 2)\n (3, 1)","category":"page"},{"location":"examples/StochDynamicProgramming.jl_stock/","page":"StochDynamicProgramming: the stock problem","title":"StochDynamicProgramming: the stock problem","text":"EditURL = \"https://github.com/odow/SDDP.jl/blob/master/docs/src/examples/StochDynamicProgramming.jl_stock.jl\"","category":"page"},{"location":"examples/StochDynamicProgramming.jl_stock/#StochDynamicProgramming:-the-stock-problem","page":"StochDynamicProgramming: the stock problem","title":"StochDynamicProgramming: the stock problem","text":"","category":"section"},{"location":"examples/StochDynamicProgramming.jl_stock/","page":"StochDynamicProgramming: the stock problem","title":"StochDynamicProgramming: the stock problem","text":"This example comes from StochDynamicProgramming.jl.","category":"page"},{"location":"examples/StochDynamicProgramming.jl_stock/","page":"StochDynamicProgramming: the stock problem","title":"StochDynamicProgramming: the stock problem","text":"using SDDP, GLPK, Test\n\nfunction stock_example()\n model = SDDP.PolicyGraph(\n SDDP.LinearGraph(5),\n bellman_function = SDDP.BellmanFunction(lower_bound = -2),\n optimizer = GLPK.Optimizer,\n ) do sp, stage\n @variable(sp, 0 <= state <= 1, SDDP.State, initial_value = 0.5)\n @variable(sp, 0 <= control <= 0.5)\n @variable(sp, ξ)\n @constraint(sp, state.out == state.in - control + ξ)\n SDDP.parameterize(sp, 0.0:1/30:0.3) do ω\n JuMP.fix(ξ, ω)\n end\n @stageobjective(sp, (sin(3 * stage) - 1) * control)\n end\n SDDP.train(model, iteration_limit = 50, log_frequency = 10)\n @test SDDP.calculate_bound(model) ≈ -1.471 atol = 0.001\n simulation_results = SDDP.simulate(model, 1_000)\n @test 
length(simulation_results) == 1_000\n μ = SDDP.Statistics.mean(\n sum(data[:stage_objective] for data in simulation)\n for simulation in simulation_results\n )\n @test μ ≈ -1.471 atol = 0.05\n return\nend\n\nstock_example()","category":"page"},{"location":"examples/StochDynamicProgramming.jl_stock/","page":"StochDynamicProgramming: the stock problem","title":"StochDynamicProgramming: the stock problem","text":"------------------------------------------------------------------------------\n SDDP.jl (c) Oscar Dowson, 2017-21\n\nProblem\n Nodes : 5\n State variables : 1\n Scenarios : 1.00000e+05\n Solver : serial mode\n\nNumerical stability report\n Non-zero Matrix range [1e+00, 1e+00]\n Non-zero Objective range [3e-01, 2e+00]\n Non-zero Bounds range [5e-01, 2e+00]\n Non-zero RHS range [0e+00, 0e+00]\nNo problems detected\n\n Iteration Simulation Bound Time (s) Proc. ID # Solves\n 10 -1.573790e+00 -1.475506e+00 1.391890e-01 1 550\n 20 -1.567494e+00 -1.472024e+00 1.659811e-01 1 1100\n 30 -1.468424e+00 -1.471412e+00 1.809011e-01 1 1650\n 40 -1.674556e+00 -1.471359e+00 1.971500e-01 1 2200\n 50 -1.506264e+00 -1.471289e+00 2.176049e-01 1 2750\n\nTerminating training with status: iteration_limit\n------------------------------------------------------------------------------\n","category":"page"},{"location":"examples/StochDynamicProgramming.jl_stock/","page":"StochDynamicProgramming: the stock problem","title":"StochDynamicProgramming: the stock problem","text":"","category":"page"},{"location":"examples/StochDynamicProgramming.jl_stock/","page":"StochDynamicProgramming: the stock problem","title":"StochDynamicProgramming: the stock problem","text":"This page was generated using Literate.jl.","category":"page"},{"location":"examples/agriculture_mccardle_farm/","page":"The farm planning problem","title":"The farm planning problem","text":"EditURL = \"https://github.com/odow/SDDP.jl/blob/master/docs/src/examples/agriculture_mccardle_farm.jl\"","category":"page"},{"location":"examples/agriculture_mccardle_farm/#The-farm-planning-problem","page":"The farm planning problem","title":"The farm planning problem","text":"","category":"section"},{"location":"examples/agriculture_mccardle_farm/","page":"The farm planning problem","title":"The farm planning problem","text":"There are four stages. The first stage is a deterministic planning stage. The next three are wait-and-see operational stages. The uncertainty in the three operational stages is a Markov chain for weather. There are three Markov states: dry, normal, and wet.","category":"page"},{"location":"examples/agriculture_mccardle_farm/","page":"The farm planning problem","title":"The farm planning problem","text":"Inspired by R. McCardle, Farm management optimization. 
Masters thesis, University of Louisville, Louisville, Kentucky, United States of America (2009).","category":"page"},{"location":"examples/agriculture_mccardle_farm/","page":"The farm planning problem","title":"The farm planning problem","text":"All data, including short variable names, is taken from that thesis.","category":"page"},{"location":"examples/agriculture_mccardle_farm/","page":"The farm planning problem","title":"The farm planning problem","text":"using SDDP, GLPK, Test\n\nfunction test_mccardle_farm_model()\n S = [ # cutting, stage\n 0 1 2\n 0 0 1\n 0 0 0\n ]\n t = [60, 60, 245] # days in period\n D = [210, 210, 858] # demand\n q = [ # selling price per bale\n [4.5 4.5 4.5; 4.5 4.5 4.5; 4.5 4.5 4.5],\n [5.5 5.5 5.5; 5.5 5.5 5.5; 5.5 5.5 5.5],\n [6.5 6.5 6.5; 6.5 6.5 6.5; 6.5 6.5 6.5],\n ]\n b = [ # predicted yield (bales/acres) from cutting i in weather j.\n 30 75 37.5\n 15 37.5 18.25\n 7.5 18.75 9.325\n ]\n w = 3000 # max storage\n C = [50 50 50; 50 50 50; 50 50 50] # cost to grow hay\n r = [ # Cost per bale of hay from cutting i during weather condition j.\n [5 5 5; 5 5 5; 5 5 5],\n [6 6 6; 6 6 6; 6 6 6],\n [7 7 7; 7 7 7; 7 7 7],\n ]\n M = 60.0 # max acreage for planting\n H = 0.0 # initial inventory\n V = [0.05, 0.05, 0.05] # inventory cost\n L = 3000.0 # max demand for hay\n\n graph = SDDP.MarkovianGraph([\n ones(Float64, 1, 1),\n [0.14 0.69 0.17],\n [0.14 0.69 0.17; 0.14 0.69 0.17; 0.14 0.69 0.17],\n [0.14 0.69 0.17; 0.14 0.69 0.17; 0.14 0.69 0.17],\n ])\n\n model = SDDP.PolicyGraph(\n graph,\n bellman_function = SDDP.BellmanFunction(lower_bound = 0.0),\n optimizer = GLPK.Optimizer,\n ) do subproblem, index\n stage, weather = index\n # ===================== State Variables =====================\n # Area planted.\n @variable(subproblem, 0 <= acres <= M, SDDP.State, initial_value = M)\n @variable(\n subproblem,\n bales[i = 1:3] >= 0,\n SDDP.State,\n initial_value = (i == 1 ? 
H : 0)\n )\n # ===================== Variables =====================\n @variables(subproblem, begin\n buy[1:3] >= 0 # Quantity of bales to buy from each cutting.\n sell[1:3] >= 0 # Quantity of bales to sell from each cutting.\n eat[1:3] >= 0 # Quantity of bales to eat from each cutting.\n pen_p[1:3] >= 0 # Penalties\n pen_n[1:3] >= 0 # Penalties\n end)\n # ===================== Constraints =====================\n if stage == 1\n @constraint(subproblem, acres.out <= acres.in)\n @constraint(subproblem, [i = 1:3], bales[i].in == bales[i].out)\n else\n @expression(\n subproblem,\n cut_ex[c = 1:3],\n bales[c].in + buy[c] - eat[c] - sell[c] + pen_p[c] - pen_n[c]\n )\n @constraints(\n subproblem,\n begin\n # Cannot plant more land than previously cropped.\n acres.out <= acres.in\n # In each stage we need to meet demand.\n sum(eat) >= D[stage-1]\n # We can buy and sell other cuttings.\n bales[stage-1].out == cut_ex[stage-1] + acres.in * b[stage-1, weather]\n [c = 1:3; c != stage - 1], bales[c].out == cut_ex[c]\n # There is some maximum storage.\n sum(bales[i].out for i = 1:3) <= w\n # We can only sell what is in storage.\n [c = 1:3], sell[c] <= bales[c].in\n # Maximum sales quantity.\n sum(sell) <= L\n end\n )\n end\n # ===================== Stage objective =====================\n if stage == 1\n @stageobjective(subproblem, 0.0)\n else\n @stageobjective(\n subproblem,\n 1000 * (sum(pen_p) + sum(pen_n)) +\n # cost of growing\n C[stage-1, weather] * acres.in +\n sum(\n # inventory cost\n V[stage-1] * bales[cutting].in * t[stage-1] +\n # purchase cost\n r[cutting][stage-1, weather] * buy[cutting] +\n # feed cost\n S[cutting, stage-1] * eat[cutting] -\n # sell reward\n q[cutting][stage-1, weather] * sell[cutting] for cutting = 1:3\n )\n )\n end\n return\n end\n SDDP.train(model, iteration_limit = 50, log_frequency = 10)\n @test SDDP.termination_status(model) == :iteration_limit\n @test SDDP.calculate_bound(model) ≈ 4074.1391 atol = 1e-5\nend\n\ntest_mccardle_farm_model()","category":"page"},{"location":"examples/agriculture_mccardle_farm/","page":"The farm planning problem","title":"The farm planning problem","text":"Test Passed","category":"page"},{"location":"examples/agriculture_mccardle_farm/","page":"The farm planning problem","title":"The farm planning problem","text":"","category":"page"},{"location":"examples/agriculture_mccardle_farm/","page":"The farm planning problem","title":"The farm planning problem","text":"This page was generated using Literate.jl.","category":"page"},{"location":"tutorial/13_integrality/","page":"Advanced III: integrality","title":"Advanced III: integrality","text":"EditURL = \"https://github.com/odow/SDDP.jl/blob/master/docs/src/tutorial/13_integrality.jl\"","category":"page"},{"location":"tutorial/13_integrality/#Advanced-III:-integrality","page":"Advanced III: integrality","title":"Advanced III: integrality","text":"","category":"section"},{"location":"tutorial/13_integrality/","page":"Advanced III: integrality","title":"Advanced III: integrality","text":"The fundamental reason why SDDP works is convexity. In the classical algorithm, this ruled out the use of integer variables. 
However, various extensions to the algorithm have been proposed, and these can be run by SDDP.jl using the integrality_handler keyword argument to SDDP.PolicyGraph.","category":"page"},{"location":"tutorial/13_integrality/#Continuous-relaxation","page":"Advanced III: integrality","title":"Continuous relaxation","text":"","category":"section"},{"location":"tutorial/13_integrality/","page":"Advanced III: integrality","title":"Advanced III: integrality","text":"If your model includes binary or integer variables (e.g., air_conditioning.jl), passing integrality_handler = SDDP.ContinuousRelaxation() will make SDDP.jl construct a (sub-optimal) policy using the continuous relaxation of the problem. But, when you simulate this policy, SDDP.jl will solve the original mixed-integer problem.","category":"page"},{"location":"tutorial/13_integrality/#SDDiP","page":"Advanced III: integrality","title":"SDDiP","text":"","category":"section"},{"location":"tutorial/13_integrality/","page":"Advanced III: integrality","title":"Advanced III: integrality","text":"If you pass integrality_handler = SDDP.SDDiP(), SDDP.jl will use the stochastic dual dynamic integer programming method of Zou, Ahmed, and Sun. See SDDP.SDDiP for more.","category":"page"},{"location":"tutorial/13_integrality/","page":"Advanced III: integrality","title":"Advanced III: integrality","text":"","category":"page"},{"location":"tutorial/13_integrality/","page":"Advanced III: integrality","title":"Advanced III: integrality","text":"This page was generated using Literate.jl.","category":"page"},{"location":"examples/vehicle_location/","page":"Vehicle location","title":"Vehicle location","text":"EditURL = \"https://github.com/odow/SDDP.jl/blob/master/docs/src/examples/vehicle_location.jl\"","category":"page"},{"location":"examples/vehicle_location/#Vehicle-location","page":"Vehicle location","title":"Vehicle location","text":"","category":"section"},{"location":"examples/vehicle_location/","page":"Vehicle location","title":"Vehicle location","text":"This problem is a version of the Ambulance dispatch problem. A hospital is located at 0 on the number line that stretches from 0 to 100. Ambulance bases are located at points 20, 40, 60, 80, and 100. When not responding to a call, ambulances must be located at a base or the hospital. In this example there are three ambulances.","category":"page"},{"location":"examples/vehicle_location/","page":"Vehicle location","title":"Vehicle location","text":"Example location:","category":"page"},{"location":"examples/vehicle_location/","page":"Vehicle location","title":"Vehicle location","text":"H B B B B B\n0 ---- 20 ---- 40 ---- 60 ---- 80 ---- 100","category":"page"},{"location":"examples/vehicle_location/","page":"Vehicle location","title":"Vehicle location","text":"In each stage, a call comes in from somewhere on the number line. The agent must decide which ambulance to dispatch. They pay the cost of twice the driving distance. If an ambulance is not dispatched in a stage, the ambulance can be relocated to a different base in preparation for future calls. 
This incurrs a cost of the driving distance.","category":"page"},{"location":"examples/vehicle_location/","page":"Vehicle location","title":"Vehicle location","text":"using SDDP, GLPK, Test\n\nfunction vehicle_location_model(integrality_handler)\n hospital_location = 0\n bases = vcat(hospital_location, [20, 40, 60, 80, 100])\n vehicles = [1, 2, 3]\n requests = 0:2:100\n shift_cost(src, dest) = abs(src - dest)\n function dispatch_cost(base, request)\n return 2 * (abs(request - hospital_location) + abs(request - base))\n end\n # Initial state of emergency vehicles at bases. All ambulances start at the\n # hospital.\n initial_state(b, v) = b == hospital_location ? 1.0 : 0.0\n model = SDDP.LinearPolicyGraph(\n stages = 10,\n lower_bound = 0.0,\n optimizer = GLPK.Optimizer,\n integrality_handler = integrality_handler,\n ) do sp, t\n # Current location of each vehicle at each base.\n @variable(\n sp,\n 0 <= location[b = bases, v = vehicles] <= 1,\n SDDP.State,\n initial_value = initial_state(b, v)\n )\n @variables(sp, begin\n # Which vehicle is dipatched?\n 0 <= dispatch[bases, vehicles] <= 1, Bin\n # Shifting vehicles between bases: [src, dest, vehicle]\n 0 <= shift[bases, bases, vehicles] <= 1, Bin\n end)\n # Flow of vehicles in and out of bases:\n @expression(\n sp,\n base_balance[b in bases, v in vehicles],\n location[b, v].in - dispatch[b, v] - sum(shift[b, :, v]) + sum(shift[:, b, v])\n )\n @constraints(\n sp,\n begin\n # Only one vehicle dispatched to call.\n sum(dispatch) == 1\n # Can only dispatch vehicle from base if vehicle is at that base.\n [b in bases, v in vehicles], dispatch[b, v] <= location[b, v].in\n # Can only shift vehicle if vehicle is at that src base.\n [b in bases, v in vehicles], sum(shift[b, :, v]) <= location[b, v].in\n # Can only shift vehicle if vehicle is not being dispatched.\n [b in bases, v in vehicles], sum(shift[b, :, v]) + dispatch[b, v] <= 1\n # Can't shift to same base.\n [b in bases, v in vehicles], shift[b, b, v] == 0\n # Update states for non-home/non-hospital bases.\n [b in bases[2:end], v in vehicles],\n location[b, v].out == base_balance[b, v]\n # Update states for home/hospital bases.\n [v in vehicles],\n location[hospital_location, v].out ==\n base_balance[hospital_location, v] + sum(dispatch[:, v])\n end\n )\n SDDP.parameterize(sp, requests) do request\n @stageobjective(\n sp,\n sum(\n # Distance to travel from base to emergency and then to hospital.\n dispatch[b, v] * dispatch_cost(b, request) +\n # Distance travelled by vehicles relocating bases.\n sum(shift_cost(b, dest) * shift[b, dest, v] for dest in bases)\n for b in bases, v in vehicles\n )\n )\n end\n end\n SDDP.train(model, iteration_limit = 50, log_frequency = 10)\n if integrality_handler == SDDP.ContinuousRelaxation()\n @test isapprox(SDDP.calculate_bound(model), 1700, atol = 5)\n end\n return\nend","category":"page"},{"location":"examples/vehicle_location/","page":"Vehicle location","title":"Vehicle location","text":"vehicle_location_model (generic function with 1 method)","category":"page"},{"location":"examples/vehicle_location/","page":"Vehicle location","title":"Vehicle location","text":"Solve a continuous relaxation only, tough for SDDiP","category":"page"},{"location":"examples/vehicle_location/","page":"Vehicle location","title":"Vehicle location","text":"vehicle_location_model(SDDP.ContinuousRelaxation())","category":"page"},{"location":"examples/vehicle_location/","page":"Vehicle location","title":"Vehicle 
location","text":"------------------------------------------------------------------------------\n SDDP.jl (c) Oscar Dowson, 2017-21\n\nProblem\n Nodes : 10\n State variables : 18\n Scenarios : 1.19042e+17\n Solver : serial mode\n\nNumerical stability report\n Non-zero Matrix range [1e+00, 1e+00]\n Non-zero Objective range [1e+00, 4e+02]\n Non-zero Bounds range [1e+00, 1e+00]\n Non-zero RHS range [1e+00, 1e+00]\nNo problems detected\n\n Iteration Simulation Bound Time (s) Proc. ID # Solves\n 10 1.442857e+03 1.555006e+03 2.003483e+00 1 5200\n 20 1.827844e+03 1.639138e+03 4.064782e+00 1 10400\n 30 1.950965e+03 1.675964e+03 6.200504e+00 1 15600\n 40 2.538518e+03 1.692907e+03 8.360877e+00 1 20800\n 50 1.176808e+03 1.698158e+03 1.055149e+01 1 26000\n\nTerminating training with status: iteration_limit\n------------------------------------------------------------------------------\n","category":"page"},{"location":"examples/vehicle_location/","page":"Vehicle location","title":"Vehicle location","text":"","category":"page"},{"location":"examples/vehicle_location/","page":"Vehicle location","title":"Vehicle location","text":"This page was generated using Literate.jl.","category":"page"},{"location":"guides/improve_computational_performance/#Improve-computational-performance","page":"Improve computational performance","title":"Improve computational performance","text":"","category":"section"},{"location":"guides/improve_computational_performance/","page":"Improve computational performance","title":"Improve computational performance","text":"SDDP is a computationally intensive algorithm. Here are some suggestions for how the computational performance can be improved.","category":"page"},{"location":"guides/improve_computational_performance/#Numerical-stability-(again)","page":"Improve computational performance","title":"Numerical stability (again)","text":"","category":"section"},{"location":"guides/improve_computational_performance/","page":"Improve computational performance","title":"Improve computational performance","text":"We've already discussed this in the Numerical stability section of Basic VI: words of warning. But, it's so important that we're going to say it again: improving the problem scaling is one of the best ways to improve the numerical performance of your models.","category":"page"},{"location":"guides/improve_computational_performance/#Solver-selection","page":"Improve computational performance","title":"Solver selection","text":"","category":"section"},{"location":"guides/improve_computational_performance/","page":"Improve computational performance","title":"Improve computational performance","text":"The majority of the solution time is spent inside the low-level solvers. Choosing which solver (and the associated settings) correctly can lead to big speed-ups.","category":"page"},{"location":"guides/improve_computational_performance/","page":"Improve computational performance","title":"Improve computational performance","text":"Choose a commercial solver.\nOptions include CPLEX, Gurobi, and Xpress. Using free solvers such as CLP and GLPK isn't a viable approach for large problems.\nTry different solvers.","category":"page"},{"location":"guides/improve_computational_performance/","page":"Improve computational performance","title":"Improve computational performance","text":"Even commercial solvers can have wildly different solution times. 
We've seen models on which CPLEX was 50% faster than Gurobi, and vice versa.","category":"page"},{"location":"guides/improve_computational_performance/","page":"Improve computational performance","title":"Improve computational performance","text":"Experiment with different solver options.\nUsing the default settings is usually a good option. However, sometimes it can pay to change these. In particular, forcing solvers to use the dual simplex algorithm (e.g., Method=1 in Gurobi) is usually a performance win.","category":"page"},{"location":"guides/improve_computational_performance/#Single-cut-vs.-multi-cut","page":"Improve computational performance","title":"Single-cut vs. multi-cut","text":"","category":"section"},{"location":"guides/improve_computational_performance/","page":"Improve computational performance","title":"Improve computational performance","text":"There are two competing ways that cuts can be created in SDDP: single-cut and multi-cut. By default, SDDP.jl uses the single-cut version of SDDP.","category":"page"},{"location":"guides/improve_computational_performance/","page":"Improve computational performance","title":"Improve computational performance","text":"The performance of each method is problem-dependent. We recommend that you try both in order to see which one performs better. In general, the single-cut method works better when the number of realizations of the stagewise-independent random variable is large, whereas the multi-cut method works better on small problems. However, the multi-cut method can cause numerical stability problems, particularly if used in conjunction with objective or belief state variables.","category":"page"},{"location":"guides/improve_computational_performance/","page":"Improve computational performance","title":"Improve computational performance","text":"You can switch between the methods by passing the relevant flag to cut_type in SDDP.train.","category":"page"},{"location":"guides/improve_computational_performance/","page":"Improve computational performance","title":"Improve computational performance","text":"SDDP.train(model; cut_type = SDDP.SINGLE_CUT)\nSDDP.train(model; cut_type = SDDP.MULTI_CUT)","category":"page"},{"location":"guides/improve_computational_performance/#Parallelism","page":"Improve computational performance","title":"Parallelism","text":"","category":"section"},{"location":"guides/improve_computational_performance/","page":"Improve computational performance","title":"Improve computational performance","text":"SDDP.jl can take advantage of the parallel nature of modern computers to solve problems across multiple cores.","category":"page"},{"location":"guides/improve_computational_performance/","page":"Improve computational performance","title":"Improve computational performance","text":"info: Info\nWe highly recommend that you read the Julia manual's section on parallel computing.","category":"page"},{"location":"guides/improve_computational_performance/","page":"Improve computational performance","title":"Improve computational performance","text":"You can start Julia from a command line with N processors using the -p flag:","category":"page"},{"location":"guides/improve_computational_performance/","page":"Improve computational performance","title":"Improve computational performance","text":"julia -p N","category":"page"},{"location":"guides/improve_computational_performance/","page":"Improve computational performance","title":"Improve computational performance","text":"Alternatively, you can use the Distributed.jl 
package:","category":"page"},{"location":"guides/improve_computational_performance/","page":"Improve computational performance","title":"Improve computational performance","text":"using Distributed\nDistributed.addprocs(N)","category":"page"},{"location":"guides/improve_computational_performance/","page":"Improve computational performance","title":"Improve computational performance","text":"warning: Warning\nWorkers DON'T inherit their parent's Pkg environment. Therefore, if you started Julia with --project=/path/to/environment (or if you activated an environment from the REPL), you will need to put the following at the top of your script:using Distributed\n@everywhere begin\n import Pkg\n Pkg.activate(\"/path/to/environment\")\nend","category":"page"},{"location":"guides/improve_computational_performance/","page":"Improve computational performance","title":"Improve computational performance","text":"Currently SDDP.jl supports to parallel schemes, SDDP.Serial and SDDP.Asynchronous. Instances of these parallel schemes should be passed to the parallel_scheme argument of SDDP.train and SDDP.simulate.","category":"page"},{"location":"guides/improve_computational_performance/","page":"Improve computational performance","title":"Improve computational performance","text":"using SDDP, GLPK\nmodel = SDDP.LinearPolicyGraph(\n stages = 2, lower_bound = 0, optimizer = GLPK.Optimizer\n) do sp, t\n @variable(sp, x >= 0, SDDP.State, initial_value = 1)\n @stageobjective(sp, x.out)\nend\nSDDP.train(model; iteration_limit = 10, parallel_scheme = SDDP.Asynchronous())\nSDDP.simulate(model, 10; parallel_scheme = SDDP.Asynchronous())","category":"page"},{"location":"guides/improve_computational_performance/","page":"Improve computational performance","title":"Improve computational performance","text":"There is a large overhead for using the asynchronous solver. Even if you choose asynchronous mode, SDDP.jl will start in serial mode while the initialization takes place. Therefore, in the log you will see that the initial iterations take place on the master thread (Proc. ID = 1), and it is only after while that the solve switches to full parallelism.","category":"page"},{"location":"guides/improve_computational_performance/","page":"Improve computational performance","title":"Improve computational performance","text":"info: Info\nBecause of the large data communication requirements (all cuts have to be shared with all other cores), the solution time will not scale linearly with the number of cores.","category":"page"},{"location":"guides/improve_computational_performance/","page":"Improve computational performance","title":"Improve computational performance","text":"info: Info\nGiven the same number of iterations, the policy obtained from asynchronous mode will be worse than the policy obtained from serial mode. However, the asynchronous solver can take significantly less time to compute the same number of iterations.","category":"page"},{"location":"guides/improve_computational_performance/#Data-movement","page":"Improve computational performance","title":"Data movement","text":"","category":"section"},{"location":"guides/improve_computational_performance/","page":"Improve computational performance","title":"Improve computational performance","text":"By defualt, data defined on the master process is not made available to the workers. 
Therefore, a model like the following:","category":"page"},{"location":"guides/improve_computational_performance/","page":"Improve computational performance","title":"Improve computational performance","text":"data = 1\nmodel = SDDP.LinearPolicyGraph(stages = 2, lower_bound = 0) do sp, t\n @variable(sp, x >= 0, SDDP.State, initial_value = data)\n @stageobjective(sp, x.out)\nend","category":"page"},{"location":"guides/improve_computational_performance/","page":"Improve computational performance","title":"Improve computational performance","text":"will result in an UndefVarError like UndefVarError: data not defined.","category":"page"},{"location":"guides/improve_computational_performance/","page":"Improve computational performance","title":"Improve computational performance","text":"There are three solutions for this problem.","category":"page"},{"location":"guides/improve_computational_performance/#Option-1:-declare-data-inside-the-build-function","page":"Improve computational performance","title":"Option 1: declare data inside the build function","text":"","category":"section"},{"location":"guides/improve_computational_performance/","page":"Improve computational performance","title":"Improve computational performance","text":"model = SDDP.LinearPolicyGraph(stages = 2, lower_bound = 0) do sp, t\n data = 1\n @variable(sp, x >= 0, SDDP.State, initial_value = data)\n @stageobjective(sp, x.out)\nend","category":"page"},{"location":"guides/improve_computational_performance/#Option-2:-use-@everywhere","page":"Improve computational performance","title":"Option 2: use @everywhere","text":"","category":"section"},{"location":"guides/improve_computational_performance/","page":"Improve computational performance","title":"Improve computational performance","text":"@everywhere begin\n data = 1\nend\nmodel = SDDP.LinearPolicyGraph(stages = 2, lower_bound = 0) do sp, t\n @variable(sp, x >= 0, SDDP.State, initial_value = data)\n @stageobjective(sp, x.out)\nend","category":"page"},{"location":"guides/improve_computational_performance/#Option-3:-build-the-model-in-a-function","page":"Improve computational performance","title":"Option 3: build the model in a function","text":"","category":"section"},{"location":"guides/improve_computational_performance/","page":"Improve computational performance","title":"Improve computational performance","text":"function build_model()\n data = 1\n return SDDP.LinearPolicyGraph(stages = 2, lower_bound = 0) do sp, t\n @variable(sp, x >= 0, SDDP.State, initial_value = data)\n @stageobjective(sp, x.out)\n end\nend\n\nmodel = build_model()","category":"page"},{"location":"guides/improve_computational_performance/#Initialization-hooks","page":"Improve computational performance","title":"Initialization hooks","text":"","category":"section"},{"location":"guides/improve_computational_performance/","page":"Improve computational performance","title":"Improve computational performance","text":"SDDP.Asynchronous accepts a pre-processing hook that is run on each worker process before the model is solved. The most useful situation is for solvers that need an initialization step. A good example is Gurobi, which can share an environment amongst all models on a worker. 
Notably, this environment cannot be shared amongst workers, so defining one environment at the top of a script will fail!","category":"page"},{"location":"guides/improve_computational_performance/","page":"Improve computational performance","title":"Improve computational performance","text":"To initialize a new environment on each worker, use the following:","category":"page"},{"location":"guides/improve_computational_performance/","page":"Improve computational performance","title":"Improve computational performance","text":"SDDP.train(\n model,\n parallel_scheme = SDDP.Asynchronous() do m\n env = Gurobi.Env()\n optimizer = with_optimizer(Gurobi.Optimizer, env, OutputFlag = 0)\n for node in values(m.nodes)\n set_optimizer(node.subproblem, optimizer)\n end\n end\n)","category":"page"},{"location":"examples/FAST_quickstart/","page":"FAST: the quickstart problem","title":"FAST: the quickstart problem","text":"EditURL = \"https://github.com/odow/SDDP.jl/blob/master/docs/src/examples/FAST_quickstart.jl\"","category":"page"},{"location":"examples/FAST_quickstart/#FAST:-the-quickstart-problem","page":"FAST: the quickstart problem","title":"FAST: the quickstart problem","text":"","category":"section"},{"location":"examples/FAST_quickstart/","page":"FAST: the quickstart problem","title":"FAST: the quickstart problem","text":"An implementation of the QuickStart example from FAST","category":"page"},{"location":"examples/FAST_quickstart/","page":"FAST: the quickstart problem","title":"FAST: the quickstart problem","text":"using SDDP, GLPK, Test\n\nfunction fast_quickstart()\n model = SDDP.PolicyGraph(\n SDDP.LinearGraph(2),\n bellman_function = SDDP.BellmanFunction(lower_bound = -5),\n optimizer = GLPK.Optimizer,\n ) do sp, t\n @variable(sp, x >= 0, SDDP.State, initial_value = 0.0)\n if t == 1\n @stageobjective(sp, x.out)\n else\n @variable(sp, s >= 0)\n @constraint(sp, s <= x.in)\n SDDP.parameterize(sp, [2, 3]) do ω\n JuMP.set_upper_bound(s, ω)\n end\n @stageobjective(sp, -2s)\n end\n end\n\n det = SDDP.deterministic_equivalent(model, GLPK.Optimizer)\n JuMP.optimize!(det)\n @test JuMP.objective_value(det) == -2\n\n SDDP.train(model, iteration_limit = 3)\n @test SDDP.calculate_bound(model) == -2\nend\n\nfast_quickstart()","category":"page"},{"location":"examples/FAST_quickstart/","page":"FAST: the quickstart problem","title":"FAST: the quickstart problem","text":"Test Passed","category":"page"},{"location":"examples/FAST_quickstart/","page":"FAST: the quickstart problem","title":"FAST: the quickstart problem","text":"","category":"page"},{"location":"examples/FAST_quickstart/","page":"FAST: the quickstart problem","title":"FAST: the quickstart problem","text":"This page was generated using Literate.jl.","category":"page"},{"location":"examples/StructDualDynProg.jl_prob5.2_3stages/","page":"StructDualDynProg: Problem 5.2, 3 stages","title":"StructDualDynProg: Problem 5.2, 3 stages","text":"EditURL = \"https://github.com/odow/SDDP.jl/blob/master/docs/src/examples/StructDualDynProg.jl_prob5.2_3stages.jl\"","category":"page"},{"location":"examples/StructDualDynProg.jl_prob5.2_3stages/#StructDualDynProg:-Problem-5.2,-3-stages","page":"StructDualDynProg: Problem 5.2, 3 stages","title":"StructDualDynProg: Problem 5.2, 3 stages","text":"","category":"section"},{"location":"examples/StructDualDynProg.jl_prob5.2_3stages/","page":"StructDualDynProg: Problem 5.2, 3 stages","title":"StructDualDynProg: Problem 5.2, 3 stages","text":"This example comes from 
StochasticDualDynamicProgramming.jl.","category":"page"},{"location":"examples/StructDualDynProg.jl_prob5.2_3stages/","page":"StructDualDynProg: Problem 5.2, 3 stages","title":"StructDualDynProg: Problem 5.2, 3 stages","text":"using SDDP, GLPK, Test\n\nfunction test_prob52_3stages()\n model = SDDP.LinearPolicyGraph(\n stages = 3,\n lower_bound = 0.0,\n optimizer = GLPK.Optimizer,\n ) do sp, t\n n = 4\n m = 3\n ic = [16, 5, 32, 2]\n C = [25, 80, 6.5, 160]\n T = [8760, 7000, 1500] / 8760\n D2 = [diff([0, 3919, 7329, 10315]) diff([0, 7086, 9004, 11169])]\n p2 = [0.9, 0.1]\n @variable(sp, x[i = 1:n] >= 0, SDDP.State, initial_value = 0.0)\n @variables(sp, begin\n y[1:n, 1:m] >= 0\n v[1:n] >= 0\n penalty >= 0\n ξ[j = 1:m]\n end)\n @constraints(sp, begin\n [i = 1:n], x[i].out == x[i].in + v[i]\n [i = 1:n], sum(y[i, :]) <= x[i].in\n [j = 1:m], sum(y[:, j]) + penalty >= ξ[j]\n end)\n @stageobjective(sp, ic'v + C' * y * T + 1e5 * penalty)\n if t != 1 # no uncertainty in first stage\n SDDP.parameterize(sp, 1:size(D2, 2), p2) do ω\n for j = 1:m\n JuMP.fix(ξ[j], D2[j, ω])\n end\n end\n end\n if t == 3\n @constraint(sp, sum(v) == 0)\n end\n end\n\n det = SDDP.deterministic_equivalent(model, GLPK.Optimizer)\n JuMP.optimize!(det)\n @test JuMP.objective_value(det) ≈ 406712.49 atol = 0.1\n\n SDDP.train(model, iteration_limit = 30, log_frequency = 10)\n @test SDDP.calculate_bound(model) ≈ 406712.49 atol = 0.1\n return\nend\n\ntest_prob52_3stages()","category":"page"},{"location":"examples/StructDualDynProg.jl_prob5.2_3stages/","page":"StructDualDynProg: Problem 5.2, 3 stages","title":"StructDualDynProg: Problem 5.2, 3 stages","text":"------------------------------------------------------------------------------\n SDDP.jl (c) Oscar Dowson, 2017-21\n\nProblem\n Nodes : 3\n State variables : 4\n Scenarios : 4.00000e+00\n Solver : serial mode\n\nNumerical stability report\n Non-zero Matrix range [1e+00, 1e+00]\n Non-zero Objective range [1e+00, 1e+05]\n Non-zero Bounds range [0e+00, 0e+00]\n Non-zero RHS range [0e+00, 0e+00]\nNo problems detected\n\n Iteration Simulation Bound Time (s) Proc. 
ID # Solves\n 10 4.223843e+05 3.391878e+05 1.101303e-02 1 80\n 20 4.004771e+05 4.063231e+05 3.161383e-02 1 160\n 30 3.959476e+05 4.067125e+05 3.732705e-02 1 240\n\nTerminating training with status: iteration_limit\n------------------------------------------------------------------------------\n","category":"page"},{"location":"examples/StructDualDynProg.jl_prob5.2_3stages/","page":"StructDualDynProg: Problem 5.2, 3 stages","title":"StructDualDynProg: Problem 5.2, 3 stages","text":"","category":"page"},{"location":"examples/StructDualDynProg.jl_prob5.2_3stages/","page":"StructDualDynProg: Problem 5.2, 3 stages","title":"StructDualDynProg: Problem 5.2, 3 stages","text":"This page was generated using Literate.jl.","category":"page"},{"location":"examples/infinite_horizon_hydro_thermal/","page":"Infinite horizon hydro-thermal","title":"Infinite horizon hydro-thermal","text":"EditURL = \"https://github.com/odow/SDDP.jl/blob/master/docs/src/examples/infinite_horizon_hydro_thermal.jl\"","category":"page"},{"location":"examples/infinite_horizon_hydro_thermal/#Infinite-horizon-hydro-thermal","page":"Infinite horizon hydro-thermal","title":"Infinite horizon hydro-thermal","text":"","category":"section"},{"location":"examples/infinite_horizon_hydro_thermal/","page":"Infinite horizon hydro-thermal","title":"Infinite horizon hydro-thermal","text":"using SDDP, GLPK, Test, Statistics\n\nfunction infinite_hydro_thermal(; cut_type)\n Ω = [\n (inflow = 0.0, demand = 7.5),\n (inflow = 5.0, demand = 5),\n (inflow = 10.0, demand = 2.5),\n ]\n graph =\n SDDP.Graph(:root_node, [:week], [(:root_node => :week, 1.0), (:week => :week, 0.9)])\n model = SDDP.PolicyGraph(\n graph,\n bellman_function = SDDP.BellmanFunction(lower_bound = 0, cut_type = cut_type),\n optimizer = GLPK.Optimizer,\n ) do subproblem, node\n @variable(subproblem, 5.0 <= reservoir <= 15.0, SDDP.State, initial_value = 10.0)\n @variables(subproblem, begin\n thermal_generation >= 0\n hydro_generation >= 0\n spill >= 0\n inflow\n demand\n end)\n @constraints(\n subproblem,\n begin\n reservoir.out == reservoir.in - hydro_generation - spill + inflow\n hydro_generation + thermal_generation == demand\n end\n )\n @stageobjective(subproblem, 10 * spill + thermal_generation)\n SDDP.parameterize(subproblem, Ω) do ω\n JuMP.fix(inflow, ω.inflow)\n JuMP.fix(demand, ω.demand)\n end\n end\n SDDP.train(\n model;\n time_limit = 2.0,\n log_frequency = 100,\n sampling_scheme = SDDP.InSampleMonteCarlo(terminate_on_cycle = true),\n cycle_discretization_delta = 0.1,\n )\n @test SDDP.calculate_bound(model) ≈ 119.167 atol = 0.1\n\n results = SDDP.simulate(model, 500)\n objectives = [sum(s[:stage_objective] for s in simulation) for simulation in results]\n sample_mean = round(Statistics.mean(objectives); digits = 2)\n sample_ci = round(1.96 * Statistics.std(objectives) / sqrt(500); digits = 2)\n println(\"Confidence_interval = $(sample_mean) ± $(sample_ci)\")\n @test sample_mean - sample_ci <= 119.167 <= sample_mean + sample_ci\n return\nend\n\ninfinite_hydro_thermal(cut_type = SDDP.SINGLE_CUT)\ninfinite_hydro_thermal(cut_type = SDDP.MULTI_CUT)","category":"page"},{"location":"examples/infinite_horizon_hydro_thermal/","page":"Infinite horizon hydro-thermal","title":"Infinite horizon hydro-thermal","text":"------------------------------------------------------------------------------\n SDDP.jl (c) Oscar Dowson, 2017-21\n\nProblem\n Nodes : 1\n State variables : 1\n Scenarios : Inf\n Solver : serial mode\n\nNumerical stability report\n Non-zero Matrix range [1e+00, 1e+00]\n 
Non-zero Objective range [1e+00, 1e+01]\n Non-zero Bounds range [5e+00, 2e+01]\n Non-zero RHS range [0e+00, 0e+00]\nNo problems detected\n\n Iteration Simulation Bound Time (s) Proc. ID # Solves\n 100 2.500000e+00 1.188652e+02 6.088090e-01 1 1068\n 200 0.000000e+00 1.191654e+02 6.703730e-01 1 2128\n 300 2.500000e+00 1.191667e+02 7.410278e-01 1 3196\n 400 5.000000e+00 1.191667e+02 8.381920e-01 1 4260\n 500 5.000000e+01 1.191667e+02 9.325140e-01 1 5340\n 600 0.000000e+00 1.191667e+02 1.029124e+00 1 6412\n 700 2.500000e+01 1.191667e+02 1.153126e+00 1 7476\n 800 3.250000e+01 1.191667e+02 1.294672e+00 1 8540\n 900 2.500000e+01 1.191667e+02 1.443019e+00 1 9600\n 1000 1.000000e+01 1.191667e+02 1.623346e+00 1 10672\n 1100 2.500000e+01 1.191667e+02 1.803401e+00 1 11732\n\nTerminating training with status: time_limit\n------------------------------------------------------------------------------\nConfidence_interval = 119.48 ± 13.34\n------------------------------------------------------------------------------\n SDDP.jl (c) Oscar Dowson, 2017-21\n\nProblem\n Nodes : 1\n State variables : 1\n Scenarios : Inf\n Solver : serial mode\n\nNumerical stability report\n Non-zero Matrix range [1e+00, 1e+00]\n Non-zero Objective range [1e+00, 1e+01]\n Non-zero Bounds range [5e+00, 2e+01]\n Non-zero RHS range [0e+00, 0e+00]\nNo problems detected\n\n Iteration Simulation Bound Time (s) Proc. ID # Solves\n 100 5.000000e+01 1.188481e+02 5.209899e-02 1 1080\n 200 0.000000e+00 1.191649e+02 1.118321e-01 1 2116\n 300 1.500000e+02 1.191667e+02 1.785772e-01 1 3176\n 400 7.500000e+01 1.191667e+02 2.789431e-01 1 4232\n 500 0.000000e+00 1.191667e+02 3.678532e-01 1 5292\n 600 0.000000e+00 1.191667e+02 4.683032e-01 1 6356\n 700 2.500000e+01 1.191667e+02 5.931132e-01 1 7396\n 800 2.500000e+01 1.191667e+02 7.317741e-01 1 8468\n 900 1.000000e+01 1.191667e+02 8.899691e-01 1 9520\n 1000 8.250000e+01 1.191667e+02 1.099913e+00 1 10576\n 1100 2.500000e+01 1.191667e+02 1.298142e+00 1 11640\n 1200 0.000000e+00 1.191667e+02 1.506646e+00 1 12700\n 1300 7.500000e+01 1.191667e+02 1.712604e+00 1 13748\n 1400 5.000000e+00 1.191667e+02 1.974799e+00 1 14820\n\nTerminating training with status: time_limit\n------------------------------------------------------------------------------\nConfidence_interval = 116.19 ± 12.96\n","category":"page"},{"location":"examples/infinite_horizon_hydro_thermal/","page":"Infinite horizon hydro-thermal","title":"Infinite horizon hydro-thermal","text":"","category":"page"},{"location":"examples/infinite_horizon_hydro_thermal/","page":"Infinite horizon hydro-thermal","title":"Infinite horizon hydro-thermal","text":"This page was generated using Literate.jl.","category":"page"},{"location":"apireference/#api_reference_list","page":"API Reference","title":"API Reference","text":"","category":"section"},{"location":"apireference/#Policy-graphs","page":"API Reference","title":"Policy graphs","text":"","category":"section"},{"location":"apireference/","page":"API Reference","title":"API Reference","text":"SDDP.Graph\nSDDP.add_node\nSDDP.add_edge\nSDDP.add_ambiguity_set\nSDDP.LinearGraph\nSDDP.MarkovianGraph\nSDDP.LinearPolicyGraph\nSDDP.MarkovianPolicyGraph\nSDDP.PolicyGraph","category":"page"},{"location":"apireference/#SDDP.Graph","page":"API Reference","title":"SDDP.Graph","text":"Graph(root_node::T) where T\n\nCreate an empty graph struture with the root node root_node.\n\n\n\n\n\n","category":"type"},{"location":"apireference/#SDDP.add_node","page":"API 
Reference","title":"SDDP.add_node","text":"add_node(graph::Graph{T}, node::T) where T\n\nAdd a node to the graph graph.\n\nExamples\n\nadd_node(graph, :A)\n\n\n\n\n\n","category":"function"},{"location":"apireference/#SDDP.add_edge","page":"API Reference","title":"SDDP.add_edge","text":"add_edge(graph::Graph{T}, edge::Pair{T, T}, probability::Float64) where T\n\nAdd an edge to the graph graph.\n\nExamples\n\nadd_edge(graph, 1 => 2, 0.9)\nadd_edge(graph, :root => :A, 1.0)\n\n\n\n\n\n","category":"function"},{"location":"apireference/#SDDP.add_ambiguity_set","page":"API Reference","title":"SDDP.add_ambiguity_set","text":"add_ambiguity_set(graph::Graph{T}, set::Vector{T}, lipschitz::Vector{Float64})\n\nAdd set to the belief partition of graph.\n\nlipschitz is a vector of Lipschitz constants, with one element for each node in set. The Lipschitz constant is the maximum slope of the cost-to-go function with respect to the belief state associated with each node at any point in the state-space.\n\nExamples\n\ngraph = LinearGraph(3)\nadd_ambiguity_set(graph, [1, 2], [1e3, 1e2])\nadd_ambiguity_set(graph, [3], [1e5])\n\n\n\n\n\nadd_ambiguity_set(graph::Graph{T}, set::Vector{T}, lipschitz::Float64)\n\nAdd set to the belief partition of graph.\n\nlipschitz is a Lipschitz constant for each node in set. The Lipschitz constant is the maximum slope of the cost-to-go function with respect to the belief state associated with each node at any point in the state-space.\n\nExamples\n\ngraph = LinearGraph(3)\nadd_ambiguity_set(graph, [1, 2], 1e3)\nadd_ambiguity_set(graph, [3], 1e5)\n\n\n\n\n\n","category":"function"},{"location":"apireference/#SDDP.LinearGraph","page":"API Reference","title":"SDDP.LinearGraph","text":"LinearGraph(stages::Int)\n\n\n\n\n\n","category":"function"},{"location":"apireference/#SDDP.MarkovianGraph","page":"API Reference","title":"SDDP.MarkovianGraph","text":"MarkovianGraph(transition_matrices::Vector{Matrix{Float64}})\n\nConstruct a Markovian graph from the vector of transition matrices.\n\ntransition_matrices[t][i, j] gives the probability of transitioning from Markov state i in stage t - 1 to Markov state j in stage t.\n\nThe dimension of the first transition matrix should be (1, N), and transition_matrics[1][1, i] is the probability of transitioning from the root node to the Markov state i.\n\n\n\n\n\nMarkovianGraph(;\n stages::Int,\n transition_matrix::Matrix{Float64},\n root_node_transition::Vector{Float64}\n)\n\nConstruct a Markovian graph object with stages number of stages and time-independent Markov transition probabilities.\n\ntransition_matrix must be a square matrix, and the probability of transitioning from Markov state i in stage t to Markov state j in stage t + 1 is given by transition_matrix[i, j].\n\nroot_node_transition[i] is the probability of transitioning from the root node to Markov state i in the first stage.\n\n\n\n\n\nMarkovianGraph(\n simulator::Function; budget::Union{Int, Vector{Int}}, scenarios::Int = 1000\n)\n\nConstruct a Markovian graph by fitting Markov chain to scenarios generated by simulator().\n\nbudget is the total number of nodes in the resulting Markov chain. This can either be specified as a single Int, in which case we will attempt to intelligently distributed the nodes between stages. 
Alternatively, budget can be a Vector{Int}, which details the number of Markov state to have in each stage.\n\n\n\n\n\n","category":"function"},{"location":"apireference/#SDDP.LinearPolicyGraph","page":"API Reference","title":"SDDP.LinearPolicyGraph","text":"LinearPolicyGraph(builder::Function; stages::Int, kwargs...)\n\nCreate a linear policy graph with stages number of stages.\n\nSee SDDP.PolicyGraph for the other keyword arguments.\n\n\n\n\n\n","category":"function"},{"location":"apireference/#SDDP.MarkovianPolicyGraph","page":"API Reference","title":"SDDP.MarkovianPolicyGraph","text":"MarkovianPolicyGraph(\n builder::Function;\n transition_matrices::Vector{Array{Float64, 2}},\n kwargs...\n)\n\nCreate a Markovian policy graph based on the transition matrices given in transition_matrices.\n\ntransition_matrices[t][i, j] gives the probability of transitioning from Markov state i in stage t - 1 to Markov state j in stage t.\n\nThe dimension of the first transition matrix should be (1, N), and transition_matrics[1][1, i] is the probability of transitioning from the root node to the Markov state i.\n\nSee SDDP.MarkovianGraph for other ways of specifying a Markovian policy graph. See SDDP.PolicyGraph for the other keyword arguments.\n\n\n\n\n\n","category":"function"},{"location":"apireference/#SDDP.PolicyGraph","page":"API Reference","title":"SDDP.PolicyGraph","text":"PolicyGraph(\n builder::Function,\n graph::Graph{T};\n sense::Symbol = :Min,\n lower_bound = -Inf,\n upper_bound = Inf,\n optimizer = nothing,\n bellman_function = nothing,\n direct_mode::Bool = false,\n integrality_handler = ContinuousRelaxation(),\n) where {T}\n\nConstruct a policy graph based on the graph structure of graph. (See SDDP.Graph for details.)\n\nExample\n\nfunction builder(subproblem::JuMP.Model, index)\n # ... subproblem definition ...\nend\n\nmodel = PolicyGraph(\n builder,\n graph;\n lower_bound = 0.0,\n optimizer = GLPK.Optimizer,\n direct_mode = false\n)\n\nOr, using the Julia do ... end syntax:\n\nmodel = PolicyGraph(\n graph;\n lower_bound = 0.0,\n optimizer = GLPK.Optimizer,\n direct_mode = true\n) do subproblem, index\n # ... subproblem definitions ...\nend\n\n\n\n\n\n","category":"type"},{"location":"apireference/#Subproblem-definition","page":"API Reference","title":"Subproblem definition","text":"","category":"section"},{"location":"apireference/","page":"API Reference","title":"API Reference","text":"@stageobjective\nSDDP.parameterize\nSDDP.add_objective_state\nSDDP.objective_state\nSDDP.Noise","category":"page"},{"location":"apireference/#SDDP.@stageobjective","page":"API Reference","title":"SDDP.@stageobjective","text":"@stageobjective(subproblem, expr)\n\nSet the stage-objective of subproblem to expr.\n\nExample\n\n@stageobjective(subproblem, 2x + y)\n\n\n\n\n\n","category":"macro"},{"location":"apireference/#SDDP.parameterize","page":"API Reference","title":"SDDP.parameterize","text":"parameterize(modify::Function,\n subproblem::JuMP.Model,\n realizations::Vector{T},\n probability::Vector{Float64} = fill(1.0 / length(realizations))\n ) where T\n\nAdd a parameterization function modify to subproblem. 
The modify function takes one argument and modifies subproblem based on the realization of the noise sampled from realizations with corresponding probabilities probability.\n\nIn order to conduct an out-of-sample simulation, modify should accept arguments that are not in realizations (but still of type T).\n\nExample\n\nSDDP.parameterize(subproblem, [1, 2, 3], [0.4, 0.3, 0.3]) do ω\n JuMP.set_upper_bound(x, ω)\nend\n\n\n\n\n\nparameterize(node::Node, noise)\n\nParameterize node node with the noise noise.\n\n\n\n\n\n","category":"function"},{"location":"apireference/#SDDP.add_objective_state","page":"API Reference","title":"SDDP.add_objective_state","text":"add_objective_state(update::Function, subproblem::JuMP.Model; kwargs...)\n\nAdd an objective state variable to subproblem.\n\nRequired kwargs are:\n\ninitial_value: The initial value of the objective state variable at the root node.\nlipschitz: The lipschitz constant of the objective state variable.\n\nSetting a tight value for the lipschitz constant can significantly improve the speed of convergence.\n\nOptional kwargs are:\n\nlower_bound: A valid lower bound for the objective state variable. Can be -Inf.\nupper_bound: A valid upper bound for the objective state variable. Can be +Inf.\n\nSetting tight values for these optional variables can significantly improve the speed of convergence.\n\nIf the objective state is N-dimensional, each keyword argument must be an NTuple{N, Float64}. For example, initial_value = (0.0, 1.0).\n\n\n\n\n\n","category":"function"},{"location":"apireference/#SDDP.objective_state","page":"API Reference","title":"SDDP.objective_state","text":"objective_state(subproblem::JuMP.Model)\n\nReturn the current objective state of the problem.\n\nCan only be called from SDDP.parameterize.\n\n\n\n\n\n","category":"function"},{"location":"apireference/#SDDP.Noise","page":"API Reference","title":"SDDP.Noise","text":"Noise(support, probability)\n\nAn atom of a discrete random variable at the point of support support and associated probability probability.\n\n\n\n\n\n","category":"type"},{"location":"apireference/#Training-the-policy","page":"API Reference","title":"Training the policy","text":"","category":"section"},{"location":"apireference/","page":"API Reference","title":"API Reference","text":"SDDP.numerical_stability_report\nSDDP.train\nSDDP.termination_status\nSDDP.write_cuts_to_file\nSDDP.read_cuts_from_file\nSDDP.write_log_to_csv","category":"page"},{"location":"apireference/#SDDP.numerical_stability_report","page":"API Reference","title":"SDDP.numerical_stability_report","text":"numerical_stability_report([io::IO=stdout,] model::PolicyGraph,\n by_node::Bool=false, print=true, warn::Bool=true)\n\nPrint a report identifying possible numeric stability issues.\n\nIf by_node, print a report for each node in the graph.\nIf print, print to io.\nIf warn, warn if the coefficients may cause numerical issues.\n\n\n\n\n\n","category":"function"},{"location":"apireference/#SDDP.train","page":"API Reference","title":"SDDP.train","text":"SDDP.train(model::PolicyGraph; kwargs...)\n\nTrain the policy for model. Keyword arguments:\n\niteration_limit::Int: number of iterations to conduct before termination.\ntime_limit::Float64: number of seconds to train before termination.\nstopping_rules: a vector of SDDP.AbstractStoppingRules.\nprint_level::Int: control the level of printing to the screen. Defaults to 1. Set to 0 to disable all printing.\nlog_file::String: filepath at which to write a log of the training progress. 
Defaults to SDDP.log.\nlog_frequency::Int: control the frequency with which the log is output (iterations/log). Defaults to 1.\nrun_numerical_stability_report::Bool: generate (and print) a numerical stability report prior to solve. Defaults to true.\nrefine_at_similar_nodes::Bool: if SDDP can detect that two nodes have the same children, it can cheaply add a cut discovered at one to the other. In almost all cases this should be set to true.\ncut_deletion_minimum::Int: the minimum number of cuts to cache before deleting cuts from the subproblem. The impact on performance is solver specific; however, smaller values result in smaller subproblems (and therefore quicker solves), at the expense of more time spent performing cut selection.\nrisk_measure: the risk measure to use at each node. Defaults to Expectation.\nsampling_scheme: a sampling scheme to use on the forward pass of the algorithm. Defaults to InSampleMonteCarlo.\nbackward_sampling_scheme: a backward pass sampling scheme to use on the backward pass of the algorithm. Defaults to CompleteSampler.\ncut_type: choose between SDDP.SINGLE_CUT and SDDP.MULTI_CUT versions of SDDP.\ndashboard::Bool: open a visualization of the training over time. Defaults to false.\nparallel_scheme::AbstractParallelScheme: specify a scheme for solving in parallel. Defaults to Serial().\nforward_pass::AbstractForwardPass: specify a scheme to use for the forward passes.\n\nThere is also a special option for infinite horizon problems:\n\ncycle_discretization_delta: the maximum distance between states allowed on the forward pass. This is for advanced users only and needs to be used in conjunction with a different sampling_scheme.\n\n\n\n\n\n","category":"function"},{"location":"apireference/#SDDP.termination_status","page":"API Reference","title":"SDDP.termination_status","text":"termination_status(model::PolicyGraph)\n\nQuery the reason why the training stopped.\n\n\n\n\n\n","category":"function"},{"location":"apireference/#SDDP.write_cuts_to_file","page":"API Reference","title":"SDDP.write_cuts_to_file","text":"write_cuts_to_file(model::PolicyGraph{T}, filename::String) where {T}\n\nWrite the cuts that form the policy in model to filename in JSON format.\n\nSee also SDDP.read_cuts_from_file.\n\n\n\n\n\n","category":"function"},{"location":"apireference/#SDDP.read_cuts_from_file","page":"API Reference","title":"SDDP.read_cuts_from_file","text":"read_cuts_from_file(\n model::PolicyGraph{T}, filename::String;\n node_name_parser::Function = _node_name_parser) where {T}\n\nRead cuts (saved using SDDP.write_cuts_to_file) from filename into model.\n\nSince T can be an arbitrary Julia type, the conversion to JSON is lossy. When reading, read_cuts_from_file only supports T=Int, T=NTuple{N, Int}, and T=Symbol. 
If you have manually created a policy graph with a different node type T, provide a function node_name_parser with the signature node_name_parser(T, name::String)::T where {T} that returns the name of each node given the string name name.\n\nSee also SDDP.write_cuts_to_file.\n\n\n\n\n\n","category":"function"},{"location":"apireference/#SDDP.write_log_to_csv","page":"API Reference","title":"SDDP.write_log_to_csv","text":"write_log_to_csv(model::PolicyGraph, filename::String)\n\nWrite the log of the most recent training to a csv for post-analysis.\n\nAssumes that the model has been trained via SDDP.train.\n\n\n\n\n\n","category":"function"},{"location":"apireference/#Stopping-rules","page":"API Reference","title":"Stopping rules","text":"","category":"section"},{"location":"apireference/","page":"API Reference","title":"API Reference","text":"SDDP.AbstractStoppingRule\nSDDP.stopping_rule_status\nSDDP.convergence_test","category":"page"},{"location":"apireference/#SDDP.AbstractStoppingRule","page":"API Reference","title":"SDDP.AbstractStoppingRule","text":"AbstractStoppingRule\n\nThe abstract type for the stopping-rule interface.\n\nYou need to define the following methods:\n\nSDDP.stopping_rule_status\nSDDP.convergence_test\n\n\n\n\n\n","category":"type"},{"location":"apireference/#SDDP.stopping_rule_status","page":"API Reference","title":"SDDP.stopping_rule_status","text":"stopping_rule_status(::AbstractStoppingRule)::Symbol\n\nReturn a symbol describing the stopping rule.\n\n\n\n\n\n","category":"function"},{"location":"apireference/#SDDP.convergence_test","page":"API Reference","title":"SDDP.convergence_test","text":"convergence_test(model::PolicyGraph, log::Vector{Log}, ::AbstractStoppingRule)::Bool\n\nReturn a Bool indicating if the algorithm should terminate the training.\n\n\n\n\n\n","category":"function"},{"location":"apireference/#Sampling-schemes","page":"API Reference","title":"Sampling schemes","text":"","category":"section"},{"location":"apireference/","page":"API Reference","title":"API Reference","text":"SDDP.AbstractSamplingScheme\nSDDP.sample_scenario\nSDDP.InSampleMonteCarlo\nSDDP.OutOfSampleMonteCarlo","category":"page"},{"location":"apireference/#SDDP.AbstractSamplingScheme","page":"API Reference","title":"SDDP.AbstractSamplingScheme","text":"AbstractSamplingScheme\n\nThe abstract type for the sampling-scheme interface.\n\nYou need to define the following methods:\n\nSDDP.sample_scenario\n\n\n\n\n\n","category":"type"},{"location":"apireference/#SDDP.sample_scenario","page":"API Reference","title":"SDDP.sample_scenario","text":"sample_scenario(graph::PolicyGraph{T}, ::AbstractSamplingScheme) where T\n\nSample a scenario from the policy graph graph based on the sampling scheme.\n\nReturns ::Tuple{Vector{Tuple{T, <:Any}}, Bool}, where the first element is the scenario, and the second element is a Boolean flag indicating if the scenario was terminated due to the detection of a cycle.\n\nThe scenario is a list of tuples (type Vector{Tuple{T, <:Any}}) where the first component of each tuple is the index of the node, and the second component is the stagewise-independent noise term observed in that node.\n\n\n\n\n\n","category":"function"},{"location":"apireference/#SDDP.InSampleMonteCarlo","page":"API Reference","title":"SDDP.InSampleMonteCarlo","text":"InSampleMonteCarlo(;\n max_depth::Int = 0,\n terminate_on_cycle::Function = false,\n terminate_on_dummy_leaf::Function = true,\n rollout_limit::Function = (i::Int) -> typemax(Int)\n)\n\nA Monte Carlo sampling scheme using the 
in-sample data from the policy graph definition.\n\nIf terminate_on_cycle, terminate the forward pass once a cycle is detected. If max_depth > 0, return once max_depth nodes have been sampled. If terminate_on_dummy_leaf, terminate the forward pass with 1 - probability of sampling a child node.\n\nNote that if terminate_on_cycle = false and terminate_on_dummy_leaf = false then max_depth must be set > 0.\n\nYou can use rollout_limit to set iteration specific depth limits. For example:\n\nInSampleMonteCarlo(rollout_limit = i -> 2 * i)\n\n\n\n\n\n","category":"type"},{"location":"apireference/#SDDP.OutOfSampleMonteCarlo","page":"API Reference","title":"SDDP.OutOfSampleMonteCarlo","text":"OutOfSampleMonteCarlo(\n f::Function, graph::PolicyGraph;\n use_insample_transition::Bool = false,\n max_depth::Int = 0,\n terminate_on_cycle::Bool = false,\n terminate_on_dummy_leaf::Bool = true,\n rollout_limit::Function = i -> typemax(Int),\n)\n\nCreate a Monte Carlo sampler using out-of-sample probabilities and/or supports for the stagewise-independent noise terms, and out-of-sample probabilities for the node-transition matrix.\n\nf is a function that takes the name of a node and returns a tuple containing a vector of new SDDP.Noise terms for the children of that node, and a vector of new SDDP.Noise terms for the stagewise-independent noise.\n\nIf f is called with the name of the root node (e.g., 0 in a linear policy graph, (0, 1) in a Markovian Policy Graph), then return a vector of SDDP.Noise for the children of the root node.\n\nIf use_insample_transition, the in-sample transition probabilities will be used. Therefore, f should only return a vector of the stagewise-independent noise terms, and f will not be called for the root node.\n\nIf terminate_on_cycle, terminate the forward pass once a cycle is detected. If max_depth > 0, return once max_depth nodes have been sampled. If terminate_on_dummy_leaf, terminate the forward pass with 1 - probability of sampling a child node.\n\nNote that if terminate_on_cycle = false and terminate_on_dummy_leaf = false then max_depth must be set > 0.\n\nYou can use rollout_limit to set iteration specific depth limits. For example:\n\nOutOfSampleMonteCarlo(rollout_limit = i -> 2 * i)\n\nExample\n\n# Given linear policy graph `graph` with `T` stages:\nsampler = OutOfSampleMonteCarlo(graph) do node\n if node == 0\n return [SDDP.Noise(1, 1.0)]\n else\n noise_terms = [SDDP.Noise(node, 0.3), SDDP.Noise(node + 1, 0.7)]\n children = node < T ? 
[SDDP.Noise(node + 1, 0.9)] : SDDP.Noise{Int}[]\n return children, noise_terms\n end\nend\n\n# Given linear policy graph `graph` with `T` stages:\nsampler = OutOfSampleMonteCarlo(graph, use_insample_transition=true) do node\n return [SDDP.Noise(node, 0.3), SDDP.Noise(node + 1, 0.7)]\nend\n\n\n\n\n\n","category":"type"},{"location":"apireference/#Parallel-schemes","page":"API Reference","title":"Parallel schemes","text":"","category":"section"},{"location":"apireference/","page":"API Reference","title":"API Reference","text":"SDDP.AbstractParallelScheme\nSDDP.Serial\nSDDP.Asynchronous","category":"page"},{"location":"apireference/#SDDP.AbstractParallelScheme","page":"API Reference","title":"SDDP.AbstractParallelScheme","text":"AbstractParallelScheme\n\nAbstract type for different parallelism schemes.\n\n\n\n\n\n","category":"type"},{"location":"apireference/#SDDP.Serial","page":"API Reference","title":"SDDP.Serial","text":"Serial()\n\nRun SDDP in serial mode.\n\n\n\n\n\n","category":"type"},{"location":"apireference/#SDDP.Asynchronous","page":"API Reference","title":"SDDP.Asynchronous","text":"Asynchronous(init_callback::Function, slave_pids::Vector{Int} = workers())\n\nRun SDDP in asynchronous mode workers with pid's slave_pids.\n\nAfter initializing the models on each worker, call init_callback(model). Note that init_callback is run locally on the worker and not on the master thread.\n\n\n\n\n\nAsynchronous(slave_pids::Vector{Int} = workers())\n\nRun SDDP in asynchronous mode workers with pid's slave_pids.\n\n\n\n\n\n","category":"type"},{"location":"apireference/#Forward-passes","page":"API Reference","title":"Forward passes","text":"","category":"section"},{"location":"apireference/","page":"API Reference","title":"API Reference","text":"SDDP.AbstractForwardPass\nSDDP.DefaultForwardPass\nSDDP.RevisitingForwardPass","category":"page"},{"location":"apireference/#SDDP.AbstractForwardPass","page":"API Reference","title":"SDDP.AbstractForwardPass","text":"AbstractForwardPass\n\nAbstract type for different forward passes.\n\n\n\n\n\n","category":"type"},{"location":"apireference/#SDDP.DefaultForwardPass","page":"API Reference","title":"SDDP.DefaultForwardPass","text":"DefaultForwardPass()\n\nThe default forward pass.\n\n\n\n\n\n","category":"type"},{"location":"apireference/#SDDP.RevisitingForwardPass","page":"API Reference","title":"SDDP.RevisitingForwardPass","text":"RevisitingForwardPass(\n period::Int = 500;\n sub_pass::AbstractForwardPass = DefaultForwardPass()\n)\n\nA forward pass scheme that generate period new forward passes (using sub_pass), then revisits all previously explored forward passes. 
This can be useful to encourage convergence at a diversity of points in the state-space.\n\nSet period = typemax(Int) to disable.\n\nFor example, if period = 2, then the forward passes will be revisited as follows: 1, 2, 1, 2, 3, 4, 1, 2, 3, 4, 5, 6, 1, 2, ....\n\n\n\n\n\n","category":"type"},{"location":"apireference/#Risk-Measures","page":"API Reference","title":"Risk Measures","text":"","category":"section"},{"location":"apireference/","page":"API Reference","title":"API Reference","text":"SDDP.AbstractRiskMeasure\nSDDP.adjust_probability","category":"page"},{"location":"apireference/#SDDP.AbstractRiskMeasure","page":"API Reference","title":"SDDP.AbstractRiskMeasure","text":"AbstractRiskMeasure\n\nThe abstract type for the risk measure interface.\n\nYou need to define the following methods:\n\nSDDP.adjust_probability\n\n\n\n\n\n","category":"type"},{"location":"apireference/#SDDP.adjust_probability","page":"API Reference","title":"SDDP.adjust_probability","text":"adjust_probability(measure::Expectation\n risk_adjusted_probability::Vector{Float64},\n original_probability::Vector{Float64},\n noise_support::Vector{Noise{T}},\n objective_realizations::Vector{Float64},\n is_minimization::Bool) where T\n\n\n\n\n\n","category":"function"},{"location":"apireference/#Integrality-handlers","page":"API Reference","title":"Integrality handlers","text":"","category":"section"},{"location":"apireference/","page":"API Reference","title":"API Reference","text":"SDDP.AbstractIntegralityHandler\nSDDP.ContinuousRelaxation\nSDDP.SDDiP","category":"page"},{"location":"apireference/#SDDP.AbstractIntegralityHandler","page":"API Reference","title":"SDDP.AbstractIntegralityHandler","text":"AbstractIntegralityHandler\n\nThe abstract type for the integrality handlers interface.\n\n\n\n\n\n","category":"type"},{"location":"apireference/#SDDP.ContinuousRelaxation","page":"API Reference","title":"SDDP.ContinuousRelaxation","text":"ContinuousRelaxation()\n\nThe continuous relaxation integrality handler. Duals are obtained in the backward pass by solving a continuous relaxation of each subproblem. Integrality constraints are retained in policy simulation.\n\n\n\n\n\n","category":"type"},{"location":"apireference/#SDDP.SDDiP","page":"API Reference","title":"SDDP.SDDiP","text":"SDDiP(; iteration_limit::Int = 100, atol::Float64, rtol::Float64)\n\nThe SDDiP integrality handler introduced by Zou, J., Ahmed, S. & Sun, X.A. Math. Program. (2019) 175: 461. Stochastic dual dynamic integer programming. https://doi.org/10.1007/s10107-018-1249-5.\n\nCalculates duals by solving the Lagrangian dual for each subproblem. Kelley's method is used to compute Lagrange multipliers. 
iteration_limit controls the maximum number of iterations, and atol and rtol are the absolute and relative tolerances used in the termination criteria.\n\nAll state variables are assumed to take nonnegative values only.\n\n\n\n\n\n","category":"type"},{"location":"apireference/#Simulating-the-policy","page":"API Reference","title":"Simulating the policy","text":"","category":"section"},{"location":"apireference/","page":"API Reference","title":"API Reference","text":"SDDP.simulate\nSDDP.calculate_bound\nSDDP.Historical","category":"page"},{"location":"apireference/#SDDP.simulate","page":"API Reference","title":"SDDP.simulate","text":"simulate(\n model::PolicyGraph,\n number_replications::Int = 1,\n variables::Vector{Symbol} = Symbol[];\n sampling_scheme::AbstractSamplingScheme =\n InSampleMonteCarlo(),\n custom_recorders = Dict{Symbol, Function}(),\n require_duals::Bool = true,\n skip_undefined_variables::Bool = false,\n parallel_scheme::AbstractParallelScheme = Serial(),\n incoming_state::Dict{String,Float64} = _intial_state(model),\n )::Vector{Vector{Dict{Symbol, Any}}}\n\nPerform a simulation of the policy model with number_replications replications using the sampling scheme sampling_scheme.\n\nUse incoming_state to pass an initial value of the state variable, if it differs from that at the root node. Each key should be the string name of the state variable.\n\nReturns a vector with one element for each replication. Each element is a vector with one-element for each node in the scenario that was sampled. Each element in that vector is a dictionary containing information about the subproblem that was solved.\n\nIn that dictionary there are four special keys:\n\n:node_index, which records the index of the sampled node in the policy model\n:noise_term, which records the noise observed at the node\n:stage_objective, which records the stage-objective of the subproblem\n:bellman_term, which records the cost/value-to-go of the node.\n\nThe sum of :stageobjective + :bellmanterm will equal the objective value of the solved subproblem.\n\nIn addition to the special keys, the dictionary will contain the result of JuMP.value(subproblem[key]) for each key in variables. This is useful to obtain the primal value of the state and control variables.\n\nFor more complicated data, the custom_recorders keyword argument can be used.\n\ndata = Dict{Symbol, Any}()\nfor (key, recorder) in custom_recorders\n data[key] = foo(subproblem)\nend\n\nFor example, to record the dual of a constraint named my_constraint, pass the following:\n\nsimulation_results = SDDP.simulate(model, 2;\n custom_recorders = Dict{Symbol, Function}(\n :constraint_dual => (sp) -> JuMP.dual(sp[:my_constraint])\n )\n)\n\nThe value of the dual in the first stage of the second replication can be accessed as:\n\nsimulation_results[2][1][:constraint_dual]\n\nIf you do not require dual variables (or if they are not available), pass require_duals = false.\n\nIf you attempt to simulate the value of a variable that is only defined in some of the stage problems, an error will be thrown. To over-ride this (and return a NaN instead), pass skip_undefined_variables = true.\n\nUse parallel_scheme::[AbstractParallelScheme](@ref) to specify a scheme for simulating in parallel. 
Defaults to Serial.\n\n\n\n\n\n","category":"function"},{"location":"apireference/#SDDP.calculate_bound","page":"API Reference","title":"SDDP.calculate_bound","text":"SDDP.calculate_bound(model::PolicyGraph, state::Dict{Symbol, Float64},\n risk_measure=Expectation())\n\nCalculate the lower bound (if minimizing, otherwise upper bound) of the problem model at the point state, assuming the risk measure at the root node is risk_measure.\n\n\n\n\n\n","category":"function"},{"location":"apireference/#SDDP.Historical","page":"API Reference","title":"SDDP.Historical","text":"Historical(scenarios::Vector{Vector{Tuple{T, S}}},\n probability::Vector{Float64})\n\nA sampling scheme that samples a scenario from the vector of scenarios scenarios according to probability. If probability is omitted, defaults to uniform probability.\n\nExample\n\nHistorical(\n [\n [(1, 0.5), (2, 1.0), (3, 0.5)],\n [(1, 0.5), (2, 0.0), (3, 1.0)],\n [(1, 1.0), (2, 0.0), (3, 0.0)]\n ],\n [0.2, 0.5, 0.3]\n)\n\n\n\n\n\nHistorical(scenario::Vector{Tuple{T, S}})\n\nA deterministic sampling scheme that always samples scenario with probability 1.\n\nExample\n\nHistorical([(1, 0.5), (2, 1.5), (3, 0.75)])\n\n\n\n\n\n","category":"type"},{"location":"apireference/#Decision-rules","page":"API Reference","title":"Decision rules","text":"","category":"section"},{"location":"apireference/","page":"API Reference","title":"API Reference","text":"SDDP.DecisionRule\nSDDP.evaluate","category":"page"},{"location":"apireference/#SDDP.DecisionRule","page":"API Reference","title":"SDDP.DecisionRule","text":"DecisionRule(model::PolicyGraph{T}; node::T)\n\nCreate a decision rule for node node in model.\n\n\n\n\n\n","category":"type"},{"location":"apireference/#SDDP.evaluate","page":"API Reference","title":"SDDP.evaluate","text":"evaluate(\n rule::DecisionRule;\n incoming_state::Dict{Symbol, Float64},\n noise = nothing,\n controls_to_record = Symbol[],\n)\n\nEvaluate the decision rule rule at the point described by the incoming_state and noise.\n\nIf the node is deterministic, omit the noise argument.\n\nPass a list of symbols to controls_to_record to save the optimal primal solution corresponding to the names registered in the model.\n\n\n\n\n\nevaluate(\n V::ValueFunction,\n point::Dict{Symbol, Float64},\n objective_state = nothing,\n belief_state = nothing\n)\n\nEvaluate the value function V at point in the state-space.\n\nReturns a tuple containing the height of the function, and the subgradient w.r.t. 
the convex state-variables.\n\n\n\n\n\nevaluate(V::ValueFunction{Nothing, Nothing}; kwargs...)\n\nEvaluate the value function V at the point in the state-space specified by kwargs.\n\nExample\n\nevaluate(V; volume = 1)\n\n\n\n\n\nevaluate(\n model::PolicyGraph{T}, test_scenarios::TestScenarios{T, S}\n) where {T, S}\n\nEvaluate the performance of the policy contained in model after a call to train on the scenarios specified by test_scenarios.\n\nExample\n\nmodel, test_scenarios = read_from_file(\"my_model.sof.json\")\ntrain(model; iteration_limit = 100)\nsimulations = evaluate(model, test_scenarios)\n\n\n\n\n\n","category":"function"},{"location":"apireference/#Visualizing-the-policy","page":"API Reference","title":"Visualizing the policy","text":"","category":"section"},{"location":"apireference/","page":"API Reference","title":"API Reference","text":"SDDP.SpaghettiPlot\nSDDP.add_spaghetti\nSDDP.publication_plot\nSDDP.ValueFunction\nSDDP.evaluate(::SDDP.ValueFunction, ::Dict{Symbol,Float64})\nSDDP.plot","category":"page"},{"location":"apireference/#SDDP.SpaghettiPlot","page":"API Reference","title":"SDDP.SpaghettiPlot","text":"SDDP.SpaghettiPlot(; stages, scenarios)\n\nInitialize a new SpaghettiPlot with stages stages and scenarios number of replications.\n\n\n\n\n\n","category":"type"},{"location":"apireference/#SDDP.add_spaghetti","page":"API Reference","title":"SDDP.add_spaghetti","text":"SDDP.add_spaghetti(data_function::Function, plt::SpaghettiPlot; kwargs...)\n\nDescription\n\nAdd a new figure to the SpaghettiPlot plt, where the y-value of the scenario-th line when x = stage is given by data_function(plt.simulations[scenario][stage]).\n\nKeyword arguments\n\nxlabel: set the xaxis label\nylabel: set the yaxis label\ntitle: set the title of the plot\nymin: set the minimum y value\nymax: set the maximum y value\ncumulative: plot the additive accumulation of the value across the stages\ninterpolate: interpolation method for lines between stages.\n\nDefaults to \"linear\"; see the d3 docs \tfor all options.\n\nExamples\n\nsimulations = simulate(model, 10)\nplt = SDDP.spaghetti_plot(simulations)\nSDDP.add_spaghetti(plt; title = \"Stage objective\") do data\n\treturn data[:stage_objective]\nend\n\n\n\n\n\n","category":"function"},{"location":"apireference/#SDDP.publication_plot","page":"API Reference","title":"SDDP.publication_plot","text":"SDDP.publication_plot(\n data_function, simulations;\n quantile = [0.0, 0.1, 0.25, 0.5, 0.75, 0.9, 1.0],\n kwargs...)\n\nCreate a Plots.jl recipe plot of the simulations.\n\nSee Plots.jl for the list of keyword arguments.\n\nExample\n\nSDDP.publication_plot(simulations; title = \"My title\") do data\n return data[:stage_objective]\nend\n\n\n\n\n\n","category":"function"},{"location":"apireference/#SDDP.ValueFunction","page":"API Reference","title":"SDDP.ValueFunction","text":"ValueFunction\n\nA representation of the value function. 
SDDP.jl uses the following unique representation of the value function that is undocumented in the literature.\n\nIt supports three types of state variables:\n\nx - convex \"resource\" states\nb - concave \"belief\" states\ny - concave \"objective\" states\n\nIn addition, we have three types of cuts:\n\nSingle-cuts (also called \"average\" cuts in the literature), which involve the risk-adjusted expectation of the cost-to-go.\nMulti-cuts, which use a different cost-to-go term for each realization w.\nRisk-cuts, which correspond to the facets of the dual interpretation of a coherent risk measure.\n\nTherefore, ValueFunction returns a JuMP model of the following form:\n\nV(x, b, y) = min: μᵀb + νᵀy + θ\n s.t. # \"Single\" / \"Average\" cuts\n μᵀb(j) + νᵀy(j) + θ >= α(j) + xᵀβ(j), ∀ j ∈ J\n # \"Multi\" cuts\n μᵀb(k) + νᵀy(k) + φ(w) >= α(k, w) + xᵀβ(k, w), ∀w ∈ Ω, k ∈ K\n # \"Risk-set\" cuts\n θ ≥ Σ{p(k, w) * φ(w)}_w - μᵀb(k) - νᵀy(k), ∀ k ∈ K\n\n\n\n\n\n","category":"type"},{"location":"apireference/#SDDP.evaluate-Tuple{SDDP.ValueFunction,Dict{Symbol,Float64}}","page":"API Reference","title":"SDDP.evaluate","text":"evaluate(\n V::ValueFunction,\n point::Dict{Symbol, Float64},\n objective_state = nothing,\n belief_state = nothing\n)\n\nEvaluate the value function V at point in the state-space.\n\nReturns a tuple containing the height of the function, and the subgradient w.r.t. the convex state-variables.\n\n\n\n\n\n","category":"method"},{"location":"apireference/#SDDP.plot","page":"API Reference","title":"SDDP.plot","text":"plot(plt::SpaghettiPlot[, filename::String]; open::Bool = true)\n\nThe SpaghettiPlot plot plt to filename. If filename is not given, it will be saved to a temporary directory. If open = true, then a browser window will be opened to display the resulting HTML file.\n\n\n\n\n\n","category":"function"},{"location":"apireference/#Debugging-the-model","page":"API Reference","title":"Debugging the model","text":"","category":"section"},{"location":"apireference/","page":"API Reference","title":"API Reference","text":"SDDP.write_subproblem_to_file\nSDDP.deterministic_equivalent","category":"page"},{"location":"apireference/#SDDP.write_subproblem_to_file","page":"API Reference","title":"SDDP.write_subproblem_to_file","text":"write_subproblem_to_file(node::Node, filename::String; throw_error::Bool = false)\n\nWrite the subproblem contained in node to the file filename.\n\n\n\n\n\n","category":"function"},{"location":"apireference/#SDDP.deterministic_equivalent","page":"API Reference","title":"SDDP.deterministic_equivalent","text":"deterministic_equivalent(\n pg::PolicyGraph{T},\n optimizer = nothing;\n time_limit::Union{Real, Nothing} = 60.0\n)\n\nForm a JuMP model that represents the deterministic equivalent of the problem.\n\nExamples\n\ndeterministic_equivalent(model)\ndeterministic_equivalent(model, GLPK.Optimizer)\n\n\n\n\n\n","category":"function"},{"location":"apireference/#StochOptFormat","page":"API Reference","title":"StochOptFormat","text":"","category":"section"},{"location":"apireference/","page":"API Reference","title":"API Reference","text":"SDDP.write_to_file\nSDDP.read_from_file\nBase.write(::IO, ::SDDP.PolicyGraph)\nBase.read(::IO, ::Type{SDDP.PolicyGraph})\nSDDP.evaluate(::SDDP.PolicyGraph{T}, ::SDDP.TestScenarios{T}) where {T}\nSDDP.TestScenarios\nSDDP.TestScenario","category":"page"},{"location":"apireference/#SDDP.write_to_file","page":"API Reference","title":"SDDP.write_to_file","text":"write_to_file(\n model::PolicyGraph,\n filename::String;\n 
compression::MOI.FileFormats.AbstractCompressionScheme =\n MOI.FileFormats.AutomaticCompression(),\n kwargs...\n)\n\nWrite model to filename in the StochOptFormat file format.\n\nPass an argument to compression to override the default of automatically detecting the file compression to use based on the extension of filename.\n\nSee Base.write(::IO, ::PolicyGraph) for information on the keyword arguments that can be provided.\n\nWARNING: THIS FUNCTION IS EXPERIMENTAL. SEE THE FULL WARNING IN Base.write(::IO, ::PolicyGraph).\n\nExample\n\nwrite_to_file(model, \"my_model.sof.json\"; test_scenarios = 10)\n\n\n\n\n\n","category":"function"},{"location":"apireference/#SDDP.read_from_file","page":"API Reference","title":"SDDP.read_from_file","text":"read_from_file(\n filename::String;\n compression::MOI.FileFormats.AbstractCompressionScheme =\n MOI.FileFormats.AutomaticCompression(),\n kwargs...\n)::Tuple{PolicyGraph, TestScenarios}\n\nReturn a tuple containing a PolicyGraph object and a TestScenarios read from filename in the StochOptFormat file format.\n\nPass an argument to compression to override the default of automatically detecting the file compression to use based on the extension of filename.\n\nSee Base.read(::IO, ::Type{PolicyGraph}) for information on the keyword arguments that can be provided.\n\nWARNING: THIS FUNCTION IS EXPERIMENTAL. SEE THE FULL WARNING IN Base.read(::IO, ::Type{PolicyGraph}).\n\nExample\n\nmodel, test_scenarios = read_from_file(\"my_model.sof.json\")\n\n\n\n\n\n","category":"function"},{"location":"apireference/#Base.write-Tuple{IO,SDDP.PolicyGraph}","page":"API Reference","title":"Base.write","text":"Base.write(\n io::IO,\n model::PolicyGraph;\n test_scenarios::Union{Int, TestScenarios} = 1_000,\n kwargs...\n)\n\nWrite model to io in the StochOptFormat file format.\n\nPass an Int to test_scenarios (default 1_000) to specify the number of test scenarios to generate using the InSampleMonteCarlo sampling scheme. Alternatively, pass a TestScenarios object to manually specify the test scenarios to use.\n\nAny additional kwargs passed to write will be stored in the top-level of the resulting StochOptFormat file. Valid arguments include name, author, date, and description.\n\nWARNING: THIS FUNCTION IS EXPERIMENTAL. THINGS MAY CHANGE BETWEEN COMMITS. YOU SHOULD NOT RELY ON THIS FUNCTIONALITY AS A LONG-TERM FILE FORMAT (YET).\n\nIn addition to potential changes to the underlying format, only a subset of possible modifications are supported. These include:\n\nJuMP.fix\nJuMP.set_lower_bound\nJuMP.set_upper_bound\nJuMP.set_normalized_rhs\nChanges to the constant or affine terms in a stage objective\n\nIf your model uses something other than this, this function will silently write an incorrect formulation of the problem.\n\nExample\n\nopen(\"my_model.sof.json\", \"w\") do io\n write(\n io,\n model;\n test_scenarios = 10,\n name = \"MyModel\",\n author = \"@odow\",\n date = \"2020-07-20\",\n description = \"Example problem for the SDDP.jl documentation\",\n )\nend\n\n\n\n\n\n","category":"method"},{"location":"apireference/#Base.read-Tuple{IO,Type{SDDP.PolicyGraph}}","page":"API Reference","title":"Base.read","text":"Base.read(\n io::IO,\n ::Type{PolicyGraph};\n bound::Float64 = 1e6,\n)::Tuple{PolicyGraph, TestScenarios}\n\nReturn a tuple containing a PolicyGraph object and a TestScenarios read from io in the StochOptFormat file format.\n\nSee also: evaluate.\n\nWARNING: THIS FUNCTION IS EXPERIMENTAL. THINGS MAY CHANGE BETWEEN COMMITS. 
YOU SHOULD NOT RELY ON THIS FUNCTIONALITY AS A LONG-TERM FILE FORMAT (YET).\n\nIn addition to potential changes to the underlying format, only a subset of possible modifications are supported. These include:\n\nAdditive random variables in the constraints or in the objective\nMultiplicative random variables in the objective\n\nIf your model uses something other than this, this function may throw an error or silently build a non-convex model.\n\nExample\n\nopen(\"my_model.sof.json\", \"r\") do io\n model, test_scenarios = read(io, PolicyGraph)\nend\n\n\n\n\n\n","category":"method"},{"location":"apireference/#SDDP.evaluate-Union{Tuple{T}, Tuple{PolicyGraph{T},TestScenarios{T,S} where S}} where T","page":"API Reference","title":"SDDP.evaluate","text":"evaluate(\n model::PolicyGraph{T}, test_scenarios::TestScenarios{T, S}\n) where {T, S}\n\nEvaluate the performance of the policy contained in model after a call to train on the scenarios specified by test_scenarios.\n\nExample\n\nmodel, test_scenarios = read_from_file(\"my_model.sof.json\")\ntrain(model; iteration_limit = 100)\nsimulations = evaluate(model, test_scenarios)\n\n\n\n\n\n","category":"method"},{"location":"apireference/#SDDP.TestScenarios","page":"API Reference","title":"SDDP.TestScenarios","text":"TestScenarios{T, S}(scenarios::Vector{TestScenario{T, S}})\n\nAn AbstractSamplingScheme based on a vector of scenarios.\n\nEach scenario is a vector of Tuple{T, S} where the first element is the node to visit and the second element is the realization of the stagewise-independent noise term. Pass nothing if the node is deterministic.\n\n\n\n\n\n","category":"type"},{"location":"apireference/#SDDP.TestScenario","page":"API Reference","title":"SDDP.TestScenario","text":"TestScenario{T, S}(probability::Float64, scenario::Vector{Tuple{T, S}})\n\nA single scenario for testing.\n\nSee also: TestScenarios.\n\n\n\n\n\n","category":"type"},{"location":"examples/FAST_hydro_thermal/","page":"FAST: the hydro-thermal problem","title":"FAST: the hydro-thermal problem","text":"EditURL = \"https://github.com/odow/SDDP.jl/blob/master/docs/src/examples/FAST_hydro_thermal.jl\"","category":"page"},{"location":"examples/FAST_hydro_thermal/#FAST:-the-hydro-thermal-problem","page":"FAST: the hydro-thermal problem","title":"FAST: the hydro-thermal problem","text":"","category":"section"},{"location":"examples/FAST_hydro_thermal/","page":"FAST: the hydro-thermal problem","title":"FAST: the hydro-thermal problem","text":"An implementation of the Hydro-thermal example from FAST","category":"page"},{"location":"examples/FAST_hydro_thermal/","page":"FAST: the hydro-thermal problem","title":"FAST: the hydro-thermal problem","text":"using SDDP, GLPK, Test\n\nfunction fast_hydro_thermal()\n model = SDDP.PolicyGraph(\n SDDP.LinearGraph(2),\n bellman_function = SDDP.BellmanFunction(lower_bound = 0.0),\n optimizer = GLPK.Optimizer,\n ) do sp, t\n @variable(sp, 0 <= x <= 8, SDDP.State, initial_value = 0.0)\n @variables(sp, begin\n y >= 0\n p >= 0\n ξ\n end)\n @constraints(sp, begin\n p + y >= 6\n x.out <= x.in - y + ξ\n end)\n RAINFALL = (t == 1 ? 
[6] : [2, 10])\n SDDP.parameterize(sp, RAINFALL) do ω\n JuMP.fix(ξ, ω)\n end\n @stageobjective(sp, 5 * p)\n end\n\n det = SDDP.deterministic_equivalent(model, GLPK.Optimizer)\n JuMP.optimize!(det)\n @test JuMP.objective_value(det) == 10\n\n SDDP.train(model, iteration_limit = 10, log_frequency = 5)\n @test SDDP.calculate_bound(model) == 10\n return\nend\n\nfast_hydro_thermal()","category":"page"},{"location":"examples/FAST_hydro_thermal/","page":"FAST: the hydro-thermal problem","title":"FAST: the hydro-thermal problem","text":"------------------------------------------------------------------------------\n SDDP.jl (c) Oscar Dowson, 2017-21\n\nProblem\n Nodes : 2\n State variables : 1\n Scenarios : 2.00000e+00\n Solver : serial mode\n\nNumerical stability report\n Non-zero Matrix range [1e+00, 1e+00]\n Non-zero Objective range [1e+00, 5e+00]\n Non-zero Bounds range [8e+00, 8e+00]\n Non-zero RHS range [6e+00, 6e+00]\nNo problems detected\n\n Iteration Simulation Bound Time (s) Proc. ID # Solves\n 5 2.000000e+01 1.000000e+01 1.373608e-01 1 25\n 10 0.000000e+00 1.000000e+01 1.384289e-01 1 50\n\nTerminating training with status: iteration_limit\n------------------------------------------------------------------------------\n","category":"page"},{"location":"examples/FAST_hydro_thermal/","page":"FAST: the hydro-thermal problem","title":"FAST: the hydro-thermal problem","text":"","category":"page"},{"location":"examples/FAST_hydro_thermal/","page":"FAST: the hydro-thermal problem","title":"FAST: the hydro-thermal problem","text":"This page was generated using Literate.jl.","category":"page"},{"location":"examples/StochDynamicProgramming.jl_multistock/","page":"StochDynamicProgramming: the multistock problem","title":"StochDynamicProgramming: the multistock problem","text":"EditURL = \"https://github.com/odow/SDDP.jl/blob/master/docs/src/examples/StochDynamicProgramming.jl_multistock.jl\"","category":"page"},{"location":"examples/StochDynamicProgramming.jl_multistock/#StochDynamicProgramming:-the-multistock-problem","page":"StochDynamicProgramming: the multistock problem","title":"StochDynamicProgramming: the multistock problem","text":"","category":"section"},{"location":"examples/StochDynamicProgramming.jl_multistock/","page":"StochDynamicProgramming: the multistock problem","title":"StochDynamicProgramming: the multistock problem","text":"This example comes from StochDynamicProgramming.jl.","category":"page"},{"location":"examples/StochDynamicProgramming.jl_multistock/","page":"StochDynamicProgramming: the multistock problem","title":"StochDynamicProgramming: the multistock problem","text":"using SDDP, GLPK, Test\n\nfunction test_multistock_example()\n model = SDDP.LinearPolicyGraph(\n stages = 5,\n lower_bound = -5.0,\n optimizer = GLPK.Optimizer,\n ) do subproblem, stage\n @variable(subproblem, 0 <= stock[i = 1:3] <= 1, SDDP.State, initial_value = 0.5)\n @variables(subproblem, begin\n 0 <= control[i = 1:3] <= 0.5\n ξ[i = 1:3] # Dummy for RHS noise.\n end)\n @constraints(\n subproblem,\n begin\n sum(control) - 0.5 * 3 <= 0\n [i = 1:3], stock[i].out == stock[i].in + control[i] - ξ[i]\n end\n )\n Ξ = collect(Base.product((0.0, 0.15, 0.3), (0.0, 0.15, 0.3), (0.0, 0.15, 0.3)))[:]\n SDDP.parameterize(subproblem, Ξ) do ω\n JuMP.fix.(ξ, ω)\n end\n @stageobjective(subproblem, (sin(3 * stage) - 1) * sum(control))\n end\n SDDP.train(\n model,\n iteration_limit = 100,\n cut_type = SDDP.SINGLE_CUT,\n log_frequency = 10,\n )\n @test SDDP.calculate_bound(model) ≈ -4.349 atol = 0.01\n\n simulation_results = 
SDDP.simulate(model, 5000)\n @test length(simulation_results) == 5000\n μ = SDDP.Statistics.mean(\n sum(data[:stage_objective] for data in simulation)\n for simulation in simulation_results\n )\n @test μ ≈ -4.349 atol = 0.1\n return\nend\n\ntest_multistock_example()","category":"page"},{"location":"examples/StochDynamicProgramming.jl_multistock/","page":"StochDynamicProgramming: the multistock problem","title":"StochDynamicProgramming: the multistock problem","text":"------------------------------------------------------------------------------\n SDDP.jl (c) Oscar Dowson, 2017-21\n\nProblem\n Nodes : 5\n State variables : 3\n Scenarios : 1.43489e+07\n Solver : serial mode\n\nNumerical stability report\n Non-zero Matrix range [1e+00, 1e+00]\n Non-zero Objective range [3e-01, 2e+00]\n Non-zero Bounds range [5e-01, 5e+00]\n Non-zero RHS range [2e+00, 2e+00]\nNo problems detected\n\n Iteration Simulation Bound Time (s) Proc. ID # Solves\n 10 -4.309634e+00 -4.439095e+00 2.057929e-01 1 1400\n 20 -4.322989e+00 -4.397822e+00 2.559309e-01 1 2800\n 30 -4.523959e+00 -4.381545e+00 3.303719e-01 1 4200\n 40 -4.660005e+00 -4.374287e+00 4.145219e-01 1 5600\n 50 -4.185415e+00 -4.366846e+00 4.963410e-01 1 7000\n 60 -4.202727e+00 -4.362645e+00 5.962589e-01 1 8400\n 70 -4.285215e+00 -4.359083e+00 6.876440e-01 1 9800\n 80 -4.494438e+00 -4.355947e+00 7.982230e-01 1 11200\n 90 -4.716859e+00 -4.352416e+00 9.042630e-01 1 12600\n 100 -4.921763e+00 -4.350669e+00 1.041511e+00 1 14000\n\nTerminating training with status: iteration_limit\n------------------------------------------------------------------------------\n","category":"page"},{"location":"examples/StochDynamicProgramming.jl_multistock/","page":"StochDynamicProgramming: the multistock problem","title":"StochDynamicProgramming: the multistock problem","text":"","category":"page"},{"location":"examples/StochDynamicProgramming.jl_multistock/","page":"StochDynamicProgramming: the multistock problem","title":"StochDynamicProgramming: the multistock problem","text":"This page was generated using Literate.jl.","category":"page"},{"location":"tutorial/12_belief_states/","page":"Advanced II: belief states","title":"Advanced II: belief states","text":"EditURL = \"https://github.com/odow/SDDP.jl/blob/master/docs/src/tutorial/12_belief_states.jl\"","category":"page"},{"location":"tutorial/12_belief_states/#Advanced-II:-belief-states","page":"Advanced II: belief states","title":"Advanced II: belief states","text":"","category":"section"},{"location":"tutorial/12_belief_states/","page":"Advanced II: belief states","title":"Advanced II: belief states","text":"SDDP.jl includes an implementation of the algorithm described in Dowson, O., Morton, D.P., & Pagnoncelli, B. (2019). Partially observable multistage stochastic programming. 
[link]","category":"page"},{"location":"tutorial/12_belief_states/","page":"Advanced II: belief states","title":"Advanced II: belief states","text":"Proper documentation will be forthcoming.","category":"page"},{"location":"tutorial/12_belief_states/","page":"Advanced II: belief states","title":"Advanced II: belief states","text":"In the mean-time, here is most of what you need to know.","category":"page"},{"location":"tutorial/12_belief_states/#Defining-the-ambiguity-partition","page":"Advanced II: belief states","title":"Defining the ambiguity partition","text":"","category":"section"},{"location":"tutorial/12_belief_states/","page":"Advanced II: belief states","title":"Advanced II: belief states","text":"Given a SDDP.Graph object (see Create a general policy graph for details), we can define the ambiguity partition using SDDP.add_ambiguity_set.","category":"page"},{"location":"tutorial/12_belief_states/","page":"Advanced II: belief states","title":"Advanced II: belief states","text":"For example, first we create a Markovian graph:","category":"page"},{"location":"tutorial/12_belief_states/","page":"Advanced II: belief states","title":"Advanced II: belief states","text":"using SDDP\n\nG = SDDP.MarkovianGraph([[0.5 0.5], [0.2 0.8; 0.8 0.2]])","category":"page"},{"location":"tutorial/12_belief_states/","page":"Advanced II: belief states","title":"Advanced II: belief states","text":"Root\n (0, 1)\nNodes\n (1, 1)\n (1, 2)\n (2, 1)\n (2, 2)\nArcs\n (0, 1) => (1, 1) w.p. 0.5\n (0, 1) => (1, 2) w.p. 0.5\n (1, 1) => (2, 1) w.p. 0.2\n (1, 1) => (2, 2) w.p. 0.8\n (1, 2) => (2, 1) w.p. 0.8\n (1, 2) => (2, 2) w.p. 0.2\n","category":"page"},{"location":"tutorial/12_belief_states/","page":"Advanced II: belief states","title":"Advanced II: belief states","text":"Then we add an ambiguity set over the nodes in the first stage:","category":"page"},{"location":"tutorial/12_belief_states/","page":"Advanced II: belief states","title":"Advanced II: belief states","text":"SDDP.add_ambiguity_set(G, [(1, 1), (1, 2)])","category":"page"},{"location":"tutorial/12_belief_states/","page":"Advanced II: belief states","title":"Advanced II: belief states","text":"Then we add an ambiguity set over the nodes in the second stage:","category":"page"},{"location":"tutorial/12_belief_states/","page":"Advanced II: belief states","title":"Advanced II: belief states","text":"SDDP.add_ambiguity_set(G, [(2, 1), (2, 2)])","category":"page"},{"location":"tutorial/12_belief_states/","page":"Advanced II: belief states","title":"Advanced II: belief states","text":"This results in the graph:","category":"page"},{"location":"tutorial/12_belief_states/","page":"Advanced II: belief states","title":"Advanced II: belief states","text":"G","category":"page"},{"location":"tutorial/12_belief_states/","page":"Advanced II: belief states","title":"Advanced II: belief states","text":"Root\n (0, 1)\nNodes\n (1, 1)\n (1, 2)\n (2, 1)\n (2, 2)\nArcs\n (0, 1) => (1, 1) w.p. 0.5\n (0, 1) => (1, 2) w.p. 0.5\n (1, 1) => (2, 1) w.p. 0.2\n (1, 1) => (2, 2) w.p. 0.8\n (1, 2) => (2, 1) w.p. 0.8\n (1, 2) => (2, 2) w.p. 
0.2\nPartition\n {\n (1, 1)\n (1, 2)\n } {\n (2, 1)\n (2, 2)\n }\n","category":"page"},{"location":"tutorial/12_belief_states/","page":"Advanced II: belief states","title":"Advanced II: belief states","text":"","category":"page"},{"location":"tutorial/12_belief_states/","page":"Advanced II: belief states","title":"Advanced II: belief states","text":"This page was generated using Literate.jl.","category":"page"},{"location":"examples/the_farmers_problem/","page":"The farmer's problem","title":"The farmer's problem","text":"EditURL = \"https://github.com/odow/SDDP.jl/blob/master/docs/src/examples/the_farmers_problem.jl\"","category":"page"},{"location":"examples/the_farmers_problem/#The-farmer's-problem","page":"The farmer's problem","title":"The farmer's problem","text":"","category":"section"},{"location":"examples/the_farmers_problem/","page":"The farmer's problem","title":"The farmer's problem","text":"This problem is taken from Section 1.1 of the book Birge, J. R., & Louveaux, F. (2011). Introduction to Stochastic Programming. New York, NY: Springer New York. Paragraphs in quotes are taken verbatim.","category":"page"},{"location":"examples/the_farmers_problem/#Problem-description","page":"The farmer's problem","title":"Problem description","text":"","category":"section"},{"location":"examples/the_farmers_problem/","page":"The farmer's problem","title":"The farmer's problem","text":"Consider a European farmer who specializes in raising wheat, corn, and sugar beets on his 500 acres of land. During the winter, [they want] to decide how much land to devote to each crop.The farmer knows that at least 200 tons (T) of wheat and 240 T of corn are needed for cattle feed. These amounts can be raised on the farm or bought from a wholesaler. Any production in excess of the feeding requirement would be sold.Over the last decade, mean selling prices have been \\$170 and \\$150 per ton of wheat and corn, respectively. The purchase prices are 40% more than this due to the wholesaler’s margin and transportation costs.Another profitable crop is sugar beet, which [they expect] to sell at \\$36/T; however, the European Commission imposes a quota on sugar beet production. Any amount in excess of the quota can be sold only at \\$10/T. The farmer’s quota for next year is 6000 T.\"Based on past experience, the farmer knows that the mean yield on [their] land is roughly 2.5 T, 3 T, and 20 T per acre for wheat, corn, and sugar beets, respectively.[To introduce uncertainty,] assume some correlation among the yields of the different crops. A very simplified representation of this would be to assume that years are good, fair, or bad for all crops, resulting in above average, average, or below average yields for all crops. 
To fix these ideas, above and below average indicate a yield 20% above or below the mean yield.","category":"page"},{"location":"examples/the_farmers_problem/#Problem-data","page":"The farmer's problem","title":"Problem data","text":"","category":"section"},{"location":"examples/the_farmers_problem/","page":"The farmer's problem","title":"The farmer's problem","text":"The area of the farm.","category":"page"},{"location":"examples/the_farmers_problem/","page":"The farmer's problem","title":"The farmer's problem","text":"MAX_AREA = 500.0","category":"page"},{"location":"examples/the_farmers_problem/","page":"The farmer's problem","title":"The farmer's problem","text":"500.0","category":"page"},{"location":"examples/the_farmers_problem/","page":"The farmer's problem","title":"The farmer's problem","text":"There are three crops:","category":"page"},{"location":"examples/the_farmers_problem/","page":"The farmer's problem","title":"The farmer's problem","text":"CROPS = [:wheat, :corn, :sugar_beet]","category":"page"},{"location":"examples/the_farmers_problem/","page":"The farmer's problem","title":"The farmer's problem","text":"3-element Array{Symbol,1}:\n :wheat \n :corn \n :sugar_beet","category":"page"},{"location":"examples/the_farmers_problem/","page":"The farmer's problem","title":"The farmer's problem","text":"Each of the crops has a different planting cost (\\$/acre).","category":"page"},{"location":"examples/the_farmers_problem/","page":"The farmer's problem","title":"The farmer's problem","text":"PLANTING_COST = Dict(\n :wheat => 150.0,\n :corn => 230.0,\n :sugar_beet => 260.0\n)","category":"page"},{"location":"examples/the_farmers_problem/","page":"The farmer's problem","title":"The farmer's problem","text":"Dict{Symbol,Float64} with 3 entries:\n :wheat => 150.0\n :sugar_beet => 260.0\n :corn => 230.0","category":"page"},{"location":"examples/the_farmers_problem/","page":"The farmer's problem","title":"The farmer's problem","text":"The farmer requires a minimum quantity of wheat and corn, but not of sugar beet (tonnes).","category":"page"},{"location":"examples/the_farmers_problem/","page":"The farmer's problem","title":"The farmer's problem","text":"MIN_QUANTITIES = Dict(\n :wheat => 200.0,\n :corn => 240.0,\n :sugar_beet => 0.0\n)","category":"page"},{"location":"examples/the_farmers_problem/","page":"The farmer's problem","title":"The farmer's problem","text":"Dict{Symbol,Float64} with 3 entries:\n :wheat => 200.0\n :sugar_beet => 0.0\n :corn => 240.0","category":"page"},{"location":"examples/the_farmers_problem/","page":"The farmer's problem","title":"The farmer's problem","text":"In Europe, there is a quota system for producing crops. 
The farmer owns the following quota for each crop (tonnes):","category":"page"},{"location":"examples/the_farmers_problem/","page":"The farmer's problem","title":"The farmer's problem","text":"QUOTA_MAX = Dict(\n :wheat => Inf,\n :corn => Inf,\n :sugar_beet => 6_000.0\n)","category":"page"},{"location":"examples/the_farmers_problem/","page":"The farmer's problem","title":"The farmer's problem","text":"Dict{Symbol,Float64} with 3 entries:\n :wheat => Inf\n :sugar_beet => 6000.0\n :corn => Inf","category":"page"},{"location":"examples/the_farmers_problem/","page":"The farmer's problem","title":"The farmer's problem","text":"The farmer can sell crops produced under the quota for the following amounts (\\$/tonne):","category":"page"},{"location":"examples/the_farmers_problem/","page":"The farmer's problem","title":"The farmer's problem","text":"SELL_IN_QUOTA = Dict(\n :wheat => 170.0,\n :corn => 150.0,\n :sugar_beet => 36.0\n)","category":"page"},{"location":"examples/the_farmers_problem/","page":"The farmer's problem","title":"The farmer's problem","text":"Dict{Symbol,Float64} with 3 entries:\n :wheat => 170.0\n :sugar_beet => 36.0\n :corn => 150.0","category":"page"},{"location":"examples/the_farmers_problem/","page":"The farmer's problem","title":"The farmer's problem","text":"If they sell more than their alloted quota, the farmer earns the following on each tonne of crop above the quota (\\$/tonne):","category":"page"},{"location":"examples/the_farmers_problem/","page":"The farmer's problem","title":"The farmer's problem","text":"SELL_NO_QUOTA = Dict(\n :wheat => 0.0,\n :corn => 0.0,\n :sugar_beet => 10.0\n)","category":"page"},{"location":"examples/the_farmers_problem/","page":"The farmer's problem","title":"The farmer's problem","text":"Dict{Symbol,Float64} with 3 entries:\n :wheat => 0.0\n :sugar_beet => 10.0\n :corn => 0.0","category":"page"},{"location":"examples/the_farmers_problem/","page":"The farmer's problem","title":"The farmer's problem","text":"The purchase prices for wheat and corn are 40% more than their sales price. However, the description does not address the purchase price of sugar beet. Therefore, we use a large value of \\$1,000/tonne.","category":"page"},{"location":"examples/the_farmers_problem/","page":"The farmer's problem","title":"The farmer's problem","text":"BUY_PRICE = Dict(\n :wheat => 238.0,\n :corn => 210.0,\n :sugar_beet => 1_000.0\n)","category":"page"},{"location":"examples/the_farmers_problem/","page":"The farmer's problem","title":"The farmer's problem","text":"Dict{Symbol,Float64} with 3 entries:\n :wheat => 238.0\n :sugar_beet => 1000.0\n :corn => 210.0","category":"page"},{"location":"examples/the_farmers_problem/","page":"The farmer's problem","title":"The farmer's problem","text":"On average, each crop has the following yield in tonnes/acre:","category":"page"},{"location":"examples/the_farmers_problem/","page":"The farmer's problem","title":"The farmer's problem","text":"MEAN_YIELD = Dict(\n :wheat => 2.5,\n :corn => 3.0,\n :sugar_beet => 20.0\n)","category":"page"},{"location":"examples/the_farmers_problem/","page":"The farmer's problem","title":"The farmer's problem","text":"Dict{Symbol,Float64} with 3 entries:\n :wheat => 2.5\n :sugar_beet => 20.0\n :corn => 3.0","category":"page"},{"location":"examples/the_farmers_problem/","page":"The farmer's problem","title":"The farmer's problem","text":"However, the yield is random. 
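For instance, here is a small illustrative calculation (not part of the original example) that applies the good/fair/bad multipliers defined just below to the mean wheat yield:

# Sketch only: scenario yields for wheat in tonnes/acre, assuming the
# good/fair/bad multipliers of 1.2, 1.0, and 0.8 introduced below.
wheat_scenarios = [m * MEAN_YIELD[:wheat] for m in (1.2, 1.0, 0.8)]  # [3.0, 2.5, 2.0]
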
In good years, the yield is +20% above average, and in bad years, the yield is -20% below average.","category":"page"},{"location":"examples/the_farmers_problem/","page":"The farmer's problem","title":"The farmer's problem","text":"YIELD_MULTIPLIER = Dict(\n :good => 1.2,\n :fair => 1.0,\n :bad => 0.8\n)","category":"page"},{"location":"examples/the_farmers_problem/","page":"The farmer's problem","title":"The farmer's problem","text":"Dict{Symbol,Float64} with 3 entries:\n :bad => 0.8\n :good => 1.2\n :fair => 1.0","category":"page"},{"location":"examples/the_farmers_problem/#Mathematical-formulation","page":"The farmer's problem","title":"Mathematical formulation","text":"","category":"section"},{"location":"examples/the_farmers_problem/#SDDP.jl-code","page":"The farmer's problem","title":"SDDP.jl code","text":"","category":"section"},{"location":"examples/the_farmers_problem/","page":"The farmer's problem","title":"The farmer's problem","text":"note: Note\nIn what follows, we make heavy use of the fact that you can look up variables by their symbol name in a JuMP model as follows:@variable(model, x)\nmodel[:x]Read the JuMP documentation if this isn't familiar to you.","category":"page"},{"location":"examples/the_farmers_problem/","page":"The farmer's problem","title":"The farmer's problem","text":"First up, load SDDP.jl and a solver. For this example, we use GLPK.jl.","category":"page"},{"location":"examples/the_farmers_problem/","page":"The farmer's problem","title":"The farmer's problem","text":"using SDDP, GLPK","category":"page"},{"location":"examples/the_farmers_problem/#State-variables","page":"The farmer's problem","title":"State variables","text":"","category":"section"},{"location":"examples/the_farmers_problem/","page":"The farmer's problem","title":"The farmer's problem","text":"State variables are the information that flows between stages. 
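Each variable declared with SDDP.State has an incoming and an outgoing copy; the following minimal sketch (added for clarity, not part of the original example) shows the two components that SDDP.jl attaches to a state variable:

# Sketch only: a scalar state variable and its two components.
@variable(subproblem, x >= 0, SDDP.State, initial_value = 0)
# x.in  -- the value of the state at the start of the node
# x.out -- the value of the state chosen within the node
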
In our example, the state variables are the areas of land devoted to growing each crop.","category":"page"},{"location":"examples/the_farmers_problem/","page":"The farmer's problem","title":"The farmer's problem","text":"function add_state_variables(subproblem)\n @variable(subproblem, area[c = CROPS] >= 0, SDDP.State, initial_value=0)\nend","category":"page"},{"location":"examples/the_farmers_problem/","page":"The farmer's problem","title":"The farmer's problem","text":"add_state_variables (generic function with 1 method)","category":"page"},{"location":"examples/the_farmers_problem/#First-stage-problem","page":"The farmer's problem","title":"First stage problem","text":"","category":"section"},{"location":"examples/the_farmers_problem/","page":"The farmer's problem","title":"The farmer's problem","text":"We can only plant a maximum of 500 acres, and we want to minimize the planting cost","category":"page"},{"location":"examples/the_farmers_problem/","page":"The farmer's problem","title":"The farmer's problem","text":"function create_first_stage_problem(subproblem)\n @constraint(subproblem,\n sum(subproblem[:area][c].out for c in CROPS) <= MAX_AREA)\n @stageobjective(subproblem,\n -sum(PLANTING_COST[c] * subproblem[:area][c].out for c in CROPS))\nend","category":"page"},{"location":"examples/the_farmers_problem/","page":"The farmer's problem","title":"The farmer's problem","text":"create_first_stage_problem (generic function with 1 method)","category":"page"},{"location":"examples/the_farmers_problem/#Second-stage-problem","page":"The farmer's problem","title":"Second stage problem","text":"","category":"section"},{"location":"examples/the_farmers_problem/","page":"The farmer's problem","title":"The farmer's problem","text":"Now let's consider the second stage problem. This is more complicated than the first stage, so we've broken it down into four sections:","category":"page"},{"location":"examples/the_farmers_problem/","page":"The farmer's problem","title":"The farmer's problem","text":"control variables\nconstraints\nthe objective\nthe uncertainty","category":"page"},{"location":"examples/the_farmers_problem/","page":"The farmer's problem","title":"The farmer's problem","text":"First, let's add the second stage control variables.","category":"page"},{"location":"examples/the_farmers_problem/#Variables","page":"The farmer's problem","title":"Variables","text":"","category":"section"},{"location":"examples/the_farmers_problem/","page":"The farmer's problem","title":"The farmer's problem","text":"We add four types of control variables. Technically, the yield isn't a control variable. 
However, we add it as a dummy \"helper\" variable because it will be used when we add uncertainty.","category":"page"},{"location":"examples/the_farmers_problem/","page":"The farmer's problem","title":"The farmer's problem","text":"function second_stage_variables(subproblem)\n @variables(subproblem, begin\n 0 <= yield[c=CROPS] # tonnes/acre\n 0 <= buy[c=CROPS] # tonnes\n 0 <= sell_in_quota[c=CROPS] <= QUOTA_MAX[c] # tonnes\n 0 <= sell_no_quota[c=CROPS] # tonnes\n end)\nend","category":"page"},{"location":"examples/the_farmers_problem/","page":"The farmer's problem","title":"The farmer's problem","text":"second_stage_variables (generic function with 1 method)","category":"page"},{"location":"examples/the_farmers_problem/#Constraints","page":"The farmer's problem","title":"Constraints","text":"","category":"section"},{"location":"examples/the_farmers_problem/","page":"The farmer's problem","title":"The farmer's problem","text":"We need to define is the minimum quantity constraint. This ensures that MIN_QUANTITIES[c] of each crop is produced.","category":"page"},{"location":"examples/the_farmers_problem/","page":"The farmer's problem","title":"The farmer's problem","text":"function second_stage_constraint_min_quantity(subproblem)\n @constraint(subproblem, [c=CROPS],\n subproblem[:yield][c] + subproblem[:buy][c] -\n subproblem[:sell_in_quota][c] - subproblem[:sell_no_quota][c] >=\n MIN_QUANTITIES[c])\nend","category":"page"},{"location":"examples/the_farmers_problem/","page":"The farmer's problem","title":"The farmer's problem","text":"second_stage_constraint_min_quantity (generic function with 1 method)","category":"page"},{"location":"examples/the_farmers_problem/#Objective","page":"The farmer's problem","title":"Objective","text":"","category":"section"},{"location":"examples/the_farmers_problem/","page":"The farmer's problem","title":"The farmer's problem","text":"The objective of the second stage is to maximise revenue from selling crops, less the cost of buying corn and wheat if necessary to meet the minimum quantity constraint.","category":"page"},{"location":"examples/the_farmers_problem/","page":"The farmer's problem","title":"The farmer's problem","text":"function second_stage_objective(subproblem)\n @stageobjective(subproblem,\n sum(\n SELL_IN_QUOTA[c] * subproblem[:sell_in_quota][c] +\n SELL_NO_QUOTA[c] * subproblem[:sell_no_quota][c] -\n BUY_PRICE[c] * subproblem[:buy][c]\n for c in CROPS)\n )\nend","category":"page"},{"location":"examples/the_farmers_problem/","page":"The farmer's problem","title":"The farmer's problem","text":"second_stage_objective (generic function with 1 method)","category":"page"},{"location":"examples/the_farmers_problem/#Random-variables","page":"The farmer's problem","title":"Random variables","text":"","category":"section"},{"location":"examples/the_farmers_problem/","page":"The farmer's problem","title":"The farmer's problem","text":"Then, in the SDDP.parameterize function, we set the coefficient using JuMP.set_normalized_coefficient.","category":"page"},{"location":"examples/the_farmers_problem/","page":"The farmer's problem","title":"The farmer's problem","text":"function second_stage_uncertainty(subproblem)\n @constraint(subproblem, uncertainty[c=CROPS],\n 1.0 * subproblem[:area][c].in == subproblem[:yield][c])\n SDDP.parameterize(subproblem, [:good, :fair, :bad]) do ω\n for c in CROPS\n JuMP.set_normalized_coefficient(\n uncertainty[c],\n subproblem[:area][c].in,\n MEAN_YIELD[c] * YIELD_MULTIPLIER[ω]\n )\n end\n 
end\nend","category":"page"},{"location":"examples/the_farmers_problem/","page":"The farmer's problem","title":"The farmer's problem","text":"second_stage_uncertainty (generic function with 1 method)","category":"page"},{"location":"examples/the_farmers_problem/#Putting-it-all-together","page":"The farmer's problem","title":"Putting it all together","text":"","category":"section"},{"location":"examples/the_farmers_problem/","page":"The farmer's problem","title":"The farmer's problem","text":"Now we're ready to build the multistage stochastic programming model. In addition to the things already discussed, we need a few extra pieces of information.","category":"page"},{"location":"examples/the_farmers_problem/","page":"The farmer's problem","title":"The farmer's problem","text":"First, we are maximizing, so we set sense = :Max. Second, we need to provide a valid upper bound. (See Choosing an initial bound for more on this.) We know from Birge and Louveaux that the optimal solution is \\$108,390. So, let's choose \\$500,000 just to be safe.","category":"page"},{"location":"examples/the_farmers_problem/","page":"The farmer's problem","title":"The farmer's problem","text":"Here is the full model.","category":"page"},{"location":"examples/the_farmers_problem/","page":"The farmer's problem","title":"The farmer's problem","text":"model = SDDP.LinearPolicyGraph(\n stages = 2,\n sense = :Max,\n upper_bound = 500_000.0,\n optimizer = GLPK.Optimizer,\n direct_mode = false\n ) do subproblem, stage\n add_state_variables(subproblem)\n if stage == 1\n create_first_stage_problem(subproblem)\n else\n second_stage_variables(subproblem)\n second_stage_constraint_min_quantity(subproblem)\n second_stage_uncertainty(subproblem)\n second_stage_objective(subproblem)\n end\nend","category":"page"},{"location":"examples/the_farmers_problem/","page":"The farmer's problem","title":"The farmer's problem","text":"A policy graph with 2 nodes.\n Node indices: 1, 2\n","category":"page"},{"location":"examples/the_farmers_problem/#Training-a-policy","page":"The farmer's problem","title":"Training a policy","text":"","category":"section"},{"location":"examples/the_farmers_problem/","page":"The farmer's problem","title":"The farmer's problem","text":"Now that we've built a model, we need to train it using SDDP.train. The keyword iteration_limit stops the training after 20 iterations. See Choose a stopping rule for other ways to stop the training.","category":"page"},{"location":"examples/the_farmers_problem/","page":"The farmer's problem","title":"The farmer's problem","text":"SDDP.train(model; iteration_limit = 20)","category":"page"},{"location":"examples/the_farmers_problem/","page":"The farmer's problem","title":"The farmer's problem","text":"------------------------------------------------------------------------------\n SDDP.jl (c) Oscar Dowson, 2017-21\n\nProblem\n Nodes : 2\n State variables : 3\n Scenarios : 3.00000e+00\n Solver : serial mode\n\nNumerical stability report\n Non-zero Matrix range [1e+00, 2e+01]\n Non-zero Objective range [1e+00, 1e+03]\n Non-zero Bounds range [6e+03, Inf]\n Non-zero RHS range [2e+02, 5e+02]\nWARNING: numerical stability issues detected\n - Bounds range contains large coefficients\nVery large or small absolute values of coefficients\ncan cause numerical stability issues. Consider\nreformulating the model.\n\n Iteration Simulation Bound Time (s) Proc. 
ID # Solves\n 1 -9.800000e+04 1.320000e+05 1.399040e-03 1 6\n 2 4.800000e+04 1.290000e+05 1.778126e-03 1 12\n 3 1.086000e+05 1.268139e+05 2.170086e-03 1 18\n 4 8.163785e+04 1.219636e+05 2.536058e-03 1 24\n 5 1.550000e+05 1.121210e+05 2.890110e-03 1 30\n 6 1.121210e+05 1.104195e+05 3.273964e-03 1 36\n 7 1.047405e+05 1.095042e+05 3.637075e-03 1 42\n 8 5.125161e+04 1.088611e+05 3.978968e-03 1 48\n 9 1.088611e+05 1.088021e+05 4.387140e-03 1 54\n 10 1.093355e+05 1.084962e+05 4.873037e-03 1 60\n 11 4.823846e+04 1.083900e+05 5.429983e-03 1 66\n 12 1.093500e+05 1.083900e+05 5.961180e-03 1 72\n 13 1.093500e+05 1.083900e+05 6.410122e-03 1 78\n 14 1.670000e+05 1.083900e+05 6.814003e-03 1 84\n 15 1.093500e+05 1.083900e+05 7.220984e-03 1 90\n 16 4.882000e+04 1.083900e+05 7.596016e-03 1 96\n 17 4.882000e+04 1.083900e+05 7.940054e-03 1 102\n 18 1.093500e+05 1.083900e+05 8.342028e-03 1 108\n 19 1.093500e+05 1.083900e+05 8.748055e-03 1 114\n 20 4.882000e+04 1.083900e+05 9.086132e-03 1 120\n\nTerminating training with status: iteration_limit\n------------------------------------------------------------------------------\n","category":"page"},{"location":"examples/the_farmers_problem/#Checking-the-policy","page":"The farmer's problem","title":"Checking the policy","text":"","category":"section"},{"location":"examples/the_farmers_problem/","page":"The farmer's problem","title":"The farmer's problem","text":"Birge and Louveaux report that the optimal objective value is \\$108,390. Check that we got the correct solution using SDDP.calculate_bound:","category":"page"},{"location":"examples/the_farmers_problem/","page":"The farmer's problem","title":"The farmer's problem","text":"@assert SDDP.calculate_bound(model) == 108_390.0","category":"page"},{"location":"examples/the_farmers_problem/","page":"The farmer's problem","title":"The farmer's problem","text":"","category":"page"},{"location":"examples/the_farmers_problem/","page":"The farmer's problem","title":"The farmer's problem","text":"This page was generated using Literate.jl.","category":"page"},{"location":"guides/upgrade_from_the_old_sddp/#Upgrade-from-the-old-SDDP.jl","page":"Upgrade from the old SDDP.jl","title":"Upgrade from the old SDDP.jl","text":"","category":"section"},{"location":"guides/upgrade_from_the_old_sddp/","page":"Upgrade from the old SDDP.jl","title":"Upgrade from the old SDDP.jl","text":"SDDP.jl under went a major re-write to be compatible with JuMP v0.19 and Julia v1.0.","category":"page"},{"location":"guides/upgrade_from_the_old_sddp/","page":"Upgrade from the old SDDP.jl","title":"Upgrade from the old SDDP.jl","text":"Some of the highlights of the new release include","category":"page"},{"location":"guides/upgrade_from_the_old_sddp/","page":"Upgrade from the old SDDP.jl","title":"Upgrade from the old SDDP.jl","text":"Support for \"multi-cut\"s\nSupport for stagewise-independent noise in the constraint matrix\nA way to simulate out-of-sample realizations of the stagewise-independent noise terms\nExtensible ways to get information such as dual variables out of a simulation\nSupport for infinite horizon multistage stochastic programs\nExtensible stopping rules\nExtensible sampling schemes\nImproved cut selection routines\nBetter checks for numerical issues\nA much tidier (and simpler) implementation, with ample commenting throughout the code base","category":"page"},{"location":"guides/upgrade_from_the_old_sddp/#Syntax-changes","page":"Upgrade from the old SDDP.jl","title":"Syntax 
changes","text":"","category":"section"},{"location":"guides/upgrade_from_the_old_sddp/","page":"Upgrade from the old SDDP.jl","title":"Upgrade from the old SDDP.jl","text":"The complete re-write has resulted in a painful upgrading process as users must simultaneously upgrade from Julia 0.6 to Julia 1.0, from JuMP 0.18 to JuMP 0.19, and from the old syntax of SDDP.jl to the new. In this section, we outline some of the larger changes. For more information, we recommend reading the updated tutorials, beginning with Basic I: first steps","category":"page"},{"location":"guides/upgrade_from_the_old_sddp/#SDDPModel","page":"Upgrade from the old SDDP.jl","title":"SDDPModel","text":"","category":"section"},{"location":"guides/upgrade_from_the_old_sddp/","page":"Upgrade from the old SDDP.jl","title":"Upgrade from the old SDDP.jl","text":"SDDPModel has been replaced in favor of a more general approach to formulating multistage stochastic optimization problems.","category":"page"},{"location":"guides/upgrade_from_the_old_sddp/","page":"Upgrade from the old SDDP.jl","title":"Upgrade from the old SDDP.jl","text":"For basic use-cases, SDDPModel has been replaced by LinearPolicyGraph.","category":"page"},{"location":"guides/upgrade_from_the_old_sddp/","page":"Upgrade from the old SDDP.jl","title":"Upgrade from the old SDDP.jl","text":"model = SDDPModel(stages = 3) do subproblem, t\n # subproblem definition.\nend\n\n# becomes\n\nmodel = SDDP.LinearPolicyGraph(stages = 3) do subproblem, t\n # subproblem definition.\nend","category":"page"},{"location":"guides/upgrade_from_the_old_sddp/","page":"Upgrade from the old SDDP.jl","title":"Upgrade from the old SDDP.jl","text":"If you used the markov_transition feature, SDDPModel has been replaced by MarkovianPolicyGraph.","category":"page"},{"location":"guides/upgrade_from_the_old_sddp/","page":"Upgrade from the old SDDP.jl","title":"Upgrade from the old SDDP.jl","text":"model = SDDPModel(\n markov_transition = Array{Float64, 2}[\n [ 1.0 ]',\n [ 0.75 0.25 ],\n [ 0.75 0.25 ; 0.25 0.75 ]\n ]) do subproblem, t, i\n # subproblem definition.\nend\n\n# becomes\n\nmodel = SDDP.MarkovianPolicyGraph(\n transition_matrices = Array{Float64, 2}[\n [ 1.0 ]',\n [ 0.75 0.25 ],\n [ 0.75 0.25 ; 0.25 0.75 ]\n ]) do subproblem, node\n t, i = node\n # subproblem definition.\nend","category":"page"},{"location":"guides/upgrade_from_the_old_sddp/#solver","page":"Upgrade from the old SDDP.jl","title":"solver =","text":"","category":"section"},{"location":"guides/upgrade_from_the_old_sddp/","page":"Upgrade from the old SDDP.jl","title":"Upgrade from the old SDDP.jl","text":"JuMP 0.19 changed the way that solvers are passed to JuMP models.","category":"page"},{"location":"guides/upgrade_from_the_old_sddp/","page":"Upgrade from the old SDDP.jl","title":"Upgrade from the old SDDP.jl","text":"SDDPModel(solver = GurobiSolver(OutputFlag = 0))\n\n# becomes\n\nLinearPolicyGraph(optimizer = with_optimizer(Gurobi.Optimizer, OutputFlag = 0))","category":"page"},{"location":"guides/upgrade_from_the_old_sddp/#@state","page":"Upgrade from the old SDDP.jl","title":"@state","text":"","category":"section"},{"location":"guides/upgrade_from_the_old_sddp/","page":"Upgrade from the old SDDP.jl","title":"Upgrade from the old SDDP.jl","text":"We changed how to specify state variables.","category":"page"},{"location":"guides/upgrade_from_the_old_sddp/","page":"Upgrade from the old SDDP.jl","title":"Upgrade from the old SDDP.jl","text":"@state(subproblem, 0 <= x <= 1, x0==2)\n\n# 
becomes\n\n@variable(subproblem, 0 <= x <= 1, SDDP.State, initial_value = 2)","category":"page"},{"location":"guides/upgrade_from_the_old_sddp/","page":"Upgrade from the old SDDP.jl","title":"Upgrade from the old SDDP.jl","text":"In addition, instead of having to create an incoming state x0 and an outgoing state x, we now refer to x.in and x.out. Here is another example:","category":"page"},{"location":"guides/upgrade_from_the_old_sddp/","page":"Upgrade from the old SDDP.jl","title":"Upgrade from the old SDDP.jl","text":"@state(subproblem, 0 <= x[i=1:2] <= 1, x0==i)\n\n# becomes\n\n@variable(subproblem, 0 <= x[i=1:2] <= 1, SDDP.State, initial_value = i)","category":"page"},{"location":"guides/upgrade_from_the_old_sddp/","page":"Upgrade from the old SDDP.jl","title":"Upgrade from the old SDDP.jl","text":"x0[1]\n\n# becomes\n\nx[1].in","category":"page"},{"location":"guides/upgrade_from_the_old_sddp/","page":"Upgrade from the old SDDP.jl","title":"Upgrade from the old SDDP.jl","text":"x[2]\n\n# becomes\n\nx[2].out","category":"page"},{"location":"guides/upgrade_from_the_old_sddp/#@rhsnoise","page":"Upgrade from the old SDDP.jl","title":"@rhsnoise","text":"","category":"section"},{"location":"guides/upgrade_from_the_old_sddp/","page":"Upgrade from the old SDDP.jl","title":"Upgrade from the old SDDP.jl","text":"We removed @rhsnoise. This results in more lines of code, but more flexibility.","category":"page"},{"location":"guides/upgrade_from_the_old_sddp/","page":"Upgrade from the old SDDP.jl","title":"Upgrade from the old SDDP.jl","text":"@rhsnoise(subproblem, ω = [1, 2, 3], 2x <= ω)\nsetnoiseprobability!(subproblem, [0.5, 0.2, 0.3])\n\n# becomes\n\n@variable(subproblem, ω)\n@constraint(subproblem, 2x <= ω)\nSDDP.parameterize(subproblem, [1, 2, 3], [0.5, 0.2, 0.3]) do ϕ\n JuMP.fix(ω, ϕ)\nend","category":"page"},{"location":"guides/upgrade_from_the_old_sddp/","page":"Upgrade from the old SDDP.jl","title":"Upgrade from the old SDDP.jl","text":"If you had multiple @rhsnoise constraints, use:","category":"page"},{"location":"guides/upgrade_from_the_old_sddp/","page":"Upgrade from the old SDDP.jl","title":"Upgrade from the old SDDP.jl","text":"@rhsnoise(subproblem, ω = [1, 2, 3], 2x <= ω)\n@rhsnoise(subproblem, ω = [4, 5, 6], y >= 3ω)\nsetnoiseprobability!(subproblem, [0.5, 0.2, 0.3])\n\n# becomes\n\n@variable(subproblem, ω[1:2])\n@constraint(subproblem, 2x <= ω[1])\n@constraint(subproblem, y >= 3 * ω[2])\nSDDP.parameterize(subproblem, [(1, 4), (2, 5), (3, 6)], [0.5, 0.2, 0.3]) do ϕ\n JuMP.fix(ω[1], ϕ[1])\n JuMP.fix(ω[2], ϕ[2])\nend","category":"page"},{"location":"guides/upgrade_from_the_old_sddp/#@stageobjective","page":"Upgrade from the old SDDP.jl","title":"@stageobjective","text":"","category":"section"},{"location":"guides/upgrade_from_the_old_sddp/","page":"Upgrade from the old SDDP.jl","title":"Upgrade from the old SDDP.jl","text":"@stageobjective no longer accepts a random list of parameters. 
Use SDDP.parameterize instead.","category":"page"},{"location":"guides/upgrade_from_the_old_sddp/","page":"Upgrade from the old SDDP.jl","title":"Upgrade from the old SDDP.jl","text":"@stageobjective(subproblem, ω = [1, 2, 3], ω * x)\nsetnoiseprobability!(subproblem, [0.5, 0.2, 0.3])\n\n# becomes\n\nSDDP.parameterize(subproblem, [1, 2, 3], [0.5, 0.2, 0.3]) do ω\n @stageobjective(subproblem, ω * x)\nend","category":"page"},{"location":"guides/upgrade_from_the_old_sddp/#SDDP.solve","page":"Upgrade from the old SDDP.jl","title":"SDDP.solve","text":"","category":"section"},{"location":"guides/upgrade_from_the_old_sddp/","page":"Upgrade from the old SDDP.jl","title":"Upgrade from the old SDDP.jl","text":"SDDP.solve has been replaced by SDDP.train. See the docs for a complete list of the new options as most things have changed.","category":"page"},{"location":"guides/upgrade_from_the_old_sddp/","page":"Upgrade from the old SDDP.jl","title":"Upgrade from the old SDDP.jl","text":"note: Note\nParallel training has not (yet) been implemented.","category":"page"},{"location":"guides/upgrade_from_the_old_sddp/#Plotting","page":"Upgrade from the old SDDP.jl","title":"Plotting","text":"","category":"section"},{"location":"guides/upgrade_from_the_old_sddp/","page":"Upgrade from the old SDDP.jl","title":"Upgrade from the old SDDP.jl","text":"Much of the syntax for plotting has changed. See Basic V: plotting for the new syntax.","category":"page"},{"location":"guides/upgrade_from_the_old_sddp/#Price-interpolation","page":"Upgrade from the old SDDP.jl","title":"Price interpolation","text":"","category":"section"},{"location":"guides/upgrade_from_the_old_sddp/","page":"Upgrade from the old SDDP.jl","title":"Upgrade from the old SDDP.jl","text":"The syntax for models with stagewise-dependent objective processes has completely changed. 
See Advanced I: objective states for details.","category":"page"},{"location":"guides/add_a_multidimensional_state_variable/#Add-a-multi-dimensional-state-variable","page":"Add a multi-dimensional state variable","title":"Add a multi-dimensional state variable","text":"","category":"section"},{"location":"guides/add_a_multidimensional_state_variable/","page":"Add a multi-dimensional state variable","title":"Add a multi-dimensional state variable","text":"DocTestSetup = quote\n using SDDP, GLPK\nend","category":"page"},{"location":"guides/add_a_multidimensional_state_variable/","page":"Add a multi-dimensional state variable","title":"Add a multi-dimensional state variable","text":"Just like normal JuMP variables, it is possible to create containers of state variables.","category":"page"},{"location":"guides/add_a_multidimensional_state_variable/","page":"Add a multi-dimensional state variable","title":"Add a multi-dimensional state variable","text":"julia> model = SDDP.LinearPolicyGraph(\n stages=1, lower_bound = 0, optimizer = GLPK.Optimizer\n ) do subproblem, t\n # A scalar state variable.\n @variable(subproblem, x >= 0, SDDP.State, initial_value = 0)\n println(\"Lower bound of outgoing x is: \", JuMP.lower_bound(x.out))\n # A vector of state variables.\n @variable(subproblem, y[i = 1:2] >= i, SDDP.State, initial_value = i)\n println(\"Lower bound of outgoing y[1] is: \", JuMP.lower_bound(y[1].out))\n # A JuMP.Containers.DenseAxisArray of state variables.\n @variable(subproblem,\n z[i = 3:4, j = [:A, :B]] >= i, SDDP.State, initial_value = i)\n println(\"Lower bound of outgoing z[3, :B] is: \", JuMP.lower_bound(z[3, :B].out))\n end;\nLower bound of outgoing x is: 0.0\nLower bound of outgoing y[1] is: 1.0\nLower bound of outgoing z[3, :B] is: 3.0","category":"page"},{"location":"guides/add_multidimensional_noise_Terms/#Add-multi-dimensional-noise-terms","page":"Add multi-dimensional noise terms","title":"Add multi-dimensional noise terms","text":"","category":"section"},{"location":"guides/add_multidimensional_noise_Terms/","page":"Add multi-dimensional noise terms","title":"Add multi-dimensional noise terms","text":"DocTestSetup = quote\n using SDDP, GLPK\nend","category":"page"},{"location":"guides/add_multidimensional_noise_Terms/","page":"Add multi-dimensional noise terms","title":"Add multi-dimensional noise terms","text":"Multi-dimensional stagewise-independent random variables can be created by forming the Cartesian product of the random variables.","category":"page"},{"location":"guides/add_multidimensional_noise_Terms/","page":"Add multi-dimensional noise terms","title":"Add multi-dimensional noise terms","text":"julia> model = SDDP.LinearPolicyGraph(\n stages=3, lower_bound = 0, optimizer = GLPK.Optimizer\n ) do subproblem, t\n @variable(subproblem, x, SDDP.State, initial_value = 0.0)\n support = [(value = v, coefficient = c) for v in [1, 2] for c in [3, 4, 5]]\n probability = [pv * pc for pv in [0.5, 0.5] for pc in [0.3, 0.5, 0.2]]\n SDDP.parameterize(subproblem, support, probability) do ω\n JuMP.fix(x.out, ω.value)\n @stageobjective(subproblem, ω.coefficient * x.out)\n println(\"ω is: \", ω)\n end\n end;\n\njulia> SDDP.simulate(model, 1);\nω is: (value = 1, coefficient = 4)\nω is: (value = 1, coefficient = 3)\nω is: (value = 2, coefficient = 4)","category":"page"},{"location":"guides/add_a_risk_measure/#Add-a-risk-measure","page":"Add a risk measure","title":"Add a risk measure","text":"","category":"section"},{"location":"guides/add_a_risk_measure/","page":"Add a risk 
measure","title":"Add a risk measure","text":"DocTestSetup = quote\n using SDDP, GLPK\nend","category":"page"},{"location":"guides/add_a_risk_measure/#Training-a-risk-averse-model","page":"Add a risk measure","title":"Training a risk-averse model","text":"","category":"section"},{"location":"guides/add_a_risk_measure/","page":"Add a risk measure","title":"Add a risk measure","text":"SDDP.jl supports a variety of risk measures. Two common ones are SDDP.Expectation and SDDP.WorstCase. Let's see how to train a policy using them. There are three possible ways.","category":"page"},{"location":"guides/add_a_risk_measure/","page":"Add a risk measure","title":"Add a risk measure","text":"If the same risk measure is used at every node in the policy graph, we can just pass an instance of one of the risk measures to the risk_measure keyword argument of the SDDP.train function.","category":"page"},{"location":"guides/add_a_risk_measure/","page":"Add a risk measure","title":"Add a risk measure","text":"SDDP.train(\n model,\n risk_measure = SDDP.WorstCase(),\n iteration_limit = 10\n)","category":"page"},{"location":"guides/add_a_risk_measure/","page":"Add a risk measure","title":"Add a risk measure","text":"However, if you want different risk measures at different nodes, there are two options. First, you can pass risk_measure a dictionary of risk measures, with one entry for each node. The keys of the dictionary are the indices of the nodes.","category":"page"},{"location":"guides/add_a_risk_measure/","page":"Add a risk measure","title":"Add a risk measure","text":"SDDP.train(\n model,\n risk_measure = Dict(\n 1 => SDDP.Expectation(),\n 2 => SDDP.WorstCase()\n ),\n iteration_limit = 10\n)","category":"page"},{"location":"guides/add_a_risk_measure/","page":"Add a risk measure","title":"Add a risk measure","text":"An alternative method is to pass risk_measure a function that takes one argument, the index of a node, and returns an instance of a risk measure:","category":"page"},{"location":"guides/add_a_risk_measure/","page":"Add a risk measure","title":"Add a risk measure","text":"SDDP.train(\n model,\n risk_measure = (node_index) -> begin\n if node_index == 1\n return SDDP.Expectation()\n else\n return SDDP.WorstCase()\n end\n end,\n iteration_limit = 10\n)","category":"page"},{"location":"guides/add_a_risk_measure/","page":"Add a risk measure","title":"Add a risk measure","text":"note: Note\nIf you simulate the policy, the simulated value is the risk-neutral value of the policy.","category":"page"},{"location":"guides/add_a_risk_measure/#Risk-measures","page":"Add a risk measure","title":"Risk measures","text":"","category":"section"},{"location":"guides/add_a_risk_measure/","page":"Add a risk measure","title":"Add a risk measure","text":"To illustrate the risk-measures included in SDDP.jl, we consider a discrete random variable with four outcomes.","category":"page"},{"location":"guides/add_a_risk_measure/","page":"Add a risk measure","title":"Add a risk measure","text":"The random variable is supported on the values 1, 2, 3, and 4:","category":"page"},{"location":"guides/add_a_risk_measure/","page":"Add a risk measure","title":"Add a risk measure","text":"julia> noise_supports = [1, 2, 3, 4]\n4-element Array{Int64,1}:\n 1\n 2\n 3\n 4","category":"page"},{"location":"guides/add_a_risk_measure/","page":"Add a risk measure","title":"Add a risk measure","text":"The associated probability of each outcome is as follows:","category":"page"},{"location":"guides/add_a_risk_measure/","page":"Add a risk 
measure","title":"Add a risk measure","text":"julia> nominal_probability = [0.1, 0.2, 0.3, 0.4]\n4-element Array{Float64,1}:\n 0.1\n 0.2\n 0.3\n 0.4","category":"page"},{"location":"guides/add_a_risk_measure/","page":"Add a risk measure","title":"Add a risk measure","text":"With each outcome ω, the agent observes a cost Z(ω):","category":"page"},{"location":"guides/add_a_risk_measure/","page":"Add a risk measure","title":"Add a risk measure","text":"julia> cost_realizations = [5.0, 4.0, 6.0, 2.0]\n4-element Array{Float64,1}:\n 5.0\n 4.0\n 6.0\n 2.0","category":"page"},{"location":"guides/add_a_risk_measure/","page":"Add a risk measure","title":"Add a risk measure","text":"We assume that we are minimizing:","category":"page"},{"location":"guides/add_a_risk_measure/","page":"Add a risk measure","title":"Add a risk measure","text":"julia> is_minimization = true\ntrue","category":"page"},{"location":"guides/add_a_risk_measure/","page":"Add a risk measure","title":"Add a risk measure","text":"Finally, we create a vector that will be used to store the risk-adjusted probabilities:","category":"page"},{"location":"guides/add_a_risk_measure/","page":"Add a risk measure","title":"Add a risk measure","text":"julia> risk_adjusted_probability = zeros(4)\n4-element Array{Float64,1}:\n 0.0\n 0.0\n 0.0\n 0.0","category":"page"},{"location":"guides/add_a_risk_measure/#Expectation","page":"Add a risk measure","title":"Expectation","text":"","category":"section"},{"location":"guides/add_a_risk_measure/","page":"Add a risk measure","title":"Add a risk measure","text":"SDDP.Expectation","category":"page"},{"location":"guides/add_a_risk_measure/#SDDP.Expectation","page":"Add a risk measure","title":"SDDP.Expectation","text":"Expectation()\n\nThe Expectation risk measure. Identical to taking the expectation with respect to the nominal distribution.\n\n\n\n\n\n","category":"type"},{"location":"guides/add_a_risk_measure/","page":"Add a risk measure","title":"Add a risk measure","text":"SDDP.adjust_probability(\n SDDP.Expectation(),\n risk_adjusted_probability,\n nominal_probability,\n noise_supports,\n cost_realizations,\n is_minimization\n)\n\nrisk_adjusted_probability\n\n# output\n\n4-element Array{Float64,1}:\n 0.1\n 0.2\n 0.3\n 0.4","category":"page"},{"location":"guides/add_a_risk_measure/","page":"Add a risk measure","title":"Add a risk measure","text":"SDDP.Expectation is the default risk measure in SDDP.jl.","category":"page"},{"location":"guides/add_a_risk_measure/#Worst-case","page":"Add a risk measure","title":"Worst-case","text":"","category":"section"},{"location":"guides/add_a_risk_measure/","page":"Add a risk measure","title":"Add a risk measure","text":"SDDP.WorstCase","category":"page"},{"location":"guides/add_a_risk_measure/#SDDP.WorstCase","page":"Add a risk measure","title":"SDDP.WorstCase","text":"WorstCase()\n\nThe worst-case risk measure. 
Places all of the probability weight on the worst outcome.\n\n\n\n\n\n","category":"type"},{"location":"guides/add_a_risk_measure/","page":"Add a risk measure","title":"Add a risk measure","text":"SDDP.adjust_probability(\n SDDP.WorstCase(),\n risk_adjusted_probability,\n nominal_probability,\n noise_supports,\n cost_realizations,\n is_minimization\n)\n\nrisk_adjusted_probability\n\n# output\n\n4-element Array{Float64,1}:\n 0.0\n 0.0\n 1.0\n 0.0","category":"page"},{"location":"guides/add_a_risk_measure/#Average-value-at-risk-(AV@R)","page":"Add a risk measure","title":"Average value at risk (AV@R)","text":"","category":"section"},{"location":"guides/add_a_risk_measure/","page":"Add a risk measure","title":"Add a risk measure","text":"SDDP.AVaR","category":"page"},{"location":"guides/add_a_risk_measure/#SDDP.AVaR","page":"Add a risk measure","title":"SDDP.AVaR","text":"AVaR(β)\n\nThe average value at risk (AV@R) risk measure.\n\nComputes the expectation of the β fraction of worst outcomes. β must be in [0, 1]. When β=1, this is equivalent to the Expectation risk measure. When β=0, this is equivalent to the WorstCase risk measure.\n\nAV@R is also known as the conditional value at risk (CV@R) or expected shortfall.\n\n\n\n\n\n","category":"type"},{"location":"guides/add_a_risk_measure/","page":"Add a risk measure","title":"Add a risk measure","text":"SDDP.adjust_probability(\n SDDP.AVaR(0.5),\n risk_adjusted_probability,\n nominal_probability,\n noise_supports,\n cost_realizations,\n is_minimization\n)\n\nround.(risk_adjusted_probability, digits = 1)\n\n# output\n\n4-element Array{Float64,1}:\n 0.2\n 0.2\n 0.6\n 0.0","category":"page"},{"location":"guides/add_a_risk_measure/#Convex-combination-of-risk-measures","page":"Add a risk measure","title":"Convex combination of risk measures","text":"","category":"section"},{"location":"guides/add_a_risk_measure/","page":"Add a risk measure","title":"Add a risk measure","text":"Using the axioms of coherent risk measures, it is easy to show that any convex combination of coherent risk measures is also a coherent risk measure. 
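To see why (a short derivation added here for clarity), consider sub-additivity: if \rho_1 and \rho_2 are sub-additive and \lambda \in [0, 1], then the combination \rho = \lambda \rho_1 + (1 - \lambda) \rho_2 satisfies

\rho(X + Y) = \lambda \rho_1(X + Y) + (1 - \lambda) \rho_2(X + Y) \le \lambda [\rho_1(X) + \rho_1(Y)] + (1 - \lambda) [\rho_2(X) + \rho_2(Y)] = \rho(X) + \rho(Y),

and the same argument applies to monotonicity, translation equivariance, and positive homogeneity.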
Convex combinations of risk measures can be created directly:","category":"page"},{"location":"guides/add_a_risk_measure/","page":"Add a risk measure","title":"Add a risk measure","text":"julia> cvx_comb_measure = 0.5 * SDDP.Expectation() + 0.5 * SDDP.WorstCase()\nA convex combination of 0.5 * SDDP.Expectation() + 0.5 * SDDP.WorstCase()","category":"page"},{"location":"guides/add_a_risk_measure/","page":"Add a risk measure","title":"Add a risk measure","text":"SDDP.adjust_probability(\n cvx_comb_measure,\n risk_adjusted_probability,\n nominal_probability,\n noise_supports,\n cost_realizations,\n is_minimization\n)\n\nrisk_adjusted_probability\n\n# output\n\n4-element Array{Float64,1}:\n 0.05\n 0.1\n 0.65\n 0.2","category":"page"},{"location":"guides/add_a_risk_measure/","page":"Add a risk measure","title":"Add a risk measure","text":"As a special case, the SDDP.EAVaR risk-measure is a convex combination of SDDP.Expectation and SDDP.AVaR:","category":"page"},{"location":"guides/add_a_risk_measure/","page":"Add a risk measure","title":"Add a risk measure","text":"julia> risk_measure = SDDP.EAVaR(beta=0.25, lambda=0.4)\nA convex combination of 0.4 * SDDP.Expectation() + 0.6 * SDDP.AVaR(0.25)","category":"page"},{"location":"guides/add_a_risk_measure/","page":"Add a risk measure","title":"Add a risk measure","text":"SDDP.EAVaR","category":"page"},{"location":"guides/add_a_risk_measure/#SDDP.EAVaR","page":"Add a risk measure","title":"SDDP.EAVaR","text":"EAVaR(;lambda=1.0, beta=1.0)\n\nA risk measure that is a convex combination of Expectation and Average Value @ Risk (also called Conditional Value @ Risk).\n\n λ * E[x] + (1 - λ) * AV@R(1-β)[x]\n\nKeyword Arguments\n\nlambda: Convex weight on the expectation ((1-lambda) weight is put on the AV@R component. Inreasing values of lambda are less risk averse (more weight on expectation).\nbeta: The quantile at which to calculate the Average Value @ Risk. Increasing values of beta are less risk averse. If beta=0, then the AV@R component is the worst case risk measure.\n\n\n\n\n\n","category":"function"},{"location":"guides/add_a_risk_measure/#Distributionally-robust","page":"Add a risk measure","title":"Distributionally robust","text":"","category":"section"},{"location":"guides/add_a_risk_measure/","page":"Add a risk measure","title":"Add a risk measure","text":"SDDP.jl supports two types of distrbutionally robust risk measures: the modified Χ² method of Philpott et al. (2018), and a method based on the Wasserstein distance metric.","category":"page"},{"location":"guides/add_a_risk_measure/#Modified-Chi-squard","page":"Add a risk measure","title":"Modified Chi-squard","text":"","category":"section"},{"location":"guides/add_a_risk_measure/","page":"Add a risk measure","title":"Add a risk measure","text":"SDDP.ModifiedChiSquared","category":"page"},{"location":"guides/add_a_risk_measure/#SDDP.ModifiedChiSquared","page":"Add a risk measure","title":"SDDP.ModifiedChiSquared","text":"ModifiedChiSquared(radius::Float64; minimum_std=1e-5)\n\nThe distributionally robust SDDP risk measure of Philpott, A., de Matos, V., Kapelevich, L. Distributionally robust SDDP. 
Computational Management Science (2018) 165:431-454.\n\nIf the uncorrected standard deviation of the objecive realizations is less than minimum_std, then the risk-measure will default to Expectation().\n\n\n\n\n\n","category":"type"},{"location":"guides/add_a_risk_measure/","page":"Add a risk measure","title":"Add a risk measure","text":"SDDP.adjust_probability(\n SDDP.ModifiedChiSquared(0.5),\n risk_adjusted_probability,\n [0.25, 0.25, 0.25, 0.25],\n noise_supports,\n cost_realizations,\n is_minimization\n)\n\nround.(risk_adjusted_probability, digits = 4)\n\n# output\n\n4-element Array{Float64,1}:\n 0.3333\n 0.0447\n 0.622\n 0.0","category":"page"},{"location":"guides/add_a_risk_measure/#Wasserstein","page":"Add a risk measure","title":"Wasserstein","text":"","category":"section"},{"location":"guides/add_a_risk_measure/","page":"Add a risk measure","title":"Add a risk measure","text":"SDDP.Wasserstein","category":"page"},{"location":"guides/add_a_risk_measure/#SDDP.Wasserstein","page":"Add a risk measure","title":"SDDP.Wasserstein","text":"Wasserstein(norm::Function, solver_factory; alpha::Float64)\n\nA distributionally-robust risk measure based on the Wasserstein distance.\n\nAs alpha increases, the measure becomes more risk-averse. When alpha=0, the measure is equivalent to the expectation operator. As alpha increases, the measure approaches the Worst-case risk measure.\n\n\n\n\n\n","category":"type"},{"location":"guides/add_a_risk_measure/","page":"Add a risk measure","title":"Add a risk measure","text":"risk_measure = SDDP.Wasserstein(\n GLPK.Optimizer; alpha=0.5) do x, y\n return abs(x - y)\nend\n\nSDDP.adjust_probability(\n risk_measure,\n risk_adjusted_probability,\n nominal_probability,\n noise_supports,\n cost_realizations,\n is_minimization\n)\n\nround.(risk_adjusted_probability, digits = 1)\n\n# output\n\n4-element Array{Float64,1}:\n 0.1\n 0.1\n 0.8\n 0.0","category":"page"},{"location":"guides/add_a_risk_measure/#Entropic","page":"Add a risk measure","title":"Entropic","text":"","category":"section"},{"location":"guides/add_a_risk_measure/","page":"Add a risk measure","title":"Add a risk measure","text":"SDDP.Entropic","category":"page"},{"location":"guides/add_a_risk_measure/#SDDP.Entropic","page":"Add a risk measure","title":"SDDP.Entropic","text":"Entropic(γ::Float64)\n\nThe entropic risk measure as described by Dowson, Morton, and Pagnoncelli (2020). Multistage stochastic programs with the entropic risk measure. 
http://www.optimization-online.org/DB_HTML/2020/08/7984.html\n\n\n\n\n\n","category":"type"},{"location":"guides/add_a_risk_measure/","page":"Add a risk measure","title":"Add a risk measure","text":"risk_measure = SDDP.Entropic(0.1)\n\nSDDP.adjust_probability(\n risk_measure,\n risk_adjusted_probability,\n nominal_probability,\n noise_supports,\n cost_realizations,\n is_minimization\n)\n\nround.(risk_adjusted_probability, digits = 4)\n\n# output\n\n4-element Array{Float64,1}:\n 0.11\n 0.1991\n 0.3648\n 0.326","category":"page"},{"location":"examples/infinite_horizon_trivial/","page":"Infinite horizon trivial","title":"Infinite horizon trivial","text":"EditURL = \"https://github.com/odow/SDDP.jl/blob/master/docs/src/examples/infinite_horizon_trivial.jl\"","category":"page"},{"location":"examples/infinite_horizon_trivial/#Infinite-horizon-trivial","page":"Infinite horizon trivial","title":"Infinite horizon trivial","text":"","category":"section"},{"location":"examples/infinite_horizon_trivial/","page":"Infinite horizon trivial","title":"Infinite horizon trivial","text":"using SDDP, GLPK, Test\n\nfunction infinite_trivial()\n graph = SDDP.Graph(\n :root_node, [:week], [(:root_node => :week, 1.0), (:week => :week, 0.9)]\n )\n model = SDDP.PolicyGraph(\n graph,\n lower_bound = 0.0,\n optimizer = GLPK.Optimizer,\n ) do subproblem, node\n @variable(subproblem, state, SDDP.State, initial_value = 0)\n @constraint(subproblem, state.in == state.out)\n @stageobjective(subproblem, 2.0)\n end\n SDDP.train(model; iteration_limit = 100, log_frequency = 10)\n @test SDDP.calculate_bound(model) ≈ 2.0 / (1 - 0.9) atol = 1e-3\n return\nend\n\ninfinite_trivial()","category":"page"},{"location":"examples/infinite_horizon_trivial/","page":"Infinite horizon trivial","title":"Infinite horizon trivial","text":"------------------------------------------------------------------------------\n SDDP.jl (c) Oscar Dowson, 2017-21\n\nProblem\n Nodes : 1\n State variables : 1\n Scenarios : Inf\n Solver : serial mode\n\nNumerical stability report\n Non-zero Matrix range [1e+00, 1e+00]\n Non-zero Objective range [1e+00, 1e+00]\n Non-zero Bounds range [0e+00, 0e+00]\n Non-zero RHS range [0e+00, 0e+00]\nNo problems detected\n\n Iteration Simulation Bound Time (s) Proc. 
ID # Solves\n 10 1.400000e+01 1.999947e+01 1.269007e-02 1 208\n 20 2.000000e+01 2.000000e+01 2.885199e-02 1 420\n 30 8.000000e+00 2.000000e+01 5.411196e-02 1 692\n 40 1.200000e+01 2.000000e+01 8.066416e-02 1 920\n 50 1.800000e+01 2.000000e+01 1.195760e-01 1 1182\n 60 4.000000e+00 2.000000e+01 1.645830e-01 1 1454\n 70 2.000000e+00 2.000000e+01 1.948640e-01 1 1618\n 80 3.000000e+01 2.000000e+01 2.402132e-01 1 1852\n 90 1.000000e+02 2.000000e+01 2.934611e-01 1 2102\n 100 2.000000e+00 2.000000e+01 3.781960e-01 1 2396\n\nTerminating training with status: iteration_limit\n------------------------------------------------------------------------------\n","category":"page"},{"location":"examples/infinite_horizon_trivial/","page":"Infinite horizon trivial","title":"Infinite horizon trivial","text":"","category":"page"},{"location":"examples/infinite_horizon_trivial/","page":"Infinite horizon trivial","title":"Infinite horizon trivial","text":"This page was generated using Literate.jl.","category":"page"},{"location":"tutorial/04_markov_uncertainty/","page":"Basic IV: Markov uncertainty","title":"Basic IV: Markov uncertainty","text":"EditURL = \"https://github.com/odow/SDDP.jl/blob/master/docs/src/tutorial/04_markov_uncertainty.jl\"","category":"page"},{"location":"tutorial/04_markov_uncertainty/#Basic-IV:-Markov-uncertainty","page":"Basic IV: Markov uncertainty","title":"Basic IV: Markov uncertainty","text":"","category":"section"},{"location":"tutorial/04_markov_uncertainty/","page":"Basic IV: Markov uncertainty","title":"Basic IV: Markov uncertainty","text":"In our three tutorials (Basic I: first steps, Basic II: adding uncertainty, and Basic III: objective uncertainty), we formulated a simple hydrothermal scheduling problem with stagewise-independent noise in the right-hand side of the constraints and in the objective function. Now, in this tutorial, we introduce some stagewise-dependent uncertainty using a Markov chain.","category":"page"},{"location":"tutorial/04_markov_uncertainty/#Formulating-the-problem","page":"Basic IV: Markov uncertainty","title":"Formulating the problem","text":"","category":"section"},{"location":"tutorial/04_markov_uncertainty/","page":"Basic IV: Markov uncertainty","title":"Basic IV: Markov uncertainty","text":"In this tutorial we consider a Markov chain with two climate states: wet and dry. Each Markov state is associated with an integer, in this case the wet climate state is Markov state 1 and the dry climate state is Markov state 2. In the wet climate state, the probability of the high inflow increases to 50%, and the probability of the low inflow decreases to 1/6. In the dry climate state, the converse happens. There is also persistence in the climate state: the probability of remaining in the current state is 75%, and the probability of transitioning to the other climate state is 25%. We assume that the first stage starts in the wet climate state.","category":"page"},{"location":"tutorial/04_markov_uncertainty/","page":"Basic IV: Markov uncertainty","title":"Basic IV: Markov uncertainty","text":"Here is a picture of the model we're going to implement.","category":"page"},{"location":"tutorial/04_markov_uncertainty/","page":"Basic IV: Markov uncertainty","title":"Basic IV: Markov uncertainty","text":"(Image: Markovian policy graph)","category":"page"},{"location":"tutorial/04_markov_uncertainty/","page":"Basic IV: Markov uncertainty","title":"Basic IV: Markov uncertainty","text":"There are five nodes in our graph. 
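(That is, one node for the first stage, which we assume starts in the wet climate state, plus a wet and a dry node in each of stages two and three: 1 + 2 + 2 = 5 nodes.)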
Each node is named by a tuple (t, i), where t is the stage for t=1,2,3, and i is the Markov state for i=1,2. As before, the wavy lines denote the stagewise-independent random variable.","category":"page"},{"location":"tutorial/04_markov_uncertainty/","page":"Basic IV: Markov uncertainty","title":"Basic IV: Markov uncertainty","text":"For each stage, we need to provide a Markov transition matrix. This is an MxN matrix, where the element A[i, j] gives the probability of transitioning from Markov state i in the previous stage to Markov state j in the current stage. The first stage is special because we assume there is a \"zero'th\" stage which has one Markov state (the round node in the graph above). Furthermore, the number of columns in the transition matrix of a stage (i.e. the number of Markov states) must equal the number of rows in the next stage's transition matrix. For our example, the vector of Markov transition matrices is given by:","category":"page"},{"location":"tutorial/04_markov_uncertainty/","page":"Basic IV: Markov uncertainty","title":"Basic IV: Markov uncertainty","text":"T = Array{Float64, 2}[\n [ 1.0 ]',\n [ 0.75 0.25 ],\n [ 0.75 0.25 ; 0.25 0.75 ]\n]","category":"page"},{"location":"tutorial/04_markov_uncertainty/","page":"Basic IV: Markov uncertainty","title":"Basic IV: Markov uncertainty","text":"3-element Array{Array{Float64,2},1}:\n [1.0] \n [0.75 0.25] \n [0.75 0.25; 0.25 0.75]","category":"page"},{"location":"tutorial/04_markov_uncertainty/","page":"Basic IV: Markov uncertainty","title":"Basic IV: Markov uncertainty","text":"note: Note\nMake sure to add the ' after the first transition matrix so Julia can distinguish between a vector and a matrix.","category":"page"},{"location":"tutorial/04_markov_uncertainty/#Creating-a-model","page":"Basic IV: Markov uncertainty","title":"Creating a model","text":"","category":"section"},{"location":"tutorial/04_markov_uncertainty/","page":"Basic IV: Markov uncertainty","title":"Basic IV: Markov uncertainty","text":"using SDDP, GLPK\n\nΩ = [\n (inflow = 0.0, fuel_multiplier = 1.5),\n (inflow = 50.0, fuel_multiplier = 1.0),\n (inflow = 100.0, fuel_multiplier = 0.75)\n]\n\nmodel = SDDP.MarkovianPolicyGraph(\n transition_matrices = Array{Float64, 2}[\n [ 1.0 ]',\n [ 0.75 0.25 ],\n [ 0.75 0.25 ; 0.25 0.75 ]\n ],\n sense = :Min,\n lower_bound = 0.0,\n optimizer = GLPK.Optimizer\n) do subproblem, node\n # Unpack the stage and Markov index.\n t, markov_state = node\n # Define the state variable.\n @variable(subproblem, 0 <= volume <= 200, SDDP.State, initial_value = 200)\n # Define the control variables.\n @variables(subproblem, begin\n thermal_generation >= 0\n hydro_generation >= 0\n hydro_spill >= 0\n inflow\n end)\n # Define the constraints\n @constraints(subproblem, begin\n volume.out == volume.in + inflow - hydro_generation - hydro_spill\n thermal_generation + hydro_generation == 150.0\n end)\n # Note how we can use `markov_state` to dispatch an `if` statement.\n probability = if markov_state == 1 # wet climate state\n [1/6, 1/3, 1/2]\n else # dry climate state\n [1/2, 1/3, 1/6]\n end\n\n fuel_cost = [50.0, 100.0, 150.0]\n SDDP.parameterize(subproblem, Ω, probability) do ω\n JuMP.fix(inflow, ω.inflow)\n @stageobjective(subproblem,\n ω.fuel_multiplier * fuel_cost[t] * thermal_generation\n )\n end\nend","category":"page"},{"location":"tutorial/04_markov_uncertainty/","page":"Basic IV: Markov uncertainty","title":"Basic IV: Markov uncertainty","text":"A policy graph with 5 nodes.\n Node indices: (1, 1), (2, 1), (2, 2), (3, 1), (3, 
2)\n","category":"page"},{"location":"tutorial/04_markov_uncertainty/","page":"Basic IV: Markov uncertainty","title":"Basic IV: Markov uncertainty","text":"tip: Tip\nFor more information on SDDP.MarkovianPolicyGraphs, read Create a general policy graph.","category":"page"},{"location":"tutorial/04_markov_uncertainty/#Training-and-simulating-the-policy","page":"Basic IV: Markov uncertainty","title":"Training and simulating the policy","text":"","category":"section"},{"location":"tutorial/04_markov_uncertainty/","page":"Basic IV: Markov uncertainty","title":"Basic IV: Markov uncertainty","text":"As in the previous three tutorials, we train the policy:","category":"page"},{"location":"tutorial/04_markov_uncertainty/","page":"Basic IV: Markov uncertainty","title":"Basic IV: Markov uncertainty","text":"SDDP.train(model; iteration_limit = 10)","category":"page"},{"location":"tutorial/04_markov_uncertainty/","page":"Basic IV: Markov uncertainty","title":"Basic IV: Markov uncertainty","text":"------------------------------------------------------------------------------\n SDDP.jl (c) Oscar Dowson, 2017-21\n\nProblem\n Nodes : 5\n State variables : 1\n Scenarios : 1.08000e+02\n Solver : serial mode\n\nNumerical stability report\n Non-zero Matrix range [1e+00, 1e+00]\n Non-zero Objective range [1e+00, 2e+02]\n Non-zero Bounds range [2e+02, 2e+02]\n Non-zero RHS range [2e+02, 2e+02]\nNo problems detected\n\n Iteration Simulation Bound Time (s) Proc. ID # Solves\n 1 2.250000e+04 5.588397e+03 1.218171e-01 1 18\n 2 1.315909e+04 7.975336e+03 1.229231e-01 1 36\n 3 1.875000e+03 7.975336e+03 1.238880e-01 1 54\n 4 1.125000e+04 7.975336e+03 1.250000e-01 1 72\n 5 1.875000e+03 8.072917e+03 1.260631e-01 1 90\n 6 1.875000e+03 8.072917e+03 1.270490e-01 1 108\n 7 1.875000e+03 8.072917e+03 1.280081e-01 1 126\n 8 2.750000e+04 8.072917e+03 1.290519e-01 1 144\n 9 5.000000e+03 8.072917e+03 1.302831e-01 1 162\n 10 5.000000e+03 8.072917e+03 1.312740e-01 1 180\n\nTerminating training with status: iteration_limit\n------------------------------------------------------------------------------\n","category":"page"},{"location":"tutorial/04_markov_uncertainty/","page":"Basic IV: Markov uncertainty","title":"Basic IV: Markov uncertainty","text":"Instead of performing a Monte Carlo simulation like the previous tutorials, we may want to simulate one particular sequence of noise realizations. 
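(For example, the particular sequence used below visits node (1, 1) with zero inflow, node (2, 2) with the high inflow, and node (3, 1) with the medium inflow.)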
This historical simulation can also be conducted by passing a SDDP.Historical sampling scheme to the sampling_scheme keyword of the SDDP.simulate function.","category":"page"},{"location":"tutorial/04_markov_uncertainty/","page":"Basic IV: Markov uncertainty","title":"Basic IV: Markov uncertainty","text":"We can confirm that the historical sequence of nodes was visited by querying the :node_index key of the simulation results.","category":"page"},{"location":"tutorial/04_markov_uncertainty/","page":"Basic IV: Markov uncertainty","title":"Basic IV: Markov uncertainty","text":"simulations = SDDP.simulate(\n model,\n sampling_scheme = SDDP.Historical([\n ((1, 1), Ω[1]),\n ((2, 2), Ω[3]),\n ((3, 1), Ω[2])\n ])\n)\n\n[stage[:node_index] for stage in simulations[1]]","category":"page"},{"location":"tutorial/04_markov_uncertainty/","page":"Basic IV: Markov uncertainty","title":"Basic IV: Markov uncertainty","text":"3-element Array{Tuple{Int64,Int64},1}:\n (1, 1)\n (2, 2)\n (3, 1)","category":"page"},{"location":"tutorial/04_markov_uncertainty/","page":"Basic IV: Markov uncertainty","title":"Basic IV: Markov uncertainty","text":"This concludes our fourth tutorial for SDDP.jl. In the next tutorial, Basic V: plotting we discuss the plotting utilities included in SDDP.jl.","category":"page"},{"location":"tutorial/04_markov_uncertainty/","page":"Basic IV: Markov uncertainty","title":"Basic IV: Markov uncertainty","text":"","category":"page"},{"location":"tutorial/04_markov_uncertainty/","page":"Basic IV: Markov uncertainty","title":"Basic IV: Markov uncertainty","text":"This page was generated using Literate.jl.","category":"page"},{"location":"examples/air_conditioning/","page":"Air conditioning","title":"Air conditioning","text":"EditURL = \"https://github.com/odow/SDDP.jl/blob/master/docs/src/examples/air_conditioning.jl\"","category":"page"},{"location":"examples/air_conditioning/#Air-conditioning","page":"Air conditioning","title":"Air conditioning","text":"","category":"section"},{"location":"examples/air_conditioning/","page":"Air conditioning","title":"Air conditioning","text":"Taken from Anthony Papavasiliou's notes on SDDP","category":"page"},{"location":"examples/air_conditioning/","page":"Air conditioning","title":"Air conditioning","text":"Consider the following problem","category":"page"},{"location":"examples/air_conditioning/","page":"Air conditioning","title":"Air conditioning","text":"Produce air conditioners for 3 months\n200 units/month at 100 \\$/unit\nOvertime costs 300 \\$/unit\nKnown demand of 100 units for period 1\nEqually likely demand, 100 or 300 units, for periods 2, 3\nStorage cost is 50 \\$/unit\nAll demand must be met","category":"page"},{"location":"examples/air_conditioning/","page":"Air conditioning","title":"Air conditioning","text":"The known optimal solution is \\$62,500","category":"page"},{"location":"examples/air_conditioning/","page":"Air conditioning","title":"Air conditioning","text":"using SDDP, GLPK, Test\n\nfunction air_conditioning_model(integrality_handler)\n model = SDDP.LinearPolicyGraph(\n stages = 3,\n lower_bound = 0.0,\n optimizer = GLPK.Optimizer,\n integrality_handler = integrality_handler,\n ) do sp, stage\n @variable(sp, 0 <= stored_production <= 100, Int, SDDP.State, initial_value = 0)\n @variable(sp, 0 <= production <= 200, Int)\n @variable(sp, overtime >= 0, Int)\n @variable(sp, demand)\n DEMAND = [[100.0], [100.0, 300.0], [100.0, 300.0]]\n SDDP.parameterize(ω -> JuMP.fix(demand, ω), sp, DEMAND[stage])\n @constraint(\n sp,\n 
stored_production.out == stored_production.in + production + overtime - demand\n )\n @stageobjective(sp, 100 * production + 300 * overtime + 50 * stored_production.out)\n end\n SDDP.train(model, iteration_limit = 20, log_frequency = 10)\n @test SDDP.calculate_bound(model) ≈ 62_500.0\n return\nend\n\nfor integrality_handler in [SDDP.SDDiP(), SDDP.ContinuousRelaxation()]\n air_conditioning_model(integrality_handler)\nend","category":"page"},{"location":"examples/air_conditioning/","page":"Air conditioning","title":"Air conditioning","text":"------------------------------------------------------------------------------\n SDDP.jl (c) Oscar Dowson, 2017-21\n\nProblem\n Nodes : 3\n State variables : 7\n Scenarios : 4.00000e+00\n Solver : serial mode\n\nNumerical stability report\n Non-zero Matrix range [1e+00, 6e+01]\n Non-zero Objective range [1e+00, 3e+02]\n Non-zero Bounds range [1e+02, 2e+02]\n Non-zero RHS range [0e+00, 0e+00]\nNo problems detected\n\n Iteration Simulation Bound Time (s) Proc. ID # Solves\n 10 6.000000e+04 6.250000e+04 2.515037e+00 1 80\n 20 6.000000e+04 6.250000e+04 2.589600e+00 1 160\n\nTerminating training with status: iteration_limit\n------------------------------------------------------------------------------\n------------------------------------------------------------------------------\n SDDP.jl (c) Oscar Dowson, 2017-21\n\nProblem\n Nodes : 3\n State variables : 1\n Scenarios : 4.00000e+00\n Solver : serial mode\n\nNumerical stability report\n Non-zero Matrix range [1e+00, 1e+00]\n Non-zero Objective range [1e+00, 3e+02]\n Non-zero Bounds range [1e+02, 2e+02]\n Non-zero RHS range [0e+00, 0e+00]\nNo problems detected\n\n Iteration Simulation Bound Time (s) Proc. ID # Solves\n 10 4.000000e+04 6.250000e+04 5.189896e-03 1 80\n 20 6.000000e+04 6.250000e+04 9.039879e-03 1 160\n\nTerminating training with status: iteration_limit\n------------------------------------------------------------------------------\n","category":"page"},{"location":"examples/air_conditioning/","page":"Air conditioning","title":"Air conditioning","text":"","category":"page"},{"location":"examples/air_conditioning/","page":"Air conditioning","title":"Air conditioning","text":"This page was generated using Literate.jl.","category":"page"},{"location":"examples/sldp_example_two/","page":"SLDP: example 2","title":"SLDP: example 2","text":"EditURL = \"https://github.com/odow/SDDP.jl/blob/master/docs/src/examples/sldp_example_two.jl\"","category":"page"},{"location":"examples/sldp_example_two/#SLDP:-example-2","page":"SLDP: example 2","title":"SLDP: example 2","text":"","category":"section"},{"location":"examples/sldp_example_two/","page":"SLDP: example 2","title":"SLDP: example 2","text":"This example is derived from Section 4.3 of the paper: Ahmed, S., Cabral, F. G., & da Costa, B. F. P. (2019). Stochastic Lipschitz Dynamic Programming. Optimization Online. 
PDF","category":"page"},{"location":"examples/sldp_example_two/","page":"SLDP: example 2","title":"SLDP: example 2","text":"using SDDP, GLPK, Test\n\nfunction sldp_example_two(; first_stage_integer::Bool = true, N = 2)\n model = SDDP.LinearPolicyGraph(\n stages = 2,\n lower_bound = -100.0,\n optimizer = GLPK.Optimizer,\n ) do sp, t\n @variable(sp, 0 <= x[1:2] <= 5, SDDP.State, initial_value = 0.0)\n if t == 1\n if first_stage_integer\n @variable(sp, 0 <= u[1:2] <= 5, Int)\n @constraint(sp, [i = 1:2], u[i] == x[i].out)\n end\n @stageobjective(sp, -1.5 * x[1].out - 4 * x[2].out)\n else\n @variable(sp, 0 <= y[1:4] <= 1, Bin)\n @variable(sp, ω[1:2])\n @stageobjective(sp, -16 * y[1] - 19 * y[2] - 23 * y[3] - 28 * y[4])\n @constraint(sp, 2 * y[1] + 3 * y[2] + 4 * y[3] + 5 * y[4] <= ω[1] - x[1].in)\n @constraint(sp, 6 * y[1] + 1 * y[2] + 3 * y[3] + 2 * y[4] <= ω[2] - x[2].in)\n steps = range(5, stop = 15, length = N)\n SDDP.parameterize(sp, [[i, j] for i in steps for j in steps]) do φ\n JuMP.fix.(ω, φ)\n end\n end\n end\n SDDP.train(model, iteration_limit = 100, log_frequency = 10)\n bound = SDDP.calculate_bound(model)\n\n if N == 2\n @test bound <= -57.0\n elseif N == 3\n @test bound <= -59.33\n elseif N == 6\n @test bound <= -61.22\n end\n return\nend\n\nsldp_example_two(N = 2)\nsldp_example_two(N = 3)\nsldp_example_two(N = 6)","category":"page"},{"location":"examples/sldp_example_two/","page":"SLDP: example 2","title":"SLDP: example 2","text":"------------------------------------------------------------------------------\n SDDP.jl (c) Oscar Dowson, 2017-21\n\nProblem\n Nodes : 2\n State variables : 2\n Scenarios : 4.00000e+00\n Solver : serial mode\n\nNumerical stability report\n Non-zero Matrix range [1e+00, 6e+00]\n Non-zero Objective range [1e+00, 3e+01]\n Non-zero Bounds range [1e+00, 1e+02]\n Non-zero RHS range [0e+00, 0e+00]\nNo problems detected\n\n Iteration Simulation Bound Time (s) Proc. ID # Solves\n 10 -4.500000e+01 -5.809615e+01 4.281044e-03 1 70\n 20 -9.800000e+01 -5.809615e+01 7.774115e-03 1 140\n 30 -4.500000e+01 -5.809615e+01 1.145697e-02 1 210\n 40 -4.500000e+01 -5.809615e+01 1.543713e-02 1 280\n 50 -4.500000e+01 -5.809615e+01 1.916695e-02 1 350\n 60 -4.700000e+01 -5.809615e+01 2.320004e-02 1 420\n 70 -4.700000e+01 -5.809615e+01 4.452205e-02 1 490\n 80 -4.700000e+01 -5.809615e+01 4.856801e-02 1 560\n 90 -9.800000e+01 -5.809615e+01 5.247402e-02 1 630\n 100 -4.700000e+01 -5.809615e+01 5.645800e-02 1 700\n\nTerminating training with status: iteration_limit\n------------------------------------------------------------------------------\n------------------------------------------------------------------------------\n SDDP.jl (c) Oscar Dowson, 2017-21\n\nProblem\n Nodes : 2\n State variables : 2\n Scenarios : 9.00000e+00\n Solver : serial mode\n\nNumerical stability report\n Non-zero Matrix range [1e+00, 6e+00]\n Non-zero Objective range [1e+00, 3e+01]\n Non-zero Bounds range [1e+00, 1e+02]\n Non-zero RHS range [0e+00, 0e+00]\nNo problems detected\n\n Iteration Simulation Bound Time (s) Proc. 
ID # Solves\n 10 -4.700000e+01 -6.196125e+01 1.137900e-02 1 120\n 20 -4.238462e+01 -6.196125e+01 1.616096e-02 1 240\n 30 -4.238462e+01 -6.196125e+01 2.253103e-02 1 360\n 40 -4.500000e+01 -6.196125e+01 2.748489e-02 1 480\n 50 -7.560000e+01 -6.196125e+01 3.326893e-02 1 600\n 60 -9.800000e+01 -6.196125e+01 3.911591e-02 1 720\n 70 -9.800000e+01 -6.196125e+01 4.473495e-02 1 840\n 80 -4.500000e+01 -6.196125e+01 5.068803e-02 1 960\n 90 -4.700000e+01 -6.196125e+01 5.634189e-02 1 1080\n 100 -4.700000e+01 -6.196125e+01 6.295800e-02 1 1200\n\nTerminating training with status: iteration_limit\n------------------------------------------------------------------------------\n------------------------------------------------------------------------------\n SDDP.jl (c) Oscar Dowson, 2017-21\n\nProblem\n Nodes : 2\n State variables : 2\n Scenarios : 3.60000e+01\n Solver : serial mode\n\nNumerical stability report\n Non-zero Matrix range [1e+00, 6e+00]\n Non-zero Objective range [1e+00, 3e+01]\n Non-zero Bounds range [1e+00, 1e+02]\n Non-zero RHS range [0e+00, 0e+00]\nNo problems detected\n\n Iteration Simulation Bound Time (s) Proc. ID # Solves\n 10 -6.946154e+01 -6.554113e+01 2.032495e-02 1 390\n 20 -7.760000e+01 -6.554113e+01 3.390980e-02 1 780\n 30 -5.530769e+01 -6.554113e+01 4.841280e-02 1 1170\n 40 -7.071429e+01 -6.554113e+01 7.885599e-02 1 1560\n 50 -8.760000e+01 -6.554113e+01 9.226584e-02 1 1950\n 60 -4.300000e+01 -6.554113e+01 1.064448e-01 1 2340\n 70 -6.946154e+01 -6.554113e+01 1.210530e-01 1 2730\n 80 -4.780000e+01 -6.554113e+01 1.340020e-01 1 3120\n 90 -4.484615e+01 -6.554113e+01 1.479130e-01 1 3510\n 100 -4.300000e+01 -6.554113e+01 1.640818e-01 1 3900\n\nTerminating training with status: iteration_limit\n------------------------------------------------------------------------------\n","category":"page"},{"location":"examples/sldp_example_two/","page":"SLDP: example 2","title":"SLDP: example 2","text":"","category":"page"},{"location":"examples/sldp_example_two/","page":"SLDP: example 2","title":"SLDP: example 2","text":"This page was generated using Literate.jl.","category":"page"},{"location":"tutorial/02_adding_uncertainty/","page":"Basic II: adding uncertainty","title":"Basic II: adding uncertainty","text":"EditURL = \"https://github.com/odow/SDDP.jl/blob/master/docs/src/tutorial/02_adding_uncertainty.jl\"","category":"page"},{"location":"tutorial/02_adding_uncertainty/#Basic-II:-adding-uncertainty","page":"Basic II: adding uncertainty","title":"Basic II: adding uncertainty","text":"","category":"section"},{"location":"tutorial/02_adding_uncertainty/","page":"Basic II: adding uncertainty","title":"Basic II: adding uncertainty","text":"In the previous tutorial, Basic I: first steps, we created a deterministic hydro-thermal scheduling model. In this tutorial, we extend the problem by adding uncertainty.","category":"page"},{"location":"tutorial/02_adding_uncertainty/","page":"Basic II: adding uncertainty","title":"Basic II: adding uncertainty","text":"The key development is that we are going to implement a function called SDDP.parameterize, which modifies the subproblem based on a realization of the uncertainty omega.","category":"page"},{"location":"tutorial/02_adding_uncertainty/","page":"Basic II: adding uncertainty","title":"Basic II: adding uncertainty","text":"Notably missing from our previous model were inflows. Inflows are the water that flows into the reservoir through rainfall or rivers. 
These inflows are uncertain, and are the cause of the main trade-off in hydro-thermal scheduling: the desire to use water now to generate cheap electricity, against the risk that future inflows will be low, leading to blackouts or expensive thermal generation.","category":"page"},{"location":"tutorial/02_adding_uncertainty/","page":"Basic II: adding uncertainty","title":"Basic II: adding uncertainty","text":"For our simple model, we assume that the inflows can be modelled by a discrete distribution with the three outcomes given in the following table:","category":"page"},{"location":"tutorial/02_adding_uncertainty/","page":"Basic II: adding uncertainty","title":"Basic II: adding uncertainty","text":"ω 0 50 100\nP(ω) 1/3 1/3 1/3","category":"page"},{"location":"tutorial/02_adding_uncertainty/","page":"Basic II: adding uncertainty","title":"Basic II: adding uncertainty","text":"The value of the noise (the random variable) is observed by the agent at the start of each stage. This makes the problem a wait-and-see or hazard-decision formulation.","category":"page"},{"location":"tutorial/02_adding_uncertainty/","page":"Basic II: adding uncertainty","title":"Basic II: adding uncertainty","text":"To represent this, we can draw the following picture. The wavy lines denote the uncertainty arriving into the start of each stage (node).","category":"page"},{"location":"tutorial/02_adding_uncertainty/","page":"Basic II: adding uncertainty","title":"Basic II: adding uncertainty","text":"(Image: Linear policy graph)","category":"page"},{"location":"tutorial/02_adding_uncertainty/","page":"Basic II: adding uncertainty","title":"Basic II: adding uncertainty","text":"In addition to adding this uncertainty to the model, we also need to modify the dynamics to include inflow:","category":"page"},{"location":"tutorial/02_adding_uncertainty/","page":"Basic II: adding uncertainty","title":"Basic II: adding uncertainty","text":"volume.out = volume.in + inflow - hydro_generation - hydro_spill","category":"page"},{"location":"tutorial/02_adding_uncertainty/#Creating-a-model","page":"Basic II: adding uncertainty","title":"Creating a model","text":"","category":"section"},{"location":"tutorial/02_adding_uncertainty/","page":"Basic II: adding uncertainty","title":"Basic II: adding uncertainty","text":"To add an uncertain variable to the model, we create a new JuMP variable inflow, and then call the function SDDP.parameterize. The SDDP.parameterize function takes three arguments: the subproblem, a vector of realizations, and a corresponding vector of probabilities.","category":"page"},{"location":"tutorial/02_adding_uncertainty/","page":"Basic II: adding uncertainty","title":"Basic II: adding uncertainty","text":"using SDDP, GLPK\n\nmodel = SDDP.LinearPolicyGraph(\n stages = 3,\n sense = :Min,\n lower_bound = 0.0,\n optimizer = GLPK.Optimizer\n) do subproblem, t\n # Define the state variable.\n @variable(subproblem, 0 <= volume <= 200, SDDP.State, initial_value = 200)\n # Define the control variables.\n @variables(subproblem, begin\n thermal_generation >= 0\n hydro_generation >= 0\n hydro_spill >= 0\n inflow\n end)\n # Define the constraints\n @constraints(subproblem, begin\n volume.out == volume.in + inflow - hydro_generation - hydro_spill\n demand_constraint, thermal_generation + hydro_generation == 150.0\n end)\n # Define the objective for each stage `t`. 
Note that we can use `t` as an\n # index for t = 1, 2, 3.\n fuel_cost = [50.0, 100.0, 150.0]\n @stageobjective(subproblem, fuel_cost[t] * thermal_generation)\n # Parameterize the subproblem.\n SDDP.parameterize(subproblem, [0.0, 50.0, 100.0], [1/3, 1/3, 1/3]) do ω\n JuMP.fix(inflow, ω)\n end\nend","category":"page"},{"location":"tutorial/02_adding_uncertainty/","page":"Basic II: adding uncertainty","title":"Basic II: adding uncertainty","text":"A policy graph with 3 nodes.\n Node indices: 1, 2, 3\n","category":"page"},{"location":"tutorial/02_adding_uncertainty/","page":"Basic II: adding uncertainty","title":"Basic II: adding uncertainty","text":"Note how we use the JuMP function JuMP.fix to set the value of the inflow variable to ω.","category":"page"},{"location":"tutorial/02_adding_uncertainty/","page":"Basic II: adding uncertainty","title":"Basic II: adding uncertainty","text":"note: Note\nSDDP.parameterize can only be called once in each subproblem definition!","category":"page"},{"location":"tutorial/02_adding_uncertainty/#Training-and-simulating-the-policy","page":"Basic II: adding uncertainty","title":"Training and simulating the policy","text":"","category":"section"},{"location":"tutorial/02_adding_uncertainty/","page":"Basic II: adding uncertainty","title":"Basic II: adding uncertainty","text":"As in Basic I: first steps, we train the policy:","category":"page"},{"location":"tutorial/02_adding_uncertainty/","page":"Basic II: adding uncertainty","title":"Basic II: adding uncertainty","text":"SDDP.train(model; iteration_limit = 10)","category":"page"},{"location":"tutorial/02_adding_uncertainty/","page":"Basic II: adding uncertainty","title":"Basic II: adding uncertainty","text":"------------------------------------------------------------------------------\n SDDP.jl (c) Oscar Dowson, 2017-21\n\nProblem\n Nodes : 3\n State variables : 1\n Scenarios : 2.70000e+01\n Solver : serial mode\n\nNumerical stability report\n Non-zero Matrix range [1e+00, 1e+00]\n Non-zero Objective range [1e+00, 2e+02]\n Non-zero Bounds range [2e+02, 2e+02]\n Non-zero RHS range [2e+02, 2e+02]\nNo problems detected\n\n Iteration Simulation Bound Time (s) Proc. ID # Solves\n 1 1.500000e+04 5.000000e+03 1.778126e-03 1 12\n 2 7.500000e+03 8.333333e+03 2.192020e-03 1 24\n 3 8.750000e+03 8.333333e+03 2.607107e-03 1 36\n 4 7.500000e+03 8.333333e+03 3.015995e-03 1 48\n 5 7.500000e+03 8.333333e+03 3.402948e-03 1 60\n 6 2.500000e+03 8.333333e+03 3.843069e-03 1 72\n 7 5.000000e+03 8.333333e+03 4.255056e-03 1 84\n 8 2.000000e+04 8.333333e+03 4.863977e-03 1 96\n 9 5.000000e+03 8.333333e+03 5.532026e-03 1 108\n 10 5.000000e+03 8.333333e+03 6.032944e-03 1 120\n\nTerminating training with status: iteration_limit\n------------------------------------------------------------------------------\n","category":"page"},{"location":"tutorial/02_adding_uncertainty/","page":"Basic II: adding uncertainty","title":"Basic II: adding uncertainty","text":"We can also simulate the policy. Note that this time, the simulation is stochastic. One common approach to quantify the quality of the policy is to perform a Monte Carlo simulation and then form a confidence interval for the expected cost. 
This confidence interval is an estimate for the upper bound.","category":"page"},{"location":"tutorial/02_adding_uncertainty/","page":"Basic II: adding uncertainty","title":"Basic II: adding uncertainty","text":"In addition to the confidence interval, we can calculate the lower bound using SDDP.calculate_bound.","category":"page"},{"location":"tutorial/02_adding_uncertainty/","page":"Basic II: adding uncertainty","title":"Basic II: adding uncertainty","text":"using Statistics\n\nsimulations = SDDP.simulate(model, 500)\n\nobjective_values = [\n sum(stage[:stage_objective] for stage in sim) for sim in simulations\n]\n\nμ = round(mean(objective_values), digits = 2)\n\nci = round(1.96 * std(objective_values) / sqrt(500), digits = 2)\n\nprintln(\"Confidence interval: \", μ, \" ± \", ci)\nprintln(\"Lower bound: \", round(SDDP.calculate_bound(model), digits = 2))","category":"page"},{"location":"tutorial/02_adding_uncertainty/","page":"Basic II: adding uncertainty","title":"Basic II: adding uncertainty","text":"Confidence interval: 8360.0 ± 412.58\nLower bound: 8333.33\n","category":"page"},{"location":"tutorial/02_adding_uncertainty/","page":"Basic II: adding uncertainty","title":"Basic II: adding uncertainty","text":"In addition to simulating the primal values of variables, we can also pass SDDP.jl custom recorder functions. Each of these functions takes one argument, the JuMP subproblem, and returns anything you can compute. For example, the dual of the demand constraint (which we named demand_constraint) corresponds to the price we should charge for electricity, since it represents the cost of each additional unit of demand. To calculate this, we can go","category":"page"},{"location":"tutorial/02_adding_uncertainty/","page":"Basic II: adding uncertainty","title":"Basic II: adding uncertainty","text":"simulations = SDDP.simulate(\n model,\n 1,\n custom_recorders = Dict{Symbol, Function}(\n :price => (sp) -> JuMP.dual(sp[:demand_constraint])\n )\n)\n\nprices = [stage[:price] for stage in simulations[1]]","category":"page"},{"location":"tutorial/02_adding_uncertainty/","page":"Basic II: adding uncertainty","title":"Basic II: adding uncertainty","text":"3-element Array{Float64,1}:\n 50.0\n 100.0\n 150.0","category":"page"},{"location":"tutorial/02_adding_uncertainty/","page":"Basic II: adding uncertainty","title":"Basic II: adding uncertainty","text":"This concludes our second tutorial for SDDP.jl. 
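Before moving on, one further aside (this snippet is our own illustrative sketch, not part of the original tutorial): the same custom_recorders mechanism can record primal quantities as well as duals, for example the thermal generation chosen in each stage. Here :thermal is an illustrative key of our choosing, and JuMP.value reads the primal solution of the registered subproblem variable:\n\nsimulations = SDDP.simulate(\n    model,\n    1,\n    custom_recorders = Dict{Symbol, Function}(\n        # :thermal is an illustrative name, not from the tutorial.\n        :thermal => (sp) -> JuMP.value(sp[:thermal_generation])\n    )\n)\n\n[stage[:thermal] for stage in simulations[1]]\n\n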
In the next tutorial, Basic III: objective uncertainty, we extend the uncertainty to the fuel cost.","category":"page"},{"location":"tutorial/02_adding_uncertainty/","page":"Basic II: adding uncertainty","title":"Basic II: adding uncertainty","text":"","category":"page"},{"location":"tutorial/02_adding_uncertainty/","page":"Basic II: adding uncertainty","title":"Basic II: adding uncertainty","text":"This page was generated using Literate.jl.","category":"page"},{"location":"examples/objective_state_newsvendor/","page":"Newsvendor","title":"Newsvendor","text":"EditURL = \"https://github.com/odow/SDDP.jl/blob/master/docs/src/examples/objective_state_newsvendor.jl\"","category":"page"},{"location":"examples/objective_state_newsvendor/#Newsvendor","page":"Newsvendor","title":"Newsvendor","text":"","category":"section"},{"location":"examples/objective_state_newsvendor/","page":"Newsvendor","title":"Newsvendor","text":"This example is based on the classical newsvendor problem, but features an AR(1) spot-price.","category":"page"},{"location":"examples/objective_state_newsvendor/","page":"Newsvendor","title":"Newsvendor","text":" V(x[t-1], ω[t]) = max p[t] × u[t]\n subject to x[t] = x[t-1] - u[t] + ω[t]\n u[t] ∈ [0, 1]\n x[t] ≥ 0\n p[t] = p[t-1] + ϕ[t]","category":"page"},{"location":"examples/objective_state_newsvendor/","page":"Newsvendor","title":"Newsvendor","text":"The initial conditions are","category":"page"},{"location":"examples/objective_state_newsvendor/","page":"Newsvendor","title":"Newsvendor","text":"x[0] = 2.0\np[0] = 1.5\nω[t] ~ {0, 0.05, 0.10, ..., 0.45, 0.5} with uniform probability.\nϕ[t] ~ {-0.25, -0.125, 0.125, 0.25} with uniform probability.","category":"page"},{"location":"examples/objective_state_newsvendor/","page":"Newsvendor","title":"Newsvendor","text":"using SDDP, GLPK, Statistics, Test\n\nfunction joint_distribution(; kwargs...)\n names = tuple([first(kw) for kw in kwargs]...)\n values = tuple([last(kw) for kw in kwargs]...)\n output_type = NamedTuple{names,Tuple{eltype.(values)...}}\n distribution = map(output_type, Base.product(values...))\n return distribution[:]\nend\n\nfunction newsvendor_example(; cut_type)\n model = SDDP.PolicyGraph(\n SDDP.LinearGraph(3),\n sense = :Max,\n upper_bound = 50.0,\n optimizer = GLPK.Optimizer,\n ) do subproblem, stage\n @variables(subproblem, begin\n x >= 0, (SDDP.State, initial_value = 2)\n 0 <= u <= 1\n w\n end)\n @constraint(subproblem, x.out == x.in - u + w)\n SDDP.add_objective_state(\n subproblem,\n initial_value = 1.5,\n lower_bound = 0.75,\n upper_bound = 2.25,\n lipschitz = 100.0,\n ) do y, ω\n return y + ω.price_noise\n end\n noise_terms = joint_distribution(\n demand = 0:0.05:0.5,\n price_noise = [-0.25, -0.125, 0.125, 0.25],\n )\n SDDP.parameterize(subproblem, noise_terms) do ω\n JuMP.fix(w, ω.demand)\n price = SDDP.objective_state(subproblem)\n @stageobjective(subproblem, price * u)\n end\n end\n SDDP.train(\n model,\n iteration_limit = 100,\n log_frequency = 10,\n time_limit = 20.0,\n cut_type = cut_type,\n )\n @test SDDP.calculate_bound(model) ≈ 4.04 atol = 0.05\n results = SDDP.simulate(model, 500)\n objectives = [sum(s[:stage_objective] for s in simulation) for simulation in results]\n @test round(Statistics.mean(objectives); digits = 2) ≈ 4.04 atol = 0.1\n return\nend\n\nnewsvendor_example(cut_type = SDDP.SINGLE_CUT)\nnewsvendor_example(cut_type = 
SDDP.MULTI_CUT)","category":"page"},{"location":"examples/objective_state_newsvendor/","page":"Newsvendor","title":"Newsvendor","text":"------------------------------------------------------------------------------\n SDDP.jl (c) Oscar Dowson, 2017-21\n\nProblem\n Nodes : 3\n State variables : 1\n Scenarios : 8.51840e+04\n Solver : serial mode\n\nNumerical stability report\n Non-zero Matrix range [8e-01, 2e+00]\n Non-zero Objective range [1e+00, 2e+00]\n Non-zero Bounds range [1e+00, 1e+02]\n Non-zero RHS range [5e+01, 5e+01]\nNo problems detected\n\n Iteration Simulation Bound Time (s) Proc. ID # Solves\n 10 4.125000e+00 4.921922e+00 2.560701e-01 1 1350\n 20 4.262500e+00 4.879594e+00 3.277521e-01 1 2700\n 30 4.875000e+00 4.862969e+00 4.020412e-01 1 4050\n 40 3.212500e+00 4.091557e+00 4.995720e-01 1 5400\n 50 3.975000e+00 4.090943e+00 5.977092e-01 1 6750\n 60 3.750000e+00 4.089418e+00 7.183552e-01 1 8100\n 70 4.181250e+00 4.089082e+00 8.350441e-01 1 9450\n 80 4.475000e+00 4.088775e+00 9.817441e-01 1 10800\n 90 3.000000e+00 4.088664e+00 1.125125e+00 1 12150\n 100 4.987500e+00 4.087655e+00 1.308156e+00 1 13500\n\nTerminating training with status: iteration_limit\n------------------------------------------------------------------------------\n------------------------------------------------------------------------------\n SDDP.jl (c) Oscar Dowson, 2017-21\n\nProblem\n Nodes : 3\n State variables : 1\n Scenarios : 8.51840e+04\n Solver : serial mode\n\nNumerical stability report\n Non-zero Matrix range [8e-01, 2e+00]\n Non-zero Objective range [1e+00, 2e+00]\n Non-zero Bounds range [1e+00, 1e+02]\n Non-zero RHS range [5e+01, 5e+01]\nNo problems detected\n\n Iteration Simulation Bound Time (s) Proc. ID # Solves\n 10 3.968750e+00 5.144542e+00 4.420900e-01 1 1350\n 20 5.500000e+00 4.054077e+00 1.600345e+00 1 2700\n 30 3.668750e+00 4.045451e+00 3.986031e+00 1 4050\n 40 3.931250e+00 4.044862e+00 7.805006e+00 1 5400\n 50 5.143750e+00 4.044405e+00 1.280667e+01 1 6750\n 60 4.687500e+00 4.041622e+00 1.929829e+01 1 8100\n\nTerminating training with status: time_limit\n------------------------------------------------------------------------------\n","category":"page"},{"location":"examples/objective_state_newsvendor/","page":"Newsvendor","title":"Newsvendor","text":"","category":"page"},{"location":"examples/objective_state_newsvendor/","page":"Newsvendor","title":"Newsvendor","text":"This page was generated using Literate.jl.","category":"page"},{"location":"examples/Hydro_thermal/","page":"Hydro-thermal scheduling","title":"Hydro-thermal scheduling","text":"EditURL = \"https://github.com/odow/SDDP.jl/blob/master/docs/src/examples/Hydro_thermal.jl\"","category":"page"},{"location":"examples/Hydro_thermal/#Hydro-thermal-scheduling","page":"Hydro-thermal scheduling","title":"Hydro-thermal scheduling","text":"","category":"section"},{"location":"examples/Hydro_thermal/#Problem-Description","page":"Hydro-thermal scheduling","title":"Problem Description","text":"","category":"section"},{"location":"examples/Hydro_thermal/","page":"Hydro-thermal scheduling","title":"Hydro-thermal scheduling","text":"In a hydro-thermal problem, the agent controls a hydro-electric generator and reservoir. Each time period, they need to choose a generation quantity from thermal g_t, and hydro g_h, in order to meet demand w_d, which is a stagewise-independent random variable. 
The state variable, x, is the quantity of water in the reservoir at the start of each time period, and it has a minimum level of 5 units and a maximum level of 15 units. We assume that there are 10 units of water in the reservoir at the start of time, so that x_0 = 10. The state variable is connected through time by the water balance constraint: x.out = x.in - g_h - s + w_i, where x.out is the quantity of water at the end of the time period, x.in is the quantity of water at the start of the time period, s is the quantity of water spilled from the reservoir, and w_i is a stagewise-independent random variable that represents the inflow into the reservoir during the time period.","category":"page"},{"location":"examples/Hydro_thermal/","page":"Hydro-thermal scheduling","title":"Hydro-thermal scheduling","text":"We assume that there are three stages, t=1, 2, 3, representing summer-fall, winter, and spring, and that we are solving this problem in an infinite-horizon setting with a discount factor of 0.95.","category":"page"},{"location":"examples/Hydro_thermal/","page":"Hydro-thermal scheduling","title":"Hydro-thermal scheduling","text":"In each stage, the agent incurs the cost of spillage, plus the cost of thermal generation. We assume that the cost of thermal generation is dependent on the stage t = 1, 2, 3, and that in each stage, w is drawn from the set (w_i, w_d) = {(0, 7.5), (3, 5), (10, 2.5)} with equal probability.","category":"page"},{"location":"examples/Hydro_thermal/#Importing-packages","page":"Hydro-thermal scheduling","title":"Importing packages","text":"","category":"section"},{"location":"examples/Hydro_thermal/","page":"Hydro-thermal scheduling","title":"Hydro-thermal scheduling","text":"For this example, in addition to SDDP, we need GLPK as a solver and Statistics to compute the mean of our simulations.","category":"page"},{"location":"examples/Hydro_thermal/","page":"Hydro-thermal scheduling","title":"Hydro-thermal scheduling","text":"using GLPK\nusing SDDP\nusing Statistics","category":"page"},{"location":"examples/Hydro_thermal/#Constructing-the-policy-graph","page":"Hydro-thermal scheduling","title":"Constructing the policy graph","text":"","category":"section"},{"location":"examples/Hydro_thermal/","page":"Hydro-thermal scheduling","title":"Hydro-thermal scheduling","text":"There are three stages in our problem, so we construct a linear policy graph with three stages using SDDP.LinearGraph:","category":"page"},{"location":"examples/Hydro_thermal/","page":"Hydro-thermal scheduling","title":"Hydro-thermal scheduling","text":"graph = SDDP.LinearGraph(3)","category":"page"},{"location":"examples/Hydro_thermal/","page":"Hydro-thermal scheduling","title":"Hydro-thermal scheduling","text":"Root\n 0\nNodes\n 1\n 2\n 3\nArcs\n 0 => 1 w.p. 1.0\n 1 => 2 w.p. 1.0\n 2 => 3 w.p. 
1.0\n","category":"page"},{"location":"examples/Hydro_thermal/","page":"Hydro-thermal scheduling","title":"Hydro-thermal scheduling","text":"Then, because we want to solve an infinite-horizon problem, we add an additional edge between node 3 and node 1 with probability 0.95:","category":"page"},{"location":"examples/Hydro_thermal/","page":"Hydro-thermal scheduling","title":"Hydro-thermal scheduling","text":"SDDP.add_edge(graph, 3 => 1, 0.95)","category":"page"},{"location":"examples/Hydro_thermal/#Constructing-the-model","page":"Hydro-thermal scheduling","title":"Constructing the model","text":"","category":"section"},{"location":"examples/Hydro_thermal/","page":"Hydro-thermal scheduling","title":"Hydro-thermal scheduling","text":"Much of the macro code (i.e., lines starting with @) in the first part of the following should be familiar to users of JuMP.","category":"page"},{"location":"examples/Hydro_thermal/","page":"Hydro-thermal scheduling","title":"Hydro-thermal scheduling","text":"Inside the do-end block, sp is a standard JuMP model, and t is an index for the state variable that will be called with t = 1, 2, 3.","category":"page"},{"location":"examples/Hydro_thermal/","page":"Hydro-thermal scheduling","title":"Hydro-thermal scheduling","text":"The state variable x, constructed by passing the SDDP.State tag to @variable is actually a Julia struct with two fields: x.in and x.out corresponding to the incoming and outgoing state variables respectively. Both x.in and x.out are standard JuMP variables. The initial_value keyword provides the value of the state variable in the root node (i.e., x_0).","category":"page"},{"location":"examples/Hydro_thermal/","page":"Hydro-thermal scheduling","title":"Hydro-thermal scheduling","text":"Compared to a JuMP model, one key difference is that we use @stageobjective instead of @objective. The SDDP.parameterize function takes a list of supports for w and parameterizes the JuMP model sp by setting the right-hand sides of the appropriate constraints (note how the constraints initially have a right-hand side of 0). By default, it is assumed that the realizations have uniform probability, but a probability mass vector can also be provided.","category":"page"},{"location":"examples/Hydro_thermal/","page":"Hydro-thermal scheduling","title":"Hydro-thermal scheduling","text":"model = SDDP.PolicyGraph(\n graph, sense = :Min, lower_bound = 0.0, optimizer = GLPK.Optimizer\n) do sp, t\n @variable(sp, 5 <= x <= 15, SDDP.State, initial_value = 10)\n @variable(sp, g_t >= 0)\n @variable(sp, g_h >= 0)\n @variable(sp, s >= 0)\n @constraint(sp, balance, x.out - x.in + g_h + s == 0)\n @constraint(sp, demand, g_h + g_t == 0)\n @stageobjective(sp, s + t * g_t)\n SDDP.parameterize(sp, [[0, 7.5], [3, 5], [10, 2.5]]) do w\n set_normalized_rhs(balance, w[1])\n set_normalized_rhs(demand, w[2])\n end\nend","category":"page"},{"location":"examples/Hydro_thermal/","page":"Hydro-thermal scheduling","title":"Hydro-thermal scheduling","text":"A policy graph with 3 nodes.\n Node indices: 1, 2, 3\n","category":"page"},{"location":"examples/Hydro_thermal/#Training-the-policy","page":"Hydro-thermal scheduling","title":"Training the policy","text":"","category":"section"},{"location":"examples/Hydro_thermal/","page":"Hydro-thermal scheduling","title":"Hydro-thermal scheduling","text":"Once a model has been constructed, the next step is to train the policy. This can be achieved using SDDP.train. 
There are many options that can be passed, but iteration_limit terminates the training after the prescribed number of SDDP iterations.","category":"page"},{"location":"examples/Hydro_thermal/","page":"Hydro-thermal scheduling","title":"Hydro-thermal scheduling","text":"SDDP.train(model, iteration_limit = 100)","category":"page"},{"location":"examples/Hydro_thermal/","page":"Hydro-thermal scheduling","title":"Hydro-thermal scheduling","text":"------------------------------------------------------------------------------\n SDDP.jl (c) Oscar Dowson, 2017-21\n\nProblem\n Nodes : 3\n State variables : 1\n Scenarios : Inf\n Solver : serial mode\n\nNumerical stability report\n Non-zero Matrix range [1e+00, 1e+00]\n Non-zero Objective range [1e+00, 3e+00]\n Non-zero Bounds range [5e+00, 2e+01]\n Non-zero RHS range [3e+00, 1e+01]\nNo problems detected\n\n Iteration Simulation Bound Time (s) Proc. ID # Solves\n 1 6.150000e+01 2.887652e+01 2.774260e-01 1 75\n 2 1.449669e+02 7.608300e+01 2.850680e-01 1 246\n 3 2.050000e+01 8.073466e+01 2.864082e-01 1 273\n 4 1.872207e+02 1.066419e+02 2.931981e-01 1 408\n 5 2.950000e+01 1.133084e+02 2.954102e-01 1 447\n 6 4.545914e+01 1.188873e+02 2.984512e-01 1 498\n 7 5.843433e+02 1.833509e+02 3.486202e-01 1 1329\n 8 1.576828e+02 1.900028e+02 3.589442e-01 1 1536\n 9 2.846617e+02 1.985914e+02 3.721671e-01 1 1803\n 10 4.799673e+01 2.003678e+02 3.748291e-01 1 1854\n 11 3.585548e+02 2.095325e+02 3.964591e-01 1 2229\n 12 7.923240e+01 2.113305e+02 4.018362e-01 1 2316\n 13 4.574087e+02 2.190355e+02 4.375622e-01 1 2883\n 14 1.677303e+02 2.217032e+02 4.542720e-01 1 3066\n 15 2.257704e+02 2.229001e+02 4.671640e-01 1 3261\n 16 4.488564e+02 2.254794e+02 4.917371e-01 1 3624\n 17 7.606693e+02 2.283348e+02 5.594931e-01 1 4515\n 18 6.577244e+02 2.309508e+02 6.067171e-01 1 5190\n 19 6.004063e+02 2.315665e+02 6.542692e-01 1 5793\n 20 1.007839e+03 2.332841e+02 7.172620e-01 1 6612\n 21 2.179070e+02 2.336207e+02 7.423301e-01 1 6819\n 22 8.946569e+01 2.336207e+02 7.488241e-01 1 6906\n 23 2.526158e+02 2.339271e+02 7.753282e-01 1 7233\n 24 1.350000e+01 2.339297e+02 7.773371e-01 1 7260\n 25 3.342290e+01 2.339297e+02 7.793391e-01 1 7287\n 26 2.229492e+01 2.340416e+02 7.812591e-01 1 7314\n 27 6.964455e+02 2.347008e+02 8.332262e-01 1 7965\n 28 1.291218e+02 2.349041e+02 8.477671e-01 1 8148\n 29 3.296347e+02 2.350328e+02 8.702791e-01 1 8427\n 30 7.773622e+01 2.350621e+02 8.743701e-01 1 8478\n 31 7.251851e+02 2.354581e+02 9.510782e-01 1 9273\n 32 3.757829e+01 2.354843e+02 9.588392e-01 1 9360\n 33 2.891402e+01 2.354894e+02 9.636412e-01 1 9411\n 34 4.936242e+02 2.356233e+02 1.005543e+00 1 9846\n 35 8.767827e+01 2.356680e+02 1.021234e+00 1 9921\n 36 6.984319e+01 2.357004e+02 1.031901e+00 1 10032\n 37 2.779356e+02 2.357733e+02 1.058561e+00 1 10287\n 38 7.764054e+02 2.358597e+02 1.141951e+00 1 11082\n 39 7.319763e+02 2.360151e+02 1.212256e+00 1 11769\n 40 2.323488e+01 2.360229e+02 1.215960e+00 1 11808\n 41 5.597979e+02 2.361036e+02 1.265894e+00 1 12315\n 42 4.190125e+02 2.361495e+02 1.303936e+00 1 12654\n 43 8.001395e+02 2.362025e+02 1.395917e+00 1 13401\n 44 7.621165e+02 2.362657e+02 1.497650e+00 1 14280\n 45 1.686385e+02 2.362778e+02 1.520462e+00 1 14463\n 46 8.297786e+01 2.362785e+02 1.528285e+00 1 14526\n 47 1.884435e+02 2.362952e+02 1.559817e+00 1 14781\n 48 2.938250e+02 2.363050e+02 1.596138e+00 1 15084\n 49 9.437528e+00 2.363098e+02 1.600747e+00 1 15123\n 50 1.400053e+01 2.363098e+02 1.604414e+00 1 15150\n 51 1.224471e+02 2.363153e+02 1.620528e+00 1 15261\n 52 5.940317e+02 2.363363e+02 
1.701284e+00 1 15876\n 53 3.809379e+01 2.363363e+02 1.705607e+00 1 15915\n 54 1.320170e+02 2.363402e+02 1.715354e+00 1 16002\n 55 2.475648e+02 2.363431e+02 1.746415e+00 1 16269\n 56 1.464981e+02 2.363511e+02 1.768080e+00 1 16452\n 57 2.605968e+02 2.363551e+02 1.800417e+00 1 16719\n 58 5.484973e+02 2.363742e+02 1.878893e+00 1 17334\n 59 1.504047e+02 2.363761e+02 1.901327e+00 1 17505\n 60 3.590302e+02 2.363837e+02 1.946230e+00 1 17856\n 61 1.000000e+01 2.363837e+02 1.947931e+00 1 17871\n 62 6.441702e+01 2.363849e+02 1.952629e+00 1 17910\n 63 6.299930e+01 2.363854e+02 1.957477e+00 1 17949\n 64 2.845905e+02 2.363867e+02 1.998858e+00 1 18264\n 65 2.914768e+02 2.363938e+02 2.045399e+00 1 18603\n 66 1.028202e+02 2.363944e+02 2.061058e+00 1 18714\n 67 6.094906e+02 2.364014e+02 2.174343e+00 1 19401\n 68 5.948264e+01 2.364028e+02 2.180924e+00 1 19452\n 69 1.229986e+02 2.364033e+02 2.187044e+00 1 19503\n 70 1.674931e+02 2.364046e+02 2.211174e+00 1 19686\n 71 3.075101e+02 2.364103e+02 2.253909e+00 1 20013\n 72 7.651383e+01 2.364105e+02 2.265114e+00 1 20100\n 73 1.680028e+02 2.364133e+02 2.291475e+00 1 20271\n 74 2.019897e+02 2.364141e+02 2.314893e+00 1 20430\n 75 9.050247e+01 2.364153e+02 2.329998e+00 1 20541\n 76 2.500000e+00 2.364156e+02 2.332170e+00 1 20556\n 77 6.503023e+00 2.364156e+02 2.334040e+00 1 20571\n 78 1.650006e+02 2.364161e+02 2.350405e+00 1 20682\n 79 2.500000e+00 2.364162e+02 2.352353e+00 1 20697\n 80 1.575320e+02 2.364169e+02 2.389422e+00 1 20940\n 81 1.000000e+01 2.364170e+02 2.391308e+00 1 20955\n 82 1.184987e+02 2.364182e+02 2.411749e+00 1 21090\n 83 3.249793e+01 2.364182e+02 2.415454e+00 1 21117\n 84 5.539890e+02 2.364198e+02 2.492925e+00 1 21600\n 85 1.684911e+02 2.364209e+02 2.517838e+00 1 21747\n 86 1.875034e+02 2.364212e+02 2.548345e+00 1 21942\n 87 1.175035e+02 2.364222e+02 2.578417e+00 1 22077\n 88 1.129952e+02 2.364222e+02 2.585391e+00 1 22128\n 89 1.169508e+03 2.364272e+02 2.769096e+00 1 23271\n 90 6.456965e+01 2.364274e+02 2.778722e+00 1 23334\n 91 2.300042e+01 2.364277e+02 2.784661e+00 1 23373\n 92 4.520075e+02 2.364286e+02 2.857701e+00 1 23832\n 93 2.230039e+02 2.364292e+02 2.881797e+00 1 23979\n 94 3.920006e+02 2.364297e+02 2.940407e+00 1 24330\n 95 3.444960e+02 2.364303e+02 3.000010e+00 1 24681\n 96 2.640124e+02 2.364308e+02 3.070162e+00 1 25020\n 97 1.140020e+02 2.364312e+02 3.086032e+00 1 25119\n 98 1.564964e+02 2.364312e+02 3.117775e+00 1 25314\n 99 4.130027e+02 2.364320e+02 3.171189e+00 1 25641\n 100 3.154952e+02 2.364323e+02 3.225700e+00 1 25968\n\nTerminating training with status: iteration_limit\n------------------------------------------------------------------------------\n","category":"page"},{"location":"examples/Hydro_thermal/#Simulating-the-policy","page":"Hydro-thermal scheduling","title":"Simulating the policy","text":"","category":"section"},{"location":"examples/Hydro_thermal/","page":"Hydro-thermal scheduling","title":"Hydro-thermal scheduling","text":"After training, we can simulate the policy using SDDP.simulate.","category":"page"},{"location":"examples/Hydro_thermal/","page":"Hydro-thermal scheduling","title":"Hydro-thermal scheduling","text":"sims = SDDP.simulate(model, 100, [:g_t])\nmu = round(mean([s[1][:g_t] for s in sims]), digits = 2)\nprintln(\"On average, $(mu) units of thermal are used in the first stage.\")","category":"page"},{"location":"examples/Hydro_thermal/","page":"Hydro-thermal scheduling","title":"Hydro-thermal scheduling","text":"On average, 2.31 units of thermal are used in the first 
stage.\n","category":"page"},{"location":"examples/Hydro_thermal/#Extracting-the-water-values","page":"Hydro-thermal scheduling","title":"Extracting the water values","text":"","category":"section"},{"location":"examples/Hydro_thermal/","page":"Hydro-thermal scheduling","title":"Hydro-thermal scheduling","text":"Finally, we can use SDDP.ValueFunction and SDDP.evaluate to obtain and evaluate the value function at different points in the state-space. Note that since we are minimizing, the price has a negative sign: each additional unit of water leads to a decrease in the the expected long-run cost.","category":"page"},{"location":"examples/Hydro_thermal/","page":"Hydro-thermal scheduling","title":"Hydro-thermal scheduling","text":"V = SDDP.ValueFunction(model[1])\ncost, price = SDDP.evaluate(V, x = 10)","category":"page"},{"location":"examples/Hydro_thermal/","page":"Hydro-thermal scheduling","title":"Hydro-thermal scheduling","text":"(233.5568813464907, Dict(:x=>-0.66001))","category":"page"},{"location":"examples/Hydro_thermal/","page":"Hydro-thermal scheduling","title":"Hydro-thermal scheduling","text":"","category":"page"},{"location":"examples/Hydro_thermal/","page":"Hydro-thermal scheduling","title":"Hydro-thermal scheduling","text":"This page was generated using Literate.jl.","category":"page"},{"location":"examples/hydro_valley/","page":"Hydro valleys","title":"Hydro valleys","text":"EditURL = \"https://github.com/odow/SDDP.jl/blob/master/docs/src/examples/hydro_valley.jl\"","category":"page"},{"location":"examples/hydro_valley/#Hydro-valleys","page":"Hydro valleys","title":"Hydro valleys","text":"","category":"section"},{"location":"examples/hydro_valley/","page":"Hydro valleys","title":"Hydro valleys","text":"This problem is a version of the hydro-thermal scheduling problem. The goal is to operate two hydro-dams in a valley chain over time in the face of inflow and price uncertainty.","category":"page"},{"location":"examples/hydro_valley/","page":"Hydro valleys","title":"Hydro valleys","text":"Turbine response curves are modelled by piecewise linear functions which map the flow rate into a power. These can be controlled by specifying the breakpoints in the piecewise linear function as the knots in the Turbine struct.","category":"page"},{"location":"examples/hydro_valley/","page":"Hydro valleys","title":"Hydro valleys","text":"The model can be created using the hydrovalleymodel function. It has a few keyword arguments to allow automated testing of the library. hasstagewiseinflows determines if the RHS noise constraint should be added. 
hasmarkovprice determines if the price uncertainty (modelled by a markov chain) should be added.","category":"page"},{"location":"examples/hydro_valley/","page":"Hydro valleys","title":"Hydro valleys","text":"In the third stage, the markov chain has some unreachable states to test some code-paths in the library.","category":"page"},{"location":"examples/hydro_valley/","page":"Hydro valleys","title":"Hydro valleys","text":"We can also set the sense to :Min or :Max (the objective and bound are flipped appropriately).","category":"page"},{"location":"examples/hydro_valley/","page":"Hydro valleys","title":"Hydro valleys","text":"using SDDP, GLPK, Test, Random\n\nstruct Turbine\n flowknots::Vector{Float64}\n powerknots::Vector{Float64}\nend\n\nstruct Reservoir\n min::Float64\n max::Float64\n initial::Float64\n turbine::Turbine\n spill_cost::Float64\n inflows::Vector{Float64}\nend\n\nfunction hydrovalleymodel(;\n hasstagewiseinflows::Bool = true,\n hasmarkovprice::Bool = true,\n sense::Symbol = :Max,\n)\n\n valley_chain = [\n Reservoir(0, 200, 200, Turbine([50, 60, 70], [55, 65, 70]), 1000, [0, 20, 50]),\n Reservoir(0, 200, 200, Turbine([50, 60, 70], [55, 65, 70]), 1000, [0, 0, 20]),\n ]\n\n turbine(i) = valley_chain[i].turbine\n\n # Prices[t, markov state]\n prices = [\n 1 2 0\n 2 1 0\n 3 4 0\n ]\n\n # Transition matrix\n if hasmarkovprice\n transition = Array{Float64,2}[[1.0]', [0.6 0.4], [0.6 0.4 0.0; 0.3 0.7 0.0]]\n else\n transition = [ones(Float64, (1, 1)) for t = 1:3]\n end\n\n flipobj = (sense == :Max) ? 1.0 : -1.0\n lower = (sense == :Max) ? -Inf : -1e6\n upper = (sense == :Max) ? 1e6 : Inf\n\n N = length(valley_chain)\n\n # Initialise SDDP Model\n m = SDDP.MarkovianPolicyGraph(\n sense = sense,\n lower_bound = lower,\n upper_bound = upper,\n transition_matrices = transition,\n optimizer = GLPK.Optimizer,\n ) do subproblem, node\n t, markov_state = node\n\n # ------------------------------------------------------------------\n # SDDP State Variables\n # Level of upper reservoir\n @variable(\n subproblem,\n valley_chain[r].min <= reservoir[r = 1:N] <= valley_chain[r].max,\n SDDP.State,\n initial_value = valley_chain[r].initial\n )\n\n # ------------------------------------------------------------------\n # Additional variables\n @variables(\n subproblem,\n begin\n outflow[r = 1:N] >= 0\n spill[r = 1:N] >= 0\n inflow[r = 1:N] >= 0\n generation_quantity >= 0 # Total quantity of water\n # Proportion of levels to dispatch on\n 0 <= dispatch[r = 1:N, level = 1:length(turbine(r).flowknots)] <= 1\n rainfall[i = 1:N]\n end\n )\n\n # ------------------------------------------------------------------\n # Constraints\n @constraints(\n subproblem,\n begin\n # flow from upper reservoir\n reservoir[1].out == reservoir[1].in + inflow[1] - outflow[1] - spill[1]\n\n # other flows\n flow[i = 2:N],\n reservoir[i].out ==\n reservoir[i].in + inflow[i] - outflow[i] - spill[i] +\n outflow[i-1] +\n spill[i-1]\n\n # Total quantity generated\n generation_quantity == sum(\n turbine(r).powerknots[level] * dispatch[r, level] for r = 1:N\n for level = 1:length(turbine(r).powerknots)\n )\n\n # ------------------------------------------------------------------\n # Flow out\n turbineflow[r = 1:N],\n outflow[r] == sum(\n turbine(r).flowknots[level] * dispatch[r, level]\n for level = 1:length(turbine(r).flowknots)\n )\n\n # Dispatch combination of levels\n dispatched[r = 1:N],\n sum(dispatch[r, level] for level = 1:length(turbine(r).flowknots)) <= 1\n end\n )\n\n # rainfall noises\n if hasstagewiseinflows && t > 1 # in 
future stages random inflows\n @constraint(subproblem, inflow_noise[i = 1:N], inflow[i] <= rainfall[i])\n\n SDDP.parameterize(\n subproblem,\n [\n (valley_chain[1].inflows[i], valley_chain[2].inflows[i])\n for i = 1:length(transition)\n ],\n ) do ω\n for i = 1:N\n JuMP.fix(rainfall[i], ω[i])\n end\n end\n else # in the first stage deterministic inflow\n @constraint(\n subproblem,\n initial_inflow_noise[i = 1:N],\n inflow[i] <= valley_chain[i].inflows[1]\n )\n end\n\n # ------------------------------------------------------------------\n # Objective Function\n if hasmarkovprice\n @stageobjective(\n subproblem,\n flipobj * (\n prices[t, markov_state] * generation_quantity -\n sum(valley_chain[i].spill_cost * spill[i] for i = 1:N)\n )\n )\n else\n @stageobjective(\n subproblem,\n flipobj * (\n prices[t, 1] * generation_quantity -\n sum(valley_chain[i].spill_cost * spill[i] for i = 1:N)\n )\n )\n end\n end\nend\n\nfunction test_hydro_valley_model()\n\n # For repeatability\n Random.seed!(11111)\n\n # deterministic\n deterministic_model =\n hydrovalleymodel(hasmarkovprice = false, hasstagewiseinflows = false)\n SDDP.train(\n deterministic_model,\n iteration_limit = 10,\n cut_deletion_minimum = 1,\n print_level = 0,\n )\n @test SDDP.calculate_bound(deterministic_model) ≈ 835.0 atol = 1e-3\n\n # stagewise inflows\n stagewise_model = hydrovalleymodel(hasmarkovprice = false)\n SDDP.train(stagewise_model, iteration_limit = 20, print_level = 0)\n @test SDDP.calculate_bound(stagewise_model) ≈ 838.33 atol = 1e-2\n\n # markov prices\n markov_model = hydrovalleymodel(hasstagewiseinflows = false)\n SDDP.train(markov_model, iteration_limit = 10, print_level = 0)\n @test SDDP.calculate_bound(markov_model) ≈ 851.8 atol = 1e-2\n\n # stagewise inflows and markov prices\n markov_stagewise_model =\n hydrovalleymodel(hasstagewiseinflows = true, hasmarkovprice = true)\n SDDP.train(markov_stagewise_model, iteration_limit = 10, print_level = 0)\n @test SDDP.calculate_bound(markov_stagewise_model) ≈ 855.0 atol = 1.0\n\n # risk averse stagewise inflows and markov prices\n riskaverse_model = hydrovalleymodel()\n SDDP.train(\n riskaverse_model,\n risk_measure = SDDP.EAVaR(lambda = 0.5, beta = 0.66),\n iteration_limit = 10,\n print_level = 0,\n )\n @test SDDP.calculate_bound(riskaverse_model) ≈ 828.157 atol = 1.0\n\n # stagewise inflows and markov prices\n worst_case_model = hydrovalleymodel(sense = :Min)\n SDDP.train(\n worst_case_model,\n risk_measure = SDDP.EAVaR(lambda = 0.5, beta = 0.0),\n iteration_limit = 10,\n print_level = 0,\n )\n @test SDDP.calculate_bound(worst_case_model) ≈ -780.867 atol = 1.0\n\n # stagewise inflows and markov prices\n cutselection_model = hydrovalleymodel()\n SDDP.train(\n cutselection_model,\n iteration_limit = 10,\n print_level = 0,\n cut_deletion_minimum = 2,\n )\n @test SDDP.calculate_bound(cutselection_model) ≈ 855.0 atol = 1.0\n\n # Distributionally robust Optimization\n dro_model = hydrovalleymodel(hasmarkovprice = false)\n SDDP.train(\n dro_model,\n risk_measure = SDDP.ModifiedChiSquared(sqrt(2 / 3) - 1e-6),\n iteration_limit = 10,\n print_level = 0,\n )\n @test SDDP.calculate_bound(dro_model) ≈ 835.0 atol = 1.0\n\n dro_model = hydrovalleymodel(hasmarkovprice = false)\n SDDP.train(\n dro_model,\n risk_measure = SDDP.ModifiedChiSquared(1 / 6),\n iteration_limit = 20,\n print_level = 0,\n )\n @test SDDP.calculate_bound(dro_model) ≈ 836.695 atol = 1.0\n # (Note) radius ≈ sqrt(2/3), will set all noise probabilities to zero except the worst case noise\n # (Why?):\n # The distance from 
the uniform distribution (the assumed \"true\" distribution)\n # to a corner of a unit simplex is sqrt(S-1)/sqrt(S) if we have S scenarios. The corner\n # of a unit simplex is just a unit vector, i.e.: [0 ... 0 1 0 ... 0]. With this probability\n # vector, only one noise has a non-zero probablity.\n # In the worst case rhsnoise (0 inflows) the profit is:\n # Reservoir1: 70 * $3 + 70 * $2 + 65 * $1 +\n # Reservoir2: 70 * $3 + 70 * $2 + 70 * $1\n ### = $835\nend\n\ntest_hydro_valley_model()","category":"page"},{"location":"examples/hydro_valley/","page":"Hydro valleys","title":"Hydro valleys","text":"Test Passed","category":"page"},{"location":"examples/hydro_valley/","page":"Hydro valleys","title":"Hydro valleys","text":"","category":"page"},{"location":"examples/hydro_valley/","page":"Hydro valleys","title":"Hydro valleys","text":"This page was generated using Literate.jl.","category":"page"},{"location":"guides/add_noise_in_the_constraint_matrix/#Add-noise-in-the-constraint-matrix","page":"Add noise in the constraint matrix","title":"Add noise in the constraint matrix","text":"","category":"section"},{"location":"guides/add_noise_in_the_constraint_matrix/","page":"Add noise in the constraint matrix","title":"Add noise in the constraint matrix","text":"DocTestSetup = quote\n using SDDP, GLPK\nend","category":"page"},{"location":"guides/add_noise_in_the_constraint_matrix/","page":"Add noise in the constraint matrix","title":"Add noise in the constraint matrix","text":"SDDP.jl supports coefficients in the constraint matrix through the JuMP.set_normalized_coefficient function.","category":"page"},{"location":"guides/add_noise_in_the_constraint_matrix/","page":"Add noise in the constraint matrix","title":"Add noise in the constraint matrix","text":"julia> model = SDDP.LinearPolicyGraph(\n stages=3, lower_bound = 0, optimizer = GLPK.Optimizer\n ) do subproblem, t\n @variable(subproblem, x, SDDP.State, initial_value = 0.0)\n @constraint(subproblem, emissions, 1x.out <= 1)\n SDDP.parameterize(subproblem, [0.2, 0.5, 1.0]) do ω\n JuMP.set_normalized_coefficient(emissions, x.out, ω)\n println(emissions)\n end\n @stageobjective(subproblem, -x.out)\n end\nA policy graph with 3 nodes.\n Node indices: 1, 2, 3\n\njulia> SDDP.simulate(model, 1);\nemissions : x_out <= 1.0\nemissions : 0.2 x_out <= 1.0\nemissions : 0.5 x_out <= 1.0","category":"page"},{"location":"guides/add_noise_in_the_constraint_matrix/","page":"Add noise in the constraint matrix","title":"Add noise in the constraint matrix","text":"note: Note\nJuMP will normalize constraints by moving all variables to the left-hand side. Thus, @constraint(model, 0 <= 1 - x.out) becomes x.out <= 1. JuMP.set_normalized_coefficient sets the coefficient on the normalized constraint.","category":"page"},{"location":"tutorial/05_plotting/","page":"Basic V: plotting","title":"Basic V: plotting","text":"EditURL = \"https://github.com/odow/SDDP.jl/blob/master/docs/src/tutorial/05_plotting.jl\"","category":"page"},{"location":"tutorial/05_plotting/#Basic-V:-plotting","page":"Basic V: plotting","title":"Basic V: plotting","text":"","category":"section"},{"location":"tutorial/05_plotting/","page":"Basic V: plotting","title":"Basic V: plotting","text":"In our previous tutorials, we formulated, solved, and simulated multistage stochastic optimization problems. However, we haven't really investigated what the solution looks like. Luckily, SDDP.jl includes a number of plotting tools to help us do that. 
In this tutorial, we explain the tools and make some pretty pictures.","category":"page"},{"location":"tutorial/05_plotting/#Preliminaries","page":"Basic V: plotting","title":"Preliminaries","text":"","category":"section"},{"location":"tutorial/05_plotting/","page":"Basic V: plotting","title":"Basic V: plotting","text":"The next two plot types help visualize the policy. Thus, we first need to create a policy and simulate some trajectories. So, let's take the model from Basic IV: Markov uncertainty, train it for 20 iterations, and then simulate 100 Monte Carlo realizations of the policy.","category":"page"},{"location":"tutorial/05_plotting/","page":"Basic V: plotting","title":"Basic V: plotting","text":"using SDDP, GLPK\n\nΩ = [\n (inflow = 0.0, fuel_multiplier = 1.5),\n (inflow = 50.0, fuel_multiplier = 1.0),\n (inflow = 100.0, fuel_multiplier = 0.75)\n]\n\nmodel = SDDP.MarkovianPolicyGraph(\n transition_matrices = Array{Float64, 2}[\n [1.0]', [0.75 0.25], [0.75 0.25 ; 0.25 0.75]\n ],\n sense = :Min,\n lower_bound = 0.0,\n optimizer = GLPK.Optimizer\n) do subproblem, node\n t, markov_state = node\n @variable(subproblem, 0 <= volume <= 200, SDDP.State, initial_value = 200)\n @variables(subproblem, begin\n thermal_generation >= 0\n hydro_generation >= 0\n hydro_spill >= 0\n inflow\n end)\n @constraints(subproblem, begin\n volume.out == volume.in + inflow - hydro_generation - hydro_spill\n thermal_generation + hydro_generation == 150.0\n end)\n probability = markov_state == 1 ? [1/6, 1/3, 1/2] : [1/2, 1/3, 1/6]\n fuel_cost = [50.0, 100.0, 150.0]\n SDDP.parameterize(subproblem, Ω, probability) do ω\n JuMP.fix(inflow, ω.inflow)\n @stageobjective(subproblem,\n ω.fuel_multiplier * fuel_cost[t] * thermal_generation\n )\n end\nend\n\nSDDP.train(model, iteration_limit = 20, run_numerical_stability_report = false)\n\nsimulations = SDDP.simulate(\n model,\n 100,\n [:volume, :thermal_generation, :hydro_generation, :hydro_spill])\n\nprintln(\"Completed $(length(simulations)) simulations.\")","category":"page"},{"location":"tutorial/05_plotting/","page":"Basic V: plotting","title":"Basic V: plotting","text":"------------------------------------------------------------------------------\n SDDP.jl (c) Oscar Dowson, 2017-21\n\nProblem\n Nodes : 5\n State variables : 1\n Scenarios : 1.08000e+02\n Solver : serial mode\n\n Iteration Simulation Bound Time (s) Proc. 
ID # Solves\n 1 2.250000e+04 5.588397e+03 1.022887e-02 1 18\n 2 1.315909e+04 7.975336e+03 1.137590e-02 1 36\n 3 1.875000e+03 7.975336e+03 1.241302e-02 1 54\n 4 1.125000e+04 7.975336e+03 1.343608e-02 1 72\n 5 1.875000e+03 8.072917e+03 1.464987e-02 1 90\n 6 1.875000e+03 8.072917e+03 1.565003e-02 1 108\n 7 1.875000e+03 8.072917e+03 1.696491e-02 1 126\n 8 2.750000e+04 8.072917e+03 1.815796e-02 1 144\n 9 5.000000e+03 8.072917e+03 1.919103e-02 1 162\n 10 5.000000e+03 8.072917e+03 2.019596e-02 1 180\n 11 1.875000e+03 8.072917e+03 2.125287e-02 1 198\n 12 1.625000e+04 8.072917e+03 2.237105e-02 1 216\n 13 1.875000e+03 8.072917e+03 2.375102e-02 1 234\n 14 5.000000e+03 8.072917e+03 2.485800e-02 1 252\n 15 1.875000e+03 8.072917e+03 2.596498e-02 1 270\n 16 1.125000e+04 8.072917e+03 2.710485e-02 1 288\n 17 1.875000e+03 8.072917e+03 2.818799e-02 1 306\n 18 1.125000e+04 8.072917e+03 2.935600e-02 1 324\n 19 2.437500e+04 8.072917e+03 3.067398e-02 1 342\n 20 1.875000e+03 8.072917e+03 3.188586e-02 1 360\n\nTerminating training with status: iteration_limit\n------------------------------------------------------------------------------\nCompleted 100 simulations.\n","category":"page"},{"location":"tutorial/05_plotting/","page":"Basic V: plotting","title":"Basic V: plotting","text":"Great! Now we have some data in simulations to visualize.","category":"page"},{"location":"tutorial/05_plotting/#Spaghetti-plots","page":"Basic V: plotting","title":"Spaghetti plots","text":"","category":"section"},{"location":"tutorial/05_plotting/","page":"Basic V: plotting","title":"Basic V: plotting","text":"The first plotting utility we discuss is a spaghetti plot (you'll understand the name when you see the graph).","category":"page"},{"location":"tutorial/05_plotting/","page":"Basic V: plotting","title":"Basic V: plotting","text":"To create a spaghetti plot, begin by creating a new SDDP.SpaghettiPlot instance as follows:","category":"page"},{"location":"tutorial/05_plotting/","page":"Basic V: plotting","title":"Basic V: plotting","text":"plt = SDDP.SpaghettiPlot(simulations)","category":"page"},{"location":"tutorial/05_plotting/","page":"Basic V: plotting","title":"Basic V: plotting","text":"A spaghetti plot with 100 scenarios and 3 stages.","category":"page"},{"location":"tutorial/05_plotting/","page":"Basic V: plotting","title":"Basic V: plotting","text":"We can add plots to plt using the SDDP.add_spaghetti function.","category":"page"},{"location":"tutorial/05_plotting/","page":"Basic V: plotting","title":"Basic V: plotting","text":"SDDP.add_spaghetti(plt; title = \"Reservoir volume\") do data\n return data[:volume].out\nend","category":"page"},{"location":"tutorial/05_plotting/","page":"Basic V: plotting","title":"Basic V: plotting","text":"You don't have to just return values from the simulation; you can compute things too.","category":"page"},{"location":"tutorial/05_plotting/","page":"Basic V: plotting","title":"Basic V: plotting","text":"SDDP.add_spaghetti(\n plt; title = \"Fuel cost\", ymin = 0, ymax = 250\n) do data\n if data[:thermal_generation] > 0\n return data[:stage_objective] / data[:thermal_generation]\n else # No thermal generation, so return 0.0.\n return 0.0\n end\nend","category":"page"},{"location":"tutorial/05_plotting/","page":"Basic V: plotting","title":"Basic V: plotting","text":"Note that there are many keyword arguments in addition to title. For example, we fixed the minimum and maximum values of the y-axis using ymin and ymax. 
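As one more illustration of our own (a hedged sketch, not from the original tutorial), a third plot of hydro generation, which was also recorded by the simulate call above, could be added with the same keyword arguments:\n\nSDDP.add_spaghetti(plt; title = \"Hydro generation\", ymin = 0, ymax = 200) do data\n    # :hydro_generation was recorded in the SDDP.simulate call above.\n    return data[:hydro_generation]\nend\n\n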
See the SDDP.add_spaghetti documentation for all the arguments.","category":"page"},{"location":"tutorial/05_plotting/","page":"Basic V: plotting","title":"Basic V: plotting","text":"Having built the plot, we now need to display it using SDDP.plot.","category":"page"},{"location":"tutorial/05_plotting/","page":"Basic V: plotting","title":"Basic V: plotting","text":"SDDP.plot(plt, \"spaghetti_plot.html\")","category":"page"},{"location":"tutorial/05_plotting/","page":"Basic V: plotting","title":"Basic V: plotting","text":"This should open a webpage that looks like this one.","category":"page"},{"location":"tutorial/05_plotting/","page":"Basic V: plotting","title":"Basic V: plotting","text":"Using the mouse, you can highlight individual trajectories by hovering over them. This makes it possible to visualize a single trajectory across multiple dimensions.","category":"page"},{"location":"tutorial/05_plotting/","page":"Basic V: plotting","title":"Basic V: plotting","text":"If you click on the plot, then trajectories that are close to the mouse pointer are shown darker and those further away are shown lighter.","category":"page"},{"location":"tutorial/05_plotting/#Publication-plots","page":"Basic V: plotting","title":"Publication plots","text":"","category":"section"},{"location":"tutorial/05_plotting/","page":"Basic V: plotting","title":"Basic V: plotting","text":"Instead of the interactive JavaScript plots, you can also create some publication-ready plots using the SDDP.publication_plot function.","category":"page"},{"location":"tutorial/05_plotting/","page":"Basic V: plotting","title":"Basic V: plotting","text":"info: Info\nYou need to install the Plots.jl package for this to work. We used the GR backend (gr()), but any Plots.jl backend should work.","category":"page"},{"location":"tutorial/05_plotting/","page":"Basic V: plotting","title":"Basic V: plotting","text":"SDDP.publication_plot implements a plot recipe to create ribbon plots of each variable against the stages. The first argument is the vector of simulation dictionaries and the second argument is the dictionary key that you want to plot. Standard Plots.jl keyword arguments such as title and xlabel can be used to modify the look of each plot. By default, the plot displays ribbons of the 0-100, 10-90, and 25-75 percentiles. The dark, solid line in the middle is the median (i.e.
50th percentile).","category":"page"},{"location":"tutorial/05_plotting/","page":"Basic V: plotting","title":"Basic V: plotting","text":"using Plots\nplot(\n SDDP.publication_plot(simulations, title = \"Outgoing volume\") do data\n return data[:volume].out\n end,\n SDDP.publication_plot(simulations, title = \"Thermal generation\") do data\n return data[:thermal_generation]\n end,\n xlabel = \"Stage\",\n ylims = (0, 200),\n layout = (1, 2),\n margin_bottom = 5,\n size = (1000, 300)\n)","category":"page"},{"location":"tutorial/05_plotting/","page":"Basic V: plotting","title":"Basic V: plotting","text":"This should open a plot window with a plot that looks like:","category":"page"},{"location":"tutorial/05_plotting/","page":"Basic V: plotting","title":"Basic V: plotting","text":"(Image: publication plot)","category":"page"},{"location":"tutorial/05_plotting/","page":"Basic V: plotting","title":"Basic V: plotting","text":"You can save this plot as a PDF using the Plots.jl function savefig:","category":"page"},{"location":"tutorial/05_plotting/","page":"Basic V: plotting","title":"Basic V: plotting","text":"Plots.savefig(\"my_picture.pdf\")","category":"page"},{"location":"tutorial/05_plotting/#Plotting-the-value-function","page":"Basic V: plotting","title":"Plotting the value function","text":"","category":"section"},{"location":"tutorial/05_plotting/","page":"Basic V: plotting","title":"Basic V: plotting","text":"You can obtain an object representing the value function of a node using SDDP.ValueFunction.","category":"page"},{"location":"tutorial/05_plotting/","page":"Basic V: plotting","title":"Basic V: plotting","text":"V = SDDP.ValueFunction(model[(1, 1)])","category":"page"},{"location":"tutorial/05_plotting/","page":"Basic V: plotting","title":"Basic V: plotting","text":"A value function for node (1, 1)","category":"page"},{"location":"tutorial/05_plotting/","page":"Basic V: plotting","title":"Basic V: plotting","text":"The value function can be evaluated using SDDP.evaluate.","category":"page"},{"location":"tutorial/05_plotting/","page":"Basic V: plotting","title":"Basic V: plotting","text":"SDDP.evaluate(V; volume = 1)","category":"page"},{"location":"tutorial/05_plotting/","page":"Basic V: plotting","title":"Basic V: plotting","text":"(21423.813388991963, Dict(:volume=>-102.083))","category":"page"},{"location":"tutorial/05_plotting/","page":"Basic V: plotting","title":"Basic V: plotting","text":"SDDP.evaluate returns the height of the value function and a subgradient with respect to the convex state variables.","category":"page"},{"location":"tutorial/05_plotting/","page":"Basic V: plotting","title":"Basic V: plotting","text":"You can also plot the value function using SDDP.plot.","category":"page"},{"location":"tutorial/05_plotting/","page":"Basic V: plotting","title":"Basic V: plotting","text":"SDDP.plot(V, volume = 0:200, filename = \"value_function.html\")","category":"page"},{"location":"tutorial/05_plotting/","page":"Basic V: plotting","title":"Basic V: plotting","text":"This should open a webpage that looks like this one.","category":"page"},{"location":"tutorial/05_plotting/#Convergence-dashboard","page":"Basic V: plotting","title":"Convergence dashboard","text":"","category":"section"},{"location":"tutorial/05_plotting/","page":"Basic V: plotting","title":"Basic V: plotting","text":"If the text-based logging isn't to your liking, you can open a visualization of the training by passing dashboard = true to
SDDP.train.","category":"page"},{"location":"tutorial/05_plotting/","page":"Basic V: plotting","title":"Basic V: plotting","text":"SDDP.train(model; dashboard = true)","category":"page"},{"location":"tutorial/05_plotting/","page":"Basic V: plotting","title":"Basic V: plotting","text":"By default, dashboard = false because there is an initial overhead associated with opening and preparing the plot.","category":"page"},{"location":"tutorial/05_plotting/","page":"Basic V: plotting","title":"Basic V: plotting","text":"warning: Warning\nThe dashboard is experimental. There are known bugs associated with it, e.g., SDDP.jl#226.","category":"page"},{"location":"tutorial/05_plotting/","page":"Basic V: plotting","title":"Basic V: plotting","text":"This concludes our fifth tutorial for SDDP.jl. In our next tutorial, Basic VI: words of warning we discuss some of the issues that you should be aware of when creating your own models.","category":"page"},{"location":"tutorial/05_plotting/","page":"Basic V: plotting","title":"Basic V: plotting","text":"","category":"page"},{"location":"tutorial/05_plotting/","page":"Basic V: plotting","title":"Basic V: plotting","text":"This page was generated using Literate.jl.","category":"page"}] -} +var documenterSearchIndex = {"docs": []} +