diff --git a/src/algorithm.jl b/src/algorithm.jl
index 295346754..f10092b2e 100644
--- a/src/algorithm.jl
+++ b/src/algorithm.jl
@@ -693,6 +693,11 @@ function backward_pass(
                 for other_index in options.similar_children[node_index]
                     copied_probability = similar(items.probability)
                     other_node = model[other_index]
+                    # Need to check that every child of other_node is in this
+                    # list
+                    other_children = Set(c.term for c in other_node.children)
+                    # The order of setdiff(A, B) matters.
+                    @assert isempty(setdiff(other_children, Set(items.nodes)))
                     for (idx, child_index) in enumerate(items.nodes)
                         copied_probability[idx] =
                             get(options.Φ, (other_index, child_index), 0.0) *
@@ -754,7 +759,16 @@ function solve_all_children(
 ) where {T}
     length_scenario_path = length(scenario_path)
     for child in node.children
-        if isapprox(child.probability, 0.0; atol = 1e-6)
+        # We _do_ want to solve zero probability nodes, because they might allow
+        # us to cut share between similar nodes of a Markovian policy graph. If
+        # the user put them in, assume that they're there for a reason.
+        #
+        # If we have a belief state, then skip the node. I don't really know
+        # why, but tests failed when I tried to remove this.
+        #
+        # See SDDP.jl#796 and SDDP.jl#797 for more discussion.
+        if belief_state !== nothing &&
+           isapprox(child.probability, 0.0; atol = 1e-6)
             continue
         end
         child_node = model[child.term]
@@ -865,6 +879,9 @@ function calculate_bound(
     current_belief = initialize_belief(model)
     # Solve all problems that are children of the root node.
     for child in model.root_children
+        # It's okay to skip nodes with zero probability.
+        #
+        # See SDDP.jl#796 and SDDP.jl#797 for more discussion.
         if isapprox(child.probability, 0.0; atol = 1e-6)
             continue
         end
diff --git a/src/user_interface.jl b/src/user_interface.jl
index 8070c75ce..645c7e0e6 100644
--- a/src/user_interface.jl
+++ b/src/user_interface.jl
@@ -478,16 +478,8 @@ function MarkovianGraph(transition_matrices::Vector{Matrix{Float64}})
         for markov_state in 1:size(transition, 2)
             for last_markov_state in 1:size(transition, 1)
                 probability = transition[last_markov_state, markov_state]
-                if 0.0 < probability <= 1.0
-                    push!(
-                        edges,
-                        (
-                            (stage - 1, last_markov_state) =>
-                                (stage, markov_state),
-                            probability,
-                        ),
-                    )
-                end
+                edge = (stage - 1, last_markov_state) => (stage, markov_state)
+                push!(edges, (edge, probability))
             end
         end
     end
diff --git a/test/algorithm.jl b/test/algorithm.jl
index 44123557f..c878f7eb0 100644
--- a/test/algorithm.jl
+++ b/test/algorithm.jl
@@ -387,6 +387,64 @@ function test_numerical_difficulty_callback()
     return
 end
 
+function test_Markovian_zero_edges()
+    function build_model()
+        return SDDP.MarkovianPolicyGraph(;
+            transition_matrices = Matrix{Float64}[
+                reshape([1.0], 1, 1),
+                [0.5 0.5],
+                [1.0 0.0; 0.0 1.0],
+            ],
+            optimizer = HiGHS.Optimizer,
+            sense = :Min,
+            lower_bound = 0.0,
+        ) do sp, node
+            t, i = node
+            @variable(sp, 0 <= x <= 100, SDDP.State, initial_value = 0)
+            @variable(sp, 0 <= u_p <= 200)
+            @variable(sp, u_o >= 0)
+            @constraint(sp, x.out == x.in + u_p + u_o - (200 * i - 100))
+            @stageobjective(sp, (100 + i) * u_p + 300 * u_o + 50 * x.out)
+            return
+        end
+    end
+    # refine_at_similar_nodes = true
+    model = build_model()
+    sp_21, sp_22 = model[(2, 1)].subproblem, model[(2, 2)].subproblem
+    n_21 = num_constraints(sp_21; count_variable_in_set_constraints = false)
+    n_22 = num_constraints(sp_22; count_variable_in_set_constraints = false)
+    @test n_21 == n_22 == 1
+    SDDP.train(model; iteration_limit = 1, print_level = 0)
+    n_21 = num_constraints(sp_21; count_variable_in_set_constraints = false)
+    n_22 = num_constraints(sp_22; count_variable_in_set_constraints = false)
+    @test n_21 == n_22 == 2
+    SDDP.train(model; print_level = 0, add_to_existing_cuts = true)
+    @test isapprox(SDDP.calculate_bound(model), 6.565e4; atol = 1e-4)
+    # refine_at_similar_nodes = false
+    model = build_model()
+    sp_21, sp_22 = model[(2, 1)].subproblem, model[(2, 2)].subproblem
+    n_21 = num_constraints(sp_21; count_variable_in_set_constraints = false)
+    n_22 = num_constraints(sp_22; count_variable_in_set_constraints = false)
+    @test n_21 == n_22 == 1
+    SDDP.train(
+        model;
+        iteration_limit = 1,
+        print_level = 0,
+        refine_at_similar_nodes = false,
+    )
+    n_21 = num_constraints(sp_21; count_variable_in_set_constraints = false)
+    n_22 = num_constraints(sp_22; count_variable_in_set_constraints = false)
+    @test 1 <= n_21 <= 2 && 1 <= n_22 <= 2 && n_21 != n_22
+    SDDP.train(
+        model;
+        print_level = 0,
+        add_to_existing_cuts = true,
+        refine_at_similar_nodes = false,
+    )
+    @test isapprox(SDDP.calculate_bound(model), 6.565e4; atol = 1e-4)
+    return
+end
+
 function test_root_node_risk_measure()
     model = SDDP.LinearPolicyGraph(;
         stages = 3,
diff --git a/test/visualization/value_functions.jl b/test/visualization/value_functions.jl
index 30eb6b153..0ee8e6102 100644
--- a/test/visualization/value_functions.jl
+++ b/test/visualization/value_functions.jl
@@ -136,7 +136,6 @@ function test_ValueFunction_belief_state()
     b = Dict((1, 1) => 0.8, (1, 2) => 0.2)
     (y, duals) = SDDP.evaluate(V11, Dict(:x => 1.0); belief_state = b)
     @test duals[:x] ≈ y ≈ 1.68
-
     V12 = SDDP.ValueFunction(model[(1, 2)])
     (y, duals) = SDDP.evaluate(V12, Dict(:x => 1.0); belief_state = b)
     @test duals[:x] ≈ y ≈ 1.68
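
A minimal standalone sketch of why the argument order of setdiff matters for the assertion added to backward_pass above. This is not part of the patch; the node labels :a, :b, :c and the set contents are made up for illustration. The check asks whether every child of other_node appears in items.nodes; the reverse difference is allowed to be non-empty, because a child in items.nodes that other_node never reaches simply picks up probability 0.0 from the get(options.Φ, ..., 0.0) default.

# Hypothetical node labels, mirroring the shapes used in backward_pass.
items_nodes = [:a, :b, :c]       # children for which cuts were just computed
other_children = Set([:a, :b])   # children of the similar node
# Passes: other_children is a subset of items_nodes, so no child of the
# similar node has its probability silently dropped.
@assert isempty(setdiff(other_children, Set(items_nodes)))
# The reversed order evaluates to Set([:c]) and the assertion would fail,
# even though :c is harmless here: it would just get probability 0.0.
setdiff(Set(items_nodes), other_children)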
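
A second illustrative sketch, this time of the MarkovianGraph change: with the "0.0 < probability <= 1.0" guard removed, zero-probability edges are kept in the graph rather than silently dropped, which is what gives nodes (2, 1) and (2, 2) in test_Markovian_zero_edges the same child set and so lets them share cuts when refine_at_similar_nodes is left at its default of true. The matrices below are copied from that test; the edge named in the comment follows from the identity transition between stages 2 and 3.

using SDDP

# Same transition matrices as test_Markovian_zero_edges: stage 2 -> 3 is the
# 2x2 identity, so the off-diagonal entries are 0.0.
graph = SDDP.MarkovianGraph(
    Matrix{Float64}[reshape([1.0], 1, 1), [0.5 0.5], [1.0 0.0; 0.0 1.0]],
)
# After this patch, the graph includes edges such as (2, 1) => (3, 2) with
# probability 0.0 instead of omitting them.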