Fix cut sharing in a graph with zero-probability arcs #797

Open: wants to merge 9 commits into master.
19 changes: 18 additions & 1 deletion src/algorithm.jl
@@ -693,6 +693,11 @@ function backward_pass(
for other_index in options.similar_children[node_index]
copied_probability = similar(items.probability)
other_node = model[other_index]
# Need to check that every child of other_node appears in items.nodes
# before copying probabilities across.
other_children = Set(c.term for c in other_node.children)
# The order of setdiff(A, B) matters.
@assert isempty(setdiff(other_children, Set(items.nodes)))
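# For example (hypothetical sets): if other_children == Set([:a, :b]) and
# Set(items.nodes) == Set([:a, :b, :c]), then
# setdiff(other_children, Set(items.nodes)) is empty and the assertion
# holds, whereas setdiff(Set(items.nodes), other_children) == Set([:c]).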
for (idx, child_index) in enumerate(items.nodes)
copied_probability[idx] =
get(options.Φ, (other_index, child_index), 0.0) *
@@ -754,7 +759,16 @@ function solve_all_children(
) where {T}
length_scenario_path = length(scenario_path)
for child in node.children
if isapprox(child.probability, 0.0; atol = 1e-6)
# We _do_ want to solve zero-probability nodes, because they might let us
# share cuts between similar nodes of a Markovian policy graph. If the
# user put them in, assume that they're there for a reason.
#
# If we have a belief state, then skip the node. I don't really know
# why, but tests failed when I tried to remove this.
#
# See SDDP.jl#796 and SDDP.jl#797 for more discussion.
if belief_state !== nothing &&
isapprox(child.probability, 0.0; atol = 1e-6)
continue
end
child_node = model[child.term]
@@ -865,6 +879,9 @@ function calculate_bound(
current_belief = initialize_belief(model)
# Solve all problems that are children of the root node.
for child in model.root_children
# It's okay to skip nodes with zero probability.
#
# See SDDP.jl#796 and SDDP.jl#797 for more discussion.
if isapprox(child.probability, 0.0; atol = 1e-6)
continue
end
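A side note (this snippet is not part of the diff): in the risk-neutral case the bound computed in calculate_bound is a probability-weighted sum over the root's children, so a zero-probability child contributes nothing and can safely be skipped. A minimal numerical sketch with hypothetical values:

    probabilities = [1.0, 0.0]        # root arc probabilities
    objectives = [6.565e4, 1.23e9]    # hypothetical child objective values
    bound = sum(p * obj for (p, obj) in zip(probabilities, objectives))
    # bound == 6.565e4: the zero-probability child has no effect.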
12 changes: 2 additions & 10 deletions src/user_interface.jl
@@ -478,16 +478,8 @@ function MarkovianGraph(transition_matrices::Vector{Matrix{Float64}})
for markov_state in 1:size(transition, 2)
for last_markov_state in 1:size(transition, 1)
probability = transition[last_markov_state, markov_state]
if 0.0 < probability <= 1.0
Owner Author commented: So @adow031: should we add zero-probability arcs for Markov transition matrices or keep the graph sparse?
push!(
edges,
(
(stage - 1, last_markov_state) =>
(stage, markov_state),
probability,
),
)
end
edge = (stage - 1, last_markov_state) => (stage, markov_state)
push!(edges, (edge, probability))
end
end
end
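For context (this snippet is not part of the diff), a minimal sketch of the behavioral change: MarkovianGraph now keeps zero-probability transitions as explicit arcs instead of dropping them. Using the same transition matrices as the test below:

    using SDDP
    graph = SDDP.MarkovianGraph(
        Matrix{Float64}[reshape([1.0], 1, 1), [0.5 0.5], [1.0 0.0; 0.0 1.0]],
    )
    # The patched graph includes arcs such as (2, 1) => (3, 2) with probability
    # 0.0, so nodes (2, 1) and (2, 2) share the child set {(3, 1), (3, 2)} and
    # become eligible for cut sharing when refine_at_similar_nodes = true.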
58 changes: 58 additions & 0 deletions test/algorithm.jl
@@ -387,6 +387,64 @@ function test_numerical_difficulty_callback()
return
end

function test_Markovian_zero_edges()
function build_model()
return SDDP.MarkovianPolicyGraph(;
transition_matrices = Matrix{Float64}[
reshape([1.0], 1, 1),
[0.5 0.5],
[1.0 0.0; 0.0 1.0],
],
optimizer = HiGHS.Optimizer,
sense = :Min,
lower_bound = 0.0,
) do sp, node
t, i = node
@variable(sp, 0 <= x <= 100, SDDP.State, initial_value = 0)
@variable(sp, 0 <= u_p <= 200)
@variable(sp, u_o >= 0)
@constraint(sp, x.out == x.in + u_p + u_o - (200 * i - 100))
@stageobjective(sp, (100 + i) * u_p + 300 * u_o + 50 * x.out)
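# The demand in Markov state i is 200i - 100 (100 in state 1, 300 in state
# 2); u_p is cheap, capacity-limited production, u_o is expensive recourse,
# and leftover stock x.out incurs a holding cost.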
return
end
end
# refine_at_similar_nodes = true
model = build_model()
sp_21, sp_22 = model[(2, 1)].subproblem, model[(2, 2)].subproblem
n_21 = num_constraints(sp_21; count_variable_in_set_constraints = false)
n_22 = num_constraints(sp_22; count_variable_in_set_constraints = false)
@test n_21 == n_22 == 1
SDDP.train(model; iteration_limit = 1, print_level = 0)
n_21 = num_constraints(sp_21; count_variable_in_set_constraints = false)
n_22 = num_constraints(sp_22; count_variable_in_set_constraints = false)
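# Because (2, 1) and (2, 2) now have identical child sets, cuts are shared:
# one iteration adds a cut to both subproblems, even though the forward pass
# visits only one of them.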
@test n_21 == n_22 == 2
SDDP.train(model; print_level = 0, add_to_existing_cuts = true)
@test isapprox(SDDP.calculate_bound(model), 6.565e4; atol = 1e-4)
# refine_at_similar_nodes = false
model = build_model()
sp_21, sp_22 = model[(2, 1)].subproblem, model[(2, 2)].subproblem
n_21 = num_constraints(sp_21; count_variable_in_set_constraints = false)
n_22 = num_constraints(sp_22; count_variable_in_set_constraints = false)
@test n_21 == n_22 == 1
SDDP.train(
model;
iteration_limit = 1,
print_level = 0,
refine_at_similar_nodes = false,
)
n_21 = num_constraints(sp_21; count_variable_in_set_constraints = false)
n_22 = num_constraints(sp_22; count_variable_in_set_constraints = false)
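# Without refinement at similar nodes, only the node visited on the forward
# pass gains a cut, so after one iteration the counts differ.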
@test 1 <= n_21 <= 2 && 1 <= n_22 <= 2 && n_21 != n_22
SDDP.train(
model;
print_level = 0,
add_to_existing_cuts = true,
refine_at_similar_nodes = false,
)
@test isapprox(SDDP.calculate_bound(model), 6.565e4; atol = 1e-4)
return
end

function test_root_node_risk_measure()
model = SDDP.LinearPolicyGraph(;
stages = 3,
1 change: 0 additions & 1 deletion test/visualization/value_functions.jl
@@ -136,7 +136,6 @@ function test_ValueFunction_belief_state()
b = Dict((1, 1) => 0.8, (1, 2) => 0.2)
(y, duals) = SDDP.evaluate(V11, Dict(:x => 1.0); belief_state = b)
@test duals[:x] ≈ y ≈ 1.68

V12 = SDDP.ValueFunction(model[(1, 2)])
(y, duals) = SDDP.evaluate(V12, Dict(:x => 1.0); belief_state = b)
@test duals[:x] ≈ y ≈ 1.68