Skip to content

Commit

Permalink
data and example cleanup
Browse files Browse the repository at this point in the history
  • Loading branch information
harshangrjn committed Dec 8, 2021
1 parent ddffd72 commit b0c705a
Show file tree
Hide file tree
Showing 10 changed files with 73 additions and 132 deletions.
2 changes: 2 additions & 0 deletions CHANGELOG.md
Original file line number Diff line number Diff line change
Expand Up @@ -5,6 +5,8 @@ LaplacianOpt.jl Change Log
### v0.1.6
- Fixed TikzGraphs issue for tests
- Transitioned from LightGraphs to Graphs
- Updated and cleaned up `examples/solver.jl`
- `src/data.jl` cleanup for handling `"optimizer"`
- Minor docs update

### v0.1.5
Expand Down
4 changes: 2 additions & 2 deletions Project.toml
Original file line number Diff line number Diff line change
Expand Up @@ -16,9 +16,9 @@ TikzPictures = "37f6aa50-8035-52d0-81c2-5a1d08754b2d"

[compat]
DataStructures = "~0.17, ~0.18"
JSON = "~0.21"
JuMP = "^0.21.0"
Graphs = "~1.4"
JSON = "~0.21"
JuMP = "^0.21"
MathOptInterface = "~0.9, 0.10"
Memento = "~1.0, ~1.1, 1"
TikzGraphs = "~1.1, 1"
Expand Down
12 changes: 7 additions & 5 deletions docs/src/quickguide.md
Original file line number Diff line number Diff line change
Expand Up @@ -7,18 +7,20 @@ After the installation of LaplacianOpt and CPLEX (use GLPK if open-source is pre
```julia
import LaplacianOpt as LOpt
using JuMP
using CPLEX
using Gurobi

params = Dict{String, Any}(
"num_nodes" => 5,
"instance" => 1,
"optimizer" => "cplex"
"instance" => 1
)

lom_optimizer = JuMP.optimizer_with_attributes(CPLEX.Optimizer)
results = LOpt.run_LOpt_model(params, lom_optimizer)
lopt_optimizer = JuMP.optimizer_with_attributes(Gurobi.Optimizer, "presolve" => 1)
results = LOpt.run_LOpt_model(params, lopt_optimizer)
```

!!! tip
Run times of [LaplacianOpt](https://github.com/harshangrjn/LaplacianOpt.jl)'s mathematical optimization models are significantly faster using [Gurobi](https://www.gurobi.com) as the underlying mixed-integer programming (MIP) solver. Note that this solver's individual-usage license is available [free](https://www.gurobi.com/academia/academic-program-and-licenses/) for academic purposes.

# Extracting results
The run commands (for example, `run_LOpt_model`) in LaplacianOpt return detailed results in the form of a dictionary. This dictionary can be used for further processing of the results. For example, for the given instance of a complete graph, the algorithm's runtime and the optimal objective value (maximum algebraic connectivity) can be accessed with,

Expand Down
1 change: 0 additions & 1 deletion examples/Project.toml
Original file line number Diff line number Diff line change
Expand Up @@ -7,5 +7,4 @@ LaplacianOpt = "bb20392f-64fb-4001-92e8-14b3aedd5a9e"
Revise = "295af30f-e4ad-537b-8983-00126c2a3abe"

[compat]
JuMP = "^0.21.0"
julia = "1"
10 changes: 5 additions & 5 deletions examples/ex.jl
Original file line number Diff line number Diff line change
Expand Up @@ -7,23 +7,23 @@ using Gurobi
include("solver.jl")

result_all = zeros(10,3)
for i=1:10
lopt_optimizer = get_gurobi()

for i=1:1
#-------------------------------#
# User-defined inputs #
#-------------------------------#
params = Dict{String, Any}(
"num_nodes" => 8,
"instance" => i,
"eigen_cuts_full" => true,
"soc_linearized_cuts" => true,
"optimizer" => "gurobi"
"soc_linearized_cuts" => true
)

#-------------------------------------------#
# Optimization model and solution #
#-------------------------------------------#
lom_optimizer = get_solver(params)
result = LOpt.run_LOpt_model(params, lom_optimizer)
result = LOpt.run_LOpt_model(params, lopt_optimizer)

result_all[i,1] = i
result_all[i,2] = result["objective"]
Expand Down
136 changes: 42 additions & 94 deletions examples/solver.jl
Original file line number Diff line number Diff line change
@@ -1,106 +1,54 @@
"""
get_solver(params)
This function returns the JuMP optimizer with its appropriate attributes, based on
user-defined inputs in `params` dictionary.
"""
function get_solver(params::Dict{String,Any})
optimizers_list = ["cplex", "cbc", "ipopt", "juniper", "alpine", "glpk", "gurobi"]

# Optimizer
if !("optimizer" in keys(params))
error("Input a valid MIP optimizer")
end

if !(params["optimizer"] in optimizers_list)
error("Specified optimizer does not belong in the pre-defined list. Add your optimizer separately with its attributes")
end

if "presolve" in keys(params)
presolve = params["presolve"]
else
# default value
presolve = true
end

if "optimizer_log" in keys(params)
optimizer_log = params["optimizer_log"]
else
# default value
optimizer_log = true
end
#=====================================#
# MIP solvers (commercial, but fast) #
#=====================================#

function get_gurobi()
return JuMP.optimizer_with_attributes(Gurobi.Optimizer,
MOI.Silent() => false,
# "MIPFocus" => 3, # Focus on optimality over feasibility
"Presolve" => 1)
end

# Mixed-integer programming optimizers
if params["optimizer"] == "cplex" # commercial
return get_cplex(presolve, optimizer_log)

elseif params["optimizer"] == "gurobi" # commercial
return get_gurobi(presolve, optimizer_log)
function get_cplex()
return JuMP.optimizer_with_attributes(CPLEX.Optimizer,
MOI.Silent() => false,
# "CPX_PARAM_EPGAP" => 1E-4,
# "CPX_PARAM_MIPEMPHASIS" => 2 # Focus on optimality over feasibility
"CPX_PARAM_PREIND" => 1)
end

elseif params["optimizer"] == "cbc" # open-source
return get_cbc(presolve, optimizer_log)
#======================================#
# MIP solvers (open-source, but slow) #
#======================================#

elseif params["optimizer"] == "glpk" # open-source
return get_glpk(presolve, optimizer_log)

# Local mixed-integer nonlinear programming optimizers
elseif params["optimizer"] == "ipopt" # open-source
return get_ipopt(presolve, optimizer_log)

elseif params["optimizer"] == "juniper" # open-source
return get_juniper(presolve, optimizer_log)

# Global NLP/MINLP optimizer
elseif params["optimizer"] == "alpine" # open-source
alpine = JuMP.optimizer_with_attributes(Alpine.Optimizer,
"nlp_solver" => get_ipopt(presolve, optimizer_log),
"minlp_solver" => get_juniper(presolve, optimizer_log),
"mip_solver" => get_cplex(presolve, optimizer_log),
"presolve_bt" => false,
"presolve_max_iter" => 10,
"presolve_bp" => false,
"disc_ratio" => 10)
return alpine
end
function get_cbc()
return JuMP.optimizer_with_attributes(Cbc.Optimizer,
MOI.Silent() => false)
end

function get_cplex(presolve::Bool, optimizer_log::Bool)
cplex = JuMP.optimizer_with_attributes(CPLEX.Optimizer,
MOI.Silent() => !optimizer_log,
"CPX_PARAM_PREIND" => presolve)
return cplex
function get_glpk()
return JuMP.optimizer_with_attributes(GLPK.Optimizer,
MOI.Silent() => false)
end

function get_gurobi(presolve::Bool, optimizer_log::Bool)
gurobi = JuMP.optimizer_with_attributes(Gurobi.Optimizer,
MOI.Silent() => !optimizer_log,
"Presolve" => presolve)
return gurobi
end
#========================================================#
# Continuous nonlinear programming solver (open-source) #
#========================================================#

function get_ipopt(presolve::Bool, optimizer_log::Bool)
ipopt = JuMP.optimizer_with_attributes(Ipopt.Optimizer,
MOI.Silent() => !optimizer_log,
function get_ipopt()
return JuMP.optimizer_with_attributes(Ipopt.Optimizer,
MOI.Silent() => true,
"sb" => "yes",
"max_iter" => 1E4)
return ipopt
"max_iter" => Int(1E4))
end

function get_cbc(presolve::Bool, optimizer_log::Bool)
cbc = JuMP.optimizer_with_attributes(Cbc.Optimizer,
MOI.Silent() => !optimizer_log)
return cbc
end

function get_juniper(presolve::Bool, optimizer_log::Bool)
juniper = JuMP.optimizer_with_attributes(Juniper.Optimizer,
MOI.Silent() => !optimizer_log,
"mip_solver" => get_cplex(presolve, optimizer_log),
"nl_solver" => get_ipopt(presolve, optimizer_log))
return juniper
end

function get_glpk(presolve::Bool, optimizer_log::Bool)
glpk = JuMP.optimizer_with_attributes(GLPK.Optimizer,
MOI.Silent() => !optimizer_log)
return glpk
end
#=================================================================#
# Local mixed-integer nonlinear programming solver (open-source) #
#=================================================================#
function get_juniper()
return JuMP.optimizer_with_attributes(Juniper.Optimizer,
MOI.Silent() => false,
"mip_solver" => get_gurobi(),
"nl_solver" => get_ipopt())
end
10 changes: 5 additions & 5 deletions src/constraints.jl
Original file line number Diff line number Diff line change
Expand Up @@ -45,13 +45,13 @@ function constraint_lazycallback_wrapper(lom::LaplacianOptModel)

status = JuMP.callback_node_status(cb_cuts, lom.model)

if status == MOI.CALLBACK_NODE_STATUS_FRACTIONAL
# if status == MOI.CALLBACK_NODE_STATUS_FRACTIONAL

if lom.data["optimizer"] != "glpk"
Memento.info(_LOGGER, "Callback status: solution is fractional")
end
# if lom.data["optimizer"] != "glpk"
# Memento.info(_LOGGER, "Callback status: solution is fractional")
# end

elseif status == MOI.CALLBACK_NODE_STATUS_UNKNOWN
if status == MOI.CALLBACK_NODE_STATUS_UNKNOWN
Memento.error(_LOGGER, "Callback status: unknown - solution may not be integer feasible")

else status == MOI.CALLBACK_NODE_STATUS_INTEGER
Expand Down
13 changes: 5 additions & 8 deletions src/data.jl
Original file line number Diff line number Diff line change
Expand Up @@ -31,13 +31,6 @@ function get_data(params::Dict{String, Any})
solution_type = "exact"
end

# Optimizer
if "optimizer" in keys(params)
optimizer = params["optimizer"]
else
Memento.error(_LOGGER, "Input a valid MIP optimizer")
end

# Relax Integrality
if "relax_integrality" in keys(params)
relax_integrality = params["relax_integrality"]
Expand Down Expand Up @@ -123,9 +116,13 @@ function get_data(params::Dict{String, Any})
"lazycuts_logging" => lazycuts_logging,
"topology_flow_cuts" => topology_flow_cuts,
"lazy_callback_status" => lazy_callback_status,
"optimizer" => optimizer,
"relax_integrality" => relax_integrality)

# Optimizer
if "optimizer" in keys(params)
data["optimizer"] = params["optimizer"]
end

return data
end

Expand Down
13 changes: 4 additions & 9 deletions test/lo_model_tests.jl
Original file line number Diff line number Diff line change
Expand Up @@ -9,7 +9,6 @@
"data_type" => "old",

"solution_type" => "exact",
"optimizer" => "glpk",
"presolve" => true,
"optimizer_log" => true,
"relax_integrality" => false
Expand Down Expand Up @@ -55,8 +54,7 @@ end

params = Dict{String, Any}(
"num_nodes" => 5,
"instance" => 1,
"optimizer" => "glpk"
"instance" => 1
)

result_mst = LaplacianOpt.run_MaxSpanTree_model(params, glpk_optimizer)
Expand Down Expand Up @@ -85,8 +83,7 @@ end
"instance" => 5,
"data_type" => "old",
"eigen_cuts_full" => false,
"topology_flow_cuts" => true,
"optimizer" => "glpk"
"topology_flow_cuts" => true
)

result_mst = LaplacianOpt.run_MaxSpanTree_model(params, glpk_optimizer, lazy_callback=true)
Expand All @@ -112,8 +109,7 @@ end
"instance" => 5,
"eigen_cuts_full" => false,
"soc_linearized_cuts" => true,
"topology_flow_cuts" => true,
"optimizer" => "glpk"
"topology_flow_cuts" => true
)

result_1 = LaplacianOpt.run_LOpt_model(params_1, glpk_optimizer)
Expand All @@ -131,8 +127,7 @@ end
"num_nodes" => 5,
"instance" => 5,
"eigen_cuts_full" => true,
"soc_linearized_cuts" => true,
"optimizer" => "glpk"
"soc_linearized_cuts" => true
)

result_2 = LaplacianOpt.run_LOpt_model(params_2, glpk_optimizer)
Expand Down
4 changes: 1 addition & 3 deletions test/log_tests.jl
Original file line number Diff line number Diff line change
Expand Up @@ -6,18 +6,16 @@
params = Dict{String, Any}(
"num_nodes" => 5,
"instance" => 1,
"optimizer" => "glpk",
"tol_zero" => 1E-4,
"tol_psd" => 1E-3,
"eigen_cuts_full" => true,
"topology_flow_cuts" => true,
"lazycuts_logging" => true
)

lom_optimizer = get_solver(params)
data = LOpt.get_data(params)
model_lopt = LOpt.build_LOModel(data)
result_lopt = LOpt.optimize_LOModel!(model_lopt, optimizer = lom_optimizer)
result_lopt = LOpt.optimize_LOModel!(model_lopt, optimizer = glpk_optimizer)

LOpt.visualize_solution(result_lopt, data, visualizing_tool = "tikz", plot_file_format = "tex")
LOpt.visualize_solution(result_lopt, data, visualizing_tool = "tikz", plot_file_format = "tex", display_edge_weights=true)
Expand Down

0 comments on commit b0c705a

Please sign in to comment.