diff --git a/examples/uc/eagle/3scen_nofw.sh b/examples/uc/eagle/3scen_nofw.sh
new file mode 100644
index 000000000..f1e536ffb
--- /dev/null
+++ b/examples/uc/eagle/3scen_nofw.sh
@@ -0,0 +1,20 @@
+#!/bin/bash
+#SBATCH --nodes=2                  # Number of nodes
+#SBATCH --ntasks=9                 # Request 9 CPU cores
+#SBATCH --time=00:10:00            # Job should run for up to 10 minutes
+#SBATCH --account=msoc             # Where to charge NREL Hours
+#SBATCH --mail-user=Bernard.Knueven@nrel.gov  # If you want email notifications
+#SBATCH --mail-type=BEGIN,END,FAIL # When you want email notifications
+#SBATCH --output=3scen_nofw.%j.out # %j will be replaced with the job ID
+
+module load conda
+module load xpressmp
+module load openmpi/4.1.0/gcc-8.4.0
+
+conda activate mpi-sppy
+
+export OMPI_MCA_btl=self,tcp
+
+cd ${HOME}/software/mpi-sppy/examples/uc
+
+srun python -u -m mpi4py uc_cylinders.py --bundles-per-rank=0 --max-iterations=5 --default-rho=1.0 --num-scens=3 --max-solver-threads=2 --solver-name=xpress_persistent --rel-gap=0.00001 --abs-gap=1 --lagrangian-iter0-mipgap=1e-7 --lagrangian --xhatshuffle --ph-mipgaps-json=phmipgaps.json
diff --git a/examples/uc/eagle/50scen_nofw.sh b/examples/uc/eagle/50scen_nofw.sh
new file mode 100644
index 000000000..7181a2874
--- /dev/null
+++ b/examples/uc/eagle/50scen_nofw.sh
@@ -0,0 +1,20 @@
+#!/bin/bash
+#SBATCH --nodes=9                  # Number of nodes
+#SBATCH --ntasks=150               # Request 150 CPU cores
+#SBATCH --time=00:10:00            # Job should run for up to 10 minutes
+#SBATCH --account=msoc             # Where to charge NREL Hours
+#SBATCH --mail-user=Bernard.Knueven@nrel.gov  # If you want email notifications
+#SBATCH --mail-type=BEGIN,END,FAIL # When you want email notifications
+#SBATCH --output=50scen_nofw.%j.out # %j will be replaced with the job ID
+
+module load conda
+module load xpressmp
+module load openmpi/4.1.0/gcc-8.4.0
+
+conda activate mpi-sppy
+
+export OMPI_MCA_btl=self,tcp
+
+cd ${HOME}/software/mpi-sppy/examples/uc
+
+srun python -u -m mpi4py uc_cylinders.py --bundles-per-rank=0 --max-iterations=5 --default-rho=1.0 --num-scens=50 --max-solver-threads=2 --solver-name=xpress_persistent --rel-gap=0.00001 --abs-gap=1 --lagrangian-iter0-mipgap=1e-7 --lagrangian --xhatshuffle --ph-mipgaps-json=phmipgaps.json
diff --git a/examples/uc/phmipgaps.json b/examples/uc/phmipgaps.json
index 4d4a1fd2d..92574b2e1 100644
--- a/examples/uc/phmipgaps.json
+++ b/examples/uc/phmipgaps.json
@@ -2,6 +2,5 @@
     "0": 0.02,
     "1": 0.02,
     "2": 0.01,
-    "10": 0.001,
-    "20": 0.0001
+    "10": 0.001
 }
diff --git a/examples/uc/ptdf_ext.py b/examples/uc/ptdf_ext.py
new file mode 100644
index 000000000..1590ec147
--- /dev/null
+++ b/examples/uc/ptdf_ext.py
@@ -0,0 +1,273 @@
+# Copyright 2020 by B. Knueven, D. Mildebrath, C. Muir, J-P Watson, and D.L. Woodruff
+# This software is distributed under the 3-clause BSD License.
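+
+# This extension lazily enforces PTDF-based transmission flow limits on
+# mpi-sppy subproblems: after each subproblem solve, branch flows are checked
+# and only violated flow constraints are added, optionally preceded by
+# LP-relaxation warmstart passes before the binaries are re-enforced.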
+
+import pyomo.environ as pyo
+from pyomo.common.collections import ComponentMap, ComponentSet
+from mpisppy.extensions.extension import Extension
+from mpisppy.utils.sputils import is_persistent
+
+import egret.common.lazy_ptdf_utils as lpu
+from egret.models.unit_commitment import (_lazy_ptdf_check_violations,
+                                          _lazy_ptdf_log_terminate_on_violations,
+                                          _lazy_ptdf_warmstart_copy_violations,
+                                          _lazy_ptdf_solve,
+                                          _lazy_ptdf_normal_terminatation,
+                                          _lazy_ptdf_violation_adder,
+                                          )
+import logging
+from egret.common.log import logger
+
+logger.setLevel(logging.ERROR)
+
+_egret_ptdf_options = ('rel_ptdf_tol', 'abs_ptdf_tol', 'abs_flow_tol', 'rel_flow_tol',
+                       'lazy_rel_flow_tol', 'max_violations_per_iteration', 'lazy',
+                       'branch_kv_threshold', 'kv_threshold_type', 'active_flow_tol',)
+
+class PTDFExtension(Extension):
+    ''' Extension for lazily enforcing PTDF-based transmission constraints
+        on the subproblems of a general SPBase object.
+
+    Args:
+        spopt_object (SPBase): The mpi-sppy optimization object for the current model
+    '''
+    def __init__(self, spopt_object, **kwargs):
+        # attach self.opt
+        super().__init__(spopt_object)
+
+        self.pre_lp_iteration_limit = kwargs.get('pre_lp_iteration_limit', 0)
+        self.lp_iteration_limit = kwargs.get('lp_iteration_limit', 0)
+        # lp_cleanup_phase is referenced in _initial_pass; give it a default
+        self.lp_cleanup_phase = kwargs.get('lp_cleanup_phase', True)
+        self.iteration_limit = kwargs.get('iteration_limit', 100000)
+        self.verbose = kwargs.get('verbose', False)
+
+        # Egret PTDF options
+        self.egret_ptdf_options = {}
+        for option in _egret_ptdf_options:
+            if option in kwargs:
+                self.egret_ptdf_options[option] = kwargs[option]
+
+        if self.verbose:
+            logger.setLevel(logging.INFO)
+
+        self.initial_pass_complete = ComponentSet()
+        self.bundling = self.opt.bundling
+
+        self.vars_to_load = ComponentMap()
+        self.time_periods = ComponentMap()
+        self.bundle_conditional_probability = ComponentMap()
+
+    def pre_solve(self, subproblem):
+        if subproblem not in self.initial_pass_complete:
+            self.spoke_name = None
+            if self.opt.spcomm is not None:
+                self.spoke_name = self.opt.spcomm.__class__.__name__
+            self._initial_pass(subproblem)
+            self.initial_pass_complete.add(subproblem)
+
+    def post_solve(self, subproblem, results):
+        scenario_blocks = self._get_scenario_blocks(subproblem)
+        termination_cond, results, iterations = \
+                self._mip_pass(subproblem, scenario_blocks, results)
+        return results
+
+    def _is_transmission_constrained(self, scenario_blocks):
+        for s in scenario_blocks:
+            if len(s.TransmissionLines) == 0:
+                return False
+        return True
+
+    def _get_scenario_blocks(self, subproblem):
+        if self.bundling:
+            return tuple( subproblem.component(sname) \
+                          for sname in subproblem._ef_scenario_names )
+        else:
+            return ( subproblem, )
+
+    def _initial_pass(self, subproblem):
+        # get vars_to_load for later
+        scenario_blocks = self._get_scenario_blocks(subproblem)
+        if not self._is_transmission_constrained(scenario_blocks):
+            return
+        if is_persistent(subproblem._solver_plugin):
+            subproblem_vars_to_load = []
+            for s in scenario_blocks:
+                for t in s.TimePeriods:
+                    b = s.TransmissionBlock[t]
+                    assert isinstance(b.p_nw, pyo.Var)
+                    subproblem_vars_to_load.extend(b.p_nw.values())
+
+            self.vars_to_load[subproblem] = subproblem_vars_to_load
+        else:
+            self.vars_to_load[subproblem] = None
+
+        for s in scenario_blocks:
+            self.time_periods[s] = s.TimePeriods
+            if self.bundling:
+                self.bundle_conditional_probability[s] = \
+                        s._mpisppy_data.bundle_conditional_probability
+            else:
+                self.bundle_conditional_probability[s] = 1.
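+            # bundle_conditional_probability is used below as obj_multi, so
+            # objective terms Egret adds alongside violated constraints are
+            # weighted by each scenario's conditional probability in its bundle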
+
+            # load in user-specified PTDF options
+            for k,v in self.egret_ptdf_options.items():
+                s._ptdf_options[k] = v
+
+        self.tee = ("tee-rank0-solves" in self.opt.options
+                    and self.opt.options['tee-rank0-solves']
+                    and self.opt.cylinder_rank == 0
+                    )
+
+        if (self.pre_lp_iteration_limit + self.lp_iteration_limit) == 0:
+            return
+
+        # relax the initial subproblems
+        for s in scenario_blocks:
+            lpu.uc_instance_binary_relaxer(s, subproblem._solver_plugin)
+
+        # solve the model
+        for k,val in self.opt.current_solver_options.items():
+            subproblem._solver_plugin.options[k] = val
+
+        if is_persistent(subproblem._solver_plugin):
+            results = subproblem._solver_plugin.solve(subproblem, tee=self.tee, save_results=False, load_solutions=False)
+            subproblem._solver_plugin.load_vars(self.vars_to_load[subproblem])
+        else:
+            results = subproblem._solver_plugin.solve(subproblem, tee=self.tee, load_solutions=False)
+            subproblem.solutions.load_from(results)
+
+        if self.pre_lp_iteration_limit > 0:
+            lp_warmstart_termination_cond, results, lp_warmstart_iterations = \
+                    self._pre_lp_pass(subproblem, scenario_blocks)
+
+        if self.lp_iteration_limit > 0:
+            lp_termination_cond, results, lp_iterations = \
+                    self._lp_pass(subproblem, scenario_blocks)
+
+        if self.lp_cleanup_phase:
+            tot_removed = 0
+            for s in scenario_blocks:
+                for t,b in s.TransmissionBlock.items():
+                    tot_removed += lpu.remove_inactive(b, subproblem._solver_plugin,
+                                                       t, prepend_str=f"[LP cleanup phase on rank {self.opt.global_rank}] ")
+            logger.info(f"[LP cleanup phase on rank {self.opt.global_rank} for {self.spoke_name}] removed {tot_removed} inactive flow constraint(s)")
+
+        # enforce binaries in subproblems
+        for s in scenario_blocks:
+            lpu.uc_instance_binary_enforcer(s, subproblem._solver_plugin)
+
+        # mpi-sppy will solve the MIP
+        return
+
+    def _do_pass(self, subproblem, scenario_blocks, time_periods, vars_to_load,
+                 prepend_str, iteration_limit, add_all_lazy_violations=False,
+                 results=None, pre_lp_cleanup=False):
+
+        persistent_solver = is_persistent(subproblem._solver_plugin)
+        for i in range(iteration_limit):
+            flows, viol_lazy = ComponentMap(), ComponentMap()
+            terminate_this_iter, all_viol_in_model = ComponentMap(), ComponentMap()
+            for s in scenario_blocks:
+                flows[s], viol_num, mon_viol_num, viol_lazy[s] = \
+                        _lazy_ptdf_check_violations(s, s.model_data, time_periods[s],
+                                                    s._ptdf_options, prepend_str)
+
+                terminate_this_iter[s], all_viol_in_model[s] = \
+                        _lazy_ptdf_log_terminate_on_violations(viol_num, mon_viol_num,
+                                                               i, prepend_str)
+
+            all_viol_in_model = all(all_viol_in_model.values())
+            terminate_this_iter = all(terminate_this_iter.values())
+            if terminate_this_iter and not add_all_lazy_violations:
+                if pre_lp_cleanup:
+                    results = self._pre_lp_cleanup(subproblem, scenario_blocks,
+                                                   persistent_solver, time_periods, prepend_str)
+                return _lazy_ptdf_normal_terminatation(all_viol_in_model, results, i, prepend_str)
+
+            for s in scenario_blocks:
+                _lazy_ptdf_violation_adder(s, s.model_data, flows[s], viol_lazy[s], time_periods[s],
+                                           subproblem._solver_plugin, s._ptdf_options, prepend_str, i,
+                                           obj_multi=self.bundle_conditional_probability[s])
+
+            if terminate_this_iter and add_all_lazy_violations:
+                if pre_lp_cleanup:
+                    results = self._pre_lp_cleanup(subproblem, scenario_blocks,
+                                                   persistent_solver, time_periods, prepend_str)
+                return _lazy_ptdf_normal_terminatation(all_viol_in_model, results, i, prepend_str)
+
+            results = _lazy_ptdf_solve(subproblem, subproblem._solver_plugin, persistent_solver,
+                                       symbolic_solver_labels=False, solver_tee=self.tee,
+                                       vars_to_load=vars_to_load, solve_method_options=None)
+        else:
+            if pre_lp_cleanup:
+                results = self._pre_lp_cleanup(subproblem, scenario_blocks,
+                                               persistent_solver, time_periods, prepend_str)
+            return lpu.LazyPTDFTerminationCondition.ITERATION_LIMIT, results, i
+
+    def _pre_lp_cleanup(self, subproblem, scenario_blocks, persistent_solver, time_periods, prepend_str):
+        if persistent_solver:
+            # unpack lpu._load_pf_slacks into a single call to load_vars
+            vars_to_load = []
+            for s in scenario_blocks:
+                for t in time_periods[s]:
+                    b = s.TransmissionBlock[t]
+                    vars_to_load.extend(b.pf_slack_pos.values())
+                    vars_to_load.extend(b.pf_slack_neg.values())
+                    vars_to_load.extend(b.pfi_slack_pos.values())
+                    vars_to_load.extend(b.pfi_slack_neg.values())
+                    vars_to_load.extend(b.pfc_slack_pos.values())
+                    vars_to_load.extend(b.pfc_slack_neg.values())
+            if vars_to_load:
+                subproblem._solver_plugin.load_vars(vars_to_load)
+
+        for s in scenario_blocks:
+            _lazy_ptdf_warmstart_copy_violations(s, s.model_data, time_periods[s],
+                                                 subproblem._solver_plugin, s._ptdf_options, prepend_str,
+                                                 obj_multi=self.bundle_conditional_probability[s])
+
+        # the basis is usually not a good start now
+        # so reset the solver if we can
+        subproblem._solver_plugin.reset()
+
+        results = _lazy_ptdf_solve(subproblem, subproblem._solver_plugin, persistent_solver,
+                                   symbolic_solver_labels=False, solver_tee=self.tee,
+                                   vars_to_load=self.vars_to_load[subproblem],
+                                   solve_method_options=None)
+        return results
+
+    def _pre_lp_pass(self, subproblem, scenario_blocks):
+        vars_to_load_t_subset = ComponentMap()
+        t_subset = ComponentMap()
+        persistent_solver = is_persistent(subproblem._solver_plugin)
+
+        if persistent_solver:
+            vars_to_load = []
+        else:
+            vars_to_load = None
+
+        for s in scenario_blocks:
+            max_demand_time = max(s.TotalDemand, key=s.TotalDemand.__getitem__)
+            t_subset[s] = [max_demand_time,]
+            if persistent_solver:
+                transmission_block = s.TransmissionBlock[max_demand_time]
+                assert isinstance(transmission_block.p_nw, pyo.Var)
+                vars_to_load.extend(transmission_block.p_nw.values())
+
+        return self._do_pass(subproblem, scenario_blocks, t_subset, vars_to_load,
+                             prepend_str=f"[LP warmstart phase on rank {self.opt.global_rank} for {self.spoke_name}] ",
+                             iteration_limit=self.pre_lp_iteration_limit,
+                             add_all_lazy_violations=False,
+                             results=None, pre_lp_cleanup=True)
+
+    def _lp_pass(self, subproblem, scenario_blocks):
+        return self._do_pass(subproblem, scenario_blocks,
+                             self.time_periods, self.vars_to_load[subproblem],
+                             prepend_str=f"[LP phase on rank {self.opt.global_rank} for {self.spoke_name}] ",
+                             iteration_limit=self.lp_iteration_limit,
+                             add_all_lazy_violations=True,
+                             results=None, pre_lp_cleanup=False)
+
+    def _mip_pass(self, subproblem, scenario_blocks, results):
+        if not self._is_transmission_constrained(scenario_blocks):
+            return None, results, 0
+        return self._do_pass(subproblem, scenario_blocks,
+                             self.time_periods, self.vars_to_load[subproblem],
+                             prepend_str=f"[MIP phase on rank {self.opt.global_rank} for {self.spoke_name}] ",
+                             iteration_limit=self.iteration_limit,
+                             add_all_lazy_violations=False,
+                             results=results, pre_lp_cleanup=False)
diff --git a/examples/uc/uc_cylinders.py b/examples/uc/uc_cylinders.py
index 85afea22c..46f2c109f 100644
--- a/examples/uc/uc_cylinders.py
+++ b/examples/uc/uc_cylinders.py
@@ -21,6 +21,10 @@
 import mpisppy.utils.cfg_vanilla as vanilla
 from mpisppy.extensions.cross_scen_extension import CrossScenarioExtension
+from ptdf_ext import PTDFExtension
+
+input_dir = "/projects/watertap/bknueven/Texas7K/egret_instances/2018-08-01/"
"/projects/watertap/bknueven/Texas7K/egret_instances/2018-08-01/" + def _parse_args(): cfg = config.Config() cfg.popular_args() @@ -52,6 +56,10 @@ def _parse_args(): description="Run with async projective hedging instead of progressive hedging", domain=bool, default=False) + cfg.add_to_config("add_contingency_constraints", + description="Use Egret to monitor all possible non-disconnnection contingencies (default False)", + domain=bool, + default=False) cfg.parse_command_line("uc_cylinders") return cfg @@ -70,15 +78,19 @@ def main(): fixer_tol = cfg.fixer_tol cross_scenario_cuts = cfg.cross_scenario_cuts - scensavail = [3,5,10,25,50,100] + scensavail = [2,3,4,5,10,25,50,100] if num_scen not in scensavail: raise RuntimeError("num-scen was {}, but must be in {}".\ format(num_scen, scensavail)) scenario_creator_kwargs = { - "scenario_count": num_scen, - "path": str(num_scen) + "scenarios_r1", - } + "scenario_count": num_scen, + "path": input_dir + str(num_scen) + "scenarios_r1", + "add_contingency_constraints": cfg.add_contingency_constraints, + } + if num_scen == 4: + scenario_creator_kwargs["path"] = str(num_scen) + "scenarios_rtsgmlc" + scenario_creator = uc.scenario_creator scenario_denouement = uc.scenario_denouement all_scenario_names = [f"Scenario{i+1}" for i in range(num_scen)] @@ -100,7 +112,7 @@ def main(): rho_setter = rho_setter) # Extend and/or correct the vanilla dictionary - ext_classes = [Gapper] + ext_classes = [Gapper, PTDFExtension] if fixer: ext_classes.append(Fixer) if cross_scenario_cuts: @@ -144,24 +156,29 @@ def main(): # FWPH spoke if fwph: fw_spoke = vanilla.fwph_spoke(*beans, scenario_creator_kwargs=scenario_creator_kwargs) + fw_spoke["opt_kwargs"]["extensions"] = PTDFExtension # Standard Lagrangian bound spoke if lagrangian: lagrangian_spoke = vanilla.lagrangian_spoke(*beans, scenario_creator_kwargs=scenario_creator_kwargs, rho_setter = rho_setter) + lagrangian_spoke["opt_kwargs"]["extensions"] = PTDFExtension # xhat looper bound spoke if xhatlooper: xhatlooper_spoke = vanilla.xhatlooper_spoke(*beans, scenario_creator_kwargs=scenario_creator_kwargs) + xhatlooper_spoke["opt_kwargs"]["extensions"] = PTDFExtension # xhat shuffle bound spoke if xhatshuffle: xhatshuffle_spoke = vanilla.xhatshuffle_spoke(*beans, scenario_creator_kwargs=scenario_creator_kwargs) + xhatshuffle_spoke["opt_kwargs"]["extensions"] = PTDFExtension # cross scenario cut spoke if cross_scenario_cuts: cross_scenario_cuts_spoke = vanilla.cross_scenario_cuts_spoke(*beans, scenario_creator_kwargs=scenario_creator_kwargs) + cross_scenario_cuts_spoke["opt_kwargs"]["extensions"] = PTDFExtension list_of_spoke_dict = list() if fwph: diff --git a/examples/uc/uc_funcs.py b/examples/uc/uc_funcs.py index 6599d0681..b86c93b4e 100644 --- a/examples/uc/uc_funcs.py +++ b/examples/uc/uc_funcs.py @@ -20,7 +20,8 @@ import egret.models.unit_commitment as uc -def pysp_instance_creation_callback(scenario_name, path=None, scenario_count=None): +def pysp_instance_creation_callback(scenario_name, path=None, scenario_count=None, + add_contingency_constraints=False): """ All this does is create the concrete model instance. 

     Notes:
@@ -30,22 +31,53 @@ def pysp_instance_creation_callback(scenario_name, path=None, scenario_count=Non
     #print("Building instance for scenario =", scenario_name)
     scennum = sputils.extract_num(scenario_name)

-    uc_model_params = pdp.get_uc_model()
+    root_node_dat = os.path.isfile(os.path.join(path,"RootNode.dat"))
+    scenario_1_dat = os.path.isfile(os.path.join(path,"Scenario_1.dat"))
+    scenario_1_json = os.path.isfile(os.path.join(path,"Scenario_1.json"))
+    scenario_1_json_gz = os.path.isfile(os.path.join(path,"Scenario_1.json.gz"))

-    scenario_data = DataPortal(model=uc_model_params)
-    scenario_data.load(filename=path+os.sep+"RootNode.dat")
-    scenario_data.load(filename=path+os.sep+"Node"+str(scennum)+".dat")
+    assert not (root_node_dat and scenario_1_dat)
+    assert not (root_node_dat and scenario_1_json)
+    assert not (scenario_1_dat and scenario_1_json)

-    scenario_params = uc_model_params.create_instance(scenario_data,
-                                                      report_timing=False,
-                                                      name=scenario_name)
+    if root_node_dat:
+        uc_model_params = pdp.get_uc_model()

-    scenario_md = md.ModelData(pdp.create_model_data_dict_params(scenario_params, keep_names=True))
+        scenario_data = DataPortal(model=uc_model_params)
+        scenario_data.load(filename=path+os.sep+"RootNode.dat")
+        scenario_data.load(filename=path+os.sep+"Node"+str(scennum)+".dat")

-    ## TODO: use the "power_balance_constraints" for now. In the future, networks should be
-    ## handled with a custom callback -- also consider other base models
-    scenario_instance = uc.create_tight_unit_commitment_model(scenario_md,
-                                network_constraints='power_balance_constraints')
+        scenario_params = uc_model_params.create_instance(scenario_data,
+                                                          report_timing=False,
+                                                          name=scenario_name)
+
+        scenario_md = md.ModelData(pdp.create_model_data_dict_params(scenario_params, keep_names=True))
+
+    elif scenario_1_dat:
+        uc_model_params = pdp.get_uc_model()
+
+        scenario_data = DataPortal(model=uc_model_params)
+        scenario_data.load(filename=path+os.sep+"Scenario_"+str(scennum)+".dat")
+
+        scenario_params = uc_model_params.create_instance(scenario_data,
+                                                          report_timing=False,
+                                                          name=scenario_name)
+
+        scenario_md = md.ModelData(pdp.create_model_data_dict_params(scenario_params, keep_names=True))
+
+    elif scenario_1_json:
+        scenario_md = md.ModelData(os.path.join(path, f"Scenario_{scennum}.json"))
+
+    elif scenario_1_json_gz:
+        scenario_md = md.ModelData(os.path.join(path, f"Scenario_{scennum}.json.gz"))
+
+    else:
+        raise RuntimeError(f"Can't determine stochastic UC data type in directory {path}")
+
+    if add_contingency_constraints:
+        _add_all_contingency_constraints(scenario_md)
+
+    scenario_instance = uc.create_tight_unit_commitment_model(scenario_md, slack_type=uc.SlackType.TRANSMISSION_LIMITS)

     # hold over string attribute from Egret,
     # causes warning wth LShaped/Benders
@@ -57,7 +89,7 @@ def pysp_instance_creation_callback(scenario_name, path=None, scenario_count=Non

 # TBD: there are legacy functions here that should probably be factored.
 def scenario_creator(scenario_name, scenario_count=None, path=None,
-                     num_scens=None, seedoffset=0):
+                     num_scens=None, seedoffset=0, **kwargs):
     # Do some calculations that might be needed by confidence interval software
     scennum = sputils.extract_num(scenario_name)
@@ -65,17 +97,17 @@ def scenario_creator(scenario_name, scenario_count=None, path=None,
     newname = f"Scenario{newnum}"

     return pysp2_callback(scenario_name, scenario_count=scenario_count,
-                          path=path, num_scens=num_scens, seedoffset=seedoffset)
+                          path=path, num_scens=num_scens, seedoffset=seedoffset, **kwargs)

 def pysp2_callback(scenario_name, scenario_count=None, path=None,
-                   num_scens=None, seedoffset=0):
+                   num_scens=None, seedoffset=0, **kwargs):
     ''' The callback needs to create an instance and then attach
         the nodes to it in a list _mpisppy_node_list ordered by stages.
         Optionally attach _PHrho.
     '''
     instance = pysp_instance_creation_callback(
-        scenario_name, scenario_count=scenario_count, path=path,
+        scenario_name, scenario_count=scenario_count, path=path, **kwargs,
     )

     #Add the probability of the scenario
@@ -129,7 +161,7 @@ def scenario_rhos(scenario_instance, rho_scale_factor=0.1):
             avg_cost = scenario_instance.ComputeProductionCosts(scenario_instance, g, t, avg_power) + min_cost
             #max_cost = scenario_instance.ComputeProductionCosts(scenario_instance, g, t, max_power) + min_cost

-            computed_rho = rho_scale_factor * avg_cost
+            computed_rho = max(rho_scale_factor * avg_cost, rho_scale_factor*0.1)
             computed_rhos.append((id(scenario_instance.UnitOn[g,t]), computed_rho))

     return computed_rhos
@@ -270,6 +302,20 @@ def scenario_tree_solution_writer( solution_dir, sname, scenario, bundling ):
         mds = uc._save_uc_results(scenario, relaxed=False)
         mds.write(file_name)

+def _add_all_contingency_constraints(md):
+    from egret.model_library.transmission.tx_calc import (construct_connection_graph,
+                                                          get_N_minus_1_branches,
+                                                          )
+
+    branches = dict(md.elements(element_type='branch'))
+    mapping_bus_to_idx = { bus_n: i for i, bus_n in enumerate(md.data['elements']['bus'].keys()) }
+    graph = construct_connection_graph(branches, mapping_bus_to_idx)
+    all_connected_contingencies = get_N_minus_1_branches(graph, branches, mapping_bus_to_idx)
+
+    md.data['elements']['contingency'] = \
+        { bn : {'branch_contingency': bn} for bn in all_connected_contingencies }
+    print(f"Added {len(md.data['elements']['contingency'])} contingencies")
+
 #=========
 def scenario_names_creator(scnt,start=0):
     # (only for Amalgamator): return the full list of names
@@ -326,4 +372,3 @@ def sample_tree_scen_creator(sname, stage, sample_branching_factors, seed,
     sca["seedoffset"] = seed
     sca["num_scens"] = sample_branching_factors[0]  # two-stage problem
     return scenario_creator(sname, **sca)
-
diff --git a/mpisppy/cylinders/lagrangian_bounder.py b/mpisppy/cylinders/lagrangian_bounder.py
index 5d4ddf238..d1a86dca9 100644
--- a/mpisppy/cylinders/lagrangian_bounder.py
+++ b/mpisppy/cylinders/lagrangian_bounder.py
@@ -73,6 +73,10 @@ def main(self):
         self.bound = self.trivial_bound

+        # change to the iterk solver options
+        # after first trivial bound solve
+        self.opt.current_solver_options = self.opt.options["iterk_solver_options"]
+
         dk_iter = 1
         while not self.got_kill_signal():
             if self.new_Ws:
diff --git a/mpisppy/cylinders/xhatshufflelooper_bounder.py b/mpisppy/cylinders/xhatshufflelooper_bounder.py
index 9b98d3861..4c351122e 100644
--- a/mpisppy/cylinders/xhatshufflelooper_bounder.py
+++ b/mpisppy/cylinders/xhatshufflelooper_bounder.py
@@ -48,6 +48,18 @@ def xhatbase_prep(self):
         if abs(1 - self.opt.E1) > self.opt.E1_tolerance:
             raise ValueError(f"Total probability of scenarios was {self.opt.E1} "+\
                              f"(E1_tolerance is {self.opt.E1_tolerance})")
+
+        verbose = self.opt.options['verbose']
+        teeme = False
+        if "tee-rank0-solves" in self.opt.options:
+            teeme = self.opt.options['tee-rank0-solves']
+        self.opt.solve_loop(
+            solver_options=self.opt.current_solver_options,
+            dtiming=False,
+            gripe=True,
+            tee=teeme,
+            verbose=verbose
+        )
         ### end iter0 stuff (but note: no need for iter 0 solves in an xhatter)

         xhatter.post_iter0()
diff --git a/mpisppy/extensions/extension.py b/mpisppy/extensions/extension.py
index 4830eed33..5e282a9b8 100644
--- a/mpisppy/extensions/extension.py
+++ b/mpisppy/extensions/extension.py
@@ -97,6 +97,15 @@ def __init__(self, ph, ext_classes):
             name = constr.__name__
             self.extdict[name] = constr(ph)

+    def pre_solve(self, subproblem):
+        for lobject in self.extdict.values():
+            lobject.pre_solve(subproblem)
+
+    def post_solve(self, subproblem, results):
+        for lobject in self.extdict.values():
+            results = lobject.post_solve(subproblem, results)
+        return results
+
     def pre_iter0(self):
         for lobject in self.extdict.values():
             lobject.pre_iter0()
diff --git a/mpisppy/spopt.py b/mpisppy/spopt.py
index 58fad1aef..884f6b2ca 100644
--- a/mpisppy/spopt.py
+++ b/mpisppy/spopt.py
@@ -217,6 +217,11 @@ def _vb(msg):
         if self.extensions is not None:
             results = self.extobject.post_solve(s, results)
+        if pyo.check_optimal_termination(results):
+            if self.is_minimizing:
+                s._mpisppy_data.outer_bound = results.Problem[0].Lower_bound
+            else:
+                s._mpisppy_data.outer_bound = results.Problem[0].Upper_bound

         return pyomo_solve_time + set_objective_time  # set_objective_time added Feb 2023
diff --git a/mpisppy/utils/sputils.py b/mpisppy/utils/sputils.py
index d1c5a331b..dcb435c62 100644
--- a/mpisppy/utils/sputils.py
+++ b/mpisppy/utils/sputils.py
@@ -263,7 +263,7 @@ def _create_EF_from_scen_dict(scen_dict, EF_name=None,
         obj_func = scenario_objs[0]  # Select the first objective
         try:
             EF_instance.EF_Obj.expr += scenario_instance._mpisppy_probability * obj_func.expr
-            EF_instance._mpisppy_probability += scenario_instance._mpisppy_probability
+            EF_instance._mpisppy_probability += scenario_instance._mpisppy_probability
         except AttributeError as e:
             raise AttributeError("Scenario " + sname + " has no specified "
                         "probability. Specify a value for the attribute "
@@ -272,6 +272,14 @@ def _create_EF_from_scen_dict(scen_dict, EF_name=None,
     # appropraite scaling of EFs used as bundles.
     EF_instance.EF_Obj.expr /= EF_instance._mpisppy_probability

+    for sn in EF_instance._ef_scenario_names:
+        scenario_instance = EF_instance.component(sn)
+        scenario_mpisppy_data = scenario_instance.component('_mpisppy_data')
+        if scenario_mpisppy_data is None:
+            break # none will be in a bundle
+        # else part of a bundle
+        scenario_mpisppy_data.bundle_conditional_probability = \
+            scenario_instance._mpisppy_probability / EF_instance._mpisppy_probability
+
     # For each node in the scenario tree, we need to collect the
     # nonanticipative vars and create the constraints for them,
     # which we do using a reference variable.
diff --git a/mpisppy/utils/vanilla.py b/mpisppy/utils/vanilla.py
index aa3c4232a..209d4ecb6 100644
--- a/mpisppy/utils/vanilla.py
+++ b/mpisppy/utils/vanilla.py
@@ -58,6 +58,8 @@ def shared_options(args):
         shoptions["iter0_solver_options"]["mipgap"] = args.iter0_mipgap
     if _hasit(args, "iterk_mipgap"):
         shoptions["iterk_solver_options"]["mipgap"] = args.iterk_mipgap
+    if _hasit(args, "bundles_per_rank"):
+        shoptions["bundles_per_rank"] = args.bundles_per_rank
     return shoptions

 def add_multistage_options(cylinder_dict,all_nodenames,branching_factors):
diff --git a/mpisppy/utils/xhat_eval.py b/mpisppy/utils/xhat_eval.py
index 83e4dab62..d1b80c817 100644
--- a/mpisppy/utils/xhat_eval.py
+++ b/mpisppy/utils/xhat_eval.py
@@ -30,16 +30,19 @@ class Xhat_Eval(mpisppy.spopt.SPOpt):
     """ See SPOpt for list of args. """

     def __init__(
-        self,
-        options,
-        all_scenario_names,
-        scenario_creator,
-        scenario_denouement=None,
-        all_nodenames=None,
-        mpicomm=None,
-        scenario_creator_kwargs=None,
-        variable_probability=None,
-    ):
+            self,
+            options,
+            all_scenario_names,
+            scenario_creator,
+            scenario_denouement=None,
+            all_nodenames=None,
+            mpicomm=None,
+            extensions=None,
+            extension_kwargs=None,
+            scenario_creator_kwargs=None,
+            variable_probability=None,
+            E1_tolerance=1e-5
+    ):

         super().__init__(
             options,
@@ -48,16 +51,23 @@ def __init__(
             scenario_denouement=scenario_denouement,
             all_nodenames=all_nodenames,
             mpicomm=mpicomm,
+            extensions=extensions,
+            extension_kwargs=extension_kwargs,
             scenario_creator_kwargs=scenario_creator_kwargs,
             variable_probability=variable_probability,
+            E1_tolerance=E1_tolerance
         )

         self.verbose = self.options['verbose']
+        self.current_solver_options = self.options['iter0_solver_options'] if 'iter0_solver_options' in self.options else (
+            self.options['solver_options'] if 'solver_options' in self.options else dict() )
+
         #TODO: CHANGE THIS AFTER UPDATE
         self.PH_extensions = None

         self._subproblems_solvers_created = False

+
     def _lazy_create_solvers(self):
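
Usage sketch (not part of the patch): a minimal example of wiring the new
extension hooks into Xhat_Eval, based on the signatures added above. The
names all_scenario_names, scenario_creator, and scenario_creator_kwargs are
placeholders to be supplied by the caller, and the option values are
illustrative only.

    from mpisppy.utils.xhat_eval import Xhat_Eval
    from ptdf_ext import PTDFExtension  # examples/uc/ptdf_ext.py above

    options = {
        "verbose": False,
        # Xhat_Eval now seeds current_solver_options from these, if present
        "iter0_solver_options": {"mipgap": 0.02},
        "iterk_solver_options": {"mipgap": 0.01},
    }

    xhat = Xhat_Eval(
        options,
        all_scenario_names,        # placeholder, e.g. ["Scenario1", ...]
        scenario_creator,          # placeholder, e.g. uc_funcs.scenario_creator
        extensions=PTDFExtension,  # new kwarg added in this patch
        extension_kwargs={"iteration_limit": 100},  # assumed to be forwarded
                                                    # to PTDFExtension.__init__
        scenario_creator_kwargs=scenario_creator_kwargs,  # placeholder dict
    )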