diff --git a/.git-blame-ignore-revs b/.git-blame-ignore-revs
new file mode 100644
index 000000000..101d8c5a4
--- /dev/null
+++ b/.git-blame-ignore-revs
@@ -0,0 +1,2 @@
+# PR 426: Run "ruff format ."
+710da08e36961b80cd368662b5af7a6757fcaedf
diff --git a/.github/workflows/test_pr_and_main.yml b/.github/workflows/test_pr_and_main.yml
index 825ccb4ae..7f6181f6b 100644
--- a/.github/workflows/test_pr_and_main.yml
+++ b/.github/workflows/test_pr_and_main.yml
@@ -24,6 +24,15 @@ jobs:
     - uses: actions/checkout@v4
     - uses: chartboost/ruff-action@v1
 
+  ruff-format:
+    name: Ruff Format
+    runs-on: ubuntu-latest
+    steps:
+    - uses: actions/checkout@v4
+    - uses: chartboost/ruff-action@v1
+      with:
+        args: 'format --check'
+
   nompi4py:
     name: no mpi4py
     runs-on: ubuntu-latest
diff --git a/doc/src/conf.py b/doc/src/conf.py
index 0d7eab6a9..e4b81682c 100644
--- a/doc/src/conf.py
+++ b/doc/src/conf.py
@@ -28,9 +28,9 @@
 # add these directories to sys.path here. If the directory is relative to the
 # documentation root, use os.path.abspath to make it absolute, like shown here.
 #
-sys.path.insert(0, os.path.abspath('.'))
-sys.path.insert(0, os.path.abspath('../../examples/distr'))
-sys.path.insert(0, os.path.abspath('../../examples/stoch_distr'))
+sys.path.insert(0, os.path.abspath("."))
+sys.path.insert(0, os.path.abspath("../../examples/distr"))
+sys.path.insert(0, os.path.abspath("../../examples/stoch_distr"))
 
 
 # -- General configuration ------------------------------------------------
@@ -42,45 +42,47 @@
 # Add any Sphinx extension module names here, as strings. They can be
 # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
 # ones.
-extensions = ['sphinx.ext.autodoc',
-              'sphinx.ext.coverage',
-              'sphinx.ext.mathjax',
-              'sphinx.ext.viewcode',
-              'sphinx.ext.napoleon',
-              'sphinx.ext.ifconfig',
-              'sphinx.ext.inheritance_diagram',
-              'sphinx.ext.autosummary',
-              'sphinx.ext.doctest']
-              #'sphinx.ext.githubpages']
+extensions = [
+    "sphinx.ext.autodoc",
+    "sphinx.ext.coverage",
+    "sphinx.ext.mathjax",
+    "sphinx.ext.viewcode",
+    "sphinx.ext.napoleon",
+    "sphinx.ext.ifconfig",
+    "sphinx.ext.inheritance_diagram",
+    "sphinx.ext.autosummary",
+    "sphinx.ext.doctest",
+]
+#'sphinx.ext.githubpages']
 
 viewcode_import = True
-#napoleon_include_private_with_doc = True
+# napoleon_include_private_with_doc = True
 
 # Add any paths that contain templates here, relative to this directory.
-templates_path = ['_templates']
+templates_path = ["_templates"]
 
 # The suffix(es) of source filenames.
 # You can specify multiple suffix as a list of string:
 #
 # source_suffix = ['.rst', '.md']
-source_suffix = '.rst'
+source_suffix = ".rst"
 
 # The root toctree document.
-root_doc = 'index'
+root_doc = "index"
 
 # General information about the project.
-project = u'mpi-sppy'
-copyright = u'2020-2024'
-author = u'mpi-sppy Developers'
+project = "mpi-sppy"
+copyright = "2020-2024"
+author = "mpi-sppy Developers"
 
 # The version info for the project you're documenting, acts as replacement for
 # |version| and |release|, also used in various other places throughout the
 # built documents.
 #
 # The short X.Y version.
-version = '0.12.2'
+version = "0.12.2"
 # The full version, including alpha/beta/rc tags.
-release = '0.12.2.dev0'
+release = "0.12.2.dev0"
 
 # The language for content autogenerated by Sphinx. Refer to documentation
 # for a list of supported languages.
@@ -92,10 +94,10 @@
 # List of patterns, relative to source directory, that match files and
 # directories to ignore when looking for source files.
 # This patterns also effect to html_static_path and html_extra_path
-exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
+exclude_patterns = ["_build", "Thumbs.db", ".DS_Store"]
 
 # The name of the Pygments (syntax highlighting) style to use.
-pygments_style = 'sphinx'
+pygments_style = "sphinx"
 
 # If true, `todo` and `todoList` produce output, else they produce nothing.
 todo_include_todos = False
@@ -107,8 +109,7 @@
 # The theme to use for HTML and HTML Help pages.  See the documentation for
 # a list of builtin themes.
 #
-html_theme = 'sphinx_rtd_theme'
-
+html_theme = "sphinx_rtd_theme"
 
 # Theme options are theme-specific and customize the look and feel of a theme
@@ -120,15 +121,15 @@
 # Add any paths that contain custom static files (such as style sheets) here,
 # relative to this directory. They are copied after the builtin static files,
 # so a file named "default.css" will overwrite the builtin "default.css".
-#html_static_path = ['_static']
+# html_static_path = ['_static']
 
-#html_favicon = "../logos/pyomo/favicon.ico"
+# html_favicon = "../logos/pyomo/favicon.ico"
 
 
 # -- Options for HTMLHelp output ------------------------------------------
 
 # Output file base name for HTML help builder.
-htmlhelp_basename = 'mpi-sppy'
+htmlhelp_basename = "mpi-sppy"
 
 
 # -- Options for LaTeX output ---------------------------------------------
@@ -137,15 +138,12 @@
     # The paper size ('letterpaper' or 'a4paper').
     #
    # 'papersize': 'letterpaper',
-
     # The font size ('10pt', '11pt' or '12pt').
     #
     # 'pointsize': '10pt',
-
     # Additional stuff for the LaTeX preamble.
     #
     # 'preamble': '',
-
     # Latex figure (float) alignment
     #
     # 'figure_align': 'htbp',
@@ -155,8 +153,7 @@
 # (source start file, target name, title,
 #  author, documentclass [howto, manual, or own class]).
 latex_documents = [
-    (root_doc, 'mpisppy.tex', 'mpisppy Documentation',
-     'mpisppy', 'manual'),
+    (root_doc, "mpisppy.tex", "mpisppy Documentation", "mpisppy", "manual"),
 ]
 
 
@@ -164,10 +161,7 @@
 
 # One entry per manual page. List of tuples
 # (source start file, name, description, authors, manual section).
-man_pages = [
-    (root_doc, 'mpisppy', 'mpisppy Documentation',
-     [author], 1)
-]
+man_pages = [(root_doc, "mpisppy", "mpisppy Documentation", [author], 1)]
 
 
 # -- Options for Texinfo output -------------------------------------------
@@ -176,10 +170,16 @@
 #  (source start file, target name, title, author,
 #   dir menu entry, description, category)
 texinfo_documents = [
-    (root_doc, 'mpisppy', 'mpisppy Documentation',
-     author, 'mpisppy', 'One line description of project.',
-     'Miscellaneous'),
+    (
+        root_doc,
+        "mpisppy",
+        "mpisppy Documentation",
+        author,
+        "mpisppy",
+        "One line description of project.",
+        "Miscellaneous",
+    ),
 ]
 
-#autodoc_member_order = 'bysource'
-#autodoc_member_order = 'groupwise'
+# autodoc_member_order = 'bysource'
+# autodoc_member_order = 'groupwise'
diff --git a/examples/acopf3/ACtree.py b/examples/acopf3/ACtree.py
index 61a185891..f5eeb5b91 100644
--- a/examples/acopf3/ACtree.py
+++ b/examples/acopf3/ACtree.py
@@ -6,7 +6,7 @@
 # All rights reserved. Please see the files COPYRIGHT.md and LICENSE.md for
 # full copyright and license information.
 ###############################################################################
-#Tree ideas; dlw Fall 2019
+# Tree ideas; dlw Fall 2019
 # Stage numbers and scenario numbers are one-based, but lists are all zero-based
 # Tree node numbers used in node names are zero-based.
 # NOTE: we *do* have leaf nodes, but mpisppy just completes the scen with them
@@ -16,9 +16,11 @@
 import numpy as np
 import copy
 
+
 def FixFast(minutes):
     return True
 
+
 """
 At each scenario tree node:
     a link can fail and/or
@@ -28,7 +30,8 @@ def FixFast(minutes):
 """
 
-class ACTree():
+
+class ACTree:
     """
     Data for a scenario tree for ACOPF.
 
@@ -42,8 +45,18 @@ class ACTree():
         Repairer (function with argument t in minutes) returns True for repair
         LineList (list of int) All lines in the electric grid
     """
-    def __init__(self, NumStages, BFs, seed, acstream, FailProb,
-                 StageDurations, Repairer, LineList):
+
+    def __init__(
+        self,
+        NumStages,
+        BFs,
+        seed,
+        acstream,
+        FailProb,
+        StageDurations,
+        Repairer,
+        LineList,
+    ):
         self.NumStages = NumStages
         self.BFs = BFs
         self.seed = seed
@@ -54,45 +67,46 @@ def __init__(self, NumStages, BFs, seed, acstream, FailProb,
         self.LineList = LineList
 
         self.numscens = 1
-        for s in range(self.NumStages-1):
+        for s in range(self.NumStages - 1):
             self.numscens *= self.BFs[s]
-        
+
         # now make the tree by making the root
-        self.rootnode = TreeNode(None, self,
-                                 [i+1 for i in range(self.numscens)],
-                                 "ROOT", 1.0, acstream)
+        self.rootnode = TreeNode(
+            None, self, [i + 1 for i in range(self.numscens)], "ROOT", 1.0, acstream
+        )
 
     def Nodes_for_Scenario(self, scen_num):
         """
        Return a list of nodes for a given scenario number (one-based)
        """
-        assert(scen_num <= self.numscens)
+        assert scen_num <= self.numscens
         retlist = [self.rootnode]
         # There must be a clever arithmetic way to do this, but...
-        for stage in range(2, self.NumStages+1):
+        for stage in range(2, self.NumStages + 1):
             for kid in retlist[-1].kids:
                 if scen_num in kid.ScenarioList:
                     retlist.append(kid)
                     break
-        assert(len(retlist) == self.NumStages)
+        assert len(retlist) == self.NumStages
         return retlist
 
     def All_Nodenames(self):
-        """ Return a list of all node names"""
+        """Return a list of all node names"""
+
         # there is a arithmetic way, but I will use the general tree way
         def _progenynames(node):
             # return my name and progeny names if they are not leaves
             retval = [node.Name]
-            if node.stage < self.NumStages: # include leaves
+            if node.stage < self.NumStages:  # include leaves
                 for kid in node.kids:
                     retval += _progenynames(kid)
             return retval
-        
+
         allnames = _progenynames(self.rootnode)
         return allnames
-        
-
-class TreeNode():
+
+
+class TreeNode:
     """
     Data for a tree node, but the node creates its own children
 
@@ -109,9 +123,9 @@ class TreeNode:
         kids (list of nodes) the children of this node
 
     """
-    def __init__(self, Parent, TreeInfo, ScenarioList, Name, CondProb, acstream):
 
-        self.sn = 1 # to attach serial numbers where needed
+    def __init__(self, Parent, TreeInfo, ScenarioList, Name, CondProb, acstream):
+        self.sn = 1  # to attach serial numbers where needed
         self.CondProb = CondProb
         self.Name = Name
         self.Parent = Parent
@@ -122,7 +136,7 @@ def __init__(self, Parent, TreeInfo, ScenarioList, Name, CondProb, acstream):
             self.FailedLines = []
             self.LinesUp = copy.deepcopy(TreeInfo.LineList)
         else:
-            self.stage = Parent.stage+1
+            self.stage = Parent.stage + 1
             self.FailedLines = copy.deepcopy(Parent.FailedLines)
             self.LinesUp = copy.deepcopy(Parent.LinesUp)
             # bring lines up? (mo is minutes out)
@@ -133,7 +147,7 @@ def __init__(self, Parent, TreeInfo, ScenarioList, Name, CondProb, acstream):
                     removals.append((line, mo))
                     self.LinesUp.append(line)
                 else:
-                    mo += TreeInfo.StageDurations[self.stage-1]
+                    mo += TreeInfo.StageDurations[self.stage - 1]
                     self.FailedLines[ell] = (line, mo)
             for r in removals:
                 self.FailedLines.remove(r)
@@ -142,28 +156,32 @@ def __init__(self, Parent, TreeInfo, ScenarioList, Name, CondProb, acstream):
             for line in self.LinesUp:
                 if acstream.rand() < TreeInfo.FailProb:
                     self.LinesUp.remove(line)
-                    self.FailedLines.append\
-                        ((line, TreeInfo.StageDurations[self.stage-1]))
+                    self.FailedLines.append(
+                        (line, TreeInfo.StageDurations[self.stage - 1])
+                    )
         if self.stage <= TreeInfo.NumStages:
             # spawn children
             self.kids = []
             if self.stage < TreeInfo.NumStages:
-                bf = TreeInfo.BFs[self.stage-1]
-                self.sn += 1 # serial number for non-leaf, non-ROOT nodes
+                bf = TreeInfo.BFs[self.stage - 1]
+                self.sn += 1  # serial number for non-leaf, non-ROOT nodes
             else:
                 bf = 1  # leaf node
             for b in range(bf):
                 # divide up the scenario list
-                plist = self.ScenarioList # typing aid
-                first = b*len(plist) // bf
-                last = (b+1)*len(plist) // bf
-                scenlist = plist[first: last]
+                plist = self.ScenarioList  # typing aid
+                first = b * len(plist) // bf
+                last = (b + 1) * len(plist) // bf
+                scenlist = plist[first:last]
                 newname = self.Name + "_" + str(b)
                 if self.stage < TreeInfo.NumStages:
-                    prevbf = TreeInfo.BFs[self.stage-2]
-                    self.kids.append(TreeNode(self, TreeInfo, scenlist,
-                                     newname, 1/prevbf, acstream))
-
+                    prevbf = TreeInfo.BFs[self.stage - 2]
+                    self.kids.append(
+                        TreeNode(
+                            self, TreeInfo, scenlist, newname, 1 / prevbf, acstream
+                        )
+                    )
+
     def pprint(self):
         print("Node Name={}, Stage={}".format(self.Name, self.stage))
         if self.Parent is not None:
@@ -172,14 +190,15 @@ def pprint(self):
             print("   (no parent)")
         print("   FailedLines={}".format(self.FailedLines))
 
-if __name__ == "__main__":
 
+
+if __name__ == "__main__":
     acstream = np.random.RandomState()
-    
-    testtree = ACTree(3, [2, 2], 1134, acstream,
-                      0.2, [5,15,30], FixFast, [0,1,2,3,4,5])
-    for sn in range(1,testtree.numscens+1):
-        print ("nodes in scenario {}".format(sn))
+
+    testtree = ACTree(
+        3, [2, 2], 1134, acstream, 0.2, [5, 15, 30], FixFast, [0, 1, 2, 3, 4, 5]
+    )
+    for sn in range(1, testtree.numscens + 1):
+        print("nodes in scenario {}".format(sn))
         for node in testtree.Nodes_for_Scenario(sn):
             # print (" ",node.Name)
             # print("   ", node.FailedLines)
diff --git a/examples/acopf3/ccopf2wood.py b/examples/acopf3/ccopf2wood.py
index 3aaf1f52a..1c4fcabc9 100644
--- a/examples/acopf3/ccopf2wood.py
+++ b/examples/acopf3/ccopf2wood.py
@@ -11,19 +11,21 @@
 # (see the first lines of main() to change instances)
 import os
 import numpy as np
+
 # Hub and spoke SPBase classes
 from mpisppy.opt.ph import PH
+
 # Hub and spoke SPCommunicator classes
 from mpisppy.cylinders.xhatspecific_bounder import XhatSpecificInnerBound
 from mpisppy.cylinders.hub import PHHub
+
 # Make it all go
 from mpisppy.spin_the_wheel import WheelSpinner
 from mpisppy.utils.xhat_eval import Xhat_Eval
 
 # the problem
 import ACtree as etree
-from ccopf_multistage import pysp2_callback,\
-    scenario_denouement, _md_dict, FixFast
+from ccopf_multistage import pysp2_callback, scenario_denouement, _md_dict, FixFast
 
 import pyomo.environ as pyo
 import socket
@@ -31,6 +33,7 @@
 import datetime as dt
 
 import mpisppy.MPI as mpi
+
 comm_global = mpi.COMM_WORLD
 global_rank = comm_global.Get_rank()
 n_proc = comm_global.Get_size()
@@ -48,11 +51,11 @@ def main():
     #  pglib_opf_case2383wp_k.m
     #  pglib_opf_case2000_tamu.m
     # do not use pglib_opf_case89_pegase
-    egret_path_to_data = "/thirdparty/"+casename
+    egret_path_to_data = "/thirdparty/" + casename
     number_of_stages = 3
     stage_duration_minutes = [5, 15, 30]
     if len(sys.argv) != number_of_stages + 3:
-        print ("Usage: python ccop_multistage bf1 bf2 iters scenperbun(!) solver")
+        print("Usage: python ccop_multistage bf1 bf2 iters scenperbun(!) solver")
         exit(1)
     branching_factors = [int(sys.argv[1]), int(sys.argv[2])]
     PHIterLimit = int(sys.argv[3])
@@ -67,7 +70,7 @@ def main():
     # create an arbitrary xhat for xhatspecific to use
     xhat_dict = {"ROOT": "Scenario_1"}
     for i in range(branching_factors[0]):
-        xhat_dict["ROOT_"+str(i)] = "Scenario_"+str(1 + i*branching_factors[1])
+        xhat_dict["ROOT_" + str(i)] = "Scenario_" + str(1 + i * branching_factors[1])
 
     scenario_creator_kwargs = {
         "convex_relaxation": True,
@@ -78,7 +81,7 @@ def main():
         solver = pyo.SolverFactory(solver_name)
         scenario_creator_kwargs["solver"] = None
         ##if "gurobi" in solver_name:
-            ##solver.options["BarHomogeneous"] = 1
+        ##solver.options["BarHomogeneous"] = 1
     else:
         solver = pyo.SolverFactory(solver_name)
         if "gurobi" in solver_name:
@@ -89,11 +92,11 @@ def main():
     if verbose:
         print("start data dump")
         print(list(md_dict.elements("generator")))
-        for this_branch in md_dict.elements("branch"): 
-            print("TYPE=",type(this_branch))
-            print("B=",this_branch)
-            print("IN SERVICE=",this_branch[1]["in_service"])
-        print("GENERATOR SET=",md_dict.attributes("generator"))
+        for this_branch in md_dict.elements("branch"):
+            print("TYPE=", type(this_branch))
+            print("B=", this_branch)
+            print("IN SERVICE=", this_branch[1]["in_service"])
+        print("GENERATOR SET=", md_dict.attributes("generator"))
         print("end data dump")
 
     lines = list()
@@ -101,20 +104,25 @@ def main():
         lines.append(this_branch[0])
 
     acstream = np.random.RandomState()
-    
-    scenario_creator_kwargs["etree"] = etree.ACTree(number_of_stages,
-                                                    branching_factors,
-                                                    seed,
-                                                    acstream,
-                                                    a_line_fails_prob,
-                                                    stage_duration_minutes,
-                                                    repair_fct,
-                                                    lines)
+
+    scenario_creator_kwargs["etree"] = etree.ACTree(
+        number_of_stages,
+        branching_factors,
+        seed,
+        acstream,
+        a_line_fails_prob,
+        stage_duration_minutes,
+        repair_fct,
+        lines,
+    )
     scenario_creator_kwargs["acstream"] = acstream
 
-    all_scenario_names=["Scenario_"+str(i)\
-                        for i in range(1,len(scenario_creator_kwargs["etree"].\
-                                             rootnode.ScenarioList)+1)]
+    all_scenario_names = [
+        "Scenario_" + str(i)
+        for i in range(
+            1, len(scenario_creator_kwargs["etree"].rootnode.ScenarioList) + 1
+        )
+    ]
     all_nodenames = scenario_creator_kwargs["etree"].All_Nodenames()
 
     options = dict()
@@ -143,7 +151,7 @@ def main():
 
     options["smoothed"] = 0
     options["defaultPHp"] = 5000
-    options["defaultPHbeta"] = .05
+    options["defaultPHbeta"] = 0.05
 
     # try to do something interesting for bundles per rank
     if scenperbun > 0:
@@ -163,18 +171,17 @@ def main():
             nbunstr = str(options["bundles_per_rank"])
         else:
             nbunstr = "0"
-        oline = "\n"+ str(start_time)+","+socket.gethostname()
-        oline += ","+str(branching_factors[0])+","+str(branching_factors[1])
-        oline += ", "+str(seed) + ", "+str(options["solver_name"])
-        oline += ", "+str(n_proc) + ", "+ nbunstr
-        oline += ", "+str(options["PHIterLimit"])
-        oline += ", "+str(options["convthresh"])
+        oline = "\n" + str(start_time) + "," + socket.gethostname()
+        oline += "," + str(branching_factors[0]) + "," + str(branching_factors[1])
+        oline += ", " + str(seed) + ", " + str(options["solver_name"])
+        oline += ", " + str(n_proc) + ", " + nbunstr
+        oline += ", " + str(options["PHIterLimit"])
+        oline += ", " + str(options["convthresh"])
 
         with open(appfile, "a") as f:
             f.write(oline)
         print(oline)
 
-
     # PH hub
     options["tee-rank0-solves"] = False
     hub_dict = {
@@ -189,31 +196,32 @@ def main():
             "scenario_creator_kwargs": scenario_creator_kwargs,
             # "rho_setter": rho_setter.ph_rhosetter_callback,
             "extensions": None,
-            "all_nodenames":all_nodenames,
-        }
+            "all_nodenames": all_nodenames,
+        },
     }
 
     xhat_options = options.copy()
-    xhat_options['bundles_per_rank'] = 0 #  no bundles for xhat
-    xhat_options["xhat_specific_options"] = {"xhat_solver_options":
-                                          options["iterk_solver_options"],
-                                          "xhat_scenario_dict": xhat_dict,
-                                          "csvname": "specific.csv"}
+    xhat_options["bundles_per_rank"] = 0  # no bundles for xhat
+    xhat_options["xhat_specific_options"] = {
+        "xhat_solver_options": options["iterk_solver_options"],
+        "xhat_scenario_dict": xhat_dict,
+        "csvname": "specific.csv",
+    }
 
     ub2 = {
-        'spoke_class': XhatSpecificInnerBound,
+        "spoke_class": XhatSpecificInnerBound,
         "spoke_kwargs": dict(),
         "opt_class": Xhat_Eval,
-        'opt_kwargs': {
-            'options': xhat_options,
-            'all_scenario_names': all_scenario_names,
-            'scenario_creator': pysp2_callback,
-            'scenario_denouement': scenario_denouement,
+        "opt_kwargs": {
+            "options": xhat_options,
+            "all_scenario_names": all_scenario_names,
+            "scenario_creator": pysp2_callback,
+            "scenario_denouement": scenario_denouement,
             "scenario_creator_kwargs": scenario_creator_kwargs,
-            'all_nodenames': all_nodenames
+            "all_nodenames": all_nodenames,
         },
     }
-    
+
     list_of_spoke_dict = [ub2]
 
     wheel_spinner = WheelSpinner(hub_dict, list_of_spoke_dict)
     wheel_spinner.spin()
@@ -222,12 +230,11 @@ def main():
     ph_end_time = dt.datetime.now()
     IB = wheel_spinner.BestInnerBound
     OB = wheel_spinner.BestOuterBound
-    print("BestInnerBound={} and BestOuterBound={}".\
-          format(IB, OB))
+    print("BestInnerBound={} and BestOuterBound={}".format(IB, OB))
     with open(appfile, "a") as f:
-        f.write(", "+str(IB)+", "+str(OB)+", "+str(spcomm.opt._PHIter))
-        f.write(", "+str((ph_end_time - start_time).total_seconds()))
+        f.write(", " + str(IB) + ", " + str(OB) + ", " + str(spcomm.opt._PHIter))
+        f.write(", " + str((ph_end_time - start_time).total_seconds()))
+
 
-
-if __name__=='__main__':
+if __name__ == "__main__":
     main()
diff --git a/examples/acopf3/ccopf_multistage.py b/examples/acopf3/ccopf_multistage.py
index 9d0989c8d..e70fa77cf 100644
--- a/examples/acopf3/ccopf_multistage.py
+++ b/examples/acopf3/ccopf_multistage.py
@@ -31,13 +31,15 @@
 n_proc = comm.Get_size()
 
 
-#======= repair functions =====
+# ======= repair functions =====
 def FixFast(minutes):
     return True
 
+
 def FixNever(minutes):
     return False
 
+
 def FixGaussian(minutes, acstream, mu, sigma):
     """
     Return True if the line has been repaired.
@@ -46,18 +48,20 @@ def FixGaussian(minutes, acstream, mu, sigma):
         mu, sigma (float): repair time is N(mu, sigma)
     """
     # spell it out...
-    Z = (minutes-mu)/sigma
+    Z = (minutes - mu) / sigma
     u = acstream.rand()
     retval = u < scipy.norm.cdf(Z)
     return retval
-#======= end repair functions =====
-
+
+
+# ======= end repair functions =====
+
+
 def _md_dict(epath):
     path = str(egret.__path__)
     left = path.find("'")
-    right = path.find("'", left+1)
-    egretrootpath = path[left+1:right]
+    right = path.find("'", left + 1)
+    egretrootpath = path[left + 1 : right]
     if epath[0] != os.sep:
         test_case = os.path.join(egretrootpath, epath)
     else:
@@ -65,7 +69,7 @@ def _md_dict(epath):
 
     # look at egret/data/model_data.py for the format specification of md_dict
     return create_ModelData(test_case)
-    
+
 
 ####################################
 def pysp2_callback(
@@ -88,7 +92,7 @@ def pysp2_callback(
 
     Args:
         scenario_name (str):
-            Put the scenario number on the end 
+            Put the scenario number on the end
         etree ():
             Default is None.
         solver (str):
@@ -137,20 +141,25 @@ def lines_up_and_down(stage_md_dict, enode):
             else:
                 print("enode.LinesUp=", enode.LinesUp)
                 print("enode.FailedLines=", enode.FailedLines)
-                raise RuntimeError("Branch (line) {} neither up nor down in scenario {}".\
-                                   format(this_branch[0], scenario_name))
+                raise RuntimeError(
+                    "Branch (line) {} neither up nor down in scenario {}".format(
+                        this_branch[0], scenario_name
+                    )
+                )
+
     def _egret_model(md_dict):
         # the exact acopf model is hard-wired here:
-        md_dict.data['system']['load_mismatch_cost'] = load_mismatch_cost
-        md_dict.data['system']['q_load_mismatch_cost'] = q_load_mismatch_cost
+        md_dict.data["system"]["load_mismatch_cost"] = load_mismatch_cost
+        md_dict.data["system"]["q_load_mismatch_cost"] = q_load_mismatch_cost
         if not convex_relaxation:
-            pyomod, mdict = eac.create_riv_acopf_model(md_dict,
-                                                       include_feasibility_slack=True)
+            pyomod, mdict = eac.create_riv_acopf_model(
+                md_dict, include_feasibility_slack=True
+            )
         else:
-            pyomod, mdict = eac_relax.create_soc_relaxation(md_dict,
-                                                            include_feasibility_slack=True,
-                                                            use_linear_relaxation=False)
+            pyomod, mdict = eac_relax.create_soc_relaxation(
+                md_dict, include_feasibility_slack=True, use_linear_relaxation=False
+            )
         return pyomod, mdict
 
     # pull the number off the end of the scenario name
@@ -167,27 +176,31 @@ def _egret_model(md_dict):
     generator_names = generator_set["names"]
 
     # the following creates the first stage model
-    full_scenario_model.stage_models[1], model_dict = _egret_model(
-        first_stage_md_dict)
+    full_scenario_model.stage_models[1], model_dict = _egret_model(first_stage_md_dict)
     full_scenario_model.stage_models[1].obj.deactivate()
-    setattr(full_scenario_model,
-            "stage_models_"+str(1),
-            full_scenario_model.stage_models[1])
-
-    for stage in range(2, numstages+1):
-        #print ("stage={}".format(stage))
+    setattr(
+        full_scenario_model,
+        "stage_models_" + str(1),
+        full_scenario_model.stage_models[1],
+    )
+
+    for stage in range(2, numstages + 1):
+        # print ("stage={}".format(stage))
         stage_md_dict = copy.deepcopy(first_stage_md_dict)
-        #print ("debug: processing node {}".format(enodes[stage-1].Name))
-        lines_up_and_down(stage_md_dict, enodes[stage-1])
-        
-        full_scenario_model.stage_models[stage], model_dict = \
-            _egret_model(stage_md_dict)
+        # print ("debug: processing node {}".format(enodes[stage-1].Name))
+        lines_up_and_down(stage_md_dict, enodes[stage - 1])
+
+        full_scenario_model.stage_models[stage], model_dict = _egret_model(
+            stage_md_dict
+        )
         full_scenario_model.stage_models[stage].obj.deactivate()
-        setattr(full_scenario_model,
-                "stage_models_"+str(stage),
-                full_scenario_model.stage_models[stage])
+        setattr(
+            full_scenario_model,
+            "stage_models_" + str(stage),
+            full_scenario_model.stage_models[stage],
+        )
 
     def aggregate_ramping_rule(m):
         """
@@ -196,46 +209,63 @@ def aggregate_ramping_rule(m):
         """
         retval = 0
         for stage in range(1, numstages):
-            retval += sum((full_scenario_model.stage_models[stage+1].pg[this_gen]\
-                         - full_scenario_model.stage_models[stage].pg[this_gen])**2\
-                          for this_gen in generator_names)
+            retval += sum(
+                (
+                    full_scenario_model.stage_models[stage + 1].pg[this_gen]
+                    - full_scenario_model.stage_models[stage].pg[this_gen]
+                )
+                ** 2
+                for this_gen in generator_names
+            )
         return retval
+
     full_scenario_model.ramping = pyo.Expression(rule=aggregate_ramping_rule)
 
-    full_scenario_model.objective = pyo.Objective(expr=\
-                            ramp_coeff * full_scenario_model.ramping+\
-                            sum(full_scenario_model.stage_models[stage].obj.expr\
-                                for stage in range(1,numstages+1)))
-    
+    full_scenario_model.objective = pyo.Objective(
+        expr=ramp_coeff * full_scenario_model.ramping
+        + sum(
+            full_scenario_model.stage_models[stage].obj.expr
+            for stage in range(1, numstages + 1)
+        )
+    )
+
     inst = full_scenario_model
     # end code from PySP1
-    
+
     node_list = list()
     parent_name = None
     for sm1, enode in enumerate(etree.Nodes_for_Scenario(scen_num)):
         stage = sm1 + 1
         if stage < etree.NumStages:
-            node_list.append(scenario_tree.ScenarioNode(
-                name=enode.Name,
-                cond_prob=enode.CondProb,
-                stage=stage,
-                cost_expression=inst.stage_models[stage].obj,
-                nonant_list=[inst.stage_models[stage].pg,
-                             inst.stage_models[stage].qg],
-                scen_model=inst, parent_name=parent_name))
+            node_list.append(
+                scenario_tree.ScenarioNode(
+                    name=enode.Name,
+                    cond_prob=enode.CondProb,
+                    stage=stage,
+                    cost_expression=inst.stage_models[stage].obj,
+                    nonant_list=[
+                        inst.stage_models[stage].pg,
+                        inst.stage_models[stage].qg,
+                    ],
+                    scen_model=inst,
+                    parent_name=parent_name,
+                )
+            )
             parent_name = enode.Name
 
     if verbose:
-        print(f"\nScenario creation for {scenario_name} on rank {rank}"
-              f"    FailedLines (line,minutes)={enode.FailedLines}")
-        
+        print(
+            f"\nScenario creation for {scenario_name} on rank {rank}"
+            f"    FailedLines (line,minutes)={enode.FailedLines}"
+        )
+
     inst._mpisppy_node_list = node_list
     inst._mpisppy_probability = 1 / etree.numscens
     # solve it so subsequent code will have a good start
     if solver is not None:
         print(f"scenario creation callback is solving {scenario_name} on rank {rank}")
-        solver.solve(inst, tee=tee) #symbolic_solver_labels=True, keepfiles=True)
+        solver.solve(inst, tee=tee)  # symbolic_solver_labels=True, keepfiles=True)
 
     # attachments
     inst._enodes = enodes
@@ -243,9 +273,9 @@ def aggregate_ramping_rule(m):
     return inst
 
-#=============================================================================
-def scenario_denouement(rank, scenario_name, scenario):
 
+# =============================================================================
+def scenario_denouement(rank, scenario_name, scenario):
     print("Solution for scenario=%s - rank=%d" % (scenario_name, rank))
 
     print("LINE OUTAGE INFORMATION")
@@ -253,19 +283,20 @@ def scenario_denouement(rank, scenario_name, scenario):
         enode.pprint()
 
     stages = list(scenario.stage_models.keys())
-    gens = sorted((g for g in getattr(scenario, "stage_models_"+str(1)).pg.keys()))
+    gens = sorted((g for g in getattr(scenario, "stage_models_" + str(1)).pg.keys()))
 
     for gen in gens:
-
         print("GEN: %4s PG:" % gen, end="")
 
         previous_val = None
         for stage in stages:
-            current_val = pyo.value(getattr(scenario, "stage_models_"+str(stage)).pg[gen])
+            current_val = pyo.value(
+                getattr(scenario, "stage_models_" + str(stage)).pg[gen]
+            )
             if previous_val is None:
                 print("%6.2f -->> " % current_val, end=" ")
             else:
-                print("%6.2f" % (current_val-previous_val), end=" ")
+                print("%6.2f" % (current_val - previous_val), end=" ")
             previous_val = current_val
         print("")
 
@@ -273,22 +304,25 @@ def scenario_denouement(rank, scenario_name, scenario):
 
         previous_val = None
         for stage in stages:
-            current_val = pyo.value(getattr(scenario, "stage_models_"+str(stage)).qg[gen])
+            current_val = pyo.value(
+                getattr(scenario, "stage_models_" + str(stage)).qg[gen]
+            )
             if previous_val is None:
                 print("%6.2f -->> " % current_val, end=" ")
             else:
-                print("%6.2f" % (current_val-previous_val), end=" ")
+                print("%6.2f" % (current_val - previous_val), end=" ")
             previous_val = current_val
         print("")
-
     print("")
 
-#=========================
+
+# =========================
 if __name__ == "__main__":
     # as of April 27, 2020 __main__ has been updated only for EF
     print("EF only")
 
     import mpisppy.MPI as mpi
+
     n_proc = mpi.COMM_WORLD.Get_size()  # for error check
     # start options
     solver_name = "cplex"
@@ -305,11 +339,11 @@ def scenario_denouement(rank, scenario_name, scenario):
     #  pglib_opf_case2000_tamu.m
     # do not use pglib_opf_case89_pegase
     # this path starts at egret
-    egret_path_to_data = "/thirdparty/"+casename
+    egret_path_to_data = "/thirdparty/" + casename
 
     if len(sys.argv) != 4:
-        print ("Usage: python ccop_multistage bf1 bf2 bf3")
-        print ("e.g., for three-stage use python ccop_multistage 2 3 0")
+        print("Usage: python ccop_multistage bf1 bf2 bf3")
+        print("e.g., for three-stage use python ccop_multistage 2 3 0")
         exit(1)
     if int(sys.argv[3]) == 0:
         number_of_stages = 3
@@ -322,18 +356,22 @@ def scenario_denouement(rank, scenario_name, scenario):
         branching_factors = [int(sys.argv[1]), int(sys.argv[2]), int(sys.argv[3])]
 
     allbutlast = branching_factors[0] * branching_factors[1]
-    nscen = allbutlast * branching_factors[number_of_stages-2]
+    nscen = allbutlast * branching_factors[number_of_stages - 2]
     seed = 1134
     a_line_fails_prob = 0.2
     repair_fct = FixFast
     verbose = False
     if allbutlast % n_proc != 0 and n_proc % allbutlast != 0:
-        raise RuntimeError("allbutlast={} must divide n_proc={}, or vice-versa".\
-                           format(allbutlast, n_proc))
+        raise RuntimeError(
+            "allbutlast={} must divide n_proc={}, or vice-versa".format(
+                allbutlast, n_proc
+            )
+        )
     # TBD: comment this out and try -np 4 with bfs of 2 3 0 (strange sense error)
     if nscen % n_proc != 0:
-        raise RuntimeError("nscen={} must be a multiple of n_proc={}".\
-                           format(nscen, n_proc))
+        raise RuntimeError(
+            "nscen={} must be a multiple of n_proc={}".format(nscen, n_proc)
+        )
 
     convex_relaxation = True
     print(f"Convex relaxation = {convex_relaxation}")
@@ -345,14 +383,14 @@ def scenario_denouement(rank, scenario_name, scenario):
     }
 
     md_dict = _md_dict(egret_path_to_data)
-    if verbose and rank==0:
+    if verbose and rank == 0:
         print("start data dump")
         print(list(md_dict.elements("generator")))
-        for this_branch in md_dict.elements("branch"): 
-            print("TYPE=",type(this_branch))
-            print("B=",this_branch)
-            print("IN SERVICE=",this_branch[1]["in_service"])
-        print("GENERATOR SET=",md_dict.attributes("generator"))
+        for this_branch in md_dict.elements("branch"):
+            print("TYPE=", type(this_branch))
+            print("B=", this_branch)
+            print("IN SERVICE=", this_branch[1]["in_service"])
+        print("GENERATOR SET=", md_dict.attributes("generator"))
         print("end data dump")
 
     lines = list()
@@ -360,23 +398,28 @@ def scenario_denouement(rank, scenario_name, scenario):
         lines.append(this_branch[0])
 
     acstream = np.random.RandomState()
 
-    scenario_creator_kwargs["etree"] = ET.ACTree(number_of_stages,
-                                                 branching_factors,
-                                                 seed,
-                                                 acstream,
-                                                 a_line_fails_prob,
-                                                 stage_duration_minutes,
-                                                 repair_fct,
-                                                 lines)
+    scenario_creator_kwargs["etree"] = ET.ACTree(
+        number_of_stages,
+        branching_factors,
+        seed,
+        acstream,
+        a_line_fails_prob,
+        stage_duration_minutes,
+        repair_fct,
+        lines,
+    )
     scenario_creator_kwargs["acstream"] = acstream
-    scenario_names=["Scenario_"+str(i)\
-                    for i in range(1,len(scenario_creator_kwargs["etree"].rootnode.ScenarioList)+1)]
+    scenario_names = [
+        "Scenario_" + str(i)
+        for i in range(
+            1, len(scenario_creator_kwargs["etree"].rootnode.ScenarioList) + 1
+        )
+    ]
 
     # end options
-    ef = sputils.create_EF(scenario_names,
-                           pysp2_callback,
-                           scenario_creator_kwargs=scenario_creator_kwargs)
+    ef = sputils.create_EF(
+        scenario_names, pysp2_callback, scenario_creator_kwargs=scenario_creator_kwargs
+    )
     ###solver.options["BarHomogeneous"] = 1
     if "gurobi" in solver_name:
         solver.options["BarHomogeneous"] = 1
@@ -384,17 +427,23 @@ def scenario_denouement(rank, scenario_name, scenario):
 
     results = solver.solve(ef, tee=True)
     pyo.assert_optimal_termination(results)
-    print('EF objective value:', pyo.value(ef.EF_Obj))
+    print("EF objective value:", pyo.value(ef.EF_Obj))
     sputils.ef_nonants_csv(ef, "vardump.csv")
-    for (sname, smodel) in sputils.ef_scenarios(ef):
-        print (sname)
+    for sname, smodel in sputils.ef_scenarios(ef):
+        print(sname)
         for stage in smodel.stage_models:
-            print ("   Stage {}".format(stage))
+            print("   Stage {}".format(stage))
             for gen in smodel.stage_models[stage].pg:
-                 print ("      gen={} pg={}, qg={}"\
-                        .format(gen,
-                                pyo.value(smodel.stage_models[stage].pg[gen]),
-                                pyo.value(smodel.stage_models[stage].qg[gen])))
-            print ("   obj={}".format(pyo.value(smodel.objective)))
-    print("EF objective value for case {}={}".\
-          format(pyo.value(casename), pyo.value(ef.EF_Obj)))
+                print(
+                    "      gen={} pg={}, qg={}".format(
+                        gen,
+                        pyo.value(smodel.stage_models[stage].pg[gen]),
+                        pyo.value(smodel.stage_models[stage].qg[gen]),
+                    )
+                )
+            print("   obj={}".format(pyo.value(smodel.objective)))
+    print(
+        "EF objective value for case {}={}".format(
+            pyo.value(casename), pyo.value(ef.EF_Obj)
+        )
+    )
diff --git a/examples/acopf3/fourstage.py b/examples/acopf3/fourstage.py
index 86cab0600..869599f87 100644
--- a/examples/acopf3/fourstage.py
+++ b/examples/acopf3/fourstage.py
@@ -11,19 +11,21 @@
 # (see the first lines of main() to change instances)
 import os
 import numpy as np
+
 # Hub and spoke SPBase classes
 from mpisppy.opt.ph import PH
+
 # Hub and spoke SPCommunicator classes
 from mpisppy.cylinders.xhatspecific_bounder import XhatSpecificInnerBound
 from mpisppy.cylinders.hub import PHHub
+
 # Make it all go
 from mpisppy.spin_the_wheel import WheelSpinner
 from mpisppy.utils.xhat_eval import Xhat_Eval
 
 # the problem
 import ACtree as etree
-from ccopf_multistage import pysp2_callback,\
-    scenario_denouement, _md_dict, FixFast
+from ccopf_multistage import pysp2_callback, scenario_denouement, _md_dict, FixFast
 import rho_setter
 
 import pyomo.environ as pyo
@@ -32,6 +34,7 @@
 import datetime as dt
 
 import mpisppy.MPI as mpi
+
 comm_global = mpi.COMM_WORLD
 global_rank = comm_global.Get_rank()
 n_proc = comm_global.Get_size()
@@ -49,11 +52,11 @@ def main():
     #  pglib_opf_case2383wp_k.m
     #  pglib_opf_case2000_tamu.m
     # do not use pglib_opf_case89_pegase
-    egret_path_to_data = "/thirdparty/"+casename
+    egret_path_to_data = "/thirdparty/" + casename
     number_of_stages = 4
     stage_duration_minutes = [5, 15, 30, 30]
     if len(sys.argv) != number_of_stages + 3:
-        print ("Usage: python fourstage.py bf1 bf2 bf3 iters scenperbun(!)")
+        print("Usage: python fourstage.py bf1 bf2 bf3 iters scenperbun(!)")
         exit(1)
     branching_factors = [int(sys.argv[1]), int(sys.argv[2]), int(sys.argv[3])]
     PHIterLimit = int(sys.argv[4])
@@ -61,18 +64,18 @@ def main():
     solver_name = sys.argv[6]
 
     seed = 1134
-    a_line_fails_prob = 0.1   # try 0.2 for more excitement
+    a_line_fails_prob = 0.1  # try 0.2 for more excitement
     repair_fct = FixFast
     verbose = False
     # end options
     # create an arbitrary xhat for xhatspecific to use
     xhat_dict = {"ROOT": "Scenario_1"}
     for i in range(branching_factors[0]):
-        st1scen = 1 + i*(branching_factors[1] + branching_factors[2])
-        xhat_dict["ROOT_"+str(i)] = "Scenario_"+str(st1scen)
+        st1scen = 1 + i * (branching_factors[1] + branching_factors[2])
+        xhat_dict["ROOT_" + str(i)] = "Scenario_" + str(st1scen)
         for j in range(branching_factors[2]):
-            st2scen = st1scen + j*branching_factors[2]
-            xhat_dict["ROOT_"+str(i)+'_'+str(j)] = "Scenario_"+str(st2scen)
+            st2scen = st1scen + j * branching_factors[2]
+            xhat_dict["ROOT_" + str(i) + "_" + str(j)] = "Scenario_" + str(st2scen)
 
     convex_relaxation = True
     scenario_creator_kwargs = {
@@ -85,7 +88,7 @@ def main():
         solver = pyo.SolverFactory(solver_name)
         scenario_creator_kwargs["solver"] = None
         ##if "gurobi" in solver_name:
-            ##solver.options["BarHomogeneous"] = 1
+        ##solver.options["BarHomogeneous"] = 1
     else:
         solver_name = "ipopt"
         solver = pyo.SolverFactory(solver_name)
@@ -97,11 +100,11 @@ def main():
     if verbose:
         print("start data dump")
         print(list(md_dict.elements("generator")))
-        for this_branch in md_dict.elements("branch"): 
-            print("TYPE=",type(this_branch))
-            print("B=",this_branch)
-            print("IN SERVICE=",this_branch[1]["in_service"])
-        print("GENERATOR SET=",md_dict.attributes("generator"))
+        for this_branch in md_dict.elements("branch"):
+            print("TYPE=", type(this_branch))
+            print("B=", this_branch)
+            print("IN SERVICE=", this_branch[1]["in_service"])
+        print("GENERATOR SET=", md_dict.attributes("generator"))
         print("end data dump")
 
     lines = list()
@@ -109,20 +112,25 @@ def main():
         lines.append(this_branch[0])
 
     acstream = np.random.RandomState()
-    
-    scenario_creator_kwargs["etree"] = etree.ACTree(number_of_stages,
-                                                    branching_factors,
-                                                    seed,
-                                                    acstream,
-                                                    a_line_fails_prob,
-                                                    stage_duration_minutes,
-                                                    repair_fct,
-                                                    lines)
+
+    scenario_creator_kwargs["etree"] = etree.ACTree(
+        number_of_stages,
+        branching_factors,
+        seed,
+        acstream,
+        a_line_fails_prob,
+        stage_duration_minutes,
+        repair_fct,
+        lines,
+    )
    scenario_creator_kwargs["acstream"] = acstream
 
-    all_scenario_names=["Scenario_"+str(i)\
-                        for i in range(1,len(scenario_creator_kwargs["etree"].\
-                                             rootnode.ScenarioList)+1)]
+    all_scenario_names = [
+        "Scenario_" + str(i)
+        for i in range(
+            1, len(scenario_creator_kwargs["etree"].rootnode.ScenarioList) + 1
+        )
+    ]
     all_nodenames = scenario_creator_kwargs["etree"].All_Nodenames()
 
     options = dict()
@@ -167,18 +175,17 @@ def main():
             nbunstr = str(options["bundles_per_rank"])
         else:
             nbunstr = "0"
-        oline = "\n"+ str(start_time)+","+socket.gethostname()
-        oline += ","+str(branching_factors[0])+","+str(branching_factors[1])
-        oline += ", "+str(seed) + ", "+str(options["solver_name"])
-        oline += ", "+str(n_proc) + ", "+ nbunstr
-        oline += ", "+str(options["PHIterLimit"])
-        oline += ", "+str(options["convthresh"])
+        oline = "\n" + str(start_time) + "," + socket.gethostname()
+        oline += "," + str(branching_factors[0]) + "," + str(branching_factors[1])
+        oline += ", " + str(seed) + ", " + str(options["solver_name"])
+        oline += ", " + str(n_proc) + ", " + nbunstr
+        oline += ", " + str(options["PHIterLimit"])
+        oline += ", " + str(options["convthresh"])
 
         with open(appfile, "a") as f:
             f.write(oline)
         print(oline)
 
-
     # PH hub
     options["tee-rank0-solves"] = True
     hub_dict = {
@@ -189,35 +196,36 @@ def main():
             "options": options,
             "all_scenario_names": all_scenario_names,
             "scenario_creator": pysp2_callback,
-            'scenario_denouement': scenario_denouement,
+            "scenario_denouement": scenario_denouement,
             "scenario_creator_kwargs": scenario_creator_kwargs,
             "rho_setter": rho_setter.ph_rhosetter_callback,
             "extensions": None,
-            "all_nodenames":all_nodenames,
-        }
+            "all_nodenames": all_nodenames,
+        },
     }
 
     xhat_options = options.copy()
-    xhat_options['bundles_per_rank'] = 0 #  no bundles for xhat
-    xhat_options["xhat_specific_options"] = {"xhat_solver_options":
-                                          options["iterk_solver_options"],
-                                          "xhat_scenario_dict": xhat_dict,
-                                          "csvname": "specific.csv"}
+    xhat_options["bundles_per_rank"] = 0  # no bundles for xhat
+    xhat_options["xhat_specific_options"] = {
+        "xhat_solver_options": options["iterk_solver_options"],
+        "xhat_scenario_dict": xhat_dict,
+        "csvname": "specific.csv",
+    }
 
     ub2 = {
-        'spoke_class': XhatSpecificInnerBound,
+        "spoke_class": XhatSpecificInnerBound,
         "spoke_kwargs": dict(),
         "opt_class": Xhat_Eval,
-        'opt_kwargs': {
-            'options': xhat_options,
-            'all_scenario_names': all_scenario_names,
-            'scenario_creator': pysp2_callback,
-            'scenario_denouement': scenario_denouement,
+        "opt_kwargs": {
+            "options": xhat_options,
+            "all_scenario_names": all_scenario_names,
+            "scenario_creator": pysp2_callback,
+            "scenario_denouement": scenario_denouement,
             "scenario_creator_kwargs": scenario_creator_kwargs,
-            'all_nodenames': all_nodenames
+            "all_nodenames": all_nodenames,
         },
     }
-    
+
     list_of_spoke_dict = [ub2]
 
     wheel = WheelSpinner(hub_dict, list_of_spoke_dict)
     wheel.spin()
@@ -226,12 +234,18 @@ def main():
     ph_end_time = dt.datetime.now()
     IB = wheel.spcomm.BestInnerBound
     OB = wheel.spcomm.BestOuterBound
-    print("BestInnerBound={} and BestOuterBound={}".\
-          format(IB, OB))
+    print("BestInnerBound={} and BestOuterBound={}".format(IB, OB))
     with open(appfile, "a") as f:
-        f.write(", "+str(IB)+", "+str(OB)+", "+str(wheel.spcomm.opt._PHIter))
-        f.write(", "+str((ph_end_time - start_time).total_seconds()))
-
-
-if __name__=='__main__':
+        f.write(
+            ", "
+            + str(IB)
+            + ", "
+            + str(OB)
+            + ", "
+            + str(wheel.spcomm.opt._PHIter)
+        )
+        f.write(", " + str((ph_end_time - start_time).total_seconds()))
+
+
+if __name__ == "__main__":
     main()
diff --git a/examples/acopf3/rho_setter.py b/examples/acopf3/rho_setter.py
index 7c4284003..9fedaafac 100644
--- a/examples/acopf3/rho_setter.py
+++ b/examples/acopf3/rho_setter.py
@@ -6,23 +6,22 @@
 # All rights reserved. Please see the files COPYRIGHT.md and LICENSE.md for
 # full copyright and license information.
 ###############################################################################
-# January 2020; rho_setter for ccopf from n-1 stuff of a few years ago
+# January 2020; rho_setter for ccopf from n-1 stuff of a few years ago
+
 
 def generator_p_cost(md, g, output_level):
     # g is an egret tuple (num, dict); md is egret model data
-    if md.attributes("generator")['p_cost'][g[0]]['cost_curve_type']\
-       != 'polynomial':
-        print ("rho setter wants polynomial, returning 1")
+    if md.attributes("generator")["p_cost"][g[0]]["cost_curve_type"] != "polynomial":
+        print("rho setter wants polynomial, returning 1")
         return 1
-    CCVs = md.attributes("generator")['p_cost'][g[0]]['values']
-    a0 = CCVs[2] #???? delete this comment after checking
+    CCVs = md.attributes("generator")["p_cost"][g[0]]["values"]
+    a0 = CCVs[2]  # ???? delete this comment after checking
     a1 = CCVs[1]
     a2 = CCVs[0]
     retval = a0 + a1 * output_level + a2 * output_level**2
     if retval == 0:
-        retval = 0.99 
+        retval = 0.99
     return retval
-        
 
 def ph_rhosetter_callback(scen):
@@ -33,14 +32,14 @@ def ph_rhosetter_callback(scen):
     numstages = len(scen._mpisppy_node_list)
     generator_set = scen._egret_md.attributes("generator")
     generator_names = generator_set["names"]
-    
+
     for egen in generator_names:
         for stage in range(1, numstages):
-            pgen = scen.stage_models[stage].pg[egen] # pgen is a Var
-            
+            pgen = scen.stage_models[stage].pg[egen]  # pgen is a Var
+
             lb = pgen.lb
             ub = pgen.ub
-            mid = lb + (ub - lb)/2.0
+            mid = lb + (ub - lb) / 2.0
             cost_at_mid = generator_p_cost(scen._egret_md, egen, mid)
             rho = cost_at_mid * MyRhoFactor
             idv = id(pgen)
diff --git a/examples/afew.py b/examples/afew.py
index 5bb5520ae..ce3c72318 100644
--- a/examples/afew.py
+++ b/examples/afew.py
@@ -30,10 +30,12 @@
 
 badguys = dict()
 
+
 def do_one(dirname, progname, np, argstring):
     os.chdir(dirname)
-    runstring = "mpiexec {} -np {} python -m mpi4py {} {}".\
-                format(mpiexec_arg, np, progname, argstring)
+    runstring = "mpiexec {} -np {} python -m mpi4py {} {}".format(
+        mpiexec_arg, np, progname, argstring
+    )
     print(runstring)
     code = os.system(runstring)
     if code != 0:
@@ -45,30 +47,46 @@ def do_one(dirname, progname, np, argstring):
 
 # for farmer, the first arg is num_scens and is required
-do_one("farmer", "farmer_cylinders.py", 3,
-       "--num-scens=3 --bundles-per-rank=0 --max-iterations=50 "
-       "--default-rho=1 --display-convergence-detail "
-       "--solver-name={} --xhatshuffle --lagrangian --use-norm-rho-updater".format(solver_name))
-do_one("farmer", "farmer_lshapedhub.py", 2,
-       "--num-scens=3 --bundles-per-rank=0 --max-iterations=50 "
-       "--solver-name={} --rel-gap=0.0 "
-       " --xhatlshaped --max-solver-threads=1".format(solver_name))
-do_one("sizes",
-       "sizes_cylinders.py",
-       4,
-       "--num-scens=3 --bundles-per-rank=0 --max-iterations=5 "
-       "--iter0-mipgap=0.01 --iterk-mipgap=0.001 --linearize-proximal-terms "
-       " --xhatshuffle --lagrangian --fwph "
-       "--default-rho=1 --solver-name={} --display-progress".format(solver_name))
+do_one(
+    "farmer",
+    "farmer_cylinders.py",
+    3,
+    "--num-scens=3 --bundles-per-rank=0 --max-iterations=50 "
+    "--default-rho=1 --display-convergence-detail "
+    "--solver-name={} --xhatshuffle --lagrangian --use-norm-rho-updater".format(
+        solver_name
+    ),
+)
+do_one(
+    "farmer",
+    "farmer_lshapedhub.py",
+    2,
+    "--num-scens=3 --bundles-per-rank=0 --max-iterations=50 "
+    "--solver-name={} --rel-gap=0.0 "
+    " --xhatlshaped --max-solver-threads=1".format(solver_name),
+)
+do_one(
+    "sizes",
+    "sizes_cylinders.py",
+    4,
+    "--num-scens=3 --bundles-per-rank=0 --max-iterations=5 "
+    "--iter0-mipgap=0.01 --iterk-mipgap=0.001 --linearize-proximal-terms "
+    " --xhatshuffle --lagrangian --fwph "
+    "--default-rho=1 --solver-name={} --display-progress".format(solver_name),
+)
 
-do_one("hydro", "hydro_cylinders_pysp.py", 3,
-       "--bundles-per-rank=0 --max-iterations=100 "
-       "--default-rho=1 --xhatshuffle --lagrangian "
-       "--solver-name={}".format(solver_name))
+do_one(
+    "hydro",
+    "hydro_cylinders_pysp.py",
+    3,
+    "--bundles-per-rank=0 --max-iterations=100 "
+    "--default-rho=1 --xhatshuffle --lagrangian "
+    "--solver-name={}".format(solver_name),
+)
 
 if len(badguys) > 0:
     print("\nBad Guys:")
-    for i,v in badguys.items():
+    for i, v in badguys.items():
         print("Directory={}".format(i))
         for c in v:
             print("   {}".format(c))
diff --git a/examples/aircond/aircond_ama.py b/examples/aircond/aircond_ama.py
index 44e5e8dc0..572979d3b 100644
--- a/examples/aircond/aircond_ama.py
+++ b/examples/aircond/aircond_ama.py
@@ -10,44 +10,51 @@
 An example of using amalgamator with 3 cylinders for a multistage problem
 To execute this:
    mpiexec -np 3 python aircond_ama.py --default-rho=1 --branching-factors "3 3" --lagrangian --xhatshuffle --solver-name=cplex
-    
+
 WARNING: do not use the num-scens argument on the command line
 """
+
 import numpy as np
 import pyomo.common.config as pyofig
 import mpisppy.utils.amalgamator as amalgamator
 import mpisppy.utils.config as config
 import mpisppy.tests.examples.aircond as aircond
 
+
 def main():
-    solution_files = {"first_stage_solution":"aircond_first_stage.csv",
-                      "tree_solution":"aircond_full_solution" 
-                      }
+    solution_files = {
+        "first_stage_solution": "aircond_first_stage.csv",
+        "tree_solution": "aircond_full_solution",
+    }
     cfg = config.Config()
     cfg.quick_assign("mstage", bool, True)
-    cfg.quick_assign("cylinders", pyofig.ListOf(str), ['ph','xhatshuffle','lagrangian'])
+    cfg.quick_assign(
+        "cylinders", pyofig.ListOf(str), ["ph", "xhatshuffle", "lagrangian"]
+    )
     cfg.quick_assign("first_stage_solution_args", dict, solution_files)
-    
+
     ama_options = amalgamator.Amalgamator_parser(cfg, aircond.inparser_adder)
-    
-    #Here, we need to change something specified by the command line
-    #That's why we cannot use amalgamator.from_module
-    if ama_options.get('num_scens', None) is not None:
-        raise RuntimeError("Do not use num_scens here, we want to solve the problem for the whole sample scenario tree")
-    
-    num_scens = np.prod(cfg['branching_factors'])
+
+    # Here, we need to change something specified by the command line
+    # That's why we cannot use amalgamator.from_module
+    if ama_options.get("num_scens", None) is not None:
+        raise RuntimeError(
+            "Do not use num_scens here, we want to solve the problem for the whole sample scenario tree"
+        )
+
+    num_scens = np.prod(cfg["branching_factors"])
     scenario_names = aircond.scenario_names_creator(num_scens, 0)
-    
-    ama =amalgamator.Amalgamator(cfg, 
-                                 scenario_names, 
-                                 aircond.scenario_creator,
-                                 aircond.kw_creator)
+
+    ama = amalgamator.Amalgamator(
+        cfg, scenario_names, aircond.scenario_creator, aircond.kw_creator
+    )
     ama.run()
     if ama.on_hub:
         print("first_stage_solution=", ama.first_stage_solution)
         print("inner bound=", ama.best_inner_bound)
         print("outer bound=", ama.best_outer_bound)
 
+
 if __name__ == "__main__":
     main()
diff --git a/examples/aircond/aircond_cylinders.py b/examples/aircond/aircond_cylinders.py
index bc467ff9c..b157a3646 100644
--- a/examples/aircond/aircond_cylinders.py
+++ b/examples/aircond/aircond_cylinders.py
@@ -14,7 +14,10 @@
 import itertools
 from mpisppy import global_toc
 from mpisppy.spin_the_wheel import WheelSpinner
-from mpisppy.utils.sputils import first_stage_nonant_npy_serializer, option_string_to_dict
+from mpisppy.utils.sputils import (
+    first_stage_nonant_npy_serializer,
+    option_string_to_dict,
+)
 from mpisppy.utils import config
 import mpisppy.utils.cfg_vanilla as vanilla
 import mpisppy.tests.examples.aircond as aircond
@@ -25,10 +28,11 @@
 # construct a node-scenario dictionary a priori for xhatspecific_spoke,
 # according to naming convention for this problem
 
-def make_node_scenario_dict_balanced(BFs,leaf_nodes=True,start=1):
-    """ creates a node-scenario dictionary for aircond problem
-    Args: 
-        BFs (list of ints): branching factors for each stage 
+
+def make_node_scenario_dict_balanced(BFs, leaf_nodes=True, start=1):
+    """creates a node-scenario dictionary for aircond problem
+    Args:
+        BFs (list of ints): branching factors for each stage
         leaf_nodes (bool): include/exclude leaf nodes (default True)
         start (int): starting value for scenario number (default 1)
 
@@ -39,29 +43,28 @@ def make_node_scenario_dict_balanced(BFs,leaf_nodes=True,start=1):
     nodenames = make_nodenames_balanced(BFs, leaf_nodes)
     num_scens = np.prod(BFs)
     scenario_names = aircond.scenario_names_creator(num_scens, start)
-    
-    num_stages = len(BFs)+1
-    node_scenario_dict ={"ROOT": scenario_names[0]}
-    
+
+    num_stages = len(BFs) + 1
+    node_scenario_dict = {"ROOT": scenario_names[0]}
+
     for i in range(BFs[0]):
         node_scenario_dict[nodenames[i]] = scenario_names[i]
 
     if num_stages >= 3:
-        for stage in range(2,num_stages+leaf_nodes):
-            node_scenario_dict = _recursive_node_dict(node_scenario_dict,
-                                                      nodenames,
-                                                      scenario_names,
-                                                      BFs,
-                                                      stage)
+        for stage in range(2, num_stages + leaf_nodes):
+            node_scenario_dict = _recursive_node_dict(
+                node_scenario_dict, nodenames, scenario_names, BFs, stage
+            )
 
     return node_scenario_dict
 
+
 def _recursive_node_dict(current_dict, nodenames, scenario_names, BFs, stage):
-    """ recursive call for make_node_scenario_dict for 3+ stage problems
-    Args: 
+    """recursive call for make_node_scenario_dict for 3+ stage problems
+    Args:
         current_dict (dict): current dictionary for scenarios
         nodenames (list): distinct non-leaf nodes (see make_nodenames_balanced)
         scenario_names (list): scenario names low to high
-        BFs(list): branching factors for each stage 
+        BFs(list): branching factors for each stage
         stage (int): current problem stage
 
     Returns:
@@ -69,37 +72,37 @@ def _recursive_node_dict(current_dict, nodenames, scenario_names, BFs, stage):
     """
     # How far along in the node_idx are we?
-    node_offset = int(np.sum([np.prod(BFs[:i]) for i in range(1,stage-1)]))
+    node_offset = int(np.sum([np.prod(BFs[:i]) for i in range(1, stage - 1)]))
     # scen_offset = 1
-    
+
     # Check if we're handling leaf nodes:
-    if stage == len(BFs)+1:
+    if stage == len(BFs) + 1:
         scen_offset = 0
-        
-    for i in range(np.prod(BFs[:(stage-1)])):
+
+    for i in range(np.prod(BFs[: (stage - 1)])):
         node_idx = i + node_offset
-        scen_idx = 0 if i == 0 else int(np.prod(BFs[(stage-1):]))*i - scen_offset
+        scen_idx = 0 if i == 0 else int(np.prod(BFs[(stage - 1) :])) * i - scen_offset
         current_dict[nodenames[node_idx]] = scenario_names[scen_idx]
-    
+
     return current_dict.copy()
 
 
-def make_nodenames_balanced(BFs, leaf_nodes=False, root = True):
-    """ creates a list of node names as in aircond example
+def make_nodenames_balanced(BFs, leaf_nodes=False, root=True):
+    """creates a list of node names as in aircond example
 
     Args:
         BFs (list): branching factor for each stage
         leaf_nodes (bool): if False, exclude leaf node names
         root (bool): if False, no "ROOT_" string on non-root nodes
-    
-    Returns: 
+
+    Returns:
         nodenames (list): list of all nodenames, "ROOT" is last, ordered as:
             0, 1, 2, ..., BFs[0], 0_0, 0_1, ..., 0_BFs[1], 1_0, 1_1, ... ,
            1_BFs[1], ... , BFs[0]_BFs[1], ... , BFs[0]...BFs[-2]
     """
     if not leaf_nodes:
-        BFs = BFs[:-1] # exclude leaf nodes
+        BFs = BFs[:-1]  # exclude leaf nodes
 
     # Constructs all nodenames
     # 0, 1, 2, ..., BFs[0], 00, 01, ..., 0BFs[1], 10, 11, ... ,
@@ -107,10 +110,10 @@ def make_nodenames_balanced(BFs, leaf_nodes=False, root = True):
     max_str_len = len(BFs)
     lists = [[str(k) for k in range(BFs[0])]]
 
-    for i in range(max_str_len-1):
+    for i in range(max_str_len - 1):
         the_list = []
         for j in range(len(lists[i])):
-            the_list.append([lists[i][j]+"_"+str(a) for a in range(BFs[i+1])])
+            the_list.append([lists[i][j] + "_" + str(a) for a in range(BFs[i + 1])])
         lists.append(list(itertools.chain(*the_list)))
 
     strings = list(itertools.chain(*lists))
@@ -121,17 +124,18 @@ def make_nodenames_balanced(BFs, leaf_nodes=False, root = True):
         for string in strings:
             rt_string = "ROOT_" + string
             nodenames.append(rt_string)
-    else: 
+    else:
         nodenames = strings
 
     # add root node (it is nicer to keep it at the end)
     nodenames.append("ROOT")
-    
+
     return nodenames
-    
+
+
 def _parse_args():
     # create and return a Config object with values from the command line
     cfg = config.Config()
-    
+
     cfg.multistage()
     cfg.ph_args()
     cfg.two_sided_args()
@@ -142,32 +146,41 @@ def _parse_args():
     cfg.lagranger_args()
     cfg.xhatspecific_args()
     cfg.mip_options()
-    cfg.aph_args()    
+    cfg.aph_args()
     aircond.inparser_adder(cfg)  # not aircondB
-    cfg.add_to_config("run_async",
-                      description="Run with async projective hedging instead of progressive hedging",
-                      domain=bool,
-                      default=False)
-    cfg.add_to_config("write_solution",
-                      description="Write various solution files (default False)",
-                      domain=bool,
-                      default=False)
+    cfg.add_to_config(
+        "run_async",
+        description="Run with async projective hedging instead of progressive hedging",
+        domain=bool,
+        default=False,
+    )
+    cfg.add_to_config(
+        "write_solution",
+        description="Write various solution files (default False)",
+        domain=bool,
+        default=False,
+    )
     # special "proper" bundle arguments
     pickle_bundle.pickle_bundle_config(cfg)
 
-    cfg.add_to_config("EF_directly",
-                      description="Solve the EF directly instead of using cylinders (default False)",
-                      domain=bool,
-                      default=False)
+    cfg.add_to_config(
+        "EF_directly",
+        description="Solve the EF directly instead of using cylinders (default False)",
+        domain=bool,
+        default=False,
+    )
     # DLW March 2022: this should be in baseparsers and vanilla some day
-    cfg.add_to_config("solver_options",
-                      description="Space separarated string with = for arguments (default None)",
-                      domain=str,
-                      default=None)
+    cfg.add_to_config(
+        "solver_options",
+        description="Space separarated string with = for arguments (default None)",
+        domain=str,
+        default=None,
+    )
 
     cfg.parse_command_line("aircond_cylinders")
 
     return cfg
 
+
 def main():
     cfg = _parse_args()
@@ -182,9 +195,11 @@ def main():
         pickle_bundle.check_args(cmdargs)
         assert cfg.scenarios_per_bundle is not None
         if cfg.pickle_bundles_dir is not None:
-            raise ValueError("Do not give a --pickle-bundles-dir to this program. "
-                             "This program only takes --unpickle-bundles-dir. "
-                             "Use bundle_pickler.py to create bundles.")
+            raise ValueError(
+                "Do not give a --pickle-bundles-dir to this program. "
+                "This program only takes --unpickle-bundles-dir. "
+                "Use bundle_pickler.py to create bundles."
+            )
 
     with_xhatspecific = cfg.xhatspecific
     with_lagrangian = cfg.lagrangian
@@ -193,7 +208,7 @@ def main():
     with_xhatshuffle = cfg.xhatshuffle
 
     # This is multi-stage, so we need to supply node names
-    #all_nodenames = ["ROOT"] # all trees must have this node
+    # all_nodenames = ["ROOT"] # all trees must have this node
     # The rest is a naming convention invented for this problem.
     # Note that mpisppy does not have nodes at the leaves,
     # and node names must end in a serial number.
@@ -203,16 +218,20 @@ def main():
         # This code needs to know the correct scenarios per bundle and the number of
         # bundles in --branching-factors (one element in the list).
         bsize = int(cfg.scenarios_per_bundle)
-        assert len(BFs) == 1, "for proper bundles, --branching-factors should be the number of bundles"
+        assert (
+            len(BFs) == 1
+        ), "for proper bundles, --branching-factors should be the number of bundles"
         numbuns = BFs[0]
-        all_scenario_names = [f"Bundle_{bn*bsize}_{(bn+1)*bsize-1}" for bn in range(numbuns)]
+        all_scenario_names = [
+            f"Bundle_{bn*bsize}_{(bn+1)*bsize-1}" for bn in range(numbuns)
+        ]
         refmodule = aircondB
         primal_rho_setter = None
         global_toc("WARNING: not using rho setters with proper bundles")
-        
+
     else:
         ScenCount = np.prod(BFs)
-        all_scenario_names = [f"scen{i}" for i in range(ScenCount)] #Scens are 0-based
+        all_scenario_names = [f"scen{i}" for i in range(ScenCount)]  # Scens are 0-based
         refmodule = aircond
         primal_rho_setter = refmodule.primal_rho_setter
 
@@ -223,7 +242,7 @@ def main():
     scenario_creator = refmodule.scenario_creator
     # TBD: consider using aircond instead of refmodule and pare down aircondB.py
     scenario_denouement = refmodule.scenario_denouement
-    
+
     if cfg.EF_directly:
         """
        ama_options = {"EF-mstage": not proper_bundles,
@@ -235,8 +254,7 @@ def main():
                       "tee_ef_solves":False,
                       }
        """
-        ama = amalgamator.from_module(refmodule,
-                                      cfg, use_command_line=False)
+        ama = amalgamator.from_module(refmodule, cfg, use_command_line=False)
         ama.run()
         print("EF inner bound=", ama.best_inner_bound)
         print("EF outer bound=", ama.best_outer_bound)
@@ -245,54 +263,68 @@ def main():
     # if we are still here, we are running cylinders
     # Things needed for vanilla cylinders
     beans = (cfg, scenario_creator, scenario_denouement, all_scenario_names)
-    
+
     if cfg.run_async:
         # Vanilla APH hub
-        hub_dict = vanilla.aph_hub(*beans,
-                                   scenario_creator_kwargs=scenario_creator_kwargs,
-                                   ph_extensions=None,
-                                   rho_setter = None,
-                                   all_nodenames=all_nodenames)
+        hub_dict = vanilla.aph_hub(
+            *beans,
+            scenario_creator_kwargs=scenario_creator_kwargs,
+            ph_extensions=None,
+            rho_setter=None,
+            all_nodenames=all_nodenames,
+        )
     else:
         # Vanilla PH hub
-        hub_dict = vanilla.ph_hub(*beans,
-                                  scenario_creator_kwargs=scenario_creator_kwargs,
-                                  ph_extensions=None,
-                                  rho_setter = primal_rho_setter,
-                                  all_nodenames=all_nodenames)
+        hub_dict = vanilla.ph_hub(
+            *beans,
+            scenario_creator_kwargs=scenario_creator_kwargs,
+            ph_extensions=None,
+            rho_setter=primal_rho_setter,
+            all_nodenames=all_nodenames,
+        )
 
     # Standard Lagrangian bound spoke
     if with_lagrangian:
-        lagrangian_spoke = vanilla.lagrangian_spoke(*beans,
-                                                    scenario_creator_kwargs=scenario_creator_kwargs,
-                                                    rho_setter = primal_rho_setter,
-                                                    all_nodenames = all_nodenames)
+        lagrangian_spoke = vanilla.lagrangian_spoke(
+            *beans,
+            scenario_creator_kwargs=scenario_creator_kwargs,
+            rho_setter=primal_rho_setter,
+            all_nodenames=all_nodenames,
+        )
 
     # Indepdent Lagranger bound spoke
     if with_lagranger:
-        lagranger_spoke = vanilla.lagranger_spoke(*beans,
-                                                  scenario_creator_kwargs=scenario_creator_kwargs,
-                                                  rho_setter = primal_rho_setter,
-                                                  all_nodenames = all_nodenames)
+        lagranger_spoke = vanilla.lagranger_spoke(
+            *beans,
+            scenario_creator_kwargs=scenario_creator_kwargs,
+            rho_setter=primal_rho_setter,
+            all_nodenames=all_nodenames,
+        )
 
     # Indepdent FWPH bound spoke
     if with_fwph:
-        fwph_spoke = vanilla.fwph_spoke(*beans,
-                                        scenario_creator_kwargs=scenario_creator_kwargs,
-                                        all_nodenames = all_nodenames)
+        fwph_spoke = vanilla.fwph_spoke(
+            *beans,
+            scenario_creator_kwargs=scenario_creator_kwargs,
+            all_nodenames=all_nodenames,
+        )
 
     # xhat specific bound spoke
     if with_xhatspecific:
-        xhatspecific_spoke = vanilla.xhatspecific_spoke(*beans,
-                                                        xhat_scenario_dict,
-                                                        all_nodenames=all_nodenames,
-                                                        scenario_creator_kwargs=scenario_creator_kwargs)
-
-    #xhat shuffle looper bound spoke
-    
+        xhatspecific_spoke = vanilla.xhatspecific_spoke(
+            *beans,
+            xhat_scenario_dict,
+            all_nodenames=all_nodenames,
+            scenario_creator_kwargs=scenario_creator_kwargs,
+        )
+
+    # xhat shuffle looper bound spoke
+
     if with_xhatshuffle:
-        xhatshuffle_spoke = vanilla.xhatshuffle_spoke(*beans,
-                                                      all_nodenames=all_nodenames,
-                                                      scenario_creator_kwargs=scenario_creator_kwargs)
+        xhatshuffle_spoke = vanilla.xhatshuffle_spoke(
+            *beans,
+            all_nodenames=all_nodenames,
+            scenario_creator_kwargs=scenario_creator_kwargs,
+        )
 
     list_of_spoke_dict = list()
     if with_lagrangian:
         list_of_spoke_dict.append(lagrangian_spoke)
@@ -315,30 +347,40 @@ def main():
             sd["opt_kwargs"]["options"]["iterk_solver_options"].update(soptions)
 
     if with_xhatspecific:
-        xhatspecific_spoke["opt_kwargs"]["options"]["xhat_looper_options"]["xhat_solver_options"].update(soptions)
+        xhatspecific_spoke["opt_kwargs"]["options"]["xhat_looper_options"][
+            "xhat_solver_options"
+        ].update(soptions)
 
     if with_xhatshuffle:
-        xhatshuffle_spoke["opt_kwargs"]["options"]["xhat_looper_options"]["xhat_solver_options"].update(soptions)
+        xhatshuffle_spoke["opt_kwargs"]["options"]["xhat_looper_options"][
+            "xhat_solver_options"
+        ].update(soptions)
 
     # special code to get a trace for xhatshuffle
     if with_xhatshuffle and cfg.trace_prefix is not None:
-        xhatshuffle_spoke["opt_kwargs"]["options"]['shuffle_running_trace_prefix'] = cfg.trace_prefix
+        xhatshuffle_spoke["opt_kwargs"]["options"]["shuffle_running_trace_prefix"] = (
+            cfg.trace_prefix
+        )
 
     wheel = WheelSpinner(hub_dict, list_of_spoke_dict)
     wheel.spin()
 
-    fname = 'aircond_cyl_nonants.npy'
+    fname = "aircond_cyl_nonants.npy"
     if wheel.global_rank == 0:
-        print("BestInnerBound={} and BestOuterBound={}".\
-              format(wheel.BestInnerBound, wheel.BestOuterBound))
+        print(
+            "BestInnerBound={} and BestOuterBound={}".format(
+                wheel.BestInnerBound, wheel.BestOuterBound
+            )
+        )
         if cfg.write_solution:
             print(f"Writing first stage solution to {fname}")
 
     # all ranks need to participate because only the winner will write
     if cfg.write_solution:
-        wheel.write_first_stage_solution('aircond_first_stage.csv')
-        wheel.write_tree_solution('aircond_full_solution')
-        wheel.write_first_stage_solution(fname,
-                first_stage_solution_writer=first_stage_nonant_npy_serializer)
+        wheel.write_first_stage_solution("aircond_first_stage.csv")
+        wheel.write_tree_solution("aircond_full_solution")
+        wheel.write_first_stage_solution(
+            fname, first_stage_solution_writer=first_stage_nonant_npy_serializer
+        )
     global_toc("Solutions written.")
 
+
 if __name__ == "__main__":
     main()
-
diff --git a/examples/aircond/aircond_ef.py b/examples/aircond/aircond_ef.py
index a1cad0d5a..ef7935be8 100644
--- a/examples/aircond/aircond_ef.py
+++ b/examples/aircond/aircond_ef.py
@@ -15,45 +15,51 @@
 
 aircondstream = np.random.RandomState()
 
-#============================
+# ============================
 if __name__ == "__main__":
-
     bfs = [4, 3, 2]
-    num_scens = np.prod(bfs) #To check with a full tree
-    ama_options = { "EF-mstage": True,
-                    "EF_solver_name": "cplex",  # "gurobi_direct",
-                    "num_scens": num_scens,
-                    "_mpisppy_probability": 1/num_scens,
-                    "branching_factors":bfs,
-                    "mu_dev":0,
-                    "sigma_dev":40,
-                    "BeginInventory": 50.0,
-                    "QuadShortCoeff": 0.3,
-                    "start_ups":False,
-                    "start_seed":0,
-                    "tee_ef_solves":False
-                    }
-    refmodel = "mpisppy.tests.examples.aircond"
-
-    #We use from_module to build easily an Amalgamator object
-    ama = amalgamator.from_module(refmodel,
-                                  ama_options,use_command_line=False)
+    num_scens = np.prod(bfs)  # To check with a full tree
+    ama_options = {
+        "EF-mstage": True,
+        "EF_solver_name": "cplex",  # "gurobi_direct",
+        "num_scens": num_scens,
+        "_mpisppy_probability": 1 / num_scens,
+        "branching_factors": bfs,
+        "mu_dev": 0,
+        "sigma_dev": 40,
+        "BeginInventory": 50.0,
+        "QuadShortCoeff": 0.3,
+        "start_ups": False,
+        "start_seed": 0,
+        "tee_ef_solves": False,
+    }
+    refmodel = "mpisppy.tests.examples.aircond"
+
+    # We use from_module to build easily an Amalgamator object
+    ama = amalgamator.from_module(refmodel, ama_options, use_command_line=False)
     ama.run()
 
     print("inner bound=", ama.best_inner_bound)
     print("outer bound=", ama.best_outer_bound)
 
-    print ("quitting early")
+    print("quitting early")
     quit()
     from mpisppy.confidence_intervals.mmw_ci import MMWConfidenceIntervals
+
     options = ama.options
-    options['solver_options'] = options['EF_solver_options']
+    options["solver_options"] = options["EF_solver_options"]
     xhat = sputils.nonant_cache_from_ef(ama.ef)
-   
-    
+
     num_batches = 10
     batch_size = 100
-    
-    mmw = MMWConfidenceIntervals(refmodel, options, xhat, num_batches,batch_size=batch_size, start = num_scens,
-                       verbose=False)
-    r=mmw.run(objective_gap=True)
+
+    mmw = MMWConfidenceIntervals(
+        refmodel,
+        options,
+        xhat,
+        num_batches,
+        batch_size=batch_size,
+        start=num_scens,
+        verbose=False,
+    )
+    r = mmw.run(objective_gap=True)
     print(r)
diff --git a/examples/aircond/aircond_seqsampling.py b/examples/aircond/aircond_seqsampling.py
index d134e3070..a421aa550 100644
--- a/examples/aircond/aircond_seqsampling.py
+++ b/examples/aircond/aircond_seqsampling.py
@@ -17,53 +17,57 @@
 
 
 def main(cfg):
-    """ Code for aircond sequential sampling (in a function for easier testing)
     Uses the global config data.
Returns: - results (dict): the solution, gap confidence interval and T + results (dict): the solution, gap confidence interval and T """ refmodelname = "mpisppy.tests.examples.aircond" BFs = cfg.branching_factors num_scens = np.prod(BFs) - scenario_names = ['Scenario' + str(i) for i in range(num_scens)] - - xhat_gen_kwargs = {"scenario_names": scenario_names, - "solver_name": cfg.solver_name, - "solver_options": None, - "branching_factors" : BFs, - "mu_dev": cfg.mu_dev, - "sigma_dev": cfg.sigma_dev, - "start_ups": False, - "start_seed": cfg.seed, - } + scenario_names = ["Scenario" + str(i) for i in range(num_scens)] + + xhat_gen_kwargs = { + "scenario_names": scenario_names, + "solver_name": cfg.solver_name, + "solver_options": None, + "branching_factors": BFs, + "mu_dev": cfg.mu_dev, + "sigma_dev": cfg.sigma_dev, + "start_ups": False, + "start_seed": cfg.seed, + } cfg.quick_assign("xhat_gen_kwargs", dict, xhat_gen_kwargs) if cfg.BM_vs_BPL == "BM": # Bayraksan and Morton - sampler = multi_seqsampling.IndepScens_SeqSampling(refmodelname, - aircond.xhat_generator_aircond, - cfg, - stochastic_sampling=False, - stopping_criterion="BM", - solving_type="EF_mstage", - ) + sampler = multi_seqsampling.IndepScens_SeqSampling( + refmodelname, + aircond.xhat_generator_aircond, + cfg, + stochastic_sampling=False, + stopping_criterion="BM", + solving_type="EF_mstage", + ) else: # must be BPL ss = int(cfg.BPL_n0min) != 0 - sampler = multi_seqsampling.IndepScens_SeqSampling(refmodelname, - aircond.xhat_generator_aircond, - cfg, - stochastic_sampling=ss, - stopping_criterion="BPL", - solving_type="EF_mstage", - ) - + sampler = multi_seqsampling.IndepScens_SeqSampling( + refmodelname, + aircond.xhat_generator_aircond, + cfg, + stochastic_sampling=ss, + stopping_criterion="BPL", + solving_type="EF_mstage", + ) + xhat = sampler.run() return xhat + def _parse_args(): # create a Config object and parse into it cfg = config.Config() @@ -74,28 +78,33 @@ def _parse_args(): conf_config.BPL_config(cfg) # --help will show both BM and BPL aircond.inparser_adder(cfg) - - cfg.add_to_config("solver_name", - description = "solver name (e.g. gurobi)", - domain = str, - default=None) - - cfg.add_to_config("seed", - description="Seed for random numbers (default is 1134)", - domain=int, - default=1134) - - cfg.add_to_config("BM_vs_BPL", - description="BM or BPL for Bayraksan and Morton or B and Pierre Louis", - domain=str, - default=None) - cfg.add_to_config("xhat1_file", - description="File to which xhat1 should be (e.g. to process with zhat4hat.py)", - domain=str, - default=None) + + cfg.add_to_config( + "solver_name", description="solver name (e.g. gurobi)", domain=str, default=None + ) + + cfg.add_to_config( + "seed", + description="Seed for random numbers (default is 1134)", + domain=int, + default=1134, + ) + + cfg.add_to_config( + "BM_vs_BPL", + description="BM or BPL for Bayraksan and Morton or B and Pierre Louis", + domain=str, + default=None, + ) + cfg.add_to_config( + "xhat1_file", + description="File to which xhat1 should be (e.g. 
to process with zhat4hat.py)", + domain=str, + default=None, + ) cfg.parse_command_line("farmer_sequential") - + if cfg.BM_vs_BPL is None: raise RuntimeError("--BM-vs-BPL must be given.") if cfg.BM_vs_BPL != "BM" and cfg.BM_vs_BPL != "BPL": @@ -103,8 +112,8 @@ def _parse_args(): return cfg -if __name__ == '__main__': +if __name__ == "__main__": cfg = _parse_args() cfg.quick_assign("EF_mstage", bool, True) @@ -113,5 +122,7 @@ def _parse_args(): if cfg.xhat1_file is not None: print(f"Writing xhat1 to {cfg.xhat1_file}.npy") - root_nonants =np.fromiter((v for v in results["Candidate_solution"]["ROOT"]), float) + root_nonants = np.fromiter( + (v for v in results["Candidate_solution"]["ROOT"]), float + ) np.save(cfg.xhat1_file, root_nonants) diff --git a/examples/aircond/bundle_pickler.py b/examples/aircond/bundle_pickler.py index 9076951ad..624d5645a 100644 --- a/examples/aircond/bundle_pickler.py +++ b/examples/aircond/bundle_pickler.py @@ -24,6 +24,7 @@ # construct a node-scenario dictionary a priori for xhatspecific_spoke, # according to naming convention for this problem + def _parse_args(): cfg = config.Config() cfg.multistage() @@ -33,8 +34,8 @@ def _parse_args(): return cfg -def main(): +def main(): cfg = _parse_args() assert cfg.pickle_bundles_dir is not None assert cfg.scenarios_per_bundle is not None @@ -64,13 +65,12 @@ def main(): local_slice = slices[my_rank] local_bundle_names = [f"Bundle_{bn*bsize}_{(bn+1)*bsize-1}" for bn in local_slice] - #print(f"{slices=}") - #print(f"{local_bundle_names=}") - + # print(f"{slices=}") + # print(f"{local_bundle_names=}") + for bname in local_bundle_names: aircondB.scenario_creator(bname, **kwargs) - + if __name__ == "__main__": main() - diff --git a/examples/battery/battery.py b/examples/battery/battery.py index 05b9fbb84..306898b35 100644 --- a/examples/battery/battery.py +++ b/examples/battery/battery.py @@ -6,24 +6,28 @@ # All rights reserved. Please see the files COPYRIGHT.md and LICENSE.md for # full copyright and license information. ############################################################################### -''' Solution to the Lagrangian relaxation of a hybrid solar-battery storage - system from the paper: +"""Solution to the Lagrangian relaxation of a hybrid solar-battery storage +system from the paper: - B. Singh and B. Knueven. Lagrangian relaxation based heuristics for a - chance-constrained optimization model of a hybrid solar-battery storage - system. Submitted, 2019. +B. Singh and B. Knueven. Lagrangian relaxation based heuristics for a +chance-constrained optimization model of a hybrid solar-battery storage +system. Submitted, 2019. - This code solves the Lagrangian relaxation (4) from the paper, not the - original chance-constrained problem (2) (because PH cannot be directly - applied to chance-constrained two-stage programs). -''' +This code solves the Lagrangian relaxation (4) from the paper, not the +original chance-constrained problem (2) (because PH cannot be directly +applied to chance-constrained two-stage programs). 
+""" import numpy as np import pyomo.environ as pyo import mpisppy.scenario_tree as stree + def scenario_creator( - scenario_name, solar_filename=None, use_LP=False, lam=None, + scenario_name, + solar_filename=None, + use_LP=False, + lam=None, ): """ Args: @@ -42,101 +46,149 @@ def scenario_creator( raise RuntimeError("kwarg `lam` is required") data = getData(solar_filename) - num_scenarios = data['solar'].shape[0] + num_scenarios = data["solar"].shape[0] scenario_index = extract_scenario_index(scenario_name) if (scenario_index < 0) or (scenario_index >= num_scenarios): - raise RuntimeError('Provided scenario index is invalid (must lie in ' - '{0,1,...' + str(num_scenarios-1) + '} inclusive)') + raise RuntimeError( + "Provided scenario index is invalid (must lie in " + "{0,1,..." + str(num_scenarios - 1) + "} inclusive)" + ) model = pyo.ConcreteModel() - T = range(data['T']) - Tm1 = range(data['T']-1) + T = range(data["T"]) + Tm1 = range(data["T"] - 1) model.y = pyo.Var(T, within=pyo.NonNegativeReals) - model.p = pyo.Var(T, bounds=(0, data['cMax'])) - model.q = pyo.Var(T, bounds=(0, data['dMax'])) - model.x = pyo.Var(T, bounds=(data['eMin'], data['eMax'])) - if (use_LP): + model.p = pyo.Var(T, bounds=(0, data["cMax"])) + model.q = pyo.Var(T, bounds=(0, data["dMax"])) + model.x = pyo.Var(T, bounds=(data["eMin"], data["eMax"])) + if use_LP: model.z = pyo.Var([0], within=pyo.UnitInterval) else: model.z = pyo.Var([0], within=pyo.Binary) - ''' "Flow balance" constraints ''' + """ "Flow balance" constraints """ + def flow_balance_constraint_rule(model, t): - return model.x[t+1]==model.x[t] + \ - data['eff'] * model.p[t] - (1/data['eff']) * model.q[t] + return ( + model.x[t + 1] + == model.x[t] + data["eff"] * model.p[t] - (1 / data["eff"]) * model.q[t] + ) + model.flow_constr = pyo.Constraint(Tm1, rule=flow_balance_constraint_rule) - - ''' Big-M constraints ''' + + """ Big-M constraints """ + def big_M_constraint_rule(model, t): - return model.y[t] - model.q[t] + model.p[t] \ - <= data['solar'][scenario_index,t] + \ - data['M'][scenario_index,t] * model.z[0] # Why indexed?? + return ( + model.y[t] - model.q[t] + model.p[t] + <= data["solar"][scenario_index, t] + + data["M"][scenario_index, t] * model.z[0] + ) # Why indexed?? 
+ model.big_m_constr = pyo.Constraint(T, rule=big_M_constraint_rule) - ''' Objective function (must be minimization or PH crashes) ''' - model.obj = pyo.Objective(expr=-pyo.dot_product(data['rev'], model.y) - + data['char'] * pyo.quicksum(model.p) - + data['disc'] * pyo.quicksum(model.q) + lam * model.z[0], - sense=pyo.minimize) + """ Objective function (must be minimization or PH crashes) """ + model.obj = pyo.Objective( + expr=-pyo.dot_product(data["rev"], model.y) + + data["char"] * pyo.quicksum(model.p) + + data["disc"] * pyo.quicksum(model.q) + + lam * model.z[0], + sense=pyo.minimize, + ) def fscr(model): return pyo.dot_product(data["rev"], model.y) + model.first_stage_cost = pyo.Expression(rule=fscr) model._mpisppy_node_list = [ - stree.ScenarioNode(name='ROOT', cond_prob=1., stage=1, + stree.ScenarioNode( + name="ROOT", + cond_prob=1.0, + stage=1, cost_expression=model.first_stage_cost, - nonant_list=[model.y], scen_model=model) + nonant_list=[model.y], + scen_model=model, + ) ] return model + def scenario_denouement(rank, scenario_name, scenario): pass + def getData(solar_filename): - ''' Define problem parameters based on the Singh-Knueven paper ''' + """Define problem parameters based on the Singh-Knueven paper""" data = { - 'T' : 24, # Number of time periods (hours) |T| - 'N' : 50, # Number of scenarios |\Omega| - 'eff' : 0.9, # Battery efficiency eta - 'eMax' : 960, # Maximum energy that can be stored in the battery - 'eMin' : 192, # Minimum energy that can be stored in the battery - 'rev' : np.array( # Marginal revenue earned at each hour R_t - [0.0189, 0.0172, 0.0155, 0.0148, 0.0146, 0.0151, 0.0173, 0.0219, - 0.0227, 0.0226, 0.0235, 0.0242, 0.0250, 0.0261, 0.0285, 0.0353, - 0.0531, 0.0671, 0.0438, 0.0333, 0.0287, 0.0268, 0.0240, 0.0211]), - 'char' : 0.0256, # Cost of charging the battery - 'disc' : 0.0256, # Cost of discharging the battery - 'cMax' : 480, # Maximum charging rate of battery - 'dMax' : 480, # Maximum discharging rate of battery - 'eps' : 0.05, # Chance constraint violation probability - 'x0' : 0.5 * 960,# Initial level of battery charge + "T": 24, # Number of time periods (hours) |T| + "N": 50, # Number of scenarios |\Omega| + "eff": 0.9, # Battery efficiency eta + "eMax": 960, # Maximum energy that can be stored in the battery + "eMin": 192, # Minimum energy that can be stored in the battery + "rev": np.array( # Marginal revenue earned at each hour R_t + [ + 0.0189, + 0.0172, + 0.0155, + 0.0148, + 0.0146, + 0.0151, + 0.0173, + 0.0219, + 0.0227, + 0.0226, + 0.0235, + 0.0242, + 0.0250, + 0.0261, + 0.0285, + 0.0353, + 0.0531, + 0.0671, + 0.0438, + 0.0333, + 0.0287, + 0.0268, + 0.0240, + 0.0211, + ] + ), + "char": 0.0256, # Cost of charging the battery + "disc": 0.0256, # Cost of discharging the battery + "cMax": 480, # Maximum charging rate of battery + "dMax": 480, # Maximum discharging rate of battery + "eps": 0.05, # Chance constraint violation probability + "x0": 0.5 * 960, # Initial level of battery charge } - data['prob'] = 1/data['N'] * np.ones(data['N']) # Scenario probabilities - data['solar'] = np.loadtxt(solar_filename, delimiter=',') - data['M'] = getBigM(data) + data["prob"] = 1 / data["N"] * np.ones(data["N"]) # Scenario probabilities + data["solar"] = np.loadtxt(solar_filename, delimiter=",") + data["M"] = getBigM(data) return data + def getBigM(data): - ''' Compute big-M values as given in Corollary 1 - - This method assumes that all scenarios are equally likely - ''' - base = np.min([data['dMax'], data['eff'] * (data['eMax'] - data['eMin'])]) - 
M = base * np.ones((data['N'], data['T'])) - data['solar'] - ell = np.int64(np.floor(data['N'] * data['eps']) + 1) - M += np.sort(data['solar'], axis=0)[-ell,:] + """Compute big-M values as given in Corollary 1 + + This method assumes that all scenarios are equally likely + """ + base = np.min([data["dMax"], data["eff"] * (data["eMax"] - data["eMin"])]) + M = base * np.ones((data["N"], data["T"])) - data["solar"] + ell = np.int64(np.floor(data["N"] * data["eps"]) + 1) + M += np.sort(data["solar"], axis=0)[-ell, :] return M + def extract_scenario_index(scenario_name): ix = -1 - while (ix >= -len(scenario_name) and scenario_name[ix] in '0123456789'): + while ix >= -len(scenario_name) and scenario_name[ix] in "0123456789": ix -= 1 - if (ix == -1): - raise RuntimeError('Provided scenario name must end with a number') - return int(scenario_name[ix+1:]) + if ix == -1: + raise RuntimeError("Provided scenario name must end with a number") + return int(scenario_name[ix + 1 :]) + -if __name__=='__main__': - print('No main for battery.py') +if __name__ == "__main__": + print("No main for battery.py") diff --git a/examples/battery/batteryext.py b/examples/battery/batteryext.py index daacce700..4eb60ea6c 100644 --- a/examples/battery/batteryext.py +++ b/examples/battery/batteryext.py @@ -8,8 +8,8 @@ ############################################################################### import mpisppy.extension -class BatteryExtension(mpisppy.extension.Extension): +class BatteryExtension(mpisppy.extension.Extension): def __init__(self, ph, rank, n_proc): self.cylinder_rank = rank self.ph = ph @@ -21,8 +21,8 @@ def post_iter0(self): pass def miditer(self, PHiter, conv): - if (self.cylinder_rank == 0): - print('{itr:3d} {conv:12.4e}'.format(itr=PHiter, conv=conv)) + if self.cylinder_rank == 0: + print("{itr:3d} {conv:12.4e}".format(itr=PHiter, conv=conv)) def enditer(self, PHiter): pass diff --git a/examples/battery/batterymain.py b/examples/battery/batterymain.py index b418848c8..206a2582d 100644 --- a/examples/battery/batterymain.py +++ b/examples/battery/batterymain.py @@ -12,47 +12,55 @@ ###from batteryext import BatteryExtension + def main(): scenario_creator_kwargs = { - 'solar_filename' : 'solar.csv', - 'use_LP' : False, - 'lam' : 467, # Dual weight for dualized chance constr + "solar_filename": "solar.csv", + "use_LP": False, + "lam": 467, # Dual weight for dualized chance constr } PH_options = { - 'solver_name' : 'gurobi', - 'PHIterLimit' : 250, - 'defaultPHrho' : 1e-1, - 'convthresh' : 1e-3, - 'verbose' : False, - 'display_timing' : False, - 'display_progress' : True, - 'iter0_solver_options' : dict(), - 'iterk_solver_options' : dict(), - 'tee-rank0-solves': False, + "solver_name": "gurobi", + "PHIterLimit": 250, + "defaultPHrho": 1e-1, + "convthresh": 1e-3, + "verbose": False, + "display_timing": False, + "display_progress": True, + "iter0_solver_options": dict(), + "iterk_solver_options": dict(), + "tee-rank0-solves": False, } # extensions? 
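+    # NOTE (hypothetical sketch, not exercised by this example): if the
+    # commented-out BatteryExtension import above were restored, it would
+    # presumably be wired in through the extensions keyword of mpisppy's PH,
+    # e.g.
+    #     ph = PH(PH_options, names, scenario_creator, scenario_denouement,
+    #             extensions=BatteryExtension,
+    #             scenario_creator_kwargs=scenario_creator_kwargs)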
- names = ['s' + str(i) for i in range(50)] - ph = PH(PH_options, names, scenario_creator, scenario_denouement, - scenario_creator_kwargs=scenario_creator_kwargs) + names = ["s" + str(i) for i in range(50)] + ph = PH( + PH_options, + names, + scenario_creator, + scenario_denouement, + scenario_creator_kwargs=scenario_creator_kwargs, + ) ph.ph_main() res = get_solutions(ph) - for (sname, sol) in res.items(): - z = sol['z'] + for sname, sol in res.items(): + z = sol["z"] print(sname, z) + def get_solutions(ph): result = dict() - for (sname, scenario) in ph.local_scenarios.items(): + for sname, scenario in ph.local_scenarios.items(): result[sname] = dict() for node in scenario._mpisppy_node_list: for var in node.nonant_vardata_list: result[sname][var.name] = var.value - result[sname]['z'] = scenario.z[0].value + result[sname]["z"] = scenario.z[0].value return result -if __name__=='__main__': + +if __name__ == "__main__": main() diff --git a/examples/distr/distr.py b/examples/distr/distr.py index 0a30ce086..f93e1d9cd 100644 --- a/examples/distr/distr.py +++ b/examples/distr/distr.py @@ -21,15 +21,15 @@ def inter_arcs_adder(region_dict, inter_region_dict): - """This function adds to the region_dict the inter-region arcs + """This function adds to the region_dict the inter-region arcs Args: region_dict (dict): dictionary for the current scenario \n inter_region_dict (dict): dictionary of the inter-region relations Returns: - local_dict (dict): - This dictionary copies region_dict, completes the already existing fields of + local_dict (dict): + This dictionary copies region_dict, completes the already existing fields of region_dict to represent the inter-region arcs and their capcities and costs """ ### Note: The cost of the arc is here chosen to be split equally between the source region and target region @@ -38,21 +38,24 @@ def inter_arcs_adder(region_dict, inter_region_dict): # but also contains local information that can be linked to the other scenarios (with inter-region arcs) local_dict = region_dict for inter_arc in inter_region_dict["arcs"]: - source,target = inter_arc - region_source,node_source = source - region_target,node_target = target + source, target = inter_arc + region_source, node_source = source + region_target, node_target = target if region_source == region_dict["name"] or region_target == region_dict["name"]: arc = node_source, node_target local_dict["arcs"].append(arc) - local_dict["flow capacities"][arc] = inter_region_dict["capacities"][inter_arc] - local_dict["flow costs"][arc] = inter_region_dict["costs"][inter_arc]/2 - #print(f"scenario name = {region_dict['name']} \n {local_dict=} ") + local_dict["flow capacities"][arc] = inter_region_dict["capacities"][ + inter_arc + ] + local_dict["flow costs"][arc] = inter_region_dict["costs"][inter_arc] / 2 + # print(f"scenario name = {region_dict['name']} \n {local_dict=} ") return local_dict + ###Creates the model when local_dict is given def min_cost_distr_problem(local_dict, cfg, sense=pyo.minimize, max_revenue=None): - """ Create an arcs formulation of network flow + """Create an arcs formulation of network flow Args: local_dict (dict): dictionary representing a region including the inter region arcs \n @@ -71,9 +74,11 @@ def min_cost_distr_problem(local_dict, cfg, sense=pyo.minimize, max_revenue=None if a[1] in local_dict["nodes"]: arcsin[a[1]].append(a) - model = pyo.ConcreteModel(name='MinCostFlowArcs') - def flowBounds_rule(model, i,j): - return (0, local_dict["flow capacities"][(i,j)]) + model = 
pyo.ConcreteModel(name="MinCostFlowArcs") + + def flowBounds_rule(model, i, j): + return (0, local_dict["flow capacities"][(i, j)]) + model.flow = pyo.Var(local_dict["arcs"], bounds=flowBounds_rule) # x def slackBounds_rule(model, n): @@ -86,39 +91,72 @@ def slackBounds_rule(model, n): # Should be (0,0) but to avoid infeasibility we add a negative slack variable return (None, 0) else: - return (0,0) + return (0, 0) else: raise ValueError(f"unknown node type for node {n}") - + model.y = pyo.Var(local_dict["nodes"], bounds=slackBounds_rule) if cfg.ensure_xhat_feas: # too big penalty to allow the stack to be non-zero - model.MinCost = pyo.Objective(expr=\ - sum(local_dict["flow costs"][a]*model.flow[a] for a in local_dict["arcs"]) \ - + sum(local_dict["production costs"][n]*(local_dict["supply"][n]-model.y[n]) for n in local_dict["factory nodes"]) \ - + sum(local_dict["revenues"][n]*(local_dict["supply"][n]-model.y[n]) for n in local_dict["buyer nodes"]) \ - + sum(2*max_revenue*(-model.y[n]) for n in local_dict["distribution center nodes"]) , - sense=sense) - + model.MinCost = pyo.Objective( + expr=sum( + local_dict["flow costs"][a] * model.flow[a] for a in local_dict["arcs"] + ) + + sum( + local_dict["production costs"][n] + * (local_dict["supply"][n] - model.y[n]) + for n in local_dict["factory nodes"] + ) + + sum( + local_dict["revenues"][n] * (local_dict["supply"][n] - model.y[n]) + for n in local_dict["buyer nodes"] + ) + + sum( + 2 * max_revenue * (-model.y[n]) + for n in local_dict["distribution center nodes"] + ), + sense=sense, + ) + else: - model.MinCost = pyo.Objective(expr=\ - sum(local_dict["flow costs"][a]*model.flow[a] for a in local_dict["arcs"]) \ - + sum(local_dict["production costs"][n]*(local_dict["supply"][n]-model.y[n]) for n in local_dict["factory nodes"]) \ - + sum(local_dict["revenues"][n]*(local_dict["supply"][n]-model.y[n]) for n in local_dict["buyer nodes"]) , - sense=sense) - + model.MinCost = pyo.Objective( + expr=sum( + local_dict["flow costs"][a] * model.flow[a] for a in local_dict["arcs"] + ) + + sum( + local_dict["production costs"][n] + * (local_dict["supply"][n] - model.y[n]) + for n in local_dict["factory nodes"] + ) + + sum( + local_dict["revenues"][n] * (local_dict["supply"][n] - model.y[n]) + for n in local_dict["buyer nodes"] + ), + sense=sense, + ) + def FlowBalance_rule(m, n): - return sum(m.flow[a] for a in arcsout[n])\ - - sum(m.flow[a] for a in arcsin[n])\ - + m.y[n] == local_dict["supply"][n] + return ( + sum(m.flow[a] for a in arcsout[n]) + - sum(m.flow[a] for a in arcsin[n]) + + m.y[n] + == local_dict["supply"][n] + ) + model.FlowBalance = pyo.Constraint(local_dict["nodes"], rule=FlowBalance_rule) return model ###Creates the scenario -def scenario_creator(scenario_name, inter_region_dict=None, cfg=None, data_params=None, all_nodes_dict=None): +def scenario_creator( + scenario_name, + inter_region_dict=None, + cfg=None, + data_params=None, + all_nodes_dict=None, +): """Creates the model, which should include the consensus variables. \n However, this function shouldn't attach the consensus variables to root nodes, as it is done in admmWrapper. 
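+    Illustration (an added example, assuming the two-region data from
+    distr_data): the single inter-region arc ("Region1","DC1") ->
+    ("Region2","DC2") gives both regions the consensus variable name
+    "flow[('DC1', 'DC2')]", and admmWrapper reconciles the two copies.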
@@ -128,32 +166,42 @@ def scenario_creator(scenario_name, inter_region_dict=None, cfg=None, data_param Returns: Pyomo ConcreteModel: the instantiated model - """ - assert (inter_region_dict is not None) - assert (cfg is not None) + """ + assert inter_region_dict is not None + assert cfg is not None if cfg.scalable: - assert (data_params is not None) - assert (all_nodes_dict is not None) + assert data_params is not None + assert all_nodes_dict is not None region_creation_starting_time = time.time() - region_dict = distr_data.scalable_region_dict_creator(scenario_name, all_nodes_dict=all_nodes_dict, cfg=cfg, data_params=data_params) + region_dict = distr_data.scalable_region_dict_creator( + scenario_name, + all_nodes_dict=all_nodes_dict, + cfg=cfg, + data_params=data_params, + ) region_creation_end_time = time.time() - print(f"time for creating region {scenario_name}: {region_creation_end_time - region_creation_starting_time}") + print( + f"time for creating region {scenario_name}: {region_creation_end_time - region_creation_starting_time}" + ) else: region_dict = distr_data.region_dict_creator(scenario_name) # Adding inter region arcs nodes and associated features local_dict = inter_arcs_adder(region_dict, inter_region_dict) # Generating the model - model = min_cost_distr_problem(local_dict, cfg, max_revenue=data_params["max revenue"]) + model = min_cost_distr_problem( + local_dict, cfg, max_revenue=data_params["max revenue"] + ) + + # varlist = list() + # sputils.attach_root_node(model, model.MinCost, varlist) - #varlist = list() - #sputils.attach_root_node(model, model.MinCost, varlist) - return model ###Functions required in other files, which constructions are specific to the problem -def scenario_denouement(rank, scenario_name, scenario, eps=10**(-6)): + +def scenario_denouement(rank, scenario_name, scenario, eps=10 ** (-6)): """for each scenario prints its name and the final variable values Args: @@ -163,9 +211,11 @@ def scenario_denouement(rank, scenario_name, scenario, eps=10**(-6)): eps (float, opt): ensures that the dummy slack variables introduced have small values """ for var in scenario.y: - if 'DC' in var: - if (abs(scenario.y[var].value) > eps): - print(f"The penalty slack {scenario.y[var].name} is too big, its absolute value is {abs(scenario.y[var].value)}") + if "DC" in var: + if abs(scenario.y[var].value) > eps: + print( + f"The penalty slack {scenario.y[var].name} is too big, its absolute value is {abs(scenario.y[var].value)}" + ) return print(f"flow values for {scenario_name}") scenario.flow.pprint() @@ -180,27 +230,31 @@ def consensus_vars_creator(num_scens, inter_region_dict, all_scenario_names): Args: num_scens (int): select the number of scenarios (regions) wanted - + Returns: - dict: dictionary which keys are the regions and values are the list of consensus variables + dict: dictionary which keys are the regions and values are the list of consensus variables present in the region """ - # Due to the small size of inter_region_dict, it is not given as argument but rather created. + # Due to the small size of inter_region_dict, it is not given as argument but rather created. 
consensus_vars = {} for inter_arc in inter_region_dict["arcs"]: - source,target = inter_arc - region_source,node_source = source - region_target,node_target = target + source, target = inter_arc + region_source, node_source = source + region_target, node_target = target arc = node_source, node_target - vstr = f"flow[{arc}]" #variable name as string + vstr = f"flow[{arc}]" # variable name as string - #adds inter region arcs in the source region - if region_source not in consensus_vars: #initiates consensus_vars[region_source] + # adds inter region arcs in the source region + if ( + region_source not in consensus_vars + ): # initiates consensus_vars[region_source] consensus_vars[region_source] = list() consensus_vars[region_source].append(vstr) - #adds inter region arcs in the target region - if region_target not in consensus_vars: #initiates consensus_vars[region_target] + # adds inter region arcs in the target region + if ( + region_target not in consensus_vars + ): # initiates consensus_vars[region_target] consensus_vars[region_target] = list() consensus_vars[region_target].append(vstr) for region in all_scenario_names: @@ -231,29 +285,35 @@ def kw_creator(all_nodes_dict, cfg, inter_region_dict, data_params): dict (str): the kwargs that are used in distr.scenario_creator, here {"num_scens": num_scens} """ kwargs = { - "all_nodes_dict" : all_nodes_dict, - "inter_region_dict" : inter_region_dict, - "cfg" : cfg, - "data_params" : data_params, - } + "all_nodes_dict": all_nodes_dict, + "inter_region_dict": inter_region_dict, + "cfg": cfg, + "data_params": data_params, + } return kwargs def inparser_adder(cfg): - #requires the user to give the number of scenarios + # requires the user to give the number of scenarios cfg.num_scens_required() - cfg.add_to_config("scalable", - description="decides whether a sclale model is used", - domain=bool, - default=False) - - cfg.add_to_config("mnpr", - description="max number of nodes per region and per type", - domain=int, - default=4) - - cfg.add_to_config("ensure_xhat_feas", - description="adds slacks with high costs to ensure the feasibility of xhat yet maintaining the optimal", - domain=bool, - default=False) \ No newline at end of file + cfg.add_to_config( + "scalable", + description="decides whether a sclale model is used", + domain=bool, + default=False, + ) + + cfg.add_to_config( + "mnpr", + description="max number of nodes per region and per type", + domain=int, + default=4, + ) + + cfg.add_to_config( + "ensure_xhat_feas", + description="adds slacks with high costs to ensure the feasibility of xhat yet maintaining the optimal", + domain=bool, + default=False, + ) diff --git a/examples/distr/distr_admm_cylinders.py b/examples/distr/distr_admm_cylinders.py index 82ead40dc..ff7570f67 100644 --- a/examples/distr/distr_admm_cylinders.py +++ b/examples/distr/distr_admm_cylinders.py @@ -22,6 +22,7 @@ write_solution = False + def _parse_args(): # create a config object and parse cfg = config.Config() @@ -34,19 +35,25 @@ def _parse_args(): cfg.lagrangian_args() cfg.ph_ob_args() cfg.tracking_args() - cfg.add_to_config("run_async", - description="Run with async projective hedging instead of progressive hedging", - domain=bool, - default=False) + cfg.add_to_config( + "run_async", + description="Run with async projective hedging instead of progressive hedging", + domain=bool, + default=False, + ) cfg.parse_command_line("distr_admm_cylinders") return cfg # This need to be executed long before the cylinders are created -def _count_cylinders(cfg): +def 
_count_cylinders(cfg): count = 1 - cfglist = ["xhatxbar", "lagrangian", "ph_ob"] # All the cfg arguments that create a new cylinders + cfglist = [ + "xhatxbar", + "lagrangian", + "ph_ob", + ] # All the cfg arguments that create a new cylinders # Add to this list any cfg attribute that would create a spoke for cylname in cfglist: if cfg[cylname]: @@ -55,94 +62,120 @@ def _count_cylinders(cfg): def main(): - cfg = _parse_args() - if cfg.default_rho is None: # and rho_setter is None - raise RuntimeError("No rho_setter so a default must be specified via --default-rho") + if cfg.default_rho is None: # and rho_setter is None + raise RuntimeError( + "No rho_setter so a default must be specified via --default-rho" + ) if cfg.scalable: import json + json_file_path = "data_params.json" # Read the JSON file - with open(json_file_path, 'r') as file: + with open(json_file_path, "r") as file: start_time_creating_data = time.time() data_params = json.load(file) all_nodes_dict = distr_data.all_nodes_dict_creator(cfg, data_params) - all_DC_nodes = [DC_node for region in all_nodes_dict for DC_node in all_nodes_dict[region]["distribution center nodes"]] - inter_region_dict = distr_data.scalable_inter_region_dict_creator(all_DC_nodes, cfg, data_params) + all_DC_nodes = [ + DC_node + for region in all_nodes_dict + for DC_node in all_nodes_dict[region]["distribution center nodes"] + ] + inter_region_dict = distr_data.scalable_inter_region_dict_creator( + all_DC_nodes, cfg, data_params + ) end_time_creating_data = time.time() creating_data_time = end_time_creating_data - start_time_creating_data print(f"{creating_data_time=}") else: - inter_region_dict = distr_data.inter_region_dict_creator(num_scens=cfg.num_scens) + inter_region_dict = distr_data.inter_region_dict_creator( + num_scens=cfg.num_scens + ) all_nodes_dict = None - data_params = {"max revenue": 1200} # hard-coded because the model is hard-coded + data_params = { + "max revenue": 1200 + } # hard-coded because the model is hard-coded ph_converger = None options = {} all_scenario_names = distr.scenario_names_creator(num_scens=cfg.num_scens) scenario_creator = distr.scenario_creator - scenario_creator_kwargs = distr.kw_creator(all_nodes_dict, cfg, inter_region_dict, data_params) - consensus_vars = distr.consensus_vars_creator(cfg.num_scens, inter_region_dict, all_scenario_names) + scenario_creator_kwargs = distr.kw_creator( + all_nodes_dict, cfg, inter_region_dict, data_params + ) + consensus_vars = distr.consensus_vars_creator( + cfg.num_scens, inter_region_dict, all_scenario_names + ) n_cylinders = _count_cylinders(cfg) - admm = admmWrapper.AdmmWrapper(options, - all_scenario_names, - scenario_creator, - consensus_vars, - n_cylinders=n_cylinders, - mpicomm=MPI.COMM_WORLD, - scenario_creator_kwargs=scenario_creator_kwargs, - ) + admm = admmWrapper.AdmmWrapper( + options, + all_scenario_names, + scenario_creator, + consensus_vars, + n_cylinders=n_cylinders, + mpicomm=MPI.COMM_WORLD, + scenario_creator_kwargs=scenario_creator_kwargs, + ) # Things needed for vanilla cylinders - scenario_creator = admm.admmWrapper_scenario_creator ##change needed because of the wrapper + scenario_creator = ( + admm.admmWrapper_scenario_creator + ) ##change needed because of the wrapper scenario_creator_kwargs = None scenario_denouement = distr.scenario_denouement - #note that the admmWrapper scenario_creator wrapper doesn't take any arguments + # note that the admmWrapper scenario_creator wrapper doesn't take any arguments variable_probability = admm.var_prob_list beans = 
(cfg, scenario_creator, scenario_denouement, all_scenario_names) if cfg.run_async: # Vanilla APH hub - hub_dict = vanilla.aph_hub(*beans, - scenario_creator_kwargs=scenario_creator_kwargs, - ph_extensions=None, - rho_setter = None, - variable_probability=variable_probability) + hub_dict = vanilla.aph_hub( + *beans, + scenario_creator_kwargs=scenario_creator_kwargs, + ph_extensions=None, + rho_setter=None, + variable_probability=variable_probability, + ) else: # Vanilla PH hub - hub_dict = vanilla.ph_hub(*beans, - scenario_creator_kwargs=scenario_creator_kwargs, - ph_extensions=None, - ph_converger=ph_converger, - rho_setter=None, - variable_probability=variable_probability) + hub_dict = vanilla.ph_hub( + *beans, + scenario_creator_kwargs=scenario_creator_kwargs, + ph_extensions=None, + ph_converger=ph_converger, + rho_setter=None, + variable_probability=variable_probability, + ) # FWPH spoke DOES NOT WORK with variable probability # Standard Lagrangian bound spoke if cfg.lagrangian: - lagrangian_spoke = vanilla.lagrangian_spoke(*beans, - scenario_creator_kwargs=scenario_creator_kwargs, - rho_setter = None) - + lagrangian_spoke = vanilla.lagrangian_spoke( + *beans, scenario_creator_kwargs=scenario_creator_kwargs, rho_setter=None + ) # ph outer bounder spoke if cfg.ph_ob: - ph_ob_spoke = vanilla.ph_ob_spoke(*beans, - scenario_creator_kwargs=scenario_creator_kwargs, - rho_setter = None, - variable_probability=variable_probability) + ph_ob_spoke = vanilla.ph_ob_spoke( + *beans, + scenario_creator_kwargs=scenario_creator_kwargs, + rho_setter=None, + variable_probability=variable_probability, + ) # xhat looper bound spoke if cfg.xhatxbar: - xhatxbar_spoke = vanilla.xhatxbar_spoke(*beans, scenario_creator_kwargs=scenario_creator_kwargs) + xhatxbar_spoke = vanilla.xhatxbar_spoke( + *beans, scenario_creator_kwargs=scenario_creator_kwargs + ) list_of_spoke_dict = list() if cfg.lagrangian: @@ -152,17 +185,21 @@ def main(): if cfg.xhatxbar: list_of_spoke_dict.append(xhatxbar_spoke) - assert n_cylinders == 1 + len(list_of_spoke_dict), f"n_cylinders = {n_cylinders}, len(list_of_spoke_dict) = {len(list_of_spoke_dict)}" + assert ( + n_cylinders == 1 + len(list_of_spoke_dict) + ), f"n_cylinders = {n_cylinders}, len(list_of_spoke_dict) = {len(list_of_spoke_dict)}" wheel = WheelSpinner(hub_dict, list_of_spoke_dict) wheel.spin() if write_solution: - wheel.write_first_stage_solution('distr_soln.csv') - wheel.write_first_stage_solution('distr_cyl_nonants.npy', - first_stage_solution_writer=sputils.first_stage_nonant_npy_serializer) - wheel.write_tree_solution('distr_full_solution') - + wheel.write_first_stage_solution("distr_soln.csv") + wheel.write_first_stage_solution( + "distr_cyl_nonants.npy", + first_stage_solution_writer=sputils.first_stage_nonant_npy_serializer, + ) + wheel.write_tree_solution("distr_full_solution") + if global_rank == 0: best_objective = wheel.spcomm.BestInnerBound print(f"{best_objective=}") diff --git a/examples/distr/distr_data.py b/examples/distr/distr_data.py index d27b610a1..b6d4ba06c 100644 --- a/examples/distr/distr_data.py +++ b/examples/distr/distr_data.py @@ -27,58 +27,88 @@ def inter_region_dict_creator(num_scens): num_scens (int): select the number of scenarios (regions) wanted Returns: - dict: + dict: Each arc is presented as a pair (source, target) with source and target containing (scenario_name, node_name) \n The arcs are used as keys for the dictionaries of costs and capacities """ - inter_region_dict={} + inter_region_dict = {} if num_scens == 2: - 
inter_region_dict["arcs"]=[(("Region1","DC1"),("Region2","DC2"))] - inter_region_dict["costs"]={(("Region1","DC1"),("Region2","DC2")): 100} - inter_region_dict["capacities"]={(("Region1","DC1"),("Region2","DC2")): 70} + inter_region_dict["arcs"] = [(("Region1", "DC1"), ("Region2", "DC2"))] + inter_region_dict["costs"] = {(("Region1", "DC1"), ("Region2", "DC2")): 100} + inter_region_dict["capacities"] = {(("Region1", "DC1"), ("Region2", "DC2")): 70} elif num_scens == 3: - inter_region_dict["arcs"] = [(("Region1","DC1"),("Region2","DC2")),(("Region2","DC2"),("Region1","DC1")),\ - (("Region1","DC1"),("Region3","DC3_1")),(("Region3","DC3_1"),("Region1","DC1")),\ - (("Region2","DC2"),("Region3","DC3_2")),(("Region3","DC3_2"),("Region2","DC2")),\ - ] - inter_region_dict["costs"] = {(("Region1","DC1"),("Region2","DC2")): 100, (("Region2","DC2"),("Region1","DC1")): 50,\ - (("Region1","DC1"),("Region3","DC3_1")): 200, (("Region3","DC3_1"),("Region1","DC1")): 200,\ - (("Region2","DC2"),("Region3","DC3_2")): 200, (("Region3","DC3_2"),("Region2","DC2")): 200,\ - } - inter_region_dict["capacities"] = {(("Region1","DC1"),("Region2","DC2")): 70, (("Region2","DC2"),("Region1","DC1")): 100,\ - (("Region1","DC1"),("Region3","DC3_1")): 50, (("Region3","DC3_1"),("Region1","DC1")): 50,\ - (("Region2","DC2"),("Region3","DC3_2")): 50, (("Region3","DC3_2"),("Region2","DC2")): 50,\ - } - + inter_region_dict["arcs"] = [ + (("Region1", "DC1"), ("Region2", "DC2")), + (("Region2", "DC2"), ("Region1", "DC1")), + (("Region1", "DC1"), ("Region3", "DC3_1")), + (("Region3", "DC3_1"), ("Region1", "DC1")), + (("Region2", "DC2"), ("Region3", "DC3_2")), + (("Region3", "DC3_2"), ("Region2", "DC2")), + ] + inter_region_dict["costs"] = { + (("Region1", "DC1"), ("Region2", "DC2")): 100, + (("Region2", "DC2"), ("Region1", "DC1")): 50, + (("Region1", "DC1"), ("Region3", "DC3_1")): 200, + (("Region3", "DC3_1"), ("Region1", "DC1")): 200, + (("Region2", "DC2"), ("Region3", "DC3_2")): 200, + (("Region3", "DC3_2"), ("Region2", "DC2")): 200, + } + inter_region_dict["capacities"] = { + (("Region1", "DC1"), ("Region2", "DC2")): 70, + (("Region2", "DC2"), ("Region1", "DC1")): 100, + (("Region1", "DC1"), ("Region3", "DC3_1")): 50, + (("Region3", "DC3_1"), ("Region1", "DC1")): 50, + (("Region2", "DC2"), ("Region3", "DC3_2")): 50, + (("Region3", "DC3_2"), ("Region2", "DC2")): 50, + } + elif num_scens == 4: - inter_region_dict["arcs"] = [(("Region1","DC1"),("Region2","DC2")),(("Region2","DC2"),("Region1","DC1")),\ - (("Region1","DC1"),("Region3","DC3_1")),(("Region3","DC3_1"),("Region1","DC1")),\ - (("Region2","DC2"),("Region3","DC3_2")),(("Region3","DC3_2"),("Region2","DC2")),\ - (("Region1","DC1"),("Region4","DC4")),(("Region4","DC4"),("Region1","DC1")),\ - (("Region4","DC4"),("Region2","DC2")),(("Region2","DC2"),("Region4","DC4")),\ - ] - inter_region_dict["costs"] = {(("Region1","DC1"),("Region2","DC2")): 100, (("Region2","DC2"),("Region1","DC1")): 50,\ - (("Region1","DC1"),("Region3","DC3_1")): 200, (("Region3","DC3_1"),("Region1","DC1")): 200,\ - (("Region2","DC2"),("Region3","DC3_2")): 200, (("Region3","DC3_2"),("Region2","DC2")): 200,\ - (("Region1","DC1"),("Region4","DC4")): 30, (("Region4","DC4"),("Region1","DC1")): 50,\ - (("Region4","DC4"),("Region2","DC2")): 100, (("Region2","DC2"),("Region4","DC4")): 70,\ - } - inter_region_dict["capacities"] = {(("Region1","DC1"),("Region2","DC2")): 70, (("Region2","DC2"),("Region1","DC1")): 100,\ - (("Region1","DC1"),("Region3","DC3_1")): 50, (("Region3","DC3_1"),("Region1","DC1")): 50,\ - 
(("Region2","DC2"),("Region3","DC3_2")): 50, (("Region3","DC3_2"),("Region2","DC2")): 50,\ - (("Region1","DC1"),("Region4","DC4")): 100, (("Region4","DC4"),("Region1","DC1")): 60,\ - (("Region4","DC4"),("Region2","DC2")): 20, (("Region2","DC2"),("Region4","DC4")): 40,\ - } - + inter_region_dict["arcs"] = [ + (("Region1", "DC1"), ("Region2", "DC2")), + (("Region2", "DC2"), ("Region1", "DC1")), + (("Region1", "DC1"), ("Region3", "DC3_1")), + (("Region3", "DC3_1"), ("Region1", "DC1")), + (("Region2", "DC2"), ("Region3", "DC3_2")), + (("Region3", "DC3_2"), ("Region2", "DC2")), + (("Region1", "DC1"), ("Region4", "DC4")), + (("Region4", "DC4"), ("Region1", "DC1")), + (("Region4", "DC4"), ("Region2", "DC2")), + (("Region2", "DC2"), ("Region4", "DC4")), + ] + inter_region_dict["costs"] = { + (("Region1", "DC1"), ("Region2", "DC2")): 100, + (("Region2", "DC2"), ("Region1", "DC1")): 50, + (("Region1", "DC1"), ("Region3", "DC3_1")): 200, + (("Region3", "DC3_1"), ("Region1", "DC1")): 200, + (("Region2", "DC2"), ("Region3", "DC3_2")): 200, + (("Region3", "DC3_2"), ("Region2", "DC2")): 200, + (("Region1", "DC1"), ("Region4", "DC4")): 30, + (("Region4", "DC4"), ("Region1", "DC1")): 50, + (("Region4", "DC4"), ("Region2", "DC2")): 100, + (("Region2", "DC2"), ("Region4", "DC4")): 70, + } + inter_region_dict["capacities"] = { + (("Region1", "DC1"), ("Region2", "DC2")): 70, + (("Region2", "DC2"), ("Region1", "DC1")): 100, + (("Region1", "DC1"), ("Region3", "DC3_1")): 50, + (("Region3", "DC3_1"), ("Region1", "DC1")): 50, + (("Region2", "DC2"), ("Region3", "DC3_2")): 50, + (("Region3", "DC3_2"), ("Region2", "DC2")): 50, + (("Region1", "DC1"), ("Region4", "DC4")): 100, + (("Region4", "DC4"), ("Region1", "DC1")): 60, + (("Region4", "DC4"), ("Region2", "DC2")): 20, + (("Region2", "DC2"), ("Region4", "DC4")): 40, + } + return inter_region_dict def region_dict_creator(scenario_name): - """ Create a scenario for the inter-region max profit distribution example. + """Create a scenario for the inter-region max profit distribution example. - The convention for node names is: + The convention for node names is: Symbol + number of the region (+ _ + number of the example if needed), with symbols DC for distribution centers, F for factory nodes, B for buyer nodes. \n For instance: F3_1 is the 1st factory node of region 3. 
\n @@ -98,11 +128,12 @@ def region_dict_creator(scenario_name): "flow costs" (dict[a] of float) : costs per unit flow on each arc \n "flow capacities" (dict[a] of floats) : upper bound capacities of each arc \n """ + def _is_partition(L, *lists): # Step 1: Verify that the union of all sublists contains all elements of L if set(L) != set().union(*lists): return False - + # Step 2: Ensure each element in L appears in exactly one sublist for item in L: count = 0 @@ -111,106 +142,186 @@ def _is_partition(L, *lists): count += 1 if count != 1: return False - + return True - if scenario_name == "Region1" : + + if scenario_name == "Region1": # Creates data for Region1 - region_dict={"name": "Region1"} + region_dict = {"name": "Region1"} region_dict["nodes"] = ["F1_1", "F1_2", "DC1", "B1_1", "B1_2"] - region_dict["factory nodes"] = ["F1_1","F1_2"] - region_dict["buyer nodes"] = ["B1_1","B1_2"] - region_dict["distribution center nodes"]= ["DC1"] - region_dict["supply"] = {"F1_1": 80, "F1_2": 70, "B1_1": -60, "B1_2": -90, "DC1": 0} - region_dict["arcs"] = [("F1_1","DC1"), ("F1_2","DC1"), ("DC1","B1_1"), - ("DC1","B1_2"), ("F1_1","B1_1"), ("F1_2","B1_2")] + region_dict["factory nodes"] = ["F1_1", "F1_2"] + region_dict["buyer nodes"] = ["B1_1", "B1_2"] + region_dict["distribution center nodes"] = ["DC1"] + region_dict["supply"] = { + "F1_1": 80, + "F1_2": 70, + "B1_1": -60, + "B1_2": -90, + "DC1": 0, + } + region_dict["arcs"] = [ + ("F1_1", "DC1"), + ("F1_2", "DC1"), + ("DC1", "B1_1"), + ("DC1", "B1_2"), + ("F1_1", "B1_1"), + ("F1_2", "B1_2"), + ] region_dict["production costs"] = {"F1_1": 50, "F1_2": 80} region_dict["revenues"] = {"B1_1": 800, "B1_2": 900} # most capacities are 50, so start there and then override region_dict["flow capacities"] = {a: 50 for a in region_dict["arcs"]} - region_dict["flow capacities"][("F1_1","B1_1")] = None - region_dict["flow capacities"][("F1_2","B1_2")] = None - region_dict["flow costs"] = {("F1_1","DC1"): 300, ("F1_2","DC1"): 500, ("DC1","B1_1"): 200, - ("DC1","B1_2"): 400, ("F1_1","B1_1"): 700, ("F1_2","B1_2"): 1000} - - elif scenario_name=="Region2": + region_dict["flow capacities"][("F1_1", "B1_1")] = None + region_dict["flow capacities"][("F1_2", "B1_2")] = None + region_dict["flow costs"] = { + ("F1_1", "DC1"): 300, + ("F1_2", "DC1"): 500, + ("DC1", "B1_1"): 200, + ("DC1", "B1_2"): 400, + ("F1_1", "B1_1"): 700, + ("F1_2", "B1_2"): 1000, + } + + elif scenario_name == "Region2": # Creates data for Region2 - region_dict={"name": "Region2"} + region_dict = {"name": "Region2"} region_dict["nodes"] = ["DC2", "B2_1", "B2_2", "B2_3"] region_dict["factory nodes"] = list() - region_dict["buyer nodes"] = ["B2_1","B2_2","B2_3"] - region_dict["distribution center nodes"]= ["DC2"] + region_dict["buyer nodes"] = ["B2_1", "B2_2", "B2_3"] + region_dict["distribution center nodes"] = ["DC2"] region_dict["supply"] = {"B2_1": -200, "B2_2": -150, "B2_3": -100, "DC2": 0} - region_dict["arcs"] = [("DC2","B2_1"), ("DC2","B2_2"), ("DC2","B2_3")] + region_dict["arcs"] = [("DC2", "B2_1"), ("DC2", "B2_2"), ("DC2", "B2_3")] region_dict["production costs"] = {} - region_dict["revenues"] = {"B2_1": 900, "B2_2": 800, "B2_3":1200} - region_dict["flow capacities"] = {("DC2","B2_1"): 200, ("DC2","B2_2"): 150, ("DC2","B2_3"): 100} - region_dict["flow costs"] = {("DC2","B2_1"): 100, ("DC2","B2_2"): 200, ("DC2","B2_3"): 300} - - elif scenario_name == "Region3" : + region_dict["revenues"] = {"B2_1": 900, "B2_2": 800, "B2_3": 1200} + region_dict["flow capacities"] = { + ("DC2", "B2_1"): 200, + 
("DC2", "B2_2"): 150, + ("DC2", "B2_3"): 100, + } + region_dict["flow costs"] = { + ("DC2", "B2_1"): 100, + ("DC2", "B2_2"): 200, + ("DC2", "B2_3"): 300, + } + + elif scenario_name == "Region3": # Creates data for Region3 - region_dict={"name": "Region3"} + region_dict = {"name": "Region3"} region_dict["nodes"] = ["F3_1", "F3_2", "DC3_1", "DC3_2", "B3_1", "B3_2"] - region_dict["factory nodes"] = ["F3_1","F3_2"] - region_dict["buyer nodes"] = ["B3_1","B3_2"] - region_dict["distribution center nodes"]= ["DC3_1","DC3_2"] - region_dict["supply"] = {"F3_1": 80, "F3_2": 60, "B3_1": -100, "B3_2": -100, "DC3_1": 0, "DC3_2": 0} - region_dict["arcs"] = [("F3_1","DC3_1"), ("F3_2","DC3_2"), ("DC3_1","B3_1"), - ("DC3_2","B3_2"), ("DC3_1","DC3_2"), ("DC3_2","DC3_1")] + region_dict["factory nodes"] = ["F3_1", "F3_2"] + region_dict["buyer nodes"] = ["B3_1", "B3_2"] + region_dict["distribution center nodes"] = ["DC3_1", "DC3_2"] + region_dict["supply"] = { + "F3_1": 80, + "F3_2": 60, + "B3_1": -100, + "B3_2": -100, + "DC3_1": 0, + "DC3_2": 0, + } + region_dict["arcs"] = [ + ("F3_1", "DC3_1"), + ("F3_2", "DC3_2"), + ("DC3_1", "B3_1"), + ("DC3_2", "B3_2"), + ("DC3_1", "DC3_2"), + ("DC3_2", "DC3_1"), + ] region_dict["production costs"] = {"F3_1": 50, "F3_2": 50} region_dict["revenues"] = {"B3_1": 900, "B3_2": 700} - region_dict["flow capacities"] = {("F3_1","DC3_1"): 80, ("F3_2","DC3_2"): 60, ("DC3_1","B3_1"): 100, - ("DC3_2","B3_2"): 100, ("DC3_1","DC3_2"): 70, ("DC3_2","DC3_1"): 50} - region_dict["flow costs"] = {("F3_1","DC3_1"): 100, ("F3_2","DC3_2"): 100, ("DC3_1","B3_1"): 201, - ("DC3_2","B3_2"): 200, ("DC3_1","DC3_2"): 100, ("DC3_2","DC3_1"): 100} - + region_dict["flow capacities"] = { + ("F3_1", "DC3_1"): 80, + ("F3_2", "DC3_2"): 60, + ("DC3_1", "B3_1"): 100, + ("DC3_2", "B3_2"): 100, + ("DC3_1", "DC3_2"): 70, + ("DC3_2", "DC3_1"): 50, + } + region_dict["flow costs"] = { + ("F3_1", "DC3_1"): 100, + ("F3_2", "DC3_2"): 100, + ("DC3_1", "B3_1"): 201, + ("DC3_2", "B3_2"): 200, + ("DC3_1", "DC3_2"): 100, + ("DC3_2", "DC3_1"): 100, + } + elif scenario_name == "Region4": # Creates data for Region4 - region_dict={"name": "Region4"} + region_dict = {"name": "Region4"} region_dict["nodes"] = ["F4_1", "F4_2", "DC4", "B4_1", "B4_2"] - region_dict["factory nodes"] = ["F4_1","F4_2"] - region_dict["buyer nodes"] = ["B4_1","B4_2"] + region_dict["factory nodes"] = ["F4_1", "F4_2"] + region_dict["buyer nodes"] = ["B4_1", "B4_2"] region_dict["distribution center nodes"] = ["DC4"] - region_dict["supply"] = {"F4_1": 200, "F4_2": 30, "B4_1": -100, "B4_2": -100, "DC4": 0} - region_dict["arcs"] = [("F4_1","DC4"), ("F4_2","DC4"), ("DC4","B4_1"), ("DC4","B4_2")] + region_dict["supply"] = { + "F4_1": 200, + "F4_2": 30, + "B4_1": -100, + "B4_2": -100, + "DC4": 0, + } + region_dict["arcs"] = [ + ("F4_1", "DC4"), + ("F4_2", "DC4"), + ("DC4", "B4_1"), + ("DC4", "B4_2"), + ] region_dict["production costs"] = {"F4_1": 50, "F4_2": 50} region_dict["revenues"] = {"B4_1": 900, "B4_2": 700} - region_dict["flow capacities"] = {("F4_1","DC4"): 80, ("F4_2","DC4"): 60, ("DC4","B4_1"): 100, ("DC4","B4_2"): 100} - region_dict["flow costs"] = {("F4_1","DC4"): 100, ("F4_2","DC4"): 80, ("DC4","B4_1"): 90, ("DC4","B4_2"): 70} - + region_dict["flow capacities"] = { + ("F4_1", "DC4"): 80, + ("F4_2", "DC4"): 60, + ("DC4", "B4_1"): 100, + ("DC4", "B4_2"): 100, + } + region_dict["flow costs"] = { + ("F4_1", "DC4"): 100, + ("F4_2", "DC4"): 80, + ("DC4", "B4_1"): 90, + ("DC4", "B4_2"): 70, + } + else: - raise RuntimeError (f"unknown Region name 
{scenario_name}") + raise RuntimeError(f"unknown Region name {scenario_name}") - assert _is_partition(region_dict["nodes"], region_dict["factory nodes"], region_dict["buyer nodes"], region_dict["distribution center nodes"]) + assert _is_partition( + region_dict["nodes"], + region_dict["factory nodes"], + region_dict["buyer nodes"], + region_dict["distribution center nodes"], + ) return region_dict + if __name__ == "__main__": - #creating the json files - for num_scens in range(2,5): - inter_region_dict_path = f'json_dicts.inter_region_dict{num_scens}.json' + # creating the json files + for num_scens in range(2, 5): + inter_region_dict_path = f"json_dicts.inter_region_dict{num_scens}.json" data = inter_region_dict_creator(num_scens) # Write the data to the JSON file - with open(inter_region_dict_path, 'w') as json_file: + with open(inter_region_dict_path, "w") as json_file: json.dump(data, json_file, indent=4) - for i in range(1,5): - region_dict_path = f'json_dicts.region{i}_dict.json' + for i in range(1, 5): + region_dict_path = f"json_dicts.region{i}_dict.json" data = region_dict_creator(f"Region{i}") # Write the data to the JSON file - with open(region_dict_path, 'w') as json_file: + with open(region_dict_path, "w") as json_file: json.dump(data, json_file, indent=4) ######################################################################################################################## # Scalable datasets + def parse_node_name(name): - """ decomposes the name, for example "DC1_2 gives" "DC",1,2 + """decomposes the name, for example "DC1_2 gives" "DC",1,2 Args: name (str): name of the node @@ -218,20 +329,20 @@ def parse_node_name(name): triplet (str, int, int): type of node ("DC", "F" or "B"), number of the region, and number of the node """ # Define the regular expression pattern - pattern = r'^([A-Za-z]+)(\d+)(?:_(\d+))?$' - + pattern = r"^([A-Za-z]+)(\d+)(?:_(\d+))?$" + # Match the pattern against the provided name match = re.match(pattern, name) - + if match: # Extract the prefix, the first number, and the second number (if present) prefix = match.group(1) first_number = match.group(2) - second_number = match.group(3) if match.group(3) is not None else '1' + second_number = match.group(3) if match.group(3) is not None else "1" assert prefix in ["DC", "F", "B"] return prefix, int(first_number), int(second_number) else: - raise RuntimeError (f"the node {name} can't be well decomposed") + raise RuntimeError(f"the node {name} can't be well decomposed") def _node_num(max_node_per_region, node_type, region_num, count): @@ -242,11 +353,18 @@ def _node_num(max_node_per_region, node_type, region_num, count): Returns: int: a number specific to the node. 
     """
-    node_types = ["DC", "F", "B"] #no need to include the dummy nodes as they are added automatically
-    return (max_node_per_region * region_num + count) * len(node_types) + node_types.index(node_type)
+    node_types = [
+        "DC",
+        "F",
+        "B",
+    ]  # no need to include the dummy nodes as they are added automatically
+    return (max_node_per_region * region_num + count) * len(
+        node_types
+    ) + node_types.index(node_type)
+
 
-def _pseudo_random_arc(node_1, node_2, prob, cfg, intra=True): #in a Region
-    """decides pseudo_randomly whether an arc will be created based on the two nodes
+def _pseudo_random_arc(node_1, node_2, prob, cfg, intra=True):  # in a Region
+    """decides pseudo-randomly whether an arc will be created based on the two nodes
 
     Args:
         node_1 (str): name of the source node
 
@@ -260,23 +378,33 @@ def _pseudo_random_arc(node_1, node_2, prob, cfg, intra=True): #in a Region
     """
     max_node_per_region = cfg.mnpr
     node_type1, region_num1, count1 = parse_node_name(node_1)
-    node_type2, region_num2, count2 = parse_node_name(node_2) 
+    node_type2, region_num2, count2 = parse_node_name(node_2)
     node_num1 = _node_num(max_node_per_region, node_type1, region_num1, count1)
     node_num2 = _node_num(max_node_per_region, node_type2, region_num2, count2)
     if intra:
-        assert region_num1 == region_num2, f"supposed to happen in a region ({intra=}), but {region_num1, region_num2=}"
+        assert (
+            region_num1 == region_num2
+        ), f"supposed to happen in a region ({intra=}), but {region_num1, region_num2=}"
     else:
-        if region_num1 == region_num2: # inside a region, however intra=False so no connexion
+        if (
+            region_num1 == region_num2
+        ):  # inside a region, however intra=False so no connection
             return False
-    max_node_num = _node_num(max_node_per_region, "B", cfg.num_scens+1, max_node_per_region+1) # maximum number possible
-    np.random.seed(min(node_num1, node_num2) * max_node_num + max(node_num1, node_num2)) # it is symmetrical
+    max_node_num = _node_num(
+        max_node_per_region, "B", cfg.num_scens + 1, max_node_per_region + 1
+    )  # maximum number possible
+    np.random.seed(
+        min(node_num1, node_num2) * max_node_num + max(node_num1, node_num2)
+    )  # it is symmetrical
     random_number = np.random.rand()
 
     # Determine if the event occurs
     boo = random_number < prob
     return boo
 
 
-def _intra_arc_creator(my_dict, node_1, node_2, cfg, arc, arc_params, my_seed, intra=True):
+def _intra_arc_creator(
+    my_dict, node_1, node_2, cfg, arc, arc_params, my_seed, intra=True
+):
     """if the arc is chosen randomly to be constructed, it is added to the dictionary with its cost and capacity
 
     Args:
 
@@ -297,25 +425,29 @@ def _intra_arc_creator(my_dict, node_1, node_2, cfg, arc, arc_params, my_seed, i
     if _pseudo_random_arc(node_1, node_2, prob, cfg, intra=intra):
         my_dict["arcs"].append(arc)
         np.random.seed(my_seed % 2**32)
-        if intra: # not the same names used
+        if intra:  # not the same names used
             cost_name = "flow costs"
             capacity_name = "flow capacities"
         else:
             cost_name = "costs"
             capacity_name = "capacities"
-        my_dict[cost_name][arc] = max(np.random.normal(mean_cost,cv_cost),0)
-        np.random.seed((2**31+my_seed) % 2**32)
-        my_dict[capacity_name][arc] = max(np.random.normal(mean_capacity,cv_capacity),0)
+        my_dict[cost_name][arc] = max(np.random.normal(mean_cost, cv_cost), 0)
+        np.random.seed((2**31 + my_seed) % 2**32)
+        my_dict[capacity_name][arc] = max(
+            np.random.normal(mean_capacity, cv_capacity), 0
+        )
 
 
-def scalable_inter_region_dict_creator(all_DC_nodes, cfg, data_params): # same as inter_region_dict_creator but the scalable version
+def scalable_inter_region_dict_creator(
+    all_DC_nodes, cfg, data_params
+):  # same as inter_region_dict_creator but the scalable version
     inter_region_arc_params = data_params["inter_region_arc"]
-    inter_region_dict={}
+    inter_region_dict = {}
     inter_region_dict["arcs"] = list()
     inter_region_dict["costs"] = {}
     inter_region_dict["capacities"] = {}
     count = 0
-    for node_1 in all_DC_nodes: #although inter_region_dict["costs"] and ["capacities"] could be done with comprehension, "arcs" can't
+    for node_1 in all_DC_nodes:  # although inter_region_dict["costs"] and ["capacities"] could be done with comprehension, "arcs" can't
         for node_2 in all_DC_nodes:
             if node_1 != node_2:
                 _, region_num1, _ = parse_node_name(node_1)
 
@@ -323,7 +455,16 @@ def scalable_inter_region_dict_creator(all_DC_nodes, cfg, data_params): # same a
                 _, region_num2, _ = parse_node_name(node_2)
                 target = f"Region{region_num2}", node_2
                 arc = source, target
-                _intra_arc_creator(inter_region_dict, node_1, node_2, cfg, arc, inter_region_arc_params, count, intra=False)
+                _intra_arc_creator(
+                    inter_region_dict,
+                    node_1,
+                    node_2,
+                    cfg,
+                    arc,
+                    inter_region_arc_params,
+                    count,
+                    intra=False,
+                )
                 count += 1
     return inter_region_dict
 
@@ -339,58 +480,93 @@ def all_nodes_dict_creator(cfg, data_params):
     """
     all_nodes_dict = {}
     num_scens = cfg.num_scens
-    max_node_per_region = cfg.mnpr # maximum node node of a certain type in any region
+    max_node_per_region = cfg.mnpr  # maximum number of nodes of a certain type in any region
    all_nodes_dict = {}
     production_costs_mean = data_params["production costs mean"]
-    production_costs_cv = data_params["production costs cv"] #coefficient of variation
+    production_costs_cv = data_params["production costs cv"]  # coefficient of variation
     revenues_mean = data_params["revenues mean"]
     revenues_cv = data_params["revenues cv"]
     supply_factory_mean = data_params["supply factory mean"]
     supply_factory_cv = data_params["supply factory cv"]
     supply_buyer_mean = data_params["supply buyer mean"]
     supply_buyer_cv = data_params["supply buyer cv"]
-    for i in range(1, num_scens+1):
+    for i in range(1, num_scens + 1):
         region_name = f"Region{i}"
         all_nodes_dict[region_name] = {}
         node_types = ["DC", "F", "B"]
         all_nodes_dict[region_name]["nodes"] = []
-        association_types = {"DC": "distribution center nodes", "F": "factory nodes", "B": "buyer nodes"}
+        association_types = {
+            "DC": "distribution center nodes",
+            "F": "factory nodes",
+            "B": "buyer nodes",
+        }
         all_nodes_dict[region_name]["production costs"] = {}
         all_nodes_dict[region_name]["revenues"] = {}
-        all_nodes_dict[region_name]["supply"] = {} 
+        all_nodes_dict[region_name]["supply"] = {}
         for node_type in node_types:
-            node_base_num = _node_num(max_node_per_region, node_type, i, 1) #the first node that will be created will have this number
+            node_base_num = _node_num(
+                max_node_per_region, node_type, i, 1
+            )  # the first node that will be created will have this number
             # That helps us to have a seed, thanks to that we choose an integer which will be the number of nodes of this type
             np.random.seed(node_base_num)
             if node_type == "F" or node_type == "B":
                 m = np.random.randint(0, max_node_per_region)
             else:
-                m = np.random.randint(1, int(np.sqrt(max_node_per_region))+1)
-        all_nodes_dict[region_name][association_types[node_type]] = [node_type + str(i) + "_" +str(j) for j in range(1, m+1)]
-        all_nodes_dict[region_name]["nodes"] += all_nodes_dict[region_name][association_types[node_type]]
+                m = np.random.randint(1, int(np.sqrt(max_node_per_region)) + 1)
+ 
all_nodes_dict[region_name][association_types[node_type]] = [ + node_type + str(i) + "_" + str(j) for j in range(1, m + 1) + ] + all_nodes_dict[region_name]["nodes"] += all_nodes_dict[region_name][ + association_types[node_type] + ] if node_type == "F": count = 1 - for node_name in all_nodes_dict[region_name][association_types[node_type]]: - np.random.seed(_node_num(max_node_per_region, node_type, i, count) + 2**28) - all_nodes_dict[region_name]["production costs"][node_name] = max(0,np.random.normal(production_costs_mean,production_costs_cv)) - np.random.seed(_node_num(max_node_per_region, node_type, i, count) + 2*2**28) - all_nodes_dict[region_name]["supply"][node_name] = max(0,np.random.normal(supply_factory_mean,supply_factory_cv)) #positive + for node_name in all_nodes_dict[region_name][ + association_types[node_type] + ]: + np.random.seed( + _node_num(max_node_per_region, node_type, i, count) + 2**28 + ) + all_nodes_dict[region_name]["production costs"][node_name] = max( + 0, np.random.normal(production_costs_mean, production_costs_cv) + ) + np.random.seed( + _node_num(max_node_per_region, node_type, i, count) + 2 * 2**28 + ) + all_nodes_dict[region_name]["supply"][node_name] = max( + 0, np.random.normal(supply_factory_mean, supply_factory_cv) + ) # positive count += 1 if node_type == "B": count = 1 - for node_name in all_nodes_dict[region_name][association_types[node_type]]: - np.random.seed(_node_num(max_node_per_region, node_type, i, count) + 2**28) - all_nodes_dict[region_name]["revenues"][node_name] = min(max(0, np.random.normal(revenues_mean,revenues_cv)), data_params["max revenue"]) - np.random.seed(_node_num(max_node_per_region, node_type, i, count) + 2*2**28) - all_nodes_dict[region_name]["supply"][node_name] = - max(0, np.random.normal(supply_buyer_mean,supply_buyer_cv)) #negative + for node_name in all_nodes_dict[region_name][ + association_types[node_type] + ]: + np.random.seed( + _node_num(max_node_per_region, node_type, i, count) + 2**28 + ) + all_nodes_dict[region_name]["revenues"][node_name] = min( + max(0, np.random.normal(revenues_mean, revenues_cv)), + data_params["max revenue"], + ) + np.random.seed( + _node_num(max_node_per_region, node_type, i, count) + 2 * 2**28 + ) + all_nodes_dict[region_name]["supply"][node_name] = -max( + 0, np.random.normal(supply_buyer_mean, supply_buyer_cv) + ) # negative count += 1 if node_type == "DC": - for node_name in all_nodes_dict[region_name][association_types[node_type]]: + for node_name in all_nodes_dict[region_name][ + association_types[node_type] + ]: all_nodes_dict[region_name]["supply"][node_name] = 0 return all_nodes_dict -def scalable_region_dict_creator(scenario_name, all_nodes_dict=None, cfg=None, data_params=None): # same as region_dict_creator but the scalable version +def scalable_region_dict_creator( + scenario_name, all_nodes_dict=None, cfg=None, data_params=None +): # same as region_dict_creator but the scalable version assert all_nodes_dict is not None assert cfg is not None assert data_params is not None @@ -400,17 +576,33 @@ def scalable_region_dict_creator(scenario_name, all_nodes_dict=None, cfg=None, d region_dict["arcs"] = list() region_dict["flow costs"] = {} region_dict["flow capacities"] = {} - count = 2**30 # to have unrelated data with the production_costs - for node_1 in local_nodes_dict["nodes"]: #although inter_region_dict["costs"] and ["capacities"] could be done with comprehension, "arcs" can't + count = 2**30 # to have unrelated data with the production_costs + for node_1 in local_nodes_dict[ + 
"nodes" + ]: # although inter_region_dict["costs"] and ["capacities"] could be done with comprehension, "arcs" can't for node_2 in local_nodes_dict["nodes"]: if node_1 != node_2: node_type1, _, _ = parse_node_name(node_1) node_type2, _, _ = parse_node_name(node_2) - arcs_association = {("F","DC") : data_params["arc_F_DC"], ("DC", "B") : data_params["arc_DC_B"], ("F", "B") : data_params["arc_F_B"], ("DC", "DC"): data_params["arc_DC_DC"]} + arcs_association = { + ("F", "DC"): data_params["arc_F_DC"], + ("DC", "B"): data_params["arc_DC_B"], + ("F", "B"): data_params["arc_F_B"], + ("DC", "DC"): data_params["arc_DC_DC"], + } arc_type = (node_type1, node_type2) if arc_type in arcs_association: arc_params = arcs_association[arc_type] arc = (node_1, node_2) - _intra_arc_creator(region_dict, node_1, node_2, cfg, arc, arc_params, my_seed=count, intra=True) + _intra_arc_creator( + region_dict, + node_1, + node_2, + cfg, + arc, + arc_params, + my_seed=count, + intra=True, + ) count += 1 return region_dict diff --git a/examples/distr/distr_ef.py b/examples/distr/distr_ef.py index 935e4e687..49b836506 100644 --- a/examples/distr/distr_ef.py +++ b/examples/distr/distr_ef.py @@ -18,25 +18,26 @@ import mpisppy.utils.sputils as sputils from mpisppy.utils import config from mpisppy import MPI + global_rank = MPI.COMM_WORLD.Get_rank() write_solution = True + def _parse_args(): # create a config object and parse cfg = config.Config() distr.inparser_adder(cfg) - cfg.add_to_config("solver_name", - description="Choice of the solver", - domain=str, - default=None) + cfg.add_to_config( + "solver_name", description="Choice of the solver", domain=str, default=None + ) cfg.parse_command_line("distr_ef") return cfg -def solve_EF_directly(admm,solver_name): +def solve_EF_directly(admm, solver_name): scenario_names = admm.local_scenario_names scenario_creator = admm.admmWrapper_scenario_creator @@ -46,57 +47,77 @@ def solve_EF_directly(admm,solver_name): nonant_for_fixed_vars=False, ) solver = pyo.SolverFactory(solver_name) - if 'persistent' in solver_name: + if "persistent" in solver_name: solver.set_instance(ef, symbolic_solver_labels=True) solver.solve(tee=True) else: - solver.solve(ef, tee=True, symbolic_solver_labels=True,) + solver.solve( + ef, + tee=True, + symbolic_solver_labels=True, + ) return ef -def main(): +def main(): cfg = _parse_args() if cfg.scalable: import json + json_file_path = "data_params.json" # Read the JSON file - with open(json_file_path, 'r') as file: + with open(json_file_path, "r") as file: data_params = json.load(file) all_nodes_dict = distr_data.all_nodes_dict_creator(cfg, data_params) - all_DC_nodes = [DC_node for region in all_nodes_dict for DC_node in all_nodes_dict[region]["distribution center nodes"]] - inter_region_dict = distr_data.scalable_inter_region_dict_creator(all_DC_nodes, cfg, data_params) + all_DC_nodes = [ + DC_node + for region in all_nodes_dict + for DC_node in all_nodes_dict[region]["distribution center nodes"] + ] + inter_region_dict = distr_data.scalable_inter_region_dict_creator( + all_DC_nodes, cfg, data_params + ) else: - inter_region_dict = distr_data.inter_region_dict_creator(num_scens=cfg.num_scens) + inter_region_dict = distr_data.inter_region_dict_creator( + num_scens=cfg.num_scens + ) all_nodes_dict = None data_params = {"max revenue": 1200} options = {} all_scenario_names = distr.scenario_names_creator(num_scens=cfg.num_scens) scenario_creator = distr.scenario_creator - scenario_creator_kwargs = distr.kw_creator(all_nodes_dict, cfg, inter_region_dict, 
data_params) - consensus_vars = distr.consensus_vars_creator(cfg.num_scens, inter_region_dict, all_scenario_names) + scenario_creator_kwargs = distr.kw_creator( + all_nodes_dict, cfg, inter_region_dict, data_params + ) + consensus_vars = distr.consensus_vars_creator( + cfg.num_scens, inter_region_dict, all_scenario_names + ) n_cylinders = 1 - admm = admmWrapper.AdmmWrapper(options, - all_scenario_names, - scenario_creator, - consensus_vars, - n_cylinders=n_cylinders, - mpicomm=MPI.COMM_WORLD, - scenario_creator_kwargs=scenario_creator_kwargs, - ) + admm = admmWrapper.AdmmWrapper( + options, + all_scenario_names, + scenario_creator, + consensus_vars, + n_cylinders=n_cylinders, + mpicomm=MPI.COMM_WORLD, + scenario_creator_kwargs=scenario_creator_kwargs, + ) solved_ef = solve_EF_directly(admm, cfg.solver_name) with open("ef.txt", "w") as f: solved_ef.pprint(f) - print ("******* model written to ef.txt *******") + print("******* model written to ef.txt *******") solution_file_name = "solution_distr.txt" - sputils.write_ef_first_stage_solution(solved_ef, - solution_file_name,) + sputils.write_ef_first_stage_solution( + solved_ef, + solution_file_name, + ) print(f"ef solution written to {solution_file_name}") print(f"EF objective: {pyo.value(solved_ef.EF_Obj)}") if __name__ == "__main__": - main() \ No newline at end of file + main() diff --git a/examples/distr/globalmodel.py b/examples/distr/globalmodel.py index 88428bba8..94c16ff28 100644 --- a/examples/distr/globalmodel.py +++ b/examples/distr/globalmodel.py @@ -17,18 +17,20 @@ import distr_data from mpisppy.utils import config + def _parse_args(): # create a config object and parse cfg = config.Config() distr.inparser_adder(cfg) - cfg.add_to_config("solver_name", - description="which solver", - domain=str, - default=None, - argparse_args = {"required": True}, - ) + cfg.add_to_config( + "solver_name", + description="which solver", + domain=str, + default=None, + argparse_args={"required": True}, + ) cfg.parse_command_line("globalmodel") - + return cfg @@ -43,19 +45,31 @@ def global_dict_creator(num_scens, start=0): dict: dictionary with the information to create a min cost distribution problem """ inter_region_dict = distr_data.inter_region_dict_creator(num_scens) - global_dict={} - for i in range(start,start+num_scens): - scenario_name=f"Region{i+1}" + global_dict = {} + for i in range(start, start + num_scens): + scenario_name = f"Region{i+1}" region_dict = distr_data.region_dict_creator(scenario_name) for key in region_dict: - if key in ["nodes","factory nodes", "buyer nodes", "distribution center nodes", "arcs"]: + if key in [ + "nodes", + "factory nodes", + "buyer nodes", + "distribution center nodes", + "arcs", + ]: if i == start: - global_dict[key]=[] + global_dict[key] = [] for x in region_dict[key]: global_dict[key].append(x) - if key in ["supply", "production costs","revenues", "flow capacities", "flow costs"]: + if key in [ + "supply", + "production costs", + "revenues", + "flow capacities", + "flow costs", + ]: if i == start: - global_dict[key]={} + global_dict[key] = {} for key2 in region_dict[key]: global_dict[key][key2] = region_dict[key][key2] @@ -64,21 +78,26 @@ def _extract_arc(a): node_source = source[1] node_target = target[1] return (node_source, node_target) - + for a in inter_region_dict["arcs"]: global_dict["arcs"].append(_extract_arc(a)) for a in inter_region_dict["costs"]: - global_dict["flow costs"][_extract_arc(a)] = inter_region_dict["costs"][a] + global_dict["flow costs"][_extract_arc(a)] = 
inter_region_dict["costs"][a] for a in inter_region_dict["capacities"]: - global_dict["flow capacities"][_extract_arc(a)] = inter_region_dict["capacities"][a] + global_dict["flow capacities"][_extract_arc(a)] = inter_region_dict[ + "capacities" + ][a] return global_dict + def main(): """ - do all the work + do all the work """ cfg = _parse_args() - assert cfg.scalable is False, "the global model example has not been adapted for the scalable example" + assert ( + cfg.scalable is False + ), "the global model example has not been adapted for the scalable example" model = distr.min_cost_distr_problem(global_dict_creator(num_scens=cfg.num_scens)) solver_name = cfg.solver_name @@ -92,7 +111,7 @@ def main(): count = 0 for obj in objectives: objective_value = pyo.value(obj) - count += 1 + count += 1 assert count == 1, f"only one objective function is authorized, there are {count}" print(f"Objective '{obj}' value: {objective_value}") diff --git a/examples/farmer/cs_farmer.py b/examples/farmer/cs_farmer.py index b7f006660..4ec4ed878 100644 --- a/examples/farmer/cs_farmer.py +++ b/examples/farmer/cs_farmer.py @@ -23,14 +23,13 @@ from mpisppy.extensions.extension import MultiExtension from mpisppy.extensions.cross_scen_extension import CrossScenarioExtension from mpisppy.cylinders.lagrangian_bounder import LagrangianOuterBound -from mpisppy.cylinders.hub import PHHub +from mpisppy.cylinders.hub import PHHub from mpisppy.cylinders.cross_scen_spoke import CrossScenarioCutSpoke from mpisppy.opt.lshaped import LShapedMethod from mpisppy.utils.xhat_eval import Xhat_Eval - # Make it all go from mpisppy.spin_the_wheel import WheelSpinner from mpisppy.log import setup_logger @@ -46,16 +45,22 @@ def _usage(): - print("usage mpiexec -np {N} python -m mpi4py cs_farmer.py {crops_multiplier} {scen_count} {bundles_per_rank} {PHIterLimit}") + print( + "usage mpiexec -np {N} python -m mpi4py cs_farmer.py {crops_multiplier} {scen_count} {bundles_per_rank} {PHIterLimit}" + ) print("e.g., mpiexec -np 3 python -m mpi4py cs_farmer.py 1 3 0 50") sys.exit(1) if __name__ == "__main__": - logging.basicConfig(level=logging.DEBUG, filename='dlw.log', - filemode='w', format='(%(threadName)-10s) %(message)s') - setup_logger(f'dtm{global_rank}', f'dtm{global_rank}.log') - dtm = logging.getLogger(f'dtm{global_rank}') + logging.basicConfig( + level=logging.DEBUG, + filename="dlw.log", + filemode="w", + format="(%(threadName)-10s) %(message)s", + ) + setup_logger(f"dtm{global_rank}", f"dtm{global_rank}.log") + dtm = logging.getLogger(f"dtm{global_rank}") if len(sys.argv) != 5: _usage() @@ -68,17 +73,17 @@ def _usage(): scenario_creator = farmer.scenario_creator scenario_denouement = farmer.scenario_denouement - all_scenario_names = ['scen{}'.format(sn) for sn in range(scen_count)] - rho_setter = farmer._rho_setter if hasattr(farmer, '_rho_setter') else None + all_scenario_names = ["scen{}".format(sn) for sn in range(scen_count)] + rho_setter = farmer._rho_setter if hasattr(farmer, "_rho_setter") else None scenario_creator_kwargs = { - 'use_integer': True, + "use_integer": True, "crops_multiplier": crops_multiplier, "sense": pyo.maximize, } hub_ph_options = { "solver_name": "xpress_persistent", - 'bundles_per_rank': bundles_per_rank, # 0 = no bundles + "bundles_per_rank": bundles_per_rank, # 0 = no bundles "asynchronousPH": False, "PHIterLimit": PHIterLimit, "defaultPHrho": 0.5, @@ -90,11 +95,11 @@ def _usage(): "tee-rank0-solves": False, "iter0_solver_options": None, "iterk_solver_options": None, - 
"cross_scen_options":{"check_bound_improve_iterations":2}, + "cross_scen_options": {"check_bound_improve_iterations": 2}, } - multi_ext = { 'ext_classes' : [CrossScenarioExtension] } - #multi_ext = { 'ext_classes' : [] } + multi_ext = {"ext_classes": [CrossScenarioExtension]} + # multi_ext = { 'ext_classes' : [] } # PH hub hub_dict = { @@ -109,30 +114,29 @@ def _usage(): "rho_setter": rho_setter, "extensions": MultiExtension, "extension_kwargs": multi_ext, - } + }, } # TBD: this should have a different rho setter than the optimizer options = copy.deepcopy(hub_ph_options) # many will not be used - options['bundles_per_rank'] = 0 # no bundles for xhat - options["xhat_looper_options"] = \ - { - "xhat_solver_options": None, - "scen_limit": 3, - "dump_prefix": "delme", - "csvname": "looper.csv", - } + options["bundles_per_rank"] = 0 # no bundles for xhat + options["xhat_looper_options"] = { + "xhat_solver_options": None, + "scen_limit": 3, + "dump_prefix": "delme", + "csvname": "looper.csv", + } ub_spoke = { - 'spoke_class': XhatShuffleInnerBound, + "spoke_class": XhatShuffleInnerBound, "spoke_kwargs": dict(), "opt_class": Xhat_Eval, - 'opt_kwargs': { - 'options': options, - 'all_scenario_names': all_scenario_names, - 'scenario_creator': scenario_creator, - 'scenario_denouement': scenario_denouement, + "opt_kwargs": { + "options": options, + "all_scenario_names": all_scenario_names, + "scenario_creator": scenario_creator, + "scenario_denouement": scenario_denouement, "scenario_creator_kwargs": scenario_creator_kwargs, }, } @@ -140,42 +144,46 @@ def _usage(): ls_options = { "root_solver": "xpress_persistent", "sp_solver": "xpress_persistent", - "sp_solver_options": {"threads":1}, - } + "sp_solver_options": {"threads": 1}, + } cut_spoke = { - 'spoke_class': CrossScenarioCutSpoke, + "spoke_class": CrossScenarioCutSpoke, "spoke_kwargs": dict(), "opt_class": LShapedMethod, - 'opt_kwargs': { - 'options': ls_options, - 'all_scenario_names': all_scenario_names, - 'scenario_creator': scenario_creator, - 'scenario_denouement': scenario_denouement, - "scenario_creator_kwargs": scenario_creator_kwargs + "opt_kwargs": { + "options": ls_options, + "all_scenario_names": all_scenario_names, + "scenario_creator": scenario_creator, + "scenario_denouement": scenario_denouement, + "scenario_creator_kwargs": scenario_creator_kwargs, }, } lagrangian_spoke = { "spoke_class": LagrangianOuterBound, "spoke_kwargs": dict(), - "opt_class": PHBase, - 'opt_kwargs': { - 'options': hub_ph_options, - 'all_scenario_names': all_scenario_names, - 'scenario_creator': scenario_creator, + "opt_class": PHBase, + "opt_kwargs": { + "options": hub_ph_options, + "all_scenario_names": all_scenario_names, + "scenario_creator": scenario_creator, "scenario_creator_kwargs": scenario_creator_kwargs, - 'scenario_denouement': scenario_denouement, + "scenario_denouement": scenario_denouement, }, } - list_of_spoke_dict = (ub_spoke, cut_spoke, ) #lagrangian_spoke) - #list_of_spoke_dict = (ub_spoke, ) + list_of_spoke_dict = ( + ub_spoke, + cut_spoke, + ) # lagrangian_spoke) + # list_of_spoke_dict = (ub_spoke, ) wheel = WheelSpinner(hub_dict, list_of_spoke_dict) wheel.spin() if wheel.global_rank == 0: # we are the reporting hub rank - print(f"BestInnerBound={wheel.BestInnerBound} and BestOuterBound={wheel.BestOuterBound}") + print( + f"BestInnerBound={wheel.BestInnerBound} and BestOuterBound={wheel.BestOuterBound}" + ) - print("End time={} for global rank={}". 
\ - format(datetime.datetime.now(), global_rank)) + print("End time={} for global rank={}".format(datetime.datetime.now(), global_rank)) diff --git a/examples/farmer/farmer.py b/examples/farmer/farmer.py index 2d620b68f..cd4ef6f03 100644 --- a/examples/farmer/farmer.py +++ b/examples/farmer/farmer.py @@ -13,8 +13,8 @@ # # Pyomo: Python Optimization Modeling Objects # Copyright 2018 National Technology and Engineering Solutions of Sandia, LLC -# Under the terms of Contract DE-NA0003525 with National Technology and -# Engineering Solutions of Sandia, LLC, the U.S. Government retains certain +# Under the terms of Contract DE-NA0003525 with National Technology and +# Engineering Solutions of Sandia, LLC, the U.S. Government retains certain # rights in this software. # This software is distributed under the 3-clause BSD License. # ___________________________________________________________________________ @@ -28,12 +28,17 @@ # Use this random stream: farmerstream = np.random.RandomState() + def scenario_creator( - scenario_name, use_integer=False, sense=pyo.minimize, crops_multiplier=1, - num_scens=None, seedoffset=0 + scenario_name, + use_integer=False, + sense=pyo.minimize, + crops_multiplier=1, + num_scens=None, + seedoffset=0, ): - """ Create a scenario for the (scalable) farmer example. - + """Create a scenario for the (scalable) farmer example. + Args: scenario_name (str): Name of the scenario to construct. @@ -46,24 +51,24 @@ def scenario_creator( Factor to control scaling. There will be three times this many crops. Default is 1. num_scens (int, optional): - Number of scenarios. We use it to compute _mpisppy_probability. + Number of scenarios. We use it to compute _mpisppy_probability. Default is None. seedoffset (int): used by confidence interval code """ # scenario_name has the form e.g. scen12, foobar7 # The digits are scraped off the right of scenario_name using regex then # converted mod 3 into one of the below avg./avg./above avg. scenarios - scennum = sputils.extract_num(scenario_name) - basenames = ['BelowAverageScenario', 'AverageScenario', 'AboveAverageScenario'] - basenum = scennum % 3 - groupnum = scennum // 3 - scenname = basenames[basenum]+str(groupnum) + scennum = sputils.extract_num(scenario_name) + basenames = ["BelowAverageScenario", "AverageScenario", "AboveAverageScenario"] + basenum = scennum % 3 + groupnum = scennum // 3 + scenname = basenames[basenum] + str(groupnum) # The RNG is seeded with the scenario number so that it is # reproducible when used with multiple threads. # NOTE: if you want to do replicates, you will need to pass a seed # as a kwarg to scenario_creator then use seed+scennum as the seed argument. - farmerstream.seed(scennum+seedoffset) + farmerstream.seed(scennum + seedoffset) # Check for minimization vs. maximization if sense not in [pyo.minimize, pyo.maximize]: @@ -81,13 +86,14 @@ def scenario_creator( # there is only one node associated with the scenario--leaf nodes are # ignored). 
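+    # e.g., with crops_multiplier=1 the root node ties together the three
+    # first-stage variables DevotedAcreage["WHEAT0"], DevotedAcreage["CORN0"],
+    # and DevotedAcreage["SUGAR_BEETS0"] across all scenarios (illustrative
+    # comment added for exposition; the names follow crops_init below).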
varlist = [model.DevotedAcreage] - sputils.attach_root_node(model, model.FirstStageCost, varlist) - - #Add the probability of the scenario - if num_scens is not None : - model._mpisppy_probability = 1/num_scens + sputils.attach_root_node(model, model.FirstStageCost, varlist) + + # Add the probability of the scenario + if num_scens is not None: + model._mpisppy_probability = 1 / num_scens return model + def pysp_instance_creation_callback( scenario_name, use_integer=False, sense=pyo.minimize, crops_multiplier=1 ): @@ -99,15 +105,15 @@ def pysp_instance_creation_callback( # scenarios come in groups of three scengroupnum = sputils.extract_num(scenario_name) scenario_base_name = scenario_name.rstrip("0123456789") - + model = pyo.ConcreteModel(scenario_name) def crops_init(m): retval = [] for i in range(crops_multiplier): - retval.append("WHEAT"+str(i)) - retval.append("CORN"+str(i)) - retval.append("SUGAR_BEETS"+str(i)) + retval.append("WHEAT" + str(i)) + retval.append("CORN" + str(i)) + retval.append("SUGAR_BEETS" + str(i)) return retval model.CROPS = pyo.Set(initialize=crops_init) @@ -121,63 +127,66 @@ def crops_init(m): def _scale_up_data(indict): outdict = {} for i in range(crops_multiplier): - for crop in ['WHEAT', 'CORN', 'SUGAR_BEETS']: - outdict[crop+str(i)] = indict[crop] + for crop in ["WHEAT", "CORN", "SUGAR_BEETS"]: + outdict[crop + str(i)] = indict[crop] return outdict - + model.PriceQuota = _scale_up_data( - {'WHEAT':100000.0,'CORN':100000.0,'SUGAR_BEETS':6000.0}) + {"WHEAT": 100000.0, "CORN": 100000.0, "SUGAR_BEETS": 6000.0} + ) model.SubQuotaSellingPrice = _scale_up_data( - {'WHEAT':170.0,'CORN':150.0,'SUGAR_BEETS':36.0}) + {"WHEAT": 170.0, "CORN": 150.0, "SUGAR_BEETS": 36.0} + ) model.SuperQuotaSellingPrice = _scale_up_data( - {'WHEAT':0.0,'CORN':0.0,'SUGAR_BEETS':10.0}) + {"WHEAT": 0.0, "CORN": 0.0, "SUGAR_BEETS": 10.0} + ) model.CattleFeedRequirement = _scale_up_data( - {'WHEAT':200.0,'CORN':240.0,'SUGAR_BEETS':0.0}) + {"WHEAT": 200.0, "CORN": 240.0, "SUGAR_BEETS": 0.0} + ) model.PurchasePrice = _scale_up_data( - {'WHEAT':238.0,'CORN':210.0,'SUGAR_BEETS':100000.0}) + {"WHEAT": 238.0, "CORN": 210.0, "SUGAR_BEETS": 100000.0} + ) model.PlantingCostPerAcre = _scale_up_data( - {'WHEAT':150.0,'CORN':230.0,'SUGAR_BEETS':260.0}) + {"WHEAT": 150.0, "CORN": 230.0, "SUGAR_BEETS": 260.0} + ) # # Stochastic Data # Yield = {} - Yield['BelowAverageScenario'] = \ - {'WHEAT':2.0,'CORN':2.4,'SUGAR_BEETS':16.0} - Yield['AverageScenario'] = \ - {'WHEAT':2.5,'CORN':3.0,'SUGAR_BEETS':20.0} - Yield['AboveAverageScenario'] = \ - {'WHEAT':3.0,'CORN':3.6,'SUGAR_BEETS':24.0} + Yield["BelowAverageScenario"] = {"WHEAT": 2.0, "CORN": 2.4, "SUGAR_BEETS": 16.0} + Yield["AverageScenario"] = {"WHEAT": 2.5, "CORN": 3.0, "SUGAR_BEETS": 20.0} + Yield["AboveAverageScenario"] = {"WHEAT": 3.0, "CORN": 3.6, "SUGAR_BEETS": 24.0} def Yield_init(m, cropname): # yield as in "crop yield" crop_base_name = cropname.rstrip("0123456789") if scengroupnum != 0: - return Yield[scenario_base_name][crop_base_name]+farmerstream.rand() + return Yield[scenario_base_name][crop_base_name] + farmerstream.rand() else: return Yield[scenario_base_name][crop_base_name] - model.Yield = pyo.Param(model.CROPS, - within=pyo.NonNegativeReals, - initialize=Yield_init, - mutable=True) + model.Yield = pyo.Param( + model.CROPS, within=pyo.NonNegativeReals, initialize=Yield_init, mutable=True + ) # # Variables # - if (use_integer): - model.DevotedAcreage = pyo.Var(model.CROPS, - within=pyo.NonNegativeIntegers, - bounds=(0.0, model.TOTAL_ACREAGE)) + 
if use_integer: + model.DevotedAcreage = pyo.Var( + model.CROPS, + within=pyo.NonNegativeIntegers, + bounds=(0.0, model.TOTAL_ACREAGE), + ) else: - model.DevotedAcreage = pyo.Var(model.CROPS, - bounds=(0.0, model.TOTAL_ACREAGE)) + model.DevotedAcreage = pyo.Var(model.CROPS, bounds=(0.0, model.TOTAL_ACREAGE)) model.QuantitySubQuotaSold = pyo.Var(model.CROPS, bounds=(0.0, None)) model.QuantitySuperQuotaSold = pyo.Var(model.CROPS, bounds=(0.0, None)) @@ -193,12 +202,25 @@ def ConstrainTotalAcreage_rule(model): model.ConstrainTotalAcreage = pyo.Constraint(rule=ConstrainTotalAcreage_rule) def EnforceCattleFeedRequirement_rule(model, i): - return model.CattleFeedRequirement[i] <= (model.Yield[i] * model.DevotedAcreage[i]) + model.QuantityPurchased[i] - model.QuantitySubQuotaSold[i] - model.QuantitySuperQuotaSold[i] - - model.EnforceCattleFeedRequirement = pyo.Constraint(model.CROPS, rule=EnforceCattleFeedRequirement_rule) + return ( + model.CattleFeedRequirement[i] + <= (model.Yield[i] * model.DevotedAcreage[i]) + + model.QuantityPurchased[i] + - model.QuantitySubQuotaSold[i] + - model.QuantitySuperQuotaSold[i] + ) + + model.EnforceCattleFeedRequirement = pyo.Constraint( + model.CROPS, rule=EnforceCattleFeedRequirement_rule + ) def LimitAmountSold_rule(model, i): - return model.QuantitySubQuotaSold[i] + model.QuantitySuperQuotaSold[i] - (model.Yield[i] * model.DevotedAcreage[i]) <= 0.0 + return ( + model.QuantitySubQuotaSold[i] + + model.QuantitySuperQuotaSold[i] + - (model.Yield[i] * model.DevotedAcreage[i]) + <= 0.0 + ) model.LimitAmountSold = pyo.Constraint(model.CROPS, rule=LimitAmountSold_rule) @@ -211,63 +233,79 @@ def EnforceQuotas_rule(model, i): def ComputeFirstStageCost_rule(model): return pyo.sum_product(model.PlantingCostPerAcre, model.DevotedAcreage) + model.FirstStageCost = pyo.Expression(rule=ComputeFirstStageCost_rule) def ComputeSecondStageCost_rule(model): expr = pyo.sum_product(model.PurchasePrice, model.QuantityPurchased) expr -= pyo.sum_product(model.SubQuotaSellingPrice, model.QuantitySubQuotaSold) - expr -= pyo.sum_product(model.SuperQuotaSellingPrice, model.QuantitySuperQuotaSold) + expr -= pyo.sum_product( + model.SuperQuotaSellingPrice, model.QuantitySuperQuotaSold + ) return expr + model.SecondStageCost = pyo.Expression(rule=ComputeSecondStageCost_rule) def total_cost_rule(model): - if (sense == pyo.minimize): + if sense == pyo.minimize: return model.FirstStageCost + model.SecondStageCost return -model.FirstStageCost - model.SecondStageCost - model.Total_Cost_Objective = pyo.Objective(rule=total_cost_rule, - sense=sense) + + model.Total_Cost_Objective = pyo.Objective(rule=total_cost_rule, sense=sense) return model + # begin functions not needed by farmer_cylinders # (but needed by special codes such as confidence intervals) -#========= -def scenario_names_creator(num_scens,start=None): +# ========= +def scenario_names_creator(num_scens, start=None): # (only for Amalgamator): return the full list of num_scens scenario names # if start!=None, the list starts with the 'start' labeled scenario - if (start is None) : - start=0 - return [f"scen{i}" for i in range(start,start+num_scens)] - + if start is None: + start = 0 + return [f"scen{i}" for i in range(start, start + num_scens)] -#========= +# ========= def inparser_adder(cfg): # add options unique to farmer cfg.num_scens_required() - cfg.add_to_config("crops_multiplier", - description="number of crops will be three times this (default 1)", - domain=int, - default=1) - - cfg.add_to_config("farmer_with_integers", - 
description="make the version that has integers (default False)", - domain=bool, - default=False) - - -#========= + cfg.add_to_config( + "crops_multiplier", + description="number of crops will be three times this (default 1)", + domain=int, + default=1, + ) + + cfg.add_to_config( + "farmer_with_integers", + description="make the version that has integers (default False)", + domain=bool, + default=False, + ) + + +# ========= def kw_creator(cfg): # (for Amalgamator): linked to the scenario_creator and inparser_adder - kwargs = {"use_integer": cfg.get('farmer_with_integers', False), - "crops_multiplier": cfg.get('crops_multiplier', 1), - "num_scens" : cfg.get('num_scens', None), - } + kwargs = { + "use_integer": cfg.get("farmer_with_integers", False), + "crops_multiplier": cfg.get("crops_multiplier", 1), + "num_scens": cfg.get("num_scens", None), + } return kwargs -def sample_tree_scen_creator(sname, stage, sample_branching_factors, seed, - given_scenario=None, **scenario_creator_kwargs): - """ Create a scenario within a sample tree. Mainly for multi-stage and simple for two-stage. + +def sample_tree_scen_creator( + sname, + stage, + sample_branching_factors, + seed, + given_scenario=None, + **scenario_creator_kwargs, +): + """Create a scenario within a sample tree. Mainly for multi-stage and simple for two-stage. (this function supports zhat and confidence interval code) Args: sname (string): scenario name to be created @@ -290,12 +328,16 @@ def sample_tree_scen_creator(sname, stage, sample_branching_factors, seed, # end functions not needed by farmer_cylinders -#============================ +# ============================ def scenario_denouement(rank, scenario_name, scenario): sname = scenario_name s = scenario - if sname == 'scen0': + if sname == "scen0": print("Arbitrary sanity checks:") - print ("SUGAR_BEETS0 for scenario",sname,"is", - pyo.value(s.DevotedAcreage["SUGAR_BEETS0"])) - print ("FirstStageCost for scenario",sname,"is", pyo.value(s.FirstStageCost)) + print( + "SUGAR_BEETS0 for scenario", + sname, + "is", + pyo.value(s.DevotedAcreage["SUGAR_BEETS0"]), + ) + print("FirstStageCost for scenario", sname, "is", pyo.value(s.FirstStageCost)) diff --git a/examples/farmer/farmer_ama.py b/examples/farmer/farmer_ama.py index e0175a01f..c8ea24368 100644 --- a/examples/farmer/farmer_ama.py +++ b/examples/farmer/farmer_ama.py @@ -7,28 +7,42 @@ # full copyright and license information. ############################################################################### """ -An example of using amalgamator and solving directly the EF +An example of using amalgamator and solving directly the EF To execute this: python farmer_ama.py --num-scens=10 --crops-multiplier=3 --farmer-with-integer - + WARNING: num-scens must be specified ! 
""" + import mpisppy.utils.amalgamator as amalgamator from mpisppy.utils import config -def main(): +def main(): cfg = config.Config() - cfg.add_and_assign("EF_2stage", description="EF 2stage vsus mstage", domain=bool, default=None, value=True) - cfg.add_and_assign("first_stage_solution_csv", description="where to write soln", domain=str, default=None, value="farmer_first_stage.csv") - - #The module can be a local file + cfg.add_and_assign( + "EF_2stage", + description="EF 2stage vsus mstage", + domain=bool, + default=None, + value=True, + ) + cfg.add_and_assign( + "first_stage_solution_csv", + description="where to write soln", + domain=str, + default=None, + value="farmer_first_stage.csv", + ) + + # The module can be a local file ama = amalgamator.from_module("farmer", cfg) ama.run() print("first_stage_solution=", ama.first_stage_solution) print("inner bound=", ama.best_inner_bound) print("outer bound=", ama.best_outer_bound) + if __name__ == "__main__": main() diff --git a/examples/farmer/farmer_cylinders.py b/examples/farmer/farmer_cylinders.py index 11d516676..073aeff64 100644 --- a/examples/farmer/farmer_cylinders.py +++ b/examples/farmer/farmer_cylinders.py @@ -26,6 +26,7 @@ write_solution = True + def _parse_args(): # create a config object and parse cfg = config.Config() @@ -45,37 +46,46 @@ def _parse_args(): cfg.wxbar_read_write_args() cfg.tracking_args() cfg.reduced_costs_args() - cfg.add_to_config("crops_mult", - description="There will be 3x this many crops (default 1)", - domain=int, - default=1) - cfg.add_to_config("use_norm_rho_updater", - description="Use the norm rho updater extension", - domain=bool, - default=False) - cfg.add_to_config("run_async", - description="Run with async projective hedging instead of progressive hedging", - domain=bool, - default=False) + cfg.add_to_config( + "crops_mult", + description="There will be 3x this many crops (default 1)", + domain=int, + default=1, + ) + cfg.add_to_config( + "use_norm_rho_updater", + description="Use the norm rho updater extension", + domain=bool, + default=False, + ) + cfg.add_to_config( + "run_async", + description="Run with async projective hedging instead of progressive hedging", + domain=bool, + default=False, + ) cfg.parse_command_line("farmer_cylinders") return cfg def main(): - cfg = _parse_args() num_scen = cfg.num_scens crops_multiplier = cfg.crops_mult - rho_setter = farmer._rho_setter if hasattr(farmer, '_rho_setter') else None + rho_setter = farmer._rho_setter if hasattr(farmer, "_rho_setter") else None if cfg.default_rho is None and rho_setter is None: - raise RuntimeError("No rho_setter so a default must be specified via --default-rho") + raise RuntimeError( + "No rho_setter so a default must be specified via --default-rho" + ) if cfg.use_norm_rho_converger: if not cfg.use_norm_rho_updater: - raise RuntimeError("--use-norm-rho-converger requires --use-norm-rho-updater") + raise RuntimeError( + "--use-norm-rho-converger requires --use-norm-rho-updater" + ) else: ph_converger = NormRhoConverger elif cfg.primal_dual_converger: @@ -85,11 +95,11 @@ def main(): scenario_creator = farmer.scenario_creator scenario_denouement = farmer.scenario_denouement - all_scenario_names = ['scen{}'.format(sn) for sn in range(num_scen)] + all_scenario_names = ["scen{}".format(sn) for sn in range(num_scen)] scenario_creator_kwargs = { - 'use_integer': False, + "use_integer": False, "crops_multiplier": crops_multiplier, - 'sense': pyo.minimize + "sense": pyo.minimize, } # Things needed for vanilla cylinders @@ -97,67 +107,82 
@@ def main(): if cfg.run_async: # Vanilla APH hub - hub_dict = vanilla.aph_hub(*beans, - scenario_creator_kwargs=scenario_creator_kwargs, - ph_extensions=None, - rho_setter = rho_setter) + hub_dict = vanilla.aph_hub( + *beans, + scenario_creator_kwargs=scenario_creator_kwargs, + ph_extensions=None, + rho_setter=rho_setter, + ) else: # Vanilla PH hub - hub_dict = vanilla.ph_hub(*beans, - scenario_creator_kwargs=scenario_creator_kwargs, - ph_extensions=None, - ph_converger=ph_converger, - rho_setter = rho_setter) + hub_dict = vanilla.ph_hub( + *beans, + scenario_creator_kwargs=scenario_creator_kwargs, + ph_extensions=None, + ph_converger=ph_converger, + rho_setter=rho_setter, + ) if cfg.primal_dual_converger: - hub_dict['opt_kwargs']['options']\ - ['primal_dual_converger_options'] = { - 'verbose': True, - 'tol': cfg.primal_dual_converger_tol, - 'tracking': True} + hub_dict["opt_kwargs"]["options"]["primal_dual_converger_options"] = { + "verbose": True, + "tol": cfg.primal_dual_converger_tol, + "tracking": True, + } ## hack in adaptive rho if cfg.use_norm_rho_updater: extension_adder(hub_dict, NormRhoUpdater) - hub_dict['opt_kwargs']['options']['norm_rho_options'] = {'verbose': True} - + hub_dict["opt_kwargs"]["options"]["norm_rho_options"] = {"verbose": True} if cfg.reduced_costs: vanilla.add_reduced_costs_fixer(hub_dict, cfg) # FWPH spoke if cfg.fwph: - fw_spoke = vanilla.fwph_spoke(*beans, scenario_creator_kwargs=scenario_creator_kwargs) + fw_spoke = vanilla.fwph_spoke( + *beans, scenario_creator_kwargs=scenario_creator_kwargs + ) # Standard Lagrangian bound spoke if cfg.lagrangian: - lagrangian_spoke = vanilla.lagrangian_spoke(*beans, - scenario_creator_kwargs=scenario_creator_kwargs, - rho_setter = rho_setter) + lagrangian_spoke = vanilla.lagrangian_spoke( + *beans, + scenario_creator_kwargs=scenario_creator_kwargs, + rho_setter=rho_setter, + ) # Special Lagranger bound spoke if cfg.lagranger: - lagranger_spoke = vanilla.lagranger_spoke(*beans, - scenario_creator_kwargs=scenario_creator_kwargs, - rho_setter = rho_setter) + lagranger_spoke = vanilla.lagranger_spoke( + *beans, + scenario_creator_kwargs=scenario_creator_kwargs, + rho_setter=rho_setter, + ) # ph outer bounder spoke if cfg.ph_ob: - ph_ob_spoke = vanilla.ph_ob_spoke(*beans, - scenario_creator_kwargs=scenario_creator_kwargs, - rho_setter = rho_setter) + ph_ob_spoke = vanilla.ph_ob_spoke( + *beans, + scenario_creator_kwargs=scenario_creator_kwargs, + rho_setter=rho_setter, + ) # xhat looper bound spoke if cfg.xhatlooper: - xhatlooper_spoke = vanilla.xhatlooper_spoke(*beans, scenario_creator_kwargs=scenario_creator_kwargs) + xhatlooper_spoke = vanilla.xhatlooper_spoke( + *beans, scenario_creator_kwargs=scenario_creator_kwargs + ) # xhat shuffle bound spoke if cfg.xhatshuffle: - xhatshuffle_spoke = vanilla.xhatshuffle_spoke(*beans, scenario_creator_kwargs=scenario_creator_kwargs) - + xhatshuffle_spoke = vanilla.xhatshuffle_spoke( + *beans, scenario_creator_kwargs=scenario_creator_kwargs + ) + if cfg.reduced_costs: - reduced_costs_spoke = vanilla.reduced_costs_spoke(*beans, - scenario_creator_kwargs=scenario_creator_kwargs, - rho_setter = None) + reduced_costs_spoke = vanilla.reduced_costs_spoke( + *beans, scenario_creator_kwargs=scenario_creator_kwargs, rho_setter=None + ) list_of_spoke_dict = list() if cfg.fwph: @@ -179,10 +204,13 @@ def main(): wheel.spin() if write_solution: - wheel.write_first_stage_solution('farmer_plant.csv') - wheel.write_first_stage_solution('farmer_cyl_nonants.npy', - 
first_stage_solution_writer=sputils.first_stage_nonant_npy_serializer) - wheel.write_tree_solution('farmer_full_solution') + wheel.write_first_stage_solution("farmer_plant.csv") + wheel.write_first_stage_solution( + "farmer_cyl_nonants.npy", + first_stage_solution_writer=sputils.first_stage_nonant_npy_serializer, + ) + wheel.write_tree_solution("farmer_full_solution") + if __name__ == "__main__": main() diff --git a/examples/farmer/farmer_ef.py b/examples/farmer/farmer_ef.py index df4edb58b..74d03f300 100644 --- a/examples/farmer/farmer_ef.py +++ b/examples/farmer/farmer_ef.py @@ -19,7 +19,9 @@ def main_no_cfg(): # Some parameters from sys.argv and some hard-wired. if len(sys.argv) != 5: - print("usage python farmer_ef.py {crops_multiplier} {scen_count} {use_integer} {solver_name}") + print( + "usage python farmer_ef.py {crops_multiplier} {scen_count} {use_integer} {solver_name}" + ) print("e.g., python farmer_ef.py 1 3 1 gurobi") quit() @@ -29,13 +31,13 @@ def main_no_cfg(): scen_count = int(sys.argv[2]) use_integer = int(sys.argv[3]) solver_name = sys.argv[4] - + scenario_creator_kwargs = { "use_integer": use_integer, "crops_multiplier": crops_multiplier, } - scenario_names = ['Scenario' + str(i) for i in range(scen_count)] + scenario_names = ["Scenario" + str(i) for i in range(scen_count)] ef = sputils.create_EF( scenario_names, @@ -44,11 +46,15 @@ def main_no_cfg(): ) solver = pyo.SolverFactory(solver_name) - if 'persistent' in solver_name: + if "persistent" in solver_name: solver.set_instance(ef, symbolic_solver_labels=True) solver.solve(tee=True) else: - solver.solve(ef, tee=True, symbolic_solver_labels=True,) + solver.solve( + ef, + tee=True, + symbolic_solver_labels=True, + ) return ef @@ -69,24 +75,28 @@ def main_with_cfg(): ) sroot, solver_name, solver_options = solver_spec.solver_specification(cfg, "EF") - + solver = pyo.SolverFactory(solver_name) if solver_options is not None: # We probably could just assign the dictionary in one line... 
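+        # e.g., roughly: solver.options.update(solver_options)
+        # (sketch added for exposition; assumes solver.options accepts a
+        # dict-style update, which is not verified here)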
-        for option_key,option_value in solver_options.items():
+        for option_key, option_value in solver_options.items():
             solver.options[option_key] = option_value
 
-    if 'persistent' in solver_name:
+    if "persistent" in solver_name:
         solver.set_instance(ef, symbolic_solver_labels=True)
         results = solver.solve(tee=True)
     else:
-        results = solver.solve(ef, tee=True, symbolic_solver_labels=True,)
+        results = solver.solve(
+            ef,
+            tee=True,
+            symbolic_solver_labels=True,
+        )
     if not pyo.check_optimal_termination(results):
         print("Warning: solver reported non-optimal termination status")
-    
+
     return ef
 
 
-if __name__ == '__main__':
+if __name__ == "__main__":
     # show two ways to get parameters
     use_cfg = False
     if not use_cfg:
diff --git a/examples/farmer/farmer_lshapedhub.py b/examples/farmer/farmer_lshapedhub.py
index f16c88b09..45e04117d 100644
--- a/examples/farmer/farmer_lshapedhub.py
+++ b/examples/farmer/farmer_lshapedhub.py
@@ -27,10 +27,12 @@ def _parse_args():
     cfg.two_sided_args()
     cfg.fwph_args()
     cfg.xhatlshaped_args()
-    cfg.add_to_config("crops_mult",
-                      description="There will be 3x this many crops (default 1)",
-                      domain=int,
-                      default=1)
+    cfg.add_to_config(
+        "crops_mult",
+        description="There will be 3x this many crops (default 1)",
+        domain=int,
+        default=1,
+    )
     cfg.parse_command_line("farmer_cylinders")
     return cfg
 
@@ -39,7 +41,7 @@ def _parse_args():
 def main():
     cfg = _parse_args()
 
-    # Need default_rho for FWPH, without you get 
+    # Need default_rho for FWPH; without it you get
     # uninitialized numeric value error
     if cfg.fwph and cfg.default_rho is None:
         print("Must specify a default_rho if using FWPH")
@@ -63,17 +65,19 @@ def main():
 
     # Options for the L-shaped method at the hub
     # Bounds only valid for 3 scenarios, I think? Need to ask Chris
-    spo = None if cfg.max_solver_threads is None else {"threads": cfg.max_solver_threads}
+    spo = (
+        None if cfg.max_solver_threads is None else {"threads": cfg.max_solver_threads}
+    )
     options = {
         "root_solver": cfg.solver_name,
         "sp_solver": cfg.solver_name,
-        "sp_solver_options" : spo,
-        #"valid_eta_lb": {i: -432000 for i in all_scenario_names},
+        "sp_solver_options": spo,
+        # "valid_eta_lb": {i: -432000 for i in all_scenario_names},
         "max_iter": cfg.max_iterations,
         "verbose": False,
-        "root_scenarios":[all_scenario_names[len(all_scenario_names)//2]]
-        }
-    
+        "root_scenarios": [all_scenario_names[len(all_scenario_names) // 2]],
+    }
+
     # L-shaped hub
     hub_dict = {
         "hub_class": LShapedHub,
         "hub_kwargs": {
@@ -84,7 +88,7 @@ def main():
         },
         "opt_class": LShapedMethod,
-        "opt_kwargs": { # Args passed to LShapedMethod __init__
+        "opt_kwargs": {  # Args passed to LShapedMethod __init__
             "options": options,
             "all_scenario_names": all_scenario_names,
             "scenario_creator": scenario_creator,
@@ -94,11 +98,14 @@ def main():
 
     # FWPH spoke
     if fwph:
-        fw_spoke = vanilla.fwph_spoke(*beans, scenario_creator_kwargs=scenario_creator_kwargs)
-    
-    if xhatlshaped:
-        xhatlshaped_spoke = vanilla.xhatlshaped_spoke(*beans, scenario_creator_kwargs=scenario_creator_kwargs)
+        fw_spoke = vanilla.fwph_spoke(
+            *beans, scenario_creator_kwargs=scenario_creator_kwargs
+        )
 
+    if xhatlshaped:
+        xhatlshaped_spoke = vanilla.xhatlshaped_spoke(
+            *beans, scenario_creator_kwargs=scenario_creator_kwargs
+        )
     list_of_spoke_dict = list()
     if fwph:
diff --git a/examples/farmer/farmer_ph.py b/examples/farmer/farmer_ph.py
index e54de9152..af38cbfd8 100644
--- a/examples/farmer/farmer_ph.py
+++ b/examples/farmer/farmer_ph.py
@@ -6,8 +6,8 @@
 # All rights reserved. 
Please see the files COPYRIGHT.md and LICENSE.md for # full copyright and license information. ############################################################################### -# This shows using progressive hedging (PH) algorithm and using its smoothed -# version (SPH) to solve the farmer problem. +# This shows using progressive hedging (PH) algorithm and using its smoothed +# version (SPH) to solve the farmer problem. from mpisppy.opt.ph import PH from mpisppy.convergers.primal_dual_converger import PrimalDualConverger @@ -19,11 +19,15 @@ def main(): # Two available type of input parameters, one without smoothing and the other with smoothing if len(sys.argv) != 7 and len(sys.argv) != 10: - print("usage python farmer_ph.py {crops_multiplier} {scen_count} {use_int} {rho} " - "{itermax} {solver_name} ") + print( + "usage python farmer_ph.py {crops_multiplier} {scen_count} {use_int} {rho} " + "{itermax} {solver_name} " + ) print("e.g., python farmer_ph.py 1 3 0 1 300 xpress") - print("or python farmer_ph_multi.py {crops_multiplier} {scen_count} {use_int} {rho} " - "{itermax} {smooth_type} {pvalue_or_pratio} {beta} {solver_name}") + print( + "or python farmer_ph_multi.py {crops_multiplier} {scen_count} {use_int} {rho} " + "{itermax} {smooth_type} {pvalue_or_pratio} {beta} {solver_name}" + ) print("e.g., python farmer_ph.py 1 3 0 1 300 1 0.1 0.1 xpress") quit() crops_multiplier = int(sys.argv[1]) @@ -44,7 +48,7 @@ def main(): "solver_name": solver_name, "PHIterLimit": itermax, "defaultPHrho": rho, - "convthresh": -1e-8, # turn off convthresh and only use primal dual converger + "convthresh": -1e-8, # turn off convthresh and only use primal dual converger "verbose": False, "display_progress": True, "display_timing": True, @@ -54,14 +58,14 @@ def main(): "smoothed": smooth_type, "defaultPHp": pvalue, "defaultPHbeta": beta, - "primal_dual_converger_options" : {"tol" : 1e-5}, - 'xhat_closest_options': {'xhat_solver_options': {}, 'keep_solution':True} + "primal_dual_converger_options": {"tol": 1e-5}, + "xhat_closest_options": {"xhat_solver_options": {}, "keep_solution": True}, } scenario_creator = farmer.scenario_creator scenario_denouement = farmer.scenario_denouement - all_scenario_names = ['scen{}'.format(sn) for sn in range(num_scen)] + all_scenario_names = ["scen{}".format(sn) for sn in range(num_scen)] scenario_creator_kwargs = { - 'use_integer': use_int, + "use_integer": use_int, "crops_multiplier": crops_multiplier, } ph = PH( @@ -70,19 +74,21 @@ def main(): scenario_creator, scenario_denouement, scenario_creator_kwargs=scenario_creator_kwargs, - extensions = XhatClosest, - ph_converger = PrimalDualConverger + extensions=XhatClosest, + ph_converger=PrimalDualConverger, ) ph.ph_main() variables = ph.gather_var_values_to_rank0() - for (scenario_name, variable_name) in variables: + for scenario_name, variable_name in variables: variable_value = variables[scenario_name, variable_name] print(scenario_name, variable_name, variable_value) - - # Use the closest per scenario solution to xbar for evaluating objective + + # Use the closest per scenario solution to xbar for evaluating objective if ph.tree_solution_available: - print(f"Final objective from XhatClosest: {ph.extobject._final_xhat_closest_obj}") + print( + f"Final objective from XhatClosest: {ph.extobject._final_xhat_closest_obj}" + ) -if __name__=='__main__': - main() \ No newline at end of file +if __name__ == "__main__": + main() diff --git a/examples/farmer/farmer_rho_demo.py b/examples/farmer/farmer_rho_demo.py index 
99c21322b..a2e74492d 100644
--- a/examples/farmer/farmer_rho_demo.py
+++ b/examples/farmer/farmer_rho_demo.py
@@ -30,72 +30,88 @@
 
 write_solution = False
 
+
 def _parse_args():
     # create a config object and parse
     cfg = config.Config()
-    
+
     cfg.num_scens_required()
     cfg.popular_args()
     cfg.two_sided_args()
-    cfg.ph_args() 
-    cfg.aph_args() 
+    cfg.ph_args()
+    cfg.aph_args()
     cfg.xhatlooper_args()
     cfg.fwph_args()
     cfg.lagrangian_args()
     cfg.lagranger_args()
     cfg.ph_ob_args()
     cfg.xhatshuffle_args()
-    cfg.dynamic_gradient_args() # gets gradient args for free
-    cfg.add_to_config("crops_mult",
-                      description="There will be 3x this many crops (default 1)",
-                      domain=int,
-                      default=1)
-    cfg.add_to_config("use_norm_rho_updater",
-                      description="Use the norm rho updater extension",
-                      domain=bool,
-                      default=False)
-    cfg.add_to_config("use-norm-rho-converger",
-                      description="Use the norm rho converger",
-                      domain=bool,
-                      default=False)
-    cfg.add_to_config("run_async",
-                      description="Run with async projective hedging instead of progressive hedging",
-                      domain=bool,
-                      default=False)
-    cfg.add_to_config("use_norm_rho_converger",
-                      description="Use the norm rho converger",
-                      domain=bool,
-                      default=False)
+    cfg.dynamic_gradient_args()  # gets gradient args for free
+    cfg.add_to_config(
+        "crops_mult",
+        description="There will be 3x this many crops (default 1)",
+        domain=int,
+        default=1,
+    )
+    cfg.add_to_config(
+        "use_norm_rho_updater",
+        description="Use the norm rho updater extension",
+        domain=bool,
+        default=False,
+    )
+    cfg.add_to_config(
+        "use-norm-rho-converger",
+        description="Use the norm rho converger",
+        domain=bool,
+        default=False,
+    )
+    cfg.add_to_config(
+        "run_async",
+        description="Run with async projective hedging instead of progressive hedging",
+        domain=bool,
+        default=False,
+    )
+    cfg.add_to_config(
+        "use_norm_rho_converger",
+        description="Use the norm rho converger",
+        domain=bool,
+        default=False,
+    )
 
     cfg.parse_command_line("farmer_demo")
     return cfg
-    
+
 
 def main():
-
     cfg = _parse_args()
 
     crops_multiplier = cfg.crops_mult
     rho_setter = None # non-grad rho setter?
 
     if cfg.default_rho is None and rho_setter is None:
-        raise RuntimeError("No rho_setter so a default must be specified via --default-rho")
+        raise RuntimeError(
+            "No rho_setter so a default must be specified via --default-rho"
+        )
 
     if cfg.use_norm_rho_converger:
         if not cfg.use_norm_rho_updater:
-            raise RuntimeError("--use-norm-rho-converger requires --use-norm-rho-updater")
+            raise RuntimeError(
+                "--use-norm-rho-converger requires --use-norm-rho-updater"
+            )
         elif cfg.grad_rho_setter:
-            raise RuntimeError("You cannot have--use-norm-rho-converger and --grad-rho-setter")
+            raise RuntimeError(
+                "You cannot have --use-norm-rho-converger and --grad-rho-setter"
+            )
         else:
             ph_converger = NormRhoConverger
     else:
         ph_converger = None
-    
+
     scenario_creator = farmer.scenario_creator
     scenario_denouement = farmer.scenario_denouement
     all_scenario_names = farmer.scenario_names_creator(cfg.num_scens)
     scenario_creator_kwargs = {
-        'use_integer': False,
+        "use_integer": False,
         "crops_multiplier": crops_multiplier,
     }
 
@@ -110,54 +126,70 @@ def main():
         raise RuntimeError("APH not supported in this example.")
     else:
         # Vanilla PH hub
-        hub_dict = vanilla.ph_hub(*beans,
-                                  scenario_creator_kwargs=scenario_creator_kwargs,
-                                  ph_extensions=MultiExtension,
-                                  ph_converger=ph_converger,
-                                  rho_setter=rho_setter) # non-grad rho setter
-        hub_dict["opt_kwargs"]["extension_kwargs"] = {"ext_classes" : ext_classes}
-        hub_dict['opt_kwargs']['extensions'] = MultiExtension # DLW: ???? 
(seems to not matter) - - #gradient extension kwargs + hub_dict = vanilla.ph_hub( + *beans, + scenario_creator_kwargs=scenario_creator_kwargs, + ph_extensions=MultiExtension, + ph_converger=ph_converger, + rho_setter=rho_setter, + ) # non-grad rho setter + hub_dict["opt_kwargs"]["extension_kwargs"] = {"ext_classes": ext_classes} + hub_dict["opt_kwargs"]["extensions"] = ( + MultiExtension # DLW: ???? (seems to not matter) + ) + + # gradient extension kwargs if cfg.grad_rho_setter: - ext_classes.append(Gradient_extension) - hub_dict['opt_kwargs']['options']['gradient_extension_options'] = {'cfg': cfg} - + ext_classes.append(Gradient_extension) + hub_dict["opt_kwargs"]["options"]["gradient_extension_options"] = {"cfg": cfg} + ## Gabe's (way pre-pandemic) adaptive rho if cfg.use_norm_rho_updater: - ext_classes.append(NormRhoUpdater) - hub_dict['opt_kwargs']['options']['norm_rho_options'] = {'verbose': True} + ext_classes.append(NormRhoUpdater) + hub_dict["opt_kwargs"]["options"]["norm_rho_options"] = {"verbose": True} # FWPH spoke if cfg.fwph: - fw_spoke = vanilla.fwph_spoke(*beans, scenario_creator_kwargs=scenario_creator_kwargs) + fw_spoke = vanilla.fwph_spoke( + *beans, scenario_creator_kwargs=scenario_creator_kwargs + ) # Standard Lagrangian bound spoke if cfg.lagrangian: - lagrangian_spoke = vanilla.lagrangian_spoke(*beans, - scenario_creator_kwargs=scenario_creator_kwargs, - rho_setter = rho_setter) + lagrangian_spoke = vanilla.lagrangian_spoke( + *beans, + scenario_creator_kwargs=scenario_creator_kwargs, + rho_setter=rho_setter, + ) # Special Lagranger bound spoke if cfg.lagranger: - lagranger_spoke = vanilla.lagranger_spoke(*beans, - scenario_creator_kwargs=scenario_creator_kwargs, - rho_setter = rho_setter) + lagranger_spoke = vanilla.lagranger_spoke( + *beans, + scenario_creator_kwargs=scenario_creator_kwargs, + rho_setter=rho_setter, + ) # ph outer bounder spoke if cfg.ph_ob: - ph_ob_spoke = vanilla.ph_ob_spoke(*beans, - scenario_creator_kwargs=scenario_creator_kwargs, - rho_setter = rho_setter) + ph_ob_spoke = vanilla.ph_ob_spoke( + *beans, + scenario_creator_kwargs=scenario_creator_kwargs, + rho_setter=rho_setter, + ) # xhat looper bound spoke if cfg.xhatlooper: - xhatlooper_spoke = vanilla.xhatlooper_spoke(*beans, scenario_creator_kwargs=scenario_creator_kwargs) + xhatlooper_spoke = vanilla.xhatlooper_spoke( + *beans, scenario_creator_kwargs=scenario_creator_kwargs + ) # xhat shuffle bound spoke if cfg.xhatshuffle: - xhatshuffle_spoke = vanilla.xhatshuffle_spoke(*beans, scenario_creator_kwargs=scenario_creator_kwargs) - + xhatshuffle_spoke = vanilla.xhatshuffle_spoke( + *beans, scenario_creator_kwargs=scenario_creator_kwargs + ) + list_of_spoke_dict = list() if cfg.fwph: list_of_spoke_dict.append(fw_spoke) @@ -166,7 +198,7 @@ def main(): if cfg.lagranger: list_of_spoke_dict.append(lagranger_spoke) if cfg.ph_ob: - list_of_spoke_dict.append(ph_ob_spoke) + list_of_spoke_dict.append(ph_ob_spoke) if cfg.xhatlooper: list_of_spoke_dict.append(xhatlooper_spoke) if cfg.xhatshuffle: @@ -176,13 +208,16 @@ def main(): wheel.spin() # If you want to write W and rho in a file : - #grad.grad_cost_and_rho('farmer', cfg) + # grad.grad_cost_and_rho('farmer', cfg) if write_solution: - wheel.write_first_stage_solution('farmer_plant.csv') - wheel.write_first_stage_solution('farmer_cyl_nonants.npy', - first_stage_solution_writer=sputils.first_stage_nonant_npy_serializer) - wheel.write_tree_solution('farmer_full_solution') + wheel.write_first_stage_solution("farmer_plant.csv") + 
wheel.write_first_stage_solution( + "farmer_cyl_nonants.npy", + first_stage_solution_writer=sputils.first_stage_nonant_npy_serializer, + ) + wheel.write_tree_solution("farmer_full_solution") + if __name__ == "__main__": main() diff --git a/examples/farmer/farmer_seqsampling.py b/examples/farmer/farmer_seqsampling.py index 2a4eacd25..89c8a1497 100644 --- a/examples/farmer/farmer_seqsampling.py +++ b/examples/farmer/farmer_seqsampling.py @@ -12,7 +12,7 @@ # argument to choose between BM (Bayraksan and Morton) and # BPL (Bayraksan and Pierre Louis) but provides all # the command line parameters for both (so either way, -# many command line parameters will be ignored). +# many command line parameters will be ignored). import numpy as np import farmer @@ -23,12 +23,18 @@ import mpisppy.confidence_intervals.confidence_config as confidence_config -#============================ -def xhat_generator_farmer(scenario_names, solver_name=None, solver_options=None, - use_integer=False, crops_multiplier=1, start_seed=None): - ''' +# ============================ +def xhat_generator_farmer( + scenario_names, + solver_name=None, + solver_options=None, + use_integer=False, + crops_multiplier=1, + start_seed=None, +): + """ For sequential sampling. - Takes scenario names as input and provide the best solution for the + Takes scenario names as input and provides the best solution for the approximate problem associated with the scenarios. Parameters ---------- @@ -50,42 +56,41 @@ def xhat_generator_farmer(scenario_names, solver_name=None, solver_options=None, ------- xhat: dict A generated xhat, solution to the approximate problem induced by scenario_names. - - NOTE: This tool only works when the file is in mpisppy. In SPInstances, + + NOTE: This tool only works when the file is in mpisppy. In SPInstances, you must change the from_module line. - ''' + """ num_scens = len(scenario_names) cfg = config.Config() cfg.quick_assign("EF_2stage", bool, True) cfg.quick_assign("EF_solver_name", str, solver_name) - #cfg.quick_assign("solver_name", str, solver_name) # amalgamator wants this + # cfg.quick_assign("solver_name", str, solver_name) # amalgamator wants this cfg.quick_assign("EF_solver_options", dict, solver_options) cfg.quick_assign("num_scens", int, num_scens) - cfg.quick_assign("_mpisppy_probability", float, 1/num_scens) + cfg.quick_assign("_mpisppy_probability", float, 1 / num_scens) cfg.quick_assign("start_seed", int, start_seed) - - #We use from_module to build easily an Amalgamator object + + # We use from_module to easily build an Amalgamator object ama = amalgamator.from_module("farmer", cfg, use_command_line=False) - #Correcting the building by putting the right scenarios. + # Correct the built object by putting in the right scenarios. 
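# (Aside for readers of this example: a minimal smoke test of the generator
# above, assuming some Pyomo-compatible MIP solver is installed locally;
# the solver name "cplex" is an assumption, not part of this file:
#
#     names = ["Scenario" + str(i) for i in range(3)]
#     demo_xhat = xhat_generator_farmer(names, solver_name="cplex", start_seed=0)
#     print(demo_xhat["ROOT"])  # root-node nonanticipative variable values
# )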
ama.scenario_names = scenario_names ama.verbose = False ama.run() - + # get the xhat xhat = sputils.nonant_cache_from_ef(ama.ef) - return {'ROOT': xhat['ROOT']} - + return {"ROOT": xhat["ROOT"]} def main(cfg): - """ Code for farmer sequential sampling (in a function for easier testing) + """Code for farmer sequential sampling (in a function for easier testing) Args: args (parseargs): the command line arguments object from parseargs Returns: - results (dict): the solution, gap confidence interval and T + results (dict): the solution, gap confidence interval and T """ refmodelname = "farmer" @@ -93,43 +98,47 @@ def main(cfg): assert cfg.EF_solver_name is not None solver_name = cfg.EF_solver_name crops_multiplier = cfg.crops_multiplier - - scenario_names = ['Scenario' + str(i) for i in range(scen_count)] - - xhat_gen_kwargs = {"scenario_names": scenario_names, - "solver_name": solver_name, - "solver_options": None, - "use_integer": False, - "crops_multiplier": crops_multiplier, - "start_seed": 0, + + scenario_names = ["Scenario" + str(i) for i in range(scen_count)] + + xhat_gen_kwargs = { + "scenario_names": scenario_names, + "solver_name": solver_name, + "solver_options": None, + "use_integer": False, + "crops_multiplier": crops_multiplier, + "start_seed": 0, } cfg.quick_assign("xhat_gen_kwargs", dict, xhat_gen_kwargs) # Note that as of July 2022, we are not using conditional args so cfg has everything if cfg.BM_vs_BPL == "BM": - sampler = seqsampling.SeqSampling(refmodelname, - xhat_generator_farmer, - cfg, - stochastic_sampling=False, - stopping_criterion="BM", - solving_type="EF_2stage", - ) + sampler = seqsampling.SeqSampling( + refmodelname, + xhat_generator_farmer, + cfg, + stochastic_sampling=False, + stopping_criterion="BM", + solving_type="EF_2stage", + ) else: # must be BPL ss = int(cfg.BPL_n0min) != 0 - sampler = seqsampling.SeqSampling(refmodelname, - xhat_generator_farmer, - cfg, - stochastic_sampling=ss, - stopping_criterion="BPL", - solving_type="EF_2stage", - ) + sampler = seqsampling.SeqSampling( + refmodelname, + xhat_generator_farmer, + cfg, + stochastic_sampling=ss, + stopping_criterion="BPL", + solving_type="EF_2stage", + ) xhat = sampler.run() return xhat + def _parse_args(): # create a config object and parse cfg = config.Config() - + cfg.num_scens_required() farmer.inparser_adder(cfg) @@ -139,40 +148,52 @@ def _parse_args(): confidence_config.BM_config(cfg) confidence_config.BPL_config(cfg) # --help will show both BM and BPL - cfg.add_to_config("BM_vs_BPL", - description="BM or BPL for Bayraksan and Morton or B and Pierre Louis", - domain=str, - default=None) - cfg.add_to_config("xhat1_file", - description="File to which xhat1 should be (e.g. to process with zhat4hat.py)", - domain=str, - default=None) + cfg.add_to_config( + "BM_vs_BPL", + description="BM or BPL for Bayraksan and Morton or B and Pierre Louis", + domain=str, + default=None, + ) + cfg.add_to_config( + "xhat1_file", + description="File to which xhat1 should be (e.g. 
to process with zhat4hat.py)", + domain=str, + default=None, + ) cfg.popular_args() cfg.two_sided_args() - cfg.ph_args() - cfg.aph_args() + cfg.ph_args() + cfg.aph_args() cfg.xhatlooper_args() cfg.fwph_args() cfg.lagrangian_args() cfg.lagranger_args() cfg.xhatshuffle_args() - cfg.add_to_config("use_norm_rho_updater", - description="Use the norm rho updater extension", - domain=bool, - default=False) - cfg.add_to_config("use-norm-rho-converger", - description="Use the norm rho converger", - domain=bool, - default=False) - cfg.add_to_config("run_async", - description="Run with async projective hedging instead of progressive hedging", - domain=bool, - default=False) - cfg.add_to_config("use_norm_rho_converger", - description="Use the norm rho converger", - domain=bool, - default=False) + cfg.add_to_config( + "use_norm_rho_updater", + description="Use the norm rho updater extension", + domain=bool, + default=False, + ) + cfg.add_to_config( + "use-norm-rho-converger", + description="Use the norm rho converger", + domain=bool, + default=False, + ) + cfg.add_to_config( + "run_async", + description="Run with async projective hedging instead of progressive hedging", + domain=bool, + default=False, + ) + cfg.add_to_config( + "use_norm_rho_converger", + description="Use the norm rho converger", + domain=bool, + default=False, + ) cfg.parse_command_line("farmer_sequential") @@ -180,18 +201,19 @@ def _parse_args(): raise RuntimeError("--BM-vs-BPL must be given.") if cfg.BM_vs_BPL != "BM" and cfg.BM_vs_BPL != "BPL": raise RuntimeError(f"--BM-vs-BPL must be BM or BPL (you gave {cfg.BM_vs_BPL})") - - return cfg + return cfg -if __name__ == '__main__': +if __name__ == "__main__": cfg = _parse_args() - + results = main(cfg) print("Final gap confidence interval results:", results) if cfg.xhat1_file is not None: print(f"Writing xhat1 to {cfg.xhat1_file}.npy") - root_nonants =np.fromiter((v for v in results["Candidate_solution"]["ROOT"]), float) + root_nonants = np.fromiter( + (v for v in results["Candidate_solution"]["ROOT"]), float + ) np.save(cfg.xhat1_file, root_nonants) diff --git a/examples/farmer/from_pysp/abstract.py b/examples/farmer/from_pysp/abstract.py index 685f0ab23..a286aa0ab 100644 --- a/examples/farmer/from_pysp/abstract.py +++ b/examples/farmer/from_pysp/abstract.py @@ -17,8 +17,11 @@ import mpisppy.utils.sputils as sputils import pyomo.environ as pyo + def _print_usage(): print('Usage: "abstract.py solver" where solver is a pyomo solver name') + + if len(sys.argv) < 2: _print_usage() sys.exit() @@ -34,38 +37,45 @@ def _print_usage(): _print_usage() sys.exit() -pref = os.path.join("..","PySP","abstract") -farmer = PySPModel(model=os.path.join(pref,"ReferenceModel.py"), - scenario_tree=os.path.join(pref,"ScenarioStructure.dat"), - data_dir=pref) +pref = os.path.join("..", "PySP", "abstract") +farmer = PySPModel( + model=os.path.join(pref, "ReferenceModel.py"), + scenario_tree=os.path.join(pref, "ScenarioStructure.dat"), + data_dir=pref, +) -phoptions = {'defaultPHrho': 1.0, - 'solver_name':solver_name, - 'PHIterLimit': 50, - 'convthresh': 0.01, - 'verbose': False, - 'display_progress': True, - 'display_timing': False, - 'iter0_solver_options': None, - 'iterk_solver_options': None - } +phoptions = { + "defaultPHrho": 1.0, + "solver_name": solver_name, + "PHIterLimit": 50, + "convthresh": 0.01, + "verbose": False, + "display_progress": True, + "display_timing": False, + "iter0_solver_options": None, + "iterk_solver_options": None, +} -ph = PH( options = phoptions, - all_scenario_names = 
farmer.all_scenario_names, - scenario_creator = farmer.scenario_creator, - scenario_denouement = farmer.scenario_denouement, - ) +ph = PH( + options=phoptions, + all_scenario_names=farmer.all_scenario_names, + scenario_creator=farmer.scenario_creator, + scenario_denouement=farmer.scenario_denouement, +) ph.ph_main() -ef = sputils.create_EF(farmer.all_scenario_names, - farmer.scenario_creator) +ef = sputils.create_EF(farmer.all_scenario_names, farmer.scenario_creator) solver = pyo.SolverFactory(solver_name) -if 'persistent' in solver_name: +if "persistent" in solver_name: solver.set_instance(ef, symbolic_solver_labels=True) solver.solve(tee=True) else: - solver.solve(ef, tee=True, symbolic_solver_labels=True,) + solver.solve( + ef, + tee=True, + symbolic_solver_labels=True, + ) print(f"EF objective: {pyo.value(ef.EF_Obj)}") farmer.close() diff --git a/examples/farmer/from_pysp/concrete_ampl.py b/examples/farmer/from_pysp/concrete_ampl.py index 28fc44764..03791834b 100644 --- a/examples/farmer/from_pysp/concrete_ampl.py +++ b/examples/farmer/from_pysp/concrete_ampl.py @@ -15,8 +15,11 @@ from mpisppy.opt.ph import PH from mpisppy.extensions.xhatclosest import XhatClosest + def _print_usage(): print('Usage: "concrete_ampl.py solver" where solver is a pyomo solver name') + + if len(sys.argv) < 2: _print_usage() sys.exit() @@ -30,29 +33,32 @@ def _print_usage(): _print_usage() sys.exit() -pref = os.path.join("..","PySP") -farmer = PySPModel(model=os.path.join(pref,"concrete", - "ReferenceModel.py"), - scenario_tree=os.path.join(pref,"ScenarioStructure.dat")) - -phoptions = {'defaultPHrho': 1.0, - 'solver_name':sys.argv[1], - 'PHIterLimit': 50, - 'convthresh': 0.01, - 'verbose': False, - 'display_progress': True, - 'display_timing': False, - 'iter0_solver_options': None, - 'iterk_solver_options': None, - 'xhat_closest_options': {'xhat_solver_options': {}, 'keep_solution':True}, - } - -ph = PH( options = phoptions, - all_scenario_names = farmer.all_scenario_names, - scenario_creator = farmer.scenario_creator, - scenario_denouement = farmer.scenario_denouement, - extensions = XhatClosest, - ) +pref = os.path.join("..", "PySP") +farmer = PySPModel( + model=os.path.join(pref, "concrete", "ReferenceModel.py"), + scenario_tree=os.path.join(pref, "ScenarioStructure.dat"), +) + +phoptions = { + "defaultPHrho": 1.0, + "solver_name": sys.argv[1], + "PHIterLimit": 50, + "convthresh": 0.01, + "verbose": False, + "display_progress": True, + "display_timing": False, + "iter0_solver_options": None, + "iterk_solver_options": None, + "xhat_closest_options": {"xhat_solver_options": {}, "keep_solution": True}, +} + +ph = PH( + options=phoptions, + all_scenario_names=farmer.all_scenario_names, + scenario_creator=farmer.scenario_creator, + scenario_denouement=farmer.scenario_denouement, + extensions=XhatClosest, +) ph.ph_main() diff --git a/examples/farmer/schur_complement.py b/examples/farmer/schur_complement.py index 1fa923552..73e3341a6 100644 --- a/examples/farmer/schur_complement.py +++ b/examples/farmer/schur_complement.py @@ -26,16 +26,18 @@ def solve_with_extensive_form(scen_count): - scenario_names = ['Scenario' + str(i) for i in range(scen_count)] + scenario_names = ["Scenario" + str(i) for i in range(scen_count)] options = dict() - options['solver'] = 'cplex_direct' + options["solver"] = "cplex_direct" scenario_kwargs = dict() - scenario_kwargs['use_integer'] = False - - opt = ef.ExtensiveForm(options=options, - all_scenario_names=scenario_names, - scenario_creator=farmer.scenario_creator, - 
scenario_creator_kwargs=scenario_kwargs) + scenario_kwargs["use_integer"] = False + + opt = ef.ExtensiveForm( + options=options, + all_scenario_names=scenario_names, + scenario_creator=farmer.scenario_creator, + scenario_creator_kwargs=scenario_kwargs, + ) results = opt.solve_extensive_form() if not pyo.check_optimal_termination(results): print("Warning: solver reported non-optimal termination status") @@ -44,24 +46,26 @@ def solve_with_extensive_form(scen_count): def solve_with_sc(scen_count, linear_solver=None): - scenario_names = ['Scenario' + str(i) for i in range(scen_count)] + scenario_names = ["Scenario" + str(i) for i in range(scen_count)] options = dict() if linear_solver is not None: - options['linalg'] = dict() - options['linalg']['solver'] = linear_solver + options["linalg"] = dict() + options["linalg"]["solver"] = linear_solver scenario_kwargs = dict() - scenario_kwargs['use_integer'] = False - opt = sc.SchurComplement(options=options, - all_scenario_names=scenario_names, - scenario_creator=farmer.scenario_creator, - scenario_creator_kwargs=scenario_kwargs) + scenario_kwargs["use_integer"] = False + opt = sc.SchurComplement( + options=options, + all_scenario_names=scenario_names, + scenario_creator=farmer.scenario_creator, + scenario_creator_kwargs=scenario_kwargs, + ) results = opt.solve() print(f"SchurComplement solver status: {results}") opt.report_var_values_at_rank0() return opt -if __name__ == '__main__': +if __name__ == "__main__": scen_count = int(sys.argv[1]) # solve_with_extensive_form(scen_count) solve_with_sc(scen_count) diff --git a/examples/generic_tester.py b/examples/generic_tester.py index 69c337e6e..bafff970a 100644 --- a/examples/generic_tester.py +++ b/examples/generic_tester.py @@ -28,7 +28,7 @@ from numpy.linalg import norm # Write solutions here, then delete. (.. 
makes it local to examples) -XHAT_TEMP = os.path.join("..","AAA_delete_this_from_generic_tester") +XHAT_TEMP = os.path.join("..", "AAA_delete_this_from_generic_tester") solver_name = "gurobi_persistent" if len(sys.argv) > 1: @@ -51,6 +51,7 @@ badguys = dict() # bad return code xhat_losers = dict() # does not match baseline well enough + def egret_avail(): try: import egret @@ -59,14 +60,15 @@ def egret_avail(): path = str(egret.__path__) left = path.find("'") - right = path.find("'", left+1) - egretrootpath = path[left+1:right] + right = path.find("'", left + 1) + egretrootpath = path[left + 1 : right] egret_thirdparty_path = os.path.join(egretrootpath, "thirdparty") if os.path.exists(os.path.join(egret_thirdparty_path, "pglib-opf-master")): return True from egret.thirdparty.get_pglib_opf import get_pglib_opf + get_pglib_opf(egret_thirdparty_path) return True @@ -81,15 +83,16 @@ def _append_soln_output_dir(argstring, outbase): def rebaseline_xhat(dirname, modname, np, argstring, baseline_dir): # Add the write output to the command line, then do one # note: baseline_dir must exist (and probably needs to be pushed to github) - fullarg = _append_soln_output_dir(argstring, - os.path.join("..", baseline_dir, modname)) + fullarg = _append_soln_output_dir( + argstring, os.path.join("..", baseline_dir, modname) + ) do_one(dirname, modname, np, fullarg) # do not check a baseline_dir, write one def _check_baseline_xhat(modname, argstring, baseline_dir, tol=1e-6): # return true if OK, False otherwise # compare the baseline_dir to XHAT_TEMP - + xhat = np.load(os.path.join(XHAT_TEMP, f"{modname}.npy")) base_xhat = np.load(os.path.join(baseline_dir, f"{modname}.npy")) d2 = norm(xhat - base_xhat, 2) @@ -102,7 +105,7 @@ def _check_baseline_xhat(modname, argstring, baseline_dir, tol=1e-6): def _xhat_dir_setup(modname): if os.path.exists(XHAT_TEMP): - shutil.rmtree(XHAT_TEMP) + shutil.rmtree(XHAT_TEMP) os.makedirs(XHAT_TEMP) # .. 
is prepended elsewhere return os.path.join(XHAT_TEMP, modname) @@ -110,20 +113,21 @@ def _xhat_dir_setup(modname): def _xhat_dir_teardown(): if os.path.exists(XHAT_TEMP): - shutil.rmtree(XHAT_TEMP) + shutil.rmtree(XHAT_TEMP) def do_one(dirname, modname, np, argstring, xhat_baseline_dir=None, tol=1e-6): - """ return the code""" + """return the code""" os.chdir(dirname) # taking us down a level from examples if xhat_baseline_dir is not None: - fullarg = _append_soln_output_dir(argstring, _xhat_dir_setup(modname)) + fullarg = _append_soln_output_dir(argstring, _xhat_dir_setup(modname)) else: # we might be making a baseline, or just not using one fullarg = argstring - runstring = "mpiexec {} -np {} python -u -m mpi4py -m mpisppy.generic_cylinders --module-name {} {}".\ - format(mpiexec_arg, np, modname, fullarg) + runstring = "mpiexec {} -np {} python -u -m mpi4py -m mpisppy.generic_cylinders --module-name {} {}".format( + mpiexec_arg, np, modname, fullarg + ) # The top process output seems to be cached by github actions # so we need oputput in the system call to help debug code = os.system("echo {} && {}".format(runstring, runstring)) @@ -143,56 +147,62 @@ def do_one(dirname, modname, np, argstring, xhat_baseline_dir=None, tol=1e-6): os.chdir("..") return code + ###################### main code ###################### # directory names are given relative to the examples directory whence this runs -farmeref = (f"--EF --num-scens 3 --EF-solver-name={solver_name}") -#rebaseline_xhat("farmer", "farmer", 1, farmeref, "test_data/farmeref_baseline") -do_one("farmer", "farmer", 1, farmeref, xhat_baseline_dir = "test_data/farmeref_baseline") +farmeref = f"--EF --num-scens 3 --EF-solver-name={solver_name}" +# rebaseline_xhat("farmer", "farmer", 1, farmeref, "test_data/farmeref_baseline") +do_one("farmer", "farmer", 1, farmeref, xhat_baseline_dir="test_data/farmeref_baseline") -hydroef = (f"--EF --branching-factors '3 3' --EF-solver-name={solver_name}") -#rebaseline_xhat("hydro", "hydro", 1, hydroef, "test_data/hydroef_baseline") +hydroef = f"--EF --branching-factors '3 3' --EF-solver-name={solver_name}" +# rebaseline_xhat("hydro", "hydro", 1, hydroef, "test_data/hydroef_baseline") do_one("hydro", "hydro", 1, hydroef, xhat_baseline_dir="test_data/hydroef_baseline") -hydroa = ("--max-iterations 100 --bundles-per-rank=0 --default-rho 1 " - "--lagrangian --xhatshuffle --rel-gap 0.001 --branching-factors '3 3' " - f"--stage2EFsolvern {solver_name} --solver-name={solver_name}") -#rebaseline_xhat("hydro", "hydro", 3, hydroa, "test_data/hydroa_baseline") -do_one("hydro", "hydro", 3, hydroa, xhat_baseline_dir = "test_data/hydroa_baseline") +hydroa = ( + "--max-iterations 100 --bundles-per-rank=0 --default-rho 1 " + "--lagrangian --xhatshuffle --rel-gap 0.001 --branching-factors '3 3' " + f"--stage2EFsolvern {solver_name} --solver-name={solver_name}" +) +# rebaseline_xhat("hydro", "hydro", 3, hydroa, "test_data/hydroa_baseline") +do_one("hydro", "hydro", 3, hydroa, xhat_baseline_dir="test_data/hydroa_baseline") if not nouc: - uca = ("--num-scens 5 --max-iterations 3 --max-solver-threads 4 " - "--default-rho 1 --lagrangian --xhatshuffle --rel-gap 0.01 " - f" --solver-name={solver_name}") - #rebaseline_xhat("uc", "uc_funcs", 3, uca, "test_data/uca_baseline") + uca = ( + "--num-scens 5 --max-iterations 3 --max-solver-threads 4 " + "--default-rho 1 --lagrangian --xhatshuffle --rel-gap 0.01 " + f" --solver-name={solver_name}" + ) + # rebaseline_xhat("uc", "uc_funcs", 3, uca, "test_data/uca_baseline") do_one("uc", 
"uc_funcs", 3, uca, xhat_baseline_dir="test_data/uca_baseline") # This particular sizes command line is not very deterministic, also # linearize prox to help xpress. - sizesa = ("--linearize-proximal-terms " - " --num-scens=10 --bundles-per-rank=0 --max-iterations=5" - " --default-rho=5 --lagrangian --xhatshuffle" - " --iter0-mipgap=0.01 --iterk-mipgap=0.001 --rel-gap 0.001" - f" --solver-name={solver_name}") - #rebaseline_xhat("sizes", "sizes", 3, sizesa, "test_data/sizesa_baseline") - do_one("sizes", "sizes", 3, sizesa, xhat_baseline_dir = "test_data/sizesa_baseline") + sizesa = ( + "--linearize-proximal-terms " + " --num-scens=10 --bundles-per-rank=0 --max-iterations=5" + " --default-rho=5 --lagrangian --xhatshuffle" + " --iter0-mipgap=0.01 --iterk-mipgap=0.001 --rel-gap 0.001" + f" --solver-name={solver_name}" + ) + # rebaseline_xhat("sizes", "sizes", 3, sizesa, "test_data/sizesa_baseline") + do_one("sizes", "sizes", 3, sizesa, xhat_baseline_dir="test_data/sizesa_baseline") - #### final processing #### if len(badguys) > 0: print("\nBad Guys:") - for i,v in badguys.items(): + for i, v in badguys.items(): print("Directory={}".format(i)) for c in v: print(" {}".format(c)) sys.exit(1) elif len(xhat_losers) > 0: print("\nXhat Losers:") - for i,v in xhat_losers.items(): + for i, v in xhat_losers.items(): print("Directory={}".format(i)) for c in v: print(" {}".format(c)) diff --git a/examples/hydro/hydro_cylinders.py b/examples/hydro/hydro_cylinders.py index 3e43a80c2..6a6f46ac1 100644 --- a/examples/hydro/hydro_cylinders.py +++ b/examples/hydro/hydro_cylinders.py @@ -20,6 +20,7 @@ write_solution = True + def _parse_args(): # create a Config object, get values for it, and return it cfg = config.Config() @@ -31,10 +32,12 @@ def _parse_args(): cfg.lagrangian_args() cfg.xhatspecific_args() - cfg.add_to_config(name ="stage2EFsolvern", - description="Solver to use for xhatlooper stage2ef option (default None)", - domain = str, - default=None) + cfg.add_to_config( + name="stage2EFsolvern", + description="Solver to use for xhatlooper stage2ef option (default None)", + domain=str, + default=None, + ) cfg.parse_command_line("hydro_cylinders") return cfg @@ -61,34 +64,36 @@ def main(): scenario_creator = hydro.scenario_creator scenario_denouement = hydro.scenario_denouement rho_setter = None - + # Things needed for vanilla cylinders beans = (cfg, scenario_creator, scenario_denouement, all_scenario_names) - + # Vanilla PH hub - hub_dict = vanilla.ph_hub(*beans, - scenario_creator_kwargs=scenario_creator_kwargs, - ph_extensions=None, - rho_setter = rho_setter, - all_nodenames = all_nodenames, - ) + hub_dict = vanilla.ph_hub( + *beans, + scenario_creator_kwargs=scenario_creator_kwargs, + ph_extensions=None, + rho_setter=rho_setter, + all_nodenames=all_nodenames, + ) # Standard Lagrangian bound spoke if lagrangian: - lagrangian_spoke = vanilla.lagrangian_spoke(*beans, - scenario_creator_kwargs=scenario_creator_kwargs, - rho_setter = rho_setter, - all_nodenames = all_nodenames, - ) - + lagrangian_spoke = vanilla.lagrangian_spoke( + *beans, + scenario_creator_kwargs=scenario_creator_kwargs, + rho_setter=rho_setter, + all_nodenames=all_nodenames, + ) # xhat looper bound spoke - + if xhatshuffle: - xhatshuffle_spoke = vanilla.xhatshuffle_spoke(*beans, - all_nodenames=all_nodenames, - scenario_creator_kwargs=scenario_creator_kwargs, - ) + xhatshuffle_spoke = vanilla.xhatshuffle_spoke( + *beans, + all_nodenames=all_nodenames, + scenario_creator_kwargs=scenario_creator_kwargs, + ) list_of_spoke_dict = list() if 
lagrangian: @@ -98,18 +103,25 @@ def main(): if cfg.stage2EFsolvern is not None: assert xhatshuffle is not None, "xhatshuffle is required for stage2EFsolvern" - xhatshuffle_spoke["opt_kwargs"]["options"]["stage2EFsolvern"] = cfg["stage2EFsolvern"] - xhatshuffle_spoke["opt_kwargs"]["options"]["branching_factors"] = cfg["branching_factors"] + xhatshuffle_spoke["opt_kwargs"]["options"]["stage2EFsolvern"] = cfg[ + "stage2EFsolvern" + ] + xhatshuffle_spoke["opt_kwargs"]["options"]["branching_factors"] = cfg[ + "branching_factors" + ] wheel = WheelSpinner(hub_dict, list_of_spoke_dict) wheel.spin() if wheel.global_rank == 0: # we are the reporting hub rank - print(f"BestInnerBound={wheel.BestInnerBound} and BestOuterBound={wheel.BestOuterBound}") - + print( + f"BestInnerBound={wheel.BestInnerBound} and BestOuterBound={wheel.BestOuterBound}" + ) + if write_solution: - wheel.write_first_stage_solution('hydro_first_stage.csv') - wheel.write_tree_solution('hydro_full_solution') + wheel.write_first_stage_solution("hydro_first_stage.csv") + wheel.write_tree_solution("hydro_full_solution") + if __name__ == "__main__": main() diff --git a/examples/hydro/hydro_cylinders_pysp.py b/examples/hydro/hydro_cylinders_pysp.py index 8955cce80..2bf5467ba 100644 --- a/examples/hydro/hydro_cylinders_pysp.py +++ b/examples/hydro/hydro_cylinders_pysp.py @@ -17,8 +17,8 @@ write_solution = True -def main(): +def main(): cfg = hydro_cylinders._parse_args() # we will ignore the branching factors xhatshuffle = cfg.xhatshuffle @@ -27,25 +27,31 @@ def main(): # This is multi-stage, so we need to supply node names hydro = PySPModel("./PySP/models/", "./PySP/nodedata/") rho_setter = None - + # Things needed for vanilla cylinders - beans = (cfg, hydro.scenario_creator, hydro.scenario_denouement, hydro.all_scenario_names) - + beans = ( + cfg, + hydro.scenario_creator, + hydro.scenario_denouement, + hydro.all_scenario_names, + ) + # Vanilla PH hub - hub_dict = vanilla.ph_hub(*beans, - ph_extensions=None, - rho_setter = rho_setter, - all_nodenames=hydro.all_nodenames) + hub_dict = vanilla.ph_hub( + *beans, + ph_extensions=None, + rho_setter=rho_setter, + all_nodenames=hydro.all_nodenames, + ) # Standard Lagrangian bound spoke if lagrangian: - lagrangian_spoke = vanilla.lagrangian_spoke(*beans, - rho_setter = rho_setter, - all_nodenames=hydro.all_nodenames) + lagrangian_spoke = vanilla.lagrangian_spoke( + *beans, rho_setter=rho_setter, all_nodenames=hydro.all_nodenames + ) if xhatshuffle: - xhatshuffle_spoke = vanilla.xhatshuffle_spoke(*beans, - hydro.all_nodenames) + xhatshuffle_spoke = vanilla.xhatshuffle_spoke(*beans, hydro.all_nodenames) list_of_spoke_dict = list() if lagrangian: @@ -57,13 +63,16 @@ def main(): wheel.spin() if wheel.global_rank == 0: # we are the reporting hub rank - print(f"BestInnerBound={wheel.BestInnerBound} and BestOuterBound={wheel.BestOuterBound}") - + print( + f"BestInnerBound={wheel.BestInnerBound} and BestOuterBound={wheel.BestOuterBound}" + ) + if write_solution: - wheel.write_first_stage_solution('hydro_first_stage.csv') - wheel.write_tree_solution('hydro_full_solution') + wheel.write_first_stage_solution("hydro_first_stage.csv") + wheel.write_tree_solution("hydro_full_solution") hydro.close() + if __name__ == "__main__": main() diff --git a/examples/hydro/hydro_ef.py b/examples/hydro/hydro_ef.py index 7af467ed6..c64ec4190 100644 --- a/examples/hydro/hydro_ef.py +++ b/examples/hydro/hydro_ef.py @@ -26,8 +26,8 @@ all_scenario_names, hydro.scenario_creator, scenario_creator_kwargs={"branching_factors": 
BFs}, - all_nodenames=all_nodenames + all_nodenames=all_nodenames, ) ef.solve_extensive_form(tee=True) -print(f'hydro objective value {pyo.value(ef.ef.EF_Obj)}') +print(f"hydro objective value {pyo.value(ef.ef.EF_Obj)}") ef.report_var_values_at_rank0("Hydro") diff --git a/examples/netdes/data/rename.py b/examples/netdes/data/rename.py index 7b31c3882..76c40bac4 100644 --- a/examples/netdes/data/rename.py +++ b/examples/netdes/data/rename.py @@ -8,17 +8,14 @@ ############################################################################### import subprocess -piper = subprocess.Popen(['ls'], - stdout=subprocess.PIPE, - stderr=subprocess.STDOUT) +piper = subprocess.Popen(["ls"], stdout=subprocess.PIPE, stderr=subprocess.STDOUT) stdout, stderr = piper.communicate() -fnames = str(stdout).split('\\n')[1:-1] +fnames = str(stdout).split("\\n")[1:-1] for fname in fnames: - row = fname.replace('.txt', '').split('-')[2:] + row = fname.replace(".txt", "").split("-")[2:] nnodes, nscens, density, idn = row[0], row[1], row[3], row[4] idn = idn[1:] - density = 'L' if density == '30' else 'H' - new_fname = '-'.join(['network', nnodes, nscens, density, idn]) - cmd = ['cp', fname, new_fname + '.dat'] - + density = "L" if density == "30" else "H" + new_fname = "-".join(["network", nnodes, nscens, density, idn]) + cmd = ["cp", fname, new_fname + ".dat"] diff --git a/examples/netdes/drivertest.py b/examples/netdes/drivertest.py index 2e9b4a4bb..d4fcde71b 100644 --- a/examples/netdes/drivertest.py +++ b/examples/netdes/drivertest.py @@ -13,17 +13,19 @@ from mpisppy.phbase import PHBase from mpisppy.opt.ph import PH from mpisppy.fwph.fwph import FWPH + # Hub and spoke SPCommunicator classes from mpisppy.cylinders.fwph_spoke import FrankWolfeOuterBound from mpisppy.cylinders.lagrangian_bounder import LagrangianOuterBound from mpisppy.cylinders.xhatlooper_bounder import XhatLooperInnerBound from mpisppy.cylinders.hub import PHHub + # Make it all go from mpisppy.spin_the_wheel import WheelSpinner from mpisppy.utils.xhat_eval import Xhat_Eval -if __name__=="__main__": +if __name__ == "__main__": """ For testing and debugging only Race a Lagrangian spoke against an FWPH spoke @@ -41,8 +43,8 @@ hub_ph_options = { "solver_name": "gurobi_persistent", "PHIterLimit": 1000, - "defaultPHrho": 10000, # Big for netdes - "convthresh": 0., + "defaultPHrho": 10000, # Big for netdes + "convthresh": 0.0, "verbose": False, "display_progress": False, "display_timing": False, @@ -65,14 +67,14 @@ "all_scenario_names": scenario_names, "scenario_creator": scenario_creator, "scenario_creator_kwargs": scenario_creator_kwargs, - } + }, } ph_options = { "solver_name": "gurobi_persistent", "PHIterLimit": 1000, - "defaultPHrho": 10000, # Big for netdes - "convthresh": 0., # To prevent FWPH from terminating + "defaultPHrho": 10000, # Big for netdes + "convthresh": 0.0, # To prevent FWPH from terminating "verbose": False, "display_progress": False, "display_timing": False, @@ -83,7 +85,7 @@ # FWPH spoke fw_options = { "FW_iter_limit": 10, - "FW_weight": 0., # Or 1.? I forget what this does, honestly + "FW_weight": 0.0, # Or 1.? 
I forget what this does, honestly "FW_conv_thresh": 1e-4, "solver_name": "gurobi_persistent", "FW_verbose": False, @@ -98,7 +100,7 @@ "all_scenario_names": scenario_names, "scenario_creator": scenario_creator, "scenario_creator_kwargs": scenario_creator_kwargs, - } + }, } # Standard Lagrangian bound spoke @@ -111,17 +113,18 @@ "all_scenario_names": scenario_names, "scenario_creator": scenario_creator, "scenario_creator_kwargs": scenario_creator_kwargs, - } + }, } # xhat looper bound spoke xhat_options = hub_ph_options.copy() - xhat_options['bundles_per_rank'] = 0 # no bundles for xhat - xhat_options["xhat_looper_options"] = {"xhat_solver_options":\ - ph_options["iterk_solver_options"], - "scen_limit": 3, - "dump_prefix": "delme", - "csvname": "looper.csv"} + xhat_options["bundles_per_rank"] = 0 # no bundles for xhat + xhat_options["xhat_looper_options"] = { + "xhat_solver_options": ph_options["iterk_solver_options"], + "scen_limit": 3, + "dump_prefix": "delme", + "csvname": "looper.csv", + } xhatlooper_spoke = { "spoke_class": XhatLooperInnerBound, "spoke_kwargs": dict(), @@ -131,7 +134,7 @@ "all_scenario_names": scenario_names, "scenario_creator": scenario_creator, "scenario_creator_kwargs": scenario_creator_kwargs, - } + }, } list_of_spoke_dict = (fw_spoke, lagrangian_spoke, xhatlooper_spoke) diff --git a/examples/netdes/netdes.py b/examples/netdes/netdes.py index 3a2308a9a..946eeeaf9 100644 --- a/examples/netdes/netdes.py +++ b/examples/netdes/netdes.py @@ -6,12 +6,12 @@ # All rights reserved. Please see the files COPYRIGHT.md and LICENSE.md for # full copyright and license information. ############################################################################### -''' Implementation of a simple network design problem. See README for more info +"""Implementation of a simple network design problem. 
See README for more info - Created: 9 November 2019 by DTM +Created: 9 November 2019 by DTM - Scenario indices are ZERO based -''' +Scenario indices are ZERO based +""" import os import mpisppy.utils.sputils as sputils @@ -23,102 +23,116 @@ def scenario_creator(scenario_name, path=None): if path is None: - raise RuntimeError('Must provide the name of the .dat file ' - 'containing the instance data via the ' - 'path argument to scenario_creator') - + raise RuntimeError( + "Must provide the name of the .dat file " + "containing the instance data via the " + "path argument to scenario_creator" + ) + scenario_ix = _get_scenario_ix(scenario_name) model = build_scenario_model(path, scenario_ix) # now attach the one and only scenario tree node - sputils.attach_root_node(model, model.FirstStageCost, [model.x[:,:], ]) - + sputils.attach_root_node( + model, + model.FirstStageCost, + [ + model.x[:, :], + ], + ) + return model def build_scenario_model(fname, scenario_ix): data = parse(fname, scenario_ix=scenario_ix) - num_nodes = data['N'] - adj = data['A'] # Adjacency matrix - edges = data['el'] # Edge list - c = data['c'] # First-stage cost matrix (per edge) - d = data['d'] # Second-stage cost matrix (per edge) - u = data['u'] # Capacity of each arc - b = data['b'] # Demand of each node - p = data['p'] # Probability of scenario + num_nodes = data["N"] + adj = data["A"] # Adjacency matrix + edges = data["el"] # Edge list + c = data["c"] # First-stage cost matrix (per edge) + d = data["d"] # Second-stage cost matrix (per edge) + u = data["u"] # Capacity of each arc + b = data["b"] # Demand of each node + p = data["p"] # Probability of scenario model = pyo.ConcreteModel() - model.x = pyo.Var(edges, domain=pyo.Binary) # First stage vars - model.y = pyo.Var(edges, domain=pyo.NonNegativeReals) # Second stage vars + model.x = pyo.Var(edges, domain=pyo.Binary) # First stage vars + model.y = pyo.Var(edges, domain=pyo.NonNegativeReals) # Second stage vars model.edges = edges model._mpisppy_probability = p - ''' Objective ''' - model.FirstStageCost = pyo.quicksum(c[e] * model.x[e] for e in edges) + """ Objective """ + model.FirstStageCost = pyo.quicksum(c[e] * model.x[e] for e in edges) model.SecondStageCost = pyo.quicksum(d[e] * model.y[e] for e in edges) obj_expr = model.FirstStageCost + model.SecondStageCost model.MinCost = pyo.Objective(expr=obj_expr, sense=pyo.minimize) - ''' Variable upper bound constraints on each edge ''' + """ Variable upper bound constraints on each edge """ model.vubs = pyo.ConstraintList() for e in edges: - expr = model.y[e] - u[e] * model.x[e] + expr = model.y[e] - u[e] * model.x[e] model.vubs.add(expr <= 0) - ''' Flow balance constraints for each node ''' + """ Flow balance constraints for each node """ model.bals = pyo.ConstraintList() for i in range(num_nodes): - in_nbs = np.where(adj[:,i] > 0)[0] - out_nbs = np.where(adj[i,:] > 0)[0] - lhs = pyo.quicksum(model.y[i,j] for j in out_nbs) - \ - pyo.quicksum(model.y[j,i] for j in in_nbs) + in_nbs = np.where(adj[:, i] > 0)[0] + out_nbs = np.where(adj[i, :] > 0)[0] + lhs = pyo.quicksum(model.y[i, j] for j in out_nbs) - pyo.quicksum( + model.y[j, i] for j in in_nbs + ) model.bals.add(lhs == b[i]) return model - - def scenario_denouement(rank, scenario_name, scenario): pass + def _get_scenario_ix(sname): - ''' Get the scenario index from the given scenario name by strpiping all - digits off of the right of the scenario name, until a non-digit is - encountered. 
- ''' + """Get the scenario index from the given scenario name by stripping all + digits off of the right of the scenario name, until a non-digit is + encountered. + """ i = len(sname) - 1 - while (i > 0 and sname[i-1].isdigit()): + while i > 0 and sname[i - 1].isdigit(): i -= 1 return int(sname[i:]) + ########## helper functions ######## -#========= -def scenario_names_creator(num_scens,start=None): + +# ========= +def scenario_names_creator(num_scens, start=None): # if start!=None, the list starts with the 'start' labeled scenario - if (start is None) : - start=0 - return [f"Scenario{i}" for i in range(start,start+num_scens)] + if start is None: + start = 0 + return [f"Scenario{i}" for i in range(start, start + num_scens)] -#========= +# ========= def inparser_adder(cfg): # add options unique to netdes # we don't want num_scens from the command line cfg.mip_options() - cfg.add_to_config("instance_name", - description="netdes instance name (e.g., network-10-20-L-01)", - domain=str, - default=None) - cfg.add_to_config("netdes_data_path", - description="path to detdes data (e.g., ./data)", - domain=str, - default=None) - - -#========= + cfg.add_to_config( + "instance_name", + description="netdes instance name (e.g., network-10-20-L-01)", + domain=str, + default=None, + ) + cfg.add_to_config( + "netdes_data_path", + description="path to netdes data (e.g., ./data)", + domain=str, + default=None, + ) + + +# ========= def kw_creator(cfg): # linked to the scenario_creator and inparser_adder # side-effect is dealing with num_scens @@ -126,18 +140,27 @@ def kw_creator(cfg): ns = int(inst.split("-")[-3]) if hasattr(cfg, "num_scens"): if cfg.num_scens != ns: - raise RuntimeError(f"Argument num-scens={cfg.num_scens} does not match the number " - "implied by instance name={ns} " - "\n(--num-scens is not needed for netdes)") + raise RuntimeError( + f"Argument num-scens={cfg.num_scens} does not match the number " + f"implied by instance name={ns} " + "\n(--num-scens is not needed for netdes)" + ) else: - cfg.add_and_assign("num_scens","number of scenarios", int, None, ns) + cfg.add_and_assign("num_scens", "number of scenarios", int, None, ns) path = os.path.join(cfg.netdes_data_path, f"{inst}.dat") kwargs = {"path": path} return kwargs -def sample_tree_scen_creator(sname, stage, sample_branching_factors, seed, - given_scenario=None, **scenario_creator_kwargs): - """ Create a scenario within a sample tree. Mainly for multi-stage and simple for two-stage. + +def sample_tree_scen_creator( + sname, + stage, + sample_branching_factors, + seed, + given_scenario=None, + **scenario_creator_kwargs, +): + """Create a scenario within a sample tree. Mainly for multi-stage and simple for two-stage. 
(this function supports zhat and confidence interval code) Args: sname (string): scenario name to be created @@ -156,8 +179,9 @@ def sample_tree_scen_creator(sname, stage, sample_branching_factors, seed, sca["num_scens"] = sample_branching_factors[0] # two-stage problem return scenario_creator(sname, **sca) + ######## end helper functions ######### -if __name__=='__main__': - print('netdes.py has no main') +if __name__ == "__main__": + print("netdes.py has no main") diff --git a/examples/netdes/netdes_cylinders.py b/examples/netdes/netdes_cylinders.py index 232a775aa..46b5b0bd2 100644 --- a/examples/netdes/netdes_cylinders.py +++ b/examples/netdes/netdes_cylinders.py @@ -15,6 +15,7 @@ write_solution = True + def _parse_args(): cfg = config.Config() cfg.num_scens_optional() @@ -29,22 +30,27 @@ def _parse_args(): cfg.slammax_args() cfg.cross_scenario_cuts_args() cfg.reduced_costs_args() - cfg.add_to_config("instance_name", - description="netdes instance name (e.g., network-10-20-L-01)", - domain=str, - default=None) + cfg.add_to_config( + "instance_name", + description="netdes instance name (e.g., network-10-20-L-01)", + domain=str, + default=None, + ) cfg.parse_command_line("netdes_cylinders") return cfg - + + def main(): cfg = _parse_args() inst = cfg.instance_name num_scen = int(inst.split("-")[-3]) if cfg.num_scens is not None and cfg.num_scens != num_scen: - raise RuntimeError(f"Argument num-scens={cfg.num_scens} does not match the number " - "implied by instance name={num_scen} " - "\n(--num-scens is not needed for netdes)") + raise RuntimeError( + f"Argument num-scens={cfg.num_scens} does not match the number " + "implied by instance name={num_scen} " + "\n(--num-scens is not needed for netdes)" + ) fwph = cfg.fwph xhatlooper = cfg.xhatlooper @@ -59,7 +65,7 @@ def main(): path = f"{netdes.__file__[:-10]}/data/{inst}.dat" scenario_creator = netdes.scenario_creator - scenario_denouement = netdes.scenario_denouement + scenario_denouement = netdes.scenario_denouement all_scenario_names = [f"Scen{i}" for i in range(num_scen)] scenario_creator_kwargs = {"path": path} @@ -72,53 +78,66 @@ def main(): ph_ext = None # Vanilla PH hub - hub_dict = vanilla.ph_hub(*beans, - scenario_creator_kwargs=scenario_creator_kwargs, - ph_extensions=ph_ext, - rho_setter = None) + hub_dict = vanilla.ph_hub( + *beans, + scenario_creator_kwargs=scenario_creator_kwargs, + ph_extensions=ph_ext, + rho_setter=None, + ) if cross_scenario_cuts: - hub_dict["opt_kwargs"]["options"]["cross_scen_options"]\ - = {"check_bound_improve_iterations" : cfg.cross_scenario_iter_cnt} - + hub_dict["opt_kwargs"]["options"]["cross_scen_options"] = { + "check_bound_improve_iterations": cfg.cross_scenario_iter_cnt + } + if cfg.reduced_costs: vanilla.add_reduced_costs_fixer(hub_dict, cfg) # FWPH spoke if fwph: - fw_spoke = vanilla.fwph_spoke(*beans, scenario_creator_kwargs=scenario_creator_kwargs) + fw_spoke = vanilla.fwph_spoke( + *beans, scenario_creator_kwargs=scenario_creator_kwargs + ) # Standard Lagrangian bound spoke if lagrangian: - lagrangian_spoke = vanilla.lagrangian_spoke(*beans, - scenario_creator_kwargs=scenario_creator_kwargs, - rho_setter = None) + lagrangian_spoke = vanilla.lagrangian_spoke( + *beans, scenario_creator_kwargs=scenario_creator_kwargs, rho_setter=None + ) if subgradient: - subgradient_spoke = vanilla.subgradient_spoke(*beans, - scenario_creator_kwargs=scenario_creator_kwargs, - rho_setter = None) - + subgradient_spoke = vanilla.subgradient_spoke( + *beans, scenario_creator_kwargs=scenario_creator_kwargs, 
rho_setter=None + ) + # xhat looper bound spoke if xhatlooper: - xhatlooper_spoke = vanilla.xhatlooper_spoke(*beans, scenario_creator_kwargs=scenario_creator_kwargs) + xhatlooper_spoke = vanilla.xhatlooper_spoke( + *beans, scenario_creator_kwargs=scenario_creator_kwargs + ) # xhat shuffle bound spoke if xhatshuffle: - xhatshuffle_spoke = vanilla.xhatshuffle_spoke(*beans, scenario_creator_kwargs=scenario_creator_kwargs) + xhatshuffle_spoke = vanilla.xhatshuffle_spoke( + *beans, scenario_creator_kwargs=scenario_creator_kwargs + ) # slam up bound spoke if slammax: - slammax_spoke = vanilla.slammax_spoke(*beans, scenario_creator_kwargs=scenario_creator_kwargs) + slammax_spoke = vanilla.slammax_spoke( + *beans, scenario_creator_kwargs=scenario_creator_kwargs + ) # cross scenario cuts spoke if cross_scenario_cuts: - cross_scenario_cuts_spoke = vanilla.cross_scenario_cuts_spoke(*beans, scenario_creator_kwargs=scenario_creator_kwargs) + cross_scenario_cuts_spoke = vanilla.cross_scenario_cuts_spoke( + *beans, scenario_creator_kwargs=scenario_creator_kwargs + ) if cfg.reduced_costs: - reduced_costs_spoke = vanilla.reduced_costs_spoke(*beans, - scenario_creator_kwargs=scenario_creator_kwargs, - rho_setter = None) + reduced_costs_spoke = vanilla.reduced_costs_spoke( + *beans, scenario_creator_kwargs=scenario_creator_kwargs, rho_setter=None + ) list_of_spoke_dict = list() if fwph: @@ -142,8 +161,8 @@ def main(): wheel.spin() if write_solution: - wheel.write_first_stage_solution('netdes_build.csv') - wheel.write_tree_solution('netdes_full_solution') + wheel.write_first_stage_solution("netdes_build.csv") + wheel.write_tree_solution("netdes_full_solution") if __name__ == "__main__": diff --git a/examples/netdes/netdes_ef.py b/examples/netdes/netdes_ef.py index d0748bfcc..150dfd41e 100644 --- a/examples/netdes/netdes_ef.py +++ b/examples/netdes/netdes_ef.py @@ -6,13 +6,14 @@ # All rights reserved. Please see the files COPYRIGHT.md and LICENSE.md for # full copyright and license information. 
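# (Aside on netdes instance names, which this driver and netdes_cylinders.py
# take on the command line: the scenario count is encoded as the
# third-from-last dash-separated token, which is exactly how kw_creator in
# netdes.py recovers it:
#
#     inst = "network-10-20-L-01"
#     num_scen = int(inst.split("-")[-3])  # -> 20 scenarios
# )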
############################################################################### -''' Solve the EF of the network problems -''' +"""Solve the EF of the network problems""" + from mpisppy.opt.ef import ExtensiveForm from netdes import scenario_creator import pyomo.environ as pyo import sys + def main(): # inst = "network-10-20-L-01" if len(sys.argv) == 1: @@ -38,5 +39,6 @@ def main(): print("Warning: solver reported non-optimal termination status") print("Netdes objective value:", pyo.value(ef.ef.EF_Obj)) -if __name__=="__main__": + +if __name__ == "__main__": main() diff --git a/examples/netdes/netdes_extension.py b/examples/netdes/netdes_extension.py index eb752866c..8f77f715d 100644 --- a/examples/netdes/netdes_extension.py +++ b/examples/netdes/netdes_extension.py @@ -10,8 +10,8 @@ import mpisppy.utils.sputils as sputils from mpisppy.extensions.extension import Extension -class NetworkDesignTracker(Extension): +class NetworkDesignTracker(Extension): def __init__(self, ph): self.ph = ph self.cylinder_rank = ph.cylinder_rank @@ -20,16 +20,15 @@ def pre_iter0(self): pass def post_iter0(self): - print('Just completed iteration 0') + print("Just completed iteration 0") def miditer(self): - ''' Currently hard-coded for no parallelism - ''' - print('About to do the solve in iteration', self.ph._PHIter) + """Currently hard-coded for no parallelism""" + print("About to do the solve in iteration", self.ph._PHIter) sols = self.get_sol() print_sol(sols, print_all=False) - if (self.ph._PHIter < 5): + if self.ph._PHIter < 5: return models = self.ph.local_scenarios @@ -37,58 +36,58 @@ def miditer(self): edges = arb_model.edges num_scenarios = len(sols) - bundling = not hasattr(arb_model, '_solver_plugin') - if (bundling): + bundling = not hasattr(arb_model, "_solver_plugin") + if bundling: arb_bundle_model = get_arb_elt(self.ph.local_subproblems) persistent = sputils.is_persistent(arb_bundle_model._solver_plugin) else: persistent = sputils.is_persistent(arb_model._solver_plugin) for edge in edges: - if (arb_model.x[edge].fixed): # Fixed in one model = fixed in all + if arb_model.x[edge].fixed: # Fixed in one model = fixed in all continue - sol_values = {name: sols[name][edge]['value'] for name in sols} + sol_values = {name: sols[name][edge]["value"] for name in sols} total = sum(sol_values.values()) if (total < 0.1) or (total > num_scenarios - 0.1): - ''' All scenarios agree ''' + """ All scenarios agree """ pass - if (total > (num_scenarios//2) + 1.9): - print(f'Fixing edge {edge} to 1') - for (sname, model) in models.items(): + if total > (num_scenarios // 2) + 1.9: + print(f"Fixing edge {edge} to 1") + for sname, model in models.items(): model.x[edge].fix(1.0) - if (persistent): - if (bundling): - solver = self.get_solver(sname) + if persistent: + if bundling: + solver = self.get_solver(sname) else: solver = model._solver_plugin solver.update_var(model.x[edge]) fixed_edges = [edge for edge in edges if arb_model.x[edge].fixed] - print(f'Fixed {len(fixed_edges)} total edges') + print(f"Fixed {len(fixed_edges)} total edges") def get_solver(self, sname): - ''' Move this to PHBase if it is successful - Need to add some checks for bundling first - ''' + """Move this to PHBase if it is successful + Need to add some checks for bundling first + """ rank = self.cylinder_rank names = self.ph.names_in_bundles[rank] bunnum = None - for (num, scens) in names.items(): - if (sname in scens): + for num, scens in names.items(): + if sname in scens: bunnum = num break - if (bunnum is None): - raise 
RuntimeError(f'Could not find {sname}') - bname = f'rank{rank}bundle{bunnum}' + if bunnum is None: + raise RuntimeError(f"Could not find {sname}") + bname = f"rank{rank}bundle{bunnum}" return self.ph.local_subproblems[bname]._solver_plugin def enditer(self): - ''' Variable fixing must be done in miditer, so that a solve takes - place after the variables are fixed (otherwise, we could end up with - and infeasible solution). - ''' + """Variable fixing must be done in miditer, so that a solve takes + place after the variables are fixed (otherwise, we could end up with + and infeasible solution). + """ pass def post_everything(self): @@ -99,43 +98,54 @@ def get_sol(self): arb_model = get_arb_elt(ph.local_scenarios) edges = arb_model.edges res = { - name: { - edge: { - 'value': pyo.value(model.x[edge]), - 'fixed': model.x[edge].fixed, - } for edge in edges - } for (name, model) in ph.local_scenarios.items() - } + name: { + edge: { + "value": pyo.value(model.x[edge]), + "fixed": model.x[edge].fixed, + } + for edge in edges + } + for (name, model) in ph.local_scenarios.items() + } return res + # My helper functions here def get_arb_elt(dictionary): - ''' Return an arbitrary element of the dictionary ''' + """Return an arbitrary element of the dictionary""" if not (len(dictionary)): return None return next(iter(dictionary.values())) + def print_sol(sol_dict, print_all=True): edges = list(get_arb_elt(sol_dict).keys()) for edge in edges: - if (not print_all): - tot = sum(sol_dict[sname][edge]['value'] for sname in sol_dict) - if (tot >= 0.9): # At least one scenario wants to take the edge - row = ''.join(['x' if sol_dict[sname][edge]['value'] > 0.1 else '.' - for sname in sol_dict]) - row = f'{edge[0]:2d}-->{edge[1]:2d} ' + row - fixed = [sol_dict[sname][edge]['fixed'] for sname in sol_dict] - assert(all(fixed) or (not any(fixed))) - if (fixed[0]): - row += ' <-- fixed' + if not print_all: + tot = sum(sol_dict[sname][edge]["value"] for sname in sol_dict) + if tot >= 0.9: # At least one scenario wants to take the edge + row = "".join( + [ + "x" if sol_dict[sname][edge]["value"] > 0.1 else "." + for sname in sol_dict + ] + ) + row = f"{edge[0]:2d}-->{edge[1]:2d} " + row + fixed = [sol_dict[sname][edge]["fixed"] for sname in sol_dict] + assert all(fixed) or (not any(fixed)) + if fixed[0]: + row += " <-- fixed" print(row) else: - row = ''.join(['x' if sol_dict[sname][edge]['value'] > 0.1 else '.' - for sname in sol_dict]) - row = f'{edge[0]:2d}-->{edge[1]:2d} ' + row - fixed = [sol_dict[sname][edge]['fixed'] for sname in sol_dict] - assert(all(fixed) or (not any(fixed))) - if (fixed[0]): - row += ' <-- fixed' + row = "".join( + [ + "x" if sol_dict[sname][edge]["value"] > 0.1 else "." + for sname in sol_dict + ] + ) + row = f"{edge[0]:2d}-->{edge[1]:2d} " + row + fixed = [sol_dict[sname][edge]["fixed"] for sname in sol_dict] + assert all(fixed) or (not any(fixed)) + if fixed[0]: + row += " <-- fixed" print(row) - diff --git a/examples/netdes/netdes_ph.py b/examples/netdes/netdes_ph.py index 4a4d5162d..16cfb9719 100644 --- a/examples/netdes/netdes_ph.py +++ b/examples/netdes/netdes_ph.py @@ -6,8 +6,8 @@ # All rights reserved. Please see the files COPYRIGHT.md and LICENSE.md for # full copyright and license information. 
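# (Aside: the positional arguments parsed below map onto the smoothed-PH
# options the same way as in farmer_ph.py: argv[2] -> "PHIterLimit",
# argv[3] -> "defaultPHrho", argv[4] -> "smoothed", argv[5] -> "defaultPHp",
# argv[6] -> "defaultPHbeta"; for example:
#
#     python netdes_ph.py network-10-20-L-01 100 10000 0 0.0 1.0
# )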
############################################################################### -''' Solve the EF of the network problems -''' +"""Solve the EF of the network problems""" + from netdes import scenario_creator, scenario_denouement from mpisppy.opt.ph import PH from mpisppy.convergers.primal_dual_converger import PrimalDualConverger @@ -19,7 +19,7 @@ def main(): msg = ( "Give instance name, then PH maxiters " - + "then rho, then smooth_type, then pvalue_or_pratio, then beta \n" + + "then rho, then smooth_type, then pvalue_or_pratio, then beta \n" + "(e.g., network-10-20-L-01 100 10000 0 0.0 1.0)" ) if len(sys.argv) != 7: @@ -27,54 +27,57 @@ def main(): quit() # solver_name = 'gurobi' fname = "data" + os.sep + sys.argv[1] + ".dat" - num_scen = int(fname.split('-')[2]) - scenario_names = ['Scen' + str(i) for i in range(num_scen)] + num_scen = int(fname.split("-")[2]) + scenario_names = ["Scen" + str(i) for i in range(num_scen)] scenario_creator_kwargs = {"path": fname} - ''' Now solve with PH to see what happens (very little, I imagine) ''' + """ Now solve with PH to see what happens (very little, I imagine) """ PH_options = { - 'solver_name' : 'gurobi_persistent', - 'PHIterLimit' : int(sys.argv[2]), - 'defaultPHrho' : float(sys.argv[3]), - 'convthresh' : -1e-8, - 'verbose' : False, - 'display_progress' : True, - 'display_timing' : True, - 'iter0_solver_options' : {"threads": 1}, - 'iterk_solver_options' : {"threads": 1}, - 'bundles_per_rank' : 0, # 0 = no bundles + "solver_name": "gurobi_persistent", + "PHIterLimit": int(sys.argv[2]), + "defaultPHrho": float(sys.argv[3]), + "convthresh": -1e-8, + "verbose": False, + "display_progress": True, + "display_timing": True, + "iter0_solver_options": {"threads": 1}, + "iterk_solver_options": {"threads": 1}, + "bundles_per_rank": 0, # 0 = no bundles "display_convergence_detail": False, - 'xhat_closest_options' : {'xhat_solver_options': {}, 'keep_solution':True}, + "xhat_closest_options": {"xhat_solver_options": {}, "keep_solution": True}, "smoothed": int(sys.argv[4]), "defaultPHp": float(sys.argv[5]), "defaultPHbeta": float(sys.argv[6]), - "primal_dual_converger_options" : {"tol" : 1e-5} + "primal_dual_converger_options": {"tol": 1e-5}, } - + ph = PH( - PH_options, + PH_options, scenario_names, scenario_creator, scenario_denouement, # extensions=NetworkDesignTracker, extensions=XhatClosest, - ph_converger = PrimalDualConverger, + ph_converger=PrimalDualConverger, scenario_creator_kwargs=scenario_creator_kwargs, ) conv, obj, triv = ph.ph_main() # Obj includes prox (only ok if we find a non-ant soln) - + ph.post_solve_bound(verbose=False) variables = ph.gather_var_values_to_rank0() - for (scenario_name, variable_name) in variables: + for scenario_name, variable_name in variables: variable_value = variables[scenario_name, variable_name] print(scenario_name, variable_name, variable_value) if ph.tree_solution_available: - print(f"Final objective from XhatClosest: {ph.extobject._final_xhat_closest_obj}") + print( + f"Final objective from XhatClosest: {ph.extobject._final_xhat_closest_obj}" + ) else: print(f"Final objective from XhatClosest: {float('inf')}") -if __name__=='__main__': + +if __name__ == "__main__": main() diff --git a/examples/netdes/parse.py b/examples/netdes/parse.py index aef06ebc8..8f2126412 100644 --- a/examples/netdes/parse.py +++ b/examples/netdes/parse.py @@ -6,27 +6,30 @@ # All rights reserved. Please see the files COPYRIGHT.md and LICENSE.md for # full copyright and license information. 
############################################################################### -''' Parse data from an instance file +"""Parse data from an instance file + +TODO: Currently storing the (dense) adjacency matrix. Cheaper not to. +""" - TODO: Currently storing the (dense) adjacency matrix. Cheaper not to. -''' import numpy as np + def main(): - fname = 'data/network-10-10-L-01.dat' + fname = "data/network-10-10-L-01.dat" data = parse(fname) - print(data['d'][9]) - + print(data["d"][9]) + scen = parse(fname, scenario_ix=9) - print(scen['d']) + print(scen["d"]) + def parse(fname, scenario_ix=None): - with open(fname, 'r') as f: - ''' Skip file header ''' - while (not f.readline().startswith('+')): + with open(fname, "r") as f: + """ Skip file header """ + while not f.readline().startswith("+"): continue - ''' Read basic network data ''' + """ Read basic network data """ N = int(f.readline().strip()) density = float(f.readline().strip()) ratio = int(f.readline().strip()) @@ -35,12 +38,13 @@ def parse(fname, scenario_ix=None): K = int(f.readline().strip()) p = _toVector(f.readline().strip()) - if (scenario_ix is not None): - ''' Read the provided scenario ''' - if (scenario_ix < 0 or scenario_ix >= K): - raise ValueError('Provided scenario index ({}) could ' - 'not be found ({} total scenarios)'.format( - scenario_ix, K)) + if scenario_ix is not None: + """ Read the provided scenario """ + if scenario_ix < 0 or scenario_ix >= K: + raise ValueError( + "Provided scenario index ({}) could " + "not be found ({} total scenarios)".format(scenario_ix, K) + ) for k in range(scenario_ix): for _ in range(4): f.readline() @@ -50,7 +54,7 @@ def parse(fname, scenario_ix=None): b = _toVector(f.readline().strip(), dtype=np.float64) p = p[scenario_ix] else: - ''' Read all scenarios ''' + """ Read all scenarios """ d = [None for _ in range(K)] u = [None for _ in range(K)] b = [None for _ in range(K)] @@ -60,34 +64,36 @@ def parse(fname, scenario_ix=None): u[i] = _toMatrix(f.readline().strip(), dtype=np.float64) b[i] = _toVector(f.readline().strip(), dtype=np.float64) - ''' Construct edge list ''' - ix, iy = np.where(A>0) - el = [(ix[i],iy[i]) for i in range(len(ix))] + """ Construct edge list """ + ix, iy = np.where(A > 0) + el = [(ix[i], iy[i]) for i in range(len(ix))] data = { - 'N': N, - 'density': density, - 'ratio': ratio, - 'A': A, - 'c': c, - 'K': K, - 'p': p, - 'd': d, - 'u': u, - 'b': b, - 'el': el, + "N": N, + "density": density, + "ratio": ratio, + "A": A, + "c": c, + "K": K, + "p": p, + "d": d, + "u": u, + "b": b, + "el": el, } return data - + + def _toVector(line, dtype=np.float64): - return np.array(line.split(','), dtype=dtype) + return np.array(line.split(","), dtype=dtype) + def _toMatrix(line, dtype=np.float64): - rows = line.split(';') + rows = line.split(";") for i in range(len(rows)): - rows[i] = rows[i].split(',') + rows[i] = rows[i].split(",") return np.array(rows, dtype=dtype) -if __name__=='__main__': +if __name__ == "__main__": main() diff --git a/examples/run_all.py b/examples/run_all.py index e36d4851d..97c10b81e 100644 --- a/examples/run_all.py +++ b/examples/run_all.py @@ -37,11 +37,13 @@ if len(sys.argv) > 3: nouc = True if sys.argv[3] != "nouc": - raise RuntimeError("Third arg can only be nouc (you have {})".\ - format(sys.argv[3])) + raise RuntimeError( + "Third arg can only be nouc (you have {})".format(sys.argv[3]) + ) badguys = dict() + def egret_avail(): try: import egret @@ -50,22 +52,25 @@ def egret_avail(): path = str(egret.__path__) left = path.find("'") - right = 
path.find("'", left+1) - egretrootpath = path[left+1:right] + right = path.find("'", left + 1) + egretrootpath = path[left + 1 : right] egret_thirdparty_path = os.path.join(egretrootpath, "thirdparty") if os.path.exists(os.path.join(egret_thirdparty_path, "pglib-opf-master")): return True from egret.thirdparty.get_pglib_opf import get_pglib_opf + get_pglib_opf(egret_thirdparty_path) return True + def do_one(dirname, progname, np, argstring): - """ return the code""" + """return the code""" os.chdir(dirname) - runstring = "mpiexec {} -np {} python -u -m mpi4py {} {}".\ - format(mpiexec_arg, np, progname, argstring) + runstring = "mpiexec {} -np {} python -u -m mpi4py {} {}".format( + mpiexec_arg, np, progname, argstring + ) # The top process output seems to be cached by github actions # so we need oputput in the system call to help debug code = os.system("echo {} && {}".format(runstring, runstring)) @@ -74,50 +79,52 @@ def do_one(dirname, progname, np, argstring): badguys[dirname] = [runstring] else: badguys[dirname].append(runstring) - if '/' not in dirname: + if "/" not in dirname: os.chdir("..") else: - os.chdir("../..") # hack for one level of subdirectories + os.chdir("../..") # hack for one level of subdirectories return code + def time_one(ID, dirname, progname, np, argstring): - """ same as do_one, but also check the running time. - ID must be unique and ID.perf.csv will be(come) a local file name - and should be allowed to sit on your machine in your examples directory. - Do not record a time for a bad guy.""" + """same as do_one, but also check the running time. + ID must be unique and ID.perf.csv will be(come) a local file name + and should be allowed to sit on your machine in your examples directory. + Do not record a time for a bad guy.""" if ID in time_one.ID_check: raise RuntimeError(f"Duplicate time_one ID={ID}") else: time_one.ID_check.append(ID) - listfname = ID+".perf.csv" + listfname = ID + ".perf.csv" start = dt.now() code = do_one(dirname, progname, np, argstring) finish = dt.now() - runsecs = (finish-start).total_seconds() + runsecs = (finish - start).total_seconds() if code != 0: - return # Nothing to see here, folks. + return # Nothing to see here, folks. # get a reference time start = dt.now() - for i in range(int(1e7)): # don't change this unless you *really* have to + for i in range(int(1e7)): # don't change this unless you *really* have to if (i % 2) == 0: foo = i * i - bar = str(i)+"!" + bar = str(i) + "!" 
del foo del bar finish = dt.now() - refsecs = (finish-start).total_seconds() + refsecs = (finish - start).total_seconds() if os.path.isfile(listfname): timelistdf = pd.read_csv(listfname) timelistdf.loc[len(timelistdf.index)] = [str(finish), refsecs, runsecs] else: print(f"{listfname} will be created.") - timelistdf = pd.DataFrame([[finish, refsecs, runsecs]], - columns=["datetime", "reftime", "time"]) + timelistdf = pd.DataFrame( + [[finish, refsecs, runsecs]], columns=["datetime", "reftime", "time"] + ) # Quick look for trouble if len(timelistdf) > 0: @@ -128,11 +135,16 @@ def time_one(ID, dirname, progname, np, argstring): lastscaled = lastrunsecs / lastrefsecs deltafrac = (thisscaled - lastscaled) / lastscaled if deltafrac > 0.1: - print(f"**** WARNING: {100*deltafrac}% time increase for {ID}, see {listfname}") + print( + f"**** WARNING: {100*deltafrac}% time increase for {ID}, see {listfname}" + ) timelistdf.to_csv(listfname, index=False) + + time_one.ID_check = list() + def do_one_mmw(dirname, runefstring, npyfile, mmwargstring): # assume that the dirname matches the module name @@ -140,15 +152,16 @@ def do_one_mmw(dirname, runefstring, npyfile, mmwargstring): # solve ef, save .npy file (file name hardcoded in progname at the moment) code = os.system("echo {} && {}".format(runefstring, runefstring)) - if code!=0: + if code != 0: if dirname not in badguys: badguys[dirname] = [runefstring] else: badguys[dirname].append(runefstring) # run mmw, remove .npy file else: - runstring = "python -m mpisppy.confidence_intervals.mmw_conf {} --xhatpath {} {}".\ - format(dirname, npyfile, mmwargstring) + runstring = "python -m mpisppy.confidence_intervals.mmw_conf {} --xhatpath {} {}".format( + dirname, npyfile, mmwargstring + ) code = os.system("echo {} && {}".format(runstring, runstring)) if code != 0: if dirname not in badguys: @@ -159,195 +172,306 @@ def do_one_mmw(dirname, runefstring, npyfile, mmwargstring): os.remove(npyfile) os.chdir("..") -do_one("farmer", "farmer_ef.py", 1, - "1 3 {}".format(solver_name)) + +do_one("farmer", "farmer_ef.py", 1, "1 3 {}".format(solver_name)) # for farmer_cylinders, the first arg is num_scens and is required -do_one("farmer", "farmer_cylinders.py", 3, - "--num-scens 3 --bundles-per-rank=0 --max-iterations=50 --default-rho=1 --solver-name={} " - "--primal-dual-converger --primal-dual-converger-tol=0.5 --lagrangian --xhatshuffle " - "--intra-hub-conv-thresh -0.1 --rel-gap=1e-6".format(solver_name)) -do_one("farmer", "farmer_cylinders.py", 5, - "--num-scens 3 --bundles-per-rank=0 --max-iterations=50 --default-rho=1 --solver-name={} " - "--use-norm-rho-converger --use-norm-rho-updater --rel-gap=1e-6 --lagrangian --lagranger " - "--xhatshuffle --fwph --W-fname=out_ws.txt --Xbar-fname=out_xbars.txt " - "--ph-track-progress --track-convergence=4 --track-xbar=4 --track-nonants=4 " - "--track-duals=4".format(solver_name)) -do_one("farmer", "farmer_cylinders.py", 5, - "--num-scens 3 --bundles-per-rank=0 --max-iterations=50 --default-rho=1 --solver-name={} " - "--use-norm-rho-converger --use-norm-rho-updater --lagrangian --lagranger --xhatshuffle --fwph " - "--init-W-fname=out_ws.txt --init-Xbar-fname=out_xbars.txt --ph-track-progress --track-convergence=4 " "--track-xbar=4 --track-nonants=4 --track-duals=4 ".format(solver_name)) -do_one("farmer", "farmer_lshapedhub.py", 2, - "--num-scens 3 --bundles-per-rank=0 --max-iterations=50 " - "--solver-name={} --rel-gap=0.0 " - "--xhatlshaped --max-solver-threads=1".format(solver_name)) -do_one("farmer", "farmer_cylinders.py", 
3, - "--num-scens 3 --bundles-per-rank=0 --max-iterations=50 " - "--default-rho=1 " - "--solver-name={} --lagranger --xhatlooper".format(solver_name)) -do_one("farmer", "farmer_cylinders.py", 3, - "--num-scens 6 --bundles-per-rank=2 --max-iterations=50 " - "--default-rho=1 --lagrangian --xhatshuffle " - "--solver-name={}".format(solver_name)) -do_one("farmer", "farmer_cylinders.py", 4, - "--num-scens 6 --bundles-per-rank=2 --max-iterations=50 " - "--fwph-stop-check-tol 0.1 " - "--default-rho=1 --solver-name={} --lagrangian --xhatshuffle --fwph".format(solver_name)) -do_one("farmer", "farmer_cylinders.py", 2, - "--num-scens 6 --bundles-per-rank=2 --max-iterations=50 " - "--default-rho=1 " - "--solver-name={} --xhatshuffle".format(solver_name)) -do_one("farmer", "farmer_cylinders.py", 3, - "--num-scens 3 --bundles-per-rank=0 --max-iterations=1 " - "--default-rho=1 --tee-rank0-solves " - "--solver-name={} --lagrangian --xhatshuffle".format(solver_name)) -time_one("FarmerLinProx", "farmer", "farmer_cylinders.py", 3, - "--num-scens 3 --default-rho=1.0 --max-iterations=50 " - "--display-progress --rel-gap=0.0 --abs-gap=0.0 " - "--linearize-proximal-terms --proximal-linearization-tolerance=1.e-6 " - "--solver-name={} --lagrangian --xhatshuffle".format(solver_name)) +do_one( + "farmer", + "farmer_cylinders.py", + 3, + "--num-scens 3 --bundles-per-rank=0 --max-iterations=50 --default-rho=1 --solver-name={} " + "--primal-dual-converger --primal-dual-converger-tol=0.5 --lagrangian --xhatshuffle " + "--intra-hub-conv-thresh -0.1 --rel-gap=1e-6".format(solver_name), +) +do_one( + "farmer", + "farmer_cylinders.py", + 5, + "--num-scens 3 --bundles-per-rank=0 --max-iterations=50 --default-rho=1 --solver-name={} " + "--use-norm-rho-converger --use-norm-rho-updater --rel-gap=1e-6 --lagrangian --lagranger " + "--xhatshuffle --fwph --W-fname=out_ws.txt --Xbar-fname=out_xbars.txt " + "--ph-track-progress --track-convergence=4 --track-xbar=4 --track-nonants=4 " + "--track-duals=4".format(solver_name), +) +do_one( + "farmer", + "farmer_cylinders.py", + 5, + "--num-scens 3 --bundles-per-rank=0 --max-iterations=50 --default-rho=1 --solver-name={} " + "--use-norm-rho-converger --use-norm-rho-updater --lagrangian --lagranger --xhatshuffle --fwph " + "--init-W-fname=out_ws.txt --init-Xbar-fname=out_xbars.txt --ph-track-progress --track-convergence=4 " + "--track-xbar=4 --track-nonants=4 --track-duals=4 ".format(solver_name), +) +do_one( + "farmer", + "farmer_lshapedhub.py", + 2, + "--num-scens 3 --bundles-per-rank=0 --max-iterations=50 " + "--solver-name={} --rel-gap=0.0 " + "--xhatlshaped --max-solver-threads=1".format(solver_name), +) +do_one( + "farmer", + "farmer_cylinders.py", + 3, + "--num-scens 3 --bundles-per-rank=0 --max-iterations=50 " + "--default-rho=1 " + "--solver-name={} --lagranger --xhatlooper".format(solver_name), +) +do_one( + "farmer", + "farmer_cylinders.py", + 3, + "--num-scens 6 --bundles-per-rank=2 --max-iterations=50 " + "--default-rho=1 --lagrangian --xhatshuffle " + "--solver-name={}".format(solver_name), +) +do_one( + "farmer", + "farmer_cylinders.py", + 4, + "--num-scens 6 --bundles-per-rank=2 --max-iterations=50 " + "--fwph-stop-check-tol 0.1 " + "--default-rho=1 --solver-name={} --lagrangian --xhatshuffle --fwph".format( + solver_name + ), +) +do_one( + "farmer", + "farmer_cylinders.py", + 2, + "--num-scens 6 --bundles-per-rank=2 --max-iterations=50 " + "--default-rho=1 " + "--solver-name={} --xhatshuffle".format(solver_name), +) +do_one( + "farmer", + "farmer_cylinders.py", + 3, + 
"--num-scens 3 --bundles-per-rank=0 --max-iterations=1 " + "--default-rho=1 --tee-rank0-solves " + "--solver-name={} --lagrangian --xhatshuffle".format(solver_name), +) +time_one( + "FarmerLinProx", + "farmer", + "farmer_cylinders.py", + 3, + "--num-scens 3 --default-rho=1.0 --max-iterations=50 " + "--display-progress --rel-gap=0.0 --abs-gap=0.0 " + "--linearize-proximal-terms --proximal-linearization-tolerance=1.e-6 " + "--solver-name={} --lagrangian --xhatshuffle".format(solver_name), +) do_one("farmer/from_pysp", "concrete_ampl.py", 1, solver_name) do_one("farmer/from_pysp", "abstract.py", 1, solver_name) -do_one("farmer", - "farmer_cylinders.py", - 2, - f"--num-scens 3 --max-iterations=10 --default-rho=1.0 --display-progress --bundles-per-rank=0 --xhatshuffle --aph-gamma=1.0 --aph-nu=1.0 --aph-frac-needed=1.0 --aph-dispatch-frac=1.0 --abs-gap=1 --aph-sleep-seconds=0.01 --run-async --solver-name={solver_name}") -do_one("farmer", - "farmer_cylinders.py", - 2, - f"--num-scens 3 --max-iterations=10 --default-rho=1.0 --display-progress --bundles-per-rank=0 --xhatlooper --aph-gamma=1.0 --aph-nu=1.0 --aph-frac-needed=1.0 --aph-dispatch-frac=0.25 --abs-gap=1 --display-convergence-detail --aph-sleep-seconds=0.01 --run-async --solver-name={solver_name}") -do_one("farmer", - "farmer_cylinders.py", - 2, - f"--num-scens 30 --max-iterations=10 --default-rho=1.0 --display-progress --bundles-per-rank=0 --xhatlooper --aph-gamma=1.0 --aph-nu=1.0 --aph-frac-needed=1.0 --aph-dispatch-frac=1 --abs-gap=1 --aph-sleep-seconds=0.01 --run-async --bundles-per-rank=5 --solver-name={solver_name}") - -do_one("farmer", - "farmer_cylinders.py", 4, - f"--num-scens 3 --bundles-per-rank=0 --max-iterations=50 --default-rho=1 --solver-name={solver_name} --lagrangian --xhatshuffle --fwph --max-stalled-iters 1") - -do_one("farmer", - "farmer_cylinders.py", - 2, - f"--num-scens 30 --max-iterations=10 --default-rho=1.0 --display-progress --bundles-per-rank=0 --xhatshuffle --aph-gamma=1.0 --aph-nu=1.0 --aph-frac-needed=1.0 --aph-dispatch-frac=0.5 --abs-gap=1 --aph-sleep-seconds=0.01 --run-async --bundles-per-rank=5 --solver-name={solver_name}") - -do_one("farmer", - "farmer_ama.py", - 3, - f"--num-scens=10 --crops-multiplier=3 --farmer-with-integer --EF-solver-name={solver_name}") - -do_one("farmer", - "farmer_seqsampling.py", - 1, - f"--num-scens 3 --crops-multiplier=1 --EF-solver-name={solver_name} " - "--BM-h 2 --BM-q 1.3 --confidence-level 0.95 --BM-vs-BPL BM") - -do_one("farmer", - "farmer_seqsampling.py", - 1, - f"--num-scens 3 --crops-multiplier=1 --EF-solver-name={solver_name} " - "--BPL-c0 25 --BPL-eps 100 --confidence-level 0.95 --BM-vs-BPL BPL") - -do_one("netdes", "netdes_cylinders.py", 5, - "--max-iterations=3 --instance-name=network-10-20-L-01 " - "--solver-name={} --rel-gap=0.0 --default-rho=1 --presolve " - "--slammax --lagrangian --xhatshuffle --cross-scenario-cuts --max-solver-threads=2".format(solver_name)) +do_one( + "farmer", + "farmer_cylinders.py", + 2, + f"--num-scens 3 --max-iterations=10 --default-rho=1.0 --display-progress --bundles-per-rank=0 --xhatshuffle --aph-gamma=1.0 --aph-nu=1.0 --aph-frac-needed=1.0 --aph-dispatch-frac=1.0 --abs-gap=1 --aph-sleep-seconds=0.01 --run-async --solver-name={solver_name}", +) +do_one( + "farmer", + "farmer_cylinders.py", + 2, + f"--num-scens 3 --max-iterations=10 --default-rho=1.0 --display-progress --bundles-per-rank=0 --xhatlooper --aph-gamma=1.0 --aph-nu=1.0 --aph-frac-needed=1.0 --aph-dispatch-frac=0.25 --abs-gap=1 --display-convergence-detail 
--aph-sleep-seconds=0.01 --run-async --solver-name={solver_name}", +) +do_one( + "farmer", + "farmer_cylinders.py", + 2, + f"--num-scens 30 --max-iterations=10 --default-rho=1.0 --display-progress --bundles-per-rank=0 --xhatlooper --aph-gamma=1.0 --aph-nu=1.0 --aph-frac-needed=1.0 --aph-dispatch-frac=1 --abs-gap=1 --aph-sleep-seconds=0.01 --run-async --bundles-per-rank=5 --solver-name={solver_name}", +) + +do_one( + "farmer", + "farmer_cylinders.py", + 4, + f"--num-scens 3 --bundles-per-rank=0 --max-iterations=50 --default-rho=1 --solver-name={solver_name} --lagrangian --xhatshuffle --fwph --max-stalled-iters 1", +) + +do_one( + "farmer", + "farmer_cylinders.py", + 2, + f"--num-scens 30 --max-iterations=10 --default-rho=1.0 --display-progress --bundles-per-rank=0 --xhatshuffle --aph-gamma=1.0 --aph-nu=1.0 --aph-frac-needed=1.0 --aph-dispatch-frac=0.5 --abs-gap=1 --aph-sleep-seconds=0.01 --run-async --bundles-per-rank=5 --solver-name={solver_name}", +) + +do_one( + "farmer", + "farmer_ama.py", + 3, + f"--num-scens=10 --crops-multiplier=3 --farmer-with-integer --EF-solver-name={solver_name}", +) + +do_one( + "farmer", + "farmer_seqsampling.py", + 1, + f"--num-scens 3 --crops-multiplier=1 --EF-solver-name={solver_name} " + "--BM-h 2 --BM-q 1.3 --confidence-level 0.95 --BM-vs-BPL BM", +) + +do_one( + "farmer", + "farmer_seqsampling.py", + 1, + f"--num-scens 3 --crops-multiplier=1 --EF-solver-name={solver_name} " + "--BPL-c0 25 --BPL-eps 100 --confidence-level 0.95 --BM-vs-BPL BPL", +) + +do_one( + "netdes", + "netdes_cylinders.py", + 5, + "--max-iterations=3 --instance-name=network-10-20-L-01 " + "--solver-name={} --rel-gap=0.0 --default-rho=1 --presolve " + "--slammax --lagrangian --xhatshuffle --cross-scenario-cuts --max-solver-threads=2".format( + solver_name + ), +) # sizes is slow for xpress so try linearizing the proximal term. 
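The --linearize-proximal-terms flag used by several runs below replaces PH's quadratic proximal term (rho/2)(x - xbar)^2 with tangent (outer-approximation) cuts, which keeps the subproblems MIP-friendly. The following is only a sketch of the underlying idea, not mpi-sppy's implementation; the sample points are chosen arbitrarily:

def proximal_cuts(xbar, rho, sample_points):
    # Tangent cuts under-approximating f(x) = (rho / 2) * (x - xbar) ** 2.
    # Each (slope, intercept) pair satisfies f(x) >= slope * x + intercept.
    cuts = []
    for xk in sample_points:
        slope = rho * (xk - xbar)  # f'(xk)
        intercept = 0.5 * rho * (xk - xbar) ** 2 - slope * xk  # f(xk) - f'(xk) * xk
        cuts.append((slope, intercept))
    return cuts

xbar, rho, x = 2.0, 10.0, 3.5
cuts = proximal_cuts(xbar, rho, [0.0, 1.0, 2.0, 3.0, 4.0])
# The piecewise-linear value is the max over cuts; it under-estimates the quadratic.
print(max(s * x + b for s, b in cuts), 0.5 * rho * (x - xbar) ** 2)  # 10.0 11.25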
-do_one("sizes", - "sizes_cylinders.py", - 3, - "--linearize-proximal-terms " - "--num-scens=10 --bundles-per-rank=0 --max-iterations=5 " - "--default-rho=1 --lagrangian --xhatshuffle " - "--iter0-mipgap=0.01 --iterk-mipgap=0.001 " - "--solver-name={}".format(solver_name)) - -do_one("sizes", - "sizes_cylinders.py", - 3, - "--linearize-proximal-terms " - "--num-scens=10 --bundles-per-rank=0 --max-iterations=5 " - "--default-rho=1 --lagrangian --xhatxbar " - "--iter0-mipgap=0.01 --iterk-mipgap=0.001 " - "--solver-name={}".format(solver_name)) - -do_one("sizes", - "sizes_cylinders.py", - 4, - "--num-scens=3 --bundles-per-rank=0 --max-iterations=5 " - "--iter0-mipgap=0.01 --iterk-mipgap=0.005 " - "--default-rho=1 --lagrangian --xhatshuffle --fwph " - "--solver-name={} --display-progress".format(solver_name)) +do_one( + "sizes", + "sizes_cylinders.py", + 3, + "--linearize-proximal-terms " + "--num-scens=10 --bundles-per-rank=0 --max-iterations=5 " + "--default-rho=1 --lagrangian --xhatshuffle " + "--iter0-mipgap=0.01 --iterk-mipgap=0.001 " + "--solver-name={}".format(solver_name), +) + +do_one( + "sizes", + "sizes_cylinders.py", + 3, + "--linearize-proximal-terms " + "--num-scens=10 --bundles-per-rank=0 --max-iterations=5 " + "--default-rho=1 --lagrangian --xhatxbar " + "--iter0-mipgap=0.01 --iterk-mipgap=0.001 " + "--solver-name={}".format(solver_name), +) + +do_one( + "sizes", + "sizes_cylinders.py", + 4, + "--num-scens=3 --bundles-per-rank=0 --max-iterations=5 " + "--iter0-mipgap=0.01 --iterk-mipgap=0.005 " + "--default-rho=1 --lagrangian --xhatshuffle --fwph " + "--solver-name={} --display-progress".format(solver_name), +) do_one("sizes", "sizes_pysp.py", 1, "3 {}".format(solver_name)) -do_one("sslp", - "sslp_cylinders.py", - 4, - "--instance-name=sslp_15_45_10 --bundles-per-rank=2 " - "--max-iterations=5 --default-rho=1 " - "--subgradient --xhatshuffle --fwph " - "--linearize-proximal-terms " - "--rel-gap=0.0 " - "--solver-name={} --fwph-stop-check-tol 0.01".format(solver_name)) - -do_one("sslp", - "sslp_cylinders.py", - 3, - "--instance-name=sslp_15_45_10 --bundles-per-rank=0 " - "--max-iterations=5 --default-rho=1 " - "--reduced-costs --rc-fixer --xhatshuffle " - "--linearize-proximal-terms " - "--rel-gap=0.0 " - "--solver-name={}".format(solver_name)) - -do_one("hydro", "hydro_cylinders.py", 3, - "--branching-factors \"3 3\" --bundles-per-rank=0 --max-iterations=100 " - "--default-rho=1 --xhatshuffle --lagrangian " - "--solver-name={}".format(solver_name)) -do_one("hydro", "hydro_cylinders.py", 3, - "--branching-factors \'3 3\' --bundles-per-rank=0 --max-iterations=100 " - "--default-rho=1 --xhatshuffle --lagrangian " - "--solver-name={} --stage2EFsolvern={}".format(solver_name, solver_name)) - -do_one("hydro", "hydro_cylinders_pysp.py", 3, - "--bundles-per-rank=0 --max-iterations=100 " - "--default-rho=1 --xhatshuffle --lagrangian " - "--solver-name={}".format(solver_name)) +do_one( + "sslp", + "sslp_cylinders.py", + 4, + "--instance-name=sslp_15_45_10 --bundles-per-rank=2 " + "--max-iterations=5 --default-rho=1 " + "--subgradient --xhatshuffle --fwph " + "--linearize-proximal-terms " + "--rel-gap=0.0 " + "--solver-name={} --fwph-stop-check-tol 0.01".format(solver_name), +) + +do_one( + "sslp", + "sslp_cylinders.py", + 3, + "--instance-name=sslp_15_45_10 --bundles-per-rank=0 " + "--max-iterations=5 --default-rho=1 " + "--reduced-costs --rc-fixer --xhatshuffle " + "--linearize-proximal-terms " + "--rel-gap=0.0 " + "--solver-name={}".format(solver_name), +) + +do_one( + "hydro", + 
"hydro_cylinders.py", + 3, + '--branching-factors "3 3" --bundles-per-rank=0 --max-iterations=100 ' + "--default-rho=1 --xhatshuffle --lagrangian " + "--solver-name={}".format(solver_name), +) +do_one( + "hydro", + "hydro_cylinders.py", + 3, + "--branching-factors '3 3' --bundles-per-rank=0 --max-iterations=100 " + "--default-rho=1 --xhatshuffle --lagrangian " + "--solver-name={} --stage2EFsolvern={}".format(solver_name, solver_name), +) + +do_one( + "hydro", + "hydro_cylinders_pysp.py", + 3, + "--bundles-per-rank=0 --max-iterations=100 " + "--default-rho=1 --xhatshuffle --lagrangian " + "--solver-name={}".format(solver_name), +) do_one("hydro", "hydro_ef.py", 1, solver_name) # the next might hang with 6 ranks -do_one("aircond", "aircond_cylinders.py", 3, - "--branching-factors \'4 3 2\' --bundles-per-rank=0 --max-iterations=100 " - "--default-rho=1 --lagrangian --xhatshuffle " - "--solver-name={}".format(solver_name)) -do_one("aircond", "aircond_ama.py", 3, - "--branching-factors \'3 3\' --bundles-per-rank=0 --max-iterations=100 " - "--default-rho=1 --lagrangian --xhatshuffle " - "--solver-name={}".format(solver_name)) -time_one("AircondAMA", "aircond", "aircond_ama.py", 3, - "--branching-factors \'3 3\' --bundles-per-rank=0 --max-iterations=100 " - "--default-rho=1 --lagrangian --xhatshuffle " - "--solver-name={}".format(solver_name)) - -do_one("aircond", - "aircond_seqsampling.py", - 1, - f"--branching-factors \'3 2\' --seed 1134 --solver-name={solver_name} " - "--BM-h 2 --BM-q 1.3 --confidence-level 0.95 --BM-vs-BPL BM") -do_one("aircond", - "aircond_seqsampling.py", - 1, - f"--branching-factors \'3 2\' --seed 1134 --solver-name={solver_name} " - "--BPL-c0 25 --BPL-eps 100 --confidence-level 0.95 --BM-vs-BPL BPL") - -#=========MMW TESTS========== +do_one( + "aircond", + "aircond_cylinders.py", + 3, + "--branching-factors '4 3 2' --bundles-per-rank=0 --max-iterations=100 " + "--default-rho=1 --lagrangian --xhatshuffle " + "--solver-name={}".format(solver_name), +) +do_one( + "aircond", + "aircond_ama.py", + 3, + "--branching-factors '3 3' --bundles-per-rank=0 --max-iterations=100 " + "--default-rho=1 --lagrangian --xhatshuffle " + "--solver-name={}".format(solver_name), +) +time_one( + "AircondAMA", + "aircond", + "aircond_ama.py", + 3, + "--branching-factors '3 3' --bundles-per-rank=0 --max-iterations=100 " + "--default-rho=1 --lagrangian --xhatshuffle " + "--solver-name={}".format(solver_name), +) + +do_one( + "aircond", + "aircond_seqsampling.py", + 1, + f"--branching-factors '3 2' --seed 1134 --solver-name={solver_name} " + "--BM-h 2 --BM-q 1.3 --confidence-level 0.95 --BM-vs-BPL BM", +) +do_one( + "aircond", + "aircond_seqsampling.py", + 1, + f"--branching-factors '3 2' --seed 1134 --solver-name={solver_name} " + "--BPL-c0 25 --BPL-eps 100 --confidence-level 0.95 --BM-vs-BPL BPL", +) + +# =========MMW TESTS========== # do_one_mmw is special -do_one_mmw("farmer", f"python farmer_ef.py 3 3 {solver_name}", "farmer_cyl_nonants.npy", f"--MMW-num-batches=5 --confidence-level 0.95 --MMW-batch-size=10 --start-scen 4 --EF-solver-name={solver_name}") +do_one_mmw( + "farmer", + f"python farmer_ef.py 3 3 {solver_name}", + "farmer_cyl_nonants.npy", + f"--MMW-num-batches=5 --confidence-level 0.95 --MMW-batch-size=10 --start-scen 4 --EF-solver-name={solver_name}", +) -#============================ +# ============================ if egret_avail(): do_one("acopf3", "ccopf2wood.py", 2, f"2 3 2 0 {solver_name}") @@ -356,81 +480,116 @@ def do_one_mmw(dirname, runefstring, npyfile, mmwargstring): # 
sizes kills the github tests using xpress # so we use linearized proximal terms -do_one("sizes", - "special_cylinders.py", - 3, - "--lagrangian --xhatshuffle " - "--num-scens=3 --bundles-per-rank=0 --max-iterations=5 " - "--iter0-mipgap=0.01 --iterk-mipgap=0.001 --linearize-proximal-terms " - "--default-rho=1 --solver-name={} --display-progress".format(solver_name)) +do_one( + "sizes", + "special_cylinders.py", + 3, + "--lagrangian --xhatshuffle " + "--num-scens=3 --bundles-per-rank=0 --max-iterations=5 " + "--iter0-mipgap=0.01 --iterk-mipgap=0.001 --linearize-proximal-terms " + "--default-rho=1 --solver-name={} --display-progress".format(solver_name), +) if not nouc and egret_avail(): print("\nSlow runs ahead...\n") # 3-scenario UC - do_one("uc", "uc_ef.py", 1, solver_name+" 3") - - do_one("uc", "gradient_uc_cylinders.py", 15, - "--bundles-per-rank=0 --max-iterations=100 --default-rho=1 " - "--xhatshuffle --ph-ob --num-scens=5 --max-solver-threads=2 " - "--lagrangian-iter0-mipgap=1e-7 --ph-mipgaps-json=phmipgaps.json " - f"--solver-name={solver_name} --xhatpath uc_cyl_nonants.npy " - "--rel-gap 0.00001 --abs-gap=1 --intra-hub-conv-thresh=-1 " - "--grad-rho-setter --grad-order-stat 0.5 " - "--grad-dynamic-primal-crit") - - do_one("uc", "uc_cylinders.py", 4, - "--bundles-per-rank=0 --max-iterations=2 " - "--default-rho=1 --num-scens=3 --max-solver-threads=2 " - "--lagrangian-iter0-mipgap=1e-7 --fwph " - " --lagrangian --xhatshuffle " - "--ph-mipgaps-json=phmipgaps.json " - "--solver-name={}".format(solver_name)) - do_one("uc", "uc_lshaped.py", 2, - "--bundles-per-rank=0 --max-iterations=5 " - "--default-rho=1 --num-scens=3 --xhatlshaped " - "--solver-name={} --max-solver-threads=1".format(solver_name)) - do_one("uc", "uc_cylinders.py", 3, - "--run-aph --bundles-per-rank=0 --max-iterations=2 " - "--default-rho=1 --num-scens=3 --max-solver-threads=2 " - "--lagrangian-iter0-mipgap=1e-7 --lagrangian --xhatshuffle " - "--ph-mipgaps-json=phmipgaps.json " - "--solver-name={}".format(solver_name)) + do_one("uc", "uc_ef.py", 1, solver_name + " 3") + + do_one( + "uc", + "gradient_uc_cylinders.py", + 15, + "--bundles-per-rank=0 --max-iterations=100 --default-rho=1 " + "--xhatshuffle --ph-ob --num-scens=5 --max-solver-threads=2 " + "--lagrangian-iter0-mipgap=1e-7 --ph-mipgaps-json=phmipgaps.json " + f"--solver-name={solver_name} --xhatpath uc_cyl_nonants.npy " + "--rel-gap 0.00001 --abs-gap=1 --intra-hub-conv-thresh=-1 " + "--grad-rho-setter --grad-order-stat 0.5 " + "--grad-dynamic-primal-crit", + ) + + do_one( + "uc", + "uc_cylinders.py", + 4, + "--bundles-per-rank=0 --max-iterations=2 " + "--default-rho=1 --num-scens=3 --max-solver-threads=2 " + "--lagrangian-iter0-mipgap=1e-7 --fwph " + " --lagrangian --xhatshuffle " + "--ph-mipgaps-json=phmipgaps.json " + "--solver-name={}".format(solver_name), + ) + do_one( + "uc", + "uc_lshaped.py", + 2, + "--bundles-per-rank=0 --max-iterations=5 " + "--default-rho=1 --num-scens=3 --xhatlshaped " + "--solver-name={} --max-solver-threads=1".format(solver_name), + ) + do_one( + "uc", + "uc_cylinders.py", + 3, + "--run-aph --bundles-per-rank=0 --max-iterations=2 " + "--default-rho=1 --num-scens=3 --max-solver-threads=2 " + "--lagrangian-iter0-mipgap=1e-7 --lagrangian --xhatshuffle " + "--ph-mipgaps-json=phmipgaps.json " + "--solver-name={}".format(solver_name), + ) # as of May 2022, this one works well, but outputs some crazy messages - do_one("uc", "uc_ama.py", 3, - "--bundles-per-rank=0 --max-iterations=2 " - "--default-rho=1 --num-scens=3 " - "--fixer-tol=1e-2 
--lagranger --xhatshuffle " - "--solver-name={}".format(solver_name)) + do_one( + "uc", + "uc_ama.py", + 3, + "--bundles-per-rank=0 --max-iterations=2 " + "--default-rho=1 --num-scens=3 " + "--fixer-tol=1e-2 --lagranger --xhatshuffle " + "--solver-name={}".format(solver_name), + ) # 10-scenario UC - time_one("UC_cylinder10scen", "uc", "uc_cylinders.py", 3, - "--bundles-per-rank=5 --max-iterations=2 " - "--default-rho=1 --num-scens=10 --max-solver-threads=2 " - "--lagrangian-iter0-mipgap=1e-7 " - "--ph-mipgaps-json=phmipgaps.json " - "--lagrangian --xhatshuffle " - "--solver-name={}".format(solver_name)) + time_one( + "UC_cylinder10scen", + "uc", + "uc_cylinders.py", + 3, + "--bundles-per-rank=5 --max-iterations=2 " + "--default-rho=1 --num-scens=10 --max-solver-threads=2 " + "--lagrangian-iter0-mipgap=1e-7 " + "--ph-mipgaps-json=phmipgaps.json " + "--lagrangian --xhatshuffle " + "--solver-name={}".format(solver_name), + ) # note that fwph takes a long time to do one iteration - do_one("uc", "uc_cylinders.py", 4, - "--bundles-per-rank=5 --max-iterations=2 " - "--default-rho=1 --num-scens=10 --max-solver-threads=2 " - " --lagrangian --xhatshuffle --fwph " - "--lagrangian-iter0-mipgap=1e-7 " - "--ph-mipgaps-json=phmipgaps.json " - "--solver-name={}".format(solver_name)) - do_one("uc", "uc_cylinders.py", 5, - "--bundles-per-rank=5 --max-iterations=2 " - "--default-rho=1 --num-scens=10 --max-solver-threads=2 " - " --lagrangian --xhatshuffle --fwph " - "--lagrangian-iter0-mipgap=1e-7 --cross-scenario-cuts " - "--ph-mipgaps-json=phmipgaps.json --cross-scenario-iter-cnt=4 " - "--solver-name={}".format(solver_name)) + do_one( + "uc", + "uc_cylinders.py", + 4, + "--bundles-per-rank=5 --max-iterations=2 " + "--default-rho=1 --num-scens=10 --max-solver-threads=2 " + " --lagrangian --xhatshuffle --fwph " + "--lagrangian-iter0-mipgap=1e-7 " + "--ph-mipgaps-json=phmipgaps.json " + "--solver-name={}".format(solver_name), + ) + do_one( + "uc", + "uc_cylinders.py", + 5, + "--bundles-per-rank=5 --max-iterations=2 " + "--default-rho=1 --num-scens=10 --max-solver-threads=2 " + " --lagrangian --xhatshuffle --fwph " + "--lagrangian-iter0-mipgap=1e-7 --cross-scenario-cuts " + "--ph-mipgaps-json=phmipgaps.json --cross-scenario-iter-cnt=4 " + "--solver-name={}".format(solver_name), + ) # this one takes a long time, so I moved it into the uc section do_one("sizes", "sizes_demo.py", 1, " {}".format(solver_name)) if len(badguys) > 0: print("\nBad Guys:") - for i,v in badguys.items(): + for i, v in badguys.items(): print("Directory={}".format(i)) for c in v: print(" {}".format(c)) diff --git a/examples/sizes/config/rhosetter.py b/examples/sizes/config/rhosetter.py index 4d2cc810b..fb0eaa8bf 100644 --- a/examples/sizes/config/rhosetter.py +++ b/examples/sizes/config/rhosetter.py @@ -10,34 +10,36 @@ # # Pyomo: Python Optimization Modeling Objects # Copyright 2017 National Technology and Engineering Solutions of Sandia, LLC -# Under the terms of Contract DE-NA0003525 with National Technology and -# Engineering Solutions of Sandia, LLC, the U.S. Government retains certain +# Under the terms of Contract DE-NA0003525 with National Technology and +# Engineering Solutions of Sandia, LLC, the U.S. Government retains certain # rights in this software. # This software is distributed under the 3-clause BSD License. 
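The rho setter reformatted in the next hunk encodes a common PH heuristic: each variable's rho is proportional to its objective cost (here cost * 0.001). Stripped of the PySP tree plumbing, the rule is simply the following (function and dictionary names are hypothetical):

def cost_proportional_rho(unit_costs, factor=0.001):
    # rho_i = cost_i * factor -- the same rule ph_rhosetter_callback applies
    # below via setRhoOneScenario, one variable at a time.
    return {var: cost * factor for var, cost in unit_costs.items()}

print(cost_proportional_rho({"NumProducedFirstStage[1]": 250.0}))
# {'NumProducedFirstStage[1]': 0.25}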
# ___________________________________________________________________________ -#@pyomobook: +# @pyomobook: def ph_rhosetter_callback(ph, scenario_tree, scenario): - - MyRhoFactor = 1.0 + MyRhoFactor = 1.0 - root_node = scenario_tree.findRootNode() + root_node = scenario_tree.findRootNode() - si = scenario._instance - sm = si._ScenarioTreeSymbolMap + si = scenario._instance + sm = si._ScenarioTreeSymbolMap - for i in si.ProductSizes: + for i in si.ProductSizes: + ph.setRhoOneScenario( + root_node, + scenario, + sm.getSymbol(si.NumProducedFirstStage[i]), + si.UnitProductionCosts[i] * MyRhoFactor * 0.001, + ) - ph.setRhoOneScenario( - root_node, - scenario, - sm.getSymbol(si.NumProducedFirstStage[i]), - si.UnitProductionCosts[i] * MyRhoFactor * 0.001) + for j in si.ProductSizes: + if j <= i: + ph.setRhoOneScenario( + root_node, + scenario, + sm.getSymbol(si.NumUnitsCutFirstStage[i, j]), + si.UnitReductionCost * MyRhoFactor * 0.001, + ) - for j in si.ProductSizes: - if j <= i: - ph.setRhoOneScenario( - root_node, - scenario, - sm.getSymbol(si.NumUnitsCutFirstStage[i,j]), - si.UnitReductionCost * MyRhoFactor * 0.001) -#@:pyomobook + +# @:pyomobook diff --git a/examples/sizes/sizes.py b/examples/sizes/sizes.py index 538d97ddb..b420db1a7 100644 --- a/examples/sizes/sizes.py +++ b/examples/sizes/sizes.py @@ -10,6 +10,7 @@ import models.ReferenceModel as ref import mpisppy.utils.sputils as sputils + def scenario_creator(scenario_name, scenario_count=None): if scenario_count not in (3, 10): raise RuntimeError( @@ -35,35 +36,44 @@ def scenario_creator(scenario_name, scenario_count=None): def scenario_denouement(rank, scenario_name, scenario): pass + ########## helper functions ######## -#========= -def scenario_names_creator(num_scens,start=None): + +# ========= +def scenario_names_creator(num_scens, start=None): # (only for Amalgamator): return the full list of num_scens scenario names # if start!=None, the list starts with the 'start' labeled scenario - if (start is None) : - start=0 - return [f"Scenario{i+1}" for i in range(start,start+num_scens)] + if start is None: + start = 0 + return [f"Scenario{i+1}" for i in range(start, start + num_scens)] -#========= +# ========= def inparser_adder(cfg): # add options unique to sizes cfg.num_scens_required() cfg.mip_options() -#========= +# ========= def kw_creator(cfg): # (for Amalgamator): linked to the scenario_creator and inparser_adder if cfg.num_scens not in (3, 10): - raise RuntimeError(f"num_scen must be 3 or 10; was {cfg.num_scens}") kwargs = {"scenario_count": cfg.num_scens} return kwargs -def sample_tree_scen_creator(sname, stage, sample_branching_factors, seed, - given_scenario=None, **scenario_creator_kwargs): - """ Create a scenario within a sample tree. Mainly for multi-stage and simple for two-stage. + +def sample_tree_scen_creator( + sname, + stage, + sample_branching_factors, + seed, + given_scenario=None, + **scenario_creator_kwargs, +): + """Create a scenario within a sample tree. Mainly for multi-stage and simple for two-stage.
(this function supports zhat and confidence interval code) Args: sname (string): scenario name to be created @@ -82,6 +92,7 @@ def sample_tree_scen_creator(sname, stage, sample_branching_factors, seed, sca["num_scens"] = sample_branching_factors[0] # two-stage problem return scenario_creator(sname, **sca) + ######## end helper functions ######### ########## a customized rho setter ############# @@ -91,8 +102,9 @@ def sample_tree_scen_creator(sname, stage, sample_branching_factors, seed, # a gradient based rho setter. # note that _rho_setter is a reserved name.... + def _rho_setter(scen, **kwargs): - """ rho values for the scenario. + """rho values for the scenario. Args: scen (pyo.ConcreteModel): the scenario Returns: @@ -103,7 +115,7 @@ def _rho_setter(scen, **kwargs): if "RF" in kwargs and isinstance(kwargs["RF"], float): RF = kwargs["RF"] - + cutrho = scen.UnitReductionCost * RF for i in scen.ProductSizes: @@ -120,7 +132,7 @@ def _rho_setter(scen, **kwargs): def id_fix_list_fct(s): - """ specify tuples used by the fixer. + """specify tuples used by the fixer. Args: s (ConcreteModel): the sizes instance. diff --git a/examples/sizes/sizes_cylinders.py b/examples/sizes/sizes_cylinders.py index 293480c96..b2da7c62b 100644 --- a/examples/sizes/sizes_cylinders.py +++ b/examples/sizes/sizes_cylinders.py @@ -13,9 +13,10 @@ from mpisppy.extensions.fixer import Fixer import mpisppy.utils.cfg_vanilla as vanilla + def _parse_args(): cfg = config.Config() - + cfg.popular_args() cfg.num_scens_required() # but not positional: you need --num-scens cfg.ph_args() @@ -31,8 +32,8 @@ def _parse_args(): cfg.parse_command_line("sizes_cylinders") return cfg + def main(): - cfg = _parse_args() num_scen = cfg.num_scens @@ -48,25 +49,27 @@ def main(): if num_scen not in (3, 10): raise RuntimeError(f"num_scen must be 3 or 10; was {num_scen}") - + scenario_creator_kwargs = {"scenario_count": num_scen} scenario_creator = sizes.scenario_creator scenario_denouement = sizes.scenario_denouement all_scenario_names = [f"Scenario{i+1}" for i in range(num_scen)] rho_setter = sizes._rho_setter - + if fixer: ph_ext = Fixer else: ph_ext = None # Things needed for vanilla cylinders - beans = (cfg, scenario_creator, scenario_denouement, all_scenario_names) + beans = (cfg, scenario_creator, scenario_denouement, all_scenario_names) # Vanilla PH hub - hub_dict = vanilla.ph_hub(*beans, - scenario_creator_kwargs=scenario_creator_kwargs, - ph_extensions=ph_ext, - rho_setter = rho_setter) + hub_dict = vanilla.ph_hub( + *beans, + scenario_creator_kwargs=scenario_creator_kwargs, + ph_extensions=ph_ext, + rho_setter=rho_setter, + ) if reduced_costs: vanilla.add_reduced_costs_fixer(hub_dict, cfg) @@ -79,11 +82,13 @@ def main(): } if cfg.default_rho is None: # since we are using a rho_setter anyway - hub_dict.opt_kwargs.options["defaultPHrho"] = 1 - + hub_dict.opt_kwargs.options["defaultPHrho"] = 1 + # FWPH spoke if fwph: - fw_spoke = vanilla.fwph_spoke(*beans, scenario_creator_kwargs=scenario_creator_kwargs) + fw_spoke = vanilla.fwph_spoke( + *beans, scenario_creator_kwargs=scenario_creator_kwargs + ) # Standard Lagrangian bound spoke if lagrangian: @@ -106,7 +111,7 @@ def main(): *beans, scenario_creator_kwargs=scenario_creator_kwargs, ) - + # xhat using xbar bound spoke if xhatxbar: xhatxbar_spoke = vanilla.xhatxbar_spoke( @@ -115,10 +120,12 @@ def main(): ) if reduced_costs: - reduced_costs_spoke = vanilla.reduced_costs_spoke(*beans, - scenario_creator_kwargs=scenario_creator_kwargs, - rho_setter = rho_setter) - + reduced_costs_spoke = 
vanilla.reduced_costs_spoke( + *beans, + scenario_creator_kwargs=scenario_creator_kwargs, + rho_setter=rho_setter, + ) + list_of_spoke_dict = list() if fwph: list_of_spoke_dict.append(fw_spoke) diff --git a/examples/sizes/sizes_demo.py b/examples/sizes/sizes_demo.py index 972c271bb..3aaab11e9 100644 --- a/examples/sizes/sizes_demo.py +++ b/examples/sizes/sizes_demo.py @@ -18,15 +18,11 @@ from mpisppy.extensions.mipgapper import Gapper from mpisppy.extensions.avgminmaxer import MinMaxAvg from mpisppy.extensions.wtracker_extension import Wtracker_extension -from sizes import scenario_creator, \ - scenario_denouement, \ - _rho_setter, \ - id_fix_list_fct +from sizes import scenario_creator, scenario_denouement, _rho_setter, id_fix_list_fct ScenCount = 10 # 3 or 10 if __name__ == "__main__": - if len(sys.argv) != 2: print("usage: python sizes_demo.py solver_name") quit() @@ -45,19 +41,21 @@ options["iter0_solver_options"] = {"mipgap": 0.01} # another way options["iterk_solver_options"] = {"mipgap": 0.005} - options["xhat_looper_options"] = {"xhat_solver_options":\ - options["iterk_solver_options"], - "scen_limit": 3, - "dump_prefix": "delme", - "csvname": "looper.csv"} - options["xhat_closest_options"] = {"xhat_solver_options":\ - options["iterk_solver_options"], - "csvname": "closest.csv"} - options["xhat_specific_options"] = {"xhat_solver_options": - options["iterk_solver_options"], - "xhat_scenario_dict": \ - {"ROOT": "Scenario3"}, - "csvname": "specific.csv"} + options["xhat_looper_options"] = { + "xhat_solver_options": options["iterk_solver_options"], + "scen_limit": 3, + "dump_prefix": "delme", + "csvname": "looper.csv", + } + options["xhat_closest_options"] = { + "xhat_solver_options": options["iterk_solver_options"], + "csvname": "closest.csv", + } + options["xhat_specific_options"] = { + "xhat_solver_options": options["iterk_solver_options"], + "xhat_scenario_dict": {"ROOT": "Scenario3"}, + "csvname": "specific.csv", + } fixoptions = {} fixoptions["verbose"] = True fixoptions["boundtol"] = 0.01 @@ -65,19 +63,15 @@ options["fixeroptions"] = fixoptions - options["gapperoptions"] = {"verbose": True, - "mipgapdict": {0: 0.01, - 1: 0.009, - 5: 0.005, - 10: 0.001}} - options["wtracker_options"] ={"wlen": 4, - "reportlen": 6, - "stdevthresh": 0.1} - + options["gapperoptions"] = { + "verbose": True, + "mipgapdict": {0: 0.01, 1: 0.009, 5: 0.005, 10: 0.001}, + } + options["wtracker_options"] = {"wlen": 4, "reportlen": 6, "stdevthresh": 0.1} all_scenario_names = list() for sn in range(ScenCount): - all_scenario_names.append("Scenario"+str(sn+1)) + all_scenario_names.append("Scenario" + str(sn + 1)) # end hardwire ######### EF ######## @@ -88,12 +82,12 @@ scenario_creator, scenario_creator_kwargs={"scenario_count": ScenCount}, ) - if 'persistent' in options["solver_name"]: + if "persistent" in options["solver_name"]: solver.set_instance(ef, symbolic_solver_labels=True) solver.options["mipgap"] = 0.01 results = solver.solve(ef, tee=options["verbose"]) - print('EF objective value:', pyo.value(ef.EF_Obj)) - #mpisppy.utils.sputils.ef_nonants_csv(ef, "vardump.csv") + print("EF objective value:", pyo.value(ef.EF_Obj)) + # mpisppy.utils.sputils.ef_nonants_csv(ef, "vardump.csv") #### first PH #### #####multi_ext = {"ext_classes": [Fixer, Gapper, XhatLooper, XhatClosest]} @@ -104,20 +98,20 @@ scenario_creator, scenario_denouement, scenario_creator_kwargs={"scenario_count": ScenCount}, - rho_setter=_rho_setter, + rho_setter=_rho_setter, extensions=MultiExtension, extension_kwargs=multi_ext, ) - + conv, 
obj, tbound = ph.ph_main() if ph.cylinder_rank == 0: - print ("Trival bound =",tbound) + print("Trivial bound =", tbound) - #print("Quitting early.") - #quit() + # print("Quitting early.") + # quit() ############ test W and xbar writers and special joint reader ############ - + newph = mpisppy.opt.ph.PH( options, all_scenario_names, @@ -126,12 +120,11 @@ scenario_creator_kwargs={"scenario_count": ScenCount}, ) - options["W_and_xbar_writer"] = {"Wcsvdir": "Wdir", - "xbarcsvdir": "xbardir"} + options["W_and_xbar_writer"] = {"Wcsvdir": "Wdir", "xbarcsvdir": "xbardir"} conv, obj, tbound = newph.ph_main() ##### - + newph = mpisppy.opt.ph.PH( options, all_scenario_names, @@ -140,15 +133,15 @@ scenario_creator_kwargs={"scenario_count": ScenCount}, ) - options["W_and_xbar_reader"] = {"Wcsvdir": "Wdir", - "xbarcsvdir": "xbardir"} + options["W_and_xbar_reader"] = {"Wcsvdir": "Wdir", "xbarcsvdir": "xbardir"} conv, obj, tbound = newph.ph_main() quit() ############################# test xhatspecific ############### from mpisppy.xhatspecific import XhatSpecific - print ("... testing xhat specific....") + + print("... testing xhat specific....") newph = mpisppy.opt.ph.PH( options, all_scenario_names, @@ -157,14 +150,13 @@ scenario_creator_kwargs={"scenario_count": ScenCount}, ) - options["xhat_specific_options"] = {"xhat_solver_options": - options["iterk_solver_options"], - "xhat_scenario_dict": \ - {"ROOT": "Scenario3"}, - "csvname": "specific.csv"} + options["xhat_specific_options"] = { + "xhat_solver_options": options["iterk_solver_options"], + "xhat_scenario_dict": {"ROOT": "Scenario3"}, + "csvname": "specific.csv", + } - conv = newph.ph_main(rho_setter=_rho_setter, - extensions=XhatSpecific) + conv = newph.ph_main(rho_setter=_rho_setter, extensions=XhatSpecific) ######### bundles ######### options["bundles_per_rank"] = 2 @@ -177,7 +169,7 @@ scenario_denouement, scenario_creator_kwargs={"scenario_count": ScenCount}, ) - + conv = ph.ph_main(rho_setter=_rho_setter) ### avg, min, max extension ##### @@ -193,8 +185,5 @@ ) ph.options["PHIterLimit"] = 3 - options["avgminmax_name"] = "FirstStageCost" + options["avgminmax_name"] = "FirstStageCost" conv, obj, bnd = ph.ph_main() - - - diff --git a/examples/sizes/sizes_ef.py b/examples/sizes/sizes_ef.py index bc403d677..aa2b294d5 100644 --- a/examples/sizes/sizes_ef.py +++ b/examples/sizes/sizes_ef.py @@ -18,15 +18,15 @@ all_scenario_names = list() for sn in range(ScenCount): - all_scenario_names.append("Scenario"+str(sn+1)) + all_scenario_names.append("Scenario" + str(sn + 1)) ef = mpisppy.utils.sputils.create_EF( all_scenario_names, scenario_creator, scenario_creator_kwargs={"scenario_count": ScenCount}, ) -if 'persistent' in solver.name: +if "persistent" in solver.name: solver.set_instance(ef, symbolic_solver_labels=True) solver.options["mipgap"] = 0.0001 results = solver.solve(ef, tee=True) -print('EF objective value:', pyo.value(ef.EF_Obj)) +print("EF objective value:", pyo.value(ef.EF_Obj)) diff --git a/examples/sizes/sizes_ph.py b/examples/sizes/sizes_ph.py index 8068bc377..1ac98963d 100644 --- a/examples/sizes/sizes_ph.py +++ b/examples/sizes/sizes_ph.py @@ -14,15 +14,14 @@ import mpisppy.opt.ph from mpisppy.convergers.primal_dual_converger import PrimalDualConverger from mpisppy.extensions.xhatclosest import XhatClosest -from sizes import scenario_creator, \ - scenario_denouement, \ - _rho_setter +from sizes import scenario_creator, scenario_denouement, _rho_setter if __name__ == "__main__": - if len(sys.argv) != 8: - print("usage: python sizes_ph.py 
scenNum maxiter rho smooth_type pvalue_or_pratio beta proxlin") + print( + "usage: python sizes_ph.py scenNum maxiter rho smooth_type pvalue_or_pratio beta proxlin" + ) print("eg: python sizes_ph.py 3 100 1 0 0.0 1.0 1") quit() ScenCount = int(sys.argv[1]) # 3 or 10 @@ -41,9 +40,12 @@ options["iter0_solver_options"] = {"mipgap": 0.01, "threads": 1} # another way options["iterk_solver_options"] = {"mipgap": 0.005, "threads": 1} - - options["xhat_closest_options"] = {"xhat_solver_options": options["iterk_solver_options"], "keep_solution":True} - options["primal_dual_converger_options"] = {"tol" : 1e-2} + + options["xhat_closest_options"] = { + "xhat_solver_options": options["iterk_solver_options"], + "keep_solution": True, + } + options["primal_dual_converger_options"] = {"tol": 1e-2} options["smoothed"] = int(sys.argv[4]) options["defaultPHp"] = float(sys.argv[5]) @@ -53,8 +55,7 @@ all_scenario_names = list() for sn in range(ScenCount): - all_scenario_names.append("Scenario"+str(sn+1)) - + all_scenario_names.append("Scenario" + str(sn + 1)) ph = mpisppy.opt.ph.PH( options, @@ -62,21 +63,20 @@ scenario_creator, scenario_denouement, scenario_creator_kwargs={"scenario_count": ScenCount}, - rho_setter=_rho_setter, - ph_converger = PrimalDualConverger, + rho_setter=_rho_setter, + ph_converger=PrimalDualConverger, extensions=XhatClosest, ) - + ph.ph_main() variables = ph.gather_var_values_to_rank0() - for (scenario_name, variable_name) in variables: + for scenario_name, variable_name in variables: variable_value = variables[scenario_name, variable_name] print(scenario_name, variable_name, variable_value) if ph.tree_solution_available: - print(f"Final objective from XhatClosest: {ph.extobject._final_xhat_closest_obj}") + print( + f"Final objective from XhatClosest: {ph.extobject._final_xhat_closest_obj}" + ) else: print(f"Final objective from XhatClosest: {float('inf')}") - - - diff --git a/examples/sizes/sizes_pysp.py b/examples/sizes/sizes_pysp.py index 59eec904b..876244dff 100644 --- a/examples/sizes/sizes_pysp.py +++ b/examples/sizes/sizes_pysp.py @@ -12,13 +12,17 @@ from mpisppy.utils.pysp_model import PySPModel from mpisppy.opt.ef import ExtensiveForm + def _print_usage(): - print('Usage: "sizes_pysp.py num_scen solver" where num_scen is 3 or 10 and solver is a pyomo solver name') + print( + 'Usage: "sizes_pysp.py num_scen solver" where num_scen is 3 or 10 and solver is a pyomo solver name' + ) + if len(sys.argv) < 3: _print_usage() sys.exit() -elif int(sys.argv[1]) not in [3,10]: +elif int(sys.argv[1]) not in [3, 10]: _print_usage() sys.exit() @@ -35,15 +39,18 @@ def _print_usage(): num_scen = int(sys.argv[1]) solver = sys.argv[2] -sizes = PySPModel(model='./models/ReferenceModel.py', - scenario_tree=f'./SIZES{num_scen}/ScenarioStructure.dat', - ) +sizes = PySPModel( + model="./models/ReferenceModel.py", + scenario_tree=f"./SIZES{num_scen}/ScenarioStructure.dat", +) -ef = ExtensiveForm(options={'solver':solver}, - all_scenario_names=sizes.all_scenario_names, - scenario_creator=sizes.scenario_creator, - model_name='sizes_EF') +ef = ExtensiveForm( + options={"solver": solver}, + all_scenario_names=sizes.all_scenario_names, + scenario_creator=sizes.scenario_creator, + model_name="sizes_EF", +) ef.solve_extensive_form(tee=True) diff --git a/examples/sizes/special_cylinders.py b/examples/sizes/special_cylinders.py index 6efc51e5b..524e03d32 100644 --- a/examples/sizes/special_cylinders.py +++ b/examples/sizes/special_cylinders.py @@ -14,6 +14,7 @@ from mpisppy.utils import config import 
mpisppy.utils.cfg_vanilla as vanilla + def _parse_args(): cfg = config.Config() @@ -33,7 +34,6 @@ def _parse_args(): def main(): - cfg = _parse_args() num_scen = cfg.num_scens @@ -63,11 +63,13 @@ def main(): # Things needed for vanilla cylinders beans = (cfg, scenario_creator, scenario_denouement, all_scenario_names) # Vanilla PH hub - hub_dict = vanilla.ph_hub(*beans, - scenario_creator_kwargs=scenario_creator_kwargs, - ph_extensions=ph_ext, - rho_setter = rho_setter, - variable_probability = variable_probability) + hub_dict = vanilla.ph_hub( + *beans, + scenario_creator_kwargs=scenario_creator_kwargs, + ph_extensions=ph_ext, + rho_setter=rho_setter, + variable_probability=variable_probability, + ) if fixer: hub_dict["opt_kwargs"]["options"]["fixeroptions"] = { @@ -81,21 +83,29 @@ def main(): # FWPH spoke if fwph: - fw_spoke = vanilla.fwph_spoke(*beans, scenario_creator_kwargs=scenario_creator_kwargs) + fw_spoke = vanilla.fwph_spoke( + *beans, scenario_creator_kwargs=scenario_creator_kwargs + ) # Standard Lagrangian bound spoke if lagrangian: - lagrangian_spoke = vanilla.lagrangian_spoke(*beans, - scenario_creator_kwargs=scenario_creator_kwargs, - rho_setter = rho_setter) + lagrangian_spoke = vanilla.lagrangian_spoke( + *beans, + scenario_creator_kwargs=scenario_creator_kwargs, + rho_setter=rho_setter, + ) # xhat looper bound spoke if xhatlooper: - xhatlooper_spoke = vanilla.xhatlooper_spoke(*beans, scenario_creator_kwargs=scenario_creator_kwargs) + xhatlooper_spoke = vanilla.xhatlooper_spoke( + *beans, scenario_creator_kwargs=scenario_creator_kwargs + ) # xhat shuffle bound spoke if xhatshuffle: - xhatshuffle_spoke = vanilla.xhatshuffle_spoke(*beans, scenario_creator_kwargs=scenario_creator_kwargs) + xhatshuffle_spoke = vanilla.xhatshuffle_spoke( + *beans, scenario_creator_kwargs=scenario_creator_kwargs + ) list_of_spoke_dict = list() if fwph: diff --git a/examples/sizes/special_sizes.py b/examples/sizes/special_sizes.py index 8bacb2d91..f8eee56c2 100644 --- a/examples/sizes/special_sizes.py +++ b/examples/sizes/special_sizes.py @@ -11,6 +11,7 @@ import models.ReferenceModel as ref import mpisppy.utils.sputils as sputils + def scenario_creator(scenario_name, scenario_count=None): if scenario_count not in (3, 10): raise ValueError( @@ -42,7 +43,7 @@ def scenario_denouement(rank, scenario_name, scenario): def _rho_setter(scen): - """ rho values for the scenario. + """rho values for the scenario. Args: scen (pyo.ConcreteModel): the scenario Returns: @@ -64,8 +65,9 @@ def _rho_setter(scen): return retlist + def _variable_probability(scen): - """ per variable probabilities + """per variable probabilities Args: scen (pyo.ConcreteModel): the scenario Returns: @@ -80,13 +82,13 @@ def _variable_probability(scen): if scen._PySP_scenario_name == "Scenario1": retlist.append((idv, 0)) else: - retlist.append((idv, 1/(scencnt-1))) - + retlist.append((idv, 1 / (scencnt - 1))) + return retlist def id_fix_list_fct(s): - """ specify tuples used by the fixer. + """specify tuples used by the fixer. Args: s (ConcreteModel): the sizes instance. diff --git a/examples/sslp/sslp.py b/examples/sslp/sslp.py index 1daa836d0..a3e3e243c 100644 --- a/examples/sslp/sslp.py +++ b/examples/sslp/sslp.py @@ -25,11 +25,11 @@ def scenario_creator(scenario_name, data_dir=None): - """ The callback needs to create an instance and then attach - the PySP nodes to it in a list _mpisppy_node_list ordered by stages. - Optionally attach _PHrho. 
+ """The callback needs to create an instance and then attach + the PySP nodes to it in a list _mpisppy_node_list ordered by stages. + Optionally attach _PHrho. """ - if data_dir is None: + if data_dir is None: raise ValueError("kwarg `data_dir` is required for SSLP scenario_creator") fname = data_dir + os.sep + scenario_name + ".dat" model = ref.model.create_instance(fname, name=scenario_name) @@ -41,7 +41,7 @@ def scenario_creator(scenario_name, data_dir=None): ) ] model._mpisppy_probability = "uniform" - + return model @@ -51,31 +51,36 @@ def scenario_denouement(rank, scenario_name, scenario): ########## helper functions ######## -#========= -def scenario_names_creator(num_scens,start=None): + +# ========= +def scenario_names_creator(num_scens, start=None): # one-based scenarios # if start!=None, the list starts with the 'start' labeled scenario - if (start is None) : - start=1 - return [f"Scenario{i}" for i in range(start,start+num_scens)] + if start is None: + start = 1 + return [f"Scenario{i}" for i in range(start, start + num_scens)] -#========= +# ========= def inparser_adder(cfg): # add options unique to sizes # we don't want num_scens from the command line cfg.mip_options() - cfg.add_to_config("instance_name", - description="sslp instance name (e.g., sslp_15_45_10)", - domain=str, - default=None) - cfg.add_to_config("sslp_data_path", - description="path to sslp data (e.g., ./data)", - domain=str, - default=None) + cfg.add_to_config( + "instance_name", + description="sslp instance name (e.g., sslp_15_45_10)", + domain=str, + default=None, + ) + cfg.add_to_config( + "sslp_data_path", + description="path to sslp data (e.g., ./data)", + domain=str, + default=None, + ) -#========= +# ========= def kw_creator(cfg): # linked to the scenario_creator and inparser_adder # side-effect is dealing with num_scens @@ -83,19 +88,27 @@ def kw_creator(cfg): ns = int(inst.split("_")[-1]) if hasattr(cfg, "num_scens"): if cfg.num_scens != ns: - raise RuntimeError(f"Argument num-scens={cfg.num_scens} does not match the number " - "implied by instance name={ns} " - "\n(--num-scens is not needed for sslp)") + raise RuntimeError( + f"Argument num-scens={cfg.num_scens} does not match the number " + "implied by instance name={ns} " + "\n(--num-scens is not needed for sslp)" + ) else: - cfg.add_and_assign("num_scens","number of scenarios", int, None, ns) + cfg.add_and_assign("num_scens", "number of scenarios", int, None, ns) data_dir = os.path.join(cfg.sslp_data_path, inst, "scenariodata") kwargs = {"data_dir": data_dir} return kwargs -def sample_tree_scen_creator(sname, stage, sample_branching_factors, seed, - given_scenario=None, **scenario_creator_kwargs): - """ Create a scenario within a sample tree. Mainly for multi-stage and simple for two-stage. +def sample_tree_scen_creator( + sname, + stage, + sample_branching_factors, + seed, + given_scenario=None, + **scenario_creator_kwargs, +): + """Create a scenario within a sample tree. Mainly for multi-stage and simple for two-stage. (this function supports zhat and confidence interval code) Args: sname (string): scenario name to be created @@ -114,23 +127,25 @@ def sample_tree_scen_creator(sname, stage, sample_branching_factors, seed, sca["num_scens"] = sample_branching_factors[0] # two-stage problem return scenario_creator(sname, **sca) + ######## end helper functions ######### + # special helper function def id_fix_list_fct(s): - """ specify tuples used by the classic (non-RC-based) fixer. - - Args: - s (ConcreteModel): the sizes instance. 
- Returns: - i0, ik (tuples): one for iter 0 and other for general iterations. - Var id, threshold, nb, lb, ub - The threshold is on the square root of the xbar squared differnce - nb, lb an bu an "no bound", "upper" and "lower" and give the numver - of iterations or None for ik and for i0 anything other than None - or None. In both cases, None indicates don't fix. - Note: - This is just here to provide an illustration, we don't run long enough. + """specify tuples used by the classic (non-RC-based) fixer. + + Args: + s (ConcreteModel): the sizes instance. + Returns: + i0, ik (tuples): one for iter 0 and other for general iterations. + Var id, threshold, nb, lb, ub + The threshold is on the square root of the xbar squared difference + nb, lb and ub are "no bound", "lower" and "upper" and give the number + of iterations or None for ik and for i0 anything other than None + or None. In both cases, None indicates don't fix. + Note: + This is just here to provide an illustration, we don't run long enough. """ # iter0tuples = [ @@ -189,10 +204,10 @@ def id_fix_list_fct(s): options["display_timing"] = False options["display_progress"] = True - options["primal_dual_converger_options"] = {"tol" : 1e-6} + options["primal_dual_converger_options"] = {"tol": 1e-6} options["display_convergence_detail"] = True options["smoothed"] = True - options["defaultPHp"] = .5 + options["defaultPHp"] = 0.5 options["defaultPHbeta"] = 0.1 ### async section ### @@ -231,7 +246,7 @@ def id_fix_list_fct(s): scenario_creator, scenario_denouement, scenario_creator_kwargs={"data_dir": data_dir}, - ph_converger = PrimalDualConverger + ph_converger=PrimalDualConverger, ) if ph.cylinder_rank == 0: diff --git a/examples/sslp/sslp_cylinders.py b/examples/sslp/sslp_cylinders.py index 638004240..52027c728 100644 --- a/examples/sslp/sslp_cylinders.py +++ b/examples/sslp/sslp_cylinders.py @@ -13,16 +13,19 @@ from mpisppy.utils import config import mpisppy.utils.cfg_vanilla as vanilla + def _parse_args(): cfg = config.Config() cfg.popular_args() - cfg.num_scens_optional() + cfg.num_scens_optional() cfg.ph_args() - cfg.add_to_config("instance_name", - description="sslp instance name (e.g., sslp_15_45_10)", - domain=str, - default=None, - argparse_args = {"required": True}) + cfg.add_to_config( + "instance_name", + description="sslp instance name (e.g., sslp_15_45_10)", + domain=str, + default=None, + argparse_args={"required": True}, + ) cfg.two_sided_args() cfg.fixer_args() @@ -42,9 +45,11 @@ def main(): inst = cfg.instance_name num_scen = int(inst.split("_")[-1]) if cfg.num_scens is not None and cfg.num_scens != num_scen: - raise RuntimeError("Argument num-scens={} does not match the number " - "implied by instance name={} " - "\n(--num-scens is not needed for sslp)") + raise RuntimeError( + "Argument num-scens={} does not match the number " + "implied by instance name={} " + "\n(--num-scens is not needed for sslp)".format(cfg.num_scens, inst) + ) fwph = cfg.fwph fixer = cfg.fixer @@ -58,9 +63,11 @@ def main(): if cfg.default_rho is None: raise RuntimeError("The --default-rho option must be specified") - scenario_creator_kwargs = {"data_dir": f"{sslp.__file__[:-8]}/data/{inst}/scenariodata"} + scenario_creator_kwargs = { + "data_dir": f"{sslp.__file__[:-8]}/data/{inst}/scenariodata" + } scenario_creator = sslp.scenario_creator - scenario_denouement = sslp.scenario_denouement + scenario_denouement = sslp.scenario_denouement all_scenario_names = [f"Scenario{i+1}" for i in range(num_scen)] if fixer: @@ -72,10 +79,12 @@ def main(): beans = (cfg, scenario_creator, 
scenario_denouement, all_scenario_names) # Vanilla PH hub - hub_dict = vanilla.ph_hub(*beans, - scenario_creator_kwargs=scenario_creator_kwargs, - ph_extensions=ph_ext, - rho_setter = None) + hub_dict = vanilla.ph_hub( + *beans, + scenario_creator_kwargs=scenario_creator_kwargs, + ph_extensions=ph_ext, + rho_setter=None, + ) if fixer: hub_dict["opt_kwargs"]["options"]["fixeroptions"] = { @@ -83,38 +92,44 @@ def main(): "boundtol": fixer_tol, "id_fix_list_fct": sslp.id_fix_list_fct, } - + if reduced_costs: vanilla.add_reduced_costs_fixer(hub_dict, cfg) # FWPH spoke if fwph: - fw_spoke = vanilla.fwph_spoke(*beans, scenario_creator_kwargs=scenario_creator_kwargs) + fw_spoke = vanilla.fwph_spoke( + *beans, scenario_creator_kwargs=scenario_creator_kwargs + ) # Standard Lagrangian bound spoke if lagrangian: - lagrangian_spoke = vanilla.lagrangian_spoke(*beans, - scenario_creator_kwargs=scenario_creator_kwargs, - rho_setter = None) + lagrangian_spoke = vanilla.lagrangian_spoke( + *beans, scenario_creator_kwargs=scenario_creator_kwargs, rho_setter=None + ) if subgradient: - subgradient_spoke = vanilla.subgradient_spoke(*beans, - scenario_creator_kwargs=scenario_creator_kwargs, - rho_setter = None) - + subgradient_spoke = vanilla.subgradient_spoke( + *beans, scenario_creator_kwargs=scenario_creator_kwargs, rho_setter=None + ) + # xhat looper bound spoke if xhatlooper: - xhatlooper_spoke = vanilla.xhatlooper_spoke(*beans, scenario_creator_kwargs=scenario_creator_kwargs) + xhatlooper_spoke = vanilla.xhatlooper_spoke( + *beans, scenario_creator_kwargs=scenario_creator_kwargs + ) # xhat shuffle bound spoke if xhatshuffle: - xhatshuffle_spoke = vanilla.xhatshuffle_spoke(*beans, scenario_creator_kwargs=scenario_creator_kwargs) - + xhatshuffle_spoke = vanilla.xhatshuffle_spoke( + *beans, scenario_creator_kwargs=scenario_creator_kwargs + ) + # reduced costs spoke if reduced_costs: - reduced_costs_spoke = vanilla.reduced_costs_spoke(*beans, - scenario_creator_kwargs=scenario_creator_kwargs, - rho_setter = None) - + reduced_costs_spoke = vanilla.reduced_costs_spoke( + *beans, scenario_creator_kwargs=scenario_creator_kwargs, rho_setter=None + ) + list_of_spoke_dict = list() if fwph: list_of_spoke_dict.append(fw_spoke) diff --git a/examples/sslp/sslp_ef.py b/examples/sslp/sslp_ef.py index 10f536d35..ca1652f00 100644 --- a/examples/sslp/sslp_ef.py +++ b/examples/sslp/sslp_ef.py @@ -6,14 +6,15 @@ # All rights reserved. Please see the files COPYRIGHT.md and LICENSE.md for # full copyright and license information. 
############################################################################### -''' Solve the EF of the sslp problems -''' +"""Solve the EF of the sslp problems""" + from mpisppy.opt.ef import ExtensiveForm from sslp import scenario_creator import pyomo.environ as pyo import sys import os + def main(): # inst = "sslp_15_45_5" if len(sys.argv) == 1: @@ -41,5 +42,6 @@ def main(): print("Warning: Non-optimal termination condition from Pyomo") print("sslp objective value:", pyo.value(ef.ef.EF_Obj)) -if __name__=="__main__": + +if __name__ == "__main__": main() diff --git a/examples/stoch_distr/globalmodel.py b/examples/stoch_distr/globalmodel.py index e12141525..cbb468df5 100644 --- a/examples/stoch_distr/globalmodel.py +++ b/examples/stoch_distr/globalmodel.py @@ -17,18 +17,20 @@ import examples.distr.distr_data as distr_data from mpisppy.utils import config + def _parse_args(): # create a config object and parse cfg = config.Config() stoch_distr.inparser_adder(cfg) - cfg.add_to_config("solver_name", - description="which solver", - domain=str, - default=None, - argparse_args = {"required": True}, - ) + cfg.add_to_config( + "solver_name", + description="which solver", + domain=str, + default=None, + argparse_args={"required": True}, + ) cfg.parse_command_line("globalmodel") - + return cfg @@ -43,19 +45,31 @@ def global_dict_creator(num_admm_subproblems, start=0): dict: dictionary with the information to create a min cost distribution problem """ inter_region_dict = distr_data.inter_region_dict_creator(num_admm_subproblems) - global_dict={} - for i in range(start,start+num_admm_subproblems): - scenario_name=f"Region{i+1}" + global_dict = {} + for i in range(start, start + num_admm_subproblems): + scenario_name = f"Region{i+1}" region_dict = distr_data.region_dict_creator(scenario_name) for key in region_dict: - if key in ["nodes","factory nodes", "buyer nodes", "distribution center nodes", "arcs"]: + if key in [ + "nodes", + "factory nodes", + "buyer nodes", + "distribution center nodes", + "arcs", + ]: if i == start: - global_dict[key]=[] + global_dict[key] = [] for x in region_dict[key]: global_dict[key].append(x) - if key in ["supply", "production costs","revenues", "flow capacities", "flow costs"]: + if key in [ + "supply", + "production costs", + "revenues", + "flow capacities", + "flow costs", + ]: if i == start: - global_dict[key]={} + global_dict[key] = {} for key2 in region_dict[key]: global_dict[key][key2] = region_dict[key][key2] @@ -64,22 +78,29 @@ def _extract_arc(a): node_source = source[1] node_target = target[1] return (node_source, node_target) - + for a in inter_region_dict["arcs"]: global_dict["arcs"].append(_extract_arc(a)) for a in inter_region_dict["costs"]: - global_dict["flow costs"][_extract_arc(a)] = inter_region_dict["costs"][a] + global_dict["flow costs"][_extract_arc(a)] = inter_region_dict["costs"][a] for a in inter_region_dict["capacities"]: - global_dict["flow capacities"][_extract_arc(a)] = inter_region_dict["capacities"][a] + global_dict["flow capacities"][_extract_arc(a)] = inter_region_dict[ + "capacities" + ][a] return global_dict + def main(): """ - do all the work + do all the work """ cfg = _parse_args() - print("won't give the good results because there is only one scenario, and the scennum on which are based the seeds are different") - model = stoch_distr.min_cost_distr_problem(global_dict_creator(cfg.num_admm_subproblems), cfg) + print( + "won't give the right results because there is only one scenario, and the scennums on which the seeds are based are
different" + ) + model = stoch_distr.min_cost_distr_problem( + global_dict_creator(cfg.num_admm_subproblems), cfg + ) solver_name = cfg.solver_name opt = pyo.SolverFactory(solver_name) @@ -92,10 +113,10 @@ def main(): count = 0 for obj in objectives: objective_value = pyo.value(obj) - count += 1 + count += 1 assert count == 1, f"only one objective function is authorized, there are {count}" print(f"Objective '{obj}' value: {objective_value}") if __name__ == "__main__": - main() \ No newline at end of file + main() diff --git a/examples/stoch_distr/stoch_distr.py b/examples/stoch_distr/stoch_distr.py index bcf645a5b..a69bab6ff 100644 --- a/examples/stoch_distr/stoch_distr.py +++ b/examples/stoch_distr/stoch_distr.py @@ -22,15 +22,15 @@ def inter_arcs_adder(region_dict, inter_region_dict): - """This function adds to the region_dict the inter-region arcs + """This function adds to the region_dict the inter-region arcs Args: region_dict (dict): dictionary for the current scenario \n inter_region_dict (dict): dictionary of the inter-region relations Returns: - local_dict (dict): - This dictionary copies region_dict, completes the already existing fields of + local_dict (dict): + This dictionary copies region_dict, completes the already existing fields of region_dict to represent the inter-region arcs and their capcities and costs """ ### Note: The cost of the arc is here chosen to be split equally between the source region and target region @@ -39,22 +39,26 @@ def inter_arcs_adder(region_dict, inter_region_dict): # but also contains local information that can be linked to the other scenarios (with inter-region arcs) local_dict = region_dict for inter_arc in inter_region_dict["arcs"]: - source,target = inter_arc - region_source,node_source = source - region_target,node_target = target + source, target = inter_arc + region_source, node_source = source + region_target, node_target = target if region_source == region_dict["name"] or region_target == region_dict["name"]: arc = node_source, node_target local_dict["arcs"].append(arc) - local_dict["flow capacities"][arc] = inter_region_dict["capacities"][inter_arc] - local_dict["flow costs"][arc] = inter_region_dict["costs"][inter_arc]/2 - #print(f"scenario name = {region_dict['name']} \n {local_dict=} ") + local_dict["flow capacities"][arc] = inter_region_dict["capacities"][ + inter_arc + ] + local_dict["flow costs"][arc] = inter_region_dict["costs"][inter_arc] / 2 + # print(f"scenario name = {region_dict['name']} \n {local_dict=} ") return local_dict ###Creates the model when local_dict is given, local_dict depends on the subproblem -def min_cost_distr_problem(local_dict, cfg, stoch_scenario_name, sense=pyo.minimize, max_revenue=None): - """ Create an arcs formulation of network flow for the region and stochastic scenario considered. +def min_cost_distr_problem( + local_dict, cfg, stoch_scenario_name, sense=pyo.minimize, max_revenue=None +): + """Create an arcs formulation of network flow for the region and stochastic scenario considered. 
Args: local_dict (dict): dictionary representing a region including the inter region arcs \n @@ -78,9 +82,11 @@ def min_cost_distr_problem(local_dict, cfg, stoch_scenario_name, sense=pyo.minim if a[1] in local_dict["nodes"]: arcsin[a[1]].append(a) - model = pyo.ConcreteModel(name='MinCostFlowArcs') - def flowBounds_rule(model, i,j): - return (0, local_dict["flow capacities"][(i,j)]) + model = pyo.ConcreteModel(name="MinCostFlowArcs") + + def flowBounds_rule(model, i, j): + return (0, local_dict["flow capacities"][(i, j)]) + model.flow = pyo.Var(local_dict["arcs"], bounds=flowBounds_rule) # x def slackBounds_rule(model, n): @@ -93,62 +99,102 @@ def slackBounds_rule(model, n): # Should be (0,0) but to avoid infeasibility we add a negative slack variable return (None, 0) else: - return (0,0) + return (0, 0) else: raise ValueError(f"unknown node type for node {n}") - + model.y = pyo.Var(local_dict["nodes"], bounds=slackBounds_rule) def inventoryBounds_rule(model, n): - return (0,local_dict["supply"][n]) + return (0, local_dict["supply"][n]) ### We can store everything to make sure it is feasible, but it will never be interesting model.inventory = pyo.Var(local_dict["factory nodes"], bounds=inventoryBounds_rule) - model.FirstStageCost = pyo.Expression(expr=\ - sum(local_dict["production costs"][n]*(local_dict["supply"][n]-model.y[n]) for n in local_dict["factory nodes"])) - + model.FirstStageCost = pyo.Expression( + expr=sum( + local_dict["production costs"][n] * (local_dict["supply"][n] - model.y[n]) + for n in local_dict["factory nodes"] + ) + ) + if cfg.ensure_xhat_feas: # too big penalty to allow the stack to be non-zero - model.SecondStageCost = pyo.Expression(expr=\ - sum(local_dict["flow costs"][a]*model.flow[a] for a in local_dict["arcs"]) \ - + sum(local_dict["revenues"][n]*(local_dict["supply"][n]-model.y[n]) for n in local_dict["buyer nodes"]) \ - + sum(2*max_revenue*(-model.y[n]) for n in local_dict["distribution center nodes"]) \ - - sum(local_dict["production costs"][n]/2*model.inventory[n] for n in local_dict["factory nodes"])) + model.SecondStageCost = pyo.Expression( + expr=sum( + local_dict["flow costs"][a] * model.flow[a] for a in local_dict["arcs"] + ) + + sum( + local_dict["revenues"][n] * (local_dict["supply"][n] - model.y[n]) + for n in local_dict["buyer nodes"] + ) + + sum( + 2 * max_revenue * (-model.y[n]) + for n in local_dict["distribution center nodes"] + ) + - sum( + local_dict["production costs"][n] / 2 * model.inventory[n] + for n in local_dict["factory nodes"] + ) + ) else: - model.SecondStageCost = pyo.Expression(expr=\ - sum(local_dict["flow costs"][a]*model.flow[a] for a in local_dict["arcs"]) \ - + sum(local_dict["revenues"][n]*(local_dict["supply"][n]-model.y[n]) for n in local_dict["buyer nodes"]) \ - - sum(local_dict["production costs"][n]/2*model.inventory[n] for n in local_dict["factory nodes"]) - ) - - model.MinCost = pyo.Objective(expr=model.FirstStageCost + model.SecondStageCost, sense=sense) - + model.SecondStageCost = pyo.Expression( + expr=sum( + local_dict["flow costs"][a] * model.flow[a] for a in local_dict["arcs"] + ) + + sum( + local_dict["revenues"][n] * (local_dict["supply"][n] - model.y[n]) + for n in local_dict["buyer nodes"] + ) + - sum( + local_dict["production costs"][n] / 2 * model.inventory[n] + for n in local_dict["factory nodes"] + ) + ) + + model.MinCost = pyo.Objective( + expr=model.FirstStageCost + model.SecondStageCost, sense=sense + ) + def FlowBalance_rule(m, n): if n in local_dict["factory nodes"]: # We generate pseudo 
randomly the loss on each factory node node_type, region_num, count = distr_data.parse_node_name(n) node_num = distr_data._node_num(cfg.mnpr, node_type, region_num, count) - np.random.seed(node_num+cfg.initial_seed+(scennum+1)*2**20) #2**20 avoids the correlation with the scalable example data - return sum(m.flow[a] for a in arcsout[n])\ - - sum(m.flow[a] for a in arcsin[n])\ - + m.inventory[n] \ - == (local_dict["supply"][n] - m.y[n]) * min(1,max(0,1-np.random.normal(cfg.spm,cfg.cv)/100)) # We add the loss + np.random.seed( + node_num + cfg.initial_seed + (scennum + 1) * 2**20 + ) # 2**20 avoids the correlation with the scalable example data + return sum(m.flow[a] for a in arcsout[n]) - sum( + m.flow[a] for a in arcsin[n] + ) + m.inventory[n] == (local_dict["supply"][n] - m.y[n]) * min( + 1, max(0, 1 - np.random.normal(cfg.spm, cfg.cv) / 100) + ) # We add the loss else: - return sum(m.flow[a] for a in arcsout[n])\ - - sum(m.flow[a] for a in arcsin[n])\ - + m.y[n] == local_dict["supply"][n] - model.FlowBalance= pyo.Constraint(local_dict["nodes"], rule=FlowBalance_rule) + return ( + sum(m.flow[a] for a in arcsout[n]) + - sum(m.flow[a] for a in arcsin[n]) + + m.y[n] + == local_dict["supply"][n] + ) + + model.FlowBalance = pyo.Constraint(local_dict["nodes"], rule=FlowBalance_rule) return model ###Functions required in other files, which constructions are specific to the problem + ###Creates the scenario -def scenario_creator(admm_stoch_subproblem_scenario_name, inter_region_dict=None, cfg=None, data_params=None, all_nodes_dict=None): +def scenario_creator( + admm_stoch_subproblem_scenario_name, + inter_region_dict=None, + cfg=None, + data_params=None, + all_nodes_dict=None, +): """Creates the model, which should include the consensus variables. \n - However, this function shouldn't attach the consensus variables for the admm subproblems as it is done in stoch_admmWrapper. + However, this function shouldn't attach the consensus variables for the admm subproblems as it is done in stoch_admmWrapper. Therefore, only the stochastic tree as it would be represented without the decomposition needs to be created. 
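The seeded loss generation inside FlowBalance_rule above is easy to miss in the diff noise; here is a standalone sketch of just that piece (the helper name and sample arguments are hypothetical; the spm and cv defaults are the ones registered by inparser_adder further down):

import numpy as np

def loss_multiplier(node_num, initial_seed, scennum, spm=5, cv=20):
    # seed mixes the node number, the initial seed, and the scenario number;
    # the 2**20 offset avoids correlation with the scalable example data
    np.random.seed(node_num + initial_seed + (scennum + 1) * 2**20)
    # fraction of the supply that survives the scrap loss, clamped to [0, 1]
    return min(1, max(0, 1 - np.random.normal(spm, cv) / 100))

print(loss_multiplier(node_num=3, initial_seed=0, scennum=0))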
Args: @@ -160,27 +206,40 @@ def scenario_creator(admm_stoch_subproblem_scenario_name, inter_region_dict=None Returns: Pyomo ConcreteModel: the instantiated model """ - assert (inter_region_dict is not None) - assert (cfg is not None) - admm_subproblem_name, stoch_scenario_name = split_admm_stoch_subproblem_scenario_name(admm_stoch_subproblem_scenario_name) + assert inter_region_dict is not None + assert cfg is not None + admm_subproblem_name, stoch_scenario_name = ( + split_admm_stoch_subproblem_scenario_name(admm_stoch_subproblem_scenario_name) + ) if cfg.scalable: - assert (data_params is not None) - assert (all_nodes_dict is not None) - region_dict = distr_data.scalable_region_dict_creator(admm_subproblem_name, all_nodes_dict=all_nodes_dict, cfg=cfg, data_params=data_params) + assert data_params is not None + assert all_nodes_dict is not None + region_dict = distr_data.scalable_region_dict_creator( + admm_subproblem_name, + all_nodes_dict=all_nodes_dict, + cfg=cfg, + data_params=data_params, + ) else: region_dict = distr_data.region_dict_creator(admm_subproblem_name) # Adding inter region arcs nodes and associated features local_dict = inter_arcs_adder(region_dict, inter_region_dict) # Generating the model - model = min_cost_distr_problem(local_dict, cfg, stoch_scenario_name, max_revenue=data_params["max revenue"]) + model = min_cost_distr_problem( + local_dict, cfg, stoch_scenario_name, max_revenue=data_params["max revenue"] + ) + + sputils.attach_root_node( + model, model.FirstStageCost, [model.y[n] for n in local_dict["factory nodes"]] + ) - sputils.attach_root_node(model, model.FirstStageCost, [model.y[n] for n in local_dict["factory nodes"]]) - return model -def scenario_denouement(rank, admm_stoch_subproblem_scenario_name, scenario, eps=10**(-6)): +def scenario_denouement( + rank, admm_stoch_subproblem_scenario_name, scenario, eps=10 ** (-6) +): """For each admm stochastic scenario subproblem prints its name and the final variable values Args: @@ -189,19 +248,23 @@ def scenario_denouement(rank, admm_stoch_subproblem_scenario_name, scenario, eps scenario (Pyomo ConcreteModel): the instantiated model eps (float, opt): ensures that the dummy slack variables introduced have small values """ - #print(f"slack values for the distribution centers for {admm_stoch_subproblem_scenario_name=} at {rank=}") + # print(f"slack values for the distribution centers for {admm_stoch_subproblem_scenario_name=} at {rank=}") for var in scenario.y: - if 'DC' in var: - if (abs(scenario.y[var].value) > eps): - print(f"The penalty slack {scenario.y[var].name} is too big, its absolute value is {abs(scenario.y[var].value)}") - #assert (abs(scenario.y[var].value) < eps), "the penalty slack is too big" - #scenario.y[var].pprint() - if 'F' in var: - if (scenario.inventory[var].value > 10*eps): - print(f"In {rank=} for {admm_stoch_subproblem_scenario_name}, the inventory {scenario.inventory[var].name} is big, its value is {scenario.inventory[var].value}. 
Although it is rare after convergence, it makes sense if in a scenario it is really interesting to produce.") + if "DC" in var: + if abs(scenario.y[var].value) > eps: + print( + f"The penalty slack {scenario.y[var].name} is too big, its absolute value is {abs(scenario.y[var].value)}" + ) + # assert (abs(scenario.y[var].value) < eps), "the penalty slack is too big" + # scenario.y[var].pprint() + if "F" in var: + if scenario.inventory[var].value > 10 * eps: + print( + f"In {rank=} for {admm_stoch_subproblem_scenario_name}, the inventory {scenario.inventory[var].name} is big, its value is {scenario.inventory[var].value}. Although it is rare after convergence, it makes sense if in a scenario it is really interesting to produce." + ) scenario.inventory[var].pprint() - #assert (abs(scenario.inventory[var].value) < eps), "the inventory is too big" - #scenario.inventory[var].pprint() + # assert (abs(scenario.inventory[var].value) < eps), "the inventory is too big" + # scenario.inventory[var].pprint() return print(f"flow values for {admm_stoch_subproblem_scenario_name=} at {rank=}") scenario.flow.pprint() @@ -209,38 +272,49 @@ def scenario_denouement(rank, admm_stoch_subproblem_scenario_name, scenario, eps scenario.y.pprint() -def consensus_vars_creator(admm_subproblem_names, stoch_scenario_name, inter_region_dict=None, cfg=None, data_params=None, all_nodes_dict=None): +def consensus_vars_creator( + admm_subproblem_names, + stoch_scenario_name, + inter_region_dict=None, + cfg=None, + data_params=None, + all_nodes_dict=None, +): """The following function creates the consensus variables dictionary thanks to the inter-region dictionary. \n This dictionary has redundant information, but is useful for admmWrapper. Args: admm_subproblem_names (list of str): name of the admm subproblems (regions) \n - stoch_scenario_name (str): name of any stochastic_scenario, it is only used - in this example to access the non anticipative variables (which are common to + stoch_scenario_name (str): name of any stochastic_scenario, it is only used + in this example to access the non anticipative variables (which are common to every stochastic scenario) and their stage. \n kwargs (opt): necessary in this example to call the scenario creator - + Returns: - dict: dictionary which keys are the subproblems and values are the list of + dict: dictionary which keys are the subproblems and values are the list of pairs (consensus_variable_name (str), stage (int)). 
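For instance (hypothetical arc data, not from the diff), a single inter-region arc yields one stage-2 entry in both the source and the target region; stoch_admmWrapper later ties these two copies of the flow variable together:

inter_arc = (("Region1", "DC1"), ("Region2", "DC2"))
consensus_vars = {}
for source, target in [inter_arc]:
    region_source, node_source = source
    region_target, node_target = target
    vstr = f"flow[{(node_source, node_target)}]"  # variable name as a string
    consensus_vars.setdefault(region_source, []).append((vstr, 2))
    consensus_vars.setdefault(region_target, []).append((vstr, 2))
print(consensus_vars)
# {'Region1': [("flow[('DC1', 'DC2')]", 2)], 'Region2': [("flow[('DC1', 'DC2')]", 2)]}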
""" consensus_vars = {} for inter_arc in inter_region_dict["arcs"]: - source,target = inter_arc - region_source,node_source = source - region_target,node_target = target + source, target = inter_arc + region_source, node_source = source + region_target, node_target = target arc = node_source, node_target - vstr = f"flow[{arc}]" #variable name as string, y is the slack + vstr = f"flow[{arc}]" # variable name as string, y is the slack - #adds inter_region_arcs in the source region - if region_source not in consensus_vars: #initiates consensus_vars[region_source] + # adds inter_region_arcs in the source region + if ( + region_source not in consensus_vars + ): # initiates consensus_vars[region_source] consensus_vars[region_source] = list() - consensus_vars[region_source].append((vstr,2)) + consensus_vars[region_source].append((vstr, 2)) - #adds inter_region_arcs in the target region - if region_target not in consensus_vars: #initiates consensus_vars[region_target] + # adds inter_region_arcs in the target region + if ( + region_target not in consensus_vars + ): # initiates consensus_vars[region_target] consensus_vars[region_target] = list() - consensus_vars[region_target].append((vstr,2)) + consensus_vars[region_target].append((vstr, 2)) # With the scalable example some regions may have no consensus_vars # This is not realistic because it would imply that the region is not linked @@ -248,13 +322,21 @@ def consensus_vars_creator(admm_subproblem_names, stoch_scenario_name, inter_reg if admm_subproblem_name not in consensus_vars: print(f"WARNING: {admm_subproblem_name} has no consensus_vars") consensus_vars[admm_subproblem_name] = list() - + # now add the parents. It doesn't depend on the stochastic scenario so we chose one and - # then we go through the models (created by scenario creator) for all the admm_stoch_subproblem_scenario + # then we go through the models (created by scenario creator) for all the admm_stoch_subproblem_scenario # which have this scenario as an ancestor (parent) in the tree for admm_subproblem_name in admm_subproblem_names: - admm_stoch_subproblem_scenario_name = combining_names(admm_subproblem_name,stoch_scenario_name) - model = scenario_creator(admm_stoch_subproblem_scenario_name, inter_region_dict=inter_region_dict, cfg=cfg, data_params=data_params, all_nodes_dict=all_nodes_dict) + admm_stoch_subproblem_scenario_name = combining_names( + admm_subproblem_name, stoch_scenario_name + ) + model = scenario_creator( + admm_stoch_subproblem_scenario_name, + inter_region_dict=inter_region_dict, + cfg=cfg, + data_params=data_params, + all_nodes_dict=all_nodes_dict, + ) for node in model._mpisppy_node_list: for var in node.nonant_list: if var.name not in consensus_vars[admm_subproblem_name]: @@ -286,13 +368,15 @@ def admm_subproblem_names_creator(num_admm_subproblems): return [f"Region{i+1}" for i in range(num_admm_subproblems)] -def combining_names(admm_subproblem_name,stoch_scenario_name): +def combining_names(admm_subproblem_name, stoch_scenario_name): # Used to create the admm_stoch_subproblem_scenario_name return f"ADMM_STOCH_{admm_subproblem_name}_{stoch_scenario_name}" -def admm_stoch_subproblem_scenario_names_creator(admm_subproblem_names,stoch_scenario_names): - """ Creates the list of the admm stochastic subproblem scenarios, which are the admm subproblems given a scenario +def admm_stoch_subproblem_scenario_names_creator( + admm_subproblem_names, stoch_scenario_names +): + """Creates the list of the admm stochastic subproblem scenarios, which are the admm subproblems 
given a scenario Args: admm_subproblem_names (list of str): names of the admm subproblem @@ -302,14 +386,16 @@ def admm_stoch_subproblem_scenario_names_creator(admm_subproblem_names,stoch_sce list of str: name of the admm stochastic subproblem scenarios """ ### The order is important, we may want all the subproblems to appear consecutively in a scenario - return [combining_names(admm_subproblem_name,stoch_scenario_name) \ - for stoch_scenario_name in stoch_scenario_names \ - for admm_subproblem_name in admm_subproblem_names ] + return [ + combining_names(admm_subproblem_name, stoch_scenario_name) + for stoch_scenario_name in stoch_scenario_names + for admm_subproblem_name in admm_subproblem_names + ] def split_admm_stoch_subproblem_scenario_name(admm_stoch_subproblem_scenario_name): - """ Returns the admm_subproblem_name and the stoch_scenario_name given an admm_stoch_subproblem_scenario_name. - This function, specific to the problem, is the reciprocal function of ``combining_names`` which creates the + """Returns the admm_subproblem_name and the stoch_scenario_name given an admm_stoch_subproblem_scenario_name. + This function, specific to the problem, is the reciprocal function of ``combining_names`` which creates the admm_stoch_subproblem_scenario_name given the admm_subproblem_name and the stoch_scenario_name. Args: @@ -319,8 +405,10 @@ def split_admm_stoch_subproblem_scenario_name(admm_stoch_subproblem_scenario_nam (str,str): admm_subproblem_name, stoch_scenario_name """ # Method specific to our example and because the admm_subproblem_name and stoch_scenario_name don't include "_" - splitted = admm_stoch_subproblem_scenario_name.split('_') - assert (len(splitted) == 4), "no underscore should be attached to admm_subproblem_name nor stoch_scenario_name" + splitted = admm_stoch_subproblem_scenario_name.split("_") + assert ( + len(splitted) == 4 + ), "no underscore should be attached to admm_subproblem_name nor stoch_scenario_name" admm_subproblem_name = splitted[2] stoch_scenario_name = splitted[3] return admm_subproblem_name, stoch_scenario_name @@ -335,65 +423,77 @@ def kw_creator(all_nodes_dict, cfg, inter_region_dict, data_params): dict (str): the kwargs that are used in distr.scenario_creator, which are included in cfg. """ kwargs = { - "all_nodes_dict" : all_nodes_dict, - "inter_region_dict" : inter_region_dict, - "cfg" : cfg, - "data_params" : data_params, - } + "all_nodes_dict": all_nodes_dict, + "inter_region_dict": inter_region_dict, + "cfg": cfg, + "data_params": data_params, + } return kwargs def inparser_adder(cfg): - """Adding to the config argument, specific elements to our problems. In this case the numbers of stochastic scenarios + """Adding to the config argument, specific elements to our problems. 
In this case the numbers of stochastic scenarios and admm subproblems which are required + elements used for random number generation + possibility to scale Args: cfg (config): specifications for the problem given on the command line """ cfg.add_to_config( - "num_stoch_scens", - description="Number of stochastic scenarios (default None)", - domain=int, - default=None, - argparse_args = {"required": True} - ) - + "num_stoch_scens", + description="Number of stochastic scenarios (default None)", + domain=int, + default=None, + argparse_args={"required": True}, + ) + cfg.add_to_config( - "num_admm_subproblems", - description="Number of admm subproblems (regions)", - domain=int, - default=None, - argparse_args = {"required": True} - ) + "num_admm_subproblems", + description="Number of admm subproblems (regions)", + domain=int, + default=None, + argparse_args={"required": True}, + ) ### For the pseudo-random scenarios - cfg.add_to_config("spm", - description="mean percentage of scrap loss at the production", - domain=float, - default=5) - - cfg.add_to_config("cv", - description="coefficient of variation of the loss at the production", - domain=float, - default=20) - - cfg.add_to_config("initial_seed", - description="initial seed for generating the loss", - domain=int, - default=0) - + cfg.add_to_config( + "spm", + description="mean percentage of scrap loss at the production", + domain=float, + default=5, + ) + + cfg.add_to_config( + "cv", + description="coefficient of variation of the loss at the production", + domain=float, + default=20, + ) + + cfg.add_to_config( + "initial_seed", + description="initial seed for generating the loss", + domain=int, + default=0, + ) + ### For the scalable example - cfg.add_to_config("scalable", - description="decides whether a scalable model is used", - domain=bool, - default=False) - - cfg.add_to_config("mnpr", - description="max number of nodes per region and per type", - domain=int, - default=4) - - cfg.add_to_config("ensure_xhat_feas", - description="adds slacks with high costs to ensure the feasibility of xhat yet maintaining the optimal", - domain=bool, - default=False) \ No newline at end of file + cfg.add_to_config( + "scalable", + description="decides whether a scalable model is used", + domain=bool, + default=False, + ) + + cfg.add_to_config( + "mnpr", + description="max number of nodes per region and per type", + domain=int, + default=4, + ) + + cfg.add_to_config( + "ensure_xhat_feas", + description="adds slacks with high costs to ensure the feasibility of xhat yet maintaining the optimal", + domain=bool, + default=False, + ) diff --git a/examples/stoch_distr/stoch_distr_admm_cylinders.py b/examples/stoch_distr/stoch_distr_admm_cylinders.py index 507ecddbb..1c749c6f5 100644 --- a/examples/stoch_distr/stoch_distr_admm_cylinders.py +++ b/examples/stoch_distr/stoch_distr_admm_cylinders.py @@ -19,10 +19,12 @@ from mpisppy.utils import config import mpisppy.utils.cfg_vanilla as vanilla from mpisppy import MPI + global_rank = MPI.COMM_WORLD.Get_rank() write_solution = False + def _parse_args(): # create a config object and parse cfg = config.Config() @@ -36,10 +38,12 @@ def _parse_args(): cfg.lagrangian_args() cfg.ph_ob_args() cfg.tracking_args() - cfg.add_to_config("run_async", - description="Run with async projective hedging instead of progressive hedging", - domain=bool, - default=False) + cfg.add_to_config( + "run_async", + description="Run with async projective hedging instead of progressive hedging", + domain=bool, + default=False, + ) 
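All of the drivers touched by this PR register their options the same way; a minimal hypothetical driver (script and option names invented for illustration, the calls themselves are the ones used throughout these files) shows the pattern end to end:

from mpisppy.utils import config

cfg = config.Config()
cfg.add_to_config(
    "num_stoch_scens",
    description="Number of stochastic scenarios (default None)",
    domain=int,
    default=None,
    argparse_args={"required": True},
)
# underscores become dashes on the command line,
# e.g. python my_driver.py --num-stoch-scens 2
cfg.parse_command_line("my_driver")
print(cfg.num_stoch_scens)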
cfg.parse_command_line("stoch_distr_admm_cylinders") return cfg @@ -47,7 +51,11 @@ def _parse_args(): def _count_cylinders(cfg): count = 1 - cfglist = ["xhatxbar", "lagrangian", "ph_ob"] # All the cfg arguments that create a new cylinders + cfglist = [ + "xhatxbar", + "lagrangian", + "ph_ob", + ] # All the cfg arguments that create a new cylinders # Add to this list any cfg attribute that would create a spoke for cylname in cfglist: if cfg[cylname]: @@ -55,88 +63,129 @@ def _count_cylinders(cfg): return count -def _make_admm(cfg, n_cylinders, all_nodes_dict, inter_region_dict, data_params, verbose=None): +def _make_admm( + cfg, n_cylinders, all_nodes_dict, inter_region_dict, data_params, verbose=None +): options = {} - admm_subproblem_names = stoch_distr.admm_subproblem_names_creator(cfg.num_admm_subproblems) - stoch_scenario_names = stoch_distr.stoch_scenario_names_creator(num_stoch_scens=cfg.num_stoch_scens) - all_admm_stoch_subproblem_scenario_names = stoch_distr.admm_stoch_subproblem_scenario_names_creator(admm_subproblem_names,stoch_scenario_names) - - split_admm_stoch_subproblem_scenario_name = stoch_distr.split_admm_stoch_subproblem_scenario_name - + admm_subproblem_names = stoch_distr.admm_subproblem_names_creator( + cfg.num_admm_subproblems + ) + stoch_scenario_names = stoch_distr.stoch_scenario_names_creator( + num_stoch_scens=cfg.num_stoch_scens + ) + all_admm_stoch_subproblem_scenario_names = ( + stoch_distr.admm_stoch_subproblem_scenario_names_creator( + admm_subproblem_names, stoch_scenario_names + ) + ) + + split_admm_stoch_subproblem_scenario_name = ( + stoch_distr.split_admm_stoch_subproblem_scenario_name + ) + scenario_creator = stoch_distr.scenario_creator - scenario_creator_kwargs = stoch_distr.kw_creator(all_nodes_dict, cfg, inter_region_dict, data_params) - stoch_scenario_name = stoch_scenario_names[0] # choice of any scenario - consensus_vars = stoch_distr.consensus_vars_creator(admm_subproblem_names, stoch_scenario_name, **scenario_creator_kwargs) - admm = stoch_admmWrapper.Stoch_AdmmWrapper(options, - all_admm_stoch_subproblem_scenario_names, - split_admm_stoch_subproblem_scenario_name, - admm_subproblem_names, - stoch_scenario_names, - scenario_creator, - consensus_vars, - n_cylinders=n_cylinders, - mpicomm=MPI.COMM_WORLD, - scenario_creator_kwargs=scenario_creator_kwargs, - verbose=verbose, - BFs=None, - ) + scenario_creator_kwargs = stoch_distr.kw_creator( + all_nodes_dict, cfg, inter_region_dict, data_params + ) + stoch_scenario_name = stoch_scenario_names[0] # choice of any scenario + consensus_vars = stoch_distr.consensus_vars_creator( + admm_subproblem_names, stoch_scenario_name, **scenario_creator_kwargs + ) + admm = stoch_admmWrapper.Stoch_AdmmWrapper( + options, + all_admm_stoch_subproblem_scenario_names, + split_admm_stoch_subproblem_scenario_name, + admm_subproblem_names, + stoch_scenario_names, + scenario_creator, + consensus_vars, + n_cylinders=n_cylinders, + mpicomm=MPI.COMM_WORLD, + scenario_creator_kwargs=scenario_creator_kwargs, + verbose=verbose, + BFs=None, + ) return admm, all_admm_stoch_subproblem_scenario_names - -def _wheel_creator(cfg, n_cylinders, scenario_creator, variable_probability, all_nodenames, all_admm_stoch_subproblem_scenario_names, scenario_creator_kwargs=None): #the wrapper doesn't need any kwarg + +def _wheel_creator( + cfg, + n_cylinders, + scenario_creator, + variable_probability, + all_nodenames, + all_admm_stoch_subproblem_scenario_names, + scenario_creator_kwargs=None, +): # the wrapper doesn't need any kwarg ph_converger 
= None - #Things needed for vanilla cylinders + # Things needed for vanilla cylinders scenario_denouement = stoch_distr.scenario_denouement - beans = (cfg, scenario_creator, scenario_denouement, all_admm_stoch_subproblem_scenario_names) + beans = ( + cfg, + scenario_creator, + scenario_denouement, + all_admm_stoch_subproblem_scenario_names, + ) if cfg.run_async: # Vanilla APH hub - hub_dict = vanilla.aph_hub(*beans, - scenario_creator_kwargs=scenario_creator_kwargs, - ph_extensions=None, - rho_setter = None, - variable_probability=variable_probability, - all_nodenames=all_nodenames) #needs to be modified, all_nodenames is None only for 2 stage problems + hub_dict = vanilla.aph_hub( + *beans, + scenario_creator_kwargs=scenario_creator_kwargs, + ph_extensions=None, + rho_setter=None, + variable_probability=variable_probability, + all_nodenames=all_nodenames, + ) # needs to be modified, all_nodenames is None only for 2 stage problems else: # Vanilla PH hub - hub_dict = vanilla.ph_hub(*beans, - scenario_creator_kwargs=scenario_creator_kwargs, - ph_extensions=None, - ph_converger=ph_converger, - rho_setter=None, - variable_probability=variable_probability, - all_nodenames=all_nodenames) + hub_dict = vanilla.ph_hub( + *beans, + scenario_creator_kwargs=scenario_creator_kwargs, + ph_extensions=None, + ph_converger=ph_converger, + rho_setter=None, + variable_probability=variable_probability, + all_nodenames=all_nodenames, + ) # FWPH spoke doesn't work with variable probability # Standard Lagrangian bound spoke if cfg.lagrangian: - lagrangian_spoke = vanilla.lagrangian_spoke(*beans, - scenario_creator_kwargs=scenario_creator_kwargs, - rho_setter = None, - all_nodenames=all_nodenames) + lagrangian_spoke = vanilla.lagrangian_spoke( + *beans, + scenario_creator_kwargs=scenario_creator_kwargs, + rho_setter=None, + all_nodenames=all_nodenames, + ) # FWPH spoke : does not work here but may be called in global model. 
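Stripped of the formatting churn, the hub-and-spoke wiring that _wheel_creator and the other cylinder drivers share reduces to a few calls; a condensed sketch (assuming sslp-style scenario helpers are in scope, and using only a PH hub plus one xhatshuffle spoke):

from mpisppy.spin_the_wheel import WheelSpinner
import mpisppy.utils.cfg_vanilla as vanilla

def build_and_spin(cfg, scenario_creator, scenario_denouement,
                   all_scenario_names, scenario_creator_kwargs):
    beans = (cfg, scenario_creator, scenario_denouement, all_scenario_names)
    # one hub cylinder (PH) ...
    hub_dict = vanilla.ph_hub(
        *beans, scenario_creator_kwargs=scenario_creator_kwargs, rho_setter=None
    )
    # ... plus any number of spoke cylinders
    list_of_spoke_dict = [
        vanilla.xhatshuffle_spoke(*beans, scenario_creator_kwargs=scenario_creator_kwargs)
    ]
    wheel = WheelSpinner(hub_dict, list_of_spoke_dict)
    wheel.spin()  # needs at least one MPI rank per cylinder
    return wheel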
if cfg.fwph: - fw_spoke = vanilla.fwph_spoke(*beans, scenario_creator_kwargs=scenario_creator_kwargs) + fw_spoke = vanilla.fwph_spoke( + *beans, scenario_creator_kwargs=scenario_creator_kwargs + ) # ph outer bounder spoke if cfg.ph_ob: - ph_ob_spoke = vanilla.ph_ob_spoke(*beans, - scenario_creator_kwargs=scenario_creator_kwargs, - rho_setter = None, - all_nodenames=all_nodenames, - variable_probability=variable_probability) + ph_ob_spoke = vanilla.ph_ob_spoke( + *beans, + scenario_creator_kwargs=scenario_creator_kwargs, + rho_setter=None, + all_nodenames=all_nodenames, + variable_probability=variable_probability, + ) # xhat looper bound spoke if cfg.xhatxbar: - xhatxbar_spoke = vanilla.xhatxbar_spoke(*beans, - scenario_creator_kwargs=scenario_creator_kwargs, - all_nodenames=all_nodenames, - variable_probability=variable_probability) - + xhatxbar_spoke = vanilla.xhatxbar_spoke( + *beans, + scenario_creator_kwargs=scenario_creator_kwargs, + all_nodenames=all_nodenames, + variable_probability=variable_probability, + ) list_of_spoke_dict = list() if cfg.fwph: @@ -148,7 +197,9 @@ def _wheel_creator(cfg, n_cylinders, scenario_creator, variable_probability, all if cfg.xhatxbar: list_of_spoke_dict.append(xhatxbar_spoke) - assert n_cylinders == 1 + len(list_of_spoke_dict), f"{n_cylinders=},{len(list_of_spoke_dict)=}" + assert n_cylinders == 1 + len( + list_of_spoke_dict + ), f"{n_cylinders=},{len(list_of_spoke_dict)=}" wheel = WheelSpinner(hub_dict, list_of_spoke_dict) @@ -158,52 +209,81 @@ def _wheel_creator(cfg, n_cylinders, scenario_creator, variable_probability, all def main(cfg): assert cfg.fwph_args is not None, "fwph does not support variable probability" - if cfg.default_rho is None: # and rho_setter is None - raise RuntimeError("No rho_setter so a default must be specified via --default-rho") + if cfg.default_rho is None: # and rho_setter is None + raise RuntimeError( + "No rho_setter so a default must be specified via --default-rho" + ) if cfg.scalable: import json + json_file_path = "../distr/data_params.json" # Read the JSON file - with open(json_file_path, 'r') as file: - + with open(json_file_path, "r") as file: data_params = json.load(file) # In distr_data num_admm_subproblems is called num_scens - cfg.add_to_config("num_scens", - description="num admm subproblems", - domain=int, - default=cfg.num_admm_subproblems) - + cfg.add_to_config( + "num_scens", + description="num admm subproblems", + domain=int, + default=cfg.num_admm_subproblems, + ) + all_nodes_dict = distr_data.all_nodes_dict_creator(cfg, data_params) - all_DC_nodes = [DC_node for region in all_nodes_dict for DC_node in all_nodes_dict[region]["distribution center nodes"]] - inter_region_dict = distr_data.scalable_inter_region_dict_creator(all_DC_nodes, cfg, data_params) + all_DC_nodes = [ + DC_node + for region in all_nodes_dict + for DC_node in all_nodes_dict[region]["distribution center nodes"] + ] + inter_region_dict = distr_data.scalable_inter_region_dict_creator( + all_DC_nodes, cfg, data_params + ) else: - inter_region_dict = distr_data.inter_region_dict_creator(num_scens=cfg.num_admm_subproblems) + inter_region_dict = distr_data.inter_region_dict_creator( + num_scens=cfg.num_admm_subproblems + ) all_nodes_dict = None - data_params = {"max revenue": 1200} # hard-coded because the model is hard-coded + data_params = { + "max revenue": 1200 + } # hard-coded because the model is hard-coded n_cylinders = _count_cylinders(cfg) - admm, all_admm_stoch_subproblem_scenario_names = _make_admm(cfg, n_cylinders, all_nodes_dict, 
inter_region_dict, data_params) - - scenario_creator = admm.admmWrapper_scenario_creator # scenario_creator on a local scale - #note that the stoch_admmWrapper scenario_creator wrapper doesn't take any arguments + admm, all_admm_stoch_subproblem_scenario_names = _make_admm( + cfg, n_cylinders, all_nodes_dict, inter_region_dict, data_params + ) + + scenario_creator = ( + admm.admmWrapper_scenario_creator + ) # scenario_creator on a local scale + # note that the stoch_admmWrapper scenario_creator wrapper doesn't take any arguments variable_probability = admm.var_prob_list all_nodenames = admm.all_nodenames - wheel = _wheel_creator(cfg, n_cylinders, scenario_creator, variable_probability, all_nodenames, all_admm_stoch_subproblem_scenario_names) + wheel = _wheel_creator( + cfg, + n_cylinders, + scenario_creator, + variable_probability, + all_nodenames, + all_admm_stoch_subproblem_scenario_names, + ) wheel.spin() if write_solution: - wheel.write_first_stage_solution('stoch_distr_soln.csv') - wheel.write_first_stage_solution('stoch_distr_cyl_nonants.npy', - first_stage_solution_writer=sputils.first_stage_nonant_npy_serializer) - wheel.write_tree_solution('stoch_distr_full_solution') + wheel.write_first_stage_solution("stoch_distr_soln.csv") + wheel.write_first_stage_solution( + "stoch_distr_cyl_nonants.npy", + first_stage_solution_writer=sputils.first_stage_nonant_npy_serializer, + ) + wheel.write_tree_solution("stoch_distr_full_solution") if global_rank == 0: - best_objective = wheel.spcomm.BestInnerBound #* len(all_admm_stoch_subproblem_scenario_names) + best_objective = ( + wheel.spcomm.BestInnerBound + ) # * len(all_admm_stoch_subproblem_scenario_names) print(f"{best_objective=}") if __name__ == "__main__": cfg = _parse_args() - main(cfg) \ No newline at end of file + main(cfg) diff --git a/examples/stoch_distr/stoch_distr_ef.py b/examples/stoch_distr/stoch_distr_ef.py index ec5d9aa7d..5156a4988 100644 --- a/examples/stoch_distr/stoch_distr_ef.py +++ b/examples/stoch_distr/stoch_distr_ef.py @@ -10,7 +10,7 @@ # This file can be executed thanks to python stoch_distr_ef.py --num-stoch-scens 2 --solver-name xpress --num-stoch-scens 2 --num-admm-subproblems 3 --scalable --mnpr 6 -# Solves the stochastic distribution problem +# Solves the stochastic distribution problem import stoch_distr import stoch_distr_admm_cylinders import examples.distr.distr_data as distr_data @@ -19,25 +19,26 @@ import mpisppy.utils.sputils as sputils from mpisppy.utils import config from mpisppy import MPI + global_rank = MPI.COMM_WORLD.Get_rank() write_solution = True + def _parse_args(): # create a config object and parse cfg = config.Config() stoch_distr.inparser_adder(cfg) - cfg.add_to_config("solver_name", - description="Choice of the solver", - domain=str, - default=None) + cfg.add_to_config( + "solver_name", description="Choice of the solver", domain=str, default=None + ) cfg.parse_command_line("stoch_distr_ef") return cfg -def solve_EF_directly(admm,solver_name): +def solve_EF_directly(admm, solver_name): local_scenario_names = admm.local_admm_stoch_subproblem_scenarios_names scenario_creator = admm.admmWrapper_scenario_creator @@ -47,50 +48,69 @@ def solve_EF_directly(admm,solver_name): nonant_for_fixed_vars=False, ) solver = pyo.SolverFactory(solver_name) - if 'persistent' in solver_name: + if "persistent" in solver_name: solver.set_instance(ef, symbolic_solver_labels=True) solver.solve(tee=True) else: - solver.solve(ef, tee=True, symbolic_solver_labels=True,) + solver.solve( + ef, + tee=True, + 
symbolic_solver_labels=True, + ) return ef -def main(): +def main(): cfg = _parse_args() if cfg.scalable: import json + json_file_path = "../distr/data_params.json" # Read the JSON file - with open(json_file_path, 'r') as file: + with open(json_file_path, "r") as file: data_params = json.load(file) # In distr_data num_admm_subproblems is called num_scens - cfg.add_to_config("num_scens", - description="num admm subproblems", - domain=int, - default=cfg.num_admm_subproblems) + cfg.add_to_config( + "num_scens", + description="num admm subproblems", + domain=int, + default=cfg.num_admm_subproblems, + ) all_nodes_dict = distr_data.all_nodes_dict_creator(cfg, data_params) - all_DC_nodes = [DC_node for region in all_nodes_dict for DC_node in all_nodes_dict[region]["distribution center nodes"]] - inter_region_dict = distr_data.scalable_inter_region_dict_creator(all_DC_nodes, cfg, data_params) + all_DC_nodes = [ + DC_node + for region in all_nodes_dict + for DC_node in all_nodes_dict[region]["distribution center nodes"] + ] + inter_region_dict = distr_data.scalable_inter_region_dict_creator( + all_DC_nodes, cfg, data_params + ) else: - inter_region_dict = distr_data.inter_region_dict_creator(num_scens=cfg.num_admm_subproblems) + inter_region_dict = distr_data.inter_region_dict_creator( + num_scens=cfg.num_admm_subproblems + ) all_nodes_dict = None data_params = {"max revenue": 1200} - - n_cylinders = 1 # There is no spoke so we only use one cylinder - admm, _ = stoch_distr_admm_cylinders._make_admm(cfg, n_cylinders, all_nodes_dict, inter_region_dict, data_params) + + n_cylinders = 1 # There is no spoke so we only use one cylinder + admm, _ = stoch_distr_admm_cylinders._make_admm( + cfg, n_cylinders, all_nodes_dict, inter_region_dict, data_params + ) solved_ef = solve_EF_directly(admm, cfg.solver_name) with open("ef.txt", "w") as f: solved_ef.pprint(f) - print ("******* model written to ef.txt *******") + print("******* model written to ef.txt *******") solution_file_name = "solution_distr.txt" - sputils.write_ef_first_stage_solution(solved_ef, - solution_file_name,) + sputils.write_ef_first_stage_solution( + solved_ef, + solution_file_name, + ) print(f"EF solution written to {solution_file_name}") print(f"EF objective: {pyo.value(solved_ef.EF_Obj)}") if __name__ == "__main__": - main() \ No newline at end of file + main() diff --git a/examples/tryone.py b/examples/tryone.py index a5e277ecc..28566bc33 100644 --- a/examples/tryone.py +++ b/examples/tryone.py @@ -30,10 +30,12 @@ badguys = dict() + def do_one(dirname, progname, np, argstring): os.chdir(dirname) - runstring = "mpiexec {} -np {} python -m mpi4py {} {}".\ - format(mpiexec_arg, np, progname, argstring) + runstring = "mpiexec {} -np {} python -m mpi4py {} {}".format( + mpiexec_arg, np, progname, argstring + ) print(runstring) code = os.system(runstring) if code != 0: @@ -43,28 +45,33 @@ def do_one(dirname, progname, np, argstring): badguys[dirname].append(runstring) os.chdir("..") + print("** Starting sizes_demo **") do_one("sizes", "sizes_demo.py", 1, " {}".format(solver_name)) print("** Starting regular sizes **") -do_one("sizes", - "sizes_cylinders.py", - 4, - "--num-scens=3 --bundles-per-rank=0 --max-iterations=5 " - "--iter0-mipgap=0.01 --iterk-mipgap=0.001 " - "--default-rho=1 --solver-name={} --with-display-progress".format(solver_name)) +do_one( + "sizes", + "sizes_cylinders.py", + 4, + "--num-scens=3 --bundles-per-rank=0 --max-iterations=5 " + "--iter0-mipgap=0.01 --iterk-mipgap=0.001 " + "--default-rho=1 --solver-name={} 
--with-display-progress".format(solver_name), +) print("** Starting special sizes **") -do_one("sizes", - "special_cylinders.py", - 4, - "--num-scens=3 --bundles-per-rank=0 --max-iterations=5 " - "--iter0-mipgap=0.01 --iterk-mipgap=0.001 " - "--default-rho=1 --solver-name={} --with-display-progress".format(solver_name)) +do_one( + "sizes", + "special_cylinders.py", + 4, + "--num-scens=3 --bundles-per-rank=0 --max-iterations=5 " + "--iter0-mipgap=0.01 --iterk-mipgap=0.001 " + "--default-rho=1 --solver-name={} --with-display-progress".format(solver_name), +) if len(badguys) > 0: print("\nBad Guys:") - for i,v in badguys.items(): + for i, v in badguys.items(): print("Directory={}".format(i)) for c in v: print(" {}".format(c)) diff --git a/examples/uc/cs_uc.py b/examples/uc/cs_uc.py index c2734375d..80cf17e9a 100644 --- a/examples/uc/cs_uc.py +++ b/examples/uc/cs_uc.py @@ -26,18 +26,15 @@ from mpisppy.opt.lshaped import LShapedMethod - from mpisppy.extensions.fixer import Fixer from mpisppy.extensions.mipgapper import Gapper + # Make it all go from mpisppy.spin_the_wheel import WheelSpinner from mpisppy.log import setup_logger # the problem -from uc_funcs import scenario_creator, \ - scenario_denouement, \ - _rho_setter, \ - id_fix_list_fct +from uc_funcs import scenario_creator, scenario_denouement, _rho_setter, id_fix_list_fct # mpi setup fullcomm = mpi.COMM_WORLD @@ -46,22 +43,28 @@ def _usage(): print( - "usage: mpiexec -np {N} python -m mpi4py cs_uc.py {ScenCount} {bundles_per_rank} {PHIterLimit} {fixer|nofixer") + "usage: mpiexec -np {N} python -m mpi4py cs_uc.py {ScenCount} {bundles_per_rank} {PHIterLimit} {fixer|nofixer" + ) print("e.g., mpiexec -np 3 python -m mpi4py cs_uc.py 10 0 5 fixer") sys.exit(1) if __name__ == "__main__": - logging.basicConfig(level=logging.DEBUG, filename='dlw.log', - filemode='w', format='(%(threadName)-10s) %(message)s') - setup_logger(f'dtm{global_rank}', f'dtm{global_rank}.log') - dtm = logging.getLogger(f'dtm{global_rank}') + logging.basicConfig( + level=logging.DEBUG, + filename="dlw.log", + filemode="w", + format="(%(threadName)-10s) %(message)s", + ) + setup_logger(f"dtm{global_rank}", f"dtm{global_rank}.log") + dtm = logging.getLogger(f"dtm{global_rank}") if len(sys.argv) != 5: _usage() - print("Start time={} for global rank={}". 
\ - format(datetime.datetime.now(), global_rank)) + print( + "Start time={} for global rank={}".format(datetime.datetime.now(), global_rank) + ) try: ScenCount = int(sys.argv[1]) @@ -74,11 +77,11 @@ def _usage(): elif sys.argv[4] == "nofixer": usefixer = False else: - print ("The last arg must be fixer or else nofixer.") + print("The last arg must be fixer or else nofixer.") _usage() - assert (ScenCount in [3, 5, 10, 25, 50]) - all_scenario_names = ['Scenario{}'.format(sn + 1) for sn in range(ScenCount)] + assert ScenCount in [3, 5, 10, 25, 50] + all_scenario_names = ["Scenario{}".format(sn + 1) for sn in range(ScenCount)] # sent to the scenario creator scenario_creator_kwargs = { "scenario_count": ScenCount, @@ -87,7 +90,7 @@ def _usage(): hub_ph_options = { "solver_name": "gurobi_persistent", - 'bundles_per_rank': bundles_per_rank, # 0 = no bundles + "bundles_per_rank": bundles_per_rank, # 0 = no bundles "asynchronousPH": False, "PHIterLimit": PHIterLimit, "defaultPHrho": 1, @@ -101,11 +104,7 @@ def _usage(): "mipgap": 1e-7, # So the trivial bound is accurate "threads": 2, }, - "iterk_solver_options": { - "mipgap": 0.001, - "threads": 2, - "method":1 - }, + "iterk_solver_options": {"mipgap": 0.001, "threads": 2, "method": 1}, "fixeroptions": { "verbose": False, "boundtol": 0.01, @@ -115,14 +114,14 @@ def _usage(): "verbose": False, "mipgapdict": dict(), # Setting this changes iter0_solver_options }, - "cross_scen_options":{"valid_eta_bound": {i:0 for i in all_scenario_names}}, + "cross_scen_options": {"valid_eta_bound": {i: 0 for i in all_scenario_names}}, } if usefixer: multi_ext = {"ext_classes": [Fixer, Gapper]} else: multi_ext = {"ext_classes": [Gapper]} - multi_ext['ext_classes'].append(CrossScenarioExtension) + multi_ext["ext_classes"].append(CrossScenarioExtension) # PH hub hub_dict = { @@ -137,23 +136,22 @@ def _usage(): "rho_setter": _rho_setter, "extensions": MultiExtension, "extension_kwargs": multi_ext, - } + }, } # TBD: this should have a different rho setter than the optimizer # maybe we should copy the options sometimes ph_options = hub_ph_options # many will not be used - ph_options["xhat_looper_options"] = \ - { - "xhat_solver_options": { - "mipgap": 0.001, - "threads": 2, - }, - "scen_limit": 3, - "dump_prefix": "delme", - "csvname": "looper.csv", - } + ph_options["xhat_looper_options"] = { + "xhat_solver_options": { + "mipgap": 0.001, + "threads": 2, + }, + "scen_limit": 3, + "dump_prefix": "delme", + "csvname": "looper.csv", + } """ "xhat_closest_options": { "xhat_solver_options": { @@ -173,14 +171,14 @@ def _usage(): """ ub_spoke = { - 'spoke_class': XhatLooperInnerBound, + "spoke_class": XhatLooperInnerBound, "spoke_kwargs": dict(), "opt_class": Xhat_Eval, - 'opt_kwargs': { - 'options': ph_options, - 'all_scenario_names': all_scenario_names, - 'scenario_creator': scenario_creator, - 'scenario_denouement': scenario_denouement, + "opt_kwargs": { + "options": ph_options, + "all_scenario_names": all_scenario_names, + "scenario_creator": scenario_creator, + "scenario_denouement": scenario_denouement, "scenario_creator_kwargs": scenario_creator_kwargs, }, } @@ -188,18 +186,18 @@ def _usage(): ls_options = { "root_solver": "gurobi_persistent", "sp_solver": "gurobi_persistent", - "sp_solver_options":{"threads":1}, - } + "sp_solver_options": {"threads": 1}, + } cut_spoke = { - 'spoke_class': CrossScenarioCutSpoke, + "spoke_class": CrossScenarioCutSpoke, "spoke_kwargs": dict(), "opt_class": LShapedMethod, - 'opt_kwargs': { - 'options': ls_options, - 'all_scenario_names': 
all_scenario_names, - 'scenario_creator': scenario_creator, - 'scenario_denouement': scenario_denouement, - "scenario_creator_kwargs": scenario_creator_kwargs + "opt_kwargs": { + "options": ls_options, + "all_scenario_names": all_scenario_names, + "scenario_creator": scenario_creator, + "scenario_denouement": scenario_denouement, + "scenario_creator_kwargs": scenario_creator_kwargs, }, } @@ -209,7 +207,8 @@ def _usage(): wheel.spin() if wheel.global_rank == 0: # we are the reporting hub rank - print(f"BestInnerBound={wheel.BestInnerBound} and BestOuterBound={wheel.BestOuterBound}") + print( + f"BestInnerBound={wheel.BestInnerBound} and BestOuterBound={wheel.BestOuterBound}" + ) - print("End time={} for global rank={}". \ - format(datetime.datetime.now(), global_rank)) + print("End time={} for global rank={}".format(datetime.datetime.now(), global_rank)) diff --git a/examples/uc/gradient_uc_cylinders.py b/examples/uc/gradient_uc_cylinders.py index 03898d52a..655f95db5 100644 --- a/examples/uc/gradient_uc_cylinders.py +++ b/examples/uc/gradient_uc_cylinders.py @@ -27,49 +27,59 @@ import mpisppy.utils.cfg_vanilla as vanilla from mpisppy.extensions.cross_scen_extension import CrossScenarioExtension + def _parse_args(): cfg = config.Config() cfg.popular_args() - cfg.num_scens_required() + cfg.num_scens_required() cfg.ph_args() cfg.two_sided_args() - cfg.aph_args() + cfg.aph_args() cfg.fixer_args() cfg.fwph_args() cfg.lagrangian_args() cfg.xhatlooper_args() cfg.xhatshuffle_args() cfg.cross_scenario_cuts_args() - cfg.dynamic_gradient_args() # gets you gradient_args for free + cfg.dynamic_gradient_args() # gets you gradient_args for free cfg.ph_ob_args() - cfg.add_to_config("ph_mipgaps_json", - description="json file with mipgap schedule (default None)", - domain=str, - default=None) - cfg.add_to_config("solution_dir", - description="writes a tree solution to the provided directory" - " (default None)", - domain=str, - default=None) - cfg.add_to_config("xhat_closest_tree", - description="Uses XhatClosest to compute a tree solution after" - " PH termination (default False)", - domain=bool, - default=False) - cfg.add_to_config("run_aph", - description="Run with async projective hedging instead of progressive hedging", - domain=bool, - default=False) - cfg.add_to_config("use_cost_based_rho", - description="Run with standard UC cost based rho setter (not gradient based)", - domain=bool, - default=False) + cfg.add_to_config( + "ph_mipgaps_json", + description="json file with mipgap schedule (default None)", + domain=str, + default=None, + ) + cfg.add_to_config( + "solution_dir", + description="writes a tree solution to the provided directory" + " (default None)", + domain=str, + default=None, + ) + cfg.add_to_config( + "xhat_closest_tree", + description="Uses XhatClosest to compute a tree solution after" + " PH termination (default False)", + domain=bool, + default=False, + ) + cfg.add_to_config( + "run_aph", + description="Run with async projective hedging instead of progressive hedging", + domain=bool, + default=False, + ) + cfg.add_to_config( + "use_cost_based_rho", + description="Run with standard UC cost based rho setter (not gradient based)", + domain=bool, + default=False, + ) cfg.parse_command_line("gradient_uc_cylinders") return cfg def main(): - cfg = _parse_args() num_scen = cfg.num_scens @@ -82,11 +92,12 @@ def main(): fixer_tol = cfg.fixer_tol cross_scenario_cuts = cfg.cross_scenario_cuts - scensavail = [3,5,10,25,50,100] + scensavail = [3, 5, 10, 25, 50, 100] if num_scen not in 
scensavail: - raise RuntimeError("num-scen was {}, but must be in {}".\ - format(num_scen, scensavail)) - + raise RuntimeError( + "num-scen was {}, but must be in {}".format(num_scen, scensavail) + ) + scenario_creator_kwargs = { "scenario_count": num_scen, "path": str(num_scen) + "scenarios_r1", @@ -98,38 +109,43 @@ def main(): rho_setter = uc._rho_setter else: rho_setter = None - + # Things needed for vanilla cylinders beans = (cfg, scenario_creator, scenario_denouement, all_scenario_names) ### start ph spoke ### if cfg.run_aph: - hub_dict = vanilla.aph_hub(*beans, - scenario_creator_kwargs=scenario_creator_kwargs, - ph_extensions=MultiExtension, - rho_setter = rho_setter) + hub_dict = vanilla.aph_hub( + *beans, + scenario_creator_kwargs=scenario_creator_kwargs, + ph_extensions=MultiExtension, + rho_setter=rho_setter, + ) else: - hub_dict = vanilla.ph_hub(*beans, - scenario_creator_kwargs=scenario_creator_kwargs, - ph_extensions=MultiExtension, - rho_setter = rho_setter) - + hub_dict = vanilla.ph_hub( + *beans, + scenario_creator_kwargs=scenario_creator_kwargs, + ph_extensions=MultiExtension, + rho_setter=rho_setter, + ) + # Extend and/or correct the vanilla dictionary - ext_classes = [Gapper] + ext_classes = [Gapper] if fixer: ext_classes.append(Fixer) if cross_scenario_cuts: ext_classes.append(CrossScenarioExtension) if cfg.xhat_closest_tree: ext_classes.append(XhatClosest) - + if cfg.grad_rho_setter: ext_classes.append(Gradient_extension) - - hub_dict["opt_kwargs"]["extension_kwargs"] = {"ext_classes" : ext_classes} + + hub_dict["opt_kwargs"]["extension_kwargs"] = {"ext_classes": ext_classes} if cross_scenario_cuts: - hub_dict["opt_kwargs"]["options"]["cross_scen_options"]\ - = {"check_bound_improve_iterations" : cfg.cross_scenario_iter_cnt} + hub_dict["opt_kwargs"]["options"]["cross_scen_options"] = { + "check_bound_improve_iterations": cfg.cross_scenario_iter_cnt + } if fixer: hub_dict["opt_kwargs"]["options"]["fixeroptions"] = { @@ -139,12 +155,12 @@ def main(): } if cfg.xhat_closest_tree: hub_dict["opt_kwargs"]["options"]["xhat_closest_options"] = { - "xhat_solver_options" : dict(), - "keep_solution" : True + "xhat_solver_options": dict(), + "keep_solution": True, } if cfg.grad_rho_setter: - hub_dict['opt_kwargs']['options']['gradient_extension_options'] = {'cfg': cfg} + hub_dict["opt_kwargs"]["options"]["gradient_extension_options"] = {"cfg": cfg} if cfg.ph_mipgaps_json is not None: with open(cfg.ph_mipgaps_json) as fin: @@ -154,41 +170,53 @@ def main(): mipgapdict = None hub_dict["opt_kwargs"]["options"]["gapperoptions"] = { "verbose": cfg.verbose, - "mipgapdict": mipgapdict - } - + "mipgapdict": mipgapdict, + } + if cfg.default_rho is None: # since we are using a rho_setter anyway - hub_dict.opt_kwcfg.options["defaultPHrho"] = 1 + hub_dict.opt_kwcfg.options["defaultPHrho"] = 1 ### end ph spoke ### - + # FWPH spoke if fwph: - fw_spoke = vanilla.fwph_spoke(*beans, scenario_creator_kwargs=scenario_creator_kwargs) + fw_spoke = vanilla.fwph_spoke( + *beans, scenario_creator_kwargs=scenario_creator_kwargs + ) # Standard Lagrangian bound spoke if lagrangian: - lagrangian_spoke = vanilla.lagrangian_spoke(*beans, - scenario_creator_kwargs=scenario_creator_kwargs, - rho_setter = rho_setter) + lagrangian_spoke = vanilla.lagrangian_spoke( + *beans, + scenario_creator_kwargs=scenario_creator_kwargs, + rho_setter=rho_setter, + ) # xhat looper bound spoke if xhatlooper: - xhatlooper_spoke = vanilla.xhatlooper_spoke(*beans, scenario_creator_kwargs=scenario_creator_kwargs) + xhatlooper_spoke = 
vanilla.xhatlooper_spoke( + *beans, scenario_creator_kwargs=scenario_creator_kwargs + ) # ph outer bounder spoke if cfg.ph_ob: - ph_ob_spoke = vanilla.ph_ob_spoke(*beans, - scenario_creator_kwargs=scenario_creator_kwargs, - rho_setter = rho_setter) + ph_ob_spoke = vanilla.ph_ob_spoke( + *beans, + scenario_creator_kwargs=scenario_creator_kwargs, + rho_setter=rho_setter, + ) # xhat shuffle bound spoke if xhatshuffle: - xhatshuffle_spoke = vanilla.xhatshuffle_spoke(*beans, scenario_creator_kwargs=scenario_creator_kwargs) - + xhatshuffle_spoke = vanilla.xhatshuffle_spoke( + *beans, scenario_creator_kwargs=scenario_creator_kwargs + ) + # cross scenario cut spoke if cross_scenario_cuts: - cross_scenario_cuts_spoke = vanilla.cross_scenario_cuts_spoke(*beans, scenario_creator_kwargs=scenario_creator_kwargs) + cross_scenario_cuts_spoke = vanilla.cross_scenario_cuts_spoke( + *beans, scenario_creator_kwargs=scenario_creator_kwargs + ) list_of_spoke_dict = list() if fwph: @@ -196,7 +224,7 @@ def main(): if lagrangian: list_of_spoke_dict.append(lagrangian_spoke) if cfg.ph_ob: - list_of_spoke_dict.append(ph_ob_spoke) + list_of_spoke_dict.append(ph_ob_spoke) if xhatlooper: list_of_spoke_dict.append(xhatlooper_spoke) if xhatshuffle: @@ -210,8 +238,11 @@ def main(): if cfg.solution_dir is not None: wheel.write_tree_solution(cfg.solution_dir, uc.scenario_tree_solution_writer) - wheel.write_first_stage_solution('uc_cyl_nonants.npy', - first_stage_solution_writer=sputils.first_stage_nonant_npy_serializer) + wheel.write_first_stage_solution( + "uc_cyl_nonants.npy", + first_stage_solution_writer=sputils.first_stage_nonant_npy_serializer, + ) + if __name__ == "__main__": - main() + main() diff --git a/examples/uc/rhoconfig10.py b/examples/uc/rhoconfig10.py index f833c68e4..bdc722d4e 100644 --- a/examples/uc/rhoconfig10.py +++ b/examples/uc/rhoconfig10.py @@ -11,32 +11,36 @@ # TBD - we can't get the rho scale factor into the callback easily, so we hard-code for now. rho_scale_factor = 1.0 -def ph_rhosetter_callback(ph, scenario_tree, scenario): - - root_node = scenario_tree.findRootNode() - scenario_instance = scenario._instance - - symbol_map = scenario_instance._ScenarioTreeSymbolMap +def ph_rhosetter_callback(ph, scenario_tree, scenario): + root_node = scenario_tree.findRootNode() - # sorting to allow for sane verbose output... - for b in sorted(scenario_instance.Buses): + scenario_instance = scenario._instance - for t in sorted(scenario_instance.TimePeriods): + symbol_map = scenario_instance._ScenarioTreeSymbolMap - for g in sorted(scenario_instance.ThermalGeneratorsAtBus[b]): + # sorting to allow for sane verbose output... 
+ for b in sorted(scenario_instance.Buses): + for t in sorted(scenario_instance.TimePeriods): + for g in sorted(scenario_instance.ThermalGeneratorsAtBus[b]): + min_power = value(scenario_instance.MinimumPowerOutput[g]) + max_power = value(scenario_instance.MaximumPowerOutput[g]) + avg_power = min_power + ((max_power - min_power) / 2.0) - min_power = value(scenario_instance.MinimumPowerOutput[g]) - max_power = value(scenario_instance.MaximumPowerOutput[g]) - avg_power = min_power + ((max_power - min_power) / 2.0) + min_cost = value(scenario_instance.MinimumProductionCost[g]) - min_cost = value(scenario_instance.MinimumProductionCost[g]) + avg_cost = ( + scenario_instance.ComputeProductionCosts( + scenario_instance, g, t, avg_power + ) + + min_cost + ) - avg_cost = scenario_instance.ComputeProductionCosts(scenario_instance, g, t, avg_power) + min_cost - - rho = rho_scale_factor * avg_cost + rho = rho_scale_factor * avg_cost - ph.setRhoOneScenario(root_node, - scenario, - symbol_map.getSymbol(scenario_instance.UnitOn[g,t]), - rho) + ph.setRhoOneScenario( + root_node, + scenario, + symbol_map.getSymbol(scenario_instance.UnitOn[g, t]), + rho, + ) diff --git a/examples/uc/simple_ef.py b/examples/uc/simple_ef.py index 4f445c08f..ceac2c148 100644 --- a/examples/uc/simple_ef.py +++ b/examples/uc/simple_ef.py @@ -26,11 +26,15 @@ ) solver = pyo.SolverFactory(solver_name) -if 'persistent' in solver_name: +if "persistent" in solver_name: solver.set_instance(ef, symbolic_solver_labels=True) results = solver.solve(tee=True) else: - results = solver.solve(ef, tee=True, symbolic_solver_labels=True,) + results = solver.solve( + ef, + tee=True, + symbolic_solver_labels=True, + ) ###results = ef.solve_extensive_form(tee=True) pyo.assert_optimal_termination(results) diff --git a/examples/uc/uc3wood.py b/examples/uc/uc3wood.py index c603e00ad..8b7b16977 100644 --- a/examples/uc/uc3wood.py +++ b/examples/uc/uc3wood.py @@ -17,46 +17,54 @@ # Hub and spoke SPBase classes from mpisppy.phbase import PHBase from mpisppy.opt.ph import PH + # Hub and spoke SPCommunicator classes from mpisppy.cylinders.lagrangian_bounder import LagrangianOuterBound from mpisppy.cylinders.xhatlooper_bounder import XhatLooperInnerBound from mpisppy.cylinders.hub import PHHub + # extensions for the hub from mpisppy.extensions.extension import MultiExtension from mpisppy.extensions.fixer import Fixer from mpisppy.extensions.mipgapper import Gapper + # Make it all go from mpisppy.spin_the_wheel import WheelSpinner from mpisppy.utils.xhat_eval import Xhat_Eval from mpisppy.log import setup_logger # the problem -from uc_funcs import scenario_creator, \ - scenario_denouement, \ - _rho_setter, \ - id_fix_list_fct +from uc_funcs import scenario_creator, scenario_denouement, _rho_setter, id_fix_list_fct # mpi setup fullcomm = mpi.COMM_WORLD global_rank = fullcomm.Get_rank() + def _usage(): - print("usage: mpiexec -np {N} python -m mpi4py uc3wood.py {ScenCount} {bundles_per_rank} {PHIterLimit} {fixer|nofixer") + print( + "usage: mpiexec -np {N} python -m mpi4py uc3wood.py {ScenCount} {bundles_per_rank} {PHIterLimit} {fixer|nofixer}" + ) print("e.g., mpiexec -np 3 python -m mpi4py uc3wood.py 10 0 5 fixer") sys.exit(1) if __name__ == "__main__": - logging.basicConfig(level=logging.DEBUG, filename='dlw.log', - filemode='w', format='(%(threadName)-10s) %(message)s') - setup_logger(f'dtm{global_rank}', f'dtm{global_rank}.log') - dtm = logging.getLogger(f'dtm{global_rank}') + logging.basicConfig( + level=logging.DEBUG, + filename="dlw.log", + filemode="w", 
+ format="(%(threadName)-10s) %(message)s", + ) + setup_logger(f"dtm{global_rank}", f"dtm{global_rank}.log") + dtm = logging.getLogger(f"dtm{global_rank}") if len(sys.argv) != 5: _usage() - print("Start time={} for global rank={}".\ - format(datetime.datetime.now(), global_rank)) + print( + "Start time={} for global rank={}".format(datetime.datetime.now(), global_rank) + ) try: ScenCount = int(sys.argv[1]) @@ -69,11 +77,11 @@ def _usage(): elif sys.argv[4] == "nofixer": usefixer = False else: - print ("The last arg must be fixer or else nofixer.") + print("The last arg must be fixer or else nofixer.") _usage() - - assert(ScenCount in [3,5,10,25,50]) - all_scenario_names = ['Scenario{}'.format(sn+1) for sn in range(ScenCount)] + + assert ScenCount in [3, 5, 10, 25, 50] + all_scenario_names = ["Scenario{}".format(sn + 1) for sn in range(ScenCount)] # sent to the scenario creator scenario_creator_kwargs = { "scenario_count": ScenCount, @@ -82,7 +90,7 @@ def _usage(): hub_ph_options = { "solver_name": "gurobi_persistent", - 'bundles_per_rank': bundles_per_rank, # 0 = no bundles + "bundles_per_rank": bundles_per_rank, # 0 = no bundles "asynchronousPH": False, "PHIterLimit": PHIterLimit, "defaultPHrho": 1, @@ -92,8 +100,8 @@ def _usage(): "display_timing": False, "display_progress": False, "tee-rank0-solves": False, - "iter0_solver_options" : { - "mipgap": 1e-7, # So the trivial bound is accurate + "iter0_solver_options": { + "mipgap": 1e-7, # So the trivial bound is accurate "threads": 2, }, "iterk_solver_options": { @@ -107,7 +115,7 @@ def _usage(): }, "gapperoptions": { "verbose": False, - "mipgapdict": dict() , # Setting this changes iter0_solver_options + "mipgapdict": dict(), # Setting this changes iter0_solver_options }, } if usefixer: @@ -128,29 +136,28 @@ def _usage(): "rho_setter": _rho_setter, "extensions": MultiExtension, "extension_kwargs": multi_ext, - } + }, } # TBD: this should have a different rho setter than the optimizer lagrangian_spoke = { "spoke_class": LagrangianOuterBound, "spoke_kwargs": dict(), - "opt_class": PHBase, - 'opt_kwargs': { - 'options': hub_ph_options, - 'all_scenario_names': all_scenario_names, - 'scenario_creator': scenario_creator, + "opt_class": PHBase, + "opt_kwargs": { + "options": hub_ph_options, + "all_scenario_names": all_scenario_names, + "scenario_creator": scenario_creator, "scenario_creator_kwargs": scenario_creator_kwargs, "rho_setter": _rho_setter, - 'scenario_denouement': scenario_denouement, + "scenario_denouement": scenario_denouement, }, } # maybe we should copy the options sometimes xhat_options = hub_ph_options.copy() - xhat_options['bundles_per_rank'] = 0 # no bundles for xhat - xhat_options["xhat_looper_options"] =\ - { + xhat_options["bundles_per_rank"] = 0 # no bundles for xhat + xhat_options["xhat_looper_options"] = { "xhat_solver_options": { "mipgap": 0.001, "threads": 2, @@ -177,14 +184,14 @@ def _usage(): }, """ ub_spoke = { - 'spoke_class': XhatLooperInnerBound, + "spoke_class": XhatLooperInnerBound, "spoke_kwargs": dict(), "opt_class": Xhat_Eval, - 'opt_kwargs': { - 'options': xhat_options, - 'all_scenario_names': all_scenario_names, - 'scenario_creator': scenario_creator, - 'scenario_denouement': scenario_denouement, + "opt_kwargs": { + "options": xhat_options, + "all_scenario_names": all_scenario_names, + "scenario_creator": scenario_creator, + "scenario_denouement": scenario_denouement, "scenario_creator_kwargs": scenario_creator_kwargs, }, } @@ -195,6 +202,7 @@ def _usage(): wheel.spin() if wheel.global_rank == 0: # we are 
the reporting hub rank - print(f"BestInnerBound={wheel.BestInnerBound} and BestOuterBound={wheel.BestOuterBound}") - print("End time={} for global rank={}".\ - format(datetime.datetime.now(), global_rank)) + print( + f"BestInnerBound={wheel.BestInnerBound} and BestOuterBound={wheel.BestOuterBound}" + ) + print("End time={} for global rank={}".format(datetime.datetime.now(), global_rank)) diff --git a/examples/uc/uc4wood.py b/examples/uc/uc4wood.py index bfba16e1f..853f9c709 100644 --- a/examples/uc/uc4wood.py +++ b/examples/uc/uc4wood.py @@ -20,46 +20,54 @@ from mpisppy.opt.ph import PH from mpisppy.fwph.fwph import FWPH from mpisppy.utils.xhat_eval import Xhat_Eval + # Hub and spoke SPCommunicator classes from mpisppy.cylinders.fwph_spoke import FrankWolfeOuterBound from mpisppy.cylinders.lagrangian_bounder import LagrangianOuterBound from mpisppy.cylinders.xhatlooper_bounder import XhatLooperInnerBound from mpisppy.cylinders.hub import PHHub + # extensions for the hub from mpisppy.extensions.extension import MultiExtension from mpisppy.extensions.fixer import Fixer from mpisppy.extensions.mipgapper import Gapper + # Make it all go from mpisppy.spin_the_wheel import WheelSpinner from mpisppy.log import setup_logger # the problem -from uc_funcs import scenario_creator, \ - scenario_denouement, \ - _rho_setter, \ - id_fix_list_fct +from uc_funcs import scenario_creator, scenario_denouement, _rho_setter, id_fix_list_fct # mpi setup fullcomm = mpi.COMM_WORLD global_rank = fullcomm.Get_rank() + def _usage(): - print("usage: mpiexec -np {N} python -m mpi4py uc4wood.py {ScenCount} {bundles_per_rank} {PHIterLimit} {fixer|nofixer") + print( + "usage: mpiexec -np {N} python -m mpi4py uc4wood.py {ScenCount} {bundles_per_rank} {PHIterLimit} {fixer|nofixer}" + ) print("e.g., mpiexec -np 4 python -m mpi4py uc4wood.py 10 0 5 fixer") sys.exit(1) if __name__ == "__main__": - logging.basicConfig(level=logging.DEBUG, filename='dlw.log', - filemode='w', format='(%(threadName)-10s) %(message)s') - setup_logger(f'dtm{global_rank}', f'dtm{global_rank}.log') - dtm = logging.getLogger(f'dtm{global_rank}') + logging.basicConfig( + level=logging.DEBUG, + filename="dlw.log", + filemode="w", + format="(%(threadName)-10s) %(message)s", + ) + setup_logger(f"dtm{global_rank}", f"dtm{global_rank}.log") + dtm = logging.getLogger(f"dtm{global_rank}") if len(sys.argv) != 5: _usage() - print("Start time={} for global rank={}".\ - format(datetime.datetime.now(), global_rank)) + print( + "Start time={} for global rank={}".format(datetime.datetime.now(), global_rank) + ) try: ScenCount = int(sys.argv[1]) @@ -72,19 +80,19 @@ def _usage(): elif sys.argv[4] == "nofixer": usefixer = False else: - print ("The last arg must be fixer or else nofixer.") + print("The last arg must be fixer or else nofixer.") _usage() - - assert(ScenCount in [3,5,10,25,50]) - all_scenario_names = ['Scenario{}'.format(sn+1) for sn in range(ScenCount)] + + assert ScenCount in [3, 5, 10, 25, 50] + all_scenario_names = ["Scenario{}".format(sn + 1) for sn in range(ScenCount)] scenario_creator_kwargs = { "scenario_count": ScenCount, "path": str(ScenCount) + "scenarios_r1", } - + hub_ph_options = { "solver_name": "gurobi_persistent", - 'bundles_per_rank': bundles_per_rank, # 0 = no bundles + "bundles_per_rank": bundles_per_rank, # 0 = no bundles "asynchronousPH": False, "PHIterLimit": PHIterLimit, "defaultPHrho": 1, @@ -94,8 +102,8 @@ def _usage(): "display_timing": False, "display_progress": False, "tee-rank0-solves": False, - "iter0_solver_options" : { - 
"mipgap": 1e-7, # So the trivial bound is accurate (but wastes time in hub) + "iter0_solver_options": { + "mipgap": 1e-7, # So the trivial bound is accurate (but wastes time in hub) "threads": 2, }, "iterk_solver_options": { @@ -109,7 +117,7 @@ def _usage(): }, "gapperoptions": { "verbose": False, - "mipgapdict": dict() , # Setting this changes iter0_solver_options + "mipgapdict": dict(), # Setting this changes iter0_solver_options }, } if usefixer: @@ -130,13 +138,13 @@ def _usage(): "rho_setter": _rho_setter, "extensions": MultiExtension, "extension_kwargs": multi_ext, - } + }, } # FWPH spoke fw_options = { "FW_iter_limit": 10, - "FW_weight": 0., # Or 1.? I forget what this does, honestly + "FW_weight": 0.0, # Or 1.? I forget what this does, honestly "FW_conv_thresh": 1e-4, "solver_name": "gurobi_persistent", "FW_verbose": False, @@ -151,28 +159,27 @@ def _usage(): "all_scenario_names": all_scenario_names, "scenario_creator": scenario_creator, "scenario_creator_kwargs": scenario_creator_kwargs, - } + }, } lagrangian_spoke = { "spoke_class": LagrangianOuterBound, "spoke_kwargs": dict(), - "opt_class": PHBase, - 'opt_kwargs': { - 'options': hub_ph_options, - 'all_scenario_names': all_scenario_names, - 'scenario_creator': scenario_creator, + "opt_class": PHBase, + "opt_kwargs": { + "options": hub_ph_options, + "all_scenario_names": all_scenario_names, + "scenario_creator": scenario_creator, "scenario_creator_kwargs": scenario_creator_kwargs, "rho_setter": _rho_setter, - 'scenario_denouement': scenario_denouement, + "scenario_denouement": scenario_denouement, }, } # maybe we should copy the options sometimes xhat_options = hub_ph_options.copy() - xhat_options['bundles_per_rank'] = 0 # no bundles for xhat - xhat_options["xhat_looper_options"] =\ - { + xhat_options["bundles_per_rank"] = 0 # no bundles for xhat + xhat_options["xhat_looper_options"] = { "xhat_solver_options": { "mipgap": 0.001, "threads": 2, @@ -200,14 +207,14 @@ def _usage(): """ ub_spoke = { - 'spoke_class': XhatLooperInnerBound, + "spoke_class": XhatLooperInnerBound, "spoke_kwargs": dict(), "opt_class": Xhat_Eval, - 'opt_kwargs': { - 'options': xhat_options, - 'all_scenario_names': all_scenario_names, - 'scenario_creator': scenario_creator, - 'scenario_denouement': scenario_denouement, + "opt_kwargs": { + "options": xhat_options, + "all_scenario_names": all_scenario_names, + "scenario_creator": scenario_creator, + "scenario_denouement": scenario_denouement, "scenario_creator_kwargs": scenario_creator_kwargs, }, } @@ -218,6 +225,7 @@ def _usage(): wheel.spin() if wheel.global_rank == 0: # we are the reporting hub rank - print(f"BestInnerBound={wheel.BestInnerBound} and BestOuterBound={wheel.BestOuterBound}") - print("End time={} for global rank={}".\ - format(datetime.datetime.now(), global_rank)) + print( + f"BestInnerBound={wheel.BestInnerBound} and BestOuterBound={wheel.BestOuterBound}" + ) + print("End time={} for global rank={}".format(datetime.datetime.now(), global_rank)) diff --git a/examples/uc/uc_ama.py b/examples/uc/uc_ama.py index 34da9c6d5..35844752c 100644 --- a/examples/uc/uc_ama.py +++ b/examples/uc/uc_ama.py @@ -7,31 +7,62 @@ # full copyright and license information. 
############################################################################### """ -An example of using amalgamator with 3 cylinders and one extension +An example of using amalgamator with 3 cylinders and one extension To execute this: mpiexec -np 3 python uc_ama.py --default-rho=1 --num-scens=3 --fixer-tol=1e-2 - + WARNING: num-scens must be taken as a valid value, i.e. among (3,5,10,25,50,100) """ + import mpisppy.utils.amalgamator as amalgamator from uc_funcs import id_fix_list_fct from mpisppy.utils import config import pyomo.common.config as pyofig + def main(): - solution_files = {"first_stage_solution":"uc_first_stage.csv", - #"tree_solution":"uc_ama_full_solution" - #It takes too long to right the full solution - } + solution_files = { + "first_stage_solution": "uc_first_stage.csv", + # "tree_solution":"uc_ama_full_solution" + # It takes too long to write the full solution + } cfg = config.Config() - cfg.add_and_assign("id_fix_list_fct", "fct used by fixer extension", - domain=None, default=None, - value = id_fix_list_fct) - cfg.add_and_assign("2stage", description="2stage vsus mstage", domain=bool, default=None, value=True) - cfg.add_and_assign("cylinders", description="list of cylinders", domain=pyofig.ListOf(str), default=None, value=['ph','xhatshuffle','lagranger']) - cfg.add_and_assign("extensions", description="list of extensions", domain=pyofig.ListOf(str), default=None, value= ['fixer']) - cfg.add_and_assign("write_solution", description="list of extensions", domain=None, default=None, value=solution_files) + cfg.add_and_assign( + "id_fix_list_fct", + "fct used by fixer extension", + domain=None, + default=None, + value=id_fix_list_fct, + ) + cfg.add_and_assign( + "2stage", + description="2stage versus mstage", + domain=bool, + default=None, + value=True, + ) + cfg.add_and_assign( + "cylinders", + description="list of cylinders", + domain=pyofig.ListOf(str), + default=None, + value=["ph", "xhatshuffle", "lagranger"], + ) + cfg.add_and_assign( + "extensions", + description="list of extensions", + domain=pyofig.ListOf(str), + default=None, + value=["fixer"], + ) + cfg.add_and_assign( + "write_solution", + description="solution files to write", + domain=None, + default=None, + value=solution_files, + ) ama = amalgamator.from_module("uc_funcs", cfg) ama.run() @@ -40,5 +71,6 @@ def main(): print("inner bound=", ama.best_inner_bound) print("outer bound=", ama.best_outer_bound) + if __name__ == "__main__": main() diff --git a/examples/uc/uc_cylinders.py b/examples/uc/uc_cylinders.py index 7c1413864..c10dc6bc8 100644 --- a/examples/uc/uc_cylinders.py +++ b/examples/uc/uc_cylinders.py @@ -26,6 +26,7 @@ import mpisppy.utils.cfg_vanilla as vanilla from mpisppy.extensions.cross_scen_extension import CrossScenarioExtension + def _parse_args(): cfg = config.Config() cfg.popular_args() @@ -41,30 +42,37 @@ def _parse_args(): cfg.cross_scenario_cuts_args() cfg.reduced_costs_args() cfg.tracking_args() - cfg.add_to_config("ph_mipgaps_json", - description="json file with mipgap schedule (default None)", - domain=str, - default=None) - cfg.add_to_config("solution_dir", - description="writes a tree solution to the provided directory" - " (default None)", - domain=str, - default=None) - cfg.add_to_config("xhat_closest_tree", - description="Uses XhatClosest to compute a tree solution after" - " PH termination (default False)", - domain=bool, - default=False) - cfg.add_to_config("run_aph", - description="Run with async projective hedging instead of progressive hedging", - domain=bool, - default=False) + 
cfg.add_to_config( + "ph_mipgaps_json", + description="json file with mipgap schedule (default None)", + domain=str, + default=None, + ) + cfg.add_to_config( + "solution_dir", + description="writes a tree solution to the provided directory" + " (default None)", + domain=str, + default=None, + ) + cfg.add_to_config( + "xhat_closest_tree", + description="Uses XhatClosest to compute a tree solution after" + " PH termination (default False)", + domain=bool, + default=False, + ) + cfg.add_to_config( + "run_aph", + description="Run with async projective hedging instead of progressive hedging", + domain=bool, + default=False, + ) cfg.parse_command_line("uc_cylinders") return cfg def main(): - cfg = _parse_args() num_scen = cfg.num_scens @@ -78,10 +86,11 @@ def main(): cross_scenario_cuts = cfg.cross_scenario_cuts reduced_costs = cfg.reduced_costs - scensavail = [3,5,10,25,50,100] + scensavail = [3, 5, 10, 25, 50, 100] if num_scen not in scensavail: - raise RuntimeError("num-scen was {}, but must be in {}".\ - format(num_scen, scensavail)) + raise RuntimeError( + "num-scen was {}, but must be in {}".format(num_scen, scensavail) + ) scenario_creator_kwargs = { "scenario_count": num_scen, @@ -97,18 +106,22 @@ def main(): ### start ph spoke ### if cfg.run_aph: - hub_dict = vanilla.aph_hub(*beans, - scenario_creator_kwargs=scenario_creator_kwargs, - ph_extensions=MultiExtension, - rho_setter = rho_setter) + hub_dict = vanilla.aph_hub( + *beans, + scenario_creator_kwargs=scenario_creator_kwargs, + ph_extensions=MultiExtension, + rho_setter=rho_setter, + ) else: - hub_dict = vanilla.ph_hub(*beans, - scenario_creator_kwargs=scenario_creator_kwargs, - ph_extensions=MultiExtension, - rho_setter = rho_setter) + hub_dict = vanilla.ph_hub( + *beans, + scenario_creator_kwargs=scenario_creator_kwargs, + ph_extensions=MultiExtension, + rho_setter=rho_setter, + ) # Extend and/or correct the vanilla dictionary - ext_classes = [Gapper] + ext_classes = [Gapper] if fixer: ext_classes.append(Fixer) if cross_scenario_cuts: @@ -116,10 +129,11 @@ def main(): if cfg.xhat_closest_tree: ext_classes.append(XhatClosest) - hub_dict["opt_kwargs"]["extension_kwargs"] = {"ext_classes" : ext_classes} + hub_dict["opt_kwargs"]["extension_kwargs"] = {"ext_classes": ext_classes} if cross_scenario_cuts: - hub_dict["opt_kwargs"]["options"]["cross_scen_options"]\ - = {"check_bound_improve_iterations" : cfg.cross_scenario_iter_cnt} + hub_dict["opt_kwargs"]["options"]["cross_scen_options"] = { + "check_bound_improve_iterations": cfg.cross_scenario_iter_cnt + } if fixer: hub_dict["opt_kwargs"]["options"]["fixeroptions"] = { @@ -129,8 +143,8 @@ def main(): } if cfg.xhat_closest_tree: hub_dict["opt_kwargs"]["options"]["xhat_closest_options"] = { - "xhat_solver_options" : dict(), - "keep_solution" : True + "xhat_solver_options": dict(), + "keep_solution": True, } if cfg.ph_mipgaps_json is not None: @@ -141,8 +155,8 @@ def main(): mipgapdict = None hub_dict["opt_kwargs"]["options"]["gapperoptions"] = { "verbose": cfg.verbose, - "mipgapdict": mipgapdict - } + "mipgapdict": mipgapdict, + } if cfg.default_rho is None: # since we are using a rho_setter anyway @@ -154,30 +168,40 @@ def main(): # FWPH spoke if fwph: - fw_spoke = vanilla.fwph_spoke(*beans, scenario_creator_kwargs=scenario_creator_kwargs) + fw_spoke = vanilla.fwph_spoke( + *beans, scenario_creator_kwargs=scenario_creator_kwargs + ) # Standard Lagrangian bound spoke if lagrangian: - lagrangian_spoke = vanilla.lagrangian_spoke(*beans, - scenario_creator_kwargs=scenario_creator_kwargs, - 
rho_setter = rho_setter) + lagrangian_spoke = vanilla.lagrangian_spoke( + *beans, + scenario_creator_kwargs=scenario_creator_kwargs, + rho_setter=rho_setter, + ) # xhat looper bound spoke if xhatlooper: - xhatlooper_spoke = vanilla.xhatlooper_spoke(*beans, scenario_creator_kwargs=scenario_creator_kwargs) + xhatlooper_spoke = vanilla.xhatlooper_spoke( + *beans, scenario_creator_kwargs=scenario_creator_kwargs + ) # xhat shuffle bound spoke if xhatshuffle: - xhatshuffle_spoke = vanilla.xhatshuffle_spoke(*beans, scenario_creator_kwargs=scenario_creator_kwargs) + xhatshuffle_spoke = vanilla.xhatshuffle_spoke( + *beans, scenario_creator_kwargs=scenario_creator_kwargs + ) # cross scenario cut spoke if cross_scenario_cuts: - cross_scenario_cuts_spoke = vanilla.cross_scenario_cuts_spoke(*beans, scenario_creator_kwargs=scenario_creator_kwargs) + cross_scenario_cuts_spoke = vanilla.cross_scenario_cuts_spoke( + *beans, scenario_creator_kwargs=scenario_creator_kwargs + ) if reduced_costs: - reduced_costs_spoke = vanilla.reduced_costs_spoke(*beans, - scenario_creator_kwargs=scenario_creator_kwargs, - rho_setter = None) + reduced_costs_spoke = vanilla.reduced_costs_spoke( + *beans, scenario_creator_kwargs=scenario_creator_kwargs, rho_setter=None + ) list_of_spoke_dict = list() if fwph: @@ -199,8 +223,10 @@ def main(): if cfg.solution_dir is not None: wheel.write_tree_solution(cfg.solution_dir, uc.scenario_tree_solution_writer) - wheel.write_first_stage_solution('uc_cyl_nonants.npy', - first_stage_solution_writer=sputils.first_stage_nonant_npy_serializer) + wheel.write_first_stage_solution( + "uc_cyl_nonants.npy", + first_stage_solution_writer=sputils.first_stage_nonant_npy_serializer, + ) if __name__ == "__main__": diff --git a/examples/uc/uc_ef.py b/examples/uc/uc_ef.py index 1b12fccf1..8a01ca928 100644 --- a/examples/uc/uc_ef.py +++ b/examples/uc/uc_ef.py @@ -29,4 +29,6 @@ print(f"{scen_count}-scenario UC objective value:", pyo.value(ef.ef.EF_Obj)) if ef.tree_solution_available: print("Writing tree solution") - ef.write_tree_solution(f"{scen_count}scenario_EF_solution", uc.scenario_tree_solution_writer) + ef.write_tree_solution( + f"{scen_count}scenario_EF_solution", uc.scenario_tree_solution_writer + ) diff --git a/examples/uc/uc_funcs.py b/examples/uc/uc_funcs.py index 1a36af031..e88c557ca 100644 --- a/examples/uc/uc_funcs.py +++ b/examples/uc/uc_funcs.py @@ -31,25 +31,28 @@ def pysp_instance_creation_callback(scenario_name, path=None, scenario_count=Non - The uc_cylinders.py code has a `scenario_count` kwarg that gets passed to the spokes, but it seems to be unused here... 
""" - #print("Building instance for scenario =", scenario_name) + # print("Building instance for scenario =", scenario_name) scennum = sputils.extract_num(scenario_name) uc_model_params = pdp.get_uc_model() scenario_data = DataPortal(model=uc_model_params) - scenario_data.load(filename=path+os.sep+"RootNode.dat") - scenario_data.load(filename=path+os.sep+"Node"+str(scennum)+".dat") + scenario_data.load(filename=path + os.sep + "RootNode.dat") + scenario_data.load(filename=path + os.sep + "Node" + str(scennum) + ".dat") - scenario_params = uc_model_params.create_instance(scenario_data, - report_timing=False, - name=scenario_name) + scenario_params = uc_model_params.create_instance( + scenario_data, report_timing=False, name=scenario_name + ) - scenario_md = md.ModelData(pdp.create_model_data_dict_params(scenario_params, keep_names=True)) + scenario_md = md.ModelData( + pdp.create_model_data_dict_params(scenario_params, keep_names=True) + ) ## TODO: use the "power_balance_constraints" for now. In the future, networks should be ## handled with a custom callback -- also consider other base models - scenario_instance = uc.create_tight_unit_commitment_model(scenario_md, - network_constraints='power_balance_constraints') + scenario_instance = uc.create_tight_unit_commitment_model( + scenario_md, network_constraints="power_balance_constraints" + ) # hold over string attribute from Egret, # causes warning wth LShaped/Benders @@ -60,25 +63,36 @@ def pysp_instance_creation_callback(scenario_name, path=None, scenario_count=Non # TBD: there are legacy functions here that should probably be factored. -def scenario_creator(scenario_name, scenario_count=None, path=None, - num_scens=None, seedoffset=0): - return pysp2_callback(scenario_name, scenario_count=scenario_count, - path=path, num_scens=num_scens, seedoffset=seedoffset) -def pysp2_callback(scenario_name, scenario_count=None, path=None, - num_scens=None, seedoffset=0): - ''' The callback needs to create an instance and then attach - the nodes to it in a list _mpisppy_node_list ordered by stages. - Optionally attach _PHrho. - ''' +def scenario_creator( + scenario_name, scenario_count=None, path=None, num_scens=None, seedoffset=0 +): + return pysp2_callback( + scenario_name, + scenario_count=scenario_count, + path=path, + num_scens=num_scens, + seedoffset=seedoffset, + ) + + +def pysp2_callback( + scenario_name, scenario_count=None, path=None, num_scens=None, seedoffset=0 +): + """The callback needs to create an instance and then attach + the nodes to it in a list _mpisppy_node_list ordered by stages. + Optionally attach _PHrho. 
+ """ instance = pysp_instance_creation_callback( - scenario_name, scenario_count=scenario_count, path=path, + scenario_name, + scenario_count=scenario_count, + path=path, ) - #Add the probability of the scenario - if num_scens is not None : - instance._mpisppy_probability = 1/num_scens + # Add the probability of the scenario + if num_scens is not None: + instance._mpisppy_probability = 1 / num_scens # now attach the one and only tree node (ROOT is a reserved word) # UnitOn[*,*] is the only set of nonant variables @@ -92,100 +106,117 @@ def pysp2_callback(scenario_name, scenario_count=None, path=None, [instance.UnitStart, instance.UnitStop, instance.StartupIndicator], )] """ - sputils.attach_root_node(instance, - instance.StageCost["Stage_1"], - [instance.UnitOn], - nonant_ef_suppl_list = [instance.UnitStart, - instance.UnitStop, - instance.StartupIndicator]) + sputils.attach_root_node( + instance, + instance.StageCost["Stage_1"], + [instance.UnitOn], + nonant_ef_suppl_list=[ + instance.UnitStart, + instance.UnitStop, + instance.StartupIndicator, + ], + ) return instance + def scenario_denouement(rank, scenario_name, scenario): -# print("First stage cost for scenario",scenario_name,"is",pyo.value(scenario.StageCost["FirstStage"])) -# print("Second stage cost for scenario",scenario_name,"is",pyo.value(scenario.StageCost["SecondStage"])) + # print("First stage cost for scenario",scenario_name,"is",pyo.value(scenario.StageCost["FirstStage"])) + # print("Second stage cost for scenario",scenario_name,"is",pyo.value(scenario.StageCost["SecondStage"])) pass -def scenario_rhosa(scenario_instance): +def scenario_rhosa(scenario_instance): return scenario_rhos(scenario_instance) -def _rho_setter(scenario_instance): +def _rho_setter(scenario_instance): return scenario_rhos(scenario_instance) + def scenario_rhos(scenario_instance, rho_scale_factor=0.1): computed_rhos = [] for t in scenario_instance.TimePeriods: for g in scenario_instance.ThermalGenerators: - min_power = pyo.value(scenario_instance.MinimumPowerOutput[g,t]) - max_power = pyo.value(scenario_instance.MaximumPowerOutput[g,t]) + min_power = pyo.value(scenario_instance.MinimumPowerOutput[g, t]) + max_power = pyo.value(scenario_instance.MaximumPowerOutput[g, t]) avg_power = min_power + ((max_power - min_power) / 2.0) - min_cost = pyo.value(scenario_instance.MinimumProductionCost[g,t]) + min_cost = pyo.value(scenario_instance.MinimumProductionCost[g, t]) - avg_cost = scenario_instance.ComputeProductionCosts(scenario_instance, g, t, avg_power) + min_cost - #max_cost = scenario_instance.ComputeProductionCosts(scenario_instance, g, t, max_power) + min_cost + avg_cost = ( + scenario_instance.ComputeProductionCosts( + scenario_instance, g, t, avg_power + ) + + min_cost + ) + # max_cost = scenario_instance.ComputeProductionCosts(scenario_instance, g, t, max_power) + min_cost computed_rho = rho_scale_factor * avg_cost - computed_rhos.append((id(scenario_instance.UnitOn[g,t]), computed_rho)) - + computed_rhos.append((id(scenario_instance.UnitOn[g, t]), computed_rho)) + return computed_rhos -def scenario_rhos_trial_from_file(scenario_instance, rho_scale_factor=0.01, - fname=None): - ''' First computes the standard rho values (as computed by scenario_rhos() - above). Then reads rho values from the specified file (raises error if - no file specified) which is a csv formatted (var_name,rho_value). If - the rho_value specified in the file is strictly positive, it replaces - the value computed by scenario_rhos(). 
- - DTM: I wrote this function to test some specific things--I don't think - this will have a general purpose use, and can probably be deleted. - ''' - if (fname is None): - raise RuntimeError('Please provide an "fname" kwarg to ' - 'the "rho_setter_kwargs" option in options') - computed_rhos = scenario_rhos(scenario_instance, - rho_scale_factor=rho_scale_factor) + +def scenario_rhos_trial_from_file(scenario_instance, rho_scale_factor=0.01, fname=None): + """First computes the standard rho values (as computed by scenario_rhos() + above). Then reads rho values from the specified file (raises error if + no file specified) which is a csv formatted (var_name,rho_value). If + the rho_value specified in the file is strictly positive, it replaces + the value computed by scenario_rhos(). + + DTM: I wrote this function to test some specific things--I don't think + this will have a general purpose use, and can probably be deleted. + """ + if fname is None: + raise RuntimeError( + 'Please provide an "fname" kwarg to ' + 'the "rho_setter_kwargs" option in options' + ) + computed_rhos = scenario_rhos(scenario_instance, rho_scale_factor=rho_scale_factor) try: trial_rhos = _get_saved_rhos(fname) except Exception: - raise RuntimeError('Formatting issue in specified rho file ' + fname + - '. Format should be (variable_name,rho_value) for ' - 'each row, with no blank lines, and no ' - 'extra/commented lines') - + raise RuntimeError( + "Formatting issue in specified rho file " + + fname + + ". Format should be (variable_name,rho_value) for " + "each row, with no blank lines, and no " + "extra/commented lines" + ) + index = 0 for b in sorted(scenario_instance.Buses): for t in sorted(scenario_instance.TimePeriods): for g in sorted(scenario_instance.ThermalGeneratorsAtBus[b]): - var = scenario_instance.UnitOn[g,t] + var = scenario_instance.UnitOn[g, t] try: trial_rho = trial_rhos[var.name] except KeyError: - raise RuntimeError(var.name + ' is missing from ' - 'the specified rho file ' + fname) - if (trial_rho >= 1e-14): - print('Using a trial rho') + raise RuntimeError( + var.name + " is missing from " "the specified rho file " + fname + ) + if trial_rho >= 1e-14: + print("Using a trial rho") computed_rhos[index] = (id(var), trial_rho) index += 1 - + return computed_rhos + def _get_saved_rhos(fname): - ''' Return a dict of trial rho values, indexed by variable name. - ''' + """Return a dict of trial rho values, indexed by variable name.""" rhos = dict() - with open(fname, 'r') as f: + with open(fname, "r") as f: for line in f: - line = line.split(',') - vname = ','.join(line[:-1]) + line = line.split(",") + vname = ",".join(line[:-1]) rho = float(line[-1]) rhos[vname] = rho return rhos + def id_fix_list_fct(scenario_instance): - """ specify tuples used by the fixer. + """specify tuples used by the fixer. Args: s (ConcreteModel): the sizes instance. 
@@ -205,15 +236,21 @@ def id_fix_list_fct(scenario_instance): for b in sorted(scenario_instance.Buses): for t in sorted(scenario_instance.TimePeriods): for g in sorted(scenario_instance.ThermalGeneratorsAtBus[b]): - - iter0tuples.append(fixer.Fixer_tuple(scenario_instance.UnitOn[g,t], - th=0.01, nb=None, lb=0, ub=None)) - - iterktuples.append(fixer.Fixer_tuple(scenario_instance.UnitOn[g,t], - th=0.01, nb=None, lb=6, ub=6)) + iter0tuples.append( + fixer.Fixer_tuple( + scenario_instance.UnitOn[g, t], th=0.01, nb=None, lb=0, ub=None + ) + ) + + iterktuples.append( + fixer.Fixer_tuple( + scenario_instance.UnitOn[g, t], th=0.01, nb=None, lb=6, ub=6 + ) + ) return iter0tuples, iterktuples + def write_solution(spcomm, opt_dict, solution_dir): from mpisppy.cylinders.xhatshufflelooper_bounder import XhatShuffleInnerBound from mpisppy.extensions.xhatclosest import XhatClosest @@ -237,12 +274,17 @@ def write_solution(spcomm, opt_dict, solution_dir): # do some checks, to make sure the solution we print will be nonantipative if best_strata_rank != 0: - assert opt_dict["spoke_class"] in (XhatShuffleInnerBound, ) - else: # this is the hub, TODO: also could check for XhatSpecific - assert opt_dict["opt_class"] in (PH, ) + assert opt_dict["spoke_class"] in (XhatShuffleInnerBound,) + else: # this is the hub, TODO: also could check for XhatSpecific + assert opt_dict["opt_class"] in (PH,) assert XhatClosest in opt_dict["opt_kwargs"]["extension_kwargs"]["ext_classes"] - assert "keep_solution" in opt_dict["opt_kwargs"]["options"]["xhat_closest_options"] - assert opt_dict["opt_kwargs"]["options"]["xhat_closest_options"]["keep_solution"] is True + assert ( + "keep_solution" in opt_dict["opt_kwargs"]["options"]["xhat_closest_options"] + ) + assert ( + opt_dict["opt_kwargs"]["options"]["xhat_closest_options"]["keep_solution"] + is True + ) ## if we've passed the above checks, the scenarios should have the tree solution @@ -255,57 +297,66 @@ def write_solution(spcomm, opt_dict, solution_dir): spcomm.cylinder_comm.Barrier() for sname, s in spcomm.opt.local_scenarios.items(): - file_name = os.path.join(solution_dir, sname+'.json') + file_name = os.path.join(solution_dir, sname + ".json") mds = uc._save_uc_results(s, relaxed=False) mds.write(file_name) return -def scenario_tree_solution_writer( solution_dir, sname, scenario, bundling ): - file_name = os.path.join(solution_dir, sname+'.json') + +def scenario_tree_solution_writer(solution_dir, sname, scenario, bundling): + file_name = os.path.join(solution_dir, sname + ".json") mds = uc._save_uc_results(scenario, relaxed=False) mds.write(file_name) -#========= -def scenario_names_creator(scnt,start=0): + +# ========= +def scenario_names_creator(scnt, start=0): # (only for Amalgamator): return the full list of names - return [F"Scenario{i+1}" for i in range(start,scnt+start)] + return [f"Scenario{i+1}" for i in range(start, scnt + start)] + -#========= +# ========= def inparser_adder(cfg): # (only for Amalgamator): add command options unique to uc cfg.num_scens_required() - cfg.add_to_config("UC_count_for_path", - description="Mainly for confidence intervals to give a prefix for the directory providing the scenario data but will be overridden if scen_count is greater (default 0)", - domain=int, - default=0) - return() + cfg.add_to_config( + "UC_count_for_path", + description="Mainly for confidence intervals to give a prefix for the directory providing the scenario data but will be overridden if scen_count is greater (default 0)", + domain=int, + default=0, + ) + return () 
-#========= +# ========= def kw_creator(options): # (only for Amalgamator and MMW_conf): linked to the scenario_creator and inparser_adder if "path" in options: path = options["path"] - num_scens = options.get('num_scens', 0) + num_scens = options.get("num_scens", 0) else: - UCC = options.get('UC_count_for_path', 0) - args = options.get('args') + UCC = options.get("UC_count_for_path", 0) + args = options.get("args") UCC = args.UC_count_for_path if hasattr(args, "UC_count_for_path") else UCC - num_scens = options.get('num_scens', 0) + num_scens = options.get("num_scens", 0) scens_for_path = max(num_scens, UCC) path = str(scens_for_path) + "scenarios_r1" num_scens = None if num_scens == 0 else num_scens - kwargs = { - "scenario_count": num_scens, - "path": path - } + kwargs = {"scenario_count": num_scens, "path": path} return kwargs -#============================ -def sample_tree_scen_creator(sname, stage, sample_branching_factors, seed, - given_scenario=None, **scenario_creator_kwargs): - """ Create a scenario within a sample tree. Mainly for multi-stage and simple for two-stage. + +# ============================ +def sample_tree_scen_creator( + sname, + stage, + sample_branching_factors, + seed, + given_scenario=None, + **scenario_creator_kwargs, +): + """Create a scenario within a sample tree. Mainly for multi-stage and simple for two-stage. Args: sname (string): scenario name to be created stage (int >=1 ): for stages > 1, fix data based on sname in earlier stages @@ -322,4 +373,3 @@ def sample_tree_scen_creator(sname, stage, sample_branching_factors, seed, sca["seedoffset"] = seed sca["num_scens"] = sample_branching_factors[0] # two-stage problem return scenario_creator(sname, **sca) - diff --git a/examples/uc/uc_lshaped.py b/examples/uc/uc_lshaped.py index af24dc7c2..7733b24cf 100644 --- a/examples/uc/uc_lshaped.py +++ b/examples/uc/uc_lshaped.py @@ -21,7 +21,7 @@ def _parse_args(): cfg = config.Config() cfg.popular_args() - cfg.num_scens_required() + cfg.num_scens_required() cfg.ph_args() cfg.two_sided_args() cfg.fwph_args() @@ -33,7 +33,7 @@ def _parse_args(): def main(): cfg = _parse_args() - # Need default_rho for FWPH, without you get + # Need default_rho for FWPH, without you get # uninitialized numeric value error if cfg.fwph and cfg.default_rho is None: print("Must specify a default_rho if using FWPH") @@ -45,28 +45,28 @@ def main(): scenario_creator = uc.scenario_creator scenario_denouement = uc.scenario_denouement - scenario_creator_kwargs = { - "path": f"./{num_scen}scenarios_r1/" - } + scenario_creator_kwargs = {"path": f"./{num_scen}scenarios_r1/"} all_scenario_names = [f"Scenario{i+1}" for i in range(num_scen)] # Things needed for vanilla cylinders beans = (cfg, scenario_creator, scenario_denouement, all_scenario_names) # Options for the L-shaped method at the hub - spo = None if cfg.max_solver_threads is None else {"threads": cfg.max_solver_threads} - spo['mipgap'] = 0.005 + spo = ( + None if cfg.max_solver_threads is None else {"threads": cfg.max_solver_threads} + ) + spo["mipgap"] = 0.005 options = { "root_solver": cfg.solver_name, "sp_solver": cfg.solver_name, - "sp_solver_options" : spo, - "root_solver_options" : spo, - #"valid_eta_lb": {n:0. for n in all_scenario_names}, + "sp_solver_options": spo, + "root_solver_options": spo, + # "valid_eta_lb": {n:0. 
for n in all_scenario_names}, "max_iter": cfg.max_iterations, "verbose": False, - "root_scenarios":[all_scenario_names[len(all_scenario_names)//2]], - } - + "root_scenarios": [all_scenario_names[len(all_scenario_names) // 2]], + } + # L-shaped hub hub_dict = { "hub_class": LShapedHub, @@ -77,7 +77,7 @@ def main(): }, }, "opt_class": LShapedMethod, - "opt_kwargs": { # Args passed to LShapedMethod __init__ + "opt_kwargs": { # Args passed to LShapedMethod __init__ "options": options, "all_scenario_names": all_scenario_names, "scenario_creator": scenario_creator, @@ -87,13 +87,17 @@ def main(): # FWPH spoke if fwph: - fw_spoke = vanilla.fwph_spoke(*beans, scenario_creator_kwargs=scenario_creator_kwargs) - fw_spoke["opt_kwargs"]["PH_options"]["abs_gap"] = 0. + fw_spoke = vanilla.fwph_spoke( + *beans, scenario_creator_kwargs=scenario_creator_kwargs + ) + fw_spoke["opt_kwargs"]["PH_options"]["abs_gap"] = 0.0 fw_spoke["opt_kwargs"]["PH_options"]["rel_gap"] = 1e-5 fw_spoke["opt_kwargs"]["rho_setter"] = uc.scenario_rhos if xhatlshaped: - xhatlshaped_spoke = vanilla.xhatlshaped_spoke(*beans, scenario_creator_kwargs=scenario_creator_kwargs) + xhatlshaped_spoke = vanilla.xhatlshaped_spoke( + *beans, scenario_creator_kwargs=scenario_creator_kwargs + ) list_of_spoke_dict = list() if fwph: diff --git a/examples/usar/abstract.py b/examples/usar/abstract.py index 5b8779324..b15782e1c 100644 --- a/examples/usar/abstract.py +++ b/examples/usar/abstract.py @@ -7,6 +7,7 @@ # full copyright and license information. ############################################################################### """Provides an importable function for creating an `AbstractModel`.""" + import pyomo.environ as pyo @@ -33,26 +34,26 @@ def abstract_model() -> pyo.AbstractModel: ab = pyo.AbstractModel() ab.time_horizon = pyo.Param(within=pyo.PositiveIntegers) - ab.times = pyo.Set( - initialize=lambda mod: pyo.RangeSet(0, mod.time_horizon - 1)) + ab.times = pyo.Set(initialize=lambda mod: pyo.RangeSet(0, mod.time_horizon - 1)) ab.depots = pyo.Set() ab.num_active_depots = pyo.Param( - validate=lambda mod, n: 0 <= n <= len(mod.depots), within=pyo.Integers) + validate=lambda mod, n: 0 <= n <= len(mod.depots), within=pyo.Integers + ) ab.sites = pyo.Set() - ab.lives_to_be_saved = pyo.Param( - ab.times, ab.sites, within=pyo.NonNegativeIntegers) + ab.lives_to_be_saved = pyo.Param(ab.times, ab.sites, within=pyo.NonNegativeIntegers) - ab.rescue_times = pyo.Param( - ab.times, ab.sites, within=pyo.NonNegativeIntegers) + ab.rescue_times = pyo.Param(ab.times, ab.sites, within=pyo.NonNegativeIntegers) # For a solution without site cycles, travel times cannot be 0: ab.from_depot_travel_times = pyo.Param( - ab.times, ab.depots, ab.sites, within=pyo.PositiveIntegers) + ab.times, ab.depots, ab.sites, within=pyo.PositiveIntegers + ) ab.inter_site_travel_times = pyo.Param( - ab.times, ab.sites, ab.sites, within=pyo.PositiveIntegers) + ab.times, ab.sites, ab.sites, within=pyo.PositiveIntegers + ) ab.depot_inflows = pyo.Param(ab.times, domain=pyo.NonNegativeIntegers) @@ -61,40 +62,41 @@ def abstract_model() -> pyo.AbstractModel: # Self-loops are prohibited; stays_at_site should be used instead: def no_self_loops(mod, _, s1, s2): return (0, int(s1 != s2)) + ab.site_departures = pyo.Var( - ab.times, ab.sites, ab.sites, domain=pyo.Binary, bounds=no_self_loops) - ab.depot_departures = pyo.Var( - ab.times, ab.depots, ab.sites, domain=pyo.Binary) - ab.stays_at_site = pyo.Var( - ab.times, ab.sites, domain=pyo.Binary) - ab.is_time_from_arrival = pyo.Var( - ab.times, 
ab.times, ab.sites, domain=pyo.Binary) + ab.times, ab.sites, ab.sites, domain=pyo.Binary, bounds=no_self_loops + ) + ab.depot_departures = pyo.Var(ab.times, ab.depots, ab.sites, domain=pyo.Binary) + ab.stays_at_site = pyo.Var(ab.times, ab.sites, domain=pyo.Binary) + ab.is_time_from_arrival = pyo.Var(ab.times, ab.times, ab.sites, domain=pyo.Binary) def limit_num_active_depots(mod): if len(mod.is_active_depot) == 0: return pyo.Constraint.Feasible return sum(mod.is_active_depot[:]) == mod.num_active_depots + ab.limit_num_active_depots = pyo.Constraint(rule=limit_num_active_depots) def depart_only_active_depots(mod, time, depot, site): - return ( - mod.depot_departures[time, depot, site] - <= mod.is_active_depot[depot] - ) + return mod.depot_departures[time, depot, site] <= mod.is_active_depot[depot] + ab.depart_only_active_depots = pyo.Constraint( - ab.times, ab.depots, ab.sites, rule=depart_only_active_depots) + ab.times, ab.depots, ab.sites, rule=depart_only_active_depots + ) def limit_depot_outflow(mod, time): if len(mod.depot_departures) == 0: return pyo.Constraint.Feasible return sum(mod.depot_departures[time, :, :]) <= mod.depot_inflows[time] + ab.limit_depot_outflow = pyo.Constraint(ab.times, rule=limit_depot_outflow) def set_is_time_from_arrival(mod, time, time_from_arrival, site): num_teams_en_route = 0 if time > 0 and time_from_arrival + 1 < mod.time_horizon: - num_teams_en_route += \ - mod.is_time_from_arrival[time - 1, time_from_arrival + 1, site] + num_teams_en_route += mod.is_time_from_arrival[ + time - 1, time_from_arrival + 1, site + ] for d in mod.depots: if mod.from_depot_travel_times[time, d, site] == time_from_arrival: num_teams_en_route += mod.depot_departures[time, d, site] @@ -105,8 +107,10 @@ def set_is_time_from_arrival(mod, time, time_from_arrival, site): mod.is_time_from_arrival[time, time_from_arrival, site] == num_teams_en_route ) + ab.set_is_time_from_arrival = pyo.Constraint( - ab.times, ab.times, ab.sites, rule=set_is_time_from_arrival) + ab.times, ab.times, ab.sites, rule=set_is_time_from_arrival + ) def flow_conservation(mod, time, site): inflow = mod.is_time_from_arrival[time, 0, site] @@ -117,32 +121,39 @@ def flow_conservation(mod, time, site): + mod.stays_at_site[time, site] ) return inflow == outflow - ab.flow_conservation = pyo.Constraint( - ab.times, ab.sites, rule=flow_conservation) + + ab.flow_conservation = pyo.Constraint(ab.times, ab.sites, rule=flow_conservation) def visit_only_once(mod, site): return sum(mod.is_time_from_arrival[:, 0, site]) <= 1 + ab.visit_only_once = pyo.Constraint(ab.sites, rule=visit_only_once) def fully_service_site(mod, time, site): - between_0_1_if_rescue_underway = sum( - mod.is_time_from_arrival[t, 0, site] - for t in range(time + 1) - if t + mod.rescue_times[t, site] > time - ) / mod.time_horizon + between_0_1_if_rescue_underway = ( + sum( + mod.is_time_from_arrival[t, 0, site] + for t in range(time + 1) + if t + mod.rescue_times[t, site] > time + ) + / mod.time_horizon + ) return mod.stays_at_site[time, site] >= between_0_1_if_rescue_underway - ab.fully_service_site = pyo.Constraint( - ab.times, ab.sites, rule=fully_service_site) + + ab.fully_service_site = pyo.Constraint(ab.times, ab.sites, rule=fully_service_site) def first_stage_cost(mod): return 0 + ab.first_stage_cost = pyo.Expression(rule=first_stage_cost) def lives_saved(mod): return sum( mod.lives_to_be_saved[t, s] * mod.is_time_from_arrival[t, 0, s] - for t in mod.times for s in mod.sites + for t in mod.times + for s in mod.sites ) + ab.objective = 
pyo.Objective(rule=lives_saved, sense=pyo.maximize) return ab diff --git a/examples/usar/config.py b/examples/usar/config.py index e65180b84..8adfe2ff3 100644 --- a/examples/usar/config.py +++ b/examples/usar/config.py @@ -12,6 +12,7 @@ `add_extensive_form_args` and `add_wheel_spinner_args` add several options needed in extensive_form.py and wheel_spinner.py, respectively. """ + from pyomo.common.config import ConfigDict, ConfigValue @@ -21,19 +22,28 @@ def add_extensive_form_args(cnfg: ConfigDict) -> None: Args: cnfg: Will store the `ExtensiveForm`-specific configuration. """ - cnfg.declare("num_scens", ConfigValue( - domain=int, - description="Positive number of scenarios (int) generated", - )).declare_as_argument(required=True) + cnfg.declare( + "num_scens", + ConfigValue( + domain=int, + description="Positive number of scenarios (int) generated", + ), + ).declare_as_argument(required=True) - cnfg.declare("solver_name", ConfigValue( - description="Optimization solver to be used", - )).declare_as_argument(required=True) + cnfg.declare( + "solver_name", + ConfigValue( + description="Optimization solver to be used", + ), + ).declare_as_argument(required=True) - cnfg.declare("seed", ConfigValue( - domain=int, - description="Seed (int) for the random module", - )).declare_as_argument() + cnfg.declare( + "seed", + ConfigValue( + domain=int, + description="Seed (int) for the random module", + ), + ).declare_as_argument() def add_wheel_spinner_args(cnfg: ConfigDict) -> None: @@ -42,11 +52,14 @@ def add_wheel_spinner_args(cnfg: ConfigDict) -> None: Args: cnfg: Will store the `WheelSpinner`-specific configuration. """ - cnfg.declare("run_async", ConfigValue( - default=False, - domain=bool, - description="Run async projective hedging, not progressive hedging", - )).declare_as_argument() + cnfg.declare( + "run_async", + ConfigValue( + default=False, + domain=bool, + description="Run async projective hedging, not progressive hedging", + ), + ).declare_as_argument() def add_common_args(cnfg: ConfigDict) -> None: @@ -55,28 +68,36 @@ def add_common_args(cnfg: ConfigDict) -> None: Args: cnfg: Will store the general USAR configuration. 
""" - cnfg.declare("output_dir", ConfigValue( - default=".", - description="Directory for output files (current dir by default)", - )).declare_as_argument() + cnfg.declare( + "output_dir", + ConfigValue( + default=".", + description="Directory for output files (current dir by default)", + ), + ).declare_as_argument() group = "generate_data arguments" for name, domain, desc in ( - ("time_horizon", int, - "Number of time steps (int) considered in the optimization model"), - ("time_unit_minutes", float, - "Number of minutes (float) per time step"), + ( + "time_horizon", + int, + "Number of time steps (int) considered in the optimization model", + ), + ("time_unit_minutes", float, "Number of minutes (float) per time step"), ("num_depots", int, "Number of depots (int) generated"), - ("num_active_depots", int, - "Number of depots (int) allowed to be active"), + ("num_active_depots", int, "Number of depots (int) allowed to be active"), ("num_households", int, "Number of households (int) generated"), - ("constant_rescue_time", int, - "Flat time (int) for each household rescue"), - ("travel_speed", float, - "Distance on the unit square (float) traveled per time step"), - ("constant_depot_inflow", int, - "Number of rescue teams (int) arriving at depots per time step"), + ("constant_rescue_time", int, "Flat time (int) for each household rescue"), + ( + "travel_speed", + float, + "Distance on the unit square (float) traveled per time step", + ), + ( + "constant_depot_inflow", + int, + "Number of rescue teams (int) arriving at depots per time step", + ), ): val = ConfigValue(domain=domain, description=desc) - cnfg.declare(name, val) \ - .declare_as_argument(group=group, required=True) + cnfg.declare(name, val).declare_as_argument(group=group, required=True) diff --git a/examples/usar/extensive_form.py b/examples/usar/extensive_form.py index 7689fcea1..d82e3fbff 100644 --- a/examples/usar/extensive_form.py +++ b/examples/usar/extensive_form.py @@ -13,6 +13,7 @@ with ``--help`` to show the command-line arguments. Additionally, the functions here can be imported and used as documented. """ + import itertools import os @@ -42,7 +43,8 @@ def extensive_form( scenario_names = list(map(str, range(num_scens))) data_dicts = list( - itertools.islice(generate_data(**generate_data_kwargs), num_scens)) + itertools.islice(generate_data(**generate_data_kwargs), num_scens) + ) depot_coords, site_coords = generate_coords(**generate_data_kwargs) scenario_creator_kwargs = { "data_dicts": data_dicts, diff --git a/examples/usar/generate_data.py b/examples/usar/generate_data.py index 1db0721bd..167de06fd 100644 --- a/examples/usar/generate_data.py +++ b/examples/usar/generate_data.py @@ -15,6 +15,7 @@ follows a power law and households can generally survive some minimum amount of time. 
""" + import itertools import math import random @@ -52,10 +53,10 @@ def generate_coords( random.seed(seed) - depot_coords = \ - [(random.random(), random.random()) for _ in range(num_depots)] - household_coords = \ - [(random.random(), random.random()) for _ in range(num_households)] + depot_coords = [(random.random(), random.random()) for _ in range(num_depots)] + household_coords = [ + (random.random(), random.random()) for _ in range(num_households) + ] return depot_coords, household_coords @@ -63,9 +64,7 @@ def generate_coords( V = TypeVar("V") -def index( - values: Iterable[V], idx: Iterable, *other_idxs: Iterable -) -> Dict[Any, V]: +def index(values: Iterable[V], idx: Iterable, *other_idxs: Iterable) -> Dict[Any, V]: """Indexes `values` for inclusion in a Pyomo data dict. Args: @@ -125,39 +124,43 @@ def generate_data( if eval(param) <= 0: raise ValueError(f"Give a positive value for {param}") - depot_coords, household_coords = \ - generate_coords(num_depots, num_households, seed) # Sets seed + depot_coords, household_coords = generate_coords( + num_depots, num_households, seed + ) # Sets seed def pairwise_times(coords1, coords2): for c1, c2 in itertools.product(coords1, coords2): travel_time = np.linalg.norm(np.subtract(c1, c2)) / travel_speed yield max(1, math.ceil(travel_time)) - from_depot_times = \ - itertools.cycle(pairwise_times(depot_coords, household_coords)) - inter_site_times = \ - itertools.cycle(pairwise_times(household_coords, household_coords)) + from_depot_times = itertools.cycle(pairwise_times(depot_coords, household_coords)) + inter_site_times = itertools.cycle( + pairwise_times(household_coords, household_coords) + ) def sample_from(dist): return dist.ppf(random.random()) while True: - household_sizes = \ - [sample_from(RESCUE_PARTY_SIZE) for _ in range(num_households)] - household_stocks = \ - [sample_from(EMERGENCY_SUPPLIES_STOCK) for _ in household_sizes] - household_survivals_mins = \ - [MIN_SURVIVAL_MINUTES * stock for stock in household_stocks] + household_sizes = [ + sample_from(RESCUE_PARTY_SIZE) for _ in range(num_households) + ] + household_stocks = [ + sample_from(EMERGENCY_SUPPLIES_STOCK) for _ in household_sizes + ] + household_survivals_mins = [ + MIN_SURVIVAL_MINUTES * stock for stock in household_stocks + ] def lives_to_be_saved(): for t in range(time_horizon): for s in range(num_households): - survives_until_t = \ + survives_until_t = ( t * time_unit_minutes <= household_survivals_mins[s] + ) yield household_sizes[s] * survives_until_t - times, depots, sites = \ - map(range, (time_horizon, num_depots, num_households)) + times, depots, sites = map(range, (time_horizon, num_depots, num_households)) yield { None: { "time_horizon": {None: time_horizon}, @@ -166,12 +169,12 @@ def lives_to_be_saved(): "sites": sites, "lives_to_be_saved": index(lives_to_be_saved(), times, sites), "rescue_times": index( - itertools.repeat(constant_rescue_time), times, sites), + itertools.repeat(constant_rescue_time), times, sites + ), "from_depot_travel_times": index( - from_depot_times, times, depots, sites), - "inter_site_travel_times": index( - inter_site_times, times, sites, sites), - "depot_inflows": index( - itertools.repeat(constant_depot_inflow), times), + from_depot_times, times, depots, sites + ), + "inter_site_travel_times": index(inter_site_times, times, sites, sites), + "depot_inflows": index(itertools.repeat(constant_depot_inflow), times), } } diff --git a/examples/usar/plot.py b/examples/usar/plot.py index cd2f21acd..db75491d9 100644 --- 
a/examples/usar/plot.py +++ b/examples/usar/plot.py @@ -16,6 +16,7 @@ Typically, you will only directly call functions prefixed with "plot\_". """ + import itertools import random from typing import ( @@ -216,8 +217,7 @@ def event_walks(numbered_events: Iterable[NumberedEvent]) -> List[List[int]]: for event in events_at_t: if len(event) == 1 or event[0][0] < t: - available_walks[event[-1][1]].append( - unavailable_walks[event].pop()) + available_walks[event[-1][1]].append(unavailable_walks[event].pop()) elif event[0][0] == t: net_degree[event[0]] -= 1 net_degree[event[-1]] += 1 @@ -239,8 +239,7 @@ def event_walks(numbered_events: Iterable[NumberedEvent]) -> List[List[int]]: out_edges[v].append(dummy_vertex) out_edges[dummy_vertex].append(v) break - eulerian_circuit, num_rotated = \ - hierholzers_alg(out_edges, dummy_vertex) + eulerian_circuit, num_rotated = hierholzers_alg(out_edges, dummy_vertex) # Incorporate Eulerian circuit into walks eulerian_circuit.rotate(-num_rotated) # Need to start at dummy vertex @@ -264,6 +263,7 @@ def event_walks(numbered_events: Iterable[NumberedEvent]) -> List[List[int]]: def filter_keys_by_value(indexed_var: IndexedVar) -> Iterable: """Filters `indexed_var` to just what has value that is truthy.""" + def value_truthyness(key): var = indexed_var[key] try: diff --git a/examples/usar/scenario_creator.py b/examples/usar/scenario_creator.py index 82d52b48e..25bee63f5 100644 --- a/examples/usar/scenario_creator.py +++ b/examples/usar/scenario_creator.py @@ -7,6 +7,7 @@ # full copyright and license information. ############################################################################### """Provides a function which creates `ConcreteModel`\s for scenarios.""" + from typing import Dict, Sequence import pyomo.environ as pyo @@ -43,6 +44,7 @@ def scenario_creator( setattr(concrete, key, val) mpisppy.utils.sputils.attach_root_node( - concrete, concrete.first_stage_cost, [concrete.is_active_depot]) + concrete, concrete.first_stage_cost, [concrete.is_active_depot] + ) return concrete diff --git a/examples/usar/scenario_denouement.py b/examples/usar/scenario_denouement.py index 663c8ed52..91f90c960 100644 --- a/examples/usar/scenario_denouement.py +++ b/examples/usar/scenario_denouement.py @@ -7,12 +7,11 @@ # full copyright and license information. ############################################################################### """Provides a function which is called after optimization.""" + import pyomo.environ as pyo -def scenario_denouement( - rank: int, name: str, scenario: pyo.ConcreteModel -) -> None: +def scenario_denouement(rank: int, name: str, scenario: pyo.ConcreteModel) -> None: """Does nothing (is a no-op). This function is called after optimization finishes. diff --git a/examples/usar/wheel_spinner.py b/examples/usar/wheel_spinner.py index 1fb994851..179792428 100644 --- a/examples/usar/wheel_spinner.py +++ b/examples/usar/wheel_spinner.py @@ -13,6 +13,7 @@ with ``--help`` to show the command-line arguments. Additionally, the functions here can be imported and used as documented. 
""" + import itertools import os @@ -54,8 +55,7 @@ def wheel_spinner( raise ValueError("Provide a positive integer for num_scens") scenario_names = list(map(str, range(cnfg["num_scens"]))) - data_dicts = list( - itertools.islice(generate_data(**cnfg), cnfg["num_scens"])) + data_dicts = list(itertools.islice(generate_data(**cnfg), cnfg["num_scens"])) depot_coords, site_coords = generate_coords(**cnfg) scenario_creator_kwargs = { "data_dicts": data_dicts, @@ -63,8 +63,7 @@ def wheel_spinner( "site_coords": site_coords, } - vanilla_args = \ - (cnfg, scenario_creator, scenario_denouement, scenario_names) + vanilla_args = (cnfg, scenario_creator, scenario_denouement, scenario_names) vanilla_kwargs = {"scenario_creator_kwargs": scenario_creator_kwargs} vanilla_fn = vanilla.aph_hub if cnfg["run_async"] else vanilla.ph_hub diff --git a/examples/usar/write_solutions.py b/examples/usar/write_solutions.py index e96fa25f2..30784c68c 100644 --- a/examples/usar/write_solutions.py +++ b/examples/usar/write_solutions.py @@ -7,6 +7,7 @@ # full copyright and license information. ############################################################################### """Provides functions to plot the solution of a scenario.""" + import os import matplotlib.pyplot as plt diff --git a/mpi_one_sided_test.py b/mpi_one_sided_test.py index 93c511469..9b2a41de1 100644 --- a/mpi_one_sided_test.py +++ b/mpi_one_sided_test.py @@ -6,31 +6,34 @@ # All rights reserved. Please see the files COPYRIGHT.md and LICENSE.md for # full copyright and license information. ############################################################################### -''' Basic test script to make sure that the one-sided MPI calls work as - intended. Runs on two processes. Will raise an assertion error if the - Lock/Get/Unlock combination blocks. Otherwise, exits normally and produces - no output. +"""Basic test script to make sure that the one-sided MPI calls work as +intended. Runs on two processes. Will raise an assertion error if the +Lock/Get/Unlock combination blocks. Otherwise, exits normally and produces +no output. - Just because this test passes doesn't mean that MPI one-sided calls will - work as expected. +Just because this test passes doesn't mean that MPI one-sided calls will +work as expected. + +Takes about 4 seconds to execute, with all the sleep() calls. +""" - Takes about 4 seconds to execute, with all the sleep() calls. -''' import mpi4py.MPI as mpi import numpy as np import time + def main(): - assert(mpi.COMM_WORLD.Get_size() == 2) + assert mpi.COMM_WORLD.Get_size() == 2 rank = mpi.COMM_WORLD.Get_rank() array_size = 10 - win = mpi.Win.Allocate(mpi.DOUBLE.size*array_size, mpi.DOUBLE.size, - comm=mpi.COMM_WORLD) - buff = np.ndarray(buffer=win.tomemory(), dtype='d', shape=(array_size,)) - - if (rank == 0): - buff[:] = 3. * np.ones(array_size, dtype='d') + win = mpi.Win.Allocate( + mpi.DOUBLE.size * array_size, mpi.DOUBLE.size, comm=mpi.COMM_WORLD + ) + buff = np.ndarray(buffer=win.tomemory(), dtype="d", shape=(array_size,)) + + if rank == 0: + buff[:] = 3.0 * np.ones(array_size, dtype="d") time.sleep(3) buff[:] = np.arange(array_size) @@ -38,15 +41,15 @@ def main(): win.Put((buff, array_size, mpi.DOUBLE), target_rank=1) win.Unlock(1) - elif (rank == 1): - buff = np.ones(array_size, dtype='d') + elif rank == 1: + buff = np.ones(array_size, dtype="d") time.sleep(1) win.Lock(0) win.Get((buff, array_size, mpi.DOUBLE), target_rank=0) win.Unlock(0) - assert(buff[-1] == 3.) 
+ assert buff[-1] == 3.0 time.sleep(3) @@ -54,10 +57,11 @@ def main(): win.Get((buff, array_size, mpi.DOUBLE), target_rank=1) win.Unlock(1) - assert(buff[-1] == array_size-1) + assert buff[-1] == array_size - 1 - del buff # Important! + del buff # Important! win.Free() -if __name__=='__main__': + +if __name__ == "__main__": main() diff --git a/mpisppy/MPI.py b/mpisppy/MPI.py index a265777cb..ecb163e17 100644 --- a/mpisppy/MPI.py +++ b/mpisppy/MPI.py @@ -9,6 +9,7 @@ try: from mpi4py.MPI import * # noqa: F403 + haveMPI = True except ImportError: @@ -17,7 +18,7 @@ UNDEFINED = -1 SUM = _np.sum - MAX = _np.max + MAX = _np.max MIN = _np.min LOR = _np.logical_or DOUBLE = _np.double @@ -25,66 +26,65 @@ haveMPI = False class _MockMPIComm: - @property def rank(self): return 0 - + @property def size(self): return 1 def allreduce(self, sendobj, op=SUM): return _cp.deepcopy(sendobj) - + def barrier(self): pass def bcast(self, data, root=0): return data - + def gather(self, obj, root=0): if root != self.rank: return else: return [obj] - + def Gatherv(self, sendbuf, recvbuf, root=0): # TBD: check this - _set_data(sendbuf, recvbuf) - + _set_data(sendbuf, recvbuf) + def Allreduce(self, sendbuf, recvbuf, op=SUM): _set_data(sendbuf, recvbuf) - + def Barrier(self): pass def Bcast(self, data, root=0): pass - + def Get_rank(self): return self.rank - + def Get_size(self): return self.size - + def Split(self, color=0, key=0): return _MockMPIComm() - + def _process_BufSpec(bufspec): if isinstance(bufspec, (list, tuple)): bufspec = bufspec[0] return bufspec, bufspec.size, bufspec.dtype - + def _set_data(sendbuf, recvbuf): send_data, send_size, send_type = _process_BufSpec(sendbuf) recv_data, recv_size, recv_type = _process_BufSpec(recvbuf) - + if send_size != recv_size: raise RuntimeError("Send and receive buffers should be of the same size") if send_type != recv_type: raise RuntimeError("Send and receive buffers should be of the same type") - + recv_data[:] = send_data COMM_WORLD = _MockMPIComm() diff --git a/mpisppy/__init__.py b/mpisppy/__init__.py index ff2bbe7c4..8008a660f 100644 --- a/mpisppy/__init__.py +++ b/mpisppy/__init__.py @@ -17,6 +17,9 @@ _global_rank = COMM_WORLD.rank + def global_toc(msg, cond=_global_rank == 0): return tt_timer.toc(msg, delta=False) if cond else None + + global_toc("Initializing mpi-sppy") diff --git a/mpisppy/confidence_intervals/ciutils.py b/mpisppy/confidence_intervals/ciutils.py index 286d4313c..5339a9c0b 100644 --- a/mpisppy/confidence_intervals/ciutils.py +++ b/mpisppy/confidence_intervals/ciutils.py @@ -24,6 +24,7 @@ fullcomm = mpi.COMM_WORLD global_rank = fullcomm.Get_rank() + def _prime_factors(n): """ Parameters @@ -43,7 +44,7 @@ def _prime_factors(n): retval = {} # collect the 2's - while (n % 2 == 0): + while n % 2 == 0: retval[2] = retval.get(2, 0) + 1 n /= 2 # traverse the odds @@ -57,12 +58,13 @@ def _prime_factors(n): retval[n] = retval.get(n, 0) + 1 return retval -def branching_factors_from_numscens(numscens,num_stages): - """ Create branching factors to be used for sampling a tree. - Since most use cases are balanced trees, for now, we do not create unbalanced trees. + +def branching_factors_from_numscens(numscens, num_stages): + """Create branching factors to be used for sampling a tree. + Since most use cases are balanced trees, for now, we do not create unbalanced trees. 
If numscens cannot be written as the product of a num_stages branching factors, we take numscens <-numscens+1 - + Parameters ----------- numscens : int @@ -78,24 +80,35 @@ def branching_factors_from_numscens(numscens,num_stages): if num_stages == 2: return None else: - for i in range(2**(num_stages-1)): - n = numscens+i + for i in range(2 ** (num_stages - 1)): + n = numscens + i prime_fact = _prime_factors(n) - if sum(prime_fact.values())>=num_stages-1: #Checking that we have enough factors - branching_factors = [0]*(num_stages-1) - fact_list = [factor for (factor,mult) in prime_fact.items() for i in range(mult) ] - for k in range(num_stages-1): - branching_factors[k] = np.prod([fact_list[(num_stages-1)*i+k] for i in range(1+len(fact_list)//(num_stages-1)) if (num_stages-1)*i+k= num_stages - 1 + ): # Checking that we have enough factors + branching_factors = [0] * (num_stages - 1) + fact_list = [ + factor for (factor, mult) in prime_fact.items() for i in range(mult) + ] + for k in range(num_stages - 1): + branching_factors[k] = np.prod( + [ + fact_list[(num_stages - 1) * i + k] + for i in range(1 + len(fact_list) // (num_stages - 1)) + if (num_stages - 1) * i + k < len(fact_list) + ] + ) return branching_factors raise RuntimeError("branching_factors_from_numscens is not working correctly.") + def scalable_branching_factors(numscens, ref_branching_factors): - ''' + """ This utilitary find a good branching factor list to create a scenario tree containing at least numscens leaf nodes, and scaled like ref_branching_factors. - For instance, if numscens=233 and ref_branching_factors=[5,3,2], it returns [10,6,4], + For instance, if numscens=233 and ref_branching_factors=[5,3,2], it returns [10,6,4], branching factors for a 240-leafs tree - + NOTE: This method increasing in priority first stages branching factors, so that a branching factor is an increasing function of numscens @@ -112,69 +125,81 @@ def scalable_branching_factors(numscens, ref_branching_factors): new_branching_factors: list of int The branching factors for approximately numscens that "looks like" the reference - ''' - numstages = len(ref_branching_factors)+1 - if numscens < 2**(numstages-1): - return [2]*(numstages-1) - mult_coef = (numscens/np.prod(ref_branching_factors))**(1/(numstages-1)) + """ + numstages = len(ref_branching_factors) + 1 + if numscens < 2 ** (numstages - 1): + return [2] * (numstages - 1) + mult_coef = (numscens / np.prod(ref_branching_factors)) ** (1 / (numstages - 1)) # branching_factors have to be positive integers - new_branching_factors = np.maximum(np.floor(np.array(ref_branching_factors)*mult_coef),1.) - i=0 - while np.prod(new_branching_factors)1, G and s are pooled, from a number ArRP of estimators, computed with different scenario trees. - + Parameters ---------- @@ -246,7 +274,7 @@ def gap_estimators(xhat_one, Must be specified for 2 stage, but can be missing for multistage sample_options: dict, optional Only for multistage. Must contain a 'seed' and a 'branching_factors' attribute, - specifying the starting seed and the branching factors + specifying the starting seed and the branching factors of the scenario tree ArRP:int,optional Number of batches (we create a ArRP model). Default is 1 (one batch). @@ -268,166 +296,193 @@ def gap_estimators(xhat_one, ------- G_k and s_k, gap estimator and associated standard deviation estimator. 
- ''' + """ global_toc("Enter gap_estimators") - if solving_type not in ["EF_2stage","EF_mstage"]: + if solving_type not in ["EF_2stage", "EF_mstage"]: print("solving type=", solving_type) - raise RuntimeError("Only EF solve for the approximate problem is supported yet.") + raise RuntimeError( + "Only EF solve for the approximate problem is supported yet." + ) else: - is_multi = (solving_type=="EF_mstage") - + is_multi = solving_type == "EF_mstage" + m = importlib.import_module(mname) ama.check_module_ama(m) - scenario_creator_kwargs=m.kw_creator(cfg) - + scenario_creator_kwargs = m.kw_creator(cfg) + if is_multi: try: - branching_factors = sample_options['branching_factors'] - start = sample_options['seed'] - except (TypeError,KeyError,RuntimeError): - raise RuntimeError('For multistage problems, sample_options must be a dict with branching_factors and seed attributes.') + branching_factors = sample_options["branching_factors"] + start = sample_options["seed"] + except (TypeError, KeyError, RuntimeError): + raise RuntimeError( + "For multistage problems, sample_options must be a dict with branching_factors and seed attributes." + ) else: start = sputils.extract_num(scenario_names[0]) - if ArRP>1: #Special case : ArRP, G and s are pooled from r>1 estimators. + if ArRP > 1: # Special case : ArRP, G and s are pooled from r>1 estimators. if is_multi: - raise RuntimeError("Pooled estimators are not supported for multistage problems yet.") + raise RuntimeError( + "Pooled estimators are not supported for multistage problems yet." + ) n = len(scenario_names) - if(n%ArRP != 0): - raise RuntimeWarning("You put as an input a number of scenarios"+\ - f" which is not a mutliple of {ArRP}.") - n = n- n%ArRP - G =[] + if n % ArRP != 0: + raise RuntimeWarning( + "You put as an input a number of scenarios" + + f" which is not a mutliple of {ArRP}." 
+ ) + n = n - n % ArRP + G = [] s = [] for k in range(ArRP): - scennames = scenario_names[k*(n//ArRP):(k+1)*(n//ArRP)] - tmp = gap_estimators(xhat_one, mname, - solver_name=solver_name, - scenario_names=scennames, ArRP=1, - cfg=cfg, - scenario_denouement=scenario_denouement, - solver_options=solver_options, - solving_type=solving_type - ) - G.append(tmp['G']) - s.append(tmp['s']) + scennames = scenario_names[k * (n // ArRP) : (k + 1) * (n // ArRP)] + tmp = gap_estimators( + xhat_one, + mname, + solver_name=solver_name, + scenario_names=scennames, + ArRP=1, + cfg=cfg, + scenario_denouement=scenario_denouement, + solver_options=solver_options, + solving_type=solving_type, + ) + G.append(tmp["G"]) + s.append(tmp["s"]) global_toc(f"ArRP {k} of {ArRP}") - #Pooling + # Pooling G = np.mean(G) - s = np.linalg.norm(s)/np.sqrt(n//ArRP) + s = np.linalg.norm(s) / np.sqrt(n // ArRP) return {"G": G, "s": s, "seed": start} - - #A1RP - - #We start by computing the optimal solution to the approximate problem induced by our scenarios + # A1RP + + # We start by computing the optimal solution to the approximate problem induced by our scenarios if is_multi: - #Sample a scenario tree: this is a subtree, but starting from stage 1 - samp_tree = sample_tree.SampleSubtree(mname, - xhats =[], - root_scen=None, - starting_stage=1, - branching_factors=branching_factors, - seed=start, - cfg=cfg, - solver_name=solver_name, - solver_options=solver_options) + # Sample a scenario tree: this is a subtree, but starting from stage 1 + samp_tree = sample_tree.SampleSubtree( + mname, + xhats=[], + root_scen=None, + starting_stage=1, + branching_factors=branching_factors, + seed=start, + cfg=cfg, + solver_name=solver_name, + solver_options=solver_options, + ) samp_tree.run() start += sputils.number_of_nodes(branching_factors) ama_object = samp_tree.ama else: - #We use amalgamator to do it + # We use amalgamator to do it num_scens = len(scenario_names) ama_cfg = cfg() ama_cfg.quick_assign(solving_type, bool, True) ama_cfg.quick_assign("EF_solver_name", str, solver_name) - solver_options_str= sputils.option_dict_to_string(solver_options) # cfg need str + solver_options_str = sputils.option_dict_to_string( + solver_options + ) # cfg need str ama_cfg.quick_assign("EF_solver_options", str, solver_options_str) ama_cfg.quick_assign("num_scens", int, num_scens) - ama_cfg.quick_assign("_mpisppy_probability", float, 1/num_scens) + ama_cfg.quick_assign("_mpisppy_probability", float, 1 / num_scens) ama_cfg.quick_assign("start", int, start) ama_object = ama.from_module(mname, ama_cfg, use_command_line=False) ama_object.scenario_names = scenario_names ama_object.verbose = False ama_object.run() start += len(scenario_names) - - #Optimal solution of the approximate problem + + # Optimal solution of the approximate problem zn_star = ama_object.best_outer_bound - #Associated policies + # Associated policies xstars = sputils.nonant_cache_from_ef(ama_object.ef) - - #Then, we evaluate the function value induced by the scenario at xstar. - + + # Then, we evaluate the function value induced by the scenario at xstar. + if is_multi: # Find feasible policies (i.e. 
xhats) for every non-leaf nodes - if len(samp_tree.ef._ef_scenario_names)>1: - local_scenarios = {sname:getattr(samp_tree.ef,sname) for sname in samp_tree.ef._ef_scenario_names} + if len(samp_tree.ef._ef_scenario_names) > 1: + local_scenarios = { + sname: getattr(samp_tree.ef, sname) + for sname in samp_tree.ef._ef_scenario_names + } else: - local_scenarios = {samp_tree.ef._ef_scenario_names[0]:samp_tree.ef} - - xhats,start = sample_tree.walking_tree_xhats(mname, - local_scenarios, - xhat_one['ROOT'], - branching_factors, - start, - cfg, - solver_name=solver_name, - solver_options=solver_options) - - #Compute then the average function value with this policy + local_scenarios = {samp_tree.ef._ef_scenario_names[0]: samp_tree.ef} + + xhats, start = sample_tree.walking_tree_xhats( + mname, + local_scenarios, + xhat_one["ROOT"], + branching_factors, + start, + cfg, + solver_name=solver_name, + solver_options=solver_options, + ) + + # Compute then the average function value with this policy scenario_creator_kwargs = samp_tree.ama.kwargs - all_nodenames = sputils.create_nodenames_from_branching_factors(branching_factors) + all_nodenames = sputils.create_nodenames_from_branching_factors( + branching_factors + ) else: - #In a 2 stage problem, the only non-leaf is the ROOT node + # In a 2 stage problem, the only non-leaf is the ROOT node xhats = xhat_one all_nodenames = None - - xhat_eval_options = {"iter0_solver_options": None, - "iterk_solver_options": None, - "display_timing": False, - "solver_name": solver_name, - "verbose": False, - "solver_options":solver_options} - ev = xhat_eval.Xhat_Eval(xhat_eval_options, - scenario_names, - ama_object.scenario_creator, - scenario_denouement, - scenario_creator_kwargs=scenario_creator_kwargs, - all_nodenames = all_nodenames,mpicomm=mpicomm) - #Evaluating xhat and xstar and getting the value of the objective function - #for every (local) scenario + + xhat_eval_options = { + "iter0_solver_options": None, + "iterk_solver_options": None, + "display_timing": False, + "solver_name": solver_name, + "verbose": False, + "solver_options": solver_options, + } + ev = xhat_eval.Xhat_Eval( + xhat_eval_options, + scenario_names, + ama_object.scenario_creator, + scenario_denouement, + scenario_creator_kwargs=scenario_creator_kwargs, + all_nodenames=all_nodenames, + mpicomm=mpicomm, + ) + # Evaluating xhat and xstar and getting the value of the objective function + # for every (local) scenario ev.evaluate(xhats) objs_at_xhat = ev.objs_dict - zn_star=ev.evaluate(xstars) + zn_star = ev.evaluate(xstars) objs_at_xstar = ev.objs_dict - + eval_scen_at_xhat = [] eval_scen_at_xstar = [] scen_probs = [] - for k,s in ev.local_scenarios.items(): + for k, s in ev.local_scenarios.items(): eval_scen_at_xhat.append(objs_at_xhat[k]) eval_scen_at_xstar.append(objs_at_xstar[k]) scen_probs.append(s._mpisppy_probability) - - scen_gaps = np.array(eval_scen_at_xhat)-np.array(eval_scen_at_xstar) - local_gap = np.dot(scen_gaps,scen_probs) - local_ssq = np.dot(scen_gaps**2,scen_probs) - local_prob_sqnorm = np.linalg.norm(scen_probs)**2 - local_obj_at_xhat = np.dot(eval_scen_at_xhat,scen_probs) - local_estim = np.array([local_gap,local_ssq,local_prob_sqnorm,local_obj_at_xhat]) + + scen_gaps = np.array(eval_scen_at_xhat) - np.array(eval_scen_at_xstar) + local_gap = np.dot(scen_gaps, scen_probs) + local_ssq = np.dot(scen_gaps**2, scen_probs) + local_prob_sqnorm = np.linalg.norm(scen_probs) ** 2 + local_obj_at_xhat = np.dot(eval_scen_at_xhat, scen_probs) + local_estim = np.array([local_gap, local_ssq, 
local_prob_sqnorm, local_obj_at_xhat]) global_estim = np.zeros(4) - ev.mpicomm.Allreduce(local_estim, global_estim, op=mpi.SUM) - G,ssq, prob_sqnorm,obj_at_xhat = global_estim - if global_rank==0 and verbose: + ev.mpicomm.Allreduce(local_estim, global_estim, op=mpi.SUM) + G, ssq, prob_sqnorm, obj_at_xhat = global_estim + if global_rank == 0 and verbose: print(f"G = {G}") - sample_var = (ssq - G**2)/(1-prob_sqnorm) #Unbiased sample variance + sample_var = (ssq - G**2) / (1 - prob_sqnorm) # Unbiased sample variance s = np.sqrt(sample_var) - - use_relative_error = (np.abs(zn_star)>1) - G = correcting_numeric(G,cfg,objfct=obj_at_xhat, - relative_error=use_relative_error) - - #objective_gap removed Sept.29 2022 - return {"G":G,"s":s,"seed":start} + + use_relative_error = np.abs(zn_star) > 1 + G = correcting_numeric( + G, cfg, objfct=obj_at_xhat, relative_error=use_relative_error + ) + + # objective_gap removed Sept.29 2022 + return {"G": G, "s": s, "seed": start} diff --git a/mpisppy/confidence_intervals/confidence_config.py b/mpisppy/confidence_intervals/confidence_config.py index aab82f6b1..8319d85dc 100644 --- a/mpisppy/confidence_intervals/confidence_config.py +++ b/mpisppy/confidence_intervals/confidence_config.py @@ -8,86 +8,112 @@ ############################################################################### # configuration data (and parsers) for confidence intervals -def confidence_config(cfg): - cfg.add_to_config("confidence_level", - description="1-alpha (default 0.95)", - domain=float, - default=0.95) +def confidence_config(cfg): + cfg.add_to_config( + "confidence_level", + description="1-alpha (default 0.95)", + domain=float, + default=0.95, + ) def sequential_config(cfg): - - cfg.add_to_config("sample_size_ratio", - description="xhat/gap sample size ratio (default 1)", - domain=float, - default=1.0) - - cfg.add_to_config("ArRP", - description="How many to pool to comute G and s (default 1)", - domain=int, - default=1) - - cfg.add_to_config("kf_GS", - description="Resampling frequence for CI estimators (default 1)", - domain=int, - default=1) - - cfg.add_to_config("kf_xhat", - description="Resampling frequence for xhat (default 1)", - domain=int, - default=1) + cfg.add_to_config( + "sample_size_ratio", + description="xhat/gap sample size ratio (default 1)", + domain=float, + default=1.0, + ) + + cfg.add_to_config( + "ArRP", + description="How many to pool to comute G and s (default 1)", + domain=int, + default=1, + ) + + cfg.add_to_config( + "kf_GS", + description="Resampling frequence for CI estimators (default 1)", + domain=int, + default=1, + ) + + cfg.add_to_config( + "kf_xhat", + description="Resampling frequence for xhat (default 1)", + domain=int, + default=1, + ) def BM_config(cfg): # Bayraksan and Morton sequential (relative width) - cfg.add_to_config("BM_h", - description="Controls width of confidence interval (default 1.75)", - domain=float, - default=1.75) - - cfg.add_to_config("BM_hprime", - description="Controls tradeoff between width and sample size (default 0.5)", - domain=float, - default=0.5) - - cfg.add_to_config("BM_eps", - description="Controls termination (default 0.2)", - domain=float, - default=0.2) - - cfg.add_to_config("BM_eps_prime", - description="Controls termination (default 0.1)", - domain=float, - default=0.1) - - cfg.add_to_config("BM_p", - description="Controls sample size (default 0.1)", - domain=float, - default=0.1) - - cfg.add_to_config("BM_q", - description="Related to sample size growth (default 1.2)", - domain=float, - default=1.2) + 
cfg.add_to_config( + "BM_h", + description="Controls width of confidence interval (default 1.75)", + domain=float, + default=1.75, + ) + + cfg.add_to_config( + "BM_hprime", + description="Controls tradeoff between width and sample size (default 0.5)", + domain=float, + default=0.5, + ) + + cfg.add_to_config( + "BM_eps", + description="Controls termination (default 0.2)", + domain=float, + default=0.2, + ) + + cfg.add_to_config( + "BM_eps_prime", + description="Controls termination (default 0.1)", + domain=float, + default=0.1, + ) + + cfg.add_to_config( + "BM_p", + description="Controls sample size (default 0.1)", + domain=float, + default=0.1, + ) + + cfg.add_to_config( + "BM_q", + description="Related to sample size growth (default 1.2)", + domain=float, + default=1.2, + ) def BPL_config(cfg): # Bayraksan and Pierre-Louis - cfg.add_to_config("BPL_eps", - description="Controls termination (default 1)", - domain=float, - default=1) - - cfg.add_to_config("BPL_c0", - description="Starting sample size (default 20)", - domain=int, - default=20) - - cfg.add_to_config("BPL_n0min", - description="Non-zero implies stochastic sampling (default 0)", - domain=int, - default=0) - + cfg.add_to_config( + "BPL_eps", + description="Controls termination (default 1)", + domain=float, + default=1, + ) + + cfg.add_to_config( + "BPL_c0", + description="Starting sample size (default 20)", + domain=int, + default=20, + ) + + cfg.add_to_config( + "BPL_n0min", + description="Non-zero implies stochastic sampling (default 0)", + domain=int, + default=0, + ) diff --git a/mpisppy/confidence_intervals/mmw_ci.py b/mpisppy/confidence_intervals/mmw_ci.py index e44c20869..13951d220 100644 --- a/mpisppy/confidence_intervals/mmw_ci.py +++ b/mpisppy/confidence_intervals/mmw_ci.py @@ -14,56 +14,59 @@ import scipy.stats import importlib from mpisppy import global_toc - + import mpisppy.utils.amalgamator as ama import mpisppy.confidence_intervals.ciutils as ciutils fullcomm = mpi.COMM_WORLD global_rank = fullcomm.Get_rank() + def remove_None(d): if d is None: return {} d_copy = {} - for (key,value) in d.items(): + for key, value in d.items(): if value is not None: d_copy[key] = value return d_copy -class MMWConfidenceIntervals(): - """Takes a model and options as input. +class MMWConfidenceIntervals: + """Takes a model and options as input. Args: refmodel (str): path of the model we use (e.g. farmer, uc) - cfg (Config): useful options to run amalgamator or xhat_eval, + cfg (Config): useful options to run amalgamator or xhat_eval, including EF_solver_options and EF_solver_name May include the options used to compute xhat xhat_one (dict): Non-anticipative first stage solution, computed before num_batches (int): Number of batches used to compute the MMW estimator - batch_size (int): Size of MMW batches, default None. + batch_size (int): Size of MMW batches, default None. If batch_size is None, then batch_size=options['num_scens'] is used - start (int): first scenario used to run the MMW estimator, + start (int): first scenario used to run the MMW estimator, default None If start is None, then start=options[num_scens]+options[start] is used - + Note: cfg can include the following things: -The type of our solving. 
for now, MMWci only support EF, so it must have an attribute 'EF-2stage' or 'EF-mstage' set equal to True - Solver-related options ('EF_solver_name' and 'EF_solver_options') - - + + """ - def __init__(self, - refmodel, - cfg, - xhat_one, - num_batches, - batch_size=None, - start=None, - verbose=True, - mpicomm=None - ): + + def __init__( + self, + refmodel, + cfg, + xhat_one, + num_batches, + batch_size=None, + start=None, + verbose=True, + mpicomm=None, + ): self.refmodel = importlib.import_module(refmodel) self.refmodelname = refmodel self.cfg = cfg @@ -71,14 +74,14 @@ def __init__(self, self.num_batches = num_batches self.batch_size = batch_size self.verbose = verbose - self.mpicomm=mpicomm + self.mpicomm = mpicomm - #Getting the start - if start is None : - raise RuntimeError( "Start must be specified") + # Getting the start + if start is None: + raise RuntimeError("Start must be specified") self.start = start - - #Type of our problem + + # Type of our problem if ama._bool_option(cfg, "EF_2stage"): self.type = "EF_2stage" self.multistage = False @@ -86,16 +89,18 @@ def __init__(self, elif ama._bool_option(cfg, "EF_mstage"): self.type = "EF_mstage" self.multistage = True - self.numstages = len(cfg['branching_factors'])+1 + self.numstages = len(cfg["branching_factors"]) + 1 else: - raise RuntimeError("Only EF is currently supported; " - "cfg should have an attribute 'EF-2stage' or 'EF-mstage' set to True") - - #Check if refmodel and args have all needed attributes + raise RuntimeError( + "Only EF is currently supported; " + "cfg should have an attribute 'EF-2stage' or 'EF-mstage' set to True" + ) + + # Check if refmodel and args have all needed attributes everything = ["scenario_names_creator", "scenario_creator", "kw_creator"] if self.multistage: everything[0] = "sample_tree_scen_creator" - + you_can_have_it_all = True for ething in everything: if not hasattr(self.refmodel, ething): @@ -103,90 +108,101 @@ def __init__(self, you_can_have_it_all = False if not you_can_have_it_all: raise RuntimeError(f"Module {refmodel} not complete for MMW") - + if "EF_solver_name" not in self.cfg: raise RuntimeError("EF_solver_name not in Argument list for MMW") def run(self, confidence_level=0.95): - # We get the MMW right term, then xhat, then the MMW left term. 
- #Compute the nonant xhat (the one used in the left term of MMW (9) ) using + # Compute the nonant xhat (the one used in the left term of MMW (9) ) using # the first scenarios - + ############### get the parameters start = self.start scenario_denouement = self.refmodel.scenario_denouement - #Introducing batches otpions + # Introducing batches otpions num_batches = self.num_batches batch_size = self.batch_size sample_cfg = self.cfg() # ephemeral changes - - #Some options are specific to 2-stage or multi-stage problems + + # Some options are specific to 2-stage or multi-stage problems if self.multistage: - sampling_branching_factors = ciutils.branching_factors_from_numscens(batch_size,self.numstages) - #TODO: Change this to get a more logical way to compute branching_factors + sampling_branching_factors = ciutils.branching_factors_from_numscens( + batch_size, self.numstages + ) + # TODO: Change this to get a more logical way to compute branching_factors batch_size = np.prod(sampling_branching_factors) else: if batch_size == 0: raise RuntimeError("batch size can't be zero for two stage problems") - sample_cfg.quick_assign('num_scens', int, batch_size) - sample_cfg.quick_assign('_mpisppy_probability', float, 1/batch_size) + sample_cfg.quick_assign("num_scens", int, batch_size) + sample_cfg.quick_assign("_mpisppy_probability", float, 1 / batch_size) - #Solver settings - solver_name = self.cfg['EF_solver_name'] - solver_options = self.cfg.get('EF_solver_options') + # Solver settings + solver_name = self.cfg["EF_solver_name"] + solver_options = self.cfg.get("EF_solver_options") solver_options = remove_None(solver_options) - - #Now we compute for each batch the whole Gn term from MMW (9) - G = np.zeros(num_batches) #the Gbar of MMW (10) - #we will compute the mean via a loop (to be parallelized ?) + # Now we compute for each batch the whole Gn term from MMW (9) + + G = np.zeros(num_batches) # the Gbar of MMW (10) + # we will compute the mean via a loop (to be parallelized ?) 
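# A standalone sketch (not part of this PR) of the interval arithmetic that
# the batch loop below feeds, using made-up Gn values; it mirrors the
# Gbar / s_g / t_g computation at the end of run().
import numpy as np
import scipy.stats

G = np.array([0.8, 1.1, 0.9])  # illustrative per-batch gap estimates Gn
Gbar = np.mean(G)  # batch mean, the Gbar of MMW (10)
s_g = np.std(G)  # standard deviation of the batch gaps
t_g = scipy.stats.t.ppf(0.95, len(G) - 1)  # confidence_level = 0.95
epsilon_g = t_g * s_g / np.sqrt(len(G))
gap_inner_bound = Gbar + epsilon_g  # one-sided upper bound on the gap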
- for i in range(num_batches) : + for i in range(num_batches): scenstart = None if self.multistage else start - gap_options = {'seed':start,'branching_factors':sampling_branching_factors} if self.multistage else None - scenario_names = self.refmodel.scenario_names_creator(batch_size,start=scenstart) - estim = ciutils.gap_estimators(self.xhat_one, self.refmodelname, - solving_type=self.type, - scenario_names=scenario_names, - sample_options=gap_options, - ArRP=1, - cfg=sample_cfg, - scenario_denouement=scenario_denouement, - solver_name=solver_name, - solver_options=solver_options, - mpicomm=self.mpicomm - ) - Gn = estim['G'] - start = estim['seed'] - - #objective_gap removed Sept.29th 2022 - - if(self.verbose): + gap_options = ( + {"seed": start, "branching_factors": sampling_branching_factors} + if self.multistage + else None + ) + scenario_names = self.refmodel.scenario_names_creator( + batch_size, start=scenstart + ) + estim = ciutils.gap_estimators( + self.xhat_one, + self.refmodelname, + solving_type=self.type, + scenario_names=scenario_names, + sample_options=gap_options, + ArRP=1, + cfg=sample_cfg, + scenario_denouement=scenario_denouement, + solver_name=solver_name, + solver_options=solver_options, + mpicomm=self.mpicomm, + ) + Gn = estim["G"] + start = estim["seed"] + + # objective_gap removed Sept.29th 2022 + + if self.verbose: global_toc(f"Gn={Gn} for the batch {i}") # Left term of LHS of (9) - G[i]=Gn + G[i] = Gn - s_g = np.std(G) #Standard deviation of gap + s_g = np.std(G) # Standard deviation of gap Gbar = np.mean(G) - t_g = scipy.stats.t.ppf(confidence_level,num_batches-1) + t_g = scipy.stats.t.ppf(confidence_level, num_batches - 1) - epsilon_g = t_g*s_g/np.sqrt(num_batches) + epsilon_g = t_g * s_g / np.sqrt(num_batches) - gap_inner_bound = Gbar + epsilon_g + gap_inner_bound = Gbar + epsilon_g gap_outer_bound = 0 - - self.result={"gap_inner_bound": gap_inner_bound, - "gap_outer_bound": gap_outer_bound, - "Gbar": Gbar, - "std": s_g, - "Glist": G} - - return(self.result) - - + + self.result = { + "gap_inner_bound": gap_inner_bound, + "gap_outer_bound": gap_outer_bound, + "Gbar": Gbar, + "std": s_g, + "Glist": G, + } + + return self.result + + if __name__ == "__main__": raise RuntimeError("mmw_ci does not have a __main__") diff --git a/mpisppy/confidence_intervals/mmw_conf.py b/mpisppy/confidence_intervals/mmw_conf.py index 887e66e3e..8c2e36db2 100644 --- a/mpisppy/confidence_intervals/mmw_conf.py +++ b/mpisppy/confidence_intervals/mmw_conf.py @@ -7,7 +7,7 @@ # full copyright and license information. 
############################################################################### -# To test: (from confidence_intervals directory; assumes the npy file is in the farmer directory) +# To test: (from confidence_intervals directory; assumes the npy file is in the farmer directory) # python mmw_conf.py mpisppy/tests/examples/farmer.py --xhatpath ../../examples/farmer/farmer_root_nonants.npy --confidence-level 0.95 --EF-solver-name gurobi --MMW-num-batches 3 --MMW-batch-size 5 import re @@ -24,34 +24,48 @@ global_toc("Start mmw_conf") cfg = config.Config() # args for mmw part of things - cfg.add_to_config('xhatpath', - domain=str, - description="path to .npy file with feasible nonant solution xhat", - default=".") - cfg.add_to_config('EF_solver_name', - description="name of solver to be used", - domain=str, - default='') - cfg.add_to_config('confidence_level', - description="defines confidence interval size (default 0.95)", - domain=float, - default=None) #None will set alpha = 0.95 - cfg.add_to_config('start_scen', - description="starting scenario number (perhpas to avoid scenarios used to get solve xhat) default 0", - domain=int, - default=0) - cfg.add_to_config("MMW_num_batches", - description="number of batches used for MMW confidence interval (default 2)", - domain=int, - default=2) - cfg.add_to_config("MMW_batch_size", - description="batch size used for MMW confidence interval, if None then batch_size = num_scens (default to None)", - domain=int, - default=None) #None means take batch_size=num_scens - cfg.add_to_config("solver_options", - description="space separated string of solver options, e.g. 'option1=value1 option2 = value2'", - domain=str, - default='') + cfg.add_to_config( + "xhatpath", + domain=str, + description="path to .npy file with feasible nonant solution xhat", + default=".", + ) + cfg.add_to_config( + "EF_solver_name", + description="name of solver to be used", + domain=str, + default="", + ) + cfg.add_to_config( + "confidence_level", + description="defines confidence interval size (default 0.95)", + domain=float, + default=None, + ) # None will set alpha = 0.95 + cfg.add_to_config( + "start_scen", + description="starting scenario number (perhpas to avoid scenarios used to get solve xhat) default 0", + domain=int, + default=0, + ) + cfg.add_to_config( + "MMW_num_batches", + description="number of batches used for MMW confidence interval (default 2)", + domain=int, + default=2, + ) + cfg.add_to_config( + "MMW_batch_size", + description="batch size used for MMW confidence interval, if None then batch_size = num_scens (default to None)", + domain=int, + default=None, + ) # None means take batch_size=num_scens + cfg.add_to_config( + "solver_options", + description="space separated string of solver options, e.g. 
'option1=value1 option2 = value2'", + domain=str, + default="", + ) # now get the extra args from the module mname = sys.argv[1] # will be assigned to the model_module_name config arg @@ -63,11 +77,10 @@ except Exception: raise RuntimeError(f"Could not import module: {mname}") - m.inparser_adder(cfg) # the inprser_adder might want num_scens, but mmw contols the number of scenarios try: - del cfg["num_scens"] + del cfg["num_scens"] except Exception: pass @@ -75,25 +88,29 @@ # the module name is very special because it has to be plucked from argv parser.add_argument( - "model_module_name", help="amalgamator compatible module (often read from argv)", type=str, - ) - cfg.add_to_config("model_module_name", - description="amalgamator compatible module", - domain=str, - default='', - argparse=False) + "model_module_name", + help="amalgamator compatible module (often read from argv)", + type=str, + ) + cfg.add_to_config( + "model_module_name", + description="amalgamator compatible module", + domain=str, + default="", + argparse=False, + ) cfg.model_module_name = mname - + args = parser.parse_args() # from the command line args = cfg.import_argparse(args) - - #parses solver options string + + # parses solver options string solver_options = option_string_to_dict(cfg.solver_options) # convert instance path to module name: - modelpath = re.sub('/','.', cfg.model_module_name) - modelpath = re.sub(r'\.py','', modelpath) + modelpath = re.sub("/", ".", cfg.model_module_name) + modelpath = re.sub(r"\.py", "", modelpath) # Read xhats from xhatpath xhat = ciutils.read_xhat(cfg.xhatpath) @@ -101,20 +118,26 @@ if cfg.MMW_batch_size is None: raise RuntimeError("mmw_conf requires MMW_batch_size") - refmodel = modelpath #Change this path to use a different model + refmodel = modelpath # Change this path to use a different model cfg.quick_assign("EF_2stage", bool, True) # all this script supports for now num_batches = cfg.MMW_num_batches batch_size = cfg.MMW_batch_size - mmw = mmw_ci.MMWConfidenceIntervals(refmodel, cfg, xhat, num_batches, batch_size=batch_size, start = cfg.start_scen, - verbose=True) + mmw = mmw_ci.MMWConfidenceIntervals( + refmodel, + cfg, + xhat, + num_batches, + batch_size=batch_size, + start=cfg.start_scen, + verbose=True, + ) cl = float(cfg.confidence_level) - #objective_gap removed sept29th 2022 + # objective_gap removed sept29th 2022 r = mmw.run(confidence_level=cl) global_toc(r) - diff --git a/mpisppy/confidence_intervals/multi_seqsampling.py b/mpisppy/confidence_intervals/multi_seqsampling.py index c2da535d2..d16b8fc6f 100644 --- a/mpisppy/confidence_intervals/multi_seqsampling.py +++ b/mpisppy/confidence_intervals/multi_seqsampling.py @@ -6,8 +6,8 @@ # All rights reserved. Please see the files COPYRIGHT.md and LICENSE.md for # full copyright and license information. ############################################################################### -# Code that is producing an xhat and a confidence interval using sequantial sampling -# This extension of SeqSampling works for multistage, using independent +# Code that is producing an xhat and a confidence interval using sequantial sampling +# This extension of SeqSampling works for multistage, using independent # scenarios instead of a single scenario tree. 
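# Condensed usage sketch (not part of this PR) of the mmw_conf.py flow above;
# the reference model, solver name, and xhat .npy file are placeholders.
from mpisppy.utils import config
import mpisppy.confidence_intervals.ciutils as ciutils
import mpisppy.confidence_intervals.mmw_ci as mmw_ci

cfg = config.Config()
cfg.quick_assign("EF_2stage", bool, True)
cfg.quick_assign("EF_solver_name", str, "cplex")  # placeholder solver

xhat = ciutils.read_xhat("farmer_root_nonants.npy")  # placeholder .npy file
mmw = mmw_ci.MMWConfidenceIntervals(
    "mpisppy.tests.examples.farmer",  # placeholder reference model
    cfg,
    xhat,
    num_batches=3,
    batch_size=5,
    start=5,  # first scenario used by the estimator
    verbose=True,
)
result = mmw.run(confidence_level=0.95)  # dict with Gbar, std, and bounds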
import pyomo.common.config as pyofig @@ -17,7 +17,7 @@ from mpisppy.utils import config import numpy as np from mpisppy import global_toc - + import mpisppy.utils.amalgamator as amalgamator import mpisppy.utils.xhat_eval as xhat_eval import mpisppy.confidence_intervals.ciutils as ciutils @@ -28,56 +28,69 @@ fullcomm = mpi.COMM_WORLD global_rank = fullcomm.Get_rank() + class IndepScens_SeqSampling(SeqSampling): - def __init__(self, - refmodel, - xhat_generator, - cfg, - stopping_criterion = "BM", - stochastic_sampling = False, - solving_type="EF-mstage", - ): + def __init__( + self, + refmodel, + xhat_generator, + cfg, + stopping_criterion="BM", + stochastic_sampling=False, + solving_type="EF-mstage", + ): super().__init__( - refmodel, - xhat_generator, - cfg, - stochastic_sampling = stochastic_sampling, - stopping_criterion = stopping_criterion, - solving_type = solving_type) - self.numstages = len(self.cfg['branching_factors'])+1 - self.batch_branching_factors = [1]*(self.numstages-1) + refmodel, + xhat_generator, + cfg, + stochastic_sampling=stochastic_sampling, + stopping_criterion=stopping_criterion, + solving_type=solving_type, + ) + self.numstages = len(self.cfg["branching_factors"]) + 1 + self.batch_branching_factors = [1] * (self.numstages - 1) self.batch_size = 1 - - #TODO: Add an override specifier if it exists + + # TODO: Add an override specifier if it exists def run(self, maxit=200): # Do everything as specified by the options (maxit is provided as a safety net). refmodel = self.refmodel - mult = self.sample_size_ratio # used to set m_k= mult*n_k - scenario_denouement = refmodel.scenario_denouement if hasattr(refmodel, "scenario_denouement") else None - #----------------------------Step 0 -------------------------------------# - #Initialization - k=1 - + mult = self.sample_size_ratio # used to set m_k= mult*n_k + scenario_denouement = ( + refmodel.scenario_denouement + if hasattr(refmodel, "scenario_denouement") + else None + ) + # ----------------------------Step 0 -------------------------------------# + # Initialization + k = 1 + if self.stopping_criterion == "BM": - #Finding a constant used to compute nk - r = 2 #TODO : we could add flexibility here - j = np.arange(1,1000) + # Finding a constant used to compute nk + r = 2 # TODO : we could add flexibility here + j = np.arange(1, 1000) if self.BM_q is None: - s = sum(np.power(j,-self.BM_p*np.log(j))) + s = sum(np.power(j, -self.BM_p * np.log(j))) else: - if self.BM_q<1: + if self.BM_q < 1: raise RuntimeError("Parameter BM_q should be greater than 1.") - s = sum(np.exp(-self.BM_p*np.power(j,2*self.BM_q/r))) - self.c = max(1,2*np.log(s/(np.sqrt(2*np.pi)*(1-self.confidence_level)))) - + s = sum(np.exp(-self.BM_p * np.power(j, 2 * self.BM_q / r))) + self.c = max( + 1, 2 * np.log(s / (np.sqrt(2 * np.pi) * (1 - self.confidence_level))) + ) + lower_bound_k = self.sample_size(k, None, None, None) - - #Computing xhat_1. - - #We use sample_size_ratio*n_k observations to compute xhat_k - xhat_branching_factors = ciutils.scalable_branching_factors(mult*lower_bound_k, self.cfg['branching_factors']) + + # Computing xhat_1. 
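# Standalone numeric sketch (not part of this PR) of the Bayraksan-Morton
# constant computed just above; BM_p, BM_q, and the confidence level are
# assumed values.
import numpy as np

p, q, r = 0.2, 1.2, 2  # BM_p, BM_q, and the fixed r used above
alpha = 0.95  # confidence level
j = np.arange(1, 1000)
s = np.sum(np.exp(-p * np.power(j, 2 * q / r)))  # the BM_q-is-not-None branch
c = max(1, 2 * np.log(s / (np.sqrt(2 * np.pi) * (1 - alpha))))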
+ + # We use sample_size_ratio*n_k observations to compute xhat_k + xhat_branching_factors = ciutils.scalable_branching_factors( + mult * lower_bound_k, self.cfg["branching_factors"] + ) mk = np.prod(xhat_branching_factors) - self.xhat_gen_kwargs['start_seed'] = self.SeedCount #TODO: Maybe find a better way to manage seed + self.xhat_gen_kwargs["start_seed"] = ( + self.SeedCount + ) # TODO: Maybe find a better way to manage seed xhat_scenario_names = refmodel.scenario_names_creator(mk) xgo = self.xhat_gen_kwargs.copy() @@ -85,78 +98,95 @@ def run(self, maxit=200): xgo.pop("solver_options", None) # it will be given explicitly xgo.pop("scenario_names", None) # it will be given explicitly xgo["branching_factors"] = xhat_branching_factors - xhat_k = self.xhat_generator(xhat_scenario_names, - solver_options=self.solver_options, - **xgo) + xhat_k = self.xhat_generator( + xhat_scenario_names, solver_options=self.solver_options, **xgo + ) self.SeedCount += sputils.number_of_nodes(xhat_branching_factors) - #----------------------------Step 1 -------------------------------------# - #Computing n_1 and associated scenario names - - nk = np.prod(ciutils.scalable_branching_factors(lower_bound_k, self.cfg['branching_factors'])) #To ensure the same growth that in the one-tree seqsampling + # ----------------------------Step 1 -------------------------------------# + # Computing n_1 and associated scenario names + + nk = np.prod( + ciutils.scalable_branching_factors( + lower_bound_k, self.cfg["branching_factors"] + ) + ) # To ensure the same growth that in the one-tree seqsampling estimator_scenario_names = refmodel.scenario_names_creator(nk) - - #Computing G_nk and s_k associated with xhat_1 - - Gk, sk = self._gap_estimators_with_independent_scenarios(xhat_k, - nk, - estimator_scenario_names, - scenario_denouement) - - - #----------------------------Step 2 -------------------------------------# - - while( self.stop_criterion(Gk,sk,nk) and k1) - Gk = ciutils.correcting_numeric(G,cfg,objfct=obj_at_xhat, - relative_error=use_relative_error) - + + use_relative_error = np.abs(zstar) > 1 + Gk = ciutils.correcting_numeric( + G, cfg, objfct=obj_at_xhat, relative_error=use_relative_error + ) + self.SeedCount = start - - return Gk,sk - + + return Gk, sk + if __name__ == "__main__": # For use by confidence interval develepors who need a quick test. 
solver_name = "cplex" - #An example of sequential sampling for the aircond model + # An example of sequential sampling for the aircond model import mpisppy.tests.examples.aircond - bfs = [3,3,2] + + bfs = [3, 3, 2] num_scens = np.prod(bfs) scenario_names = mpisppy.tests.examples.aircond.scenario_names_creator(num_scens) - xhat_gen_kwargs = {"scenario_names": scenario_names, - "solver_name": solver_name, - "solver_options": None, - "branching_factors": bfs, - "mu_dev": 0, - "sigma_dev": 40, - "start_ups": False, - "start_seed": 0, - } + xhat_gen_kwargs = { + "scenario_names": scenario_names, + "solver_name": solver_name, + "solver_options": None, + "branching_factors": bfs, + "mu_dev": 0, + "sigma_dev": 40, + "start_ups": False, + "start_seed": 0, + } # create two configs, then use one of them optionsBM = config.Config() confidence_config.confidence_config(optionsBM) confidence_config.sequential_config(optionsBM) - optionsBM.quick_assign('BM_h', float, 0.55) - optionsBM.quick_assign('BM_hprime', float, 0.5,) - optionsBM.quick_assign('BM_eps', float, 0.5,) - optionsBM.quick_assign('BM_eps_prime', float, 0.4,) + optionsBM.quick_assign("BM_h", float, 0.55) + optionsBM.quick_assign( + "BM_hprime", + float, + 0.5, + ) + optionsBM.quick_assign( + "BM_eps", + float, + 0.5, + ) + optionsBM.quick_assign( + "BM_eps_prime", + float, + 0.4, + ) optionsBM.quick_assign("BM_p", float, 0.2) optionsBM.quick_assign("BM_q", float, 1.2) optionsBM.quick_assign("solver_name", str, solver_name) optionsBM.quick_assign("solver_name", str, solver_name) - optionsBM.quick_assign("stopping", str, "BM") # TBD use this and drop stopping_criterion from the constructor + optionsBM.quick_assign( + "stopping", str, "BM" + ) # TBD use this and drop stopping_criterion from the constructor optionsBM.quick_assign("xhat_gen_kwargs", dict, xhat_gen_kwargs) optionsBM.quick_assign("solving_type", str, "EF_mstage") - optionsBM.add_and_assign("branching_factors", description="branching factors", domain=pyofig.ListOf(int), default=None, value=bfs) + optionsBM.add_and_assign( + "branching_factors", + description="branching factors", + domain=pyofig.ListOf(int), + default=None, + value=bfs, + ) optionsBM.quick_assign("start_ups", bool, False) optionsBM.quick_assign("EF_mstage", bool, True) ##optionsBM.quick_assign("cylinders", pyofig.ListOf(str), ['ph']) - + # fixed width, fully sequential optionsFSP = config.Config() confidence_config.confidence_config(optionsFSP) confidence_config.sequential_config(optionsFSP) - optionsFSP.quick_assign('BPL_eps', float, 15.0) + optionsFSP.quick_assign("BPL_eps", float, 15.0) optionsFSP.quick_assign("BPL_c0", int, 50) # starting sample size) optionsFSP.quick_assign("xhat_gen_kwargs", dict, xhat_gen_kwargs) - optionsFSP.quick_assign("ArRP", int, 2) # this must be 1 for any multi-stage problems + optionsFSP.quick_assign( + "ArRP", int, 2 + ) # this must be 1 for any multi-stage problems optionsFSP.quick_assign("stopping", str, "BPL") optionsFSP.quick_assign("solving_type", str, "EF_mstage") optionsFSP.quick_assign("solver_name", str, solver_name) optionsFSP.quick_assign("solver_name", str, solver_name) - optionsFSP.add_and_assign("branching_factors", description="branching factors", domain=pyofig.ListOf(int), default=None, value=bfs) + optionsFSP.add_and_assign( + "branching_factors", + description="branching factors", + domain=pyofig.ListOf(int), + default=None, + value=bfs, + ) optionsFSP.quick_assign("start_ups", bool, False) optionsBM.quick_assign("EF_mstage", bool, True) 
##optionsFSP.quick_assign("cylinders", pyofig.ListOf(str), ['ph']) - - aircondpb = IndepScens_SeqSampling("mpisppy.tests.examples.aircond", - xhat_generator_aircond, - optionsBM, - stopping_criterion="BM" - ) + + aircondpb = IndepScens_SeqSampling( + "mpisppy.tests.examples.aircond", + xhat_generator_aircond, + optionsBM, + stopping_criterion="BM", + ) res = aircondpb.run(maxit=50) print(res) - - - diff --git a/mpisppy/confidence_intervals/sample_tree.py b/mpisppy/confidence_intervals/sample_tree.py index a2ef658b5..d93b43bae 100644 --- a/mpisppy/confidence_intervals/sample_tree.py +++ b/mpisppy/confidence_intervals/sample_tree.py @@ -6,7 +6,7 @@ # All rights reserved. Please see the files COPYRIGHT.md and LICENSE.md for # full copyright and license information. ############################################################################### -#Code for sampling for multistage problem +# Code for sampling for multistage problem import numpy as np import mpisppy.MPI as mpi @@ -20,17 +20,17 @@ global_rank = fullcomm.Get_rank() -class SampleSubtree(): - ''' +class SampleSubtree: + """ The Sample Subtree class is generating a scenario tree such that every scenario shares the same common nodes up to a starting stage t. These nodes are the first t nodes of a given scenario 'root_scen'. The aim of this class is to compute a feasible policy at stage t for 'root_scen' - We take as an argument xhats, feasible policies for stages 1 to t-1, and + We take as an argument xhats, feasible policies for stages 1 to t-1, and fix the nonants up to stage t-1 to the values given by 'xhats' for every scenario. - - The run() method is solving directly the Extensive Form. - After callling the method, one can fetch the feasible policy for stage t + + The run() method is solving directly the Extensive Form. + After callling the method, one can fetch the feasible policy for stage t by taking the attribute xhat_at_stage. Parameters @@ -45,7 +45,7 @@ class SampleSubtree(): starting_stage : int Stage t>=1. branching_factors : list of int - Branching factors for sample trees. + Branching factors for sample trees. branching_factors[i] is the branching factor for stage i+2 seed : int Seed to create scenarios. @@ -56,86 +56,114 @@ class SampleSubtree(): solver_options : dict, optional Solver options. The default is None. - ''' - def __init__(self, mname, xhats, root_scen, starting_stage, branching_factors, - seed, cfg, solver_name=None, solver_options=None): + """ + def __init__( + self, + mname, + xhats, + root_scen, + starting_stage, + branching_factors, + seed, + cfg, + solver_name=None, + solver_options=None, + ): self.refmodel = importlib.import_module(mname) - #Checking that refmodel has all the needed attributes - attrs = ["sample_tree_scen_creator","kw_creator","scenario_names_creator"] + # Checking that refmodel has all the needed attributes + attrs = ["sample_tree_scen_creator", "kw_creator", "scenario_names_creator"] for attr in attrs: if not hasattr(self.refmodel, attr): - raise RuntimeError(f"The construction of a sample subtree failed because the reference model has no {attr} function.") + raise RuntimeError( + f"The construction of a sample subtree failed because the reference model has no {attr} function." 
+ ) self.xhats = xhats self.root_scen = root_scen self.stage = starting_stage - self.sampling_branching_factors = branching_factors[(self.stage-1):] + self.sampling_branching_factors = branching_factors[(self.stage - 1) :] self.original_branching_factors = branching_factors self.numscens = np.prod(self.sampling_branching_factors) self.seed = seed self.cfg = cfg self.solver_name = solver_name self.solver_options = solver_options - - #Create an amalgamator object to solve the subtree problem + + # Create an amalgamator object to solve the subtree problem self._create_amalgamator() - def _sample_creator(self, sname, **scenario_creator_kwargs): - ''' + """ This method is similar to scenario_creator function, but for subtrees. Given a scenario names and kwargs, it creates a scenario from our subtree - - WARNING: The multistage model (aka refmodel) must contain a + + WARNING: The multistage model (aka refmodel) must contain a sample_tree_scen_creator function - - ''' + + """ # gymnastics because options get passed around through a variety of paths s_c_k = scenario_creator_kwargs.copy() if "seed" in scenario_creator_kwargs: s_c_k.pop("seed", None) # we want control over the seed here - s = self.refmodel.sample_tree_scen_creator(sname, - given_scenario=self.root_scen, - stage=self.stage, - sample_branching_factors=self.sampling_branching_factors, - seed = self.seed, - **s_c_k) - nlens = {node.name: len(node.nonant_vardata_list) - for node in s._mpisppy_node_list} - - #Fixing the xhats - for k,ndn in enumerate(self.fixed_nodes): + s = self.refmodel.sample_tree_scen_creator( + sname, + given_scenario=self.root_scen, + stage=self.stage, + sample_branching_factors=self.sampling_branching_factors, + seed=self.seed, + **s_c_k, + ) + nlens = { + node.name: len(node.nonant_vardata_list) for node in s._mpisppy_node_list + } + + # Fixing the xhats + for k, ndn in enumerate(self.fixed_nodes): node = s._mpisppy_node_list[k] if len(self.xhats[k]) != nlens[ndn]: - raise RuntimeError("xhats does not have the right size " - f"for stage {k+1}, xhats has {len(self.xhats[k])} nonant variables, but should have {nlens[ndn]} of them") + raise RuntimeError( + "xhats does not have the right size " + f"for stage {k+1}, xhats has {len(self.xhats[k])} nonant variables, but should have {nlens[ndn]} of them" + ) for i in range(nlens[ndn]): - #node.nonant_vardata_list[i].fix(self.xhats[k][i]) + # node.nonant_vardata_list[i].fix(self.xhats[k][i]) node.nonant_vardata_list[i].value = self.xhats[k][i] node.nonant_vardata_list[i].fixed = True return s - + def _create_amalgamator(self): - ''' + """ This method attaches an Amalgamator object to a sample subtree. Changes to self.cfg will be ephemeral. 
- + WARNING: sample_creator must be called before using for stages beyond 1 - ''' - self.fixed_nodes = ["ROOT"+"_0"*i for i in range(self.stage-1)] + """ + self.fixed_nodes = ["ROOT" + "_0" * i for i in range(self.stage - 1)] self.scenario_creator = self._sample_creator ###cfg = copy.deepcopy(self.cfg) - cfg = self.cfg() # ephemeral - cfg.quick_assign('EF-mstage', domain=str, value=len(self.original_branching_factors) > 1) - cfg.quick_assign('EF-2stage', domain=str, value=len(self.original_branching_factors) <= 1) - cfg.quick_assign('EF_solver_name', domain=str, value=self.solver_name) + cfg = self.cfg() # ephemeral + cfg.quick_assign( + "EF-mstage", domain=str, value=len(self.original_branching_factors) > 1 + ) + cfg.quick_assign( + "EF-2stage", domain=str, value=len(self.original_branching_factors) <= 1 + ) + cfg.quick_assign("EF_solver_name", domain=str, value=self.solver_name) if self.solver_options is not None: - cfg.add_and_assign("solver_options", "solver options dict", dict, None, self.solver_options) - cfg.quick_assign('num_scens', domain=int, value=self.numscens) + cfg.add_and_assign( + "solver_options", "solver options dict", dict, None, self.solver_options + ) + cfg.quick_assign("num_scens", domain=int, value=self.numscens) if "start_seed" not in cfg: - cfg.add_and_assign("start_seed", description="first seed", domain=int, default=None, value=self.seed) + cfg.add_and_assign( + "start_seed", + description="first seed", + domain=int, + default=None, + value=self.seed, + ) #### cfg['_mpisppy_probability'] = 1/self.numscens #Probably not used # TBD: better options flow (Dec 2021; working on it June 2022) if "branching_factors" not in cfg: @@ -144,56 +172,92 @@ def _create_amalgamator(self): if "kwargs" in ama_options: ama_options["branching_factors"] = ama_options["kwargs"]["branching_factors"] """ - scen_names = self.refmodel.scenario_names_creator(self.numscens, - start=self.seed) - denouement = self.refmodel.scenario_denouement if hasattr(self.refmodel, 'scenario_denouement') else None - - self.ama = amalgamator.Amalgamator(cfg, scen_names, - self.scenario_creator, - self.refmodel.kw_creator, - denouement, - verbose = False) + scen_names = self.refmodel.scenario_names_creator( + self.numscens, start=self.seed + ) + denouement = ( + self.refmodel.scenario_denouement + if hasattr(self.refmodel, "scenario_denouement") + else None + ) + + self.ama = amalgamator.Amalgamator( + cfg, + scen_names, + self.scenario_creator, + self.refmodel.kw_creator, + denouement, + verbose=False, + ) + def run(self): - #Running the Amalgamator and attaching the result to the SampleSubtree object - #global_toc("Enter SampleSubTree run") + # Running the Amalgamator and attaching the result to the SampleSubtree object + # global_toc("Enter SampleSubTree run") self.ama.run() self.ef = self.ama.ef self.EF_Obj = self.ama.EF_Obj - pseudo_root = "ROOT"+"_0"*(self.stage-1) - self.xhat_at_stage =[self.ef.ref_vars[(pseudo_root,i)].value for i in range(self.ef._nlens[pseudo_root])] + pseudo_root = "ROOT" + "_0" * (self.stage - 1) + self.xhat_at_stage = [ + self.ef.ref_vars[(pseudo_root, i)].value + for i in range(self.ef._nlens[pseudo_root]) + ] -def _feasible_solution(mname,scenario,xhat_one,branching_factors,seed,cfg, - solver_name=None, solver_options=None): - ''' +def _feasible_solution( + mname, + scenario, + xhat_one, + branching_factors, + seed, + cfg, + solver_name=None, + solver_options=None, +): + """ Given a scenario and a first-stage policy xhat_one, this method computes non-anticipative feasible 
 policies for the following stages.
-    '''
+    """
     assert solver_name is not None
     if xhat_one is None:
         raise RuntimeError("Xhat_one can't be None for now")
     ciutils.is_sorted(scenario._mpisppy_node_list)
     nodenames = [node.name for node in scenario._mpisppy_node_list]
-    num_stages = len(branching_factors)+1
+    num_stages = len(branching_factors) + 1
     xhats = [xhat_one]
-    for t in range(2,num_stages): #We do not compute xhat for the final stage
-
-        subtree = SampleSubtree(mname, xhats, scenario,
-                                t, branching_factors, seed, cfg,
-                                solver_name, solver_options)
+    for t in range(2, num_stages):  # We do not compute xhat for the final stage
+        subtree = SampleSubtree(
+            mname,
+            xhats,
+            scenario,
+            t,
+            branching_factors,
+            seed,
+            cfg,
+            solver_name,
+            solver_options,
+        )
         subtree.run()
         xhats.append(subtree.xhat_at_stage)
-        seed+=sputils.number_of_nodes(branching_factors[(t-1):])
-    xhat_dict = {ndn:xhat for (ndn,xhat) in zip(nodenames,xhats)}
-    return xhat_dict,seed
+        seed += sputils.number_of_nodes(branching_factors[(t - 1) :])
+    xhat_dict = {ndn: xhat for (ndn, xhat) in zip(nodenames, xhats)}
+    return xhat_dict, seed
+

-def walking_tree_xhats(mname, local_scenarios, xhat_one,branching_factors, seed, cfg,
-                       solver_name=None, solver_options=None):
+def walking_tree_xhats(
+    mname,
+    local_scenarios,
+    xhat_one,
+    branching_factors,
+    seed,
+    cfg,
+    solver_name=None,
+    solver_options=None,
+):
     """
-    This methods takes a scenario tree (represented by a scenario list) as an input,
-    a first stage policy xhat_one and several settings, and computes
-    a feasible policy for every scenario, i.e. finds nonanticipative xhats
+    This method takes a scenario tree (represented by a scenario list) as an input,
+    a first stage policy xhat_one and several settings, and computes
+    a feasible policy for every scenario, i.e. finds nonanticipative xhats
     using the SampleSubtree class.
     We use a tree traversal approach, so that for every non-leaf node of the
     scenario tree we compute an associated sample tree only once.
@@ -207,7 +271,7 @@ def walking_tree_xhats(mname, local_scenarios, xhat_one,branching_factors, seed,
     xhat_one : list or np.array of float
         A feasible and nonanticipative first stage policy.
     branching_factors : list of int
-        Branching factors for sample trees.
+        Branching factors for sample trees.
         branching_factors[i] is the branching factor for stage i+2.
     seed : int
         Starting seed to create scenarios.
@@ -219,41 +283,55 @@ def walking_tree_xhats(mname, local_scenarios, xhat_one,branching_factors, seed,
     xhats : dict
         Dict of values for the nonanticipative variable for every node.
         keys are node names and values are lists of nonant variables.
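        For intuition, an illustrative (made-up) return value for a small
        three-stage tree, using the ROOT/ROOT_0 node naming convention used
        elsewhere in this module (the numbers are hypothetical):

            xhats = {
                "ROOT": [125.0, 75.0],    # first-stage nonants (xhat_one)
                "ROOT_0": [110.0, 90.0],  # second-stage nonants under child 0
                "ROOT_1": [130.0, 70.0],  # second-stage nonants under child 1
            }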
-
+
    NOTE: The local_scenarios do not need to form a regular tree (unbalanced trees are allowed)
    """
    assert xhat_one is not None, "Xhat_one can't be None for now"
-
-    xhats = {'ROOT':xhat_one}
-
-    #Special case if we only have one scenario
-    if len(local_scenarios)==1:
+
+    xhats = {"ROOT": xhat_one}
+
+    # Special case if we only have one scenario
+    if len(local_scenarios) == 1:
        scen = list(local_scenarios.values())[0]
-        res = _feasible_solution(mname, scen, xhat_one, branching_factors, seed, cfg,
-                                 solver_name=solver_name,
-                                 solver_options=solver_options)
+        res = _feasible_solution(
+            mname,
+            scen,
+            xhat_one,
+            branching_factors,
+            seed,
+            cfg,
+            solver_name=solver_name,
+            solver_options=solver_options,
+        )
        return res
-
-
-    for k,s in local_scenarios.items():
+
+    for k, s in local_scenarios.items():
        scen_xhats = []
        ciutils.is_sorted(s._mpisppy_node_list)
        for node in s._mpisppy_node_list:
            if node.name in xhats:
-                scen_xhats.append(xhats[node.name])
+                scen_xhats.append(xhats[node.name])
            else:
-                subtree = SampleSubtree(mname, scen_xhats, s,
-                                        node.stage, branching_factors, seed, cfg,
-                                        solver_name, solver_options)
-                subtree.run()
-                xhat = subtree.xhat_at_stage
-
-                # TBD: is this needed
-                seed += sputils.number_of_nodes(branching_factors[(node.stage-1):])
-
-                xhats[node.name] = xhat
-                scen_xhats.append(xhat)
+                subtree = SampleSubtree(
+                    mname,
+                    scen_xhats,
+                    s,
+                    node.stage,
+                    branching_factors,
+                    seed,
+                    cfg,
+                    solver_name,
+                    solver_options,
+                )
+                subtree.run()
+                xhat = subtree.xhat_at_stage
+
+                # TBD: is this needed
+                seed += sputils.number_of_nodes(branching_factors[(node.stage - 1) :])
+
+                xhats[node.name] = xhat
+                scen_xhats.append(xhat)
    return xhats, seed

@@ -262,57 +340,90 @@ def walking_tree_xhats(mname, local_scenarios, xhat_one,branching_factors, seed,
    # This main program is provided so developers can run quick tests.
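Before the developer-test main program, it is worth spelling out the scenario-count
arithmetic this module relies on: the number of sampled scenarios is the product of
the branching factors. A minimal standalone check, using the same factors as the
test below:

    import numpy as np

    branching_factors = [3, 2, 4, 4]
    num_scens = int(np.prod(branching_factors))  # 3 * 2 * 4 * 4 = 96 leaf scenarios
    print(num_scens)  # prints 96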
import pyomo.common.config as pyofig from mpisppy.utils import config - - branching_factors = [3,2,4,4] + + branching_factors = [3, 2, 4, 4] num_scens = np.prod(branching_factors) solver_name = "cplex" mname = "mpisppy.tests.examples.aircond" - ama_options = { "EF-mstage": True, - "num_scens": num_scens, - "_mpisppy_probability": 1/num_scens, - "branching_factors":branching_factors, - } + ama_options = { + "EF-mstage": True, + "num_scens": num_scens, + "_mpisppy_probability": 1 / num_scens, + "branching_factors": branching_factors, + } - cfg = config.Config() - cfg.add_and_assign("EF_mstage", description="EF mstage (vs 2stage)", domain=bool, default=None, value=True) - cfg.add_and_assign("num_scens", description="total scenarios", domain=int, default=None, value=num_scens) - cfg.add_and_assign("_mpisppy_probability", description="unif prob", domain=float, default=None, value=1./num_scens) - cfg.add_and_assign("branching_factors", description="branching factors", domain=pyofig.ListOf(int), default=None, value=branching_factors) - cfg.add_and_assign("EF_solver_name", description="EF Solver", domain=str, default=None, value=solver_name) - #We use from_module to build easily an Amalgamator object + cfg.add_and_assign( + "EF_mstage", + description="EF mstage (vs 2stage)", + domain=bool, + default=None, + value=True, + ) + cfg.add_and_assign( + "num_scens", + description="total scenarios", + domain=int, + default=None, + value=num_scens, + ) + cfg.add_and_assign( + "_mpisppy_probability", + description="unif prob", + domain=float, + default=None, + value=1.0 / num_scens, + ) + cfg.add_and_assign( + "branching_factors", + description="branching factors", + domain=pyofig.ListOf(int), + default=None, + value=branching_factors, + ) + cfg.add_and_assign( + "EF_solver_name", + description="EF Solver", + domain=str, + default=None, + value=solver_name, + ) + # We use from_module to build easily an Amalgamator object ama = amalgamator.from_module(mname, cfg, use_command_line=False) ama.run() - + # get the xhat - xhat_one = sputils.nonant_cache_from_ef(ama.ef)['ROOT'] - - #----------Find a feasible solution for a single scenario------------- + xhat_one = sputils.nonant_cache_from_ef(ama.ef)["ROOT"] + + # ----------Find a feasible solution for a single scenario------------- scenario = ama.ef.scen0 seed = sputils.number_of_nodes(branching_factors) - - xhats,seed = _feasible_solution(mname, scenario, xhat_one, branching_factors, seed, cfg, solver_name=solver_name) + + xhats, seed = _feasible_solution( + mname, scenario, xhat_one, branching_factors, seed, cfg, solver_name=solver_name + ) print(xhats) - - #----------Find feasible solutions for every scenario ------------ - - #Fetching scenarios from EF + + # ----------Find feasible solutions for every scenario ------------ + + # Fetching scenarios from EF scenarios = dict() for k in ama.ef._ef_scenario_names: - #print(f"{k =}") + # print(f"{k =}") scenarios[k] = getattr(ama.ef, k) s = scenarios[k] demands = [s.stage_models[t].Demand for t in s.T] - #print(f"{demands =}") + # print(f"{demands =}") seed = sputils.number_of_nodes(branching_factors) - - xhats,seed = walking_tree_xhats(mname,scenarios,xhat_one,branching_factors,seed,cfg,solver_name=solver_name) - print(xhats) - - - - - - + xhats, seed = walking_tree_xhats( + mname, + scenarios, + xhat_one, + branching_factors, + seed, + cfg, + solver_name=solver_name, + ) + print(xhats) diff --git a/mpisppy/confidence_intervals/seqsampling.py b/mpisppy/confidence_intervals/seqsampling.py index 43c5178eb..3d2d82550 
100644 --- a/mpisppy/confidence_intervals/seqsampling.py +++ b/mpisppy/confidence_intervals/seqsampling.py @@ -6,7 +6,7 @@ # All rights reserved. Please see the files COPYRIGHT.md and LICENSE.md for # full copyright and license information. ############################################################################### -# Code that is producing a xhat and a confidence interval using sequential sampling +# Code that is producing a xhat and a confidence interval using sequential sampling # This is the implementation of the 2 following papers: # [bm2011] Bayraksan, G., Morton,D.P.: A Sequential Sampling Procedure for Stochastic Programming. Operations Research 59(4), 898-913 (2011) # [bpl2012] Bayraksan, G., Pierre-Louis, P.: Fixed-Width Sequential Stopping Rules for a Class of Stochastic Programs, SIAM Journal on Optimization 22(4), 1518-1548 (2012) @@ -32,7 +32,8 @@ print("\nTBD: check seqsampling for start vs start_seed") -#========== +# ========== + def is_needed(cfg, needed_things, message=""): absent = list() @@ -41,15 +42,17 @@ def is_needed(cfg, needed_things, message=""): absent.append(i) print(f"{i =}, {hasattr(cfg,i) =}") if len(absent) > 0: - raise RuntimeError("Some options are missing from this list of required options:\n" - f"{needed_things}\n" - f"missing: {absent}\n" - f"{message}") - + raise RuntimeError( + "Some options are missing from this list of required options:\n" + f"{needed_things}\n" + f"missing: {absent}\n" + f"{message}" + ) + def add_options(cfg, optional_things): - # allow for defaults on options that Bayraksan et al establish - for i,v in optional_things.items(): + # allow for defaults on options that Bayraksan et al establish + for i, v in optional_things.items(): if i not in cfg: # there must be a better way... if isinstance(v, str): @@ -63,10 +66,15 @@ def add_options(cfg, optional_things): elif isinstance(v, dict): cfg.quick_assign(i, dict, v) else: - raise RuntimeError(f"add_options cannot process type {type(v)} for option {i}:{v}") + raise RuntimeError( + f"add_options cannot process type {type(v)} for option {i}:{v}" + ) + -def xhat_generator_farmer(scenario_names, solver_name=None, solver_options=None, crops_multiplier=1): - ''' For developer testing: Given scenario names and +def xhat_generator_farmer( + scenario_names, solver_name=None, solver_options=None, crops_multiplier=1 +): + """For developer testing: Given scenario names and options, create the scenarios and compute the xhat that is minimizing the approximate problem associated with these scenarios. @@ -88,44 +96,45 @@ def xhat_generator_farmer(scenario_names, solver_name=None, solver_options=None, NOTE: this is here for testing during development. - ''' + """ num_scens = len(scenario_names) - + cfg = config.Config() cfg.quick_assign("EF_2stage", bool, True) cfg.quick_assign("EF_solver_name", str, solver_name) cfg.quick_assign("EF_solver_options", dict, solver_options) cfg.quick_assign("num_scens", int, num_scens) - cfg.quick_assign("_mpisppy_probability", float, 1/num_scens) + cfg.quick_assign("_mpisppy_probability", float, 1 / num_scens) - #We use from_module to build easily an Amalgamator object - ama = amalgamator.from_module("mpisppy.tests.examples.farmer", - cfg, use_command_line=False) - #Correcting the building by putting the right scenarios. + # We use from_module to build easily an Amalgamator object + ama = amalgamator.from_module( + "mpisppy.tests.examples.farmer", cfg, use_command_line=False + ) + # Correcting the building by putting the right scenarios. 
    ama.scenario_names = scenario_names
    ama.run()
-
+
    # get the xhat
    xhat = sputils.nonant_cache_from_ef(ama.ef)
    return xhat


-class SeqSampling():
+class SeqSampling:
    """
    Computing a solution xhat and a confidence interval for the optimality gap
    sequentially, by taking an increasing number of scenarios.
-
+
    Args:
        refmodel (str): path of the model we use (e.g. farmer, uc)
-        xhat_generator (function): a function that takes scenario_names (and
-                                   and optional solver_name and solver_options)
-                                   as input and returns a first stage policy
+        xhat_generator (function): a function that takes scenario_names (and
+                                   optionally solver_name and solver_options)
+                                   as input and returns a first stage policy
                                   xhat.
        cfg (Config): multiple parameters, e.g.:
                      - "solver_name", str, the name of the solver we use
-                      - "solver_options", dict containing solver options
+                      - "solver_options", dict containing solver options
                        (default is {}, an empty dict)
                      - "sample_size_ratio", float, the ratio (xhat sample size)/(gap estimators sample size)
                        (default is 1)
@@ -137,12 +146,12 @@ class SeqSampling():
                        (default is 1, always resample completely)
                      - "kf_xhat", int, resampling frequency to compute xhat
                        (default is 1, always resample completely)
-                      -"confidence_level", float, asymptotic confidence level
+                      -"confidence_level", float, asymptotic confidence level
                        of the output confidence interval (default is 0.95)
-                      -Some other parameters, depending on what model
+                      -Some other parameters, depending on what model
                        (BM or BPL, deterministic or sequential sampling)
-
+
        stochastic_sampling (bool, default False): should we compute sample sizes using estimators?
                    if stochastic_sampling is True, we compute sample size using §5 of [Bayraksan and Pierre-Louis]
                    else, we compute them using [Bayraksan and Morton] technique
@@ -152,17 +161,20 @@ class SeqSampling():
                    Must be one of 'EF-2stage' and 'EF-mstage' (for problems with more than 2 stages).
                    Solving methods outside EF are not supported yet.
    """
-
-    def __init__(self,
-                 refmodel,
-                 xhat_generator,
-                 cfg,
-                 stochastic_sampling = False,
-                 stopping_criterion = "BM",
-                 solving_type = "None"):
-
+
+    def __init__(
+        self,
+        refmodel,
+        xhat_generator,
+        cfg,
+        stochastic_sampling=False,
+        stopping_criterion="BM",
+        solving_type="None",
+    ):
        if not isinstance(cfg, config.Config):
-            raise RuntimeError(f"SeqSampling bad cfg type={type(cfg)}; should be Config")
+            raise RuntimeError(
+                f"SeqSampling bad cfg type={type(cfg)}; should be Config"
+            )
        self.refmodel = importlib.import_module(refmodel)
        self.refmodelname = refmodel
        self.xhat_generator = xhat_generator
@@ -172,11 +184,13 @@ def __init__(self,
        self.solving_type = solving_type
        self.sample_size_ratio = cfg.get("sample_size_ratio", 1)
        self.xhat_gen_kwargs = cfg.get("xhat_gen_kwargs", {})
-
-        #Check if refmodel has all needed attributes
-        everything = ["scenario_names_creator",
-                      "scenario_creator",
-                      "kw_creator"]  # denouement can be missing.
+
+        # Check if refmodel has all needed attributes
+        everything = [
+            "scenario_names_creator",
+            "scenario_creator",
+            "kw_creator",
+        ]  # denouement can be missing.
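# A compact usage sketch for the class documented above; it mirrors the
# developer-test __main__ at the bottom of this file, and assumes `cfg` is a
# config.Config already populated with the BPL_* options:
#
#     from mpisppy.confidence_intervals.seqsampling import SeqSampling
#
#     sampler = SeqSampling(
#         "mpisppy.tests.examples.farmer",  # refmodel module path
#         xhat_generator_farmer,            # defined earlier in this file
#         cfg,
#         stochastic_sampling=False,
#         stopping_criterion="BPL",
#     )
#     result = sampler.run()  # {"T": ..., "Candidate_solution": ..., "CI": ...}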
you_can_have_it_all = True for ething in everything: if not hasattr(self.refmodel, ething): @@ -192,63 +206,69 @@ def __init__(self, "kf_xhat": 1, "confidence_level": 0.95} add_options(cfg, optional_options) - """ + """ - if self.stochastic_sampling : + if self.stochastic_sampling: add_options(cfg, {"n0min": 50}) - - + if self.stopping_criterion == "BM": - needed_things = ["BM_eps_prime","BM_hprime","BM_eps","BM_h","BM_p"] + needed_things = ["BM_eps_prime", "BM_hprime", "BM_eps", "BM_h", "BM_p"] is_needed(cfg, needed_things) elif self.stopping_criterion == "BPL": is_needed(cfg, ["BPL_eps"]) - if not self.stochastic_sampling : + if not self.stochastic_sampling: # The Pyomo config object cannot take a function directly - optional_things = {"BPL_c1":2, "functions_dict": {"growth_function":(lambda x : x-1)}} + optional_things = { + "BPL_c1": 2, + "functions_dict": {"growth_function": (lambda x: x - 1)}, + } add_options(cfg, optional_things) else: raise RuntimeError("Only BM and BPL criteria are supported at this time.") # TBD: do a better job with options for oname in cfg: - setattr(self, oname, cfg[oname]) # Set every option as an attribute - sroot, self.solver_name, self.solver_options = solver_spec.solver_specification(cfg, ["EF",""]) + setattr(self, oname, cfg[oname]) # Set every option as an attribute + sroot, self.solver_name, self.solver_options = solver_spec.solver_specification( + cfg, ["EF", ""] + ) assert self.solver_name is not None - - #Check the solving_type, and find if the problem is multistage - two_stage_types = ['EF_2stage'] - multistage_types = ['EF_mstage'] + + # Check the solving_type, and find if the problem is multistage + two_stage_types = ["EF_2stage"] + multistage_types = ["EF_mstage"] if self.solving_type in two_stage_types: self.multistage = False elif self.solving_type in multistage_types: self.multistage = True else: - raise RuntimeError(f"The solving_type {self.solving_type} is not supported. " - f"If you want to run a 2-stage problem, please use a solving_type in {two_stage_types}. " - f"If you want to run a multistage stage problem, please use a solving_type in {multistage_types}") - - #Check the multistage options + raise RuntimeError( + f"The solving_type {self.solving_type} is not supported. " + f"If you want to run a 2-stage problem, please use a solving_type in {two_stage_types}. 
" + f"If you want to run a multistage stage problem, please use a solving_type in {multistage_types}" + ) + + # Check the multistage options if self.multistage: needed_things = ["branching_factors"] is_needed(cfg, needed_things) # check resampling frequencies - if not hasattr(cfg, 'kf_Gs') or cfg['kf_Gs'] != 1: + if not hasattr(cfg, "kf_Gs") or cfg["kf_Gs"] != 1: print("kf_Gs must be 1 for multi-stage, so assigning 1") cfg.quick_assign("kf_Gs", int, 1) - if not hasattr(cfg, 'kf_xhat') or cfg['kf_xhat'] != 1: + if not hasattr(cfg, "kf_xhat") or cfg["kf_xhat"] != 1: print("kf_Gs must be 1 for multi-stage, so assigning 1") cfg.quick_assign("kf_xhat", int, 1) - - #Get the stopping criterion + + # Get the stopping criterion if self.stopping_criterion == "BM": self.stop_criterion = self.bm_stopping_criterion elif self.stopping_criterion == "BPL": self.stop_criterion = self.bpl_stopping_criterion else: raise RuntimeError("Only BM and BPL criteria are supported.") - - #Get the function computing sample size + + # Get the function computing sample size if self.stochastic_sampling: self.sample_size = self.stochastic_sampsize elif self.stopping_criterion == "BM": @@ -257,297 +277,368 @@ def __init__(self, self.sample_size = self.bpl_fsp_sampsize else: raise RuntimeError("Only BM and BPL sample sizes are supported yet") - - #To be sure to always use new scenarios, we set a ScenCount that is - #telling us how many scenarios has been used so far + + # To be sure to always use new scenarios, we set a ScenCount that is + # telling us how many scenarios has been used so far self.ScenCount = 0 - - #If we are running a multistage problem, we also need a seed count + + # If we are running a multistage problem, we also need a seed count self.SeedCount = 0 - - - def bm_stopping_criterion(self,G,s,nk): + + def bm_stopping_criterion(self, G, s, nk): # arguments defined in [bm2011] - return(G>self.BM_hprime*s+self.BM_eps_prime) - - def bpl_stopping_criterion(self,G,s,nk): + return G > self.BM_hprime * s + self.BM_eps_prime + + def bpl_stopping_criterion(self, G, s, nk): # arguments defined in [bpl2012] - t = scipy.stats.t.ppf(self.confidence_level,nk-1) - sample_error = t*s/np.sqrt(nk) - inflation_factor = 1/np.sqrt(nk) - return(G+sample_error+inflation_factor>self.BPL_eps) - - def bm_sampsize(self,k,G,s,nk_m1, r=2): + t = scipy.stats.t.ppf(self.confidence_level, nk - 1) + sample_error = t * s / np.sqrt(nk) + inflation_factor = 1 / np.sqrt(nk) + return G + sample_error + inflation_factor > self.BPL_eps + + def bm_sampsize(self, k, G, s, nk_m1, r=2): # arguments defined in [bm2011] h = self.BM_h hprime = self.BM_hprime p = self.BM_p q = self.BM_q confidence_level = self.confidence_level - if q is None : + if q is None: # Computing n_k as in (5) of [Bayraksan and Morton, 2009] - if hasattr(self, "c") : + if hasattr(self, "c"): c = self.c else: - if confidence_level is None : - raise RuntimeError("We need the confidence level to compute the constant cp") - j = np.arange(1,1000) - s = sum(np.power(j,-p*np.log(j))) - c = max(1,2*np.log(s/(np.sqrt(2*np.pi)*(1-confidence_level)))) - - lower_bound = (c+2*p* np.log(k)**2)/((h-hprime)**2) - else : + if confidence_level is None: + raise RuntimeError( + "We need the confidence level to compute the constant cp" + ) + j = np.arange(1, 1000) + s = sum(np.power(j, -p * np.log(j))) + c = max( + 1, 2 * np.log(s / (np.sqrt(2 * np.pi) * (1 - confidence_level))) + ) + + lower_bound = (c + 2 * p * np.log(k) ** 2) / ((h - hprime) ** 2) + else: # Computing n_k as in (14) of [Bayraksan 
-            if hasattr(self, "c") :
+            if hasattr(self, "c"):
                c = self.c
            else:
-                if confidence_level is None :
-                    RuntimeError("We need the confidence level to compute the constant c_pq")
-                j = np.arange(1,1000)
-                s = sum(np.exp(-p*np.power(j,2*q/r)))
-                c = max(1,2*np.log(s/(np.sqrt(2*np.pi)*(1-confidence_level))))
-
-            lower_bound = (c+2*p*np.power(k,2*q/r))/((h-hprime)**2)
-        #print(f"nk={lower_bound}")
+                if confidence_level is None:
+                    raise RuntimeError(
+                        "We need the confidence level to compute the constant c_pq"
+                    )
+                j = np.arange(1, 1000)
+                s = sum(np.exp(-p * np.power(j, 2 * q / r)))
+                c = max(
+                    1, 2 * np.log(s / (np.sqrt(2 * np.pi) * (1 - confidence_level)))
+                )
+
+            lower_bound = (c + 2 * p * np.power(k, 2 * q / r)) / ((h - hprime) ** 2)
+        # print(f"nk={lower_bound}")
        return int(np.ceil(lower_bound))
-
-    def bpl_fsp_sampsize(self,k,G,s,nk_m1):
+
+    def bpl_fsp_sampsize(self, k, G, s, nk_m1):
        # arguments defined in [bpl2012]
-        return(int(np.ceil(self.BPL_c0+self.BPL_c1*self.functions_dict["growth_function"](k))))
-
-    def stochastic_sampsize(self,k,G,s,nk_m1):
+        return int(
+            np.ceil(
+                self.BPL_c0 + self.BPL_c1 * self.functions_dict["growth_function"](k)
+            )
+        )
+
+    def stochastic_sampsize(self, k, G, s, nk_m1):
        # arguments defined in [bpl2012]
-        if (k==1):
-            #Initialization
-            return(int(np.ceil(max(self.BPL_n0min,np.log(1/self.BPL_eps)))))
-        #§5 of [Bayraksan and Pierre-Louis] : solving a 2nd degree equation in sqrt(n)
-        t = scipy.stats.t.ppf(self.confidence_level,nk_m1-1)
-        a = - self.BPL_eps
-        b = 1+t*s
-        c = nk_m1*G
-        maxroot = -(np.sqrt(b**2-4*a*c)+b)/(2*a)
+        if k == 1:
+            # Initialization
+            return int(np.ceil(max(self.BPL_n0min, np.log(1 / self.BPL_eps))))
+        # §5 of [Bayraksan and Pierre-Louis] : solving a 2nd degree equation in sqrt(n)
+        t = scipy.stats.t.ppf(self.confidence_level, nk_m1 - 1)
+        a = -self.BPL_eps
+        b = 1 + t * s
+        c = nk_m1 * G
+        maxroot = -(np.sqrt(b**2 - 4 * a * c) + b) / (2 * a)
        print(f"s={s}, t={t}, G={G}")
        print(f"a={a}, b={b},c={c},delta={b**2-4*a*c}")
        print(f"At iteration {k}, we took n_k={int(np.ceil((maxroot**2)))}")
-        return(int(np.ceil(maxroot**2)))
-
-
-    def run(self,maxit=200):
-        """ Execute a sequental sampling algorithm
+        return int(np.ceil(maxroot**2))
+
+    def run(self, maxit=200):
+        """Execute a sequential sampling algorithm
        Args:
            maxit (int): override the stopping criteria based on iterations
        Returns:
            {"T":T,"Candidate_solution":final_xhat,"CI":CI,}
        """
        if self.multistage:
-            raise RuntimeWarning("Multistage sequential sampling can be done "
-                                 "using the SeqSampling, but dependent samples\n"
-                                 "will be used. The class IndepScens_SeqSampling uses independent samples and therefor has better theoretical support.")
+            raise RuntimeWarning(
+                "Multistage sequential sampling can be done "
+                "using the SeqSampling, but dependent samples\n"
+                "will be used. The class IndepScens_SeqSampling uses independent samples and therefore has better theoretical support."
+ ) refmodel = self.refmodel - mult = self.sample_size_ratio # used to set m_k= mult*n_k - - - #----------------------------Step 0 -------------------------------------# - #Initialization - k =1 - - - #Computing the lower bound for n_1 + mult = self.sample_size_ratio # used to set m_k= mult*n_k + + # ----------------------------Step 0 -------------------------------------# + # Initialization + k = 1 + # Computing the lower bound for n_1 if self.stopping_criterion == "BM": - #Finding a constant used to compute nk - r = 2 #TODO : we could add flexibility here - j = np.arange(1,1000) + # Finding a constant used to compute nk + r = 2 # TODO : we could add flexibility here + j = np.arange(1, 1000) if self.BM_q is None: - s = sum(np.power(j,-self.BM_p*np.log(j))) + s = sum(np.power(j, -self.BM_p * np.log(j))) else: - if self.BM_q<1: + if self.BM_q < 1: raise RuntimeError("Parameter q should be greater than 1.") - s = sum(np.exp(-self.BM_p*np.power(j,2*self.BM_q/r))) - self.c = max(1,2*np.log(s/(np.sqrt(2*np.pi)*(1-self.confidence_level)))) - + s = sum(np.exp(-self.BM_p * np.power(j, 2 * self.BM_q / r))) + self.c = max( + 1, 2 * np.log(s / (np.sqrt(2 * np.pi) * (1 - self.confidence_level))) + ) + lower_bound_k = self.sample_size(k, None, None, None) - - #Computing xhat_1. - #We use sample_size_ratio*n_k observations to compute xhat_k + # Computing xhat_1. + + # We use sample_size_ratio*n_k observations to compute xhat_k if self.multistage: - xhat_branching_factors = ciutils.scalable_branching_factors(mult*lower_bound_k, self.cfg['branching_factors']) + xhat_branching_factors = ciutils.scalable_branching_factors( + mult * lower_bound_k, self.cfg["branching_factors"] + ) mk = np.prod(xhat_branching_factors) - self.xhat_gen_kwargs['start_seed'] = self.SeedCount #TODO: Maybe find a better way to manage seed + self.xhat_gen_kwargs["start_seed"] = ( + self.SeedCount + ) # TODO: Maybe find a better way to manage seed xhat_scenario_names = refmodel.scenario_names_creator(mk) - + else: - mk = int(np.floor(mult*lower_bound_k)) - xhat_scenario_names = refmodel.scenario_names_creator(mk, start=self.ScenCount) - self.ScenCount+=mk + mk = int(np.floor(mult * lower_bound_k)) + xhat_scenario_names = refmodel.scenario_names_creator( + mk, start=self.ScenCount + ) + self.ScenCount += mk xgo = self.xhat_gen_kwargs.copy() xgo.pop("solver_name", None) # it will be given explicitly xgo.pop("solver_options", None) # it will be given explicitly xgo.pop("scenario_names", None) # given explicitly - xhat_k = self.xhat_generator(xhat_scenario_names, - solver_name=self.solver_name, - solver_options=self.solver_options, - **xgo) + xhat_k = self.xhat_generator( + xhat_scenario_names, + solver_name=self.solver_name, + solver_options=self.solver_options, + **xgo, + ) - - #----------------------------Step 1 -------------------------------------# - #Computing n_1 and associated scenario names + # ----------------------------Step 1 -------------------------------------# + # Computing n_1 and associated scenario names if self.multistage: self.SeedCount += sputils.number_of_nodes(xhat_branching_factors) - - gap_branching_factors = ciutils.scalable_branching_factors(lower_bound_k, self.cfg['branching_factors']) + + gap_branching_factors = ciutils.scalable_branching_factors( + lower_bound_k, self.cfg["branching_factors"] + ) nk = np.prod(gap_branching_factors) estimator_scenario_names = refmodel.scenario_names_creator(nk) - sample_options = {'branching_factors':gap_branching_factors, 'seed':self.SeedCount} + sample_options = { + 
"branching_factors": gap_branching_factors, + "seed": self.SeedCount, + } else: - nk = self.ArRP *int(np.ceil(lower_bound_k/self.ArRP)) - estimator_scenario_names = refmodel.scenario_names_creator(nk, - start=self.ScenCount) + nk = self.ArRP * int(np.ceil(lower_bound_k / self.ArRP)) + estimator_scenario_names = refmodel.scenario_names_creator( + nk, start=self.ScenCount + ) sample_options = None - self.ScenCount+= nk - - #Computing G_nkand s_k associated with xhat_1 - - lcfg = self.cfg() # local copy - lcfg.quick_assign('num_scens', int, nk) - scenario_denouement = refmodel.scenario_denouement if hasattr(refmodel, "scenario_denouement") else None - estim = ciutils.gap_estimators(xhat_k, self.refmodelname, - solving_type=self.solving_type, - scenario_names=estimator_scenario_names, - sample_options=sample_options, - ArRP=self.ArRP, - cfg=lcfg, - scenario_denouement=scenario_denouement, - solver_name=self.solver_name, - solver_options=self.solver_options) - Gk,sk = estim['G'],estim['s'] + self.ScenCount += nk + + # Computing G_nkand s_k associated with xhat_1 + + lcfg = self.cfg() # local copy + lcfg.quick_assign("num_scens", int, nk) + scenario_denouement = ( + refmodel.scenario_denouement + if hasattr(refmodel, "scenario_denouement") + else None + ) + estim = ciutils.gap_estimators( + xhat_k, + self.refmodelname, + solving_type=self.solving_type, + scenario_names=estimator_scenario_names, + sample_options=sample_options, + ArRP=self.ArRP, + cfg=lcfg, + scenario_denouement=scenario_denouement, + solver_name=self.solver_name, + solver_options=self.solver_options, + ) + Gk, sk = estim["G"], estim["s"] if self.multistage: - self.SeedCount = estim['seed'] - - #----------------------------Step 2 -------------------------------------# - - while( self.stop_criterion(Gk,sk,nk) and k= mk_m1, "Our sample size should be increasing" - if (k%self.kf_xhat==0): - #We use only new scenarios to compute xhat - xhat_scenario_names = refmodel.scenario_names_creator(int(mult*nk), - start=self.ScenCount) - self.ScenCount+= mk + mk = int(np.floor(mult * lower_bound_k)) + assert mk >= mk_m1, "Our sample size should be increasing" + if k % self.kf_xhat == 0: + # We use only new scenarios to compute xhat + xhat_scenario_names = refmodel.scenario_names_creator( + int(mult * nk), start=self.ScenCount + ) + self.ScenCount += mk else: - #We reuse the previous scenarios - xhat_scenario_names+= refmodel.scenario_names_creator(mult*(nk-nk_m1), - start=self.ScenCount) - self.ScenCount+= mk-mk_m1 - - #Computing xhat_k + # We reuse the previous scenarios + xhat_scenario_names += refmodel.scenario_names_creator( + mult * (nk - nk_m1), start=self.ScenCount + ) + self.ScenCount += mk - mk_m1 + + # Computing xhat_k xgo = self.xhat_gen_kwargs.copy() xgo.pop("solver_name", None) # it will be given explicitly xgo.pop("solver_options", None) # it will be given explicitly xgo.pop("scenario_names", None) # given explicitly - xhat_k = self.xhat_generator(xhat_scenario_names, - solver_name=self.solver_name, - solver_options=self.solver_options, - **xgo) - - #Computing n_k and associated scenario names + xhat_k = self.xhat_generator( + xhat_scenario_names, + solver_name=self.solver_name, + solver_options=self.solver_options, + **xgo, + ) + + # Computing n_k and associated scenario names if self.multistage: self.SeedCount += sputils.number_of_nodes(xhat_branching_factors) - - gap_branching_factors = ciutils.scalable_branching_factors(lower_bound_k, lcfg['branching_factors']) + + gap_branching_factors = ciutils.scalable_branching_factors( + 
+                    lower_bound_k, lcfg["branching_factors"]
+                )
                nk = np.prod(gap_branching_factors)
                estimator_scenario_names = refmodel.scenario_names_creator(nk)
-                sample_options = {'branching_factors':gap_branching_factors, 'seed':self.SeedCount}
+                sample_options = {
+                    "branching_factors": gap_branching_factors,
+                    "seed": self.SeedCount,
+                }
            else:
-                nk = self.ArRP *int(np.ceil(lower_bound_k/self.ArRP))
-                assert nk>= nk_m1, "Our sample size should be increasing"
-                if (k%self.kf_GS==0):
-                    #We use only new scenarios to compute gap estimators
-                    estimator_scenario_names = refmodel.scenario_names_creator(nk,
-                                                                               start=self.ScenCount)
-                    self.ScenCount+=nk
+                nk = self.ArRP * int(np.ceil(lower_bound_k / self.ArRP))
+                assert nk >= nk_m1, "Our sample size should be increasing"
+                if k % self.kf_GS == 0:
+                    # We use only new scenarios to compute gap estimators
+                    estimator_scenario_names = refmodel.scenario_names_creator(
+                        nk, start=self.ScenCount
+                    )
+                    self.ScenCount += nk
                else:
-                    #We reuse the previous scenarios
-                    estimator_scenario_names+= refmodel.scenario_names_creator((nk-nk_m1),
-                                                                               start=self.ScenCount)
-                    self.ScenCount+= (nk-nk_m1)
+                    # We reuse the previous scenarios
+                    estimator_scenario_names += refmodel.scenario_names_creator(
+                        (nk - nk_m1), start=self.ScenCount
+                    )
+                    self.ScenCount += nk - nk_m1
                sample_options = None
-
-
-            #Computing G_k and s_k
-            lcfg['num_scens'] = nk
-            estim = ciutils.gap_estimators(xhat_k, self.refmodelname,
-                                           solving_type=self.solving_type,
-                                           scenario_names=estimator_scenario_names,
-                                           sample_options=sample_options,
-                                           ArRP=self.ArRP,
-                                           cfg=lcfg,
-                                           scenario_denouement=scenario_denouement,
-                                           solver_name=self.solver_name,
-                                           solver_options=self.solver_options)
+
+            # Computing G_k and s_k
+            lcfg["num_scens"] = nk
+            estim = ciutils.gap_estimators(
+                xhat_k,
+                self.refmodelname,
+                solving_type=self.solving_type,
+                scenario_names=estimator_scenario_names,
+                sample_options=sample_options,
+                ArRP=self.ArRP,
+                cfg=lcfg,
+                scenario_denouement=scenario_denouement,
+                solver_name=self.solver_name,
+                solver_options=self.solver_options,
+            )
            if self.multistage:
-                self.SeedCount = estim['seed']
-            Gk,sk = estim['G'],estim['s']
+                self.SeedCount = estim["seed"]
+            Gk, sk = estim["G"], estim["s"]

-            if (k%10==0) and global_rank==0:
+            if (k % 10 == 0) and global_rank == 0:
                print(f"k={k}")
                print(f"n_k={nk}")
                print(f"G_k={Gk}")
                print(f"s_k={sk}")

-        #----------------------------Step 4 -------------------------------------#
-        if (k==maxit) :
-            raise RuntimeError(f"The loop terminated after {maxit} iteration with no acceptable solution")
+        # ----------------------------Step 4 -------------------------------------#
+        if k == maxit:
+            raise RuntimeError(
+                f"The loop terminated after {maxit} iterations with no acceptable solution"
+            )
        T = k
-        final_xhat=xhat_k
+        final_xhat = xhat_k
        if self.stopping_criterion == "BM":
-            upper_bound=self.BM_h*sk+self.BM_eps
+            upper_bound = self.BM_h * sk + self.BM_eps
        elif self.stopping_criterion == "BPL":
            upper_bound = self.BPL_eps
        else:
            raise RuntimeError("Only BM and BPL criteria are supported at this time.")
-        CI=[0,upper_bound]
-        global_toc(f"G={Gk} sk={sk}; xhat has been computed with {nk*mult} observations.")
-        return {"T":T,"Candidate_solution":final_xhat,"CI":CI,}
+        CI = [0, upper_bound]
+        global_toc(
+            f"G={Gk} sk={sk}; xhat has been computed with {nk*mult} observations."
+ ) + return { + "T": T, + "Candidate_solution": final_xhat, + "CI": CI, + } + if __name__ == "__main__": # for developer testing solver_name = "cplex" - + refmodel = "mpisppy.tests.examples.farmer" - farmer_opt_dict = {"crops_multiplier":3} - + farmer_opt_dict = {"crops_multiplier": 3} + # create three options configurations, then use one of them # relative width optionsBM = config.Config() confidence_config.confidence_config(optionsBM) confidence_config.sequential_config(optionsBM) - optionsBM.quick_assign('BM_h', float, 0.2) - optionsBM.quick_assign('BM_hprime', float, 0.015,) - optionsBM.quick_assign('BM_eps', float, 0.5,) - optionsBM.quick_assign('BM_eps_prime', float, 0.4,) + optionsBM.quick_assign("BM_h", float, 0.2) + optionsBM.quick_assign( + "BM_hprime", + float, + 0.015, + ) + optionsBM.quick_assign( + "BM_eps", + float, + 0.5, + ) + optionsBM.quick_assign( + "BM_eps_prime", + float, + 0.4, + ) optionsBM.quick_assign("BM_p", float, 0.2) optionsBM.quick_assign("BM_q", float, 1.2) optionsBM.quick_assign("solver_name", str, solver_name) - optionsBM.quick_assign("stopping", str, "BM") # TBD use this and drop stopping_criterion from the constructor + optionsBM.quick_assign( + "stopping", str, "BM" + ) # TBD use this and drop stopping_criterion from the constructor optionsBM.quick_assign("xhat_gen_kwargs", dict, farmer_opt_dict) optionsBM.quick_assign("solving_type", str, "EF_2stage") @@ -555,11 +646,13 @@ def run(self,maxit=200): optionsFSP = config.Config() confidence_config.confidence_config(optionsFSP) confidence_config.sequential_config(optionsFSP) - optionsFSP.quick_assign('BPL_eps', float, 50.0) - optionsFSP.quick_assign('BPL_solver_name', str, solver_name) + optionsFSP.quick_assign("BPL_eps", float, 50.0) + optionsFSP.quick_assign("BPL_solver_name", str, solver_name) optionsFSP.quick_assign("BPL_c0", int, 50) # starting sample size) optionsFSP.quick_assign("xhat_gen_kwargs", dict, farmer_opt_dict) - optionsFSP.quick_assign("ArRP", int, 2) # this must be 1 for any multi-stage problems + optionsFSP.quick_assign( + "ArRP", int, 2 + ) # this must be 1 for any multi-stage problems optionsFSP.quick_assign("stopping", str, "BPL") optionsFSP.quick_assign("xhat_gen_kwargs", dict, farmer_opt_dict) optionsFSP.quick_assign("solving_type", str, "EF_2stage") @@ -569,21 +662,22 @@ def run(self,maxit=200): optionsSSP = optionsFSP() # safe copy confidence_config.confidence_config(optionsFSP) confidence_config.sequential_config(optionsFSP) - optionsSSP.quick_assign('BPL_eps', float, 1.0) - optionsSSP.quick_assign('solver_name', str, solver_name) - optionsSSP.quick_assign("BPL_n0min", int, 200) # only for stochastic sampling + optionsSSP.quick_assign("BPL_eps", float, 1.0) + optionsSSP.quick_assign("solver_name", str, solver_name) + optionsSSP.quick_assign("BPL_n0min", int, 200) # only for stochastic sampling optionsSSP.quick_assign("stopping", str, "BPL") optionsSSP.quick_assign("xhat_gen_kwargs", dict, farmer_opt_dict) optionsSSP.quick_assign("solving_type", str, "EF_2stage") optionsSSP.quick_assign("solver_name", str, solver_name) # change the options argument and stopping criterion - our_pb = SeqSampling(refmodel, - xhat_generator_farmer, - optionsSSP, - stochastic_sampling=False, # maybe this should move to the options dict? - stopping_criterion="BPL", - ) + our_pb = SeqSampling( + refmodel, + xhat_generator_farmer, + optionsSSP, + stochastic_sampling=False, # maybe this should move to the options dict? 
+ stopping_criterion="BPL", + ) res = our_pb.run() - print(res) + print(res) diff --git a/mpisppy/confidence_intervals/zhat4xhat.py b/mpisppy/confidence_intervals/zhat4xhat.py index a564985c7..4985dce28 100644 --- a/mpisppy/confidence_intervals/zhat4xhat.py +++ b/mpisppy/confidence_intervals/zhat4xhat.py @@ -19,12 +19,9 @@ from mpisppy.utils.sputils import option_string_to_dict from mpisppy.utils import config -def evaluate_sample_trees(xhat_one, - num_samples, - cfg, - InitSeed=0, - model_module = None): - """ Create and evaluate multiple sampled trees. + +def evaluate_sample_trees(xhat_one, num_samples, cfg, InitSeed=0, model_module=None): + """Create and evaluate multiple sampled trees. Args: xhat_one : list or np.array of float (*not* a dict) A feasible and nonanticipative first stage solution. @@ -34,12 +31,12 @@ def evaluate_sample_trees(xhat_one, model_modules: an imported module with the functions needed by, e.g., amalgamator Returns: zhats (list as np.array): the objective functions - seed (int): the updated seed or offset for scenario name sampling + seed (int): the updated seed or offset for scenario name sampling """ - ''' creates batch_size sample trees with first-stage solution xhat_one + """ creates batch_size sample trees with first-stage solution xhat_one using SampleSubtree class from sample_tree used to approximate E_{xi_2} phi(x_1, xi_2) for confidence interval coverage experiments - ''' + """ cfg = cfg() # so the seed can be ephemeral mname = cfg.model_module_name seed = InitSeed @@ -47,56 +44,66 @@ def evaluate_sample_trees(xhat_one, bfs = cfg["branching_factors"] scenario_count = np.prod(bfs) solver_name = cfg["EF_solver_name"] - #sampling_bfs = ciutils.scalable_BFs(batch_size, bfs) # use for variance? - xhat_eval_options = {"iter0_solver_options": None, - "iterk_solver_options": None, - "display_timing": False, - "solver_name": solver_name, - "verbose": False, - "solver_options":{}} - - cfg.dict_assign('seed', 'seed', int, None, seed) - - for j in range(num_samples): # number of sample trees to create - samp_tree = sample_tree.SampleSubtree(mname, - xhats = [], - root_scen=None, - starting_stage=1, - branching_factors=bfs, - seed=seed, - cfg=cfg, - solver_name=solver_name, - solver_options={}) + # sampling_bfs = ciutils.scalable_BFs(batch_size, bfs) # use for variance? 
+ xhat_eval_options = { + "iter0_solver_options": None, + "iterk_solver_options": None, + "display_timing": False, + "solver_name": solver_name, + "verbose": False, + "solver_options": {}, + } + + cfg.dict_assign("seed", "seed", int, None, seed) + + for j in range(num_samples): # number of sample trees to create + samp_tree = sample_tree.SampleSubtree( + mname, + xhats=[], + root_scen=None, + starting_stage=1, + branching_factors=bfs, + seed=seed, + cfg=cfg, + solver_name=solver_name, + solver_options={}, + ) samp_tree.run() ama_object = samp_tree.ama cfg = ama_object.cfg scenario_creator_kwargs = ama_object.kwargs - if len(samp_tree.ef._ef_scenario_names)>1: - local_scenarios = {sname: getattr(samp_tree.ef, sname) - for sname in samp_tree.ef._ef_scenario_names} + if len(samp_tree.ef._ef_scenario_names) > 1: + local_scenarios = { + sname: getattr(samp_tree.ef, sname) + for sname in samp_tree.ef._ef_scenario_names + } else: - local_scenarios = {samp_tree.ef._ef_scenario_names[0]:samp_tree.ef} - - xhats, seed = sample_tree.walking_tree_xhats(mname, - local_scenarios, - xhat_one, - bfs, - seed, - cfg, - solver_name=solver_name, - solver_options=None) + local_scenarios = {samp_tree.ef._ef_scenario_names[0]: samp_tree.ef} + + xhats, seed = sample_tree.walking_tree_xhats( + mname, + local_scenarios, + xhat_one, + bfs, + seed, + cfg, + solver_name=solver_name, + solver_options=None, + ) # for Xhat_Eval # scenario_creator_kwargs = ama_object.kwargs scenario_names = ama_object.scenario_names all_nodenames = sputils.create_nodenames_from_branching_factors(bfs) # Evaluate objective value of feasible policy for this tree - ev = Xhat_Eval(xhat_eval_options, - scenario_names, - ama_object.scenario_creator, - model_module.scenario_denouement, - scenario_creator_kwargs=scenario_creator_kwargs, - all_nodenames=all_nodenames) + ev = Xhat_Eval( + xhat_eval_options, + scenario_names, + ama_object.scenario_creator, + model_module.scenario_denouement, + scenario_creator_kwargs=scenario_creator_kwargs, + all_nodenames=all_nodenames, + ) zhats.append(ev.evaluate(xhats)) @@ -104,57 +111,66 @@ def evaluate_sample_trees(xhat_one, return np.array(zhats), seed + def run_samples(cfg, model_module): - # Does all the work for zhat4xhat + # Does all the work for zhat4xhat # Read xhats from xhatpath xhat_one = ciutils.read_xhat(cfg.xhatpath)["ROOT"] num_samples = cfg.num_samples - zhats,seed = evaluate_sample_trees(xhat_one, num_samples, - cfg, InitSeed=0, - model_module=model_module) + zhats, seed = evaluate_sample_trees( + xhat_one, num_samples, cfg, InitSeed=0, model_module=model_module + ) confidence_level = cfg.confidence_level zhatbar = np.mean(zhats) s_zhat = np.std(np.array(zhats)) - t_zhat = scipy.stats.t.ppf(confidence_level, len(zhats)-1) - eps_z = t_zhat*s_zhat/np.sqrt(len(zhats)) + t_zhat = scipy.stats.t.ppf(confidence_level, len(zhats) - 1) + eps_z = t_zhat * s_zhat / np.sqrt(len(zhats)) - print('zhatbar: ', zhatbar) - print('estimate: ', [zhatbar-eps_z, zhatbar+eps_z]) - print('confidence_level', confidence_level) + print("zhatbar: ", zhatbar) + print("estimate: ", [zhatbar - eps_z, zhatbar + eps_z]) + print("confidence_level", confidence_level) return zhatbar, eps_z + def _parser_setup(): # set up the config object and return it, but don't parse # parsers for the non-model-specific arguments; but the model_module_name will be pulled off first cfg = config.Config() - cfg.add_to_config("EF_solver_name", - description="solver name (default gurobi_direct)", - domain=str, - default="gurobi_direct") + 
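The interval printed by run_samples above is the standard t-based construction around
the sample mean of the per-tree objective values. A standalone numeric sketch with
invented zhats (note that np.std, as in the code above, uses the biased ddof=0
default):

    import numpy as np
    import scipy.stats

    zhats = np.array([100.0, 103.0, 98.0, 101.0, 99.0])  # hypothetical tree objectives
    zhatbar = np.mean(zhats)                             # 100.2
    s_zhat = np.std(zhats)
    t_zhat = scipy.stats.t.ppf(0.95, len(zhats) - 1)
    eps_z = t_zhat * s_zhat / np.sqrt(len(zhats))
    print([zhatbar - eps_z, zhatbar + eps_z])            # the reported estimate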
cfg.add_to_config( + "EF_solver_name", + description="solver name (default gurobi_direct)", + domain=str, + default="gurobi_direct", + ) cfg.add_branching_factors() - cfg.add_to_config("num_samples", - description="Number of independent sample trees to construct", - domain=int, - default=10) - cfg.add_to_config("solver_options", - description="space separated string of solver options, e.g. 'option1=value1 option2 = value2'", - domain=str, - default='') - - cfg.add_to_config("xhatpath", - description="path to npy file with xhat", - domain=str, - default='') - - cfg.add_to_config("confidence_level", - description="one minus alpha (default 0.95)", - domain=float, - default=0.95) + cfg.add_to_config( + "num_samples", + description="Number of independent sample trees to construct", + domain=int, + default=10, + ) + cfg.add_to_config( + "solver_options", + description="space separated string of solver options, e.g. 'option1=value1 option2 = value2'", + domain=str, + default="", + ) + + cfg.add_to_config( + "xhatpath", description="path to npy file with xhat", domain=str, default="" + ) + + cfg.add_to_config( + "confidence_level", + description="one minus alpha (default 0.95)", + domain=float, + default=0.95, + ) return cfg @@ -163,15 +179,16 @@ def _main_body(model_module, cfg): lcfg = cfg() # make a copy because of EF-mstage solver_options_dict = option_string_to_dict(cfg.solver_options) - lcfg.add_and_assign("EF_solver_options", "solver options dict", dict, None, solver_options_dict) - + lcfg.add_and_assign( + "EF_solver_options", "solver options dict", dict, None, solver_options_dict + ) + lcfg.quick_assign("EF_mstage", domain=bool, value=True) return run_samples(lcfg, model_module) if __name__ == "__main__": - cfg = _parser_setup() # now get the extra args from the module mname = sys.argv[1] # args.model_module_name eventually @@ -188,19 +205,23 @@ def _main_body(model_module, cfg): del cfg.num_scens except Exception: pass - + parser = cfg.create_parser("zhat4zhat") # the module name is very special because it has to be plucked from argv parser.add_argument( - "model_module_name", help="amalgamator compatible module (often read from argv)", type=str, - ) - cfg.add_to_config("model_module_name", - description="amalgamator compatible module", - domain=str, - default='', - argparse=False) + "model_module_name", + help="amalgamator compatible module (often read from argv)", + type=str, + ) + cfg.add_to_config( + "model_module_name", + description="amalgamator compatible module", + domain=str, + default="", + argparse=False, + ) cfg.model_module_name = mname - + args = parser.parse_args() # from the command line args = cfg.import_argparse(args) diff --git a/mpisppy/convergers/converger.py b/mpisppy/convergers/converger.py index 9b3cd671b..85339a048 100644 --- a/mpisppy/convergers/converger.py +++ b/mpisppy/convergers/converger.py @@ -6,42 +6,44 @@ # All rights reserved. Please see the files COPYRIGHT.md and LICENSE.md for # full copyright and license information. ############################################################################### -''' Base class for converger objects +"""Base class for converger objects - DTM Dec 2019 +DTM Dec 2019 - Replaces the old implementation in which - convergers were modules rather than classes. +Replaces the old implementation in which +convergers were modules rather than classes. 
-   DLW: as of March 2023 note that user supplied convergers do not compute
-   ph.conv (which is computed as a scaled norm difference)
-   and both ph.conv and the user supplied converger, could trigger convergence
-   (see phbase.py)
-'''
+DLW: as of March 2023 note that user supplied convergers do not compute
+ph.conv (which is computed as a scaled norm difference)
+and both ph.conv and the user supplied converger could trigger convergence
+(see phbase.py)
+"""

 import abc

+
 class Converger:
-    ''' Abstract base class for converger monitors.
+    """Abstract base class for converger monitors.
+
+    Args:
+        opt (SPBase): The SPBase object for the current model
+    """

-        Args:
-            opt (SPBase): The SPBase object for the current model
-    '''
     def __init__(self, opt):
         self.conv = None  # intended to be the value used for comparison

     @abc.abstractmethod
     def is_converged(self):
-        ''' Indicated whether the algorithm has converged.
+        """Indicates whether the algorithm has converged.

-            Must return a boolean. If True, the algorithm will terminate at the
-            current iteration--no more solves will be performed by SPBase.
-            Otherwise, the iterations will continue.
-        '''
+        Must return a boolean. If True, the algorithm will terminate at the
+        current iteration--no more solves will be performed by SPBase.
+        Otherwise, the iterations will continue.
+        """
         pass

     def post_loops(self):
-        '''Method called after the termination of the algorithm.
-            This method is called after the post_loops of any extensions
-        '''
+        """Method called after the termination of the algorithm.
+        This method is called after the post_loops of any extensions.
+        """
         pass
diff --git a/mpisppy/convergers/fracintsnotconv.py b/mpisppy/convergers/fracintsnotconv.py
index e841fd893..9068940ee 100644
--- a/mpisppy/convergers/fracintsnotconv.py
+++ b/mpisppy/convergers/fracintsnotconv.py
@@ -6,28 +6,30 @@
 # All rights reserved. Please see the files COPYRIGHT.md and LICENSE.md for
 # full copyright and license information.
 ###############################################################################
-""" fraction of ints converger to illustrate a custom converger that does not
-    directly use mpi reduce calls.
-    DLW Jan 2019
-    (DLW March 2023: Not tested with multiple hub ranks and might not work well in that case)
+"""fraction of ints converger to illustrate a custom converger that does not
+directly use mpi reduce calls.
+DLW Jan 2019
+(DLW March 2023: Not tested with multiple hub ranks and might not work well in that case)
 """

 import math
 import pyomo.environ as pyo
 import mpisppy.convergers.converger

+
 class FractionalConverger(mpisppy.convergers.converger.Converger):
-    """ Illustrate a class to contain data used by the converger
+    """Illustrate a class to contain data used by the converger

    NOTE: unlike the extensions, we get only our object back
          so we need to keep references to everything we might
          want to look at.
Args: options (dict): keys are option names - local_scenarios (dict): keys are names, + local_scenarios (dict): keys are names, vals are concrete models with attachments comms (dict): key is node name; val is a comm object rank (int): mpi process rank """ + def __init__(self, phb): options = phb.options self.name = "fractintsnotconv" @@ -37,10 +39,10 @@ def __init__(self, phb): self._comms = phb.comms self._rank = phb.cylinder_rank if self.verbose: - print ("Created converger=",self.name) - + print("Created converger=", self.name) + def _convergence_value(self): - """ compute the fraction of *not* converged ints + """compute the fraction of *not* converged ints Args: self (object): create by prep @@ -49,30 +51,34 @@ def _convergence_value(self): """ numints = 0 numconv = 0 - for k,s in self._local_scenarios.items(): - nlens = s._mpisppy_data.nlens + for k, s in self._local_scenarios.items(): + nlens = s._mpisppy_data.nlens for node in s._mpisppy_node_list: ndn = node.name for i in range(nlens[ndn]): xvar = node.nonant_vardata_list[i] if xvar.is_integer() or xvar.is_binary(): numints += 1 - xb = pyo.value(s._mpisppy_model.xbars[(ndn,i)]) - #print ("dlw debug",xb*xb, pyo.value(s._mpisppy_model.xsqbars[(ndn,i)])) - if math.isclose(xb * xb, pyo.value(s._mpisppy_model.xsqbars[(ndn,i)]), abs_tol=1e-09): + xb = pyo.value(s._mpisppy_model.xbars[(ndn, i)]) + # print ("dlw debug",xb*xb, pyo.value(s._mpisppy_model.xsqbars[(ndn,i)])) + if math.isclose( + xb * xb, + pyo.value(s._mpisppy_model.xsqbars[(ndn, i)]), + abs_tol=1e-09, + ): numconv += 1 if self.verbose: - print (self.name,": numints=",numints) + print(self.name, ": numints=", numints) if numints > 0: retval = 1.0 - numconv / numints else: retval = 0 if self.verbose: - print (self.name,": convergence value=",retval) + print(self.name, ": convergence value=", retval) return retval def is_converged(self): - """ check for convergence + """check for convergence Args: self (object): create by prep @@ -80,4 +86,4 @@ def is_converged(self): converged?: True if converged, False otherwise """ self.conv = self._convergence_value() - return self.conv < self._options['convthresh'] + return self.conv < self._options["convthresh"] diff --git a/mpisppy/convergers/norm_rho_converger.py b/mpisppy/convergers/norm_rho_converger.py index d13c1092b..948034766 100644 --- a/mpisppy/convergers/norm_rho_converger.py +++ b/mpisppy/convergers/norm_rho_converger.py @@ -15,12 +15,14 @@ import mpisppy.MPI as MPI import numpy as np -class NormRhoConverger(mpisppy.convergers.converger.Converger): +class NormRhoConverger(mpisppy.convergers.converger.Converger): def __init__(self, ph): - if 'norm_rho_converger_options' in ph.options and \ - 'verbose' in ph.options['norm_rho_converger_options'] and \ - ph.options['norm_rho_converger_options']['verbose']: + if ( + "norm_rho_converger_options" in ph.options + and "verbose" in ph.options["norm_rho_converger_options"] + and ph.options["norm_rho_converger_options"]["verbose"] + ): self._verbose = True else: self._verbose = False @@ -29,13 +31,16 @@ def __init__(self, ph): def _compute_rho_norm(self, ph): local_rho_norm = np.zeros(1) global_rho_norm = np.zeros(1) - local_rho_norm[0] = sum(s._mpisppy_probability*sum( rho._value for rho in s._mpisppy_model.rho.values())\ - for s in ph.local_scenarios.values() ) + local_rho_norm[0] = sum( + s._mpisppy_probability + * sum(rho._value for rho in s._mpisppy_model.rho.values()) + for s in ph.local_scenarios.values() + ) ph.mpicomm.Allreduce(local_rho_norm, global_rho_norm, op=MPI.SUM) return 
float(global_rho_norm[0])

     def is_converged(self):
-        """ check for convergence
+        """check for convergence

         Args:
             self (object): created by prep
@@ -43,20 +48,24 @@
             converged?: True if converged, False otherwise
         """
         ## This will never do anything unless the norm rho updater is also used
-        if not hasattr(self.ph, "_mpisppy_norm_rho_update_inuse")\
-           or not self.ph._mpisppy_norm_rho_update_inuse:
+        if (
+            not hasattr(self.ph, "_mpisppy_norm_rho_update_inuse")
+            or not self.ph._mpisppy_norm_rho_update_inuse
+        ):
             raise RuntimeError("NormRhoConverger can only be used if NormRhoUpdater is")
-
+
         log_rho_norm = math.log(self._compute_rho_norm(self.ph))

-        ret_val = log_rho_norm < self.ph.options['convthresh']
+        ret_val = log_rho_norm < self.ph.options["convthresh"]
         self.conv = log_rho_norm
         if self._verbose and self.ph.cylinder_rank == 0:
             print(f"log(|rho|) = {log_rho_norm}")
             if ret_val:
                 print("Norm rho convergence check passed")
             else:
-                print("Adaptive rho convergence check failed "
-                      f"(requires log(|rho|) < {self.ph.options['convthresh']}")
+                print(
+                    "Adaptive rho convergence check failed "
+                    f"(requires log(|rho|) < {self.ph.options['convthresh']}"
+                )
                 print("Continuing PH with updated rho")

-        return ret_val
+        return ret_val
diff --git a/mpisppy/convergers/norms_and_residuals.py b/mpisppy/convergers/norms_and_residuals.py
index 29a6bfc33..4a5a5a0e6 100644
--- a/mpisppy/convergers/norms_and_residuals.py
+++ b/mpisppy/convergers/norms_and_residuals.py
@@ -25,27 +25,36 @@ def scaled_primal_metric(PHB):
         for node in s._mpisppy_node_list:
             ndn = node.name
             nlen = s._mpisppy_data.nlens[ndn]
-            xbars = np.fromiter((s._mpisppy_model.xbars[ndn,i]._value
-                                 for i in range(nlen)), dtype='d')
-            nonants_array = np.fromiter((v._value for v in node.nonant_vardata_list),
-                                        dtype='d', count=nlen)
-            scaled_norm = np.divide(np.abs(nonants_array - xbars), xbars, out=np.zeros_like(xbars))
+            xbars = np.fromiter(
+                (s._mpisppy_model.xbars[ndn, i]._value for i in range(nlen)), dtype="d"
+            )
+            nonants_array = np.fromiter(
+                (v._value for v in node.nonant_vardata_list), dtype="d", count=nlen
+            )
+            scaled_norm = np.divide(
+                np.abs(nonants_array - xbars), xbars, out=np.zeros_like(xbars)
+            )
             if not s._mpisppy_data.has_variable_probability:
-                local_sum[0] += s._mpisppy_data.prob_coeff[ndn] * np.sum(np.multiply(scaled_norm, scaled_norm))
+                local_sum[0] += s._mpisppy_data.prob_coeff[ndn] * np.sum(
+                    np.multiply(scaled_norm, scaled_norm)
+                )
             else:  # rarely-used overwrite in the event of variable probability
-                prob_array = np.fromiter((s._mpisppy_data.prob_coeff[ndn_i[0]][ndn_i[1]]
-                                          for ndn_i in s._mpisppy_data.nonant_indices
-                                          if ndn_i[0] == ndn),
-                                         dtype='d', count=nlen)
+                prob_array = np.fromiter(
+                    (
+                        s._mpisppy_data.prob_coeff[ndn_i[0]][ndn_i[1]]
+                        for ndn_i in s._mpisppy_data.nonant_indices
+                        if ndn_i[0] == ndn
+                    ),
+                    dtype="d",
+                    count=nlen,
+                )
                 ### TBD: check this!!
                 local_sum[0] += np.dot(prob_array, scaled_norm**2)

     PHB.comms["ROOT"].Allreduce(local_sum, global_sum, op=MPI.SUM)
     return np.sqrt(global_sum[0])
-
-
 def scaled_dual_metric(PHB, w_cache, curr_iter):
     """
     Compute the scaled norm of the difference between two consecutive Ws.
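In words, scaled_primal_metric above computes a probability-weighted root-sum-of-squares
of the xbar-scaled deviations |x - xbar| / xbar over all nonanticipative variables. A
toy, MPI-free sketch of the same arithmetic (two equally likely scenarios, three nonants;
the where= guard against xbar == 0 is an assumption added here, not taken from the code
above):

    import numpy as np

    prob = {"scen0": 0.5, "scen1": 0.5}
    nonants = {"scen0": np.array([10.0, 4.0, 6.0]),
               "scen1": np.array([14.0, 6.0, 6.0])}
    xbars = 0.5 * (nonants["scen0"] + nonants["scen1"])  # [12., 5., 6.]

    total = 0.0
    for sname, x in nonants.items():
        scaled = np.divide(np.abs(x - xbars), xbars,
                           out=np.zeros_like(xbars), where=xbars != 0)
        total += prob[sname] * np.sum(scaled * scaled)
    print(np.sqrt(total))  # the scaled primal metric for this toy problem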
@@ -54,20 +63,29 @@ def scaled_dual_metric(PHB, w_cache, curr_iter):
     global_sum = np.zeros(1)
     for sname, s in PHB.local_scenarios.items():
         current_w = np.array(w_cache[curr_iter][sname])
-        prev_w = np.array(w_cache[curr_iter-1][sname])
+        prev_w = np.array(w_cache[curr_iter - 1][sname])
         # np.seterr(invalid='ignore')
-        scaled_norm = np.divide(np.abs(current_w - prev_w), np.abs(current_w), out=np.zeros_like(current_w))
+        scaled_norm = np.divide(
+            np.abs(current_w - prev_w), np.abs(current_w), out=np.zeros_like(current_w)
+        )
         for node in s._mpisppy_node_list:
             ndn = node.name
             nlen = s._mpisppy_data.nlens[ndn]
             if not s._mpisppy_data.has_variable_probability:
-                local_sum[0] += s._mpisppy_data.prob_coeff[ndn] * np.sum(np.multiply(scaled_norm, scaled_norm))
+                local_sum[0] += s._mpisppy_data.prob_coeff[ndn] * np.sum(
+                    np.multiply(scaled_norm, scaled_norm)
+                )
             else:  # rarely-used overwrite in the event of variable probability
-                prob_array = np.fromiter((s._mpisppy_data.prob_coeff[ndn_i[0]][ndn_i[1]]
-                                          for ndn_i in s._mpisppy_data.nonant_indices
-                                          if ndn_i[0] == ndn),
-                                         dtype='d', count=nlen)
+                prob_array = np.fromiter(
+                    (
+                        s._mpisppy_data.prob_coeff[ndn_i[0]][ndn_i[1]]
+                        for ndn_i in s._mpisppy_data.nonant_indices
+                        if ndn_i[0] == ndn
+                    ),
+                    dtype="d",
+                    count=nlen,
+                )
                # tbd: does this give us the squared norm?
                local_sum[0] += np.dot(prob_array, scaled_norm**2)

@@ -75,8 +93,6 @@
     return np.sqrt(global_sum[0])
-
-
 def primal_residuals_norm(PHB):
     """
     Compute the scaled primal residuals Euclidean norm.
     """
@@ -87,25 +103,33 @@
         for node in s._mpisppy_node_list:
             ndn = node.name
             nlen = s._mpisppy_data.nlens[ndn]
-            xbars = np.fromiter((s._mpisppy_model.xbars[ndn,i]._value
-                                 for i in range(nlen)), dtype='d')
-            nonants_array = np.fromiter((v._value for v in node.nonant_vardata_list),
-                                        dtype='d', count=nlen)
+            xbars = np.fromiter(
+                (s._mpisppy_model.xbars[ndn, i]._value for i in range(nlen)), dtype="d"
+            )
+            nonants_array = np.fromiter(
+                (v._value for v in node.nonant_vardata_list), dtype="d", count=nlen
+            )
             resid = np.abs(nonants_array - xbars)
             if not s._mpisppy_data.has_variable_probability:
-                local_sum[0] += s._mpisppy_data.prob_coeff[ndn] * np.sum(np.multiply(resid, resid))
+                local_sum[0] += s._mpisppy_data.prob_coeff[ndn] * np.sum(
+                    np.multiply(resid, resid)
+                )
             else:  # rarely-used overwrite in the event of variable probability
-                prob_array = np.fromiter((s._mpisppy_data.prob_coeff[ndn_i[0]][ndn_i[1]]
-                                          for ndn_i in s._mpisppy_data.nonant_indices
-                                          if ndn_i[0] == ndn),
-                                         dtype='d', count=nlen)
+                prob_array = np.fromiter(
+                    (
+                        s._mpisppy_data.prob_coeff[ndn_i[0]][ndn_i[1]]
+                        for ndn_i in s._mpisppy_data.nonant_indices
+                        if ndn_i[0] == ndn
+                    ),
+                    dtype="d",
+                    count=nlen,
+                )
                 local_sum[0] += np.dot(prob_array, resid)

     PHB.comms["ROOT"].Allreduce(local_sum, global_sum, op=MPI.SUM)
     return np.sqrt(global_sum[0])
-
 def dual_residuals_norm(PHB, xbar_cache, curr_iter):
     """
     Compute the scaled dual residuals Euclidean norm.
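The dual residual computed below follows the usual PH/ADMM pattern: rho times the
change in xbar between two consecutive iterations, accumulated into a
probability-weighted Euclidean norm. A toy check with invented numbers (probability
weighting omitted for brevity):

    import numpy as np

    rho = np.array([2.0, 2.0])
    xbar_prev = np.array([5.0, 7.0])
    xbar_curr = np.array([5.5, 6.5])
    resid = rho * np.abs(xbar_curr - xbar_prev)  # elementwise, as in dual_residuals_norm
    print(np.sqrt(np.sum(resid * resid)))        # sqrt(2) ~= 1.414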
@@ -114,25 +138,31 @@ def dual_residuals_norm(PHB, xbar_cache, curr_iter):
global_sum = np.zeros(1)
for sname, s in PHB.local_scenarios.items():
current_xbar = np.array(xbar_cache[curr_iter][sname])
- prev_xbar = np.array(xbar_cache[curr_iter-1][sname])
+ prev_xbar = np.array(xbar_cache[curr_iter - 1][sname])
for node in s._mpisppy_node_list:
ndn = node.name
nlen = s._mpisppy_data.nlens[ndn]
- rhos = np.fromiter((s._mpisppy_model.rho[(ndn, i)]._value for i in range(nlen)),
- dtype='d', count=nlen)
+ rhos = np.fromiter(
+ (s._mpisppy_model.rho[(ndn, i)]._value for i in range(nlen)),
+ dtype="d",
+ count=nlen,
+ )
resid = np.multiply(rhos, np.abs(current_xbar - prev_xbar))
if not s._mpisppy_data.has_variable_probability:
- local_sum[0] += s._mpisppy_data.prob_coeff[ndn] * np.sum(np.multiply(resid, resid))
+ local_sum[0] += s._mpisppy_data.prob_coeff[ndn] * np.sum(
+ np.multiply(resid, resid)
+ )
else: # rarely-used overwrite in the event of variable probability
- prob_array = np.fromiter((s._mpisppy_data.prob_coeff[ndn_i[0]][ndn_i[1]]
- for ndn_i in s._mpisppy_data.nonant_indices
- if ndn_i[0] == ndn),
- dtype='d', count=nlen)
+ prob_array = np.fromiter(
+ (
+ s._mpisppy_data.prob_coeff[ndn_i[0]][ndn_i[1]]
+ for ndn_i in s._mpisppy_data.nonant_indices
+ if ndn_i[0] == ndn
+ ),
+ dtype="d",
+ count=nlen,
+ )
local_sum[0] += np.dot(prob_array, resid)
PHB.comms["ROOT"].Allreduce(local_sum, global_sum, op=MPI.SUM)
return np.sqrt(global_sum[0])
-
-
-
-
diff --git a/mpisppy/convergers/primal_dual_converger.py b/mpisppy/convergers/primal_dual_converger.py
index 69ef0cb7d..39a5d9b73 100644
--- a/mpisppy/convergers/primal_dual_converger.py
+++ b/mpisppy/convergers/primal_dual_converger.py
@@ -14,40 +14,44 @@
from mpisppy import MPI
from mpisppy.extensions.phtracker import TrackedData
+
class PrimalDualConverger(mpisppy.convergers.converger.Converger):
- """ Convergence checker for the primal-dual metrics.
- Primal convergence is measured as weighted sum over all scenarios s
- p_{s} * ||x_{s} - \bar{x}||_1.
- Dual convergence is measured as
- rho * ||\bar{x}_{t} - \bar{x}_{t-1}||_1
+ """Convergence checker for the primal-dual metrics.
+ Primal convergence is measured as a weighted sum over all scenarios s of
+ p_{s} * ||x_{s} - \bar{x}||_1.
+ Dual convergence is measured as
+ rho * ||\bar{x}_{t} - \bar{x}_{t-1}||_1
"""
+
def __init__(self, ph):
- """ Initialization method for the PrimalDualConverger class."""
+ """Initialization method for the PrimalDualConverger class."""
super().__init__(ph)
- self.options = ph.options.get('primal_dual_converger_options', {})
- self._verbose = self.options.get('verbose', False)
+ self.options = ph.options.get("primal_dual_converger_options", {})
+ self._verbose = self.options.get("verbose", False)
self._ph = ph
- self.convergence_threshold = self.options.get('tol', 1)
- self.tracking = self.options.get('tracking', False)
+ self.convergence_threshold = self.options.get("tol", 1)
+ self.tracking = self.options.get("tracking", False)
self.prev_xbars = self._get_xbars()
self._rank = self._ph.cylinder_rank
if self.tracking and self._rank == 0:
# if phtracker is set up, save the results in the phtracker/hub folder
- if 'phtracker_options' in self._ph.options:
+ if "phtracker_options" in self._ph.options:
tracker_options = self._ph.options["phtracker_options"]
cylinder_name = tracker_options.get(
- "cylinder_name", type(self._ph.spcomm).__name__)
- results_folder = tracker_options.get(
- "results_folder", "results")
+ "cylinder_name", type(self._ph.spcomm).__name__
+ )
+ results_folder = tracker_options.get("results_folder", "results")
results_folder = os.path.join(results_folder, cylinder_name)
else:
- results_folder = self.options.get('results_folder', 'results')
- self.tracker = TrackedData('pd', results_folder, plot=True, verbose=self._verbose)
+ results_folder = self.options.get("results_folder", "results")
+ self.tracker = TrackedData(
+ "pd", results_folder, plot=True, verbose=self._verbose
+ )
os.makedirs(results_folder, exist_ok=True)
- self.tracker.initialize_fnames(name=self.options.get('pd_fname', None))
- self.tracker.initialize_df(['iteration', 'primal_gap', 'dual_gap'])
+ self.tracker.initialize_fnames(name=self.options.get("pd_fname", None))
+ self.tracker.initialize_df(["iteration", "primal_gap", "dual_gap"])
def _get_xbars(self):
"""
@@ -77,12 +81,14 @@ def _compute_primal_convergence(self):
for node in s._mpisppy_node_list:
ndn = node.name
nlen = s._mpisppy_data.nlens[ndn]
- x_bars = np.fromiter((s._mpisppy_model.xbars[ndn,i]._value
- for i in range(nlen)), dtype='d')
+ x_bars = np.fromiter(
+ (s._mpisppy_model.xbars[ndn, i]._value for i in range(nlen)),
+ dtype="d",
+ )
nonants_array = np.fromiter(
- (v._value for v in node.nonant_vardata_list),
- dtype='d', count=nlen)
+ (v._value for v in node.nonant_vardata_list), dtype="d", count=nlen
+ )
_l1 = np.abs(x_bars - nonants_array)
# invariant to prob_coeff being a scalar or array
@@ -93,7 +99,7 @@ def _compute_primal_convergence(self):
return global_sum_diff[0]
def _compute_dual_residual(self):
- """ Compute the dual residual
+ """Compute the dual residual
Returns:
global_diff (float): difference between two consecutive x bars
@@ -105,12 +111,17 @@ def _compute_dual_residual(self):
for node in s._mpisppy_node_list:
ndn = node.name
nlen = s._mpisppy_data.nlens[ndn]
- rhos = np.fromiter((s._mpisppy_model.rho[ndn,i]._value
- for i in range(nlen)), dtype='d')
- xbars = np.fromiter((s._mpisppy_model.xbars[ndn,i]._value
- for i in range(nlen)), dtype='d')
- prev_xbars = np.fromiter((self.prev_xbars[ndn,i]
- for i in range(nlen)), dtype='d')
+ rhos = np.fromiter(
+ (s._mpisppy_model.rho[ndn, i]._value for i in range(nlen)),
+ dtype="d",
+ )
+ xbars = np.fromiter(
+ (s._mpisppy_model.xbars[ndn, i]._value for i in range(nlen)),
+
dtype="d", + ) + prev_xbars = np.fromiter( + (self.prev_xbars[ndn, i] for i in range(nlen)), dtype="d" + ) local_sum_diff[0] += np.sum(rhos * np.abs(xbars - prev_xbars)) @@ -118,7 +129,7 @@ def _compute_dual_residual(self): return global_sum_diff[0] def is_converged(self): - """ check for convergence + """check for convergence Args: self (object): create by prep @@ -132,13 +143,17 @@ def is_converged(self): ret_val = max(primal_gap, dual_gap) <= self.convergence_threshold if self._verbose and self._rank == 0: - print(f"primal gap = {round(primal_gap, 5)}, dual gap = {round(dual_gap, 5)}") + print( + f"primal gap = {round(primal_gap, 5)}, dual gap = {round(dual_gap, 5)}" + ) if ret_val: print("Dual convergence check passed") else: - print("Dual convergence check failed " - f"(requires primal + dual gaps) <= {self.convergence_threshold}") + print( + "Dual convergence check failed " + f"(requires primal + dual gaps) <= {self.convergence_threshold}" + ) if self.tracking and self._rank == 0: self.tracker.add_row([self._ph._PHIter, primal_gap, dual_gap]) self.tracker.write_out_data() @@ -153,18 +168,20 @@ def plot_results(self): conv_data = pd.read_csv(self.tracker.fname) # Create a log-scale plot - plt.semilogy(conv_data['iteration'], conv_data['primal_gap'], label='Primal Gap') - plt.semilogy(conv_data['iteration'], conv_data['dual_gap'], label='Dual Gap') + plt.semilogy( + conv_data["iteration"], conv_data["primal_gap"], label="Primal Gap" + ) + plt.semilogy(conv_data["iteration"], conv_data["dual_gap"], label="Dual Gap") - plt.xlabel('Iteration') - plt.ylabel('Convergence Metric') + plt.xlabel("Iteration") + plt.ylabel("Convergence Metric") plt.legend() plt.savefig(plot_fname) plt.close() def post_everything(self): - ''' + """ Reading the convergence data and plotting the results - ''' + """ if self.tracking and self._rank == 0: - self.plot_results() \ No newline at end of file + self.plot_results() diff --git a/mpisppy/cylinders/cross_scen_spoke.py b/mpisppy/cylinders/cross_scen_spoke.py index 598914477..966aa628a 100644 --- a/mpisppy/cylinders/cross_scen_spoke.py +++ b/mpisppy/cylinders/cross_scen_spoke.py @@ -14,9 +14,14 @@ import pyomo.environ as pyo import mpisppy.cylinders.spoke as spoke + class CrossScenarioCutSpoke(spoke.Spoke): - def __init__(self, spbase_object, fullcomm, strata_comm, cylinder_comm, options=None): - super().__init__(spbase_object, fullcomm, strata_comm, cylinder_comm, options=options) + def __init__( + self, spbase_object, fullcomm, strata_comm, cylinder_comm, options=None + ): + super().__init__( + spbase_object, fullcomm, strata_comm, cylinder_comm, options=options + ) def make_windows(self): nscen = len(self.opt.all_scenario_names) @@ -33,24 +38,28 @@ def make_windows(self): ## the _locals will also have the kill signal self.all_nonant_len = vbuflen - self.all_eta_len = nscen*local_scen_count - self._locals = np.zeros(nscen*local_scen_count + vbuflen + 1) - self._coefs = np.zeros(nscen*(nscen + self.nonant_per_scen) + 1 + 1) + self.all_eta_len = nscen * local_scen_count + self._locals = np.zeros(nscen * local_scen_count + vbuflen + 1) + self._coefs = np.zeros(nscen * (nscen + self.nonant_per_scen) + 1 + 1) self._new_locals = False # local, remote # send, receive - self._make_windows(nscen*(self.nonant_per_scen + 1 + 1), nscen*local_scen_count + vbuflen) + self._make_windows( + nscen * (self.nonant_per_scen + 1 + 1), nscen * local_scen_count + vbuflen + ) def _got_kill_signal(self): - ''' returns True if a kill signal was received, - and refreshes the array and 
_locals''' + """returns True if a kill signal was received, + and refreshes the array and _locals""" self._new_locals = self.spoke_from_hub(self._locals) - return self.remote_write_id == -1 + return self.remote_write_id == -1 def prep_cs_cuts(self): # create a map scenario -> index, this index is used for various lists containing scenario dependent info. - self.scenario_to_index = { scen : indx for indx, scen in enumerate(self.opt.all_scenario_names) } + self.scenario_to_index = { + scen: indx for indx, scen in enumerate(self.opt.all_scenario_names) + } # create concrete model to use as pseudo-root self.opt.root = pyo.ConcreteModel() @@ -62,7 +71,7 @@ def prep_cs_cuts(self): # add copies of the nonanticipatory variables to the root problem # NOTE: the LShaped code expects the nonant vars to be in a particular # order and with a particular *name*. - # We're also creating an index for reference against later + # We're also creating an index for reference against later nonant_vid_to_copy_map = dict() root_vars = list() for v in non_ants: @@ -84,8 +93,9 @@ def prep_cs_cuts(self): self.opt.root.eta = pyo.Var(self.opt.all_scenario_names) self.opt.root.bender = LShapedCutGenerator() - self.opt.root.bender.set_input(root_vars=self.opt.root_vars, - tol=1e-4, comm=self.cylinder_comm) + self.opt.root.bender.set_input( + root_vars=self.opt.root_vars, tol=1e-4, comm=self.cylinder_comm + ) self.opt.root.bender.set_ls(self.opt) ## the below for loop can take some time, @@ -100,12 +110,14 @@ def prep_cs_cuts(self): subproblem_fn_kwargs = dict() # need to modify this to accept in user kwargs as well - subproblem_fn_kwargs['scenario_name'] = scen - self.opt.root.bender.add_subproblem(subproblem_fn=self.opt.create_subproblem, - subproblem_fn_kwargs=subproblem_fn_kwargs, - root_eta=self.opt.root.eta[scen], - subproblem_solver=self.opt.options["sp_solver"], - subproblem_solver_options=self.opt.options["sp_solver_options"]) + subproblem_fn_kwargs["scenario_name"] = scen + self.opt.root.bender.add_subproblem( + subproblem_fn=self.opt.create_subproblem, + subproblem_fn_kwargs=subproblem_fn_kwargs, + root_eta=self.opt.root.eta[scen], + subproblem_solver=self.opt.options["sp_solver"], + subproblem_solver_options=self.opt.options["sp_solver_options"], + ) ## the above for loop can take some time, ## so return early if we get a kill signal, @@ -119,24 +131,25 @@ def prep_cs_cuts(self): ## above. 
self.opt.set_eta_bounds() self._eta_lb_array = np.fromiter( - (self.opt.valid_eta_lb[s] for s in self.opt.all_scenario_names), - dtype='d', count=len(self.opt.all_scenario_names)) + (self.opt.valid_eta_lb[s] for s in self.opt.all_scenario_names), + dtype="d", + count=len(self.opt.all_scenario_names), + ) self.make_eta_lb_cut() def make_eta_lb_cut(self): ## we'll be storing a matrix as an array ## row_len is the length of each row - row_len = 1+1+len(self.root_nonants) - all_coefs = np.zeros( self.nscen*row_len+1, dtype='d') + row_len = 1 + 1 + len(self.root_nonants) + all_coefs = np.zeros(self.nscen * row_len + 1, dtype="d") for idx, k in enumerate(self.opt.all_scenario_names): ## cut_array -- [ constant, eta_coef, *nonant_coefs ] ## this cut -- [ LB, -1, *0s ], i.e., -1*\eta + LB <= 0 - all_coefs[row_len*idx] = self._eta_lb_array[idx] - all_coefs[row_len*idx+1] = -1 + all_coefs[row_len * idx] = self._eta_lb_array[idx] + all_coefs[row_len * idx + 1] = -1 self.spoke_to_hub(all_coefs) def make_cut(self): - ## cache opt opt = self.opt @@ -158,15 +171,21 @@ def make_cut(self): ## self.nscen == len(opt.all_scenario_names) # compute local min etas - min_eta_vals = np.fromiter(( min(etas[k,sn] for k in opt.local_scenarios) \ - for sn in opt.all_scenario_names ), - dtype='d', count=self.nscen) + min_eta_vals = np.fromiter( + ( + min(etas[k, sn] for k in opt.local_scenarios) + for sn in opt.all_scenario_names + ), + dtype="d", + count=self.nscen, + ) # Allreduce the etas to take the minimum - global_eta_vals = np.empty(self.nscen, dtype='d') + global_eta_vals = np.empty(self.nscen, dtype="d") self.cylinder_comm.Allreduce(min_eta_vals, global_eta_vals, op=MPI.MIN) - eta_lb_viol = (global_eta_vals + np.full_like(global_eta_vals, 1e-3) \ - < self._eta_lb_array).any() + eta_lb_viol = ( + global_eta_vals + np.full_like(global_eta_vals, 1e-3) < self._eta_lb_array + ).any() if eta_lb_viol: self.make_eta_lb_cut() return @@ -179,23 +198,30 @@ def make_cut(self): # sum the local nonants for average computation root_nonants = self.root_nonants - local_nonant_sum = np.fromiter( ( sum(nonants[k, nname, ix] for k in opt.local_scenarios) - for nname, ix in root_nonants), - dtype='d', count=len(root_nonants) ) - + local_nonant_sum = np.fromiter( + ( + sum(nonants[k, nname, ix] for k in opt.local_scenarios) + for nname, ix in root_nonants + ), + dtype="d", + count=len(root_nonants), + ) # Allreduce the xhats to get averages - global_nonant_sum = np.empty(len(local_nonant_sum), dtype='d') - self.cylinder_comm.Allreduce(local_nonant_sum, global_nonant_sum, op = MPI.SUM) + global_nonant_sum = np.empty(len(local_nonant_sum), dtype="d") + self.cylinder_comm.Allreduce(local_nonant_sum, global_nonant_sum, op=MPI.SUM) # need to divide through by the number of different spoke processes global_xbar = global_nonant_sum / self.nscen - local_dist = np.array([0],dtype='d') + local_dist = np.array([0], dtype="d") local_winner = None # iterate through the ranks xhats to get the ranks maximum dist for i, k in enumerate(opt.local_scenarios): - scenario_xhat = np.fromiter( (nonants[k, nname, ix] for nname, ix in root_nonants), - dtype='d', count=len(root_nonants) ) + scenario_xhat = np.fromiter( + (nonants[k, nname, ix] for nname, ix in root_nonants), + dtype="d", + count=len(root_nonants), + ) scenario_dist = np.linalg.norm(scenario_xhat - global_xbar) local_dist[0] = max(local_dist[0], scenario_dist) if local_winner is None: @@ -204,22 +230,24 @@ def make_cut(self): local_winner = k # Allreduce to find the biggest distance - 
global_dist = np.empty(1, dtype='d') + global_dist = np.empty(1, dtype="d") self.cylinder_comm.Allreduce(local_dist, global_dist, op=MPI.MAX) - vote = np.array([-1], dtype='i') + vote = np.array([-1], dtype="i") if local_dist[0] >= global_dist[0]: vote[0] = self.cylinder_comm.Get_rank() - global_rank = np.empty(1, dtype='i') + global_rank = np.empty(1, dtype="i") self.cylinder_comm.Allreduce(vote, global_rank, op=MPI.MAX) # if we are the winner, grab the xhat and bcast it to the other ranks if self.cylinder_comm.Get_rank() == global_rank[0]: - farthest_xhat = np.fromiter( (nonants[local_winner, nname, ix] - for nname, ix in root_nonants), - dtype='d', count=len(root_nonants) ) + farthest_xhat = np.fromiter( + (nonants[local_winner, nname, ix] for nname, ix in root_nonants), + dtype="d", + count=len(root_nonants), + ) else: - farthest_xhat = np.zeros(len(root_nonants), dtype='d') + farthest_xhat = np.zeros(len(root_nonants), dtype="d") self.cylinder_comm.Bcast(farthest_xhat, root=global_rank) @@ -231,7 +259,7 @@ def make_cut(self): cuts = opt.root.bender.generate_cut() # eta var_id map: - eta_id_map = { id(var) : k for k,var in root_etas.items()} + eta_id_map = {id(var): k for k, var in root_etas.items()} coef_dict = dict() feas_cuts = list() # package cuts, slightly silly in that we reconstruct the coefficients from the cuts @@ -242,7 +270,7 @@ def make_cut(self): raise RuntimeError("BendersCutGenerator returned nonlinear cut") ## create a map from id(var) to index in repn - id_var_to_idx = { id(var) : i for i,var in enumerate(repn.linear_vars) } + id_var_to_idx = {id(var): i for i, var in enumerate(repn.linear_vars)} ## find the eta index for vid in eta_id_map: @@ -255,9 +283,9 @@ def make_cut(self): # each variable should only appear once in repn.linear_vars del id_var_to_idx[vid] break - else: # no break, + else: # no break, # so no scenario recourse cost variables appear in the cut - cut_array = [repn.constant, 0.] 
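# Each cut travels as one fixed-width row in the layout introduced in
# make_eta_lb_cut above: [constant, eta_coef, *nonant_coefs], with all rows
# packed into a single flat array plus one trailing write-id slot. A small
# hypothetical sketch (toy sizes and values) of that packing and of
# recovering one row on the receiving side:
import numpy as np

nscen, n_nonants = 3, 4
row_len = 1 + 1 + n_nonants                           # constant + eta coef + nonants
all_coefs = np.zeros(nscen * row_len + 1, dtype="d")  # +1 trailing write-id slot

lb_cut = np.array([7.5, -1.0, 0.0, 0.0, 0.0, 0.0])    # -1*eta + 7.5 <= 0, i.e. eta >= 7.5
for idx in range(nscen):
    all_coefs[row_len * idx : row_len * (idx + 1)] = lb_cut

row = all_coefs[row_len * 1 : row_len * 2]            # unpack scenario 1's cut
constant, eta_coef, nonant_coefs = row[0], row[1], row[2:]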
+ cut_array = [repn.constant, 0.0] # we don't know what scenario, # but since eta_s is 0, it doesn't # matter @@ -276,19 +304,19 @@ def make_cut(self): cut_array.append(0) if scen_name is not None: - coef_dict[scen_name] = np.array(cut_array, dtype='d') + coef_dict[scen_name] = np.array(cut_array, dtype="d") else: - feas_cuts.append( np.array(cut_array, dtype='d') ) + feas_cuts.append(np.array(cut_array, dtype="d")) ## we'll be storing a matrix as an array ## row_len is the length of each row - row_len = 1+1+len(root_nonants) - all_coefs = np.zeros( self.nscen*row_len +1, dtype='d') + row_len = 1 + 1 + len(root_nonants) + all_coefs = np.zeros(self.nscen * row_len + 1, dtype="d") for idx, k in enumerate(opt.all_scenario_names): if k in coef_dict: - all_coefs[row_len*idx:row_len*(idx+1)] = coef_dict[k] + all_coefs[row_len * idx : row_len * (idx + 1)] = coef_dict[k] elif feas_cuts: - all_coefs[row_len*idx:row_len*(idx+1)] = feas_cuts.pop() + all_coefs[row_len * idx : row_len * (idx + 1)] = feas_cuts.pop() self.spoke_to_hub(all_coefs) def main(self): diff --git a/mpisppy/cylinders/fwph_spoke.py b/mpisppy/cylinders/fwph_spoke.py index 1530c514f..fac4ea39a 100644 --- a/mpisppy/cylinders/fwph_spoke.py +++ b/mpisppy/cylinders/fwph_spoke.py @@ -8,9 +8,9 @@ ############################################################################### import mpisppy.cylinders.spoke -class FrankWolfeOuterBound(mpisppy.cylinders.spoke.OuterBoundSpoke): - converger_spoke_char = 'F' +class FrankWolfeOuterBound(mpisppy.cylinders.spoke.OuterBoundSpoke): + converger_spoke_char = "F" def main(self): self.opt.fwph_main() @@ -22,7 +22,7 @@ def sync(self): # The FWPH spoke can call "sync" before it # even starts doing anything, so its possible # to get here without any bound information - if not hasattr(self.opt, '_local_bound'): + if not hasattr(self.opt, "_local_bound"): return # Tell the hub about the most recent bound self.bound = self.opt._local_bound @@ -32,7 +32,7 @@ def finalize(self): # even starts doing anything, so its possible # to get here without any bound information # if we terminated early - if not hasattr(self.opt, '_local_bound'): + if not hasattr(self.opt, "_local_bound"): return self.bound = self.opt._local_bound self.final_bound = self.opt._local_bound diff --git a/mpisppy/cylinders/hub.py b/mpisppy/cylinders/hub.py index 5347c6910..ba1e38253 100644 --- a/mpisppy/cylinders/hub.py +++ b/mpisppy/cylinders/hub.py @@ -20,14 +20,17 @@ from mpisppy import global_toc # Could also pass, e.g., sys.stdout instead of a filename -mpisppy.log.setup_logger("mpisppy.cylinders.Hub", - "hub.log", - level=logging.CRITICAL) +mpisppy.log.setup_logger("mpisppy.cylinders.Hub", "hub.log", level=logging.CRITICAL) logger = logging.getLogger("mpisppy.cylinders.Hub") + class Hub(SPCommunicator): - def __init__(self, spbase_object, fullcomm, strata_comm, cylinder_comm, spokes, options=None): - super().__init__(spbase_object, fullcomm, strata_comm, cylinder_comm, options=options) + def __init__( + self, spbase_object, fullcomm, strata_comm, cylinder_comm, spokes, options=None + ): + super().__init__( + spbase_object, fullcomm, strata_comm, cylinder_comm, options=options + ) assert len(spokes) == self.n_spokes self.local_write_ids = np.zeros(self.n_spokes, dtype=np.int64) self.remote_write_ids = np.zeros(self.n_spokes, dtype=np.int64) @@ -44,7 +47,7 @@ def __init__(self, spbase_object, fullcomm, strata_comm, cylinder_comm, spokes, self.last_ob_idx = None # for termination based on stalling out self.stalled_iter_cnt = 0 - 
self.last_gap = float('inf') # abs_gap tracker
+ self.last_gap = float("inf")  # abs_gap tracker
@abc.abstractmethod
def setup_hub(self):
@@ -52,22 +55,21 @@ def setup_hub(self):
@abc.abstractmethod
def sync(self):
- """ To be called within the whichever optimization algorithm
- is being run on the hub (e.g. PH)
+ """To be called within whichever optimization algorithm
+ is being run on the hub (e.g. PH)
"""
pass
@abc.abstractmethod
def is_converged(self):
- """ The hub has the ability to halt the optimization algorithm on the
- hub before any local convergers.
+ """The hub has the ability to halt the optimization algorithm on the
+ hub before any local convergers.
"""
pass
@abc.abstractmethod
def current_iteration(self):
- """ Returns the current iteration count - however the hub defines it.
- """
+ """Returns the current iteration count - however the hub defines it."""
pass
@abc.abstractmethod
@@ -78,10 +80,9 @@ def clear_latest_chars(self):
self.latest_ib_char = None
self.latest_ob_char = None
-
def compute_gaps(self):
- """ Compute the current absolute and relative gaps,
- using the current self.BestInnerBound and self.BestOuterBound
+ """Compute the current absolute and relative gaps,
+ using the current self.BestInnerBound and self.BestOuterBound
"""
if self.opt.is_minimizing:
abs_gap = self.BestInnerBound - self.BestOuterBound
@@ -102,16 +103,14 @@ def compute_gaps(self):
rel_gap = float("inf")
return abs_gap, rel_gap
-
def get_update_string(self):
- if self.latest_ib_char is None and \
- self.latest_ob_char is None:
- return ' '
+ if self.latest_ib_char is None and self.latest_ob_char is None:
+ return " "
if self.latest_ib_char is None:
- return self.latest_ob_char + ' '
+ return self.latest_ob_char + " "
if self.latest_ob_char is None:
- return ' ' + self.latest_ib_char
- return self.latest_ob_char+' '+self.latest_ib_char
+ return " " + self.latest_ib_char
+ return self.latest_ob_char + " " + self.latest_ib_char
def screen_trace(self):
current_iteration = self.current_iteration()
@@ -130,9 +129,15 @@ def screen_trace(self):
def determine_termination(self):
# return True if termination is indicated, otherwise return False
- if not hasattr(self,"options") or self.options is None\
- or ("rel_gap" not in self.options and "abs_gap" not in self.options\
- and "max_stalled_iters" not in self.options):
+ if (
+ not hasattr(self, "options")
+ or self.options is None
+ or (
+ "rel_gap" not in self.options
+ and "abs_gap" not in self.options
+ and "max_stalled_iters" not in self.options
+ )
+ ):
return False # Nothing to see here folks...
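# The termination test above reduces to: form the inter-cylinder gaps from
# the best bounds, then stop when any configured threshold is met. A rough
# standalone sketch of the minimizing case (thresholds and bound values are
# made up, and the stalled-iterations check is elided):
def gaps(best_inner, best_outer, minimizing=True):
    abs_gap = best_inner - best_outer if minimizing else best_outer - best_inner
    # guard the relative gap against a zero incumbent
    rel_gap = abs_gap / abs(best_inner) if best_inner != 0 else float("inf")
    return abs_gap, rel_gap

abs_gap, rel_gap = gaps(best_inner=105.0, best_outer=100.0)
options = {"abs_gap": 1.0, "rel_gap": 0.01}
terminate = abs_gap <= options["abs_gap"] or rel_gap <= options["rel_gap"]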
# If we are still here, there is some option for termination @@ -157,11 +162,17 @@ def determine_termination(self): max_stalled_satisfied = True if abs_gap_satisfied: - global_toc(f"Terminating based on inter-cylinder absolute gap {abs_gap:12.4f}") + global_toc( + f"Terminating based on inter-cylinder absolute gap {abs_gap:12.4f}" + ) if rel_gap_satisfied: - global_toc(f"Terminating based on inter-cylinder relative gap {rel_gap*100:12.3f}%") + global_toc( + f"Terminating based on inter-cylinder relative gap {rel_gap*100:12.3f}%" + ) if max_stalled_satisfied: - global_toc(f"Terminating based on max-stalled-iters {self.stalled_iter_cnt}") + global_toc( + f"Terminating based on max-stalled-iters {self.stalled_iter_cnt}" + ) return abs_gap_satisfied or rel_gap_satisfied or max_stalled_satisfied @@ -177,9 +188,9 @@ def hub_finalize(self): self.screen_trace() def receive_innerbounds(self): - """ Get inner bounds from inner bound spokes - NOTE: Does not check if there _are_ innerbound spokes - (but should be harmless to call if there are none) + """Get inner bounds from inner bound spokes + NOTE: Does not check if there _are_ innerbound spokes + (but should be harmless to call if there are none) """ logging.debug("Hub is trying to receive from InnerBounds") for idx in self.innerbound_spoke_indices: @@ -191,9 +202,9 @@ def receive_innerbounds(self): logging.debug("ph back from InnerBounds") def receive_outerbounds(self): - """ Get outer bounds from outer bound spokes - NOTE: Does not check if there _are_ outerbound spokes - (but should be harmless to call if there are none) + """Get outer bounds from outer bound spokes + NOTE: Does not check if there _are_ outerbound spokes + (but should be harmless to call if there are none) """ logging.debug("Hub is trying to receive from OuterBounds") for idx in self.outerbound_spoke_indices: @@ -204,7 +215,7 @@ def receive_outerbounds(self): self.BestOuterBound = self.OuterBoundUpdate(bound, idx) logging.debug("ph back from OuterBounds") - def OuterBoundUpdate(self, new_bound, idx=None, char='*'): + def OuterBoundUpdate(self, new_bound, idx=None, char="*"): current_bound = self.BestOuterBound if self._outer_bound_update(new_bound, current_bound): if idx is None: @@ -217,7 +228,7 @@ def OuterBoundUpdate(self, new_bound, idx=None, char='*'): else: return current_bound - def InnerBoundUpdate(self, new_bound, idx=None, char='*'): + def InnerBoundUpdate(self, new_bound, idx=None, char="*"): current_bound = self.BestInnerBound if self._inner_bound_update(new_bound, current_bound): if idx is None: @@ -234,17 +245,16 @@ def initialize_bound_values(self): if self.opt.is_minimizing: self.BestInnerBound = inf self.BestOuterBound = -inf - self._inner_bound_update = lambda new, old : (new < old) - self._outer_bound_update = lambda new, old : (new > old) + self._inner_bound_update = lambda new, old: (new < old) + self._outer_bound_update = lambda new, old: (new > old) else: self.BestInnerBound = -inf self.BestOuterBound = inf - self._inner_bound_update = lambda new, old : (new > old) - self._outer_bound_update = lambda new, old : (new < old) + self._inner_bound_update = lambda new, old: (new > old) + self._outer_bound_update = lambda new, old: (new < old) def initialize_outer_bound_buffers(self): - """ Initialize value of BestOuterBound, and outer bound receive buffers - """ + """Initialize value of BestOuterBound, and outer bound receive buffers""" self.outerbound_receive_buffers = dict() for idx in self.outerbound_spoke_indices: self.outerbound_receive_buffers[idx] = 
communicator_array( @@ -252,8 +262,7 @@ def initialize_outer_bound_buffers(self): ) def initialize_inner_bound_buffers(self): - """ Initialize value of BestInnerBound, and inner bound receive buffers - """ + """Initialize value of BestInnerBound, and inner bound receive buffers""" self.innerbound_receive_buffers = dict() for idx in self.innerbound_spoke_indices: self.innerbound_receive_buffers[idx] = communicator_array( @@ -261,36 +270,41 @@ def initialize_inner_bound_buffers(self): ) def initialize_nonants(self): - """ Initialize the buffer for the hub to send nonants - to the appropriate spokes + """Initialize the buffer for the hub to send nonants + to the appropriate spokes """ self.nonant_send_buffer = None for idx in self.nonant_spoke_indices: if self.nonant_send_buffer is None: # for hub outer/inner bounds and kill signal - self.nonant_send_buffer = communicator_array(self.local_lengths[idx - 1]) + self.nonant_send_buffer = communicator_array( + self.local_lengths[idx - 1] + ) elif self.local_lengths[idx - 1] + 1 != len(self.nonant_send_buffer): raise RuntimeError("Nonant buffers disagree on size") def initialize_boundsout(self): - """ Initialize the buffer for the hub to send bounds - to bounds only spokes + """Initialize the buffer for the hub to send bounds + to bounds only spokes """ self.boundsout_send_buffer = None for idx in self.bounds_only_indices: if self.boundsout_send_buffer is None: - self.boundsout_send_buffer = communicator_array(self.local_lengths[idx - 1]) + self.boundsout_send_buffer = communicator_array( + self.local_lengths[idx - 1] + ) if self.local_lengths[idx - 1] != 2: - raise RuntimeError(f'bounds only local length buffers must be 2 (bounds). Currently {self.local_lengths[idx - 1]}') + raise RuntimeError( + f"bounds only local length buffers must be 2 (bounds). Currently {self.local_lengths[idx - 1]}" + ) def _populate_boundsout_cache(self, buf): - """ Populate a given buffer with the current bounds - """ + """Populate a given buffer with the current bounds""" buf[-3] = self.BestOuterBound buf[-2] = self.BestInnerBound def send_boundsout(self): - """ Send bounds to the appropriate spokes + """Send bounds to the appropriate spokes This is called only for spokes which are bounds only. w and nonant spokes are passed bounds through the w and nonant buffers """ @@ -300,7 +314,7 @@ def send_boundsout(self): self.hub_to_spoke(self.boundsout_send_buffer, idx) def initialize_spoke_indices(self): - """ Figure out what types of spokes we have, + """Figure out what types of spokes we have, and sort them into the appropriate classes. 
Note: @@ -315,16 +329,20 @@ def initialize_spoke_indices(self): self.outerbound_spoke_chars = dict() self.innerbound_spoke_chars = dict() - for (i, spoke) in enumerate(self.spokes): + for i, spoke in enumerate(self.spokes): spoke_class = spoke["spoke_class"] if hasattr(spoke_class, "converger_spoke_types"): for cst in spoke_class.converger_spoke_types: if cst == ConvergerSpokeType.OUTER_BOUND: self.outerbound_spoke_indices.add(i + 1) - self.outerbound_spoke_chars[i+1] = spoke_class.converger_spoke_char + self.outerbound_spoke_chars[i + 1] = ( + spoke_class.converger_spoke_char + ) elif cst == ConvergerSpokeType.INNER_BOUND: self.innerbound_spoke_indices.add(i + 1) - self.innerbound_spoke_chars[i+1] = spoke_class.converger_spoke_char + self.innerbound_spoke_chars[i + 1] = ( + spoke_class.converger_spoke_char + ) elif cst == ConvergerSpokeType.W_GETTER: self.w_spoke_indices.add(i + 1) elif cst == ConvergerSpokeType.NONANT_GETTER: @@ -337,9 +355,9 @@ def initialize_spoke_indices(self): # all _BoundSpoke spokes get hub bounds so we determine which spokes # are "bounds only" - self.bounds_only_indices = \ - (self.outerbound_spoke_indices | self.innerbound_spoke_indices) - \ - (self.w_spoke_indices | self.nonant_spoke_indices) + self.bounds_only_indices = ( + self.outerbound_spoke_indices | self.innerbound_spoke_indices + ) - (self.w_spoke_indices | self.nonant_spoke_indices) self.has_outerbound_spokes = len(self.outerbound_spoke_indices) > 0 self.has_innerbound_spokes = len(self.innerbound_spoke_indices) > 0 @@ -377,14 +395,14 @@ def make_windows(self): self._windows_constructed = True def hub_to_spoke(self, values, spoke_strata_rank): - """ Put the specified values into the specified locally-owned buffer - for the spoke to pick up. + """Put the specified values into the specified locally-owned buffer + for the spoke to pick up. - Notes: - This automatically does the -1 indexing + Notes: + This automatically does the -1 indexing - This assumes that values contains a slot at the end for the - write_id + This assumes that values contains a slot at the end for the + write_id """ expected_length = self.local_lengths[spoke_strata_rank - 1] + 1 if len(values) != expected_length: @@ -403,11 +421,11 @@ def hub_to_spoke(self, values, spoke_strata_rank): window.Unlock(self.strata_rank) def hub_from_spoke(self, values, spoke_num): - """ spoke_num is the rank in the strata_comm, so it is 1-based not 0-based + """spoke_num is the rank in the strata_comm, so it is 1-based not 0-based - Returns: - is_new (bool): Indicates whether the "gotten" values are new, - based on the write_id. + Returns: + is_new (bool): Indicates whether the "gotten" values are new, + based on the write_id. """ expected_length = self.remote_lengths[spoke_num - 1] + 1 if len(values) != expected_length: @@ -431,11 +449,11 @@ def hub_from_spoke(self, values, spoke_num): return True else: new_id = int(values[-1]) - local_val = np.array((new_id,), 'i') - sum_ids = np.zeros(1, 'i') - self.cylinder_comm.Allreduce((local_val, MPI.INT), - (sum_ids, MPI.INT), - op=MPI.SUM) + local_val = np.array((new_id,), "i") + sum_ids = np.zeros(1, "i") + self.cylinder_comm.Allreduce( + (local_val, MPI.INT), (sum_ids, MPI.INT), op=MPI.SUM + ) if new_id != sum_ids[0] / self.cylinder_comm.size: return False @@ -445,10 +463,10 @@ def hub_from_spoke(self, values, spoke_num): return False def send_terminate(self): - """ Send an array of zeros with a -1 appended to the - end to indicate termination. 
This function puts to the local - buffer, so every spoke will see it simultaneously. - processes (don't need to call them one at a time). + """Send an array of zeros with a -1 appended to the + end to indicate termination. This function puts to the local + buffer, so every spoke will see it simultaneously. + processes (don't need to call them one at a time). """ for rank in range(1, self.n_spokes + 1): dummies = np.zeros(self.local_lengths[rank - 1] + 1) @@ -461,8 +479,8 @@ def send_terminate(self): class PHHub(Hub): def setup_hub(self): - """ Must be called after make_windows(), so that - the hub knows the sizes of all the spokes windows + """Must be called after make_windows(), so that + the hub knows the sizes of all the spokes windows """ if not self._windows_constructed: raise RuntimeError( @@ -511,7 +529,7 @@ def setup_hub(self): def sync(self): """ - Manages communication with Spokes + Manages communication with Spokes """ if self.has_w_spokes: self.send_ws() @@ -537,8 +555,7 @@ def is_converged(self): if not self.has_innerbound_spokes: if self.opt._PHIter == 1: logger.warning( - "PHHub cannot compute convergence without " - "inner bound spokes." + "PHHub cannot compute convergence without " "inner bound spokes." ) ## you still want to output status, even without inner bounders configured @@ -551,7 +568,8 @@ def is_converged(self): if self.opt._PHIter == 1: global_toc( "Without outer bound spokes, no progress " - "will be made on the Best Bound") + "will be made on the Best Bound" + ) ## log some output if self.global_rank == 0: @@ -560,21 +578,21 @@ def is_converged(self): return self.determine_termination() def current_iteration(self): - """ Return the current PH iteration.""" + """Return the current PH iteration.""" return self.opt._PHIter def main(self): - """ SPComm gets attached in self.__init__ """ + """SPComm gets attached in self.__init__""" self.opt.ph_main(finalize=False) def finalize(self): - """ does PH.post_loops, returns Eobj """ + """does PH.post_loops, returns Eobj""" Eobj = self.opt.post_loops(self.opt.extensions) return Eobj def send_nonants(self): - """ Gather nonants and send them to the appropriate spokes - TODO: Will likely fail with bundling + """Gather nonants and send them to the appropriate spokes + TODO: Will likely fail with bundling """ self.opt._save_nonants() ci = 0 ## index to self.nonant_send_buffer @@ -590,8 +608,8 @@ def send_nonants(self): self.hub_to_spoke(nonant_send_buffer, idx) def initialize_ws(self): - """ Initialize the buffer for the hub to send dual weights - to the appropriate spokes + """Initialize the buffer for the hub to send dual weights + to the appropriate spokes """ self.w_send_buffer = None for idx in self.w_spoke_indices: @@ -601,8 +619,7 @@ def initialize_ws(self): raise RuntimeError("W buffers disagree on size") def send_ws(self): - """ Send dual weights to the appropriate spokes - """ + """Send dual weights to the appropriate spokes""" self.opt._populate_W_cache(self.w_send_buffer, padding=3) logging.debug("hub is sending Ws={}".format(self.w_send_buffer)) self._populate_boundsout_cache(self.w_send_buffer) @@ -610,11 +627,11 @@ def send_ws(self): for idx in self.w_spoke_indices: self.hub_to_spoke(self.w_send_buffer, idx) -class LShapedHub(Hub): +class LShapedHub(Hub): def setup_hub(self): - """ Must be called after make_windows(), so that - the hub knows the sizes of all the spokes windows + """Must be called after make_windows(), so that + the hub knows the sizes of all the spokes windows """ if not 
self._windows_constructed:
raise RuntimeError(
@@ -663,7 +680,7 @@ def sync(self, send_nonants=True):
self.opt.extobject.sync_with_spokes()
def is_converged(self):
- """ Returns a boolean. If True, then LShaped will terminate
+ """Returns a boolean. If True, then LShaped will terminate
Side-effects:
The L-shaped method produces outer bounds during execution,
@@ -679,16 +696,16 @@ def is_converged(self):
return self.determine_termination()
def current_iteration(self):
- """ Return the current L-shaped iteration."""
+ """Return the current L-shaped iteration."""
return self.opt.iter
def main(self):
- """ SPComm gets attached in self.__init__ """
+ """SPComm gets attached in self.__init__"""
self.opt.lshaped_algorithm()
def send_nonants(self):
- """ Gather nonants and send them to the appropriate spokes
- TODO: Will likely fail with bundling
+ """Gather nonants and send them to the appropriate spokes
+ TODO: Will likely fail with bundling
"""
ci = 0 ## index to self.nonant_send_buffer
nonant_send_buffer = self.nonant_send_buffer
@@ -704,15 +721,15 @@ def send_nonants(self):
for idx in self.nonant_spoke_indices:
self.hub_to_spoke(nonant_send_buffer, idx)
-class APHHub(PHHub):
+class APHHub(PHHub):
def main(self):
- """ SPComm gets attached by self.__init___; holding APH harmless """
+ """SPComm gets attached by self.__init___; holding APH harmless"""
logger.critical("aph debug main in hub.py")
self.opt.APH_main(spcomm=self, finalize=False)
def finalize(self):
- """ does PH.post_loops, returns Eobj """
+ """does PH.post_loops, returns Eobj"""
# NOTE: APH_main does NOT pass in extensions
# to APH.post_loops
Eobj = self.opt.post_loops()
diff --git a/mpisppy/cylinders/lagranger_bounder.py b/mpisppy/cylinders/lagranger_bounder.py
index b4a893dbd..d809bf85d 100644
--- a/mpisppy/cylinders/lagranger_bounder.py
+++ b/mpisppy/cylinders/lagranger_bounder.py
@@ -15,17 +15,24 @@
import mpisppy.cylinders.spoke
from mpisppy.cylinders.lagrangian_bounder import _LagrangianMixin
-class LagrangerOuterBound(_LagrangianMixin, mpisppy.cylinders.spoke.OuterBoundNonantSpoke):
- """Indepedent Lagrangian that takes x values as input and updates its own W.
- """
- converger_spoke_char = 'A'
+
+class LagrangerOuterBound(
+ _LagrangianMixin, mpisppy.cylinders.spoke.OuterBoundNonantSpoke
+):
+ """Independent Lagrangian that takes x values as input and updates its own W."""
+
+ converger_spoke_char = "A"
def lagrangian_prep(self):
# Scenarios are created here
super().lagrangian_prep()
- if "lagranger_rho_rescale_factors_json" in self.opt.options and\
- self.opt.options["lagranger_rho_rescale_factors_json"] is not None:
- with open(self.opt.options["lagranger_rho_rescale_factors_json"], "r") as fin:
+ if (
+ "lagranger_rho_rescale_factors_json" in self.opt.options
+ and self.opt.options["lagranger_rho_rescale_factors_json"] is not None
+ ):
+ with open(
+ self.opt.options["lagranger_rho_rescale_factors_json"], "r"
+ ) as fin:
din = json.load(fin)
self.rho_rescale_factors = {int(i): float(din[i]) for i in din}
else:
@@ -35,31 +42,29 @@ def lagrangian_prep(self):
def _lagrangian(self, iternum):
# see if rho should be rescaled
- if self.rho_rescale_factors is not None\
- and iternum in self.rho_rescale_factors:
+ if self.rho_rescale_factors is not None and iternum in self.rho_rescale_factors:
self._rescale_rho(self.rho_rescale_factors[iternum])
return self.lagrangian()
- def _rescale_rho(self,rf):
+ def _rescale_rho(self, rf):
# IMPORTANT: the scalings accumulate.
# E.g., 0.5 then 2.0 gets you back where you started.
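# The rescale factors loaded from the JSON file above are multiplicative and
# accumulate across iterations, which is what the comment means: a schedule
# like {5: 0.5, 10: 2.0} returns rho to its starting value at iteration 10.
# A tiny hypothetical illustration (schedule contents are made up):
rho = 1.0
schedule = {5: 0.5, 10: 2.0}  # shape of the *_rho_rescale_factors_json data
for iternum in range(12):
    if iternum in schedule:
        rho *= schedule[iternum]
print(rho)  # 1.0 again once both factors have been applied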
- for (sname, scenario) in self.opt.local_scenarios.items(): + for sname, scenario in self.opt.local_scenarios.items(): for ndn_i, xvar in scenario._mpisppy_data.nonant_indices.items(): scenario._mpisppy_model.rho[ndn_i] *= rf def _write_W_and_xbar(self, iternum): if self.opt.options.get("lagranger_write_W", False): - w_fname = 'lagranger_w_vals.csv' - with open(w_fname, 'a') as f: + w_fname = "lagranger_w_vals.csv" + with open(w_fname, "a") as f: writer = csv.writer(f) - writer.writerow(['#iteration number', iternum]) - mpisppy.utils.wxbarutils.write_W_to_file(self.opt, w_fname, - sep_files=False) + writer.writerow(["#iteration number", iternum]) + mpisppy.utils.wxbarutils.write_W_to_file(self.opt, w_fname, sep_files=False) if self.opt.options.get("lagranger_write_xbar", False): - xbar_fname = 'lagranger_xbar_vals.csv' - with open(xbar_fname, 'a') as f: + xbar_fname = "lagranger_xbar_vals.csv" + with open(xbar_fname, "a") as f: writer = csv.writer(f) - writer.writerow(['#iteration number', iternum]) + writer.writerow(["#iteration number", iternum]) mpisppy.utils.wxbarutils.write_xbar_to_file(self.opt, xbar_fname) def _update_weights_and_solve(self, iternum): diff --git a/mpisppy/cylinders/lagrangian_bounder.py b/mpisppy/cylinders/lagrangian_bounder.py index 6a5e5d2c3..6251d24fd 100644 --- a/mpisppy/cylinders/lagrangian_bounder.py +++ b/mpisppy/cylinders/lagrangian_bounder.py @@ -8,8 +8,8 @@ ############################################################################### import mpisppy.cylinders.spoke -class _LagrangianMixin: +class _LagrangianMixin: def lagrangian_prep(self): # Split up PH_Prep? Prox option is important for APH. # Seems like we shouldn't need the Lagrangian stuff, so attach_prox=False @@ -19,46 +19,47 @@ def lagrangian_prep(self): self.opt._create_solvers() def lagrangian(self): - verbose = self.opt.options['verbose'] + verbose = self.opt.options["verbose"] # This is sort of a hack, but might help folks: if "ipopt" in self.opt.options["solver_name"]: print("\n WARNING: An ipopt solver will not give outer bounds\n") teeme = False if "tee-rank0-solves" in self.opt.options: - teeme = self.opt.options['tee-rank0-solves'] + teeme = self.opt.options["tee-rank0-solves"] self.opt.solve_loop( solver_options=self.opt.current_solver_options, dtiming=False, gripe=True, tee=teeme, - verbose=verbose + verbose=verbose, ) - ''' DTM (dlw edits): This is where PHBase Iter0 checks for scenario + """ DTM (dlw edits): This is where PHBase Iter0 checks for scenario probabilities that don't sum to one and infeasibility and will send a kill signal if needed. For now we are relying on the fact that the OPT thread is solving the same models, and hence would detect both of those things on its - own--the Lagrangian spoke doesn't need to check again. ''' + own--the Lagrangian spoke doesn't need to check again. 
""" return self.opt.Ebound(verbose) def finalize(self): self.final_bound = self.bound - if self.opt.extensions is not None and \ - hasattr(self.opt.extobject, 'post_everything'): + if self.opt.extensions is not None and hasattr( + self.opt.extobject, "post_everything" + ): self.opt.extobject.post_everything() return self.final_bound -class LagrangianOuterBound(_LagrangianMixin, mpisppy.cylinders.spoke.OuterBoundWSpoke): - converger_spoke_char = 'L' +class LagrangianOuterBound(_LagrangianMixin, mpisppy.cylinders.spoke.OuterBoundWSpoke): + converger_spoke_char = "L" def _set_weights_and_solve(self): - self.opt.W_from_flat_list(self.localWs) # Sets the weights + self.opt.W_from_flat_list(self.localWs) # Sets the weights return self.lagrangian() def main(self): - verbose = self.opt.options['verbose'] + verbose = self.opt.options["verbose"] extensions = self.opt.extensions is not None self.lagrangian_prep() diff --git a/mpisppy/cylinders/lshaped_bounder.py b/mpisppy/cylinders/lshaped_bounder.py index b4bc9c406..44f26ed45 100644 --- a/mpisppy/cylinders/lshaped_bounder.py +++ b/mpisppy/cylinders/lshaped_bounder.py @@ -11,26 +11,26 @@ from math import inf from mpisppy.utils.xhat_eval import Xhat_Eval -class XhatLShapedInnerBound(spoke.InnerBoundNonantSpoke): - converger_spoke_char = 'X' +class XhatLShapedInnerBound(spoke.InnerBoundNonantSpoke): + converger_spoke_char = "X" def xhatlshaped_prep(self): - verbose = self.opt.options['verbose'] + verbose = self.opt.options["verbose"] if not isinstance(self.opt, Xhat_Eval): raise RuntimeError("XhatLShapedInnerBound must be used with Xhat_Eval.") teeme = False if "tee-rank0-solves" in self.opt.options: - teeme = self.opt.options['tee-rank0-solves'] + teeme = self.opt.options["tee-rank0-solves"] self.opt.solve_loop( solver_options=self.opt.current_solver_options, dtiming=False, gripe=True, tee=teeme, - verbose=verbose + verbose=verbose, ) self.opt._update_E1() # Apologies for doing this after the solves... if abs(1 - self.opt.E1) > self.opt.E1_tolerance: @@ -40,29 +40,26 @@ def xhatlshaped_prep(self): print("E1_tolerance = ", self.opt.E1_tolerance) quit() infeasP = self.opt.infeas_prob() - if infeasP != 0.: + if infeasP != 0.0: if self.opt.cylinder_rank == 0: print("ERROR") print("Infeasibility detected; E_infeas, E1=", infeasP, self.opt.E1) quit() - self.opt._save_nonants() # make the cache + self.opt._save_nonants() # make the cache self.opt.current_solver_options = self.opt.options["iterk_solver_options"] ### end iter0 stuff def main(self): - self.xhatlshaped_prep() is_minimizing = self.opt.is_minimizing self.ib = inf if is_minimizing else -inf - #xh_iter = 1 + # xh_iter = 1 while not self.got_kill_signal(): - if self.new_nonants: - self.opt._put_nonant_cache(self.localnonants) self.opt._restore_nonants() obj = self.opt.calculate_incumbent(fix_nonants=True) diff --git a/mpisppy/cylinders/ph_ob.py b/mpisppy/cylinders/ph_ob.py index e22d0ee6f..df552c5a9 100644 --- a/mpisppy/cylinders/ph_ob.py +++ b/mpisppy/cylinders/ph_ob.py @@ -18,18 +18,21 @@ import mpisppy.utils.gradient as grad from mpisppy.utils.wtracker import WTracker + class PhOuterBound(mpisppy.cylinders.spoke.OuterBoundSpoke): - """Updates its own W and x using its own rho. 
- """ - converger_spoke_char = 'B' + """Updates its own W and x using its own rho.""" + + converger_spoke_char = "B" def ph_ob_prep(self): # Scenarios are created here self.opt.PH_Prep(attach_prox=True) self.opt._reenable_W() self.opt._create_solvers() - if "ph_ob_rho_rescale_factors_json" in self.opt.options and\ - self.opt.options["ph_ob_rho_rescale_factors_json"] is not None: + if ( + "ph_ob_rho_rescale_factors_json" in self.opt.options + and self.opt.options["ph_ob_rho_rescale_factors_json"] is not None + ): with open(self.opt.options["ph_ob_rho_rescale_factors_json"], "r") as fin: din = json.load(fin) self.rho_rescale_factors = {int(i): float(din[i]) for i in din} @@ -39,87 +42,97 @@ def ph_ob_prep(self): # use gradient rho self.use_gradient_rho = False if "ph_ob_gradient_rho" in self.opt.options: - assert self.opt.options["ph_ob_gradient_rho"]["cfg"] is not None, "You need to give a cfg to use gradient rho." + assert ( + self.opt.options["ph_ob_gradient_rho"]["cfg"] is not None + ), "You need to give a cfg to use gradient rho." self.use_gradient_rho = True print("PH Outer Bounder uses an iterative gradient-based rho setter") self.cfg = self.opt.options["ph_ob_gradient_rho"]["cfg"] - if "rho_denom" in self.opt.options["ph_ob_gradient_rho"]: - self.cfg.grad_rho_denom = self.opt.options["ph_ob_gradient_rho"]["rho_denom"] - self.cfg.grad_cost_file_out = '_temp_grad_cost_file_ph_ob.csv' - self.cfg.grad_rho_file_out = '_temp_grad_rho_file_ph_ob.csv' # out??? xxxx tbd + if "rho_denom" in self.opt.options["ph_ob_gradient_rho"]: + self.cfg.grad_rho_denom = self.opt.options["ph_ob_gradient_rho"][ + "rho_denom" + ] + self.cfg.grad_cost_file_out = "_temp_grad_cost_file_ph_ob.csv" + self.cfg.grad_rho_file_out = ( + "_temp_grad_rho_file_ph_ob.csv" # out??? xxxx tbd + ) # the xhat used here is the same as in the hub self.grad_object = grad.Find_Grad(self.opt, self.cfg) self.rho_setter = find_rho.Set_Rho(self.cfg).rho_setter self.grad_object.write_grad_cost() - def _phsolve(self, iternum): - verbose = self.opt.options['verbose'] + verbose = self.opt.options["verbose"] teeme = False if "tee-rank0-solves" in self.opt.options and self.opt.cylinder_rank == 0: - teeme = self.opt.options['tee-rank0-solves'] + teeme = self.opt.options["tee-rank0-solves"] self.opt.solve_loop( solver_options=self.opt.current_solver_options, dtiming=False, gripe=True, tee=teeme, - verbose=verbose + verbose=verbose, ) def _phboundsolve(self, iternum): self.opt._disable_prox() - verbose = self.opt.options['verbose'] + verbose = self.opt.options["verbose"] teeme = False if "tee-rank0-solves" in self.opt.options and self.opt.cylinder_rank == 0: - teeme = self.opt.options['tee-rank0-solves'] + teeme = self.opt.options["tee-rank0-solves"] self.opt.solve_loop( solver_options=self.opt.current_solver_options, dtiming=False, gripe=True, tee=teeme, - verbose=verbose + verbose=verbose, ) self.opt._reenable_prox() # Compute the resulting bound return self.opt.Ebound(verbose) - def _rescale_rho(self, rf): # IMPORTANT: the scalings accumulate. # E.g., 0.5 then 2.0 gets you back where you started. 
- for (sname, scenario) in self.opt.local_scenarios.items(): + for sname, scenario in self.opt.local_scenarios.items(): for ndn_i, xvar in scenario._mpisppy_data.nonant_indices.items(): scenario._mpisppy_model.rho[ndn_i] *= rf def _display_rho_values(self): for sname, scenario in self.opt.local_scenarios.items(): - rho_list = [scenario._mpisppy_model.rho[ndn_i]._value - for ndn_i in scenario._mpisppy_data.nonant_indices.keys()] - print(sname, 'PH OB rho values: ', rho_list[:5]) + rho_list = [ + scenario._mpisppy_model.rho[ndn_i]._value + for ndn_i in scenario._mpisppy_data.nonant_indices.keys() + ] + print(sname, "PH OB rho values: ", rho_list[:5]) break def _display_W_values(self): - for (sname, scenario) in self.opt.local_scenarios.items(): + for sname, scenario in self.opt.local_scenarios.items(): W_list = [w._value for w in scenario._mpisppy_model.W.values()] - print(sname, 'W values: ', W_list) + print(sname, "W values: ", W_list) break def _display_xbar_values(self): - for (sname, scenario) in self.opt.local_scenarios.items(): - xbar_list = [scenario._mpisppy_model.xbars[ndn_i]._value - for ndn_i in scenario._mpisppy_data.nonant_indices.keys()] - print(sname, 'PH OB xbar values: ', xbar_list) + for sname, scenario in self.opt.local_scenarios.items(): + xbar_list = [ + scenario._mpisppy_model.xbars[ndn_i]._value + for ndn_i in scenario._mpisppy_data.nonant_indices.keys() + ] + print(sname, "PH OB xbar values: ", xbar_list) break def _set_gradient_rho(self): self.grad_object.write_grad_rho() - rho_setter_kwargs = self.opt.options['rho_setter_kwargs'] \ - if 'rho_setter_kwargs' in self.opt.options \ - else dict() + rho_setter_kwargs = ( + self.opt.options["rho_setter_kwargs"] + if "rho_setter_kwargs" in self.opt.options + else dict() + ) for sname, scenario in self.opt.local_scenarios.items(): rholist = self.rho_setter(scenario, **rho_setter_kwargs) - for (vid, rho) in rholist: + for vid, rho in rholist: (ndn, i) = scenario._mpisppy_data.varid_to_nonant_index[vid] scenario._mpisppy_model.rho[(ndn, i)] = rho @@ -138,16 +151,15 @@ def _update_weights_and_solve(self, iternum): if self.use_gradient_rho and iternum == 0: self._set_gradient_rho() self._display_rho_values() - if self.rho_rescale_factors is not None\ - and iternum in self.rho_rescale_factors: + if self.rho_rescale_factors is not None and iternum in self.rho_rescale_factors: self._rescale_rho(self.rho_rescale_factors[iternum]) - bound = self._phboundsolve(iternum) # keep it before calling _phsolve + bound = self._phboundsolve(iternum) # keep it before calling _phsolve self._phsolve(iternum) return bound def main(self): self.ph_ob_prep() - self._rescale_rho(self.opt.options["ph_ob_initial_rho_rescale_factor"] ) + self._rescale_rho(self.opt.options["ph_ob_initial_rho_rescale_factor"]) self.trivial_bound = self._phsolve(0) self.bound = self.trivial_bound @@ -165,11 +177,11 @@ def main(self): wtracker.grab_local_Ws() def finalize(self): - ''' + """ Do one final lagrangian pass with the final PH weights. 
Useful for when PH convergence and/or iteration limit is the cause of termination - ''' + """ self.final_bound = self._update_weights_and_solve(self.B_iter) self.bound = self.final_bound return self.final_bound diff --git a/mpisppy/cylinders/reduced_costs_spoke.py b/mpisppy/cylinders/reduced_costs_spoke.py index d504e1ba7..64cd9ff2f 100644 --- a/mpisppy/cylinders/reduced_costs_spoke.py +++ b/mpisppy/cylinders/reduced_costs_spoke.py @@ -10,21 +10,22 @@ import numpy as np from mpisppy.cylinders.spcommunicator import communicator_array from mpisppy.cylinders.lagrangian_bounder import LagrangianOuterBound -from mpisppy.utils.sputils import is_persistent +from mpisppy.utils.sputils import is_persistent from mpisppy import MPI -class ReducedCostsSpoke(LagrangianOuterBound): +class ReducedCostsSpoke(LagrangianOuterBound): def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) - self.bound_tol = self.opt.options['rc_bound_tol'] + self.bound_tol = self.opt.options["rc_bound_tol"] self.consensus_threshold = np.sqrt(self.bound_tol) - self.converger_spoke_char = 'R' - + self.converger_spoke_char = "R" def make_windows(self): if not hasattr(self.opt, "local_scenarios"): - raise RuntimeError("Provided SPBase object does not have local_scenarios attribute") + raise RuntimeError( + "Provided SPBase object does not have local_scenarios attribute" + ) if len(self.opt.local_scenarios) == 0: raise RuntimeError("Rank has zero local_scenarios") @@ -37,7 +38,7 @@ def make_windows(self): self._modeler_fixed_nonants = set() - for k,s in self.opt.local_scenarios.items(): + for k, s in self.opt.local_scenarios.items(): for ndn_i, xvar in s._mpisppy_data.nonant_indices.items(): if xvar.fixed: self._modeler_fixed_nonants.add(ndn_i) @@ -55,11 +56,11 @@ def make_windows(self): @property def rc(self): - return self._bound[1:1+self.nonant_length] + return self._bound[1 : 1 + self.nonant_length] @rc.setter def rc(self, vals): - self._bound[1:1+self.nonant_length] = vals + self._bound[1 : 1 + self.nonant_length] = vals def lagrangian_prep(self): """ @@ -100,18 +101,26 @@ def extract_and_store_reduced_costs(self, outer_bound): for sub in self.opt.local_subproblems.values(): if is_persistent(sub._solver_plugin): - # Note: what happens with non-persistent solvers? + # Note: what happens with non-persistent solvers? # - if rc is accepted as a model suffix by the solver (e.g. 
gurobi shell), it is loaded in postsolve # - if not, the solver should throw an error # - direct solvers seem to behave the same as persistent solvers # GurobiDirect needs vars_to_load argument # XpressDirect loads for all vars by default - TODO: should notify someone of this inconsistency - vars_to_load = [x for sn in sub.scen_list for _, x in self.opt.local_scenarios[sn]._mpisppy_data.nonant_indices.items()] + vars_to_load = [ + x + for sn in sub.scen_list + for _, x in self.opt.local_scenarios[ + sn + ]._mpisppy_data.nonant_indices.items() + ] sub._solver_plugin.load_rc(vars_to_load=vars_to_load) for sn in sub.scen_list: s = self.opt.local_scenarios[sn] - for ci, (ndn_i, xvar) in enumerate(s._mpisppy_data.nonant_indices.items()): + for ci, (ndn_i, xvar) in enumerate( + s._mpisppy_data.nonant_indices.items() + ): # fixed by modeler if ndn_i in self._modeler_fixed_nonants: rc[ci] = np.nan @@ -119,14 +128,16 @@ def extract_and_store_reduced_costs(self, outer_bound): xb = s._mpisppy_model.xbars[ndn_i].value # check variance of xb to determine if consensus achieved var_xb = pyo.value(s._mpisppy_model.xsqbars[ndn_i]) - xb * xb - - if var_xb > self.consensus_threshold * self.consensus_threshold: + + if var_xb > self.consensus_threshold * self.consensus_threshold: rc[ci] = np.nan continue # solver takes care of sign of rc, based on lb, ub and max,min # rc can be of wrong sign if numerically 0 - accepted here, checked in extension - if (xb - xvar.lb <= self.bound_tol) or (xvar.ub - xb <= self.bound_tol): + if (xb - xvar.lb <= self.bound_tol) or ( + xvar.ub - xb <= self.bound_tol + ): rc[ci] += sub._mpisppy_probability * sub.rc[xvar] # not close to either bound -> rc = nan else: diff --git a/mpisppy/cylinders/slam_heuristic.py b/mpisppy/cylinders/slam_heuristic.py index 54f938118..5bcba5a5a 100644 --- a/mpisppy/cylinders/slam_heuristic.py +++ b/mpisppy/cylinders/slam_heuristic.py @@ -17,14 +17,14 @@ from mpisppy.utils.xhat_eval import Xhat_Eval # Could also pass, e.g., sys.stdout instead of a filename -mpisppy.log.setup_logger("mpisppy.cylinders.slam_heuristic", - "slamheur.log", - level=logging.CRITICAL) +mpisppy.log.setup_logger( + "mpisppy.cylinders.slam_heuristic", "slamheur.log", level=logging.CRITICAL +) logger = logging.getLogger("mpisppy.cylinders.slam_heuristic") -class _SlamHeuristic(spoke.InnerBoundNonantSpoke): - converger_spoke_char = 'S' +class _SlamHeuristic(spoke.InnerBoundNonantSpoke): + converger_spoke_char = "S" @property @abc.abstractmethod @@ -38,17 +38,23 @@ def mpi_op(self): def slam_heur_prep(self): if self.opt.multistage: - raise RuntimeError(f'The {self.__class__.__name__} only supports ' - 'two-stage models at this time.') + raise RuntimeError( + f"The {self.__class__.__name__} only supports " + "two-stage models at this time." + ) if not isinstance(self.opt, Xhat_Eval): - raise RuntimeError(f"{self.__class__.__name__} must be used with Xhat_Eval.") - verbose = self.opt.options['verbose'] + raise RuntimeError( + f"{self.__class__.__name__} must be used with Xhat_Eval." 
+ ) + verbose = self.opt.options["verbose"] - logger.debug(f"{self.__class__.__name__} spoke back from PH_Prep rank {self.global_rank}") + logger.debug( + f"{self.__class__.__name__} spoke back from PH_Prep rank {self.global_rank}" + ) self.tee = False if "tee-rank0-solves" in self.opt.options: - self.tee = self.opt.options['tee-rank0-solves'] + self.tee = self.opt.options["tee-rank0-solves"] self.verbose = verbose @@ -58,7 +64,7 @@ def slam_heur_prep(self): def extract_local_candidate_soln(self): num_scen = len(self.opt.local_scenarios) num_vars = len(self.localnonants) // num_scen - assert(num_scen * num_vars == len(self.localnonants)) + assert num_scen * num_vars == len(self.localnonants) ## matrix with num_scen rows and num_vars columns nonant_matrix = np.reshape(self.localnonants, (num_scen, num_vars)) @@ -72,22 +78,27 @@ def main(self): slam_iter = 1 while not self.got_kill_signal(): - if (slam_iter-1) % 10000 == 0: - logger.debug(f' {self.__class__.__name__} loop iter={slam_iter} on rank {self.global_rank}') - logger.debug(f' {self.__class__.__name__} got from opt on rank {self.global_rank}') + if (slam_iter - 1) % 10000 == 0: + logger.debug( + f" {self.__class__.__name__} loop iter={slam_iter} on rank {self.global_rank}" + ) + logger.debug( + f" {self.__class__.__name__} got from opt on rank {self.global_rank}" + ) if self.new_nonants: - local_candidate = self.extract_local_candidate_soln() global_candidate = np.empty_like(local_candidate) - self.cylinder_comm.Allreduce(local_candidate, global_candidate, op=self.mpi_op) + self.cylinder_comm.Allreduce( + local_candidate, global_candidate, op=self.mpi_op + ) - ''' + """ ## round the candidate candidate = global_candidate.round() - ''' + """ # Everyone has the candidate solution at this point for s in self.opt.local_scenarios.values(): @@ -96,17 +107,17 @@ def main(self): for ix, var in enumerate(s._mpisppy_data.nonant_indices.values()): var.fix(global_candidate[ix]) - if (is_pers): + if is_pers: solver.update_var(var) obj = self.opt.calculate_incumbent(fix_nonants=False) self.update_if_improving(obj) - + slam_iter += 1 -class SlamMaxHeuristic(_SlamHeuristic): +class SlamMaxHeuristic(_SlamHeuristic): @property def numpy_op(self): return np.amax @@ -115,8 +126,8 @@ def numpy_op(self): def mpi_op(self): return mpi.MAX -class SlamMinHeuristic(_SlamHeuristic): +class SlamMinHeuristic(_SlamHeuristic): @property def numpy_op(self): return np.amin diff --git a/mpisppy/cylinders/spcommunicator.py b/mpisppy/cylinders/spcommunicator.py index 13c941c8c..c4b333778 100644 --- a/mpisppy/cylinders/spcommunicator.py +++ b/mpisppy/cylinders/spcommunicator.py @@ -6,18 +6,19 @@ # All rights reserved. Please see the files COPYRIGHT.md and LICENSE.md for # full copyright and license information. ############################################################################### -""" Conventional wisdom seems to be that we should use Put calls locally (i.e. - a process should Put() into its own buffer), and Get calls for - communication (i.e. call Get on a remote target, rather than your local - buffer). The following implementation uses this paradigm. +"""Conventional wisdom seems to be that we should use Put calls locally (i.e. +a process should Put() into its own buffer), and Get calls for +communication (i.e. call Get on a remote target, rather than your local +buffer). The following implementation uses this paradigm. - The communication in this paradigm is a star graph, with the hub at the - center and the spokes on the outside. 
Each spoke is concerned only - with the hub, but the hub must track information about all of the - spokes. +The communication in this paradigm is a star graph, with the hub at the +center and the spokes on the outside. Each spoke is concerned only +with the hub, but the hub must track information about all of the +spokes. - Separate hub and spoke classes for memory/window management? +Separate hub and spoke classes for memory/window management? """ + import numpy as np import abc import time @@ -25,17 +26,18 @@ def communicator_array(size): - arr = np.empty(size+1) + arr = np.empty(size + 1) arr[:] = np.nan arr[-1] = 0 return arr class SPCommunicator: - """ Notes: TODO - """ + """Notes: TODO""" - def __init__(self, spbase_object, fullcomm, strata_comm, cylinder_comm, options=None): + def __init__( + self, spbase_object, fullcomm, strata_comm, cylinder_comm, options=None + ): # flag for if the windows have been constructed self._windows_constructed = False self.fullcomm = fullcomm @@ -46,7 +48,7 @@ def __init__(self, spbase_object, fullcomm, strata_comm, cylinder_comm, options= self.cylinder_rank = cylinder_comm.Get_rank() self.n_spokes = strata_comm.Get_size() - 1 self.opt = spbase_object - self.inst_time = time.time() # For diagnostics + self.inst_time = time.time() # For diagnostics if options is None: self.options = dict() else: @@ -58,30 +60,27 @@ def __init__(self, spbase_object, fullcomm, strata_comm, cylinder_comm, options= @abc.abstractmethod def main(self): - """ Every hub/spoke must have a main function - """ + """Every hub/spoke must have a main function""" pass def sync(self): - """ Every hub/spoke may have a sync function - """ + """Every hub/spoke may have a sync function""" pass def is_converged(self): - """ Every hub/spoke may have a is_converged function - """ + """Every hub/spoke may have a is_converged function""" return False def finalize(self): - """ Every hub/spoke may have a finalize function, - which does some final calculations/flushing to - disk after convergence + """Every hub/spoke may have a finalize function, + which does some final calculations/flushing to + disk after convergence """ pass def hub_finalize(self): - """ Every hub may have another finalize function, - which collects any results from finalize + """Every hub may have another finalize function, + which collects any results from finalize """ pass @@ -89,8 +88,7 @@ def allreduce_or(self, val): return self.opt.allreduce_or(val) def free_windows(self): - """ - """ + """ """ if self._windows_constructed: for i in range(self.n_spokes): self.windows[i].Free() @@ -98,25 +96,25 @@ def free_windows(self): self._windows_constructed = False def _make_window(self, length, comm=None): - """ Create a local window object and its corresponding - memory buffer using MPI.Win.Allocate() + """Create a local window object and its corresponding + memory buffer using MPI.Win.Allocate() - Args: - length (int): length of the buffer to create - comm (MPI Communicator, optional): MPI communicator object to - create the window over. Default is self.strata_comm. + Args: + length (int): length of the buffer to create + comm (MPI Communicator, optional): MPI communicator object to + create the window over. Default is self.strata_comm. - Returns: - window (MPI.Win object): The created window - buff (ndarray): Pointer to corresponding memory + Returns: + window (MPI.Win object): The created window + buff (ndarray): Pointer to corresponding memory - Notes: - The created buffer will actually be +1 longer than length. 
- The last entry is a write number to keep track of new info. + Notes: + The created buffer will actually be +1 longer than length. + The last entry is a write number to keep track of new info. - This function assumes that the user has provided the correct - window size for the local buffer based on whether this process - is a hub or spoke, etc. + This function assumes that the user has provided the correct + window size for the local buffer based on whether this process + is a hub or spoke, etc. """ if comm is None: comm = self.strata_comm @@ -124,5 +122,5 @@ def _make_window(self, length, comm=None): window = MPI.Win.Allocate(size, MPI.DOUBLE.size, comm=comm) buff = np.ndarray(dtype="d", shape=(length + 1,), buffer=window.tomemory()) buff[:] = np.nan - buff[-1] = 0. # Initialize the write number to zero + buff[-1] = 0.0 # Initialize the write number to zero return window, buff diff --git a/mpisppy/cylinders/spoke.py b/mpisppy/cylinders/spoke.py index 1d59e58cb..7ff47052a 100644 --- a/mpisppy/cylinders/spoke.py +++ b/mpisppy/cylinders/spoke.py @@ -25,8 +25,11 @@ class ConvergerSpokeType(enum.Enum): W_GETTER = 3 NONANT_GETTER = 4 + class Spoke(SPCommunicator): - def __init__(self, spbase_object, fullcomm, strata_comm, cylinder_comm, options=None): + def __init__( + self, spbase_object, fullcomm, strata_comm, cylinder_comm, options=None + ): super().__init__(spbase_object, fullcomm, strata_comm, cylinder_comm, options) self.local_write_id = 0 self.remote_write_id = 0 @@ -62,14 +65,14 @@ def _make_windows(self, local_length, remote_length): self._windows_constructed = True def spoke_to_hub(self, values): - """ Put the specified values into the locally-owned buffer for the hub - to pick up. + """Put the specified values into the locally-owned buffer for the hub + to pick up. 
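A hedged sketch of the calling convention spoke_to_hub() documents above: the values array must be local_length + 1 entries long, with the final slot reserved for the write_id. The publish() helper and its spoke argument are illustrative stand-ins, not mpi-sppy API.

    from mpisppy.cylinders.spcommunicator import communicator_array

    def publish(spoke, value):
        # spoke: any constructed _BoundSpoke-style object (hypothetical caller)
        values = communicator_array(spoke.local_length)  # payload + write_id slot
        values[0] = value                                # e.g., a fresh bound
        spoke.spoke_to_hub(values)  # raises RuntimeError on a length mismatch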
- Notes: - This automatically does the -1 indexing + Notes: + This automatically does the -1 indexing - This assumes that values contains a slot at the end for the - write_id + This assumes that values contains a slot at the end for the + write_id """ expected_length = self.local_length + 1 if len(values) != expected_length: @@ -86,8 +89,7 @@ def spoke_to_hub(self, values): window.Unlock(self.strata_rank) def spoke_from_hub(self, values): - """ - """ + """ """ expected_length = self.remote_length + 1 if len(values) != expected_length: raise RuntimeError( @@ -101,11 +103,11 @@ def spoke_from_hub(self, values): window.Unlock(0) new_id = int(values[-1]) - local_val = np.array((new_id,-new_id), 'i') - max_min_ids = np.zeros(2, 'i') - self.cylinder_comm.Allreduce((local_val, MPI.INT), - (max_min_ids, MPI.INT), - op=MPI.MAX) + local_val = np.array((new_id, -new_id), "i") + max_min_ids = np.zeros(2, "i") + self.cylinder_comm.Allreduce( + (local_val, MPI.INT), (max_min_ids, MPI.INT), op=MPI.MAX + ) max_id = max_min_ids[0] min_id = -max_min_ids[1] @@ -122,8 +124,8 @@ def spoke_from_hub(self, values): return False def got_kill_signal(self): - """ Spoke should call this method at least every iteration - to see if the Hub terminated + """Spoke should call this method at least every iteration + to see if the Hub terminated """ return self._got_kill_signal() @@ -142,26 +144,30 @@ def get_serial_number(self): @abc.abstractmethod def _got_kill_signal(self): - """ Every spoke needs a way to get the signal to terminate - from the hub + """Every spoke needs a way to get the signal to terminate + from the hub """ pass class _BoundSpoke(Spoke): - """ A base class for bound spokes - """ - def __init__(self, spbase_object, fullcomm, strata_comm, cylinder_comm, options=None): - super().__init__(spbase_object, fullcomm, strata_comm, cylinder_comm, options) - if self.cylinder_rank == 0 and \ - 'trace_prefix' in spbase_object.options and \ - spbase_object.options['trace_prefix'] is not None: - trace_prefix = spbase_object.options['trace_prefix'] + """A base class for bound spokes""" - filen = trace_prefix+self.__class__.__name__+'.csv' + def __init__( + self, spbase_object, fullcomm, strata_comm, cylinder_comm, options=None + ): + super().__init__(spbase_object, fullcomm, strata_comm, cylinder_comm, options) + if ( + self.cylinder_rank == 0 + and "trace_prefix" in spbase_object.options + and spbase_object.options["trace_prefix"] is not None + ): + trace_prefix = spbase_object.options["trace_prefix"] + + filen = trace_prefix + self.__class__.__name__ + ".csv" if os.path.exists(filen): raise RuntimeError(f"Spoke trace file {filen} already exists!") - with open(filen, 'w') as f: + with open(filen, "w") as f: f.write("time,bound\n") self.trace_filen = filen self.start_time = spbase_object.start_time @@ -173,12 +179,12 @@ def __init__(self, spbase_object, fullcomm, strata_comm, cylinder_comm, options= self._locals = None def make_windows(self): - """ Makes the bound window and a remote window to - look for a kill signal + """Makes the bound window and a remote window to + look for a kill signal """ - self._make_windows(1, 2) # kill signals are accounted for in _make_window - self._bound = communicator_array(1) # spoke bound + kill signal - self._locals = communicator_array(2) # hub outer/inner bounds and kill signal + self._make_windows(1, 2) # kill signals are accounted for in _make_window + self._bound = communicator_array(1) # spoke bound + kill signal + self._locals = communicator_array(2) # hub outer/inner bounds 
and kill signal @property def bound(self): @@ -208,25 +214,27 @@ def _got_kill_signal(self): def _append_trace(self, value): if self.cylinder_rank != 0 or self.trace_filen is None: return - with open(self.trace_filen, 'a') as f: + with open(self.trace_filen, "a") as f: f.write(f"{time.perf_counter()-self.start_time},{value}\n") class _BoundNonantLenSpoke(_BoundSpoke): - """ A base class for bound spokes which also - want something of len nonants from OPT + """A base class for bound spokes which also + want something of len nonants from OPT """ def make_windows(self): - """ Makes the bound window and with a remote buffer long enough - to hold an array as long as the nonants. + """Makes the bound window and with a remote buffer long enough + to hold an array as long as the nonants. - Input: - opt (SPBase): Must have local_scenarios attached already! + Input: + opt (SPBase): Must have local_scenarios attached already! """ if not hasattr(self.opt, "local_scenarios"): - raise RuntimeError("Provided SPBase object does not have local_scenarios attribute") + raise RuntimeError( + "Provided SPBase object does not have local_scenarios attribute" + ) if len(self.opt.local_scenarios) == 0: raise RuntimeError("Rank has zero local_scenarios") @@ -239,37 +247,40 @@ def make_windows(self): self._bound = communicator_array(1) self._locals = communicator_array(vbuflen) + class InnerBoundSpoke(_BoundSpoke): - """ For Spokes that provide an inner bound through self.bound to the - Hub, and do not need information from the main PH OPT hub. + """For Spokes that provide an inner bound through self.bound to the + Hub, and do not need information from the main PH OPT hub. """ + converger_spoke_types = (ConvergerSpokeType.INNER_BOUND,) - converger_spoke_char = 'I' + converger_spoke_char = "I" class OuterBoundSpoke(_BoundSpoke): - """ For Spokes that provide an outer bound through self.bound to the - Hub, and do not need information from the main PH OPT hub. + """For Spokes that provide an outer bound through self.bound to the + Hub, and do not need information from the main PH OPT hub. 
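To make the division of labor concrete, an illustrative skeleton of a spoke built on OuterBoundSpoke follows; the class, its one-character tag, and _compute_bound are hypothetical placeholders, not mpi-sppy code.

    from mpisppy.cylinders.spoke import OuterBoundSpoke

    class MyOuterBound(OuterBoundSpoke):  # hypothetical example class
        converger_spoke_char = "M"  # hypothetical one-character tag

        def _compute_bound(self):
            raise NotImplementedError  # placeholder for the real bounding work

        def main(self):
            while not self.got_kill_signal():  # poll the hub for termination
                self.bound = self._compute_bound()  # publish via the bound window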
""" + converger_spoke_types = (ConvergerSpokeType.OUTER_BOUND,) - converger_spoke_char = 'O' + converger_spoke_char = "O" class _BoundWSpoke(_BoundNonantLenSpoke): - """ A base class for bound spokes which also want the W's from the OPT - threads + """A base class for bound spokes which also want the W's from the OPT + threads """ @property def localWs(self): """Returns the local copy of the weights""" - return self._locals[:-3] # -3 for the bounds and kill signal + return self._locals[:-3] # -3 for the bounds and kill signal @property def new_Ws(self): - """ Returns True if the local copy of - the weights has been updated since - the last call to got_kill_signal + """Returns True if the local copy of + the weights has been updated since + the last call to got_kill_signal """ return self._new_locals @@ -286,12 +297,12 @@ class OuterBoundWSpoke(_BoundWSpoke): ConvergerSpokeType.OUTER_BOUND, ConvergerSpokeType.W_GETTER, ) - converger_spoke_char = 'O' + converger_spoke_char = "O" class _BoundNonantSpoke(_BoundNonantLenSpoke): - """ A base class for bound spokes which also - want the xhat's from the OPT threads + """A base class for bound spokes which also + want the xhat's from the OPT threads """ @property @@ -302,42 +313,47 @@ def localnonants(self): @property def new_nonants(self): """Returns True if the local copy of - the nonants has been updated since - the last call to got_kill_signal""" + the nonants has been updated since + the last call to got_kill_signal""" return self._new_locals class InnerBoundNonantSpoke(_BoundNonantSpoke): - """ For Spokes that provide an inner (incumbent) - bound through self.bound to the Hub, - and receive the nonants from - the main SPOpt hub. + """For Spokes that provide an inner (incumbent) + bound through self.bound to the Hub, + and receive the nonants from + the main SPOpt hub. 
- Includes some helpful methods for saving - and restoring results + Includes some helpful methods for saving + and restoring results """ + converger_spoke_types = ( ConvergerSpokeType.INNER_BOUND, ConvergerSpokeType.NONANT_GETTER, ) - converger_spoke_char = 'I' + converger_spoke_char = "I" - def __init__(self, spbase_object, fullcomm, strata_comm, cylinder_comm, options=None): + def __init__( + self, spbase_object, fullcomm, strata_comm, cylinder_comm, options=None + ): super().__init__(spbase_object, fullcomm, strata_comm, cylinder_comm, options) self.is_minimizing = self.opt.is_minimizing self.best_inner_bound = math.inf if self.is_minimizing else -math.inf - self.solver_options = None # can be overwritten by derived classes + self.solver_options = None # can be overwritten by derived classes # set up best solution cache - for k,s in self.opt.local_scenarios.items(): + for k, s in self.opt.local_scenarios.items(): s._mpisppy_data.best_solution_cache = None def update_if_improving(self, candidate_inner_bound): if candidate_inner_bound is None: return False - update = (candidate_inner_bound < self.best_inner_bound) \ - if self.is_minimizing else \ - (self.best_inner_bound < candidate_inner_bound) + update = ( + (candidate_inner_bound < self.best_inner_bound) + if self.is_minimizing + else (self.best_inner_bound < candidate_inner_bound) + ) if not update: return False @@ -348,7 +364,7 @@ def update_if_improving(self, candidate_inner_bound): return True def finalize(self): - for k,s in self.opt.local_scenarios.items(): + for k, s in self.opt.local_scenarios.items(): if s._mpisppy_data.best_solution_cache is None: return None for var, value in s._mpisppy_data.best_solution_cache.items(): @@ -360,7 +376,7 @@ def finalize(self): return self.final_bound def _cache_best_solution(self): - for k,s in self.opt.local_scenarios.items(): + for k, s in self.opt.local_scenarios.items(): scenario_cache = ComponentMap() for var in s.component_data_objects(Var): scenario_cache[var] = var.value @@ -368,13 +384,14 @@ def _cache_best_solution(self): class OuterBoundNonantSpoke(_BoundNonantSpoke): - """ For Spokes that provide an outer - bound through self.bound to the Hub, - and receive the nonants from - the main OPT hub. + """For Spokes that provide an outer + bound through self.bound to the Hub, + and receive the nonants from + the main OPT hub. 
""" + converger_spoke_types = ( ConvergerSpokeType.OUTER_BOUND, ConvergerSpokeType.NONANT_GETTER, ) - converger_spoke_char = 'A' # probably Lagrangian + converger_spoke_char = "A" # probably Lagrangian diff --git a/mpisppy/cylinders/subgradient_bounder.py b/mpisppy/cylinders/subgradient_bounder.py index fc125c23b..2485c059b 100644 --- a/mpisppy/cylinders/subgradient_bounder.py +++ b/mpisppy/cylinders/subgradient_bounder.py @@ -9,13 +9,13 @@ import mpisppy.cylinders.spoke from mpisppy.cylinders.lagrangian_bounder import _LagrangianMixin -class SubgradientOuterBound(_LagrangianMixin, mpisppy.cylinders.spoke.OuterBoundSpoke): - converger_spoke_char = 'G' +class SubgradientOuterBound(_LagrangianMixin, mpisppy.cylinders.spoke.OuterBoundSpoke): + converger_spoke_char = "G" def main(self): extensions = self.opt.extensions is not None - verbose = self.opt.options['verbose'] + verbose = self.opt.options["verbose"] self.lagrangian_prep() @@ -33,8 +33,8 @@ def main(self): self.opt.current_solver_options = self.opt.iterk_solver_options # update rho / alpha - if self.opt.options.get('subgradient_rho_multiplier') is not None: - rf = self.opt.options['subgradient_rho_multiplier'] + if self.opt.options.get("subgradient_rho_multiplier") is not None: + rf = self.opt.options["subgradient_rho_multiplier"] for scenario in self.opt.local_scenarios.values(): for ndn_i in scenario._mpisppy_model.rho: scenario._mpisppy_model.rho[ndn_i] *= rf diff --git a/mpisppy/cylinders/xhatlooper_bounder.py b/mpisppy/cylinders/xhatlooper_bounder.py index cfe5623dc..27edead7f 100644 --- a/mpisppy/cylinders/xhatlooper_bounder.py +++ b/mpisppy/cylinders/xhatlooper_bounder.py @@ -14,19 +14,20 @@ import mpisppy.log # Could also pass, e.g., sys.stdout instead of a filename -mpisppy.log.setup_logger("mpisppy.cylinders.xhatlooper_bounder", - "xhatlp.log", - level=logging.CRITICAL) +mpisppy.log.setup_logger( + "mpisppy.cylinders.xhatlooper_bounder", "xhatlp.log", level=logging.CRITICAL +) logger = logging.getLogger("mpisppy.cylinders.xhatlooper_bounder") class XhatLooperInnerBound(spoke.InnerBoundNonantSpoke): - - converger_spoke_char = 'X' + converger_spoke_char = "X" def xhatlooper_prep(self): - if "bundles_per_rank" in self.opt.options\ - and self.opt.options["bundles_per_rank"] != 0: + if ( + "bundles_per_rank" in self.opt.options + and self.opt.options["bundles_per_rank"] != 0 + ): raise RuntimeError("xhat spokes cannot have bundles (yet)") if not isinstance(self.opt, Xhat_Eval): @@ -37,7 +38,7 @@ def xhatlooper_prep(self): ### begin iter0 stuff xhatter.pre_iter0() self.opt._save_original_nonants() - + self.opt._lazy_create_solvers() # no iter0 loop, but we need the solvers self.opt._update_E1() @@ -50,7 +51,7 @@ def xhatlooper_prep(self): ### end iter0 stuff xhatter.post_iter0() - self.opt._save_nonants() # make the cache + self.opt._save_nonants() # make the cache return xhatter @@ -59,22 +60,26 @@ def main(self): xhatter = self.xhatlooper_prep() - scen_limit = self.opt.options['xhat_looper_options']['scen_limit'] + scen_limit = self.opt.options["xhat_looper_options"]["scen_limit"] xh_iter = 1 while not self.got_kill_signal(): - if (xh_iter-1) % 10000 == 0: - logger.debug(f' Xhatlooper loop iter={xh_iter} on rank {self.global_rank}') - logger.debug(f' Xhatlooper got from opt on rank {self.global_rank}') + if (xh_iter - 1) % 10000 == 0: + logger.debug( + f" Xhatlooper loop iter={xh_iter} on rank {self.global_rank}" + ) + logger.debug(f" Xhatlooper got from opt on rank {self.global_rank}") if self.new_nonants: - logger.debug(f' 
*Xhatlooper loop iter={xh_iter}') - logger.debug(f' *got a new one! on rank {self.global_rank}') - logger.debug(f' *localnonants={str(self.localnonants)}') + logger.debug(f" *Xhatlooper loop iter={xh_iter}") + logger.debug(f" *got a new one! on rank {self.global_rank}") + logger.debug(f" *localnonants={str(self.localnonants)}") self.opt._put_nonant_cache(self.localnonants) self.opt._restore_nonants() - upperbound, srcsname = xhatter.xhat_looper(scen_limit=scen_limit, restore_nonants=True) + upperbound, srcsname = xhatter.xhat_looper( + scen_limit=scen_limit, restore_nonants=True + ) # send a bound to the opt companion self.update_if_improving(upperbound) diff --git a/mpisppy/cylinders/xhatshufflelooper_bounder.py b/mpisppy/cylinders/xhatshufflelooper_bounder.py index 764eff1a3..65ccd914d 100644 --- a/mpisppy/cylinders/xhatshufflelooper_bounder.py +++ b/mpisppy/cylinders/xhatshufflelooper_bounder.py @@ -15,28 +15,31 @@ from mpisppy.extensions.xhatbase import XhatBase # Could also pass, e.g., sys.stdout instead of a filename -mpisppy.log.setup_logger("mpisppy.cylinders.xhatshufflelooper_bounder", - "xhatclp.log", - level=logging.CRITICAL) +mpisppy.log.setup_logger( + "mpisppy.cylinders.xhatshufflelooper_bounder", "xhatclp.log", level=logging.CRITICAL +) logger = logging.getLogger("mpisppy.cylinders.xhatshufflelooper_bounder") -class XhatShuffleInnerBound(spoke.InnerBoundNonantSpoke): - converger_spoke_char = 'X' +class XhatShuffleInnerBound(spoke.InnerBoundNonantSpoke): + converger_spoke_char = "X" def xhatbase_prep(self): - - if "bundles_per_rank" in self.opt.options\ - and self.opt.options["bundles_per_rank"] != 0: + if ( + "bundles_per_rank" in self.opt.options + and self.opt.options["bundles_per_rank"] != 0 + ): raise RuntimeError("xhat spokes cannot have bundles (yet)") ## for later - self.verbose = self.opt.options["verbose"] # typing aid - self.solver_options = self.opt.options["xhat_looper_options"]["xhat_solver_options"] + self.verbose = self.opt.options["verbose"] # typing aid + self.solver_options = self.opt.options["xhat_looper_options"][ + "xhat_solver_options" + ] if not isinstance(self.opt, Xhat_Eval): raise RuntimeError("XhatShuffleInnerBound must be used with Xhat_Eval.") - + xhatter = XhatBase(self.opt) self.xhatter = xhatter @@ -48,35 +51,41 @@ def xhatbase_prep(self): self.opt._update_E1() if abs(1 - self.opt.E1) > self.opt.E1_tolerance: - raise ValueError(f"Total probability of scenarios was {self.opt.E1} "+\ - f"(E1_tolerance is {self.opt.E1_tolerance})") + raise ValueError( + f"Total probability of scenarios was {self.opt.E1} " + + f"(E1_tolerance is {self.opt.E1_tolerance})" + ) ### end iter0 stuff (but note: no need for iter 0 solves in an xhatter) xhatter.post_iter0() - - self.opt._save_nonants() # make the cache + + self.opt._save_nonants() # make the cache ## option drive this? 
(could be dangerous) self.random_seed = 42 # Have a separate stream for shuffling self.random_stream = random.Random() - def try_scenario_dict(self, xhat_scenario_dict): - """ wrapper for _try_one""" + """wrapper for _try_one""" snamedict = xhat_scenario_dict stage2EFsolvern = self.opt.options.get("stage2EFsolvern", None) - branching_factors = self.opt.options.get("branching_factors", None) # for stage2ef - obj = self.xhatter._try_one(snamedict, - solver_options = self.solver_options, - verbose=False, - restore_nonants=True, - stage2EFsolvern=stage2EFsolvern, - branching_factors=branching_factors) - def _vb(msg): + branching_factors = self.opt.options.get( + "branching_factors", None + ) # for stage2ef + obj = self.xhatter._try_one( + snamedict, + solver_options=self.solver_options, + verbose=False, + restore_nonants=True, + stage2EFsolvern=stage2EFsolvern, + branching_factors=branching_factors, + ) + + def _vb(msg): if self.verbose and self.opt.cylinder_rank == 0: - print ("(rank0) " + msg) + print("(rank0) " + msg) if obj is None: _vb(f" Infeasible {snamedict}") @@ -84,7 +93,7 @@ def _vb(msg): _vb(f" Feasible {snamedict}, obj: {obj}") update = self.update_if_improving(obj) - logger.debug(f' bottom of try_scenario_dict on rank {self.global_rank}') + logger.debug(f" bottom of try_scenario_dict on rank {self.global_rank}") return update def main(self): @@ -102,20 +111,18 @@ def main(self): # give all ranks the same seed self.random_stream.seed(self.random_seed) - - #We need to keep track of the way scenario_names were sorted + + # We need to keep track of the way scenario_names were sorted scen_names = list(enumerate(self.opt.all_scenario_names)) - + # shuffle the scenarios associated (i.e., sample without replacement) - shuffled_scenarios = self.random_stream.sample(scen_names, - len(scen_names)) - - scenario_cycler = ScenarioCycler(shuffled_scenarios, - self.opt.nonleaves, - self.reverse, - self.iter_step) - - def _vb(msg): + shuffled_scenarios = self.random_stream.sample(scen_names, len(scen_names)) + + scenario_cycler = ScenarioCycler( + shuffled_scenarios, self.opt.nonleaves, self.reverse, self.iter_step + ) + + def _vb(msg): if self.verbose and self.opt.cylinder_rank == 0: print("(rank0) " + msg) @@ -126,21 +133,23 @@ def _vb(msg): if self.get_serial_number() == 0: continue - if (xh_iter-1) % 100 == 0: - logger.debug(f' Xhatshuffle loop iter={xh_iter} on rank {self.global_rank}') - logger.debug(f' Xhatshuffle got from opt on rank {self.global_rank}') + if (xh_iter - 1) % 100 == 0: + logger.debug( + f" Xhatshuffle loop iter={xh_iter} on rank {self.global_rank}" + ) + logger.debug(f" Xhatshuffle got from opt on rank {self.global_rank}") if self.new_nonants: # similar to above, not all ranks will agree on # when there are new_nonants (in the same loop) - logger.debug(f' *Xhatshuffle loop iter={xh_iter}') - logger.debug(f' *got a new one! on rank {self.global_rank}') - logger.debug(f' *localnonants={str(self.localnonants)}') + logger.debug(f" *Xhatshuffle loop iter={xh_iter}") + logger.debug(f" *got a new one! 
on rank {self.global_rank}") + logger.debug(f" *localnonants={str(self.localnonants)}") # update the caches self.opt._put_nonant_cache(self.localnonants) self.opt._restore_nonants() - + scenario_cycler.begin_epoch() next_scendict = scenario_cycler.get_next() @@ -151,34 +160,32 @@ def _vb(msg): _vb(f" Updating best to {next_scendict}") scenario_cycler.best = next_scendict["ROOT"] - #_vb(f" scenario_cycler._scenarios_this_epoch {scenario_cycler._scenarios_this_epoch}") + # _vb(f" scenario_cycler._scenarios_this_epoch {scenario_cycler._scenarios_this_epoch}") xh_iter += 1 class ScenarioCycler: - - def __init__(self, shuffled_scenarios,nonleaves,reverse,iter_step): - root_kids = nonleaves['ROOT'].kids if 'ROOT' in nonleaves else None - if root_kids is None or len(root_kids)==0 or root_kids[0].is_leaf: + def __init__(self, shuffled_scenarios, nonleaves, reverse, iter_step): + root_kids = nonleaves["ROOT"].kids if "ROOT" in nonleaves else None + if root_kids is None or len(root_kids) == 0 or root_kids[0].is_leaf: self._multi = False self._iter_shift = 1 if iter_step is None else iter_step - self._use_reverse = False #It is useless to reverse for 2stage SP + self._use_reverse = False # It is useless to reverse for 2stage SP else: self._multi = True self.BF0 = len(root_kids) self._nonleaves = nonleaves - + self._iter_shift = self.BF0 if iter_step is None else iter_step self._use_reverse = True if reverse is None else reverse - self._reversed = False #Do we iter in reverse mode ? + self._reversed = False # Do we iter in reverse mode ? self._shuffled_scenarios = shuffled_scenarios self._num_scenarios = len(shuffled_scenarios) - + self._cycle_idx = 0 self._best = None self._begin_normal_epoch() - @property def best(self): @@ -187,50 +194,57 @@ def best(self): @best.setter def best(self, value): self._best = value - - def _fill_nodescen_dict(self,empty_nodes): + + def _fill_nodescen_dict(self, empty_nodes): filling_idx = self._cycle_idx - while len(empty_nodes) >0: - #Sanity check to make no infinite loop. - if filling_idx == self._cycle_idx and 'ROOT' in self.nodescen_dict and self.nodescen_dict['ROOT'] is not None: + while len(empty_nodes) > 0: + # Sanity check to make no infinite loop. + if ( + filling_idx == self._cycle_idx + and "ROOT" in self.nodescen_dict + and self.nodescen_dict["ROOT"] is not None + ): print(self.nodescen_dict) - raise RuntimeError("_fill_nodescen_dict looped over every scenario but was not able to find a scen for every nonleaf node.") + raise RuntimeError( + "_fill_nodescen_dict looped over every scenario but was not able to find a scen for every nonleaf node." + ) sname = self._shuffled_snames[filling_idx] snum = self._original_order[filling_idx] - + def _add_sname_to_node(ndn): first = self._nonleaves[ndn].scenfirst last = self._nonleaves[ndn].scenlast - if snum>=first and snum<=last: + if snum >= first and snum <= last: self.nodescen_dict[ndn] = sname return False else: return True - #Adding sname to every nodes it goes by, and removing the nodes from empty_nodes - empty_nodes = list(filter(_add_sname_to_node,empty_nodes)) - filling_idx +=1 + + # Adding sname to every nodes it goes by, and removing the nodes from empty_nodes + empty_nodes = list(filter(_add_sname_to_node, empty_nodes)) + filling_idx += 1 filling_idx %= self._num_scenarios - + def create_nodescen_dict(self): - ''' - Creates an attribute nodescen_dict. - Keys are nonleaf names, values are local scenario names + """ + Creates an attribute nodescen_dict. 
+ Keys are nonleaf names, values are local scenario names (a value can be None if the associated scenario is not in our rank) - + WARNING: _cur_ROOTscen must be up to date when calling this method - ''' + """ if not self._multi: - self.nodescen_dict = {'ROOT':self._cur_ROOTscen} + self.nodescen_dict = {"ROOT": self._cur_ROOTscen} else: self.nodescen_dict = dict() self._fill_nodescen_dict(self._nonleaves.keys()) - - def update_nodescen_dict(self,snames_to_remove): - ''' + + def update_nodescen_dict(self, snames_to_remove): + """ WARNING: _cur_ROOTscen must be up to date when calling this method - ''' + """ if not self._multi: - self.nodescen_dict = {'ROOT':self._cur_ROOTscen} + self.nodescen_dict = {"ROOT": self._cur_ROOTscen} else: empty_nodes = [] for ndn in self._nonleaves.keys(): @@ -238,31 +252,34 @@ def update_nodescen_dict(self,snames_to_remove): self.nodescen_dict[ndn] = None empty_nodes.append(ndn) self._fill_nodescen_dict(empty_nodes) - def begin_epoch(self): if self._multi and self._use_reverse and not self._reversed: self._begin_reverse_epoch() else: self._begin_normal_epoch() - + def _begin_normal_epoch(self): if self._multi: self._reversed = False self._shuffled_snames = [s[1] for s in self._shuffled_scenarios] self._original_order = [s[0] for s in self._shuffled_scenarios] - self._cur_ROOTscen = self._shuffled_snames[0] if self.best is None else self.best + self._cur_ROOTscen = ( + self._shuffled_snames[0] if self.best is None else self.best + ) self.create_nodescen_dict() - + self._scenarios_this_epoch = set() - + def _begin_reverse_epoch(self): self._reversed = True self._shuffled_snames = [s[1] for s in reversed(self._shuffled_scenarios)] self._original_order = [s[0] for s in reversed(self._shuffled_scenarios)] - self._cur_ROOTscen = self._shuffled_snames[0] if self.best is None else self.best + self._cur_ROOTscen = ( + self._shuffled_snames[0] if self.best is None else self.best + ) self.create_nodescen_dict() - + self._scenarios_this_epoch = set() def get_next(self): @@ -279,22 +296,24 @@ def _iter_scen(self): self._cycle_idx += self._iter_shift ## wrap around self._cycle_idx %= self._num_scenarios - - #do not reuse a previously visited scenario for 'ROOT' + + # do not reuse a previously visited scenario for 'ROOT' tmp_cycle_idx = self._cycle_idx while self._shuffled_snames[tmp_cycle_idx] in self._scenarios_this_epoch and ( - (tmp_cycle_idx+1)%self._num_scenarios != self._cycle_idx): - tmp_cycle_idx +=1 + (tmp_cycle_idx + 1) % self._num_scenarios != self._cycle_idx + ): + tmp_cycle_idx += 1 tmp_cycle_idx %= self._num_scenarios - + self._cycle_idx = tmp_cycle_idx - - #Updating scenarios + + # Updating scenarios self._cur_ROOTscen = self._shuffled_snames[self._cycle_idx] - if old_idx self.opt.E1_tolerance): + self.opt._update_E1() + if abs(1 - self.opt.E1) > self.opt.E1_tolerance: if self.opt.cylinder_rank == 0: print("ERROR") print("Total probability of scenarios was ", self.opt.E1) @@ -69,37 +70,42 @@ def main(self): Entry point. Communicates with the optimization companion. 
""" - dtm = logging.getLogger(f'dtm{global_rank}') + dtm = logging.getLogger(f"dtm{global_rank}") logging.debug("Enter xhatspecific main on rank {}".format(global_rank)) # What to try does not change, but the data in the scenarios should - xhat_scenario_dict = self.opt.options["xhat_specific_options"]\ - ["xhat_scenario_dict"] + xhat_scenario_dict = self.opt.options["xhat_specific_options"][ + "xhat_scenario_dict" + ] xhatter = self.ib_prep() ib_iter = 1 # ib is for inner bound - while (not self.got_kill_signal()): - logging.debug(' IB loop iter={} on global rank {}'.\ - format(ib_iter, global_rank)) + while not self.got_kill_signal(): + logging.debug( + " IB loop iter={} on global rank {}".format(ib_iter, global_rank) + ) # _log_values(ib_iter, self._locals, dtm) - logging.debug(' IB got from opt on global rank {}'.\ - format(global_rank)) - if (self.new_nonants): - logging.debug(' and its new! on global rank {}'.\ - format(global_rank)) - logging.debug(' localnonants={}'.format(str(self.localnonants))) + logging.debug(" IB got from opt on global rank {}".format(global_rank)) + if self.new_nonants: + logging.debug(" and its new! on global rank {}".format(global_rank)) + logging.debug(" localnonants={}".format(str(self.localnonants))) - self.opt._put_nonant_cache(self.localnonants) # don't really need all caches + self.opt._put_nonant_cache( + self.localnonants + ) # don't really need all caches self.opt._restore_nonants() - innerbound = xhatter.xhat_tryit(xhat_scenario_dict, restore_nonants=False) + innerbound = xhatter.xhat_tryit( + xhat_scenario_dict, restore_nonants=False + ) self.update_if_improving(innerbound) ib_iter += 1 - dtm.debug(f'IB specific thread ran {ib_iter} iterations\n') - + dtm.debug(f"IB specific thread ran {ib_iter} iterations\n") + + if __name__ == "__main__": print("no main.") diff --git a/mpisppy/cylinders/xhatxbar_bounder.py b/mpisppy/cylinders/xhatxbar_bounder.py index 38621a3be..8d43b6ebb 100644 --- a/mpisppy/cylinders/xhatxbar_bounder.py +++ b/mpisppy/cylinders/xhatxbar_bounder.py @@ -35,8 +35,7 @@ def _attach_xbars(opt): ############################################################################ class XhatXbarInnerBound(spoke.InnerBoundNonantSpoke): - - converger_spoke_char = 'B' + converger_spoke_char = "B" def ib_prep(self): """ @@ -45,8 +44,10 @@ def ib_prep(self): Returns: xhatter (xhatxbar object): Constructed by a call to Prep """ - if "bundles_per_rank" in self.opt.options\ - and self.opt.options["bundles_per_rank"] != 0: + if ( + "bundles_per_rank" in self.opt.options + and self.opt.options["bundles_per_rank"] != 0 + ): raise RuntimeError("xhat spokes cannot have bundles (yet)") if not isinstance(self.opt, Xhat_Eval): @@ -61,9 +62,12 @@ def ib_prep(self): self.opt._lazy_create_solvers() # no iter0 loop, but we need the solvers - self.opt._update_E1() - if (abs(1 - self.opt.E1) > self.opt.E1_tolerance): - raise RuntimeError(f"Total probability of scenarios was {self.E1}; E1_tolerance = ", self.E1_tolerance) + self.opt._update_E1() + if abs(1 - self.opt.E1) > self.opt.E1_tolerance: + raise RuntimeError( + f"Total probability of scenarios was {self.E1}; E1_tolerance = ", + self.E1_tolerance, + ) ### end iter0 stuff @@ -78,24 +82,25 @@ def main(self): Entry point. Communicates with the optimization companion. 
""" - dtm = logging.getLogger(f'dtm{global_rank}') + dtm = logging.getLogger(f"dtm{global_rank}") logging.debug("Enter xhatxbar main on rank {}".format(global_rank)) xhatter = self.ib_prep() ib_iter = 1 # ib is for inner bound - while (not self.got_kill_signal()): - logging.debug(' IB loop iter={} on global rank {}'.\ - format(ib_iter, global_rank)) + while not self.got_kill_signal(): + logging.debug( + " IB loop iter={} on global rank {}".format(ib_iter, global_rank) + ) # _log_values(ib_iter, self._locals, dtm) - logging.debug(' IB got from opt on global rank {}'.\ - format(global_rank)) - if (self.new_nonants): - logging.debug(' and its new! on global rank {}'.\ - format(global_rank)) - logging.debug(' localnonants={}'.format(str(self.localnonants))) - self.opt._put_nonant_cache(self.localnonants) # don't really need all caches + logging.debug(" IB got from opt on global rank {}".format(global_rank)) + if self.new_nonants: + logging.debug(" and its new! on global rank {}".format(global_rank)) + logging.debug(" localnonants={}".format(str(self.localnonants))) + self.opt._put_nonant_cache( + self.localnonants + ) # don't really need all caches self.opt._restore_nonants() innerbound = xhatter.xhat_tryit(restore_nonants=False) @@ -103,14 +108,15 @@ def main(self): ib_iter += 1 - dtm.debug(f'IB xbar thread ran {ib_iter} iterations\n') - + dtm.debug(f"IB xbar thread ran {ib_iter} iterations\n") + # for debugging - #print("output .txt files for debugging in xhatxbar_bounder.py") - #for k,s in self.opt.local_scenarios.items(): + # print("output .txt files for debugging in xhatxbar_bounder.py") + # for k,s in self.opt.local_scenarios.items(): # fname = f"xhatxbar_model_{k}.txt" # with open(fname, "w") as f: # s.pprint(f) + if __name__ == "__main__": print("no main.") diff --git a/mpisppy/extensions/avgminmaxer.py b/mpisppy/extensions/avgminmaxer.py index 5df4e47c2..ca0f8e67e 100644 --- a/mpisppy/extensions/avgminmaxer.py +++ b/mpisppy/extensions/avgminmaxer.py @@ -13,12 +13,14 @@ import mpisppy.extensions.xhatbase + class MinMaxAvg(mpisppy.extensions.xhatbase.XhatBase): """ Args: ph (PH object): the calling object rank (int): mpi process rank of currently running process """ + def __init__(self, ph, rank, n_proc): super().__init__(ph, rank, n_proc) self.compstr = self.ph.options["avgminmax_name"] @@ -28,18 +30,32 @@ def pre_iter0(self): def post_iter0(self): avgv, minv, maxv = self.ph.avg_min_max(self.compstr) - if (self.cylinder_rank == 0): - print (" ### ", self.compstr,": avg, min, max, max-min", avgv, minv, maxv, maxv-minv) - + if self.cylinder_rank == 0: + print( + " ### ", + self.compstr, + ": avg, min, max, max-min", + avgv, + minv, + maxv, + maxv - minv, + ) + def miditer(self, PHIter, conv): return def enditer(self, PHIter): avgv, minv, maxv = self.ph.avg_min_max(self.compstr) - if (self.cylinder_rank == 0): - print (" ### ", self.compstr,": avg, min, max, max-min", avgv, minv, maxv, maxv-minv) + if self.cylinder_rank == 0: + print( + " ### ", + self.compstr, + ": avg, min, max, max-min", + avgv, + minv, + maxv, + maxv - minv, + ) def post_everything(self, PHIter, conv): return - - diff --git a/mpisppy/extensions/cross_scen_extension.py b/mpisppy/extensions/cross_scen_extension.py index 9fe4ca718..9adb0aac0 100644 --- a/mpisppy/extensions/cross_scen_extension.py +++ b/mpisppy/extensions/cross_scen_extension.py @@ -19,17 +19,23 @@ import numpy as np import mpisppy.MPI as mpi + class CrossScenarioExtension(Extension): def __init__(self, spbase_object): super().__init__(spbase_object) if 
self.opt.multistage: - raise RuntimeError('CrossScenarioExtension only supports ' - 'two-stage models at this time') + raise RuntimeError( + "CrossScenarioExtension only supports " "two-stage models at this time" + ) opt = self.opt - if 'cross_scen_options' in opt.options and \ - 'check_bound_improve_iterations' in opt.options['cross_scen_options']: - self.check_bound_iterations = opt.options['cross_scen_options']['check_bound_improve_iterations'] + if ( + "cross_scen_options" in opt.options + and "check_bound_improve_iterations" in opt.options["cross_scen_options"] + ): + self.check_bound_iterations = opt.options["cross_scen_options"][ + "check_bound_improve_iterations" + ] else: self.check_bound_iterations = None @@ -54,18 +60,18 @@ def _disable_W_and_prox(self): if not opt.W_disabled and not opt.prox_disabled: opt.disable_W_and_prox() self.reenable_W = True - self.reenable_prox = True + self.reenable_prox = True elif not opt.W_disabled: opt._disable_W() self.eenable_W = True elif not opt.prox_disabled: opt._disable_prox() - self.reenable_prox = True + self.reenable_prox = True def _enable_W_and_prox(self): assert self.reenable_W is not None assert self.reenable_prox is not None - + opt = self.opt if self.reenable_W and self.reenable_prox: opt.reenable_W_and_prox() @@ -82,27 +88,27 @@ def _check_bound(self): cached_ph_obj = dict() - for k,s in opt.local_subproblems.items(): + for k, s in opt.local_subproblems.items(): phobj = find_active_objective(s) phobj.deactivate() cached_ph_obj[k] = phobj s._mpisppy_model.EF_Obj.activate() - teeme = ( - "tee-rank0-solves" in opt.options - and opt.options["tee-rank0-solves"] - ) + teeme = "tee-rank0-solves" in opt.options and opt.options["tee-rank0-solves"] opt.solve_loop( - solver_options=opt.current_solver_options, - dtiming=opt.options["display_timing"], - gripe=True, - disable_pyomo_signal_handling=False, - tee=teeme, - verbose=opt.options["verbose"], + solver_options=opt.current_solver_options, + dtiming=opt.options["display_timing"], + gripe=True, + disable_pyomo_signal_handling=False, + tee=teeme, + verbose=opt.options["verbose"], ) - local_obs = np.fromiter((s._mpisppy_data.outer_bound for s in opt.local_subproblems.values()), - dtype="d", count=len(opt.local_subproblems)) + local_obs = np.fromiter( + (s._mpisppy_data.outer_bound for s in opt.local_subproblems.values()), + dtype="d", + count=len(opt.local_subproblems), + ) local_ob = np.empty(1) if opt.is_minimizing: @@ -114,21 +120,23 @@ def _check_bound(self): if opt.is_minimizing: opt.mpicomm.Allreduce(local_ob, global_ob, op=mpi.MAX) - else: + else: opt.mpicomm.Allreduce(local_ob, global_ob, op=mpi.MIN) - #print(f"CrossScenarioExtension OB: {global_ob[0]}") + # print(f"CrossScenarioExtension OB: {global_ob[0]}") - opt.spcomm.BestOuterBound = opt.spcomm.OuterBoundUpdate(global_ob[0], char='C') + opt.spcomm.BestOuterBound = opt.spcomm.OuterBoundUpdate(global_ob[0], char="C") - for k,s in opt.local_subproblems.items(): + for k, s in opt.local_subproblems.items(): s._mpisppy_model.EF_Obj.deactivate() cached_ph_obj[k].activate() def get_from_cross_cuts(self): spcomm = self.opt.spcomm idx = self.cut_gen_spoke_index - receive_buffer = np.empty(spcomm.remote_lengths[idx - 1] + 1, dtype="d") # Must be doubles + receive_buffer = np.empty( + spcomm.remote_lengths[idx - 1] + 1, dtype="d" + ) # Must be doubles is_new = spcomm.hub_from_spoke(receive_buffer, idx) if is_new: self.make_cuts(receive_buffer) @@ -162,19 +170,19 @@ def make_cuts(self, coefs): # rows are: # [ const, eta_coeff, *nonant_coeffs ] 
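The comment above fixes the layout of the flat cut buffer that make_cuts() unpacks below: one row per scenario, rows concatenated, with the outer iteration number in the trailing slot. A self-contained sketch of that decoding, with illustrative numbers:

    import numpy as np

    all_scenario_names = ["s0", "s1"]  # hypothetical
    nonant_len = 2
    row_len = 1 + 1 + nonant_len  # [ const, eta_coeff, *nonant_coeffs ]
    coefs = np.array(
        [1.0, -1.0, 0.5, 0.25,  # cut row for s0
         0.0, 0.0, 0.0, 0.0,    # all-zero row for s1: carries no cut
         7.0]                   # outer iteration number
    )
    outer_iter = int(coefs[-1])
    for idx, k in enumerate(all_scenario_names):
        row = coefs[row_len * idx : row_len * (idx + 1)]
        if (row == 0.0).all():
            continue  # skip empty rows, as make_cuts() does
        const, eta_coef, nonant_coefs = row[0], row[1], row[2:]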
- row_len = 1+1+self.nonant_len + row_len = 1 + 1 + self.nonant_len outer_iter = int(coefs[-1]) if opt.bundling: - for bn,b in opt.local_subproblems.items(): + for bn, b in opt.local_subproblems.items(): persistent_solver = sputils.is_persistent(b._solver_plugin) ## get an arbitrary scenario s = opt.local_scenarios[b.scen_list[0]] for idx, k in enumerate(opt.all_scenario_names): - row = coefs[row_len*idx:row_len*(idx+1)] + row = coefs[row_len * idx : row_len * (idx + 1)] # the row could be all zeros, # which doesn't do anything - if (row == 0.).all(): + if (row == 0.0).all(): continue # rows are: # [ const, eta_coeff, *nonant_coeffs ] @@ -187,20 +195,25 @@ def make_cuts(self, coefs): ## to the reference first stage variables linear_vars.append(b.ref_vars[ndn_i]) - cut_expr = LinearExpression(constant=linear_const, linear_coefs=linear_coefs, - linear_vars=linear_vars) + cut_expr = LinearExpression( + constant=linear_const, + linear_coefs=linear_coefs, + linear_vars=linear_vars, + ) b._mpisppy_model.benders_cuts[outer_iter, k] = (None, cut_expr, 0) if persistent_solver: - b._solver_plugin.add_constraint(b._mpisppy_model.benders_cuts[outer_iter, k]) + b._solver_plugin.add_constraint( + b._mpisppy_model.benders_cuts[outer_iter, k] + ) else: - for sn,s in opt.local_subproblems.items(): + for sn, s in opt.local_subproblems.items(): persistent_solver = sputils.is_persistent(s._solver_plugin) for idx, k in enumerate(opt.all_scenario_names): - row = coefs[row_len*idx:row_len*(idx+1)] + row = coefs[row_len * idx : row_len * (idx + 1)] # the row could be all zeros, # which doesn't do anything - if (row == 0.).all(): + if (row == 0.0).all(): continue # rows are: # [ const, eta_coeff, *nonant_coeffs ] @@ -209,11 +222,16 @@ def make_cuts(self, coefs): linear_vars = [s._mpisppy_model.eta[k]] linear_vars.extend(s._mpisppy_data.nonant_indices.values()) - cut_expr = LinearExpression(constant=linear_const, linear_coefs=linear_coefs, - linear_vars=linear_vars) - s._mpisppy_model.benders_cuts[outer_iter, k] = (None, cut_expr, 0.) 
+ cut_expr = LinearExpression( + constant=linear_const, + linear_coefs=linear_coefs, + linear_vars=linear_vars, + ) + s._mpisppy_model.benders_cuts[outer_iter, k] = (None, cut_expr, 0.0) if persistent_solver: - s._solver_plugin.add_constraint(s._mpisppy_model.benders_cuts[outer_iter, k]) + s._solver_plugin.add_constraint( + s._mpisppy_model.benders_cuts[outer_iter, k] + ) # NOTE: the LShaped code negates the objective, so # we do the same here for consistency @@ -222,21 +240,30 @@ def make_cuts(self, coefs): if not opt.is_minimizing: ib = -ib ob = -ob - add_cut = (math.isfinite(ib) or math.isfinite(ob)) and \ - ((ib < self.best_inner_bound) or (ob > self.best_outer_bound)) + add_cut = (math.isfinite(ib) or math.isfinite(ob)) and ( + (ib < self.best_inner_bound) or (ob > self.best_outer_bound) + ) if add_cut: self.best_inner_bound = ib self.best_outer_bound = ob - for sn,s in opt.local_subproblems.items(): + for sn, s in opt.local_subproblems.items(): persistent_solver = sputils.is_persistent(s._solver_plugin) prior_outer_iter = list(s._mpisppy_model.inner_bound_constr.keys()) - s._mpisppy_model.inner_bound_constr[outer_iter] = (ob, s._mpisppy_model.EF_obj, ib) + s._mpisppy_model.inner_bound_constr[outer_iter] = ( + ob, + s._mpisppy_model.EF_obj, + ib, + ) if persistent_solver: - s._solver_plugin.add_constraint(s._mpisppy_model.inner_bound_constr[outer_iter]) + s._solver_plugin.add_constraint( + s._mpisppy_model.inner_bound_constr[outer_iter] + ) # remove other ib constraints (we only need the tightest) for it in prior_outer_iter: if persistent_solver: - s._solver_plugin.remove_constraint(s._mpisppy_model.inner_bound_constr[it]) + s._solver_plugin.remove_constraint( + s._mpisppy_model.inner_bound_constr[it] + ) del s._mpisppy_model.inner_bound_constr[it] ## helping the extention track cuts @@ -256,7 +283,7 @@ def setup_hub(self): self.new_cuts = False def initialize_spoke_indices(self): - for (i, spoke) in enumerate(self.opt.spcomm.spokes): + for i, spoke in enumerate(self.opt.spcomm.spokes): if spoke["spoke_class"] == CrossScenarioCutSpoke: self.cut_gen_spoke_index = i + 1 @@ -266,8 +293,10 @@ def sync_with_spokes(self): def pre_iter0(self): if self.opt.multistage: - raise RuntimeError("CrossScenarioExtension does not support " - "multi-stage problems at this time") + raise RuntimeError( + "CrossScenarioExtension does not support " + "multi-stage problems at this time" + ) ## hack as this provides logs for outer bounds self.opt.spcomm.has_outerbound_spokes = True @@ -275,44 +304,54 @@ def post_iter0(self): opt = self.opt # NOTE: the LShaped code negates the objective, so # we do the same here for consistency - if 'cross_scen_options' in opt.options and \ - 'valid_eta_bound' in opt.options['cross_scen_options']: - valid_eta_bound = opt.options['cross_scen_options']['valid_eta_bound'] + if ( + "cross_scen_options" in opt.options + and "valid_eta_bound" in opt.options["cross_scen_options"] + ): + valid_eta_bound = opt.options["cross_scen_options"]["valid_eta_bound"] if not opt.is_minimizing: - _eta_init = { k: -v for k,v in valid_eta_bound.items() } + _eta_init = {k: -v for k, v in valid_eta_bound.items()} else: _eta_init = valid_eta_bound + def _eta_bounds(m, k): return _eta_init[k], None else: - lb = (-sys.maxsize - 1) * 1. 
/ len(opt.all_scenario_names) + lb = (-sys.maxsize - 1) * 1.0 / len(opt.all_scenario_names) + def _eta_init(m, k): return lb + def _eta_bounds(m, k): return lb, None # eta is attached to each subproblem, regardless of bundles bundling = opt.bundling - for k,s in opt.local_subproblems.items(): - s._mpisppy_model.eta = pyo.Var(opt.all_scenario_names, initialize=_eta_init, bounds=_eta_bounds) + for k, s in opt.local_subproblems.items(): + s._mpisppy_model.eta = pyo.Var( + opt.all_scenario_names, initialize=_eta_init, bounds=_eta_bounds + ) if sputils.is_persistent(s._solver_plugin): for var in s._mpisppy_model.eta.values(): s._solver_plugin.add_var(var) - if bundling: ## create a refence to eta on each subproblem + if bundling: ## create a refence to eta on each subproblem for sn in s.scen_list: scenario = opt.local_scenarios[sn] - scenario._mpisppy_model.eta = { k : s._mpisppy_model.eta[k] for k in opt.all_scenario_names } + scenario._mpisppy_model.eta = { + k: s._mpisppy_model.eta[k] for k in opt.all_scenario_names + } ## hold the PH object harmless self._disable_W_and_prox() - - for k,s in opt.local_subproblems.items(): + for k, s in opt.local_subproblems.items(): obj = find_active_objective(s) repn = generate_standard_repn(obj.expr, quadratic=True) if len(repn.nonlinear_vars) > 0: - raise ValueError("CrossScenario does not support models with nonlinear objective functions") + raise ValueError( + "CrossScenario does not support models with nonlinear objective functions" + ) if bundling: ## NOTE: this is slighly wasteful, in that for a bundle @@ -321,8 +360,11 @@ def _eta_bounds(m, k): ## to do the substitution nonant_vardata_list = list() for sn in s.scen_list: - nonant_vardata_list.extend( \ - opt.local_scenarios[sn]._mpisppy_node_list[0].nonant_vardata_list) + nonant_vardata_list.extend( + opt.local_scenarios[sn] + ._mpisppy_node_list[0] + .nonant_vardata_list + ) else: nonant_vardata_list = s._mpisppy_node_list[0].nonant_vardata_list @@ -335,11 +377,11 @@ def _eta_bounds(m, k): # adjust coefficients by scenario/bundle probability scen_prob = s._mpisppy_probability - for i,var in enumerate(repn.linear_vars): + for i, var in enumerate(repn.linear_vars): if id(var) not in nonant_ids: linear_coefs[i] *= scen_prob - for i,(x,y) in enumerate(repn.quadratic_vars): + for i, (x, y) in enumerate(repn.quadratic_vars): # only multiply through once if id(x) not in nonant_ids: quadratic_coefs[i] *= scen_prob @@ -349,9 +391,9 @@ def _eta_bounds(m, k): # NOTE: the LShaped code negates the objective, so # we do the same here for consistency if not opt.is_minimizing: - for i,coef in enumerate(linear_coefs): + for i, coef in enumerate(linear_coefs): linear_coefs[i] = -coef - for i,coef in enumerate(quadratic_coefs): + for i, coef in enumerate(quadratic_coefs): quadratic_coefs[i] = -coef # add the other etas @@ -367,20 +409,30 @@ def _eta_bounds(m, k): linear_vars.append(s._mpisppy_model.eta[sn]) eta_scenarios.append(sn) - expr = LinearExpression(constant=repn.constant, linear_coefs=linear_coefs, - linear_vars=linear_vars) + expr = LinearExpression( + constant=repn.constant, + linear_coefs=linear_coefs, + linear_vars=linear_vars, + ) if repn.quadratic_vars: expr += pyo.quicksum( - (coef*x*y for coef,(x,y) in zip(quadratic_coefs, repn.quadratic_vars)) + ( + coef * x * y + for coef, (x, y) in zip(quadratic_coefs, repn.quadratic_vars) + ) ) s._mpisppy_model.EF_obj = pyo.Expression(expr=expr) if opt.is_minimizing: - s._mpisppy_model.EF_Obj = pyo.Objective(expr=s._mpisppy_model.EF_obj, sense=pyo.minimize) + 
s._mpisppy_model.EF_Obj = pyo.Objective( + expr=s._mpisppy_model.EF_obj, sense=pyo.minimize + ) else: - s._mpisppy_model.EF_Obj = pyo.Objective(expr=-s._mpisppy_model.EF_obj, sense=pyo.maximize) + s._mpisppy_model.EF_Obj = pyo.Objective( + expr=-s._mpisppy_model.EF_obj, sense=pyo.maximize + ) s._mpisppy_model.EF_Obj.deactivate() # add cut constraint dicts @@ -409,18 +461,26 @@ def miditer(self): else: self.cur_ob = ob ob_new = True - + if not self.any_cuts: if self.new_cuts: self.any_cuts = True ## if its the second time or more with this IB, we'll only check ## if the last improved the OB, or if the OB is new itself (from somewhere else) - check = (self.check_bound_iterations is not None) and self.any_cuts and ( \ - (self.iter_at_cur_ib == self.check_bound_iterations) or \ - (self.iter_at_cur_ib > self.check_bound_iterations and ob_new) or \ - ((self.iter_since_last_check%self.check_bound_iterations == 0) and self.new_cuts)) - # if there hasn't been OB movement, check every so often if we have new cuts + check = ( + (self.check_bound_iterations is not None) + and self.any_cuts + and ( + (self.iter_at_cur_ib == self.check_bound_iterations) + or (self.iter_at_cur_ib > self.check_bound_iterations and ob_new) + or ( + (self.iter_since_last_check % self.check_bound_iterations == 0) + and self.new_cuts + ) + ) + ) + # if there hasn't been OB movement, check every so often if we have new cuts if check: self._check_bound() self.new_cuts = False diff --git a/mpisppy/extensions/diagnoser.py b/mpisppy/extensions/diagnoser.py index cdf63e6ef..6aafc5e56 100644 --- a/mpisppy/extensions/diagnoser.py +++ b/mpisppy/extensions/diagnoser.py @@ -18,38 +18,39 @@ import pyomo.environ as pyo import mpisppy.extensions.xhatbase + class Diagnoser(mpisppy.extensions.xhatbase.XhatBase): """ Args: ph (PH object): the calling object rank (int): mpi process rank of currently running process """ + def __init__(self, ph): dirname = ph.options["diagnoser_options"]["diagnoser_outdir"] if os.path.exists(dirname): if ph.cylinder_rank == 0: - print ("Shutting down because Diagnostic directory exists:", - dirname) + print("Shutting down because Diagnostic directory exists:", dirname) quit() if ph.cylinder_rank == 0: - os.mkdir(dirname) # just let it crash + os.mkdir(dirname) # just let it crash super().__init__(ph) self.options = self.ph.options["diagnoser_options"] self.dirname = self.options["diagnoser_outdir"] def write_loop(self): - """ Bundles are special. Also: this code needs help + """Bundles are special. Also: this code needs help from the ph object to be more efficient... 
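Based only on the writes in write_loop() and post_iter0() below, one scenario's .dag trace plausibly looks like the following (timestamp and values illustrative; the "Bundling" field appears only when bundles are in use):

    2024-01-01 12:00:00.000000, diagnoser
    1,Bundling,123.45
    2,Bundling,118.97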
""" for sname, s in self.ph.local_scenarios.items(): bundling = self.ph.bundling - fname = self.dirname+os.sep+sname+".dag" + fname = self.dirname + os.sep + sname + ".dag" with open(fname, "a") as f: - f.write(str(self.ph._PHIter)+",") + f.write(str(self.ph._PHIter) + ",") objfct = self.ph.saved_objectives[sname] if bundling: - f.write("Bundling"+",") + f.write("Bundling" + ",") f.write(str(pyo.value(objfct))) f.write("\n") @@ -58,12 +59,12 @@ def pre_iter0(self): def post_iter0(self): for sname, s in self.ph.local_scenarios.items(): - fname = self.dirname+os.sep+sname+".dag" + fname = self.dirname + os.sep + sname + ".dag" with open(fname, "w") as f: - f.write(str(dt.datetime.now())+", diagnoser\n") + f.write(str(dt.datetime.now()) + ", diagnoser\n") self.write_loop() - + def miditer(self, PHIter, conv): return diff --git a/mpisppy/extensions/extension.py b/mpisppy/extensions/extension.py index 9d6365ff7..fe0f14e96 100644 --- a/mpisppy/extensions/extension.py +++ b/mpisppy/extensions/extension.py @@ -6,53 +6,54 @@ # All rights reserved. Please see the files COPYRIGHT.md and LICENSE.md for # full copyright and license information. ############################################################################### -''' A template for creating PH_extension.py files - NOTE: we pass in the ph object, so extensions can wreck everything - if they want to! - Written: DLW Jan 2019 - Modified: DTM Aug 2019 +"""A template for creating PH_extension.py files +NOTE: we pass in the ph object, so extensions can wreck everything +if they want to! +Written: DLW Jan 2019 +Modified: DTM Aug 2019 + +NOTE: The return values of all non-constructor methods are ignored +""" - NOTE: The return values of all non-constructor methods are ignored -''' class Extension: - """ Abstract base class for extensions to general SPOpt/SPCommunicator objects. - """ + """Abstract base class for extensions to general SPOpt/SPCommunicator objects.""" + def __init__(self, spopt_object): self.opt = spopt_object def setup_hub(self): - ''' + """ Method called when the Hub SPCommunicator is set up (if used) Returns ------- None - ''' + """ pass def initialize_spoke_indices(self): - ''' + """ Method called when the Hub SPCommunicator initializes its spoke indices Returns ------- None - ''' + """ pass def sync_with_spokes(self): - ''' + """ Method called when the Hub SPCommunicator syncs with spokes Returns ------- None - ''' + """ pass def pre_solve(self, subproblem): - ''' + """ Method called before every subproblem solve Inputs @@ -62,11 +63,11 @@ def pre_solve(self, subproblem): Returns ------- None - ''' + """ pass def post_solve(self, subproblem, results): - ''' + """ Method called after every subproblem solve Inputs @@ -77,79 +78,80 @@ def post_solve(self, subproblem, results): Returns ------- results : Pyomo results objects from most recent solve - ''' + """ return results def pre_solve_loop(self): - ''' Method called before every solve loop within - mpisppy.spot.SPOpt.solve_loop() - ''' + """Method called before every solve loop within + mpisppy.spot.SPOpt.solve_loop() + """ pass def post_solve_loop(self): - ''' Method called after every solve loop within - mpisppy.spot.SPOpt.solve_loop() - ''' + """Method called after every solve loop within + mpisppy.spot.SPOpt.solve_loop() + """ pass def pre_iter0(self): - ''' Method called at the end of PH_Prep(). - When this method is called, all scenarios have been created, and - the dual/prox terms have been attached to the objective, but the - solvers have not yet been created. 
- ''' + """Method called at the end of PH_Prep(). + When this method is called, all scenarios have been created, and + the dual/prox terms have been attached to the objective, but the + solvers have not yet been created. + """ pass def post_iter0(self): - ''' Method called after the first PH iteration. - When this method is called, one call to solve_loop() has been - completed, and we have ensured that none of the models are - infeasible. The rho_setter, if present, has not yet been applied. - ''' + """Method called after the first PH iteration. + When this method is called, one call to solve_loop() has been + completed, and we have ensured that none of the models are + infeasible. The rho_setter, if present, has not yet been applied. + """ pass def post_iter0_after_sync(self): - ''' Method called after the first PH iteration, after the - synchronization of sending messages between cylinders - has completed. - ''' + """Method called after the first PH iteration, after the + synchronization of sending messages between cylinders + has completed. + """ pass def miditer(self): - ''' Method called after x-bar has been computed and the dual weights - have been updated, but before solve_loop(). - If a converger is present, this method is called between the - convergence_value() method and the is_converged() method. - ''' + """Method called after x-bar has been computed and the dual weights + have been updated, but before solve_loop(). + If a converger is present, this method is called between the + convergence_value() method and the is_converged() method. + """ pass def enditer(self): - ''' Method called after the solve_loop(), but before the next x-bar and - weight update. - ''' + """Method called after the solve_loop(), but before the next x-bar and + weight update. + """ pass def enditer_after_sync(self): - ''' Method called after the solve_loop(), after the - synchronization of sending messages between cylinders - has completed. - ''' + """Method called after the solve_loop(), after the + synchronization of sending messages between cylinders + has completed. + """ pass def post_everything(self): - ''' Method called after the termination of the algorithm. - This method is called after the scenario_denouement, if a - denouement is present. This function will not begin on any rank - within self.opt.mpicomm until the scenario_denouement has completed - on all other ranks. - ''' + """Method called after the termination of the algorithm. + This method is called after the scenario_denouement, if a + denouement is present. This function will not begin on any rank + within self.opt.mpicomm until the scenario_denouement has completed + on all other ranks. + """ pass class MultiExtension(Extension): - """ Container for all the extension classes we are using. - Also grabs ph and rank, so ad hoc calls (e.g., lagrangian) can use them. + """Container for all the extension classes we are using. + Also grabs ph and rank, so ad hoc calls (e.g., lagrangian) can use them. """ + def __init__(self, ph, ext_classes): super().__init__(ph) self.extdict = dict() diff --git a/mpisppy/extensions/fixer.py b/mpisppy/extensions/fixer.py index ab2963336..8dc9134f1 100644 --- a/mpisppy/extensions/fixer.py +++ b/mpisppy/extensions/fixer.py @@ -24,15 +24,16 @@ For other iters, use count; None is also how you avoid. """ + def Fixer_tuple(xvar, th=None, nb=None, lb=None, ub=None): - """ Somewhat self-documenting way to make a fixer tuple. + """Somewhat self-documenting way to make a fixer tuple. 
For use in/by/for the so-called Reference Model. Args: xvar (Var): the Pyomo Var th (float): compared to sqrt(abs(xbar_sqared - xsquared_bar)) - nb: (int) None means ignore; for iter k, number of iters + nb: (int) None means ignore; for iter k, number of iters converged anywhere. - lb: (int) None means ignore; for iter k, number of iters + lb: (int) None means ignore; for iter k, number of iters converged within th of xvar lower bound. ub: (int) None means ignore; for iter k, number of iters converged within th of xvar upper bound @@ -41,85 +42,95 @@ def Fixer_tuple(xvar, th=None, nb=None, lb=None, ub=None): tuple: a tuple to be appended to the iter0 or iterk list of tuples """ if th is None and nb is None and lb is None and ub is None: - print ("warning: Fixer_tuple called for Var=", xvar.name, - "but no arguments were given") + print( + "warning: Fixer_tuple called for Var=", + xvar.name, + "but no arguments were given", + ) if th is None: th = 0 if nb is not None and lb is not None and nb < lb: - print ("warning: Fixer_tuple called for Var=", xvar.name, - "with nb < lb, which means lb will be ignored.") - if nb is not None and ub is not None and nb < ub: - print ("warning: Fixer_tuple called for Var=", xvar.name, - "with nb < ub, which means ub will be ignored.") - + print( + "warning: Fixer_tuple called for Var=", + xvar.name, + "with nb < lb, which means lb will be ignored.", + ) + if nb is not None and ub is not None and nb < ub: + print( + "warning: Fixer_tuple called for Var=", + xvar.name, + "with nb < ub, which means ub will be ignored.", + ) + return (id(xvar), th, nb, lb, ub) -class Fixer(mpisppy.extensions.extension.Extension): +class Fixer(mpisppy.extensions.extension.Extension): def __init__(self, ph): self.ph = ph self.cylinder_rank = self.ph.cylinder_rank self.options = ph.options - self.fixeroptions = self.options["fixeroptions"] # required - self.verbose = self.options["verbose"] \ - or self.fixeroptions["verbose"] + self.fixeroptions = self.options["fixeroptions"] # required + self.verbose = self.options["verbose"] or self.fixeroptions["verbose"] # this function is scenario specific (takes a scenario as an arg) self.id_fix_list_fct = self.fixeroptions["id_fix_list_fct"] self.dprogress = ph.options["display_progress"] self.fixed_prior_iter0 = 0 - self.fixed_so_far = 0 + self.fixed_so_far = 0 self.boundtol = self.fixeroptions["boundtol"] def populate(self, local_scenarios): # [(ndn, i)] = iter count (i indices into nonantlist) self.local_scenarios = local_scenarios - self.iter0_fixer_tuples = {} # caller data - self.fixer_tuples = {} # caller data - self.threshold = {} - self.iter0_threshold = {} + self.iter0_fixer_tuples = {} # caller data + self.fixer_tuples = {} # caller data + self.threshold = {} + self.iter0_threshold = {} # This count dict drives the loops later # NOTE: every scenario has a list. 
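# --- editor's note (illustrative sketch, not part of the diff) ----------
# Fixer expects options["fixeroptions"]["id_fix_list_fct"] to map a
# scenario to a pair (iter0_list, iterk_list) of Fixer_tuple results,
# exactly as populate() unpacks just below.  A minimal sketch under the
# assumption of a hypothetical scenario model whose nonant Vars live in
# an indexed Var s.x; only the Fixer_tuple signature and the two-list
# return convention come from the code in this diff.
from mpisppy.extensions.fixer import Fixer_tuple

def id_fix_list_fct(s):
    # (s.x is a stand-in for whatever nonant Vars the model declares)
    # iter 0: fix any x sitting at its lower bound once converged within th
    iter0 = [Fixer_tuple(xvar, th=0.01, lb=0) for xvar in s.x.values()]
    # iter k: fix after 6 converged iters anywhere, or 4 iters near a bound
    iterk = [Fixer_tuple(xvar, th=0.01, nb=6, lb=4, ub=4)
             for xvar in s.x.values()]
    return iter0, iterk
# -------------------------------------------------------------------------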
- for k,s in self.local_scenarios.items(): - s._mpisppy_data.conv_iter_count = {} # key is (ndn, i) + for k, s in self.local_scenarios.items(): + s._mpisppy_data.conv_iter_count = {} # key is (ndn, i) - self.iter0_fixer_tuples[s], self.fixer_tuples[s] = \ - self.id_fix_list_fct(s) + self.iter0_fixer_tuples[s], self.fixer_tuples[s] = self.id_fix_list_fct(s) if self.iter0_fixer_tuples[s] is not None: - for (varid, th, nb, lb, ub) in self.iter0_fixer_tuples[s]: - (ndn, i) = s._mpisppy_data.varid_to_nonant_index[varid] # + for varid, th, nb, lb, ub in self.iter0_fixer_tuples[s]: + (ndn, i) = s._mpisppy_data.varid_to_nonant_index[varid] # if (ndn, i) not in self.iter0_threshold: self.iter0_threshold[(ndn, i)] = th else: if th != self.iter0_threshold[(ndn, i)]: - print (s.name, ndn, i, th) - raise RuntimeError("Attempt to vary iter0 fixer "+\ - "threshold across scenarios.") + print(s.name, ndn, i, th) + raise RuntimeError( + "Attempt to vary iter0 fixer " + + "threshold across scenarios." + ) if self.fixer_tuples[s] is not None: - for (varid, th, nb, lb, ub) in self.fixer_tuples[s]: + for varid, th, nb, lb, ub in self.fixer_tuples[s]: (ndn, i) = s._mpisppy_data.varid_to_nonant_index[varid] s._mpisppy_data.conv_iter_count[(ndn, i)] = 0 if (ndn, i) not in self.threshold: self.threshold[(ndn, i)] = th else: if th != self.threshold[(ndn, i)]: - print (s.name, ndn, i, th) - raise RuntimeError("Attempt to vary fixer "+\ - "threshold across scenarios") + print(s.name, ndn, i, th) + raise RuntimeError( + "Attempt to vary fixer " + "threshold across scenarios" + ) # verbose utility def _vb(self, str): if self.verbose and self.cylinder_rank == 0: - print ("(rank0) " + str) + print("(rank0) " + str) # display progress utility def _dp(self, str): if (self.dprogress or self.verbose) and self.cylinder_rank == 0: - print ("(rank0) " + str) + print("(rank0) " + str) def _update_fix_counts(self): - for k,s in self.local_scenarios.items(): + for k, s in self.local_scenarios.items(): for ndn_i, xvar in s._mpisppy_data.nonant_indices.items(): if xvar.is_fixed(): continue @@ -135,44 +146,47 @@ def _update_fix_counts(self): else: s._mpisppy_data.conv_iter_count[ndn_i] = 0 ##print ("debug reset fix diff, tolval", diff, tolval) - - def iter0(self, local_scenarios): - # first, do some persistent solver with bundles gymnastics - have_bundles = self.ph.bundling # indicates bundles + def iter0(self, local_scenarios): + # first, do some persistent solver with bundles gymnastics + have_bundles = self.ph.bundling # indicates bundles if have_bundles: subpname = next(iter(self.ph.local_subproblems)) subp = self.ph.local_subproblems[subpname] - solver_is_persistent = isinstance(subp._solver_plugin, - pyo.pyomo.solvers.plugins.solvers.persistent_solver.PersistentSolver) + solver_is_persistent = isinstance( + subp._solver_plugin, + pyo.pyomo.solvers.plugins.solvers.persistent_solver.PersistentSolver, + ) if solver_is_persistent: vars_to_update = {} # modelers might have already fixed variables - count those up and output the result raw_fixed_on_arrival = 0 - raw_fixed_this_iter = 0 - for sname,s in self.ph.local_scenarios.items(): + raw_fixed_this_iter = 0 + for sname, s in self.ph.local_scenarios.items(): if self.iter0_fixer_tuples[s] is None: - print ("WARNING: No Iter0 fixer tuple for s.name=",s.name) + print("WARNING: No Iter0 fixer tuple for s.name=", s.name) return - + if not have_bundles: - solver_is_persistent = isinstance(s._solver_plugin, - pyo.pyomo.solvers.plugins.solvers.persistent_solver.PersistentSolver) + 
solver_is_persistent = isinstance( + s._solver_plugin, + pyo.pyomo.solvers.plugins.solvers.persistent_solver.PersistentSolver, + ) - for (varid, th, nb, lb, ub) in self.iter0_fixer_tuples[s]: + for varid, th, nb, lb, ub in self.iter0_fixer_tuples[s]: was_fixed = False try: (ndn, i) = s._mpisppy_data.varid_to_nonant_index[varid] except: - print ("Are you trying to fix a Var that is not nonant?") + print("Are you trying to fix a Var that is not nonant?") raise - xvar = s._mpisppy_data.nonant_indices[ndn,i] + xvar = s._mpisppy_data.nonant_indices[ndn, i] if not xvar.is_fixed(): - xb = pyo.value(s._mpisppy_model.xbars[(ndn,i)]) - diff = xb * xb - pyo.value(s._mpisppy_model.xsqbars[(ndn,i)]) + xb = pyo.value(s._mpisppy_model.xbars[(ndn, i)]) + diff = xb * xb - pyo.value(s._mpisppy_model.xsqbars[(ndn, i)]) tolval = self.iter0_threshold[(ndn, i)] - sqtolval = tolval*tolval # the tol is on sqrt + sqtolval = tolval * tolval # the tol is on sqrt if -diff > sqtolval or diff > sqtolval: ##print ("debug0 NO fix diff, sqtolval", diff, sqtolval) continue @@ -181,18 +195,24 @@ def iter0(self, local_scenarios): # if we are still here, it is converged if nb is not None: xvar.fix(xb) - self._vb("Fixed0 nb %s %s at %s" % \ - (s.name, xvar.name, str(xvar._value))) + self._vb( + "Fixed0 nb %s %s at %s" + % (s.name, xvar.name, str(xvar._value)) + ) was_fixed = True elif lb is not None and xb - xvar.lb < self.boundtol: xvar.fix(xvar.lb) - self._vb("Fixed0 lb %s %s at %s" % \ - (s.name, xvar.name, str(xvar._value))) + self._vb( + "Fixed0 lb %s %s at %s" + % (s.name, xvar.name, str(xvar._value)) + ) was_fixed = True elif ub is not None and xvar.ub - xb < self.boundtol: xvar.fix(xvar.ub) - self._vb("Fixed0 ub %s %s at %s" % \ - (s.name, xvar.name, str(xvar._value))) + self._vb( + "Fixed0 ub %s %s at %s" + % (s.name, xvar.name, str(xvar._value)) + ) was_fixed = True if was_fixed: @@ -210,77 +230,94 @@ def iter0(self, local_scenarios): raw_fixed_on_arrival += 1 if have_bundles and solver_is_persistent: - for k,subp in self.ph.local_subproblems.items(): + for k, subp in self.ph.local_subproblems.items(): subpnum = sputils.extract_num(k) rank_local = self.ph.cylinder_rank for sname in self.ph.names_in_bundles[rank_local][subpnum]: if sname in vars_to_update: for xvar in vars_to_update[sname]: subp._solver_plugin.update_var(xvar) - + self.fixed_prior_iter0 += raw_fixed_on_arrival / len(local_scenarios) self.fixed_so_far += raw_fixed_this_iter / len(local_scenarios) - self._dp("Unique vars fixed so far - %d (%d prior to iteration 0)" % (self.fixed_so_far+self.fixed_prior_iter0, self.fixed_prior_iter0)) + self._dp( + "Unique vars fixed so far - %d (%d prior to iteration 0)" + % (self.fixed_so_far + self.fixed_prior_iter0, self.fixed_prior_iter0) + ) if raw_fixed_this_iter % len(local_scenarios) != 0: - raise RuntimeError ("Variation in fixing across scenarios detected " - "in fixer.py (iter0)") - # maybe to do mpicomm.abort() + raise RuntimeError( + "Variation in fixing across scenarios detected " "in fixer.py (iter0)" + ) + # maybe to do mpicomm.abort() if raw_fixed_on_arrival % len(local_scenarios) != 0: - raise RuntimeError ("Variation in fixing across scenarios prior to iteration 0 detected " - "in fixer.py (iter0)") - + raise RuntimeError( + "Variation in fixing across scenarios prior to iteration 0 detected " + "in fixer.py (iter0)" + ) def iterk(self, PHIter): - """ Before iter k>1 solves, but after x-bar update. 
- """ - # first, do some persistent solver with bundles gymnastics - have_bundles = self.ph.bundling # indicates bundles + """Before iter k>1 solves, but after x-bar update.""" + # first, do some persistent solver with bundles gymnastics + have_bundles = self.ph.bundling # indicates bundles if have_bundles: subpname = next(iter(self.ph.local_subproblems)) subp = self.ph.local_subproblems[subpname] - solver_is_persistent = isinstance(subp._solver_plugin, - pyo.pyomo.solvers.plugins.solvers.persistent_solver.PersistentSolver) + solver_is_persistent = isinstance( + subp._solver_plugin, + pyo.pyomo.solvers.plugins.solvers.persistent_solver.PersistentSolver, + ) if solver_is_persistent: vars_to_update = {} raw_fixed_this_iter = 0 self._update_fix_counts() - for sname,s in self.local_scenarios.items(): + for sname, s in self.local_scenarios.items(): if self.fixer_tuples[s] is None: - print ("MAJOR WARNING: No Iter k fixer tuple for s.name=",s.name) + print("MAJOR WARNING: No Iter k fixer tuple for s.name=", s.name) return if not have_bundles: - solver_is_persistent = isinstance(s._solver_plugin, pyo.pyomo.solvers.plugins.solvers.persistent_solver.PersistentSolver) - for (varid, th, nb, lb, ub) in self.fixer_tuples[s]: + solver_is_persistent = isinstance( + s._solver_plugin, + pyo.pyomo.solvers.plugins.solvers.persistent_solver.PersistentSolver, + ) + for varid, th, nb, lb, ub in self.fixer_tuples[s]: was_fixed = False try: (ndn, i) = s._mpisppy_data.varid_to_nonant_index[varid] except: - print ("Are you trying to fix a Var that is not nonant?") + print("Are you trying to fix a Var that is not nonant?") raise - xvar = s._mpisppy_data.nonant_indices[ndn,i] + xvar = s._mpisppy_data.nonant_indices[ndn, i] if not xvar.is_fixed(): - xb = pyo.value(s._mpisppy_model.xbars[(ndn,i)]) - fx = s._mpisppy_data.conv_iter_count[(ndn,i)] + xb = pyo.value(s._mpisppy_model.xbars[(ndn, i)]) + fx = s._mpisppy_data.conv_iter_count[(ndn, i)] if fx > 0: - xbar = pyo.value(s._mpisppy_model.xbars[(ndn,i)]) + xbar = pyo.value(s._mpisppy_model.xbars[(ndn, i)]) was_fixed = False - if nb is not None and nb <= fx: + if nb is not None and nb <= fx: xvar.fix(xbar) - self._vb("Fixed nb %s %s at %s" % \ - (s.name, xvar.name, str(xvar._value))) + self._vb( + "Fixed nb %s %s at %s" + % (s.name, xvar.name, str(xvar._value)) + ) was_fixed = True - elif lb is not None and lb < fx \ - and xb - xvar.lb < self.boundtol: + elif ( + lb is not None and lb < fx and xb - xvar.lb < self.boundtol + ): xvar.fix(xvar.lb) - self._vb("Fixed lb %s %s at %s" % \ - (s.name, xvar.name, str(xvar._value))) + self._vb( + "Fixed lb %s %s at %s" + % (s.name, xvar.name, str(xvar._value)) + ) was_fixed = True - elif ub is not None and ub < fx \ - and xvar.ub - xb < self.boundtol: + elif ( + ub is not None and ub < fx and xvar.ub - xb < self.boundtol + ): xvar.fix(xvar.ub) - self._vb("Fixed ub %s %s at %s" % \ - (s.name, xvar.name, str(xvar._value))) + self._vb( + "Fixed ub %s %s at %s" + % (s.name, xvar.name, str(xvar._value)) + ) was_fixed = True if was_fixed: @@ -292,9 +329,8 @@ def iterk(self, PHIter): vars_to_update[sname] = [] vars_to_update[sname].append(xvar) - if have_bundles and solver_is_persistent: - for k,subp in self.ph.local_subproblems.items(): + for k, subp in self.ph.local_subproblems.items(): subpnum = sputils.extract_num(k) rank_local = self.ph.cylinder_rank for sname in self.ph.names_in_bundles[rank_local][subpnum]: @@ -303,21 +339,24 @@ def iterk(self, PHIter): subp._solver_plugin.update_var(xvar) self.fixed_so_far += raw_fixed_this_iter 
/ len(self.local_scenarios) - self._dp("Unique vars fixed so far - %d (%d prior to iteration 0)" % (self.fixed_so_far+self.fixed_prior_iter0, self.fixed_prior_iter0)) + self._dp( + "Unique vars fixed so far - %d (%d prior to iteration 0)" + % (self.fixed_so_far + self.fixed_prior_iter0, self.fixed_prior_iter0) + ) if raw_fixed_this_iter % len(self.local_scenarios) != 0: - raise RuntimeError ("Variation in fixing across scenarios detected " - "in fixer.py") + raise RuntimeError( + "Variation in fixing across scenarios detected " "in fixer.py" + ) def pre_iter0(self): return - + def post_iter0(self): - """ initialize data structures; that's all we can do at this point - """ + """initialize data structures; that's all we can do at this point""" self.populate(self.ph.local_scenarios) def miditer(self): - """ Check for fixing before in the middle of PHIter (after + """Check for fixing before in the middle of PHIter (after the xbar update for PHiter-1). """ PHIter = self.ph._PHIter @@ -330,6 +369,4 @@ def enditer(self): return def post_everything(self): - self._dp("Final unique vars fixed by fixer= %s" % \ - (self.fixed_so_far)) - + self._dp("Final unique vars fixed by fixer= %s" % (self.fixed_so_far)) diff --git a/mpisppy/extensions/gradient_extension.py b/mpisppy/extensions/gradient_extension.py index 236e9b016..c230bf93c 100644 --- a/mpisppy/extensions/gradient_extension.py +++ b/mpisppy/extensions/gradient_extension.py @@ -19,18 +19,20 @@ # for trapping numpy warnings import warnings + class Gradient_extension(mpisppy.extensions.extension.Extension): """ This extension makes PH use gradient-rho and the corresponding rho setter. - + Args: opt (PHBase object): gives the problem cfg (Config object): config object - + Attributes: grad_object (Find_Grad object): gradient object - + """ + def __init__(self, opt, comm=None): super().__init__(opt) self.cylinder_rank = self.opt.cylinder_rank @@ -39,21 +41,23 @@ def __init__(self, opt, comm=None): # (e.g., if the user gave us an input file, use that) # TBD: stop using files # TBD: restore the rho_setter? 
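# --- editor's note (illustrative sketch, not part of the diff) ----------
# The cfg handling below implements a "write then read" fallback: when
# the caller supplies no input CSVs, the extension writes gradient cost
# and rho values to temp files and points the *_in options at those same
# files.  A sketch of that logic with a plain dict standing in for the
# Config object (the real code uses cfg.get(name, ifmissing="")); the
# function name resolve_grad_files is hypothetical.
def resolve_grad_files(cfg):
    if not cfg.get("grad_cost_file_out", ""):
        cfg["grad_cost_file_out"] = "./_temp_grad_cost_file.csv"
    if not cfg.get("grad_cost_file_in", ""):
        cfg["grad_cost_file_in"] = cfg["grad_cost_file_out"]  # write then read
    if not cfg.get("rho_file_in", ""):
        if not cfg.get("grad_rho_file_out", ""):
            cfg["grad_rho_file_out"] = "./_temp_rho_file.csv"
        cfg["rho_file_in"] = cfg["grad_rho_file_out"]  # write then read
    return cfg

# e.g. resolve_grad_files({}) fills in both temp files, while a caller
# that already set rho_file_in keeps it and only the cost file defaults.
# -------------------------------------------------------------------------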
- self.cfg_args_cache = {'rho_file_in': self.cfg.rho_file_in, - 'grad_rho_file_out': self.cfg.grad_rho_file_out, - 'rho_setter': self.cfg.grad_rho_setter} - if self.cfg.get('grad_cost_file_out', ifmissing="") == "": - self.cfg.grad_cost_file_out = './_temp_grad_cost_file.csv' -# else: -# self.cfg_args_cache["grad_cost_file_out"] = self.cfg.grad_cost_file_out - if self.cfg.get('grad_cost_file_in', ifmissing="") == "": + self.cfg_args_cache = { + "rho_file_in": self.cfg.rho_file_in, + "grad_rho_file_out": self.cfg.grad_rho_file_out, + "rho_setter": self.cfg.grad_rho_setter, + } + if self.cfg.get("grad_cost_file_out", ifmissing="") == "": + self.cfg.grad_cost_file_out = "./_temp_grad_cost_file.csv" + # else: + # self.cfg_args_cache["grad_cost_file_out"] = self.cfg.grad_cost_file_out + if self.cfg.get("grad_cost_file_in", ifmissing="") == "": self.cfg.grad_cost_file_in = self.cfg.grad_cost_file_out # write then read # from the perspective of this extension, we really should not have both - if self.cfg.get('rho_file_in', ifmissing="") == "": + if self.cfg.get("rho_file_in", ifmissing="") == "": if self.cfg.get("grad_rho_file_out", ifmissing="") == "": # we don't have either, but will write then read - self.cfg.grad_rho_file_out = './_temp_rho_file.csv' - self.cfg.rho_file_in = self.cfg.grad_rho_file_out + self.cfg.grad_rho_file_out = "./_temp_rho_file.csv" + self.cfg.rho_file_in = self.cfg.grad_rho_file_out else: # we don't have an in, but we have an out, still write then read self.cfg.rho_file_in = self.cfg.grad_rho_file_out @@ -66,39 +70,46 @@ def __init__(self, opt, comm=None): def _display_rho_values(self): for sname, scenario in self.opt.local_scenarios.items(): - rho_list = [scenario._mpisppy_model.rho[ndn_i]._value - for ndn_i, _ in scenario._mpisppy_data.nonant_indices.items()] - print(sname, 'rho values: ', rho_list[:5]) + rho_list = [ + scenario._mpisppy_model.rho[ndn_i]._value + for ndn_i, _ in scenario._mpisppy_data.nonant_indices.items() + ] + print(sname, "rho values: ", rho_list[:5]) break def _display_W_values(self): - for (sname, scenario) in self.opt.local_scenarios.items(): + for sname, scenario in self.opt.local_scenarios.items(): W_list = [w._value for w in scenario._mpisppy_model.W.values()] - print(sname, 'W values: ', W_list) + print(sname, "W values: ", W_list) break - def _update_rho_primal_based(self): with warnings.catch_warnings(): - warnings.filterwarnings('error') - curr_conv, last_conv = self.primal_conv_cache[-1], self.primal_conv_cache[-2] + warnings.filterwarnings("error") + curr_conv, last_conv = ( + self.primal_conv_cache[-1], + self.primal_conv_cache[-2], + ) try: - primal_diff = np.abs((last_conv - curr_conv) / last_conv) + primal_diff = np.abs((last_conv - curr_conv) / last_conv) except Warning: if self.cylinder_rank == 0: - print(f"Gradient extension reports {last_conv=} {curr_conv=} - no rho updates recommended") + print( + f"Gradient extension reports {last_conv=} {curr_conv=} - no rho updates recommended" + ) return False - return (primal_diff <= self.cfg.grad_dynamic_primal_thresh) + return primal_diff <= self.cfg.grad_dynamic_primal_thresh def _update_rho_dual_based(self): curr_conv, last_conv = self.dual_conv_cache[-1], self.dual_conv_cache[-2] - dual_diff = np.abs((last_conv - curr_conv) / last_conv) if last_conv != 0 else 0 - #print(f'{dual_diff =}') - return (dual_diff <= self.cfg.grad_dynamic_dual_thresh) + dual_diff = np.abs((last_conv - curr_conv) / last_conv) if last_conv != 0 else 0 + # print(f'{dual_diff =}') + return dual_diff <= 
self.cfg.grad_dynamic_dual_thresh def _update_recommended(self): - return (self.cfg.grad_dynamic_primal_crit and self._update_rho_primal_based()) or \ - (self.cfg.grad_dynamic_dual_crit and self._update_rho_dual_based()) + return ( + self.cfg.grad_dynamic_primal_crit and self._update_rho_primal_based() + ) or (self.cfg.grad_dynamic_dual_crit and self._update_rho_dual_based()) def pre_iter0(self): pass @@ -115,23 +126,25 @@ def miditer(self): self.grad_object.write_grad_cost() if self.opt._PHIter == 1 or self._update_recommended(): self.grad_object.write_grad_rho() - rho_setter_kwargs = self.opt.options['rho_setter_kwargs'] \ - if 'rho_setter_kwargs' in self.opt.options \ - else dict() + rho_setter_kwargs = ( + self.opt.options["rho_setter_kwargs"] + if "rho_setter_kwargs" in self.opt.options + else dict() + ) # sum/num is a total hack sum_rho = 0.0 num_rhos = 0 for sname, scenario in self.opt.local_scenarios.items(): rholist = self.rho_setter(scenario, **rho_setter_kwargs) - for (vid, rho) in rholist: + for vid, rho in rholist: (ndn, i) = scenario._mpisppy_data.varid_to_nonant_index[vid] scenario._mpisppy_model.rho[(ndn, i)] = rho sum_rho += rho num_rhos += 1 rho_avg = sum_rho / num_rhos - + global_toc(f"Rho values recomputed - average rank 0 rho={rho_avg}") def enditer(self): @@ -139,11 +152,17 @@ def enditer(self): def post_everything(self): # if we are using temp files, deal with it - if self.cylinder_rank == 0 and os.path.exists(self.cfg.rho_file_in)\ - and self.cfg.rho_file_in != self.cfg_args_cache['rho_file_in']: - os.remove(self.cfg.rho_file_in) - self.cfg.rho_file_in = self.cfg_args_cache['rho_file_in'] # namely "" - if self.cylinder_rank == 0 and os.path.exists(self.cfg.grad_cost_file_out) and \ - self.cfg.get('grad_cost_file_out', ifmissing="") == "": - os.remove(self.cfg.grad_cost_file_out) - self.cfg.grad_cost_file_out = self.cfg_args_cache['grad_cost_file_out'] + if ( + self.cylinder_rank == 0 + and os.path.exists(self.cfg.rho_file_in) + and self.cfg.rho_file_in != self.cfg_args_cache["rho_file_in"] + ): + os.remove(self.cfg.rho_file_in) + self.cfg.rho_file_in = self.cfg_args_cache["rho_file_in"] # namely "" + if ( + self.cylinder_rank == 0 + and os.path.exists(self.cfg.grad_cost_file_out) + and self.cfg.get("grad_cost_file_out", ifmissing="") == "" + ): + os.remove(self.cfg.grad_cost_file_out) + self.cfg.grad_cost_file_out = self.cfg_args_cache["grad_cost_file_out"] diff --git a/mpisppy/extensions/mipgapper.py b/mpisppy/extensions/mipgapper.py index ded606ee7..c8e9f0493 100644 --- a/mpisppy/extensions/mipgapper.py +++ b/mpisppy/extensions/mipgapper.py @@ -6,44 +6,43 @@ # All rights reserved. Please see the files COPYRIGHT.md and LICENSE.md for # full copyright and license information. ############################################################################### -""" Code for a mipgap schedule. This can be used - as the only extension, but it could also be called from a "multi" - extension. +"""Code for a mipgap schedule. This can be used +as the only extension, but it could also be called from a "multi" +extension. 
""" import mpisppy.extensions.extension -class Gapper(mpisppy.extensions.extension.Extension): +class Gapper(mpisppy.extensions.extension.Extension): def __init__(self, ph): self.ph = ph self.cylinder_rank = self.ph.cylinder_rank - self.gapperoptions = self.ph.options["gapperoptions"] # required + self.gapperoptions = self.ph.options["gapperoptions"] # required self.mipgapdict = self.gapperoptions["mipgapdict"] - self.verbose = self.ph.options["verbose"] \ - or self.gapperoptions["verbose"] - + self.verbose = self.ph.options["verbose"] or self.gapperoptions["verbose"] + def _vb(self, str): if self.verbose and self.cylinder_rank == 0: - print ("(rank0) mipgapper:" + str) + print("(rank0) mipgapper:" + str) def set_mipgap(self, mipgap): - """ set the mipgap + """set the mipgap Args: float (mipgap): the gap to set """ oldgap = None if "mipgap" in self.ph.current_solver_options: oldgap = self.ph.current_solver_options["mipgap"] - self._vb("Changing mipgap from "+str(oldgap)+" to "+str(mipgap)) + self._vb("Changing mipgap from " + str(oldgap) + " to " + str(mipgap)) self.ph.current_solver_options["mipgap"] = float(mipgap) - + def pre_iter0(self): if self.mipgapdict is None: return if 0 in self.mipgapdict: self.set_mipgap(self.mipgapdict[0]) - + def post_iter0(self): return @@ -54,7 +53,6 @@ def miditer(self): if PHIter in self.mipgapdict: self.set_mipgap(self.mipgapdict[PHIter]) - def enditer(self): return diff --git a/mpisppy/extensions/mult_rho_updater.py b/mpisppy/extensions/mult_rho_updater.py index b1e465265..c56ae9c4f 100644 --- a/mpisppy/extensions/mult_rho_updater.py +++ b/mpisppy/extensions/mult_rho_updater.py @@ -15,46 +15,51 @@ # for ph.options['mult_rho_options']: -_mult_rho_defaults = { 'convergence_tolerance' : 1e-4, - 'rho_update_stop_iteration' : None, - 'rho_update_start_iteration' : None, - 'verbose' : False, +_mult_rho_defaults = { + "convergence_tolerance": 1e-4, + "rho_update_stop_iteration": None, + "rho_update_start_iteration": None, + "verbose": False, } _attr_to_option_name_map = { - '_tol': 'convergence_tolerance', - '_stop_iter' : 'rho_update_stop_iteration', - '_start_iter' : 'rho_update_start_iteration', - '_verbose' : 'verbose', + "_tol": "convergence_tolerance", + "_stop_iter": "rho_update_stop_iteration", + "_start_iter": "rho_update_start_iteration", + "_verbose": "verbose", } class MultRhoUpdater(mpisppy.extensions.extension.Extension): - def __init__(self, ph): - self.ph = ph - self.mult_rho_options = \ - ph.options['mult_rho_options'] if 'mult_rho_options' in ph.options else dict() + self.mult_rho_options = ( + ph.options["mult_rho_options"] + if "mult_rho_options" in ph.options + else dict() + ) self._set_options() self._first_rho = None self.best_conv = float("inf") - def _conv(self): if self.ph.convobject is not None: return self.ph.convobject.conv else: - return self.ph.conv - + return self.ph.conv def _set_options(self): options = self.mult_rho_options for attr_name, opt_name in _attr_to_option_name_map.items(): - setattr(self, attr_name, options[opt_name] if opt_name in options else _mult_rho_defaults[opt_name]) + setattr( + self, + attr_name, + options[opt_name] + if opt_name in options + else _mult_rho_defaults[opt_name], + ) - def _attach_rho_ratio_data(self, ph, conv): if conv is None or conv == self._tol: return @@ -62,16 +67,17 @@ def _attach_rho_ratio_data(self, ph, conv): if not self.ph.multistage: # two stage for s in ph.local_scenarios.values(): - break # arbitrary scenario - self._first_rho = {ndn_i: rho._value for ndn_i, rho in 
s._mpisppy_model.rho.items()} + break # arbitrary scenario + self._first_rho = { + ndn_i: rho._value for ndn_i, rho in s._mpisppy_model.rho.items() + } else: # loop over all scenarios to get all nodes when multi-stage (wastes time...) self._first_rho = dict() for k, s in ph.local_scenarios.items(): - for ndn_i, rho in s._mpisppy_model.rho.items(): + for ndn_i, rho in s._mpisppy_model.rho.items(): self._first_rho[ndn_i] = rho._value - def pre_iter0(self): pass @@ -79,20 +85,17 @@ def post_iter0(self): pass def miditer(self): - ph = self.ph ph_iter = ph._PHIter - if (self._stop_iter is not None and \ - ph_iter > self._stop_iter) \ - or \ - (self._start_iter is not None and \ - ph_iter < self._start_iter): + if (self._stop_iter is not None and ph_iter > self._stop_iter) or ( + self._start_iter is not None and ph_iter < self._start_iter + ): return - conv = self._conv() + conv = self._conv() if conv < self.best_conv: self.best_conv = conv else: - return # only do something if we have a new best + return # only do something if we have a new best if self._first_rho is None: self._attach_rho_ratio_data(ph, conv) # rho / conv elif conv != 0: @@ -100,7 +103,9 @@ def miditer(self): for ndn_i, rho in s._mpisppy_model.rho.items(): rho._value = self._first_rho[ndn_i] * self.first_c / conv if ph.cylinder_rank == 0: - print(f"MultRhoUpdater iter={ph_iter}; {ndn_i} now has value {rho._value}") + print( + f"MultRhoUpdater iter={ph_iter}; {ndn_i} now has value {rho._value}" + ) def enditer(self): pass diff --git a/mpisppy/extensions/norm_rho_updater.py b/mpisppy/extensions/norm_rho_updater.py index e76cdd14e..ba6090b6d 100644 --- a/mpisppy/extensions/norm_rho_updater.py +++ b/mpisppy/extensions/norm_rho_updater.py @@ -15,43 +15,52 @@ import numpy as np import mpisppy.MPI as MPI -_norm_rho_defaults = { 'convergence_tolerance' : 1e-4, - 'rho_decrease_multiplier' : 2.0, - 'rho_increase_multiplier' : 2.0, - 'primal_dual_difference_factor' : 100., - 'iterations_converged_before_decrease' : 0, - 'rho_converged_decrease_multiplier' : 1.1, - 'rho_update_stop_iterations' : None, - 'verbose' : False, +_norm_rho_defaults = { + "convergence_tolerance": 1e-4, + "rho_decrease_multiplier": 2.0, + "rho_increase_multiplier": 2.0, + "primal_dual_difference_factor": 100.0, + "iterations_converged_before_decrease": 0, + "rho_converged_decrease_multiplier": 1.1, + "rho_update_stop_iterations": None, + "verbose": False, } _attr_to_option_name_map = { - '_tol': 'convergence_tolerance', - '_rho_decrease' : 'rho_decrease_multiplier', - '_rho_increase' : 'rho_increase_multiplier', - '_primal_dual_difference_factor' : 'primal_dual_difference_factor', - '_required_converged_before_decrease' : 'iterations_converged_before_decrease', - '_rho_converged_residual_decrease' : 'rho_converged_decrease_multiplier', - '_stop_iter_rho_update' : 'rho_update_stop_iterations', - '_verbose' : 'verbose', + "_tol": "convergence_tolerance", + "_rho_decrease": "rho_decrease_multiplier", + "_rho_increase": "rho_increase_multiplier", + "_primal_dual_difference_factor": "primal_dual_difference_factor", + "_required_converged_before_decrease": "iterations_converged_before_decrease", + "_rho_converged_residual_decrease": "rho_converged_decrease_multiplier", + "_stop_iter_rho_update": "rho_update_stop_iterations", + "_verbose": "verbose", } -class NormRhoUpdater(mpisppy.extensions.extension.Extension): +class NormRhoUpdater(mpisppy.extensions.extension.Extension): def __init__(self, ph): - self.ph = ph - self.norm_rho_options = \ - 
ph.options['norm_rho_options'] if 'norm_rho_options' in ph.options else dict() + self.norm_rho_options = ( + ph.options["norm_rho_options"] + if "norm_rho_options" in ph.options + else dict() + ) self._set_options() self._prev_avg = {} self.ph._mpisppy_norm_rho_update_inuse = True # allow NormRhoConverger - + def _set_options(self): options = self.norm_rho_options for attr_name, opt_name in _attr_to_option_name_map.items(): - setattr(self, attr_name, options[opt_name] if opt_name in options else _norm_rho_defaults[opt_name]) + setattr( + self, + attr_name, + options[opt_name] + if opt_name in options + else _norm_rho_defaults[opt_name], + ) def _snapshot_avg(self, ph): for s in ph.local_scenarios.values(): @@ -59,45 +68,49 @@ def _snapshot_avg(self, ph): self._prev_avg[ndn_i] = xbar.value def _compute_primal_residual_norm(self, ph): - local_nodenames = [] local_primal_residuals = {} global_primal_residuals = {} - for k,s in ph.local_scenarios.items(): - nlens = s._mpisppy_data.nlens + for k, s in ph.local_scenarios.items(): + nlens = s._mpisppy_data.nlens for node in s._mpisppy_node_list: if node.name not in local_nodenames: - ndn = node.name local_nodenames.append(ndn) nlen = nlens[ndn] - local_primal_residuals[ndn] = np.zeros(nlen, dtype='d') - global_primal_residuals[ndn] = np.zeros(nlen, dtype='d') + local_primal_residuals[ndn] = np.zeros(nlen, dtype="d") + global_primal_residuals[ndn] = np.zeros(nlen, dtype="d") - for k,s in ph.local_scenarios.items(): + for k, s in ph.local_scenarios.items(): nlens = s._mpisppy_data.nlens xbars = s._mpisppy_model.xbars for node in s._mpisppy_node_list: ndn = node.name primal_residuals = local_primal_residuals[ndn] - unweighted_primal_residuals = \ - np.fromiter((abs(v._value - xbars[ndn,i]._value) for i,v in enumerate(node.nonant_vardata_list)), - dtype='d', count=nlens[ndn] ) + unweighted_primal_residuals = np.fromiter( + ( + abs(v._value - xbars[ndn, i]._value) + for i, v in enumerate(node.nonant_vardata_list) + ), + dtype="d", + count=nlens[ndn], + ) primal_residuals += s._mpisppy_probability * unweighted_primal_residuals for nodename in local_nodenames: ph.comms[nodename].Allreduce( [local_primal_residuals[nodename], MPI.DOUBLE], [global_primal_residuals[nodename], MPI.DOUBLE], - op=MPI.SUM) + op=MPI.SUM, + ) primal_resid = {} for ndn, global_primal_resid in global_primal_residuals.items(): for i, v in enumerate(global_primal_resid): - primal_resid[ndn,i] = v + primal_resid[ndn, i] = v return primal_resid @@ -105,8 +118,9 @@ def _compute_dual_residual_norm(self, ph): dual_resid = {} for s in ph.local_scenarios.values(): for ndn_i in s._mpisppy_data.nonant_indices: - dual_resid[ndn_i] = s._mpisppy_model.rho[ndn_i].value * \ - math.fabs( s._mpisppy_model.xbars[ndn_i].value - self._prev_avg[ndn_i] ) + dual_resid[ndn_i] = s._mpisppy_model.rho[ndn_i].value * math.fabs( + s._mpisppy_model.xbars[ndn_i].value - self._prev_avg[ndn_i] + ) return dual_resid def pre_iter0(self): @@ -116,11 +130,11 @@ def post_iter0(self): pass def miditer(self): - ph = self.ph ph_iter = ph._PHIter - if self._stop_iter_rho_update is not None and \ - (ph_iter > self._stop_iter_rho_update): + if self._stop_iter_rho_update is not None and ( + ph_iter > self._stop_iter_rho_update + ): return if not self._prev_avg: self._snapshot_avg(ph) @@ -137,10 +151,14 @@ def miditer(self): dual_resid = dual_residuals[ndn_i] action = None - if (primal_resid > primal_dual_difference_factor*dual_resid) and (primal_resid > self._tol): + if (primal_resid > primal_dual_difference_factor * 
dual_resid) and ( + primal_resid > self._tol + ): rho._value *= self._rho_increase action = "Increasing" - elif (dual_resid > primal_dual_difference_factor*primal_resid) and (dual_resid > self._tol): + elif ( + dual_resid > primal_dual_difference_factor * primal_resid + ) and (dual_resid > self._tol): if ph_iter >= self._required_converged_before_decrease: rho._value /= self._rho_decrease action = "Decreasing" @@ -150,17 +168,28 @@ def miditer(self): if self._verbose and ph.cylinder_rank == 0 and action is not None: if first: first = False - first_line = ("Updating rho values:\n%21s %40s %16s %16s %16s" - % ("Action", - "Variable", - "Primal Residual", - "Dual Residual", - "New Rho")) + first_line = ( + "Updating rho values:\n%21s %40s %16s %16s %16s" + % ( + "Action", + "Variable", + "Primal Residual", + "Dual Residual", + "New Rho", + ) + ) print(first_line) if first_scenario: - print("%21s %40s %16g %16g %16g" - % (action, s._mpisppy_data.nonant_indices[ndn_i].name, - primal_resid, dual_resid, rho.value)) + print( + "%21s %40s %16g %16g %16g" + % ( + action, + s._mpisppy_data.nonant_indices[ndn_i].name, + primal_resid, + dual_resid, + rho.value, + ) + ) first_scenario = False def enditer(self): diff --git a/mpisppy/extensions/phtracker.py b/mpisppy/extensions/phtracker.py index c95ab4624..22e74df18 100644 --- a/mpisppy/extensions/phtracker.py +++ b/mpisppy/extensions/phtracker.py @@ -6,9 +6,10 @@ # All rights reserved. Please see the files COPYRIGHT.md and LICENSE.md for # full copyright and license information. ############################################################################### -''' An extension to track the PH object (mpisppy.opt.ph.PH) during execution. - Must use the PH object for this to work -''' +"""An extension to track the PH object (mpisppy.opt.ph.PH) during execution. +Must use the PH object for this to work +""" + import ast import matplotlib.pyplot as plt import numpy as np @@ -17,11 +18,12 @@ from mpisppy.extensions.extension import Extension from mpisppy.cylinders.spoke import ConvergerSpokeType from mpisppy.cylinders.spoke import Spoke -from mpisppy.cylinders.reduced_costs_spoke import ReducedCostsSpoke +from mpisppy.cylinders.reduced_costs_spoke import ReducedCostsSpoke + + +class TrackedData: + """A class to manage the data for a single variable (e.g. gaps, bounds, etc.)""" -class TrackedData(): - ''' A class to manage the data for a single variable (e.g. gaps, bounds, etc.) - ''' def __init__(self, name, folder, plot=False, verbose=False): self.name = name self.folder = folder @@ -34,19 +36,18 @@ def __init__(self, name, folder, plot=False, verbose=False): self.seen_iters = set() def initialize_fnames(self, name=None): - """ Initialize filenames for saving and plotting - """ + """Initialize filenames for saving and plotting""" name = self.name if name is None else name - name = name[:-4] if name.endswith('.csv') else name + name = name[:-4] if name.endswith(".csv") else name - self.fname = os.path.join(self.folder, f'{name}.csv') + self.fname = os.path.join(self.folder, f"{name}.csv") # LRL: Encountered a bug where plot = False, but plots where still generated, # leading to error where plot_fname is None. So, always setting it here. 
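# --- editor's note (illustrative sketch, not part of the diff) ----------
# The NormRhoUpdater.miditer() logic above is the standard residual-
# balancing heuristic, pulled out of its PH plumbing for clarity: rho
# grows when the primal residual dominates the dual residual by a large
# factor, and shrinks in the symmetric case.  Defaults mirror the
# _norm_rho_defaults table above; the real extension additionally waits
# iterations_converged_before_decrease iterations before decreasing.
def balance_rho(rho, primal_resid, dual_resid,
                factor=100.0, tol=1e-4, increase=2.0, decrease=2.0):
    if primal_resid > factor * dual_resid and primal_resid > tol:
        return rho * increase   # consensus lagging: push x toward xbar
    if dual_resid > factor * primal_resid and dual_resid > tol:
        return rho / decrease   # xbar still moving: relax the penalty
    return rho
# -------------------------------------------------------------------------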
- #if self.plot: - self.plot_fname = os.path.join(self.folder, f'{name}.png') + # if self.plot: + self.plot_fname = os.path.join(self.folder, f"{name}.png") def initialize_df(self, columns): - """ Initialize the dataframe for saving the data and write out the column names + """Initialize the dataframe for saving the data and write out the column names as future rows will be appended to the dataframe """ self.columns = columns @@ -54,12 +55,12 @@ def initialize_df(self, columns): self.df.to_csv(self.fname, index=False, header=True) def add_row(self, row): - """ Add a row to the dataframe; + """Add a row to the dataframe; Assumes the first column is the iteration number if row is a list """ assert len(row) == len(self.columns) if isinstance(row, dict): - row_iter = row['iteration'] + row_iter = row["iteration"] elif isinstance(row, list): row_iter = row[0] else: @@ -77,13 +78,13 @@ def add_row(self, row): self.df = pd.concat([self.df, new_dict], ignore_index=True) def write_out_data(self): - """ Write out the cached data to csv file and clear the cache - """ - self.df.to_csv(self.fname, mode='a', header=False, index=False) + """Write out the cached data to csv file and clear the cache""" + self.df.to_csv(self.fname, mode="a", header=False, index=False) self.df = pd.DataFrame(columns=self.columns) + class PHTracker(Extension): - """ Class for tracking the PH algorithm + """Class for tracking the PH algorithm NOTE: Can generalize this code to beyond PH by subclassing TrackedData for each @@ -93,6 +94,7 @@ class PHTracker(Extension): Must pass cylinder_name in options if multiple cylinders of the same class are being used """ + def __init__(self, opt): """ Args: @@ -100,7 +102,7 @@ def __init__(self, opt): """ super().__init__(opt) self.verbose = self.opt.options["verbose"] - if 'phtracker_options' in self.opt.options: + if "phtracker_options" in self.opt.options: self.tracker_options = self.opt.options["phtracker_options"] else: raise RuntimeError("phtracker_options not specified in options dict") @@ -109,23 +111,28 @@ def __init__(self, opt): self.save_every = self.tracker_options.get("save_every", 1) self.write_every = self.tracker_options.get("write_every", 3) self._rank = self.opt.cylinder_rank - self._reduce_types = ['nonants', 'duals', 'scen_gaps'] + self._reduce_types = ["nonants", "duals", "scen_gaps"] self._track_var_to_func = { - 'gaps': {'track': self.add_gaps, - 'finalize': self.plot_xbars_bounds_gaps}, - 'bounds': {'track': self.add_bounds, - 'finalize': self.plot_xbars_bounds_gaps}, - 'nonants': {'track': self.add_nonants, - 'finalize': self.plot_nonants_sgaps_duals}, - 'duals': {'track': self.add_duals, - 'finalize': self.plot_nonants_sgaps_duals}, - 'xbars': {'track': self.add_xbars, - 'finalize': self.plot_xbars_bounds_gaps}, - 'scen_gaps': {'track': self.add_scen_gaps, - 'finalize': self.plot_nonants_sgaps_duals}, - 'reduced_costs': {'track': self.add_rc, - 'finalize': self.plot_rc} + "gaps": {"track": self.add_gaps, "finalize": self.plot_xbars_bounds_gaps}, + "bounds": { + "track": self.add_bounds, + "finalize": self.plot_xbars_bounds_gaps, + }, + "nonants": { + "track": self.add_nonants, + "finalize": self.plot_nonants_sgaps_duals, + }, + "duals": { + "track": self.add_duals, + "finalize": self.plot_nonants_sgaps_duals, + }, + "xbars": {"track": self.add_xbars, "finalize": self.plot_xbars_bounds_gaps}, + "scen_gaps": { + "track": self.add_scen_gaps, + "finalize": self.plot_nonants_sgaps_duals, + }, + "reduced_costs": {"track": self.add_rc, "finalize": self.plot_rc}, } # 
will initialize these after spcomm is initialized @@ -135,23 +142,32 @@ def __init__(self, opt): self.finished_init = False def finish_init(self): - """ Finish initialization of the extension as we need the spcomm object + """Finish initialization of the extension as we need the spcomm object to be initialized and the tracker is initialized with opt before spcomm """ self.spcomm = self.opt.spcomm cylinder_name = self.tracker_options.get( - "cylinder_name", type(self.spcomm).__name__) + "cylinder_name", type(self.spcomm).__name__ + ) self.cylinder_folder = os.path.join(self.results_folder, cylinder_name) - track_types = ['gaps', 'bounds', 'nonants', 'duals', 'xbars', 'scen_gaps', 'reduced_costs'] + track_types = [ + "gaps", + "bounds", + "nonants", + "duals", + "xbars", + "scen_gaps", + "reduced_costs", + ] self.track_dict = {} for t in track_types: - if self.tracker_options.get(f'track_{t}', False): + if self.tracker_options.get(f"track_{t}", False): val = True # only rank 0 needs to create tracker objects; other ranks need to # know what data is being tracked if self._rank == 0: - plot = self.tracker_options.get(f'plot_{t}', False) + plot = self.tracker_options.get(f"plot_{t}", False) val = TrackedData(t, self.cylinder_folder, plot, self.verbose) user_fname = self.tracker_options.get(f"{t}_fname", None) val.initialize_fnames(name=user_fname) @@ -168,93 +184,95 @@ def finish_init(self): @property def curr_iter(self): - """ Get the current iteration number + """Get the current iteration number NOTE: This should probably made less ad hoc _PHIter could be inaccurate in that most spokes currently make specific function calls rather than ph_main(). Therefore, _PHIter may not be up to date. """ - if hasattr(self.spcomm, 'A_iter'): + if hasattr(self.spcomm, "A_iter"): return self.spcomm.A_iter - if hasattr(self.spcomm, 'dk_iter'): + if hasattr(self.spcomm, "dk_iter"): return self.spcomm.dk_iter - if hasattr(self.opt, '_PHIter'): + if hasattr(self.opt, "_PHIter"): return self.opt._PHIter raise RuntimeError("Iteration not found") def verify_tracking(self): - """ Verify that the user has specified the correct tracking options given - the spcomm object + """Verify that the user has specified the correct tracking options given + the spcomm object """ - if 'gaps' in self.track_dict or 'bounds' in self.track_dict: - if isinstance(self.spcomm, Spoke) and \ - not hasattr(self.spcomm, 'hub_outer_bound') and \ - not hasattr(self.spcomm, 'hub_inner_bound'): - - raise RuntimeError("Cannot access hub gaps without passing" - " them to spcomm") + if "gaps" in self.track_dict or "bounds" in self.track_dict: + if ( + isinstance(self.spcomm, Spoke) + and not hasattr(self.spcomm, "hub_outer_bound") + and not hasattr(self.spcomm, "hub_inner_bound") + ): + raise RuntimeError( + "Cannot access hub gaps without passing" " them to spcomm" + ) def get_var_names(self, xbar=False): - """ Get the names of the variables - """ + """Get the names of the variables""" var_names = [] - for (sname, model) in self.opt.local_scenarios.items(): + for sname, model in self.opt.local_scenarios.items(): for node in model._mpisppy_node_list: for var in node.nonant_vardata_list: - var_names.append(var.name if xbar else (sname, var.name)) + var_names.append(var.name if xbar else (sname, var.name)) if xbar: break return var_names def get_scen_colnames(self): - """ Get the names of the scenarios - """ - scen_names = [(sname, b) for sname in self.opt.local_scenarios.keys() - for b in ['ub', 'lb']] + """Get the names of the scenarios""" + 
scen_names = [ + (sname, b) + for sname in self.opt.local_scenarios.keys() + for b in ["ub", "lb"] + ] return scen_names def initialize_df_columns(self, track_var): - """ Create dataframes for saving the data by defining the columns - """ + """Create dataframes for saving the data by defining the columns""" if (track_var not in self._reduce_types) and self._rank != 0: return - if track_var == 'gaps': - df_columns = ['hub abs. gap', 'hub rel. gap'] + if track_var == "gaps": + df_columns = ["hub abs. gap", "hub rel. gap"] if isinstance(self.spcomm, Spoke): - df_columns += ['spoke abs. gap', 'spoke rel. gap'] - elif track_var == 'bounds': - df_columns = ['hub upper bound', 'hub lower bound'] + df_columns += ["spoke abs. gap", "spoke rel. gap"] + elif track_var == "bounds": + df_columns = ["hub upper bound", "hub lower bound"] if isinstance(self.spcomm, Spoke): - df_columns += ['spoke bound'] - elif track_var == 'nonants': + df_columns += ["spoke bound"] + elif track_var == "nonants": df_columns = self.get_var_names() - elif track_var == 'duals': + elif track_var == "duals": df_columns = self.get_var_names() - elif track_var == 'xbars': + elif track_var == "xbars": df_columns = self.get_var_names(xbar=True) - elif track_var == 'scen_gaps': + elif track_var == "scen_gaps": df_columns = self.get_scen_colnames() - elif track_var == 'reduced_costs': + elif track_var == "reduced_costs": df_columns = self.get_var_names(xbar=True) else: raise RuntimeError("track_var not recognized") if self._rank == 0 and track_var not in self._reduce_types: - df_columns.insert(0, 'iteration') + df_columns.insert(0, "iteration") self.track_dict[track_var].initialize_df(df_columns) else: - comm = self.opt.comms['ROOT'] + comm = self.opt.comms["ROOT"] df_columns = comm.gather(df_columns, root=0) if self._rank == 0: df_columns = df_columns[0] - df_columns.insert(0, 'iteration') + df_columns.insert(0, "iteration") self.track_dict[track_var].initialize_df(df_columns) def _add_data_and_write(self, track_var, data, gather=True, final=False): - """ Gather the data from all ranks and write it out + """Gather the data from all ranks and write it out Args: track_var (str): the variable to track data (dict): the data to write out @@ -266,7 +284,7 @@ def _add_data_and_write(self, track_var, data, gather=True, final=False): return if gather: - comm = self.opt.comms['ROOT'] + comm = self.opt.comms["ROOT"] data = comm.gather(data, root=0) # LRL: Bugfix to only get gathered data on rank 0. 
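# --- editor's note (illustrative sketch, not part of the diff) ----------
# _add_data_and_write() above relies on a gather-to-root idiom: every
# rank contributes its local dict and only rank 0 touches the result
# (the tracker keeps element [0] of the gathered list; the sketch below
# merges all ranks instead, the more common variant).  A self-contained
# illustration with mpi4py, separate from mpisppy's own comms objects:
from mpi4py import MPI

def gather_and_write(comm, local_data, writer):
    gathered = comm.gather(local_data, root=0)  # None off-root, list on rank 0
    if comm.Get_rank() == 0:
        merged = {}
        for part in gathered:       # ranks own disjoint scenario keys
            merged.update(part)
        writer(merged)              # e.g. buffer one CSV row

if __name__ == "__main__":
    comm = MPI.COMM_WORLD
    gather_and_write(comm, {("Scen%d" % comm.Get_rank(), "x"): 1.0}, print)
# -------------------------------------------------------------------------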
@@ -274,7 +292,7 @@ def _add_data_and_write(self, track_var, data, gather=True, final=False): data = data[0] if isinstance(data, dict): - data['iteration'] = self.curr_iter + data["iteration"] = self.curr_iter elif isinstance(data, list): data.insert(0, self.curr_iter) @@ -293,7 +311,7 @@ def _get_bounds(self): hub_inner_bound = self.spcomm.BestInnerBound hub_outer_bound = self.spcomm.BestOuterBound return hub_outer_bound, hub_inner_bound, spoke_bound - + def _get_rc(self): if not isinstance(self.spcomm, ReducedCostsSpoke): return None @@ -301,7 +319,7 @@ def _get_rc(self): return reduced_costs def _ob_ib_process(self, ob, ib): - """ process the outer and inner bounds + """process the outer and inner bounds Args: ob (float): outer bound ib (float): inner bound @@ -318,8 +336,7 @@ def _ob_ib_process(self, ob, ib): return ub, lb def add_bounds(self, final=False): - """ add iteration bounds to row - """ + """add iteration bounds to row""" if self._rank != 0: return hub_outer_bound, hub_inner_bound, spoke_bound = self._get_bounds() @@ -329,11 +346,10 @@ def add_bounds(self, final=False): row = [upper_bound, lower_bound, spoke_bound] else: row = [upper_bound, lower_bound] - self._add_data_and_write('bounds', row, gather=False, final=final) + self._add_data_and_write("bounds", row, gather=False, final=final) def _compute_rel_gap(self, abs_gap, outer_bound): - """ compute the relative gap using outer bound as the denominator - """ + """compute the relative gap using outer bound as the denominator""" ## define by the best solution, as is common nano = float("nan") # typing aid if ( @@ -350,8 +366,8 @@ def _compute_rel_gap(self, abs_gap, outer_bound): return rel_gap def add_gaps(self, final=False): - """ add iteration gaps to row; spoke gap is computed relative to if it is - an outer bound or inner bound spoke + """add iteration gaps to row; spoke gap is computed relative to if it is + an outer bound or inner bound spoke """ if self._rank != 0: return @@ -366,50 +382,53 @@ def add_gaps(self, final=False): # the spoke is worse than the hub if ConvergerSpokeType.OUTER_BOUND in self.spcomm.converger_spoke_types: - spoke_abs_gap = spoke_bound - hub_outer_bound \ - if self.opt.is_minimizing else hub_outer_bound - spoke_bound + spoke_abs_gap = ( + spoke_bound - hub_outer_bound + if self.opt.is_minimizing + else hub_outer_bound - spoke_bound + ) spoke_rel_gap = self._compute_rel_gap(spoke_abs_gap, hub_outer_bound) elif ConvergerSpokeType.INNER_BOUND in self.spcomm.converger_spoke_types: - spoke_abs_gap = hub_inner_bound - spoke_bound \ - if self.opt.is_minimizing else spoke_bound - hub_inner_bound + spoke_abs_gap = ( + hub_inner_bound - spoke_bound + if self.opt.is_minimizing + else spoke_bound - hub_inner_bound + ) spoke_rel_gap = self._compute_rel_gap(spoke_abs_gap, hub_inner_bound) else: raise RuntimeError("Converger spoke type not recognized") row = [hub_abs_gap, hub_rel_gap, spoke_abs_gap, spoke_rel_gap] else: row = [hub_abs_gap, hub_rel_gap] - self._add_data_and_write('gaps', row, gather=False, final=final) + self._add_data_and_write("gaps", row, gather=False, final=final) def add_scen_gaps(self, final=False): - """ add iteration scenario gaps to row - """ + """add iteration scenario gaps to row""" s_gaps = {} - for (sname, scenario) in self.opt.local_scenarios.items(): + for sname, scenario in self.opt.local_scenarios.items(): ob, ib = None, None - if hasattr(scenario._mpisppy_data, 'outer_bound'): + if hasattr(scenario._mpisppy_data, "outer_bound"): ob = scenario._mpisppy_data.outer_bound - if 
hasattr(scenario._mpisppy_data, 'inner_bound'): + if hasattr(scenario._mpisppy_data, "inner_bound"): ib = scenario._mpisppy_data.inner_bound ub, lb = self._ob_ib_process(ob, ib) - s_gaps[(sname, 'ub')] = ub - s_gaps[(sname, 'lb')] = lb + s_gaps[(sname, "ub")] = ub + s_gaps[(sname, "lb")] = lb - self._add_data_and_write('scen_gaps', s_gaps, gather=True, final=final) + self._add_data_and_write("scen_gaps", s_gaps, gather=True, final=final) def add_nonants(self, final=False): - """ add iteration nonants to row - """ + """add iteration nonants to row""" nonants = {} for k, s in self.opt.local_scenarios.items(): for node in s._mpisppy_node_list: for var in node.nonant_vardata_list: nonants[(k, var.name)] = var.value - self._add_data_and_write('nonants', nonants, gather=True, final=final) + self._add_data_and_write("nonants", nonants, gather=True, final=final) def add_rc(self, final=False): - """ add iteration reduced costs to rpw - """ + """add iteration reduced costs to rpw""" if self._rank != 0: return @@ -420,73 +439,84 @@ def add_rc(self, final=False): rc = {} sname = list(self.opt.local_scenarios.keys())[0] s = self.opt.local_scenarios[sname] - rc = {xvar.name: reduced_costs[ci] - for ci, (ndn_i, xvar) in enumerate(s._mpisppy_data.nonant_indices.items())} + rc = { + xvar.name: reduced_costs[ci] + for ci, (ndn_i, xvar) in enumerate(s._mpisppy_data.nonant_indices.items()) + } - self._add_data_and_write('reduced_costs', rc, gather=False, final=final) + self._add_data_and_write("reduced_costs", rc, gather=False, final=final) def add_duals(self, final=False): - """ add iteration duals to rpw - """ - local_duals_data = {(sname, var.name): - scenario._mpisppy_model.W[node.name, ix]._value - for (sname, scenario) in self.opt.local_scenarios.items() - for node in scenario._mpisppy_node_list - for (ix, var) in enumerate(node.nonant_vardata_list)} + """add iteration duals to rpw""" + local_duals_data = { + (sname, var.name): scenario._mpisppy_model.W[node.name, ix]._value + for (sname, scenario) in self.opt.local_scenarios.items() + for node in scenario._mpisppy_node_list + for (ix, var) in enumerate(node.nonant_vardata_list) + } - self._add_data_and_write('duals', local_duals_data, gather=True, final=final) + self._add_data_and_write("duals", local_duals_data, gather=True, final=final) def add_xbars(self, final=False): - """ add iteration xbars to xbars_df - """ + """add iteration xbars to xbars_df""" if self._rank != 0: return sname = list(self.opt.local_scenarios.keys())[0] scenario = self.opt.local_scenarios[sname] - xbars = {var.name: scenario._mpisppy_model.xbars[node.name, ix]._value - for node in scenario._mpisppy_node_list - for (ix, var) in enumerate(node.nonant_vardata_list)} + xbars = { + var.name: scenario._mpisppy_model.xbars[node.name, ix]._value + for node in scenario._mpisppy_node_list + for (ix, var) in enumerate(node.nonant_vardata_list) + } - self._add_data_and_write('xbars', xbars, gather=False, final=final) + self._add_data_and_write("xbars", xbars, gather=False, final=final) def plot_gaps(self, var): - ''' plot the gaps; Assumes gaps are saved in a csv file - ''' - + """plot the gaps; Assumes gaps are saved in a csv file""" - df = pd.read_csv(self.track_dict[var].fname, sep=',') + df = pd.read_csv(self.track_dict[var].fname, sep=",") df = df.replace([np.inf, -np.inf], np.nan) - plt.figure(figsize=(10, 6)) - plt.plot(df['iteration'], df['hub abs. gap'], marker='o', label='Hub Abs. Gap') - plt.plot(df['iteration'], df['hub rel. gap'], marker='o', label='Hub Rel. 
Gap') + plt.plot(df["iteration"], df["hub abs. gap"], marker="o", label="Hub Abs. Gap") + plt.plot(df["iteration"], df["hub rel. gap"], marker="o", label="Hub Rel. Gap") if isinstance(self.spcomm, Spoke): - plt.plot(df['iteration'], df['spoke abs. gap'], marker='o', label='Spoke Abs. Gap') - plt.plot(df['iteration'], df['spoke rel. gap'], marker='o', label='Spoke Rel. Gap') - - plt.xlabel('Iteration') - plt.ylabel('Value (log scale)') - plt.title('Absolute and Relative Gaps Over Iterations') + plt.plot( + df["iteration"], + df["spoke abs. gap"], + marker="o", + label="Spoke Abs. Gap", + ) + plt.plot( + df["iteration"], + df["spoke rel. gap"], + marker="o", + label="Spoke Rel. Gap", + ) + + plt.xlabel("Iteration") + plt.ylabel("Value (log scale)") + plt.title("Absolute and Relative Gaps Over Iterations") plt.legend() - plt.grid(True, which='major', linestyle='-', linewidth='0.5') - plt.grid(True, which='minor', linestyle='--', linewidth='0.5') + plt.grid(True, which="major", linestyle="-", linewidth="0.5") + plt.grid(True, which="minor", linestyle="--", linewidth="0.5") plt.savefig(self.track_dict[var].plot_fname) plt.close() def plot_nonants_sgaps_duals(self, var): - ''' plot the nonants/scene gaps/dual values; Assumes var is saved in a csv file - ''' + """plot the nonants/scene gaps/dual values; Assumes var is saved in a csv file""" if self._rank != 0: return - if var not in ['nonants', 'duals', 'scen_gaps']: + if var not in ["nonants", "duals", "scen_gaps"]: raise RuntimeError("var must be either nonants or duals") - df = pd.read_csv(self.track_dict[var].fname, sep=',') + df = pd.read_csv(self.track_dict[var].fname, sep=",") df = df.replace([np.inf, -np.inf], np.nan) - df.columns = [col if col == 'iteration' else ast.literal_eval(col) for col in df.columns] + df.columns = [ + col if col == "iteration" else ast.literal_eval(col) for col in df.columns + ] plt.figure(figsize=(16, 6)) # Adjust the figure size as needed column_names = df.columns[1:] @@ -494,48 +524,48 @@ def plot_nonants_sgaps_duals(self, var): for col in column_names: scenario, variable = col - label = f'{scenario} {variable}' - plt.plot(df['iteration'], df[col], label=label) + label = f"{scenario} {variable}" + plt.plot(df["iteration"], df[col], label=label) - plt.legend(loc='lower right') - plt.xlabel('Iteration') - plt.ylabel(f'{var.capitalize()} Value') + plt.legend(loc="lower right") + plt.xlabel("Iteration") + plt.ylabel(f"{var.capitalize()} Value") plt.grid(True) plt.savefig(self.track_dict[var].plot_fname) plt.close() def plot_xbars_bounds_gaps(self, var): - ''' plot the xbar/bounds/gaps values; - Assumes xbars/bounds/gaps are saved in a csv file - ''' + """plot the xbar/bounds/gaps values; + Assumes xbars/bounds/gaps are saved in a csv file + """ if self._rank != 0: return - if var not in ['xbars', 'bounds', 'gaps']: - raise RuntimeError('var must be either xbars, bounds, or gaps') + if var not in ["xbars", "bounds", "gaps"]: + raise RuntimeError("var must be either xbars, bounds, or gaps") - df = pd.read_csv(self.track_dict[var].fname, sep=',') + df = pd.read_csv(self.track_dict[var].fname, sep=",") df = df.replace([np.inf, -np.inf], np.nan) plt.figure(figsize=(10, 6)) # Adjust the figure size as needed column_names = df.columns[1:] for col in column_names: - plt.plot(df['iteration'], df[col], label=col.capitalize()) + plt.plot(df["iteration"], df[col], label=col.capitalize()) - if var == 'gaps': + if var == "gaps": df = df.dropna() - nonzero_gaps = df[df['hub rel. gap'] != 0]['hub rel. 
gap'] + nonzero_gaps = df[df["hub rel. gap"] != 0]["hub rel. gap"] if len(nonzero_gaps) > 0: threshold = np.percentile(nonzero_gaps, 10) - plt.yscale('symlog', linthresh=threshold) + plt.yscale("symlog", linthresh=threshold) else: if self.verbose: print("WARNING: No nonzero gaps to compute threshold") - plt.xlabel('Iteration') - plt.ylabel(f'{var.capitalize()} values') - plt.title(f'{var.capitalize()} Over Iterations') + plt.xlabel("Iteration") + plt.ylabel(f"{var.capitalize()} values") + plt.title(f"{var.capitalize()} Over Iterations") plt.legend() plt.grid(True) plt.savefig(self.track_dict[var].plot_fname) @@ -545,23 +575,23 @@ def plot_rc(self, var): if self._rank != 0: return - if var not in ['reduced_costs']: - raise RuntimeError('var must be reduced_costs') + if var not in ["reduced_costs"]: + raise RuntimeError("var must be reduced_costs") - df = pd.read_csv(self.track_dict[var].fname, sep=',') + df = pd.read_csv(self.track_dict[var].fname, sep=",") df = df.replace([np.inf, -np.inf], np.nan) - #df.dropna(inplace=True) + # df.dropna(inplace=True) plt.figure(figsize=(10, 6)) # Adjust the figure size as needed column_names = df.columns[1:] for col in column_names: if not np.isnan(df[col]).all(): - plt.plot(df['iteration'], df[col], label=col.capitalize()) + plt.plot(df["iteration"], df[col], label=col.capitalize()) - plt.xlabel('Iteration') - plt.ylabel(f'{var.capitalize()} values') - plt.title(f'{var.capitalize()} Over Iterations') + plt.xlabel("Iteration") + plt.ylabel(f"{var.capitalize()} values") + plt.title(f"{var.capitalize()} Over Iterations") plt.legend(bbox_to_anchor=(1, 1)) plt.grid(True) plt.savefig(self.track_dict[var].plot_fname) @@ -572,9 +602,9 @@ def pre_solve_loop(self): self.finish_init() if self.curr_iter % self.save_every == 0: for track_var in self.track_dict.keys(): - self._track_var_to_func[track_var]['track']() + self._track_var_to_func[track_var]["track"]() def post_everything(self): for track_var in self.track_dict.keys(): - self._track_var_to_func[track_var]['track'](final=True) - self._track_var_to_func[track_var]['finalize'](var=track_var) + self._track_var_to_func[track_var]["track"](final=True) + self._track_var_to_func[track_var]["finalize"](var=track_var) diff --git a/mpisppy/extensions/reduced_costs_fixer.py b/mpisppy/extensions/reduced_costs_fixer.py index 6b36f053a..b45bc631c 100644 --- a/mpisppy/extensions/reduced_costs_fixer.py +++ b/mpisppy/extensions/reduced_costs_fixer.py @@ -10,43 +10,44 @@ from mpisppy.extensions.extension import Extension -from mpisppy.cylinders.reduced_costs_spoke import ReducedCostsSpoke +from mpisppy.cylinders.reduced_costs_spoke import ReducedCostsSpoke from mpisppy.utils.sputils import is_persistent -class ReducedCostsFixer(Extension): +class ReducedCostsFixer(Extension): def __init__(self, spobj): super().__init__(spobj) ph_options = spobj.options - rc_options = ph_options['rc_options'] - self.verbose = ph_options['verbose'] or rc_options['verbose'] - self.debug = rc_options['debug'] + rc_options = ph_options["rc_options"] + self.verbose = ph_options["verbose"] or rc_options["verbose"] + self.debug = rc_options["debug"] - self._use_rc_bt = rc_options['use_rc_bt'] + self._use_rc_bt = rc_options["use_rc_bt"] # reduced costs less than this in absolute value # will be considered 0 - self.zero_rc_tol = rc_options['zero_rc_tol'] - self._use_rc_fixer = rc_options['use_rc_fixer'] + self.zero_rc_tol = rc_options["zero_rc_tol"] + self._use_rc_fixer = rc_options["use_rc_fixer"] # Percentage of variables which are at the bound we 
will target # to fix. We never fix variables with reduced costs less than # the `zero_rc_tol` in absolute value - self._fix_fraction_target_iter0 = rc_options['fix_fraction_target_iter0'] + self._fix_fraction_target_iter0 = rc_options["fix_fraction_target_iter0"] if self._fix_fraction_target_iter0 < 0 or self._fix_fraction_target_iter0 > 1: raise ValueError("fix_fraction_target_iter0 must be between 0 and 1") - self._fix_fraction_target_iterK = rc_options['fix_fraction_target_iterK'] + self._fix_fraction_target_iterK = rc_options["fix_fraction_target_iterK"] if self._fix_fraction_target_iterK < 0 or self._fix_fraction_target_iterK > 1: raise ValueError("fix_fraction_target_iterK must be between 0 and 1") self.fix_fraction_target = self._fix_fraction_target_iter0 # TODO: This should be same as in rc spoke? - self.bound_tol = rc_options['rc_bound_tol'] + self.bound_tol = rc_options["rc_bound_tol"] - if not (self._use_rc_bt or self._use_rc_fixer) and \ - self.opt.cylinder_rank == 0: - print("Warning: ReducedCostsFixer will be idle. Enable use_rc_bt or use_rc_fixer in options.") + if not (self._use_rc_bt or self._use_rc_fixer) and self.opt.cylinder_rank == 0: + print( + "Warning: ReducedCostsFixer will be idle. Enable use_rc_bt or use_rc_fixer in options." + ) - #self._options = rc_options + # self._options = rc_options self._last_serial_number = -1 self._heuristic_fixed_vars = 0 @@ -55,7 +56,7 @@ def pre_iter0(self): self._modeler_fixed_nonants = set() self._integer_nonants = [] self.nonant_length = self.opt.nonant_length - for k,s in self.opt.local_scenarios.items(): + for k, s in self.opt.local_scenarios.items(): for ndn_i, xvar in s._mpisppy_data.nonant_indices.items(): if xvar.fixed: self._modeler_fixed_nonants.add(ndn_i) @@ -63,12 +64,12 @@ def pre_iter0(self): if xvar.is_integer(): self._integer_nonants.append(ndn_i) break - + def post_iter0_after_sync(self): self.fix_fraction_target = self._fix_fraction_target_iterK def initialize_spoke_indices(self): - for (i, spoke) in enumerate(self.opt.spcomm.spokes): + for i, spoke in enumerate(self.opt.spcomm.spokes): if spoke["spoke_class"] == ReducedCostsSpoke: self.reduced_costs_spoke_index = i + 1 @@ -78,7 +79,9 @@ def sync_with_spokes(self): serial_number = int(round(spcomm.outerbound_receive_buffers[idx][-1])) if serial_number > self._last_serial_number: self._last_serial_number = serial_number - reduced_costs = spcomm.outerbound_receive_buffers[idx][1:1+self.nonant_length] + reduced_costs = spcomm.outerbound_receive_buffers[idx][ + 1 : 1 + self.nonant_length + ] this_outer_bound = spcomm.outerbound_receive_buffers[idx][0] if self._use_rc_bt: self.reduced_costs_bounds_tightening(reduced_costs, this_outer_bound) @@ -88,16 +91,16 @@ def sync_with_spokes(self): if self.opt.cylinder_rank == 0 and self.verbose: print("No new reduced costs!") - def reduced_costs_bounds_tightening(self, reduced_costs, this_outer_bound): - bounds_reduced_this_iter = 0 inner_bound = self.opt.spcomm.BestInnerBound outer_bound = this_outer_bound is_minimizing = self.opt.is_minimizing if np.isinf(inner_bound) or np.isinf(outer_bound): if self.opt.cylinder_rank == 0 and self.verbose: - print("Bounds tightened by reduced cost: 0 (inner or outer bound not available)") + print( + "Bounds tightened by reduced cost: 0 (inner or outer bound not available)" + ) return for sub in self.opt.local_subproblems.values(): @@ -105,7 +108,9 @@ def reduced_costs_bounds_tightening(self, reduced_costs, this_outer_bound): for sn in sub.scen_list: tightened_this_scenario = 0 s = 
self.opt.local_scenarios[sn] - for ci, (ndn_i, xvar) in enumerate(s._mpisppy_data.nonant_indices.items()): + for ci, (ndn_i, xvar) in enumerate( + s._mpisppy_data.nonant_indices.items() + ): if ndn_i in self._modeler_fixed_nonants: continue this_expected_rc = reduced_costs[ci] @@ -121,7 +126,9 @@ def reduced_costs_bounds_tightening(self, reduced_costs, this_outer_bound): if is_minimizing: # var at lb if this_expected_rc > 0 + self.zero_rc_tol: - new_ub = xvar.lb + (inner_bound - outer_bound)/ this_expected_rc + new_ub = ( + xvar.lb + (inner_bound - outer_bound) / this_expected_rc + ) old_ub = xvar.ub if new_ub < old_ub: if ndn_i in self._integer_nonants: @@ -130,13 +137,17 @@ def reduced_costs_bounds_tightening(self, reduced_costs, this_outer_bound): else: xvar.setub(new_ub) if self.debug and self.opt.cylinder_rank == 0: - print(f"tightening ub of var {xvar.name} to {new_ub} from {old_ub}; reduced cost is {this_expected_rc}") + print( + f"tightening ub of var {xvar.name} to {new_ub} from {old_ub}; reduced cost is {this_expected_rc}" + ) update_var = True bounds_reduced_this_iter += 1 tightened_this_scenario += 1 # var at ub elif this_expected_rc < 0 - self.zero_rc_tol: - new_lb = xvar.ub + (inner_bound - outer_bound)/ this_expected_rc + new_lb = ( + xvar.ub + (inner_bound - outer_bound) / this_expected_rc + ) old_lb = xvar.lb if new_lb > old_lb: if ndn_i in self._integer_nonants: @@ -145,7 +156,9 @@ def reduced_costs_bounds_tightening(self, reduced_costs, this_outer_bound): else: xvar.setlb(new_lb) if self.debug and self.opt.cylinder_rank == 0: - print(f"tightening lb of var {xvar.name} to {new_lb} from {old_lb}; reduced cost is {this_expected_rc}") + print( + f"tightening lb of var {xvar.name} to {new_lb} from {old_lb}; reduced cost is {this_expected_rc}" + ) update_var = True bounds_reduced_this_iter += 1 tightened_this_scenario += 1 @@ -153,7 +166,9 @@ def reduced_costs_bounds_tightening(self, reduced_costs, this_outer_bound): else: # var at lb if this_expected_rc < 0 - self.zero_rc_tol: - new_ub = xvar.lb - (outer_bound - inner_bound)/ this_expected_rc + new_ub = ( + xvar.lb - (outer_bound - inner_bound) / this_expected_rc + ) old_ub = xvar.ub if new_ub < old_ub: if ndn_i in self._integer_nonants: @@ -162,12 +177,16 @@ def reduced_costs_bounds_tightening(self, reduced_costs, this_outer_bound): else: xvar.setub(new_ub) if self.debug and self.opt.cylinder_rank == 0: - print(f"tightening ub of var {xvar.name} to {new_ub} from {old_ub}; reduced cost is {this_expected_rc}") + print( + f"tightening ub of var {xvar.name} to {new_ub} from {old_ub}; reduced cost is {this_expected_rc}" + ) update_var = True bounds_reduced_this_iter += 1 # var at ub elif this_expected_rc > 0 + self.zero_rc_tol: - new_lb = xvar.ub - (outer_bound - inner_bound)/ this_expected_rc + new_lb = ( + xvar.ub - (outer_bound - inner_bound) / this_expected_rc + ) old_lb = xvar.lb if new_lb > old_lb: if ndn_i in self._integer_nonants: @@ -176,27 +195,31 @@ def reduced_costs_bounds_tightening(self, reduced_costs, this_outer_bound): else: xvar.setlb(new_lb) if self.debug and self.opt.cylinder_rank == 0: - print(f"tightening lb of var {xvar.name} to {new_lb} from {old_lb}; reduced cost is {this_expected_rc}") + print( + f"tightening lb of var {xvar.name} to {new_lb} from {old_lb}; reduced cost is {this_expected_rc}" + ) update_var = True bounds_reduced_this_iter += 1 if update_var and persistent_solver: sub._solver_plugin.update_var(xvar) - total_bounds_tightened = bounds_reduced_this_iter / len(self.opt.local_scenarios) + 
total_bounds_tightened = bounds_reduced_this_iter / len( + self.opt.local_scenarios + ) if self.opt.cylinder_rank == 0 and self.verbose: - print(f"Bounds tightened by reduced cost: {int(round(total_bounds_tightened))}/{self.nonant_length}") - + print( + f"Bounds tightened by reduced cost: {int(round(total_bounds_tightened))}/{self.nonant_length}" + ) def reduced_costs_fixing(self, reduced_costs): - if np.all(np.isnan(reduced_costs)): # Note: If all rc = nan at some later iteration, # this will skip unfixing if self.opt.cylinder_rank == 0 and self.verbose: print("All reduced costs are nan, heuristic fixing will not be applied") return - + # compute the quantile target abs_reduced_costs = np.abs(reduced_costs) @@ -208,7 +231,9 @@ def reduced_costs_fixing(self, reduced_costs): # still need to continue, for unfixing target = self.zero_rc_tol else: - target = np.nanquantile(nonzero_rc, 1 - fix_fraction_target, method="median_unbiased") + target = np.nanquantile( + nonzero_rc, 1 - fix_fraction_target, method="median_unbiased" + ) if target < self.zero_rc_tol: # shouldn't be reached @@ -218,12 +243,14 @@ def reduced_costs_fixing(self, reduced_costs): print(f"Heuristic fixing reduced cost cutoff: {target}") raw_fixed_this_iter = 0 - + for sub in self.opt.local_subproblems.values(): persistent_solver = is_persistent(sub._solver_plugin) for sn in sub.scen_list: s = self.opt.local_scenarios[sn] - for ci, (ndn_i, xvar) in enumerate(s._mpisppy_data.nonant_indices.items()): + for ci, (ndn_i, xvar) in enumerate( + s._mpisppy_data.nonant_indices.items() + ): if ndn_i in self._modeler_fixed_nonants: continue this_expected_rc = abs_reduced_costs[ci] @@ -235,58 +262,82 @@ def reduced_costs_fixing(self, reduced_costs): update_var = True raw_fixed_this_iter -= 1 if self.debug and self.opt.cylinder_rank == 0: - print(f"unfixing var {xvar.name}; not converged in LP-LR") - else: # not nan, variable is converged in LP-LR + print( + f"unfixing var {xvar.name}; not converged in LP-LR" + ) + else: # not nan, variable is converged in LP-LR if xvar.fixed: - if (this_expected_rc <= target): + if this_expected_rc <= target: xvar.unfix() update_var = True raw_fixed_this_iter -= 1 if self.debug and self.opt.cylinder_rank == 0: - print(f"unfixing var {xvar.name}; reduced cost is zero/below target in LP-LR") + print( + f"unfixing var {xvar.name}; reduced cost is zero/below target in LP-LR" + ) else: xb = s._mpisppy_model.xbars[ndn_i].value - if (this_expected_rc >= target): + if this_expected_rc >= target: if self.opt.is_minimizing: # TODO: First check can be simplified as abs(rc) is already checked above - if (reduced_costs[ci] > 0 + self.zero_rc_tol) and (xb - xvar.lb <= self.bound_tol): + if (reduced_costs[ci] > 0 + self.zero_rc_tol) and ( + xb - xvar.lb <= self.bound_tol + ): xvar.fix(xvar.lb) if self.debug and self.opt.cylinder_rank == 0: - print(f"fixing var {xvar.name} to lb {xvar.lb}; reduced cost is {reduced_costs[ci]} LP-LR") + print( + f"fixing var {xvar.name} to lb {xvar.lb}; reduced cost is {reduced_costs[ci]} LP-LR" + ) update_var = True raw_fixed_this_iter += 1 - elif (reduced_costs[ci] < 0 - self.zero_rc_tol) and (xvar.ub - xb <= self.bound_tol): + elif ( + reduced_costs[ci] < 0 - self.zero_rc_tol + ) and (xvar.ub - xb <= self.bound_tol): xvar.fix(xvar.ub) if self.debug and self.opt.cylinder_rank == 0: - print(f"fixing var {xvar.name} to ub {xvar.ub}; reduced cost is {reduced_costs[ci]} LP-LR") + print( + f"fixing var {xvar.name} to ub {xvar.ub}; reduced cost is {reduced_costs[ci]} LP-LR" + ) update_var = 
True raw_fixed_this_iter += 1 else: - # rc is near 0 or + # rc is near 0 or # xbar from MIP might differ from rc from relaxation pass else: - if (reduced_costs[ci] < 0 - self.zero_rc_tol) and (xb - xvar.lb <= self.bound_tol): + if (reduced_costs[ci] < 0 - self.zero_rc_tol) and ( + xb - xvar.lb <= self.bound_tol + ): xvar.fix(xvar.lb) if self.debug and self.opt.cylinder_rank == 0: - print(f"fixing var {xvar.name} to lb {xvar.lb}; reduced cost is {reduced_costs[ci]} LP-LR") + print( + f"fixing var {xvar.name} to lb {xvar.lb}; reduced cost is {reduced_costs[ci]} LP-LR" + ) update_var = True raw_fixed_this_iter += 1 - elif (reduced_costs[ci] > 0 + self.zero_rc_tol) and (xvar.ub - xb <= self.bound_tol): + elif ( + reduced_costs[ci] > 0 + self.zero_rc_tol + ) and (xvar.ub - xb <= self.bound_tol): xvar.fix(xvar.ub) if self.debug and self.opt.cylinder_rank == 0: - print(f"fixing var {xvar.name} to ub {xvar.ub}; reduced cost is {reduced_costs[ci]} LP-LR") + print( + f"fixing var {xvar.name} to ub {xvar.ub}; reduced cost is {reduced_costs[ci]} LP-LR" + ) update_var = True raw_fixed_this_iter += 1 else: - # rc is near 0 or + # rc is near 0 or # xbar from MIP might differ from rc from relaxation pass - + if update_var and persistent_solver: sub._solver_plugin.update_var(xvar) # Note: might count incorrectly with bundling? - self._heuristic_fixed_vars += raw_fixed_this_iter / len(self.opt.local_scenarios) + self._heuristic_fixed_vars += raw_fixed_this_iter / len( + self.opt.local_scenarios + ) if self.opt.cylinder_rank == 0 and self.verbose: - print(f"Total unique vars fixed by heuristic: {int(round(self._heuristic_fixed_vars))}/{self.nonant_length}") + print( + f"Total unique vars fixed by heuristic: {int(round(self._heuristic_fixed_vars))}/{self.nonant_length}" + ) diff --git a/mpisppy/extensions/test_extension.py b/mpisppy/extensions/test_extension.py index d25c58f3d..db5d44dd2 100644 --- a/mpisppy/extensions/test_extension.py +++ b/mpisppy/extensions/test_extension.py @@ -12,56 +12,47 @@ import mpisppy.extensions.xhatbase + class TestExtension(mpisppy.extensions.extension.Extension): """ Args: spo (SPOpt object): the calling object """ + def __init__(self, spo): super().__init__(spo) self.who_is_called = set() # make it a little easier to find out who has been called - self.opt._TestExtension_who_is_called = self.who_is_called + self.opt._TestExtension_who_is_called = self.who_is_called - def pre_solve(self, subproblem): self.who_is_called.add("pre_solve") - def post_solve_loop(self): self.who_is_called.add("post_solve_loop") - def post_solve(self, subproblem, results): self.who_is_called.add("post_solve") return results - def pre_iter0(self): self.who_is_called.add("pre_iter0") - def post_iter0(self): self.who_is_called.add("post_iter0") - def post_iter0_after_sync(self): self.who_is_called.add("post_iter0_after_sync") - - + def miditer(self): self.who_is_called.add("miditer") - def enditer(self): self.who_is_called.add("enditer") print(f"end_iter {self.who_is_called =}") - - def enditer_after_sync(self): + def enditer_after_sync(self): self.who_is_called.add("enditer_after_sync") - def post_everything(self): self.who_is_called.add("post_everything") - diff --git a/mpisppy/extensions/wtracker_extension.py b/mpisppy/extensions/wtracker_extension.py index 429521316..7ead6e45c 100644 --- a/mpisppy/extensions/wtracker_extension.py +++ b/mpisppy/extensions/wtracker_extension.py @@ -14,16 +14,17 @@ class Wtracker_extension(mpisppy.extensions.extension.Extension): """ - wrap the wtracker code as an 
extension - - Args: - opt (PHBase (inherets from SPOpt) object): gives the problem that we bound - - Attributes: - scenario_name_to_rank (dict of dict): nodes (i.e. comms) scen names - keys are comms (i.e., tree nodes); values are dicts with keys - that are scenario names and values that are ranks + wrap the wtracker code as an extension + + Args: + opt (PHBase (inherits from SPOpt) object): gives the problem that we bound + + Attributes: + scenario_name_to_rank (dict of dict): nodes (i.e. comms) scen names + keys are comms (i.e., tree nodes); values are dicts with keys + that are scenario names and values that are ranks """ + def __init__(self, opt, comm=None): super().__init__(opt) self.cylinder_rank = self.opt.cylinder_rank @@ -38,7 +39,7 @@ def pre_iter0(self): def post_iter0(self): pass - + def miditer(self): pass @@ -49,9 +50,9 @@ def post_everything(self): reportlen = self.options.get("reportlen") stdevthresh = self.options.get("stdevthresh") file_prefix = self.options.get("file_prefix") - self.wtracker.report_by_moving_stats(self.wlen, - reportlen=reportlen, - stdevthresh=stdevthresh, - file_prefix=file_prefix) - - + self.wtracker.report_by_moving_stats( + self.wlen, + reportlen=reportlen, + stdevthresh=stdevthresh, + file_prefix=file_prefix, + ) diff --git a/mpisppy/extensions/xhatbase.py b/mpisppy/extensions/xhatbase.py index 023a7f83f..fa0edb8d7 100644 --- a/mpisppy/extensions/xhatbase.py +++ b/mpisppy/extensions/xhatbase.py @@ -19,16 +19,17 @@ class XhatBase(mpisppy.extensions.extension.Extension): """ - Any inherited class must implement the preiter0, postiter etc. methods - - Args: - opt (SPOpt object): gives the problem that we bound + Any inherited class must implement the preiter0, postiter etc. methods + + Args: + opt (SPOpt object): gives the problem that we bound - Attributes: - scenario_name_to_rank (dict of dict): nodes (i.e. comms) scen names - keys are comms (i.e., tree nodes); values are dicts with keys - that are scenario names and values that are ranks + Attributes: + scenario_name_to_rank (dict of dict): nodes (i.e. comms) scen names + keys are comms (i.e., tree nodes); values are dicts with keys + that are scenario names and values that are ranks """ + def __init__(self, opt): super().__init__(opt) self.cylinder_rank = self.opt.cylinder_rank @@ -37,42 +38,49 @@ def __init__(self, opt): self.scenario_name_to_rank = opt.scenario_names_to_rank # dict: scenario names --> LOCAL rank number (needed mainly for xhat) - - #********** - def _try_one(self, snamedict, solver_options=None, verbose=False, - restore_nonants=True, stage2EFsolvern=None, branching_factors=None): - """ try the scenario named sname in self.opt.local_scenarios - Args: - snamedict (dict): key: scenario tree non-leaf name, val: scen name - the scen name can be None if it is not local - solver_options (dict): passed through to the solver - verbose (boolean): controls output - restore_nonants (bool): if True, restores the nonants to their original - values in all scenarios. If False, leaves the - nonants as they are in the tried scenario - stage2EFsolvern: use this for EFs based on second stage nodes for multi-stage - branching_factors (list): list of branching factors for stage2ef - NOTE: The solve loop is with fixed nonants so W and rho do not - matter to the optimization. When we want to check the obj - value we need to drop them, but we don't need to re-optimize - since all other Vars are optimized already with the nonants - fixed. 
- Returns: - obj (float or None): the expected value for sname as xhat or None - NOTE: - the stage2ef stuff is a little bit hacked-in + + # ********** + def _try_one( + self, + snamedict, + solver_options=None, + verbose=False, + restore_nonants=True, + stage2EFsolvern=None, + branching_factors=None, + ): + """try the scenario named sname in self.opt.local_scenarios + Args: + snamedict (dict): key: scenario tree non-leaf name, val: scen name + the scen name can be None if it is not local + solver_options (dict): passed through to the solver + verbose (boolean): controls output + restore_nonants (bool): if True, restores the nonants to their original + values in all scenarios. If False, leaves the + nonants as they are in the tried scenario + stage2EFsolvern: use this for EFs based on second stage nodes for multi-stage + branching_factors (list): list of branching factors for stage2ef + NOTE: The solve loop is with fixed nonants so W and rho do not + matter to the optimization. When we want to check the obj + value we need to drop them, but we don't need to re-optimize + since all other Vars are optimized already with the nonants + fixed. + Returns: + obj (float or None): the expected value for sname as xhat or None + NOTE: + the stage2ef stuff is a little bit hacked-in """ xhats = dict() # to pass to _fix_nonants self.opt._save_nonants() # (BTW: for all local scenarios) # Special Tee option for xhat sopt = solver_options - Tee=False + Tee = False if solver_options is not None and "Tee" in solver_options: sopt = dict(solver_options) Tee = sopt["Tee"] del sopt["Tee"] - + # For now, we are going to treat two-stage as a special case if len(snamedict) == 1: sname = snamedict["ROOT"] # also serves as an assert @@ -84,8 +92,11 @@ def _try_one(self, snamedict, solver_options=None, verbose=False, try: xhats["ROOT"] = self.comms["ROOT"].bcast(xhat, root=src_rank) except: - print("rank=",self.cylinder_rank, "xhats bcast failed on src_rank={}"\ - .format(src_rank)) + print( + "rank=", + self.cylinder_rank, + "xhats bcast failed on src_rank={}".format(src_rank), + ) print("root comm size={}".format(self.comms["ROOT"].size)) raise elif stage2EFsolvern is None: # regular multi-stage @@ -109,21 +120,32 @@ def _try_one(self, snamedict, solver_options=None, verbose=False, raise RuntimeError(f"{ndn} not in snamedict={snamedict}") if snamedict[ndn] == k: # cache lists are just concated node lists - xhats[ndn] = [s._mpisppy_data.nonant_cache[i+cistart[ndn]] - for i in range(nlens[ndn])] + xhats[ndn] = [ + s._mpisppy_data.nonant_cache[i + cistart[ndn]] + for i in range(nlens[ndn]) + ] for ndn in cistart: # local nodes if snamedict[ndn] not in self.scenario_name_to_rank[ndn]: - print (f"For ndn={ndn}, snamedict[ndn] not in " - "self.scenario_name_to_rank[ndn]") + print( + f"For ndn={ndn}, snamedict[ndn] not in " + "self.scenario_name_to_rank[ndn]" + ) print(f"snamedict[ndn]={snamedict[ndn]}") - print(f"self.scenario_name_to_rank[ndn]={self.scenario_name_to_rank[ndn]}") + print( + f"self.scenario_name_to_rank[ndn]={self.scenario_name_to_rank[ndn]}" + ) raise RuntimeError("Bad scenario selection for xhat") src_rank = self.scenario_name_to_rank[ndn][snamedict[ndn]] try: xhats[ndn] = self.comms[ndn].bcast(xhats[ndn], root=src_rank) except: - print("rank=",self.cylinder_rank, "xhats bcast failed on ndn={}, src_rank={}"\ - .format(ndn,src_rank)) + print( + "rank=", + self.cylinder_rank, + "xhats bcast failed on ndn={}, src_rank={}".format( + ndn, src_rank + ), + ) raise else: # we are multi-stage with stage2ef # Form 
an ef for all local scenarios and then fix the first stage @@ -132,15 +154,21 @@ def _try_one(self, snamedict, solver_options=None, verbose=False, sname = snamedict["ROOT"] # also serves as an assert if sname in self.opt.local_scenarios: rnlen = self.opt.local_scenarios[sname]._mpisppy_data.nlens["ROOT"] - xhat = [self.opt.local_scenarios[sname]._mpisppy_data.nonant_cache[i] for i in range(rnlen)] + xhat = [ + self.opt.local_scenarios[sname]._mpisppy_data.nonant_cache[i] + for i in range(rnlen) + ] else: xhat = None src_rank = self.scenario_name_to_rank["ROOT"][sname] try: xhats["ROOT"] = self.comms["ROOT"].bcast(xhat, root=src_rank) except: - print("rank=",self.cylinder_rank, "xhats bcast failed on src_rank={}"\ - .format(src_rank)) + print( + "rank=", + self.cylinder_rank, + "xhats bcast failed on src_rank={}".format(src_rank), + ) print("root comm size={}".format(self.comms["ROOT"].size)) raise # now form the EF for the appropriate number of second-stage scenario tree nodes @@ -148,14 +176,16 @@ def _try_one(self, snamedict, solver_options=None, verbose=False, stage2cnt = branching_factors[1] rankcnt = self.n_proc # The next assert is important. - assert stage2cnt % rankcnt== 0, "for stage2ef, ranks must be a multiple of stage2 nodes" + assert ( + stage2cnt % rankcnt == 0 + ), "for stage2ef, ranks must be a multiple of stage2 nodes" nodes_per_rank = stage2cnt // rankcnt # to keep the code easier to read, we will first collect the nodenames # Important: we are assuming a standard node naming pattern using underscores. # TBD: do something that would work with underscores *in* the stage branch name # TBD: factor this local_2ndns = dict() # dict of dicts (inner dicts are for FormEF) - for k,s in self.opt.local_scenarios.items(): + for k, s in self.opt.local_scenarios.items(): if hasattr(self, "_EFs"): sputils.deact_objs(s) # create EF does this the first time for node in s._mpisppy_node_list: @@ -165,12 +195,19 @@ def _try_one(self, snamedict, solver_options=None, verbose=False, local_2ndns[ndn] = {k: s} local_2ndns[ndn][k] = s if len(local_2ndns) != nodes_per_rank: - print("stage2ef failure: nodes_per_rank=",nodes_per_rank, "local_2ndns=",local_2ndns) - raise RuntimeError("stagecnt assumes regular scenario tree and standard _ node naming") + print( + "stage2ef failure: nodes_per_rank=", + nodes_per_rank, + "local_2ndns=", + local_2ndns, + ) + raise RuntimeError( + "stagecnt assumes regular scenario tree and standard _ node naming" + ) # create an EF for each second stage node and solve with fixed nonant # TBD: factor out the EF creation! 
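+                # the EFs are built once and cached on self._EFs; subsequent xhat
+                # tries reuse them, re-fixing the ROOT nonants to the new xhat values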
if not hasattr(self, "_EFs"): - for k,s in self.opt.local_scenarios.items(): + for k, s in self.opt.local_scenarios.items(): sputils.stash_ref_objs(s) self._EFs = dict() for ndn2, sdict in local_2ndns.items(): # ndn2 will be the node name @@ -181,20 +218,24 @@ def _try_one(self, snamedict, solver_options=None, verbose=False, wxbarutils.fix_ef_ROOT_nonants(self._EFs[ndn2], xhats["ROOT"]) # solve EF solver = pyo.SolverFactory(stage2EFsolvern) - if 'persistent' in stage2EFsolvern: + if "persistent" in stage2EFsolvern: solver.set_instance(self._EFs[ndn2], symbolic_solver_labels=True) results = solver.solve(tee=Tee) else: - results = solver.solve(self._EFs[ndn2], tee=Tee, symbolic_solver_labels=True,) + results = solver.solve( + self._EFs[ndn2], + tee=Tee, + symbolic_solver_labels=True, + ) # restore objectives so Eobjective will work for s in sdict.values(): sputils.reactivate_objs(s) # if you hit infeas, return None if not pyo.check_optimal_termination(results): - self.opt._restore_nonants() - return None - + self.opt._restore_nonants() + return None + # feasible xhat found, so finish up 2EF part and return if verbose and src_rank == self.cylinder_rank: print(" Feasible xhat found:") @@ -212,13 +253,15 @@ def _try_one(self, snamedict, solver_options=None, verbose=False, self.opt._fix_nonants(xhats) # (BTW: for all local scenarios) # NOTE: for APH we may need disable_pyomo_signal_handling - self.opt.solve_loop(solver_options=sopt, - #dis_W=True, dis_prox=True, - verbose=verbose, - tee=Tee) + self.opt.solve_loop( + solver_options=sopt, + # dis_W=True, dis_prox=True, + verbose=verbose, + tee=Tee, + ) infeasP = self.opt.infeas_prob() - if infeasP != 0.: + if infeasP != 0.0: # restoring does no harm # if this solution is infeasible self.opt._restore_nonants() @@ -232,20 +275,20 @@ def _try_one(self, snamedict, solver_options=None, verbose=False, self.opt._restore_nonants() return obj - #********** + # ********** def csv_nonants(self, snamedict, fname): - """ write the non-ants in csv format to files based on the file name - (we will over-write files if they already exists) - Args: - snamedic (str): the names of the scenarios to use - fname (str): the full name of the file to which to write + """write the non-ants in csv format to files based on the file name + (we will over-write files if they already exist) + Args: + snamedict (dict): the names of the scenarios to use + fname (str): the full name of the file to which to write """ # only the rank with the requested scenario writes for ndn, sname in snamedict.items(): if sname not in self.opt.local_scenarios: continue scen = self.opt.local_scenarios[sname] - with open(fname+"_"+ndn+"_"+sname+".csv", "w") as f: + with open(fname + "_" + ndn + "_" + sname + ".csv", "w") as f: for node in scen._mpisppy_node_list: if node.name == ndn: break @@ -253,16 +296,16 @@ def csv_nonants(self, snamedict, fname): f.write(ndn) for i in range(nlens[ndn]): vardata = node.nonant_vardata_list[i] - f.write(', "'+vardata.name+'", '+str(vardata._value)) + f.write(', "' + vardata.name + '", ' + str(vardata._value)) f.write("\n") - #********** + # ********** def csv_allvars(self, snamedict, fname): - """ write all Vars in csv format to files based on the file name - (we will over-write files if they already exists) - Args: - snamedict (dict): scenario names - fname (str): the full name of the file to which to write + """write all Vars in csv format to files based on the file name + (we will over-write files if they already exist) + Args: + snamedict (dict): scenario 
names + fname (str): the full name of the file to which to write """ # only the rank with the requested scenario writes for ndn, sname in snamedict.items(): @@ -272,9 +315,10 @@ def csv_allvars(self, snamedict, fname): for node in scen._mpisppy_node_list: if node.name == ndn: break - with open(fname+"_"+ndn+"_"+sname,"w") as f: - for ((v_name, v_index), v_data)\ - in scen.component_data_iterindex(pyo.Var, active=True): + with open(fname + "_" + ndn + "_" + sname, "w") as f: + for (v_name, v_index), v_data in scen.component_data_iterindex( + pyo.Var, active=True + ): f.write(v_name + ", " + str(pyo.value(v_data)) + "\n") """ Functions to be called by xhat extensions. This is just code @@ -284,7 +328,7 @@ def csv_allvars(self, snamedict, fname): factored simply for the usual reasons to factor code. """ def xhat_common_post_everything(self, extname, obj, snamedict, restored_nonants): - """ Code that most xhat post_everything routines will want to call. + """Code that most xhat post_everything routines will want to call. Args: extname (str): the name of the extension for reporting obj (float): the xhat objective function @@ -297,10 +341,18 @@ def xhat_common_post_everything(self, extname, obj, snamedict, restored_nonants) self.opt.tree_solution_available = True self.opt.first_stage_solution_available = True if (obj is not None) and (self.opt.spcomm is not None): - self.opt.spcomm.BestInnerBound = self.opt.spcomm.InnerBoundUpdate(obj, char='E') + self.opt.spcomm.BestInnerBound = self.opt.spcomm.InnerBoundUpdate( + obj, char="E" + ) if self.cylinder_rank == 0 and self.verbose: - print ("****", extname ,"Used scenarios", - str(snamedict),"to get xhat Eobj=",obj) + print( + "****", + extname, + "Used scenarios", + str(snamedict), + "to get xhat Eobj=", + obj, + ) if "csvname" in self.options: self.csv_nonants(snamedict, self.options["csvname"]) @@ -314,7 +366,8 @@ def xhat_common_post_everything(self, extname, obj, snamedict, restored_nonants) def post_iter0(self): # the base class needs this self.comms = self.opt.comms - + + """ May 2020, DLW Simple tree with branching factors (arbitrary trees later) diff --git a/mpisppy/extensions/xhatclosest.py b/mpisppy/extensions/xhatclosest.py index 67905a10b..b348583d9 100644 --- a/mpisppy/extensions/xhatclosest.py +++ b/mpisppy/extensions/xhatclosest.py @@ -13,42 +13,46 @@ import mpisppy.extensions.xhatbase import pyomo.environ as pyo + class XhatClosest(mpisppy.extensions.xhatbase.XhatBase): """ Args: rank (int): mpi process rank of currently running process """ + def __init__(self, ph): super().__init__(ph) self.options = ph.options["xhat_closest_options"] self.solver_options = self.options["xhat_solver_options"] self.keep_solution = True - if ('keep_solution' in self.options) and (not self.options['keep_solution']): + if ("keep_solution" in self.options) and (not self.options["keep_solution"]): self.keep_solution = False def xhat_closest_to_xbar(self, verbose=False, restore_nonants=True): - """ Get a truncated z score and look for the closest overall. + """Get a truncated z score and look for the closest overall. Returns: obj (float or None): objective value, none if infeasible. 
snamedict (dict): closest scenarios """ + def _vb(msg): if verbose and self.cylinder_rank == 0: - print ("(rank0) xhat_looper: " + msg) + print("(rank0) xhat_looper: " + msg) - localmindist = np.zeros(1, dtype='d') - globalmindist = np.zeros(1, dtype='d') + localmindist = np.zeros(1, dtype="d") + globalmindist = np.zeros(1, dtype="d") localwinnername = None for k, s in self.opt.local_scenarios.items(): dist = 0 for ndn_i, xvar in s._mpisppy_data.nonant_indices.items(): diff = pyo.value(xvar) - pyo.value(s._mpisppy_model.xbars[ndn_i]) - variance = pyo.value(s._mpisppy_model.xsqbars[ndn_i]) \ - - pyo.value(s._mpisppy_model.xbars[ndn_i])*pyo.value(s._mpisppy_model.xbars[ndn_i]) + variance = pyo.value(s._mpisppy_model.xsqbars[ndn_i]) - pyo.value( + s._mpisppy_model.xbars[ndn_i] + ) * pyo.value(s._mpisppy_model.xbars[ndn_i]) if variance > 0: stdev = np.sqrt(variance) - dist += min(3, abs(diff)/stdev) + dist += min(3, abs(diff) / stdev) if localwinnername is None: localmindist[0] = dist localwinnername = k @@ -56,19 +60,19 @@ def _vb(msg): localmindist[0] = dist localwinnername = k - self.comms["ROOT"].Allreduce([localmindist, mpi.DOUBLE], - [globalmindist, mpi.DOUBLE], - op=mpi.MIN) + self.comms["ROOT"].Allreduce( + [localmindist, mpi.DOUBLE], [globalmindist, mpi.DOUBLE], op=mpi.MIN + ) # ties are possible, so break the tie - localwinrank = np.zeros(1, dtype='d') # could use a python variable. - globalwinrank = np.zeros(1, dtype='d') + localwinrank = np.zeros(1, dtype="d") # could use a python variable. + globalwinrank = np.zeros(1, dtype="d") if globalmindist[0] < localmindist[0]: localwinrank[0] = -1 # we lost else: localwinrank[0] = self.cylinder_rank - self.comms["ROOT"].Allreduce([localwinrank, mpi.DOUBLE], - [globalwinrank, mpi.DOUBLE], - op=mpi.MAX) + self.comms["ROOT"].Allreduce( + [localwinrank, mpi.DOUBLE], [globalwinrank, mpi.DOUBLE], op=mpi.MAX + ) # We only used the rank to break a possible tie. 
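+        # ranks that lost set localwinrank to -1, so the MAX reduction
+        # selects the highest rank that holds a minimum-distance scenario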
if self.cylinder_rank == int(globalwinrank[0]): @@ -79,21 +83,23 @@ def _vb(msg): sroot = globalwinrank[0] sname = self.comms["ROOT"].bcast(globalwinnername, root=sroot) - _vb("Trying scenario "+sname) - _vb(" Solver options="+str(self.solver_options)) + _vb("Trying scenario " + sname) + _vb(" Solver options=" + str(self.solver_options)) # xxx TBD mult-stage snamedict = {"ROOT": sname} - obj = self._try_one(snamedict, - solver_options=self.solver_options, - verbose=False, - restore_nonants=restore_nonants) + obj = self._try_one( + snamedict, + solver_options=self.solver_options, + verbose=False, + restore_nonants=restore_nonants, + ) if obj is None: _vb(" Infeasible") else: _vb(" Feasible, returning " + str(obj)) - + return obj, snamedict - + def pre_iter0(self): if self.opt.multistage: raise RuntimeError("xhatclosest not done for multi-stage") @@ -101,7 +107,7 @@ def post_iter0(self): # a little bit silly self.comms = self.opt.comms - + def miditer(self): pass @@ -112,7 +118,11 @@ def post_everything(self): # if we're keeping the solution, we *do not* restore the nonants restore_nonants = not self.keep_solution self.opt.disable_W_and_prox() - obj, srcsname = self.xhat_closest_to_xbar(verbose=self.verbose, restore_nonants=restore_nonants) + obj, srcsname = self.xhat_closest_to_xbar( + verbose=self.verbose, restore_nonants=restore_nonants + ) self.opt.reenable_W_and_prox() - self.xhat_common_post_everything("closest to xbar", obj, srcsname, restore_nonants) + self.xhat_common_post_everything( + "closest to xbar", obj, srcsname, restore_nonants + ) self._final_xhat_closest_obj = obj diff --git a/mpisppy/extensions/xhatlooper.py b/mpisppy/extensions/xhatlooper.py index 382a04d7c..0f06ed179 100644 --- a/mpisppy/extensions/xhatlooper.py +++ b/mpisppy/extensions/xhatlooper.py @@ -6,36 +6,34 @@ # All rights reserved. Please see the files COPYRIGHT.md and LICENSE.md for # full copyright and license information. ############################################################################### -# look for an xhat. +# look for an xhat. # Written to be the only extension or called from an extension "manager." # DLW, Jan 2019 import mpisppy.extensions.xhatbase + class XhatLooper(mpisppy.extensions.xhatbase.XhatBase): """ Args: opt (SPBase object): problem that we are bounding rank (int): mpi process rank of currently running process """ + def __init__(self, ph): super().__init__(ph) self.options = ph.options["xhat_looper_options"] self.solver_options = self.options["xhat_solver_options"] self._xhat_looper_obj_final = None self.keep_solution = True - if ('keep_solution' in self.options) and (not self.options['keep_solution']): + if ("keep_solution" in self.options) and (not self.options["keep_solution"]): self.keep_solution = False - #========== - def xhat_looper(self, - scen_limit=1, - seed=None, - verbose=False, - restore_nonants=True): + # ========== + def xhat_looper(self, scen_limit=1, seed=None, verbose=False, restore_nonants=True): """Loop over some number of the global scenarios; if your rank has the chosen guy, bcast, if not, receive the bcast. In any event, fix the vars - at the bcast values and see if it is feasible. If so, stop and + at the bcast values and see if it is feasible. If so, stop and leave the nonants fixed. 
Args: @@ -53,15 +51,16 @@ def xhat_looper, If options has an append_file_name, write to it Also attach the resulting bound to the object """ + def _vb(msg): if verbose and self.cylinder_rank == 0: - print (" rank {} xhat_looper: {}".\ - format(self.cylinder_rank,msg)) + print(" rank {} xhat_looper: {}".format(self.cylinder_rank, msg)) + obj = None sname = None snumlists = dict() llim = min(scen_limit, len(self.opt.all_scenario_names)) - _vb("Enter xhat_looper to try "+str(llim)+" scenarios.") + _vb("Enter xhat_looper to try " + str(llim) + " scenarios.") # The tedious task of collecting the tree information for # local scenario tree nodes (maybe move to the constructor) for k, s in self.opt.local_scenarios.items(): @@ -71,44 +70,46 @@ def _vb(msg): if seed is None: snumlists[ndn] = [i % nsize for i in range(llim)] else: - print ("need a random permutation in snumlist xxxx quitting") + print("need a random permutation in snumlist xxxx quitting") quit() - - self.opt._save_nonants() # to cache for use in fixing + + self.opt._save_nonants() # to cache for use in fixing # for the moment (dec 2019) treat two-stage as special if len(snumlists) == 1: for snum in snumlists["ROOT"]: sname = self.opt.all_scenario_names[snum] - _vb("Trying scenario "+sname) - _vb(" Solver options="+str(self.solver_options)) + _vb("Trying scenario " + sname) + _vb(" Solver options=" + str(self.solver_options)) snamedict = {"ROOT": sname} - obj = self._try_one(snamedict, - solver_options=self.solver_options, - verbose=False, - restore_nonants=restore_nonants) + obj = self._try_one( + snamedict, + solver_options=self.solver_options, + verbose=False, + restore_nonants=restore_nonants, + ) if obj is None: _vb(" Infeasible") else: _vb(" Feasible, returning " + str(obj)) break else: - raise RuntimeError("xhatlooper cannot do multi-stage") + raise RuntimeError("xhatlooper cannot do multi-stage") if "append_file_name" in self.options and self.opt.cylinder_rank == 0: with open(self.options["append_file_name"], "a") as f: - f.write(", "+str(obj)) + f.write(", " + str(obj)) self.xhatlooper_obj = obj return obj, snamedict def pre_iter0(self): if self.opt.multistage: - raise RuntimeError("xhatlooper cannot do multi-stage") + raise RuntimeError("xhatlooper cannot do multi-stage") def post_iter0(self): # a little bit silly self.comms = self.opt.comms - + def miditer(self): pass diff --git a/mpisppy/extensions/xhatspecific.py b/mpisppy/extensions/xhatspecific.py index 8149bbf59..7e100456f 100644 --- a/mpisppy/extensions/xhatspecific.py +++ b/mpisppy/extensions/xhatspecific.py @@ -12,28 +12,27 @@ import mpisppy.extensions.xhatbase + class XhatSpecific(mpisppy.extensions.xhatbase.XhatBase): """ Args: spo (SPOpt object): the calling object rank (int): mpi process rank of currently running process """ + def __init__(self, spo): super().__init__(spo) self.options = spo.options["xhat_specific_options"] self.solver_options = self.options["xhat_solver_options"] self.keep_solution = True - if ('keep_solution' in self.options) and (not self.options['keep_solution']): + if ("keep_solution" in self.options) and (not self.options["keep_solution"]): self.keep_solution = False - #========== - def xhat_tryit(self, - xhat_scenario_dict, - verbose=False, - restore_nonants=True): + # ========== + def xhat_tryit(self, xhat_scenario_dict, verbose=False, restore_nonants=True): """If your rank has the chosen guy, bcast, if not, receive the bcast. In any event, fix the vars - at the bcast values and see if it is feasible. 
+ at the bcast values and see if it is feasible. Args: xhat_scenario_dict (string): keys are nodes; values are scen names @@ -42,19 +41,22 @@ def xhat_tryit(self, xhatobjective (float or None): the objective function or None if one could not be obtained. """ + def _vb(msg): if verbose and self.cylinder_rank == 0: print(" xhat_specific: " + msg) obj = None - _vb("Enter XhatSpecific.xhat_tryit to try: "+str(xhat_scenario_dict)) + _vb("Enter XhatSpecific.xhat_tryit to try: " + str(xhat_scenario_dict)) - _vb(" Solver options="+str(self.solver_options)) - obj = self._try_one(xhat_scenario_dict, - solver_options=self.solver_options, - verbose=False, - restore_nonants=restore_nonants) + _vb(" Solver options=" + str(self.solver_options)) + obj = self._try_one( + xhat_scenario_dict, + solver_options=self.solver_options, + verbose=False, + restore_nonants=restore_nonants, + ) if obj is None: _vb("Infeasible") else: @@ -68,7 +70,7 @@ def pre_iter0(self): def post_iter0(self): # a little bit silly self.comms = self.opt.comms - + def miditer(self): pass @@ -80,10 +82,12 @@ def post_everything(self): restore_nonants = not self.keep_solution self.opt.disable_W_and_prox() xhat_scenario_dict = self.options["xhat_scenario_dict"] - obj = self.xhat_tryit(xhat_scenario_dict, - verbose=self.verbose, - restore_nonants=restore_nonants) + obj = self.xhat_tryit( + xhat_scenario_dict, verbose=self.verbose, restore_nonants=restore_nonants + ) self.opt.reenable_W_and_prox() # to make available to tester self._xhat_specific_obj_final = obj - self.xhat_common_post_everything("xhat specified scenario", obj, xhat_scenario_dict, restore_nonants) + self.xhat_common_post_everything( + "xhat specified scenario", obj, xhat_scenario_dict, restore_nonants + ) diff --git a/mpisppy/extensions/xhatxbar.py b/mpisppy/extensions/xhatxbar.py index 563bb9d25..e65b1d913 100644 --- a/mpisppy/extensions/xhatxbar.py +++ b/mpisppy/extensions/xhatxbar.py @@ -13,56 +13,57 @@ import mpisppy.extensions.xhatbase import mpisppy.phbase as phbase + class XhatXbar(mpisppy.extensions.xhatbase.XhatBase): """ Args: spo (SPOpt object): the calling object """ + def __init__(self, spo): super().__init__(spo) self.options = spo.options["xhat_xbar_options"] self.solver_options = self.options["xhat_solver_options"] self.keep_solution = True - if ('keep_solution' in self.options) and (not self.options['keep_solution']): + if ("keep_solution" in self.options) and (not self.options["keep_solution"]): self.keep_solution = False def _fix_nonants_xhat(self): - """ Fix the Vars subject to non-anticipativity at given values. + """Fix the Vars subject to non-anticipativity at given values. Loop over the scenarios to restore, but loop over subproblems to alert persistent solvers. Args: cache (ndn dict of list or numpy vector): values at which to fix - WARNING: + WARNING: We are counting on Pyomo indices not to change order between when the cache_list is created and used. 
NOTE: You probably want to call _save_nonants right before calling this copy/pasted from phabse _fix_nonants """ - for k,s in self.opt.local_scenarios.items(): + for k, s in self.opt.local_scenarios.items(): persistent_solver = None - if (sputils.is_persistent(s._solver_plugin)): + if sputils.is_persistent(s._solver_plugin): persistent_solver = s._solver_plugin nlens = s._mpisppy_data.nlens for node in s._mpisppy_node_list: ndn = node.name - for i in range(nlens[ndn]): + for i in range(nlens[ndn]): this_vardata = node.nonant_vardata_list[i] if this_vardata.is_integer() or this_vardata.is_binary(): - this_vardata._value = round(s._mpisppy_model.xbars[(ndn,i)]._value) + this_vardata._value = round( + s._mpisppy_model.xbars[(ndn, i)]._value + ) else: - this_vardata._value = s._mpisppy_model.xbars[(ndn,i)]._value + this_vardata._value = s._mpisppy_model.xbars[(ndn, i)]._value this_vardata.fix() if persistent_solver is not None: persistent_solver.update_var(this_vardata) - - #========== - def xhat_tryit(self, - verbose=False, - restore_nonants=True): + # ========== + def xhat_tryit(self, verbose=False, restore_nonants=True): """Use xbar to set the nonants; round integers Args: @@ -72,6 +73,7 @@ def xhat_tryit(self, xhatobjective (float or None): the objective function or None if one could not be obtained. """ + def _vb(msg): if verbose and self.cylinder_rank == 0: print(" xhat_xbar: " + msg) @@ -80,20 +82,22 @@ def _vb(msg): _vb("Enter XhatXbar.xhat_tryit") - _vb(" Solver options="+str(self.solver_options)) + _vb(" Solver options=" + str(self.solver_options)) # This might be an extension for a Xhat_Eval object or a PH object, so we will assume the worst phbase._Compute_Xbar(self.opt) - self._fix_nonants_xhat() # (BTW: for all local scenarios) + self._fix_nonants_xhat() # (BTW: for all local scenarios) # NOTE: for APH we may need disable_pyomo_signal_handling - self.opt.solve_loop(solver_options=self.solver_options, - #dis_W=True, dis_prox=True, - verbose=verbose, - tee=False) + self.opt.solve_loop( + solver_options=self.solver_options, + # dis_W=True, dis_prox=True, + verbose=verbose, + tee=False, + ) infeasP = self.opt.infeas_prob() - if infeasP != 0.: + if infeasP != 0.0: # restoring does no harm # if this solution is infeasible self.opt._restore_nonants() @@ -118,7 +122,7 @@ def pre_iter0(self): def post_iter0(self): # a little bit silly self.comms = self.opt.comms - + def miditer(self): pass @@ -129,8 +133,7 @@ def post_everything(self): # if we're keeping the solution, we *do not* restore the nonants restore_nonants = not self.keep_solution self.opt.disable_W_and_prox() - obj = self.xhat_tryit(verbose=self.verbose, - restore_nonants=restore_nonants) + obj = self.xhat_tryit(verbose=self.verbose, restore_nonants=restore_nonants) self.opt.reenable_W_and_prox() # to make available to tester self._xhat_xbar_obj_final = obj diff --git a/mpisppy/fwph/fwph.py b/mpisppy/fwph/fwph.py index ac3c29c0a..1e4788a8a 100644 --- a/mpisppy/fwph/fwph.py +++ b/mpisppy/fwph/fwph.py @@ -6,48 +6,48 @@ # All rights reserved. Please see the files COPYRIGHT.md and LICENSE.md for # full copyright and license information. ############################################################################### -''' Implementation of the Frank-Wolfe Progressive Hedging (FW-PH) algorithm - described in the paper: - - N. Boland et al., "Combining Progressive Hedging with a Frank-Wolfe method - to compute Lagrangian dual bounds in stochastic mixed-integer programming". - SIAM J. Optim. 28(2):1312--1336, 2018. 
- - Current implementation supports parallelism and bundling. - - Does not support: - 1. The use of extensions - 2. The solution of models with more than two stages - 3. Simultaneous use of bundling and user-specified initial points - - The FWPH algorithm allows the user to specify an initial set of points - (notated V^0_s in Boland) to approximate the convex hull of each scenario - subproblem. Points are specified via a callable which takes as input a - scenario name (str) and outputs a list of dicts. Each dict corresponds to a - point, and is formatted - - point_dict[variable_name] = variable_value - - (e.g. point_dict['Allocation[2,5]'] = 1.7). The order of this list - matters--the first point in the list is used to compute the initial dual - weights and xBar value (in Boland notation, the first point in the list is - the point (x0_s,y0_s) in Algorithm 3). The callable is passed to FWPH as an - option in the FW_options dict, with the key "point_creator". The - point_creator callable may also take an optional argument, - "point_creator_data", which may be any data type, and contain any - information needed to create the initial point set. The point_creator_data - is passed to FWPH as an option in the FW_options dict, with the key - "point_creator_data". - - See fwph_sslp.py for an example of user-specified point creation. -''' +"""Implementation of the Frank-Wolfe Progressive Hedging (FW-PH) algorithm +described in the paper: + +N. Boland et al., "Combining Progressive Hedging with a Frank-Wolfe method +to compute Lagrangian dual bounds in stochastic mixed-integer programming". +SIAM J. Optim. 28(2):1312--1336, 2018. + +Current implementation supports parallelism and bundling. + +Does not support: + 1. The use of extensions + 2. The solution of models with more than two stages + 3. Simultaneous use of bundling and user-specified initial points + +The FWPH algorithm allows the user to specify an initial set of points +(notated V^0_s in Boland) to approximate the convex hull of each scenario +subproblem. Points are specified via a callable which takes as input a +scenario name (str) and outputs a list of dicts. Each dict corresponds to a +point, and is formatted + + point_dict[variable_name] = variable_value + +(e.g. point_dict['Allocation[2,5]'] = 1.7). The order of this list +matters--the first point in the list is used to compute the initial dual +weights and xBar value (in Boland notation, the first point in the list is +the point (x0_s,y0_s) in Algorithm 3). The callable is passed to FWPH as an +option in the FW_options dict, with the key "point_creator". The +point_creator callable may also take an optional argument, +"point_creator_data", which may be any data type, and contain any +information needed to create the initial point set. The point_creator_data +is passed to FWPH as an option in the FW_options dict, with the key +"point_creator_data". + +See fwph_sslp.py for an example of user-specified point creation. 
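+
+As a minimal sketch (the function and data names here are illustrative,
+not part of the mpi-sppy API), such a point_creator might look like:
+
+    def my_point_creator(scenario_name, point_creator_data=None):
+        # return the list of initial points for this scenario; each dict
+        # maps Pyomo variable names to values, as described above
+        return [{'Allocation[2,5]': 1.7}]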
+""" import mpisppy.phbase import mpisppy.utils.sputils as sputils import numpy as np import pyomo.environ as pyo import time -import re # For manipulating scenario names +import re # For manipulating scenario names from mpisppy import MPI from pyomo.repn.standard_repn import generate_standard_repn @@ -55,8 +55,8 @@ from pyomo.core.expr.visitor import replace_expressions from pyomo.core.expr.numeric_expr import LinearExpression + class FWPH(mpisppy.phbase.PHBase): - def __init__( self, PH_options, @@ -72,7 +72,7 @@ def __init__( variable_probability=None, ): super().__init__( - PH_options, + PH_options, all_scenario_names, scenario_creator, scenario_denouement, @@ -83,33 +83,38 @@ def __init__( extension_kwargs=None, ph_converger=ph_converger, rho_setter=rho_setter, - ) - assert (variable_probability is None), "variable probability is not allowed with fwph" + ) + assert ( + variable_probability is None + ), "variable probability is not allowed with fwph" self._init(FW_options) def _init(self, FW_options): self.FW_options = FW_options self._options_checks_fw() self.vb = True - if ('FW_verbose' in self.FW_options): - self.vb = self.FW_options['FW_verbose'] + if "FW_verbose" in self.FW_options: + self.vb = self.FW_options["FW_verbose"] def fw_prep(self): self.PH_Prep(attach_duals=True, attach_prox=False) self._output_header() - if ('point_creator' in self.FW_options): + if "point_creator" in self.FW_options: # The user cannot both specify and point_creator and use bundles. # At this point, we have already checked for that possibility, so # we can safely use the point_creator without further checks for # bundles. self._create_initial_points() - check = True if 'check_initial_points' not in self.FW_options \ - else self.FW_options['check_initial_points'] - if (check): + check = ( + True + if "check_initial_points" not in self.FW_options + else self.FW_options["check_initial_points"] + ) + if check: self._check_initial_points() self._create_solvers() - self._use_rho_setter(self.options['verbose'] and self.cylinder_rank==0) + self._use_rho_setter(self.options["verbose"] and self.cylinder_rank == 0) self._initialize_MIP_var_values() best_bound = -np.inf if self.is_minimizing else np.inf else: @@ -118,12 +123,12 @@ def fw_prep(self): self._output(0, trivial_bound, trivial_bound, np.nan, secs) best_bound = trivial_bound - if ('mip_solver_options' in self.FW_options): + if "mip_solver_options" in self.FW_options: self._set_MIP_solver_options() # Lines 2 and 3 of Algorithm 3 in Boland - self.Compute_Xbar(self.options['verbose']) - self.Update_W(self.options['verbose']) + self.Compute_Xbar(self.options["verbose"]) + self.Update_W(self.options["verbose"]) # Necessary pre-processing steps # We disable_W so they don't appear @@ -139,7 +144,7 @@ def fw_prep(self): self._swap_nonant_vars() self._reenable_W() - if (self.ph_converger): + if self.ph_converger: self.convobject = self.ph_converger(self, self.cylinder_rank, self.n_proc) return best_bound @@ -154,15 +159,16 @@ def fwph_main(self): return None, None, None # The body of the algorithm - for itr in range(self.options['PHIterLimit']): + for itr in range(self.options["PHIterLimit"]): self._PHIter = itr self._local_bound = 0 for name in self.local_subproblems: dual_bound = self.SDM(name) - self._local_bound += self.local_subproblems[name]._mpisppy_probability * \ - dual_bound + self._local_bound += ( + self.local_subproblems[name]._mpisppy_probability * dual_bound + ) self._compute_dual_bound() - if (self.is_minimizing): + if self.is_minimizing: 
best_bound = np.maximum(best_bound, self._local_bound) else: best_bound = np.minimum(best_bound, self._local_bound) @@ -171,156 +177,161 @@ if self.spcomm: if self.spcomm.is_converged(): secs = time.time() - self.t0 - self._output(itr+1, self._local_bound, - best_bound, np.nan, secs) - if (self.cylinder_rank == 0 and self.vb): - print('FWPH converged to user-specified criteria') + self._output(itr + 1, self._local_bound, best_bound, np.nan, secs) + if self.cylinder_rank == 0 and self.vb: + print("FWPH converged to user-specified criteria") break self.spcomm.sync() - if (self.ph_converger): - self.Compute_Xbar(self.options['verbose']) + if self.ph_converger: + self.Compute_Xbar(self.options["verbose"]) diff = self.convobject.convergence_value() - if (self.convobject.is_converged()): + if self.convobject.is_converged(): secs = time.time() - self.t0 - self._output(itr+1, self._local_bound, - best_bound, diff, secs) - if (self.cylinder_rank == 0 and self.vb): - print('FWPH converged to user-specified criteria') + self._output(itr + 1, self._local_bound, best_bound, diff, secs) + if self.cylinder_rank == 0 and self.vb: + print("FWPH converged to user-specified criteria") break - else: # Convergence check from Boland + else: # Convergence check from Boland diff = self._conv_diff() - self.Compute_Xbar(self.options['verbose']) - if (diff < self.options['convthresh']): + self.Compute_Xbar(self.options["verbose"]) + if diff < self.options["convthresh"]: secs = time.time() - self.t0 - self._output(itr+1, self._local_bound, - best_bound, diff, secs) - if (self.cylinder_rank == 0 and self.vb): - print('PH converged based on standard criteria') + self._output(itr + 1, self._local_bound, best_bound, diff, secs) + if self.cylinder_rank == 0 and self.vb: + print("PH converged based on standard criteria") break secs = time.time() - self.t0 - self._output(itr+1, self._local_bound, best_bound, diff, secs) - self.Update_W(self.options['verbose']) - if (self._is_timed_out()): - if (self.cylinder_rank == 0 and self.vb): - print('Timeout.') + self._output(itr + 1, self._local_bound, best_bound, diff, secs) + self.Update_W(self.options["verbose"]) + if self._is_timed_out(): + if self.cylinder_rank == 0 and self.vb: + print("Timeout.") break self._swap_nonant_vars_back() - weight_dict = self._gather_weight_dict() # None if rank != 0 - xbars_dict = self._get_xbars() # None if rank != 0 - return itr+1, weight_dict, xbars_dict + weight_dict = self._gather_weight_dict()  # None if rank != 0 + xbars_dict = self._get_xbars()  # None if rank != 0 + return itr + 1, weight_dict, xbars_dict def SDM(self, model_name): - ''' Algorithm 2 in Boland et al. (with small tweaks) - ''' + """Algorithm 2 in Boland et al. (with small tweaks)""" mip = self.local_subproblems[model_name] - qp = self.local_QP_subproblems[model_name] - + qp = self.local_QP_subproblems[model_name] + # Set the QP dual weights to the correct values. If we are bundling, we # initialize the QP dual weights to be the dual weights associated with # the first scenario in the bundle (this should be okay, because each # scenario in the bundle has the same dual weights, analytically--maybe # a numerical problem). 
- arb_scen_mip = self.local_scenarios[mip.scen_list[0]] \ - if self.bundling else mip - for (node_name, ix) in arb_scen_mip._mpisppy_data.nonant_indices: - qp._mpisppy_model.W[node_name, ix]._value = \ - arb_scen_mip._mpisppy_model.W[node_name, ix].value + arb_scen_mip = self.local_scenarios[mip.scen_list[0]] if self.bundling else mip + for node_name, ix in arb_scen_mip._mpisppy_data.nonant_indices: + qp._mpisppy_model.W[node_name, ix]._value = arb_scen_mip._mpisppy_model.W[ + node_name, ix + ].value - alpha = self.FW_options['FW_weight'] + alpha = self.FW_options["FW_weight"] # Algorithm 3 line 6 - xt = {ndn_i: - (1 - alpha) * pyo.value(arb_scen_mip._mpisppy_model.xbars[ndn_i]) + xt = { + ndn_i: (1 - alpha) * pyo.value(arb_scen_mip._mpisppy_model.xbars[ndn_i]) + alpha * pyo.value(xvar) for ndn_i, xvar in arb_scen_mip._mpisppy_data.nonant_indices.items() - } + } - for itr in range(self.FW_options['FW_iter_limit']): + for itr in range(self.FW_options["FW_iter_limit"]): # Algorithm 2 line 4 mip_source = mip.scen_list if self.bundling else [model_name] for scenario_name in mip_source: scen_mip = self.local_scenarios[scenario_name] for ndn_i, nonant in scen_mip._mpisppy_data.nonant_indices.items(): - x_source = xt[ndn_i] if itr==0 \ - else nonant._value - scen_mip._mpisppy_model.W[ndn_i]._value = ( - qp._mpisppy_model.W[ndn_i]._value - + scen_mip._mpisppy_model.rho[ndn_i]._value - * (x_source - - scen_mip._mpisppy_model.xbars[ndn_i]._value)) + x_source = xt[ndn_i] if itr == 0 else nonant._value + scen_mip._mpisppy_model.W[ndn_i]._value = qp._mpisppy_model.W[ + ndn_i + ]._value + scen_mip._mpisppy_model.rho[ndn_i]._value * ( + x_source - scen_mip._mpisppy_model.xbars[ndn_i]._value + ) # Algorithm 2 line 5 - if (sputils.is_persistent(mip._solver_plugin)): + if sputils.is_persistent(mip._solver_plugin): mip_obj = find_active_objective(mip) mip._solver_plugin.set_objective(mip_obj) mip_results = mip._solver_plugin.solve(mip) - self._check_solve(mip_results, model_name + ' (MIP)') + self._check_solve(mip_results, model_name + " (MIP)") # Algorithm 2 lines 6--8 obj = find_active_objective(mip) - if (itr == 0): - if (self.is_minimizing): + if itr == 0: + if self.is_minimizing: dual_bound = mip_results.Problem[0].Lower_bound else: dual_bound = mip_results.Problem[0].Upper_bound # Algorithm 2 line 9 (compute \Gamma^t) val0 = pyo.value(obj) - new = replace_expressions(obj.expr, mip._mpisppy_data.mip_to_qp) + new = replace_expressions(obj.expr, mip._mpisppy_data.mip_to_qp) val1 = pyo.value(new) obj.expr = replace_expressions(new, qp._mpisppy_data.qp_to_mip) if abs(val0) > 1e-9: - stop_check = (val1 - val0) / abs(val0) # \Gamma^t in Boland, but normalized + stop_check = (val1 - val0) / abs( + val0 + ) # \Gamma^t in Boland, but normalized else: - stop_check = val1 - val0 # \Gamma^t in Boland - stop_check_tol = self.FW_options["stop_check_tol"]\ - if "stop_check_tol" in self.FW_options else 1e-4 - if (self.is_minimizing and stop_check < -stop_check_tol): - print('Warning (fwph): convergence quantity Gamma^t = ' - '{sc:.2e} (should be non-negative)'.format(sc=stop_check)) - print('Try decreasing the MIP gap tolerance and re-solving') - elif (not self.is_minimizing and stop_check > stop_check_tol): - print('Warning (fwph): convergence quantity Gamma^t = ' - '{sc:.2e} (should be non-positive)'.format(sc=stop_check)) - print('Try decreasing the MIP gap tolerance and re-solving') + stop_check = val1 - val0 # \Gamma^t in Boland + stop_check_tol = ( + self.FW_options["stop_check_tol"] + if "stop_check_tol" in 
self.FW_options + else 1e-4 + ) + if self.is_minimizing and stop_check < -stop_check_tol: + print( + "Warning (fwph): convergence quantity Gamma^t = " + "{sc:.2e} (should be non-negative)".format(sc=stop_check) + ) + print("Try decreasing the MIP gap tolerance and re-solving") + elif not self.is_minimizing and stop_check > stop_check_tol: + print( + "Warning (fwph): convergence quantity Gamma^t = " + "{sc:.2e} (should be non-positive)".format(sc=stop_check) + ) + print("Try decreasing the MIP gap tolerance and re-solving") self._add_QP_column(model_name) - if (sputils.is_persistent(qp._QP_solver_plugin)): + if sputils.is_persistent(qp._QP_solver_plugin): qp_obj = find_active_objective(qp) qp._QP_solver_plugin.set_objective(qp_obj) qp_results = qp._QP_solver_plugin.solve(qp) - self._check_solve(qp_results, model_name + ' (QP)') + self._check_solve(qp_results, model_name + " (QP)") - if (stop_check < self.FW_options['FW_conv_thresh']): + if stop_check < self.FW_options["FW_conv_thresh"]: break - # Re-set the mip._mpisppy_model.W so that the QP objective + # Re-set the mip._mpisppy_model.W so that the QP objective # is correct in the next major iteration mip_source = mip.scen_list if self.bundling else [model_name] for scenario_name in mip_source: scen_mip = self.local_scenarios[scenario_name] - for (node_name, ix) in scen_mip._mpisppy_data.nonant_indices: - scen_mip._mpisppy_model.W[node_name, ix]._value = \ - qp._mpisppy_model.W[node_name, ix]._value + for node_name, ix in scen_mip._mpisppy_data.nonant_indices: + scen_mip._mpisppy_model.W[node_name, ix]._value = qp._mpisppy_model.W[ + node_name, ix + ]._value return dual_bound def _add_QP_column(self, model_name): - ''' Add a column to the QP, with values taken from the most recent MIP - solve. - ''' + """Add a column to the QP, with values taken from the most recent MIP + solve. + """ mip = self.local_subproblems[model_name] - qp = self.local_QP_subproblems[model_name] + qp = self.local_QP_subproblems[model_name] solver = qp._QP_solver_plugin persistent = sputils.is_persistent(solver) - if hasattr(solver, 'add_column'): + if hasattr(solver, "add_column"): new_var = qp.a.add() - coef_list = [1.] + coef_list = [1.0] constr_list = [qp.sum_one] target = mip.ref_vars if self.bundling else mip.nonant_vars - for (node, ix) in qp.eqx.index_set(): + for node, ix in qp.eqx.index_set(): coef_list.append(target[node, ix].value) constr_list.append(qp.eqx[node, ix]) for key in mip._mpisppy_model.y_indices: @@ -330,8 +341,8 @@ def _add_QP_column(self, model_name): return # Add new variable and update \sum a_i = 1 constraint - new_var = qp.a.add() # Add the new convex comb. variable - if (persistent): + new_var = qp.a.add() # Add the new convex comb. 
variable + if persistent: solver.add_var(new_var) solver.remove_constraint(qp.sum_one) qp.sum_one._body += new_var @@ -340,15 +351,15 @@ def _add_QP_column(self, model_name): qp.sum_one._body += new_var target = mip.ref_vars if self.bundling else mip.nonant_vars - for (node, ix) in qp.eqx.index_set(): - if (persistent): + for node, ix in qp.eqx.index_set(): + if persistent: solver.remove_constraint(qp.eqx[node, ix]) qp.eqx[node, ix]._body += new_var * target[node, ix].value solver.add_constraint(qp.eqx[node, ix]) else: - qp.eqx[node, ix]._body += new_var * target[node,ix].value + qp.eqx[node, ix]._body += new_var * target[node, ix].value for key in mip._mpisppy_model.y_indices: - if (persistent): + if persistent: solver.remove_constraint(qp.eqy[key]) qp.eqy[key]._body += new_var * pyo.value(mip.leaf_vars[key]) solver.add_constraint(qp.eqy[key]) @@ -356,33 +367,38 @@ def _add_QP_column(self, model_name): qp.eqy[key]._body += new_var * pyo.value(mip.leaf_vars[key]) def _attach_indices(self): - ''' Attach the fields x_indices and y_indices to the model objects in - self.local_subproblems (not self.local_scenarios, nor - self.local_QP_subproblems). - - x_indices is a list of tuples of the form... - (scenario name, node name, variable index) <-- bundling - (node name, variable index) <-- no bundling - y_indices is a list of tuples of the form... - (scenario_name, "LEAF", variable index) <-- bundling - ("LEAF", variable index) <-- no bundling - - Must be called after the subproblems (MIPs AND QPs) are created. - ''' + """Attach the fields x_indices and y_indices to the model objects in + self.local_subproblems (not self.local_scenarios, nor + self.local_QP_subproblems). + + x_indices is a list of tuples of the form... + (scenario name, node name, variable index) <-- bundling + (node name, variable index) <-- no bundling + y_indices is a list of tuples of the form... + (scenario_name, "LEAF", variable index) <-- bundling + ("LEAF", variable index) <-- no bundling + + Must be called after the subproblems (MIPs AND QPs) are created. + """ for name in self.local_subproblems.keys(): mip = self.local_subproblems[name] - qp = self.local_QP_subproblems[name] - if (self.bundling): - x_indices = [(scenario_name, node_name, ix) + qp = self.local_QP_subproblems[name] + if self.bundling: + x_indices = [ + (scenario_name, node_name, ix) for scenario_name in mip.scen_list - for (node_name, ix) in - self.local_scenarios[scenario_name]._mpisppy_data.nonant_indices] - y_indices = [(scenario_name, 'LEAF', ix) + for (node_name, ix) in self.local_scenarios[ + scenario_name + ]._mpisppy_data.nonant_indices + ] + y_indices = [ + (scenario_name, "LEAF", ix) for scenario_name in mip.scen_list - for ix in range(mip.num_leaf_vars[scenario_name])] + for ix in range(mip.num_leaf_vars[scenario_name]) + ] else: x_indices = mip._mpisppy_data.nonant_indices.keys() - y_indices = [('LEAF', ix) for ix in range(len(qp.y))] + y_indices = [("LEAF", ix) for ix in range(len(qp.y))] y_indices = pyo.Set(initialize=y_indices) y_indices.construct() @@ -392,62 +408,81 @@ def _attach_indices(self): mip._mpisppy_model.y_indices = y_indices def _attach_MIP_QP_maps(self): - ''' Create dictionaries that map MIP variable ids to their QP - counterparts, and vice versa. - ''' + """Create dictionaries that map MIP variable ids to their QP + counterparts, and vice versa. 
+ """ for name in self.local_subproblems.keys(): mip = self.local_subproblems[name] - qp = self.local_QP_subproblems[name] + qp = self.local_QP_subproblems[name] - mip._mpisppy_data.mip_to_qp = {id(mip.nonant_vars[key]): qp.x[key] - for key in mip._mpisppy_model.x_indices} - mip._mpisppy_data.mip_to_qp.update({id(mip.leaf_vars[key]): qp.y[key] - for key in mip._mpisppy_model.y_indices}) - qp._mpisppy_data.qp_to_mip = {id(qp.x[key]): mip.nonant_vars[key] - for key in mip._mpisppy_model.x_indices} - qp._mpisppy_data.qp_to_mip.update({id(qp.y[key]): mip.leaf_vars[key] - for key in mip._mpisppy_model.y_indices}) + mip._mpisppy_data.mip_to_qp = { + id(mip.nonant_vars[key]): qp.x[key] + for key in mip._mpisppy_model.x_indices + } + mip._mpisppy_data.mip_to_qp.update( + { + id(mip.leaf_vars[key]): qp.y[key] + for key in mip._mpisppy_model.y_indices + } + ) + qp._mpisppy_data.qp_to_mip = { + id(qp.x[key]): mip.nonant_vars[key] + for key in mip._mpisppy_model.x_indices + } + qp._mpisppy_data.qp_to_mip.update( + { + id(qp.y[key]): mip.leaf_vars[key] + for key in mip._mpisppy_model.y_indices + } + ) def _attach_MIP_vars(self): - ''' Create a list indexed (node_name, ix) for all the MIP - non-anticipative and leaf variables, so that they can be easily - accessed when adding columns to the QP. - ''' - if (self.bundling): - for (bundle_name, EF) in self.local_subproblems.items(): + """Create a list indexed (node_name, ix) for all the MIP + non-anticipative and leaf variables, so that they can be easily + accessed when adding columns to the QP. + """ + if self.bundling: + for bundle_name, EF in self.local_subproblems.items(): EF.nonant_vars = dict() - EF.leaf_vars = dict() - EF.num_leaf_vars = dict() # Keys are scenario names + EF.leaf_vars = dict() + EF.num_leaf_vars = dict() # Keys are scenario names for scenario_name in EF.scen_list: mip = self.local_scenarios[scenario_name] # Non-anticipative variables - nonant_dict = {(scenario_name, ndn, ix): nonant - for (ndn,ix), nonant in mip._mpisppy_data.nonant_indices.items()} + nonant_dict = { + (scenario_name, ndn, ix): nonant + for ( + ndn, + ix, + ), nonant in mip._mpisppy_data.nonant_indices.items() + } EF.nonant_vars.update(nonant_dict) # Leaf variables - leaf_var_dict = {(scenario_name, 'LEAF', ix): - var for ix, var in enumerate(self._get_leaf_vars(mip))} + leaf_var_dict = { + (scenario_name, "LEAF", ix): var + for ix, var in enumerate(self._get_leaf_vars(mip)) + } EF.leaf_vars.update(leaf_var_dict) EF.num_leaf_vars[scenario_name] = len(leaf_var_dict) # Reference variables are already attached: EF.ref_vars # indexed by (node_name, index) else: - for (name, mip) in self.local_scenarios.items(): + for name, mip in self.local_scenarios.items(): mip.nonant_vars = mip._mpisppy_data.nonant_indices - mip.leaf_vars = { ('LEAF', ix): - var for ix, var in enumerate(self._get_leaf_vars(mip)) + mip.leaf_vars = { + ("LEAF", ix): var for ix, var in enumerate(self._get_leaf_vars(mip)) } def _check_initial_points(self): - ''' If t_max (i.e. the inner iteration limit) is set to 1, then the - initial point set must satisfy the additional condition (17) in - Boland et al. This function verifies that condition (17) is - satisfied by solving a linear program (similar to the Phase I - auxiliary LP in two-phase simplex). - - This function is only called by a single rank, which MUST be rank - 0. The rank 0 check happens before this function is called. - ''' + """If t_max (i.e. 
the inner iteration limit) is set to 1, then the + initial point set must satisfy the additional condition (17) in + Boland et al. This function verifies that condition (17) is + satisfied by solving a linear program (similar to the Phase I + auxiliary LP in two-phase simplex). + + This function is only called by a single rank, which MUST be rank + 0. The rank 0 check happens before this function is called. + """ # Need to get the first-stage variable names (as supplied by the user) # by picking them off of any random scenario that's lying around. arb_scenario = list(self.local_scenarios.keys())[0] @@ -455,23 +490,26 @@ def _check_initial_points(self): root = arb_mip._mpisppy_node_list[0] stage_one_var_names = [var.name for var in root.nonant_vardata_list] - init_pts = self.comms['ROOT'].gather(self.local_initial_points, root=0) - if (self.cylinder_rank != 0): + init_pts = self.comms["ROOT"].gather(self.local_initial_points, root=0) + if self.cylinder_rank != 0: return - print('Checking initial points...', end='', flush=True) - - points = {key: value for block in init_pts - for (key, value) in block.items()} + print("Checking initial points...", end="", flush=True) + + points = {key: value for block in init_pts for (key, value) in block.items()} scenario_names = points.keys() # Some index sets we will need.. conv_ix = [(scenario_name, var_name) - for scenario_name in scenario_names - for var_name in stage_one_var_names] - conv_coeff = [(scenario_name, ix) - for scenario_name in scenario_names - for ix in range(len(points[scenario_name]))] + conv_ix = [ + (scenario_name, var_name) + for scenario_name in scenario_names + for var_name in stage_one_var_names + ] + conv_coeff = [ + (scenario_name, ix) + for scenario_name in scenario_names + for ix in range(len(points[scenario_name])) + ] aux = pyo.ConcreteModel() aux.x = pyo.Var(stage_one_var_names, within=pyo.Reals) @@ -480,79 +518,92 @@ def _check_initial_points(self): aux.conv = pyo.Var(conv_coeff, within=pyo.NonNegativeReals) def sum_one_rule(model, scenario_name): - return pyo.quicksum(model.conv[scenario_name,ix] \ - for ix in range(len(points[scenario_name]))) == 1 + return ( + pyo.quicksum( + model.conv[scenario_name, ix] + for ix in range(len(points[scenario_name])) + ) + == 1 + ) + aux.sum_one = pyo.Constraint(scenario_names, rule=sum_one_rule) def conv_rule(model, scenario_name, var_name): - return model.x[var_name] \ - + model.slack_plus[scenario_name, var_name] \ - - model.slack_minus[scenario_name, var_name] \ - == pyo.quicksum(model.conv[scenario_name, ix] * - points[scenario_name][ix][var_name] - for ix in range(len(points[scenario_name]))) + return model.x[var_name] + model.slack_plus[ + scenario_name, var_name + ] - model.slack_minus[scenario_name, var_name] == pyo.quicksum( + model.conv[scenario_name, ix] * points[scenario_name][ix][var_name] + for ix in range(len(points[scenario_name])) + ) + aux.comb = pyo.Constraint(conv_ix, rule=conv_rule) - obj_expr = pyo.quicksum(aux.slack_plus.values()) \ - + pyo.quicksum(aux.slack_minus.values()) + obj_expr = pyo.quicksum(aux.slack_plus.values()) + pyo.quicksum( + aux.slack_minus.values() ) aux.obj = pyo.Objective(expr=obj_expr, sense=pyo.minimize) - solver = pyo.SolverFactory(self.FW_options['solver_name']) + solver = pyo.SolverFactory(self.FW_options["solver_name"]) results = solver.solve(aux) - self._check_solve(results, 'Auxiliary LP') + self._check_solve(results, "Auxiliary LP") - check_tol = self.FW_options['check_tol'] \ - if 'check_tol' in self.FW_options.keys() else 1e-4
- if (pyo.value(obj_expr) > check_tol): - print('error.') - raise ValueError('The specified initial points do not satisfy the ' - 'critera necessary for convergence. Please specify different ' - 'initial points, or increase FW_iter_limit') - print('done.') + check_tol = ( + self.FW_options["check_tol"] + if "check_tol" in self.FW_options.keys() + else 1e-4 + ) + if pyo.value(obj_expr) > check_tol: + print("error.") + raise ValueError( + "The specified initial points do not satisfy the " + "criteria necessary for convergence. Please specify different " + "initial points, or increase FW_iter_limit" + ) + print("done.") def _check_solve(self, results, model_name): - ''' Verify that the solver solved to optimality ''' - if (results.solver.status != pyo.SolverStatus.ok) or \ - (results.solver.termination_condition != pyo.TerminationCondition.optimal): - print('Solve failed on model', model_name) - print('Solver status:', results.solver.status) - print('Termination conditions:', results.solver.termination_condition) + """Verify that the solver solved to optimality""" + if (results.solver.status != pyo.SolverStatus.ok) or ( + results.solver.termination_condition != pyo.TerminationCondition.optimal + ): + print("Solve failed on model", model_name) + print("Solver status:", results.solver.status) + print("Termination conditions:", results.solver.termination_condition) raise RuntimeError() def _compute_dual_bound(self): - ''' Compute the FWPH dual bound using self._local_bound from each rank - ''' + """Compute the FWPH dual bound using self._local_bound from each rank""" send = np.array(self._local_bound) - recv = np.array(0.) - self.comms['ROOT'].Allreduce( - [send, MPI.DOUBLE], [recv, MPI.DOUBLE], op=MPI.SUM) + recv = np.array(0.0) + self.comms["ROOT"].Allreduce([send, MPI.DOUBLE], [recv, MPI.DOUBLE], op=MPI.SUM) self._local_bound = recv def _conv_diff(self): - ''' Perform the convergence check of Algorithm 3 in Boland et al. ''' - diff = 0. + """Perform the convergence check of Algorithm 3 in Boland et al.""" + diff = 0.0 for name in self.local_subproblems.keys(): mip = self.local_subproblems[name] - arb_mip = self.local_scenarios[mip.scen_list[0]] \ - if self.bundling else mip - qp = self.local_QP_subproblems[name] - diff_s = 0. - for (node_name, ix) in arb_mip._mpisppy_data.nonant_indices: + arb_mip = self.local_scenarios[mip.scen_list[0]] if self.bundling else mip + qp = self.local_QP_subproblems[name] + diff_s = 0.0 + for node_name, ix in arb_mip._mpisppy_data.nonant_indices: qpx = qp.xr if self.bundling else qp.x - diff_s += np.power(pyo.value(qpx[node_name,ix]) - - pyo.value(arb_mip._mpisppy_model.xbars[node_name,ix]), 2) + diff_s += np.power( + pyo.value(qpx[node_name, ix]) + - pyo.value(arb_mip._mpisppy_model.xbars[node_name, ix]), + 2, + ) diff_s *= mip._mpisppy_probability diff += diff_s diff = np.array(diff) - recv = np.array(0.)
- self.comms['ROOT'].Allreduce( - [diff, MPI.DOUBLE], [recv, MPI.DOUBLE], op=MPI.SUM) + recv = np.array(0.0) + self.comms["ROOT"].Allreduce([diff, MPI.DOUBLE], [recv, MPI.DOUBLE], op=MPI.SUM) return recv def _create_initial_points(self): - pc = self.FW_options['point_creator'] - if ('point_creator_data' in self.FW_options.keys()): - pd = self.FW_options['point_creator_data'] + pc = self.FW_options["point_creator"] + if "point_creator_data" in self.FW_options.keys(): + pd = self.FW_options["point_creator_data"] else: pd = None self.local_initial_points = dict() @@ -561,232 +612,282 @@ def _create_initial_points(self): self.local_initial_points[scenario_name] = pts def _extract_objective(self, mip): - ''' Extract the original part of the provided MIP's objective function - (no dual or prox terms), and create a copy containing the QP - variables in place of the MIP variables. - - Args: - mip (Pyomo ConcreteModel): MIP model for a scenario or bundle. - - Returns: - obj (Pyomo Objective): objective function extracted - from the MIP - new (Pyomo Expression): expression from the MIP model - objective with the MIP variables replaced by QP variables. - Does not inculde dual or prox terms. - - Notes: - Acts on either a single-scenario model or a bundle - ''' + """Extract the original part of the provided MIP's objective function + (no dual or prox terms), and create a copy containing the QP + variables in place of the MIP variables. + + Args: + mip (Pyomo ConcreteModel): MIP model for a scenario or bundle. + + Returns: + obj (Pyomo Objective): objective function extracted + from the MIP + new (Pyomo Expression): expression from the MIP model + objective with the MIP variables replaced by QP variables. + Does not include dual or prox terms. + + Notes: + Acts on either a single-scenario model or a bundle + """ mip_to_qp = mip._mpisppy_data.mip_to_qp obj = find_active_objective(mip) repn = generate_standard_repn(obj.expr, quadratic=True) if len(repn.nonlinear_vars) > 0: - raise ValueError("FWPH does not support models with nonlinear objective functions") + raise ValueError( + "FWPH does not support models with nonlinear objective functions" + ) linear_vars = [mip_to_qp[id(var)] for var in repn.linear_vars] new = LinearExpression( - constant=repn.constant, linear_coefs=repn.linear_coefs, linear_vars=linear_vars + constant=repn.constant, + linear_coefs=repn.linear_coefs, + linear_vars=linear_vars, ) if repn.quadratic_vars: quadratic_vars = ( - (mip_to_qp[id(x)], mip_to_qp[id(y)]) for x,y in repn.quadratic_vars + (mip_to_qp[id(x)], mip_to_qp[id(y)]) for x, y in repn.quadratic_vars ) new += pyo.quicksum( - (coef*x*y for coef,(x,y) in zip(repn.quadratic_coefs, quadratic_vars)) + ( + coef * x * y + for coef, (x, y) in zip(repn.quadratic_coefs, quadratic_vars) + ) ) return obj, new def _gather_weight_dict(self, strip_bundle_names=False): - ''' Compute a double nested dictionary of the form + """Compute a double nested dictionary of the form + + weights[scenario name][variable name] = weight value - weights[scenario name][variable name] = weight value + for FWPH to return to the user. - for FWPH to return to the user. - - Notes: - Must be called after the variables are swapped back.
+ """ local_weights = dict() - for (name, scenario) in self.local_scenarios.items(): - if (self.bundling and strip_bundle_names): + for name, scenario in self.local_scenarios.items(): + if self.bundling and strip_bundle_names: scenario_weights = dict() for ndn_ix, var in scenario._mpisppy_data.nonant_indices.items(): - rexp = '^' + scenario.name + '\.' - var_name = re.sub(rexp, '', var.name) - scenario_weights[var_name] = \ - scenario._mpisppy_model.W[ndn_ix].value + rexp = "^" + scenario.name + "\." + var_name = re.sub(rexp, "", var.name) + scenario_weights[var_name] = scenario._mpisppy_model.W[ndn_ix].value else: - scenario_weights = {nonant.name: scenario._mpisppy_model.W[ndn_ix].value - for ndn_ix, nonant in scenario._mpisppy_data.nonant_indices.items()} + scenario_weights = { + nonant.name: scenario._mpisppy_model.W[ndn_ix].value + for ndn_ix, nonant in scenario._mpisppy_data.nonant_indices.items() + } local_weights[name] = scenario_weights - weights = self.comms['ROOT'].gather(local_weights, root=0) + weights = self.comms["ROOT"].gather(local_weights, root=0) return weights def _get_leaf_vars(self, scenario): - ''' This method simply needs to take an input scenario - (pyo.ConcreteModel) and yield the variable objects - corresponding to the leaf node variables for that scenario. - - Functions by returning the complement of the set of - non-anticipative variables. - ''' - nonant_var_ids = {id(var) for node in scenario._mpisppy_node_list - for var in node.nonant_vardata_list} + """This method simply needs to take an input scenario + (pyo.ConcreteModel) and yield the variable objects + corresponding to the leaf node variables for that scenario. + + Functions by returning the complement of the set of + non-anticipative variables. + """ + nonant_var_ids = { + id(var) + for node in scenario._mpisppy_node_list + for var in node.nonant_vardata_list + } for var in scenario.component_data_objects(pyo.Var): if id(var) not in nonant_var_ids: yield var def _get_xbars(self, strip_bundle_names=False): - ''' Return the xbar vector if rank = 0 and None, otherwise - (Consistent with _gather_weight_dict). - - Args: - TODO - - Notes: - Paralellism is not necessary since each rank already has an - identical copy of xbar, provided by Compute_Xbar(). - - Returned dictionary is indexed by variable name - (as provided by the user). - - Must be called after variables are swapped back (I think). - ''' - if (self.cylinder_rank != 0): + """Return the xbar vector if rank = 0 and None, otherwise + (Consistent with _gather_weight_dict). + + Args: + TODO + + Notes: + Paralellism is not necessary since each rank already has an + identical copy of xbar, provided by Compute_Xbar(). + + Returned dictionary is indexed by variable name + (as provided by the user). + + Must be called after variables are swapped back (I think). + """ + if self.cylinder_rank != 0: return None else: random_scenario_name = list(self.local_scenarios.keys())[0] scenario = self.local_scenarios[random_scenario_name] xbar_dict = {} for node in scenario._mpisppy_node_list: - for (ix, var) in enumerate(node.nonant_vardata_list): + for ix, var in enumerate(node.nonant_vardata_list): var_name = var.name - if (self.bundling and strip_bundle_names): - rexp = '^' + random_scenario_name + '\.' - var_name = re.sub(rexp, '', var_name) - xbar_dict[var_name] = scenario._mpisppy_model.xbars[node.name, ix].value + if self.bundling and strip_bundle_names: + rexp = "^" + random_scenario_name + "\." 
+ var_name = re.sub(rexp, "", var_name) + xbar_dict[var_name] = scenario._mpisppy_model.xbars[ + node.name, ix + ].value return xbar_dict def _initialize_MIP_var_values(self): - ''' Initialize the MIP variable values to the user-specified point. + """Initialize the MIP variable values to the user-specified point. - For now, arbitrarily choose the first point in each list. - ''' + For now, arbitrarily choose the first point in each list. + """ points = self.local_initial_points - for (name, mip) in self.local_subproblems.items(): - pt = points[name][0] # Select the first point arbitrarily + for name, mip in self.local_subproblems.items(): + pt = points[name][0] # Select the first point arbitrarily mip_vars = list(mip.component_data_objects(pyo.Var)) for var in mip_vars: try: var.set_value(pt[var.name]) except KeyError as e: - raise KeyError('Found variable named' + var.name + - ' in model ' + name + ' not contained in the ' - 'specified initial point dictionary') from e + raise KeyError( + "Found variable named" + + var.name + + " in model " + + name + + " not contained in the " + "specified initial point dictionary" + ) from e def _initialize_QP_subproblems(self): - ''' Instantiates the (convex) QP subproblems (eqn. (13) in the Boland - paper) for each scenario. Does not create/attach an objective. - - Attachs a local_QP_subproblems dict to self. Keys are scenario - names (or bundle names), values are Pyomo ConcreteModel objects - corresponding to the QP subproblems. - - QP subproblems are in their original form, without the x and y - variables eliminated. Rationale: pre-solve will get this, easier - bookkeeping (objective does not need to be changed at each inner - iteration this way). - ''' + """Instantiates the (convex) QP subproblems (eqn. (13) in the Boland + paper) for each scenario. Does not create/attach an objective. + + Attachs a local_QP_subproblems dict to self. Keys are scenario + names (or bundle names), values are Pyomo ConcreteModel objects + corresponding to the QP subproblems. + + QP subproblems are in their original form, without the x and y + variables eliminated. Rationale: pre-solve will get this, easier + bookkeeping (objective does not need to be changed at each inner + iteration this way). + """ self.local_QP_subproblems = dict() - has_init_pts = hasattr(self, 'local_initial_points') - for (name, model) in self.local_subproblems.items(): - if (self.bundling): + has_init_pts = hasattr(self, "local_initial_points") + for name, model in self.local_subproblems.items(): + if self.bundling: xr_indices = model.ref_vars.keys() nonant_indices = model.nonant_vars.keys() leaf_indices = model.leaf_vars.keys() - if (has_init_pts): - raise RuntimeError('Cannot currently specify ' - 'initial points while using bundles') + if has_init_pts: + raise RuntimeError( + "Cannot currently specify " "initial points while using bundles" + ) else: nonant_indices = model._mpisppy_data.nonant_indices.keys() leaf_indices = model.leaf_vars.keys() - ''' Convex comb. coefficients ''' + """ Convex comb. coefficients """ QP = pyo.ConcreteModel() QP.a = pyo.VarList(domain=pyo.NonNegativeReals) - if (has_init_pts): + if has_init_pts: for _ in range(len(self.local_initial_points[name])): QP.a.add() else: - QP.a.add() # Just one variable (1-based index!) to start + QP.a.add() # Just one variable (1-based index!) 
to start - ''' Other variables ''' + """ Other variables """ QP.x = pyo.Var(nonant_indices, within=pyo.Reals) QP.y = pyo.Var(leaf_indices, within=pyo.Reals) - if (self.bundling): + if self.bundling: QP.xr = pyo.Var(xr_indices, within=pyo.Reals) - ''' Non-anticipativity constraint ''' - if (self.bundling): + """ Non-anticipativity constraint """ + if self.bundling: + def nonant_rule(m, scenario_name, node_name, ix): - return m.x[scenario_name, node_name, ix] == \ - m.xr[node_name, ix] + return m.x[scenario_name, node_name, ix] == m.xr[node_name, ix] + QP.na = pyo.Constraint(nonant_indices, rule=nonant_rule) - - ''' (x,y) constraints ''' - if (self.bundling): + + """ (x,y) constraints """ + if self.bundling: + def x_rule(m, node_name, ix): - return -m.xr[node_name, ix] + m.a[1] * \ - model.ref_vars[node_name, ix].value == 0 + return ( + -m.xr[node_name, ix] + + m.a[1] * model.ref_vars[node_name, ix].value + == 0 + ) + def y_rule(m, scenario_name, node_name, ix): - return -m.y[scenario_name, node_name, ix] + m.a[1]\ - * model.leaf_vars[scenario_name,node_name,ix].value == 0 + return ( + -m.y[scenario_name, node_name, ix] + + m.a[1] * model.leaf_vars[scenario_name, node_name, ix].value + == 0 + ) + QP.eqx = pyo.Constraint(xr_indices, rule=x_rule) else: - if (has_init_pts): + if has_init_pts: pts = self.local_initial_points[name] + def x_rule(m, node_name, ix): nm = model.nonant_vars[node_name, ix].name - return -m.x[node_name, ix] + \ - pyo.quicksum(m.a[i+1] * pts[i][nm] - for i in range(len(pts))) == 0 + return ( + -m.x[node_name, ix] + + pyo.quicksum( + m.a[i + 1] * pts[i][nm] for i in range(len(pts)) + ) + == 0 + ) + def y_rule(m, node_name, ix): nm = model.leaf_vars[node_name, ix].name - return -m.y[node_name,ix] + \ - pyo.quicksum(m.a[i+1] * pts[i][nm] - for i in range(len(pts))) == 0 + return ( + -m.y[node_name, ix] + + pyo.quicksum( + m.a[i + 1] * pts[i][nm] for i in range(len(pts)) + ) + == 0 + ) else: + def x_rule(m, node_name, ix): - return -m.x[node_name, ix] + m.a[1] * \ - model.nonant_vars[node_name, ix].value == 0 + return ( + -m.x[node_name, ix] + + m.a[1] * model.nonant_vars[node_name, ix].value + == 0 + ) + def y_rule(m, node_name, ix): - return -m.y[node_name,ix] + m.a[1] * \ - model.leaf_vars['LEAF', ix].value == 0 + return ( + -m.y[node_name, ix] + + m.a[1] * model.leaf_vars["LEAF", ix].value + == 0 + ) + QP.eqx = pyo.Constraint(nonant_indices, rule=x_rule) QP.eqy = pyo.Constraint(leaf_indices, rule=y_rule) - QP.sum_one = pyo.Constraint(expr=pyo.quicksum(QP.a.values())==1) + QP.sum_one = pyo.Constraint(expr=pyo.quicksum(QP.a.values()) == 1) QP._mpisppy_data = pyo.Block(name="For non-Pyomo mpi-sppy data") - QP._mpisppy_model = pyo.Block(name="For mpi-sppy Pyomo additions to the scenario model") + QP._mpisppy_model = pyo.Block( + name="For mpi-sppy Pyomo additions to the scenario model" + ) self.local_QP_subproblems[name] = QP - + def _initialize_QP_var_values(self): - ''' Set the value of the QP variables to be equal to the values of the - corresponding MIP variables. + """Set the value of the QP variables to be equal to the values of the + corresponding MIP variables. - Notes: - Must be called before _swap_nonant_vars() + Notes: + Must be called before _swap_nonant_vars() - Must be called after _initialize_MIP_var_values(), if the user - specifies initial sets of points. Otherwise, it must be called - after Iter0(). - ''' + Must be called after _initialize_MIP_var_values(), if the user + specifies initial sets of points. Otherwise, it must be called + after Iter0(). 
+ """ for name in self.local_subproblems.keys(): mip = self.local_subproblems[name] - qp = self.local_QP_subproblems[name] + qp = self.local_QP_subproblems[name] for key in mip._mpisppy_model.x_indices: qp.x[key].set_value(mip.nonant_vars[key].value) @@ -794,162 +895,199 @@ def _initialize_QP_var_values(self): qp.y[key].set_value(mip.leaf_vars[key].value) # Set the non-anticipative reference variables if we're bundling - if (self.bundling): + if self.bundling: arb_scenario = mip.scen_list[0] naix = self.local_scenarios[arb_scenario]._mpisppy_data.nonant_indices - for (node_name, ix) in naix: + for node_name, ix in naix: # Check that non-anticipativity is satisfied # within the bundle (for debugging) - vals = [mip.nonant_vars[scenario_name, node_name, ix].value - for scenario_name in mip.scen_list] - assert(max(vals) - min(vals) < 1e-7) + vals = [ + mip.nonant_vars[scenario_name, node_name, ix].value + for scenario_name in mip.scen_list + ] + assert max(vals) - min(vals) < 1e-7 qp.xr[node_name, ix].set_value( - mip.nonant_vars[arb_scenario, node_name, ix].value) + mip.nonant_vars[arb_scenario, node_name, ix].value + ) def _is_timed_out(self): - if (self.cylinder_rank == 0): + if self.cylinder_rank == 0: time_elapsed = time.time() - self.t0 - status = 1 if (time_elapsed > self.FW_options['time_limit']) \ - else 0 + status = 1 if (time_elapsed > self.FW_options["time_limit"]) else 0 else: status = None - status = self.comms['ROOT'].bcast(status, root=0) + status = self.comms["ROOT"].bcast(status, root=0) return status != 0 def _options_checks_fw(self): - ''' Name Boland notation (Algorithm 2) - ------------------------------------------------- - FW_iter_limit t_max - FW_weight alpha - FW_conv_thresh tau - ''' + """Name Boland notation (Algorithm 2) + ------------------------------------------------- + FW_iter_limit t_max + FW_weight alpha + FW_conv_thresh tau + """ # 1. Check for required options - reqd_options = ['FW_iter_limit', 'FW_weight', 'FW_conv_thresh', - 'solver_name'] + reqd_options = ["FW_iter_limit", "FW_weight", "FW_conv_thresh", "solver_name"] losers = [opt for opt in reqd_options if opt not in self.FW_options] - if (len(losers) > 0): - msg = "FW_options is missing the following key(s): " + \ - ", ".join(losers) + if len(losers) > 0: + msg = "FW_options is missing the following key(s): " + ", ".join(losers) raise RuntimeError(msg) # 2. Check that bundles, pre-specified points and t_max play nice. This # is only checked on rank 0, because that is where the initial # points are supposed to be specified. - use_bundles = ('bundles_per_rank' in self.options - and self.options['bundles_per_rank'] > 0) - t_max = self.FW_options['FW_iter_limit'] - specd_init_pts = 'point_creator' in self.FW_options.keys() and \ - self.FW_options['point_creator'] is not None - - if (use_bundles and specd_init_pts): - if (t_max == 1): - raise RuntimeError('Cannot use bundles and specify initial ' - 'points with t_max=1 at the same time.') + use_bundles = ( + "bundles_per_rank" in self.options and self.options["bundles_per_rank"] > 0 + ) + t_max = self.FW_options["FW_iter_limit"] + specd_init_pts = ( + "point_creator" in self.FW_options.keys() + and self.FW_options["point_creator"] is not None + ) + + if use_bundles and specd_init_pts: + if t_max == 1: + raise RuntimeError( + "Cannot use bundles and specify initial " + "points with t_max=1 at the same time." + ) else: - if (self.cylinder_rank == 0): - print('WARNING: Cannot specify initial points and use ' - 'bundles at the same time. 
Ignoring specified initial ' - 'points') + if self.cylinder_rank == 0: + print( + "WARNING: Cannot specify initial points and use " + "bundles at the same time. Ignoring specified initial " + "points" + ) # Remove specified initial points - self.FW_options.pop('point_creator', None) + self.FW_options.pop("point_creator", None) - if (t_max == 1 and not specd_init_pts): - raise RuntimeError('FW_iter_limit set to 1. To ensure ' - 'convergence, provide initial points, or increase ' - 'FW_iter_limit') + if t_max == 1 and not specd_init_pts: + raise RuntimeError( + "FW_iter_limit set to 1. To ensure " + "convergence, provide initial points, or increase " + "FW_iter_limit" + ) # 3a. Check that the user did not specify the linearization of binary # proximal terms (no binary variables allowed in FWPH QPs) - if ('linearize_binary_proximal_terms' in self.options - and self.options['linearize_binary_proximal_terms']): - print('Warning: linearize_binary_proximal_terms cannot be used ' - 'with the FWPH algorithm. Ignoring...') - self.options['linearize_binary_proximal_terms'] = False + if ( + "linearize_binary_proximal_terms" in self.options + and self.options["linearize_binary_proximal_terms"] + ): + print( + "Warning: linearize_binary_proximal_terms cannot be used " + "with the FWPH algorithm. Ignoring..." + ) + self.options["linearize_binary_proximal_terms"] = False # 3b. Check that the user did not specify the linearization of all # proximal terms (FWPH QPs should be QPs) - if ('linearize_proximal_terms' in self.options - and self.options['linearize_proximal_terms']): - print('Warning: linearize_proximal_terms cannot be used ' - 'with the FWPH algorithm. Ignoring...') - self.options['linearize_proximal_terms'] = False + if ( + "linearize_proximal_terms" in self.options + and self.options["linearize_proximal_terms"] + ): + print( + "Warning: linearize_proximal_terms cannot be used " + "with the FWPH algorithm. Ignoring..." + ) + self.options["linearize_proximal_terms"] = False # 4. 
Provide a time limit of inf if the user did not specify - if ('time_limit' not in self.FW_options.keys()): - self.FW_options['time_limit'] = np.inf + if "time_limit" not in self.FW_options.keys(): + self.FW_options["time_limit"] = np.inf def _output(self, itr, bound, best_bound, diff, secs): - if (self.cylinder_rank == 0 and self.vb): - print('{itr:3d} {bound:12.4f} {best_bound:12.4f} {diff:12.4e} {secs:11.1f}s'.format( - itr=itr, bound=bound, best_bound=best_bound, - diff=diff, secs=secs)) - if (self.cylinder_rank == 0 and 'save_file' in self.FW_options.keys()): - fname = self.FW_options['save_file'] - with open(fname, 'a') as f: - f.write('{itr:d},{bound:.16f},{best_bound:.16f},{diff:.16f},{secs:.16f}\n'.format( - itr=itr, bound=bound, best_bound=best_bound, - diff=diff, secs=secs)) + if self.cylinder_rank == 0 and self.vb: + print( + "{itr:3d} {bound:12.4f} {best_bound:12.4f} {diff:12.4e} {secs:11.1f}s".format( + itr=itr, bound=bound, best_bound=best_bound, diff=diff, secs=secs + ) + ) + if self.cylinder_rank == 0 and "save_file" in self.FW_options.keys(): + fname = self.FW_options["save_file"] + with open(fname, "a") as f: + f.write( + "{itr:d},{bound:.16f},{best_bound:.16f},{diff:.16f},{secs:.16f}\n".format( + itr=itr, + bound=bound, + best_bound=best_bound, + diff=diff, + secs=secs, + ) + ) def _output_header(self): - if (self.cylinder_rank == 0 and self.vb): - print('itr {bound:>12s} {bb:>12s} {cd:>12s} {tm:>12s}'.format( - bound="bound", bb="best bound", cd="conv diff", tm="time")) - if (self.cylinder_rank == 0 and 'save_file' in self.FW_options.keys()): - fname = self.FW_options['save_file'] - with open(fname, 'a') as f: - f.write('{itr:s},{bound:s},{bb:s},{diff:s},{secs:s}\n'.format( - itr="Iteration", bound="Bound", bb="Best bound", - diff="Error", secs="Time(s)")) + if self.cylinder_rank == 0 and self.vb: + print( + "itr {bound:>12s} {bb:>12s} {cd:>12s} {tm:>12s}".format( + bound="bound", bb="best bound", cd="conv diff", tm="time" + ) + ) + if self.cylinder_rank == 0 and "save_file" in self.FW_options.keys(): + fname = self.FW_options["save_file"] + with open(fname, "a") as f: + f.write( + "{itr:s},{bound:s},{bb:s},{diff:s},{secs:s}\n".format( + itr="Iteration", + bound="Bound", + bb="Best bound", + diff="Error", + secs="Time(s)", + ) + ) def save_weights(self, fname): - ''' Save the computed weights to the specified file. - - Notes: - Handles parallelism--only writes one copy of the file. - - Rather "fast-and-loose", in that it doesn't enforce _when_ this - function can be called. - ''' - weights = self._gather_weight_dict(strip_bundle_names=self.bundling) # None if rank != 0 - if (self.cylinder_rank != 0): + """Save the computed weights to the specified file. + + Notes: + Handles parallelism--only writes one copy of the file. + + Rather "fast-and-loose", in that it doesn't enforce _when_ this + function can be called. + """ + weights = self._gather_weight_dict( + strip_bundle_names=self.bundling + ) # None if rank != 0 + if self.cylinder_rank != 0: return - with open(fname, 'w') as f: + with open(fname, "w") as f: for block in weights: - for (scenario_name, wts) in block.items(): - for (var_name, weight_val) in wts.items(): - row = '{sn},{vn},{wv:.16f}\n'.format( - sn=scenario_name, vn=var_name, wv=weight_val) + for scenario_name, wts in block.items(): + for var_name, weight_val in wts.items(): + row = "{sn},{vn},{wv:.16f}\n".format( + sn=scenario_name, vn=var_name, wv=weight_val + ) f.write(row) def save_xbars(self, fname): - ''' Save the computed xbar to the specified file. 
+ """Save the computed xbar to the specified file. - Notes: - Handles parallelism--only writes one copy of the file. + Notes: + Handles parallelism--only writes one copy of the file. - Rather "fast-and-loose", in that it doesn't enforce _when_ this - function can be called. - ''' - if (self.cylinder_rank != 0): + Rather "fast-and-loose", in that it doesn't enforce _when_ this + function can be called. + """ + if self.cylinder_rank != 0: return - xbars = self._get_xbars(strip_bundle_names=self.bundling) # None if rank != 0 - with open(fname, 'w') as f: - for (var_name, xbs) in xbars.items(): - row = '{vn},{vv:.16f}\n'.format(vn=var_name, vv=xbs) + xbars = self._get_xbars(strip_bundle_names=self.bundling) # None if rank != 0 + with open(fname, "w") as f: + for var_name, xbs in xbars.items(): + row = "{vn},{vv:.16f}\n".format(vn=var_name, vv=xbs) f.write(row) def _set_MIP_solver_options(self): - mip_opts = self.FW_options['mip_solver_options'] - if (len(mip_opts) > 0): + mip_opts = self.FW_options["mip_solver_options"] + if len(mip_opts) > 0: for model in self.local_subproblems.values(): - for (key, option) in mip_opts.items(): + for key, option in mip_opts.items(): model._solver_plugin.options[key] = option def _set_QP_objective(self): - ''' Attach dual weights, objective function and solver to each QP. - - QP dual weights are initialized to the MIP dual weights. - ''' + """Attach dual weights, objective function and solver to each QP. + + QP dual weights are initialized to the MIP dual weights. + """ for name, mip in self.local_subproblems.items(): QP = self.local_QP_subproblems[name] @@ -965,50 +1103,56 @@ def _set_QP_objective(self): x_source = QP.x QP._mpisppy_model.W = pyo.Param( - m_source._mpisppy_data.nonant_indices.keys(), mutable=True, initialize=m_source._mpisppy_model.W + m_source._mpisppy_data.nonant_indices.keys(), + mutable=True, + initialize=m_source._mpisppy_model.W, ) # rhos are attached to each scenario, not each bundle (should they be?) - ph_term = pyo.quicksum(( - QP._mpisppy_model.W[nni] * x_source[nni] + - (m_source._mpisppy_model.rho[nni] / 2.) 
* (x_source[nni] - m_source._mpisppy_model.xbars[nni]) * (x_source[nni] - m_source._mpisppy_model.xbars[nni]) - for nni in m_source._mpisppy_data.nonant_indices - )) + ph_term = pyo.quicksum( + ( + QP._mpisppy_model.W[nni] * x_source[nni] + + (m_source._mpisppy_model.rho[nni] / 2.0) + * (x_source[nni] - m_source._mpisppy_model.xbars[nni]) + * (x_source[nni] - m_source._mpisppy_model.xbars[nni]) + for nni in m_source._mpisppy_data.nonant_indices + ) + ) if obj.is_minimizing(): - QP.obj = pyo.Objective(expr=new+ph_term, sense=pyo.minimize) + QP.obj = pyo.Objective(expr=new + ph_term, sense=pyo.minimize) else: - QP.obj = pyo.Objective(expr=-new+ph_term, sense=pyo.minimize) + QP.obj = pyo.Objective(expr=-new + ph_term, sense=pyo.minimize) - ''' Attach a solver with various options ''' - solver = pyo.SolverFactory(self.FW_options['solver_name']) + """ Attach a solver with various options """ + solver = pyo.SolverFactory(self.FW_options["solver_name"]) if sputils.is_persistent(solver): solver.set_instance(QP) - if 'qp_solver_options' in self.FW_options: - qp_opts = self.FW_options['qp_solver_options'] + if "qp_solver_options" in self.FW_options: + qp_opts = self.FW_options["qp_solver_options"] if qp_opts: - for (key, option) in qp_opts.items(): + for key, option in qp_opts.items(): solver.options[key] = option self.local_QP_subproblems[name]._QP_solver_plugin = solver def _swap_nonant_vars(self): - ''' Change the pointers in - scenario._mpisppy_node_list[i].nonant_vardata_list - to point to the QP variables, rather than the MIP variables. - - Notes: - When computing xBar and updating the weights in the outer - iteration, the values of the x variables are pulled from - scenario._mpisppy_node_list[i].nonant_vardata_list. In the FWPH - algorithm, xBar should be computed using the QP values, not the - MIP values (like in normal PH). - - Reruns SPBase._attach_nonant_indices so that the scenario - _nonant_indices dictionary has the correct variable pointers - - Updates nonant_vardata_list but NOT nonant_list. - ''' - for (name, model) in self.local_subproblems.items(): + """Change the pointers in + scenario._mpisppy_node_list[i].nonant_vardata_list + to point to the QP variables, rather than the MIP variables. + + Notes: + When computing xBar and updating the weights in the outer + iteration, the values of the x variables are pulled from + scenario._mpisppy_node_list[i].nonant_vardata_list. In the FWPH + algorithm, xBar should be computed using the QP values, not the + MIP values (like in normal PH). + + Reruns SPBase._attach_nonant_indices so that the scenario + _nonant_indices dictionary has the correct variable pointers + + Updates nonant_vardata_list but NOT nonant_list. + """ + for name, model in self.local_subproblems.items(): scens = model.scen_list if self.bundling else [name] for scenario_name in scens: scenario = self.local_scenarios[scenario_name] @@ -1016,33 +1160,36 @@ def _swap_nonant_vars(self): node_list = scenario._mpisppy_node_list for node in node_list: node.nonant_vardata_list = [ - self.local_QP_subproblems[name].xr[node.name,i] - if self.bundling else - self.local_QP_subproblems[name].x[node.name,i] - for i in range(num_nonant_vars[node.name])] + self.local_QP_subproblems[name].xr[node.name, i] + if self.bundling + else self.local_QP_subproblems[name].x[node.name, i] + for i in range(num_nonant_vars[node.name]) + ] self._attach_nonant_indices() def _swap_nonant_vars_back(self): - ''' Swap variables back, in case they're needed somewhere else. 
- ''' - for (name, model) in self.local_subproblems.items(): - if (self.bundling): + """Swap variables back, in case they're needed somewhere else.""" + for name, model in self.local_subproblems.items(): + if self.bundling: EF = self.local_subproblems[name] for scenario_name in EF.scen_list: scenario = self.local_scenarios[scenario_name] num_nonant_vars = scenario._mpisppy_data.nlens for node in scenario._mpisppy_node_list: node.nonant_vardata_list = [ - EF.nonant_vars[scenario_name,node.name,ix] - for ix in range(num_nonant_vars[node.name])] + EF.nonant_vars[scenario_name, node.name, ix] + for ix in range(num_nonant_vars[node.name]) + ] else: scenario = self.local_scenarios[name] num_nonant_vars = scenario._mpisppy_data.nlens for node in scenario._mpisppy_node_list: node.nonant_vardata_list = [ - scenario.nonant_vars[node.name,ix] - for ix in range(num_nonant_vars[node.name])] + scenario.nonant_vars[node.name, ix] + for ix in range(num_nonant_vars[node.name]) + ] self._attach_nonant_indices() -if __name__=='__main__': - print('fwph.py has no main()') + +if __name__ == "__main__": + print("fwph.py has no main()") diff --git a/mpisppy/generic_cylinders.py b/mpisppy/generic_cylinders.py index 95200edbc..16e1257af 100644 --- a/mpisppy/generic_cylinders.py +++ b/mpisppy/generic_cylinders.py @@ -26,24 +26,32 @@ import mpisppy.utils.solver_spec as solver_spec from mpisppy import global_toc + def _parse_args(m): # m is the model file module cfg = config.Config() - cfg.add_to_config(name="module_name", - description="Name of the file that has the scenario creator, etc.", - domain=str, - default=None, - argparse=True) - assert hasattr(m, "inparser_adder"), "The model file must have an inparser_adder function" - cfg.add_to_config(name="solution_base_name", - description="The string used for a directory of ouput along with a csv and an npv file (default None, which means no soltion output)", - domain=str, - default=None) - cfg.add_to_config(name="run_async", - description="Use APH instead of PH (default False)", - domain=bool, - default=False) - + cfg.add_to_config( + name="module_name", + description="Name of the file that has the scenario creator, etc.", + domain=str, + default=None, + argparse=True, + ) + assert hasattr( + m, "inparser_adder" + ), "The model file must have an inparser_adder function" + cfg.add_to_config( + name="solution_base_name", + description="The string used for a directory of output along with a csv and an npy file (default None, which means no solution output)", + domain=str, + default=None, + ) + cfg.add_to_config( + name="run_async", + description="Use APH instead of PH (default False)", + domain=bool, + default=False, + ) m.inparser_adder(cfg) # many models, e.g., farmer, need num_scens_required @@ -56,8 +64,8 @@ def _parse_args(m): cfg.two_sided_args() cfg.ph_args() cfg.aph_args() - cfg.fixer_args() - cfg.gapper_args() + cfg.fixer_args() + cfg.gapper_args() cfg.fwph_args() cfg.lagrangian_args() cfg.ph_ob_args() @@ -74,15 +82,18 @@ def _parse_args(m): cfg.parse_command_line(f"mpi-sppy for {cfg.module_name}") return cfg -def _name_lists(module, cfg): +def _name_lists(module, cfg): # Note: high level code like this assumes there are branching factors for # multi-stage problems.
For other trees, you will need lower-level code if cfg.get("branching_factors") is not None: - all_nodenames = sputils.create_nodenames_from_branching_factors(\ - cfg.branching_factors) + all_nodenames = sputils.create_nodenames_from_branching_factors( + cfg.branching_factors + ) num_scens = np.prod(cfg.branching_factors) - assert not cfg.xhatshuffle or cfg.get("stage2EFsolvern") is not None, "For now, stage2EFsolvern is required for multistage xhat" + assert ( + not cfg.xhatshuffle or cfg.get("stage2EFsolvern") is not None + ), "For now, stage2EFsolvern is required for multistage xhat" else: all_nodenames = None @@ -93,45 +104,53 @@ def _name_lists(module, cfg): return all_scenario_names, all_nodenames -#========== -def _do_decomp(module, cfg, scenario_creator, scenario_creator_kwargs, scenario_denouement): - rho_setter = module._rho_setter if hasattr(module, '_rho_setter') else None +# ========== +def _do_decomp( + module, cfg, scenario_creator, scenario_creator_kwargs, scenario_denouement +): + rho_setter = module._rho_setter if hasattr(module, "_rho_setter") else None if cfg.default_rho is None and rho_setter is None: - raise RuntimeError("No rho_setter so a default must be specified via --default-rho") + raise RuntimeError( + "No rho_setter so a default must be specified via --default-rho" + ) - if False: # maybe later... cfg.use_norm_rho_converger: + if False: # maybe later... cfg.use_norm_rho_converger: if not cfg.use_norm_rho_updater: - raise RuntimeError("--use-norm-rho-converger requires --use-norm-rho-updater") + raise RuntimeError( + "--use-norm-rho-converger requires --use-norm-rho-updater" + ) else: ph_converger = NormRhoConverger - elif False: # maybe later... cfg.primal_dual_converger: + elif False: # maybe later... cfg.primal_dual_converger: ph_converger = PrimalDualConverger else: ph_converger = None - all_scenario_names, all_nodenames = _name_lists(module, cfg) + all_scenario_names, all_nodenames = _name_lists(module, cfg) # Things needed for vanilla cylinders beans = (cfg, scenario_creator, scenario_denouement, all_scenario_names) if cfg.run_async: # Vanilla APH hub - hub_dict = vanilla.aph_hub(*beans, - scenario_creator_kwargs=scenario_creator_kwargs, - ph_extensions=None, - rho_setter = rho_setter, - all_nodenames = all_nodenames, - ) + hub_dict = vanilla.aph_hub( + *beans, + scenario_creator_kwargs=scenario_creator_kwargs, + ph_extensions=None, + rho_setter=rho_setter, + all_nodenames=all_nodenames, + ) else: # Vanilla PH hub - hub_dict = vanilla.ph_hub(*beans, - scenario_creator_kwargs=scenario_creator_kwargs, - ph_extensions=None, - ph_converger=ph_converger, - rho_setter = rho_setter, - all_nodenames = all_nodenames, - ) - + hub_dict = vanilla.ph_hub( + *beans, + scenario_creator_kwargs=scenario_creator_kwargs, + ph_extensions=None, + ph_converger=ph_converger, + rho_setter=rho_setter, + all_nodenames=all_nodenames, + ) + # Extend and/or correct the vanilla dictionary ext_classes = list() # TBD: add cross_scenario_cuts, which also needs a cylinder @@ -142,11 +161,13 @@ def _do_decomp(module, cfg, scenario_creator, scenario_creator_kwargs, scenario_ mipgapdict = {int(i): din[i] for i in din} hub_dict["opt_kwargs"]["options"]["gapperoptions"] = { "verbose": cfg.verbose, - "mipgapdict": mipgapdict + "mipgapdict": mipgapdict, } - + if cfg.fixer: # cfg_vanilla takes care of the fixer_tol? 
- assert hasattr(module, "id_fix_list_fct"), "id_fix_list_fct required for --fixer" + assert hasattr( + module, "id_fix_list_fct" + ), "id_fix_list_fct required for --fixer" ext_classes.append(Fixer) hub_dict["opt_kwargs"]["options"]["fixeroptions"] = { "verbose": cfg.verbose, @@ -155,65 +176,73 @@ def _do_decomp(module, cfg, scenario_creator, scenario_creator_kwargs, scenario_ } if cfg.grad_rho_setter: ext_classes.append(Gradient_extension) - hub_dict['opt_kwargs']['options']['gradient_extension_options'] = {'cfg': cfg} - + hub_dict["opt_kwargs"]["options"]["gradient_extension_options"] = {"cfg": cfg} + if len(ext_classes) != 0: - hub_dict['opt_kwargs']['extensions'] = MultiExtension - hub_dict["opt_kwargs"]["extension_kwargs"] = {"ext_classes" : ext_classes} + hub_dict["opt_kwargs"]["extensions"] = MultiExtension + hub_dict["opt_kwargs"]["extension_kwargs"] = {"ext_classes": ext_classes} if cfg.primal_dual_converger: - hub_dict['opt_kwargs']['options']\ - ['primal_dual_converger_options'] = { - 'verbose': True, - 'tol': cfg.primal_dual_converger_tol, - 'tracking': True} + hub_dict["opt_kwargs"]["options"]["primal_dual_converger_options"] = { + "verbose": True, + "tol": cfg.primal_dual_converger_tol, + "tracking": True, + } ## norm rho adaptive rho (not the gradient version) - #if cfg.use_norm_rho_updater: + # if cfg.use_norm_rho_updater: # extension_adder(hub_dict, NormRhoUpdater) # hub_dict['opt_kwargs']['options']['norm_rho_options'] = {'verbose': True} # FWPH spoke if cfg.fwph: - fw_spoke = vanilla.fwph_spoke(*beans, - scenario_creator_kwargs=scenario_creator_kwargs, - all_nodenames=all_nodenames, - rho_setter=rho_setter, - ) + fw_spoke = vanilla.fwph_spoke( + *beans, + scenario_creator_kwargs=scenario_creator_kwargs, + all_nodenames=all_nodenames, + rho_setter=rho_setter, + ) # Standard Lagrangian bound spoke if cfg.lagrangian: - lagrangian_spoke = vanilla.lagrangian_spoke(*beans, - scenario_creator_kwargs=scenario_creator_kwargs, - rho_setter = rho_setter, - all_nodenames = all_nodenames, - ) + lagrangian_spoke = vanilla.lagrangian_spoke( + *beans, + scenario_creator_kwargs=scenario_creator_kwargs, + rho_setter=rho_setter, + all_nodenames=all_nodenames, + ) # ph outer bounder spoke if cfg.ph_ob: - ph_ob_spoke = vanilla.ph_ob_spoke(*beans, - scenario_creator_kwargs=scenario_creator_kwargs, - rho_setter = rho_setter, - all_nodenames = all_nodenames, - ) + ph_ob_spoke = vanilla.ph_ob_spoke( + *beans, + scenario_creator_kwargs=scenario_creator_kwargs, + rho_setter=rho_setter, + all_nodenames=all_nodenames, + ) # subgradient outer bound spoke if cfg.subgradient: - subgradient_spoke = vanilla.subgradient_spoke(*beans, - scenario_creator_kwargs=scenario_creator_kwargs, - rho_setter = rho_setter, - all_nodenames = all_nodenames, - ) + subgradient_spoke = vanilla.subgradient_spoke( + *beans, + scenario_creator_kwargs=scenario_creator_kwargs, + rho_setter=rho_setter, + all_nodenames=all_nodenames, + ) # xhat shuffle bound spoke if cfg.xhatshuffle: - xhatshuffle_spoke = vanilla.xhatshuffle_spoke(*beans, - scenario_creator_kwargs=scenario_creator_kwargs, - all_nodenames=all_nodenames) + xhatshuffle_spoke = vanilla.xhatshuffle_spoke( + *beans, + scenario_creator_kwargs=scenario_creator_kwargs, + all_nodenames=all_nodenames, + ) if cfg.xhatxbar: - xhatxbar_spoke = vanilla.xhatxbar_spoke(*beans, - scenario_creator_kwargs=scenario_creator_kwargs, - all_nodenames=all_nodenames) + xhatxbar_spoke = vanilla.xhatxbar_spoke( + *beans, + scenario_creator_kwargs=scenario_creator_kwargs, + 
all_nodenames=all_nodenames, + ) # reduced cost fixer options setup if cfg.reduced_costs: @@ -221,16 +250,19 @@ def _do_decomp(module, cfg, scenario_creator, scenario_creator_kwargs, scenario_ # reduced cost fixer if cfg.reduced_costs: - reduced_costs_spoke = vanilla.reduced_costs_spoke(*beans, - scenario_creator_kwargs=scenario_creator_kwargs, - rho_setter = None) + reduced_costs_spoke = vanilla.reduced_costs_spoke( + *beans, scenario_creator_kwargs=scenario_creator_kwargs, rho_setter=None + ) - - # special code for multi-stage (e.g., hydro) + # special code for multi-stage (e.g., hydro) if cfg.get("stage2EFsolvern") is not None: assert cfg.get("xhatshuffle"), "xhatshuffle is required for stage2EFsolvern" - xhatshuffle_spoke["opt_kwargs"]["options"]["stage2EFsolvern"] = cfg["stage2EFsolvern"] - xhatshuffle_spoke["opt_kwargs"]["options"]["branching_factors"] = cfg["branching_factors"] + xhatshuffle_spoke["opt_kwargs"]["options"]["stage2EFsolvern"] = cfg[ + "stage2EFsolvern" + ] + xhatshuffle_spoke["opt_kwargs"]["options"]["branching_factors"] = cfg[ + "branching_factors" + ] list_of_spoke_dict = list() if cfg.fwph: @@ -247,58 +279,66 @@ def _do_decomp(module, cfg, scenario_creator, scenario_creator_kwargs, scenario_ list_of_spoke_dict.append(xhatxbar_spoke) if cfg.reduced_costs: list_of_spoke_dict.append(reduced_costs_spoke) - wheel = WheelSpinner(hub_dict, list_of_spoke_dict) wheel.spin() if cfg.solution_base_name is not None: - wheel.write_first_stage_solution(f'{cfg.solution_base_name}.csv') - wheel.write_first_stage_solution(f'{cfg.solution_base_name}.npy', - first_stage_solution_writer=sputils.first_stage_nonant_npy_serializer) - wheel.write_tree_solution(f'{cfg.solution_base_name}_soldir') + wheel.write_first_stage_solution(f"{cfg.solution_base_name}.csv") + wheel.write_first_stage_solution( + f"{cfg.solution_base_name}.npy", + first_stage_solution_writer=sputils.first_stage_nonant_npy_serializer, + ) + wheel.write_tree_solution(f"{cfg.solution_base_name}_soldir") global_toc("Wrote solution data.") -#========== +# ========== def _do_EF(module, cfg, scenario_creator, scenario_creator_kwargs, scenario_denouement): - all_scenario_names, _ = _name_lists(module, cfg) ef = sputils.create_EF( all_scenario_names, module.scenario_creator, scenario_creator_kwargs=module.kw_creator(cfg), ) - + sroot, solver_name, solver_options = solver_spec.solver_specification(cfg, "EF") solver = pyo.SolverFactory(solver_name) if solver_options is not None: # We probably could just assign the dictionary in one line... 
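        # (A hedged sketch of that one-liner, assuming solver.options supports
        #  dict-style update, as Pyomo's solver option containers generally do:
        #      solver.options.update(solver_options)
        #  the explicit loop below keeps the behavior unambiguous.)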
- for option_key,option_value in solver_options.items(): + for option_key, option_value in solver_options.items(): solver.options[option_key] = option_value - if 'persistent' in solver_name: + if "persistent" in solver_name: solver.set_instance(ef, symbolic_solver_labels=True) results = solver.solve(tee=cfg.tee_EF) else: - results = solver.solve(ef, tee=cfg.tee_EF, symbolic_solver_labels=True,) + results = solver.solve( + ef, + tee=cfg.tee_EF, + symbolic_solver_labels=True, + ) if not pyo.check_optimal_termination(results): print("Warning: non-optimal solver termination") global_toc(f"EF objective: {pyo.value(ef.EF_Obj)}") if cfg.solution_base_name is not None: - sputils.ef_nonants_csv(ef, f'{cfg.solution_base_name}.csv') - sputils.ef_ROOT_nonants_npy_serializer(ef, f'{cfg.solution_base_name}.npy') - sputils.write_ef_tree_solution(ef,f'{cfg.solution_base_name}_soldir') + sputils.ef_nonants_csv(ef, f"{cfg.solution_base_name}.csv") + sputils.ef_ROOT_nonants_npy_serializer(ef, f"{cfg.solution_base_name}.npy") + sputils.write_ef_tree_solution(ef, f"{cfg.solution_base_name}_soldir") global_toc("Wrote EF solution data.") + def _model_fname(): def _bad_news(): - raise RuntimeError("Unable to parse module name from first argument" - " (for module foo.py, we want, e.g.\n" - "--module-name foo\n" - "or\n" - "--module-name=foo") + raise RuntimeError( + "Unable to parse module name from first argument" + " (for module foo.py, we want, e.g.\n" + "--module-name foo\n" + "or\n" + "--module-name=foo" + ) + def _len_check(needed_length): if len(sys.argv) <= needed_length: _bad_news() @@ -306,7 +346,9 @@ def _len_check(needed_length): return True _len_check(1) - assert sys.argv[1][:13] == "--module-name", f"The first command argument must start with'--module-name' but you gave {sys.argv[1]}" + assert ( + sys.argv[1][:13] == "--module-name" + ), f"The first command argument must start with'--module-name' but you gave {sys.argv[1]}" if sys.argv[1] == "--module-name": _len_check(2) return sys.argv[2] @@ -315,22 +357,24 @@ def _len_check(needed_length): if len(parts) != 2: _bad_news() return parts[1] - - - + + ########################################################################## if __name__ == "__main__": if len(sys.argv) == 1: print("The python model file module name (no .py) must be given.") - print("usage, e.g.: python -m mpi4py ../../mpisppy/generic_cylinders.py --module-name farmer --help") + print( + "usage, e.g.: python -m mpi4py ../../mpisppy/generic_cylinders.py --module-name farmer --help" + ) quit() model_fname = _model_fname() # TBD: when agnostic is merged, use the function and delete the code lines # module = sputils.module_name_to_module(model_fname) - # TBD: do the sys.path.append trick in sputils + # TBD: do the sys.path.append trick in sputils import importlib import inspect + if inspect.ismodule(model_fname): module = model_fname else: @@ -338,15 +382,21 @@ def _len_check(needed_length): fname = os.path.basename(model_fname) sys.path.append(dpath) module = importlib.import_module(fname) - + cfg = _parse_args(module) scenario_creator = module.scenario_creator - scenario_creator_kwargs = module.kw_creator(cfg) - assert hasattr(module, "scenario_denouement"), "The model file must have a scenario_denouement function" + scenario_creator_kwargs = module.kw_creator(cfg) + assert hasattr( + module, "scenario_denouement" + ), "The model file must have a scenario_denouement function" scenario_denouement = module.scenario_denouement if cfg.EF: - _do_EF(module, cfg, scenario_creator, 
scenario_creator_kwargs, scenario_denouement) + _do_EF( + module, cfg, scenario_creator, scenario_creator_kwargs, scenario_denouement + ) else: - _do_decomp(module, cfg, scenario_creator, scenario_creator_kwargs, scenario_denouement) + _do_decomp( + module, cfg, scenario_creator, scenario_creator_kwargs, scenario_denouement + ) diff --git a/mpisppy/log.py b/mpisppy/log.py index bd1398dce..4935d1c42 100644 --- a/mpisppy/log.py +++ b/mpisppy/log.py @@ -13,22 +13,22 @@ Examples ======== -To use the logger in your code, add the following +To use the logger in your code, add the following after your import .. code-block:: python - + import logging logger = logging.getLogger('mpisppy.path.to.module') Then, you can use the standard logging functions .. code-block:: python - + logger.debug('message') logger.info('message') logger.warning('message') logger.error('message') logger.critical('message') - + Note that by default, any message that has a logging level of warning or higher (warning, error, critical) will be logged. @@ -42,12 +42,14 @@ logging.error("Exception occurred", exc_info=True) """ + import sys import logging -log_format = '%(message)s' + +log_format = "%(message)s" # configure the root logger for mpisppy -logger = logging.getLogger('mpisppy') +logger = logging.getLogger("mpisppy") logger.setLevel(logging.INFO) console_handler = logging.StreamHandler(sys.stdout) @@ -55,10 +57,11 @@ console_handler.setFormatter(fmtr) logger.addHandler(console_handler) -def setup_logger(name, out, level=logging.DEBUG, mode='w', fmt=None): - ''' Set up a custom logger quickly - https://stackoverflow.com/a/17037016/8516804 - ''' + +def setup_logger(name, out, level=logging.DEBUG, mode="w", fmt=None): + """Set up a custom logger quickly + https://stackoverflow.com/a/17037016/8516804 + """ if fmt is None: fmt = "(%(asctime)s) %(message)s" log = logging.getLogger(name) @@ -67,7 +70,7 @@ def setup_logger(name, out, level=logging.DEBUG, mode='w', fmt=None): formatter = logging.Formatter(fmt) if out in (sys.stdout, sys.stderr): handler = logging.StreamHandler(out) - else: # out is a filename - handler = logging.FileHandler(out, mode=mode) + else: # out is a filename + handler = logging.FileHandler(out, mode=mode) handler.setFormatter(formatter) log.addHandler(handler) diff --git a/mpisppy/opt/aph.py b/mpisppy/opt/aph.py index 9539c9c5d..6f09a8660 100644 --- a/mpisppy/opt/aph.py +++ b/mpisppy/opt/aph.py @@ -22,9 +22,10 @@ fullcomm = mpi.COMM_WORLD global_rank = fullcomm.Get_rank() -logging.basicConfig(level=logging.CRITICAL, # level=logging.CRITICAL, DEBUG - format='(%(threadName)-10s) %(message)s', - ) +logging.basicConfig( + level=logging.CRITICAL, # level=logging.CRITICAL, DEBUG + format="(%(threadName)-10s) %(message)s", +) EPSILON = 1e-5 # for, e.g., fractions of ranks @@ -44,6 +45,7 @@ k is often used as the "key" (i.e., scenario name) for the local scenarios) """ + class APH(ph_base.PHBase): """ Args: @@ -56,41 +58,38 @@ class APH(ph_base.PHBase): `scenario_creator`. Attributes (partial list): - local_scenarios (dict of scenario objects): concrete models with + local_scenarios (dict of scenario objects): concrete models with extra data, key is name comms (dict): keys are node names values are comm objects. 
scenario_name_to_rank (dict): all scenario names - local_scenario_names (list): names of locals + local_scenario_names (list): names of locals current_solver_options (dict): from options; callbacks might change synchronizer (object): asynch listener management scenario_creator_kwargs (dict): keyword arguments passed to `scenario_creator`. """ + def setup_Lens(self): - """ We need to know the lengths of c-style vectors for listener_util - """ - self.Lens = collections.OrderedDict({"FirstReduce": {}, - "SecondReduce": {}}) + """We need to know the lengths of c-style vectors for listener_util""" + self.Lens = collections.OrderedDict({"FirstReduce": {}, "SecondReduce": {}}) for sname, scenario in self.local_scenarios.items(): for node in scenario._mpisppy_node_list: - self.Lens["FirstReduce"][node.name] \ - = 3 * len(node.nonant_vardata_list) - self.Lens["SecondReduce"][node.name] = 0 # only use root? + self.Lens["FirstReduce"][node.name] = 3 * len(node.nonant_vardata_list) + self.Lens["SecondReduce"][node.name] = 0 # only use root? self.Lens["FirstReduce"]["ROOT"] += self.n_proc # for time of update # tau, phi, pusqnorm, pvsqnorm, pwsqnorm, pzsqnorm, secs self.Lens["SecondReduce"]["ROOT"] += 6 + self.n_proc - - #============================ + # ============================ def __init__( self, options, all_scenario_names, scenario_creator, scenario_denouement=None, - all_nodenames=None, + all_nodenames=None, mpicomm=None, scenario_creator_kwargs=None, extensions=None, @@ -114,12 +113,12 @@ def __init__( variable_probability=variable_probability, ) - self.phis = {} # phi values, indexed by scenario names + self.phis = {} # phi values, indexed by scenario names self.tau_summand = 0 # place holder for iteration 1 reduce self.phi_summand = 0 self.global_tau = 0 self.global_phi = 0 - self.global_pusqnorm = 0 # ... may be out of date... + self.global_pusqnorm = 0 # ... may be out of date... 
         self.global_pvsqnorm = 0
         self.global_pwsqnorm = 0
         self.global_pzsqnorm = 0
@@ -128,10 +127,10 @@ def __init__(
         self.conv = None
         self.use_lag = options.get("APHuse_lag", False)
         self.APHgamma = options.get("APHgamma", 1)
-        assert(self.APHgamma > 0)
+        assert self.APHgamma > 0
         self.use_dynamic_gamma = options.get("use_dynamic_gamma", False)
         if self.use_dynamic_gamma:
-            print('**** dynamic gamma is True so watch out!')
+            print("**** dynamic gamma is True so watch out!")
         self.shelf_life = options.get("shelf_life", 99)  # 99 is intended to be large
         self.round_robin_dispatch = options.get("round_robin_dispatch", False)
         # TBD: use a property decorator for nu to enforce 0 < nu < 2
@@ -141,34 +140,39 @@ def __init__(
         # Note June, 2023: Hack for nu
         self.use_hack_for_nu = options.get("use_hack_for_nu", False)
         if self.use_hack_for_nu:
-            print('**** you are using the hack for nu so be careful!')
+            print("**** you are using the hack for nu so be careful!")
         assert 0 < self.nu and self.nu < 2
-        self.dispatchrecord = dict() # for local subproblems sname: (iter, phi)
+        self.dispatchrecord = dict()  # for local subproblems sname: (iter, phi)
         # plot_trace_prefix or filename will indicate output is needed
-        self.plot_trace_prefix = options.get("APHplot_trace_prefix") if self.cylinder_rank == 0 else None
-        self.conv_trace_filename = None if self.plot_trace_prefix is None else f"{self.plot_trace_prefix}_dyngam_{self.use_dynamic_gamma}"\
-                                   +f"_hack_nu_{self.use_hack_for_nu}_nu_{self.nu}"\
-                                   +".csv"
+        self.plot_trace_prefix = (
+            options.get("APHplot_trace_prefix") if self.cylinder_rank == 0 else None
+        )
+        self.conv_trace_filename = (
+            None
+            if self.plot_trace_prefix is None
+            else f"{self.plot_trace_prefix}_dyngam_{self.use_dynamic_gamma}"
+            + f"_hack_nu_{self.use_hack_for_nu}_nu_{self.nu}"
+            + ".csv"
+        )
         if self.plot_trace_prefix is not None:
             with open(self.conv_trace_filename, "w") as fil:
                 fil.write("iter,conv,gamma,nu,theta,punorm,pvnorm\n")
-            for k,s in self.local_scenarios.items():
+            for k, s in self.local_scenarios.items():
                 with open(f"trace_{k}_{self.conv_trace_filename}", "w") as fil:
                     fil.write("iter,obj fct")
-                    for (ndn,i), xvar in s._mpisppy_data.nonant_indices.items():
+                    for (ndn, i), xvar in s._mpisppy_data.nonant_indices.items():
                         fil.write(f",{xvar.name} x,{xvar.name} z, {xvar.name} w")
                     fil.write("\n")
 
-    #============================
+    # ============================
     def setup_dispatchrecord(self):
         # Start with a small number for iteration to randomize first dispatch.
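        # (Illustrative shape of self.dispatchrecord, with made-up names and
        #  numbers: the seeding below gives, e.g.,
        #      {"scen0": [(0.42, 0)], "scen1": [(0.11, 0)]}
        #  and each dispatch appends an (iteration, phi) pair, e.g.
        #      {"scen0": [(0.42, 0), (1, -0.3), (4, 0.02)], ...})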
for sname in self.local_subproblems: r = np.random.rand() - self.dispatchrecord[sname] = [(r,0)] - + self.dispatchrecord[sname] = [(r, 0)] - #============================ + # ============================ def Update_y(self, dlist, verbose): # compute the new y (or set to zero if it is iter 1) # iter 1 is iter 0 post-solves when seen from the paper @@ -176,53 +180,63 @@ def Update_y(self, dlist, verbose): slist = [d[0] for d in dlist] # just the names if self._PHIter != 1: - for k,s in self.local_scenarios.items(): - if (not self.bundling and k in slist) \ - or (self.bundling and s._mpisppy_data.bundlename in slist): - for (ndn,i), xvar in s._mpisppy_data.nonant_indices.items(): + for k, s in self.local_scenarios.items(): + if (not self.bundling and k in slist) or ( + self.bundling and s._mpisppy_data.bundlename in slist + ): + for (ndn, i), xvar in s._mpisppy_data.nonant_indices.items(): if not self.use_lag: - z_touse = s._mpisppy_model.z[(ndn,i)]._value - W_touse = pyo.value(s._mpisppy_model.W[(ndn,i)]) + z_touse = s._mpisppy_model.z[(ndn, i)]._value + W_touse = pyo.value(s._mpisppy_model.W[(ndn, i)]) else: - z_touse = s._mpisppy_model.z_foropt[(ndn,i)]._value - W_touse = pyo.value(s._mpisppy_model.W_foropt[(ndn,i)]) + z_touse = s._mpisppy_model.z_foropt[(ndn, i)]._value + W_touse = pyo.value(s._mpisppy_model.W_foropt[(ndn, i)]) # pyo.value vs. _value ?? # NOTE: W_touse and z_touse are coming from # the previous iteration - s._mpisppy_model.y[(ndn,i)]._value = W_touse \ - + pyo.value(s._mpisppy_model.rho[(ndn,i)]) \ - * (xvar._value - z_touse) #Eq.25 + s._mpisppy_model.y[(ndn, i)]._value = W_touse + pyo.value( + s._mpisppy_model.rho[(ndn, i)] + ) * (xvar._value - z_touse) # Eq.25 if verbose and self.cylinder_rank == 0: - print ("node, scen, var, y", ndn, k, - self.cylinder_rank, xvar.name, - pyo.value(s._mpisppy_model.y[(ndn,i)])) + print( + "node, scen, var, y", + ndn, + k, + self.cylinder_rank, + xvar.name, + pyo.value(s._mpisppy_model.y[(ndn, i)]), + ) # Special code for variable probabilities to mask y; rarely used. 
if s._mpisppy_data.has_variable_probability: - s._mpisppy_model.y[(ndn,i)]._value *= s._mpisppy_data.prob0_mask[ndn][i] + s._mpisppy_model.y[ + (ndn, i) + ]._value *= s._mpisppy_data.prob0_mask[ndn][i] else: - for k,s in self.local_scenarios.items(): - for (ndn,i), xvar in s._mpisppy_data.nonant_indices.items(): - s._mpisppy_model.y[(ndn,i)]._value = 0 + for k, s in self.local_scenarios.items(): + for (ndn, i), xvar in s._mpisppy_data.nonant_indices.items(): + s._mpisppy_model.y[(ndn, i)]._value = 0 if verbose and self.cylinder_rank == 0: - print ("All y=0 for iter1") + print("All y=0 for iter1") - - - #============================ + # ============================ def compute_phis_summand(self): # update phis, return summand (variable_probability is already resolved) summand = 0.0 - for k,s in self.local_scenarios.items(): + for k, s in self.local_scenarios.items(): self.phis[k] = 0.0 - for (ndn,i), xvar in s._mpisppy_data.nonant_indices.items(): + for (ndn, i), xvar in s._mpisppy_data.nonant_indices.items(): # Step 16, phi - self.phis[k] += (pyo.value(s._mpisppy_model.z[(ndn,i)]) - xvar._value) \ - *(pyo.value(s._mpisppy_model.W[(ndn,i)]) - pyo.value(s._mpisppy_model.y[(ndn,i)])) + self.phis[k] += ( + pyo.value(s._mpisppy_model.z[(ndn, i)]) - xvar._value + ) * ( + pyo.value(s._mpisppy_model.W[(ndn, i)]) + - pyo.value(s._mpisppy_model.y[(ndn, i)]) + ) self.phis[k] *= pyo.value(s._mpisppy_probability) summand += self.phis[k] return summand - #============================***********========= + # ============================***********========= def _calculate_APHgamma(self, synchro): """This function calculates a gamma value that accounts for the value @@ -238,10 +252,10 @@ def _calculate_APHgamma(self, synchro): using it when they are zero """ - + uk = self.global_pusqnorm vk = self.global_pvsqnorm - + # Note June, 2023: We are waiting until we get values greater # than 0 for the norms. Iteration 3 is arbitrary if self._PHIter <= 3: @@ -255,27 +269,26 @@ def _calculate_APHgamma(self, synchro): uk1 = self.uk1 vk1 = self.vk1 # Note June, 2023: vk1 and uk1 should be going down - v_term = ((vk1 - vk) / vk) # use vk1 in denominator? - u_term = ((uk1 - uk) / uk) # use uk1 in denominator? + v_term = (vk1 - vk) / vk # use vk1 in denominator? + u_term = (uk1 - uk) / uk # use uk1 in denominator? if v_term <= 0 or u_term <= 0: # print('v_term=', v_term, 'u_term=', u_term, 'vk1=', vk1, 'vk=', vk, 'uk1=', uk1, 'uk=', uk) gamma = self.APHgamma else: gamma = ( - v_term / u_term # gamma value gets + v_term / u_term # gamma value gets # vk / uk # (vk / zk) / (uk / wk) # use scaled v and u ) self.uk1 = uk self.vk1 = vk - + self.APHgamma = gamma return self.APHgamma - def listener_side_gig(self, synchro): - """ Called by the listener after the first reduce. + """Called by the listener after the first reduce. First, see if there are enough xbar contributions to proceed. If there are, then compute tau and phi. NOTE: it gets the synchronizer as an arg but self already has it. @@ -285,7 +298,7 @@ def listener_side_gig(self, synchro): Massive side-effects: e.g., update xbar etc. Iter 1 (iter 0) in the paper is special: the v := u, which is a little - complicated because we only compute y-bar. + complicated because we only compute y-bar. 
""" # This does unsafe things, so it can only be called when the worker is @@ -293,100 +306,114 @@ def listener_side_gig(self, synchro): verbose = self.options["verbose"] # See if we have enough xbars to proceed (need not be perfect) - self.synchronizer._unsafe_get_global_data("FirstReduce", - self.node_concats) - self.synchronizer._unsafe_get_global_data("SecondReduce", - self.node_concats) + self.synchronizer._unsafe_get_global_data("FirstReduce", self.node_concats) + self.synchronizer._unsafe_get_global_data("SecondReduce", self.node_concats) # last_phi_tau_update_time # (the last time this side-gig did the calculations) # We are going to see how many rank's xbars have been computed # since then. If enough (determined by frac_needed), the do the calcs. # The six is because the reduced data (e.g. phi) are in the first 6. - lptut = np.max(self.node_concats["SecondReduce"]["ROOT"][6:]) + lptut = np.max(self.node_concats["SecondReduce"]["ROOT"][6:]) - logging.debug(' +++ debug enter listener_side_gig on cylinder_rank {} last phi update {}'\ - .format(self.cylinder_rank, lptut)) + logging.debug( + " +++ debug enter listener_side_gig on cylinder_rank {} last phi update {}".format( + self.cylinder_rank, lptut + ) + ) - xbarin = 0 # count ranks (close enough to be a proxy for scenarios) + xbarin = 0 # count ranks (close enough to be a proxy for scenarios) for cr in range(self.n_proc): backdist = self.n_proc - cr # how far back into the vector ##logging.debug(' *side_gig* cr {} on rank {} time {}'.\ ## format(cr, self.cylinder_rank, ## self.node_concats["FirstReduce"]["ROOT"][-backdist])) - if self.node_concats["FirstReduce"]["ROOT"][-backdist] \ - > lptut: + if self.node_concats["FirstReduce"]["ROOT"][-backdist] > lptut: xbarin += 1 - fracin = xbarin/self.n_proc + EPSILON - if fracin < self.options["async_frac_needed"]: + fracin = xbarin / self.n_proc + EPSILON + if fracin < self.options["async_frac_needed"]: # We have not really "done" the side gig. 
- logging.debug(' ^ debug not good to go listener_side_gig on cylinder_rank {}; xbarin={}; fracin={}'\ - .format(self.cylinder_rank, xbarin, fracin)) + logging.debug( + " ^ debug not good to go listener_side_gig on cylinder_rank {}; xbarin={}; fracin={}".format( + self.cylinder_rank, xbarin, fracin + ) + ) return # If we are still here, we have enough to do the calculations - logging.debug('^^^ debug good to go listener_side_gig on cylinder_rank {}; xbarin={}'\ - .format(self.cylinder_rank, xbarin)) + logging.debug( + "^^^ debug good to go listener_side_gig on cylinder_rank {}; xbarin={}".format( + self.cylinder_rank, xbarin + ) + ) if verbose and self.cylinder_rank == 0: - print ("(%d)" % xbarin) - + print("(%d)" % xbarin) + # set the xbar, xsqbar, and ybar in all the scenarios - for k,s in self.local_scenarios.items(): + for k, s in self.local_scenarios.items(): nlens = s._mpisppy_data.nlens - for (ndn,i) in s._mpisppy_data.nonant_indices: - s._mpisppy_model.xbars[(ndn,i)]._value \ - = self.node_concats["FirstReduce"][ndn][i] - s._mpisppy_model.xsqbars[(ndn,i)]._value \ - = self.node_concats["FirstReduce"][ndn][nlens[ndn]+i] - s._mpisppy_model.ybars[(ndn,i)]._value \ - = self.node_concats["FirstReduce"][ndn][2*nlens[ndn]+i] + for ndn, i in s._mpisppy_data.nonant_indices: + s._mpisppy_model.xbars[(ndn, i)]._value = self.node_concats[ + "FirstReduce" + ][ndn][i] + s._mpisppy_model.xsqbars[(ndn, i)]._value = self.node_concats[ + "FirstReduce" + ][ndn][nlens[ndn] + i] + s._mpisppy_model.ybars[(ndn, i)]._value = self.node_concats[ + "FirstReduce" + ][ndn][2 * nlens[ndn] + i] if verbose and self.cylinder_rank == 0: - print ("rank, scen, node, var, xbar:", - self.cylinder_rank,k,ndn,s._mpisppy_data.nonant_indices[ndn,i].name, - pyo.value(s._mpisppy_model.xbars[(ndn,i)])) + print( + "rank, scen, node, var, xbar:", + self.cylinder_rank, + k, + ndn, + s._mpisppy_data.nonant_indices[ndn, i].name, + pyo.value(s._mpisppy_model.xbars[(ndn, i)]), + ) # There is one tau_summand for the rank; global_tau is out of date when # we get here because we could not compute it until the averages were. 
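        # (Reading aid for the update just below, using names from this method:
        #  each rank caches its previous contribution in self.tau_summand, so
        #      global_tau <- global_tau - tau_summand + new_tau_summand
        #  refreshes the global estimate without waiting for a fresh reduce.)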
# vk is just going to be ybar directly if not hasattr(self, "uk"): # indexed by sname and nonant index [sname][(ndn,i)] - self.uk = {sname: dict() for sname in self.local_scenarios.keys()} + self.uk = {sname: dict() for sname in self.local_scenarios.keys()} self.local_pusqnorm = 0 # local summand for probability weighted sqnorm self.local_pvsqnorm = 0 new_tau_summand = 0 # for this rank - for sname,s in self.local_scenarios.items(): + for sname, s in self.local_scenarios.items(): scen_usqnorm = 0.0 scen_vsqnorm = 0.0 nlens = s._mpisppy_data.nlens if not s._mpisppy_data.has_variable_probability: - - for (ndn,i), xvar in s._mpisppy_data.nonant_indices.items(): - self.uk[sname][(ndn,i)] = xvar._value \ - - pyo.value(s._mpisppy_model.xbars[(ndn,i)]) # Eq.27 + for (ndn, i), xvar in s._mpisppy_data.nonant_indices.items(): + self.uk[sname][(ndn, i)] = xvar._value - pyo.value( + s._mpisppy_model.xbars[(ndn, i)] + ) # Eq.27 # compute the usqnorm and vsqnorm (squared L2 norms) - scen_usqnorm += (self.uk[sname][(ndn,i)] \ - * self.uk[sname][(ndn,i)]) - scen_vsqnorm += (pyo.value(s._mpisppy_model.ybars[(ndn,i)]) \ - * pyo.value(s._mpisppy_model.ybars[(ndn,i)])) + scen_usqnorm += self.uk[sname][(ndn, i)] * self.uk[sname][(ndn, i)] + scen_vsqnorm += pyo.value( + s._mpisppy_model.ybars[(ndn, i)] + ) * pyo.value(s._mpisppy_model.ybars[(ndn, i)]) else: # In the unlikely event of variable probability, do it # over again - for (ndn,i), xvar in s._mpisppy_data.nonant_indices.items(): + for (ndn, i), xvar in s._mpisppy_data.nonant_indices.items(): if s._mpisppy_data.prob0_mask[ndn][i] != 0: - self.uk[sname][(ndn,i)] = ( - xvar._value \ - - pyo.value(s._mpisppy_model.xbars[(ndn,i)]) # Eq.27 + self.uk[sname][(ndn, i)] = ( + xvar._value + - pyo.value(s._mpisppy_model.xbars[(ndn, i)]) # Eq.27 ) else: - self.uk[sname][(ndn,i)] = 0 + self.uk[sname][(ndn, i)] = 0 # compute the usqnorm and vsqnorm (squared L2 norms) - scen_usqnorm += (self.uk[sname][(ndn,i)] \ - * self.uk[sname][(ndn,i)]) - scen_vsqnorm += (pyo.value(s._mpisppy_model.ybars[(ndn,i)]) \ - * pyo.value(s._mpisppy_model.ybars[(ndn,i)])) - + scen_usqnorm += self.uk[sname][(ndn, i)] * self.uk[sname][(ndn, i)] + scen_vsqnorm += pyo.value( + s._mpisppy_model.ybars[(ndn, i)] + ) * pyo.value(s._mpisppy_model.ybars[(ndn, i)]) + # Note by DLW April 2023: You need to move the probs up for multi-stage if not s._mpisppy_data.has_variable_probability: # NOTE: The p is not true but we avoid changing the @@ -395,31 +422,37 @@ def listener_side_gig(self, synchro): self.local_pusqnorm += scen_usqnorm self.local_pvsqnorm += scen_vsqnorm else: - self.local_pusqnorm += pyo.value(s._mpisppy_probability) * scen_usqnorm # prob first done - self.local_pvsqnorm += pyo.value(s._mpisppy_probability) * scen_vsqnorm # prob first done + self.local_pusqnorm += ( + pyo.value(s._mpisppy_probability) * scen_usqnorm + ) # prob first done + self.local_pvsqnorm += ( + pyo.value(s._mpisppy_probability) * scen_vsqnorm + ) # prob first done if self.use_dynamic_gamma: - gamma = self._calculate_APHgamma(synchro) # update APHgamma - print('dynamic gamma=', gamma, 'i=', i, 'sname=', sname) - + gamma = self._calculate_APHgamma(synchro) # update APHgamma + print("dynamic gamma=", gamma, "i=", i, "sname=", sname) + # I don't think s._mpisppy_dat.has_variable_probability is needed here - new_tau_summand += ( - pyo.value(s._mpisppy_probability) \ - * (scen_usqnorm + scen_vsqnorm/self.APHgamma) + new_tau_summand += pyo.value(s._mpisppy_probability) * ( + scen_usqnorm + scen_vsqnorm / self.APHgamma ) - + 
        # tauk is the expectation of the sum of squares; update for this calc
-        logging.debug(' in side-gig, old global_tau={}'.format(self.global_tau))
-        logging.debug(' in side-gig, old summand={}'.format(self.tau_summand))
-        logging.debug(' in side-gig, new summand={}'.format(new_tau_summand))
+        logging.debug(" in side-gig, old global_tau={}".format(self.global_tau))
+        logging.debug(" in side-gig, old summand={}".format(self.tau_summand))
+        logging.debug(" in side-gig, new summand={}".format(new_tau_summand))
         self.global_tau = self.global_tau - self.tau_summand + new_tau_summand
-        self.tau_summand = new_tau_summand # make available for the next reduce
-        logging.debug(' in side-gig, new global_tau={}'.format(self.global_tau))
+        self.tau_summand = new_tau_summand  # make available for the next reduce
+        logging.debug(" in side-gig, new global_tau={}".format(self.global_tau))
 
-        # now we can get the local contribution to the phi_sum 
+        # now we can get the local contribution to the phi_sum
         if self.global_tau <= 0:
-            logging.debug('  *** Negative tau={} on rank {}'\
-                          .format(self.global_tau, self.cylinder_rank))
+            logging.debug(
+                "  *** Negative tau={} on rank {}".format(
+                    self.global_tau, self.cylinder_rank
+                )
+            )
         self.phi_summand = self.compute_phis_summand()
 
         # prepare for the reduction that will take place after this side-gig
@@ -433,17 +466,16 @@
         # we have updated our summands and the listener will do a reduction
         secs_so_far = time.perf_counter() - self.start_time
         # Put in a time only for this rank, so the "sum" is really a report
-        self.local_concats["SecondReduce"]["ROOT"][6+self.cylinder_rank] = secs_so_far
+        self.local_concats["SecondReduce"]["ROOT"][6 + self.cylinder_rank] = secs_so_far
         # This is run by the listener, so don't tell the worker you have done
         # it until you are sure you have.
-        self.synchronizer._unsafe_put_local_data("SecondReduce",
-                                                 self.local_concats)
+        self.synchronizer._unsafe_put_local_data("SecondReduce", self.local_concats)
         self.synchronizer.enable_side_gig = False # we did it
-        logging.debug(' exit side_gig on rank {}'.format(self.cylinder_rank))
-
-    #============================
+        logging.debug(" exit side_gig on rank {}".format(self.cylinder_rank))
+
+    # ============================
     def Compute_Averages(self, verbose=False):
-        """Gather ybar, xbar and x squared bar for each node 
+        """Gather ybar, xbar and x squared bar for each node
        and also distribute the values back to the scenarios.
        Compute the tau summand from self and distribute back tauk
        (tau_k is a scalar and special with respect to synchronizing).
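A minimal sketch of the reduce-buffer layout these two steps assume (index arithmetic only; nlen stands for a node's nonant count and n_proc for the rank count, matching setup_Lens above):

    FirstReduce[node], length 3 * nlen (ROOT gets n_proc extra slots):
        [0 : nlen]             xbar contributions
        [nlen : 2 * nlen]      xsqbar contributions
        [2 * nlen : 3 * nlen]  ybar contributions
        ROOT tail (n_proc)     per-rank wall-clock report times
    SecondReduce["ROOT"], length 6 + n_proc:
        tau, phi, pusqnorm, pvsqnorm, pwsqnorm, pzsqnorm, then per-rank times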
@@ -460,110 +492,131 @@ def Compute_Averages(self, verbose=False): """ if not hasattr(self, "local_concats"): - nodenames = [] # avoid repeated work - self.local_concats = {"FirstReduce": {}, # keys are tree node names - "SecondReduce": {}} - self.node_concats = {"FirstReduce": {}, # concat of xbar and xsqbar - "SecondReduce": {}} + nodenames = [] # avoid repeated work + self.local_concats = { + "FirstReduce": {}, # keys are tree node names + "SecondReduce": {}, + } + self.node_concats = { + "FirstReduce": {}, # concat of xbar and xsqbar + "SecondReduce": {}, + } # accumulate & concatenate all local contributions before the reduce # create the c-style storage for the concats - for k,s in self.local_scenarios.items(): - nlens = s._mpisppy_data.nlens + for k, s in self.local_scenarios.items(): + nlens = s._mpisppy_data.nlens for node in s._mpisppy_node_list: if node.name not in nodenames: ndn = node.name nodenames.append(ndn) mylen = self.Lens["FirstReduce"][ndn] - self.local_concats["FirstReduce"][ndn]\ - = np.zeros(mylen, dtype='d') - self.node_concats["FirstReduce"][ndn]\ - = np.zeros(mylen, dtype='d') + self.local_concats["FirstReduce"][ndn] = np.zeros( + mylen, dtype="d" + ) + self.node_concats["FirstReduce"][ndn] = np.zeros( + mylen, dtype="d" + ) # second reduce is tau and phi mylen = self.Lens["SecondReduce"]["ROOT"] - self.local_concats["SecondReduce"]["ROOT"]\ - = np.zeros(mylen, dtype='d') - self.node_concats["SecondReduce"]["ROOT"]\ - = np.zeros(mylen, dtype='d') - else: # concats are here, just zero them out. + self.local_concats["SecondReduce"]["ROOT"] = np.zeros(mylen, dtype="d") + self.node_concats["SecondReduce"]["ROOT"] = np.zeros(mylen, dtype="d") + else: # concats are here, just zero them out. # We zero them so we can use an accumulator in the next loop and # that seems to be OK. nodenames = [] - for k,s in self.local_scenarios.items(): - nlens = s._mpisppy_data.nlens + for k, s in self.local_scenarios.items(): + nlens = s._mpisppy_data.nlens for node in s._mpisppy_node_list: if node.name not in nodenames: ndn = node.name nodenames.append(ndn) self.local_concats["FirstReduce"][ndn].fill(0) - self.node_concats["FirstReduce"][ndn].fill(0) + self.node_concats["FirstReduce"][ndn].fill(0) self.local_concats["SecondReduce"]["ROOT"].fill(0) self.node_concats["SecondReduce"]["ROOT"].fill(0) # Compute the locals and concat them for the first reduce, which includes xbar. # We don't need to lock here because the direct buffers are only accessed # by compute_global_data. 
- for k,s in self.local_scenarios.items(): + for k, s in self.local_scenarios.items(): nlens = s._mpisppy_data.nlens for node in s._mpisppy_node_list: ndn = node.name for i in range(nlens[node.name]): v_value = node.nonant_vardata_list[i]._value - self.local_concats["FirstReduce"][node.name][i] += \ - (s._mpisppy_probability / node.uncond_prob) * v_value - logging.debug(" rank= {} scen={}, i={}, v_value={}".\ - format(global_rank, k, i, v_value)) - self.local_concats["FirstReduce"][node.name][nlens[ndn]+i]\ - += (s._mpisppy_probability / node.uncond_prob) * v_value * v_value - self.local_concats["FirstReduce"][node.name][2*nlens[ndn]+i]\ - += (s._mpisppy_probability / node.uncond_prob) \ - * pyo.value(s._mpisppy_model.y[(node.name,i)]) + self.local_concats["FirstReduce"][node.name][i] += ( + s._mpisppy_probability / node.uncond_prob + ) * v_value + logging.debug( + " rank= {} scen={}, i={}, v_value={}".format( + global_rank, k, i, v_value + ) + ) + self.local_concats["FirstReduce"][node.name][nlens[ndn] + i] += ( + (s._mpisppy_probability / node.uncond_prob) * v_value * v_value + ) + self.local_concats["FirstReduce"][node.name][ + 2 * nlens[ndn] + i + ] += (s._mpisppy_probability / node.uncond_prob) * pyo.value( + s._mpisppy_model.y[(node.name, i)] + ) # print('test1', 'i:', i, 'mpisppy_prob', s._mpisppy_probability, 'uncond_prob', node.uncond_prob) if s._mpisppy_data.has_variable_probability: # re-do in the unlikely event of variable probabilities xxx TBD: check for multi-stage ##prob = s._mpisppy_data.prob_coeff[ndn_i[0]][ndn_i[1]] prob = s._mpisppy_data.prob_coeff[ndn][i] - self.local_concats["FirstReduce"][node.name][i] += \ - (prob / node.uncond_prob) * v_value - self.local_concats["FirstReduce"][node.name][nlens[ndn]+i]\ - += (prob / node.uncond_prob) * v_value * v_value + self.local_concats["FirstReduce"][node.name][i] += ( + prob / node.uncond_prob + ) * v_value + self.local_concats["FirstReduce"][node.name][ + nlens[ndn] + i + ] += (prob / node.uncond_prob) * v_value * v_value # for variable probability, ybar is really ysum!!! - self.local_concats["FirstReduce"][node.name][2*nlens[ndn]+i]\ - += pyo.value(s._mpisppy_model.y[(node.name,i)]) + self.local_concats["FirstReduce"][node.name][ + 2 * nlens[ndn] + i + ] += pyo.value(s._mpisppy_model.y[(node.name, i)]) # print('test2', 'i:', i, 'prob', prob, 'uncond_prob', node.uncond_prob) # record the time secs_sofar = time.perf_counter() - self.start_time # only this rank puts a time for this rank, so the sum is a report - self.local_concats["FirstReduce"]["ROOT"][3*nlens["ROOT"]+self.cylinder_rank] \ - = secs_sofar - logging.debug('Compute_Averages at secs_sofar {} on rank {}'\ - .format(secs_sofar, self.cylinder_rank)) - - self.synchronizer.compute_global_data(self.local_concats, - self.node_concats, - enable_side_gig = True, - rednames = ["FirstReduce"], - keep_up = True) + self.local_concats["FirstReduce"]["ROOT"][ + 3 * nlens["ROOT"] + self.cylinder_rank + ] = secs_sofar + logging.debug( + "Compute_Averages at secs_sofar {} on rank {}".format( + secs_sofar, self.cylinder_rank + ) + ) + + self.synchronizer.compute_global_data( + self.local_concats, + self.node_concats, + enable_side_gig=True, + rednames=["FirstReduce"], + keep_up=True, + ) # The lock is something to worry about here. - while self.synchronizer.global_quitting == 0 \ - and self.synchronizer.enable_side_gig: + while ( + self.synchronizer.global_quitting == 0 and self.synchronizer.enable_side_gig + ): # Other ranks could be reporting, so keep looking for them. 
- self.synchronizer.compute_global_data(self.local_concats, - self.node_concats) + self.synchronizer.compute_global_data(self.local_concats, self.node_concats) if not self.synchronizer.enable_side_gig: - logging.debug(' did side gig break on rank {}'.format(self.cylinder_rank)) + logging.debug( + " did side gig break on rank {}".format(self.cylinder_rank) + ) break else: - logging.debug(' gig wait sleep on rank {}'.format(self.cylinder_rank)) + logging.debug(" gig wait sleep on rank {}".format(self.cylinder_rank)) if verbose and self.cylinder_rank == 0: - print ('s'), + (print("s"),) time.sleep(self.options["async_sleep_secs"]) # (if the listener still has the lock, compute_global_will wait for it) - self.synchronizer.compute_global_data(self.local_concats, - self.node_concats) + self.synchronizer.compute_global_data(self.local_concats, self.node_concats) # We assign the global xbar, etc. as side-effect in the side gig, btw self.global_tau = self.node_concats["SecondReduce"]["ROOT"][0] self.global_phi = self.node_concats["SecondReduce"]["ROOT"][1] @@ -572,20 +625,27 @@ def Compute_Averages(self, verbose=False): self.global_pwsqnorm = self.node_concats["SecondReduce"]["ROOT"][4] self.global_pzsqnorm = self.node_concats["SecondReduce"]["ROOT"][5] - logging.debug('Assigned global tau {} and phi {} on rank {}'\ - .format(self.global_tau, self.global_phi, self.cylinder_rank)) - - #============================ + logging.debug( + "Assigned global tau {} and phi {} on rank {}".format( + self.global_tau, self.global_phi, self.cylinder_rank + ) + ) + + # ============================ def Update_theta_zw(self, verbose): """ Compute and store theta, then update z and w and update the probability weighted norms. """ if self.global_tau <= 0: - logging.debug('|tau {}, rank {}'.format(self.global_tau, self.cylinder_rank)) - self.theta = 0 # Step 17 + logging.debug( + "|tau {}, rank {}".format(self.global_tau, self.cylinder_rank) + ) + self.theta = 0 # Step 17 elif self.global_phi <= 0: - logging.debug('|phi {}, rank {}'.format(self.global_phi, self.cylinder_rank)) + logging.debug( + "|phi {}, rank {}".format(self.global_phi, self.cylinder_rank) + ) self.theta = 0 else: punorm = math.sqrt(self.global_pusqnorm) @@ -601,66 +661,80 @@ def Update_theta_zw(self, verbose): self.nu = 1 - nu_val else: self.nu = 1 + nu_val - self.theta = self.global_phi * self.nu / self.global_tau # Step 16 + self.theta = self.global_phi * self.nu / self.global_tau # Step 16 # print(f'nu={self.nu}') - for k,s in self.local_scenarios.items(): - for (ndn,i), xvar in s._mpisppy_data.nonant_indices.items(): + for k, s in self.local_scenarios.items(): + for (ndn, i), xvar in s._mpisppy_data.nonant_indices.items(): if punorm <= pvnorm: factor = 1 - rho_val else: factor = 1 + rho_val - s._mpisppy_model.rho[(ndn,i)] = pyo.value(s._mpisppy_model.rho[(ndn,i)]) * factor + s._mpisppy_model.rho[(ndn, i)] = ( + pyo.value(s._mpisppy_model.rho[(ndn, i)]) * factor + ) # print(f'rho={pyo.value(s._mpisppy_model.rho[(ndn,i)])}') - - logging.debug('Iter {} assigned theta {} on rank {}'\ - .format(self._PHIter, self.theta, self.cylinder_rank)) + + logging.debug( + "Iter {} assigned theta {} on rank {}".format( + self._PHIter, self.theta, self.cylinder_rank + ) + ) oldpw = self.local_pwsqnorm oldpz = self.local_pzsqnorm self.local_pwsqnorm = 0 self.local_pzsqnorm = 0 # v is just ybar - for k,s in self.local_scenarios.items(): + for k, s in self.local_scenarios.items(): probs = pyo.value(s._mpisppy_probability) - for (ndn, i) in 
s._mpisppy_data.nonant_indices:
-                Wupdate = self.theta * self.uk[k][(ndn,i)]
-                Ws = pyo.value(s._mpisppy_model.W[(ndn,i)]) + Wupdate # Step 19, Algorithm 2
+            for ndn, i in s._mpisppy_data.nonant_indices:
+                Wupdate = self.theta * self.uk[k][(ndn, i)]
+                Ws = (
+                    pyo.value(s._mpisppy_model.W[(ndn, i)]) + Wupdate
+                )  # Step 19, Algorithm 2
                # Special code for variable probabilities to mask W; rarely used.
                if s._mpisppy_data.has_variable_probability:
                    Ws *= s._mpisppy_data.prob0_mask[ndn][i]
-                s._mpisppy_model.W[(ndn,i)] = Ws
+                s._mpisppy_model.W[(ndn, i)] = Ws
                self.local_pwsqnorm += probs * Ws * Ws
                # iter 1 is iter 0 post-solves when seen from the paper
-                if self._PHIter != 1: # Step 18, Algorithm 2
+                if self._PHIter != 1:  # Step 18, Algorithm 2
                    # NOTE: for variable probability, ybar was computed as a sum!!!!
-                    zs = pyo.value(s._mpisppy_model.z[(ndn,i)])\
-                         + self.theta * pyo.value(s._mpisppy_model.ybars[(ndn,i)])/self.APHgamma
+                    zs = (
+                        pyo.value(s._mpisppy_model.z[(ndn, i)])
+                        + self.theta
+                        * pyo.value(s._mpisppy_model.ybars[(ndn, i)])
+                        / self.APHgamma
+                    )
                else:
-                    zs = pyo.value(s._mpisppy_model.xbars[(ndn,i)])
+                    zs = pyo.value(s._mpisppy_model.xbars[(ndn, i)])
                # Special code for variable probabilities to mask z; rarely used.
                if s._mpisppy_data.has_variable_probability:
                    zs *= s._mpisppy_data.prob0_mask[ndn][i]
-                s._mpisppy_model.z[(ndn,i)] = zs
+                s._mpisppy_model.z[(ndn, i)] = zs
                self.local_pzsqnorm += probs * zs * zs
-                logging.debug("rank={}, scen={}, i={}, Ws={}, zs={}".\
-                              format(global_rank, k, i, Ws, zs))
+                logging.debug(
+                    "rank={}, scen={}, i={}, Ws={}, zs={}".format(
+                        global_rank, k, i, Ws, zs
+                    )
+                )
 
        # ? so they will be there next time? (we really need a third reduction)
        self.local_concats["SecondReduce"]["ROOT"][4] = self.local_pwsqnorm
        self.local_concats["SecondReduce"]["ROOT"][5] = self.local_pzsqnorm
        # The values we just computed can't be in the global yet, so update here
-        self.global_pwsqnorm += (self.local_pwsqnorm - oldpw)
-        self.global_pzsqnorm += (self.local_pzsqnorm - oldpz)
-
-    #============================
+        self.global_pwsqnorm += self.local_pwsqnorm - oldpw
+        self.global_pzsqnorm += self.local_pzsqnorm - oldpz
+
+    # ============================
    def Compute_Convergence(self, verbose=False):
        """
        The convergence metric is the probability weighted u-norm scaled by
        the probability weighted w-norm, plus the probability weighted v-norm
        scaled by the probability weighted z-norm (each norm is the square
        root of a probability weighted sum of squares).
-        
+
        Returns:
            update self.conv if appropriate
        """
@@ -676,8 +750,15 @@
        if pwnorm > 0 and pznorm > 0:
            self.conv = punorm / pwnorm + pvnorm / pznorm
 
-        logging.debug('self.conv={} self.global_pusqnorm={} self.global_pwsqnorm={} self.global_pvsqnorm={} self.global_pzsqnorm={})'\
-                      .format(self.conv, self.global_pusqnorm, self.global_pwsqnorm, self.global_pvsqnorm, self.global_pzsqnorm))
+        logging.debug(
+            "self.conv={} self.global_pusqnorm={} self.global_pwsqnorm={} self.global_pvsqnorm={} self.global_pzsqnorm={})".format(
+                self.conv,
+                self.global_pusqnorm,
+                self.global_pwsqnorm,
+                self.global_pvsqnorm,
+                self.global_pzsqnorm,
+            )
+        )
        # allow a PH converger, mainly for mpisppy to get xhat from a wheel conv
        if hasattr(self, "ph_convobject") and self.ph_convobject is not None:
            phc = self.ph_convobject(self, self.cylinder_rank, self.n_proc)
@@ -685,10 +766,11 @@
        if self.conv_trace_filename is not None:
            with open(self.conv_trace_filename, "a") as fil:
-                fil.write(f"{self._PHIter},{self.conv},{self.APHgamma},{self.nu},{self.theta},{punorm},{pvnorm}\n")
-
+                fil.write(
+                    f"{self._PHIter},{self.conv},{self.APHgamma},{self.nu},{self.theta},{punorm},{pvnorm}\n"
+                )
 
-    #==========
+    # ==========
    def _update_foropt(self, dlist):
        # dlist is a list of subproblem names that were dispatched
        assert self.use_lag
@@ -701,27 +783,37 @@
        if not self.bundling:
            for dl in dlist:
                scenario = self.local_scenarios[dl[0]]
-                for (ndn,i), xvar in scenario._mpisppy_data.nonant_indices.items():
-                    scenario._mpisppy_model.z_foropt[(ndn,i)] = scenario._mpisppy_model.z[(ndn,i)]
-                    scenario._mpisppy_model.W_foropt[(ndn,i)] = scenario._mpisppy_model.W[(ndn,i)]
+                for (ndn, i), xvar in scenario._mpisppy_data.nonant_indices.items():
+                    scenario._mpisppy_model.z_foropt[(ndn, i)] = (
+                        scenario._mpisppy_model.z[(ndn, i)]
+                    )
+                    scenario._mpisppy_model.W_foropt[(ndn, i)] = (
+                        scenario._mpisppy_model.W[(ndn, i)]
+                    )
        else:
            for dl in dlist:
                for sname in self.local_subproblems[dl[0]].scen_list:
                    scenario = self.local_scenarios[sname]
-                    for (ndn,i), xvar in scenario._mpisppy_data.nonant_indices.items():
-                        scenario._mpisppy_model.z_foropt[(ndn,i)] = scenario._mpisppy_model.z[(ndn,i)]
-                        scenario._mpisppy_model.W_foropt[(ndn,i)] = scenario._mpisppy_model.W[(ndn,i)]
-
-
-    #====================================================================
-    def APH_solve_loop(self, solver_options=None,
-                       use_scenarios_not_subproblems=False,
-                       dtiming=False,
-                       gripe=False,
-                       disable_pyomo_signal_handling=False,
-                       tee=False,
-                       verbose=False,
-                       dispatch_frac=1):
+                    for (ndn, i), xvar in scenario._mpisppy_data.nonant_indices.items():
+                        scenario._mpisppy_model.z_foropt[(ndn, i)] = (
+                            scenario._mpisppy_model.z[(ndn, i)]
+                        )
+                        scenario._mpisppy_model.W_foropt[(ndn, i)] = (
+                            scenario._mpisppy_model.W[(ndn, i)]
+                        )
+
+    # ====================================================================
+    def APH_solve_loop(
+        self,
+        solver_options=None,
+        use_scenarios_not_subproblems=False,
+        dtiming=False,
+        gripe=False,
+        disable_pyomo_signal_handling=False,
+        tee=False,
+        verbose=False,
+        dispatch_frac=1,
+    ):
        """See phbase.solve_loop. Loop over self.local_subproblems and solve them
        in a manner dictated by the arguments. In addition to changing the
        Var values in the scenarios, update
@@ -732,7 +824,7 @@
            use_scenarios_not_subproblems (boolean): for use by bounds
            dtiming (boolean): indicates that timing should be reported
            gripe (boolean): output a message if a solve fails
-            disable_pyomo_signal_handling (boolean): set to true for asynch, 
+            disable_pyomo_signal_handling (boolean): set to true for asynch,
                ignored for persistent solvers.
            tee (boolean): show solver output to screen if possible
            verbose (boolean): indicates verbose output
@@ -741,12 +833,13 @@
        Returns:
            dlist (list of (str, float)): (dispatched name, phi)
        """
-        #==========
-        def _vb(msg):
+
+        # ==========
+        def _vb(msg):
            if verbose and self.cylinder_rank == 0:
-                print ("(cylinder rank {}) {}".format(self.cylinder_rank, msg))
-        _vb("Entering solve_loop function.")
+                print("(cylinder rank {}) {}".format(self.cylinder_rank, msg))
 
+        _vb("Entering solve_loop function.")
        if use_scenarios_not_subproblems:
            s_source = self.local_scenarios
@@ -756,45 +849,60 @@
        if not self.bundling:
            phidict = self.phis
        else:
-            phidict = {k: self.phis[self.local_subproblems[k].scen_list[0]] for k in s_source.keys()}
+            phidict = {
+                k: self.phis[self.local_subproblems[k].scen_list[0]]
+                for k in s_source.keys()
+            }
        # dict(sorted(phidict.items(), key=lambda item: item[1]))
        # sortedbyphi = {k: v for k, v in sorted(phidict.items(), key=lambda item: item[1])}
 
-        #========
+        # ========
        def _dispatch_list(scnt):
-            # Return the list of scnt (subproblems,phi) 
+            # Return the list of scnt (subproblems,phi)
            # pairs for dispatch.
            # There is an option to allow for round-robin for research purposes.
            # NOTE: intermediate lists are created to help with verification.
            # reminder: dispatchrecord is sname:[(iter,phi)...]
            if self.round_robin_dispatch:
                # TBD: check this sort
-                sortedbyI = {k: v for k, v in sorted(self.dispatchrecord.items(),
-                                                     key=lambda item: item[1][-1])}
+                sortedbyI = {
+                    k: v
+                    for k, v in sorted(
+                        self.dispatchrecord.items(), key=lambda item: item[1][-1]
+                    )
+                }
                _vb("   sortedbyI={}".format(sortedbyI))
                # There is presumably a pythonic way to do this...
                retval = list()
                i = 0
-                for k,v in sortedbyI.items():
+                for k, v in sortedbyI.items():
                    retval.append((k, phidict[k])) # sname, phi
                    i += 1
                    if i >= scnt:
                        return retval
-                raise RuntimeError(f"bad scnt={scnt} in _dispatch_list;"
-                                   f" len(sortedbyI)={len(sortedbyI)}")
+                raise RuntimeError(
+                    f"bad scnt={scnt} in _dispatch_list;"
+                    f" len(sortedbyI)={len(sortedbyI)}"
+                )
            else: # Not doing round robin
                # k is sname
-                tosort = [(k, -max(self.dispatchrecord[k][-1][0], self.shelf_life-1), phidict[k])\
-                          for k in self.dispatchrecord.keys()]
-                sortedlist = sorted(tosort, key=lambda element: (element[1], element[2]))
+                tosort = [
+                    (
+                        k,
+                        -max(self.dispatchrecord[k][-1][0], self.shelf_life - 1),
+                        phidict[k],
+                    )
+                    for k in self.dispatchrecord.keys()
+                ]
+                sortedlist = sorted(
+                    tosort, key=lambda element: (element[1], element[2])
+                )
                retval = [(sortedlist[k][0], sortedlist[k][2]) for k in range(scnt)]
                # TBD: See if there were enough w/negative phi values and warn.
                # TBD: see if shelf-life is hitting and warn
                return retval
 
-
        # body of APH_solve_loop fct starts here
        logging.debug("  early APH solve_loop for rank={}".format(self.cylinder_rank))
@@ -803,152 +911,181 @@
        _vb("dispatch list before dispatch: {}".format(dispatch_list))
        pyomo_solve_times = list()
        for dguy in dispatch_list:
-            k = dguy[0] # name of who to dispatch
-            p = dguy[1] # phi
+            k = dguy[0]  # name of who to dispatch
+            p = dguy[1]  # phi
            s = s_source[k]
            self.dispatchrecord[k].append((self._PHIter, p))
            _vb("dispatch k={}; phi={}".format(k, p))
-            logging.debug("  in APH solve_loop rank={}, k={}, phi={}".\
-                          format(self.cylinder_rank, k, p))
+            logging.debug(
+                "  in APH solve_loop rank={}, k={}, phi={}".format(
+                    self.cylinder_rank, k, p
+                )
+            )
 
            # the lower level dtiming does a gather
-            pyomo_solve_times.append(self.solve_one(solver_options, k, s,
-                                     dtiming=False,
-                                     verbose=verbose,
-                                     tee=tee,
-                                     gripe=gripe,
-                                     disable_pyomo_signal_handling=disable_pyomo_signal_handling
-                                     ))
+            pyomo_solve_times.append(
+                self.solve_one(
+                    solver_options,
+                    k,
+                    s,
+                    dtiming=False,
+                    verbose=verbose,
+                    tee=tee,
+                    gripe=gripe,
+                    disable_pyomo_signal_handling=disable_pyomo_signal_handling,
+                )
+            )
 
        if dtiming:
            print("Pyomo solve times (seconds):")
-            print("\trank=,%d, n=,%d, min=,%4.2f, mean=,%4.2f, max=,%4.2f" %
-                  (self.global_rank,
-                   len(pyomo_solve_times),
-                   np.min(pyomo_solve_times),
-                   np.mean(pyomo_solve_times),
-                   np.max(pyomo_solve_times)))
+            print(
+                "\trank=,%d, n=,%d, min=,%4.2f, mean=,%4.2f, max=,%4.2f"
+                % (
+                    self.global_rank,
+                    len(pyomo_solve_times),
+                    np.min(pyomo_solve_times),
+                    np.mean(pyomo_solve_times),
+                    np.max(pyomo_solve_times),
+                )
+            )
 
        return dispatch_list
 
-    #========
+    # ========
    def _print_conv_detail(self):
-        print("Convergence Metric=",self.conv)
+        print("Convergence Metric=", self.conv)
        punorm = math.sqrt(self.global_pusqnorm)
        pwnorm = math.sqrt(self.global_pwsqnorm)
        pvnorm = math.sqrt(self.global_pvsqnorm)
        pznorm = math.sqrt(self.global_pzsqnorm)
-        print(f'  punorm={punorm} pwnorm={pwnorm} pvnorm={pvnorm} pznorm={pznorm}')
+        print(f"  punorm={punorm} pwnorm={pwnorm} pvnorm={pvnorm} pznorm={pznorm}")
        if pwnorm > 0 and pznorm > 0:
-            print(f"   scaled U term={punorm / pwnorm}; scaled V term={pvnorm / pznorm}")
+            print(
+                f"   scaled U term={punorm / pwnorm}; scaled V term={pvnorm / pznorm}"
+            )
        else:
            print("  ! convergence metric cannot be computed due to zero-divide")
 
-
-    #========
+    # ========
    def display_details(self, msg):
        """Output as much as you can about the current state"""
        print(f"hello {msg}")
        print(f"*** global rank {global_rank} display details: {msg}")
        print(f"zero-based iteration number {self._PHIter}")
        self._print_conv_detail()
-        print(f"phi={self.global_phi}, nu={self.nu}, tau={self.global_tau} so theta={self.theta}")
+        print(
+            f"phi={self.global_phi}, nu={self.nu}, tau={self.global_tau} so theta={self.theta}"
+        )
        print(f"{'Nonants for':19} {'x':8} {'z':8} {'W':8} {'u':8} {'y':8}")
-        for k,s in self.local_scenarios.items():
+        for k, s in self.local_scenarios.items():
            print(f"   Scenario {k}")
-            for (ndn,i), xvar in s._mpisppy_data.nonant_indices.items():
-                print(f"   {(ndn,i)} {float(xvar._value):9.3} "
-                      f"{float(s._mpisppy_model.z[(ndn,i)]._value):9.3}"
-                      f"{float(s._mpisppy_model.W[(ndn,i)]._value):9.3}"
-                      f"{float(self.uk[k][(ndn,i)]):9.3}"
-                      f"{float(s._mpisppy_model.y[(ndn,i)]._value):9.3}")
+            for (ndn, i), xvar in s._mpisppy_data.nonant_indices.items():
+                print(
+                    f"   {(ndn,i)} {float(xvar._value):9.3} "
+                    f"{float(s._mpisppy_model.z[(ndn,i)]._value):9.3}"
+                    f"{float(s._mpisppy_model.W[(ndn,i)]._value):9.3}"
+                    f"{float(self.uk[k][(ndn,i)]):9.3}"
+                    f"{float(s._mpisppy_model.y[(ndn,i)]._value):9.3}"
+                )
        ph_base._Compute_Wbar(self)
 
        if self.plot_trace_prefix is not None:
-            for k,s in self.local_scenarios.items():
+            for k, s in self.local_scenarios.items():
                objval = pyo.value(self.saved_objectives[k])
                with open(f"trace_{k}_{self.conv_trace_filename}", "a") as fil:
                    fil.write(f"{self._PHIter},{objval}")
-                    for (ndn,i), xvar in s._mpisppy_data.nonant_indices.items():
-                        fil.write(f",{xvar._value},{s._mpisppy_model.z[(ndn,i)]._value},{s._mpisppy_model.W[(ndn,i)]._value}")
+                    for (ndn, i), xvar in s._mpisppy_data.nonant_indices.items():
+                        fil.write(
+                            f",{xvar._value},{s._mpisppy_model.z[(ndn,i)]._value},{s._mpisppy_model.W[(ndn,i)]._value}"
+                        )
                    fil.write("\n")
 
-
-    #====================================================================
+    # ====================================================================
    def APH_iterk(self, spcomm):
-        """ Loop for the main iterations (called by synchronizer). 
+        """ Loop for the main iterations (called by synchronizer).
 
        Args:
            spcomm (SPCommunicator object): to communicate intra and inter
 
-        Updates: 
+        Updates:
            self.conv (): APH convergence
 
        """
-        logging.debug('==== enter iterk on rank {}'.format(self.cylinder_rank))
+        logging.debug("==== enter iterk on rank {}".format(self.cylinder_rank))
        verbose = self.options["verbose"]
        have_extensions = self.extensions is not None
        # We have the "bottom of the loop at the top"
        # so we need a dlist to get the ball rolling (it might not be used)
        dlist = [(sn, 0.0) for sn in self.local_scenario_names]
-        
+
        # put dispatch_frac on the object so extensions can modify it
-        self.dispatch_frac = self.options["dispatch_frac"]\
-                             if "dispatch_frac" in self.options else 1
+        self.dispatch_frac = (
+            self.options["dispatch_frac"] if "dispatch_frac" in self.options else 1
+        )
 
        have_converger = self.ph_converger is not None
        dprogress = self.options["display_progress"]
        dtiming = self.options["display_timing"]
-        ddetail = "display_convergence_detail" in self.options and\
-                  self.options["display_convergence_detail"]
+        ddetail = (
+            "display_convergence_detail" in self.options
+            and self.options["display_convergence_detail"]
+        )
        self.conv = None
 
        # The notion of an iteration is unclear
        # we enter after the iteration 0 solves, so do updates first
-        for self._PHIter in range(1, self.options["PHIterLimit"]+1):
+        for self._PHIter in range(1, self.options["PHIterLimit"] + 1):
            if self.synchronizer.global_quitting:
                break
            iteration_start_time = time.time()
 
            if dprogress and self.cylinder_rank == 0:
                print("")
-                print ("Initiating APH Iteration",self._PHIter)
+                print("Initiating APH Iteration", self._PHIter)
                print("")
 
            self.Update_y(dlist, verbose)
            # Compute xbar, etc
-            logging.debug('pre Compute_Averages on rank {}'.format(self.cylinder_rank))
+            logging.debug("pre Compute_Averages on rank {}".format(self.cylinder_rank))
            self.Compute_Averages(verbose)
-            logging.debug('post Compute_Averages on rank {}'.format(self.cylinder_rank))
+            logging.debug("post Compute_Averages on rank {}".format(self.cylinder_rank))
            if self.global_tau <= 0:
-                logging.critical('***tau is 0 on rank {}'.format(self.cylinder_rank))
+                logging.critical("***tau is 0 on rank {}".format(self.cylinder_rank))
            # Apr 2019 dlw: If you want the convergence crit. to be up to date,
            # do this as a listener side-gig and add another reduction.
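            # (Condensed view of the next two calls, symbols as in the code:
            #      theta = nu * global_phi / global_tau    # Step 16
            #      W[s] += theta * u[s]                    # Step 19, Algorithm 2
            #      z    += theta * ybar / APHgamma         # Step 18, iterations > 1
            #  with theta forced to 0 if global_tau or global_phi is nonpositive.)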
self.Update_theta_zw(verbose) self.Compute_Convergence() # updates conv - phisum = self.compute_phis_summand() # post-step phis for dispatch - logging.debug('phisum={} after step on {}'.format(phisum, self.cylinder_rank)) + phisum = self.compute_phis_summand() # post-step phis for dispatch + logging.debug( + "phisum={} after step on {}".format(phisum, self.cylinder_rank) + ) # ORed checks for convergence if spcomm is not None and type(spcomm) is not mpi.Intracomm: spcomm.sync_with_spokes() - logging.debug('post sync_with_spokes on rank {}'.format(self.cylinder_rank)) + logging.debug( + "post sync_with_spokes on rank {}".format(self.cylinder_rank) + ) if spcomm.is_converged(): - break + break if have_converger: if self.convobject.is_converged(): if self.cylinder_rank == 0: - print("User-supplied converger determined termination criterion reached") + print( + "User-supplied converger determined termination criterion reached" + ) break if ddetail: - self.display_details("pre-solve loop (everything is updated from prev iter)") + self.display_details( + "pre-solve loop (everything is updated from prev iter)" + ) # slight divergence from PH, where mid-iter is before conv if have_extensions: self.extobject.miditer() - - teeme = ("tee-rank0-solves" in self.options) \ - and (self.options["tee-rank0-solves"] - and self.cylinder_rank == 0) + + teeme = ("tee-rank0-solves" in self.options) and ( + self.options["tee-rank0-solves"] and self.cylinder_rank == 0 + ) # Let the solve loop deal with persistent solvers & signal handling # Aug2020 switch to a partial loop xxxxx maybe that is enough..... # Aug2020 ... at least you would get dispatch @@ -956,41 +1093,39 @@ def APH_iterk(self, spcomm): # TBD: ? restructure so iter 1 can have partial dispatch if self._PHIter == 1: savefrac = self.dispatch_frac - self.dispatch_frac = 1 # to get a decent w for everyone - logging.debug('pre APH_solve_loop on rank {}'.format(self.cylinder_rank)) - dlist = self.APH_solve_loop(solver_options = \ - self.current_solver_options, - dtiming=dtiming, - gripe=True, - disable_pyomo_signal_handling=True, - tee=teeme, - verbose=verbose, - dispatch_frac=self.dispatch_frac) - - logging.debug('post APH_solve_loop on rank {}'.format(self.cylinder_rank)) + self.dispatch_frac = 1 # to get a decent w for everyone + logging.debug("pre APH_solve_loop on rank {}".format(self.cylinder_rank)) + dlist = self.APH_solve_loop( + solver_options=self.current_solver_options, + dtiming=dtiming, + gripe=True, + disable_pyomo_signal_handling=True, + tee=teeme, + verbose=verbose, + dispatch_frac=self.dispatch_frac, + ) + + logging.debug("post APH_solve_loop on rank {}".format(self.cylinder_rank)) if self._PHIter == 1: - self.dispatch_frac = savefrac + self.dispatch_frac = savefrac if have_extensions: self.extobject.enditer() if dprogress and self.cylinder_rank == 0: print("") - print("After APH Iteration",self._PHIter) + print("After APH Iteration", self._PHIter) if not ddetail: self._print_conv_detail() - print("Iteration time: %6.2f" \ - % (time.time() - iteration_start_time)) - print("Elapsed time: %6.2f" \ - % (time.perf_counter() - self.start_time)) + print("Iteration time: %6.2f" % (time.time() - iteration_start_time)) + print("Elapsed time: %6.2f" % (time.perf_counter() - self.start_time)) if self.use_lag: self._update_foropt(dlist) - logging.debug('Setting synchronizer.quitting on rank %d' % self.cylinder_rank) + logging.debug("Setting synchronizer.quitting on rank %d" % self.cylinder_rank) self.synchronizer.quitting = 1 - 
#==================================================================== + # ==================================================================== def APH_main(self, spcomm=None, finalize=True): - """Execute the APH algorithm. Args: spcomm (SPCommunitator object): for intra or inter communications @@ -999,9 +1134,9 @@ def APH_main(self, spcomm=None, finalize=True): and return None for Eobj Returns: - conv, Eobj, trivial_bound: - The first two CANNOT BE EASILY INTERPRETED. - Eobj is the expected, weighted objective with + conv, Eobj, trivial_bound: + The first two CANNOT BE EASILY INTERPRETED. + Eobj is the expected, weighted objective with proximal term. It is not directly useful. The trivial bound is computed after iter 0 NOTE: @@ -1009,60 +1144,96 @@ def APH_main(self, spcomm=None, finalize=True): """ # Prep needs to be before iter 0 for bundling # (It could be split up) - logging.debug('enter aph main on cylinder_rank {}'.format(self.cylinder_rank)) + logging.debug("enter aph main on cylinder_rank {}".format(self.cylinder_rank)) self.PH_Prep(attach_duals=False, attach_prox=False) # Begin APH-specific Prep - for sname, scenario in self.local_scenarios.items(): + for sname, scenario in self.local_scenarios.items(): # ys is plural of y - scenario._mpisppy_model.y = pyo.Param(scenario._mpisppy_data.nonant_indices.keys(), - initialize = 0.0, - mutable = True) - scenario._mpisppy_model.ybars = pyo.Param(scenario._mpisppy_data.nonant_indices.keys(), - initialize = 0.0, - mutable = True) - scenario._mpisppy_model.z = pyo.Param(scenario._mpisppy_data.nonant_indices.keys(), - initialize = 0.0, - mutable = True) + scenario._mpisppy_model.y = pyo.Param( + scenario._mpisppy_data.nonant_indices.keys(), + initialize=0.0, + mutable=True, + ) + scenario._mpisppy_model.ybars = pyo.Param( + scenario._mpisppy_data.nonant_indices.keys(), + initialize=0.0, + mutable=True, + ) + scenario._mpisppy_model.z = pyo.Param( + scenario._mpisppy_data.nonant_indices.keys(), + initialize=0.0, + mutable=True, + ) # lag: we will support lagging back only to the last solve # IMPORTANT: pyomo does not support a second reference so no: # scenario._mpisppy_model.z_foropt = scenario._mpisppy_model.z - + if self.use_lag: - scenario._mpisppy_model.z_foropt = pyo.Param(scenario._mpisppy_data.nonant_indices.keys(), - initialize = 0.0, - mutable = True) - scenario._mpisppy_model.W_foropt = pyo.Param(scenario._mpisppy_data.nonant_indices.keys(), - initialize = 0.0, - mutable = True) - + scenario._mpisppy_model.z_foropt = pyo.Param( + scenario._mpisppy_data.nonant_indices.keys(), + initialize=0.0, + mutable=True, + ) + scenario._mpisppy_model.W_foropt = pyo.Param( + scenario._mpisppy_data.nonant_indices.keys(), + initialize=0.0, + mutable=True, + ) + objfct = self.saved_objectives[sname] - + if self.use_lag: assert not scenario._mpisppy_data.has_variable_probability - - for (ndn,i), xvar in scenario._mpisppy_data.nonant_indices.items(): + + for (ndn, i), xvar in scenario._mpisppy_data.nonant_indices.items(): # proximal term - objfct.expr += scenario._mpisppy_model.prox_on * \ - (scenario._mpisppy_model.rho[(ndn,i)] /2.0) * \ - (xvar**2 - 2.0*xvar*scenario._mpisppy_model.z_foropt[(ndn,i)] + scenario._mpisppy_model.z_foropt[(ndn,i)]**2) + objfct.expr += ( + scenario._mpisppy_model.prox_on + * (scenario._mpisppy_model.rho[(ndn, i)] / 2.0) + * ( + xvar**2 + - 2.0 * xvar * scenario._mpisppy_model.z_foropt[(ndn, i)] + + scenario._mpisppy_model.z_foropt[(ndn, i)] ** 2 + ) + ) # W term - objfct.expr += scenario._mpisppy_model.W_on * 
scenario._mpisppy_model.W_foropt[ndn,i] * xvar + objfct.expr += ( + scenario._mpisppy_model.W_on + * scenario._mpisppy_model.W_foropt[ndn, i] + * xvar + ) else: - for (ndn,i), xvar in scenario._mpisppy_data.nonant_indices.items(): + for (ndn, i), xvar in scenario._mpisppy_data.nonant_indices.items(): if not scenario._mpisppy_data.has_variable_probability: # proximal term - objfct.expr += scenario._mpisppy_model.prox_on * \ - (scenario._mpisppy_model.rho[(ndn,i)] /2.0) * \ - (xvar**2 - 2.0*xvar*scenario._mpisppy_model.z[(ndn,i)] + scenario._mpisppy_model.z[(ndn,i)]**2) + objfct.expr += ( + scenario._mpisppy_model.prox_on + * (scenario._mpisppy_model.rho[(ndn, i)] / 2.0) + * ( + xvar**2 + - 2.0 * xvar * scenario._mpisppy_model.z[(ndn, i)] + + scenario._mpisppy_model.z[(ndn, i)] ** 2 + ) + ) else: - objfct.expr += scenario._mpisppy_data.prob0_mask[ndn][i] * \ - scenario._mpisppy_model.prox_on * \ - (scenario._mpisppy_model.rho[(ndn,i)] /2.0) * \ - (xvar**2 - 2.0*xvar*scenario._mpisppy_model.z[(ndn,i)] + scenario._mpisppy_model.z[(ndn,i)]**2) + objfct.expr += ( + scenario._mpisppy_data.prob0_mask[ndn][i] + * scenario._mpisppy_model.prox_on + * (scenario._mpisppy_model.rho[(ndn, i)] / 2.0) + * ( + xvar**2 + - 2.0 * xvar * scenario._mpisppy_model.z[(ndn, i)] + + scenario._mpisppy_model.z[(ndn, i)] ** 2 + ) + ) # W term - objfct.expr += scenario._mpisppy_model.W_on * scenario._mpisppy_model.W[ndn,i] * xvar + objfct.expr += ( + scenario._mpisppy_model.W_on + * scenario._mpisppy_model.W[ndn, i] + * xvar + ) # End APH-specific Prep @@ -1074,15 +1245,19 @@ def APH_main(self, spcomm=None, finalize=True): sleep_secs = self.options["async_sleep_secs"] lkwargs = None # nothing beyond synchro - listener_gigs = {"FirstReduce": (self.listener_side_gig, lkwargs), - "SecondReduce": None} - self.synchronizer = listener_util.Synchronizer(comms = self.comms, - Lens = self.Lens, - work_fct = self.APH_iterk, - rank = self.cylinder_rank, - sleep_secs = sleep_secs, - asynch = True, - listener_gigs = listener_gigs) + listener_gigs = { + "FirstReduce": (self.listener_side_gig, lkwargs), + "SecondReduce": None, + } + self.synchronizer = listener_util.Synchronizer( + comms=self.comms, + Lens=self.Lens, + work_fct=self.APH_iterk, + rank=self.cylinder_rank, + sleep_secs=sleep_secs, + asynch=True, + listener_gigs=listener_gigs, + ) args = [spcomm] if spcomm is not None else [fullcomm] kwargs = None # {"extensions": extensions} self.synchronizer.run(args, kwargs) @@ -1092,21 +1267,22 @@ def APH_main(self, spcomm=None, finalize=True): else: Eobj = None -# print(f"Debug: here's the dispatch record for rank={self.global_rank}") -# for k,v in self.dispatchrecord.items(): -# print(k, v) -# print() -# print("End dispatch record") + # print(f"Debug: here's the dispatch record for rank={self.global_rank}") + # for k,v in self.dispatchrecord.items(): + # print(k, v) + # print() + # print("End dispatch record") return self.conv, Eobj, trivial_bound -#************************************************************ + +# ************************************************************ if __name__ == "__main__": # hardwired by dlw for debugging import mpisppy.tests.examples.farmer as refmodel PHopt = {} - PHopt["asynchronousPH"] = False # APH is *projective* and always APH + PHopt["asynchronousPH"] = False # APH is *projective* and always APH PHopt["solver_name"] = "cplex" PHopt["PHIterLimit"] = 5 PHopt["defaultPHrho"] = 1 @@ -1121,10 +1297,10 @@ def APH_main(self, spcomm=None, finalize=True): PHopt["iterk_solver_options"] = {"mipgap": 0.001} 
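# Aside (illustrative, not part of the patch): the APH-specific objective
# terms assembled in APH_main above add, for each nonanticipative variable x
# with weight W, penalty rho, and mutable consensus Param z,
#
#     W_on * W * x  +  prox_on * (rho / 2) * (x**2 - 2*x*z + z**2)
#
# The quadratic piece is the expanded square (rho / 2) * (x - z)**2, written
# out term by term. A standalone numeric check of that identity:
#
#     x, z, rho = 3.0, 1.0, 2.0
#     assert (rho / 2) * (x**2 - 2 * x * z + z**2) == (rho / 2) * (x - z) ** 2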
ScenCount = 3 - scenario_creator_kwargs = {'use_integer': False, "crops_multiplier": 1} + scenario_creator_kwargs = {"use_integer": False, "crops_multiplier": 1} all_scenario_names = list() for sn in range(ScenCount): - all_scenario_names.append("scen"+str(sn)) + all_scenario_names.append("scen" + str(sn)) # end hardwire scenario_creator = refmodel.scenario_creator @@ -1140,7 +1316,6 @@ def APH_main(self, spcomm=None, finalize=True): scenario_creator_kwargs=scenario_creator_kwargs, ) - """ import xhatlooper PHopt["xhat_looper_options"] = {"xhat_solver_options":\ @@ -1151,11 +1326,10 @@ def APH_main(self, spcomm=None, finalize=True): conv, obj, bnd = aph.APH_main() if aph.cylinder_rank == 0: - print ("E[obj] for converged solution (probably NOT non-anticipative)", - obj) + print("E[obj] for converged solution (probably NOT non-anticipative)", obj) dopts = sputils.option_string_to_dict("mipgap=0.001") objbound = aph.post_solve_bound(solver_options=dopts, verbose=False) - if (aph.cylinder_rank == 0): - print ("**** Lagrangian objective function bound=",objbound) - print ("(probably converged way too early, BTW)") + if aph.cylinder_rank == 0: + print("**** Lagrangian objective function bound=", objbound) + print("(probably converged way too early, BTW)") diff --git a/mpisppy/opt/ef.py b/mpisppy/opt/ef.py index 6c647d812..0647ed5de 100644 --- a/mpisppy/opt/ef.py +++ b/mpisppy/opt/ef.py @@ -13,8 +13,9 @@ logger = logging.getLogger("mpisppy.ef") + class ExtensiveForm(mpisppy.spbase.SPBase): - """ Create and solve an extensive form. + """Create and solve an extensive form. Attributes: ef (:class:`pyomo.environ.ConcreteModel`): @@ -42,6 +43,7 @@ class ExtensiveForm(mpisppy.spbase.SPBase): Note: allowing use of the "solver" option key is for backward compatibility """ + def __init__( self, options, @@ -52,13 +54,13 @@ def __init__( model_name=None, suppress_warnings=False, ): - """ Create the EF and associated solver. """ + """Create the EF and associated solver.""" super().__init__( options, all_scenario_names, scenario_creator, scenario_creator_kwargs=scenario_creator_kwargs, - all_nodenames=all_nodenames + all_nodenames=all_nodenames, ) self.bundling = True if self.n_proc > 1 and self.cylinder_rank == 0: @@ -66,29 +68,30 @@ def __init__( required = ["solver"] self._options_check(required, self.options) self.solver = pyo.SolverFactory(self.options["solver"]) - self.ef = sputils._create_EF_from_scen_dict(self.local_scenarios, - EF_name=model_name) + self.ef = sputils._create_EF_from_scen_dict( + self.local_scenarios, EF_name=model_name + ) def solve_extensive_form(self, solver_options=None, tee=False): - """ Solve the extensive form. - - Args: - solver_options (dict, optional): - Dictionary of solver-specific options (e.g. Gurobi options, - CPLEX options, etc.). - tee (bool, optional): - If True, displays solver output. Default False. - - Returns: - :class:`pyomo.opt.results.results_.SolverResults`: - Result returned by the Pyomo solve method. - + """Solve the extensive form. + + Args: + solver_options (dict, optional): + Dictionary of solver-specific options (e.g. Gurobi options, + CPLEX options, etc.). + tee (bool, optional): + If True, displays solver output. Default False. + + Returns: + :class:`pyomo.opt.results.results_.SolverResults`: + Result returned by the Pyomo solve method. + """ if "persistent" in self.options["solver"]: self.solver.set_instance(self.ef) # Pass solver-specifiec (e.g. 
Gurobi, CPLEX) options
         if solver_options is not None:
-            for (opt, value) in solver_options.items():
+            for opt, value in solver_options.items():
                 self.solver.options[opt] = value
         results = self.solver.solve(self.ef, tee=tee, load_solutions=False)
         if len(results.solution) > 0:
@@ -101,8 +104,8 @@ def solve_extensive_form(self, solver_options=None, tee=False):
         return results
 
     def get_objective_value(self):
-        """ Retrieve the objective value.
-        
+        """Retrieve the objective value.
+
         Returns:
             float:
                 Objective value.
@@ -114,11 +117,13 @@ def get_objective_value(self):
         try:
             obj_val = pyo.value(self.ef.EF_Obj)
         except Exception as e:
-            raise ValueError(f"Could not extract EF objective value with error: {str(e)}")
+            raise ValueError(
+                f"Could not extract EF objective value with error: {str(e)}"
+            )
         return obj_val
 
     def get_root_solution(self):
-        """ Get the value of the variables at the root node.
+        """Get the value of the variables at the root node.
 
         Returns:
             dict:
@@ -130,12 +135,12 @@ def get_root_solution(self):
             var_name = var.name
             dot_index = var_name.find(".")
             if dot_index >= 0 and var_name[:dot_index] in self.all_scenario_names:
-                var_name = var_name[dot_index+1:]
+                var_name = var_name[dot_index + 1 :]
             result[var_name] = var.value
         return result
 
     def nonants(self):
-        """ An iterator to give representative Vars subject to non-anticipitivity
+        """An iterator to give representative Vars subject to non-anticipativity
            Args: None
            Yields:
@@ -143,17 +148,15 @@ def nonants(self):
        """
        yield from sputils.ef_nonants(self.ef)
 
-
    def nonants_to_csv(self, filename):
-        """ Dump the nonant vars from an ef to a csv file; truly a dump...
+        """Dump the nonant vars from an ef to a csv file; truly a dump...
           Args:
               filename (str): the full name of the csv output file
        """
        sputils.ef_nonants_csv(self.ef, filename)
 
-
    def scenarios(self):
-        """ An iterator to give the scenario sub-models in an ef
+        """An iterator to give the scenario sub-models in an ef
            Args: None
            Yields:
diff --git a/mpisppy/opt/lshaped.py b/mpisppy/opt/lshaped.py
index c79a8a21c..f5903b12c 100644
--- a/mpisppy/opt/lshaped.py
+++ b/mpisppy/opt/lshaped.py
@@ -19,15 +19,14 @@
 from mpisppy.utils.sputils import find_active_objective
 from mpisppy.utils.lshaped_cuts import LShapedCutGenerator
 from mpisppy.spopt import set_instance_retry
-from pyomo.core import (
-    SOSConstraint, Constraint, Var
-)
+from pyomo.core import SOSConstraint, Constraint, Var
 from pyomo.core.expr.visitor import identify_variables
 from pyomo.repn.standard_repn import generate_standard_repn
 from pyomo.core.expr.numeric_expr import LinearExpression
 
+
 class LShapedMethod(spbase.SPBase):
-    """ Base class for the L-shaped method for two-stage stochastic programs.
+    """Base class for the L-shaped method for two-stage stochastic programs.
 
     Warning:
         This class explicitly assumes minimization.
@@ -54,22 +53,23 @@ class LShapedMethod(spbase.SPBase):
             variable in the model to the stage they belong to.
         all_scenario_names (list):
             List of all scenarios names present in the model (strings).
-        scenario_creator (callable): 
+        scenario_creator (callable):
             Function which take a scenario name (string) and returns a
             Pyomo Concrete model with some things attached.
         scenario_denouement (callable, optional):
             Function which does post-processing and reporting.
-        all_nodenames (list, optional): 
+        all_nodenames (list, optional):
             List of all node name (strings). Can be `None` for two-stage
             problems.
         mpicomm (MPI comm, optional):
             MPI communicator to use between all scenarios. Default is
             `MPI.COMM_WORLD`.
-        scenario_creator_kwargs (dict, optional): 
+        scenario_creator_kwargs (dict, optional):
             Keyword arguments to pass to `scenario_creator`.
     """
+
     def __init__(
-        self, 
+        self,
         options,
         all_scenario_names,
         scenario_creator,
@@ -104,7 +104,7 @@ def __init__(
         if "root_scenarios" in options:
             self.root_scenarios = options["root_scenarios"]
 
-        self.relax_root = False 
+        self.relax_root = False
         if "relax_root" in options:
             self.relax_root = options["relax_root"]
 
@@ -112,9 +112,11 @@ def __init__(
         if "valid_eta_lb" in options:
             self.valid_eta_lb = options["valid_eta_lb"]
             self.compute_eta_bound = False
-        else: # fit the user does not provide a bound, compute one
-            self.valid_eta_lb = { scen : (-sys.maxsize - 1) * 1. / len(self.all_scenario_names) \
-                                  for scen in self.all_scenario_names }
+        else:  # if the user does not provide a bound, compute one
+            self.valid_eta_lb = {
+                scen: (-sys.maxsize - 1) * 1.0 / len(self.all_scenario_names)
+                for scen in self.all_scenario_names
+            }
             self.compute_eta_bound = True
 
         if scenario_creator_kwargs is None:
@@ -129,7 +131,7 @@ def __init__(
         self.subproblems = {}
 
     def options_check(self):
-        """ Check to ensure that the user-specified options are valid. Requried
+        """Check to ensure that the user-specified options are valid. Required
         options are:
 
         - root_solver (string) - Solver to use for the root problem.
@@ -144,11 +146,11 @@ def options_check(self):
 
     def _add_root_etas(self, root, index):
         def _eta_bounds(m, s):
-            return (self.valid_eta_lb[s],None)
+            return (self.valid_eta_lb[s], None)
+
         root.eta = pyo.Var(index, within=pyo.Reals, bounds=_eta_bounds)
 
     def _create_root_no_scenarios(self):
-
         # using the first scenario as a basis
         root = self.scenario_creator(
             self.all_scenario_names[0], **self.scenario_creator_kwargs
@@ -161,14 +163,21 @@ def _create_root_no_scenarios(self):
 
         self.root_vars = nonant_list
 
-        for constr_data in list(itertools.chain(
-                root.component_data_objects(SOSConstraint, active=True, descend_into=True)
-                , root.component_data_objects(Constraint, active=True, descend_into=True))):
+        for constr_data in list(
+            itertools.chain(
+                root.component_data_objects(
+                    SOSConstraint, active=True, descend_into=True
+                ),
+                root.component_data_objects(Constraint, active=True, descend_into=True),
+            )
+        ):
             if not _first_stage_only(constr_data, nonant_ids):
                 _del_con(constr_data)
 
         # delete the second stage variables
-        for var in list(root.component_data_objects(Var, active=True, descend_into=True)):
+        for var in list(
+            root.component_data_objects(Var, active=True, descend_into=True)
+        ):
             if id(var) not in nonant_ids:
                 _del_var(var)
 
@@ -180,7 +189,9 @@ def _create_root_no_scenarios(self):
 
         repn = generate_standard_repn(obj.expr, quadratic=True)
         if len(repn.nonlinear_vars) > 0:
-            raise ValueError("LShaped does not support models with nonlinear objective functions")
+            raise ValueError(
+                "LShaped does not support models with nonlinear objective functions"
+            )
 
         linear_vars = list()
         linear_coefs = list()
@@ -188,7 +199,7 @@ def _create_root_no_scenarios(self):
         quadratic_coefs = list()
         ## we'll assume the constant is part of stage 1 (wlog it is), just
         ## like the first-stage bits of the objective
-        constant = repn.constant 
+        constant = repn.constant
 
         ## only keep the first stage variables in the objective
         for coef, var in zip(repn.linear_coefs, repn.linear_vars):
@@ -196,18 +207,18 @@ def _create_root_no_scenarios(self):
             if id_var in nonant_ids:
                 linear_vars.append(var)
                 linear_coefs.append(coef)
-        for coef, (x,y) in zip(repn.quadratic_coefs, repn.quadratic_vars):
+        for coef, (x, y) in
zip(repn.quadratic_coefs, repn.quadratic_vars): id_x = id(x) id_y = id(y) if id_x in nonant_ids and id_y in nonant_ids: quadratic_coefs.append(coef) - quadratic_vars.append((x,y)) + quadratic_vars.append((x, y)) # checks if model sense is max, if so negates the objective if not self.is_minimizing: - for i,coef in enumerate(linear_coefs): + for i, coef in enumerate(linear_coefs): linear_coefs[i] = -coef - for i,coef in enumerate(quadratic_coefs): + for i, coef in enumerate(quadratic_coefs): quadratic_coefs[i] = -coef # add the etas @@ -215,12 +226,13 @@ def _create_root_no_scenarios(self): linear_vars.append(var) linear_coefs.append(1) - expr = LinearExpression(constant=constant, linear_coefs=linear_coefs, - linear_vars=linear_vars) + expr = LinearExpression( + constant=constant, linear_coefs=linear_coefs, linear_vars=linear_vars + ) if quadratic_coefs: expr += pyo.quicksum( - (coef*x*y for coef,(x,y) in zip(quadratic_coefs, quadratic_vars)) - ) + (coef * x * y for coef, (x, y) in zip(quadratic_coefs, quadratic_vars)) + ) root.del_component(obj) @@ -230,17 +242,18 @@ def _create_root_no_scenarios(self): self.root = root def _create_root_with_scenarios(self): - ef_scenarios = self.root_scenarios ## we want the correct probabilities to be set when ## calling create_EF if len(ef_scenarios) > 1: + def scenario_creator_wrapper(name, **creator_options): scenario = self.scenario_creator(name, **creator_options) - if not hasattr(scenario, '_mpisppy_probability'): - scenario._mpisppy_probability = 1./len(self.all_scenario_names) + if not hasattr(scenario, "_mpisppy_probability"): + scenario._mpisppy_probability = 1.0 / len(self.all_scenario_names) return scenario + root = sputils.create_EF( ef_scenarios, scenario_creator_wrapper, @@ -253,9 +266,11 @@ def scenario_creator_wrapper(name, **creator_options): ef_scenarios[0], **self.scenario_creator_kwargs, ) - if not hasattr(root, '_mpisppy_probability')\ - or root._mpisppy_probability == "uniform": - root._mpisppy_probability = 1./len(self.all_scenario_names) + if ( + not hasattr(root, "_mpisppy_probability") + or root._mpisppy_probability == "uniform" + ): + root._mpisppy_probability = 1.0 / len(self.all_scenario_names) nonant_list, nonant_ids = _get_nonant_ids(root) @@ -263,26 +278,31 @@ def scenario_creator_wrapper(name, **creator_options): # creates the eta variables for scenarios that are NOT selected to be # included in the root problem - eta_indx = [scenario_name for scenario_name in self.all_scenario_names - if scenario_name not in self.root_scenarios] + eta_indx = [ + scenario_name + for scenario_name in self.all_scenario_names + if scenario_name not in self.root_scenarios + ] self._add_root_etas(root, eta_indx) obj = find_active_objective(root) repn = generate_standard_repn(obj.expr, quadratic=True) if len(repn.nonlinear_vars) > 0: - raise ValueError("LShaped does not support models with nonlinear objective functions") + raise ValueError( + "LShaped does not support models with nonlinear objective functions" + ) linear_vars = list(repn.linear_vars) linear_coefs = list(repn.linear_coefs) quadratic_coefs = list(repn.quadratic_coefs) # adjust coefficients by scenario/bundle probability scen_prob = root._mpisppy_probability - for i,var in enumerate(repn.linear_vars): + for i, var in enumerate(repn.linear_vars): if id(var) not in nonant_ids: linear_coefs[i] *= scen_prob - for i,(x,y) in enumerate(repn.quadratic_vars): + for i, (x, y) in enumerate(repn.quadratic_vars): # only multiply through once if id(x) not in nonant_ids: quadratic_coefs[i] *= 
scen_prob @@ -292,9 +312,9 @@ def scenario_creator_wrapper(name, **creator_options): # NOTE: the LShaped code negates the objective, so # we do the same here for consistency if not self.is_minimizing: - for i,coef in enumerate(linear_coefs): + for i, coef in enumerate(linear_coefs): linear_coefs[i] = -coef - for i,coef in enumerate(quadratic_coefs): + for i, coef in enumerate(quadratic_coefs): quadratic_coefs[i] = -coef # add the etas @@ -302,11 +322,15 @@ def scenario_creator_wrapper(name, **creator_options): linear_vars.append(var) linear_coefs.append(1) - expr = LinearExpression(constant=repn.constant, linear_coefs=linear_coefs, - linear_vars=linear_vars) + expr = LinearExpression( + constant=repn.constant, linear_coefs=linear_coefs, linear_vars=linear_vars + ) if repn.quadratic_vars: expr += pyo.quicksum( - (coef*x*y for coef,(x,y) in zip(quadratic_coefs, repn.quadratic_vars)) + ( + coef * x * y + for coef, (x, y) in zip(quadratic_coefs, repn.quadratic_vars) + ) ) root.del_component(obj) @@ -317,7 +341,6 @@ def scenario_creator_wrapper(name, **creator_options): self.root = root def _create_shadow_root(self): - root = pyo.ConcreteModel() arb_scen = self.local_scenarios[self.local_scenario_names[0]] @@ -328,10 +351,13 @@ def _create_shadow_root(self): nonant_shadow = pyo.Var(name=v.name) root.add_component(v.name, nonant_shadow) root_vars.append(nonant_shadow) - + if self.has_root_scens: - eta_indx = [scenario_name for scenario_name in self.all_scenario_names - if scenario_name not in self.root_scenarios] + eta_indx = [ + scenario_name + for scenario_name in self.all_scenario_names + if scenario_name not in self.root_scenarios + ] else: eta_indx = self.all_scenario_names self._add_root_etas(root, eta_indx) @@ -343,8 +369,11 @@ def _create_shadow_root(self): def set_eta_bounds(self): if self.compute_eta_bound: ## for scenarios not in self.local_scenarios, these will be a large negative number - this_etas_lb = np.fromiter((self.valid_eta_lb[scen] for scen in self.all_scenario_names), - float, count=len(self.all_scenario_names)) + this_etas_lb = np.fromiter( + (self.valid_eta_lb[scen] for scen in self.all_scenario_names), + float, + count=len(self.all_scenario_names), + ) all_etas_lb = np.empty_like(this_etas_lb) @@ -352,50 +381,56 @@ def set_eta_bounds(self): for idx, s in enumerate(self.all_scenario_names): self.valid_eta_lb[s] = all_etas_lb[idx] - + # root may not have etas for every scenarios for s, v in self.root.eta.items(): v.setlb(self.valid_eta_lb[s]) def create_root(self): - """ creates a ConcreteModel from one of the problem scenarios then - modifies the model to serve as the root problem + """creates a ConcreteModel from one of the problem scenarios then + modifies the model to serve as the root problem """ if self.cylinder_rank == 0: if self.has_root_scens: self._create_root_with_scenarios() else: self._create_root_no_scenarios() - else: + else: ## if we're not rank0, just create a root to - ## hold the nonants and etas; rank0 will do + ## hold the nonants and etas; rank0 will do ## the optimizing self._create_shadow_root() - + def attach_nonant_var_map(self, scenario_name): instance = self.local_scenarios[scenario_name] subproblem_to_root_vars_map = pyo.ComponentMap() - for var, rvar in zip(instance._mpisppy_data.nonant_indices.values(), self.root_vars): + for var, rvar in zip( + instance._mpisppy_data.nonant_indices.values(), self.root_vars + ): if var.name not in rvar.name: - raise Exception("Error: Complicating variable mismatch, sub-problem variables changed order") - 
subproblem_to_root_vars_map[var] = rvar + raise Exception( + "Error: Complicating variable mismatch, sub-problem variables changed order" + ) + subproblem_to_root_vars_map[var] = rvar # this is for interefacing with PH code - instance._mpisppy_model.subproblem_to_root_vars_map = subproblem_to_root_vars_map + instance._mpisppy_model.subproblem_to_root_vars_map = ( + subproblem_to_root_vars_map + ) def create_subproblem(self, scenario_name): - """ the subproblem creation function passed into the - BendersCutsGenerator + """the subproblem creation function passed into the + BendersCutsGenerator """ instance = self.local_scenarios[scenario_name] - nonant_list, nonant_ids = _get_nonant_ids(instance) + nonant_list, nonant_ids = _get_nonant_ids(instance) # NOTE: since we use generate_standard_repn below, we need # to unfix any nonants so they'll properly appear # in the objective - fixed_nonants = [ var for var in nonant_list if var.fixed ] + fixed_nonants = [var for var in nonant_list if var.fixed] for var in fixed_nonants: var.fixed = False @@ -403,12 +438,14 @@ def create_subproblem(self, scenario_name): obj = find_active_objective(instance) if not hasattr(instance, "_mpisppy_probability"): - instance._mpisppy_probability = 1. / self.scenario_count + instance._mpisppy_probability = 1.0 / self.scenario_count _mpisppy_probability = instance._mpisppy_probability repn = generate_standard_repn(obj.expr, quadratic=True) if len(repn.nonlinear_vars) > 0: - raise ValueError("LShaped does not support models with nonlinear objective functions") + raise ValueError( + "LShaped does not support models with nonlinear objective functions" + ) linear_vars = list() linear_coefs = list() @@ -416,34 +453,35 @@ def create_subproblem(self, scenario_name): quadratic_coefs = list() ## we'll assume the constant is part of stage 1 (wlog it is), just ## like the first-stage bits of the objective - constant = repn.constant + constant = repn.constant ## only keep the second stage variables in the objective for coef, var in zip(repn.linear_coefs, repn.linear_vars): id_var = id(var) if id_var not in nonant_ids: linear_vars.append(var) - linear_coefs.append(_mpisppy_probability*coef) - for coef, (x,y) in zip(repn.quadratic_coefs, repn.quadratic_vars): + linear_coefs.append(_mpisppy_probability * coef) + for coef, (x, y) in zip(repn.quadratic_coefs, repn.quadratic_vars): id_x = id(x) id_y = id(y) if id_x not in nonant_ids or id_y not in nonant_ids: - quadratic_coefs.append(_mpisppy_probability*coef) - quadratic_vars.append((x,y)) + quadratic_coefs.append(_mpisppy_probability * coef) + quadratic_vars.append((x, y)) # checks if model sense is max, if so negates the objective if not self.is_minimizing: - for i,coef in enumerate(linear_coefs): + for i, coef in enumerate(linear_coefs): linear_coefs[i] = -coef - for i,coef in enumerate(quadratic_coefs): + for i, coef in enumerate(quadratic_coefs): quadratic_coefs[i] = -coef - expr = LinearExpression(constant=constant, linear_coefs=linear_coefs, - linear_vars=linear_vars) + expr = LinearExpression( + constant=constant, linear_coefs=linear_coefs, linear_vars=linear_vars + ) if quadratic_coefs: expr += pyo.quicksum( - (coef*x*y for coef,(x,y) in zip(quadratic_coefs, quadratic_vars)) - ) + (coef * x * y for coef, (x, y) in zip(quadratic_coefs, quadratic_vars)) + ) instance.del_component(obj) @@ -460,7 +498,7 @@ def create_subproblem(self, scenario_name): var.fixed = True opt = pyo.SolverFactory(self.options["sp_solver"]) if self.options["sp_solver_options"]: - for k,v in 
self.options["sp_solver_options"].items(): + for k, v in self.options["sp_solver_options"].items(): opt.options[k] = v if sputils.is_persistent(opt): @@ -480,9 +518,16 @@ def create_subproblem(self, scenario_name): # iterates through constraints and removes first stage constraints from the model # the id dict is used to improve the speed of identifying the stage each variables belongs to - for constr_data in list(itertools.chain( - instance.component_data_objects(SOSConstraint, active=True, descend_into=True) - , instance.component_data_objects(Constraint, active=True, descend_into=True))): + for constr_data in list( + itertools.chain( + instance.component_data_objects( + SOSConstraint, active=True, descend_into=True + ), + instance.component_data_objects( + Constraint, active=True, descend_into=True + ), + ) + ): if _first_stage_only(constr_data, nonant_ids): _del_con(constr_data) @@ -493,10 +538,12 @@ def create_subproblem(self, scenario_name): # creates the complicating var map that connects the first stage variables in the sub problem to those in # the root problem -- also set the bounds on the subproblem root vars to be none for better cuts for var, rvar in zip(nonant_list, self.root_vars): - if var.name not in rvar.name: # rvar.name may be part of a bundle - raise Exception("Error: Complicating variable mismatch, sub-problem variables changed order") + if var.name not in rvar.name: # rvar.name may be part of a bundle + raise Exception( + "Error: Complicating variable mismatch, sub-problem variables changed order" + ) complicating_vars_map[rvar] = var - subproblem_to_root_vars_map[var] = rvar + subproblem_to_root_vars_map[var] = rvar # these are already enforced in the root # don't need to be enfored in the subproblems @@ -505,7 +552,9 @@ def create_subproblem(self, scenario_name): var.fixed = False # this is for interefacing with PH code - instance._mpisppy_model.subproblem_to_root_vars_map = subproblem_to_root_vars_map + instance._mpisppy_model.subproblem_to_root_vars_map = ( + subproblem_to_root_vars_map + ) if self.store_subproblems: self.subproblems[scenario_name] = instance @@ -513,8 +562,7 @@ def create_subproblem(self, scenario_name): return instance, complicating_vars_map def lshaped_algorithm(self, converger=None): - """ function that runs the lshaped.py algorithm - """ + """function that runs the lshaped.py algorithm""" if converger: converger = converger(self, self.cylinder_rank, self.n_proc) max_iter = 30 @@ -552,7 +600,8 @@ def lshaped_algorithm(self, converger=None): # and let it internally handle which ranks get which scenarios if self.has_root_scens: sub_scenarios = [ - scenario_name for scenario_name in self.local_scenario_names + scenario_name + for scenario_name in self.local_scenario_names if scenario_name not in self.root_scenarios ] else: @@ -560,13 +609,13 @@ def lshaped_algorithm(self, converger=None): for scenario_name in self.local_scenario_names: if scenario_name in sub_scenarios: subproblem_fn_kwargs = dict() - subproblem_fn_kwargs['scenario_name'] = scenario_name + subproblem_fn_kwargs["scenario_name"] = scenario_name m.bender.add_subproblem( subproblem_fn=self.create_subproblem, subproblem_fn_kwargs=subproblem_fn_kwargs, root_eta=m.eta[scenario_name], subproblem_solver=sp_solver, - subproblem_solver_options=self.options["sp_solver_options"] + subproblem_solver_options=self.options["sp_solver_options"], ) else: self.attach_nonant_var_map(scenario_name) @@ -581,7 +630,7 @@ def lshaped_algorithm(self, converger=None): raise Exception("Error: Failed to 
Create Master Solver") # set options - for k,v in self.options["root_solver_options"].items(): + for k, v in self.options["root_solver_options"].items(): opt.options[k] = v is_persistent = sputils.is_persistent(opt) @@ -597,10 +646,26 @@ def lshaped_algorithm(self, converger=None): for self.iter in range(max_iter): if verbose and self.cylinder_rank == 0: if self.iter > 0: - print("Current Iteration:", self.iter + 1, "Time Elapsed:", "%7.2f" % (time.time() - t), "Time Spent on Last Master:", "%7.2f" % t1, - "Time Spent Generating Last Cut Set:", "%7.2f" % t2, "Current Objective:", "%7.2f" % m.obj.expr()) + print( + "Current Iteration:", + self.iter + 1, + "Time Elapsed:", + "%7.2f" % (time.time() - t), + "Time Spent on Last Master:", + "%7.2f" % t1, + "Time Spent Generating Last Cut Set:", + "%7.2f" % t2, + "Current Objective:", + "%7.2f" % m.obj.expr(), + ) else: - print("Current Iteration:", self.iter + 1, "Time Elapsed:", "%7.2f" % (time.time() - t), "Current Objective: -Inf") + print( + "Current Iteration:", + self.iter + 1, + "Time Elapsed:", + "%7.2f" % (time.time() - t), + "Current Objective: -Inf", + ) t1 = time.time() x_vals = np.zeros(len(self.root_vars)) eta_vals = np.zeros(self.scenario_count) @@ -685,6 +750,7 @@ def lshaped_algorithm(self, converger=None): break return res + def _del_con(c): parent = c.parent_component() if parent.is_indexed(): @@ -693,6 +759,7 @@ def _del_con(c): assert parent is c c.parent_block().del_component(c) + def _del_var(v): parent = v.parent_component() if parent.is_indexed(): @@ -702,11 +769,13 @@ def _del_var(v): block = v.parent_block() block.del_component(v) + def _get_nonant_ids(instance): assert len(instance._mpisppy_node_list) == 1 # set comprehension nonant_list = instance._mpisppy_node_list[0].nonant_vardata_list - return nonant_list, { id(var) for var in nonant_list } + return nonant_list, {id(var) for var in nonant_list} + def _get_nonant_ids_EF(instance): assert len(instance._mpisppy_data.nlens) == 1 @@ -715,7 +784,7 @@ def _get_nonant_ids_EF(instance): ## this is for the cut variables, so we just need (and want) ## exactly one set of them - nonant_list = list(instance.ref_vars[ndn,i] for i in range(nlen)) + nonant_list = list(instance.ref_vars[ndn, i] for i in range(nlen)) ## this is for adjusting the objective, so needs all the nonants ## in the EF @@ -723,27 +792,32 @@ def _get_nonant_ids_EF(instance): nonant_ids = set() for s in snames: - nonant_ids.update( (id(v) for v in \ - getattr(instance, s)._mpisppy_node_list[0].nonant_vardata_list) - ) - return nonant_list, nonant_ids + nonant_ids.update( + ( + id(v) + for v in getattr(instance, s)._mpisppy_node_list[0].nonant_vardata_list + ) + ) + return nonant_list, nonant_ids + def _first_stage_only(constr_data, nonant_ids): - """ iterates through the constraint in a scenario and returns if it only - has first stage variables + """iterates through the constraint in a scenario and returns if it only + has first stage variables """ for var in identify_variables(constr_data.body): - if id(var) not in nonant_ids: + if id(var) not in nonant_ids: return False return True + def _init_vars(varlist): - ''' + """ for every pyomo var in varlist without a value, sets it to the lower bound (if it exists), or the upper bound (if it exists, and the lower bound does note) or 0 (if neither bound exists). 
- ''' + """ value = pyo.value for var in varlist: if var.value is not None: @@ -756,22 +830,22 @@ def _init_vars(varlist): var.set_value(0) - def main(): import mpisppy.tests.examples.farmer as ref import os + # Turn off output from all ranks except rank 1 if MPI.COMM_WORLD.Get_rank() != 0: - sys.stdout = open(os.devnull, 'w') - scenario_names = ['scen' + str(i) for i in range(3)] - bounds = {i:-432000 for i in scenario_names} + sys.stdout = open(os.devnull, "w") + scenario_names = ["scen" + str(i) for i in range(3)] + bounds = {i: -432000 for i in scenario_names} options = { "root_solver": "gurobi_persistent", "sp_solver": "gurobi_persistent", - "sp_solver_options" : {"threads" : 1}, + "sp_solver_options": {"threads": 1}, "valid_eta_lb": bounds, "max_iter": 10, - } + } ls = LShapedMethod(options, scenario_names, ref.scenario_creator) res = ls.lshaped_algorithm() @@ -779,5 +853,5 @@ def main(): print(res) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/mpisppy/opt/ph.py b/mpisppy/opt/ph.py index d08373105..e272f31f1 100644 --- a/mpisppy/opt/ph.py +++ b/mpisppy/opt/ph.py @@ -12,24 +12,26 @@ import shutil import mpisppy.MPI as mpi + # decorator snarfed from stack overflow - allows per-rank profile output file generation. def profile(filename=None, comm=mpi.COMM_WORLD): pass + fullcomm = mpi.COMM_WORLD global_rank = fullcomm.Get_rank() ############################################################################ class PH(mpisppy.phbase.PHBase): - """ PH. See PHBase for list of args. """ + """PH. See PHBase for list of args.""" - #====================================================================== - # uncomment the line below to get per-rank profile outputs, which can + # ====================================================================== + # uncomment the line below to get per-rank profile outputs, which can # be examined with snakeviz (or your favorite profile output analyzer) - #@profile(filename="profile_out") + # @profile(filename="profile_out") def ph_main(self, finalize=True): - """ Execute the PH algorithm. + """Execute the PH algorithm. Args: finalize (bool, optional, default=True): @@ -54,16 +56,16 @@ def ph_main(self, finalize=True): NOTE: You need an xhat finder either in denoument or in an extension. 
""" - verbose = self.options['verbose'] - smoothed = self.options['smoothed'] - self.PH_Prep(attach_smooth = smoothed) + verbose = self.options["verbose"] + smoothed = self.options["smoothed"] + self.PH_Prep(attach_smooth=smoothed) - if (verbose): - print('Calling PH Iter0 on global rank {}'.format(global_rank)) + if verbose: + print("Calling PH Iter0 on global rank {}".format(global_rank)) trivial_bound = self.Iter0() - if (verbose): - print ('Completed PH Iter0 on global rank {}'.format(global_rank)) - if ('asynchronousPH' in self.options) and (self.options['asynchronousPH']): + if verbose: + print("Completed PH Iter0 on global rank {}".format(global_rank)) + if ("asynchronousPH" in self.options) and (self.options["asynchronousPH"]): raise RuntimeError("asynchronousPH is deprecated; use APH") self.iterk_loop() @@ -77,7 +79,7 @@ def ph_main(self, finalize=True): if __name__ == "__main__": - #============================== + # ============================== # hardwired by dlw for debugging import mpisppy.tests.examples.farmer as refmodel import mpisppy.utils.sputils as sputils @@ -92,13 +94,12 @@ def ph_main(self, finalize=True): PHopt["display_timing"] = True PHopt["display_progress"] = True # one way to set up options (never mind that this is not a MIP) - PHopt["iter0_solver_options"]\ - = sputils.option_string_to_dict("mipgap=0.01") + PHopt["iter0_solver_options"] = sputils.option_string_to_dict("mipgap=0.01") # another way PHopt["iterk_solver_options"] = {"mipgap": 0.001} ScenCount = 50 - all_scenario_names = ['scen' + str(i) for i in range(ScenCount)] + all_scenario_names = ["scen" + str(i) for i in range(ScenCount)] # end hardwire scenario_creator = refmodel.scenario_creator @@ -107,50 +108,69 @@ def ph_main(self, finalize=True): # now test extensions and convergers together. # NOTE that the options is a reference, # so you could have changed PHopt instead. - PHopt["PHIterLimit"] = 5 # the converger will probably stop it. + PHopt["PHIterLimit"] = 5 # the converger will probably stop it. 
PHopt["asynchronousPH"] = False from mpisppy.extensions.xhatlooper import XhatLooper from mpisppy.convergers.fracintsnotconv import FractionalConverger - PHopt["xhat_looper_options"] = {"xhat_solver_options":\ - PHopt["iterk_solver_options"], - "scen_limit": 3, - "csvname": "looper.csv"} - ph = PH(PHopt, all_scenario_names, scenario_creator, scenario_denouement, - extensions=XhatLooper, - ph_converger=FractionalConverger, - rho_setter=None) + + PHopt["xhat_looper_options"] = { + "xhat_solver_options": PHopt["iterk_solver_options"], + "scen_limit": 3, + "csvname": "looper.csv", + } + ph = PH( + PHopt, + all_scenario_names, + scenario_creator, + scenario_denouement, + extensions=XhatLooper, + ph_converger=FractionalConverger, + rho_setter=None, + ) conv, obj, bnd = ph.ph_main() - print ("Quitting Early.") + print("Quitting Early.") quit() from mpisppy.extensions.diagnoser import Diagnoser + # now test whatever is new - ph = PH(PHopt, all_scenario_names, scenario_creator, scenario_denouement, - extensions=Diagnoser, - ph_converger=None, - rho_setter=None) + ph = PH( + PHopt, + all_scenario_names, + scenario_creator, + scenario_denouement, + extensions=Diagnoser, + ph_converger=None, + rho_setter=None, + ) ph.options["PHIterLimit"] = 3 if global_rank == 0: try: shutil.rmtree("delme_diagdir") - print ("...deleted delme_diagdir") + print("...deleted delme_diagdir") except Exception: pass ph.options["diagnoser_options"] = {"diagnoser_outdir": "delme_diagdir"} conv, obj, bnd = ph.ph_main() from mpisppy.extensions.avgminmaxer import MinMaxAvg - ph = PH(PHopt, all_scenario_names, scenario_creator, scenario_denouement, - extensions=MinMaxAvg, - ph_converger=None, - rho_setter=None) - ph.options["avgminmax_name"] = "FirstStageCost" + + ph = PH( + PHopt, + all_scenario_names, + scenario_creator, + scenario_denouement, + extensions=MinMaxAvg, + ph_converger=None, + rho_setter=None, + ) + ph.options["avgminmax_name"] = "FirstStageCost" ph.options["PHIterLimit"] = 3 conv, obj, bnd = ph.ph_main() - # trying asynchronous operation + # trying asynchronous operation PHopt["asynchronousPH"] = True PHopt["async_frac_needed"] = 0.5 PHopt["async_sleep_secs"] = 1 @@ -159,12 +179,10 @@ def ph_main(self, finalize=True): conv, obj, bnd = ph.ph_main() if global_rank == 0: - print ("E[obj] for converged solution (probably NOT non-anticipative)", - obj) + print("E[obj] for converged solution (probably NOT non-anticipative)", obj) dopts = sputils.option_string_to_dict("mipgap=0.001") objbound = ph.post_solve_bound(solver_options=dopts, verbose=False) if global_rank == 0: - print ("**** Lagrangian objective function bound=",objbound) - print ("(probably converged way too early, BTW)") - + print("**** Lagrangian objective function bound=", objbound) + print("(probably converged way too early, BTW)") diff --git a/mpisppy/opt/presolve.py b/mpisppy/opt/presolve.py index 87edece58..d8e249965 100644 --- a/mpisppy/opt/presolve.py +++ b/mpisppy/opt/presolve.py @@ -105,7 +105,6 @@ def presolve(self): ) while True: - if not same_nonant_bounds: update = True diff --git a/mpisppy/opt/sc.py b/mpisppy/opt/sc.py index 8e085f878..e943f45cd 100644 --- a/mpisppy/opt/sc.py +++ b/mpisppy/opt/sc.py @@ -17,7 +17,7 @@ from mpisppy.utils.sputils import find_active_objective -logger = logging.getLogger('mpisppy.sc') +logger = logging.getLogger("mpisppy.sc") SCOptions = parapint.algorithms.IPOptions @@ -26,15 +26,21 @@ def _assert_continuous(m: _BlockData): for v in m.component_data_objects(pyo.Var, descend_into=True, active=True): if not 
v.is_continuous(): - raise RuntimeError(f'Variable {v} in block {m} is not continuous; The Schur-Complement method only supports continuous problems.') - - -class _SCInterface(parapint.interfaces.MPIStochasticSchurComplementInteriorPointInterface): - def __init__(self, - local_scenario_models: Dict[str, _BlockData], - all_scenario_names: List[str], - comm: MPI.Comm, - ownership_map: Dict): + raise RuntimeError( + f"Variable {v} in block {m} is not continuous; The Schur-Complement method only supports continuous problems." + ) + + +class _SCInterface( + parapint.interfaces.MPIStochasticSchurComplementInteriorPointInterface +): + def __init__( + self, + local_scenario_models: Dict[str, _BlockData], + all_scenario_names: List[str], + comm: MPI.Comm, + ownership_map: Dict, + ): self.local_scenario_models = local_scenario_models models = list(local_scenario_models.values()) @@ -42,57 +48,70 @@ def __init__(self, models = models[1:] self.nonant_vars = list(ref_model._mpisppy_data.nonant_indices.keys()) - super(_SCInterface, self).__init__(scenarios=all_scenario_names, - nonanticipative_var_identifiers=self.nonant_vars, - comm=comm, - ownership_map=ownership_map) + super(_SCInterface, self).__init__( + scenarios=all_scenario_names, + nonanticipative_var_identifiers=self.nonant_vars, + comm=comm, + ownership_map=ownership_map, + ) - def build_model_for_scenario(self, - scenario_identifier: str) -> Tuple[_BlockData, Dict[Any, _GeneralVarData]]: + def build_model_for_scenario( + self, scenario_identifier: str + ) -> Tuple[_BlockData, Dict[Any, _GeneralVarData]]: m = self.local_scenario_models[scenario_identifier] _assert_continuous(m) active_obj = find_active_objective(m) active_obj.deactivate() - m._mpisppy_model.weighted_obj = pyo.Objective(expr=m._mpisppy_probability * active_obj.expr, sense=active_obj.sense) + m._mpisppy_model.weighted_obj = pyo.Objective( + expr=m._mpisppy_probability * active_obj.expr, sense=active_obj.sense + ) nonant_vars = m._mpisppy_data.nonant_indices if len(nonant_vars) != len(self.nonant_vars): - raise ValueError(f'Number of non-anticipative variables is not consistent in scenario {scenario_identifier}.') + raise ValueError( + f"Number of non-anticipative variables is not consistent in scenario {scenario_identifier}." 
+ ) return m, nonant_vars class SchurComplement(SPBase): - def __init__(self, - options: Union[Dict, SCOptions], - all_scenario_names: List, - scenario_creator: Callable, - scenario_creator_kwargs: Optional[Dict] = None, - all_nodenames=None, - mpicomm=None, - model_name=None, - suppress_warnings=False): - super(SchurComplement, self).__init__(options=options, - all_scenario_names=all_scenario_names, - scenario_creator=scenario_creator, - scenario_creator_kwargs=scenario_creator_kwargs, - all_nodenames=all_nodenames, - mpicomm=mpicomm) + def __init__( + self, + options: Union[Dict, SCOptions], + all_scenario_names: List, + scenario_creator: Callable, + scenario_creator_kwargs: Optional[Dict] = None, + all_nodenames=None, + mpicomm=None, + model_name=None, + suppress_warnings=False, + ): + super(SchurComplement, self).__init__( + options=options, + all_scenario_names=all_scenario_names, + scenario_creator=scenario_creator, + scenario_creator_kwargs=scenario_creator_kwargs, + all_nodenames=all_nodenames, + mpicomm=mpicomm, + ) if self.bundling: - raise ValueError('The Schur-Complement method does not support bundling') + raise ValueError("The Schur-Complement method does not support bundling") ownership_map = dict() for _rank, scenario_index_list in enumerate(self._rank_slices): for _scenario_ndx in scenario_index_list: ownership_map[_scenario_ndx] = _rank - self.interface = _SCInterface(local_scenario_models=self.local_scenarios, - all_scenario_names=self.all_scenario_names, - comm=self.mpicomm, - ownership_map=ownership_map) + self.interface = _SCInterface( + local_scenario_models=self.local_scenarios, + all_scenario_names=self.all_scenario_names, + comm=self.mpicomm, + ownership_map=ownership_map, + ) def solve(self): if isinstance(self.options, SCOptions): @@ -101,13 +120,22 @@ def solve(self): options = SCOptions()(self.options) if options.linalg.solver is None: options.linalg.solver = parapint.linalg.MPISchurComplementLinearSolver( - subproblem_solvers={ndx: parapint.linalg.InteriorPointMA27Interface(cntl_options={1: 1e-6}) for ndx in range(len(self.all_scenario_names))}, - schur_complement_solver=parapint.linalg.InteriorPointMA27Interface(cntl_options={1: 1e-6})) - - status = parapint.algorithms.ip_solve(interface=self.interface, - options=options) + subproblem_solvers={ + ndx: parapint.linalg.InteriorPointMA27Interface( + cntl_options={1: 1e-6} + ) + for ndx in range(len(self.all_scenario_names)) + }, + schur_complement_solver=parapint.linalg.InteriorPointMA27Interface( + cntl_options={1: 1e-6} + ), + ) + + status = parapint.algorithms.ip_solve(interface=self.interface, options=options) if status != parapint.algorithms.InteriorPointStatus.optimal: - raise RuntimeError('Schur-Complement Interior Point algorithm did not converge') + raise RuntimeError( + "Schur-Complement Interior Point algorithm did not converge" + ) self.interface.load_primals_into_pyomo_model() diff --git a/mpisppy/phbase.py b/mpisppy/phbase.py index d0f8b18bf..44cde157c 100644 --- a/mpisppy/phbase.py +++ b/mpisppy/phbase.py @@ -20,17 +20,20 @@ from mpisppy.utils.prox_approx import ProxApproxManager from mpisppy import global_toc + # decorator snarfed from stack overflow - allows per-rank profile output file generation. 
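# One hypothetical way to fill in the stub that follows (sketch only; the
# shipped version is a no-op, and the names in this sketch are illustrative,
# not part of mpi-sppy): a cProfile wrapper that dumps one stats file per MPI
# rank, viewable with snakeviz.
#
#     import cProfile
#     import functools
#
#     def profile(filename=None, comm=MPI.COMM_WORLD):
#         def decorator(func):
#             @functools.wraps(func)
#             def wrapper(*args, **kwargs):
#                 out = f"{filename or func.__name__}.{comm.Get_rank()}.prof"
#                 prof = cProfile.Profile()
#                 result = prof.runcall(func, *args, **kwargs)
#                 prof.dump_stats(out)
#                 return result
#             return wrapper
#         return decorator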
def profile(filename=None, comm=MPI.COMM_WORLD): pass -logger = logging.getLogger('PHBase') + +logger = logging.getLogger("PHBase") logger.setLevel(logging.WARN) -#====================== +# ====================== + def _Compute_Xbar(opt, verbose=False): - """ Gather xbar and x squared bar for each node in the list and + """Gather xbar and x squared bar for each node in the list and distribute the values back to the scenarios. Args: @@ -51,24 +54,24 @@ def _Compute_Xbar(opt, verbose=False): vector to make it easier to use the current asynch code. """ - nodenames = [] # to transmit to comms - local_concats = {} # keys are tree node names - global_concats = {} # values are concat of xbar and xsqbar + nodenames = [] # to transmit to comms + local_concats = {} # keys are tree node names + global_concats = {} # values are concat of xbar and xsqbar # we need to accumulate all local contributions before the reduce - for k,s in opt.local_scenarios.items(): + for k, s in opt.local_scenarios.items(): nlens = s._mpisppy_data.nlens for node in s._mpisppy_node_list: if node.name not in nodenames: ndn = node.name nodenames.append(ndn) - mylen = 2*nlens[ndn] + mylen = 2 * nlens[ndn] - local_concats[ndn] = np.zeros(mylen, dtype='d') - global_concats[ndn] = np.zeros(mylen, dtype='d') + local_concats[ndn] = np.zeros(mylen, dtype="d") + global_concats[ndn] = np.zeros(mylen, dtype="d") # compute the local xbar and sqbar (put the sq in the 2nd 1/2 of concat) - for k,s in opt.local_scenarios.items(): + for k, s in opt.local_scenarios.items(): nlens = s._mpisppy_data.nlens for node in s._mpisppy_node_list: ndn = node.name @@ -77,8 +80,9 @@ def _Compute_Xbar(opt, verbose=False): xbars = local_concats[ndn][:nlen] xsqbars = local_concats[ndn][nlen:] - nonants_array = np.fromiter((v._value for v in node.nonant_vardata_list), - dtype='d', count=nlen) + nonants_array = np.fromiter( + (v._value for v in node.nonant_vardata_list), dtype="d", count=nlen + ) probs = s._mpisppy_data.prob_coeff[ndn] * np.ones(nlen) xbars += probs * nonants_array @@ -89,12 +93,14 @@ def _Compute_Xbar(opt, verbose=False): opt.comms[nodename].Allreduce( [local_concats[nodename], MPI.DOUBLE], [global_concats[nodename], MPI.DOUBLE], - op=MPI.SUM) + op=MPI.SUM, + ) # set the xbar and xsqbar in all the scenarios - for k,s in opt.local_scenarios.items(): - logger.debug(' top of assign xbar loop for {} on rank {}'.\ - format(k, opt.cylinder_rank)) + for k, s in opt.local_scenarios.items(): + logger.debug( + " top of assign xbar loop for {} on rank {}".format(k, opt.cylinder_rank) + ) nlens = s._mpisppy_data.nlens for node in s._mpisppy_node_list: ndn = node.name @@ -104,15 +110,21 @@ def _Compute_Xbar(opt, verbose=False): xsqbars = global_concats[ndn][nlen:] for i in range(nlen): - s._mpisppy_model.xbars[(ndn,i)]._value = xbars[i] - s._mpisppy_model.xsqbars[(ndn,i)]._value = xsqbars[i] - if verbose: # and opt.cylinder_rank == 0: - print ("cylinder rank, scen, node, var, xbar:", - opt.cylinder_rank, k, ndn, node.nonant_vardata_list[i].name, - pyo.value(s._mpisppy_model.xbars[(ndn,i)])) + s._mpisppy_model.xbars[(ndn, i)]._value = xbars[i] + s._mpisppy_model.xsqbars[(ndn, i)]._value = xsqbars[i] + if verbose: # and opt.cylinder_rank == 0: + print( + "cylinder rank, scen, node, var, xbar:", + opt.cylinder_rank, + k, + ndn, + node.nonant_vardata_list[i].name, + pyo.value(s._mpisppy_model.xbars[(ndn, i)]), + ) + def _Compute_Wbar(opt, verbose=False, repair=True): - """ Seldom used (mainly for diagnostics); gather Wbar for each node. 
+ """Seldom used (mainly for diagnostics); gather Wbar for each node. Args: opt (phbase or xhat_eval object): object with the local scenarios @@ -122,12 +134,12 @@ def _Compute_Wbar(opt, verbose=False, repair=True): If True, normalize the W values so EW = 0 """ - nodenames = [] # to transmit to comms - local_concats = {} # keys are tree node names - global_concats = {} # values are concat of xbar and xsqbar + nodenames = [] # to transmit to comms + local_concats = {} # keys are tree node names + global_concats = {} # values are concat of xbar and xsqbar # we need to accumulate all local contributions before the reduce - for k,s in opt.local_scenarios.items(): + for k, s in opt.local_scenarios.items(): nlens = s._mpisppy_data.nlens for node in s._mpisppy_node_list: if node.name not in nodenames: @@ -135,11 +147,11 @@ def _Compute_Wbar(opt, verbose=False, repair=True): nodenames.append(ndn) mylen = nlens[ndn] - local_concats[ndn] = np.zeros(mylen, dtype='d') - global_concats[ndn] = np.zeros(mylen, dtype='d') + local_concats[ndn] = np.zeros(mylen, dtype="d") + global_concats[ndn] = np.zeros(mylen, dtype="d") # compute the local Wbar - for k,s in opt.local_scenarios.items(): + for k, s in opt.local_scenarios.items(): nlens = s._mpisppy_data.nlens for node in s._mpisppy_node_list: ndn = node.name @@ -148,8 +160,15 @@ def _Compute_Wbar(opt, verbose=False, repair=True): Wbars = local_concats[ndn][:nlen] # s._mpisppy_data.nonant_indices.keys() indexes the W Param - Wnonants_array = np.fromiter((pyo.value(s._mpisppy_model.W[idx]) for idx in s._mpisppy_data.nonant_indices if idx[0] == ndn), - dtype='d', count=nlen) + Wnonants_array = np.fromiter( + ( + pyo.value(s._mpisppy_model.W[idx]) + for idx in s._mpisppy_data.nonant_indices + if idx[0] == ndn + ), + dtype="d", + count=nlen, + ) probs = s._mpisppy_data.prob_coeff[ndn] * np.ones(nlen) Wbars += probs * Wnonants_array @@ -158,12 +177,14 @@ def _Compute_Wbar(opt, verbose=False, repair=True): opt.comms[nodename].Allreduce( [local_concats[nodename], MPI.DOUBLE], [global_concats[nodename], MPI.DOUBLE], - op=MPI.SUM) + op=MPI.SUM, + ) # check the Wbar - for k,s in opt.local_scenarios.items(): - logger.debug(' top of Wbar loop for {} on rank {}'.\ - format(k, opt.cylinder_rank)) + for k, s in opt.local_scenarios.items(): + logger.debug( + " top of Wbar loop for {} on rank {}".format(k, opt.cylinder_rank) + ) nlens = s._mpisppy_data.nlens for node in s._mpisppy_node_list: ndn = node.name @@ -173,73 +194,77 @@ def _Compute_Wbar(opt, verbose=False, repair=True): for i in range(nlen): if abs(Wbars[i]) > opt.E1_tolerance and opt.cylinder_rank == 0: - print(f"EW={Wbars[i]} (should be zero) for {node.nonant_vardata_list[i].name}") + print( + f"EW={Wbars[i]} (should be zero) for {node.nonant_vardata_list[i].name}" + ) if repair: print(f" repairing in {k}") - s._mpisppy_model.W[(ndn,i)]._value -= Wbars[i] - + s._mpisppy_model.W[(ndn, i)]._value -= Wbars[i] + + +# ====================== -#====================== class PHBase(mpisppy.spopt.SPOpt): - """ Base class for all PH-based algorithms. - - Based on mpi4py (but should run with, or without, mpi) - EVERY INDEX IS ZERO-BASED! (Except stages, which are one based). - - Node names other than ROOT, although strings, must be a number or end - in a number because mpi4py comms need a number. PH using a smart - referencemodel that knows how to make its own tree nodes and just wants - a trailing number in the scenario name. Assume we have only non-leaf - nodes. - - To check for rank 0 use self.cylinder_rank == 0. 
-
-    Attributes:
-        local_scenarios (dict):
-            Dictionary mapping scenario names (strings) to scenarios (Pyomo
-            conrete model objects). These are only the scenarios managed by
-            the current rank (not all scenarios in the entire model).
-        comms (dict):
-            Dictionary mapping node names (strings) to MPI communicator
-            objects.
-        local_scenario_names (list):
-            List of local scenario names (strings). Should match the keys
-            of the local_scenarios dict.
-        current_solver_options (dict): from options, but callbacks might
-            Dictionary of solver options provided in options. Note that
-            callbacks could change these options.
+    """Base class for all PH-based algorithms.
+
+    Based on mpi4py (but should run with, or without, mpi)
+    EVERY INDEX IS ZERO-BASED! (Except stages, which are one based).
+
+    Node names other than ROOT, although strings, must be a number or end
+    in a number because mpi4py comms need a number. PH uses a smart
+    reference model that knows how to make its own tree nodes and just wants
+    a trailing number in the scenario name. Assume we have only non-leaf
+    nodes.
+
+    To check for rank 0 use self.cylinder_rank == 0.
+
+    Attributes:
+        local_scenarios (dict):
+            Dictionary mapping scenario names (strings) to scenarios (Pyomo
+            concrete model objects). These are only the scenarios managed by
+            the current rank (not all scenarios in the entire model).
+        comms (dict):
+            Dictionary mapping node names (strings) to MPI communicator
+            objects.
+        local_scenario_names (list):
+            List of local scenario names (strings). Should match the keys
+            of the local_scenarios dict.
+        current_solver_options (dict): from options, but callbacks might
+            Dictionary of solver options provided in options. Note that
+            callbacks could change these options.
 
-    Args:
-        options (dict):
-            Options for the PH algorithm.
-        all_scenario_names (list):
-            List of all scenario names in the model (strings).
-        scenario_creator (callable):
-            Function which take a scenario name (string) and returns a
-            Pyomo Concrete model with some things attached.
-        scenario_denouement (callable, optional):
-            Function which does post-processing and reporting.
-        all_nodenames (list, optional):
-            List of all node name (strings). Can be `None` for two-stage
-            problems.
-        mpicomm (MPI comm, optional):
-            MPI communicator to use between all scenarios. Default is
-            `MPI.COMM_WORLD`.
-        scenario_creator_kwargs (dict, optional):
-            Keyword arguments passed to `scenario_creator`.
-        extensions (object, optional):
-            PH extension object.
-        extension_kwargs (dict, optional):
-            Keyword arguments to pass to the extensions.
-        ph_converger (object, optional):
-            PH converger object.
-        rho_setter (callable, optional):
-            Function to set rho values throughout the PH algorithm.
-        variable_probability (callable, optional):
-            Function to set variable specific probabilities.
+    Args:
+        options (dict):
+            Options for the PH algorithm.
+        all_scenario_names (list):
+            List of all scenario names in the model (strings).
+        scenario_creator (callable):
+            Function which takes a scenario name (string) and returns a
+            Pyomo Concrete model with some things attached.
+        scenario_denouement (callable, optional):
+            Function which does post-processing and reporting.
+        all_nodenames (list, optional):
+            List of all node names (strings). Can be `None` for two-stage
+            problems.
+        mpicomm (MPI comm, optional):
+            MPI communicator to use between all scenarios. Default is
+            `MPI.COMM_WORLD`.
+        scenario_creator_kwargs (dict, optional):
+            Keyword arguments passed to `scenario_creator`.
+ extensions (object, optional): + PH extension object. + extension_kwargs (dict, optional): + Keyword arguments to pass to the extensions. + ph_converger (object, optional): + PH converger object. + rho_setter (callable, optional): + Function to set rho values throughout the PH algorithm. + variable_probability (callable, optional): + Function to set variable specific probabilities. """ + def __init__( self, options, @@ -286,9 +311,8 @@ def __init__( self.convobject = None # PH converger self.attach_xbars() - def Compute_Xbar(self, verbose=False): - """ Gather xbar and x squared bar for each node in the list and + """Gather xbar and x squared bar for each node in the list and distribute the values back to the scenarios. Args: @@ -297,68 +321,75 @@ def Compute_Xbar(self, verbose=False): """ _Compute_Xbar(self, verbose=verbose) - def Update_W(self, verbose): - """ Update the dual weights during the PH algorithm. + """Update the dual weights during the PH algorithm. Args: verbose (bool): If True, displays verbose output during update. """ # Assumes the scenarios are up to date - for k,s in self.local_scenarios.items(): + for k, s in self.local_scenarios.items(): for ndn_i, nonant in s._mpisppy_data.nonant_indices.items(): - ##if nonant._value == None: ## print(f"***_value is None for nonant var {nonant.name}") - xdiff = nonant._value \ - - s._mpisppy_model.xbars[ndn_i]._value - s._mpisppy_model.W[ndn_i]._value += pyo.value(s._mpisppy_model.rho[ndn_i]) * xdiff + xdiff = nonant._value - s._mpisppy_model.xbars[ndn_i]._value + s._mpisppy_model.W[ndn_i]._value += ( + pyo.value(s._mpisppy_model.rho[ndn_i]) * xdiff + ) if verbose and self.cylinder_rank == 0: - print ("rank, node, scen, var, W", ndn_i[0], k, - self.cylinder_rank, nonant.name, - pyo.value(s._mpisppy_model.W[ndn_i])) + print( + "rank, node, scen, var, W", + ndn_i[0], + k, + self.cylinder_rank, + nonant.name, + pyo.value(s._mpisppy_model.W[ndn_i]), + ) # Special code for variable probabilities to mask W; rarely used. if s._mpisppy_data.has_variable_probability: for ndn_i in s._mpisppy_data.nonant_indices: (lndn, li) = ndn_i s._mpisppy_model.W[ndn_i] *= s._mpisppy_data.prob0_mask[lndn][li] - def Update_z(self, verbose): - """ Update the smoothing variable z during the PH algorithm. + """Update the smoothing variable z during the PH algorithm. Args: verbose (bool): If True, displays verbose output during update. """ # Assumes the scenarios are up to date - for k,s in self.local_scenarios.items(): + for k, s in self.local_scenarios.items(): for ndn_i, nonant in s._mpisppy_data.nonant_indices.items(): - - xzdiff = nonant._value \ - - s._mpisppy_model.z[ndn_i]._value - s._mpisppy_model.z[ndn_i]._value += pyo.value(s._mpisppy_model.beta[ndn_i]) * xzdiff + xzdiff = nonant._value - s._mpisppy_model.z[ndn_i]._value + s._mpisppy_model.z[ndn_i]._value += ( + pyo.value(s._mpisppy_model.beta[ndn_i]) * xzdiff + ) if verbose and self.cylinder_rank == 0: - print ("rank, node, scen, var, z", ndn_i[0], k, - self.cylinder_rank, nonant.name, - pyo.value(s._mpisppy_model.z[ndn_i])) - + print( + "rank, node, scen, var, z", + ndn_i[0], + k, + self.cylinder_rank, + nonant.name, + pyo.value(s._mpisppy_model.z[ndn_i]), + ) def convergence_diff(self): - """ Compute the convergence metric ||x_s - \\bar{x}||_1 / num_scenarios. + """Compute the convergence metric ||x_s - \\bar{x}||_1 / num_scenarios. - Returns: - float: - The convergence metric ||x_s - \\bar{x}||_1 / num_scenarios. + Returns: + float: + The convergence metric ||x_s - \\bar{x}||_1 / num_scenarios. 
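# A scalar sketch of the update rules defined above, with made-up numbers:
# Update_W does W += rho * (x - xbar), Update_z does z += beta * (x - z),
# and convergence_diff averages |x - xbar| over all nonants and scenarios.
rho, beta = 1.0, 0.5
x, xbar, W, z = 4.0, 3.0, 0.0, 0.0

W += rho * (x - xbar)   # dual (price) update; W becomes 1.0
z += beta * (x - z)     # smoothing target moves partway toward x; z becomes 2.0
conv = abs(x - xbar)    # one-variable version of ||x_s - xbar||_1 / num_scenarios
print(W, z, conv)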
""" # Every scenario has its own node list, with a vardata list global_diff = np.zeros(1) local_diff = np.zeros(1) varcount = 0 - for k,s in self.local_scenarios.items(): + for k, s in self.local_scenarios.items(): for ndn_i, nonant in s._mpisppy_data.nonant_indices.items(): xval = nonant._value xdiff = xval - s._mpisppy_model.xbars[ndn_i]._value @@ -370,9 +401,8 @@ def convergence_diff(self): return global_diff[0] / self.n_proc - def _populate_W_cache(self, cache, padding): - """ Copy the W values for noants *for all local scenarios* + """Copy the W values for noants *for all local scenarios* Args: cache (np vector) to receive the W's for all local scenarios (for sending) @@ -380,22 +410,25 @@ def _populate_W_cache(self, cache, padding): values into the same cache and the cache is *not* attached to the scenario. """ - ci = 0 # Cache index + ci = 0 # Cache index for model in self.local_scenarios.values(): if (ci + len(model._mpisppy_data.nonant_indices)) >= len(cache): - tlen = len(model._mpisppy_data.nonant_indices) * len(self.local_scenarios) - raise RuntimeError("W cache length mismatch detected by " - f"{self.__class__.__name__} that has " - f"total W len {tlen} but passed cache len-1={len(cache)-1}; " - f"len(nonants)={len(model._mpisppy_data.nonant_indices)}") + tlen = len(model._mpisppy_data.nonant_indices) * len( + self.local_scenarios + ) + raise RuntimeError( + "W cache length mismatch detected by " + f"{self.__class__.__name__} that has " + f"total W len {tlen} but passed cache len-1={len(cache)-1}; " + f"len(nonants)={len(model._mpisppy_data.nonant_indices)}" + ) for ix in model._mpisppy_data.nonant_indices: cache[ci] = pyo.value(model._mpisppy_model.W[ix]) ci += 1 - assert(ci == len(cache) - padding) # the other cylinder will fail above - + assert ci == len(cache) - padding # the other cylinder will fail above def W_from_flat_list(self, flat_list): - """ Set the dual weight values (Ws) for all local scenarios from a + """Set the dual weight values (Ws) for all local scenarios from a flat list. Args: @@ -406,39 +439,39 @@ def W_from_flat_list(self, flat_list): We are counting on Pyomo indices not to change order between list creation and use. 
""" - ci = 0 # Cache index + ci = 0 # Cache index for model in self.local_scenarios.values(): for ndn_i in model._mpisppy_data.nonant_indices: model._mpisppy_model.W[ndn_i].value = flat_list[ci] ci += 1 def _use_rho_setter(self, verbose): - """ set rho values using a function self.rho_setter + """set rho values using a function self.rho_setter that gives us a list of (id(vardata), rho)] """ if self.rho_setter is None: return didit = 0 skipped = 0 - rho_setter_kwargs = self.options['rho_setter_kwargs'] \ - if 'rho_setter_kwargs' in self.options \ - else dict() + rho_setter_kwargs = ( + self.options["rho_setter_kwargs"] + if "rho_setter_kwargs" in self.options + else dict() + ) for sname, scenario in self.local_scenarios.items(): rholist = self.rho_setter(scenario, **rho_setter_kwargs) - for (vid, rho) in rholist: + for vid, rho in rholist: (ndn, i) = scenario._mpisppy_data.varid_to_nonant_index[vid] scenario._mpisppy_model.rho[(ndn, i)] = rho didit += len(rholist) skipped += len(scenario._mpisppy_data.varid_to_nonant_index) - didit if verbose and self.cylinder_rank == 0: - print ("rho_setter set",didit,"and skipped",skipped) - + print("rho_setter set", didit, "and skipped", skipped) def _disable_prox(self): for k, scenario in self.local_scenarios.items(): scenario._mpisppy_model.prox_on = 0 - def _disable_W(self): # It would be odd to disable W and not prox. # TODO: we should eliminate this method @@ -446,30 +479,25 @@ def _disable_W(self): for scenario in self.local_scenarios.values(): scenario._mpisppy_model.W_on = 0 - def disable_W_and_prox(self): self._disable_W() self._disable_prox() - def _reenable_prox(self): for k, scenario in self.local_scenarios.items(): scenario._mpisppy_model.prox_on = 1 - def _reenable_W(self): # TODO: we should eliminate this method for k, scenario in self.local_scenarios.items(): scenario._mpisppy_model.W_on = 1 - def reenable_W_and_prox(self): self._reenable_W() self._reenable_prox() - def post_solve_bound(self, solver_options=None, verbose=False): - ''' Compute a bound Lagrangian bound using the existing weights. + """Compute a bound Lagrangian bound using the existing weights. Args: solver_options (dict, optional): @@ -486,15 +514,17 @@ def post_solve_bound(self, solver_options=None, verbose=False): suitable for use at the end of the solves, or if you really know what you are doing. It is not suitable as a general, per-iteration Lagrangian bound solver. - ''' - if (self.cylinder_rank == 0): - print('Warning: Lagrangian bounds might not be correct in certain ' - 'cases where there are integers not subject to ' - 'non-anticipativity and those integers do not reach integrality.') - if (verbose and self.cylinder_rank == 0): - print('Beginning post-solve Lagrangian bound computation') - - if (self.W_disabled): + """ + if self.cylinder_rank == 0: + print( + "Warning: Lagrangian bounds might not be correct in certain " + "cases where there are integers not subject to " + "non-anticipativity and those integers do not reach integrality." 
+ )
+ if verbose and self.cylinder_rank == 0:
+ print("Beginning post-solve Lagrangian bound computation")
+
+ if self.W_disabled:
 self._reenable_W()
 self._disable_prox()
@@ -503,32 +533,36 @@ def post_solve_bound(self, solver_options=None, verbose=False):
 # If dis_prox=True, they are enabled at the end, and Ebound returns
 # the incorrect value (unless you explicitly disable them again)

- self.solve_loop(solver_options=solver_options,
- dis_prox=False, # Important
- gripe=True,
- tee=False,
- verbose=verbose)
+ self.solve_loop(
+ solver_options=solver_options,
+ dis_prox=False,  # Important
+ gripe=True,
+ tee=False,
+ verbose=verbose,
+ )

 bound = self.Ebound(verbose)

 # A half-hearted attempt to restore the state
 self._reenable_prox()

- if (verbose and self.cylinder_rank == 0):
- print(f'Post-solve Lagrangian bound: {bound:.4f}')
+ if verbose and self.cylinder_rank == 0:
+ print(f"Post-solve Lagrangian bound: {bound:.4f}")
 return bound

-
- def solve_loop(self, solver_options=None,
- use_scenarios_not_subproblems=False,
- dtiming=False,
- dis_W=False,
- dis_prox=False,
- gripe=False,
- disable_pyomo_signal_handling=False,
- tee=False,
- verbose=False):
- """ Loop over `local_subproblems` and solve them in a manner
+ def solve_loop(
+ self,
+ solver_options=None,
+ use_scenarios_not_subproblems=False,
+ dtiming=False,
+ dis_W=False,
+ dis_prox=False,
+ gripe=False,
+ disable_pyomo_signal_handling=False,
+ tee=False,
+ verbose=False,
+ ):
+ """Loop over `local_subproblems` and solve them in a manner
 dictated by the arguments.

 In addition to changing the Var values in the scenarios, this function
@@ -580,13 +614,15 @@ def solve_loop(self, solver_options=None,
 if self._prox_approx and (not self.prox_disabled):
 self._update_prox_approx()

- super().solve_loop(solver_options,
- use_scenarios_not_subproblems,
- dtiming,
- gripe,
- disable_pyomo_signal_handling,
- tee,
- verbose)
+ super().solve_loop(
+ solver_options,
+ use_scenarios_not_subproblems,
+ dtiming,
+ gripe,
+ disable_pyomo_signal_handling,
+ tee,
+ verbose,
+ )

 if dis_W and dis_prox:
 self.reenable_W_and_prox()
@@ -595,7 +631,6 @@ def solve_loop(self, solver_options=None,
 elif dis_prox:
 self._reenable_prox()

-
 def _update_prox_approx(self):
 """
 update proximal term approximation by potentially
@@ -605,63 +640,82 @@ def _update_prox_approx(self):
 """
 tol = self.prox_approx_tol
 for sn, s in self.local_scenarios.items():
- persistent_solver = (s._solver_plugin if sputils.is_persistent(s._solver_plugin) else None)
- #print(f"total number of proximal cuts: {len(s._mpisppy_model.xsqvar_cuts)}")
+ persistent_solver = (
+ s._solver_plugin if sputils.is_persistent(s._solver_plugin) else None
+ )
+ # print(f"total number of proximal cuts: {len(s._mpisppy_model.xsqvar_cuts)}")
 for prox_approx_manager in s._mpisppy_data.xsqvar_prox_approx.values():
 prox_approx_manager.check_tol_add_cut(tol, persistent_solver)

-
 def attach_Ws_and_prox(self):
- """ Attach the dual and prox terms to the models in `local_scenarios`.
- """ - for (sname, scenario) in self.local_scenarios.items(): + """Attach the dual and prox terms to the models in `local_scenarios`.""" + for sname, scenario in self.local_scenarios.items(): # these are bound by index to the vardata list at the node - scenario._mpisppy_model.W = pyo.Param(scenario._mpisppy_data.nonant_indices.keys(), - initialize=0.0, - mutable=True) + scenario._mpisppy_model.W = pyo.Param( + scenario._mpisppy_data.nonant_indices.keys(), + initialize=0.0, + mutable=True, + ) # create ph objective terms, but disabled - scenario._mpisppy_model.W_on = pyo.Param(initialize=0, mutable=True, within=pyo.Binary) + scenario._mpisppy_model.W_on = pyo.Param( + initialize=0, mutable=True, within=pyo.Binary + ) - scenario._mpisppy_model.prox_on = pyo.Param(initialize=0, mutable=True, within=pyo.Binary) + scenario._mpisppy_model.prox_on = pyo.Param( + initialize=0, mutable=True, within=pyo.Binary + ) # note that rho is per var and scenario here - scenario._mpisppy_model.rho = pyo.Param(scenario._mpisppy_data.nonant_indices.keys(), - mutable=True, - default=self.options["defaultPHrho"]) - - + scenario._mpisppy_model.rho = pyo.Param( + scenario._mpisppy_data.nonant_indices.keys(), + mutable=True, + default=self.options["defaultPHrho"], + ) + def attach_smoothing(self): - """ Attach the smoothing terms to the models in `local_scenarios`. - """ - for (sname, scenario) in self.local_scenarios.items(): - scenario._mpisppy_model.z = pyo.Param(scenario._mpisppy_data.nonant_indices.keys(), - initialize=0.0, - mutable=True) - - scenario._mpisppy_model.p = pyo.Param(scenario._mpisppy_data.nonant_indices.keys(), - mutable=True, - default=self.options["defaultPHp"]) - - scenario._mpisppy_model.beta = pyo.Param(scenario._mpisppy_data.nonant_indices.keys(), - mutable=True, - default=self.options["defaultPHbeta"]) - + """Attach the smoothing terms to the models in `local_scenarios`.""" + for sname, scenario in self.local_scenarios.items(): + scenario._mpisppy_model.z = pyo.Param( + scenario._mpisppy_data.nonant_indices.keys(), + initialize=0.0, + mutable=True, + ) + + scenario._mpisppy_model.p = pyo.Param( + scenario._mpisppy_data.nonant_indices.keys(), + mutable=True, + default=self.options["defaultPHp"], + ) + + scenario._mpisppy_model.beta = pyo.Param( + scenario._mpisppy_data.nonant_indices.keys(), + mutable=True, + default=self.options["defaultPHbeta"], + ) @property def W_disabled(self): - assert hasattr(self.local_scenarios[self.local_scenario_names[0]]._mpisppy_model, 'W_on') - return not bool(self.local_scenarios[self.local_scenario_names[0]]._mpisppy_model.W_on.value) - + assert hasattr( + self.local_scenarios[self.local_scenario_names[0]]._mpisppy_model, "W_on" + ) + return not bool( + self.local_scenarios[self.local_scenario_names[0]]._mpisppy_model.W_on.value + ) @property def prox_disabled(self): - assert hasattr(self.local_scenarios[self.local_scenario_names[0]]._mpisppy_model, 'prox_on') - return not bool(self.local_scenarios[self.local_scenario_names[0]]._mpisppy_model.prox_on.value) - + assert hasattr( + self.local_scenarios[self.local_scenario_names[0]]._mpisppy_model, "prox_on" + ) + return not bool( + self.local_scenarios[ + self.local_scenario_names[0] + ]._mpisppy_model.prox_on.value + ) def attach_PH_to_objective(self, add_duals, add_prox, add_smooth=0): - """ Attach dual weight and prox terms to the objective function of the + """Attach dual weight and prox terms to the objective function of the models in `local_scenarios`. 
Args: @@ -671,24 +725,24 @@ def attach_PH_to_objective(self, add_duals, add_prox, add_smooth=0): If True, adds the prox term to the objective. """ - if ('linearize_binary_proximal_terms' in self.options): - lin_bin_prox = self.options['linearize_binary_proximal_terms'] + if "linearize_binary_proximal_terms" in self.options: + lin_bin_prox = self.options["linearize_binary_proximal_terms"] else: lin_bin_prox = False - if ('linearize_proximal_terms' in self.options): - self._prox_approx = self.options['linearize_proximal_terms'] - if 'proximal_linearization_tolerance' in self.options: - self.prox_approx_tol = self.options['proximal_linearization_tolerance'] + if "linearize_proximal_terms" in self.options: + self._prox_approx = self.options["linearize_proximal_terms"] + if "proximal_linearization_tolerance" in self.options: + self.prox_approx_tol = self.options["proximal_linearization_tolerance"] else: - self.prox_approx_tol = 1.e-1 + self.prox_approx_tol = 1.0e-1 else: self._prox_approx = False - for (sname, scenario) in self.local_scenarios.items(): + for sname, scenario in self.local_scenarios.items(): """Attach the dual and prox terms to the objective. """ - if ((not add_duals) and (not add_prox)): + if (not add_duals) and (not add_prox): return objfct = self.saved_objectives[sname] is_min_problem = objfct.is_minimizing() @@ -699,8 +753,12 @@ def attach_PH_to_objective(self, add_duals, add_prox, add_smooth=0): # set-up pyomo IndexVar, but keep it sparse # since some nonants might be binary # Define the first cut to be _xsqvar >= 0 - scenario._mpisppy_model.xsqvar = pyo.Var(scenario._mpisppy_data.nonant_indices, dense=False, bounds=(0, None)) - scenario._mpisppy_model.xsqvar_cuts = pyo.Constraint(scenario._mpisppy_data.nonant_indices, pyo.Integers) + scenario._mpisppy_model.xsqvar = pyo.Var( + scenario._mpisppy_data.nonant_indices, dense=False, bounds=(0, None) + ) + scenario._mpisppy_model.xsqvar_cuts = pyo.Constraint( + scenario._mpisppy_data.nonant_indices, pyo.Integers + ) scenario._mpisppy_data.xsqvar_prox_approx = {} else: scenario._mpisppy_model.xsqvar = None @@ -708,16 +766,19 @@ def attach_PH_to_objective(self, add_duals, add_prox, add_smooth=0): ph_term = 0 # Dual term (weights W) - if (add_duals): - scenario._mpisppy_model.WExpr = pyo.Expression(expr=\ - sum(scenario._mpisppy_model.W[ndn_i] * xvar \ - for ndn_i, xvar in scenario._mpisppy_data.nonant_indices.items()) ) + if add_duals: + scenario._mpisppy_model.WExpr = pyo.Expression( + expr=sum( + scenario._mpisppy_model.W[ndn_i] * xvar + for ndn_i, xvar in scenario._mpisppy_data.nonant_indices.items() + ) + ) ph_term += scenario._mpisppy_model.W_on * scenario._mpisppy_model.WExpr # Prox term (quadratic) - if (add_prox): - prox_expr = 0. - smooth_expr = 0. 
+ if add_prox:
+ prox_expr = 0.0
+ smooth_expr = 0.0
 for ndn_i, xvar in scenario._mpisppy_data.nonant_indices.items():
 # expand (x - xbar)**2 to (x**2 - 2*xbar*x + xbar**2)
 # x**2 is the only quadratic term, which might be
@@ -726,40 +787,51 @@ def attach_PH_to_objective(self, add_duals, add_prox, add_smooth=0):
 xvarsqrd = xvar
 elif self._prox_approx:
 xvarsqrd = scenario._mpisppy_model.xsqvar[ndn_i]
- scenario._mpisppy_data.xsqvar_prox_approx[ndn_i] = \
- ProxApproxManager(xvar, xvarsqrd, xbars[ndn_i], scenario._mpisppy_model.xsqvar_cuts, ndn_i)
+ scenario._mpisppy_data.xsqvar_prox_approx[ndn_i] = (
+ ProxApproxManager(
+ xvar,
+ xvarsqrd,
+ xbars[ndn_i],
+ scenario._mpisppy_model.xsqvar_cuts,
+ ndn_i,
+ )
+ )
 else:
 xvarsqrd = xvar**2
- prox_expr += (scenario._mpisppy_model.rho[ndn_i] / 2.0) * \
- (xvarsqrd - 2.0 * xbars[ndn_i] * xvar + xbars[ndn_i]**2)
-
+ prox_expr += (scenario._mpisppy_model.rho[ndn_i] / 2.0) * (
+ xvarsqrd - 2.0 * xbars[ndn_i] * xvar + xbars[ndn_i] ** 2
+ )
+
 # Computing smoothing term (quadratic)
- if (add_smooth):
- smooth_expr += (scenario._mpisppy_model.p[ndn_i] / 2.0) * \
- (xvarsqrd - 2.0 * scenario._mpisppy_model.z[ndn_i] * xvar \
- + scenario._mpisppy_model.z[ndn_i]**2)
+ if add_smooth:
+ smooth_expr += (scenario._mpisppy_model.p[ndn_i] / 2.0) * (
+ xvarsqrd
+ - 2.0 * scenario._mpisppy_model.z[ndn_i] * xvar
+ + scenario._mpisppy_model.z[ndn_i] ** 2
+ )

 scenario._mpisppy_model.ProxExpr = pyo.Expression(expr=prox_expr)
- ph_term += scenario._mpisppy_model.prox_on * scenario._mpisppy_model.ProxExpr
+ ph_term += (
+ scenario._mpisppy_model.prox_on * scenario._mpisppy_model.ProxExpr
+ )

- if (add_smooth):
+ if add_smooth:
 # Adding smoothing term
- scenario._mpisppy_model.SmoothExpr = pyo.Expression(expr=smooth_expr)
- ph_term += scenario._mpisppy_model.prox_on * scenario._mpisppy_model.SmoothExpr
-
- if (is_min_problem):
+ scenario._mpisppy_model.SmoothExpr = pyo.Expression(
+ expr=smooth_expr
+ )
+ ph_term += (
+ scenario._mpisppy_model.prox_on
+ * scenario._mpisppy_model.SmoothExpr
+ )
+
+ if is_min_problem:
 objfct.expr += ph_term
 else:
 objfct.expr -= ph_term
-
-
- def PH_Prep(
- self,
- attach_duals=True,
- attach_prox=True,
- attach_smooth=0
- ):
- """ Set up PH objectives (duals and prox terms), and prepare
+ def PH_Prep(self, attach_duals=True, attach_prox=True, attach_smooth=0):
+ """Set up PH objectives (duals and prox terms), and prepare
 extensions, if available.

 Args:
@@ -768,7 +840,7 @@ def PH_Prep(
 add_prox (boolean, optional):
 If True, adds prox terms to the objective. Default True.
 attach_smooth (int, optional):
- If 0, no smoothing; if 1, p_value is used; if 2, p_ratio is used. 
+ If 0, no smoothing; if 1, p_value is used; if 2, p_ratio is used.

 Note:
 This function constructs an Extension object if one was specified
@@ -781,9 +853,8 @@ def PH_Prep(
 self.attach_smoothing()
 self.attach_PH_to_objective(attach_duals, attach_prox, attach_smooth)

-
 def options_check(self):
- """ Check whether the options in the `options` attribute are
+ """Check whether the options in the `options` attribute are
 acceptable.

 Required options are
@@ -803,8 +874,12 @@ def options_check(self):
 """
 required = [
- "solver_name", "PHIterLimit", "defaultPHrho",
- "convthresh", "verbose", "display_progress",
+ "solver_name",
+ "PHIterLimit",
+ "defaultPHrho",
+ "convthresh",
+ "verbose",
+ "display_progress",
 ]
 self._options_check(required, self.options)
 # Display timing and display convergence detail are special for no good reason.
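# A quick numeric check of the expansion used in the proximal term above:
# writing (x - xbar)**2 as x**2 - 2*xbar*x + xbar**2 isolates x**2 as the
# only quadratic piece, which is what lets attach_PH_to_objective swap in
# the cut-managed xsqvar variable when linearize_proximal_terms is set.
rho, x, xbar = 2.0, 1.5, 1.0
direct = (rho / 2.0) * (x - xbar) ** 2
expanded = (rho / 2.0) * (x**2 - 2.0 * xbar * x + xbar**2)
assert abs(direct - expanded) < 1e-12   # both equal 0.25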
@@ -816,9 +891,8 @@ def options_check(self): if "smoothed" not in self.options: self.options["smoothed"] = 0 - def Iter0(self): - """ Create solvers and perform the initial PH solve (with no dual + """Create solvers and perform the initial PH solve (with no dual weights or prox terms). This function quits() if the scenario probabilities do not sum to one, @@ -832,7 +906,7 @@ def Iter0(self): stochastic program with the nonanticipativity constraints removed. """ - if (self.extensions is not None): + if self.extensions is not None: self.extobject.pre_iter0() verbose = self.options["verbose"] @@ -853,27 +927,37 @@ def _vb(msg): global_toc("Creating solvers") self._create_solvers() - teeme = ("tee-rank0-solves" in self.options - and self.options['tee-rank0-solves'] - and self.cylinder_rank == 0 - ) + teeme = ( + "tee-rank0-solves" in self.options + and self.options["tee-rank0-solves"] + and self.cylinder_rank == 0 + ) if self.options["verbose"]: - print ("About to call PH Iter0 solve loop on rank={}".format(self.cylinder_rank)) + print( + "About to call PH Iter0 solve loop on rank={}".format( + self.cylinder_rank + ) + ) global_toc("Entering solve loop in PHBase.Iter0") - self.solve_loop(solver_options=self.current_solver_options, - dtiming=dtiming, - gripe=True, - tee=teeme, - verbose=verbose) + self.solve_loop( + solver_options=self.current_solver_options, + dtiming=dtiming, + gripe=True, + tee=teeme, + verbose=verbose, + ) if self.options["verbose"]: - print ("PH Iter0 solve loop complete on rank={}".format(self.cylinder_rank)) + print("PH Iter0 solve loop complete on rank={}".format(self.cylinder_rank)) self._update_E1() # Apologies for doing this after the solves... - if (abs(1 - self.E1) > self.E1_tolerance): - raise RuntimeError(f"Total probability of scenarios was {self.E1}; E1_tolerance = ", self.E1_tolerance) + if abs(1 - self.E1) > self.E1_tolerance: + raise RuntimeError( + f"Total probability of scenarios was {self.E1}; E1_tolerance = ", + self.E1_tolerance, + ) feasP = self.feas_prob() if feasP != self.E1: raise RuntimeError(f"Infeasibility detected; E_feas={feasP}, E1={self.E1}") @@ -883,9 +967,9 @@ def _vb(msg): for sname in self.local_scenario_names: fd.write('*** {} ***\n'.format(sname)) """ - #global_toc('Rank: {} - Building and solving models 0th iteration'.format(rank), True) + # global_toc('Rank: {} - Building and solving models 0th iteration'.format(rank), True) - #global_toc('Rank: {} - assigning rho'.format(rank), True) + # global_toc('Rank: {} - assigning rho'.format(rank), True) if have_extensions: self.extobject.post_iter0() @@ -906,21 +990,23 @@ def _vb(msg): if smooth_type == 2: for _, scenario in self.local_scenarios.items(): for ndn_i, _ in scenario._mpisppy_data.nonant_indices.items(): - scenario._mpisppy_model.p[ndn_i] *= scenario._mpisppy_model.rho[ndn_i] + scenario._mpisppy_model.p[ndn_i] *= scenario._mpisppy_model.rho[ + ndn_i + ] if have_converger: # Call the constructor of the converger object self.convobject = self.ph_converger(self) - #global_toc('Rank: {} - Before iter loop'.format(self.cylinder_rank), True) + # global_toc('Rank: {} - Before iter loop'.format(self.cylinder_rank), True) self.conv = None self.trivial_bound = self.Ebound(verbose) if dprogress and self.cylinder_rank == 0: print("") - print("After PH Iteration",self._PHIter) + print("After PH Iteration", self._PHIter) print("Trivial bound =", self.trivial_bound) - print("PHBase Convergence Metric =",self.conv) + print("PHBase Convergence Metric =", self.conv) print("Elapsed time: %6.2f" % 
(time.perf_counter() - self.start_time))

 if dconvergence_detail:
 self.report_var_values_at_rank0(header="Convergence detail:")
@@ -932,9 +1018,8 @@ def _vb(msg):

 return self.trivial_bound

-
 def iterk_loop(self):
- """ Perform all PH iterations after iteration 0.
+ """Perform all PH iterations after iteration 0.

 This function terminates if any of the following occur:
@@ -960,26 +1045,28 @@ def iterk_loop(self):

 max_iterations = int(self.options["PHIterLimit"])

- for self._PHIter in range(1, max_iterations+1):
+ for self._PHIter in range(1, max_iterations + 1):
 iteration_start_time = time.time()

 if dprogress:
- global_toc(f"Initiating PH Iteration {self._PHIter}\n", self.cylinder_rank == 0)
+ global_toc(
+ f"Initiating PH Iteration {self._PHIter}\n", self.cylinder_rank == 0
+ )

 # Compute xbar
- #global_toc('Rank: {} - Before Compute_Xbar'.format(self.cylinder_rank), True)
+ # global_toc('Rank: {} - Before Compute_Xbar'.format(self.cylinder_rank), True)
 self.Compute_Xbar(verbose)
- #global_toc('Rank: {} - After Compute_Xbar'.format(self.cylinder_rank), True)

+ # global_toc('Rank: {} - After Compute_Xbar'.format(self.cylinder_rank), True)
 # update the weights
 self.Update_W(verbose)
- #global_toc('Rank: {} - After Update_W'.format(self.cylinder_rank), True)
+ # global_toc('Rank: {} - After Update_W'.format(self.cylinder_rank), True)

 if smoothed:
 self.Update_z(verbose)
-
+
 self.conv = self.convergence_diff()
- #global_toc('Rank: {} - After convergence_diff'.format(self.cylinder_rank), True)
+ # global_toc('Rank: {} - After convergence_diff'.format(self.cylinder_rank), True)

 if have_extensions:
 self.extobject.miditer()
@@ -989,16 +1076,23 @@ def iterk_loop(self):
 # latest data, even at termination
 if have_converger:
 if self.convobject.is_converged():
- global_toc("User-supplied converger determined termination criterion reached", self.cylinder_rank == 0)
+ global_toc(
+ "User-supplied converger determined termination criterion reached",
+ self.cylinder_rank == 0,
+ )
 break
 elif self.conv is not None:
 if self.conv < self.options["convthresh"]:
- global_toc("Convergence metric=%f dropped below user-supplied threshold=%f" % (self.conv, self.options["convthresh"]), self.cylinder_rank == 0)
+ global_toc(
+ "Convergence metric=%f dropped below user-supplied threshold=%f"
+ % (self.conv, self.options["convthresh"]),
+ self.cylinder_rank == 0,
+ )
 break

 teeme = (
 "tee-rank0-solves" in self.options
- and self.options["tee-rank0-solves"] 
+ and self.options["tee-rank0-solves"]
 and self.cylinder_rank == 0
 )

@@ -1008,7 +1102,7 @@ def iterk_loop(self):
 gripe=True,
 disable_pyomo_signal_handling=False,
 tee=teeme,
- verbose=verbose
+ verbose=verbose,
 )

 if have_extensions:
@@ -1025,26 +1119,29 @@ def iterk_loop(self):

 if dprogress and self.cylinder_rank == 0:
 print("")
- print("After PH Iteration",self._PHIter)
- print("Scaled PHBase Convergence Metric=",self.conv)
+ print("After PH Iteration", self._PHIter)
+ print("Scaled PHBase Convergence Metric=", self.conv)
 print("Iteration time: %6.2f" % (time.time() - iteration_start_time))
 print("Elapsed time: %6.2f" % (time.perf_counter() - self.start_time))

 if dconvergence_detail:
 self.report_var_values_at_rank0(header="Convergence detail:")

- else: # no break, (self._PHIter == max_iterations)
+ else:  # no break, (self._PHIter == max_iterations)
 # NOTE: If we return for any other reason, things are reasonably in-sync
 # due to the convergence check. However, if we return here, we'll be
 # out-of-sync because the solve_loop could take vastly different
 # times on different threads. This can especially mess up finalization.
# As a guard, we'll put a barrier here. self.mpicomm.Barrier() - global_toc("Reached user-specified limit=%d on number of PH iterations" % max_iterations, self.cylinder_rank == 0) - + global_toc( + "Reached user-specified limit=%d on number of PH iterations" + % max_iterations, + self.cylinder_rank == 0, + ) def post_loops(self, extensions=None): - """ Call scenario denouement methods, and report the expected objective + """Call scenario denouement methods, and report the expected objective value. Args: @@ -1068,7 +1165,7 @@ def post_loops(self, extensions=None): print("") if self.scenario_denouement is not None: - for sname,s in self.local_scenarios.items(): + for sname, s in self.local_scenarios.items(): self.scenario_denouement(self.cylinder_rank, sname, s) self.mpicomm.Barrier() @@ -1081,7 +1178,9 @@ def post_loops(self, extensions=None): if have_extensions: self.extobject.post_everything() - if self.ph_converger is not None and hasattr(self.ph_converger, 'post_everything'): + if self.ph_converger is not None and hasattr( + self.ph_converger, "post_everything" + ): self.convobject.post_everything() Eobj = self.Eobjective(verbose) @@ -1095,24 +1194,30 @@ def post_loops(self, extensions=None): if dtiming and self.cylinder_rank == 0: print("") - print("Cumulative execution time=%5.2f" % (time.perf_counter()-self.start_time)) + print( + "Cumulative execution time=%5.2f" + % (time.perf_counter() - self.start_time) + ) print("") return Eobj - def attach_xbars(self): - """ Attach xbar and xbar^2 Pyomo parameters to each model in + """Attach xbar and xbar^2 Pyomo parameters to each model in `local_scenarios`. """ for scenario in self.local_scenarios.values(): scenario._mpisppy_model.xbars = pyo.Param( - scenario._mpisppy_data.nonant_indices.keys(), initialize=0.0, mutable=True + scenario._mpisppy_data.nonant_indices.keys(), + initialize=0.0, + mutable=True, ) scenario._mpisppy_model.xsqbars = pyo.Param( - scenario._mpisppy_data.nonant_indices.keys(), initialize=0.0, mutable=True + scenario._mpisppy_data.nonant_indices.keys(), + initialize=0.0, + mutable=True, ) if __name__ == "__main__": - print ("No main for PHBase") + print("No main for PHBase") diff --git a/mpisppy/scenario_tree.py b/mpisppy/scenario_tree.py index febc2ffd0..ba1988ef0 100644 --- a/mpisppy/scenario_tree.py +++ b/mpisppy/scenario_tree.py @@ -13,7 +13,8 @@ import pyomo.environ as pyo from pyomo.core.base.indexed_component_slice import IndexedComponent_slice -logger = logging.getLogger('mpisppy.scenario_tree') +logger = logging.getLogger("mpisppy.scenario_tree") + def build_vardatalist(self, model, varlist=None): """ @@ -30,7 +31,9 @@ def build_vardatalist(self, model, varlist=None): # if the varlist is None, then assume we want all the active variables if varlist is None: raise RuntimeError("varlist is None in scenario_tree.build_vardatalist") - vardatalist = [v for v in model.component_data_objects(pyo.Var, active=True, sort=True)] + vardatalist = [ + v for v in model.component_data_objects(pyo.Var, active=True, sort=True) + ] elif isinstance(varlist, (pyo.Var, IndexedComponent_slice)): # user provided a variable, not a list of variables. Let's work with it anyway varlist = [varlist] @@ -47,7 +50,8 @@ def build_vardatalist(self, model, varlist=None): else: vardatalist.append(v) return vardatalist - + + class ScenarioNode: """Store a node in the scenario tree. 
@@ -59,22 +63,31 @@ class ScenarioNode:
 name (str): name of the node; one node must be named "ROOT"
 cond_prob (float): conditional probability
 stage (int): stage number (root is 1)
- cost_expression (pyo Expression or Var): stage cost 
+ cost_expression (pyo Expression or Var): stage cost
 nonant_list (list of pyo Var, Vardata or slices): the Vars that
 require nonanticipativity at the node (might not be a list)
 scen_model (pyo concrete model): the (probably not 'a') concrete model
 nonant_ef_suppl_list (list of pyo Var, Vardata or slices):
 vars for which nonanticipativity constraints tighten the EF
 (important for bundling)
- parent_name (str): name of the parent node 
+ parent_name (str): name of the parent node

 Lists:
 nonant_vardata(list of vardata objects): vardatas to blend
 x_bar_list(list of floats): bound by index to nonant_vardata
 """
- def __init__(self, name, cond_prob, stage, cost_expression,
- nonant_list, scen_model, nonant_ef_suppl_list=None,
- parent_name=None):
+
+ def __init__(
+ self,
+ name,
+ cond_prob,
+ stage,
+ cost_expression,
+ nonant_list,
+ scen_model,
+ nonant_ef_suppl_list=None,
+ parent_name=None,
+ ):
 """Initialize a ScenarioNode object.
 Assume most error detection is done elsewhere.
 """
@@ -84,20 +97,22 @@ def __init__(self, name, cond_prob, stage, cost_expression,
 self.cost_expression = cost_expression
 self.nonant_list = nonant_list
 self.nonant_ef_suppl_list = nonant_ef_suppl_list
- self.parent_name = parent_name # None for ROOT
+ self.parent_name = parent_name  # None for ROOT

 # now make the vardata lists
 if self.nonant_list is not None:
- self.nonant_vardata_list = build_vardatalist(self,
- scen_model,
- self.nonant_list)
+ self.nonant_vardata_list = build_vardatalist(
+ self, scen_model, self.nonant_list
+ )
 else:
- logger.warning("nonant_list is empty for node {},".format(name) +\
- "No nonanticipativity will be enforced at this node by default")
+ logger.warning(
+ "nonant_list is empty for node {}; ".format(name)
+ + "No nonanticipativity will be enforced at this node by default"
+ )
 self.nonant_vardata_list = []
 if self.nonant_ef_suppl_list is not None:
- self.nonant_ef_suppl_vardata_list = build_vardatalist(self,
- scen_model,
- self.nonant_ef_suppl_list)
+ self.nonant_ef_suppl_vardata_list = build_vardatalist(
+ self, scen_model, self.nonant_ef_suppl_list
+ )
 else:
 self.nonant_ef_suppl_vardata_list = []
diff --git a/mpisppy/spbase.py b/mpisppy/spbase.py
index 5de8203b7..407d00237 100644
--- a/mpisppy/spbase.py
+++ b/mpisppy/spbase.py
@@ -24,38 +24,38 @@

 class SPBase:
- """ Defines an interface to all strata (hubs and spokes)
-
- Args:
- options (dict): options
- all_scenario_names (list): all scenario names
- scenario_creator (fct): returns a concrete model with special things
- scenario_denouement (fct): for post processing and reporting
- all_nodenames (list): all node names (including leaves); can be None for 2 Stage
- mpicomm (MPI comm): if not given, use the global fullcomm
- scenario_creator_kwargs (dict): kwargs passed directly to
- scenario_creator.
- variable_probability (fct): returns a list of tuples of (id(var), prob)
- to set variable-specific probability (similar to PHBase.rho_setter).
-
- Attributes:
- local_scenarios (dict of scenario objects): concrete models with
- extra data, key is name
- comms (dict): keys are node names values are comm objects.
- local_scenario_names (list): names of locals
+ """Defines an interface to all strata (hubs and spokes)
+
+ Args:
+ options (dict): options
+ all_scenario_names (list): all scenario names
+ scenario_creator (fct): returns a concrete model with special things
+ scenario_denouement (fct): for post processing and reporting
+ all_nodenames (list): all node names (including leaves); can be None for two-stage problems
+ mpicomm (MPI comm): if not given, use the global fullcomm
+ scenario_creator_kwargs (dict): kwargs passed directly to
+ scenario_creator.
+ variable_probability (fct): returns a list of tuples of (id(var), prob)
+ to set variable-specific probability (similar to PHBase.rho_setter).
+
+ Attributes:
+ local_scenarios (dict of scenario objects): concrete models with
+ extra data, key is name
+ comms (dict): keys are node names, values are comm objects.
+ local_scenario_names (list): names of locals
 """

 def __init__(
- self,
- options,
- all_scenario_names,
- scenario_creator,
- scenario_denouement=None,
- all_nodenames=None,
- mpicomm=None,
- scenario_creator_kwargs=None,
- variable_probability=None,
- E1_tolerance=1e-5,
+ self,
+ options,
+ all_scenario_names,
+ scenario_creator,
+ scenario_denouement=None,
+ all_nodenames=None,
+ mpicomm=None,
+ scenario_creator_kwargs=None,
+ variable_probability=None,
+ E1_tolerance=1e-5,
 ):
 # TODO add missing and private attributes (JP)
 # TODO add a class attribute called ROOTNODENAME = "ROOT"
@@ -80,7 +80,7 @@ def __init__(
 else:
 raise RuntimeError("'ROOT' must be in the list of node names")
 self.variable_probability = variable_probability
- self.multistage = (len(self.all_nodenames) > 1)
+ self.multistage = len(self.all_nodenames) > 1

 # Set up MPI communicator and rank
 if mpicomm is not None:
@@ -91,7 +91,6 @@ def __init__(
 self.n_proc = self.mpicomm.Get_size()
 self.global_rank = MPI.COMM_WORLD.Get_rank()

-
 if options.get("toc", True):
 global_toc("Initializing SPBase")

@@ -124,9 +123,9 @@ def __init__(
 self.first_stage_solution_available = False

 def _set_sense(self, comm=None):
- """ Check to confirm that all the models constructed by scenario_crator
- have the same sense (min v. max), and set self.is_minimizing
- accordingly.
+ """Check to confirm that all the models constructed by scenario_creator
+ have the same sense (min v. max), and set self.is_minimizing
+ accordingly.
 """
 is_min, clear = sputils._models_have_same_sense(self.local_scenarios)
 if not clear:
@@ -152,10 +151,10 @@ def _set_sense(self, comm=None):
 )

 def _verify_nonant_lengths(self):
- local_node_nonant_lengths = {} # keys are tree node names
+ local_node_nonant_lengths = {}  # keys are tree node names

 # we need to accumulate all local contributions before the reduce
- for k,s in self.local_scenarios.items():
+ for k, s in self.local_scenarios.items():
 nlens = s._mpisppy_data.nlens
 for node in s._mpisppy_node_list:
 ndn = node.name
@@ -163,68 +162,73 @@ def _verify_nonant_lengths(self):
 if ndn not in local_node_nonant_lengths:
 local_node_nonant_lengths[ndn] = mylen
 elif local_node_nonant_lengths[ndn] != mylen:
- raise RuntimeError(f"Tree node {ndn} has scenarios with different numbers of non-anticipative "
- f"variables: {mylen} vs. {local_node_nonant_lengths[ndn]}")
+ raise RuntimeError(
+ f"Tree node {ndn} has scenarios with different numbers of non-anticipative "
+ f"variables: {mylen} vs. 
{local_node_nonant_lengths[ndn]}" + ) # compute node values(reduction) for ndn, val in local_node_nonant_lengths.items(): - local_val = np.array([val], 'i') - max_val = np.zeros(1, 'i') - self.comms[ndn].Allreduce([local_val, MPI.INT], - [max_val, MPI.INT], - op=MPI.MAX) + local_val = np.array([val], "i") + max_val = np.zeros(1, "i") + self.comms[ndn].Allreduce( + [local_val, MPI.INT], [max_val, MPI.INT], op=MPI.MAX + ) if val != int(max_val[0]): - raise RuntimeError(f"Tree node {ndn} has scenarios with different numbers of non-anticipative " - f"variables: {val} vs. max {max_val[0]}") - + raise RuntimeError( + f"Tree node {ndn} has scenarios with different numbers of non-anticipative " + f"variables: {val} vs. max {max_val[0]}" + ) + def _check_nodenames(self): for ndn in self.all_nodenames: - if ndn != 'ROOT' and sputils.parent_ndn(ndn) not in self.all_nodenames: - raise RuntimeError(f"all_nodenames is inconsistent:" - f"The node {sputils.parent_ndn(ndn)}, parent of {ndn}, is missing.") - + if ndn != "ROOT" and sputils.parent_ndn(ndn) not in self.all_nodenames: + raise RuntimeError( + f"all_nodenames is inconsistent:" + f"The node {sputils.parent_ndn(ndn)}, parent of {ndn}, is missing." + ) def _calculate_scenario_ranks(self): - """ Populate the following attributes - 1. self.scenario_names_to_rank (dict of dict): - keys are comms (i.e., tree nodes); values are dicts with keys - that are scenario names and values that are ranks within that comm + """Populate the following attributes + 1. self.scenario_names_to_rank (dict of dict): + keys are comms (i.e., tree nodes); values are dicts with keys + that are scenario names and values that are ranks within that comm - 2. self._rank_slices (list of lists) - indices correspond to ranks in self.mpicomm and the values are a list - of scenario indices - rank -> list of scenario indices for that rank + 2. self._rank_slices (list of lists) + indices correspond to ranks in self.mpicomm and the values are a list + of scenario indices + rank -> list of scenario indices for that rank - 3. self._scenario_slices (list) - indices are scenario indices and values are the rank of that scenario - within self.mpicomm - scenario index -> rank + 3. self._scenario_slices (list) + indices are scenario indices and values are the rank of that scenario + within self.mpicomm + scenario index -> rank - 4. self._scenario_tree (instance of sputils._ScenTree) + 4. self._scenario_tree (instance of sputils._ScenTree) - 5. self.local_scenario_names (list) - List of index names owned by the local rank + 5. 
self.local_scenario_names (list) + List of index names owned by the local rank """ tree = sputils._ScenTree(self.all_nodenames, self.all_scenario_names) - self.scenario_names_to_rank, self._rank_slices, self._scenario_slices =\ - tree.scen_names_to_ranks(self.n_proc) + self.scenario_names_to_rank, self._rank_slices, self._scenario_slices = ( + tree.scen_names_to_ranks(self.n_proc) + ) self._scenario_tree = tree - self.nonleaves = {node.name : node for node in tree.nonleaves} + self.nonleaves = {node.name: node for node in tree.nonleaves} # list of scenario names owned locally self.local_scenario_names = [ self.all_scenario_names[i] for i in self._rank_slices[self.cylinder_rank] ] - def _assign_bundles(self): - """ Create self.names_in_bundles, a dict of dicts - - self.names_in_bundles[rank number][bundle number] = - list of scenarios in that bundle + """Create self.names_in_bundles, a dict of dicts + + self.names_in_bundles[rank number][bundle number] = + list of scenarios in that bundle """ scen_count = len(self.all_scenario_names) @@ -257,13 +261,13 @@ def _assign_bundles(self): } def _create_scenarios(self, scenario_creator_kwargs): - """ Call the scenario_creator for every local scenario, and store the - results in self.local_scenarios (dict indexed by scenario names). + """Call the scenario_creator for every local scenario, and store the + results in self.local_scenarios (dict indexed by scenario names). - Notes: - If a scenario probability is not specified as an attribute - _mpisppy_probability of the ConcreteModel returned by ScenarioCreator, - this function automatically assumes uniform probabilities. + Notes: + If a scenario probability is not specified as an attribute + _mpisppy_probability of the ConcreteModel returned by ScenarioCreator, + this function automatically assumes uniform probabilities. 
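# A minimal sketch (names hypothetical) of a scenario_creator that sets the
# probability attribute _create_scenarios looks for; omitting it, or
# assigning the string "uniform", gives every scenario probability
# 1/len(all_scenario_names). A real mpi-sppy scenario must also attach
# _mpisppy_node_list, which _look_and_leap below checks for.
import pyomo.environ as pyo

def scenario_creator(scenario_name):
    model = pyo.ConcreteModel(scenario_name)
    model.x = pyo.Var(within=pyo.NonNegativeReals)
    model.obj = pyo.Objective(expr=model.x)
    model._mpisppy_probability = 0.25   # or "uniform", or omit for the default
    return model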
""" if self.scenarios_constructed: raise RuntimeError("Scenarios already constructed.") @@ -271,43 +275,44 @@ def _create_scenarios(self, scenario_creator_kwargs): if scenario_creator_kwargs is None: scenario_creator_kwargs = dict() - local_ict = list() # Local instance creation times for time tracking + local_ict = list() # Local instance creation times for time tracking for sname in self.local_scenario_names: instance_creation_start_time = time.time() s = self.scenario_creator(sname, **scenario_creator_kwargs) self.local_scenarios[sname] = s if self.multistage: - #Checking that the scenario can have an associated leaf node in all_nodenames + # Checking that the scenario can have an associated leaf node in all_nodenames stmax = np.argmax([nd.stage for nd in s._mpisppy_node_list]) - if(s._mpisppy_node_list[stmax].name)+'_0' not in self.all_nodenames: - raise RuntimeError("The leaf node associated with this scenario is not on all_nodenames" - f"Its last non-leaf node {s._mpisppy_node_list[stmax].name} has no first child {s._mpisppy_node_list[stmax].name+'_0'}") + if (s._mpisppy_node_list[stmax].name) + "_0" not in self.all_nodenames: + raise RuntimeError( + "The leaf node associated with this scenario is not on all_nodenames" + f"Its last non-leaf node {s._mpisppy_node_list[stmax].name} has no first child {s._mpisppy_node_list[stmax].name+'_0'}" + ) local_ict.append(time.time() - instance_creation_start_time) - + if self.options.get("display_timing", False): - all_instance_creation_times = self.mpicomm.gather( - local_ict, root=0 - ) + all_instance_creation_times = self.mpicomm.gather(local_ict, root=0) if self.cylinder_rank == 0: aict = [ict for l_ict in all_instance_creation_times for ict in l_ict] print("Scenario instance creation times:") - print(f"\tmin={np.min(aict):4.2f} mean={np.mean(aict):4.2f} max={np.max(aict):4.2f}") + print( + f"\tmin={np.min(aict):4.2f} mean={np.mean(aict):4.2f} max={np.max(aict):4.2f}" + ) self.scenarios_constructed = True def _attach_nonant_indices(self): - for (sname, scenario) in self.local_scenarios.items(): + for sname, scenario in self.local_scenarios.items(): _nonant_indices = dict() - nlens = scenario._mpisppy_data.nlens + nlens = scenario._mpisppy_data.nlens for node in scenario._mpisppy_node_list: ndn = node.name for i in range(nlens[ndn]): - _nonant_indices[ndn,i] = node.nonant_vardata_list[i] + _nonant_indices[ndn, i] = node.nonant_vardata_list[i] scenario._mpisppy_data.nonant_indices = _nonant_indices self.nonant_length = len(_nonant_indices) - def _attach_nlens(self): - for (sname, scenario) in self.local_scenarios.items(): + for sname, scenario in self.local_scenarios.items(): # Things need to be by node so we can bind to the # indices of the vardata lists for the nodes. scenario._mpisppy_data.nlens = { @@ -323,29 +328,29 @@ def _attach_nlens(self): scenario._mpisppy_data.cistart[ndn] = sofar sofar += ndn_len - def _attach_varid_to_nonant_index(self): - """ Create a map from the id of nonant variables to their Pyomo index. - """ - for (sname, scenario) in self.local_scenarios.items(): + """Create a map from the id of nonant variables to their Pyomo index.""" + for sname, scenario in self.local_scenarios.items(): # In order to support rho setting, create a map # from the id of vardata object back its _nonant_index. 
- scenario._mpisppy_data.varid_to_nonant_index =\ - {id(var): ndn_i for ndn_i, var in scenario._mpisppy_data.nonant_indices.items()} - + scenario._mpisppy_data.varid_to_nonant_index = { + id(var): ndn_i + for ndn_i, var in scenario._mpisppy_data.nonant_indices.items() + } def _create_communicators(self): - # Create communicator objects, one for each node nonleafnodes = dict() - for (sname, scenario) in self.local_scenarios.items(): + for sname, scenario in self.local_scenarios.items(): for node in scenario._mpisppy_node_list: nonleafnodes[node.name] = node # might be assigned&reassigned # check the node names given by the scenarios for nodename in nonleafnodes: if nodename not in self.all_nodenames: - raise RuntimeError(f"Tree node '{nodename}' not in all_nodenames list {self.all_nodenames} for {self.global_rank=}") + raise RuntimeError( + f"Tree node '{nodename}' not in all_nodenames list {self.all_nodenames} for {self.global_rank=}" + ) # loop over all nodes and make the comms (split requires all ranks) # make sure we loop in the same order, so every rank iterate over @@ -354,12 +359,14 @@ def _create_communicators(self): if nodename == "ROOT": self.comms["ROOT"] = self.mpicomm elif nodename in nonleafnodes: - #The position in all_nodenames is an integer unique id. + # The position in all_nodenames is an integer unique id. nodenumber = self.all_nodenames.index(nodename) # IMPORTANT: See note in sputils._ScenTree.scen_names_to_ranks. Need to keep # this split aligned with self.scenario_names_to_rank - self.comms[nodename] = self.mpicomm.Split(color=nodenumber, key=self.cylinder_rank) - else: # this rank is not included in the communicator + self.comms[nodename] = self.mpicomm.Split( + color=nodenumber, key=self.cylinder_rank + ) + else: # this rank is not included in the communicator self.mpicomm.Split(color=MPI.UNDEFINED, key=self.n_proc) ## ensure we've set things up correctly for all comms @@ -368,35 +375,41 @@ def _create_communicators(self): for sname, rank in scenario_names_to_comm_rank.items(): if sname in self.local_scenarios: if rank != comm.Get_rank(): - raise RuntimeError(f"For the node {nodename}, the scenario {sname} has the rank {rank} from scenario_names_to_rank and {comm.Get_rank()} from its comm.") - + raise RuntimeError( + f"For the node {nodename}, the scenario {sname} has the rank {rank} from scenario_names_to_rank and {comm.Get_rank()} from its comm." + ) + ## ensure we've set things up correctly for all local scenarios for sname in self.local_scenarios: for nodename, comm in self.comms.items(): scenario_names_to_comm_rank = self.scenario_names_to_rank[nodename] if sname in scenario_names_to_comm_rank: if comm.Get_rank() != scenario_names_to_comm_rank[sname]: - raise RuntimeError(f"For the node {nodename}, the scenario {sname} has the rank {rank} from scenario_names_to_rank and {comm.Get_rank()} from its comm.") - + raise RuntimeError( + f"For the node {nodename}, the scenario {sname} has the rank {rank} from scenario_names_to_rank and {comm.Get_rank()} from its comm." 
+ ) def _compute_unconditional_node_probabilities(self): - """ calculates unconditional node probabilities and prob_coeff - and prob0_mask is set to a scalar 1 (used by variable_probability)""" - for k,s in self.local_scenarios.items(): + """calculates unconditional node probabilities and prob_coeff + and prob0_mask is set to a scalar 1 (used by variable_probability)""" + for k, s in self.local_scenarios.items(): root = s._mpisppy_node_list[0] root.uncond_prob = 1.0 - for parent,child in zip(s._mpisppy_node_list[:-1],s._mpisppy_node_list[1:]): + for parent, child in zip( + s._mpisppy_node_list[:-1], s._mpisppy_node_list[1:] + ): child.uncond_prob = parent.uncond_prob * child.cond_prob - if not hasattr(s._mpisppy_data, 'prob_coeff'): + if not hasattr(s._mpisppy_data, "prob_coeff"): s._mpisppy_data.prob_coeff = dict() s._mpisppy_data.prob0_mask = dict() for node in s._mpisppy_node_list: - s._mpisppy_data.prob_coeff[node.name] = (s._mpisppy_probability / node.uncond_prob) + s._mpisppy_data.prob_coeff[node.name] = ( + s._mpisppy_probability / node.uncond_prob + ) s._mpisppy_data.prob0_mask[node.name] = 1.0 # needs to be a float - def _use_variable_probability_setter(self, verbose=False): - """ set variable probability unconditional values using a function self.variable_probability + """set variable probability unconditional values using a function self.variable_probability that gives us a list of (id(vardata), probability)] ALSO set prob0_mask, which is a mask for W calculations (mask out zero probs) Note: We estimate that less than 0.01 of mpi-sppy runs will call this. @@ -407,27 +420,35 @@ def _use_variable_probability_setter(self, verbose=False): return didit = 0 skipped = 0 - variable_probability_kwargs = self.options['variable_probability_kwargs'] \ - if 'variable_probability_kwargs' in self.options \ - else dict() - sum_probs = {} # indexed by (ndn,i) - maps to sum of probs for that variable + variable_probability_kwargs = ( + self.options["variable_probability_kwargs"] + if "variable_probability_kwargs" in self.options + else dict() + ) + sum_probs = {} # indexed by (ndn,i) - maps to sum of probs for that variable for sname, s in self.local_scenarios.items(): - variable_probability = self.variable_probability(s, **variable_probability_kwargs) + variable_probability = self.variable_probability( + s, **variable_probability_kwargs + ) s._mpisppy_data.has_variable_probability = True - for (vid, prob) in variable_probability: + for vid, prob in variable_probability: ndn, i = s._mpisppy_data.varid_to_nonant_index[vid] # If you are going to do any variables at a node, you have to do all. if type(s._mpisppy_data.prob_coeff[ndn]) is float: # not yet a vector defprob = s._mpisppy_data.prob_coeff[ndn] - s._mpisppy_data.prob_coeff[ndn] = np.full(s._mpisppy_data.nlens[ndn], defprob, dtype='d') - s._mpisppy_data.prob0_mask[ndn] = np.ones(s._mpisppy_data.nlens[ndn], dtype='d') + s._mpisppy_data.prob_coeff[ndn] = np.full( + s._mpisppy_data.nlens[ndn], defprob, dtype="d" + ) + s._mpisppy_data.prob0_mask[ndn] = np.ones( + s._mpisppy_data.nlens[ndn], dtype="d" + ) s._mpisppy_data.prob_coeff[ndn][i] = prob if prob == 0: # there's probably a way to do this in numpy... 
 s._mpisppy_data.prob0_mask[ndn][i] = 0
- sum_probs[(ndn,i)] = sum_probs.get((ndn,i),0.0) + prob
+ sum_probs[(ndn, i)] = sum_probs.get((ndn, i), 0.0) + prob
 didit += len(variable_probability)
 skipped += len(s._mpisppy_data.varid_to_nonant_index) - didit
-
+
 """ this needs to be MPIized; but check below should do the trick
 for (ndn,i),prob in sum_probs.items():
 if not math.isclose(prob, 1.0, abs_tol=self.E1_tolerance):
@@ -435,12 +456,12 @@ def _use_variable_probability_setter(self, verbose=False):
 """

 if verbose and self.cylinder_rank == 0:
- print ("variable_probability set",didit,"and skipped",skipped)
+ print("variable_probability set", didit, "and skipped", skipped)

- if not self.options.get('do_not_check_variable_probabilities', False):
+ if not self.options.get("do_not_check_variable_probabilities", False):
 self._check_variable_probabilities_sum(verbose)

- def is_zero_prob( self, scenario_model, var ):
+ def is_zero_prob(self, scenario_model, var):
 """
 Args:
 scenario_model : a value in SPBase.local_scenarios
@@ -454,28 +475,27 @@ def is_zero_prob( self, scenario_model, var ):
 _mpisppy_data = scenario_model._mpisppy_data
 ndn, i = _mpisppy_data.varid_to_nonant_index[id(var)]
 if isinstance(_mpisppy_data.prob_coeff[ndn], np.ndarray):
- return float(_mpisppy_data.prob_coeff[ndn][i]) == 0.
+ return float(_mpisppy_data.prob_coeff[ndn][i]) == 0.0
 else:
 return False

 def _check_variable_probabilities_sum(self, verbose):
-
- nodenames = [] # to transmit to comms
- local_concats = {} # keys are tree node names
- global_concats = {} # values sums of node conditional probabilities
+ nodenames = []  # to transmit to comms
+ local_concats = {}  # keys are tree node names
+ global_concats = {}  # values sums of node conditional probabilities

 # we need to accumulate all local contributions before the reduce
- for k,s in self.local_scenarios.items():
+ for k, s in self.local_scenarios.items():
 nlens = s._mpisppy_data.nlens
 for node in s._mpisppy_node_list:
 if node.name not in nodenames:
 ndn = node.name
 nodenames.append(ndn)
- local_concats[ndn] = np.zeros(nlens[ndn], dtype='d')
- global_concats[ndn] = np.zeros(nlens[ndn], dtype='d')
+ local_concats[ndn] = np.zeros(nlens[ndn], dtype="d")
+ global_concats[ndn] = np.zeros(nlens[ndn], dtype="d")

 # sum local conditional probabilities
- for k,s in self.local_scenarios.items():
+ for k, s in self.local_scenarios.items():
 for node in s._mpisppy_node_list:
 ndn = node.name
 local_concats[ndn] += s._mpisppy_data.prob_coeff[ndn]
@@ -485,54 +505,70 @@ def _check_variable_probabilities_sum(self, verbose):
 self.comms[ndn].Allreduce(
 [local_concats[ndn], MPI.DOUBLE],
 [global_concats[ndn], MPI.DOUBLE],
- op=MPI.SUM)
+ op=MPI.SUM,
+ )

 tol = self.E1_tolerance
 checked_nodes = list()
 # check that the sums of node conditional probabilities are close to 1
- for k,s in self.local_scenarios.items():
+ for k, s in self.local_scenarios.items():
 nlens = s._mpisppy_data.nlens
 for node in s._mpisppy_node_list:
 ndn = node.name
 if ndn not in checked_nodes:
- if not np.allclose(global_concats[ndn], 1., atol=tol):
- notclose = ~np.isclose(global_concats[ndn], 1., atol=tol)
+ if not np.allclose(global_concats[ndn], 1.0, atol=tol):
+ notclose = ~np.isclose(global_concats[ndn], 1.0, atol=tol)
 indices = np.nonzero(notclose)[0]
- bad_vars = [ s._mpisppy_data.nonant_indices[ndn,idx].name for idx in indices ]
- badprobs = [ global_concats[ndn][idx] for idx in indices]
- raise RuntimeError(f"Node {ndn}, variables {bad_vars} have respective"
- f" conditional probability sum {badprobs}"
- " which are not 1")
+ 
bad_vars = [ + s._mpisppy_data.nonant_indices[ndn, idx].name + for idx in indices + ] + badprobs = [global_concats[ndn][idx] for idx in indices] + raise RuntimeError( + f"Node {ndn}, variables {bad_vars} have respective" + f" conditional probability sum {badprobs}" + " which are not 1" + ) checked_nodes.append(ndn) - def _look_and_leap(self): - for (sname, scenario) in self.local_scenarios.items(): - + for sname, scenario in self.local_scenarios.items(): if not hasattr(scenario, "_mpisppy_data"): scenario._mpisppy_data = pyo.Block(name="For non-Pyomo mpi-sppy data") if not hasattr(scenario, "_mpisppy_model"): - scenario._mpisppy_model = pyo.Block(name="For mpi-sppy Pyomo additions to the scenario model") + scenario._mpisppy_model = pyo.Block( + name="For mpi-sppy Pyomo additions to the scenario model" + ) if hasattr(scenario, "PySP_prob"): raise RuntimeError("PySP_prob is deprecated; use _mpisppy_probability") - pspec = scenario._mpisppy_probability if hasattr(scenario, "_mpisppy_probability") else None + pspec = ( + scenario._mpisppy_probability + if hasattr(scenario, "_mpisppy_probability") + else None + ) if pspec is None or pspec == "uniform": - prob = 1./len(self.all_scenario_names) + prob = 1.0 / len(self.all_scenario_names) if self.cylinder_rank == 0 and pspec is None: - print(f"Did not find _mpisppy_probability, assuming uniform probability {prob} (avoid this message by assigning a probability or the string 'uniform' to _mpisppy_probability on the scenario model object)") + print( + f"Did not find _mpisppy_probability, assuming uniform probability {prob} (avoid this message by assigning a probability or the string 'uniform' to _mpisppy_probability on the scenario model object)" + ) scenario._mpisppy_probability = prob if not hasattr(scenario, "_mpisppy_node_list"): raise RuntimeError(f"_mpisppy_node_list not found on scenario {sname}") def _options_check(self, required_options, given_options): - """ Confirm that the specified list of options contains the specified - list of required options. Raises a ValueError if anything is - missing. + """Confirm that the specified list of options contains the specified + list of required options. Raises a ValueError if anything is + missing. """ - missing = [option for option in required_options if given_options.get(option) is None] + missing = [ + option for option in required_options if given_options.get(option) is None + ] if missing: - raise ValueError(f"Missing the following required options: {', '.join(missing)}") + raise ValueError( + f"Missing the following required options: {', '.join(missing)}" + ) @property def spcomm(self): @@ -547,19 +583,17 @@ def spcomm(self, value): else: raise RuntimeError("SPBase.spcomm should only be set once") - def allreduce_or(self, val): - local_val = np.array([val], dtype='int8') - global_val = np.zeros(1, dtype='int8') + local_val = np.array([val], dtype="int8") + global_val = np.zeros(1, dtype="int8") self.mpicomm.Allreduce(local_val, global_val, op=MPI.LOR) if global_val[0] > 0: return True else: return False - def gather_var_values_to_rank0(self, get_zero_prob_values=False): - """ Gather the values of the nonanticipative variables to the root of + """Gather the values of the nonanticipative variables to the root of the `mpicomm` for the cylinder Returns: @@ -569,14 +603,14 @@ def gather_var_values_to_rank0(self, get_zero_prob_values=False): ranks, returns None. 
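# A usage sketch for gather_var_values_to_rank0, assuming `opt` is an
# SPBase-derived object (e.g. a PHBase instance): every rank must
# participate in the call, but only the cylinder's rank 0 receives the
# merged {(scenario_name, var_name): value} dict; all other ranks get None.
var_values = opt.gather_var_values_to_rank0()
if opt.cylinder_rank == 0:
    for (sname, vname), val in var_values.items():
        print(f"{sname:>12s} {vname:<24s} {val}")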
""" var_values = dict() - for (sname, model) in self.local_scenarios.items(): + for sname, model in self.local_scenarios.items(): for node in model._mpisppy_node_list: for var in node.nonant_vardata_list: var_name = var.name if self.bundling: - dot_index = var_name.find('.') + dot_index = var_name.find(".") assert dot_index >= 0 - var_name = var_name[(dot_index+1):] + var_name = var_name[(dot_index + 1) :] if (self.is_zero_prob(model, var)) and (not get_zero_prob_values): var_values[sname, var_name] = None else: @@ -587,77 +621,96 @@ def gather_var_values_to_rank0(self, get_zero_prob_values=False): result = self.mpicomm.gather(var_values, root=0) - if (self.cylinder_rank == 0): - result = {key: value - for dic in result - for (key, value) in dic.items() - } + if self.cylinder_rank == 0: + result = {key: value for dic in result for (key, value) in dic.items()} return result - def report_var_values_at_rank0(self, header="", print_zero_prob_values=False): - """ Pretty-print the values and associated statistics for - non-anticipative variables across all scenarios. """ + """Pretty-print the values and associated statistics for + non-anticipative variables across all scenarios.""" - var_values = self.gather_var_values_to_rank0(get_zero_prob_values=print_zero_prob_values) + var_values = self.gather_var_values_to_rank0( + get_zero_prob_values=print_zero_prob_values + ) if self.cylinder_rank == 0: - if len(header) != 0: print(header) - scenario_names = sorted(set(x for (x,y) in var_values)) + scenario_names = sorted(set(x for (x, y) in var_values)) max_scenario_name_len = max(len(s) for s in scenario_names) - variable_names = sorted(set(y for (x,y) in var_values)) + variable_names = sorted(set(y for (x, y) in var_values)) max_variable_name_len = max(len(v) for v in variable_names) # the "10" below is a reasonable minimum for floating-point output value_field_len = max(10, max_scenario_name_len) - print("{0: <{width}s} | ".format("", width=max_variable_name_len), end='') + print("{0: <{width}s} | ".format("", width=max_variable_name_len), end="") for this_scenario in scenario_names: - print("{0: ^{width}s} ".format(this_scenario, width=value_field_len), end='') + print( + "{0: ^{width}s} ".format(this_scenario, width=value_field_len), + end="", + ) print("") for this_var in variable_names: - print("{0: <{width}} | ".format(this_var, width=max_variable_name_len), end='') + print( + "{0: <{width}} | ".format(this_var, width=max_variable_name_len), + end="", + ) for this_scenario in scenario_names: if (this_scenario, this_var) not in var_values: - print("{0: ^{width}s}".format("-", width=value_field_len), end='') + print( + "{0: ^{width}s}".format("-", width=value_field_len), end="" + ) else: this_var_value = var_values[this_scenario, this_var] if (this_var_value is None) and (not print_zero_prob_values): - print("{0: ^{width}s}".format("-", width=value_field_len), end='') + print( + "{0: ^{width}s}".format("-", width=value_field_len), + end="", + ) else: - print("{0: {width}.4f}".format(this_var_value, width=value_field_len), end='') - print(" ", end='') + print( + "{0: {width}.4f}".format( + this_var_value, width=value_field_len + ), + end="", + ) + print(" ", end="") print("") - def write_first_stage_solution(self, file_name, - first_stage_solution_writer=sputils.first_stage_nonant_writer): - """ Writes the first-stage solution, if this object reports one available. 
+ def write_first_stage_solution( + self, file_name, first_stage_solution_writer=sputils.first_stage_nonant_writer + ): + """Writes the first-stage solution, if this object reports one available. - Args: - file_name: path of file to write first stage solution to - first_stage_solution_writer (optional): custom first stage solution writer function + Args: + file_name: path of file to write first stage solution to + first_stage_solution_writer (optional): custom first stage solution writer function """ if not self.first_stage_solution_available: raise RuntimeError("No first stage solution available") if self.cylinder_rank == 0: dirname = os.path.dirname(file_name) - if dirname != '': + if dirname != "": os.makedirs(os.path.dirname(file_name), exist_ok=True) representative_scenario = self.local_scenarios[self.local_scenario_names[0]] - first_stage_solution_writer(file_name, representative_scenario, self.bundling) + first_stage_solution_writer( + file_name, representative_scenario, self.bundling + ) - def write_tree_solution(self, directory_name, - scenario_tree_solution_writer=sputils.scenario_tree_solution_writer): - """ Writes the tree solution, if this object reports one available. - Raises a RuntimeError if it is not. + def write_tree_solution( + self, + directory_name, + scenario_tree_solution_writer=sputils.scenario_tree_solution_writer, + ): + """Writes the tree solution, if this object reports one available. + Raises a RuntimeError if it is not. - Args: - directory_name: directory to write tree solution to - scenario_tree_solution_writer (optional): custom scenario solution writer function + Args: + directory_name: directory to write tree solution to + scenario_tree_solution_writer (optional): custom scenario solution writer function """ if not self.tree_solution_available: raise RuntimeError("No tree solution available") @@ -665,4 +718,6 @@ def write_tree_solution(self, directory_name, os.makedirs(directory_name, exist_ok=True) self.mpicomm.Barrier() for scenario_name, scenario in self.local_scenarios.items(): - scenario_tree_solution_writer(directory_name, scenario_name, scenario, self.bundling) + scenario_tree_solution_writer( + directory_name, scenario_name, scenario, self.bundling + ) diff --git a/mpisppy/spin_the_wheel.py b/mpisppy/spin_the_wheel.py index c6913538d..6a1b3e057 100644 --- a/mpisppy/spin_the_wheel.py +++ b/mpisppy/spin_the_wheel.py @@ -11,24 +11,24 @@ from mpisppy import haveMPI, global_toc, MPI from mpisppy.utils.sputils import ( - first_stage_nonant_writer, - scenario_tree_solution_writer, - ) + first_stage_nonant_writer, + scenario_tree_solution_writer, +) -class WheelSpinner: +class WheelSpinner: def __init__(self, hub_dict, list_of_spoke_dict): - """ top level for the hub and spoke system + """top level for the hub and spoke system Args: hub_dict(dict): controls hub creation list_of_spoke_dict(list dict): controls creation of spokes - + Returns: spcomm (Hub or Spoke object): the object that did the work (windowless) opt_dict (dict): the dictionary that controlled creation for this rank - + NOTE: the return is after termination; the objects are provided for query. 
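[Editor's note] For readers skimming this hunk, the two dictionaries WheelSpinner consumes have the shape sketched below. MyHub and MyOpt are hypothetical stand-ins for a hub class and an SPOpt-derived class, and the opt_kwargs keys mirror the SPOpt constructor shown later in this diff.

    hub_dict = {
        "hub_class": MyHub,  # hypothetical hub class
        "hub_kwargs": dict(),
        "opt_class": MyOpt,  # hypothetical SPOpt-derived class
        "opt_kwargs": {
            "options": options,  # assumed to be defined by the caller
            "all_scenario_names": all_scenario_names,
            "scenario_creator": scenario_creator,
        },
    }
    ws = WheelSpinner(hub_dict, list_of_spoke_dict=[])
    ws.run()  # needs a multiple of 1 + n_spokes MPI processes
    if ws.on_hub():
        ws.write_first_stage_solution("first_stage_solution.txt")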
- + """ if not haveMPI: raise RuntimeError("spin_the_wheel called, but cannot import mpi4py") @@ -41,7 +41,7 @@ def spin(self, comm_world=None): return self.run(comm_world=comm_world) def run(self, comm_world=None): - """ top level for the hub and spoke system + """top level for the hub and spoke system Args: comm_world (MPI comm): the world for this hub-spoke system """ @@ -81,26 +81,26 @@ def run(self, comm_world=None): spoke_dict["spoke_kwargs"] = dict() if "opt_kwargs" not in spoke_dict: spoke_dict["opt_kwargs"] = dict() - + if comm_world is None: comm_world = MPI.COMM_WORLD n_spokes = len(list_of_spoke_dict) - + # Create the necessary communicators fullcomm = comm_world strata_comm, cylinder_comm = _make_comms(n_spokes, fullcomm=fullcomm) strata_rank = strata_comm.Get_rank() cylinder_rank = cylinder_comm.Get_rank() global_rank = fullcomm.Get_rank() - + # Assign hub/spokes to individual ranks - if strata_rank == 0: # This rank is a hub + if strata_rank == 0: # This rank is a hub sp_class = hub_dict["hub_class"] sp_kwargs = hub_dict["hub_kwargs"] opt_class = hub_dict["opt_class"] opt_kwargs = hub_dict["opt_kwargs"] opt_dict = hub_dict - else: # This rank is a spoke + else: # This rank is a spoke spoke_dict = list_of_spoke_dict[strata_rank - 1] sp_class = spoke_dict["spoke_class"] sp_kwargs = spoke_dict["spoke_kwargs"] @@ -111,15 +111,21 @@ def run(self, comm_world=None): # Create the appropriate opt object locally opt_kwargs["mpicomm"] = cylinder_comm opt = opt_class(**opt_kwargs) - + # Create the SPCommunicator object (hub/spoke) with # the appropriate SPBase object attached - if strata_rank == 0: # Hub - spcomm = sp_class(opt, fullcomm, strata_comm, cylinder_comm, - list_of_spoke_dict, **sp_kwargs) - else: # Spokes - spcomm = sp_class(opt, fullcomm, strata_comm, cylinder_comm, **sp_kwargs) - + if strata_rank == 0: # Hub + spcomm = sp_class( + opt, + fullcomm, + strata_comm, + cylinder_comm, + list_of_spoke_dict, + **sp_kwargs, + ) + else: # Spokes + spcomm = sp_class(opt, fullcomm, strata_comm, cylinder_comm, **sp_kwargs) + # Create the windows, run main(), destroy the windows spcomm.make_windows() if strata_rank == 0: @@ -127,22 +133,27 @@ def run(self, comm_world=None): global_toc("Starting spcomm.main()") spcomm.main() - if strata_rank == 0: # If this is the hub + if strata_rank == 0: # If this is the hub spcomm.send_terminate() - + # Anything that's left to do spcomm.finalize() - + # to ensure the messages below are True cylinder_comm.Barrier() - global_toc(f"Hub algorithm {opt_class.__name__} complete, waiting for spoke finalization") - global_toc(f"Spoke {sp_class.__name__} finalized", (cylinder_rank == 0 and strata_rank != 0)) - + global_toc( + f"Hub algorithm {opt_class.__name__} complete, waiting for spoke finalization" + ) + global_toc( + f"Spoke {sp_class.__name__} finalized", + (cylinder_rank == 0 and strata_rank != 0), + ) + fullcomm.Barrier() - + ## give the hub the chance to catch new values spcomm.hub_finalize() - + fullcomm.Barrier() spcomm.free_windows() @@ -157,7 +168,7 @@ def run(self, comm_world=None): if self.strata_rank == 0: self.BestInnerBound = spcomm.BestInnerBound self.BestOuterBound = spcomm.BestOuterBound - else: # the cylinder ranks don't track the inner / outer bounds + else: # the cylinder ranks don't track the inner / outer bounds self.BestInnerBound = None self.BestOuterBound = None @@ -166,46 +177,61 @@ def run(self, comm_world=None): def on_hub(self): if not self._ran: raise RuntimeError("Need to call WheelSpinner.run() before finding out.") - 
return ("hub_class" in self.opt_dict) + return "hub_class" in self.opt_dict - def write_first_stage_solution(self, solution_file_name, - first_stage_solution_writer=first_stage_nonant_writer): - """ Write a solution file, if a solution is available, to the solution_file_name provided + def write_first_stage_solution( + self, solution_file_name, first_stage_solution_writer=first_stage_nonant_writer + ): + """Write a solution file, if a solution is available, to the solution_file_name provided Args: solution_file_name : filename to write the solution to first_stage_solution_writer (optional) : custom first stage solution writer function """ if not self._ran: - raise RuntimeError("Need to call WheelSpinner.run() before querying solutions.") + raise RuntimeError( + "Need to call WheelSpinner.run() before querying solutions." + ) winner = self._determine_innerbound_winner() if winner: - self.spcomm.opt.write_first_stage_solution(solution_file_name,first_stage_solution_writer) - - def write_tree_solution(self, solution_directory_name, - scenario_tree_solution_writer=scenario_tree_solution_writer): - """ Write a tree solution directory, if available, to the solution_directory_name provided + self.spcomm.opt.write_first_stage_solution( + solution_file_name, first_stage_solution_writer + ) + + def write_tree_solution( + self, + solution_directory_name, + scenario_tree_solution_writer=scenario_tree_solution_writer, + ): + """Write a tree solution directory, if available, to the solution_directory_name provided Args: solution_file_name : filename to write the solution to scenario_tree_solution_writer (optional) : custom scenario solution writer function """ if not self._ran: - raise RuntimeError("Need to call WheelSpinner.run() before querying solutions.") + raise RuntimeError( + "Need to call WheelSpinner.run() before querying solutions." + ) winner = self._determine_innerbound_winner() if winner: - self.spcomm.opt.write_tree_solution(solution_directory_name,scenario_tree_solution_writer) + self.spcomm.opt.write_tree_solution( + solution_directory_name, scenario_tree_solution_writer + ) def local_nonant_cache(self): - """ Returns a dict with non-anticipative values at each local node - We assume that the optimization has been done before calling this + """Returns a dict with non-anticipative values at each local node + We assume that the optimization has been done before calling this """ if not self._ran: - raise RuntimeError("Need to call WheelSpinner.run() before querying solutions.") + raise RuntimeError( + "Need to call WheelSpinner.run() before querying solutions." 
+ ) local_xhats = dict() - for k,s in self.spcomm.opt.local_scenarios.items(): + for k, s in self.spcomm.opt.local_scenarios.items(): for node in s._mpisppy_node_list: if node.name not in local_xhats: local_xhats[node.name] = [ - value(var) for var in node.nonant_vardata_list] + value(var) for var in node.nonant_vardata_list + ] return local_xhats def _determine_innerbound_winner(self): @@ -217,20 +243,20 @@ def _determine_innerbound_winner(self): best_strata_rank = self.spcomm.last_ib_idx else: best_strata_rank = None - + best_strata_rank = self.spcomm.fullcomm.bcast(best_strata_rank, root=0) - return (self.spcomm.strata_rank == best_strata_rank) + return self.spcomm.strata_rank == best_strata_rank + def _make_comms(n_spokes, fullcomm=None): - """ Create the strata_comm and cylinder_comm for hub/spoke style runs - """ + """Create the strata_comm and cylinder_comm for hub/spoke style runs""" if not haveMPI: raise RuntimeError("make_comms called, but cannot import mpi4py") # Ensure that the proper number of processes have been invoked - nsp1 = n_spokes + 1 # Add 1 for the hub + nsp1 = n_spokes + 1 # Add 1 for the hub if fullcomm is None: fullcomm = MPI.COMM_WORLD - n_proc = fullcomm.Get_size() + n_proc = fullcomm.Get_size() if n_proc % nsp1 != 0: raise RuntimeError(f"Need a multiple of {nsp1} processes (got {n_proc})") diff --git a/mpisppy/spopt.py b/mpisppy/spopt.py index 2032d9fe5..543b295eb 100644 --- a/mpisppy/spopt.py +++ b/mpisppy/spopt.py @@ -28,22 +28,23 @@ logger = logging.getLogger("SPOpt") logger.setLevel(logging.WARN) + class SPOpt(SPBase): - """ Defines optimization methods for hubs and spokes """ + """Defines optimization methods for hubs and spokes""" def __init__( - self, - options, - all_scenario_names, - scenario_creator, - scenario_denouement=None, - all_nodenames=None, - mpicomm=None, - extensions=None, - extension_kwargs=None, - scenario_creator_kwargs=None, - variable_probability=None, - E1_tolerance=1e-5, + self, + options, + all_scenario_names, + scenario_creator, + scenario_denouement=None, + all_nodenames=None, + mpicomm=None, + extensions=None, + extension_kwargs=None, + scenario_creator_kwargs=None, + variable_probability=None, + E1_tolerance=1e-5, ): super().__init__( options, @@ -65,13 +66,11 @@ def __init__( self.extensions = extensions self.extension_kwargs = extension_kwargs - if (self.extensions is not None): + if self.extensions is not None: if self.extension_kwargs is None: self.extobject = self.extensions(self) else: - self.extobject = self.extensions( - self, **self.extension_kwargs - ) + self.extobject = self.extensions(self, **self.extension_kwargs) def _check_staleness(self, s): # check for staleness in *scenario* s @@ -82,9 +81,10 @@ def _check_staleness(self, s): if (not v.fixed) and v.stale: if self.is_zero_prob(s, v) and v._value is None: raise RuntimeError( - f"Non-anticipative zero-probability variable {v.name} " - f"on scenario {s.name} reported as stale and has no value. " - "Zero-probability variables must have a value (e.g., fixed).") + f"Non-anticipative zero-probability variable {v.name} " + f"on scenario {s.name} reported as stale and has no value. " + "Zero-probability variables must have a value (e.g., fixed)." + ) else: try: float(pyo.value(v)) @@ -93,17 +93,22 @@ def _check_staleness(self, s): f"Non-anticipative variable {v.name} on scenario {s.name} " "reported as stale. This usually means this variable " "did not appear in any (active) components, and hence " - "was not communicated to the subproblem solver. 
") - - - def solve_one(self, solver_options, k, s, - dtiming=False, - gripe=False, - tee=False, - verbose=False, - disable_pyomo_signal_handling=False, - update_objective=True): - """ Solve one subproblem. + "was not communicated to the subproblem solver. " + ) + + def solve_one( + self, + solver_options, + k, + s, + dtiming=False, + gripe=False, + tee=False, + verbose=False, + disable_pyomo_signal_handling=False, + update_objective=True, + ): + """Solve one subproblem. Args: solver_options (dict or None): @@ -132,10 +137,9 @@ def solve_one(self, solver_options, k, s, Pyomo solve time in seconds. """ - def _vb(msg): if verbose and self.cylinder_rank == 0: - print ("(rank0) " + msg) + print("(rank0) " + msg) # if using a persistent solver plugin, # re-compile the objective due to changed weights and x-bars @@ -143,14 +147,21 @@ def _vb(msg): if update_objective and (sputils.is_persistent(s._solver_plugin)): set_objective_start_time = time.time() - active_objective_datas = list(s.component_data_objects( - pyo.Objective, active=True, descend_into=True)) + active_objective_datas = list( + s.component_data_objects(pyo.Objective, active=True, descend_into=True) + ) if len(active_objective_datas) > 1: - raise RuntimeError('Multiple active objectives identified ' - 'for scenario {sn}'.format(sn=s._name)) + raise RuntimeError( + "Multiple active objectives identified " "for scenario {sn}".format( + sn=s._name + ) + ) elif len(active_objective_datas) < 1: - raise RuntimeError('Could not find any active objectives ' - 'for scenario {sn}'.format(sn=s._name)) + raise RuntimeError( + "Could not find any active objectives " "for scenario {sn}".format( + sn=s._name + ) + ) else: s._solver_plugin.set_objective(active_objective_datas[0]) set_objective_time = time.time() - set_objective_start_time @@ -161,25 +172,24 @@ def _vb(msg): results = self.extobject.pre_solve(s) solve_start_time = time.time() - if (solver_options): - _vb("Using sub-problem solver options=" - + str(solver_options)) - for option_key,option_value in solver_options.items(): + if solver_options: + _vb("Using sub-problem solver options=" + str(solver_options)) + for option_key, option_value in solver_options.items(): s._solver_plugin.options[option_key] = option_value solve_keyword_args = dict() if self.cylinder_rank == 0: if tee is not None and tee is True: solve_keyword_args["tee"] = True - if (sputils.is_persistent(s._solver_plugin)): + if sputils.is_persistent(s._solver_plugin): solve_keyword_args["save_results"] = False elif disable_pyomo_signal_handling: solve_keyword_args["use_signal_handling"] = False try: - results = s._solver_plugin.solve(s, - **solve_keyword_args, - load_solutions=False) + results = s._solver_plugin.solve( + s, **solve_keyword_args, load_solutions=False + ) solver_exception = None except Exception as e: results = None @@ -193,17 +203,16 @@ def _vb(msg): name = self.__class__.__name__ if self.spcomm: name = self.spcomm.__class__.__name__ - print (f"[{name}] Solve failed for scenario {s.name}") + print(f"[{name}] Solve failed for scenario {s.name}") if results is not None: - print ("status=", results.solver.status) - print ("TerminationCondition=", - results.solver.termination_condition) + print("status=", results.solver.status) + print("TerminationCondition=", results.solver.termination_condition) else: print("no results object, so solving agin with tee=True") solve_keyword_args["tee"] = True - results = s._solver_plugin.solve(s, - **solve_keyword_args, - load_solutions=False) + results = s._solver_plugin.solve( 
+ s, **solve_keyword_args, load_solutions=False + ) if solver_exception is not None: raise solver_exception @@ -222,12 +231,13 @@ def _vb(msg): s._mpisppy_data.scenario_feasible = True # TBD: get this ready for IPopt (e.g., check feas_prob every time) # propogate down - if self.bundling: # must be a bundle + if self.bundling: # must be a bundle for sname in s._ef_scenario_names: - self.local_scenarios[sname]._mpisppy_data.scenario_feasible\ - = s._mpisppy_data.scenario_feasible - if s._mpisppy_data.scenario_feasible: - self._check_staleness(self.local_scenarios[sname]) + self.local_scenarios[ + sname + ]._mpisppy_data.scenario_feasible = s._mpisppy_data.scenario_feasible + if s._mpisppy_data.scenario_feasible: + self._check_staleness(self.local_scenarios[sname]) else: # not a bundle if s._mpisppy_data.scenario_feasible: self._check_staleness(s) @@ -235,17 +245,21 @@ def _vb(msg): if self.extensions is not None: results = self.extobject.post_solve(s, results) - return pyomo_solve_time + set_objective_time # set_objective_time added Feb 2023 - - - def solve_loop(self, solver_options=None, - use_scenarios_not_subproblems=False, - dtiming=False, - gripe=False, - disable_pyomo_signal_handling=False, - tee=False, - verbose=False): - """ Loop over `local_subproblems` and solve them in a manner + return ( + pyomo_solve_time + set_objective_time + ) # set_objective_time added Feb 2023 + + def solve_loop( + self, + solver_options=None, + use_scenarios_not_subproblems=False, + dtiming=False, + gripe=False, + disable_pyomo_signal_handling=False, + tee=False, + verbose=False, + ): + """Loop over `local_subproblems` and solve them in a manner dicated by the arguments. In addition to changing the Var values in the scenarios, this function @@ -281,14 +295,16 @@ def solve_loop(self, solver_options=None, set_objective takes care of W and prox changes. 
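[Editor's note] A hedged sketch of a typical solve_loop call, assuming `opt` is an SPOpt-derived object with solvers already attached; the mipgap option is purely illustrative.

    opt.solve_loop(
        solver_options={"mipgap": 0.01},  # forwarded to each solver plugin
        dtiming=True,  # rank 0 prints min/mean/max Pyomo solve times
        gripe=True,    # print a message when a subproblem solve fails
    )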
""" + def _vb(msg): if verbose and self.cylinder_rank == 0: - print ("(rank0) " + msg) + print("(rank0) " + msg) + _vb("Entering solve_loop function.") logger.debug(" early solve_loop for rank={}".format(self.cylinder_rank)) if self.extensions is not None: - self.extobject.pre_solve_loop() + self.extobject.pre_solve_loop() # note that when there is no bundling, scenarios are subproblems if use_scenarios_not_subproblems: @@ -296,34 +312,46 @@ def _vb(msg): else: s_source = self.local_subproblems pyomo_solve_times = list() - for k,s in s_source.items(): - logger.debug(" in loop solve_loop k={}, rank={}".format(k, self.cylinder_rank)) + for k, s in s_source.items(): + logger.debug( + " in loop solve_loop k={}, rank={}".format(k, self.cylinder_rank) + ) if tee: print(f"Tee solve for {k} on global rank {self.global_rank}") - pyomo_solve_times.append(self.solve_one(solver_options, k, s, - dtiming=dtiming, - verbose=verbose, - tee=tee, - gripe=gripe, - disable_pyomo_signal_handling=disable_pyomo_signal_handling - )) + pyomo_solve_times.append( + self.solve_one( + solver_options, + k, + s, + dtiming=dtiming, + verbose=verbose, + tee=tee, + gripe=gripe, + disable_pyomo_signal_handling=disable_pyomo_signal_handling, + ) + ) if self.extensions is not None: - self.extobject.post_solve_loop() + self.extobject.post_solve_loop() if dtiming: all_pyomo_solve_times = self.mpicomm.gather(pyomo_solve_times, root=0) if self.cylinder_rank == 0: apst = [pst for l_pst in all_pyomo_solve_times for pst in l_pst] print("Pyomo solve times (seconds):") - print("\tmin=%4.2f@%d mean=%4.2f max=%4.2f@%d" % - (np.min(apst), np.argmin(apst), - np.mean(apst), - np.max(apst), np.argmax(apst))) - + print( + "\tmin=%4.2f@%d mean=%4.2f max=%4.2f@%d" + % ( + np.min(apst), + np.argmin(apst), + np.mean(apst), + np.max(apst), + np.argmax(apst), + ) + ) def Eobjective(self, verbose=False): - """ Compute the expected objective function across all scenarios. + """Compute the expected objective function across all scenarios. Note: Assumes the optimization is done beforehand, @@ -340,13 +368,16 @@ def Eobjective(self, verbose=False): The expected objective function value """ local_Eobjs = [] - for k,s in self.local_scenarios.items(): + for k, s in self.local_scenarios.items(): objfct = self.saved_objectives[k] local_Eobjs.append(s._mpisppy_probability * pyo.value(objfct)) if verbose: - print ("caller", inspect.stack()[1][3]) - print ("E_Obj Scenario {}, prob={}, Obj={}, ObjExpr={}"\ - .format(k, s._mpisppy_probability, pyo.value(objfct), objfct.expr)) + print("caller", inspect.stack()[1][3]) + print( + "E_Obj Scenario {}, prob={}, Obj={}, ObjExpr={}".format( + k, s._mpisppy_probability, pyo.value(objfct), objfct.expr + ) + ) local_Eobj = np.array([math.fsum(local_Eobjs)]) global_Eobj = np.zeros(1) @@ -354,9 +385,8 @@ def Eobjective(self, verbose=False): return global_Eobj[0] - def Ebound(self, verbose=False, extra_sum_terms=None): - """ Compute the expected outer bound across all scenarios. + """Compute the expected outer bound across all scenarios. Note: Assumes the optimization is done beforehand. @@ -374,18 +404,23 @@ def Ebound(self, verbose=False, extra_sum_terms=None): The expected objective outer bound. 
""" local_Ebounds = [] - for k,s in self.local_subproblems.items(): + for k, s in self.local_subproblems.items(): logger.debug(" in loop Ebound k={}, rank={}".format(k, self.cylinder_rank)) try: eb = s._mpisppy_probability * float(s._mpisppy_data.outer_bound) except: - print(f"eb calc failed for {s._mpisppy_probability} * {s._mpisppy_data.outer_bound}") + print( + f"eb calc failed for {s._mpisppy_probability} * {s._mpisppy_data.outer_bound}" + ) raise local_Ebounds.append(eb) if verbose: - print ("caller", inspect.stack()[1][3]) - print ("E_Bound Scenario {}, prob={}, bound={}"\ - .format(k, s._mpisppy_probability, s._mpisppy_data.outer_bound)) + print("caller", inspect.stack()[1][3]) + print( + "E_Bound Scenario {}, prob={}, bound={}".format( + k, s._mpisppy_probability, s._mpisppy_data.outer_bound + ) + ) if extra_sum_terms is not None: local_Ebound_list = [math.fsum(local_Ebounds)] + list(extra_sum_terms) @@ -402,26 +437,22 @@ def Ebound(self, verbose=False, extra_sum_terms=None): else: return global_Ebound[0], global_Ebound[1:] - def _update_E1(self): - """ Add up the probabilities of all scenarios using a reduce call. - then attach it to the PH object as a float. + """Add up the probabilities of all scenarios using a reduce call. + then attach it to the PH object as a float. """ - localP = np.zeros(1, dtype='d') - globalP = np.zeros(1, dtype='d') + localP = np.zeros(1, dtype="d") + globalP = np.zeros(1, dtype="d") - for k,s in self.local_scenarios.items(): - localP[0] += s._mpisppy_probability + for k, s in self.local_scenarios.items(): + localP[0] += s._mpisppy_probability - self.mpicomm.Allreduce([localP, MPI.DOUBLE], - [globalP, MPI.DOUBLE], - op=MPI.SUM) + self.mpicomm.Allreduce([localP, MPI.DOUBLE], [globalP, MPI.DOUBLE], op=MPI.SUM) self.E1 = float(globalP[0]) - def feas_prob(self): - """ Compute the total probability of all feasible scenarios. + """Compute the total probability of all feasible scenarios. This function can be used to check whether all scenarios are feasible by comparing the return value to one. @@ -437,22 +468,19 @@ def feas_prob(self): """ # locals[0] is E_feas and locals[1] is E_1 - locals = np.zeros(1, dtype='d') - globals = np.zeros(1, dtype='d') + locals = np.zeros(1, dtype="d") + globals = np.zeros(1, dtype="d") - for k,s in self.local_scenarios.items(): + for k, s in self.local_scenarios.items(): if s._mpisppy_data.scenario_feasible: locals[0] += s._mpisppy_probability - self.mpicomm.Allreduce([locals, MPI.DOUBLE], - [globals, MPI.DOUBLE], - op=MPI.SUM) + self.mpicomm.Allreduce([locals, MPI.DOUBLE], [globals, MPI.DOUBLE], op=MPI.SUM) return float(globals[0]) - def infeas_prob(self): - """ Sum the total probability for all infeasible scenarios. + """Sum the total probability for all infeasible scenarios. Note: This function assumes the scenarios have a boolean @@ -464,22 +492,19 @@ def infeas_prob(self): This value equals 0 if all scenarios are feasible. """ - locals = np.zeros(1, dtype='d') - globals = np.zeros(1, dtype='d') + locals = np.zeros(1, dtype="d") + globals = np.zeros(1, dtype="d") - for k,s in self.local_scenarios.items(): + for k, s in self.local_scenarios.items(): if not s._mpisppy_data.scenario_feasible: locals[0] += s._mpisppy_probability - self.mpicomm.Allreduce([locals, MPI.DOUBLE], - [globals, MPI.DOUBLE], - op=MPI.SUM) + self.mpicomm.Allreduce([locals, MPI.DOUBLE], [globals, MPI.DOUBLE], op=MPI.SUM) return float(globals[0]) - def avg_min_max(self, compstr): - """ Can be used to track convergence progress. 
+ """Can be used to track convergence progress. Args: compstr (str): @@ -501,20 +526,18 @@ def avg_min_max(self, compstr): Not user-friendly. If you give a bad compstr, it will just crash. """ firsttime = True - localavg = np.zeros(1, dtype='d') - localmin = np.zeros(1, dtype='d') - localmax = np.zeros(1, dtype='d') - globalavg = np.zeros(1, dtype='d') - globalmin = np.zeros(1, dtype='d') - globalmax = np.zeros(1, dtype='d') + localavg = np.zeros(1, dtype="d") + localmin = np.zeros(1, dtype="d") + localmax = np.zeros(1, dtype="d") + globalavg = np.zeros(1, dtype="d") + globalmin = np.zeros(1, dtype="d") + globalmax = np.zeros(1, dtype="d") v_cuid = pyo.ComponentUID(compstr) - for k,s in self.local_scenarios.items(): - + for k, s in self.local_scenarios.items(): compv = pyo.value(v_cuid.find_component_on(s)) - ###compv = pyo.value(getattr(s, compstr)) localavg[0] += s._mpisppy_probability * compv if compv < localmin[0] or firsttime: @@ -523,51 +546,50 @@ def avg_min_max(self, compstr): localmax[0] = compv firsttime = False - self.comms["ROOT"].Allreduce([localavg, MPI.DOUBLE], - [globalavg, MPI.DOUBLE], - op=MPI.SUM) - self.comms["ROOT"].Allreduce([localmin, MPI.DOUBLE], - [globalmin, MPI.DOUBLE], - op=MPI.MIN) - self.comms["ROOT"].Allreduce([localmax, MPI.DOUBLE], - [globalmax, MPI.DOUBLE], - op=MPI.MAX) - return (float(globalavg[0]), - float(globalmin[0]), - float(globalmax[0])) - + self.comms["ROOT"].Allreduce( + [localavg, MPI.DOUBLE], [globalavg, MPI.DOUBLE], op=MPI.SUM + ) + self.comms["ROOT"].Allreduce( + [localmin, MPI.DOUBLE], [globalmin, MPI.DOUBLE], op=MPI.MIN + ) + self.comms["ROOT"].Allreduce( + [localmax, MPI.DOUBLE], [globalmax, MPI.DOUBLE], op=MPI.MAX + ) + return (float(globalavg[0]), float(globalmin[0]), float(globalmax[0])) def _put_nonant_cache(self, cache): - """ Put the value in the cache for nonants *for all local scenarios* + """Put the value in the cache for nonants *for all local scenarios* Args: cache (np vector) to receive the nonant's for all local scenarios """ - ci = 0 # Cache index + ci = 0 # Cache index for sname, model in self.local_scenarios.items(): if model._mpisppy_data.nonant_cache is None: - raise RuntimeError(f"Rank {self.global_rank} Scenario {sname}" - " nonant_cache is None" - " (call _save_nonants first?)") - for i,_ in enumerate(model._mpisppy_data.nonant_indices): - assert(ci < len(cache)) + raise RuntimeError( + f"Rank {self.global_rank} Scenario {sname}" + " nonant_cache is None" + " (call _save_nonants first?)" + ) + for i, _ in enumerate(model._mpisppy_data.nonant_indices): + assert ci < len(cache) model._mpisppy_data.nonant_cache[i] = cache[ci] ci += 1 - def _restore_original_fixedness(self): # We are going to hack a little to get the original fixedness, but current values # (We are assuming that algorithms are not fixing anticipative vars; but if they # do, they had better put their fixedness back to its correct state.) self._save_nonants() - for k,s in self.local_scenarios.items(): + for k, s in self.local_scenarios.items(): for ci, _ in enumerate(s._mpisppy_data.nonant_indices): - s._mpisppy_data.fixedness_cache[ci] = s._mpisppy_data.original_fixedness[ci] + s._mpisppy_data.fixedness_cache[ci] = ( + s._mpisppy_data.original_fixedness[ci] + ) self._restore_nonants() - def _fix_nonants(self, cache): - """ Fix the Vars subject to non-anticipativity at given values. + """Fix the Vars subject to non-anticipativity at given values. Loop over the scenarios to restore, but loop over subproblems to alert persistent solvers. 
Args: @@ -578,23 +600,26 @@ def _fix_nonants(self, cache): NOTE: You probably want to call _save_nonants right before calling this """ - for k,s in self.local_scenarios.items(): - + for k, s in self.local_scenarios.items(): persistent_solver = None - if (sputils.is_persistent(s._solver_plugin)): + if sputils.is_persistent(s._solver_plugin): persistent_solver = s._solver_plugin nlens = s._mpisppy_data.nlens for node in s._mpisppy_node_list: ndn = node.name if ndn not in cache: - raise RuntimeError("Could not find {} in {}"\ - .format(ndn, cache)) + raise RuntimeError("Could not find {} in {}".format(ndn, cache)) if cache[ndn] is None: - raise RuntimeError("Empty cache for scen={}, node={}".format(k, ndn)) + raise RuntimeError( + "Empty cache for scen={}, node={}".format(k, ndn) + ) if len(cache[ndn]) != nlens[ndn]: - raise RuntimeError("Needed {} nonant Vars for {}, got {}"\ - .format(nlens[ndn], ndn, len(cache[ndn]))) + raise RuntimeError( + "Needed {} nonant Vars for {}, got {}".format( + nlens[ndn], ndn, len(cache[ndn]) + ) + ) for i in range(nlens[ndn]): this_vardata = node.nonant_vardata_list[i] this_vardata._value = cache[ndn][i] @@ -602,8 +627,8 @@ def _fix_nonants(self, cache): if persistent_solver is not None: persistent_solver.update_var(this_vardata) - def _fix_root_nonants(self,root_cache): - """ Fix the 1st stage Vars subject to non-anticipativity at given values. + def _fix_root_nonants(self, root_cache): + """Fix the 1st stage Vars subject to non-anticipativity at given values. Loop over the scenarios to restore, but loop over subproblems to alert persistent solvers. Useful for multistage to find feasible solutions with a given scenario. @@ -615,40 +640,39 @@ def _fix_root_nonants(self,root_cache): NOTE: You probably want to call _save_nonants right before calling this """ - for k,s in self.local_scenarios.items(): - + for k, s in self.local_scenarios.items(): persistent_solver = None - if (sputils.is_persistent(s._solver_plugin)): + if sputils.is_persistent(s._solver_plugin): persistent_solver = s._solver_plugin nlens = s._mpisppy_data.nlens rootnode = None for node in s._mpisppy_node_list: - if node.name == 'ROOT': + if node.name == "ROOT": rootnode = node break if rootnode is None: - raise RuntimeError("Could not find a 'ROOT' node in scen {}"\ - .format(k)) + raise RuntimeError("Could not find a 'ROOT' node in scen {}".format(k)) if root_cache is None: raise RuntimeError("Empty root cache for scen={}".format(k)) - if len(root_cache) != nlens['ROOT']: - raise RuntimeError("Needed {} nonant Vars for 'ROOT', got {}"\ - .format(nlens['ROOT'], len(root_cache))) + if len(root_cache) != nlens["ROOT"]: + raise RuntimeError( + "Needed {} nonant Vars for 'ROOT', got {}".format( + nlens["ROOT"], len(root_cache) + ) + ) - for i in range(nlens['ROOT']): + for i in range(nlens["ROOT"]): this_vardata = node.nonant_vardata_list[i] this_vardata._value = root_cache[i] this_vardata.fix() if persistent_solver is not None: persistent_solver.update_var(this_vardata) - - def _restore_nonants(self): - """ Restore nonanticipative variables to their original values. + """Restore nonanticipative variables to their original values. This function works in conjunction with _save_nonants. @@ -660,10 +684,9 @@ def _restore_nonants(self): and restoration. THIS WILL NOT WORK ON BUNDLES (Feb 2019) but hopefully does not need to. 
""" - for k,s in self.local_scenarios.items(): - + for k, s in self.local_scenarios.items(): persistent_solver = None - if (sputils.is_persistent(s._solver_plugin)): + if sputils.is_persistent(s._solver_plugin): persistent_solver = s._solver_plugin for ci, vardata in enumerate(s._mpisppy_data.nonant_indices.values()): @@ -673,9 +696,8 @@ def _restore_nonants(self): if persistent_solver is not None: persistent_solver.update_var(vardata) - def _save_nonants(self): - """ Save the values and fixedness status of the Vars that are + """Save the values and fixedness status of the Vars that are subject to non-anticipativity. Note: @@ -687,43 +709,41 @@ def _save_nonants(self): Note: The value cache is np because it might be transmitted """ - for k,s in self.local_scenarios.items(): + for k, s in self.local_scenarios.items(): nlens = s._mpisppy_data.nlens - if not hasattr(s._mpisppy_data,"nonant_cache"): + if not hasattr(s._mpisppy_data, "nonant_cache"): clen = sum(nlens[ndn] for ndn in nlens) - s._mpisppy_data.nonant_cache = np.zeros(clen, dtype='d') + s._mpisppy_data.nonant_cache = np.zeros(clen, dtype="d") s._mpisppy_data.fixedness_cache = [None for _ in range(clen)] for ci, xvar in enumerate(s._mpisppy_data.nonant_indices.values()): - s._mpisppy_data.nonant_cache[ci] = xvar._value - s._mpisppy_data.fixedness_cache[ci] = xvar.is_fixed() - + s._mpisppy_data.nonant_cache[ci] = xvar._value + s._mpisppy_data.fixedness_cache[ci] = xvar.is_fixed() def _save_original_nonants(self): - """ Save the current value of the nonanticipative variables. + """Save the current value of the nonanticipative variables. Values are saved in the `_PySP_original_nonants` attribute. Whether the variable was fixed is stored in `_PySP_original_fixedness`. """ - for k,s in self.local_scenarios.items(): - if hasattr(s,"_PySP_original_fixedness"): - print ("ERROR: Attempt to replace original nonants") + for k, s in self.local_scenarios.items(): + if hasattr(s, "_PySP_original_fixedness"): + print("ERROR: Attempt to replace original nonants") raise - if not hasattr(s._mpisppy_data,"nonant_cache"): + if not hasattr(s._mpisppy_data, "nonant_cache"): # uses nonant cache to signal other things have not # been created # TODO: combine cache creation (or something else) clen = len(s._mpisppy_data.nonant_indices) s._mpisppy_data.original_fixedness = [None] * clen - s._mpisppy_data.original_nonants = np.zeros(clen, dtype='d') + s._mpisppy_data.original_nonants = np.zeros(clen, dtype="d") for ci, xvar in enumerate(s._mpisppy_data.nonant_indices.values()): - s._mpisppy_data.original_fixedness[ci] = xvar.is_fixed() - s._mpisppy_data.original_nonants[ci] = xvar._value - + s._mpisppy_data.original_fixedness[ci] = xvar.is_fixed() + s._mpisppy_data.original_nonants[ci] = xvar._value def _restore_original_nonants(self): - """ Restore nonanticipative variables to their original values. + """Restore nonanticipative variables to their original values. This function works in conjunction with _save_original_nonants. @@ -735,11 +755,10 @@ def _restore_original_nonants(self): and restoration. THIS WILL NOT WORK ON BUNDLES (Feb 2019) but hopefully does not need to. 
""" - for k,s in self.local_scenarios.items(): - + for k, s in self.local_scenarios.items(): persistent_solver = None if not self.bundling: - if (sputils.is_persistent(s._solver_plugin)): + if sputils.is_persistent(s._solver_plugin): persistent_solver = s._solver_plugin else: print("restore_original_nonants called for a bundle") @@ -751,17 +770,17 @@ def _restore_original_nonants(self): if persistent_solver is not None: persistent_solver.update_var(vardata) - def _save_active_objectives(self): - """ Save the active objectives for use in PH, bundles, and calculation """ + """Save the active objectives for use in PH, bundles, and calculation""" self.saved_objectives = dict() for sname, scenario_instance in self.local_scenarios.items(): - self.saved_objectives[sname] = sputils.find_active_objective(scenario_instance) - + self.saved_objectives[sname] = sputils.find_active_objective( + scenario_instance + ) def FormEF(self, scen_dict, EF_name=None): - """ Make the EF for a list of scenarios. + """Make the EF for a list of scenarios. This function is mainly to build bundles. To build (and solve) the EF of the entire problem, use the EF class instead. @@ -805,17 +824,19 @@ def FormEF(self, scen_dict, EF_name=None): if len(scen_dict) == 1: sname, scenario_instance = list(scen_dict.items())[0] if EF_name is not None: - print ("WARNING: EF_name="+EF_name+" not used; singleton="+sname) - print ("MAJOR WARNING: a bundle of size one encountered; if you try to compute bounds it might crash (Feb 2019)") + print("WARNING: EF_name=" + EF_name + " not used; singleton=" + sname) + print( + "MAJOR WARNING: a bundle of size one encountered; if you try to compute bounds it might crash (Feb 2019)" + ) return scenario_instance - EF_instance = sputils._create_EF_from_scen_dict(scen_dict, EF_name=EF_name, - nonant_for_fixed_vars=False) + EF_instance = sputils._create_EF_from_scen_dict( + scen_dict, EF_name=EF_name, nonant_for_fixed_vars=False + ) return EF_instance - def _subproblem_creation(self, verbose=False): - """ Create local subproblems (not local scenarios). + """Create local subproblems (not local scenarios). If bundles are specified, this function creates the bundles. 
Otherwise, this function simply copies pointers to the already-created @@ -832,39 +853,39 @@ def _subproblem_creation(self, verbose=False): sdict = dict() bname = "rank" + str(self.cylinder_rank) + "bundle" + str(bun) for sname in self.names_in_bundles[rank_local][bun]: - if (verbose and self.cylinder_rank==0): - print ("bundling "+sname+" into "+bname) + if verbose and self.cylinder_rank == 0: + print("bundling " + sname + " into " + bname) scen = self.local_scenarios[sname] scen._mpisppy_data.bundlename = bname sdict[sname] = scen self.local_subproblems[bname] = self.FormEF(sdict, bname) - self.local_subproblems[bname].scen_list = \ - self.names_in_bundles[rank_local][bun] - self.local_subproblems[bname]._mpisppy_probability = \ - sum(s._mpisppy_probability for s in sdict.values()) + self.local_subproblems[bname].scen_list = self.names_in_bundles[ + rank_local + ][bun] + self.local_subproblems[bname]._mpisppy_probability = sum( + s._mpisppy_probability for s in sdict.values() + ) else: for sname, s in self.local_scenarios.items(): self.local_subproblems[sname] = s self.local_subproblems[sname].scen_list = [sname] - def _create_solvers(self, presolve=True): - if self._presolver is not None and presolve: self._presolver.presolve() dtiming = ("display_timing" in self.options) and self.options["display_timing"] - local_sit = [] # Local set instance time for time tracking - for sname, s in self.local_subproblems.items(): # solver creation + local_sit = [] # Local set instance time for time tracking + for sname, s in self.local_subproblems.items(): # solver creation s._solver_plugin = SolverFactory(self.options["solver_name"]) - if (sputils.is_persistent(s._solver_plugin)): + if sputils.is_persistent(s._solver_plugin): if dtiming: set_instance_start_time = time.time() set_instance_retry(s, s._solver_plugin, sname) if dtiming: - local_sit.append( time.time() - set_instance_start_time ) + local_sit.append(time.time() - set_instance_start_time) else: if dtiming: local_sit.append(0.0) @@ -877,16 +898,16 @@ def _create_solvers(self, presolve=True): scen = self.local_scenarios[scen_name] scen._solver_plugin = s._solver_plugin if dtiming: - all_set_instance_times = self.mpicomm.gather(local_sit, - root=0) + all_set_instance_times = self.mpicomm.gather(local_sit, root=0) if self.cylinder_rank == 0: asit = [sit for l_sit in all_set_instance_times for sit in l_sit] if len(asit) == 0: print("Set instance times not available.") else: - print("Set instance times: \tmin=%4.2f mean=%4.2f max=%4.2f" % - (np.min(asit), np.mean(asit), np.max(asit))) - + print( + "Set instance times: \tmin=%4.2f mean=%4.2f max=%4.2f" + % (np.min(asit), np.mean(asit), np.max(asit)) + ) def subproblem_scenario_generator(self): """ @@ -905,8 +926,8 @@ def subproblem_scenario_generator(self): # logic can be encapsulated in a sputils.py function. 
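[Editor's note] After _subproblem_creation, local_subproblems holds either plain scenarios or FormEF bundles named "rank<r>bundle<b>"; either way each entry carries scen_list and _mpisppy_probability, so a walk like this sketch works (assuming `opt` is an SPOpt-derived object):

    for name, sub in opt.local_subproblems.items():
        # scen_list is a singleton list when there is no bundling
        print(name, sub.scen_list, sub._mpisppy_probability)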
MAX_ACQUIRE_LICENSE_RETRY_ATTEMPTS = 5

-def set_instance_retry(subproblem, solver_plugin, subproblem_name):
+def set_instance_retry(subproblem, solver_plugin, subproblem_name):
     sname = subproblem_name
     # this loop is required to address the situation where license
     # token servers become temporarily over-subscribed / non-responsive
@@ -917,17 +938,29 @@
         try:
             solver_plugin.set_instance(subproblem)
             if num_retry_attempts > 0:
-                print("Acquired solver license (call to set_instance() for scenario=%s) after %d retry attempts" % (sname, num_retry_attempts))
+                print(
+                    "Acquired solver license (call to set_instance() for scenario=%s) after %d retry attempts"
+                    % (sname, num_retry_attempts)
+                )
             break
         # pyomo presently has no general way to trap a license acquisition
         # error - so we're stuck with trapping on "any" exception. not ideal.
         except Exception:
             if num_retry_attempts == 0:
-                print("Failed to acquire solver license (call to set_instance() for scenario=%s) after first attempt" % (sname))
+                print(
+                    "Failed to acquire solver license (call to set_instance() for scenario=%s) after first attempt"
+                    % (sname)
+                )
             else:
-                print("Failed to acquire solver license (call to set_instance() for scenario=%s) after %d retry attempts" % (sname, num_retry_attempts))
+                print(
+                    "Failed to acquire solver license (call to set_instance() for scenario=%s) after %d retry attempts"
+                    % (sname, num_retry_attempts)
+                )
             if num_retry_attempts == MAX_ACQUIRE_LICENSE_RETRY_ATTEMPTS:
-                raise RuntimeError("Failed to acquire solver license - call to set_instance() for scenario=%s failed after %d retry attempts" % (sname, num_retry_attempts))
+                raise RuntimeError(
+                    "Failed to acquire solver license - call to set_instance() for scenario=%s failed after %d retry attempts"
+                    % (sname, num_retry_attempts)
+                )
             else:
                 sleep_time = random.random()
                 print(f"Sleeping for {sleep_time:.2f} seconds before re-attempting")
diff --git a/mpisppy/tests/examples/aircond.py b/mpisppy/tests/examples/aircond.py
index 11c36eab0..94e26b594 100644
--- a/mpisppy/tests/examples/aircond.py
+++ b/mpisppy/tests/examples/aircond.py
@@ -6,7 +6,7 @@
 # All rights reserved. Please see the files COPYRIGHT.md and LICENSE.md for
 # full copyright and license information.
 ###############################################################################
-#ReferenceModel for full set of scenarios for AirCond; June 2021
+# ReferenceModel for full set of scenarios for AirCond; June 2021
 # Dec 2021; numerous enhancements by DLW; do not change defaults
 # Feb 2022: Changed all inventory cost coefficients to be positive numbers
 # except Last Inventory cost, which should be negative.
@@ -23,26 +23,27 @@
 # Use this random stream:
 aircondstream = np.random.RandomState()
 # Do not edit these defaults!
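[Editor's note] Before the aircond hunks continue below, a sketch of how the set_instance_retry helper above is invoked, as _create_solvers does for persistent solvers; the solver name is only an example and `sub` stands for any Pyomo subproblem model.

    from pyomo.environ import SolverFactory
    from mpisppy.spopt import set_instance_retry
    import mpisppy.utils.sputils as sputils

    plugin = SolverFactory("gurobi_persistent")
    if sputils.is_persistent(plugin):
        # retries up to MAX_ACQUIRE_LICENSE_RETRY_ATTEMPTS times, with a
        # short random sleep between attempts
        set_instance_retry(sub, plugin, "scen0")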
-parms = {"mu_dev": (float, 0.), - "sigma_dev": (float, 40.), - "start_ups": (bool, False), - "StartUpCost": (float, 300.), - "start_seed": (int, 1134), - "min_d": (float, 0.), - "max_d": (float, 400.), - "starting_d": (float, 200.), - "BeginInventory": (float, 200.), - "InventoryCost": (float, 0.5), - "LastInventoryCost": (float, -0.8), - "Capacity": (float, 200.), - "RegularProdCost": (float, 1.), - "OvertimeProdCost": (float, 3.), - "NegInventoryCost": (float, 5.), - "QuadShortCoeff": (float, 0) +parms = { + "mu_dev": (float, 0.0), + "sigma_dev": (float, 40.0), + "start_ups": (bool, False), + "StartUpCost": (float, 300.0), + "start_seed": (int, 1134), + "min_d": (float, 0.0), + "max_d": (float, 400.0), + "starting_d": (float, 200.0), + "BeginInventory": (float, 200.0), + "InventoryCost": (float, 0.5), + "LastInventoryCost": (float, -0.8), + "Capacity": (float, 200.0), + "RegularProdCost": (float, 1.0), + "OvertimeProdCost": (float, 3.0), + "NegInventoryCost": (float, 5.0), + "QuadShortCoeff": (float, 0), } -def _demands_creator(sname, sample_branching_factors, root_name="ROOT", **kwargs): +def _demands_creator(sname, sample_branching_factors, root_name="ROOT", **kwargs): if "start_seed" not in kwargs: raise RuntimeError(f"start_seed not in kwargs={kwargs}") start_seed = kwargs["start_seed"] @@ -51,7 +52,7 @@ def _demands_creator(sname, sample_branching_factors, root_name="ROOT", **kwargs mu_dev = kwargs.get("mu_dev", None) sigma_dev = kwargs.get("sigma_dev", None) - scennum = sputils.extract_num(sname) + scennum = sputils.extract_num(sname) # Find the right path and the associated seeds (one for each node) using scennum prod = np.prod(sample_branching_factors) assert prod is not None, "sample_branching_factors is None in _demands_creator" @@ -60,34 +61,48 @@ def _demands_creator(sname, sample_branching_factors, root_name="ROOT", **kwargs demands = [d] nodenames = [root_name] for bf in sample_branching_factors: - assert prod%bf == 0 - prod = prod//bf - nodenames.append(str(s//prod)) - s = s%prod - + assert prod % bf == 0 + prod = prod // bf + nodenames.append(str(s // prod)) + s = s % prod + stagelist = [int(x) for x in nodenames[1:]] - for t in range(1,len(nodenames)): - aircondstream.seed(start_seed+sputils.node_idx(stagelist[:t],sample_branching_factors)) - d = min(max_d,max(min_d,d+aircondstream.normal(mu_dev,sigma_dev))) + for t in range(1, len(nodenames)): + aircondstream.seed( + start_seed + sputils.node_idx(stagelist[:t], sample_branching_factors) + ) + d = min(max_d, max(min_d, d + aircondstream.normal(mu_dev, sigma_dev))) demands.append(d) - - return demands,nodenames -def general_rho_setter(scenario_instance, rho_scale_factor=1.0): + return demands, nodenames + +def general_rho_setter(scenario_instance, rho_scale_factor=1.0): computed_rhos = [] for t in scenario_instance.T[:-1]: - computed_rhos.append((id(scenario_instance.stage_models[t].RegularProd), - pyo.value(scenario_instance.stage_models[t].RegularProdCost) * rho_scale_factor)) - computed_rhos.append((id(scenario_instance.stage_models[t].OvertimeProd), - pyo.value(scenario_instance.stage_models[t].OvertimeProdCost) * rho_scale_factor)) + computed_rhos.append( + ( + id(scenario_instance.stage_models[t].RegularProd), + pyo.value(scenario_instance.stage_models[t].RegularProdCost) + * rho_scale_factor, + ) + ) + computed_rhos.append( + ( + id(scenario_instance.stage_models[t].OvertimeProd), + pyo.value(scenario_instance.stage_models[t].OvertimeProdCost) + * rho_scale_factor, + ) + ) return computed_rhos + def 
dual_rho_setter(scenario_instance): return general_rho_setter(scenario_instance, rho_scale_factor=0.0001) + def primal_rho_setter(scenario_instance): return general_rho_setter(scenario_instance, rho_scale_factor=0.01) @@ -97,26 +112,26 @@ def _StageModel_creator(time, demand, last_stage, **kwargs): def _kw(pname): return kwargs.get(pname, parms[pname][1]) - + model = pyo.ConcreteModel() model.T = [time] - #Parameters - #Demand + # Parameters + # Demand model.Demand = demand - #Inventory Cost: model.InventoryCost = -0.8 if last_stage else 0.5 + # Inventory Cost: model.InventoryCost = -0.8 if last_stage else 0.5 model.InventoryCost = _kw("InventoryCost") model.LastInventoryCost = _kw("LastInventoryCost") - #Regular Capacity + # Regular Capacity model.Capacity = _kw("Capacity") - #Regular Production Cost + # Regular Production Cost model.RegularProdCost = _kw("RegularProdCost") - #Overtime Production Cost + # Overtime Production Cost model.OvertimeProdCost = _kw("OvertimeProdCost") - model.max_T = 25 # this is for the start-up cost constraints, + model.max_T = 25 # this is for the start-up cost constraints, model.bigM = model.Capacity * model.max_T - + model.start_ups = _kw("start_ups") if model.start_ups: @@ -126,198 +141,234 @@ def _kw(pname): # Negative Inventory Cost model.NegInventoryCost = _kw("NegInventoryCost") model.QuadShortCoeff = _kw("QuadShortCoeff") - - #Variables - model.RegularProd = pyo.Var(domain=pyo.NonNegativeReals, - bounds = (0, model.bigM)) - - model.OvertimeProd = pyo.Var(domain=pyo.NonNegativeReals, - bounds = (0, model.bigM)) - model.Inventory = pyo.Var(domain=pyo.Reals, - bounds = (-model.bigM, model.bigM)) + # Variables + model.RegularProd = pyo.Var(domain=pyo.NonNegativeReals, bounds=(0, model.bigM)) + + model.OvertimeProd = pyo.Var(domain=pyo.NonNegativeReals, bounds=(0, model.bigM)) + + model.Inventory = pyo.Var(domain=pyo.Reals, bounds=(-model.bigM, model.bigM)) if model.start_ups: # start-up cost variable model.StartUp = pyo.Var(within=pyo.Binary) - #Constraints (and some auxilliary variables) + # Constraints (and some auxilliary variables) def CapacityRule(m): - return m.RegularProd<=m.Capacity + return m.RegularProd <= m.Capacity + model.MaximumCapacity = pyo.Constraint(rule=CapacityRule) if model.start_ups: model.RegStartUpConstraint = pyo.Constraint( - expr=model.bigM * model.StartUp >= model.RegularProd + model.OvertimeProd) + expr=model.bigM * model.StartUp >= model.RegularProd + model.OvertimeProd + ) # grab positive and negative parts of inventory assert model.InventoryCost > 0, model.InventoryCost assert model.NegInventoryCost > 0, model.NegInventoryCost - model.negInventory = pyo.Var(domain=pyo.NonNegativeReals, initialize=0.0, bounds = (0, model.bigM)) - model.posInventory = pyo.Var(domain=pyo.NonNegativeReals, initialize=0.0, bounds = (0, model.bigM)) - model.doleInventory = pyo.Constraint(expr=model.Inventory == model.posInventory - model.negInventory) + model.negInventory = pyo.Var( + domain=pyo.NonNegativeReals, initialize=0.0, bounds=(0, model.bigM) + ) + model.posInventory = pyo.Var( + domain=pyo.NonNegativeReals, initialize=0.0, bounds=(0, model.bigM) + ) + model.doleInventory = pyo.Constraint( + expr=model.Inventory == model.posInventory - model.negInventory + ) # create the inventory cost expression if not last_stage: - model.LinInvenCostExpr = pyo.Expression(expr = model.InventoryCost*model.posInventory\ - + model.NegInventoryCost*model.negInventory) - else: - assert model.LastInventoryCost < 0, f"last stage inven cost: 
{model.LastInventoryCost}" - model.LinInvenCostExpr = pyo.Expression(expr = model.LastInventoryCost*model.posInventory\ - + model.NegInventoryCost*model.negInventory) + model.LinInvenCostExpr = pyo.Expression( + expr=model.InventoryCost * model.posInventory + + model.NegInventoryCost * model.negInventory + ) + else: + assert ( + model.LastInventoryCost < 0 + ), f"last stage inven cost: {model.LastInventoryCost}" + model.LinInvenCostExpr = pyo.Expression( + expr=model.LastInventoryCost * model.posInventory + + model.NegInventoryCost * model.negInventory + ) if model.QuadShortCoeff > 0 and not last_stage: - model.InvenCostExpr = pyo.Expression(expr = model.LinInvenCostExpr +\ - model.QuadShortCoeff * model.negInventory * model.negInventory) + model.InvenCostExpr = pyo.Expression( + expr=model.LinInvenCostExpr + + model.QuadShortCoeff * model.negInventory * model.negInventory + ) else: assert model.QuadShortCoeff >= 0, model.QuadShortCoeff - model.InvenCostExpr = pyo.Expression(expr = model.LinInvenCostExpr) - - #Objective + model.InvenCostExpr = pyo.Expression(expr=model.LinInvenCostExpr) + + # Objective def stage_objective(m): - expr = m.RegularProdCost*m.RegularProd +\ - m.OvertimeProdCost*m.OvertimeProd +\ - m.InvenCostExpr + expr = ( + m.RegularProdCost * m.RegularProd + + m.OvertimeProdCost * m.OvertimeProd + + m.InvenCostExpr + ) if m.start_ups: - expr += m.StartUpCost * m.StartUp + expr += m.StartUpCost * m.StartUp return expr - model.StageObjective = pyo.Objective(rule=stage_objective,sense=pyo.minimize) - + model.StageObjective = pyo.Objective(rule=stage_objective, sense=pyo.minimize) + return model -#Assume that demands has been drawn before + +# Assume that demands has been drawn before def aircond_model_creator(demands, **kwargs): # create a single aircond model for the given demands # branching_factors=None, num_scens=None, mu_dev=0, sigma_dev=40, start_seed=0, start_ups=None): # typing aids... 
start_ups = kwargs.get("start_ups", parms["start_ups"][1]) - + model = pyo.ConcreteModel() num_stages = len(demands) - model.T = range(1,num_stages+1) #Stages 1,2,...,T + model.T = range(1, num_stages + 1) # Stages 1,2,...,T model.max_T = 25 if model.T[-1] > model.max_T: - raise RuntimeError( - f'The number of stages exceeds {model.max_T}') - + raise RuntimeError(f"The number of stages exceeds {model.max_T}") + model.start_ups = start_ups - #Parameters + # Parameters model.BeginInventory = kwargs.get("BeginInventory", parms["BeginInventory"][1]) - - #Creating stage models + + # Creating stage models model.stage_models = {} for t in model.T: - last_stage = (t==num_stages) - model.stage_models[t] = _StageModel_creator(t, demands[t-1], - last_stage=last_stage, **kwargs) + last_stage = t == num_stages + model.stage_models[t] = _StageModel_creator( + t, demands[t - 1], last_stage=last_stage, **kwargs + ) - #Constraints - - def material_balance_rule(m,t): + # Constraints + + def material_balance_rule(m, t): if t == 1: - return(m.BeginInventory+m.stage_models[t].RegularProd +\ - m.stage_models[t].OvertimeProd-m.stage_models[t].Inventory ==\ - m.stage_models[t].Demand) + return ( + m.BeginInventory + + m.stage_models[t].RegularProd + + m.stage_models[t].OvertimeProd + - m.stage_models[t].Inventory + == m.stage_models[t].Demand + ) else: - return(m.stage_models[t-1].Inventory +\ - m.stage_models[t].RegularProd+m.stage_models[t].OvertimeProd -\ - m.stage_models[t].Inventory ==\ - m.stage_models[t].Demand) + return ( + m.stage_models[t - 1].Inventory + + m.stage_models[t].RegularProd + + m.stage_models[t].OvertimeProd + - m.stage_models[t].Inventory + == m.stage_models[t].Demand + ) + model.MaterialBalance = pyo.Constraint(model.T, rule=material_balance_rule) - #Objectives - - #Disactivating stage objectives + # Objectives + + # Disactivating stage objectives for t in model.T: model.stage_models[t].StageObjective.deactivate() - + def stage_cost(stage_m): - expr = stage_m.RegularProdCost*stage_m.RegularProd +\ - stage_m.OvertimeProdCost*stage_m.OvertimeProd +\ - stage_m.InvenCostExpr + expr = ( + stage_m.RegularProdCost * stage_m.RegularProd + + stage_m.OvertimeProdCost * stage_m.OvertimeProd + + stage_m.InvenCostExpr + ) if stage_m.start_ups: expr += stage_m.StartUpCost * stage_m.StartUp return expr - + def total_cost_rule(m): - return(sum(stage_cost(m.stage_models[t]) for t in m.T)) - model.TotalCostObjective = pyo.Objective(rule = total_cost_rule, - sense = pyo.minimize) - - #Last step + return sum(stage_cost(m.stage_models[t]) for t in m.T) + + model.TotalCostObjective = pyo.Objective(rule=total_cost_rule, sense=pyo.minimize) + + # Last step for t in model.T: - setattr(model, "stage_model_"+str(t), model.stage_models[t]) - + setattr(model, "stage_model_" + str(t), model.stage_models[t]) + return model -def MakeNodesforScen(model,nodenames,branching_factors,starting_stage=1): - #Create all nonleaf nodes used by the scenario - #Compatible with sample scenario creation + +def MakeNodesforScen(model, nodenames, branching_factors, starting_stage=1): + # Create all nonleaf nodes used by the scenario + # Compatible with sample scenario creation TreeNodes = [] for stage in model.T: + nonant_list = [ + model.stage_models[stage].RegularProd, + model.stage_models[stage].OvertimeProd, + ] - nonant_list=[model.stage_models[stage].RegularProd, - model.stage_models[stage].OvertimeProd] - nonant_ef_suppl_list = [model.stage_models[stage].Inventory] if model.start_ups: 
nonant_ef_suppl_list.append(model.stage_models[stage].StartUp) - if stage ==1: - ndn="ROOT" - TreeNodes.append(scenario_tree.ScenarioNode(name=ndn, - cond_prob=1.0, - stage=stage, - cost_expression=model.stage_models[stage].StageObjective, - nonant_list=nonant_list, - scen_model=model, - nonant_ef_suppl_list = nonant_ef_suppl_list - ) - ) - elif stage <=starting_stage: + if stage == 1: + ndn = "ROOT" + TreeNodes.append( + scenario_tree.ScenarioNode( + name=ndn, + cond_prob=1.0, + stage=stage, + cost_expression=model.stage_models[stage].StageObjective, + nonant_list=nonant_list, + scen_model=model, + nonant_ef_suppl_list=nonant_ef_suppl_list, + ) + ) + elif stage <= starting_stage: parent_ndn = ndn - ndn = parent_ndn+"_0" #Only one node per stage before starting stage - TreeNodes.append(scenario_tree.ScenarioNode(name=ndn, - cond_prob=1.0, - stage=stage, - cost_expression=model.stage_models[stage].StageObjective, - nonant_list=nonant_list, - scen_model=model, - nonant_ef_suppl_list = nonant_ef_suppl_list, - parent_name = parent_ndn - ) - ) - elif stage < max(model.T): #We don't add the leaf node + ndn = parent_ndn + "_0" # Only one node per stage before starting stage + TreeNodes.append( + scenario_tree.ScenarioNode( + name=ndn, + cond_prob=1.0, + stage=stage, + cost_expression=model.stage_models[stage].StageObjective, + nonant_list=nonant_list, + scen_model=model, + nonant_ef_suppl_list=nonant_ef_suppl_list, + parent_name=parent_ndn, + ) + ) + elif stage < max(model.T): # We don't add the leaf node parent_ndn = ndn - ndn = parent_ndn+"_"+nodenames[stage-starting_stage] - TreeNodes.append(scenario_tree.ScenarioNode(name=ndn, - cond_prob=1.0/branching_factors[stage-starting_stage-1], - stage=stage, - cost_expression=model.stage_models[stage].StageObjective, - nonant_list=nonant_list, - scen_model=model, - nonant_ef_suppl_list = nonant_ef_suppl_list, - parent_name = parent_ndn - ) - ) - return(TreeNodes) - - -def scenario_creator(sname, **kwargs): + ndn = parent_ndn + "_" + nodenames[stage - starting_stage] + TreeNodes.append( + scenario_tree.ScenarioNode( + name=ndn, + cond_prob=1.0 / branching_factors[stage - starting_stage - 1], + stage=stage, + cost_expression=model.stage_models[stage].StageObjective, + nonant_list=nonant_list, + scen_model=model, + nonant_ef_suppl_list=nonant_ef_suppl_list, + parent_name=parent_ndn, + ) + ) + return TreeNodes + +def scenario_creator(sname, **kwargs): if "branching_factors" not in kwargs: - raise RuntimeError("scenario_creator for aircond needs branching_factors in kwargs") + raise RuntimeError( + "scenario_creator for aircond needs branching_factors in kwargs" + ) branching_factors = kwargs["branching_factors"] - demands,nodenames = _demands_creator(sname, branching_factors, root_name="ROOT", **kwargs) - + demands, nodenames = _demands_creator( + sname, branching_factors, root_name="ROOT", **kwargs + ) + model = aircond_model_creator(demands, **kwargs) - - - #Constructing the nodes used by the scenario + + # Constructing the nodes used by the scenario model._mpisppy_node_list = MakeNodesforScen(model, nodenames, branching_factors) model._mpisppy_probability = 1 / np.prod(branching_factors) """ @@ -330,12 +381,18 @@ def scenario_creator(sname, **kwargs): model.pprint(fileh) quit() """ - return(model) + return model -def sample_tree_scen_creator(sname, stage, sample_branching_factors, seed, - given_scenario=None, **scenario_creator_kwargs): - """ Create a scenario within a sample tree. Mainly for multi-stage and simple for two-stage. 
+def sample_tree_scen_creator( + sname, + stage, + sample_branching_factors, + seed, + given_scenario=None, + **scenario_creator_kwargs, +): + """Create a scenario within a sample tree. Mainly for multi-stage and simple for two-stage. Args: sname (string): scenario name to be created stage (int >=1 ): for stages > 1, fix data based on sname in earlier stages @@ -347,47 +404,57 @@ def sample_tree_scen_creator(sname, stage, sample_branching_factors, seed, scenario (Pyomo concrete model): A scenario for sname with data in stages < stage determined by the arguments """ - + # Finding demands from stage 1 to t starting_d = scenario_creator_kwargs.get("starting_d", parms["starting_d"][1]) if given_scenario is None: if stage == 1: past_demands = [starting_d] else: - raise RuntimeError("sample_tree_scen_creator for aircond needs a 'given_scenario' argument if the starting stage is greater than 1") + raise RuntimeError( + "sample_tree_scen_creator for aircond needs a 'given_scenario' argument if the starting stage is greater than 1" + ) else: - past_demands = [given_scenario.stage_models[t].Demand for t in given_scenario.T if t<=stage] + past_demands = [ + given_scenario.stage_models[t].Demand + for t in given_scenario.T + if t <= stage + ] kwargs = scenario_creator_kwargs.copy() kwargs["start_seed"] = seed - - #Finding demands for stages after t (note the dynamic seed) - future_demands,nodenames = _demands_creator(sname, sample_branching_factors, - root_name='ROOT'+'_0'*(stage-1), **kwargs) - - demands = past_demands+future_demands[1:] #The demand at the starting stage is in both past and future demands - + + # Finding demands for stages after t (note the dynamic seed) + future_demands, nodenames = _demands_creator( + sname, sample_branching_factors, root_name="ROOT" + "_0" * (stage - 1), **kwargs + ) + + demands = ( + past_demands + future_demands[1:] + ) # The demand at the starting stage is in both past and future demands + model = aircond_model_creator(demands, **scenario_creator_kwargs) - - model._mpisppy_probability = 1/np.prod(sample_branching_factors) - - #Constructing the nodes used by the scenario - model._mpisppy_node_list = MakeNodesforScen(model, nodenames, sample_branching_factors, - starting_stage=stage) - + + model._mpisppy_probability = 1 / np.prod(sample_branching_factors) + + # Constructing the nodes used by the scenario + model._mpisppy_node_list = MakeNodesforScen( + model, nodenames, sample_branching_factors, starting_stage=stage + ) + return model - -#========= -def scenario_names_creator(num_scens,start=None): + +# ========= +def scenario_names_creator(num_scens, start=None): # (only for Amalgamator): return the full list of num_scens scenario names # if start!=None, the list starts with the 'start' labeled scenario - if (start is None) : - start=0 - return [f"scen{i}" for i in range(start,start+num_scens)] - + if start is None: + start = 0 + return [f"scen{i}" for i in range(start, start + num_scens)] + -#========= +# ========= def inparser_adder(cfg): # Add command options unique to aircond # Do not change the defaults. 
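# =========
# A standalone sketch (not part of the diff): the node-path arithmetic that
# MakeNodesforScen and the demand creators above rely on. A scenario number is
# mapped to one child index per stage by peeling off branching factors. The
# helper name node_path is illustrative, not part of the module.
import numpy as np

def node_path(scennum, branching_factors, root_name="ROOT"):
    prod = int(np.prod(branching_factors))
    s = scennum % prod
    names = [root_name]
    for bf in branching_factors:
        prod //= bf  # scenarios per child subtree at this stage
        names.append(names[-1] + "_" + str(s // prod))
        s %= prod
    return names

# scenario 17 of a [3, 2, 3] tree is the last leaf, so it takes the highest
# child index at every stage
assert node_path(17, [3, 2, 3]) == ["ROOT", "ROOT_2", "ROOT_2_1", "ROOT_2_1_2"]
# =========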
@@ -396,34 +463,61 @@ def _doone(name, helptext, argname=None):
         # The name should be the name in parms
         # helptext should not include the default
         h = f"{helptext} (default {parms[name][1]})"
-        cfg.add_to_config(name,
-                          description=h,
-                          domain=parms[name][0],
-                          default=parms[name][1],
-                          argparse=True)
-
-    cfg.add_to_config("branching_factors", description="branching factors", domain=pyofig.ListOf(int), default=None)
-    _doone("mu_dev", "average deviation of demand between two periods", argname="mu-dev")
-    _doone("sigma_dev", "standard deviation of deviation of demand between two periods", argname="sigma-dev")
-    _doone("start_ups", "Include start-up costs in model, resulting in a MPI (default {d})")
-    _doone("StartUpCost", helptext="Cost if production in a period is non-zero and start-up is True")
+        cfg.add_to_config(
+            name,
+            description=h,
+            domain=parms[name][0],
+            default=parms[name][1],
+            argparse=True,
+        )
+
+    cfg.add_to_config(
+        "branching_factors",
+        description="branching factors",
+        domain=pyofig.ListOf(int),
+        default=None,
+    )
+    _doone(
+        "mu_dev", "average deviation of demand between two periods", argname="mu-dev"
+    )
+    _doone(
+        "sigma_dev",
+        "standard deviation of deviation of demand between two periods",
+        argname="sigma-dev",
+    )
+    _doone(
+        "start_ups", "Include start-up costs in model, resulting in a MIP"
+    )
+    _doone(
+        "StartUpCost",
+        helptext="Cost if production in a period is non-zero and start-up is True",
+    )
     _doone("start_seed", helptext="random number seed")
     _doone("min_d", helptext="minimum demand in a period")
     _doone("max_d", helptext="maximum demand in a period")
     _doone("starting_d", helptext="period 0 demand")
     _doone("InventoryCost", helptext="Inventory cost per period per item")
     _doone("BeginInventory", helptext="Initial Inventory")
-    _doone("LastInventoryCost", helptext="Inventory `cost` (should be negative) in last period)")
+    _doone(
+        "LastInventoryCost",
+        helptext="Inventory `cost` (should be negative) in last period",
+    )
     _doone("Capacity", helptext="Per period regular time capacity")
     _doone("RegularProdCost", helptext="Regular time production cost")
     _doone("OvertimeProdCost", helptext="Overtime (or subcontractor) production cost")
-    _doone("NegInventoryCost", helptext="Linear coefficient for backorders (should be negative; not used in last stage)")
-    _doone("QuadShortCoeff", helptext="Coefficient for backorders squared (should be nonnegative; not used in last stage)")
-
+    _doone(
+        "NegInventoryCost",
+        helptext="Linear coefficient for backorders (should be negative; not used in last stage)",
+    )
+    _doone(
+        "QuadShortCoeff",
+        helptext="Coefficient for backorders squared (should be nonnegative; not used in last stage)",
+    )
 
-#=========
+
+# =========
 def kw_creator(cfg, optionsin=None):
-    """ Use the options passed in and/or cfg to
+    """Use the options passed in and/or cfg to
     create the keyword arguments for the creator function(s)
     args:
         cfg (Config object): probably has parsed values
@@ -432,10 +526,10 @@ def kw_creator(cfg, optionsin=None):
     options = optionsin if optionsin is not None else dict()
     if "kwargs" in options:
         return options["kwargs"]
-    
+
     kwargs = dict()
-    
-    def _kwarg(option_name, default = None, arg_name=None):
+
+    def _kwarg(option_name, default=None, arg_name=None):
         # options trumps args
         retval = options.get(option_name)
         if retval is not None:
@@ -446,7 +540,7 @@ def _kwarg(option_name, default = None, arg_name=None):
             retval = getattr(cfg, aname) if hasattr(cfg, aname) else None
             retval = default if retval is None else retval
         kwargs[option_name] = retval
-    
+
     # Linked to the scenario_creator and inparser_adder
     # for confidence intervals, we need to see if the values are in args
     _kwarg("branching_factors")
@@ -454,24 +548,36 @@ def _kwarg(option_name, default = None, arg_name=None):
         _kwarg(idx, tpl[1])
 
     if kwargs["start_ups"] is None:
-        raise ValueError(f"kw_creator called, but no value given for start_ups, options={options}")
+        raise ValueError(
+            f"kw_creator called, but no value given for start_ups, options={options}"
+        )
     if kwargs["start_seed"] is None:
-        raise ValueError(f"kw_creator called, but no value given for start_seed, options={options}")
+        raise ValueError(
+            f"kw_creator called, but no value given for start_seed, options={options}"
+        )
 
     return kwargs
 
 
-#============================
+# ============================
 def scenario_denouement(rank, scenario_name, scenario):
     pass
 
 
-#============================
-def xhat_generator_aircond(scenario_names, solver_name=None, solver_options=None,
-                           branching_factors=None, mu_dev = 0, sigma_dev = 40,
-                           start_ups=None, start_seed = 0):
-    '''
+
+# ============================
+def xhat_generator_aircond(
+    scenario_names,
+    solver_name=None,
+    solver_options=None,
+    branching_factors=None,
+    mu_dev=0,
+    sigma_dev=40,
+    start_ups=None,
+    start_seed=0,
+):
+    """
     For sequential sampling.
-    Takes scenario names as input and provide the best solution for the 
+    Takes scenario names as input and provides the best solution for the
     approximate problem associated with the scenarios.
     Parameters
     ----------
@@ -482,7 +588,7 @@ def xhat_generator_aircond(scenario_names, solver_name=None, solver_options=None
     solver_options: dict, optional
         Solving options. The default is None.
     branching_factors: list, optional
-        Branching factors of the scenario 3. The default is [3,2,3] 
+        Branching factors of the scenario tree. The default is [3,2,3]
         (a 4 stage model with 18 different scenarios)
     mu_dev: float, optional
         The average deviation of demand between two stages; The default is 0.
@@ -497,36 +603,47 @@ def xhat_generator_aircond(scenario_names, solver_name=None, solver_options=None
     -------
     xhat: str
         A generated xhat, solution to the approximate problem induced by scenario_names.
-        
-    NOTE: This tool only works when the file is in mpisppy. In SPInstances, 
+
+    NOTE: This tool only works when the file is in mpisppy. In SPInstances,
        you must change the from_module line.
- ''' + """ num_scens = len(scenario_names) - assert branching_factors is not None, "branching factors must be supplied to xhat_generator_aircond" - assert solver_name is not None, "solver_name must be supplied to xhat_generator_aircond" - + assert ( + branching_factors is not None + ), "branching factors must be supplied to xhat_generator_aircond" + assert ( + solver_name is not None + ), "solver_name must be supplied to xhat_generator_aircond" + cfg = config.Config() cfg.quick_assign("EF_mstage", bool, True) cfg.quick_assign("EF_solver_name", str, solver_name) - #cfg.quick_assign("solver_name", str, solver_name) # amalgamator wants this + # cfg.quick_assign("solver_name", str, solver_name) # amalgamator wants this cfg.quick_assign("EF_solver_options", dict, solver_options) cfg.quick_assign("num_scens", int, num_scens) - cfg.quick_assign("_mpisppy_probability", float, 1/num_scens) + cfg.quick_assign("_mpisppy_probability", float, 1 / num_scens) cfg.quick_assign("start_seed", int, start_seed) - cfg.add_and_assign("branching_factors", description="branching factors", domain=pyofig.ListOf(int), default=None, value=branching_factors) + cfg.add_and_assign( + "branching_factors", + description="branching factors", + domain=pyofig.ListOf(int), + default=None, + value=branching_factors, + ) cfg.quick_assign("mu_dev", float, mu_dev) cfg.quick_assign("sigma_dev", float, sigma_dev) cfg.quick_assign("start_ups", bool, start_ups) - #We use from_module to build easily an Amalgamator object - ama = amalgamator.from_module("mpisppy.tests.examples.aircond", - cfg, use_command_line=False) - #Correcting the building by putting the right scenarios. + # We use from_module to build easily an Amalgamator object + ama = amalgamator.from_module( + "mpisppy.tests.examples.aircond", cfg, use_command_line=False + ) + # Correcting the building by putting the right scenarios. 
ama.scenario_names = scenario_names ama.verbose = False ama.run() - + # get the xhat xhat = sputils.nonant_cache_from_ef(ama.ef) @@ -536,71 +653,73 @@ def xhat_generator_aircond(scenario_names, solver_name=None, solver_options=None if __name__ == "__main__": # This __main__ is just for developers to use for quick tests parser = argparse.ArgumentParser() - parser.add_argument('--solver-name', - help="solver name", - default=None) - parser.add_argument("--branching-factors", - help="Spaces delimited branching factors (default 3 3)", - dest="branching_factors", - nargs="*", - type=int, - default=[10,10]) - parser.add_argument("--with-start-ups", - help="Toggles start-up costs in the aircond model", - dest="start_ups", - action="store_true") + parser.add_argument("--solver-name", help="solver name", default=None) + parser.add_argument( + "--branching-factors", + help="Spaces delimited branching factors (default 3 3)", + dest="branching_factors", + nargs="*", + type=int, + default=[10, 10], + ) + parser.add_argument( + "--with-start-ups", + help="Toggles start-up costs in the aircond model", + dest="start_ups", + action="store_true", + ) parser.set_defaults(start_ups=False) - parser.add_argument("--save-xhat", - help="Toggles file save", - dest="save_xhat", - action="store_true") + parser.add_argument( + "--save-xhat", help="Toggles file save", dest="save_xhat", action="store_true" + ) parser.set_defaults(save_xhat=False) + args = parser.parse_args() - args=parser.parse_args() - - solver_name=args.solver_name - start_ups=args.start_ups + solver_name = args.solver_name + start_ups = args.start_ups bfs = args.branching_factors save_xhat = args.save_xhat if start_ups: - solver_options={"mipgap":0.015,'verbose':True,} + solver_options = { + "mipgap": 0.015, + "verbose": True, + } else: - solver_options=dict() - - num_scens = np.prod(bfs) #To check with a full tree - ama_options = { "EF-mstage": True, - "EF_solver_name": solver_name, - "EF_solver_options":solver_options, - "num_scens": num_scens, - "branching_factors":bfs, - "mu_dev":0, - "sigma_dev":40, - "start_ups":start_ups, - "start_seed":0 - } - refmodel = "mpisppy.tests.examples.aircond" # WARNING: Change this in SPInstances - #We use from_module to build easily an Amalgamator object - t0=time.time() - ama = amalgamator.from_module(refmodel, - ama_options, - use_command_line=False) + solver_options = dict() + + num_scens = np.prod(bfs) # To check with a full tree + ama_options = { + "EF-mstage": True, + "EF_solver_name": solver_name, + "EF_solver_options": solver_options, + "num_scens": num_scens, + "branching_factors": bfs, + "mu_dev": 0, + "sigma_dev": 40, + "start_ups": start_ups, + "start_seed": 0, + } + refmodel = "mpisppy.tests.examples.aircond" # WARNING: Change this in SPInstances + # We use from_module to build easily an Amalgamator object + t0 = time.time() + ama = amalgamator.from_module(refmodel, ama_options, use_command_line=False) ama.run() - print('start ups costs: ', start_ups) - print('branching factors: ', bfs) - print('run time: ', time.time()-t0) + print("start ups costs: ", start_ups) + print("branching factors: ", bfs) + print("run time: ", time.time() - t0) print("inner bound =", ama.best_inner_bound) print("outer bound =", ama.best_outer_bound) - + xhat = sputils.nonant_cache_from_ef(ama.ef) - print('xhat_one = ', xhat['ROOT']) + print("xhat_one = ", xhat["ROOT"]) if save_xhat: - bf_string = '' + bf_string = "" for bf in bfs: - bf_string = bf_string + bf + '_' - 
np.savetxt('aircond_start_ups='+str(start_ups)+bf_string+'zhatstar.txt', xhat['ROOT'])
-
-
-
+        bf_string = bf_string + str(bf) + "_"  # str() needed: bf is an int
+    np.savetxt(
+        "aircond_start_ups=" + str(start_ups) + bf_string + "zhatstar.txt",
+        xhat["ROOT"],
+    )
diff --git a/mpisppy/tests/examples/aircondB.py b/mpisppy/tests/examples/aircondB.py
index 4a1805cba..3d61255c2 100644
--- a/mpisppy/tests/examples/aircondB.py
+++ b/mpisppy/tests/examples/aircondB.py
@@ -6,7 +6,7 @@
 # All rights reserved. Please see the files COPYRIGHT.md and LICENSE.md for
 # full copyright and license information.
 ###############################################################################
-#ReferenceModel for full set of scenarios for AirCond; June 2021
+# ReferenceModel for full set of scenarios for AirCond; June 2021
 # PICKLE BUNDLE VERSION!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
 # Dec 2021; numerous enhancements by DLW; do not change defaults
 # Feb 2022: Changed all inventory cost coefficients to be positive numbers
@@ -22,24 +22,26 @@
 # Use this random stream:
 aircondstream = np.random.RandomState()
 # Do not edit these defaults!
-parms = {"mu_dev": (float, 0.),
-         "sigma_dev": (float, 40.),
-         "start_ups": (bool, False),
-         "StartUpCost": (float, 300.),
-         "start_seed": (int, 1134),
-         "min_d": (float, 0.),
-         "max_d": (float, 400.),
-         "starting_d": (float, 200.),
-         "BeginInventory": (float, 200.),
-         "InventoryCost": (float, 0.5),
-         "LastInventoryCost": (float, -0.8),
-         "Capacity": (float, 200.),
-         "RegularProdCost": (float, 1.),
-         "OvertimeProdCost": (float, 3.),
-         "NegInventoryCost": (float, 5.),
-         "QuadShortCoeff": (float, 0)
+parms = {
+    "mu_dev": (float, 0.0),
+    "sigma_dev": (float, 40.0),
+    "start_ups": (bool, False),
+    "StartUpCost": (float, 300.0),
+    "start_seed": (int, 1134),
+    "min_d": (float, 0.0),
+    "max_d": (float, 400.0),
+    "starting_d": (float, 200.0),
+    "BeginInventory": (float, 200.0),
+    "InventoryCost": (float, 0.5),
+    "LastInventoryCost": (float, -0.8),
+    "Capacity": (float, 200.0),
+    "RegularProdCost": (float, 1.0),
+    "OvertimeProdCost": (float, 3.0),
+    "NegInventoryCost": (float, 5.0),
+    "QuadShortCoeff": (float, 0),
 }
 
+
 def _demands_creator(sname, sample_branching_factors, root_name="ROOT", **kwargs):
     # create replicable pseudo random demand steps
     # DLW on 3March2022: The seed and therefore the demand (directly) are determined by
@@ -53,7 +55,7 @@ def _demands_creator(sname, sample_branching_factors, root_name="ROOT", **kwargs
     mu_dev = kwargs.get("mu_dev", None)
     sigma_dev = kwargs.get("sigma_dev", None)
 
-    scennum = sputils.extract_num(sname)    
+    scennum = sputils.extract_num(sname)
     # Find the right path and the associated seeds (one for each node) using scennum
     prod = np.prod(sample_branching_factors)
     s = int(scennum % prod)
@@ -61,34 +63,49 @@ def _demands_creator(sname, sample_branching_factors, root_name="ROOT", **kwargs
     demands = [d]
     nodenames = [root_name]
     for bf in sample_branching_factors:
-        assert prod%bf == 0
-        prod = prod//bf
-        nodenames.append(str(s//prod))
-        s = s%prod
-
+        assert prod % bf == 0
+        prod = prod // bf
+        nodenames.append(str(s // prod))
+        s = s % prod
+
     stagelist = [int(x) for x in nodenames[1:]]
-    for t in range(1,len(nodenames)):
-        aircondstream.seed(start_seed+sputils.node_idx(stagelist[:t],sample_branching_factors))
-        d = min(max_d,max(min_d,d+aircondstream.normal(mu_dev,sigma_dev)))
+    for t in range(1, len(nodenames)):
+        aircondstream.seed(
+            start_seed + sputils.node_idx(stagelist[:t], sample_branching_factors)
+        )
+        d = min(max_d, max(min_d, d + aircondstream.normal(mu_dev,
sigma_dev))) demands.append(d) - - return demands,nodenames + + return demands, nodenames + def general_rho_setter(scenario_instance, rho_scale_factor=1.0): - """ TBD: make this work with proper bundles, which are two stage problems when solved""" + """TBD: make this work with proper bundles, which are two stage problems when solved""" computed_rhos = [] for t in scenario_instance.T[:-1]: - computed_rhos.append((id(scenario_instance.stage_models[t].RegularProd), - pyo.value(scenario_instance.stage_models[t].RegularProdCost) * rho_scale_factor)) - computed_rhos.append((id(scenario_instance.stage_models[t].OvertimeProd), - pyo.value(scenario_instance.stage_models[t].OvertimeProdCost) * rho_scale_factor)) + computed_rhos.append( + ( + id(scenario_instance.stage_models[t].RegularProd), + pyo.value(scenario_instance.stage_models[t].RegularProdCost) + * rho_scale_factor, + ) + ) + computed_rhos.append( + ( + id(scenario_instance.stage_models[t].OvertimeProd), + pyo.value(scenario_instance.stage_models[t].OvertimeProdCost) + * rho_scale_factor, + ) + ) return computed_rhos + def dual_rho_setter(scenario_instance): return general_rho_setter(scenario_instance, rho_scale_factor=0.0001) + def primal_rho_setter(scenario_instance): return general_rho_setter(scenario_instance, rho_scale_factor=0.01) @@ -97,109 +114,137 @@ def _StageModel_creator(time, demand, last_stage, **kwargs): return base_aircond._StageModel_creator(time, demand, last_stage, kwargs) -#Assume that demands has been drawn before +# Assume that demands has been drawn before def aircond_model_creator(demands, **kwargs): return base_aircond.aircond_model_creator(demands, **kwargs) -def MakeNodesforScen(model,nodenames,branching_factors,starting_stage=1): - return base_aircond.MakeNodesforScen(model,nodenames,branching_factors,starting_stage=starting_stage) - +def MakeNodesforScen(model, nodenames, branching_factors, starting_stage=1): + return base_aircond.MakeNodesforScen( + model, nodenames, branching_factors, starting_stage=starting_stage + ) + + def scenario_creator(sname, **kwargs): """ NOTE: modified Feb/March 2022 to be able to return a bundle if the name is Bundle_firstnum_lastnum (e.g. 
Bundle_14_28) This returns a Pyomo model (either for scenario, or the EF of a bundle) """ + def _bunBFs(branching_factors, bunsize): # return branching factors for the bundle's EF assert len(branching_factors) > 1 beyond2size = np.prod(branching_factors[1:]) - if bunsize % beyond2size!= 0: - raise RuntimeError(f"Bundles must consume entire second stage nodes {beyond2size} {bunsize}") - bunBFs = [bunsize // beyond2size] + branching_factors[1:] # branching factors in the bundle + if bunsize % beyond2size != 0: + raise RuntimeError( + f"Bundles must consume entire second stage nodes {beyond2size} {bunsize}" + ) + bunBFs = [bunsize // beyond2size] + branching_factors[ + 1: + ] # branching factors in the bundle return bunBFs - # NOTE: start seed is not used here (noted Feb 2022) - start_seed = kwargs['start_seed'] + start_seed = kwargs["start_seed"] if "branching_factors" not in kwargs: - raise RuntimeError("scenario_creator for aircond needs branching_factors in kwargs") + raise RuntimeError( + "scenario_creator for aircond needs branching_factors in kwargs" + ) branching_factors = kwargs["branching_factors"] if "scen" in sname: - - demands,nodenames = _demands_creator(sname, branching_factors, root_name="ROOT", **kwargs) + demands, nodenames = _demands_creator( + sname, branching_factors, root_name="ROOT", **kwargs + ) model = aircond_model_creator(demands, **kwargs) - #Constructing the nodes used by the scenario + # Constructing the nodes used by the scenario model._mpisppy_node_list = MakeNodesforScen(model, nodenames, branching_factors) model._mpisppy_probability = 1 / np.prod(branching_factors) - return model + return model elif "Bundle" in sname: firstnum = int(sname.split("_")[1]) lastnum = int(sname.split("_")[2]) - snames = [f"scen{i}" for i in range(firstnum, lastnum+1)] + snames = [f"scen{i}" for i in range(firstnum, lastnum + 1)] if kwargs.get("unpickle_bundles_dir") is not None: - fname = os.path.join(kwargs["unpickle_bundles_dir"], sname+".pkl") - bundle = pickle_bundle.dill_unpickle(fname) + fname = os.path.join(kwargs["unpickle_bundles_dir"], sname + ".pkl") + bundle = pickle_bundle.dill_unpickle(fname) return bundle # if we are still here, we have to create the bundle (we did not load it) bunkwargs = kwargs.copy() bunkwargs["branching_factors"] = _bunBFs(branching_factors, len(snames)) # The next line is needed, but eliminates comparison of pickle and non-pickle - bunkwargs["start_seed"] = start_seed+firstnum * len(branching_factors) # the usual block of seeds - bundle = sputils.create_EF(snames, scenario_creator, - scenario_creator_kwargs=bunkwargs, EF_name=sname, - nonant_for_fixed_vars = False) + bunkwargs["start_seed"] = start_seed + firstnum * len( + branching_factors + ) # the usual block of seeds + bundle = sputils.create_EF( + snames, + scenario_creator, + scenario_creator_kwargs=bunkwargs, + EF_name=sname, + nonant_for_fixed_vars=False, + ) # It simplifies things if we assume that the bundles consume entire second stage nodes, # then all we need is a root node and the only nonants that need to be reported are # at the root node (otherwise, more coding is required here to figure out which nodes and Vars # are shared with other bundles) - bunsize = (lastnum-firstnum+1) + bunsize = lastnum - firstnum + 1 N = np.prod(branching_factors) numbuns = N / bunsize - nonantlist = [v for idx,v in bundle.ref_vars.items() if idx[0] =="ROOT"] + nonantlist = [v for idx, v in bundle.ref_vars.items() if idx[0] == "ROOT"] attach_root_node(bundle, 0, nonantlist) # scenarios are equally 
likely so bundles are too - bundle._mpisppy_probability = 1/numbuns + bundle._mpisppy_probability = 1 / numbuns if kwargs.get("pickle_bundles_dir") is not None: # note that sname is a bundle name - fname = os.path.join(kwargs["pickle_bundles_dir"], sname+".pkl") + fname = os.path.join(kwargs["pickle_bundles_dir"], sname + ".pkl") pickle_bundle.dill_pickle(bundle, fname) return bundle else: - raise RuntimeError (f"Scenario name does not have scen or Bundle: {sname}") - - -def sample_tree_scen_creator(sname, stage, sample_branching_factors, seed, - given_scenario=None, **scenario_creator_kwargs): - return base_aircond.sample_tree_scen_creator(sname, stage, sample_branching_factors, seed, - given_scenario=given_scenario, **scenario_creator_kwargs) - - -#========= -def scenario_names_creator(num_scens,start=None): + raise RuntimeError(f"Scenario name does not have scen or Bundle: {sname}") + + +def sample_tree_scen_creator( + sname, + stage, + sample_branching_factors, + seed, + given_scenario=None, + **scenario_creator_kwargs, +): + return base_aircond.sample_tree_scen_creator( + sname, + stage, + sample_branching_factors, + seed, + given_scenario=given_scenario, + **scenario_creator_kwargs, + ) + + +# ========= +def scenario_names_creator(num_scens, start=None): # (only for Amalgamator): return the full list of num_scens scenario names # if start!=None, the list starts with the 'start' labeled scenario - if (start is None) : - start=0 - return [f"scen{i}" for i in range(start,start+num_scens)] - + if start is None: + start = 0 + return [f"scen{i}" for i in range(start, start + num_scens)] + -#========= +# ========= def inparser_adder(cfg): base_aircond.inparser_adder(cfg) # special "proper" bundle arguments pickle_bundle.pickle_bundle_config(cfg) - -#========= + +# ========= def kw_creator(options=None): kwargs = base_aircond.kw_creator(options) @@ -210,22 +255,33 @@ def kw_creator(options=None): return kwargs -#============================ +# ============================ def scenario_denouement(rank, scenario_name, scenario): pass -#============================ -def xhat_generator_aircond(scenario_names, solver_name="gurobi", solver_options=None, - branching_factors=None, mudev = 0, sigmadev = 40, - start_ups=None, start_seed = 0): - return base_aircond.xhat_generator_aircond(scenario_names, solver_name="gurobi", solver_options=solver_options, - branching_factors=branching_factors, mudev = mudev, sigmadev = sigmadev, - start_ups=start_ups, start_seed = start_seed) +# ============================ +def xhat_generator_aircond( + scenario_names, + solver_name="gurobi", + solver_options=None, + branching_factors=None, + mudev=0, + sigmadev=40, + start_ups=None, + start_seed=0, +): + return base_aircond.xhat_generator_aircond( + scenario_names, + solver_name="gurobi", + solver_options=solver_options, + branching_factors=branching_factors, + mudev=mudev, + sigmadev=sigmadev, + start_ups=start_ups, + start_seed=start_seed, + ) if __name__ == "__main__": print("not directly runnable.") - - - diff --git a/mpisppy/tests/examples/apl1p.py b/mpisppy/tests/examples/apl1p.py index 7ae904cf9..45756afbd 100644 --- a/mpisppy/tests/examples/apl1p.py +++ b/mpisppy/tests/examples/apl1p.py @@ -6,9 +6,9 @@ # All rights reserved. Please see the files COPYRIGHT.md and LICENSE.md for # full copyright and license information. 
############################################################################### -#ReferenceModel for full set of scenarios for APL1P; May 2021 -#We use costs from Bailey, Jensen and Morton, Response Surface Analysis of Two-Stage Stochastic Linear Programming with Recourse -#(costs are 10x higher than in the original [Infanger 1992] paper) +# ReferenceModel for full set of scenarios for APL1P; May 2021 +# We use costs from Bailey, Jensen and Morton, Response Surface Analysis of Two-Stage Stochastic Linear Programming with Recourse +# (costs are 10x higher than in the original [Infanger 1992] paper) import pyomo.environ as pyo import numpy as np @@ -19,145 +19,161 @@ # Use this random stream: apl1pstream = np.random.RandomState() + def APL1P_model_creator(seed): apl1pstream.seed(seed) - random_array = apl1pstream.rand(6) #We only use 5 random numbers - + random_array = apl1pstream.rand(6) # We only use 5 random numbers + # # Model # - + model = pyo.ConcreteModel() - + # # Parameters # - + # generator - model.G = [1,2] - + model.G = [1, 2] + # Demand level - model.DL = [1,2,3] - + model.DL = [1, 2, 3] + # Availability - avail_outcome = ([1.,0.9,0.5,0.1],[1.,0.9,0.7,0.1,0.0]) - avail_probability = ([0.2,0.3,0.4,0.1],[0.1,0.2,0.5,0.1,0.1]) - avail_cumprob = (np.cumsum(avail_probability[0]),np.cumsum(avail_probability[1])) - assert(max(avail_cumprob[0])==1.0 and max(avail_cumprob[1])==1.0) - def availability_init(m,g): + avail_outcome = ([1.0, 0.9, 0.5, 0.1], [1.0, 0.9, 0.7, 0.1, 0.0]) + avail_probability = ([0.2, 0.3, 0.4, 0.1], [0.1, 0.2, 0.5, 0.1, 0.1]) + avail_cumprob = (np.cumsum(avail_probability[0]), np.cumsum(avail_probability[1])) + assert max(avail_cumprob[0]) == 1.0 and max(avail_cumprob[1]) == 1.0 + + def availability_init(m, g): rd = random_array[g] - i = np.searchsorted(avail_cumprob[g-1],rd) - return avail_outcome[g-1][i] - - model.Availability = pyo.Param(model.G, within=pyo.NonNegativeReals, - initialize=availability_init) - + i = np.searchsorted(avail_cumprob[g - 1], rd) + return avail_outcome[g - 1][i] + + model.Availability = pyo.Param( + model.G, within=pyo.NonNegativeReals, initialize=availability_init + ) + # Min Capacity cmin_init = 1000 model.Cmin = pyo.Param(model.G, within=pyo.NonNegativeReals, initialize=cmin_init) - - + # Investment, aka Capacity costs - invest = np.array([4.,2.5]) - def investment_init(m,g): - return(invest[g-1]) + invest = np.array([4.0, 2.5]) + + def investment_init(m, g): + return invest[g - 1] + + model.Investment = pyo.Param( + model.G, within=pyo.NonNegativeReals, initialize=investment_init + ) - model.Investment = pyo.Param(model.G, within=pyo.NonNegativeReals, - initialize=investment_init) - # Operating Cost - op_cost = np.array([[4.3,2.0,0.5],[8.7,4.0,1.0]]) - def operatingcost_init(m,g,dl): - return(op_cost[g-1,dl-1]) - - model.OperatingCost = pyo.Param(model.G, model.DL, within=pyo.NonNegativeReals, - initialize = operatingcost_init) - + op_cost = np.array([[4.3, 2.0, 0.5], [8.7, 4.0, 1.0]]) + + def operatingcost_init(m, g, dl): + return op_cost[g - 1, dl - 1] + + model.OperatingCost = pyo.Param( + model.G, model.DL, within=pyo.NonNegativeReals, initialize=operatingcost_init + ) + # Demand - demand_outcome = [900,1000,1100,1200] - demand_prob = [.15,.45,.25,.15] + demand_outcome = [900, 1000, 1100, 1200] + demand_prob = [0.15, 0.45, 0.25, 0.15] demand_cumprob = np.cumsum(demand_prob) - assert(max(demand_cumprob) == 1.0) - def demand_init(m,dl): - rd = random_array[2+dl] - i = np.searchsorted(demand_cumprob,rd) + assert max(demand_cumprob) 
== 1.0 + + def demand_init(m, dl): + rd = random_array[2 + dl] + i = np.searchsorted(demand_cumprob, rd) return demand_outcome[i] - - model.Demand = pyo.Param(model.DL, within=pyo.NonNegativeReals, - initialize=demand_init) - + + model.Demand = pyo.Param( + model.DL, within=pyo.NonNegativeReals, initialize=demand_init + ) + # Cost of unserved demand - unserved_cost =10.0 - model.CostUnservedDemand = pyo.Param(model.DL, within=pyo.NonNegativeReals, - initialize=unserved_cost) - + unserved_cost = 10.0 + model.CostUnservedDemand = pyo.Param( + model.DL, within=pyo.NonNegativeReals, initialize=unserved_cost + ) + # # Variables # - + # Capacity of generators model.CapacityGenerators = pyo.Var(model.G, domain=pyo.NonNegativeReals) - + # Operation level model.OperationLevel = pyo.Var(model.G, model.DL, domain=pyo.NonNegativeReals) - + # Unserved demand model.UnservedDemand = pyo.Var(model.DL, domain=pyo.NonNegativeReals) - - + # # Constraints # - + # Minimum capacity def MinimumCapacity_rule(model, g): return model.CapacityGenerators[g] >= model.Cmin[g] - + model.MinimumCapacity = pyo.Constraint(model.G, rule=MinimumCapacity_rule) - + # Maximum operating level def MaximumOperating_rule(model, g): - return sum(model.OperationLevel[g, dl] for dl in model.DL) <= model.Availability[g] * model.CapacityGenerators[g] - + return ( + sum(model.OperationLevel[g, dl] for dl in model.DL) + <= model.Availability[g] * model.CapacityGenerators[g] + ) + model.MaximumOperating = pyo.Constraint(model.G, rule=MaximumOperating_rule) - + # Satisfy demand def SatisfyDemand_rule(model, dl): - return sum(model.OperationLevel[g, dl] for g in model.G) + model.UnservedDemand[dl] >= model.Demand[dl] - + return ( + sum(model.OperationLevel[g, dl] for g in model.G) + model.UnservedDemand[dl] + >= model.Demand[dl] + ) + model.SatisfyDemand = pyo.Constraint(model.DL, rule=SatisfyDemand_rule) - + # # Stage-specific cost computations # - + def ComputeFirstStageCost_rule(model): return sum(model.Investment[g] * model.CapacityGenerators[g] for g in model.G) - + model.FirstStageCost = pyo.Expression(rule=ComputeFirstStageCost_rule) - - + def ComputeSecondStageCost_rule(model): expr = sum( - model.OperatingCost[g, dl] * model.OperationLevel[g, dl] for g in model.G for dl in model.DL) + sum( - model.CostUnservedDemand[dl] * model.UnservedDemand[dl] for dl in model.DL) + model.OperatingCost[g, dl] * model.OperationLevel[g, dl] + for g in model.G + for dl in model.DL + ) + sum( + model.CostUnservedDemand[dl] * model.UnservedDemand[dl] for dl in model.DL + ) return expr model.SecondStageCost = pyo.Expression(rule=ComputeSecondStageCost_rule) - - + def total_cost_rule(model): return model.FirstStageCost + model.SecondStageCost - + model.Total_Cost_Objective = pyo.Objective(rule=total_cost_rule, sense=pyo.minimize) - return(model) + return model def scenario_creator(sname, num_scens=None): - scennum = sputils.extract_num(sname) + scennum = sputils.extract_num(sname) model = APL1P_model_creator(scennum) - + # Create the list of nodes associated with the scenario (for two stage, # there is only one node associated with the scenario--leaf nodes are # ignored). 
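# =========
# A standalone sketch (not part of the diff) of the inverse-CDF sampling
# pattern used by availability_init and demand_init above: a uniform draw is
# mapped to a discrete outcome by searching the cumulative probabilities.
# The seed is borrowed from the aircond parms purely for illustration.
import numpy as np

demand_outcome = [900, 1000, 1100, 1200]
demand_cumprob = np.cumsum([0.15, 0.45, 0.25, 0.15])  # 0.15, 0.60, 0.85, 1.00

rng = np.random.RandomState(1134)
rd = rng.rand()                          # uniform in [0, 1)
i = np.searchsorted(demand_cumprob, rd)  # first index with cumprob >= rd
assert demand_outcome[i] in demand_outcome
# =========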
@@ -167,51 +183,52 @@ def scenario_creator(sname, num_scens=None): cond_prob=1.0, stage=1, cost_expression=model.Total_Cost_Objective, - nonant_list=[model.CapacityGenerators], + nonant_list=[model.CapacityGenerators], scen_model=model, ) ] - - #Add the probability of the scenario - if num_scens is not None : - model._mpisppy_probability = 1/num_scens - - return(model) + + # Add the probability of the scenario + if num_scens is not None: + model._mpisppy_probability = 1 / num_scens + + return model -#========= -def scenario_names_creator(num_scens,start=None): +# ========= +def scenario_names_creator(num_scens, start=None): # (only for Amalgamator): return the full list of num_scens scenario names # if start!=None, the list starts with the 'start' labeled scenario - if (start is None) : - start=0 - return [f"scen{i}" for i in range(start,start+num_scens)] - + if start is None: + start = 0 + return [f"scen{i}" for i in range(start, start + num_scens)] -#========= + +# ========= def inparser_adder(inparser): # (only for Amalgamator): add command options unique to apl1p pass -#========= +# ========= def kw_creator(options): # (only for Amalgamator): linked to the scenario_creator and inparser_adder - kwargs = {"num_scens" : options['num_scens'] if 'num_scens' in options else None, - } + kwargs = { + "num_scens": options["num_scens"] if "num_scens" in options else None, + } return kwargs -#============================ +# ============================ def scenario_denouement(rank, scenario_name, scenario): pass -#============================ +# ============================ def xhat_generator_apl1p(scenario_names, solver_name="gurobi", solver_options=None): - ''' + """ For sequential sampling. - Takes scenario names as input and provide the best solution for the + Takes scenario names as input and provide the best solution for the approximate problem associated with the scenarios. Parameters ---------- @@ -227,38 +244,46 @@ def xhat_generator_apl1p(scenario_names, solver_name="gurobi", solver_options=No xhat: str A generated xhat, solution to the approximate problem induced by scenario_names. - ''' + """ num_scens = len(scenario_names) - - ama_options = { "EF-2stage": True, - "EF_solver_name": solver_name, - "EF_solver_options": solver_options, - "num_scens": num_scens, - "_mpisppy_probability": 1/num_scens, - } - #We use from_module to build easily an Amalgamator object - ama = amalgamator.from_module("mpisppy.tests.examples.apl1p", - ama_options,use_command_line=False) - #Correcting the building by putting the right scenarios. + + ama_options = { + "EF-2stage": True, + "EF_solver_name": solver_name, + "EF_solver_options": solver_options, + "num_scens": num_scens, + "_mpisppy_probability": 1 / num_scens, + } + # We use from_module to build easily an Amalgamator object + ama = amalgamator.from_module( + "mpisppy.tests.examples.apl1p", ama_options, use_command_line=False + ) + # Correcting the building by putting the right scenarios. 
ama.scenario_names = scenario_names ama.run() - + # get the xhat xhat = sputils.nonant_cache_from_ef(ama.ef) return xhat + if __name__ == "__main__": - #An example of sequential sampling for the APL1P model + # An example of sequential sampling for the APL1P model from mpisppy.confidence_intervals.seqsampling import SeqSampling - optionsFSP = {'eps': 5.0, - 'solver_name': "gurobi_direct", - "c0":50,} - apl1p_pb = SeqSampling("mpisppy.tests.examples.apl1p", - xhat_generator_apl1p, - optionsFSP, - stopping_criterion="BPL", - stochastic_sampling=False) + + optionsFSP = { + "eps": 5.0, + "solver_name": "gurobi_direct", + "c0": 50, + } + apl1p_pb = SeqSampling( + "mpisppy.tests.examples.apl1p", + xhat_generator_apl1p, + optionsFSP, + stopping_criterion="BPL", + stochastic_sampling=False, + ) res = apl1p_pb.run() print(res) diff --git a/mpisppy/tests/examples/distr.py b/mpisppy/tests/examples/distr.py index c6f81a7fd..101ca1426 100644 --- a/mpisppy/tests/examples/distr.py +++ b/mpisppy/tests/examples/distr.py @@ -20,7 +20,8 @@ # we will use dummy-nodes, which will be represented in mpi-sppy by non-anticipative variables # The following association of terms are made: regions = scenarios, and dummy-nodes = nonants = consensus-vars -### The following functions create the data +### The following functions create the data + def inter_region_dict_creator(num_scens): """Creates the oriented arcs between the regions, with their capacities and costs. \n @@ -30,51 +31,81 @@ def inter_region_dict_creator(num_scens): num_scens (int): select the number of scenarios (regions) wanted Returns: - dict: + dict: Each arc is presented as a pair (source, target) with source and target containing (scenario_name, node_name) \n The arcs are used as keys for the dictionaries of costs and capacities """ - inter_region_dict={} + inter_region_dict = {} if num_scens == 2: - inter_region_dict["arcs"]=[(("Region1","DC1"),("Region2","DC2"))] - inter_region_dict["costs"]={(("Region1","DC1"),("Region2","DC2")): 100} - inter_region_dict["capacities"]={(("Region1","DC1"),("Region2","DC2")): 70} + inter_region_dict["arcs"] = [(("Region1", "DC1"), ("Region2", "DC2"))] + inter_region_dict["costs"] = {(("Region1", "DC1"), ("Region2", "DC2")): 100} + inter_region_dict["capacities"] = {(("Region1", "DC1"), ("Region2", "DC2")): 70} elif num_scens == 3: - inter_region_dict["arcs"] = [(("Region1","DC1"),("Region2","DC2")),(("Region2","DC2"),("Region1","DC1")),\ - (("Region1","DC1"),("Region3","DC3_1")),(("Region3","DC3_1"),("Region1","DC1")),\ - (("Region2","DC2"),("Region3","DC3_2")),(("Region3","DC3_2"),("Region2","DC2")),\ - ] - inter_region_dict["costs"] = {(("Region1","DC1"),("Region2","DC2")): 100, (("Region2","DC2"),("Region1","DC1")): 50,\ - (("Region1","DC1"),("Region3","DC3_1")): 200, (("Region3","DC3_1"),("Region1","DC1")): 200,\ - (("Region2","DC2"),("Region3","DC3_2")): 200, (("Region3","DC3_2"),("Region2","DC2")): 200,\ - } - inter_region_dict["capacities"] = {(("Region1","DC1"),("Region2","DC2")): 70, (("Region2","DC2"),("Region1","DC1")): 100,\ - (("Region1","DC1"),("Region3","DC3_1")): 50, (("Region3","DC3_1"),("Region1","DC1")): 50,\ - (("Region2","DC2"),("Region3","DC3_2")): 50, (("Region3","DC3_2"),("Region2","DC2")): 50,\ - } - + inter_region_dict["arcs"] = [ + (("Region1", "DC1"), ("Region2", "DC2")), + (("Region2", "DC2"), ("Region1", "DC1")), + (("Region1", "DC1"), ("Region3", "DC3_1")), + (("Region3", "DC3_1"), ("Region1", "DC1")), + (("Region2", "DC2"), ("Region3", "DC3_2")), + (("Region3", "DC3_2"), ("Region2", 
"DC2")), + ] + inter_region_dict["costs"] = { + (("Region1", "DC1"), ("Region2", "DC2")): 100, + (("Region2", "DC2"), ("Region1", "DC1")): 50, + (("Region1", "DC1"), ("Region3", "DC3_1")): 200, + (("Region3", "DC3_1"), ("Region1", "DC1")): 200, + (("Region2", "DC2"), ("Region3", "DC3_2")): 200, + (("Region3", "DC3_2"), ("Region2", "DC2")): 200, + } + inter_region_dict["capacities"] = { + (("Region1", "DC1"), ("Region2", "DC2")): 70, + (("Region2", "DC2"), ("Region1", "DC1")): 100, + (("Region1", "DC1"), ("Region3", "DC3_1")): 50, + (("Region3", "DC3_1"), ("Region1", "DC1")): 50, + (("Region2", "DC2"), ("Region3", "DC3_2")): 50, + (("Region3", "DC3_2"), ("Region2", "DC2")): 50, + } + elif num_scens == 4: - inter_region_dict["arcs"] = [(("Region1","DC1"),("Region2","DC2")),(("Region2","DC2"),("Region1","DC1")),\ - (("Region1","DC1"),("Region3","DC3_1")),(("Region3","DC3_1"),("Region1","DC1")),\ - (("Region2","DC2"),("Region3","DC3_2")),(("Region3","DC3_2"),("Region2","DC2")),\ - (("Region1","DC1"),("Region4","DC4")),(("Region4","DC4"),("Region1","DC1")),\ - (("Region4","DC4"),("Region2","DC2")),(("Region2","DC2"),("Region4","DC4")),\ - ] - inter_region_dict["costs"] = {(("Region1","DC1"),("Region2","DC2")): 100, (("Region2","DC2"),("Region1","DC1")): 50,\ - (("Region1","DC1"),("Region3","DC3_1")): 200, (("Region3","DC3_1"),("Region1","DC1")): 200,\ - (("Region2","DC2"),("Region3","DC3_2")): 200, (("Region3","DC3_2"),("Region2","DC2")): 200,\ - (("Region1","DC1"),("Region4","DC4")): 30, (("Region4","DC4"),("Region1","DC1")): 50,\ - (("Region4","DC4"),("Region2","DC2")): 100, (("Region2","DC2"),("Region4","DC4")): 70,\ - } - inter_region_dict["capacities"] = {(("Region1","DC1"),("Region2","DC2")): 70, (("Region2","DC2"),("Region1","DC1")): 100,\ - (("Region1","DC1"),("Region3","DC3_1")): 50, (("Region3","DC3_1"),("Region1","DC1")): 50,\ - (("Region2","DC2"),("Region3","DC3_2")): 50, (("Region3","DC3_2"),("Region2","DC2")): 50,\ - (("Region1","DC1"),("Region4","DC4")): 100, (("Region4","DC4"),("Region1","DC1")): 60,\ - (("Region4","DC4"),("Region2","DC2")): 20, (("Region2","DC2"),("Region4","DC4")): 40,\ - } - + inter_region_dict["arcs"] = [ + (("Region1", "DC1"), ("Region2", "DC2")), + (("Region2", "DC2"), ("Region1", "DC1")), + (("Region1", "DC1"), ("Region3", "DC3_1")), + (("Region3", "DC3_1"), ("Region1", "DC1")), + (("Region2", "DC2"), ("Region3", "DC3_2")), + (("Region3", "DC3_2"), ("Region2", "DC2")), + (("Region1", "DC1"), ("Region4", "DC4")), + (("Region4", "DC4"), ("Region1", "DC1")), + (("Region4", "DC4"), ("Region2", "DC2")), + (("Region2", "DC2"), ("Region4", "DC4")), + ] + inter_region_dict["costs"] = { + (("Region1", "DC1"), ("Region2", "DC2")): 100, + (("Region2", "DC2"), ("Region1", "DC1")): 50, + (("Region1", "DC1"), ("Region3", "DC3_1")): 200, + (("Region3", "DC3_1"), ("Region1", "DC1")): 200, + (("Region2", "DC2"), ("Region3", "DC3_2")): 200, + (("Region3", "DC3_2"), ("Region2", "DC2")): 200, + (("Region1", "DC1"), ("Region4", "DC4")): 30, + (("Region4", "DC4"), ("Region1", "DC1")): 50, + (("Region4", "DC4"), ("Region2", "DC2")): 100, + (("Region2", "DC2"), ("Region4", "DC4")): 70, + } + inter_region_dict["capacities"] = { + (("Region1", "DC1"), ("Region2", "DC2")): 70, + (("Region2", "DC2"), ("Region1", "DC1")): 100, + (("Region1", "DC1"), ("Region3", "DC3_1")): 50, + (("Region3", "DC3_1"), ("Region1", "DC1")): 50, + (("Region2", "DC2"), ("Region3", "DC3_2")): 50, + (("Region3", "DC3_2"), ("Region2", "DC2")): 50, + (("Region1", "DC1"), ("Region4", "DC4")): 100, + 
(("Region4", "DC4"), ("Region1", "DC1")): 60, + (("Region4", "DC4"), ("Region2", "DC2")): 20, + (("Region2", "DC2"), ("Region4", "DC4")): 40, + } + return inter_region_dict @@ -86,10 +117,10 @@ def dummy_nodes_generator(region_dict, inter_region_dict): inter_region_dict (dict): dictionary of the inter-region relations Returns: - local_dict (dict): - This dictionary copies region_dict, completes the already existing fields of + local_dict (dict): + This dictionary copies region_dict, completes the already existing fields of region_dict to represent the dummy nodes, and adds the following keys:\n - dummy nodes source (resp. target): the list of dummy nodes for which the source (resp. target) + dummy nodes source (resp. target): the list of dummy nodes for which the source (resp. target) is in the considered region. \n dummy nodes source (resp. target) slack bounds: dictionary on dummy nodes source (resp. target) """ @@ -103,19 +134,25 @@ def dummy_nodes_generator(region_dict, inter_region_dict): local_dict["dummy nodes source slack bounds"] = {} local_dict["dummy nodes target slack bounds"] = {} for arc in inter_region_dict["arcs"]: - source,target = arc - region_source,node_source = source - region_target,node_target = target + source, target = arc + region_source, node_source = source + region_target, node_target = target if region_source == region_dict["name"]: - dummy_node=node_source+node_target + dummy_node = node_source + node_target local_dict["nodes"].append(dummy_node) local_dict["supply"][dummy_node] = 0 local_dict["dummy nodes source"].append(dummy_node) local_dict["arcs"].append((node_source, dummy_node)) - local_dict["flow costs"][(node_source, dummy_node)] = inter_region_dict["costs"][(source, target)]/2 #should be adapted to model - local_dict["flow capacities"][(node_source, dummy_node)] = inter_region_dict["capacities"][(source, target)] - local_dict["dummy nodes source slack bounds"][dummy_node] = inter_region_dict["capacities"][(source, target)] + local_dict["flow costs"][(node_source, dummy_node)] = ( + inter_region_dict["costs"][(source, target)] / 2 + ) # should be adapted to model + local_dict["flow capacities"][(node_source, dummy_node)] = ( + inter_region_dict["capacities"][(source, target)] + ) + local_dict["dummy nodes source slack bounds"][dummy_node] = ( + inter_region_dict["capacities"][(source, target)] + ) if region_target == local_dict["name"]: dummy_node = node_source + node_target @@ -123,16 +160,23 @@ def dummy_nodes_generator(region_dict, inter_region_dict): local_dict["supply"][dummy_node] = 0 local_dict["dummy nodes target"].append(dummy_node) local_dict["arcs"].append((dummy_node, node_target)) - local_dict["flow costs"][(dummy_node, node_target)] = inter_region_dict["costs"][(source, target)]/2 #should be adapted to model - local_dict["flow capacities"][(dummy_node, node_target)] = inter_region_dict["capacities"][(source, target)] - local_dict["dummy nodes target slack bounds"][dummy_node] = inter_region_dict["capacities"][(source, target)] + local_dict["flow costs"][(dummy_node, node_target)] = ( + inter_region_dict["costs"][(source, target)] / 2 + ) # should be adapted to model + local_dict["flow capacities"][(dummy_node, node_target)] = ( + inter_region_dict["capacities"][(source, target)] + ) + local_dict["dummy nodes target slack bounds"][dummy_node] = ( + inter_region_dict["capacities"][(source, target)] + ) return local_dict + def _is_partition(L, *lists): # Step 1: Verify that the union of all sublists contains all elements of L if set(L) != 
set().union(*lists): return False - + # Step 2: Ensure each element in L appears in exactly one sublist for item in L: count = 0 @@ -141,13 +185,14 @@ def _is_partition(L, *lists): count += 1 if count != 1: return False - + return True + def region_dict_creator(scenario_name): - """ Create a scenario for the inter-region max profit distribution example. + """Create a scenario for the inter-region max profit distribution example. - The convention for node names is: + The convention for node names is: Symbol + number of the region (+ _ + number of the example if needed), \n with symbols DC for distribution centers, F for factory nodes, B for buyer nodes. \n For instance: F3_1 is the 1st factory node of region 3. \n @@ -167,85 +212,162 @@ def region_dict_creator(scenario_name): "flow costs" (dict[a] of float) : costs per unit flow on each arc \n "flow capacities" (dict[a] of floats) : upper bound capacities of each arc \n """ - if scenario_name == "Region1" : + if scenario_name == "Region1": # Creates data for Region1 - region_dict={"name": "Region1"} + region_dict = {"name": "Region1"} region_dict["nodes"] = ["F1_1", "F1_2", "DC1", "B1_1", "B1_2"] - region_dict["factory nodes"] = ["F1_1","F1_2"] - region_dict["buyer nodes"] = ["B1_1","B1_2"] - region_dict["distribution center nodes"]= ["DC1"] - region_dict["supply"] = {"F1_1": 80, "F1_2": 70, "B1_1": -60, "B1_2": -90, "DC1": 0} - region_dict["arcs"] = [("F1_1","DC1"), ("F1_2","DC1"), ("DC1","B1_1"), - ("DC1","B1_2"), ("F1_1","B1_1"), ("F1_2","B1_2")] + region_dict["factory nodes"] = ["F1_1", "F1_2"] + region_dict["buyer nodes"] = ["B1_1", "B1_2"] + region_dict["distribution center nodes"] = ["DC1"] + region_dict["supply"] = { + "F1_1": 80, + "F1_2": 70, + "B1_1": -60, + "B1_2": -90, + "DC1": 0, + } + region_dict["arcs"] = [ + ("F1_1", "DC1"), + ("F1_2", "DC1"), + ("DC1", "B1_1"), + ("DC1", "B1_2"), + ("F1_1", "B1_1"), + ("F1_2", "B1_2"), + ] region_dict["production costs"] = {"F1_1": 50, "F1_2": 80} region_dict["revenues"] = {"B1_1": 800, "B1_2": 900} # most capacities are 50, so start there and then override region_dict["flow capacities"] = {a: 50 for a in region_dict["arcs"]} - region_dict["flow capacities"][("F1_1","B1_1")] = None - region_dict["flow capacities"][("F1_2","B1_2")] = None - region_dict["flow costs"] = {("F1_1","DC1"): 300, ("F1_2","DC1"): 500, ("DC1","B1_1"): 200, - ("DC1","B1_2"): 400, ("F1_1","B1_1"): 700, ("F1_2","B1_2"): 1000} - - elif scenario_name=="Region2": + region_dict["flow capacities"][("F1_1", "B1_1")] = None + region_dict["flow capacities"][("F1_2", "B1_2")] = None + region_dict["flow costs"] = { + ("F1_1", "DC1"): 300, + ("F1_2", "DC1"): 500, + ("DC1", "B1_1"): 200, + ("DC1", "B1_2"): 400, + ("F1_1", "B1_1"): 700, + ("F1_2", "B1_2"): 1000, + } + + elif scenario_name == "Region2": # Creates data for Region2 - region_dict={"name": "Region2"} + region_dict = {"name": "Region2"} region_dict["nodes"] = ["DC2", "B2_1", "B2_2", "B2_3"] region_dict["factory nodes"] = list() - region_dict["buyer nodes"] = ["B2_1","B2_2","B2_3"] - region_dict["distribution center nodes"]= ["DC2"] + region_dict["buyer nodes"] = ["B2_1", "B2_2", "B2_3"] + region_dict["distribution center nodes"] = ["DC2"] region_dict["supply"] = {"B2_1": -200, "B2_2": -150, "B2_3": -100, "DC2": 0} - region_dict["arcs"] = [("DC2","B2_1"), ("DC2","B2_2"), ("DC2","B2_3")] + region_dict["arcs"] = [("DC2", "B2_1"), ("DC2", "B2_2"), ("DC2", "B2_3")] region_dict["production costs"] = {} - region_dict["revenues"] = {"B2_1": 900, "B2_2": 800, "B2_3":1200} - 
region_dict["flow capacities"] = {("DC2","B2_1"): 200, ("DC2","B2_2"): 150, ("DC2","B2_3"): 100} - region_dict["flow costs"] = {("DC2","B2_1"): 100, ("DC2","B2_2"): 200, ("DC2","B2_3"): 300} - - elif scenario_name == "Region3" : + region_dict["revenues"] = {"B2_1": 900, "B2_2": 800, "B2_3": 1200} + region_dict["flow capacities"] = { + ("DC2", "B2_1"): 200, + ("DC2", "B2_2"): 150, + ("DC2", "B2_3"): 100, + } + region_dict["flow costs"] = { + ("DC2", "B2_1"): 100, + ("DC2", "B2_2"): 200, + ("DC2", "B2_3"): 300, + } + + elif scenario_name == "Region3": # Creates data for Region3 - region_dict={"name": "Region3"} + region_dict = {"name": "Region3"} region_dict["nodes"] = ["F3_1", "F3_2", "DC3_1", "DC3_2", "B3_1", "B3_2"] - region_dict["factory nodes"] = ["F3_1","F3_2"] - region_dict["buyer nodes"] = ["B3_1","B3_2"] - region_dict["distribution center nodes"]= ["DC3_1","DC3_2"] - region_dict["supply"] = {"F3_1": 80, "F3_2": 60, "B3_1": -100, "B3_2": -100, "DC3_1": 0, "DC3_2": 0} - region_dict["arcs"] = [("F3_1","DC3_1"), ("F3_2","DC3_2"), ("DC3_1","B3_1"), - ("DC3_2","B3_2"), ("DC3_1","DC3_2"), ("DC3_2","DC3_1")] + region_dict["factory nodes"] = ["F3_1", "F3_2"] + region_dict["buyer nodes"] = ["B3_1", "B3_2"] + region_dict["distribution center nodes"] = ["DC3_1", "DC3_2"] + region_dict["supply"] = { + "F3_1": 80, + "F3_2": 60, + "B3_1": -100, + "B3_2": -100, + "DC3_1": 0, + "DC3_2": 0, + } + region_dict["arcs"] = [ + ("F3_1", "DC3_1"), + ("F3_2", "DC3_2"), + ("DC3_1", "B3_1"), + ("DC3_2", "B3_2"), + ("DC3_1", "DC3_2"), + ("DC3_2", "DC3_1"), + ] region_dict["production costs"] = {"F3_1": 50, "F3_2": 50} region_dict["revenues"] = {"B3_1": 900, "B3_2": 700} - region_dict["flow capacities"] = {("F3_1","DC3_1"): 80, ("F3_2","DC3_2"): 60, ("DC3_1","B3_1"): 100, - ("DC3_2","B3_2"): 100, ("DC3_1","DC3_2"): 70, ("DC3_2","DC3_1"): 50} - region_dict["flow costs"] = {("F3_1","DC3_1"): 100, ("F3_2","DC3_2"): 100, ("DC3_1","B3_1"): 201, - ("DC3_2","B3_2"): 200, ("DC3_1","DC3_2"): 100, ("DC3_2","DC3_1"): 100} - + region_dict["flow capacities"] = { + ("F3_1", "DC3_1"): 80, + ("F3_2", "DC3_2"): 60, + ("DC3_1", "B3_1"): 100, + ("DC3_2", "B3_2"): 100, + ("DC3_1", "DC3_2"): 70, + ("DC3_2", "DC3_1"): 50, + } + region_dict["flow costs"] = { + ("F3_1", "DC3_1"): 100, + ("F3_2", "DC3_2"): 100, + ("DC3_1", "B3_1"): 201, + ("DC3_2", "B3_2"): 200, + ("DC3_1", "DC3_2"): 100, + ("DC3_2", "DC3_1"): 100, + } + elif scenario_name == "Region4": # Creates data for Region4 - region_dict={"name": "Region4"} + region_dict = {"name": "Region4"} region_dict["nodes"] = ["F4_1", "F4_2", "DC4", "B4_1", "B4_2"] - region_dict["factory nodes"] = ["F4_1","F4_2"] - region_dict["buyer nodes"] = ["B4_1","B4_2"] + region_dict["factory nodes"] = ["F4_1", "F4_2"] + region_dict["buyer nodes"] = ["B4_1", "B4_2"] region_dict["distribution center nodes"] = ["DC4"] - region_dict["supply"] = {"F4_1": 200, "F4_2": 30, "B4_1": -100, "B4_2": -100, "DC4": 0} - region_dict["arcs"] = [("F4_1","DC4"), ("F4_2","DC4"), ("DC4","B4_1"), ("DC4","B4_2")] + region_dict["supply"] = { + "F4_1": 200, + "F4_2": 30, + "B4_1": -100, + "B4_2": -100, + "DC4": 0, + } + region_dict["arcs"] = [ + ("F4_1", "DC4"), + ("F4_2", "DC4"), + ("DC4", "B4_1"), + ("DC4", "B4_2"), + ] region_dict["production costs"] = {"F4_1": 50, "F4_2": 50} region_dict["revenues"] = {"B4_1": 900, "B4_2": 700} - region_dict["flow capacities"] = {("F4_1","DC4"): 80, ("F4_2","DC4"): 60, ("DC4","B4_1"): 100, ("DC4","B4_2"): 100} - region_dict["flow costs"] = {("F4_1","DC4"): 100, ("F4_2","DC4"): 80, 
("DC4","B4_1"): 90, ("DC4","B4_2"): 70} - + region_dict["flow capacities"] = { + ("F4_1", "DC4"): 80, + ("F4_2", "DC4"): 60, + ("DC4", "B4_1"): 100, + ("DC4", "B4_2"): 100, + } + region_dict["flow costs"] = { + ("F4_1", "DC4"): 100, + ("F4_2", "DC4"): 80, + ("DC4", "B4_1"): 90, + ("DC4", "B4_2"): 70, + } + else: - raise RuntimeError (f"unknown Region name {scenario_name}") + raise RuntimeError(f"unknown Region name {scenario_name}") - assert _is_partition(region_dict["nodes"], region_dict["factory nodes"], region_dict["buyer nodes"], region_dict["distribution center nodes"]) + assert _is_partition( + region_dict["nodes"], + region_dict["factory nodes"], + region_dict["buyer nodes"], + region_dict["distribution center nodes"], + ) return region_dict ###Creates the model when local_dict is given def min_cost_distr_problem(local_dict, sense=pyo.minimize): - """ Create an arcs formulation of network flow + """Create an arcs formulation of network flow Args: local_dict (dict): dictionary representing a region including the dummy nodes \n @@ -262,9 +384,11 @@ def min_cost_distr_problem(local_dict, sense=pyo.minimize): arcsout[a[0]].append(a) arcsin[a[1]].append(a) - model = pyo.ConcreteModel(name='MinCostFlowArcs') - def flowBounds_rule(model, i,j): - return (0, local_dict["flow capacities"][(i,j)]) + model = pyo.ConcreteModel(name="MinCostFlowArcs") + + def flowBounds_rule(model, i, j): + return (0, local_dict["flow capacities"][(i, j)]) + model.flow = pyo.Var(local_dict["arcs"], bounds=flowBounds_rule) # x def slackBounds_rule(model, n): @@ -272,34 +396,52 @@ def slackBounds_rule(model, n): return (0, local_dict["supply"][n]) elif n in local_dict["buyer nodes"]: return (local_dict["supply"][n], 0) - elif n in local_dict["dummy nodes source"]: - return (0,local_dict["dummy nodes source slack bounds"][n]) - elif n in local_dict["dummy nodes target"]: #this slack will respect the opposite flow balance rule - return (0,local_dict["dummy nodes target slack bounds"][n]) + elif n in local_dict["dummy nodes source"]: + return (0, local_dict["dummy nodes source slack bounds"][n]) + elif ( + n in local_dict["dummy nodes target"] + ): # this slack will respect the opposite flow balance rule + return (0, local_dict["dummy nodes target slack bounds"][n]) elif n in local_dict["distribution center nodes"]: - return (0,0) + return (0, 0) else: raise ValueError(f"unknown node type for node {n}") - + model.y = pyo.Var(local_dict["nodes"], bounds=slackBounds_rule) - model.MinCost = pyo.Objective(expr=\ - sum(local_dict["flow costs"][a]*model.flow[a] for a in local_dict["arcs"]) \ - + sum(local_dict["production costs"][n]*(local_dict["supply"][n]-model.y[n]) for n in local_dict["factory nodes"]) \ - + sum(local_dict["revenues"][n]*(local_dict["supply"][n]-model.y[n]) for n in local_dict["buyer nodes"]) , - sense=sense) - + model.MinCost = pyo.Objective( + expr=sum( + local_dict["flow costs"][a] * model.flow[a] for a in local_dict["arcs"] + ) + + sum( + local_dict["production costs"][n] * (local_dict["supply"][n] - model.y[n]) + for n in local_dict["factory nodes"] + ) + + sum( + local_dict["revenues"][n] * (local_dict["supply"][n] - model.y[n]) + for n in local_dict["buyer nodes"] + ), + sense=sense, + ) + def FlowBalance_rule(m, n): - #we change the definition of the slack for target dummy nodes so that we have the slack from the source and from the target equal + # we change the definition of the slack for target dummy nodes so that we have the slack from the source and from the target equal if n in 
local_dict["dummy nodes target"]:
-            return sum(m.flow[a] for a in arcsout[n])\
-                - sum(m.flow[a] for a in arcsin[n])\
-                - m.y[n] == local_dict["supply"][n]
+            return (
+                sum(m.flow[a] for a in arcsout[n])
+                - sum(m.flow[a] for a in arcsin[n])
+                - m.y[n]
+                == local_dict["supply"][n]
+            )
         else:
-            return sum(m.flow[a] for a in arcsout[n])\
-                - sum(m.flow[a] for a in arcsin[n])\
-                + m.y[n] == local_dict["supply"][n]
-    model.FlowBalance= pyo.Constraint(local_dict["nodes"], rule=FlowBalance_rule)
+            return (
+                sum(m.flow[a] for a in arcsout[n])
+                - sum(m.flow[a] for a in arcsin[n])
+                + m.y[n]
+                == local_dict["supply"][n]
+            )
+
+    model.FlowBalance = pyo.Constraint(local_dict["nodes"], rule=FlowBalance_rule)
 
     return model
 
@@ -316,22 +458,23 @@ def scenario_creator(scenario_name, num_scens=None):
     Returns:
         Pyomo ConcreteModel: the instantiated model
     """
-    assert (num_scens is not None)
+    assert num_scens is not None
    inter_region_dict = inter_region_dict_creator(num_scens)
     region_dict = region_dict_creator(scenario_name)
     # Adding dummy nodes and associated features
     local_dict = dummy_nodes_generator(region_dict, inter_region_dict)
     # Generating the model
     model = min_cost_distr_problem(local_dict)
-    
+
     varlist = list()
-    sputils.attach_root_node(model, model.MinCost, varlist)    
-    
+    sputils.attach_root_node(model, model.MinCost, varlist)
+
     return model
 
 
 ###Functions required in other files, whose constructions are specific to the problem
 
+
 def scenario_denouement(rank, scenario_name, scenario):
     """for each scenario prints its name and the final variable values
 
@@ -352,28 +495,32 @@ def consensus_vars_creator(num_scens):
     Args:
         num_scens (int): select the number of scenarios (regions) wanted
-    
+
     Returns:
-        dict: dictionary which keys are the regions and values are the list of consensus variables
+        dict: dictionary whose keys are the regions and whose values are the list of consensus variables
             present in the region
     """
-    # Due to the small size of inter_region_dict, it is not given as argument but rather created. 
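+    # Each inter-region arc produces one dummy node (source node name + target node name);
+    # the slack y[dummy_node] is registered as a consensus variable in both regions.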
     inter_region_dict = inter_region_dict_creator(num_scens)
     consensus_vars = {}
     for arc in inter_region_dict["arcs"]:
-        source,target = arc
-        region_source,node_source = source
-        region_target,node_target = target
+        source, target = arc
+        region_source, node_source = source
+        region_target, node_target = target
         dummy_node = node_source + node_target
-        vstr = f"y[{dummy_node}]" #variable name as string, y is the slack
+        vstr = f"y[{dummy_node}]"  # variable name as string, y is the slack
 
-        #adds dummy_node in the source region
-        if region_source not in consensus_vars: #initiates consensus_vars[region_source]
+        # adds dummy_node in the source region
+        if (
+            region_source not in consensus_vars
+        ):  # initiates consensus_vars[region_source]
             consensus_vars[region_source] = list()
         consensus_vars[region_source].append(vstr)
 
-        #adds dummy_node in the target region
-        if region_target not in consensus_vars: #initiates consensus_vars[region_target]
+        # adds dummy_node in the target region
+        if (
+            region_target not in consensus_vars
+        ):  # initiates consensus_vars[region_target]
             consensus_vars[region_target] = list()
         consensus_vars[region_target].append(vstr)
     return consensus_vars
 
@@ -399,13 +546,16 @@ def kw_creator(cfg):
     Returns:
         dict (str): the kwargs that are used in distr.scenario_creator, here {"num_scens": num_scens}
     """
-    kwargs = {"num_scens" : cfg.get('num_scens', None),
-              }
+    kwargs = {
+        "num_scens": cfg.get("num_scens", None),
+    }
     if kwargs["num_scens"] not in [2, 3, 4]:
-        RuntimeError (f"unexpected number of regions {cfg.num_scens}, whould be in [2, 3, 4]")
+        raise RuntimeError(
+            f"unexpected number of regions {cfg.num_scens}, should be in [2, 3, 4]"
+        )
     return kwargs
 
 
 def inparser_adder(cfg):
-    #requires the user to give the number of scenarios
-    cfg.num_scens_required()
\ No newline at end of file
+    # requires the user to give the number of scenarios
+    cfg.num_scens_required()
diff --git a/mpisppy/tests/examples/farmer.py b/mpisppy/tests/examples/farmer.py
index ce0dc47a7..45efc0cec 100644
--- a/mpisppy/tests/examples/farmer.py
+++ b/mpisppy/tests/examples/farmer.py
@@ -13,8 +13,8 @@
 #
 # Pyomo: Python Optimization Modeling Objects
 # Copyright 2018 National Technology and Engineering Solutions of Sandia, LLC
-# Under the terms of Contract DE-NA0003525 with National Technology and 
-# Engineering Solutions of Sandia, LLC, the U.S. Government retains certain 
+# Under the terms of Contract DE-NA0003525 with National Technology and
+# Engineering Solutions of Sandia, LLC, the U.S. Government retains certain
 # rights in this software.
 # This software is distributed under the 3-clause BSD License.
 # ___________________________________________________________________________
@@ -29,12 +29,17 @@
 # Use this random stream:
 farmerstream = np.random.RandomState()
 
+
 def scenario_creator(
-    scenario_name, use_integer=False, sense=pyo.minimize, crops_multiplier=1,
-    num_scens=None, seedoffset=0
+    scenario_name,
+    use_integer=False,
+    sense=pyo.minimize,
+    crops_multiplier=1,
+    num_scens=None,
+    seedoffset=0,
 ):
-    """ Create a scenario for the (scalable) farmer example.
-    
+    """Create a scenario for the (scalable) farmer example.
+
     Args:
         scenario_name (str):
             Name of the scenario to construct.
@@ -47,24 +52,24 @@ def scenario_creator(
             Factor to control scaling. There will be three
             times this many crops. Default is 1.
         num_scens (int, optional):
-            Number of scenarios. We use it to compute _mpisppy_probability. 
+            Number of scenarios. We use it to compute _mpisppy_probability.
             Default is None.
seedoffset (int): used by confidence interval code """ # scenario_name has the form e.g. scen12, foobar7 # The digits are scraped off the right of scenario_name using regex then # converted mod 3 into one of the below avg./avg./above avg. scenarios - scennum = sputils.extract_num(scenario_name) - basenames = ['BelowAverageScenario', 'AverageScenario', 'AboveAverageScenario'] - basenum = scennum % 3 - groupnum = scennum // 3 - scenname = basenames[basenum]+str(groupnum) + scennum = sputils.extract_num(scenario_name) + basenames = ["BelowAverageScenario", "AverageScenario", "AboveAverageScenario"] + basenum = scennum % 3 + groupnum = scennum // 3 + scenname = basenames[basenum] + str(groupnum) # The RNG is seeded with the scenario number so that it is # reproducible when used with multiple threads. # NOTE: if you want to do replicates, you will need to pass a seed # as a kwarg to scenario_creator then use seed+scennum as the seed argument. - farmerstream.seed(scennum+seedoffset) + farmerstream.seed(scennum + seedoffset) # Check for minimization vs. maximization if sense not in [pyo.minimize, pyo.maximize]: @@ -91,12 +96,13 @@ def scenario_creator( scen_model=model, ) ] - - #Add the probability of the scenario - if num_scens is not None : - model._mpisppy_probability = 1/num_scens + + # Add the probability of the scenario + if num_scens is not None: + model._mpisppy_probability = 1 / num_scens return model + def pysp_instance_creation_callback( scenario_name, use_integer=False, sense=pyo.minimize, crops_multiplier=1 ): @@ -108,15 +114,15 @@ def pysp_instance_creation_callback( # scenarios come in groups of three scengroupnum = sputils.extract_num(scenario_name) scenario_base_name = scenario_name.rstrip("0123456789") - + model = pyo.ConcreteModel(scenario_name) def crops_init(m): retval = [] for i in range(crops_multiplier): - retval.append("WHEAT"+str(i)) - retval.append("CORN"+str(i)) - retval.append("SUGAR_BEETS"+str(i)) + retval.append("WHEAT" + str(i)) + retval.append("CORN" + str(i)) + retval.append("SUGAR_BEETS" + str(i)) return retval model.CROPS = pyo.Set(initialize=crops_init) @@ -130,63 +136,66 @@ def crops_init(m): def _scale_up_data(indict): outdict = {} for i in range(crops_multiplier): - for crop in ['WHEAT', 'CORN', 'SUGAR_BEETS']: - outdict[crop+str(i)] = indict[crop] + for crop in ["WHEAT", "CORN", "SUGAR_BEETS"]: + outdict[crop + str(i)] = indict[crop] return outdict - + model.PriceQuota = _scale_up_data( - {'WHEAT':100000.0,'CORN':100000.0,'SUGAR_BEETS':6000.0}) + {"WHEAT": 100000.0, "CORN": 100000.0, "SUGAR_BEETS": 6000.0} + ) model.SubQuotaSellingPrice = _scale_up_data( - {'WHEAT':170.0,'CORN':150.0,'SUGAR_BEETS':36.0}) + {"WHEAT": 170.0, "CORN": 150.0, "SUGAR_BEETS": 36.0} + ) model.SuperQuotaSellingPrice = _scale_up_data( - {'WHEAT':0.0,'CORN':0.0,'SUGAR_BEETS':10.0}) + {"WHEAT": 0.0, "CORN": 0.0, "SUGAR_BEETS": 10.0} + ) model.CattleFeedRequirement = _scale_up_data( - {'WHEAT':200.0,'CORN':240.0,'SUGAR_BEETS':0.0}) + {"WHEAT": 200.0, "CORN": 240.0, "SUGAR_BEETS": 0.0} + ) model.PurchasePrice = _scale_up_data( - {'WHEAT':238.0,'CORN':210.0,'SUGAR_BEETS':100000.0}) + {"WHEAT": 238.0, "CORN": 210.0, "SUGAR_BEETS": 100000.0} + ) model.PlantingCostPerAcre = _scale_up_data( - {'WHEAT':150.0,'CORN':230.0,'SUGAR_BEETS':260.0}) + {"WHEAT": 150.0, "CORN": 230.0, "SUGAR_BEETS": 260.0} + ) # # Stochastic Data # Yield = {} - Yield['BelowAverageScenario'] = \ - {'WHEAT':2.0,'CORN':2.4,'SUGAR_BEETS':16.0} - Yield['AverageScenario'] = \ - {'WHEAT':2.5,'CORN':3.0,'SUGAR_BEETS':20.0} - 
Yield['AboveAverageScenario'] = \ - {'WHEAT':3.0,'CORN':3.6,'SUGAR_BEETS':24.0} + Yield["BelowAverageScenario"] = {"WHEAT": 2.0, "CORN": 2.4, "SUGAR_BEETS": 16.0} + Yield["AverageScenario"] = {"WHEAT": 2.5, "CORN": 3.0, "SUGAR_BEETS": 20.0} + Yield["AboveAverageScenario"] = {"WHEAT": 3.0, "CORN": 3.6, "SUGAR_BEETS": 24.0} def Yield_init(m, cropname): # yield as in "crop yield" crop_base_name = cropname.rstrip("0123456789") if scengroupnum != 0: - return Yield[scenario_base_name][crop_base_name]+farmerstream.rand() + return Yield[scenario_base_name][crop_base_name] + farmerstream.rand() else: return Yield[scenario_base_name][crop_base_name] - model.Yield = pyo.Param(model.CROPS, - within=pyo.NonNegativeReals, - initialize=Yield_init, - mutable=True) + model.Yield = pyo.Param( + model.CROPS, within=pyo.NonNegativeReals, initialize=Yield_init, mutable=True + ) # # Variables # - if (use_integer): - model.DevotedAcreage = pyo.Var(model.CROPS, - within=pyo.NonNegativeIntegers, - bounds=(0.0, model.TOTAL_ACREAGE)) + if use_integer: + model.DevotedAcreage = pyo.Var( + model.CROPS, + within=pyo.NonNegativeIntegers, + bounds=(0.0, model.TOTAL_ACREAGE), + ) else: - model.DevotedAcreage = pyo.Var(model.CROPS, - bounds=(0.0, model.TOTAL_ACREAGE)) + model.DevotedAcreage = pyo.Var(model.CROPS, bounds=(0.0, model.TOTAL_ACREAGE)) model.QuantitySubQuotaSold = pyo.Var(model.CROPS, bounds=(0.0, None)) model.QuantitySuperQuotaSold = pyo.Var(model.CROPS, bounds=(0.0, None)) @@ -202,12 +211,25 @@ def ConstrainTotalAcreage_rule(model): model.ConstrainTotalAcreage = pyo.Constraint(rule=ConstrainTotalAcreage_rule) def EnforceCattleFeedRequirement_rule(model, i): - return model.CattleFeedRequirement[i] <= (model.Yield[i] * model.DevotedAcreage[i]) + model.QuantityPurchased[i] - model.QuantitySubQuotaSold[i] - model.QuantitySuperQuotaSold[i] + return ( + model.CattleFeedRequirement[i] + <= (model.Yield[i] * model.DevotedAcreage[i]) + + model.QuantityPurchased[i] + - model.QuantitySubQuotaSold[i] + - model.QuantitySuperQuotaSold[i] + ) - model.EnforceCattleFeedRequirement = pyo.Constraint(model.CROPS, rule=EnforceCattleFeedRequirement_rule) + model.EnforceCattleFeedRequirement = pyo.Constraint( + model.CROPS, rule=EnforceCattleFeedRequirement_rule + ) def LimitAmountSold_rule(model, i): - return model.QuantitySubQuotaSold[i] + model.QuantitySuperQuotaSold[i] - (model.Yield[i] * model.DevotedAcreage[i]) <= 0.0 + return ( + model.QuantitySubQuotaSold[i] + + model.QuantitySuperQuotaSold[i] + - (model.Yield[i] * model.DevotedAcreage[i]) + <= 0.0 + ) model.LimitAmountSold = pyo.Constraint(model.CROPS, rule=LimitAmountSold_rule) @@ -220,63 +242,79 @@ def EnforceQuotas_rule(model, i): def ComputeFirstStageCost_rule(model): return pyo.sum_product(model.PlantingCostPerAcre, model.DevotedAcreage) + model.FirstStageCost = pyo.Expression(rule=ComputeFirstStageCost_rule) def ComputeSecondStageCost_rule(model): expr = pyo.sum_product(model.PurchasePrice, model.QuantityPurchased) expr -= pyo.sum_product(model.SubQuotaSellingPrice, model.QuantitySubQuotaSold) - expr -= pyo.sum_product(model.SuperQuotaSellingPrice, model.QuantitySuperQuotaSold) + expr -= pyo.sum_product( + model.SuperQuotaSellingPrice, model.QuantitySuperQuotaSold + ) return expr + model.SecondStageCost = pyo.Expression(rule=ComputeSecondStageCost_rule) def total_cost_rule(model): - if (sense == pyo.minimize): + if sense == pyo.minimize: return model.FirstStageCost + model.SecondStageCost return -model.FirstStageCost - model.SecondStageCost - 
model.Total_Cost_Objective = pyo.Objective(rule=total_cost_rule, - sense=sense) + + model.Total_Cost_Objective = pyo.Objective(rule=total_cost_rule, sense=sense) return model + # begin functions not needed by farmer_cylinders # (but needed by special codes such as confidence intervals) -#========= -def scenario_names_creator(num_scens,start=None): +# ========= +def scenario_names_creator(num_scens, start=None): # (only for Amalgamator): return the full list of num_scens scenario names # if start!=None, the list starts with the 'start' labeled scenario - if (start is None) : - start=0 - return [f"scen{i}" for i in range(start,start+num_scens)] - + if start is None: + start = 0 + return [f"scen{i}" for i in range(start, start + num_scens)] -#========= +# ========= def inparser_adder(cfg): # add options unique to farmer cfg.num_scens_required() - cfg.add_to_config("crops_multiplier", - description="number of crops will be three times this (default 1)", - domain=int, - default=1) - - cfg.add_to_config("farmer_with_integers", - description="make the version that has integers (default False)", - domain=bool, - default=False) - - -#========= + cfg.add_to_config( + "crops_multiplier", + description="number of crops will be three times this (default 1)", + domain=int, + default=1, + ) + + cfg.add_to_config( + "farmer_with_integers", + description="make the version that has integers (default False)", + domain=bool, + default=False, + ) + + +# ========= def kw_creator(options): # (only for Amalgamator): linked to the scenario_creator and inparser_adder - kwargs = {"use_integer": options.get('use_integer', False), - "crops_multiplier": options.get('crops_multiplier', 1), - "num_scens" : options.get('num_scens', None), - } + kwargs = { + "use_integer": options.get("use_integer", False), + "crops_multiplier": options.get("crops_multiplier", 1), + "num_scens": options.get("num_scens", None), + } return kwargs -def sample_tree_scen_creator(sname, stage, sample_branching_factors, seed, - given_scenario=None, **scenario_creator_kwargs): - """ Create a scenario within a sample tree. Mainly for multi-stage and simple for two-stage. + +def sample_tree_scen_creator( + sname, + stage, + sample_branching_factors, + seed, + given_scenario=None, + **scenario_creator_kwargs, +): + """Create a scenario within a sample tree. Mainly for multi-stage and simple for two-stage. (this function supports zhat and confidence interval code) Args: sname (string): scenario name to be created @@ -299,12 +337,16 @@ def sample_tree_scen_creator(sname, stage, sample_branching_factors, seed, # end functions not needed by farmer_cylinders -#============================ +# ============================ def scenario_denouement(rank, scenario_name, scenario): sname = scenario_name s = scenario - if sname == 'scen0': + if sname == "scen0": print("Arbitrary sanity checks:") - print ("SUGAR_BEETS0 for scenario",sname,"is", - pyo.value(s.DevotedAcreage["SUGAR_BEETS0"])) - print ("FirstStageCost for scenario",sname,"is", pyo.value(s.FirstStageCost)) + print( + "SUGAR_BEETS0 for scenario", + sname, + "is", + pyo.value(s.DevotedAcreage["SUGAR_BEETS0"]), + ) + print("FirstStageCost for scenario", sname, "is", pyo.value(s.FirstStageCost)) diff --git a/mpisppy/tests/examples/gbd/gbd.py b/mpisppy/tests/examples/gbd/gbd.py index 9ced77d94..1f1001bdb 100644 --- a/mpisppy/tests/examples/gbd/gbd.py +++ b/mpisppy/tests/examples/gbd/gbd.py @@ -6,9 +6,9 @@ # All rights reserved. 
Please see the files COPYRIGHT.md and LICENSE.md for # full copyright and license information. ############################################################################### -#ReferenceModel for full set of scenarios for GBD; Aug 2021 -#From original Ferguson and Dantzig 1956 -#Extended scenarios as done in Seq Sampling by B&M +# ReferenceModel for full set of scenarios for GBD; Aug 2021 +# From original Ferguson and Dantzig 1956 +# Extended scenarios as done in Seq Sampling by B&M import pyomo.environ as pyo import numpy as np @@ -18,20 +18,20 @@ import json # Use this random stream: -gbdstream= np.random.RandomState() +gbdstream = np.random.RandomState() -with open('gbd_data/gbd_extended_data.json','r') as openfile: - gbd_extended_data = json.load(openfile) +with open("gbd_data/gbd_extended_data.json", "r") as openfile: + gbd_extended_data = json.load(openfile) -def GBD_model_creator(seed): +def GBD_model_creator(seed): gbdstream.seed(seed) - random_array = gbdstream.rand(5) #We only use 5 random numbers - + random_array = gbdstream.rand(5) # We only use 5 random numbers + # # Model # - + model = pyo.ConcreteModel() model.aircraftType = range(4) model.route = range(5) @@ -39,9 +39,9 @@ def GBD_model_creator(seed): # number of aircraft type i sent on route j model.x = pyo.Var(model.aircraftType, model.route, within=pyo.NonNegativeReals) # these aircrafts cannot fly these routes: - model.x[1,0].fix(0) - model.x[2,0].fix(0) - model.x[2,2].fix(0) + model.x[1, 0].fix(0) + model.x[2, 0].fix(0) + model.x[2, 2].fix(0) # excess aircraft model.aircraftSlack = pyo.Var(model.aircraftType, within=pyo.NonNegativeReals) @@ -49,70 +49,115 @@ def GBD_model_creator(seed): # excess / deficit demand in hundreds of passengers model.passengerSlack_pos = pyo.Var(model.route, within=pyo.NonNegativeReals) model.passengerSlack_neg = pyo.Var(model.route, within=pyo.NonNegativeReals) - # # Parameters # - model.numAircrafts = [10,19,25,15] # fixed aircraft inventory + model.numAircrafts = [10, 19, 25, 15] # fixed aircraft inventory # (hundreds) of passengers which can be hauled per month for # each aircraft type and route (see paper for formulation) model.p = { - (1,1): 16, (1,2):15, (1,3):28, (1,4):23, (1,5):81, (1,6):0, - - (2,1):0, (2,2): 10, (2,3):14, (2,4):15, (2,5):57, (2,6):0, - - (3,1):0, (3,2):5, (3,3):0, (3,4):7, (3,5):29, (3,6):0, - - (4,1):9, (4,2):11, (4,3):22, (4,4):17, (4,5):55, (4,6):0, - - (5,1):1, (5,2):1, (5,3):1, (5,4):1, (5,5):1, (5,6):0 + (1, 1): 16, + (1, 2): 15, + (1, 3): 28, + (1, 4): 23, + (1, 5): 81, + (1, 6): 0, + (2, 1): 0, + (2, 2): 10, + (2, 3): 14, + (2, 4): 15, + (2, 5): 57, + (2, 6): 0, + (3, 1): 0, + (3, 2): 5, + (3, 3): 0, + (3, 4): 7, + (3, 5): 29, + (3, 6): 0, + (4, 1): 9, + (4, 2): 11, + (4, 3): 22, + (4, 4): 17, + (4, 5): 55, + (4, 6): 0, + (5, 1): 1, + (5, 2): 1, + (5, 3): 1, + (5, 4): 1, + (5, 5): 1, + (5, 6): 0, } # cost per month (in thousands) for each aircraft type for each route model.c = { - (1,1): 18, (1,2):21, (1,3):18, (1,4):16, (1,5):10, (1,6):0, - - (2,1):0, (2,2): 15, (2,3):16, (2,4):14, (2,5):9, (2,6):0, - - (3,1):0, (3,2):10, (3,3):0, (3,4):9, (3,5):6, (3,6):0, - - (4,1):17, (4,2):16, (4,3):17, (4,4):15, (4,5):10, (4,6):0, - - (5,1):13, (5,2):13, (5,3):7, (5,4):7, (5,5):1, (5,6):0 + (1, 1): 18, + (1, 2): 21, + (1, 3): 18, + (1, 4): 16, + (1, 5): 10, + (1, 6): 0, + (2, 1): 0, + (2, 2): 15, + (2, 3): 16, + (2, 4): 14, + (2, 5): 9, + (2, 6): 0, + (3, 1): 0, + (3, 2): 10, + (3, 3): 0, + (3, 4): 9, + (3, 5): 6, + (3, 6): 0, + (4, 1): 17, + (4, 2): 16, + (4, 3): 17, 
+ (4, 4): 15, + (4, 5): 10, + (4, 6): 0, + (5, 1): 13, + (5, 2): 13, + (5, 3): 7, + (5, 4): 7, + (5, 5): 1, + (5, 6): 0, } - #demand uncertainty - - possible_demands = (gbd_extended_data['r1_dmds'], - gbd_extended_data['r2_dmds'], - gbd_extended_data['r3_dmds'], - gbd_extended_data['r4_dmds'], - gbd_extended_data['r5_dmds'] - ) - demand_probs = (gbd_extended_data['r1_prbs'], - gbd_extended_data['r2_prbs'], - gbd_extended_data['r3_prbs'], - gbd_extended_data['r4_prbs'], - gbd_extended_data['r5_prbs'] - ) - - demand_cumprobs=(np.flip(np.cumsum(np.flip(demand_probs[0]))), - np.flip(np.cumsum(np.flip(demand_probs[1]))), - np.flip(np.cumsum(np.flip(demand_probs[2]))), - np.flip(np.cumsum(np.flip(demand_probs[3]))), - np.flip(np.cumsum(np.flip(demand_probs[4])))) + # demand uncertainty + + possible_demands = ( + gbd_extended_data["r1_dmds"], + gbd_extended_data["r2_dmds"], + gbd_extended_data["r3_dmds"], + gbd_extended_data["r4_dmds"], + gbd_extended_data["r5_dmds"], + ) + demand_probs = ( + gbd_extended_data["r1_prbs"], + gbd_extended_data["r2_prbs"], + gbd_extended_data["r3_prbs"], + gbd_extended_data["r4_prbs"], + gbd_extended_data["r5_prbs"], + ) + + demand_cumprobs = ( + np.flip(np.cumsum(np.flip(demand_probs[0]))), + np.flip(np.cumsum(np.flip(demand_probs[1]))), + np.flip(np.cumsum(np.flip(demand_probs[2]))), + np.flip(np.cumsum(np.flip(demand_probs[3]))), + np.flip(np.cumsum(np.flip(demand_probs[4]))), + ) # original demands fron 1956 paper: - # possible_demands = ([20, 22, 25, 27, 30], + # possible_demands = ([20, 22, 25, 27, 30], # [5, 15], # [14, 16, 18, 20, 22], # [1, 5, 8, 10, 34], # [58, 60, 62] # ) - # demand_probs = ([.2, .05, .35, .2, .2], + # demand_probs = ([.2, .05, .35, .2, .2], # [.3, .7], # [.1, .2, .4, .2, .1], # [.2, .2, .3, .2, .1], @@ -125,31 +170,40 @@ def GBD_model_creator(seed): # [1, .9, .1] # ) - # Assign demands - def demands_init(m,g): + def demands_init(m, g): rd = random_array[g] - j = np.searchsorted(np.flip(demand_cumprobs[g]),rd) - return possible_demands[g][len(demand_cumprobs[g])-1 - j] + j = np.searchsorted(np.flip(demand_cumprobs[g]), rd) + return possible_demands[g][len(demand_cumprobs[g]) - 1 - j] - model.passengerDemand = pyo.Param(model.route, within=pyo.NonNegativeReals, - initialize=demands_init) + model.passengerDemand = pyo.Param( + model.route, within=pyo.NonNegativeReals, initialize=demands_init + ) # # Constraints # - + # aircraft inventory constraint def AircraftSupply_rule(m, a): - return sum(model.x[a,j] for j in model.route) + model.aircraftSlack[a] == model.numAircrafts[a] + return ( + sum(model.x[a, j] for j in model.route) + model.aircraftSlack[a] + == model.numAircrafts[a] + ) - model.SatisfyAircraftConstraints = pyo.Constraint(model.aircraftType, rule=AircraftSupply_rule) + model.SatisfyAircraftConstraints = pyo.Constraint( + model.aircraftType, rule=AircraftSupply_rule + ) # route demand constraint - def RouteDemand_rule(m,r): - return sum(model.p[i+1,r+1]*model.x[i,r] for i in model.aircraftType) +\ - model.p[5,r+1]*(model.passengerSlack_pos[r] - model.passengerSlack_neg[r]) == model.passengerDemand[r] + def RouteDemand_rule(m, r): + return ( + sum(model.p[i + 1, r + 1] * model.x[i, r] for i in model.aircraftType) + + model.p[5, r + 1] + * (model.passengerSlack_pos[r] - model.passengerSlack_neg[r]) + == model.passengerDemand[r] + ) model.SatisftyDemandConstraints = pyo.Constraint(model.route, rule=RouteDemand_rule) @@ -158,12 +212,16 @@ def RouteDemand_rule(m,r): # def ComputeOperatingCosts(m): - return 
sum(model.c[i+1,j+1]*model.x[i,j] for i in model.aircraftType for j in model.route) + return sum( + model.c[i + 1, j + 1] * model.x[i, j] + for i in model.aircraftType + for j in model.route + ) model.OperatingCosts = pyo.Expression(rule=ComputeOperatingCosts) def ComputePassengerRevenueLost(m): - return sum(model.c[5, j+1]*model.passengerSlack_pos[j] for j in model.route) + return sum(model.c[5, j + 1] * model.passengerSlack_pos[j] for j in model.route) model.PassengerRevenueLost = pyo.Expression(rule=ComputePassengerRevenueLost) @@ -172,13 +230,13 @@ def ComputeTotalCost(m): model.obj = pyo.Objective(rule=ComputeTotalCost) - return(model) + return model def scenario_creator(sname, num_scens=None): - scennum = sputils.extract_num(sname) + scennum = sputils.extract_num(sname) model = GBD_model_creator(scennum) - + # Create the list of nodes associated with the scenario (for two stage, # there is only one node associated with the scenario--leaf nodes are # ignored). @@ -192,46 +250,47 @@ def scenario_creator(sname, num_scens=None): scen_model=model, ) ] - #Add the probability of the scenario - if num_scens is not None : - model._mpisppy_probability = 1/num_scens - - return(model) + # Add the probability of the scenario + if num_scens is not None: + model._mpisppy_probability = 1 / num_scens + return model -#========= -def scenario_names_creator(num_scens,start=None): + +# ========= +def scenario_names_creator(num_scens, start=None): # (only for Amalgamator): return the full list of num_scens scenario names # if start!=None, the list starts with the 'start' labeled scenario - if (start is None) : - start=0 - return [f"scen{i}" for i in range(start,start+num_scens)] - + if start is None: + start = 0 + return [f"scen{i}" for i in range(start, start + num_scens)] + -#========= +# ========= def inparser_adder(inparser): # (only for Amalgamator): add command options unique to gbd pass -#========= +# ========= def kw_creator(options): # (only for Amalgamator): linked to the scenario_creator and inparser_adder - kwargs = {"num_scens" : options['num_scens'] if 'num_scens' in options else None, - } + kwargs = { + "num_scens": options["num_scens"] if "num_scens" in options else None, + } return kwargs -#============================ +# ============================ def scenario_denouement(rank, scenario_name, scenario): pass -#============================ +# ============================ def xhat_generator_gbd(scenario_names, solver_name="gurobi", solver_options=None): - ''' + """ For sequential sampling. - Takes scenario names as input and provide the best solution for the + Takes scenario names as input and provide the best solution for the approximate problem associated with the scenarios. Parameters ---------- @@ -247,85 +306,96 @@ def xhat_generator_gbd(scenario_names, solver_name="gurobi", solver_options=None xhat: str A generated xhat, solution to the approximate problem induced by scenario_names. - ''' + """ num_scens = len(scenario_names) - - ama_options = { "EF-2stage": True, - "EF_solver_name": solver_name, - "EF_solver_options": solver_options, - "num_scens": num_scens, - "_mpisppy_probability": 1/num_scens, - } - #We use from_module to build easily an Amalgamator object - ama = amalgamator.from_module("mpisppy.tests.examples.gbd.gbd", - ama_options,use_command_line=False) - #Correcting the building by putting the right scenarios. 
+ + ama_options = { + "EF-2stage": True, + "EF_solver_name": solver_name, + "EF_solver_options": solver_options, + "num_scens": num_scens, + "_mpisppy_probability": 1 / num_scens, + } + # We use from_module to build easily an Amalgamator object + ama = amalgamator.from_module( + "mpisppy.tests.examples.gbd.gbd", ama_options, use_command_line=False + ) + # Correcting the building by putting the right scenarios. ama.scenario_names = scenario_names ama.run() - + # get the xhat xhat = sputils.nonant_cache_from_ef(ama.ef) return xhat -if __name__ == "__main__": +if __name__ == "__main__": num_scens = 300 - solver_name = 'gurobi_direct' - solver_options=None - - scenario_names = scenario_names_creator(num_scens,start=6000) - - ama_options = { "EF-2stage": True, - "EF_solver_name": solver_name, - "EF_solver_options": solver_options, - "num_scens": num_scens, - "_mpisppy_probability": None, - } - #We use from_module to build easily an Amalgamator object - ama = amalgamator.from_module("mpisppy.tests.examples.gbd.gbd", - ama_options,use_command_line=False) - #Correcting the building by putting the right scenarios. + solver_name = "gurobi_direct" + solver_options = None + + scenario_names = scenario_names_creator(num_scens, start=6000) + + ama_options = { + "EF-2stage": True, + "EF_solver_name": solver_name, + "EF_solver_options": solver_options, + "num_scens": num_scens, + "_mpisppy_probability": None, + } + # We use from_module to build easily an Amalgamator object + ama = amalgamator.from_module( + "mpisppy.tests.examples.gbd.gbd", ama_options, use_command_line=False + ) + # Correcting the building by putting the right scenarios. ama.scenario_names = scenario_names ama.run() print("inner bound=", ama.best_inner_bound) print("outer bound=", ama.best_outer_bound) xhat = sputils.nonant_cache_from_ef(ama.ef) - print("xhat=",xhat['ROOT']) + print("xhat=", xhat["ROOT"]) from mpisppy.confidence_intervals.seqsampling import SeqSampling - optionsFSP = {'eps': 8.0, - 'solver_name': "gurobi_direct", - "c0":300, - "kf_Gs":25, - "kf_xhat":25} - - #from seq samp paper: - optionsBM_GBD_300 = { 'h':0.18, - 'hprime':0.015, - 'eps':2e-7, - 'epsprime':1e-7, - "p":0.191, - "kf_Gs":25, - "kf_xhat":1, - "solver_name":"gurobi_direct", - } - optionsBM_GBD_500 = { 'h':0.1427, - 'hprime':0.015, - 'eps':2e-7, - 'epsprime':1e-7, - "p":0.191, - "kf_Gs":25, - "kf_xhat":1, - "solver_name":"gurobi_direct", - } - - gbd_pb = SeqSampling("mpisppy.tests.examples.gbd.gbd", - xhat_generator_gbd, - optionsFSP, - stopping_criterion="BPL", - stochastic_sampling=False) + + optionsFSP = { + "eps": 8.0, + "solver_name": "gurobi_direct", + "c0": 300, + "kf_Gs": 25, + "kf_xhat": 25, + } + + # from seq samp paper: + optionsBM_GBD_300 = { + "h": 0.18, + "hprime": 0.015, + "eps": 2e-7, + "epsprime": 1e-7, + "p": 0.191, + "kf_Gs": 25, + "kf_xhat": 1, + "solver_name": "gurobi_direct", + } + optionsBM_GBD_500 = { + "h": 0.1427, + "hprime": 0.015, + "eps": 2e-7, + "epsprime": 1e-7, + "p": 0.191, + "kf_Gs": 25, + "kf_xhat": 1, + "solver_name": "gurobi_direct", + } + + gbd_pb = SeqSampling( + "mpisppy.tests.examples.gbd.gbd", + xhat_generator_gbd, + optionsFSP, + stopping_criterion="BPL", + stochastic_sampling=False, + ) res = gbd_pb.run() print(res) diff --git a/mpisppy/tests/examples/sizes/sizes.py b/mpisppy/tests/examples/sizes/sizes.py index 817043c19..9b8cb56f0 100644 --- a/mpisppy/tests/examples/sizes/sizes.py +++ b/mpisppy/tests/examples/sizes/sizes.py @@ -10,10 +10,11 @@ import mpisppy.tests.examples.sizes.ReferenceModel as ref import 
mpisppy.utils.sputils as sputils
 
+
 def scenario_creator(scenario_name, scenario_count=None):
-    """ The callback needs to create an instance and then attach
-        the PySP nodes to it in a list _mpisppy_node_list ordered by stages.
-        Optionally attach _PHrho.
+    """The callback needs to create an instance and then attach
+    the PySP nodes to it in a list _mpisppy_node_list ordered by stages.
+    Optionally attach _PHrho.
     """
     if scenario_count is None:
         raise ValueError("Sizes scenario_creator requires a scenario_count kwarg")
@@ -41,7 +42,7 @@ def scenario_denouement(rank, scenario_name, scenario):
 
 
 def _rho_setter(scen):
-    """ rho values for the scenario.
+    """rho values for the scenario.
     Args:
         scen (pyo.ConcreteModel): the scenario
     Returns:
@@ -65,7 +66,7 @@ def _rho_setter(scen):
 
 
 def id_fix_list_fct(s):
-    """ specify tuples used by the fixer.
+    """specify tuples used by the fixer.
 
     Args:
         s (ConcreteModel): the sizes instance.
diff --git a/mpisppy/tests/examples/stoch_distr/stoch_distr.py b/mpisppy/tests/examples/stoch_distr/stoch_distr.py
index 01cbe47f0..d4620e05d 100644
--- a/mpisppy/tests/examples/stoch_distr/stoch_distr.py
+++ b/mpisppy/tests/examples/stoch_distr/stoch_distr.py
@@ -15,7 +15,7 @@
 import re
 import mpisppy.scenario_tree as scenario_tree
 
-# This version present in the test slightly differs from the original 
+# This version present in the test slightly differs from the original
 # as it allows a three-stage problem
 
 # In this file, we create a stochastic (linear) inter-region minimal cost distribution problem.
@@ -23,9 +23,12 @@
 # and amongst the different regions in inter_region_dict_creator
 # The data slightly differs depending on the number of regions (num_admm_subproblems) which is created for 2, 3 or 4 regions
 
-### The following functions create the data 
+### The following functions create the data
 
-def inter_region_dict_creator(num_admm_subproblems): #doesn't depend on the scenarios in this example
+
+def inter_region_dict_creator(
+    num_admm_subproblems,
+):  # doesn't depend on the scenarios in this example
     """Creates the oriented arcs between the regions, with their capacities and costs. \n
     This dictionary represents the inter-region constraints and flows. It indicates where to add dummy nodes.
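A quick sketch of the returned structure may help here; this is the two-subproblem case exactly as built a few hunks below (no new data, just reshaped for reading):

    # num_admm_subproblems == 2: a single oriented arc couples the two regions.
    arc = (("Region1", "DC1"), ("Region2", "DC2"))  # ((region, node), (region, node))
    inter_region_dict = {
        "arcs": [arc],
        "costs": {arc: 100},      # cost per unit of flow on the arc
        "capacities": {arc: 70},  # upper bound on the flow
    }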
@@ -33,55 +36,85 @@ def inter_region_dict_creator(num_admm_subproblems): #doesn't depend on the scen num_admm_subproblems (int): select the number of subproblems (regions) wanted Returns: - dict: + dict: Each arc is presented as a pair (source, target) with source and target containing (scenario_name, node_name) \n The arcs are used as keys for the dictionaries of costs and capacities """ - inter_region_dict={} + inter_region_dict = {} if num_admm_subproblems == 2: - inter_region_dict["arcs"]=[(("Region1","DC1"),("Region2","DC2"))] - inter_region_dict["costs"]={(("Region1","DC1"),("Region2","DC2")): 100} - inter_region_dict["capacities"]={(("Region1","DC1"),("Region2","DC2")): 70} + inter_region_dict["arcs"] = [(("Region1", "DC1"), ("Region2", "DC2"))] + inter_region_dict["costs"] = {(("Region1", "DC1"), ("Region2", "DC2")): 100} + inter_region_dict["capacities"] = {(("Region1", "DC1"), ("Region2", "DC2")): 70} elif num_admm_subproblems == 3: - inter_region_dict["arcs"] = [(("Region1","DC1"),("Region2","DC2")),(("Region2","DC2"),("Region1","DC1")),\ - (("Region1","DC1"),("Region3","DC3_1")),(("Region3","DC3_1"),("Region1","DC1")),\ - (("Region2","DC2"),("Region3","DC3_2")),(("Region3","DC3_2"),("Region2","DC2")),\ - ] - inter_region_dict["costs"] = {(("Region1","DC1"),("Region2","DC2")): 100, (("Region2","DC2"),("Region1","DC1")): 50,\ - (("Region1","DC1"),("Region3","DC3_1")): 200, (("Region3","DC3_1"),("Region1","DC1")): 200,\ - (("Region2","DC2"),("Region3","DC3_2")): 200, (("Region3","DC3_2"),("Region2","DC2")): 200,\ - } - inter_region_dict["capacities"] = {(("Region1","DC1"),("Region2","DC2")): 70, (("Region2","DC2"),("Region1","DC1")): 100,\ - (("Region1","DC1"),("Region3","DC3_1")): 50, (("Region3","DC3_1"),("Region1","DC1")): 50,\ - (("Region2","DC2"),("Region3","DC3_2")): 50, (("Region3","DC3_2"),("Region2","DC2")): 50,\ - } - + inter_region_dict["arcs"] = [ + (("Region1", "DC1"), ("Region2", "DC2")), + (("Region2", "DC2"), ("Region1", "DC1")), + (("Region1", "DC1"), ("Region3", "DC3_1")), + (("Region3", "DC3_1"), ("Region1", "DC1")), + (("Region2", "DC2"), ("Region3", "DC3_2")), + (("Region3", "DC3_2"), ("Region2", "DC2")), + ] + inter_region_dict["costs"] = { + (("Region1", "DC1"), ("Region2", "DC2")): 100, + (("Region2", "DC2"), ("Region1", "DC1")): 50, + (("Region1", "DC1"), ("Region3", "DC3_1")): 200, + (("Region3", "DC3_1"), ("Region1", "DC1")): 200, + (("Region2", "DC2"), ("Region3", "DC3_2")): 200, + (("Region3", "DC3_2"), ("Region2", "DC2")): 200, + } + inter_region_dict["capacities"] = { + (("Region1", "DC1"), ("Region2", "DC2")): 70, + (("Region2", "DC2"), ("Region1", "DC1")): 100, + (("Region1", "DC1"), ("Region3", "DC3_1")): 50, + (("Region3", "DC3_1"), ("Region1", "DC1")): 50, + (("Region2", "DC2"), ("Region3", "DC3_2")): 50, + (("Region3", "DC3_2"), ("Region2", "DC2")): 50, + } + elif num_admm_subproblems == 4: - inter_region_dict["arcs"] = [(("Region1","DC1"),("Region2","DC2")),(("Region2","DC2"),("Region1","DC1")),\ - (("Region1","DC1"),("Region3","DC3_1")),(("Region3","DC3_1"),("Region1","DC1")),\ - (("Region2","DC2"),("Region3","DC3_2")),(("Region3","DC3_2"),("Region2","DC2")),\ - (("Region1","DC1"),("Region4","DC4")),(("Region4","DC4"),("Region1","DC1")),\ - (("Region4","DC4"),("Region2","DC2")),(("Region2","DC2"),("Region4","DC4")),\ - ] - inter_region_dict["costs"] = {(("Region1","DC1"),("Region2","DC2")): 100, (("Region2","DC2"),("Region1","DC1")): 50,\ - (("Region1","DC1"),("Region3","DC3_1")): 200, (("Region3","DC3_1"),("Region1","DC1")): 200,\ - 
(("Region2","DC2"),("Region3","DC3_2")): 200, (("Region3","DC3_2"),("Region2","DC2")): 200,\ - (("Region1","DC1"),("Region4","DC4")): 30, (("Region4","DC4"),("Region1","DC1")): 50,\ - (("Region4","DC4"),("Region2","DC2")): 100, (("Region2","DC2"),("Region4","DC4")): 70,\ - } - inter_region_dict["capacities"] = {(("Region1","DC1"),("Region2","DC2")): 70, (("Region2","DC2"),("Region1","DC1")): 100,\ - (("Region1","DC1"),("Region3","DC3_1")): 50, (("Region3","DC3_1"),("Region1","DC1")): 50,\ - (("Region2","DC2"),("Region3","DC3_2")): 50, (("Region3","DC3_2"),("Region2","DC2")): 50,\ - (("Region1","DC1"),("Region4","DC4")): 100, (("Region4","DC4"),("Region1","DC1")): 60,\ - (("Region4","DC4"),("Region2","DC2")): 20, (("Region2","DC2"),("Region4","DC4")): 40,\ - } - + inter_region_dict["arcs"] = [ + (("Region1", "DC1"), ("Region2", "DC2")), + (("Region2", "DC2"), ("Region1", "DC1")), + (("Region1", "DC1"), ("Region3", "DC3_1")), + (("Region3", "DC3_1"), ("Region1", "DC1")), + (("Region2", "DC2"), ("Region3", "DC3_2")), + (("Region3", "DC3_2"), ("Region2", "DC2")), + (("Region1", "DC1"), ("Region4", "DC4")), + (("Region4", "DC4"), ("Region1", "DC1")), + (("Region4", "DC4"), ("Region2", "DC2")), + (("Region2", "DC2"), ("Region4", "DC4")), + ] + inter_region_dict["costs"] = { + (("Region1", "DC1"), ("Region2", "DC2")): 100, + (("Region2", "DC2"), ("Region1", "DC1")): 50, + (("Region1", "DC1"), ("Region3", "DC3_1")): 200, + (("Region3", "DC3_1"), ("Region1", "DC1")): 200, + (("Region2", "DC2"), ("Region3", "DC3_2")): 200, + (("Region3", "DC3_2"), ("Region2", "DC2")): 200, + (("Region1", "DC1"), ("Region4", "DC4")): 30, + (("Region4", "DC4"), ("Region1", "DC1")): 50, + (("Region4", "DC4"), ("Region2", "DC2")): 100, + (("Region2", "DC2"), ("Region4", "DC4")): 70, + } + inter_region_dict["capacities"] = { + (("Region1", "DC1"), ("Region2", "DC2")): 70, + (("Region2", "DC2"), ("Region1", "DC1")): 100, + (("Region1", "DC1"), ("Region3", "DC3_1")): 50, + (("Region3", "DC3_1"), ("Region1", "DC1")): 50, + (("Region2", "DC2"), ("Region3", "DC3_2")): 50, + (("Region3", "DC3_2"), ("Region2", "DC2")): 50, + (("Region1", "DC1"), ("Region4", "DC4")): 100, + (("Region4", "DC4"), ("Region1", "DC1")): 60, + (("Region4", "DC4"), ("Region2", "DC2")): 20, + (("Region2", "DC2"), ("Region4", "DC4")): 40, + } + return inter_region_dict -def dummy_nodes_generator(region_dict, inter_region_dict): +def dummy_nodes_generator(region_dict, inter_region_dict): """This function creates a new dictionary ``local_dict similar`` to ``region_dict`` with the dummy nodes and their constraints Args: @@ -89,10 +122,10 @@ def dummy_nodes_generator(region_dict, inter_region_dict): inter_region_dict (dict): dictionary of the inter-region relations Returns: - local_dict (dict): - This dictionary copies region_dict, completes the already existing fields of + local_dict (dict): + This dictionary copies region_dict, completes the already existing fields of region_dict to represent the dummy nodes, and adds the following keys:\n - dummy nodes source (resp. target): the list of dummy nodes for which the source (resp. target) + dummy nodes source (resp. target): the list of dummy nodes for which the source (resp. target) is in the considered region. \n dummy nodes source (resp. target) slack bounds: dictionary on dummy nodes source (resp. 
target) """ @@ -106,19 +139,25 @@ def dummy_nodes_generator(region_dict, inter_region_dict): local_dict["dummy nodes source slack bounds"] = {} local_dict["dummy nodes target slack bounds"] = {} for arc in inter_region_dict["arcs"]: - source,target = arc - region_source,node_source = source - region_target,node_target = target + source, target = arc + region_source, node_source = source + region_target, node_target = target if region_source == region_dict["name"]: - dummy_node=node_source+node_target + dummy_node = node_source + node_target local_dict["nodes"].append(dummy_node) local_dict["supply"][dummy_node] = 0 local_dict["dummy nodes source"].append(dummy_node) local_dict["arcs"].append((node_source, dummy_node)) - local_dict["flow costs"][(node_source, dummy_node)] = inter_region_dict["costs"][(source, target)]/2 #should be adapted to model - local_dict["flow capacities"][(node_source, dummy_node)] = inter_region_dict["capacities"][(source, target)] - local_dict["dummy nodes source slack bounds"][dummy_node] = inter_region_dict["capacities"][(source, target)] + local_dict["flow costs"][(node_source, dummy_node)] = ( + inter_region_dict["costs"][(source, target)] / 2 + ) # should be adapted to model + local_dict["flow capacities"][(node_source, dummy_node)] = ( + inter_region_dict["capacities"][(source, target)] + ) + local_dict["dummy nodes source slack bounds"][dummy_node] = ( + inter_region_dict["capacities"][(source, target)] + ) if region_target == local_dict["name"]: dummy_node = node_source + node_target @@ -126,11 +165,18 @@ def dummy_nodes_generator(region_dict, inter_region_dict): local_dict["supply"][dummy_node] = 0 local_dict["dummy nodes target"].append(dummy_node) local_dict["arcs"].append((dummy_node, node_target)) - local_dict["flow costs"][(dummy_node, node_target)] = inter_region_dict["costs"][(source, target)]/2 #should be adapted to model - local_dict["flow capacities"][(dummy_node, node_target)] = inter_region_dict["capacities"][(source, target)] - local_dict["dummy nodes target slack bounds"][dummy_node] = inter_region_dict["capacities"][(source, target)] + local_dict["flow costs"][(dummy_node, node_target)] = ( + inter_region_dict["costs"][(source, target)] / 2 + ) # should be adapted to model + local_dict["flow capacities"][(dummy_node, node_target)] = ( + inter_region_dict["capacities"][(source, target)] + ) + local_dict["dummy nodes target slack bounds"][dummy_node] = ( + inter_region_dict["capacities"][(source, target)] + ) return local_dict + def _is_partition(L, *lists): # Verification that the node list is a partition of the other nodes lists # Step 1: Verify that the union of all sublists contains all elements of L @@ -146,11 +192,14 @@ def _is_partition(L, *lists): return False return True -def region_dict_creator(admm_subproblem_name): #in this precise example region_dict doesn't depend on the scenario - """ Create a scenario for the inter-region max profit distribution example. + +def region_dict_creator( + admm_subproblem_name, +): # in this precise example region_dict doesn't depend on the scenario + """Create a scenario for the inter-region max profit distribution example. In this precise example region_dict doesn't depend on the stochastic scenario - The convention for node names is: + The convention for node names is: Symbol + number of the region (+ _ + number of the example if needed), with symbols DC for distribution centers, F for factory nodes, B for buyer nodes. \n For instance: F3_1 is the 1st factory node of region 3. 
\n @@ -170,78 +219,155 @@ def region_dict_creator(admm_subproblem_name): #in this precise example region_d "flow costs" (dict[a] of float) : costs per unit flow on each arc \n "flow capacities" (dict[a] of floats) : upper bound capacities of each arc \n """ - if admm_subproblem_name == "Region1" : + if admm_subproblem_name == "Region1": # Creates data for Region1 - region_dict={"name": "Region1"} + region_dict = {"name": "Region1"} region_dict["nodes"] = ["F1_1", "F1_2", "DC1", "B1_1", "B1_2"] - region_dict["factory nodes"] = ["F1_1","F1_2"] - region_dict["buyer nodes"] = ["B1_1","B1_2"] - region_dict["distribution center nodes"]= ["DC1"] - region_dict["supply"] = {"F1_1": 80, "F1_2": 70, "B1_1": -60, "B1_2": -90, "DC1": 0} - region_dict["arcs"] = [("F1_1","DC1"), ("F1_2","DC1"), ("DC1","B1_1"), - ("DC1","B1_2"), ("F1_1","B1_1"), ("F1_2","B1_2")] + region_dict["factory nodes"] = ["F1_1", "F1_2"] + region_dict["buyer nodes"] = ["B1_1", "B1_2"] + region_dict["distribution center nodes"] = ["DC1"] + region_dict["supply"] = { + "F1_1": 80, + "F1_2": 70, + "B1_1": -60, + "B1_2": -90, + "DC1": 0, + } + region_dict["arcs"] = [ + ("F1_1", "DC1"), + ("F1_2", "DC1"), + ("DC1", "B1_1"), + ("DC1", "B1_2"), + ("F1_1", "B1_1"), + ("F1_2", "B1_2"), + ] region_dict["production costs"] = {"F1_1": 50, "F1_2": 80} region_dict["revenues"] = {"B1_1": 800, "B1_2": 900} # most capacities are 50, so start there and then override region_dict["flow capacities"] = {a: 50 for a in region_dict["arcs"]} - region_dict["flow capacities"][("F1_1","B1_1")] = None - region_dict["flow capacities"][("F1_2","B1_2")] = None - region_dict["flow costs"] = {("F1_1","DC1"): 300, ("F1_2","DC1"): 500, ("DC1","B1_1"): 200, - ("DC1","B1_2"): 400, ("F1_1","B1_1"): 700, ("F1_2","B1_2"): 1000} - + region_dict["flow capacities"][("F1_1", "B1_1")] = None + region_dict["flow capacities"][("F1_2", "B1_2")] = None + region_dict["flow costs"] = { + ("F1_1", "DC1"): 300, + ("F1_2", "DC1"): 500, + ("DC1", "B1_1"): 200, + ("DC1", "B1_2"): 400, + ("F1_1", "B1_1"): 700, + ("F1_2", "B1_2"): 1000, + } + elif admm_subproblem_name == "Region2": # Creates data for Region2 - region_dict={"name": "Region2"} + region_dict = {"name": "Region2"} region_dict["nodes"] = ["DC2", "B2_1", "B2_2", "B2_3"] region_dict["factory nodes"] = list() - region_dict["buyer nodes"] = ["B2_1","B2_2","B2_3"] - region_dict["distribution center nodes"]= ["DC2"] + region_dict["buyer nodes"] = ["B2_1", "B2_2", "B2_3"] + region_dict["distribution center nodes"] = ["DC2"] region_dict["supply"] = {"B2_1": -200, "B2_2": -150, "B2_3": -100, "DC2": 0} - region_dict["arcs"] = [("DC2","B2_1"), ("DC2","B2_2"), ("DC2","B2_3")] + region_dict["arcs"] = [("DC2", "B2_1"), ("DC2", "B2_2"), ("DC2", "B2_3")] region_dict["production costs"] = {} - region_dict["revenues"] = {"B2_1": 900, "B2_2": 800, "B2_3":1200} - region_dict["flow capacities"] = {("DC2","B2_1"): 200, ("DC2","B2_2"): 150, ("DC2","B2_3"): 100} - region_dict["flow costs"] = {("DC2","B2_1"): 100, ("DC2","B2_2"): 200, ("DC2","B2_3"): 300} - - elif admm_subproblem_name == "Region3" : + region_dict["revenues"] = {"B2_1": 900, "B2_2": 800, "B2_3": 1200} + region_dict["flow capacities"] = { + ("DC2", "B2_1"): 200, + ("DC2", "B2_2"): 150, + ("DC2", "B2_3"): 100, + } + region_dict["flow costs"] = { + ("DC2", "B2_1"): 100, + ("DC2", "B2_2"): 200, + ("DC2", "B2_3"): 300, + } + + elif admm_subproblem_name == "Region3": # Creates data for Region3 - region_dict={"name": "Region3"} + region_dict = {"name": "Region3"} region_dict["nodes"] = ["F3_1", 
"F3_2", "DC3_1", "DC3_2", "B3_1", "B3_2"] - region_dict["factory nodes"] = ["F3_1","F3_2"] - region_dict["buyer nodes"] = ["B3_1","B3_2"] - region_dict["distribution center nodes"]= ["DC3_1","DC3_2"] - region_dict["supply"] = {"F3_1": 80, "F3_2": 60, "B3_1": -100, "B3_2": -100, "DC3_1": 0, "DC3_2": 0} - region_dict["arcs"] = [("F3_1","DC3_1"), ("F3_2","DC3_2"), ("DC3_1","B3_1"), - ("DC3_2","B3_2"), ("DC3_1","DC3_2"), ("DC3_2","DC3_1")] + region_dict["factory nodes"] = ["F3_1", "F3_2"] + region_dict["buyer nodes"] = ["B3_1", "B3_2"] + region_dict["distribution center nodes"] = ["DC3_1", "DC3_2"] + region_dict["supply"] = { + "F3_1": 80, + "F3_2": 60, + "B3_1": -100, + "B3_2": -100, + "DC3_1": 0, + "DC3_2": 0, + } + region_dict["arcs"] = [ + ("F3_1", "DC3_1"), + ("F3_2", "DC3_2"), + ("DC3_1", "B3_1"), + ("DC3_2", "B3_2"), + ("DC3_1", "DC3_2"), + ("DC3_2", "DC3_1"), + ] region_dict["production costs"] = {"F3_1": 50, "F3_2": 50} region_dict["revenues"] = {"B3_1": 900, "B3_2": 700} - region_dict["flow capacities"] = {("F3_1","DC3_1"): 80, ("F3_2","DC3_2"): 60, ("DC3_1","B3_1"): 100, - ("DC3_2","B3_2"): 100, ("DC3_1","DC3_2"): 70, ("DC3_2","DC3_1"): 50} - region_dict["flow costs"] = {("F3_1","DC3_1"): 100, ("F3_2","DC3_2"): 100, ("DC3_1","B3_1"): 201, - ("DC3_2","B3_2"): 200, ("DC3_1","DC3_2"): 100, ("DC3_2","DC3_1"): 100} - + region_dict["flow capacities"] = { + ("F3_1", "DC3_1"): 80, + ("F3_2", "DC3_2"): 60, + ("DC3_1", "B3_1"): 100, + ("DC3_2", "B3_2"): 100, + ("DC3_1", "DC3_2"): 70, + ("DC3_2", "DC3_1"): 50, + } + region_dict["flow costs"] = { + ("F3_1", "DC3_1"): 100, + ("F3_2", "DC3_2"): 100, + ("DC3_1", "B3_1"): 201, + ("DC3_2", "B3_2"): 200, + ("DC3_1", "DC3_2"): 100, + ("DC3_2", "DC3_1"): 100, + } + elif admm_subproblem_name == "Region4": # Creates data for Region4 - region_dict={"name": "Region4"} + region_dict = {"name": "Region4"} region_dict["nodes"] = ["F4_1", "F4_2", "DC4", "B4_1", "B4_2"] - region_dict["factory nodes"] = ["F4_1","F4_2"] - region_dict["buyer nodes"] = ["B4_1","B4_2"] + region_dict["factory nodes"] = ["F4_1", "F4_2"] + region_dict["buyer nodes"] = ["B4_1", "B4_2"] region_dict["distribution center nodes"] = ["DC4"] - region_dict["supply"] = {"F4_1": 200, "F4_2": 30, "B4_1": -100, "B4_2": -100, "DC4": 0} - region_dict["arcs"] = [("F4_1","DC4"), ("F4_2","DC4"), ("DC4","B4_1"), ("DC4","B4_2")] + region_dict["supply"] = { + "F4_1": 200, + "F4_2": 30, + "B4_1": -100, + "B4_2": -100, + "DC4": 0, + } + region_dict["arcs"] = [ + ("F4_1", "DC4"), + ("F4_2", "DC4"), + ("DC4", "B4_1"), + ("DC4", "B4_2"), + ] region_dict["production costs"] = {"F4_1": 50, "F4_2": 50} region_dict["revenues"] = {"B4_1": 900, "B4_2": 700} - region_dict["flow capacities"] = {("F4_1","DC4"): 80, ("F4_2","DC4"): 60, ("DC4","B4_1"): 100, ("DC4","B4_2"): 100} - region_dict["flow costs"] = {("F4_1","DC4"): 100, ("F4_2","DC4"): 80, ("DC4","B4_1"): 90, ("DC4","B4_2"): 70} - + region_dict["flow capacities"] = { + ("F4_1", "DC4"): 80, + ("F4_2", "DC4"): 60, + ("DC4", "B4_1"): 100, + ("DC4", "B4_2"): 100, + } + region_dict["flow costs"] = { + ("F4_1", "DC4"): 100, + ("F4_2", "DC4"): 80, + ("DC4", "B4_1"): 90, + ("DC4", "B4_2"): 70, + } + else: - raise RuntimeError (f"unknown Region name {admm_subproblem_name}") + raise RuntimeError(f"unknown Region name {admm_subproblem_name}") - assert _is_partition(region_dict["nodes"], region_dict["factory nodes"], region_dict["buyer nodes"], region_dict["distribution center nodes"]) + assert _is_partition( + region_dict["nodes"], + region_dict["factory nodes"], + 
region_dict["buyer nodes"], + region_dict["distribution center nodes"], + ) return region_dict @@ -249,18 +375,20 @@ def region_dict_creator(admm_subproblem_name): #in this precise example region_d def _scenario_number(stoch_scenario_name, demand=None): if demand is None: scennum = sputils.extract_num(stoch_scenario_name) - else: # 3-stage - production_num = int(re.search(r'\d+', stoch_scenario_name).group()) + else: # 3-stage + production_num = int(re.search(r"\d+", stoch_scenario_name).group()) if demand == "high": - scennum = (production_num-1)*2 + scennum = (production_num - 1) * 2 else: - scennum = (production_num-1)*2+1 + scennum = (production_num - 1) * 2 + 1 return scennum ###Creates the model when local_dict is given, local_dict depends on the subproblem -def min_cost_distr_problem(local_dict, stoch_scenario_name, cfg, demand=None, sense=pyo.minimize): - """ Create an arcs formulation of network flow for the region and stochastic scenario considered. +def min_cost_distr_problem( + local_dict, stoch_scenario_name, cfg, demand=None, sense=pyo.minimize +): + """Create an arcs formulation of network flow for the region and stochastic scenario considered. Args: local_dict (dict): dictionary representing a region including the dummy nodes \n @@ -283,20 +411,24 @@ def min_cost_distr_problem(local_dict, stoch_scenario_name, cfg, demand=None, se arcsin[a[1]].append(a) # Creating the random demand based on the supply if it is a three-stage example. In which case the original - # local_dict["supply"][n] is the average of demand for a node n + # local_dict["supply"][n] is the average of demand for a node n if demand is not None: if demand == "high": factor = 1.2 elif demand == "low": factor = 0.8 else: - raise RuntimeError (f"for the 3-stage problem demand should be 'high' or 'low', but it is {demand=}") + raise RuntimeError( + f"for the 3-stage problem demand should be 'high' or 'low', but it is {demand=}" + ) for n in local_dict["buyer nodes"]: local_dict["supply"][n] *= factor - model = pyo.ConcreteModel(name='MinCostFlowArcs') - def flowBounds_rule(model, i,j): - return (0, local_dict["flow capacities"][(i,j)]) + model = pyo.ConcreteModel(name="MinCostFlowArcs") + + def flowBounds_rule(model, i, j): + return (0, local_dict["flow capacities"][(i, j)]) + model.flow = pyo.Var(local_dict["arcs"], bounds=flowBounds_rule) # x def slackBounds_rule(model, n): @@ -304,68 +436,103 @@ def slackBounds_rule(model, n): return (0, local_dict["supply"][n]) elif n in local_dict["buyer nodes"]: return (local_dict["supply"][n], 0) - elif n in local_dict["dummy nodes source"]: - return (0,local_dict["dummy nodes source slack bounds"][n]) - elif n in local_dict["dummy nodes target"]: #this slack will respect the opposite flow balance rule - return (0,local_dict["dummy nodes target slack bounds"][n]) + elif n in local_dict["dummy nodes source"]: + return (0, local_dict["dummy nodes source slack bounds"][n]) + elif ( + n in local_dict["dummy nodes target"] + ): # this slack will respect the opposite flow balance rule + return (0, local_dict["dummy nodes target slack bounds"][n]) elif n in local_dict["distribution center nodes"]: - return (0,0) + return (0, 0) else: raise ValueError(f"unknown node type for node {n}") - + model.y = pyo.Var(local_dict["nodes"], bounds=slackBounds_rule) - if demand is not None: # 3-stage example, defining exportation - def export_bounds(m,i): - return (0,10) + if demand is not None: # 3-stage example, defining exportation + + def export_bounds(m, i): + return (0, 10) + model.export 
= pyo.Var(local_dict["buyer nodes"], bounds=export_bounds) - model.FirstStageCost = pyo.Expression(expr=\ - sum(local_dict["production costs"][n]*(local_dict["supply"][n]-model.y[n]) for n in local_dict["factory nodes"])) - - if demand is not None: # 3-stage example - model.SecondStageCost = pyo.Expression(expr=\ - sum(-810*model.export[n] for n in local_dict["buyer nodes"])) - - model.LastStageCost = pyo.Expression(expr=\ - sum(local_dict["flow costs"][a]*model.flow[a] for a in local_dict["arcs"]) \ - + sum(local_dict["revenues"][n]*(local_dict["supply"][n]-model.y[n]) for n in local_dict["buyer nodes"])) - - if demand is not None: # 3-stage example - model.MinCost = pyo.Objective(expr=model.FirstStageCost + model.SecondStageCost +model.LastStageCost, sense=sense) - else: # 2-stage example - model.MinCost = pyo.Objective(expr=model.FirstStageCost +model.LastStageCost, sense=sense) - + model.FirstStageCost = pyo.Expression( + expr=sum( + local_dict["production costs"][n] * (local_dict["supply"][n] - model.y[n]) + for n in local_dict["factory nodes"] + ) + ) + + if demand is not None: # 3-stage example + model.SecondStageCost = pyo.Expression( + expr=sum(-810 * model.export[n] for n in local_dict["buyer nodes"]) + ) + + model.LastStageCost = pyo.Expression( + expr=sum( + local_dict["flow costs"][a] * model.flow[a] for a in local_dict["arcs"] + ) + + sum( + local_dict["revenues"][n] * (local_dict["supply"][n] - model.y[n]) + for n in local_dict["buyer nodes"] + ) + ) + + if demand is not None: # 3-stage example + model.MinCost = pyo.Objective( + expr=model.FirstStageCost + model.SecondStageCost + model.LastStageCost, + sense=sense, + ) + else: # 2-stage example + model.MinCost = pyo.Objective( + expr=model.FirstStageCost + model.LastStageCost, sense=sense + ) + def FlowBalance_rule(m, n): - #we change the definition of the slack for target dummy nodes so that we have the slack from the source and from the target equal + # we change the definition of the slack for target dummy nodes so that we have the slack from the source and from the target equal if n in local_dict["dummy nodes target"]: - return sum(m.flow[a] for a in arcsout[n])\ - - sum(m.flow[a] for a in arcsin[n])\ - - m.y[n] == local_dict["supply"][n] + return ( + sum(m.flow[a] for a in arcsout[n]) + - sum(m.flow[a] for a in arcsin[n]) + - m.y[n] + == local_dict["supply"][n] + ) elif n in local_dict["factory nodes"]: # We generate pseudo randomly the loss on each factory node - numbers = re.findall(r'\d+', n) + numbers = re.findall(r"\d+", n) # Concatenate the numbers together - node_num = int(''.join(numbers)) - np.random.seed(scennum+node_num+cfg.initial_seed) - return sum(m.flow[a] for a in arcsout[n])\ - - sum(m.flow[a] for a in arcsin[n])\ - == (local_dict["supply"][n] - m.y[n]) * min(1,max(0,1-np.random.normal(cfg.spm,cfg.cv)/100)) # We add the loss - elif n in local_dict["buyer nodes"] and demand is not None: # for a 3-stage scenario the export changes the flow balance - return sum(m.flow[a] for a in arcsout[n])\ - - sum(m.flow[a] for a in arcsin[n])\ - + m.y[n] == local_dict["supply"][n] - m.export[n] + node_num = int("".join(numbers)) + np.random.seed(scennum + node_num + cfg.initial_seed) + return sum(m.flow[a] for a in arcsout[n]) - sum( + m.flow[a] for a in arcsin[n] + ) == (local_dict["supply"][n] - m.y[n]) * min( + 1, max(0, 1 - np.random.normal(cfg.spm, cfg.cv) / 100) + ) # We add the loss + elif ( + n in local_dict["buyer nodes"] and demand is not None + ): # for a 3-stage scenario the export changes the flow balance 
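+            # the export variable enters on the right-hand side: exported units
+            # act as extra demand that must be pulled in through the network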
+            return (
+                sum(m.flow[a] for a in arcsout[n])
+                - sum(m.flow[a] for a in arcsin[n])
+                + m.y[n]
+                == local_dict["supply"][n] - m.export[n]
+            )
         else:
-            return sum(m.flow[a] for a in arcsout[n])\
-                - sum(m.flow[a] for a in arcsin[n])\
-                + m.y[n] == local_dict["supply"][n]
-    model.FlowBalance= pyo.Constraint(local_dict["nodes"], rule=FlowBalance_rule)
+            return (
+                sum(m.flow[a] for a in arcsout[n])
+                - sum(m.flow[a] for a in arcsin[n])
+                + m.y[n]
+                == local_dict["supply"][n]
+            )
+
+    model.FlowBalance = pyo.Constraint(local_dict["nodes"], rule=FlowBalance_rule)

     return model


 ### Functions required in other files, whose constructions are specific to the problem

+
 def scenario_denouement(rank, admm_stoch_subproblem_scenario_name, scenario):
     """For each admm stochastic subproblem scenario, prints its name and the final variable values

@@ -379,37 +546,44 @@ def scenario_denouement(rank, admm_stoch_subproblem_scenario_name, scenario):
     scenario.flow.pprint()
     print(f"slack values for {admm_stoch_subproblem_scenario_name=} at {rank=}")
     scenario.y.pprint()
-    if hasattr(scenario,"export"): # ie 3-stage problem
+    if hasattr(scenario, "export"):  # i.e. a 3-stage problem
         print(f"export values for {admm_stoch_subproblem_scenario_name=} at {rank=}")
         scenario.export.pprint()


 # Only used with the 3-stage version of the example
 def MakeNodesforScen(model, BFs, scennum, local_dict):
-    """ Make just those scenario tree nodes needed by a scenario.
-        Return them as a list.
-        NOTE: the nodes depend on the scenario model and are, in some sense,
-              local to it.
-        Args:
-            BFs (list of int): branching factors
+    """Make just those scenario tree nodes needed by a scenario.
+    Return them as a list.
+    NOTE: the nodes depend on the scenario model and are, in some sense,
+    local to it.
+    Args:
+        BFs (list of int): branching factors
     """
-    ndn = "ROOT_"+str((scennum-0) // BFs[1]) # scennum is 0-based,
+    ndn = "ROOT_" + str((scennum - 0) // BFs[1])  # scennum is 0-based,
     # In a more general way we should divide by: BFs[1:]
-    #objfunc = pyo.Expression(expr=0) # Nothing is done so there is no cost
-    retval = [scenario_tree.ScenarioNode("ROOT",
-                                         1.0,
-                                         1,
-                                         model.FirstStageCost,
-                                         [model.y[n] for n in local_dict["factory nodes"]],
-                                         model),
-              scenario_tree.ScenarioNode(ndn,
-                                         1.0/BFs[0],
-                                         2,
-                                         model.FirstStageCost,
-                                         [model.export[n] for n in local_dict["buyer nodes"]], # No consensus_variable for now to simplify the model
-                                         model,
-                                         parent_name="ROOT")
-              ]
+    # objfunc = pyo.Expression(expr=0) # Nothing is done so there is no cost
+    retval = [
+        scenario_tree.ScenarioNode(
+            "ROOT",
+            1.0,
+            1,
+            model.FirstStageCost,
+            [model.y[n] for n in local_dict["factory nodes"]],
+            model,
+        ),
+        scenario_tree.ScenarioNode(
+            ndn,
+            1.0 / BFs[0],
+            2,
+            model.FirstStageCost,
+            [
+                model.export[n] for n in local_dict["buyer nodes"]
+            ],  # No consensus_variable for now to simplify the model
+            model,
+            parent_name="ROOT",
+        ),
+    ]
     return retval


@@ -417,12 +591,16 @@ def branching_factors_creator(num_stoch_scens, num_stage):
     # BFs is only used with multiple stages
     if num_stage == 3:
         # There should always be a high demand and a low demand version
-        assert num_stoch_scens % 2 == 0, "there should be an even number of stochastic scenarios for this 3 stage problem, but it is odd"
-        BFs = [num_stoch_scens//2, 2]
+        assert (
+            num_stoch_scens % 2 == 0
+        ), "there should be an even number of stochastic scenarios for this 3-stage problem, but it is odd"
+        BFs = [num_stoch_scens // 2, 2]
     elif num_stage == 2:
-        BFs = None # No need to use branching factors (or even to define them with two stage problems)
+        BFs = None  # No need to use branching factors (or even to define them with two-stage problems)
     else:
-        raise RuntimeError (f"the example is only made for 2 or 3-stage problems, but {num_stage} were given")
+        raise RuntimeError(
+            f"the example is only made for 2 or 3-stage problems, but {num_stage} was given"
+        )
     return BFs


@@ -439,10 +617,12 @@ def scenario_creator(admm_stoch_subproblem_scenario_name, **kwargs):
         Pyomo ConcreteModel: the instantiated model
     """
     cfg = kwargs.get("cfg")
-    
+
     # assert(cfg.num_admm_subproblems is not None)
     # assert (cfg.num_stoch_scens is not None)
-    admm_subproblem_name, stoch_scenario_name = split_admm_stoch_subproblem_scenario_name(admm_stoch_subproblem_scenario_name)
+    admm_subproblem_name, stoch_scenario_name = (
+        split_admm_stoch_subproblem_scenario_name(admm_stoch_subproblem_scenario_name)
+    )

     inter_region_dict = inter_region_dict_creator(cfg.num_admm_subproblems)
     region_dict = region_dict_creator(admm_subproblem_name)
@@ -453,67 +633,83 @@ def scenario_creator(admm_stoch_subproblem_scenario_name, **kwargs):

     # Generating the model according to the number of stages and creating the tree
     if cfg.num_stage == 2:
         model = min_cost_distr_problem(local_dict, stoch_scenario_name, cfg)
-        sputils.attach_root_node(model, model.FirstStageCost, [model.y[n] for n in local_dict["factory nodes"]])
+        sputils.attach_root_node(
+            model,
+            model.FirstStageCost,
+            [model.y[n] for n in local_dict["factory nodes"]],
+        )
     else:
         BFs = branching_factors_creator(cfg.num_stoch_scens, cfg.num_stage)
-        if stoch_scenario_name.endswith('high'):
+        if stoch_scenario_name.endswith("high"):
             demand = "high"
-        elif stoch_scenario_name.endswith('low'):
+        elif stoch_scenario_name.endswith("low"):
             demand = "low"
         else:
-            raise RuntimeError (f"the stochastic scenario name should end with 'high' or 'low', but it is {stoch_scenario_name}")
-        model = min_cost_distr_problem(local_dict, stoch_scenario_name, cfg, demand=demand)
+            raise RuntimeError(
+                f"the stochastic scenario name should end with 'high' or 'low', but it is {stoch_scenario_name}"
+            )
+        model = min_cost_distr_problem(
+            local_dict, stoch_scenario_name, cfg, demand=demand
+        )
         scennum = _scenario_number(stoch_scenario_name, demand=demand)
         model._mpisppy_node_list = MakeNodesforScen(model, BFs, scennum, local_dict)
-    model._mpisppy_probability = 1/cfg.num_stoch_scens
-    
+    model._mpisppy_probability = 1 / cfg.num_stoch_scens
+
     return model


-def consensus_vars_creator(admm_subproblem_names, stoch_scenario_name, kwargs, num_stage=2):
+def consensus_vars_creator(
+    admm_subproblem_names, stoch_scenario_name, kwargs, num_stage=2
+):
     """The following function creates the consensus_vars dictionary from the inter-region dictionary. \n
     This dictionary has redundant information, but is useful for admmWrapper.

     Args:
         admm_subproblem_names (list of str): names of the admm subproblems (regions) \n
-        stoch_scenario_name (str): name of any stochastic_scenario, it is only used
-        in this example to access the non anticipative variables (which are common to
+        stoch_scenario_name (str): name of any stochastic_scenario; it is only used
+        in this example to access the non-anticipative variables (which are common to
         every stochastic scenario) and their stage.
-        
+
     Returns:
-        dict: dictionary which keys are the subproblems and values are the list of
+        dict: dictionary whose keys are the subproblems and values are the list of
         pairs (consensus_variable_name (str), stage (int)).
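+    Note: every inter-region arc contributes the same slack variable name to
+    both its source and its target region; e.g. an arc joining nodes "DC1" and
+    "DC2" would put ("y[DC1DC2]", num_stage) in both regions' lists (the node
+    names here are illustrative).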
""" - # Due to the small size of inter_region_dict, it is not given as argument but rather created. + # Due to the small size of inter_region_dict, it is not given as argument but rather created. inter_region_dict = inter_region_dict_creator(len(admm_subproblem_names)) consensus_vars = {} for arc in inter_region_dict["arcs"]: - source,target = arc - region_source,node_source = source - region_target,node_target = target + source, target = arc + region_source, node_source = source + region_target, node_target = target dummy_node = node_source + node_target - vstr = f"y[{dummy_node}]" #variable name as string, y is the slack + vstr = f"y[{dummy_node}]" # variable name as string, y is the slack - #adds dummy_node in the source region - if region_source not in consensus_vars: #initiates consensus_vars[region_source] + # adds dummy_node in the source region + if ( + region_source not in consensus_vars + ): # initiates consensus_vars[region_source] consensus_vars[region_source] = list() - consensus_vars[region_source].append((vstr,num_stage)) + consensus_vars[region_source].append((vstr, num_stage)) - #adds dummy_node in the target region - if region_target not in consensus_vars: #initiates consensus_vars[region_target] + # adds dummy_node in the target region + if ( + region_target not in consensus_vars + ): # initiates consensus_vars[region_target] consensus_vars[region_target] = list() - consensus_vars[region_target].append((vstr,num_stage)) + consensus_vars[region_target].append((vstr, num_stage)) # now add the parents. It doesn't depend on the stochastic scenario so we chose one and - # then we go through the models (created by scenario creator) for all the admm_stoch_subproblem_scenario + # then we go through the models (created by scenario creator) for all the admm_stoch_subproblem_scenario # which have this scenario as an ancestor (parent) in the tree for admm_subproblem_name in admm_subproblem_names: - admm_stoch_subproblem_scenario_name = combining_names(admm_subproblem_name,stoch_scenario_name) + admm_stoch_subproblem_scenario_name = combining_names( + admm_subproblem_name, stoch_scenario_name + ) model = scenario_creator(admm_stoch_subproblem_scenario_name, **kwargs) for node in model._mpisppy_node_list: for var in node.nonant_list: - #print(f"{admm_subproblem_name=}") - #print(f"{var.name=}") + # print(f"{admm_subproblem_name=}") + # print(f"{var.name=}") if var.name not in consensus_vars[admm_subproblem_name]: consensus_vars[admm_subproblem_name].append((var.name, node.stage)) return consensus_vars @@ -529,7 +725,11 @@ def stoch_scenario_names_creator(num_stoch_scens, num_stage=2): list (str): the list of stochastic scenario names """ if num_stage == 3: - return [f"StochasticScenario{i+1}_{demand}" for i in range(num_stoch_scens//2) for demand in ["high","low"]] + return [ + f"StochasticScenario{i+1}_{demand}" + for i in range(num_stoch_scens // 2) + for demand in ["high", "low"] + ] else: return [f"StochasticScenario{i+1}" for i in range(num_stoch_scens)] @@ -546,13 +746,15 @@ def admm_subproblem_names_creator(num_admm_subproblems): return [f"Region{i+1}" for i in range(num_admm_subproblems)] -def combining_names(admm_subproblem_name,stoch_scenario_name): +def combining_names(admm_subproblem_name, stoch_scenario_name): # Used to create the admm_stoch_subproblem_scenario_name return f"ADMM_STOCH_{admm_subproblem_name}_{stoch_scenario_name}" -def admm_stoch_subproblem_scenario_names_creator(admm_subproblem_names,stoch_scenario_names): - """ Creates the list of the admm stochastic 
subproblem scenarios, which are the admm subproblems given a scenario +def admm_stoch_subproblem_scenario_names_creator( + admm_subproblem_names, stoch_scenario_names +): + """Creates the list of the admm stochastic subproblem scenarios, which are the admm subproblems given a scenario Args: admm_subproblem_names (list of str): names of the admm subproblem @@ -562,14 +764,16 @@ def admm_stoch_subproblem_scenario_names_creator(admm_subproblem_names,stoch_sce list of str: name of the admm stochastic subproblem scenarios """ ### The order is important, we may want all the subproblems to appear consecutively in a scenario - return [combining_names(admm_subproblem_name,stoch_scenario_name) \ - for stoch_scenario_name in stoch_scenario_names \ - for admm_subproblem_name in admm_subproblem_names ] + return [ + combining_names(admm_subproblem_name, stoch_scenario_name) + for stoch_scenario_name in stoch_scenario_names + for admm_subproblem_name in admm_subproblem_names + ] def split_admm_stoch_subproblem_scenario_name(admm_stoch_subproblem_scenario_name): - """ Gives the admm_subproblem_name and the stoch_scenario_name given an admm_stoch_subproblem_scenario_name. - This function, specific to the problem, is the reciprocal function of ``combining_names`` which creates the + """Gives the admm_subproblem_name and the stoch_scenario_name given an admm_stoch_subproblem_scenario_name. + This function, specific to the problem, is the reciprocal function of ``combining_names`` which creates the admm_stoch_subproblem_scenario_name given the admm_subproblem_name and the stoch_scenario_name. Args: @@ -579,11 +783,11 @@ def split_admm_stoch_subproblem_scenario_name(admm_stoch_subproblem_scenario_nam (str,str): admm_subproblem_name, stoch_scenario_name """ # Method specific to our example and because the admm_subproblem_name and stoch_scenario_name don't include "_" - splitted = admm_stoch_subproblem_scenario_name.split('_') + splitted = admm_stoch_subproblem_scenario_name.split("_") # The next line would require adding the argument num_stage and transferring it to stoch_admmWrapper - #assert (len(splitted) == num_stage+2), f"appart from the 'ADMM_STOCH_' prefix, underscore should only separate stages" + # assert (len(splitted) == num_stage+2), f"appart from the 'ADMM_STOCH_' prefix, underscore should only separate stages" admm_subproblem_name = splitted[2] - stoch_scenario_name = '_'.join(splitted[3:]) + stoch_scenario_name = "_".join(splitted[3:]) return admm_subproblem_name, stoch_scenario_name @@ -595,52 +799,57 @@ def kw_creator(cfg): Returns: dict (str): the kwargs that are used in distr.scenario_creator, chich are included in cfg. """ - kwargs = { - "cfg": cfg - } + kwargs = {"cfg": cfg} return kwargs def inparser_adder(cfg): - """Adding to the config argument, specific elements to our problems. In this case the numbers of stochastic scenarios + """Adding to the config argument, specific elements to our problems. In this case the numbers of stochastic scenarios and admm subproblems which are required. And elements used for random number generations. 
     Args:
         cfg (config): specifications for the problem given on the command line
     """
     cfg.add_to_config(
-            "num_stoch_scens",
-            description="Number of stochastic scenarios (default None)",
-            domain=int,
-            default=None,
-            argparse_args = {"required": True}
-        )
+        "num_stoch_scens",
+        description="Number of stochastic scenarios (default None)",
+        domain=int,
+        default=None,
+        argparse_args={"required": True},
+    )

     cfg.add_to_config(
-            "num_admm_subproblems",
-            description="Number of admm subproblems per stochastic scenario (default None)",
-            domain=int,
-            default=None,
-            argparse_args = {"required": True}
-        )
+        "num_admm_subproblems",
+        description="Number of admm subproblems per stochastic scenario (default None)",
+        domain=int,
+        default=None,
+        argparse_args={"required": True},
+    )
+
+    cfg.add_to_config(
+        "spm",
+        description="mean percentage of scrap loss at the production",
+        domain=float,
+        default=5,
+    )

-    cfg.add_to_config("spm",
-                      description="mean percentage of scrap loss at the production",
-                      domain=float,
-                      default=5)
-
-    cfg.add_to_config("cv",
-                      description="coefficient of variation of the loss at the production",
-                      domain=float,
-                      default=20)
-
-    cfg.add_to_config("initial_seed",
-                      description="initial seed for generating the loss",
-                      domain=int,
-                      default=0)
-
-    cfg.add_to_config("num_stage",
-                      description="choice of the number of stages for the example, by default two-stage",
-                      domain=int,
-                      default=2)
-    
\ No newline at end of file
+    cfg.add_to_config(
+        "cv",
+        description="coefficient of variation of the loss at the production",
+        domain=float,
+        default=20,
+    )
+
+    cfg.add_to_config(
+        "initial_seed",
+        description="initial seed for generating the loss",
+        domain=int,
+        default=0,
+    )
+
+    cfg.add_to_config(
+        "num_stage",
+        description="choice of the number of stages for the example, by default two-stage",
+        domain=int,
+        default=2,
+    )
diff --git a/mpisppy/tests/examples/stoch_distr/stoch_distr_admm_cylinders.py b/mpisppy/tests/examples/stoch_distr/stoch_distr_admm_cylinders.py
index 207ec360f..ca90f74e4 100644
--- a/mpisppy/tests/examples/stoch_distr/stoch_distr_admm_cylinders.py
+++ b/mpisppy/tests/examples/stoch_distr/stoch_distr_admm_cylinders.py
@@ -16,6 +16,7 @@
 # Driver file for stochastic admm
 import mpisppy.utils.stoch_admmWrapper as stoch_admmWrapper
+
 # import stoch_distr ## It is necessary for the test to have the full import path
 import mpisppy.tests.examples.stoch_distr.stoch_distr as stoch_distr

@@ -24,10 +25,12 @@
 from mpisppy.utils import config
 import mpisppy.utils.cfg_vanilla as vanilla
 from mpisppy import MPI
+
 global_rank = MPI.COMM_WORLD.Get_rank()

 write_solution = False

+
 def _parse_args():
     # create a config object and parse
     cfg = config.Config()
@@ -41,10 +44,12 @@ def _parse_args():
     cfg.lagrangian_args()
     cfg.ph_ob_args()
     cfg.tracking_args()
-    cfg.add_to_config("run_async",
-                      description="Run with async projective hedging instead of progressive hedging",
-                      domain=bool,
-                      default=False)
+    cfg.add_to_config(
+        "run_async",
+        description="Run with async projective hedging instead of progressive hedging",
+        domain=bool,
+        default=False,
+    )
     cfg.parse_command_line("stoch_distr_admm_cylinders")

     return cfg
@@ -52,96 +57,143 @@ def _parse_args():

 def _count_cylinders(cfg):
     count = 1
-    cfglist = ["xhatxbar", "lagrangian", "ph_ob", "fwph"] #all the cfg arguments that create a new cylinders
+    cfglist = [
+        "xhatxbar",
+        "lagrangian",
+        "ph_ob",
+        "fwph",
+    ]  # all the cfg arguments that create a new cylinder
     for cylname in cfglist:
         if cfg[cylname]:
             count += 1
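+    # the count starts at 1 for the hub cylinder; each enabled spoke adds one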
return count -def _make_admm(cfg, n_cylinders,verbose=None): +def _make_admm(cfg, n_cylinders, verbose=None): options = {} - + BFs = stoch_distr.branching_factors_creator(cfg.num_stoch_scens, cfg.num_stage) - admm_subproblem_names = stoch_distr.admm_subproblem_names_creator(cfg.num_admm_subproblems) - stoch_scenario_names = stoch_distr.stoch_scenario_names_creator(num_stoch_scens=cfg.num_stoch_scens, num_stage=cfg.num_stage) - all_admm_stoch_subproblem_scenario_names = stoch_distr.admm_stoch_subproblem_scenario_names_creator(admm_subproblem_names,stoch_scenario_names) - split_admm_stoch_subproblem_scenario_name = stoch_distr.split_admm_stoch_subproblem_scenario_name - + admm_subproblem_names = stoch_distr.admm_subproblem_names_creator( + cfg.num_admm_subproblems + ) + stoch_scenario_names = stoch_distr.stoch_scenario_names_creator( + num_stoch_scens=cfg.num_stoch_scens, num_stage=cfg.num_stage + ) + all_admm_stoch_subproblem_scenario_names = ( + stoch_distr.admm_stoch_subproblem_scenario_names_creator( + admm_subproblem_names, stoch_scenario_names + ) + ) + split_admm_stoch_subproblem_scenario_name = ( + stoch_distr.split_admm_stoch_subproblem_scenario_name + ) + scenario_creator = stoch_distr.scenario_creator scenario_creator_kwargs = stoch_distr.kw_creator(cfg) - stoch_scenario_name = stoch_scenario_names[0] # choice of any scenario to create consensus_vars - consensus_vars = stoch_distr.consensus_vars_creator(admm_subproblem_names, stoch_scenario_name, scenario_creator_kwargs, num_stage=cfg.num_stage) - admm = stoch_admmWrapper.Stoch_AdmmWrapper(options, - all_admm_stoch_subproblem_scenario_names, - split_admm_stoch_subproblem_scenario_name, - admm_subproblem_names, - stoch_scenario_names, - scenario_creator, - consensus_vars, - n_cylinders=n_cylinders, - mpicomm=MPI.COMM_WORLD, - scenario_creator_kwargs=scenario_creator_kwargs, - verbose=verbose, - BFs=BFs, - ) + stoch_scenario_name = stoch_scenario_names[ + 0 + ] # choice of any scenario to create consensus_vars + consensus_vars = stoch_distr.consensus_vars_creator( + admm_subproblem_names, + stoch_scenario_name, + scenario_creator_kwargs, + num_stage=cfg.num_stage, + ) + admm = stoch_admmWrapper.Stoch_AdmmWrapper( + options, + all_admm_stoch_subproblem_scenario_names, + split_admm_stoch_subproblem_scenario_name, + admm_subproblem_names, + stoch_scenario_names, + scenario_creator, + consensus_vars, + n_cylinders=n_cylinders, + mpicomm=MPI.COMM_WORLD, + scenario_creator_kwargs=scenario_creator_kwargs, + verbose=verbose, + BFs=BFs, + ) return admm, all_admm_stoch_subproblem_scenario_names - -def _wheel_creator(cfg, n_cylinders, scenario_creator, variable_probability, all_nodenames, all_admm_stoch_subproblem_scenario_names, scenario_creator_kwargs=None): #the wrapper doesn't need any kwarg + +def _wheel_creator( + cfg, + n_cylinders, + scenario_creator, + variable_probability, + all_nodenames, + all_admm_stoch_subproblem_scenario_names, + scenario_creator_kwargs=None, +): # the wrapper doesn't need any kwarg ph_converger = None - #Things needed for vanilla cylinders + # Things needed for vanilla cylinders scenario_denouement = stoch_distr.scenario_denouement - beans = (cfg, scenario_creator, scenario_denouement, all_admm_stoch_subproblem_scenario_names) + beans = ( + cfg, + scenario_creator, + scenario_denouement, + all_admm_stoch_subproblem_scenario_names, + ) if cfg.run_async: # Vanilla APH hub - hub_dict = vanilla.aph_hub(*beans, - scenario_creator_kwargs=scenario_creator_kwargs, - ph_extensions=None, - rho_setter = None, - 
variable_probability=variable_probability, - all_nodenames=all_nodenames) #needs to be modified, all_nodenames is None only for 2 stage problems + hub_dict = vanilla.aph_hub( + *beans, + scenario_creator_kwargs=scenario_creator_kwargs, + ph_extensions=None, + rho_setter=None, + variable_probability=variable_probability, + all_nodenames=all_nodenames, + ) # needs to be modified, all_nodenames is None only for 2 stage problems else: # Vanilla PH hub - hub_dict = vanilla.ph_hub(*beans, - scenario_creator_kwargs=scenario_creator_kwargs, - ph_extensions=None, - ph_converger=ph_converger, - rho_setter=None, - variable_probability=variable_probability, - all_nodenames=all_nodenames) + hub_dict = vanilla.ph_hub( + *beans, + scenario_creator_kwargs=scenario_creator_kwargs, + ph_extensions=None, + ph_converger=ph_converger, + rho_setter=None, + variable_probability=variable_probability, + all_nodenames=all_nodenames, + ) # FWPH spoke doesn't work with variable probability # Standard Lagrangian bound spoke if cfg.lagrangian: - lagrangian_spoke = vanilla.lagrangian_spoke(*beans, - scenario_creator_kwargs=scenario_creator_kwargs, - rho_setter = None, - all_nodenames=all_nodenames) + lagrangian_spoke = vanilla.lagrangian_spoke( + *beans, + scenario_creator_kwargs=scenario_creator_kwargs, + rho_setter=None, + all_nodenames=all_nodenames, + ) # FWPH spoke : does not work here but may be called in global model. if cfg.fwph: - fw_spoke = vanilla.fwph_spoke(*beans, scenario_creator_kwargs=scenario_creator_kwargs) + fw_spoke = vanilla.fwph_spoke( + *beans, scenario_creator_kwargs=scenario_creator_kwargs + ) # ph outer bounder spoke if cfg.ph_ob: - ph_ob_spoke = vanilla.ph_ob_spoke(*beans, - scenario_creator_kwargs=scenario_creator_kwargs, - rho_setter = None, - all_nodenames=all_nodenames, - variable_probability=variable_probability) + ph_ob_spoke = vanilla.ph_ob_spoke( + *beans, + scenario_creator_kwargs=scenario_creator_kwargs, + rho_setter=None, + all_nodenames=all_nodenames, + variable_probability=variable_probability, + ) # xhat looper bound spoke if cfg.xhatxbar: - xhatxbar_spoke = vanilla.xhatxbar_spoke(*beans, - scenario_creator_kwargs=scenario_creator_kwargs, - all_nodenames=all_nodenames, - variable_probability=variable_probability) - + xhatxbar_spoke = vanilla.xhatxbar_spoke( + *beans, + scenario_creator_kwargs=scenario_creator_kwargs, + all_nodenames=all_nodenames, + variable_probability=variable_probability, + ) list_of_spoke_dict = list() if cfg.fwph: @@ -153,40 +205,56 @@ def _wheel_creator(cfg, n_cylinders, scenario_creator, variable_probability, all if cfg.xhatxbar: list_of_spoke_dict.append(xhatxbar_spoke) - assert n_cylinders == 1 + len(list_of_spoke_dict), f"{n_cylinders=},{len(list_of_spoke_dict)=}" + assert n_cylinders == 1 + len( + list_of_spoke_dict + ), f"{n_cylinders=},{len(list_of_spoke_dict)=}" wheel = WheelSpinner(hub_dict, list_of_spoke_dict) return wheel - def main(cfg): assert cfg.fwph_args is not None, "fwph does not support variable probability" - if cfg.default_rho is None: # and rho_setter is None - raise RuntimeError("No rho_setter so a default must be specified via --default-rho") + if cfg.default_rho is None: # and rho_setter is None + raise RuntimeError( + "No rho_setter so a default must be specified via --default-rho" + ) n_cylinders = _count_cylinders(cfg) admm, all_admm_stoch_subproblem_scenario_names = _make_admm(cfg, n_cylinders) - - scenario_creator = admm.admmWrapper_scenario_creator # scenario_creator on a local scale - #note that the stoch_admmWrapper 
scenario_creator wrapper doesn't take any arguments
+
+    scenario_creator = (
+        admm.admmWrapper_scenario_creator
+    )  # scenario_creator on a local scale
+    # note that the stoch_admmWrapper scenario_creator wrapper doesn't take any arguments
     variable_probability = admm.var_prob_list

     all_nodenames = admm.all_nodenames

-    wheel = _wheel_creator(cfg, n_cylinders, scenario_creator, variable_probability, all_nodenames, all_admm_stoch_subproblem_scenario_names)
+    wheel = _wheel_creator(
+        cfg,
+        n_cylinders,
+        scenario_creator,
+        variable_probability,
+        all_nodenames,
+        all_admm_stoch_subproblem_scenario_names,
+    )

     wheel.spin()

     if write_solution:
-        wheel.write_first_stage_solution('stoch_distr_soln.csv')
-        wheel.write_first_stage_solution('stoch_distr_cyl_nonants.npy',
-                                         first_stage_solution_writer=sputils.first_stage_nonant_npy_serializer)
-        wheel.write_tree_solution('stoch_distr_full_solution')
+        wheel.write_first_stage_solution("stoch_distr_soln.csv")
+        wheel.write_first_stage_solution(
+            "stoch_distr_cyl_nonants.npy",
+            first_stage_solution_writer=sputils.first_stage_nonant_npy_serializer,
+        )
+        wheel.write_tree_solution("stoch_distr_full_solution")

     if global_rank == 0:
-        best_objective = wheel.spcomm.BestInnerBound #* len(all_admm_stoch_subproblem_scenario_names)
+        best_objective = (
+            wheel.spcomm.BestInnerBound
+        )  # * len(all_admm_stoch_subproblem_scenario_names)
         print(f"{best_objective=}")


 if __name__ == "__main__":
     cfg = _parse_args()
-    main(cfg)
\ No newline at end of file
+    main(cfg)
diff --git a/mpisppy/tests/examples/stoch_distr/stoch_distr_ef.py b/mpisppy/tests/examples/stoch_distr/stoch_distr_ef.py
index 19af1eafe..4b809243d 100644
--- a/mpisppy/tests/examples/stoch_distr/stoch_distr_ef.py
+++ b/mpisppy/tests/examples/stoch_distr/stoch_distr_ef.py
@@ -12,7 +12,7 @@

 # This file can be executed with: python stoch_distr_ef.py --num-stoch-scens 2 --num-admm-subproblems 3 --solver-name xpress

-# Solves the stochastic distribution problem
+# Solves the stochastic distribution problem
 import stoch_distr
 import stoch_distr_admm_cylinders
 import pyomo.environ as pyo
@@ -20,25 +20,26 @@
 import mpisppy.utils.sputils as sputils
 from mpisppy.utils import config
 from mpisppy import MPI
+
 global_rank = MPI.COMM_WORLD.Get_rank()

 write_solution = True

+
 def _parse_args():
     # create a config object and parse
     cfg = config.Config()
     stoch_distr.inparser_adder(cfg)
-    cfg.add_to_config("solver_name",
-                      description="Choice of the solver",
-                      domain=str,
-                      default=None)
+    cfg.add_to_config(
+        "solver_name", description="Choice of the solver", domain=str, default=None
+    )

     cfg.parse_command_line("stoch_distr_ef")

     return cfg


-def solve_EF_directly(admm,solver_name):
+def solve_EF_directly(admm, solver_name):
     local_scenario_names = admm.local_admm_stoch_subproblem_scenarios_names
     scenario_creator = admm.admmWrapper_scenario_creator

@@ -48,30 +49,34 @@ def solve_EF_directly(admm,solver_name):
         nonant_for_fixed_vars=False,
     )
     solver = pyo.SolverFactory(solver_name)
-    if 'persistent' in solver_name:
+    if "persistent" in solver_name:
         solver.set_instance(ef, symbolic_solver_labels=True)
         solver.solve(tee=True)
     else:
-        solver.solve(ef, tee=True, symbolic_solver_labels=True,)
+        solver.solve(
+            ef,
+            tee=True,
+            symbolic_solver_labels=True,
+        )

     return ef


-def main():
+def main():
     cfg = _parse_args()
-
-    n_cylinders = 1 # There is no spoke so we only use one cylinder
-    admm, _ = stoch_distr_admm_cylinders._make_admm(cfg, n_cylinders)
+
+    n_cylinders = 1  # There is no spoke so we only use one cylinder
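+    # reuse _make_admm from the cylinders driver so the EF is built from the
+    # same admm wrapper (same scenario names and wrapped scenario creator)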
+ admm, _ = stoch_distr_admm_cylinders._make_admm(cfg, n_cylinders) solved_ef = solve_EF_directly(admm, cfg.solver_name) - #with open("ef.txt", "w") as f: + # with open("ef.txt", "w") as f: # solved_ef.pprint(f) - #print ("******* model written to ef.txt *******") - #solution_file_name = "solution_distr.txt" - #sputils.write_ef_first_stage_solution(solved_ef, + # print ("******* model written to ef.txt *******") + # solution_file_name = "solution_distr.txt" + # sputils.write_ef_first_stage_solution(solved_ef, # solution_file_name,) - #print(f"EF solution written to {solution_file_name}") + # print(f"EF solution written to {solution_file_name}") print(f"EF objective: {pyo.value(solved_ef.EF_Obj)}") if __name__ == "__main__": - main() \ No newline at end of file + main() diff --git a/mpisppy/tests/examples/uc/uc_funcs.py b/mpisppy/tests/examples/uc/uc_funcs.py index 3d419d11a..888ef64fc 100644 --- a/mpisppy/tests/examples/uc/uc_funcs.py +++ b/mpisppy/tests/examples/uc/uc_funcs.py @@ -22,30 +22,33 @@ import egret.data.model_data as md import egret.models.unit_commitment as uc -def pysp_instance_creation_callback(scenario_name): - #print("Building instance for scenario =", scenario_name) +def pysp_instance_creation_callback(scenario_name): + # print("Building instance for scenario =", scenario_name) scennum = sputils.extract_num(scenario_name) uc_model_params = pdp.get_uc_model() - # hardcode - path = os.path.dirname(os.path.abspath(__file__))+os.sep+"3scenarios_r1" - + # hardcode + path = os.path.dirname(os.path.abspath(__file__)) + os.sep + "3scenarios_r1" + scenario_data = DataPortal(model=uc_model_params) - scenario_data.load(filename=path+os.sep+"RootNode.dat") - scenario_data.load(filename=path+os.sep+"Node"+str(scennum)+".dat") + scenario_data.load(filename=path + os.sep + "RootNode.dat") + scenario_data.load(filename=path + os.sep + "Node" + str(scennum) + ".dat") - scenario_params = uc_model_params.create_instance(scenario_data, - report_timing=False, - name=scenario_name) + scenario_params = uc_model_params.create_instance( + scenario_data, report_timing=False, name=scenario_name + ) - scenario_md = md.ModelData(pdp.create_model_data_dict_params(scenario_params, keep_names=True)) + scenario_md = md.ModelData( + pdp.create_model_data_dict_params(scenario_params, keep_names=True) + ) ## TODO: use the "power_balance_constraints" for now. In the future, networks should be ## handled with a custom callback -- also consider other base models - scenario_instance = uc.create_tight_unit_commitment_model(scenario_md, - network_constraints='power_balance_constraints') + scenario_instance = uc.create_tight_unit_commitment_model( + scenario_md, network_constraints="power_balance_constraints" + ) # hold over string attribute from Egret, # causes warning wth LShaped/Benders @@ -53,15 +56,16 @@ def pysp_instance_creation_callback(scenario_name): return scenario_instance -def scenario_creator(scenario_name): +def scenario_creator(scenario_name): return pysp2_callback(scenario_name) + def pysp2_callback(scenario_name): - ''' The callback needs to create an instance and then attach - the PySP nodes to it in a list _mpisppy_node_list ordered by stages. - Optionally attach _PHrho. Standard (1.0) PySP signature for now... - ''' + """The callback needs to create an instance and then attach + the PySP nodes to it in a list _mpisppy_node_list ordered by stages. + Optionally attach _PHrho. Standard (1.0) PySP signature for now... 
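+    (Here a single ROOT node is attached via sputils.attach_root_node.)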
+ """ instance = pysp_instance_creation_callback(scenario_name) @@ -77,100 +81,117 @@ def pysp2_callback(scenario_name): [instance.UnitStart, instance.UnitStop, instance.StartupIndicator], )] """ - sputils.attach_root_node(instance, - instance.StageCost["Stage_1"], - [instance.UnitOn], - nonant_ef_suppl_list = [instance.UnitStart, - instance.UnitStop, - instance.StartupIndicator]) + sputils.attach_root_node( + instance, + instance.StageCost["Stage_1"], + [instance.UnitOn], + nonant_ef_suppl_list=[ + instance.UnitStart, + instance.UnitStop, + instance.StartupIndicator, + ], + ) return instance + def scenario_denouement(rank, scenario_name, scenario): -# print("First stage cost for scenario",scenario_name,"is",pyo.value(scenario.StageCost["FirstStage"])) -# print("Second stage cost for scenario",scenario_name,"is",pyo.value(scenario.StageCost["SecondStage"])) + # print("First stage cost for scenario",scenario_name,"is",pyo.value(scenario.StageCost["FirstStage"])) + # print("Second stage cost for scenario",scenario_name,"is",pyo.value(scenario.StageCost["SecondStage"])) pass -def scenario_rhosa(scenario_instance): +def scenario_rhosa(scenario_instance): return scenario_rhos(scenario_instance) -def _rho_setter(scenario_instance): +def _rho_setter(scenario_instance): return scenario_rhos(scenario_instance) + def scenario_rhos(scenario_instance, rho_scale_factor=0.1): computed_rhos = [] for t in scenario_instance.TimePeriods: for g in scenario_instance.ThermalGenerators: - min_power = pyo.value(scenario_instance.MinimumPowerOutput[g,t]) - max_power = pyo.value(scenario_instance.MaximumPowerOutput[g,t]) + min_power = pyo.value(scenario_instance.MinimumPowerOutput[g, t]) + max_power = pyo.value(scenario_instance.MaximumPowerOutput[g, t]) avg_power = min_power + ((max_power - min_power) / 2.0) - min_cost = pyo.value(scenario_instance.MinimumProductionCost[g,t]) + min_cost = pyo.value(scenario_instance.MinimumProductionCost[g, t]) - avg_cost = scenario_instance.ComputeProductionCosts(scenario_instance, g, t, avg_power) + min_cost - #max_cost = scenario_instance.ComputeProductionCosts(scenario_instance, g, t, max_power) + min_cost + avg_cost = ( + scenario_instance.ComputeProductionCosts( + scenario_instance, g, t, avg_power + ) + + min_cost + ) + # max_cost = scenario_instance.ComputeProductionCosts(scenario_instance, g, t, max_power) + min_cost computed_rho = rho_scale_factor * avg_cost - computed_rhos.append((id(scenario_instance.UnitOn[g,t]), computed_rho)) - + computed_rhos.append((id(scenario_instance.UnitOn[g, t]), computed_rho)) + return computed_rhos -def scenario_rhos_trial_from_file(scenario_instance, rho_scale_factor=0.01, - fname=None): - ''' First computes the standard rho values (as computed by scenario_rhos() - above). Then reads rho values from the specified file (raises error if - no file specified) which is a csv formatted (var_name,rho_value). If - the rho_value specified in the file is strictly positive, it replaces - the value computed by scenario_rhos(). - - DTM: I wrote this function to test some specific things--I don't think - this will have a general purpose use, and can probably be deleted. 
- ''' - if (fname is None): - raise RuntimeError('Please provide an "fname" kwarg to ' - 'the "rho_setter_kwargs" option in options') - computed_rhos = scenario_rhos(scenario_instance, - rho_scale_factor=rho_scale_factor) + +def scenario_rhos_trial_from_file(scenario_instance, rho_scale_factor=0.01, fname=None): + """First computes the standard rho values (as computed by scenario_rhos() + above). Then reads rho values from the specified file (raises error if + no file specified) which is a csv formatted (var_name,rho_value). If + the rho_value specified in the file is strictly positive, it replaces + the value computed by scenario_rhos(). + + DTM: I wrote this function to test some specific things--I don't think + this will have a general purpose use, and can probably be deleted. + """ + if fname is None: + raise RuntimeError( + 'Please provide an "fname" kwarg to ' + 'the "rho_setter_kwargs" option in options' + ) + computed_rhos = scenario_rhos(scenario_instance, rho_scale_factor=rho_scale_factor) try: trial_rhos = _get_saved_rhos(fname) except Exception: - raise RuntimeError('Formatting issue in specified rho file ' + fname + - '. Format should be (variable_name,rho_value) for ' - 'each row, with no blank lines, and no ' - 'extra/commented lines') - + raise RuntimeError( + "Formatting issue in specified rho file " + + fname + + ". Format should be (variable_name,rho_value) for " + "each row, with no blank lines, and no " + "extra/commented lines" + ) + index = 0 for b in sorted(scenario_instance.Buses): for t in sorted(scenario_instance.TimePeriods): for g in sorted(scenario_instance.ThermalGeneratorsAtBus[b]): - var = scenario_instance.UnitOn[g,t] + var = scenario_instance.UnitOn[g, t] try: trial_rho = trial_rhos[var.name] except KeyError: - raise RuntimeError(var.name + ' is missing from ' - 'the specified rho file ' + fname) - if (trial_rho >= 1e-14): - print('Using a trial rho') + raise RuntimeError( + var.name + " is missing from " "the specified rho file " + fname + ) + if trial_rho >= 1e-14: + print("Using a trial rho") computed_rhos[index] = (id(var), trial_rho) index += 1 - + return computed_rhos + def _get_saved_rhos(fname): - ''' Return a dict of trial rho values, indexed by variable name. - ''' + """Return a dict of trial rho values, indexed by variable name.""" rhos = dict() - with open(fname, 'r') as f: + with open(fname, "r") as f: for line in f: - line = line.split(',') - vname = ','.join(line[:-1]) + line = line.split(",") + vname = ",".join(line[:-1]) rho = float(line[-1]) rhos[vname] = rho return rhos + def id_fix_list_fct(scenario_instance): - """ specify tuples used by the fixer. + """specify tuples used by the fixer. Args: s (ConcreteModel): the sizes instance. 
@@ -190,15 +211,21 @@ def id_fix_list_fct(scenario_instance): for b in sorted(scenario_instance.Buses): for t in sorted(scenario_instance.TimePeriods): for g in sorted(scenario_instance.ThermalGeneratorsAtBus[b]): - - iter0tuples.append(fixer.Fixer_tuple(scenario_instance.UnitOn[g,t], - th=0.01, nb=None, lb=0, ub=None)) - - iterktuples.append(fixer.Fixer_tuple(scenario_instance.UnitOn[g,t], - th=0.01, nb=None, lb=6, ub=6)) + iter0tuples.append( + fixer.Fixer_tuple( + scenario_instance.UnitOn[g, t], th=0.01, nb=None, lb=0, ub=None + ) + ) + + iterktuples.append( + fixer.Fixer_tuple( + scenario_instance.UnitOn[g, t], th=0.01, nb=None, lb=6, ub=6 + ) + ) return iter0tuples, iterktuples + def write_solution(spcomm, opt_dict, solution_dir): from mpisppy.cylinders.xhatshufflelooper_bounder import XhatShuffleInnerBound from mpisppy.extensions.xhatclosest import XhatClosest @@ -222,12 +249,17 @@ def write_solution(spcomm, opt_dict, solution_dir): # do some checks, to make sure the solution we print will be nonantipative if best_strata_rank != 0: - assert opt_dict["spoke_class"] in (XhatShuffleInnerBound, ) - else: # this is the hub, TODO: also could check for XhatSpecific - assert opt_dict["opt_class"] in (PH, ) + assert opt_dict["spoke_class"] in (XhatShuffleInnerBound,) + else: # this is the hub, TODO: also could check for XhatSpecific + assert opt_dict["opt_class"] in (PH,) assert XhatClosest in opt_dict["opt_kwargs"]["extension_kwargs"]["ext_classes"] - assert "keep_solution" in opt_dict["opt_kwargs"]["options"]["xhat_closest_options"] - assert opt_dict["opt_kwargs"]["options"]["xhat_closest_options"]["keep_solution"] is True + assert ( + "keep_solution" in opt_dict["opt_kwargs"]["options"]["xhat_closest_options"] + ) + assert ( + opt_dict["opt_kwargs"]["options"]["xhat_closest_options"]["keep_solution"] + is True + ) ## if we've passed the above checks, the scenarios should have the tree solution @@ -240,7 +272,7 @@ def write_solution(spcomm, opt_dict, solution_dir): spcomm.cylinder_comm.Barrier() for sname, s in spcomm.opt.local_scenarios.items(): - file_name = os.path.join(solution_dir, sname+'.json') + file_name = os.path.join(solution_dir, sname + ".json") mds = uc._save_uc_results(s, relaxed=False) mds.write(file_name) diff --git a/mpisppy/tests/examples/w_test_data/farmer_get.py b/mpisppy/tests/examples/w_test_data/farmer_get.py index 4099e7eda..521dfc248 100644 --- a/mpisppy/tests/examples/w_test_data/farmer_get.py +++ b/mpisppy/tests/examples/w_test_data/farmer_get.py @@ -23,69 +23,83 @@ write_solution = True + def _parse_args(): # create a config object and parse cfg = config.Config() - + cfg.num_scens_required() cfg.popular_args() cfg.two_sided_args() - cfg.ph_args() - cfg.aph_args() + cfg.ph_args() + cfg.aph_args() cfg.xhatlooper_args() cfg.fwph_args() cfg.lagrangian_args() cfg.lagranger_args() cfg.xhatshuffle_args() - cfg.add_to_config("crops_mult", - description="There will be 3x this many crops (default 1)", - domain=int, - default=1) - cfg.add_to_config("use_norm_rho_updater", - description="Use the norm rho updater extension", - domain=bool, - default=False) - cfg.add_to_config("use-norm-rho-converger", - description="Use the norm rho converger", - domain=bool, - default=False) - cfg.add_to_config("run_async", - description="Run with async projective hedging instead of progressive hedging", - domain=bool, - default=False) - cfg.add_to_config("use_norm_rho_converger", - description="Use the norm rho converger", - domain=bool, - default=False) + cfg.add_to_config( + 
"crops_mult", + description="There will be 3x this many crops (default 1)", + domain=int, + default=1, + ) + cfg.add_to_config( + "use_norm_rho_updater", + description="Use the norm rho updater extension", + domain=bool, + default=False, + ) + cfg.add_to_config( + "use-norm-rho-converger", + description="Use the norm rho converger", + domain=bool, + default=False, + ) + cfg.add_to_config( + "run_async", + description="Run with async projective hedging instead of progressive hedging", + domain=bool, + default=False, + ) + cfg.add_to_config( + "use_norm_rho_converger", + description="Use the norm rho converger", + domain=bool, + default=False, + ) cfg.parse_command_line("farmer_cylinders") return cfg - + def main(): - cfg = _parse_args() num_scen = cfg.num_scens crops_multiplier = cfg.crops_mult - rho_setter = farmer._rho_setter if hasattr(farmer, '_rho_setter') else None + rho_setter = farmer._rho_setter if hasattr(farmer, "_rho_setter") else None if cfg.default_rho is None and rho_setter is None: - raise RuntimeError("No rho_setter so a default must be specified via --default-rho") + raise RuntimeError( + "No rho_setter so a default must be specified via --default-rho" + ) if cfg.use_norm_rho_converger: if not cfg.use_norm_rho_updater: - raise RuntimeError("--use-norm-rho-converger requires --use-norm-rho-updater") + raise RuntimeError( + "--use-norm-rho-converger requires --use-norm-rho-updater" + ) else: ph_converger = NormRhoConverger else: ph_converger = None - + scenario_creator = farmer.scenario_creator scenario_denouement = farmer.scenario_denouement - all_scenario_names = ['scen{}'.format(sn) for sn in range(num_scen)] + all_scenario_names = ["scen{}".format(sn) for sn in range(num_scen)] scenario_creator_kwargs = { - 'use_integer': False, + "use_integer": False, "crops_multiplier": crops_multiplier, } @@ -94,54 +108,67 @@ def main(): if cfg.run_async: # Vanilla APH hub - hub_dict = vanilla.aph_hub(*beans, - scenario_creator_kwargs=scenario_creator_kwargs, - ph_extensions=None, - rho_setter = rho_setter) + hub_dict = vanilla.aph_hub( + *beans, + scenario_creator_kwargs=scenario_creator_kwargs, + ph_extensions=None, + rho_setter=rho_setter, + ) else: # Vanilla PH hub - hub_dict = vanilla.ph_hub(*beans, - scenario_creator_kwargs=scenario_creator_kwargs, - ph_extensions=WXBarWriter, - ph_converger=ph_converger, - rho_setter = rho_setter) - - hub_dict['opt_kwargs']['options']["W_and_xbar_writer"] = {"Wcsvdir": "Wdir"} - hub_dict['opt_kwargs']['options']['W_fname'] = 'w_file.csv' - hub_dict['opt_kwargs']['options']['Xbar_fname'] = 'xbar_file.csv' - + hub_dict = vanilla.ph_hub( + *beans, + scenario_creator_kwargs=scenario_creator_kwargs, + ph_extensions=WXBarWriter, + ph_converger=ph_converger, + rho_setter=rho_setter, + ) + + hub_dict["opt_kwargs"]["options"]["W_and_xbar_writer"] = {"Wcsvdir": "Wdir"} + hub_dict["opt_kwargs"]["options"]["W_fname"] = "w_file.csv" + hub_dict["opt_kwargs"]["options"]["Xbar_fname"] = "xbar_file.csv" ## hack in adaptive rho if cfg.use_norm_rho_updater: - hub_dict['opt_kwargs']['extensions'] = NormRhoUpdater - hub_dict['opt_kwargs']['options']['norm_rho_options'] = {'verbose': True} + hub_dict["opt_kwargs"]["extensions"] = NormRhoUpdater + hub_dict["opt_kwargs"]["options"]["norm_rho_options"] = {"verbose": True} # FWPH spoke if cfg.fwph: - fw_spoke = vanilla.fwph_spoke(*beans, scenario_creator_kwargs=scenario_creator_kwargs) + fw_spoke = vanilla.fwph_spoke( + *beans, scenario_creator_kwargs=scenario_creator_kwargs + ) # Standard Lagrangian bound spoke if 
cfg.lagrangian: - lagrangian_spoke = vanilla.lagrangian_spoke(*beans, - scenario_creator_kwargs=scenario_creator_kwargs, - rho_setter = rho_setter) + lagrangian_spoke = vanilla.lagrangian_spoke( + *beans, + scenario_creator_kwargs=scenario_creator_kwargs, + rho_setter=rho_setter, + ) # Special Lagranger bound spoke if cfg.lagranger: - lagranger_spoke = vanilla.lagranger_spoke(*beans, - scenario_creator_kwargs=scenario_creator_kwargs, - rho_setter = rho_setter) - lagranger_spoke['opt_kwargs']['options']['lagranger_write_W'] = True - lagranger_spoke['opt_kwargs']['options']['lagranger_write_xbar'] = True + lagranger_spoke = vanilla.lagranger_spoke( + *beans, + scenario_creator_kwargs=scenario_creator_kwargs, + rho_setter=rho_setter, + ) + lagranger_spoke["opt_kwargs"]["options"]["lagranger_write_W"] = True + lagranger_spoke["opt_kwargs"]["options"]["lagranger_write_xbar"] = True # xhat looper bound spoke if cfg.xhatlooper: - xhatlooper_spoke = vanilla.xhatlooper_spoke(*beans, scenario_creator_kwargs=scenario_creator_kwargs) + xhatlooper_spoke = vanilla.xhatlooper_spoke( + *beans, scenario_creator_kwargs=scenario_creator_kwargs + ) # xhat shuffle bound spoke if cfg.xhatshuffle: - xhatshuffle_spoke = vanilla.xhatshuffle_spoke(*beans, scenario_creator_kwargs=scenario_creator_kwargs) - + xhatshuffle_spoke = vanilla.xhatshuffle_spoke( + *beans, scenario_creator_kwargs=scenario_creator_kwargs + ) + list_of_spoke_dict = list() if cfg.fwph: list_of_spoke_dict.append(fw_spoke) @@ -158,10 +185,13 @@ def main(): wheel.spin() if write_solution: - wheel.write_first_stage_solution('farmer_plant.csv') - wheel.write_first_stage_solution('farmer_cyl_nonants.npy', - first_stage_solution_writer=sputils.first_stage_nonant_npy_serializer) - wheel.write_tree_solution('farmer_full_solution') + wheel.write_first_stage_solution("farmer_plant.csv") + wheel.write_first_stage_solution( + "farmer_cyl_nonants.npy", + first_stage_solution_writer=sputils.first_stage_nonant_npy_serializer, + ) + wheel.write_tree_solution("farmer_full_solution") + if __name__ == "__main__": main() diff --git a/mpisppy/tests/examples/w_test_data/farmer_set.py b/mpisppy/tests/examples/w_test_data/farmer_set.py index f28373dac..60b62e789 100644 --- a/mpisppy/tests/examples/w_test_data/farmer_set.py +++ b/mpisppy/tests/examples/w_test_data/farmer_set.py @@ -25,129 +25,157 @@ write_solution = True + def _parse_args(): # create a config object and parse cfg = config.Config() - + cfg.num_scens_required() cfg.popular_args() cfg.two_sided_args() - cfg.ph_args() - cfg.aph_args() + cfg.ph_args() + cfg.aph_args() cfg.xhatlooper_args() cfg.fwph_args() cfg.lagrangian_args() cfg.lagranger_args() cfg.xhatshuffle_args() - cfg.add_to_config("crops_mult", - description="There will be 3x this many crops (default 1)", - domain=int, - default=1) - cfg.add_to_config("use_norm_rho_updater", - description="Use the norm rho updater extension", - domain=bool, - default=False) - cfg.add_to_config("use-norm-rho-converger", - description="Use the norm rho converger", - domain=bool, - default=False) - cfg.add_to_config("run_async", - description="Run with async projective hedging instead of progressive hedging", - domain=bool, - default=False) - cfg.add_to_config("use_norm_rho_converger", - description="Use the norm rho converger", - domain=bool, - default=False) + cfg.add_to_config( + "crops_mult", + description="There will be 3x this many crops (default 1)", + domain=int, + default=1, + ) + cfg.add_to_config( + "use_norm_rho_updater", + description="Use the norm rho 
updater extension", + domain=bool, + default=False, + ) + cfg.add_to_config( + "use-norm-rho-converger", + description="Use the norm rho converger", + domain=bool, + default=False, + ) + cfg.add_to_config( + "run_async", + description="Run with async projective hedging instead of progressive hedging", + domain=bool, + default=False, + ) + cfg.add_to_config( + "use_norm_rho_converger", + description="Use the norm rho converger", + domain=bool, + default=False, + ) cfg.parse_command_line("farmer_cylinders") return cfg - + def main(): - cfg = _parse_args() num_scen = cfg.num_scens crops_multiplier = cfg.crops_mult - rho_setter = farmer._rho_setter if hasattr(farmer, '_rho_setter') else None + rho_setter = farmer._rho_setter if hasattr(farmer, "_rho_setter") else None if cfg.default_rho is None and rho_setter is None: - raise RuntimeError("No rho_setter so a default must be specified via --default-rho") + raise RuntimeError( + "No rho_setter so a default must be specified via --default-rho" + ) if cfg.use_norm_rho_converger: if not cfg.use_norm_rho_updater: - raise RuntimeError("--use-norm-rho-converger requires --use-norm-rho-updater") + raise RuntimeError( + "--use-norm-rho-converger requires --use-norm-rho-updater" + ) else: ph_converger = NormRhoConverger else: ph_converger = None - + scenario_creator = farmer.scenario_creator scenario_denouement = farmer.scenario_denouement - all_scenario_names = ['scen{}'.format(sn) for sn in range(num_scen)] + all_scenario_names = ["scen{}".format(sn) for sn in range(num_scen)] scenario_creator_kwargs = { - 'use_integer': False, + "use_integer": False, "crops_multiplier": crops_multiplier, } - multi_ext = {"ext_classes" : [WXBarWriter, WXBarReader]} - + multi_ext = {"ext_classes": [WXBarWriter, WXBarReader]} + # Things needed for vanilla cylinders beans = (cfg, scenario_creator, scenario_denouement, all_scenario_names) if cfg.run_async: # Vanilla APH hub - hub_dict = vanilla.aph_hub(*beans, - scenario_creator_kwargs=scenario_creator_kwargs, - ph_extensions=None, - rho_setter = rho_setter) + hub_dict = vanilla.aph_hub( + *beans, + scenario_creator_kwargs=scenario_creator_kwargs, + ph_extensions=None, + rho_setter=rho_setter, + ) else: # Vanilla PH hub - hub_dict = vanilla.ph_hub(*beans, - scenario_creator_kwargs=scenario_creator_kwargs, - ph_extensions=MultiExtension, - extension_kwargs=multi_ext, - ph_converger=ph_converger, - rho_setter = rho_setter) - - hub_dict['opt_kwargs']['options']["W_and_xbar_reader"] = {"Wcsvdir": "Wdir"} - hub_dict['opt_kwargs']['options']['init_W_fname'] = 'w_file.csv' - hub_dict['opt_kwargs']['options']['init_Xbar_fname'] = 'xbar_file.csv' - - hub_dict['opt_kwargs']['options']["W_and_xbar_writer"] = {"Wcsvdir": "Wdir"} - hub_dict['opt_kwargs']['options']['W_fname'] = 'warm_start_w.csv' - hub_dict['opt_kwargs']['options']['Xbar_fname'] = 'warm_start_xbar.csv' + hub_dict = vanilla.ph_hub( + *beans, + scenario_creator_kwargs=scenario_creator_kwargs, + ph_extensions=MultiExtension, + extension_kwargs=multi_ext, + ph_converger=ph_converger, + rho_setter=rho_setter, + ) + + hub_dict["opt_kwargs"]["options"]["W_and_xbar_reader"] = {"Wcsvdir": "Wdir"} + hub_dict["opt_kwargs"]["options"]["init_W_fname"] = "w_file.csv" + hub_dict["opt_kwargs"]["options"]["init_Xbar_fname"] = "xbar_file.csv" + + hub_dict["opt_kwargs"]["options"]["W_and_xbar_writer"] = {"Wcsvdir": "Wdir"} + hub_dict["opt_kwargs"]["options"]["W_fname"] = "warm_start_w.csv" + hub_dict["opt_kwargs"]["options"]["Xbar_fname"] = "warm_start_xbar.csv" ## hack in adaptive rho 
if cfg.use_norm_rho_updater: - hub_dict['opt_kwargs']['extensions'] = NormRhoUpdater - hub_dict['opt_kwargs']['options']['norm_rho_options'] = {'verbose': True} + hub_dict["opt_kwargs"]["extensions"] = NormRhoUpdater + hub_dict["opt_kwargs"]["options"]["norm_rho_options"] = {"verbose": True} # FWPH spoke if cfg.fwph: - fw_spoke = vanilla.fwph_spoke(*beans, scenario_creator_kwargs=scenario_creator_kwargs) + fw_spoke = vanilla.fwph_spoke( + *beans, scenario_creator_kwargs=scenario_creator_kwargs + ) # Standard Lagrangian bound spoke if cfg.lagrangian: - lagrangian_spoke = vanilla.lagrangian_spoke(*beans, - scenario_creator_kwargs=scenario_creator_kwargs, - rho_setter = rho_setter) + lagrangian_spoke = vanilla.lagrangian_spoke( + *beans, + scenario_creator_kwargs=scenario_creator_kwargs, + rho_setter=rho_setter, + ) # Special Lagranger bound spoke if cfg.lagranger: - lagranger_spoke = vanilla.lagranger_spoke(*beans, - scenario_creator_kwargs=scenario_creator_kwargs, - rho_setter = rho_setter) + lagranger_spoke = vanilla.lagranger_spoke( + *beans, + scenario_creator_kwargs=scenario_creator_kwargs, + rho_setter=rho_setter, + ) # xhat looper bound spoke if cfg.xhatlooper: - xhatlooper_spoke = vanilla.xhatlooper_spoke(*beans, scenario_creator_kwargs=scenario_creator_kwargs) + xhatlooper_spoke = vanilla.xhatlooper_spoke( + *beans, scenario_creator_kwargs=scenario_creator_kwargs + ) # xhat shuffle bound spoke if cfg.xhatshuffle: - xhatshuffle_spoke = vanilla.xhatshuffle_spoke(*beans, scenario_creator_kwargs=scenario_creator_kwargs) - + xhatshuffle_spoke = vanilla.xhatshuffle_spoke( + *beans, scenario_creator_kwargs=scenario_creator_kwargs + ) + list_of_spoke_dict = list() if cfg.fwph: list_of_spoke_dict.append(fw_spoke) @@ -164,10 +192,13 @@ def main(): wheel.spin() if write_solution: - wheel.write_first_stage_solution('farmer_plant.csv') - wheel.write_first_stage_solution('farmer_cyl_nonants.npy', - first_stage_solution_writer=sputils.first_stage_nonant_npy_serializer) - wheel.write_tree_solution('farmer_full_solution') + wheel.write_first_stage_solution("farmer_plant.csv") + wheel.write_first_stage_solution( + "farmer_cyl_nonants.npy", + first_stage_solution_writer=sputils.first_stage_nonant_npy_serializer, + ) + wheel.write_tree_solution("farmer_full_solution") + if __name__ == "__main__": main() diff --git a/mpisppy/tests/helper_extension.py b/mpisppy/tests/helper_extension.py index 958840ad8..628ad8952 100644 --- a/mpisppy/tests/helper_extension.py +++ b/mpisppy/tests/helper_extension.py @@ -11,55 +11,46 @@ from collections import defaultdict import mpisppy.extensions.xhatbase + class TestHelperExtension(mpisppy.extensions.extension.Extension): """ Args: spo (SPOpt object): the calling object """ + def __init__(self, spo): super().__init__(spo) - self.checked = defaultdict(list) # for testing xhatter + self.checked = defaultdict(list) # for testing xhatter # make it a little easier to find out who has been sent here self.opt._TestHelperExtension_checked = self.checked - def pre_solve(self, subproblem): pass - def post_solve_loop(self): pass - def post_solve(self, subproblem, results): self.checked[subproblem.name].append(results.Problem[0].Upper_bound) return results - def pre_iter0(self): pass - def post_iter0(self): pass - def post_iter0_after_sync(self): pass - - + def miditer(self): pass - def enditer(self): pass - - def enditer_after_sync(self): + def enditer_after_sync(self): pass - - + def post_everything(self): pass - diff --git a/mpisppy/tests/straight_tests.py 
b/mpisppy/tests/straight_tests.py index 5f525b6ef..6a5a9773f 100644 --- a/mpisppy/tests/straight_tests.py +++ b/mpisppy/tests/straight_tests.py @@ -11,20 +11,27 @@ import os from mpisppy.tests.utils import get_solver -solver_available, solver_name, persistent_available, persistent_solver_name= get_solver() + +solver_available, solver_name, persistent_available, persistent_solver_name = ( + get_solver() +) badguys = list() + def _doone(cmdstr): print("testing:", cmdstr) ret = os.system(cmdstr) if ret != 0: badguys.append(f"Test failed with code {ret}:\n{cmdstr}") + ##################################################### # farmer -example_dir = os.path.join(os.path.dirname(os.path.abspath(__file__)), '..', '..', 'examples', 'farmer') -fpath = os.path.join(example_dir, 'farmer_cylinders.py') +example_dir = os.path.join( + os.path.dirname(os.path.abspath(__file__)), "..", "..", "examples", "farmer" +) +fpath = os.path.join(example_dir, "farmer_cylinders.py") cmdstr = f"python {fpath} --help" _doone(cmdstr) @@ -34,12 +41,14 @@ def _doone(cmdstr): # aircond -example_dir = os.path.join(os.path.dirname(os.path.abspath(__file__)), '..', '..', 'examples', 'aircond') -fpath = os.path.join(example_dir, 'aircond_cylinders.py') -jpath = os.path.join(example_dir, 'lagranger_factors.json') +example_dir = os.path.join( + os.path.dirname(os.path.abspath(__file__)), "..", "..", "examples", "aircond" +) +fpath = os.path.join(example_dir, "aircond_cylinders.py") +jpath = os.path.join(example_dir, "lagranger_factors.json") # PH and lagranger rescale factors w/ FWPH -cmdstr = f"mpiexec -np 4 python -m mpi4py {fpath} --bundles-per-rank=0 --max-iterations=5 --default-rho=1 --solver-name={solver_name} --branching-factors \"4 3 2\" --Capacity 200 --QuadShortCoeff 0.3 --BeginInventory 50 --rel-gap 0.01 --mu-dev 0 --sigma-dev 40 --max-solver-threads 2 --start-seed 0 --lagranger --lagranger-rho-rescale-factors-json {jpath} --fwph --xhatshuffle" +cmdstr = f'mpiexec -np 4 python -m mpi4py {fpath} --bundles-per-rank=0 --max-iterations=5 --default-rho=1 --solver-name={solver_name} --branching-factors "4 3 2" --Capacity 200 --QuadShortCoeff 0.3 --BeginInventory 50 --rel-gap 0.01 --mu-dev 0 --sigma-dev 40 --max-solver-threads 2 --start-seed 0 --lagranger --lagranger-rho-rescale-factors-json {jpath} --fwph --xhatshuffle' _doone(cmdstr) diff --git a/mpisppy/tests/test_admmWrapper.py b/mpisppy/tests/test_admmWrapper.py index 1371f2653..04936b7f9 100644 --- a/mpisppy/tests/test_admmWrapper.py +++ b/mpisppy/tests/test_admmWrapper.py @@ -15,7 +15,10 @@ import subprocess import os -solver_available, solver_name, persistent_available, persistent_solver_name= get_solver() +solver_available, solver_name, persistent_available, persistent_solver_name = ( + get_solver() +) + class TestAdmmWrapper(unittest.TestCase): def setUp(self): @@ -23,39 +26,39 @@ def setUp(self): def tearDown(self): pass - + def _cfg_creator(self, num_scens): cfg = config.Config() cfg.num_scens_required() cfg.num_scens = num_scens - return cfg - + return cfg - def _make_admm(self, num_scens,verbose=False): + def _make_admm(self, num_scens, verbose=False): cfg = self._cfg_creator(num_scens) options = {} all_scenario_names = distr.scenario_names_creator(num_scens=cfg.num_scens) scenario_creator = distr.scenario_creator scenario_creator_kwargs = distr.kw_creator(cfg) consensus_vars = distr.consensus_vars_creator(cfg.num_scens) - n_cylinders = 1 #distr_admm_cylinders._count_cylinders(cfg) - return admmWrapper.AdmmWrapper(options, - all_scenario_names, - scenario_creator, 
- consensus_vars, - n_cylinders=n_cylinders, - mpicomm=MPI.COMM_WORLD, - scenario_creator_kwargs=scenario_creator_kwargs, - verbose=verbose, - ) - + n_cylinders = 1 # distr_admm_cylinders._count_cylinders(cfg) + return admmWrapper.AdmmWrapper( + options, + all_scenario_names, + scenario_creator, + consensus_vars, + n_cylinders=n_cylinders, + mpicomm=MPI.COMM_WORLD, + scenario_creator_kwargs=scenario_creator_kwargs, + verbose=verbose, + ) + def test_constructor(self): - self._make_admm(2,verbose=True) - for i in range(3,5): + self._make_admm(2, verbose=True) + for i in range(3, 5): self._make_admm(i) - def test_variable_probability(self): + def test_variable_probability(self): admm = self._make_admm(3) q = dict() for sname, s in admm.local_scenarios.items(): @@ -69,7 +72,7 @@ def test_admmWrapper_scenario_creator(self): q = admm.admmWrapper_scenario_creator(sname) self.assertTrue(q.y__DC1DC2__.is_fixed()) self.assertFalse(q.y["DC3_1DC1"].is_fixed()) - + def _slack_name(self, dummy_node): return f"y[{dummy_node}]" @@ -77,16 +80,16 @@ def test_assign_variable_probs_error1(self): admm = self._make_admm(3) admm.consensus_vars["Region1"].append(self._slack_name("DC2DC3")) self.assertRaises(RuntimeError, admm.assign_variable_probs) - + def test_assign_variable_probs_error2(self): admm = self._make_admm(3) admm.consensus_vars["Region1"].remove(self._slack_name("DC3_1DC1")) self.assertRaises(RuntimeError, admm.assign_variable_probs) - def _extracting_output(self, line): import re - pattern = r'\[\s*\d+\.\d+\]\s+\d+\s+(?:L\s*B?|B\s*L?)?\s+([-.\d]+)\s+([-.\d]+)' + + pattern = r"\[\s*\d+\.\d+\]\s+\d+\s+(?:L\s*B?|B\s*L?)?\s+([-.\d]+)\s+([-.\d]+)" match = re.search(pattern, line) @@ -95,20 +98,28 @@ def _extracting_output(self, line): inner_bound = match.group(2) return float(outer_bound), float(inner_bound) else: - raise RuntimeError("The test is probably not correctly adapted: can't match the format of the line") + raise RuntimeError( + "The test is probably not correctly adapted: can't match the format of the line" + ) def test_values(self): - command_line_pairs = [(f"mpiexec -np 3 python -u -m mpi4py distr_admm_cylinders.py --num-scens 3 --default-rho 10 --solver-name {solver_name} --max-iterations 50 --xhatxbar --lagrangian --rel-gap 0.01 --ensure-xhat-feas" \ - , f"python distr_ef.py --solver-name {solver_name} --num-scens 3 --ensure-xhat-feas"), \ - (f"mpiexec -np 6 python -u -m mpi4py distr_admm_cylinders.py --num-scens 5 --default-rho 10 --solver-name {solver_name} --max-iterations 50 --xhatxbar --lagrangian --mnpr 6 --rel-gap 0.05 --scalable --ensure-xhat-feas" \ - , f"python distr_ef.py --solver-name {solver_name} --num-scens 5 --ensure-xhat-feas --mnpr 6 --scalable")] + command_line_pairs = [ + ( + f"mpiexec -np 3 python -u -m mpi4py distr_admm_cylinders.py --num-scens 3 --default-rho 10 --solver-name {solver_name} --max-iterations 50 --xhatxbar --lagrangian --rel-gap 0.01 --ensure-xhat-feas", + f"python distr_ef.py --solver-name {solver_name} --num-scens 3 --ensure-xhat-feas", + ), + ( + f"mpiexec -np 6 python -u -m mpi4py distr_admm_cylinders.py --num-scens 5 --default-rho 10 --solver-name {solver_name} --max-iterations 50 --xhatxbar --lagrangian --mnpr 6 --rel-gap 0.05 --scalable --ensure-xhat-feas", + f"python distr_ef.py --solver-name {solver_name} --num-scens 5 --ensure-xhat-feas --mnpr 6 --scalable", + ), + ] original_dir = os.getcwd() for command_line_pair in command_line_pairs: - target_directory = '../../examples/distr' + target_directory = "../../examples/distr" 
os.chdir(target_directory) objectives = {} command = command_line_pair[0].split() - + result = subprocess.run(command, capture_output=True, text=True) if result.stderr: print("Error output:") @@ -116,37 +127,43 @@ def test_values(self): # Check the standard output if result.stdout: - result_by_line = result.stdout.strip().split('\n') - + result_by_line = result.stdout.strip().split("\n") + target_line = "Iter. Best Bound Best Incumbent Rel. Gap Abs. Gap" precedent_line_target = False i = 0 for line in result_by_line: if precedent_line_target: - if i%2 == 1: + if i % 2 == 1: outer_bound, inner_bound = self._extracting_output(line) objectives["outer bound"] = outer_bound objectives["inner bound"] = inner_bound - precedent_line_target = False + precedent_line_target = False i += 1 elif target_line in line: precedent_line_target = True - + # For the EF command = command_line_pair[1].split() result = subprocess.run(command, capture_output=True, text=True) - result_by_line = result.stdout.strip().split('\n') + result_by_line = result.stdout.strip().split("\n") for i in range(len(result_by_line)): - if "EF objective" in result_by_line[-i-1]: #should be on last line but we can check - decomposed_line = result_by_line[-i-1].split(': ') + if ( + "EF objective" in result_by_line[-i - 1] + ): # should be on last line but we can check + decomposed_line = result_by_line[-i - 1].split(": ") objectives["EF objective"] = float(decomposed_line[1]) try: - correct_order = objectives["outer bound"] <= objectives["EF objective"] <= objectives["inner bound"] + correct_order = ( + objectives["outer bound"] + <= objectives["EF objective"] + <= objectives["inner bound"] + ) except Exception: raise RuntimeError("The output could not be read to capture the values") assert correct_order, f' We obtained {objectives["outer bound"]=}, {objectives["EF objective"]=}, {objectives["inner bound"]=}' os.chdir(original_dir) -if __name__ == '__main__': +if __name__ == "__main__": unittest.main() diff --git a/mpisppy/tests/test_aph.py b/mpisppy/tests/test_aph.py index 8d43711d7..72b242272 100644 --- a/mpisppy/tests/test_aph.py +++ b/mpisppy/tests/test_aph.py @@ -19,21 +19,23 @@ from math import log10, floor import mpisppy.opt.aph import mpisppy.phbase -from mpisppy.tests.examples.sizes.sizes import scenario_creator, \ - scenario_denouement +from mpisppy.tests.examples.sizes.sizes import scenario_creator, scenario_denouement import mpisppy.tests.examples.farmer as farmer from mpisppy.tests.utils import get_solver import mpisppy.MPI as mpi __version__ = 0.6 -solver_available, solver_name, persistent_available, persistent_solver_name= get_solver() +solver_available, solver_name, persistent_available, persistent_solver_name = ( + get_solver() +) fullcomm = mpi.COMM_WORLD global_rank = fullcomm.Get_rank() -#***************************************************************************** + +# ***************************************************************************** class Test_aph_sizes(unittest.TestCase): - """ Test the aph mpisppy code using sizes.""" + """Test the aph mpisppy code using sizes.""" def setUp(self): self.Baseoptions = {} @@ -53,20 +55,20 @@ def setUp(self): self.all3_scenario_names = list() for sn in range(3): - self.all3_scenario_names.append("Scenario"+str(sn+1)) + self.all3_scenario_names.append("Scenario" + str(sn + 1)) self.all10_scenario_names = list() for sn in range(10): - self.all10_scenario_names.append("Scenario"+str(sn+1)) + self.all10_scenario_names.append("Scenario" + str(sn + 1)) def 
_copy_of_base_options(self): retval = {} - for k,v in self.Baseoptions.items(): + for k, v in self.Baseoptions.items(): retval[k] = v return retval - + def test_make_aph(self): - """ Just smoke to verify construction""" + """Just smoke to verify construction""" options = self._copy_of_base_options() options["PHIterLimit"] = 2 options["async_frac_needed"] = 0.5 @@ -80,8 +82,9 @@ def test_make_aph(self): ) assert aph is not None - @unittest.skipIf(not solver_available, - "%s solver is not available" % (solver_name,)) + @unittest.skipIf( + not solver_available, "%s solver is not available" % (solver_name,) + ) def test_aph_basic(self): options = self._copy_of_base_options() options["PHIterLimit"] = 2 @@ -96,8 +99,8 @@ def test_aph_basic(self): ) conv, obj, tbound = aph.APH_main(spcomm=None) - print ("objthing={}, (was=-2435908)".format(obj)) - print ("tbound ={} (was=224106)".format(tbound)) + print("objthing={}, (was=-2435908)".format(obj)) + print("tbound ={} (was=224106)".format(tbound)) def test_bundles(self): options = self._copy_of_base_options() @@ -113,12 +116,12 @@ def test_bundles(self): scenario_creator_kwargs={"scenario_count": 10}, ) conv, obj, tbound = aph.APH_main(spcomm=None) - print ("bundle objthing={}, (was=224712.9)".format(obj)) - print ("bundle tbound ={} (was=223168.5)".format(tbound)) + print("bundle objthing={}, (was=224712.9)".format(obj)) + print("bundle tbound ={} (was=223168.5)".format(tbound)) - - @unittest.skipIf(not solver_available, - "%s solver is not available" % (solver_name,)) + @unittest.skipIf( + not solver_available, "%s solver is not available" % (solver_name,) + ) def test_APHgamma(self): options = self._copy_of_base_options() options["PHIterLimit"] = 2 @@ -135,12 +138,12 @@ def test_APHgamma(self): ) conv, obj, tbound = aph.APH_main(spcomm=None) - print (f"APHgamma={a}; objthing={obj}") - print ("tbound ={}".format(tbound)) - + print(f"APHgamma={a}; objthing={obj}") + print("tbound ={}".format(tbound)) - @unittest.skipIf(not solver_available, - "%s solver is not available" % (solver_name,)) + @unittest.skipIf( + not solver_available, "%s solver is not available" % (solver_name,) + ) def test_use_lag(self): options = self._copy_of_base_options() options["PHIterLimit"] = 2 @@ -156,12 +159,12 @@ def test_use_lag(self): ) conv, obj, tbound = aph.APH_main(spcomm=None) - print (f"use lag objthing={obj}") - print ("tbound ={}".format(tbound)) + print(f"use lag objthing={obj}") + print("tbound ={}".format(tbound)) - - @unittest.skipIf(not solver_available, - "%s solver is not available" % (solver_name,)) + @unittest.skipIf( + not solver_available, "%s solver is not available" % (solver_name,) + ) def test_running_dump(self): # just see if "display_convergence_detail" causes a crash options = self._copy_of_base_options() @@ -178,7 +181,6 @@ def test_running_dump(self): ) conv, obj, tbound = aph.APH_main(spcomm=None) - def test_lags_bundles(self): options = self._copy_of_base_options() options["PHIterLimit"] = 2 @@ -194,12 +196,13 @@ def test_lags_bundles(self): scenario_creator_kwargs={"scenario_count": 10}, ) conv, obj, tbound = aph.APH_main(spcomm=None) - print ("uselag bundle objthing={}, (pre-lag was=224712.9)".format(obj)) - print ("bundle tbound ={} (was=223168.5)".format(tbound)) + print("uselag bundle objthing={}, (pre-lag was=224712.9)".format(obj)) + print("bundle tbound ={} (was=223168.5)".format(tbound)) -#***************************************************************************** + +# 
***************************************************************************** class Test_aph_farmer(unittest.TestCase): - """ Test the aph mpisppy code using farmer.""" + """Test the aph mpisppy code using farmer.""" def setUp(self): self.Baseoptions = {} @@ -219,18 +222,19 @@ def setUp(self): def _copy_of_base_options(self): retval = {} - for k,v in self.Baseoptions.items(): + for k, v in self.Baseoptions.items(): retval[k] = v return retval def round_pos_sig(self, x, sig=1): - return round(x, sig-int(floor(log10(abs(x))))-1) + return round(x, sig - int(floor(log10(abs(x)))) - 1) def _make_scenario_names(self, cnt): return [f"Scenario{i+1}" for i in range(cnt)] - - @unittest.skipIf(not solver_available, - "%s solver is not available" % (solver_name,)) + + @unittest.skipIf( + not solver_available, "%s solver is not available" % (solver_name,) + ) def test_aph_farmer_basic30(self): Aoptions = self._copy_of_base_options() Aoptions["PHIterLimit"] = 2 @@ -247,7 +251,7 @@ def test_aph_farmer_basic30(self): ) conv, obj, tbound = aph.APH_main(spcomm=None) - digits=3 + digits = 3 tbtarget = self.round_pos_sig(137846, digits) boundgot = self.round_pos_sig(-tbound, digits) self.assertEqual(tbtarget, boundgot) @@ -256,8 +260,9 @@ def test_aph_farmer_basic30(self): objgot = self.round_pos_sig(-obj, digits) self.assertEqual(objgot, objtarget) - @unittest.skipIf(not solver_available, - "%s solver is not available" % (solver_name,)) + @unittest.skipIf( + not solver_available, "%s solver is not available" % (solver_name,) + ) def test_aph_farmer_dispatch(self): Aoptions = self._copy_of_base_options() Aoptions["PHIterLimit"] = 2 @@ -274,7 +279,7 @@ def test_aph_farmer_dispatch(self): ) conv, obj, tbound = aph.APH_main(spcomm=None) - digits=3 + digits = 3 tbtarget = self.round_pos_sig(137846, digits) boundgot = self.round_pos_sig(-tbound, digits) self.assertEqual(tbtarget, boundgot) @@ -283,9 +288,9 @@ def test_aph_farmer_dispatch(self): objgot = self.round_pos_sig(-obj, digits) self.assertEqual(objgot, objtarget) - - @unittest.skipIf(not solver_available, - "%s solver is not available" % (solver_name,)) + @unittest.skipIf( + not solver_available, "%s solver is not available" % (solver_name,) + ) def test_aph_farmer_dispatch_bundles(self): Aoptions = self._copy_of_base_options() Aoptions["PHIterLimit"] = 2 @@ -294,7 +299,7 @@ def test_aph_farmer_dispatch_bundles(self): Aoptions["async_sleep_secs"] = 0.01 Aoptions["aph_gamma"] = 1 Aoptions["bundles_per_rank"] = 5 - + aph = mpisppy.opt.aph.APH( Aoptions, self._make_scenario_names(30), @@ -304,7 +309,7 @@ def test_aph_farmer_dispatch_bundles(self): ) conv, obj, tbound = aph.APH_main(spcomm=None) - digits=3 + digits = 3 tbtarget = self.round_pos_sig(133005, digits) boundgot = self.round_pos_sig(-tbound, digits) self.assertEqual(tbtarget, boundgot) @@ -314,5 +319,5 @@ def test_aph_farmer_dispatch_bundles(self): self.assertEqual(objgot, objtarget) -if __name__ == '__main__': +if __name__ == "__main__": unittest.main() diff --git a/mpisppy/tests/test_conf_int_aircond.py b/mpisppy/tests/test_conf_int_aircond.py index 6e5c741e0..aa3f16082 100644 --- a/mpisppy/tests/test_conf_int_aircond.py +++ b/mpisppy/tests/test_conf_int_aircond.py @@ -7,9 +7,7 @@ # full copyright and license information. 
############################################################################### # Provide some test for confidence intervals -""" - -""" +""" """ import os import tempfile @@ -33,20 +31,23 @@ __version__ = 0.4 -solver_available, solver_name, persistent_available, persistent_solver_name= get_solver() +solver_available, solver_name, persistent_available, persistent_solver_name = ( + get_solver() +) module_dir = os.path.dirname(os.path.abspath(__file__)) -global_BFs = [4,3,2] +global_BFs = [4, 3, 2] + -#***************************************************************************** +# ***************************************************************************** class Test_confint_aircond(unittest.TestCase): - """ Test the confint code using aircond.""" + """Test the confint code using aircond.""" @classmethod def setUpClass(self): - self.refmodelname ="mpisppy.tests.examples.aircond" # amalgamator compatible + self.refmodelname = "mpisppy.tests.examples.aircond" # amalgamator compatible # TBD: maybe this code should create the file - self.xhatpath = "farmer_cyl_nonants.spy.npy" + self.xhatpath = "farmer_cyl_nonants.spy.npy" def _get_base_options(self): # Base option has batch options @@ -67,12 +68,14 @@ def _get_base_options(self): def _get_xhatEval_options(self): cfg, scenario_creator_kwargs = self._get_base_options() - options = {"iter0_solver_options": None, - "iterk_solver_options": None, - "display_timing": False, - "solver_name": solver_name, - "verbose": False, - "solver_options":None} + options = { + "iter0_solver_options": None, + "iterk_solver_options": None, + "display_timing": False, + "solver_name": solver_name, + "verbose": False, + "solver_options": None, + } options.update(cfg) return options @@ -80,151 +83,176 @@ def _get_xhatEval_options(self): def _get_xhat_gen_options(self, BFs): num_scens = np.prod(BFs) scenario_names = aircond.scenario_names_creator(num_scens) - xhat_gen_options = {"scenario_names": scenario_names, - "solver_name": solver_name, - "solver_options": None, - "branching_factors": BFs, - "mu_dev": 0, - "sigma_dev": 40, - "start_ups": False, - "start_seed": 0, - } + xhat_gen_options = { + "scenario_names": scenario_names, + "solver_name": solver_name, + "solver_options": None, + "branching_factors": BFs, + "mu_dev": 0, + "sigma_dev": 40, + "start_ups": False, + "start_seed": 0, + } return xhat_gen_options def _make_full_xhat(self, BFs): ndns = sputils.create_nodenames_from_branching_factors(BFs) - retval = {i: [200.0,0] for i in ndns} + retval = {i: [200.0, 0] for i in ndns} return retval - def setUp(self): - self.xhat = {'ROOT': np.array([200.0, 0.0])} # really xhat_1 - tmpxhat = tempfile.mkstemp(prefix="xhat",suffix=".npy") - self.xhat_path = tmpxhat[1] # create an empty .npy file - ciutils.write_xhat(self.xhat,self.xhat_path) - + self.xhat = {"ROOT": np.array([200.0, 0.0])} # really xhat_1 + tmpxhat = tempfile.mkstemp(prefix="xhat", suffix=".npy") + self.xhat_path = tmpxhat[1] # create an empty .npy file + ciutils.write_xhat(self.xhat, self.xhat_path) def tearDown(self): - os.remove(self.xhat_path) + os.remove(self.xhat_path) - def _eval_creator(self): xh_options = self._get_xhatEval_options() - + MMW_options, scenario_creator_kwargs = self._get_base_options() - branching_factors= global_BFs + branching_factors = global_BFs scen_count = np.prod(branching_factors) all_scenario_names = aircond.scenario_names_creator(scen_count) - all_nodenames = sputils.create_nodenames_from_branching_factors(branching_factors) - ev = Xhat_Eval(xh_options, - all_scenario_names, 
- aircond.scenario_creator, - scenario_denouement=None, - all_nodenames=all_nodenames, - scenario_creator_kwargs=scenario_creator_kwargs - ) + all_nodenames = sputils.create_nodenames_from_branching_factors( + branching_factors + ) + ev = Xhat_Eval( + xh_options, + all_scenario_names, + aircond.scenario_creator, + scenario_denouement=None, + all_nodenames=all_nodenames, + scenario_creator_kwargs=scenario_creator_kwargs, + ) return ev - def test_ama_creator(self): options, scenario_creator_kwargs = self._get_base_options() - ama_object = ama.from_module(self.refmodelname, - cfg=options, - use_command_line=False) + ama_object = ama.from_module( + self.refmodelname, cfg=options, use_command_line=False + ) assert ama_object is not None - + def test_MMW_constructor(self): options, scenario_creator_kwargs = self._get_base_options() xhat = ciutils.read_xhat(self.xhat_path) - MMW = MMWci.MMWConfidenceIntervals(self.refmodelname, - options, - xhat, - options['num_batches'], batch_size = options["batch_size"], start = options['num_scens']) + MMW = MMWci.MMWConfidenceIntervals( + self.refmodelname, + options, + xhat, + options["num_batches"], + batch_size=options["batch_size"], + start=options["num_scens"], + ) assert MMW is not None def test_seqsampling_creator(self): optionsBM = config.Config() confidence_config.confidence_config(optionsBM) confidence_config.sequential_config(optionsBM) - optionsBM.quick_assign('BM_h', float, 0.2) - optionsBM.quick_assign('BM_hprime', float, 0.015,) - optionsBM.quick_assign('BM_eps', float, 0.5,) - optionsBM.quick_assign('BM_eps_prime', float, 0.4,) + optionsBM.quick_assign("BM_h", float, 0.2) + optionsBM.quick_assign( + "BM_hprime", + float, + 0.015, + ) + optionsBM.quick_assign( + "BM_eps", + float, + 0.5, + ) + optionsBM.quick_assign( + "BM_eps_prime", + float, + 0.4, + ) optionsBM.quick_assign("BM_p", float, 0.2) optionsBM.quick_assign("BM_q", float, 1.2) optionsBM.quick_assign("solver_name", str, solver_name) - optionsBM.quick_assign("stopping", str, "BM") # TBD use this and drop stopping_criterion from the constructor + optionsBM.quick_assign( + "stopping", str, "BM" + ) # TBD use this and drop stopping_criterion from the constructor optionsBM.quick_assign("solving_type", str, "EF_mstage") optionsBM.quick_assign("branching_factors", pyofig.ListOf(int), global_BFs) - seqsampling.SeqSampling(self.refmodelname, - aircond.xhat_generator_aircond, - optionsBM, - stochastic_sampling=False, - stopping_criterion="BM", - solving_type="EF_mstage", - ) + seqsampling.SeqSampling( + self.refmodelname, + aircond.xhat_generator_aircond, + optionsBM, + stochastic_sampling=False, + stopping_criterion="BM", + solving_type="EF_mstage", + ) - def test_indepscens_seqsampling_creator(self): options, scenario_creator_kwargs = self._get_base_options() - + # We want a very small instance for testing on GitHub. 
optionsBM = config.Config() confidence_config.confidence_config(optionsBM) confidence_config.sequential_config(optionsBM) - optionsBM.quick_assign('BM_h', float, 1.75) - optionsBM.quick_assign('BM_hprime', float, 0.5,) - optionsBM.quick_assign('BM_eps', float, 0.2,) - optionsBM.quick_assign('BM_eps_prime', float, 0.1,) + optionsBM.quick_assign("BM_h", float, 1.75) + optionsBM.quick_assign( + "BM_hprime", + float, + 0.5, + ) + optionsBM.quick_assign( + "BM_eps", + float, + 0.2, + ) + optionsBM.quick_assign( + "BM_eps_prime", + float, + 0.1, + ) optionsBM.quick_assign("BM_p", float, 0.1) optionsBM.quick_assign("BM_q", float, 1.2) optionsBM.quick_assign("solver_name", str, solver_name) - optionsBM.quick_assign("stopping", str, "BM") # TBD use this and drop stopping_criterion from the constructor + optionsBM.quick_assign( + "stopping", str, "BM" + ) # TBD use this and drop stopping_criterion from the constructor optionsBM.quick_assign("solving_type", str, "EF_mstage") optionsBM.quick_assign("branching_factors", pyofig.ListOf(int), global_BFs) - multi_seqsampling.IndepScens_SeqSampling(self.refmodelname, - aircond.xhat_generator_aircond, - optionsBM, - stochastic_sampling=False, - stopping_criterion="BM", - solving_type="EF_mstage", - ) - + multi_seqsampling.IndepScens_SeqSampling( + self.refmodelname, + aircond.xhat_generator_aircond, + optionsBM, + stochastic_sampling=False, + stopping_criterion="BM", + solving_type="EF_mstage", + ) - @unittest.skipIf(not solver_available, - "no solver is available") + @unittest.skipIf(not solver_available, "no solver is available") def test_ama_running(self): options, scenario_creator_kwargs = self._get_base_options() - ama_object = ama.from_module(self.refmodelname, - options, use_command_line=False) + ama_object = ama.from_module(self.refmodelname, options, use_command_line=False) ama_object.run() - obj = round_pos_sig(ama_object.EF_Obj,2) - self.assertEqual(obj, 970.) 
+ obj = round_pos_sig(ama_object.EF_Obj, 2) + self.assertEqual(obj, 970.0) - - @unittest.skipIf(not solver_available, - "no solver is available") + @unittest.skipIf(not solver_available, "no solver is available") def test_xhat_eval_creator(self): ev = self._eval_creator() assert ev is not None - - @unittest.skipIf(not solver_available, - "no solver is available") + @unittest.skipIf(not solver_available, "no solver is available") def test_xhat_eval_evaluate(self): ev = self._eval_creator() base_options, scenario_creator_kwargs = self._get_base_options() - branching_factors= global_BFs + branching_factors = global_BFs full_xhat = self._make_full_xhat(branching_factors) - obj = round_pos_sig(ev.evaluate(full_xhat),2) + obj = round_pos_sig(ev.evaluate(full_xhat), 2) self.assertEqual(obj, 1000.0) # rebaselined feb 2022 - @unittest.skipIf(not solver_available, - "no solver is available") + @unittest.skipIf(not solver_available, "no solver is available") def test_xhat_eval_evaluate_one(self): ev = self._eval_creator() options, scenario_creator_kwargs = self._get_base_options() @@ -232,94 +260,105 @@ def test_xhat_eval_evaluate_one(self): full_xhat = self._make_full_xhat(branching_factors) num_scens = np.prod(branching_factors) - scenario_creator_kwargs['num_scens'] = num_scens + scenario_creator_kwargs["num_scens"] = num_scens all_scenario_names = aircond.scenario_names_creator(num_scens) k = all_scenario_names[0] - obj = ev.evaluate_one(full_xhat,k,ev.local_scenarios[k]) - obj = round_pos_sig(obj,2) - self.assertEqual(obj, 1100.0) # rebaselined feb 2022 + obj = ev.evaluate_one(full_xhat, k, ev.local_scenarios[k]) + obj = round_pos_sig(obj, 2) + self.assertEqual(obj, 1100.0) # rebaselined feb 2022 - @unittest.skipIf(not solver_available, - "no solver is available") + @unittest.skipIf(not solver_available, "no solver is available") def test_MMW_running(self): options, scenario_creator_kwargs = self._get_base_options() xhat = ciutils.read_xhat(self.xhat_path) - MMW = MMWci.MMWConfidenceIntervals(self.refmodelname, - options, - xhat, - options['num_batches'], - batch_size = options["batch_size"], - start = options['num_scens']) - r = MMW.run() - s = round_pos_sig(r['std'],2) - bound = round_pos_sig(r['gap_inner_bound'],2) - self.assertEqual((s,bound), (33.0, 130.0)) - - - @unittest.skipIf(not solver_available, - "no solver is available") + MMW = MMWci.MMWConfidenceIntervals( + self.refmodelname, + options, + xhat, + options["num_batches"], + batch_size=options["batch_size"], + start=options["num_scens"], + ) + r = MMW.run() + s = round_pos_sig(r["std"], 2) + bound = round_pos_sig(r["gap_inner_bound"], 2) + self.assertEqual((s, bound), (33.0, 130.0)) + + @unittest.skipIf(not solver_available, "no solver is available") def test_gap_estimators(self): options, scenario_creator_kwargs = self._get_base_options() - branching_factors= global_BFs + branching_factors = global_BFs scen_count = np.prod(branching_factors) scenario_names = aircond.scenario_names_creator(scen_count, start=1000) - sample_options = {"seed": 0, - "branching_factors": global_BFs} - estim = ciutils.gap_estimators(self.xhat, - self.refmodelname, - solving_type="EF_mstage", - sample_options=sample_options, - cfg = options, - solver_name=solver_name, - scenario_names=scenario_names, - ) - G = estim['G'] - s = estim['s'] - G,s = round_pos_sig(G,3),round_pos_sig(s,3) - #self.assertEqual((G,s), (69.6, 53.8)) # rebaselined April 2022 + sample_options = {"seed": 0, "branching_factors": global_BFs} + estim = ciutils.gap_estimators( + self.xhat, 
+ self.refmodelname, + solving_type="EF_mstage", + sample_options=sample_options, + cfg=options, + solver_name=solver_name, + scenario_names=scenario_names, + ) + G = estim["G"] + s = estim["s"] + G, s = round_pos_sig(G, 3), round_pos_sig(s, 3) + # self.assertEqual((G,s), (69.6, 53.8)) # rebaselined April 2022 # April 2022: s seems to depend a little on the solver. TBD: look into this self.assertEqual(G, 69.6) - - @unittest.skipIf(not solver_available, - "no solver is available") + @unittest.skipIf(not solver_available, "no solver is available") def test_indepscens_seqsampling_running(self): options, scenario_creator_kwargs = self._get_base_options() - branching_factors= global_BFs + branching_factors = global_BFs xhat_gen_options = self._get_xhat_gen_options(branching_factors) - + # We want a very small instance for testing on GitHub. optionsBM = config.Config() confidence_config.confidence_config(optionsBM) confidence_config.sequential_config(optionsBM) - optionsBM.quick_assign('BM_h', float, 1.75) - optionsBM.quick_assign('BM_hprime', float, 0.5,) - optionsBM.quick_assign('BM_eps', float, 0.2,) - optionsBM.quick_assign('BM_eps_prime', float, 0.1,) + optionsBM.quick_assign("BM_h", float, 1.75) + optionsBM.quick_assign( + "BM_hprime", + float, + 0.5, + ) + optionsBM.quick_assign( + "BM_eps", + float, + 0.2, + ) + optionsBM.quick_assign( + "BM_eps_prime", + float, + 0.1, + ) optionsBM.quick_assign("BM_p", float, 0.1) optionsBM.quick_assign("BM_q", float, 1.2) optionsBM.quick_assign("solver_name", str, solver_name) - optionsBM.quick_assign("stopping", str, "BM") # TBD use this and drop stopping_criterion from the constructor + optionsBM.quick_assign( + "stopping", str, "BM" + ) # TBD use this and drop stopping_criterion from the constructor optionsBM.quick_assign("solving_type", str, "EF_mstage") - optionsBM.quick_assign("EF_mstage", bool, True) # TBD: we should not need both + optionsBM.quick_assign("EF_mstage", bool, True) # TBD: we should not need both optionsBM.quick_assign("start_seed", str, 0) optionsBM.quick_assign("branching_factors", pyofig.ListOf(int), [4, 3, 2]) optionsBM.quick_assign("xhat_gen_kwargs", dict, xhat_gen_options) - seq_pb = multi_seqsampling.IndepScens_SeqSampling(self.refmodelname, - aircond.xhat_generator_aircond, - optionsBM, - stochastic_sampling=False, - stopping_criterion="BM", - solving_type="EF_mstage", - ) + seq_pb = multi_seqsampling.IndepScens_SeqSampling( + self.refmodelname, + aircond.xhat_generator_aircond, + optionsBM, + stochastic_sampling=False, + stopping_criterion="BM", + solving_type="EF_mstage", + ) x = seq_pb.run(maxit=50) - T = x['T'] - ub = round_pos_sig(x['CI'][1],2) - self.assertEqual((T,ub), (15, 180.0)) + T = x["T"] + ub = round_pos_sig(x["CI"][1], 2) + self.assertEqual((T, ub), (15, 180.0)) -if __name__ == '__main__': +if __name__ == "__main__": unittest.main() - diff --git a/mpisppy/tests/test_conf_int_farmer.py b/mpisppy/tests/test_conf_int_farmer.py index e130b9277..4bf4761e6 100644 --- a/mpisppy/tests/test_conf_int_farmer.py +++ b/mpisppy/tests/test_conf_int_farmer.py @@ -7,9 +7,7 @@ # full copyright and license information. 
############################################################################### # Provide some test for confidence intervals -""" - -""" +""" """ import os import tempfile @@ -35,19 +33,20 @@ __version__ = 0.6 -solver_available, solver_name, persistent_available, persistent_solver_name= get_solver() +solver_available, solver_name, persistent_available, persistent_solver_name = ( + get_solver() +) module_dir = os.path.dirname(os.path.abspath(__file__)) -#***************************************************************************** +# ***************************************************************************** class Test_confint_farmer(unittest.TestCase): - """ Test the confint code using farmer.""" + """Test the confint code using farmer.""" @classmethod def setUpClass(self): - self.refmodelname ="mpisppy.tests.examples.farmer" - self.arefmodelname ="mpisppy.tests.examples.farmer" # amalgamator compatible - + self.refmodelname = "mpisppy.tests.examples.farmer" + self.arefmodelname = "mpisppy.tests.examples.farmer" # amalgamator compatible def _get_base_options(self): cfg = config.Config() @@ -59,23 +58,25 @@ def _get_base_options(self): cfg.quick_assign("num_batches", int, 2) cfg.quick_assign("batch_size", int, 10) scenario_creator_kwargs = farmer.kw_creator(cfg) - cfg.quick_assign('kwargs', dict,scenario_creator_kwargs) + cfg.quick_assign("kwargs", dict, scenario_creator_kwargs) return cfg def _get_xhatEval_options(self): - options = {"iter0_solver_options": None, - "iterk_solver_options": None, - "display_timing": False, - "solver_name": solver_name, - "verbose": False, - "solver_options":None} + options = { + "iter0_solver_options": None, + "iterk_solver_options": None, + "display_timing": False, + "solver_name": solver_name, + "verbose": False, + "solver_options": None, + } return options def setUp(self): - self.xhat = {'ROOT': np.array([74.0,245.0,181.0])} - tmpxhat = tempfile.mkstemp(prefix="xhat",suffix=".npy") - self.xhat_path = tmpxhat[1] # create an empty .npy file - ciutils.write_xhat(self.xhat,self.xhat_path) + self.xhat = {"ROOT": np.array([74.0, 245.0, 181.0])} + tmpxhat = tempfile.mkstemp(prefix="xhat", suffix=".npy") + self.xhat_path = tmpxhat[1] # create an empty .npy file + ciutils.write_xhat(self.xhat, self.xhat_path) def tearDown(self): os.remove(self.xhat_path) @@ -84,190 +85,220 @@ def test_MMW_constructor(self): cfg = self._get_base_options() xhat = ciutils.read_xhat(self.xhat_path) - MMW = MMWci.MMWConfidenceIntervals(self.refmodelname, - cfg, - xhat, - cfg['num_batches'], batch_size = cfg["batch_size"], start = cfg['num_scens']) + MMW = MMWci.MMWConfidenceIntervals( + self.refmodelname, + cfg, + xhat, + cfg["num_batches"], + batch_size=cfg["batch_size"], + start=cfg["num_scens"], + ) assert MMW is not None - + def test_xhat_read_write(self): - path = tempfile.mkstemp(prefix="xhat",suffix=".npy")[1] - ciutils.write_xhat(self.xhat,path=path) + path = tempfile.mkstemp(prefix="xhat", suffix=".npy")[1] + ciutils.write_xhat(self.xhat, path=path) x = ciutils.read_xhat(path, delete_file=True) - self.assertEqual(list(x['ROOT']), list(self.xhat['ROOT'])) - + self.assertEqual(list(x["ROOT"]), list(self.xhat["ROOT"])) + def test_xhat_read_write_txt(self): - path = tempfile.mkstemp(prefix="xhat",suffix=".npy")[1] - ciutils.writetxt_xhat(self.xhat,path=path) + path = tempfile.mkstemp(prefix="xhat", suffix=".npy")[1] + ciutils.writetxt_xhat(self.xhat, path=path) x = ciutils.readtxt_xhat(path, delete_file=True) - self.assertEqual(list(x['ROOT']), list(self.xhat['ROOT'])) - + 
self.assertEqual(list(x["ROOT"]), list(self.xhat["ROOT"])) + def test_ama_creator(self): cfg = self._get_base_options() ama_options = cfg() ama_options.quick_assign("EF_2stage", bool, True) - ama_object = ama.from_module(self.refmodelname, - cfg=ama_options, - use_command_line=False) + ama_object = ama.from_module( + self.refmodelname, cfg=ama_options, use_command_line=False + ) assert ama_object is not None - + def test_seqsampling_creator(self): optionsBM = config.Config() confidence_config.confidence_config(optionsBM) confidence_config.sequential_config(optionsBM) - optionsBM.quick_assign('BM_h', float, 0.2) - optionsBM.quick_assign('BM_hprime', float, 0.015,) - optionsBM.quick_assign('BM_eps', float, 0.5,) - optionsBM.quick_assign('BM_eps_prime', float, 0.4,) + optionsBM.quick_assign("BM_h", float, 0.2) + optionsBM.quick_assign( + "BM_hprime", + float, + 0.015, + ) + optionsBM.quick_assign( + "BM_eps", + float, + 0.5, + ) + optionsBM.quick_assign( + "BM_eps_prime", + float, + 0.4, + ) optionsBM.quick_assign("BM_p", float, 0.2) optionsBM.quick_assign("BM_q", float, 1.2) optionsBM.quick_assign("solver_name", str, solver_name) - optionsBM.quick_assign("stopping", str, "BM") # TBD use this and drop stopping_criterion from the constructor + optionsBM.quick_assign( + "stopping", str, "BM" + ) # TBD use this and drop stopping_criterion from the constructor optionsBM.quick_assign("solving_type", str, "EF_2stage") - seqsampling.SeqSampling("mpisppy.tests.examples.farmer", - seqsampling.xhat_generator_farmer, - optionsBM, - stochastic_sampling=False, - stopping_criterion="BM", - solving_type="EF_2stage", + seqsampling.SeqSampling( + "mpisppy.tests.examples.farmer", + seqsampling.xhat_generator_farmer, + optionsBM, + stochastic_sampling=False, + stopping_criterion="BM", + solving_type="EF_2stage", ) - def test_pyomo_opt_sense(self): cfg = self._get_base_options() module = self.refmodelname ciutils.pyomo_opt_sense(module, cfg) assert cfg.pyomo_opt_sense == pyo.minimize # minimize is 1 - - @unittest.skipIf(not solver_available, - "no solver is available") + @unittest.skipIf(not solver_available, "no solver is available") def test_ama_running(self): cfg = self._get_base_options() ama_options = cfg() ama_options.quick_assign("EF_2stage", bool, True) - ama_object = ama.from_module(self.refmodelname, - ama_options, use_command_line=False) + ama_object = ama.from_module( + self.refmodelname, ama_options, use_command_line=False + ) ama_object.run() - obj = round_pos_sig(ama_object.EF_Obj,2) + obj = round_pos_sig(ama_object.EF_Obj, 2) self.assertEqual(obj, -130000) - - @unittest.skipIf(not solver_available, - "no solver is available") + @unittest.skipIf(not solver_available, "no solver is available") def test_xhat_eval_creator(self): options = self._get_xhatEval_options() - + MMW_options = self._get_base_options() - scenario_creator_kwargs = MMW_options['kwargs'] - scenario_creator_kwargs['num_scens'] = MMW_options['batch_size'] - ev = Xhat_Eval(options, - farmer.scenario_names_creator(100), - farmer.scenario_creator, - scenario_denouement=None, - scenario_creator_kwargs=scenario_creator_kwargs - ) + scenario_creator_kwargs = MMW_options["kwargs"] + scenario_creator_kwargs["num_scens"] = MMW_options["batch_size"] + ev = Xhat_Eval( + options, + farmer.scenario_names_creator(100), + farmer.scenario_creator, + scenario_denouement=None, + scenario_creator_kwargs=scenario_creator_kwargs, + ) assert ev is not None - - @unittest.skipIf(not solver_available, - "no solver is available") + + @unittest.skipIf(not 
solver_available, "no solver is available") def test_xhat_eval_evaluate(self): options = self._get_xhatEval_options() MMW_options = self._get_base_options() - scenario_creator_kwargs = MMW_options['kwargs'] - scenario_creator_kwargs['num_scens'] = MMW_options['batch_size'] - ev = Xhat_Eval(options, - farmer.scenario_names_creator(100), - farmer.scenario_creator, - scenario_denouement=None, - scenario_creator_kwargs=scenario_creator_kwargs - ) - + scenario_creator_kwargs = MMW_options["kwargs"] + scenario_creator_kwargs["num_scens"] = MMW_options["batch_size"] + ev = Xhat_Eval( + options, + farmer.scenario_names_creator(100), + farmer.scenario_creator, + scenario_denouement=None, + scenario_creator_kwargs=scenario_creator_kwargs, + ) + xhat = ciutils.read_xhat(self.xhat_path) - obj = round_pos_sig(ev.evaluate(xhat),2) + obj = round_pos_sig(ev.evaluate(xhat), 2) self.assertEqual(obj, -1300000.0) - - @unittest.skipIf(not solver_available, - "no solver is available") + + @unittest.skipIf(not solver_available, "no solver is available") def test_xhat_eval_evaluate_one(self): options = self._get_xhatEval_options() MMW_options = self._get_base_options() xhat = ciutils.read_xhat(self.xhat_path) - scenario_creator_kwargs = MMW_options['kwargs'] - scenario_creator_kwargs['num_scens'] = MMW_options['batch_size'] + scenario_creator_kwargs = MMW_options["kwargs"] + scenario_creator_kwargs["num_scens"] = MMW_options["batch_size"] scenario_names = farmer.scenario_names_creator(100) - ev = Xhat_Eval(options, - scenario_names, - farmer.scenario_creator, - scenario_denouement=None, - scenario_creator_kwargs=scenario_creator_kwargs - ) + ev = Xhat_Eval( + options, + scenario_names, + farmer.scenario_creator, + scenario_denouement=None, + scenario_creator_kwargs=scenario_creator_kwargs, + ) k = scenario_names[0] - obj = ev.evaluate_one(xhat,k,ev.local_scenarios[k]) - obj = round_pos_sig(obj,2) + obj = ev.evaluate_one(xhat, k, ev.local_scenarios[k]) + obj = round_pos_sig(obj, 2) self.assertEqual(obj, -48000.0) - - @unittest.skipIf(not solver_available, - "no solver is available") + + @unittest.skipIf(not solver_available, "no solver is available") def test_MMW_running(self): cfg = self._get_base_options() xhat = ciutils.read_xhat(self.xhat_path) - MMW = MMWci.MMWConfidenceIntervals(self.refmodelname, - cfg, - xhat, - cfg['num_batches'], - batch_size = cfg["batch_size"], - start = cfg['num_scens']) - r = MMW.run() - s = round_pos_sig(r['std'],2) - bound = round_pos_sig(r['gap_inner_bound'],2) - self.assertEqual((s,bound), (1.5,96.0)) - - @unittest.skipIf(not solver_available, - "no solver is available") + MMW = MMWci.MMWConfidenceIntervals( + self.refmodelname, + cfg, + xhat, + cfg["num_batches"], + batch_size=cfg["batch_size"], + start=cfg["num_scens"], + ) + r = MMW.run() + s = round_pos_sig(r["std"], 2) + bound = round_pos_sig(r["gap_inner_bound"], 2) + self.assertEqual((s, bound), (1.5, 96.0)) + + @unittest.skipIf(not solver_available, "no solver is available") def test_gap_estimators(self): - scenario_names = farmer.scenario_names_creator(50,start=1000) - estim = ciutils.gap_estimators(self.xhat, - self.refmodelname, - cfg=self._get_base_options(), - solver_name=solver_name, - scenario_names=scenario_names, - ) - G = estim['G'] - s = estim['s'] - G,s = round_pos_sig(G,3),round_pos_sig(s,3) - self.assertEqual((G,s), (456.0,944.0)) - - @unittest.skipIf(not solver_available, - "no solver is available") + scenario_names = farmer.scenario_names_creator(50, start=1000) + estim = ciutils.gap_estimators( + 
self.xhat, + self.refmodelname, + cfg=self._get_base_options(), + solver_name=solver_name, + scenario_names=scenario_names, + ) + G = estim["G"] + s = estim["s"] + G, s = round_pos_sig(G, 3), round_pos_sig(s, 3) + self.assertEqual((G, s), (456.0, 944.0)) + + @unittest.skipIf(not solver_available, "no solver is available") @unittest.skipIf(True, "too big for community solvers") def test_seqsampling_running(self): - #We need a very small instance for testing on GitHub. + # We need a very small instance for testing on GitHub. optionsBM = config.Config() confidence_config.confidence_config(optionsBM) confidence_config.sequential_config(optionsBM) - optionsBM.quick_assign('BM_h', float, 0.2) - optionsBM.quick_assign('BM_hprime', float, 0.015,) - optionsBM.quick_assign('BM_eps', float, 0.5,) - optionsBM.quick_assign('BM_eps_prime', float, 0.4,) + optionsBM.quick_assign("BM_h", float, 0.2) + optionsBM.quick_assign( + "BM_hprime", + float, + 0.015, + ) + optionsBM.quick_assign( + "BM_eps", + float, + 0.5, + ) + optionsBM.quick_assign( + "BM_eps_prime", + float, + 0.4, + ) optionsBM.quick_assign("BM_p", float, 0.2) optionsBM.quick_assign("BM_q", float, 1.2) optionsBM.quick_assign("solver_name", str, solver_name) - optionsBM.quick_assign("stopping", str, "BM") # TBD use this and drop stopping_criterion from the constructor + optionsBM.quick_assign( + "stopping", str, "BM" + ) # TBD use this and drop stopping_criterion from the constructor optionsBM.quick_assign("solving_type", str, "EF_2stage") - seq_pb = seqsampling.SeqSampling("mpisppy.tests.examples.farmer", - seqsampling.xhat_generator_farmer, - optionsBM, - stochastic_sampling=False, - stopping_criterion="BM", - solving_type="EF-2stage", + seq_pb = seqsampling.SeqSampling( + "mpisppy.tests.examples.farmer", + seqsampling.xhat_generator_farmer, + optionsBM, + stochastic_sampling=False, + stopping_criterion="BM", + solving_type="EF-2stage", ) x = seq_pb.run(maxit=50) - T = x['T'] - ub = round_pos_sig(x['CI'][1],2) - self.assertEqual((T,ub), (3, 13.0)) + T = x["T"] + ub = round_pos_sig(x["CI"][1], 2) + self.assertEqual((T, ub), (3, 13.0)) -if __name__ == '__main__': +if __name__ == "__main__": unittest.main() - diff --git a/mpisppy/tests/test_ef_ph.py b/mpisppy/tests/test_ef_ph.py index 8a1154688..5750f1153 100644 --- a/mpisppy/tests/test_ef_ph.py +++ b/mpisppy/tests/test_ef_ph.py @@ -22,18 +22,23 @@ import mpisppy.phbase import mpisppy.utils.sputils as sputils import mpisppy.utils.rho_utils as rho_utils -from mpisppy.tests.examples.sizes.sizes import scenario_creator, \ - scenario_denouement, \ - _rho_setter +from mpisppy.tests.examples.sizes.sizes import ( + scenario_creator, + scenario_denouement, + _rho_setter, +) import mpisppy.tests.examples.hydro.hydro as hydro from mpisppy.extensions.mult_rho_updater import MultRhoUpdater from mpisppy.extensions.xhatspecific import XhatSpecific from mpisppy.extensions.xhatxbar import XhatXbar -from mpisppy.tests.utils import get_solver,round_pos_sig +from mpisppy.tests.utils import get_solver, round_pos_sig __version__ = 0.55 -solver_available,solver_name, persistent_available, persistent_solver_name= get_solver() +solver_available, solver_name, persistent_available, persistent_solver_name = ( + get_solver() +) + def _get_ph_base_options(): Baseoptions = {} @@ -58,34 +63,31 @@ def _get_ph_base_options(): return Baseoptions - - -#***************************************************************************** +# ***************************************************************************** class 
Test_sizes(unittest.TestCase): - """ Test the mpisppy code using sizes.""" + """Test the mpisppy code using sizes.""" def setUp(self): self.all3_scenario_names = list() for sn in range(3): - self.all3_scenario_names.append("Scenario"+str(sn+1)) + self.all3_scenario_names.append("Scenario" + str(sn + 1)) self.all10_scenario_names = list() for sn in range(10): - self.all10_scenario_names.append("Scenario"+str(sn+1)) + self.all10_scenario_names.append("Scenario" + str(sn + 1)) def _copy_of_base_options(self): return _get_ph_base_options() def _fix_creator(self, scenario_name, scenario_count=None): - """ For the test of fixed vars""" - model = scenario_creator( - scenario_name, scenario_count=scenario_count - ) - model.NumProducedFirstStage[5].fix(1134) # 3scen -> 45k + """For the test of fixed vars""" + model = scenario_creator(scenario_name, scenario_count=scenario_count) + model.NumProducedFirstStage[5].fix(1134) # 3scen -> 45k return model - + def test_disable_tictoc(self): import mpisppy.utils.sputils as utils + print("disabling tictoc output") utils.disable_tictoc_output() # now just do anything that would make a little tictoc output @@ -102,7 +104,6 @@ def test_disable_tictoc(self): print("reeanabling tictoc ouput") utils.reenable_tictoc_output() - def test_ph_constructor(self): options = self._copy_of_base_options() options["PHIterLimit"] = 0 @@ -122,12 +123,11 @@ def test_ef_constructor(self): self.all3_scenario_names, scenario_creator, scenario_creator_kwargs={"scenario_count": ScenCount}, - suppress_warnings=True + suppress_warnings=True, ) assert ef is not None - @unittest.skipIf(not solver_available, - "no solver is available") + @unittest.skipIf(not solver_available, "no solver is available") def test_ef_solve(self): options = self._copy_of_base_options() solver = pyo.SolverFactory(options["solver_name"]) @@ -136,17 +136,16 @@ def test_ef_solve(self): self.all3_scenario_names, scenario_creator, scenario_creator_kwargs={"scenario_count": ScenCount}, - suppress_warnings=True + suppress_warnings=True, ) - if '_persistent' in options["solver_name"]: + if "_persistent" in options["solver_name"]: solver.set_instance(ef) results = solver.solve(ef, tee=False) pyo.assert_optimal_termination(results) - sig2eobj = round_pos_sig(pyo.value(ef.EF_Obj),2) + sig2eobj = round_pos_sig(pyo.value(ef.EF_Obj), 2) self.assertEqual(220000.0, sig2eobj) - @unittest.skipIf(not solver_available, - "no solver is available") + @unittest.skipIf(not solver_available, "no solver is available") def test_fix_ef_solve(self): options = self._copy_of_base_options() solver = pyo.SolverFactory(options["solver_name"]) @@ -156,15 +155,14 @@ def test_fix_ef_solve(self): self._fix_creator, scenario_creator_kwargs={"scenario_count": ScenCount}, ) - if '_persistent' in options["solver_name"]: + if "_persistent" in options["solver_name"]: solver.set_instance(ef) results = solver.solve(ef, tee=False) pyo.assert_optimal_termination(results) # the fix creator fixed num prod first stage for size 5 to 1134 self.assertEqual(pyo.value(ef.Scenario1.NumProducedFirstStage[5]), 1134) - @unittest.skipIf(not solver_available, - "no solver is available") + @unittest.skipIf(not solver_available, "no solver is available") def test_ph_iter0(self): options = self._copy_of_base_options() options["PHIterLimit"] = 0 @@ -176,11 +174,10 @@ def test_ph_iter0(self): scenario_denouement, scenario_creator_kwargs={"scenario_count": 3}, ) - + conv, obj, tbound = ph.ph_main() - @unittest.skipIf(not solver_available, - "no solver is available") + 
@unittest.skipIf(not solver_available, "no solver is available") def test_fix_ph_iter0(self): options = self._copy_of_base_options() options["PHIterLimit"] = 0 @@ -192,16 +189,15 @@ def test_fix_ph_iter0(self): scenario_denouement, scenario_creator_kwargs={"scenario_count": 3}, ) - + conv, obj, tbound = ph.ph_main() # Actually, if it is still fixed for one scenario, probably fixed forall. - for k,s in ph.local_scenarios.items(): + for k, s in ph.local_scenarios.items(): self.assertEqual(pyo.value(s.NumProducedFirstStage[5]), 1134) self.assertTrue(s.NumProducedFirstStage[5].is_fixed()) - - @unittest.skipIf(not solver_available, - "no solver is available") + + @unittest.skipIf(not solver_available, "no solver is available") def test_ph_basic(self): options = self._copy_of_base_options() options["PHIterLimit"] = 2 @@ -214,9 +210,7 @@ def test_ph_basic(self): ) conv, obj, tbound = ph.ph_main() - - @unittest.skipIf(not solver_available, - "no solver is available") + @unittest.skipIf(not solver_available, "no solver is available") def test_fix_ph_basic(self): options = self._copy_of_base_options() options["PHIterLimit"] = 2 @@ -228,13 +222,11 @@ def test_fix_ph_basic(self): scenario_creator_kwargs={"scenario_count": 3}, ) conv, obj, tbound = ph.ph_main() - for k,s in ph.local_scenarios.items(): + for k, s in ph.local_scenarios.items(): self.assertTrue(s.NumProducedFirstStage[5].is_fixed()) self.assertEqual(pyo.value(s.NumProducedFirstStage[5]), 1134) - - @unittest.skipIf(not solver_available, - "no solver is available") + @unittest.skipIf(not solver_available, "no solver is available") def test_ph_rhosetter(self): options = self._copy_of_base_options() options["PHIterLimit"] = 2 @@ -247,12 +239,10 @@ def test_ph_rhosetter(self): rho_setter=_rho_setter, ) conv, obj, tbound = ph.ph_main() - sig1obj = round_pos_sig(obj,1) + sig1obj = round_pos_sig(obj, 1) self.assertEqual(200000.0, sig1obj) - - @unittest.skipIf(not solver_available, - "no solver is available") + @unittest.skipIf(not solver_available, "no solver is available") def test_ph_write_read(self): options = self._copy_of_base_options() options["PHIterLimit"] = 2 @@ -274,9 +264,7 @@ def test_ph_write_read(self): os.remove(fname) self.assertEqual(len(rholist), rholen) # not a deep test... - - @unittest.skipIf(not solver_available, - "no solver is available") + @unittest.skipIf(not solver_available, "no solver is available") def test_bundles(self): options = self._copy_of_base_options() options["PHIterLimit"] = 2 @@ -290,8 +278,10 @@ def test_bundles(self): ) conv, obj, tbound = ph.ph_main() - @unittest.skipIf(not persistent_available, - "%s solver is not available" % (persistent_solver_name,)) + @unittest.skipIf( + not persistent_available, + "%s solver is not available" % (persistent_solver_name,), + ) def test_persistent_basic(self): options = self._copy_of_base_options() options["PHIterLimit"] = 10 @@ -316,15 +306,16 @@ def test_persistent_basic(self): ) conv, pobj, tbound = ph.ph_main() - sig2basic = round_pos_sig(basic_obj,2) - sig2pobj = round_pos_sig(pobj,2) + sig2basic = round_pos_sig(basic_obj, 2) + sig2pobj = round_pos_sig(pobj, 2) self.assertEqual(sig2basic, sig2pobj) - - @unittest.skipIf(not persistent_available, - "%s solver is not available" % (persistent_solver_name,)) + + @unittest.skipIf( + not persistent_available, + "%s solver is not available" % (persistent_solver_name,), + ) def test_persistent_bundles(self): - """ This excercises complicated code. 
- """ + """This excercises complicated code.""" options = self._copy_of_base_options() options["PHIterLimit"] = 2 options["bundles_per_rank"] = 2 @@ -350,21 +341,21 @@ def test_persistent_bundles(self): ) conv, pbobj, tbound = ph.ph_main() - sig2basic = round_pos_sig(basic_obj,2) - sig2pbobj = round_pos_sig(pbobj,2) + sig2basic = round_pos_sig(basic_obj, 2) + sig2pbobj = round_pos_sig(pbobj, 2) self.assertEqual(sig2basic, sig2pbobj) - @unittest.skipIf(not solver_available, - "no solver is available") + @unittest.skipIf(not solver_available, "no solver is available") def test_xhat_extension(self): - """ Make sure least one of the xhat extensions runs. - """ + """Make sure least one of the xhat extensions runs.""" from mpisppy.extensions.xhatlooper import XhatLooper + options = self._copy_of_base_options() options["PHIterLimit"] = 1 - options["xhat_looper_options"] = {"xhat_solver_options":\ - options["iterk_solver_options"], - "scen_limit": 3} + options["xhat_looper_options"] = { + "xhat_solver_options": options["iterk_solver_options"], + "scen_limit": 3, + } ph = mpisppy.opt.ph.PH( options, @@ -379,18 +370,19 @@ def test_xhat_extension(self): xhatobj1 = round_pos_sig(ph.extobject._xhat_looper_obj_final, 1) self.assertEqual(xhatobj1, 200000) - @unittest.skipIf(not solver_available, - "no solver is available") + @unittest.skipIf(not solver_available, "no solver is available") def test_wtracker_extension(self): - """ Make sure the wtracker at least does not cause a stack trace - """ + """Make sure the wtracker at least does not cause a stack trace""" from mpisppy.extensions.wtracker_extension import Wtracker_extension + options = self._copy_of_base_options() options["PHIterLimit"] = 4 - options["wtracker_options"] ={"wlen": 3, - "reportlen": 6, - "stdevthresh": 0.1, - "file_prefix": "__1134__"} + options["wtracker_options"] = { + "wlen": 3, + "reportlen": 6, + "stdevthresh": 0.1, + "file_prefix": "__1134__", + } ph = mpisppy.opt.ph.PH( options, self.all3_scenario_names, @@ -400,21 +392,18 @@ def test_wtracker_extension(self): extensions=Wtracker_extension, ) conv, basic_obj, tbound = ph.ph_main() - fileList = glob.glob('__1134__*.csv') + fileList = glob.glob("__1134__*.csv") for filePath in fileList: os.remove(filePath) - @unittest.skipIf(not solver_available, - "no solver is available") + @unittest.skipIf(not solver_available, "no solver is available") def test_wtracker_lacks_iters(self): - """ Make sure the wtracker is graceful with not enough data - """ + """Make sure the wtracker is graceful with not enough data""" from mpisppy.extensions.wtracker_extension import Wtracker_extension + options = self._copy_of_base_options() options["PHIterLimit"] = 4 - options["wtracker_options"] ={"wlen": 10, - "reportlen": 6, - "stdevthresh": 0.1} + options["wtracker_options"] = {"wlen": 10, "reportlen": 6, "stdevthresh": 0.1} ph = mpisppy.opt.ph.PH( options, @@ -425,19 +414,18 @@ def test_wtracker_lacks_iters(self): extensions=Wtracker_extension, ) conv, basic_obj, tbound = ph.ph_main() - - @unittest.skipIf(not solver_available, - "no solver is available") + @unittest.skipIf(not solver_available, "no solver is available") def test_fix_xhat_extension(self): - """ Make sure that ph and xhat do not unfix a fixed Var - """ + """Make sure that ph and xhat do not unfix a fixed Var""" from mpisppy.extensions.xhatlooper import XhatLooper + options = self._copy_of_base_options() options["PHIterLimit"] = 1 - options["xhat_looper_options"] = {"xhat_solver_options":\ - options["iterk_solver_options"], - 
"scen_limit": 3} + options["xhat_looper_options"] = { + "xhat_solver_options": options["iterk_solver_options"], + "scen_limit": 3, + } ph = mpisppy.opt.ph.PH( options, @@ -449,22 +437,21 @@ def test_fix_xhat_extension(self): ) conv, basic_obj, tbound = ph.ph_main() - for k,s in ph.local_scenarios.items(): + for k, s in ph.local_scenarios.items(): self.assertTrue(s.NumProducedFirstStage[5].is_fixed()) self.assertEqual(pyo.value(s.NumProducedFirstStage[5]), 1134) - - @unittest.skipIf(not solver_available, - "no solver is available") + @unittest.skipIf(not solver_available, "no solver is available") def test_xhatlooper_bound(self): - """ smoke test for the looper as an extension - """ + """smoke test for the looper as an extension""" from mpisppy.extensions.xhatlooper import XhatLooper + options = self._copy_of_base_options() options["PHIterLimit"] = 1 - options["xhat_looper_options"] = {"xhat_solver_options":\ - options["iterk_solver_options"], - "scen_limit": 3} + options["xhat_looper_options"] = { + "xhat_solver_options": options["iterk_solver_options"], + "scen_limit": 3, + } ph = mpisppy.opt.ph.PH( options, self.all3_scenario_names, @@ -477,13 +464,11 @@ def test_xhatlooper_bound(self): xhatobj = ph.extobject._xhat_looper_obj_final dopts = sputils.option_string_to_dict("mipgap=0.0001") objbound = ph.post_solve_bound(solver_options=dopts, verbose=False) - self.assertGreaterEqual(xhatobj+1.e-6, objbound) - - @unittest.skipIf(not solver_available, - "no solver is available") + self.assertGreaterEqual(xhatobj + 1.0e-6, objbound) + + @unittest.skipIf(not solver_available, "no solver is available") def smoke_for_extensions(self): - """ Make sure the example extensions can at least run. - """ + """Make sure the example extensions can at least run.""" from mpisppy.extensions.extension import MultiExtension from mpisppy.extensions.fixer import Fixer from mpisppy.extensions.mipgapper import Gapper @@ -493,46 +478,49 @@ def smoke_for_extensions(self): options = self._copy_of_base_options() options["PHIterLimit"] = 2 multi_ext = {"ext_classes": [Fixer, Gapper, XhatLooper, XhatClosest]} - ph = mpisppy.opt.ph.PH(options, self.all3_scenario_names, - scenario_creator, scenario_denouement, - extensions=MultiExtension, - extension_kwargs=multi_ext, + ph = mpisppy.opt.ph.PH( + options, + self.all3_scenario_names, + scenario_creator, + scenario_denouement, + extensions=MultiExtension, + extension_kwargs=multi_ext, ) conv, basic_obj, tbound = ph.ph_main() - - @unittest.skipIf(not solver_available, - "no solver is available") + @unittest.skipIf(not solver_available, "no solver is available") def fix_for_extensions(self): - """ Make sure the example extensions don't destroy fixedness - """ + """Make sure the example extensions don't destroy fixedness""" from mpisppy.extensions.extensions import MultiExtension from mpisppy.extensions.fixer import Fixer from mpisppy.extensions.mipgapper import Gapper from mpisppy.extensions.xhatlooper import XhatLooper from mpisppy.extensions.xhatclosest import XhatClosest + options = self._copy_of_base_options() options["PHIterLimit"] = 2 multi_ext = {"ext_classes": [Fixer, Gapper, XhatLooper, XhatClosest]} - ph = mpisppy.opt.ph.PH(options, self.all3_scenario_names, - self._fix_creator, scenario_denouement, - extensions=MultiExtension, - extension_kwargs=multi_ext, + ph = mpisppy.opt.ph.PH( + options, + self.all3_scenario_names, + self._fix_creator, + scenario_denouement, + extensions=MultiExtension, + extension_kwargs=multi_ext, ) - conv, basic_obj, tbound = ph.ph_main( ) - 
for k,s in ph.local_scenarios.items(): + conv, basic_obj, tbound = ph.ph_main() + for k, s in ph.local_scenarios.items(): self.assertTrue(s.NumProducedFirstStage[5].is_fixed()) self.assertEqual(pyo.value(s.NumProducedFirstStage[5]), 1134) - - @unittest.skipIf(not solver_available, - "no solver is available") + @unittest.skipIf(not solver_available, "no solver is available") def test_ph_xhat_xbar(self): options = self._copy_of_base_options() options["PHIterLimit"] = 5 - options["xhat_xbar_options"] = {"xhat_solver_options": - options["iterk_solver_options"], - "csvname": "xhatxbar.csv"} + options["xhat_xbar_options"] = { + "xhat_solver_options": options["iterk_solver_options"], + "csvname": "xhatxbar.csv", + } ph = mpisppy.opt.ph.PH( options, @@ -540,22 +528,21 @@ def test_ph_xhat_xbar(self): scenario_creator, scenario_denouement, scenario_creator_kwargs={"scenario_count": 3}, - extensions = XhatXbar + extensions=XhatXbar, ) conv, obj, tbound = ph.ph_main() - sig2obj = round_pos_sig(obj,2) + sig2obj = round_pos_sig(obj, 2) self.assertEqual(sig2obj, 230000) - -#***************************************************************************** + +# ***************************************************************************** class Test_hydro(unittest.TestCase): - """ Test the mpisppy code using hydro (three stages).""" + """Test the mpisppy code using hydro (three stages).""" def _copy_of_base_options(self): retval = _get_ph_base_options() retval["branching_factors"] = [3, 3] return retval - def setUp(self): self.options = self._copy_of_base_options() @@ -564,8 +551,10 @@ def setUp(self): self.ScenCount = self.branching_factors[0] * self.branching_factors[1] self.all_scenario_names = list() for sn in range(self.ScenCount): - self.all_scenario_names.append("Scen"+str(sn+1)) - self.all_nodenames = sputils.create_nodenames_from_branching_factors(self.branching_factors) + self.all_scenario_names.append("Scen" + str(sn + 1)) + self.all_nodenames = sputils.create_nodenames_from_branching_factors( + self.branching_factors + ) # end hardwire def test_ph_constructor(self): @@ -589,8 +578,7 @@ def test_ef_constructor(self): ) assert ef is not None - @unittest.skipIf(not solver_available, - "no solver is available") + @unittest.skipIf(not solver_available, "no solver is available") def test_ef_solve(self): options = self._copy_of_base_options() solver = pyo.SolverFactory(options["solver_name"]) @@ -599,7 +587,7 @@ def test_ef_solve(self): hydro.scenario_creator, scenario_creator_kwargs={"branching_factors": self.branching_factors}, ) - if '_persistent' in options["solver_name"]: + if "_persistent" in options["solver_name"]: solver.set_instance(ef) results = solver.solve(ef, tee=False) pyo.assert_optimal_termination(results) @@ -610,9 +598,7 @@ def test_ef_solve(self): self.assertEqual(val2, 60) os.remove("delme.csv") - - @unittest.skipIf(not solver_available, - "no solver is available") + @unittest.skipIf(not solver_available, "no solver is available") def test_ef_csv(self): options = self._copy_of_base_options() solver = pyo.SolverFactory(options["solver_name"]) @@ -621,14 +607,12 @@ def test_ef_csv(self): hydro.scenario_creator, scenario_creator_kwargs={"branching_factors": self.branching_factors}, ) - if '_persistent' in options["solver_name"]: + if "_persistent" in options["solver_name"]: solver.set_instance(ef) results = solver.solve(ef, tee=False) pyo.assert_optimal_termination(results) - - @unittest.skipIf(not solver_available, - "no solver is available") + @unittest.skipIf(not solver_available, "no 
solver is available") def test_ph_solve(self): options = self._copy_of_base_options() @@ -649,20 +633,20 @@ def test_ph_solve(self): sig2eobj = round_pos_sig(ph.Eobjective(), 2) self.assertEqual(190, sig2eobj) - - @unittest.skipIf(not solver_available, - "no solver is available") + @unittest.skipIf(not solver_available, "no solver is available") def test_ph_xhat_specific(self): options = self._copy_of_base_options() options["PHIterLimit"] = 10 # xhat is feasible - options["xhat_specific_options"] = {"xhat_solver_options": - options["iterk_solver_options"], - "xhat_scenario_dict": \ - {"ROOT": "Scen1", - "ROOT_0": "Scen1", - "ROOT_1": "Scen4", - "ROOT_2": "Scen7"}, - "csvname": "specific.csv"} + options["xhat_specific_options"] = { + "xhat_solver_options": options["iterk_solver_options"], + "xhat_scenario_dict": { + "ROOT": "Scen1", + "ROOT_0": "Scen1", + "ROOT_1": "Scen4", + "ROOT_2": "Scen7", + }, + "csvname": "specific.csv", + } ph = mpisppy.opt.ph.PH( options, @@ -671,23 +655,22 @@ def test_ph_xhat_specific(self): hydro.scenario_denouement, all_nodenames=self.all_nodenames, scenario_creator_kwargs={"branching_factors": self.branching_factors}, - extensions = XhatSpecific + extensions=XhatSpecific, ) conv, obj, tbound = ph.ph_main() sig2xhatobj = round_pos_sig(ph.extobject._xhat_specific_obj_final, 2) self.assertEqual(190, sig2xhatobj) - - @unittest.skipIf(not solver_available, - "no solver is available") + @unittest.skipIf(not solver_available, "no solver is available") def test_ph_mult_rho_updater(self): options = self._copy_of_base_options() options["PHIterLimit"] = 5 - options["mult_rho_options"] = {'convergence_tolerance' : 1e-4, - 'rho_update_stop_iteration' : 4, - 'rho_update_start_iteration' : 1, - 'verbose' : False, - } + options["mult_rho_options"] = { + "convergence_tolerance": 1e-4, + "rho_update_stop_iteration": 4, + "rho_update_start_iteration": 1, + "verbose": False, + } ph = mpisppy.opt.ph.PH( options, @@ -696,14 +679,14 @@ def test_ph_mult_rho_updater(self): hydro.scenario_denouement, all_nodenames=self.all_nodenames, scenario_creator_kwargs={"branching_factors": self.branching_factors}, - extensions = MultRhoUpdater, + extensions=MultRhoUpdater, ) conv, obj, tbound = ph.ph_main() obj2 = round_pos_sig(obj, 2) self.assertEqual(210, obj2) - + # MultRhoUpdater - -if __name__ == '__main__': + +if __name__ == "__main__": unittest.main() diff --git a/mpisppy/tests/test_gradient_rho.py b/mpisppy/tests/test_gradient_rho.py index 19af19d16..d81923831 100644 --- a/mpisppy/tests/test_gradient_rho.py +++ b/mpisppy/tests/test_gradient_rho.py @@ -30,7 +30,10 @@ __version__ = 0.21 -solver_available,solver_name, persistent_available, persistent_solver_name= get_solver() +solver_available, solver_name, persistent_available, persistent_solver_name = ( + get_solver() +) + def _create_cfg(): cfg = config.Config() @@ -44,10 +47,12 @@ def _create_cfg(): cfg.default_rho = 1 return cfg -#***************************************************************************** + +# ***************************************************************************** + class Test_gradient_farmer(unittest.TestCase): - """ Test the gradient code using farmer.""" + """Test the gradient code using farmer.""" def _create_ph_farmer(self): self.cfg.num_scens = 3 @@ -56,7 +61,9 @@ def _create_ph_farmer(self): all_scenario_names = farmer.scenario_names_creator(self.cfg.num_scens) scenario_creator_kwargs = farmer.kw_creator(self.cfg) beans = (self.cfg, scenario_creator, scenario_denouement, all_scenario_names) - hub_dict = 
vanilla.ph_hub(*beans, scenario_creator_kwargs=scenario_creator_kwargs) + hub_dict = vanilla.ph_hub( + *beans, scenario_creator_kwargs=scenario_creator_kwargs + ) list_of_spoke_dict = list() wheel = WheelSpinner(hub_dict, list_of_spoke_dict) wheel.spin() @@ -66,9 +73,9 @@ def _create_ph_farmer(self): def setUp(self): self.cfg = _create_cfg() - self.cfg.xhatpath = './examples/rho_test_data/farmer_cyl_nonants.npy' - self.cfg.grad_cost_file_out = '_test_grad_cost.csv' - self.cfg.grad_rho_file_out = './_test_grad_rho.csv' + self.cfg.xhatpath = "./examples/rho_test_data/farmer_cyl_nonants.npy" + self.cfg.grad_cost_file_out = "_test_grad_cost.csv" + self.cfg.grad_rho_file_out = "./_test_grad_rho.csv" self.cfg.grad_order_stat = 0.5 self.cfg.max_iterations = 0 self.ph_object = self._create_ph_farmer() @@ -81,12 +88,12 @@ def test_compute_grad(self): k0, s0 = list(self.grad_object.ph_object.local_scenarios.items())[0] self.ph_object.disable_W_and_prox() grad_cost0 = self.grad_object.compute_grad(k0, s0) - self.assertEqual(grad_cost0[('ROOT', 1)], -230) + self.assertEqual(grad_cost0[("ROOT", 1)], -230) def test_find_grad_cost(self): self.grad_object = grad.Find_Grad(self.ph_object, self.cfg) self.grad_object.find_grad_cost() - self.assertEqual(self.grad_object.c[('scen0', 'DevotedAcreage[CORN0]')], -150) + self.assertEqual(self.grad_object.c[("scen0", "DevotedAcreage[CORN0]")], -150) def test_write_grad_cost(self): self.grad_object = grad.Find_Grad(self.ph_object, self.cfg) @@ -94,43 +101,45 @@ def test_write_grad_cost(self): try: os.remove(self.cfg.grad_cost_file_out) except Exception: - raise RuntimeError('gradient.write_grad_cost() did not write a csv file') - + raise RuntimeError("gradient.write_grad_cost() did not write a csv file") + def test_find_grad_rho(self): - self.cfg.grad_cost_file_in= './examples/rho_test_data/grad_cost.csv' + self.cfg.grad_cost_file_in = "./examples/rho_test_data/grad_cost.csv" self.grad_object = grad.Find_Grad(self.ph_object, self.cfg) rho = self.grad_object.find_grad_rho() - self.assertAlmostEqual(rho['DevotedAcreage[CORN0]'], 3.375) - + self.assertAlmostEqual(rho["DevotedAcreage[CORN0]"], 3.375) + def test_compute_and_write_grad_rho(self): - self.cfg.grad_cost_file_in= './examples/rho_test_data/grad_cost.csv' + self.cfg.grad_cost_file_in = "./examples/rho_test_data/grad_cost.csv" self.grad_object = grad.Find_Grad(self.ph_object, self.cfg) self.grad_object.write_grad_rho() try: os.remove(self.cfg.grad_rho_file_out) except Exception: - raise RuntimeError('gradient.compute_and_write_grad_rho() did not write a csv file') + raise RuntimeError( + "gradient.compute_and_write_grad_rho() did not write a csv file" + ) def test_grad_cost_and_rho(self): - self.cfg.grad_cost_file_in= './examples/rho_test_data/grad_cost.csv' - grad.grad_cost_and_rho('examples.farmer', self.cfg) - with open(self.cfg.grad_cost_file_out, 'r') as f: + self.cfg.grad_cost_file_in = "./examples/rho_test_data/grad_cost.csv" + grad.grad_cost_and_rho("examples.farmer", self.cfg) + with open(self.cfg.grad_cost_file_out, "r") as f: read = csv.reader(f) rows = list(read) self.assertEqual(float(rows[1][2]), -150) - with open(self.cfg.grad_rho_file_out, 'r') as f: + with open(self.cfg.grad_rho_file_out, "r") as f: read = csv.reader(f) rows = list(read) self.assertAlmostEqual(float(rows[3][1]), 2.0616161616161617) os.remove(self.cfg.grad_cost_file_out) os.remove(self.cfg.grad_rho_file_out) - ############################################### - + + class Test_find_rho_farmer(unittest.TestCase): - """ Test the find 
rho code using farmer.""" + """Test the find rho code using farmer.""" def _create_ph_farmer(self): self.cfg.num_scens = 3 @@ -140,7 +149,9 @@ def _create_ph_farmer(self): all_scenario_names = farmer.scenario_names_creator(self.cfg.num_scens) scenario_creator_kwargs = farmer.kw_creator(self.cfg) beans = (self.cfg, scenario_creator, scenario_denouement, all_scenario_names) - hub_dict = vanilla.ph_hub(*beans, scenario_creator_kwargs=scenario_creator_kwargs) + hub_dict = vanilla.ph_hub( + *beans, scenario_creator_kwargs=scenario_creator_kwargs + ) list_of_spoke_dict = list() wheel = WheelSpinner(hub_dict, list_of_spoke_dict) wheel.spin() @@ -151,44 +162,50 @@ def _create_ph_farmer(self): def setUp(self): self.cfg = _create_cfg() self.ph_object = self._create_ph_farmer() - self.cfg.grad_cost_file_in = './examples/rho_test_data/grad_cost.csv' - self.cfg.grad_rho_file_out = './_test_rho.csv' + self.cfg.grad_cost_file_in = "./examples/rho_test_data/grad_cost.csv" + self.cfg.grad_rho_file_out = "./_test_rho.csv" self.cfg.grad_order_stat = 0.4 def test_grad_rho_init(self): self.rho_object = find_rho.Find_Rho(self.ph_object, self.cfg) - + def test_w_denom(self): - self.cfg.grad_cost_file_in= './examples/rho_test_data/grad_cost.csv' + self.cfg.grad_cost_file_in = "./examples/rho_test_data/grad_cost.csv" ### ?? self.cfg.grad_cost_file_in = self.cfg.grad_cost_file_out self.rho_object = find_rho.Find_Rho(self.ph_object, self.cfg) - k0, s0 = list(self.rho_object.ph_object.local_scenarios.items())[0] - denom = {node.name: self.rho_object._w_denom(s0, node) for node in s0._mpisppy_node_list} - self.assertAlmostEqual(denom["ROOT"][0], 25.0) + k0, s0 = list(self.rho_object.ph_object.local_scenarios.items())[0] + denom = { + node.name: self.rho_object._w_denom(s0, node) + for node in s0._mpisppy_node_list + } + self.assertAlmostEqual(denom["ROOT"][0], 25.0) def test_prox_denom(self): - self.cfg.grad_cost_file_out= './examples/rho_test_data/grad_cost.csv' + self.cfg.grad_cost_file_out = "./examples/rho_test_data/grad_cost.csv" self.cfg.grad_cost_file_in = self.cfg.grad_cost_file_out self.rho_object = find_rho.Find_Rho(self.ph_object, self.cfg) k0, s0 = list(self.rho_object.ph_object.local_scenarios.items())[0] - denom = {node.name: self.rho_object._prox_denom(s0, node) for node in s0._mpisppy_node_list} + denom = { + node.name: self.rho_object._prox_denom(s0, node) + for node in s0._mpisppy_node_list + } self.assertAlmostEqual(denom["ROOT"][0], 1250.0) def test_grad_denom(self): - self.cfg.grad_cost_file_out= './examples/rho_test_data/grad_cost.csv' + self.cfg.grad_cost_file_out = "./examples/rho_test_data/grad_cost.csv" self.cfg.grad_cost_file_in = self.cfg.grad_cost_file_out self.rho_object = find_rho.Find_Rho(self.ph_object, self.cfg) denom = self.rho_object._grad_denom() - self.assertAlmostEqual(denom[0], 21.48148148148148) + self.assertAlmostEqual(denom[0], 21.48148148148148) def test_compute_rho(self): - self.cfg.grad_cost_file_out= './examples/rho_test_data/grad_cost.csv' + self.cfg.grad_cost_file_out = "./examples/rho_test_data/grad_cost.csv" self.cfg.grad_cost_file_in = self.cfg.grad_cost_file_out self.rho_object = find_rho.Find_Rho(self.ph_object, self.cfg) rho = self.rho_object.compute_rho(indep_denom=True) - self.assertAlmostEqual(rho['DevotedAcreage[CORN0]'], 6.982758620689655) + self.assertAlmostEqual(rho["DevotedAcreage[CORN0]"], 6.982758620689655) rho = self.rho_object.compute_rho() - self.assertAlmostEqual(rho['DevotedAcreage[CORN0]'], 8.163805471726114) + 
self.assertAlmostEqual(rho["DevotedAcreage[CORN0]"], 8.163805471726114) def test_compute_and_write_grad_rho(self): pass @@ -204,54 +221,57 @@ def test_compute_and_write_grad_rho(self): def test_rho_setter(self): self.cfg.grad_rho_setter = True - self.cfg.rho_file_in = './examples/rho_test_data/rho.csv' + self.cfg.rho_file_in = "./examples/rho_test_data/rho.csv" self.rho_object = find_rho.Find_Rho(self.ph_object, self.cfg) self.set_rho = find_rho.Set_Rho(self.cfg) k0, s0 = list(self.rho_object.ph_object.local_scenarios.items())[0] rho_list = self.set_rho.rho_setter(s0) - nlen = s0._mpisppy_data.nlens['ROOT'] + nlen = s0._mpisppy_data.nlens["ROOT"] self.assertEqual(len(rho_list), nlen) id_var, rho = rho_list[0] self.assertIsInstance(id_var, int) self.assertAlmostEqual(rho, 6.982758620689654) - -#***************************************************************************** + +# ***************************************************************************** + class Test_grad_extension_farmer(unittest.TestCase): - """ Test the gradient extension code using farmer. + """Test the gradient extension code using farmer. See also: farmer_rho_demo.py writen by DLW Sept 2023 TBD: this code should be re-organized""" def setUp(self): print("test grad setup") self.cfg = _create_cfg() - self.cfg.xhatpath = './examples/rho_test_data/farmer_cyl_nonants.npy' - self.cfg.grad_cost_file_out = '_test_grad_cost.csv' - self.cfg.grad_rho_file_out = './_test_grad_rho.csv' + self.cfg.xhatpath = "./examples/rho_test_data/farmer_cyl_nonants.npy" + self.cfg.grad_cost_file_out = "_test_grad_cost.csv" + self.cfg.grad_rho_file_out = "./_test_grad_rho.csv" self.cfg.grad_order_stat = 0.5 self.cfg.max_iterations = 0 - def _run_ph_farmer(self): ext_classes = [Gradient_extension] - + self.cfg.num_scens = 3 scenario_creator = farmer.scenario_creator scenario_denouement = farmer.scenario_denouement all_scenario_names = farmer.scenario_names_creator(self.cfg.num_scens) scenario_creator_kwargs = farmer.kw_creator(self.cfg) beans = (self.cfg, scenario_creator, scenario_denouement, all_scenario_names) - hub_dict = vanilla.ph_hub(*beans, - scenario_creator_kwargs=scenario_creator_kwargs, - ph_extensions=MultiExtension, - ph_converger = None) # ??? DLW Oct 2023: is this correct? - hub_dict["opt_kwargs"]["extension_kwargs"] = {"ext_classes" : ext_classes} - + hub_dict = vanilla.ph_hub( + *beans, + scenario_creator_kwargs=scenario_creator_kwargs, + ph_extensions=MultiExtension, + ph_converger=None, + ) # ??? DLW Oct 2023: is this correct? 
+ hub_dict["opt_kwargs"]["extension_kwargs"] = {"ext_classes": ext_classes} + + hub_dict["opt_kwargs"]["options"]["gradient_extension_options"] = { + "cfg": self.cfg + } + hub_dict["opt_kwargs"]["extensions"] = MultiExtension - hub_dict['opt_kwargs']['options']['gradient_extension_options'] = {'cfg': self.cfg} - hub_dict['opt_kwargs']['extensions'] = MultiExtension - list_of_spoke_dict = list() wheel = WheelSpinner(hub_dict, list_of_spoke_dict) wheel.spin() @@ -261,13 +281,13 @@ def _run_ph_farmer(self): def test_grad_extensions(self): print("** test grad extensions **") - self.cfg.grad_rho_file_out = './_test_rho.csv' - #self.cfg.grad_cost_file_in = './examples/rho_test_data/grad_cost.csv' - self.cfg.xhatpath = './examples/rho_test_data/farmer_cyl_nonants.npy' + self.cfg.grad_rho_file_out = "./_test_rho.csv" + # self.cfg.grad_cost_file_in = './examples/rho_test_data/grad_cost.csv' + self.cfg.xhatpath = "./examples/rho_test_data/farmer_cyl_nonants.npy" self.cfg.max_iterations = 4 self.ph_object = self._run_ph_farmer() self.assertAlmostEqual(self.ph_object.conv, 2.128968965420187, places=1) -if __name__ == '__main__': +if __name__ == "__main__": unittest.main() diff --git a/mpisppy/tests/test_headers.py b/mpisppy/tests/test_headers.py index 36c1302a5..4052a9a05 100644 --- a/mpisppy/tests/test_headers.py +++ b/mpisppy/tests/test_headers.py @@ -17,6 +17,7 @@ yaml = pytest.importorskip("yaml") addheader = pytest.importorskip("addheader") + def test_headers(): root = Path(mpisppy.__file__).parent.parent diff --git a/mpisppy/tests/test_pickle_bundle.py b/mpisppy/tests/test_pickle_bundle.py index 4682b048b..589f1086e 100644 --- a/mpisppy/tests/test_pickle_bundle.py +++ b/mpisppy/tests/test_pickle_bundle.py @@ -7,9 +7,7 @@ # full copyright and license information. 
############################################################################### # Provide some test for pickled bundles -""" - -""" +""" """ import os import tempfile @@ -25,19 +23,28 @@ __version__ = 0.1 -solver_available, solver_name, persistent_available, persistent_solver_name= get_solver() +solver_available, solver_name, persistent_available, persistent_solver_name = ( + get_solver() +) module_dir = os.path.dirname(os.path.abspath(__file__)) badguys = list() -#***************************************************************************** + +# ***************************************************************************** class Test_pickle_bundles(unittest.TestCase): - """ Test the pickle bundle code using aircond.""" + """Test the pickle bundle code using aircond.""" @classmethod def setUpClass(self): - self.refmodelname ="mpisppy.tests.examples.aircond" # amalgamator compatible - self.aircond_dir = os.path.join(os.path.dirname(os.path.abspath(__file__)), '..', '..', 'examples', 'aircond') + self.refmodelname = "mpisppy.tests.examples.aircond" # amalgamator compatible + self.aircond_dir = os.path.join( + os.path.dirname(os.path.abspath(__file__)), + "..", + "..", + "examples", + "aircond", + ) self.BF1 = 2 self.BF2 = 2 @@ -45,13 +52,13 @@ def setUpClass(self): self.SPB = self.BF2 * self.BF3 # implies bundle count self.SC = self.BF1 * self.BF2 * self.BF3 self.BPF = self.BF1 # bundle count (by design) - self.BF_str = f"--branching-factors \"{self.BF1} {self.BF2} {self.BF3}\"" + self.BF_str = f'--branching-factors "{self.BF1} {self.BF2} {self.BF3}"' - self.BI=150 - self.NC=1 - self.QSC=0.3 - self.SD=80 - self.OTC=1.5 + self.BI = 150 + self.NC = 1 + self.QSC = 0.3 + self.SD = 80 + self.OTC = 1.5 self.EC = f"--Capacity 200 --QuadShortCoeff {self.QSC} --BeginInventory {self.BI} --mu-dev 0 --sigma-dev {self.SD} --start-seed 0 --NegInventoryCost={self.NC} --OvertimeProdCost={self.OTC}" def setUp(self): @@ -59,11 +66,10 @@ def setUp(self): self.tempdir = tempfile.TemporaryDirectory() self.tempdir_name = self.tempdir.name os.chdir(self.aircond_dir) - + def tearDown(self): os.chdir(self.cwd) - def test_pickle_bundler(self): cmdstr = f"python bundle_pickler.py {self.BF_str} --pickle-bundles-dir={self.tempdir_name} --scenarios-per-bundle={self.SPB} {self.EC}" ret = os.system(cmdstr) @@ -76,13 +82,15 @@ def test_chain(self): ret = os.system(cmdstr) if ret != 0: raise RuntimeError(f"pickler part of test run failed with code {ret}") - - cmdstr = f"python aircond_cylinders.py --branching-factors=\"{self.BPF}\" --unpickle-bundles-dir={self.tempdir_name} --scenarios-per-bundle={self.SPB} {self.EC} "+\ - f"--default-rho=1 --max-solver-threads=2 --bundles-per-rank=0 --max-iterations=2 --solver-name={solver_name}" + + cmdstr = ( + f'python aircond_cylinders.py --branching-factors="{self.BPF}" --unpickle-bundles-dir={self.tempdir_name} --scenarios-per-bundle={self.SPB} {self.EC} ' + + f"--default-rho=1 --max-solver-threads=2 --bundles-per-rank=0 --max-iterations=2 --solver-name={solver_name}" + ) ret = os.system(cmdstr) if ret != 0: raise RuntimeError(f"cylinders part of test run failed with code {ret}") -if __name__ == '__main__': + +if __name__ == "__main__": unittest.main() - diff --git a/mpisppy/tests/test_pysp_model.py b/mpisppy/tests/test_pysp_model.py index 817bb479e..021d69db7 100644 --- a/mpisppy/tests/test_pysp_model.py +++ b/mpisppy/tests/test_pysp_model.py @@ -11,25 +11,27 @@ import os.path from mpisppy.utils.pysp_model import PySPModel -from mpisppy.tests.test_ef_ph import _get_ph_base_options,\ - 
solver_available,\ - round_pos_sig +from mpisppy.tests.test_ef_ph import ( + _get_ph_base_options, + solver_available, + round_pos_sig, +) import mpisppy.opt.ef import mpisppy.opt.ph file_dir = os.path.dirname(os.path.abspath(__file__)) -sizes_dir = os.path.join(file_dir,'examples','sizes') +sizes_dir = os.path.join(file_dir, "examples", "sizes") + class Test_sizes_abstract(unittest.TestCase): - """ Test PySPModel using abstract sizes case """ + """Test PySPModel using abstract sizes case""" def setUp(self): - self.pysp_sizes3 = PySPModel(os.path.join(sizes_dir, - 'ReferenceModel.py'), - os.path.join(sizes_dir, - 'SIZES3', 'ScenarioStructure.dat') - ) + self.pysp_sizes3 = PySPModel( + os.path.join(sizes_dir, "ReferenceModel.py"), + os.path.join(sizes_dir, "SIZES3", "ScenarioStructure.dat"), + ) def tearDown(self): self.pysp_sizes3.close() @@ -37,12 +39,13 @@ def tearDown(self): def test_ph_constructor(self): pysp_sizes = self.pysp_sizes3 options = _get_ph_base_options() - options['PHIterLimit'] = 0 - ph = mpisppy.opt.ph.PH(options, - pysp_sizes.all_scenario_names, - pysp_sizes.scenario_creator, - lambda *args : None, - ) + options["PHIterLimit"] = 0 + ph = mpisppy.opt.ph.PH( + options, + pysp_sizes.all_scenario_names, + pysp_sizes.scenario_creator, + lambda *args: None, + ) assert ph is not None def test_ef_constructor(self): @@ -55,9 +58,7 @@ def test_ef_constructor(self): ) assert ef is not None - - @unittest.skipIf(not solver_available, - "no solver is available") + @unittest.skipIf(not solver_available, "no solver is available") def test_ef_solve(self): pysp_sizes = self.pysp_sizes3 options = _get_ph_base_options() @@ -69,9 +70,9 @@ def test_ef_solve(self): ) results = ef.solve_extensive_form(tee=False) pyo.assert_optimal_termination(results) - sig2eobj = round_pos_sig(pyo.value(ef.ef.EF_Obj),2) + sig2eobj = round_pos_sig(pyo.value(ef.ef.EF_Obj), 2) self.assertEqual(220000.0, sig2eobj) -if __name__ == '__main__': +if __name__ == "__main__": unittest.main() diff --git a/mpisppy/tests/test_sc.py b/mpisppy/tests/test_sc.py index e9cd1610e..258f711cf 100644 --- a/mpisppy/tests/test_sc.py +++ b/mpisppy/tests/test_sc.py @@ -20,27 +20,54 @@ class TestSC(unittest.TestCase): def setUp(self): self.original_path = sys.path - example_dir = os.path.join(os.path.dirname(os.path.abspath(__file__)), '..', '..', 'examples', 'farmer') + example_dir = os.path.join( + os.path.dirname(os.path.abspath(__file__)), "..", "..", "examples", "farmer" + ) sys.path.append(example_dir) def tearDown(self): sys.path = self.original_path - + def test_farmer_example(self): import schur_complement as sc_example - linear_solver = parapint.linalg.MPISchurComplementLinearSolver(subproblem_solvers={ndx: parapint.linalg.ScipyInterface(compute_inertia=True) for ndx in range(3)}, - schur_complement_solver=parapint.linalg.ScipyInterface(compute_inertia=True)) + linear_solver = parapint.linalg.MPISchurComplementLinearSolver( + subproblem_solvers={ + ndx: parapint.linalg.ScipyInterface(compute_inertia=True) + for ndx in range(3) + }, + schur_complement_solver=parapint.linalg.ScipyInterface( + compute_inertia=True + ), + ) sc_opt = sc_example.solve_with_sc(scen_count=3, linear_solver=linear_solver) sc_sol = sc_opt.gather_var_values_to_rank0() if rank == 0: - self.assertAlmostEqual(sc_sol[('Scenario0', 'DevotedAcreage[CORN0]')], 80, places=5) - self.assertAlmostEqual(sc_sol[('Scenario0', 'DevotedAcreage[SUGAR_BEETS0]')], 250, places=5) - self.assertAlmostEqual(sc_sol[('Scenario0', 'DevotedAcreage[WHEAT0]')], 170, places=5) - 
self.assertAlmostEqual(sc_sol[('Scenario1', 'DevotedAcreage[CORN0]')], 80, places=5) - self.assertAlmostEqual(sc_sol[('Scenario1', 'DevotedAcreage[SUGAR_BEETS0]')], 250, places=5) - self.assertAlmostEqual(sc_sol[('Scenario1', 'DevotedAcreage[WHEAT0]')], 170, places=5) - self.assertAlmostEqual(sc_sol[('Scenario2', 'DevotedAcreage[CORN0]')], 80, places=5) - self.assertAlmostEqual(sc_sol[('Scenario2', 'DevotedAcreage[SUGAR_BEETS0]')], 250, places=5) - self.assertAlmostEqual(sc_sol[('Scenario2', 'DevotedAcreage[WHEAT0]')], 170, places=5) + self.assertAlmostEqual( + sc_sol[("Scenario0", "DevotedAcreage[CORN0]")], 80, places=5 + ) + self.assertAlmostEqual( + sc_sol[("Scenario0", "DevotedAcreage[SUGAR_BEETS0]")], 250, places=5 + ) + self.assertAlmostEqual( + sc_sol[("Scenario0", "DevotedAcreage[WHEAT0]")], 170, places=5 + ) + self.assertAlmostEqual( + sc_sol[("Scenario1", "DevotedAcreage[CORN0]")], 80, places=5 + ) + self.assertAlmostEqual( + sc_sol[("Scenario1", "DevotedAcreage[SUGAR_BEETS0]")], 250, places=5 + ) + self.assertAlmostEqual( + sc_sol[("Scenario1", "DevotedAcreage[WHEAT0]")], 170, places=5 + ) + self.assertAlmostEqual( + sc_sol[("Scenario2", "DevotedAcreage[CORN0]")], 80, places=5 + ) + self.assertAlmostEqual( + sc_sol[("Scenario2", "DevotedAcreage[SUGAR_BEETS0]")], 250, places=5 + ) + self.assertAlmostEqual( + sc_sol[("Scenario2", "DevotedAcreage[WHEAT0]")], 170, places=5 + ) diff --git a/mpisppy/tests/test_stoch_admmWrapper.py b/mpisppy/tests/test_stoch_admmWrapper.py index 7f9e76180..9ec6a5166 100644 --- a/mpisppy/tests/test_stoch_admmWrapper.py +++ b/mpisppy/tests/test_stoch_admmWrapper.py @@ -14,7 +14,10 @@ from mpisppy.tests.utils import get_solver import os -solver_available, solver_name, persistent_available, persistent_solver_name= get_solver() +solver_available, solver_name, persistent_available, persistent_solver_name = ( + get_solver() +) + class TestStochAdmmWrapper(unittest.TestCase): def setUp(self): @@ -22,7 +25,7 @@ def setUp(self): def tearDown(self): pass - + def _cfg_creator(self, num_stoch_scens, num_admm_subproblems): # Needs to be modified cfg = config.Config() @@ -32,10 +35,12 @@ def _cfg_creator(self, num_stoch_scens, num_admm_subproblems): cfg.num_admm_subproblems = num_admm_subproblems return cfg - + def _make_admm(self, num_stoch_scens, num_admm_subproblems, verbose=False): cfg = self._cfg_creator(num_stoch_scens, num_admm_subproblems) - admm, all_admm_stoch_subproblem_scenario_names = stoch_distr_admm_cylinders._make_admm(cfg,n_cylinders=1,verbose=verbose) + admm, all_admm_stoch_subproblem_scenario_names = ( + stoch_distr_admm_cylinders._make_admm(cfg, n_cylinders=1, verbose=verbose) + ) return admm def _get_base_options(self): @@ -48,14 +53,13 @@ def _get_base_options(self): cfg.quick_assign("num_batches", int, 2) cfg.quick_assign("batch_size", int, 10) - def test_constructor(self): - for num_stoch_scens in range(3,5): - for num_admm_subproblems in range(3,5): + for num_stoch_scens in range(3, 5): + for num_admm_subproblems in range(3, 5): self._make_admm(num_stoch_scens, num_admm_subproblems) def test_variable_probability(self): - admm = self._make_admm(4,3) + admm = self._make_admm(4, 3) q = dict() for sname, s in admm.local_admm_stoch_subproblem_scenarios.items(): q[sname] = admm.var_prob_list(s) @@ -63,29 +67,31 @@ def test_variable_probability(self): self.assertEqual(q["ADMM_STOCH_Region3_StochasticScenario1"][0][1], 0) def test_admmWrapper_scenario_creator(self): - admm = self._make_admm(4,3) + admm = self._make_admm(4, 3) sname = 
"ADMM_STOCH_Region3_StochasticScenario1" q = admm.admmWrapper_scenario_creator(sname) self.assertTrue(q.y__DC1DC2__.is_fixed()) self.assertFalse(q.y["DC3_1DC1"].is_fixed()) - + def _slack_name(self, dummy_node): return f"y[{dummy_node}]" def test_assign_variable_probs_error1(self): - admm = self._make_admm(2,3) - admm.consensus_vars["Region1"].append((self._slack_name("DC2DC3"),2)) # The variable is added in the second stage + admm = self._make_admm(2, 3) + admm.consensus_vars["Region1"].append( + (self._slack_name("DC2DC3"), 2) + ) # The variable is added in the second stage self.assertRaises(RuntimeError, admm.assign_variable_probs, admm) - + def test_assign_variable_probs_error2(self): - admm = self._make_admm(2,3) - admm.consensus_vars["Region1"].remove((self._slack_name("DC3_1DC1"),2)) + admm = self._make_admm(2, 3) + admm.consensus_vars["Region1"].remove((self._slack_name("DC3_1DC1"), 2)) self.assertRaises(RuntimeError, admm.assign_variable_probs, admm) - def _extracting_output(self, line): import re - pattern = r'\[\s*\d+\.\d+\]\s+\d+\s+(?:L\s*B?|B\s*L?)?\s+([-.\d]+)\s+([-.\d]+)' + + pattern = r"\[\s*\d+\.\d+\]\s+\d+\s+(?:L\s*B?|B\s*L?)?\s+([-.\d]+)\s+([-.\d]+)" match = re.search(pattern, line) @@ -94,27 +100,40 @@ def _extracting_output(self, line): inner_bound = match.group(2) return float(outer_bound), float(inner_bound) else: - raise RuntimeError("The test is probably not correctly adapted: can't match the format of the line") - + raise RuntimeError( + "The test is probably not correctly adapted: can't match the format of the line" + ) def test_values(self): - command_line_pairs = [(f"mpiexec -np 3 python -u -m mpi4py stoch_distr_admm_cylinders.py --num-stoch-scens 10 --num-admm-subproblems 2 --default-rho 10 --solver-name {solver_name} --max-iterations 50 --xhatxbar --lagrangian --rel-gap 0.001 --num-stage 3" \ - , f"python stoch_distr_ef.py --solver-name {solver_name} --num-stoch-scens 10 --num-admm-subproblems 2 --num-stage 3"), \ - (f"mpiexec -np 3 python -u -m mpi4py stoch_distr_admm_cylinders.py --num-stoch-scens 5 --num-admm-subproblems 3 --default-rho 5 --solver-name {solver_name} --max-iterations 50 --xhatxbar --lagrangian --rel-gap 0.01 --ensure-xhat-feas" \ - , f"python stoch_distr_ef.py --solver-name {solver_name} --num-stoch-scens 5 --num-admm-subproblems 3 --ensure-xhat-feas"), \ - (f"mpiexec -np 6 python -u -m mpi4py stoch_distr_admm_cylinders.py --num-stoch-scens 4 --num-admm-subproblems 5 --default-rho 15 --solver-name {solver_name} --max-iterations 30 --xhatxbar --lagrangian --mnpr 5 --scalable --ensure-xhat-feas" \ - , f"python stoch_distr_ef.py --solver-name {solver_name} --num-stoch-scens 4 --num-admm-subproblems 5 --mnpr 5 --scalable --ensure-xhat-feas") ] - #command_line = f"mpiexec -np 6 python -m mpi4py examples/stoch_distr/stoch_distr_admm_cylinders.py --num-admm-subproblems 2 --num-stoch-scens 4 --default-rho 10 --solver-name {solver_name} --max-iterations 100 --xhatxbar --lagrangian" + command_line_pairs = [ + ( + f"mpiexec -np 3 python -u -m mpi4py stoch_distr_admm_cylinders.py --num-stoch-scens 10 --num-admm-subproblems 2 --default-rho 10 --solver-name {solver_name} --max-iterations 50 --xhatxbar --lagrangian --rel-gap 0.001 --num-stage 3", + f"python stoch_distr_ef.py --solver-name {solver_name} --num-stoch-scens 10 --num-admm-subproblems 2 --num-stage 3", + ), + ( + f"mpiexec -np 3 python -u -m mpi4py stoch_distr_admm_cylinders.py --num-stoch-scens 5 --num-admm-subproblems 3 --default-rho 5 --solver-name {solver_name} --max-iterations 50 
--xhatxbar --lagrangian --rel-gap 0.01 --ensure-xhat-feas", + f"python stoch_distr_ef.py --solver-name {solver_name} --num-stoch-scens 5 --num-admm-subproblems 3 --ensure-xhat-feas", + ), + ( + f"mpiexec -np 6 python -u -m mpi4py stoch_distr_admm_cylinders.py --num-stoch-scens 4 --num-admm-subproblems 5 --default-rho 15 --solver-name {solver_name} --max-iterations 30 --xhatxbar --lagrangian --mnpr 5 --scalable --ensure-xhat-feas", + f"python stoch_distr_ef.py --solver-name {solver_name} --num-stoch-scens 4 --num-admm-subproblems 5 --mnpr 5 --scalable --ensure-xhat-feas", + ), + ] + # command_line = f"mpiexec -np 6 python -m mpi4py examples/stoch_distr/stoch_distr_admm_cylinders.py --num-admm-subproblems 2 --num-stoch-scens 4 --default-rho 10 --solver-name {solver_name} --max-iterations 100 --xhatxbar --lagrangian" original_dir = os.getcwd() for j in range(len(command_line_pairs)): - if j == 0: # The first line is executed in the test directory because it has a 3-stage problem. This one does not insure xhatfeasibility but luckily works - target_directory = 'examples/stoch_distr' - if j != 0: # The other lines are executed in the real directory because it ensures xhat feasibility - target_directory = '../../examples/stoch_distr' + if ( + j == 0 + ): # The first line is executed in the test directory because it has a 3-stage problem. This one does not insure xhatfeasibility but luckily works + target_directory = "examples/stoch_distr" + if ( + j != 0 + ): # The other lines are executed in the real directory because it ensures xhat feasibility + target_directory = "../../examples/stoch_distr" os.chdir(target_directory) objectives = {} command = command_line_pairs[j][0].split() - + result = subprocess.run(command, capture_output=True, text=True) if result.stderr: print("Error output:") @@ -122,39 +141,46 @@ def test_values(self): # Check the standard output if result.stdout: - result_by_line = result.stdout.strip().split('\n') - + result_by_line = result.stdout.strip().split("\n") + target_line = "Iter. Best Bound Best Incumbent Rel. Gap Abs. 
Gap" precedent_line_target = False i = 0 for line in result_by_line: if precedent_line_target: - if i%2 == 1: + if i % 2 == 1: outer_bound, inner_bound = self._extracting_output(line) objectives["outer bound"] = outer_bound objectives["inner bound"] = inner_bound - precedent_line_target = False + precedent_line_target = False i += 1 elif target_line in line: precedent_line_target = True - + # For the EF command = command_line_pairs[j][1].split() result = subprocess.run(command, capture_output=True, text=True) - result_by_line = result.stdout.strip().split('\n') + result_by_line = result.stdout.strip().split("\n") for i in range(len(result_by_line)): - if "EF objective" in result_by_line[-i-1]: #should be on last line but we can check - decomposed_line = result_by_line[-i-1].split(': ') - objectives["EF objective"] = float(decomposed_line[1])#math.ceil(float(decomposed_line[1])) + if ( + "EF objective" in result_by_line[-i - 1] + ): # should be on last line but we can check + decomposed_line = result_by_line[-i - 1].split(": ") + objectives["EF objective"] = float( + decomposed_line[1] + ) # math.ceil(float(decomposed_line[1])) try: - correct_order = objectives["outer bound"] <= objectives["EF objective"] <= objectives["inner bound"] + correct_order = ( + objectives["outer bound"] + <= objectives["EF objective"] + <= objectives["inner bound"] + ) except Exception: raise RuntimeError("The output could not be read to capture the values") assert correct_order, f' We obtained {objectives["outer bound"]=}, {objectives["EF objective"]=}, {objectives["inner bound"]=}' os.chdir(original_dir) - -if __name__ == '__main__': +if __name__ == "__main__": unittest.main() diff --git a/mpisppy/tests/test_w_writer.py b/mpisppy/tests/test_w_writer.py index ea09b9c61..96385af18 100644 --- a/mpisppy/tests/test_w_writer.py +++ b/mpisppy/tests/test_w_writer.py @@ -28,7 +28,10 @@ __version__ = 0.1 -solver_available,solver_name, persistent_available, persistent_solver_name= get_solver() +solver_available, solver_name, persistent_available, persistent_solver_name = ( + get_solver() +) + def _create_cfg(): cfg = config.Config() @@ -41,16 +44,18 @@ def _create_cfg(): cfg.default_rho = 1 return cfg -#***************************************************************************** + +# ***************************************************************************** + class Test_w_writer_farmer(unittest.TestCase): - """ Test the gradient code using farmer.""" + """Test the gradient code using farmer.""" def _create_ph_farmer(self, ph_extensions=None, max_iter=100): - self.w_file_name = './examples/w_test_data/w_file.csv' - self.temp_w_file_name = './examples/w_test_data/_temp_w_file.csv' - self.xbar_file_name = './examples/w_test_data/xbar_file.csv' - self.temp_xbar_file_name = './examples/w_test_data/_temp_xbar_file.csv' + self.w_file_name = "./examples/w_test_data/w_file.csv" + self.temp_w_file_name = "./examples/w_test_data/_temp_w_file.csv" + self.xbar_file_name = "./examples/w_test_data/xbar_file.csv" + self.temp_xbar_file_name = "./examples/w_test_data/_temp_xbar_file.csv" self.cfg.num_scens = 3 scenario_creator = farmer.scenario_creator scenario_denouement = farmer.scenario_denouement @@ -58,15 +63,19 @@ def _create_ph_farmer(self, ph_extensions=None, max_iter=100): scenario_creator_kwargs = farmer.kw_creator(self.cfg) self.cfg.max_iterations = max_iter beans = (self.cfg, scenario_creator, scenario_denouement, all_scenario_names) - hub_dict = vanilla.ph_hub(*beans, scenario_creator_kwargs=scenario_creator_kwargs, 
ph_extensions=ph_extensions) - if ph_extensions==WXBarWriter: #tbd - hub_dict['opt_kwargs']['options']["W_and_xbar_writer"] = {"Wcsvdir": "Wdir"} - hub_dict['opt_kwargs']['options']['W_fname'] = self.temp_w_file_name - hub_dict['opt_kwargs']['options']['Xbar_fname'] = self.temp_xbar_file_name - if ph_extensions==WXBarReader: - hub_dict['opt_kwargs']['options']["W_and_xbar_reader"] = {"Wcsvdir": "Wdir"} - hub_dict['opt_kwargs']['options']['init_W_fname'] = self.w_file_name - hub_dict['opt_kwargs']['options']['init_Xbar_fname'] = self.xbar_file_name + hub_dict = vanilla.ph_hub( + *beans, + scenario_creator_kwargs=scenario_creator_kwargs, + ph_extensions=ph_extensions, + ) + if ph_extensions == WXBarWriter: # tbd + hub_dict["opt_kwargs"]["options"]["W_and_xbar_writer"] = {"Wcsvdir": "Wdir"} + hub_dict["opt_kwargs"]["options"]["W_fname"] = self.temp_w_file_name + hub_dict["opt_kwargs"]["options"]["Xbar_fname"] = self.temp_xbar_file_name + if ph_extensions == WXBarReader: + hub_dict["opt_kwargs"]["options"]["W_and_xbar_reader"] = {"Wcsvdir": "Wdir"} + hub_dict["opt_kwargs"]["options"]["init_W_fname"] = self.w_file_name + hub_dict["opt_kwargs"]["options"]["init_Xbar_fname"] = self.xbar_file_name list_of_spoke_dict = list() wheel = WheelSpinner(hub_dict, list_of_spoke_dict) wheel.spin() @@ -77,10 +86,10 @@ def _create_ph_farmer(self, ph_extensions=None, max_iter=100): def setUp(self): self.cfg = _create_cfg() self.ph_object = None - + def test_wwriter(self): self.ph_object = self._create_ph_farmer(ph_extensions=WXBarWriter, max_iter=5) - with open(self.temp_w_file_name, 'r') as f: + with open(self.temp_w_file_name, "r") as f: read = csv.reader(f) rows = list(read) self.assertAlmostEqual(float(rows[1][2]), 70.84705093609978, places=5) @@ -89,7 +98,7 @@ def test_wwriter(self): def test_xbarwriter(self): self.ph_object = self._create_ph_farmer(ph_extensions=WXBarWriter, max_iter=5) - with open(self.temp_xbar_file_name, 'r') as f: + with open(self.temp_xbar_file_name, "r") as f: read = csv.reader(f) rows = list(read) self.assertAlmostEqual(float(rows[1][1]), 274.2239371483933, places=5) @@ -99,18 +108,27 @@ def test_xbarwriter(self): def test_wreader(self): self.ph_object = self._create_ph_farmer(ph_extensions=WXBarReader, max_iter=1) for sname, scenario in self.ph_object.local_scenarios.items(): - if sname == 'scen0': - self.assertAlmostEqual(scenario._mpisppy_model.W[("ROOT", 1)]._value, 70.84705093609978) - if sname == 'scen1': - self.assertAlmostEqual(scenario._mpisppy_model.W[("ROOT", 0)]._value, -41.104251445950844) + if sname == "scen0": + self.assertAlmostEqual( + scenario._mpisppy_model.W[("ROOT", 1)]._value, 70.84705093609978 + ) + if sname == "scen1": + self.assertAlmostEqual( + scenario._mpisppy_model.W[("ROOT", 0)]._value, -41.104251445950844 + ) def test_xbarreader(self): self.ph_object = self._create_ph_farmer(ph_extensions=WXBarReader, max_iter=1) for sname, scenario in self.ph_object.local_scenarios.items(): - if sname == 'scen0': - self.assertAlmostEqual(scenario._mpisppy_model.xbars[("ROOT", 1)]._value, 274.2239371483933) - if sname == 'scen1': - self.assertAlmostEqual(scenario._mpisppy_model.xbars[("ROOT", 0)]._value, 96.88717449844287) + if sname == "scen0": + self.assertAlmostEqual( + scenario._mpisppy_model.xbars[("ROOT", 1)]._value, 274.2239371483933 + ) + if sname == "scen1": + self.assertAlmostEqual( + scenario._mpisppy_model.xbars[("ROOT", 0)]._value, 96.88717449844287 + ) + -if __name__ == '__main__': +if __name__ == "__main__": unittest.main() diff --git 
a/mpisppy/tests/test_with_cylinders.py b/mpisppy/tests/test_with_cylinders.py index 4b035b62b..d0770c128 100644 --- a/mpisppy/tests/test_with_cylinders.py +++ b/mpisppy/tests/test_with_cylinders.py @@ -23,10 +23,14 @@ __version__ = 0.1 from mpi4py import MPI + comm = MPI.COMM_WORLD assert comm.size == 2, "These tests need two ranks" -solver_available,solver_name, persistent_available, persistent_solver_name= get_solver() +solver_available, solver_name, persistent_available, persistent_solver_name = ( + get_solver() +) + def _create_cfg(): cfg = config.Config() @@ -38,11 +42,12 @@ def _create_cfg(): cfg.default_rho = 1 return cfg -#***************************************************************************** - +# ***************************************************************************** + + class Test_farmer_with_cylinders(unittest.TestCase): - """ Test the find rho code using farmer.""" + """Test the find rho code using farmer.""" def _create_stuff(self, iters=5): # assumes setup has been called; very specific... @@ -53,51 +58,51 @@ def _create_stuff(self, iters=5): all_scenario_names = farmer.scenario_names_creator(self.cfg.num_scens) scenario_creator_kwargs = farmer.kw_creator(self.cfg) beans = (self.cfg, scenario_creator, scenario_denouement, all_scenario_names) - hub_dict = vanilla.ph_hub(*beans, scenario_creator_kwargs=scenario_creator_kwargs) + hub_dict = vanilla.ph_hub( + *beans, scenario_creator_kwargs=scenario_creator_kwargs + ) return scenario_creator_kwargs, beans, hub_dict - def setUp(self): self.cfg = _create_cfg() - @unittest.skipIf(not solver_available, - "no solver is available") + @unittest.skipIf(not solver_available, "no solver is available") def test_xhatxbar_extended(self): from mpisppy.extensions.test_extension import TestExtension - + self.cfg.xhatxbar_args() scenario_creator_kwargs, beans, hub_dict = self._create_stuff() list_of_spoke_dict = list() # xhat shuffle bound spoke - xhatxbar_spoke = vanilla.xhatxbar_spoke(*beans, - scenario_creator_kwargs=scenario_creator_kwargs, - ph_extensions=TestExtension) + xhatxbar_spoke = vanilla.xhatxbar_spoke( + *beans, + scenario_creator_kwargs=scenario_creator_kwargs, + ph_extensions=TestExtension, + ) list_of_spoke_dict.append(xhatxbar_spoke) wheel = WheelSpinner(hub_dict, list_of_spoke_dict) wheel.spin() if wheel.global_rank == 1: xhat_object = wheel.spcomm.opt - self.assertIn('post_solve', xhat_object._TestExtension_who_is_called) + self.assertIn("post_solve", xhat_object._TestExtension_who_is_called) - - @unittest.skipIf(not solver_available, - "no solver is available") + @unittest.skipIf(not solver_available, "no solver is available") def test_xhatshuffle_extended(self): print("begin xhatshuffle_extended with test extension") from mpisppy.extensions.test_extension import TestExtension - + self.cfg.xhatxbar_args() scenario_creator_kwargs, beans, hub_dict = self._create_stuff() list_of_spoke_dict = list() # xhat shuffle bound spoke ext = TestExtension - xhatshuffle_spoke = vanilla.xhatshuffle_spoke(*beans, - scenario_creator_kwargs=scenario_creator_kwargs, - ph_extensions=ext) + xhatshuffle_spoke = vanilla.xhatshuffle_spoke( + *beans, scenario_creator_kwargs=scenario_creator_kwargs, ph_extensions=ext + ) list_of_spoke_dict.append(xhatshuffle_spoke) wheel = WheelSpinner(hub_dict, list_of_spoke_dict) @@ -105,36 +110,36 @@ def test_xhatshuffle_extended(self): if wheel.global_rank == 1: xhat_object = wheel.spcomm.opt print(f"{xhat_object._TestExtension_who_is_called =}") - self.assertIn('post_solve', 
xhat_object._TestExtension_who_is_called) - + self.assertIn("post_solve", xhat_object._TestExtension_who_is_called) - @unittest.skipIf(not solver_available, - "no solver is available") + @unittest.skipIf(not solver_available, "no solver is available") def test_xhatshuffle_coverage(self): print("begin xhatshuffle_coverage") from helper_extension import TestHelperExtension - + self.cfg.xhatxbar_args() scenario_creator_kwargs, beans, hub_dict = self._create_stuff(iters=1) list_of_spoke_dict = list() # xhat shuffle bound spoke ext = TestHelperExtension - xhatshuffle_spoke = vanilla.xhatshuffle_spoke(*beans, - scenario_creator_kwargs=scenario_creator_kwargs, - ph_extensions=ext) + xhatshuffle_spoke = vanilla.xhatshuffle_spoke( + *beans, scenario_creator_kwargs=scenario_creator_kwargs, ph_extensions=ext + ) list_of_spoke_dict.append(xhatshuffle_spoke) wheel = WheelSpinner(hub_dict, list_of_spoke_dict) wheel.spin() if wheel.global_rank == 1: xhat_object = wheel.spcomm.opt - for idx,v in xhat_object._TestHelperExtension_checked.items(): - self.assertNotEqual(v[0], v[1], f"xhatshuffle does not seem to change things for {idx} (and maybe others)") - - - @unittest.skipIf(not solver_available, - "no solver is available") + for idx, v in xhat_object._TestHelperExtension_checked.items(): + self.assertNotEqual( + v[0], + v[1], + f"xhatshuffle does not seem to change things for {idx} (and maybe others)", + ) + + @unittest.skipIf(not solver_available, "no solver is available") def test_lagrangian(self): print("Start lagrangian") scenario_creator_kwargs, beans, hub_dict = self._create_stuff() @@ -142,16 +147,18 @@ def test_lagrangian(self): self.cfg.lagrangian_args() list_of_spoke_dict = list() # xhat shuffle bound spoke - lagrangian_spoke = vanilla.lagrangian_spoke(*beans, - scenario_creator_kwargs=scenario_creator_kwargs,) + lagrangian_spoke = vanilla.lagrangian_spoke( + *beans, + scenario_creator_kwargs=scenario_creator_kwargs, + ) list_of_spoke_dict.append(lagrangian_spoke) wheel = WheelSpinner(hub_dict, list_of_spoke_dict) wheel.spin() if wheel.global_rank == 1: - #print(f"{wheel.spcomm.bound= }") + # print(f"{wheel.spcomm.bound= }") self.assertAlmostEqual(wheel.spcomm.bound, -109499.5160897, 1) -if __name__ == '__main__': +if __name__ == "__main__": unittest.main() diff --git a/mpisppy/tests/utils.py b/mpisppy/tests/utils.py index f3b3f1020..65cb753b6 100644 --- a/mpisppy/tests/utils.py +++ b/mpisppy/tests/utils.py @@ -11,9 +11,12 @@ import pyomo.environ as pyo from math import log10, floor + def get_solver(): - solvers = [n+e for e in ('_persistent', '') for n in ("cplex","gurobi","xpress")] - + solvers = [ + n + e for e in ("_persistent", "") for n in ("cplex", "gurobi", "xpress") + ] + for solver_name in solvers: try: solver_available = pyo.SolverFactory(solver_name).available() @@ -21,17 +24,18 @@ def get_solver(): solver_available = False if solver_available: break - - if '_persistent' in solver_name: + + if "_persistent" in solver_name: persistent_solver_name = solver_name else: - persistent_solver_name = solver_name+"_persistent" + persistent_solver_name = solver_name + "_persistent" try: persistent_available = pyo.SolverFactory(persistent_solver_name).available() except Exception: persistent_available = False - + return solver_available, solver_name, persistent_available, persistent_solver_name + def round_pos_sig(x, sig=1): - return round(x, sig-int(floor(log10(abs(x))))-1) + return round(x, sig - int(floor(log10(abs(x)))) - 1) diff --git a/mpisppy/utils/admmWrapper.py 
b/mpisppy/utils/admmWrapper.py index 73f71c005..2393c672b 100644 --- a/mpisppy/utils/admmWrapper.py +++ b/mpisppy/utils/admmWrapper.py @@ -6,27 +6,31 @@ # All rights reserved. Please see the files COPYRIGHT.md and LICENSE.md for # full copyright and license information. ############################################################################### -#creating the class AdmmWrapper +# creating the class AdmmWrapper import mpisppy.utils.sputils as sputils import pyomo.environ as pyo from mpisppy import MPI + global_rank = MPI.COMM_WORLD.Get_rank() + def _consensus_vars_number_creator(consensus_vars): """associates to each consensus vars the number of time it appears Args: - consensus_vars (dict): dictionary which keys are the subproblems and values are the list of consensus variables + consensus_vars (dict): dictionary which keys are the subproblems and values are the list of consensus variables present in the subproblem Returns: - consensus_vars_number (dict): dictionary whose keys are the consensus variables + consensus_vars_number (dict): dictionary whose keys are the consensus variables and values are the number of subproblems the variable is linked to. """ - consensus_vars_number={} + consensus_vars_number = {} for subproblem in consensus_vars: for var in consensus_vars[subproblem]: - if var not in consensus_vars_number: # instanciates consensus_vars_number[var] + if ( + var not in consensus_vars_number + ): # instanciates consensus_vars_number[var] consensus_vars_number[var] = 0 consensus_vars_number[var] += 1 for var in consensus_vars_number: @@ -34,62 +38,67 @@ def _consensus_vars_number_creator(consensus_vars): print(f"The consensus variable {var} appears in a single subproblem") return consensus_vars_number -class AdmmWrapper(): - """ This class assigns variable probabilities and creates wrapper for the scenario creator - Args: - options (dict): options - all_scenario_names (list): all scenario names - scenario_creator (fct): returns a concrete model with special things - consensus_vars (dict): dictionary which keys are the subproblems and values are the list of consensus variables - present in the subproblem - n_cylinder (int): number of cylinders that will ultimately be used - mpicomm (MPI comm): creates communication - scenario_creator_kwargs (dict): kwargs passed directly to scenario_creator. - verbose (boolean): if True gives extra debugging information - - Attributes: - local_scenarios (dict of scenario objects): concrete models with - extra data, key is name - local_scenario_names (list): names of locals +class AdmmWrapper: + """This class assigns variable probabilities and creates wrapper for the scenario creator + + Args: + options (dict): options + all_scenario_names (list): all scenario names + scenario_creator (fct): returns a concrete model with special things + consensus_vars (dict): dictionary which keys are the subproblems and values are the list of consensus variables + present in the subproblem + n_cylinder (int): number of cylinders that will ultimately be used + mpicomm (MPI comm): creates communication + scenario_creator_kwargs (dict): kwargs passed directly to scenario_creator. 
+ verbose (boolean): if True gives extra debugging information + + Attributes: + local_scenarios (dict of scenario objects): concrete models with + extra data, key is name + local_scenario_names (list): names of locals """ - def __init__(self, - options, - all_scenario_names, - scenario_creator, #supplied by the user/ modeller, used only here - consensus_vars, - n_cylinders, - mpicomm, - scenario_creator_kwargs=None, - verbose=None, + + def __init__( + self, + options, + all_scenario_names, + scenario_creator, # supplied by the user/ modeller, used only here + consensus_vars, + n_cylinders, + mpicomm, + scenario_creator_kwargs=None, + verbose=None, ): assert len(options) == 0, "no options supported by AdmmWrapper" # We need local_scenarios self.local_scenarios = {} scenario_tree = sputils._ScenTree(["ROOT"], all_scenario_names) - assert mpicomm.Get_size() % n_cylinders == 0, \ - f"{mpicomm.Get_size()=} and {n_cylinders=}, but {mpicomm.Get_size() % n_cylinders=} should be 0" - ranks_per_cylinder = mpicomm.Get_size() // n_cylinders - - scenario_names_to_rank, _rank_slices, _scenario_slices =\ - scenario_tree.scen_names_to_ranks(ranks_per_cylinder) + assert ( + mpicomm.Get_size() % n_cylinders == 0 + ), f"{mpicomm.Get_size()=} and {n_cylinders=}, but {mpicomm.Get_size() % n_cylinders=} should be 0" + ranks_per_cylinder = mpicomm.Get_size() // n_cylinders + + scenario_names_to_rank, _rank_slices, _scenario_slices = ( + scenario_tree.scen_names_to_ranks(ranks_per_cylinder) + ) self.cylinder_rank = mpicomm.Get_rank() // n_cylinders - #self.cylinder_rank = mpicomm.Get_rank() % ranks_per_cylinder + # self.cylinder_rank = mpicomm.Get_rank() % ranks_per_cylinder self.all_scenario_names = all_scenario_names - #taken from spbase + # taken from spbase self.local_scenario_names = [ all_scenario_names[i] for i in _rank_slices[self.cylinder_rank] ] for sname in self.local_scenario_names: s = scenario_creator(sname, **scenario_creator_kwargs) self.local_scenarios[sname] = s - #we are not collecting instantiation time + # we are not collecting instantiation time self.consensus_vars = consensus_vars self.verbose = verbose self.consensus_vars_number = _consensus_vars_number_creator(consensus_vars) - #check_consensus_vars(consensus_vars) + # check_consensus_vars(consensus_vars) self.assign_variable_probs(verbose=self.verbose) self.number_of_scenario = len(all_scenario_names) @@ -105,59 +114,72 @@ def var_prob_list(self, s): where it is present. Otherwise it has a probability 0. 
""" return self.varprob_dict[s] - + def assign_variable_probs(self, verbose=False): self.varprob_dict = {} - #we collect the consensus variables - all_consensus_vars = {var_stage_tuple: None for admm_subproblem_names in self.consensus_vars for var_stage_tuple in self.consensus_vars[admm_subproblem_names]} + # we collect the consensus variables + all_consensus_vars = { + var_stage_tuple: None + for admm_subproblem_names in self.consensus_vars + for var_stage_tuple in self.consensus_vars[admm_subproblem_names] + } error_list1 = [] error_list2 = [] - for sname,s in self.local_scenarios.items(): + for sname, s in self.local_scenarios.items(): if verbose: - print(f"AdmmWrapper.assign_variable_probs is processing scenario: {sname}") + print( + f"AdmmWrapper.assign_variable_probs is processing scenario: {sname}" + ) varlist = list() self.varprob_dict[s] = list() for vstr in all_consensus_vars.keys(): v = s.find_component(vstr) if vstr in self.consensus_vars[sname]: if v is not None: - #variables that should be on the model - self.varprob_dict[s].append((id(v),1/(self.consensus_vars_number[vstr]))) + # variables that should be on the model + self.varprob_dict[s].append( + (id(v), 1 / (self.consensus_vars_number[vstr])) + ) else: - error_list1.append((sname,vstr)) + error_list1.append((sname, vstr)) else: - if v is None: + if v is None: # This var will not be indexed but that might not matter?? # Going to replace the brackets - v2str = vstr.replace("[","__").replace("]","__") # To distinguish the consensus_vars fixed at 0 + v2str = vstr.replace("[", "__").replace( + "]", "__" + ) # To distinguish the consensus_vars fixed at 0 v = pyo.Var() - + ### Lines equivalent to setattr(s, v2str, v) without warning s.del_component(v2str) s.add_component(v2str, v) v.fix(0) - self.varprob_dict[s].append((id(v),0)) + self.varprob_dict[s].append((id(v), 0)) else: - error_list2.append((sname,vstr)) - if v is not None: #if None the error is trapped earlier + error_list2.append((sname, vstr)) + if v is not None: # if None the error is trapped earlier varlist.append(v) objfunc = sputils.find_active_objective(s) - #this will overwrite the nonants already there - sputils.attach_root_node(s, objfunc, varlist) + # this will overwrite the nonants already there + sputils.attach_root_node(s, objfunc, varlist) if len(error_list1) + len(error_list2) > 0: - raise RuntimeError (f"for each pair (scenario, variable) of the following list, the variable appears" - f"in consensus_vars, but not in the model:\n {error_list1} \n" - f"for each pair (scenario, variable) of the following list, the variable appears " - f"in the model, but not in consensus var: \n {error_list2}") - + raise RuntimeError( + f"for each pair (scenario, variable) of the following list, the variable appears" + f"in consensus_vars, but not in the model:\n {error_list1} \n" + f"for each pair (scenario, variable) of the following list, the variable appears " + f"in the model, but not in consensus var: \n {error_list2}" + ) def admmWrapper_scenario_creator(self, sname): - #this is the function the user will supply for all cylinders - assert sname in self.local_scenario_names, f"{global_rank=} {sname=} \n {self.local_scenario_names=}" - #should probably be deleted as it takes time + # this is the function the user will supply for all cylinders + assert ( + sname in self.local_scenario_names + ), f"{global_rank=} {sname=} \n {self.local_scenario_names=}" + # should probably be deleted as it takes time scenario = self.local_scenarios[sname] # Grabs the objective function 
and multiplies its value by the number of scenarios to compensate for the probabilities @@ -165,4 +187,3 @@ def admmWrapper_scenario_creator(self, sname): obj.expr = obj.expr * self.number_of_scenario return scenario - \ No newline at end of file diff --git a/mpisppy/utils/amalgamator.py b/mpisppy/utils/amalgamator.py index b051bbdc5..3f02bce2d 100644 --- a/mpisppy/utils/amalgamator.py +++ b/mpisppy/utils/amalgamator.py @@ -53,35 +53,39 @@ You might want to copy your cfg before passing it in. """ -hubs_and_multi_compatibility = {'ph': True, - 'aph': True, - #'lshaped':False, No parser = not incuded - #'cross_scen_hub':False, No parser = not included - } - -spokes_and_multi_compatibility = {'fwph':False, - 'lagrangian':True, - 'lagranger':True, - 'xhatlooper':False, - 'xhatshuffle':True, - 'xhatspecific':True, - 'xhatlshaped':False, - 'slammax':False, - 'slammin':False, - 'cross_scenario_cuts':False} - -default_unused_spokes = ['xhatlooper', 'xhatspecific'] - -extensions_classes = {'fixer':Fixer, - #NOTE: Before adding other extensions classes there, create: - # - a function for it in config.py - # - a function add_EXTNAME in vanila.py (or cfg_vanilla) - - } - -#========== +hubs_and_multi_compatibility = { + "ph": True, + "aph": True, + #'lshaped':False, No parser = not incuded + #'cross_scen_hub':False, No parser = not included +} + +spokes_and_multi_compatibility = { + "fwph": False, + "lagrangian": True, + "lagranger": True, + "xhatlooper": False, + "xhatshuffle": True, + "xhatspecific": True, + "xhatlshaped": False, + "slammax": False, + "slammin": False, + "cross_scenario_cuts": False, +} + +default_unused_spokes = ["xhatlooper", "xhatspecific"] + +extensions_classes = { + "fixer": Fixer, + # NOTE: Before adding other extensions classes there, create: + # - a function for it in config.py + # - a function add_EXTNAME in vanila.py (or cfg_vanilla) +} + +# ========== # Utilities to interact with config + def _bool_option(cfg, oname): return oname in cfg and cfg[oname] @@ -91,20 +95,21 @@ def add_options(cfg, parser_choice=None): # (note: by "parser" we mean "config") assert parser_choice is not None - parser_name = parser_choice+"_args" + parser_name = parser_choice + "_args" adder = getattr(cfg, parser_name) adder() -#========== -#Cylinder name checks +# ========== +# Cylinder name checks + def find_hub(cylinders, is_multi=False): hubs = set(cylinders).intersection(set(hubs_and_multi_compatibility.keys())) if len(hubs) == 1: hub = list(hubs)[0] if is_multi and not hubs_and_multi_compatibility[hub]: - raise RuntimeError(f"The hub {hub} does not work with multistage problems" ) + raise RuntimeError(f"The hub {hub} does not work with multistage problems") else: raise RuntimeError("There must be exactly one hub among cylinders") return hub @@ -115,21 +120,30 @@ def find_spokes(cylinders, is_multi=False): for c in cylinders: if c not in hubs_and_multi_compatibility: if c not in spokes_and_multi_compatibility: - raise RuntimeError(f"The cylinder {c} do not exist or cannot be called via amalgamator.") + raise RuntimeError( + f"The cylinder {c} do not exist or cannot be called via amalgamator." + ) if is_multi and not spokes_and_multi_compatibility[c]: - raise RuntimeError(f"The spoke {c} does not work with multistage problems" ) + raise RuntimeError( + f"The spoke {c} does not work with multistage problems" + ) if c in default_unused_spokes: - print(f"{c} is unused by default. Please specify --with-{c}=True in the command line to activate this spoke") + print( + f"{c} is unused by default. 
Please specify --with-{c}=True in the command line to activate this spoke" + ) spokes.append(c) return spokes -#========== + +# ========== def check_module_ama(module): # Complain if the module lacks things needed. - everything = ["scenario_names_creator", - "scenario_creator", - "inparser_adder", - "kw_creator"] # start and denouement can be missing. + everything = [ + "scenario_names_creator", + "scenario_creator", + "inparser_adder", + "kw_creator", + ] # start and denouement can be missing. you_can_have_it_all = True for ething in everything: if not hasattr(module, ething): @@ -139,24 +153,26 @@ def check_module_ama(module): raise RuntimeError(f"Module {module} not complete for from_module") -#========== +# ========== def from_module(mname, cfg, extraargs_fct=None, use_command_line=True): - """ Try to get everything from one file (this will not always be possible). + """Try to get everything from one file (this will not always be possible). Args: mname (str): the module name (module must have certain functions) or you can pass in a module that has already been imported - cfg (Config): Amalgamator options or extra arguments to use + cfg (Config): Amalgamator options or extra arguments to use in addition with the command line extraargs_fct (fct) : a function to add extra arguments, e.g. for MMW use_command_line (bool): should we take into account the command line to populate cfg ? default is True - + Returns: ama (Amalgamator): the instantiated object - + """ if not isinstance(cfg, config.Config): - raise RuntimeError(f"amalgamator from_model bad cfg type={type(cfg)}; should be Config") + raise RuntimeError( + f"amalgamator from_model bad cfg type={type(cfg)}; should be Config" + ) if inspect.ismodule(mname): m = mname @@ -164,24 +180,29 @@ def from_module(mname, cfg, extraargs_fct=None, use_command_line=True): m = importlib.import_module(mname) check_module_ama(m) - cfg = Amalgamator_parser(cfg, m.inparser_adder, - extraargs_fct=extraargs_fct, - use_command_line=use_command_line) - cfg.add_and_assign('_mpisppy_probability', description="Uniform prob.", domain=float, default=None, value= 1/cfg['num_scens']) - start = cfg['start'] if 'start' in cfg else 0 - sn = m.scenario_names_creator(cfg['num_scens'], start=start) + cfg = Amalgamator_parser( + cfg, + m.inparser_adder, + extraargs_fct=extraargs_fct, + use_command_line=use_command_line, + ) + cfg.add_and_assign( + "_mpisppy_probability", + description="Uniform prob.", + domain=float, + default=None, + value=1 / cfg["num_scens"], + ) + start = cfg["start"] if "start" in cfg else 0 + sn = m.scenario_names_creator(cfg["num_scens"], start=start) dn = m.scenario_denouement if hasattr(m, "scenario_denouement") else None - ama = Amalgamator(cfg, - sn, - m.scenario_creator, - m.kw_creator, - scenario_denouement=dn) + ama = Amalgamator(cfg, sn, m.scenario_creator, m.kw_creator, scenario_denouement=dn) return ama - - -#========== + + +# ========== def Amalgamator_parser(cfg, inparser_adder, extraargs_fct=None, use_command_line=True): - """ Helper function for Amalgamator. + """Helper function for Amalgamator. Args: cfg (Config): Amalgamator control options, etc; might be added to or changed inparser_adder (fct): returns updated ArgumentParser the problem @@ -193,7 +214,7 @@ def Amalgamator_parser(cfg, inparser_adder, extraargs_fct=None, use_command_line """ # TBD: should we copy? 
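As a minimal sketch of the bypass-the-command-line path that from_module supports (the farmer module name comes from the demo later in this file; the Config fields mirror the checks enforced below, and module-specific fields that farmer's inparser_adder would normally add are glossed over):

    from mpisppy.utils import config
    import mpisppy.utils.amalgamator as amalgamator

    cfg = config.Config()
    cfg.add_and_assign("EF_2stage", description="EF 2stage", domain=bool,
                       default=None, value=True)
    cfg.add_and_assign("num_scens", description="number of scenarios",
                       domain=int, default=None, value=3)
    cfg.add_and_assign("EF_solver_name", description="solver name", domain=str,
                       default=None, value="gurobi")
    ama = amalgamator.from_module("mpisppy.tests.examples.farmer", cfg,
                                  use_command_line=False)
    ama.run()  # assumes the named solver is actually installed
    print(ama.best_inner_bound, ama.best_outer_bound)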
- + if use_command_line: if _bool_option(cfg, "EF_2stage"): cfg.EF2() @@ -205,28 +226,30 @@ def Amalgamator_parser(cfg, inparser_adder, extraargs_fct=None, use_command_line elif _bool_option(cfg, "mstage"): cfg.multistage() else: - raise RuntimeError("The problem type (2stage or mstage) must be specified") + raise RuntimeError( + "The problem type (2stage or mstage) must be specified" + ) cfg.two_sided_args() cfg.mip_options() - - #Adding cylinders + + # Adding cylinders if "cylinders" not in cfg: raise RuntimeError("A cylinder list must be specified") - - for cylinder in cfg['cylinders']: - #NOTE: This returns an error if the cylinder yyyy has no yyyy_args in config.py + + for cylinder in cfg["cylinders"]: + # NOTE: This returns an error if the cylinder yyyy has no yyyy_args in config.py add_options(cfg, cylinder) - - #Adding extensions + + # Adding extensions if "extensions" in cfg: - for extension in cfg['extensions']: + for extension in cfg["extensions"]: add_options(cfg, extension) - + inparser_adder(cfg) - + if extraargs_fct is not None: extraargs_fct() - + prg = cfg.get("program_name") cfg.parse_command_line(prg) @@ -240,28 +263,34 @@ def Amalgamator_parser(cfg, inparser_adder, extraargs_fct=None, use_command_line options_dict["EF_solver_options"] = {"mipgap": options_dict["EF_mipgap"]} """ else: - #Checking if cfg has all the options we need + # Checking if cfg has all the options we need if not (_bool_option(cfg, "EF_2stage") or _bool_option(cfg, "EF_mstage")): - raise RuntimeError("For now, completly bypassing command line only works with EF." ) - if 'EF_solver_name' not in cfg: - raise RuntimeError("EF_solver_name must be specified for the amalgamator." ) - if 'num_scens' not in cfg: - raise RuntimeWarning("cfg should have a number of scenarios to compute a xhat") - if _bool_option(cfg, 'EF-mstage') and 'branching_factors' not in cfg: - raise RuntimeError("For a multistage problem, cfg must have a 'branching_factors' attribute with branching factors") + raise RuntimeError( + "For now, completly bypassing command line only works with EF." + ) + if "EF_solver_name" not in cfg: + raise RuntimeError("EF_solver_name must be specified for the amalgamator.") + if "num_scens" not in cfg: + raise RuntimeWarning( + "cfg should have a number of scenarios to compute a xhat" + ) + if _bool_option(cfg, "EF-mstage") and "branching_factors" not in cfg: + raise RuntimeError( + "For a multistage problem, cfg must have a 'branching_factors' attribute with branching factors" + ) return cfg - -#======================================== -class Amalgamator(): + +# ======================================== +class Amalgamator: """Takes a scenario list and a scenario creator (and options) as input. The ides is to produce an outer bound on the objective function (solving the EF directly or by decomposition) and/or an x-hat with inner bound; however, what it does is controlled by its constructor options and by user options. This thing basically wraps the functionality of the "standard" *_cylinder examples. - + It may be an extenisble base class, but not abstract. 
Args: @@ -272,9 +301,15 @@ class Amalgamator(): scenario_denouement (fct): (optional) called at conclusion """ - def __init__(self, cfg, - scenario_names, scenario_creator, kw_creator, - scenario_denouement=None, verbose=True): + def __init__( + self, + cfg, + scenario_names, + scenario_creator, + kw_creator, + scenario_denouement=None, + verbose=True, + ): self.cfg = cfg self.scenario_names = scenario_names self.scenario_creator = scenario_creator @@ -284,18 +319,25 @@ def __init__(self, cfg, self.verbose = verbose self.is_EF = _bool_option(cfg, "EF_2stage") or _bool_option(cfg, "EF_mstage") if self.is_EF: - sroot, self.solver_name, self.solver_options = solver_spec.solver_specification(cfg, ["EF", ""]) + sroot, self.solver_name, self.solver_options = ( + solver_spec.solver_specification(cfg, ["EF", ""]) + ) self.is_multi = _bool_option(cfg, "EF-mstage") or _bool_option(cfg, "mstage") if self.is_multi and "all_nodenames" not in cfg: if "branching_factors" in cfg: - ndnms = sputils.create_nodenames_from_branching_factors(cfg["branching_factors"]) - self.cfg.quick_assign("all_nodenames", domain=pyofig.ListOf(str), value=ndnms) + ndnms = sputils.create_nodenames_from_branching_factors( + cfg["branching_factors"] + ) + self.cfg.quick_assign( + "all_nodenames", domain=pyofig.ListOf(str), value=ndnms + ) else: - raise RuntimeError("For a multistage problem, please provide branching_factors or all_nodenames") - - def run(self): + raise RuntimeError( + "For a multistage problem, please provide branching_factors or all_nodenames" + ) - """ Top-level execution.""" + def run(self): + """Top-level execution.""" if self.is_EF: ef = sputils.create_EF( self.scenario_names, @@ -304,46 +346,51 @@ def run(self): suppress_warnings=True, ) - tee_ef_solves = self.cfg.get('tee_ef_solves',False) - + tee_ef_solves = self.cfg.get("tee_ef_solves", False) + solver_name = self.solver_name solver = pyo.SolverFactory(solver_name) if hasattr(self, "solver_options") and (self.solver_options is not None): - for option_key,option_value in self.solver_options.items(): + for option_key, option_value in self.solver_options.items(): solver.options[option_key] = option_value - if self.verbose : + if self.verbose: global_toc("Starting EF solve") - if 'persistent' in solver_name: + if "persistent" in solver_name: solver.set_instance(ef, symbolic_solver_labels=True) results = solver.solve(tee=tee_ef_solves) else: - results = solver.solve(ef, tee=tee_ef_solves, symbolic_solver_labels=True,) + results = solver.solve( + ef, + tee=tee_ef_solves, + symbolic_solver_labels=True, + ) if self.verbose: global_toc("Completed EF solve") - self.EF_Obj = pyo.value(ef.EF_Obj) objs = sputils.get_objs(ef) - + self.is_minimizing = objs[0].is_minimizing - #TBD : Write a function doing this + # TBD : Write a function doing this if self.is_minimizing: - self.best_outer_bound = results.Problem[0]['Lower bound'] - self.best_inner_bound = results.Problem[0]['Upper bound'] + self.best_outer_bound = results.Problem[0]["Lower bound"] + self.best_inner_bound = results.Problem[0]["Upper bound"] else: - self.best_inner_bound = results.Problem[0]['Upper bound'] - self.best_outer_bound = results.Problem[0]['Lower bound'] + self.best_inner_bound = results.Problem[0]["Upper bound"] + self.best_outer_bound = results.Problem[0]["Lower bound"] self.ef = ef - - if 'write_solution' in self.cfg: - if 'first_stage_solution' in self.cfg['write_solution']: - sputils.write_ef_first_stage_solution(self.ef, - self.cfg['write_solution']['first_stage_solution']) - if 
'tree_solution' in self.cfg['write_solution']: - sputils.write_ef_tree_solution(self.ef, - self.cfg['write_solution']['tree_solution']) - + + if "write_solution" in self.cfg: + if "first_stage_solution" in self.cfg["write_solution"]: + sputils.write_ef_first_stage_solution( + self.ef, self.cfg["write_solution"]["first_stage_solution"] + ) + if "tree_solution" in self.cfg["write_solution"]: + sputils.write_ef_tree_solution( + self.ef, self.cfg["write_solution"]["tree_solution"] + ) + self.xhats = sputils.nonant_cache_from_ef(ef) self.local_xhats = self.xhats # Every scenario is local for EF self.first_stage_solution = {"ROOT": self.xhats["ROOT"]} @@ -351,65 +398,68 @@ def run(self): else: self.ef = None - #Create a hub dict - hub_name = find_hub(self.cfg['cylinders'], self.is_multi) - hub_creator = getattr(vanilla, hub_name+'_hub') - beans = {"cfg": self.cfg, - "scenario_creator": self.scenario_creator, - "scenario_denouement": self.scenario_denouement, - "all_scenario_names": self.scenario_names, - "scenario_creator_kwargs": self.kwargs} + # Create a hub dict + hub_name = find_hub(self.cfg["cylinders"], self.is_multi) + hub_creator = getattr(vanilla, hub_name + "_hub") + beans = { + "cfg": self.cfg, + "scenario_creator": self.scenario_creator, + "scenario_denouement": self.scenario_denouement, + "all_scenario_names": self.scenario_names, + "scenario_creator_kwargs": self.kwargs, + } if self.is_multi: beans["all_nodenames"] = self.cfg["all_nodenames"] hub_dict = hub_creator(**beans) - - #Add extensions - if 'extensions' in self.cfg: - for extension in self.cfg['extensions']: - extension_creator = getattr(vanilla, 'add_'+extension) + + # Add extensions + if "extensions" in self.cfg: + for extension in self.cfg["extensions"]: + extension_creator = getattr(vanilla, "add_" + extension) hub_dict = extension_creator(hub_dict, self.cfg) - - #Create spoke dicts - potential_spokes = find_spokes(self.cfg['cylinders'], - self.is_multi) - #We only use the spokes with an associated command line arg set to True + + # Create spoke dicts + potential_spokes = find_spokes(self.cfg["cylinders"], self.is_multi) + # We only use the spokes with an associated command line arg set to True spokes = [spoke for spoke in potential_spokes if self.cfg[spoke]] list_of_spoke_dict = list() for spoke in spokes: - spoke_creator = getattr(vanilla, spoke+'_spoke') + spoke_creator = getattr(vanilla, spoke + "_spoke") spoke_beans = copy.deepcopy(beans) if spoke == "xhatspecific": spoke_beans["scenario_dict"] = self.cfg["scenario_dict"] spoke_dict = spoke_creator(**spoke_beans) list_of_spoke_dict.append(spoke_dict) - - ws = WheelSpinner(hub_dict, list_of_spoke_dict) + + ws = WheelSpinner(hub_dict, list_of_spoke_dict) ws.run() spcomm = ws.spcomm - + self.opt = spcomm.opt self.on_hub = ws.on_hub() - + if self.on_hub: # we are on a hub rank self.best_inner_bound = spcomm.BestInnerBound self.best_outer_bound = spcomm.BestOuterBound - #NOTE: We do not get bounds on every rank, only on hub + # NOTE: We do not get bounds on every rank, only on hub # This should change if we want to use cylinders for MMW - + # prior to June 2022, the wite options were a two-level dictionary; now flattened - if 'first_stage_solution_csv' in self.cfg: - ws.write_first_stage_solution(self.cfg['first_stage_solution_csv']) - if 'tree_solution_csv' in self.cfg: - ws.write_tree_solution(self.cfg['tree_solution_csv']) - - if self.on_hub: #we are on a hub rank + if "first_stage_solution_csv" in self.cfg: + 
ws.write_first_stage_solution(self.cfg["first_stage_solution_csv"]) + if "tree_solution_csv" in self.cfg: + ws.write_tree_solution(self.cfg["tree_solution_csv"]) + + if self.on_hub: # we are on a hub rank a_sname = self.opt.local_scenario_names[0] root = self.opt.local_scenarios[a_sname]._mpisppy_node_list[0] - self.first_stage_solution = {"ROOT":[pyo.value(var) for var in root.nonant_vardata_list]} + self.first_stage_solution = { + "ROOT": [pyo.value(var) for var in root.nonant_vardata_list] + } self.local_xhats = ws.local_nonant_cache() - - #TODO: Add a xhats attribute, similar to the output of nonant_cache_from_ef + + # TODO: Add a xhats attribute, similar to the output of nonant_cache_from_ef # It means doing a MPI operation over hub ranks @@ -425,9 +475,27 @@ def run(self): } """ cfg = config.Config() - cfg.add_and_assign("2stage", description="2stage vsus mstage", domain=bool, default=None, value=True) - cfg.add_and_assign("cylinders", description="list of cylinders", domain=pyofig.ListOf(str), default=None, value=["ph"]) - cfg.add_and_assign("extensions", description="list of extensions", domain=pyofig.ListOf(str), default=None, value= []) + cfg.add_and_assign( + "2stage", + description="2stage vsus mstage", + domain=bool, + default=None, + value=True, + ) + cfg.add_and_assign( + "cylinders", + description="list of cylinders", + domain=pyofig.ListOf(str), + default=None, + value=["ph"], + ) + cfg.add_and_assign( + "extensions", + description="list of extensions", + domain=pyofig.ListOf(str), + default=None, + value=[], + ) # num_scens_reqd has been deprecated ama = from_module("mpisppy.tests.examples.farmer", cfg) @@ -446,9 +514,10 @@ def run(self): - linear/quadratic versus non-linear """ # these options need to be tested - ama_options = {"2stage": True, # 2stage vs. mstage - "cylinders": ['ph','cross_scenario_cuts'], - "extensions": ['cross_scenario_cuts'], - "program_name": "amalgamator test main", - "num_scens_reqd": True, - } + ama_options = { + "2stage": True, # 2stage vs. mstage + "cylinders": ["ph", "cross_scenario_cuts"], + "extensions": ["cross_scenario_cuts"], + "program_name": "amalgamator test main", + "num_scens_reqd": True, + } diff --git a/mpisppy/utils/baseparsers.py b/mpisppy/utils/baseparsers.py index dab7d78a5..323f1cb59 100644 --- a/mpisppy/utils/baseparsers.py +++ b/mpisppy/utils/baseparsers.py @@ -7,136 +7,179 @@ # full copyright and license information. ############################################################################### # set up the most common parser args for mpi-sppy examples -""" NOTE TO NEW USERS: just using these parsers will not, itself, do anything. - You have to use the values when you create the dictionaries that are passed - to WheelSpinner. Further note that not all examples use all values - in the parsers. +"""NOTE TO NEW USERS: just using these parsers will not, itself, do anything. +You have to use the values when you create the dictionaries that are passed +to WheelSpinner. Further note that not all examples use all values +in the parsers. """ + import argparse + def _common_args(inparser): # NOTE: if you want abbreviations, override the arguments in your example # do not add abbreviations here. 
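The helpers that follow all use the same argparse idiom: a kebab-case flag, an explicit snake_case dest, and a stated default, with boolean options exposed as a --with-X/--no-X pair sharing one dest. A self-contained sketch of the pattern (the program name is made up):

    import argparse

    parser = argparse.ArgumentParser(prog="demo", conflict_handler="resolve")
    parser.add_argument("--max-iterations", help="ph max-iterations (default 1)",
                        dest="max_iterations", type=int, default=1)
    parser.add_argument("--with-verbose", dest="with_verbose", action="store_true")
    parser.add_argument("--no-verbose", dest="with_verbose", action="store_false")
    parser.set_defaults(with_verbose=False)

    args = parser.parse_args(["--max-iterations", "50", "--with-verbose"])
    assert args.max_iterations == 50 and args.with_verbose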
parser = inparser - parser.add_argument("--max-iterations", - help="ph max-iterations (default 1)", - dest="max_iterations", - type=int, - default=1) - - parser.add_argument("--solver-name", - help = "solver name (default gurobi)", - dest="solver_name", - type = str, - default="gurobi") - - parser.add_argument("--seed", - help="Seed for random numbers (default is 1134)", - dest="seed", - type=int, - default=1134) - - parser.add_argument("--default-rho", - help="Global rho for PH (default None)", - dest="default_rho", - type=float, - default=None) - - parser.add_argument("--bundles-per-rank", - help="bundles per rank (default 0 (no bundles))", - dest="bundles_per_rank", - type=int, - default=0) - - parser.add_argument('--with-verbose', - help="verbose output", - dest='with_verbose', - action='store_true') - parser.add_argument('--no-verbose', - help="do not verbose output (default)", - dest='with_verbose', - action='store_false') + parser.add_argument( + "--max-iterations", + help="ph max-iterations (default 1)", + dest="max_iterations", + type=int, + default=1, + ) + + parser.add_argument( + "--solver-name", + help="solver name (default gurobi)", + dest="solver_name", + type=str, + default="gurobi", + ) + + parser.add_argument( + "--seed", + help="Seed for random numbers (default is 1134)", + dest="seed", + type=int, + default=1134, + ) + + parser.add_argument( + "--default-rho", + help="Global rho for PH (default None)", + dest="default_rho", + type=float, + default=None, + ) + + parser.add_argument( + "--bundles-per-rank", + help="bundles per rank (default 0 (no bundles))", + dest="bundles_per_rank", + type=int, + default=0, + ) + + parser.add_argument( + "--with-verbose", + help="verbose output", + dest="with_verbose", + action="store_true", + ) + parser.add_argument( + "--no-verbose", + help="do not verbose output (default)", + dest="with_verbose", + action="store_false", + ) parser.set_defaults(with_verbose=False) - parser.add_argument('--with-display-progress', - help="display progress at each iteration", - dest='with_display_progress', - action='store_true') - parser.add_argument('--no-display-progress', - help="do not display progress at each iteration (default)", - dest='with_display_progress', - action='store_false') + parser.add_argument( + "--with-display-progress", + help="display progress at each iteration", + dest="with_display_progress", + action="store_true", + ) + parser.add_argument( + "--no-display-progress", + help="do not display progress at each iteration (default)", + dest="with_display_progress", + action="store_false", + ) parser.set_defaults(with_display_progress=False) - parser.add_argument('--with-display-convergence-detail', - help="display non-anticipative variable convergence statistics at each iteration", - dest='with_display_convergence_detail', - action='store_true') - parser.add_argument('--no-display-convergence-detail', - help="do not display non-anticipative variable convergence statistics at each iteration (default)", - dest='with_display_convergence_detail', - action='store_false') - parser.set_defaults(with_display_convergence_detail=False) - - parser.add_argument("--max-solver-threads", - help="Limit on threads per solver (default None)", - dest="max_solver_threads", - type=int, - default=None) - - parser.add_argument("--intra-hub-conv-thresh", - help="Within hub convergence threshold (default 1e-10)", - dest="intra_hub_conv_thresh", - type=float, - default=1e-10) - - parser.add_argument("--trace-prefix", - help="Prefix for bound spoke trace 
files. If None " - "bound spoke trace files are not written.", - dest="trace_prefix", - type=str, - default=None) - - parser.add_argument("--with-tee-rank0-solves", - help="Some cylinders support tee of rank 0 solves." - "(With multiple cylinder this could be confusing.)", - dest="tee_rank0_solves", - action='store_true') - parser.add_argument("--no-tee-rank0-solves", - help="Some cylinders support tee of rank 0 solves.", - dest="tee_rank0_solves", - action='store_false') + parser.add_argument( + "--with-display-convergence-detail", + help="display non-anticipative variable convergence statistics at each iteration", + dest="with_display_convergence_detail", + action="store_true", + ) + parser.add_argument( + "--no-display-convergence-detail", + help="do not display non-anticipative variable convergence statistics at each iteration (default)", + dest="with_display_convergence_detail", + action="store_false", + ) + parser.set_defaults(with_display_convergence_detail=False) + + parser.add_argument( + "--max-solver-threads", + help="Limit on threads per solver (default None)", + dest="max_solver_threads", + type=int, + default=None, + ) + + parser.add_argument( + "--intra-hub-conv-thresh", + help="Within hub convergence threshold (default 1e-10)", + dest="intra_hub_conv_thresh", + type=float, + default=1e-10, + ) + + parser.add_argument( + "--trace-prefix", + help="Prefix for bound spoke trace files. If None " + "bound spoke trace files are not written.", + dest="trace_prefix", + type=str, + default=None, + ) + + parser.add_argument( + "--with-tee-rank0-solves", + help="Some cylinders support tee of rank 0 solves." + "(With multiple cylinder this could be confusing.)", + dest="tee_rank0_solves", + action="store_true", + ) + parser.add_argument( + "--no-tee-rank0-solves", + help="Some cylinders support tee of rank 0 solves.", + dest="tee_rank0_solves", + action="store_false", + ) parser.set_defaults(tee_rank0_solves=False) - parser.add_argument("--auxilliary", - help="Free text for use by hackers (default '').", - dest="auxilliary", - type=str, - default='') - - parser.add_argument("--linearize-binary-proximal-terms", - help="For PH, linearize the proximal terms for " - "all binary nonanticipative variables", - dest="linearize_binary_proximal_terms", - action='store_true') - - parser.add_argument("--linearize-proximal-terms", - help="For PH, linearize the proximal terms for " - "all nonanticipative variables", - dest="linearize_proximal_terms", - action='store_true') - - parser.add_argument("--proximal-linearization-tolerance", - help="For PH, when linearizing proximal terms, " - "a cut will be added if the proximal term approximation " - "is looser than this value (default 1e-1)", - dest="proximal_linearization_tolerance", - type=float, - default=1.e-1) + parser.add_argument( + "--auxilliary", + help="Free text for use by hackers (default '').", + dest="auxilliary", + type=str, + default="", + ) + + parser.add_argument( + "--linearize-binary-proximal-terms", + help="For PH, linearize the proximal terms for " + "all binary nonanticipative variables", + dest="linearize_binary_proximal_terms", + action="store_true", + ) + + parser.add_argument( + "--linearize-proximal-terms", + help="For PH, linearize the proximal terms for " + "all nonanticipative variables", + dest="linearize_proximal_terms", + action="store_true", + ) + + parser.add_argument( + "--proximal-linearization-tolerance", + help="For PH, when linearizing proximal terms, " + "a cut will be added if the proximal term approximation " + 
"is looser than this value (default 1e-1)", + dest="proximal_linearization_tolerance", + type=float, + default=1.0e-1, + ) return parser - + + def make_parser(progname=None, num_scens_reqd=False): # make a parser for the program named progname # NOTE: if you want abbreviations, override the arguments in your example @@ -144,9 +187,7 @@ def make_parser(progname=None, num_scens_reqd=False): parser = argparse.ArgumentParser(prog=progname, conflict_handler="resolve") if num_scens_reqd: - parser.add_argument( - "num_scens", help="Number of scenarios", type=int - ) + parser.add_argument("num_scens", help="Number of scenarios", type=int) else: parser.add_argument( "--num-scens", @@ -158,16 +199,19 @@ def make_parser(progname=None, num_scens_reqd=False): parser = _common_args(parser) return parser + def _basic_multistage(progname=None, num_scens_reqd=False): parser = argparse.ArgumentParser(prog=progname, conflict_handler="resolve") - parser.add_argument("--branching-factors", - help="Spaces delimited branching factors (e.g., 2 2)", - dest="branching_factors", - nargs="*", - type=int, - default=None) - + parser.add_argument( + "--branching-factors", + help="Spaces delimited branching factors (e.g., 2 2)", + dest="branching_factors", + nargs="*", + type=int, + default=None, + ) + return parser @@ -179,6 +223,7 @@ def make_multistage_parser(progname=None): parser = _common_args(parser) return parser + #### EF #### def make_EF2_parser(progname=None, num_scens_reqd=False): # create a parser just for EF two-stage (does not call _common_args) @@ -187,9 +232,7 @@ def make_EF2_parser(progname=None, num_scens_reqd=False): parser = argparse.ArgumentParser(prog=progname, conflict_handler="resolve") if num_scens_reqd: - parser.add_argument( - "num_scens", help="Number of scenarios", type=int - ) + parser.add_argument("num_scens", help="Number of scenarios", type=int) else: parser.add_argument( "--num-scens", @@ -198,31 +241,33 @@ def make_EF2_parser(progname=None, num_scens_reqd=False): type=int, default=None, ) - - parser.add_argument("--EF-solver-name", - help = "solver name (default gurobi)", - dest="EF_solver_name", - type = str, - default="gurobi") - - - parser.add_argument("--EF-mipgap", - help="mip gap option for the solver if needed (default None)", - dest="EF_mipgap", - type=float, - default=None) + + parser.add_argument( + "--EF-solver-name", + help="solver name (default gurobi)", + dest="EF_solver_name", + type=str, + default="gurobi", + ) + + parser.add_argument( + "--EF-mipgap", + help="mip gap option for the solver if needed (default None)", + dest="EF_mipgap", + type=float, + default=None, + ) return parser + def make_EF_multistage_parser(progname=None, num_scens_reqd=False): # create a parser just for EF multi-stage (does not call _common_args) # NOTE: if you want abbreviations, override the arguments in your example # do not add abbreviations here. 
parser = _basic_multistage(progname=None) - + if num_scens_reqd: - parser.add_argument( - "num_scens", help="Number of scenarios", type=int - ) + parser.add_argument("num_scens", help="Number of scenarios", type=int) else: parser.add_argument( "--num-scens", @@ -231,254 +276,336 @@ def make_EF_multistage_parser(progname=None, num_scens_reqd=False): type=int, default=None, ) - - parser.add_argument("--EF-solver-name", - help = "solver name (default gurobi)", - dest="EF_solver_name", - type = str, - default="gurobi") - - - parser.add_argument("--EF-mipgap", - help="mip gap option for the solver if needed (default None)", - dest="EF_mipgap", - type=float, - default=None) + + parser.add_argument( + "--EF-solver-name", + help="solver name (default gurobi)", + dest="EF_solver_name", + type=str, + default="gurobi", + ) + + parser.add_argument( + "--EF-mipgap", + help="mip gap option for the solver if needed (default None)", + dest="EF_mipgap", + type=float, + default=None, + ) return parser + + ##### common additions to the command line ##### + def two_sided_args(inparser): # add commands to inparser and also return the result parser = inparser - parser.add_argument("--rel-gap", - help="relative termination gap (default 0.05)", - dest="rel_gap", - type=float, - default=0.05) - - parser.add_argument("--abs-gap", - help="absolute termination gap (default 0)", - dest="abs_gap", - type=float, - default=0.) - - parser.add_argument("--max-stalled-iters", - help="maximum iterations with no reduction in gap (default 100)", - dest="max_stalled_iters", - type=int, - default=100) + parser.add_argument( + "--rel-gap", + help="relative termination gap (default 0.05)", + dest="rel_gap", + type=float, + default=0.05, + ) + + parser.add_argument( + "--abs-gap", + help="absolute termination gap (default 0)", + dest="abs_gap", + type=float, + default=0.0, + ) + + parser.add_argument( + "--max-stalled-iters", + help="maximum iterations with no reduction in gap (default 100)", + dest="max_stalled_iters", + type=int, + default=100, + ) return parser + def mip_options(inparser): parser = inparser - parser.add_argument("--iter0-mipgap", - help="mip gap option for iteration 0 (default None)", - dest="iter0_mipgap", - type=float, - default=None) - - parser.add_argument("--iterk-mipgap", - help="mip gap option non-zero iterations (default None)", - dest="iterk_mipgap", - type=float, - default=None) + parser.add_argument( + "--iter0-mipgap", + help="mip gap option for iteration 0 (default None)", + dest="iter0_mipgap", + type=float, + default=None, + ) + + parser.add_argument( + "--iterk-mipgap", + help="mip gap option non-zero iterations (default None)", + dest="iterk_mipgap", + type=float, + default=None, + ) return parser + def aph_args(inparser): parser = inparser - parser.add_argument('--aph-gamma', - help='Gamma parameter associated with asychronous projective hedging (default 1.0)', - dest='aph_gamma', - type=float, - default=1.0) - parser.add_argument('--aph-nu', - help='Nu parameter associated with asychronous projective hedging (default 1.0)', - dest="aph_nu", - type=float, - default=1.0) - parser.add_argument('--aph-frac-needed', - help='Fraction of sub-problems required before computing projective step (default 1.0)', - dest='aph_frac_needed', - type=float, - default=1.0) - parser.add_argument('--aph-dispatch-frac', - help='Fraction of sub-problems to dispatch at each step of asychronous projective hedging (default 1.0)', - dest='aph_dispatch_frac', - type=float, - default=1.0) - 
parser.add_argument('--aph-sleep-seconds', - help='Spin-lock sleep time for APH (default 0.01)', - dest='aph_sleep_seconds', - type=float, - default=0.01) - return parser + parser.add_argument( + "--aph-gamma", + help="Gamma parameter associated with asychronous projective hedging (default 1.0)", + dest="aph_gamma", + type=float, + default=1.0, + ) + parser.add_argument( + "--aph-nu", + help="Nu parameter associated with asychronous projective hedging (default 1.0)", + dest="aph_nu", + type=float, + default=1.0, + ) + parser.add_argument( + "--aph-frac-needed", + help="Fraction of sub-problems required before computing projective step (default 1.0)", + dest="aph_frac_needed", + type=float, + default=1.0, + ) + parser.add_argument( + "--aph-dispatch-frac", + help="Fraction of sub-problems to dispatch at each step of asychronous projective hedging (default 1.0)", + dest="aph_dispatch_frac", + type=float, + default=1.0, + ) + parser.add_argument( + "--aph-sleep-seconds", + help="Spin-lock sleep time for APH (default 0.01)", + dest="aph_sleep_seconds", + type=float, + default=0.01, + ) + return parser + def fixer_args(inparser): parser = inparser - parser.add_argument('--with-fixer', - help="have an integer fixer extension (default)", - dest='with_fixer', - action='store_true') - parser.add_argument('--no-fixer', - help="do not have an integer fixer extension", - dest='with_fixer', - action='store_false') + parser.add_argument( + "--with-fixer", + help="have an integer fixer extension (default)", + dest="with_fixer", + action="store_true", + ) + parser.add_argument( + "--no-fixer", + help="do not have an integer fixer extension", + dest="with_fixer", + action="store_false", + ) parser.set_defaults(with_fixer=True) - parser.add_argument("--fixer-tol", - help="fixer bounds tolerance (default 1e-4)", - dest="fixer_tol", - type=float, - default=1e-2) + parser.add_argument( + "--fixer-tol", + help="fixer bounds tolerance (default 1e-4)", + dest="fixer_tol", + type=float, + default=1e-2, + ) return parser def fwph_args(inparser): parser = inparser - parser.add_argument('--with-fwph', - help="have an fwph spoke (default)", - dest='with_fwph', - action='store_true') - parser.add_argument('--no-fwph', - help="do not have an fwph spoke", - dest='with_fwph', - action='store_false') + parser.add_argument( + "--with-fwph", + help="have an fwph spoke (default)", + dest="with_fwph", + action="store_true", + ) + parser.add_argument( + "--no-fwph", + help="do not have an fwph spoke", + dest="with_fwph", + action="store_false", + ) parser.set_defaults(with_fwph=True) - parser.add_argument("--fwph-iter-limit", - help="maximum fwph iterations (default 10)", - dest="fwph_iter_limit", - type=int, - default=10) - - parser.add_argument("--fwph-weight", - help="fwph weight (default 0)", - dest="fwph_weight", - type=float, - default=0.0) - - parser.add_argument("--fwph-conv-thresh", - help="fwph convergence threshold (default 1e-4)", - dest="fwph_conv_thresh", - type=float, - default=1e-4) - - parser.add_argument("--fwph-stop-check-tol", - help="fwph tolerance for Gamma^t (default 1e-4)", - dest="fwph_stop_check_tol", - type=float, - default=1e-4) - - parser.add_argument("--fwph-mipgap", - help="mip gap option FW subproblems iterations (default None)", - dest="fwph_mipgap", - type=float, - default=None) + parser.add_argument( + "--fwph-iter-limit", + help="maximum fwph iterations (default 10)", + dest="fwph_iter_limit", + type=int, + default=10, + ) + + parser.add_argument( + "--fwph-weight", + help="fwph weight 
(default 0)", + dest="fwph_weight", + type=float, + default=0.0, + ) + + parser.add_argument( + "--fwph-conv-thresh", + help="fwph convergence threshold (default 1e-4)", + dest="fwph_conv_thresh", + type=float, + default=1e-4, + ) + + parser.add_argument( + "--fwph-stop-check-tol", + help="fwph tolerance for Gamma^t (default 1e-4)", + dest="fwph_stop_check_tol", + type=float, + default=1e-4, + ) + + parser.add_argument( + "--fwph-mipgap", + help="mip gap option FW subproblems iterations (default None)", + dest="fwph_mipgap", + type=float, + default=None, + ) return parser + def lagrangian_args(inparser): parser = inparser - parser.add_argument('--with-lagrangian', - help="have a lagrangian spoke (default)", - dest='with_lagrangian', - action='store_true') - parser.add_argument('--no-lagrangian', - help="do not have a lagrangian spoke", - dest='with_lagrangian', - action='store_false') + parser.add_argument( + "--with-lagrangian", + help="have a lagrangian spoke (default)", + dest="with_lagrangian", + action="store_true", + ) + parser.add_argument( + "--no-lagrangian", + help="do not have a lagrangian spoke", + dest="with_lagrangian", + action="store_false", + ) parser.set_defaults(with_lagrangian=True) - parser.add_argument("--lagrangian-iter0-mipgap", - help="lgr. iter0 solver option mipgap (default None)", - dest="lagrangian_iter0_mipgap", - type=float, - default=None) - - parser.add_argument("--lagrangian-iterk-mipgap", - help="lgr. iterk solver option mipgap (default None)", - dest="lagrangian_iterk_mipgap", - type=float, - default=None) + parser.add_argument( + "--lagrangian-iter0-mipgap", + help="lgr. iter0 solver option mipgap (default None)", + dest="lagrangian_iter0_mipgap", + type=float, + default=None, + ) + + parser.add_argument( + "--lagrangian-iterk-mipgap", + help="lgr. 
iterk solver option mipgap (default None)", + dest="lagrangian_iterk_mipgap", + type=float, + default=None, + ) return parser def lagranger_args(inparser): parser = inparser - parser.add_argument('--with-lagranger', - help="have a special lagranger spoke (default)", - dest='with_lagranger', - action='store_true') - parser.add_argument('--no-lagranger', - help="do not have a special lagranger spoke", - dest='with_lagranger', - action='store_false') + parser.add_argument( + "--with-lagranger", + help="have a special lagranger spoke (default)", + dest="with_lagranger", + action="store_true", + ) + parser.add_argument( + "--no-lagranger", + help="do not have a special lagranger spoke", + dest="with_lagranger", + action="store_false", + ) parser.set_defaults(with_lagranger=True) - parser.add_argument("--lagranger-iter0-mipgap", - help="lagranger iter0 mipgap (default None)", - dest="lagranger_iter0_mipgap", - type=float, - default=None) - - parser.add_argument("--lagranger-iterk-mipgap", - help="lagranger iterk mipgap (default None)", - dest="lagranger_iterk_mipgap", - type=float, - default=None) - - parser.add_argument("--lagranger-rho-rescale-factors-json", - help="json file: rho rescale factors (default None)", - dest="lagranger_rho_rescale_factors_json", - type=str, - default=None) + parser.add_argument( + "--lagranger-iter0-mipgap", + help="lagranger iter0 mipgap (default None)", + dest="lagranger_iter0_mipgap", + type=float, + default=None, + ) + + parser.add_argument( + "--lagranger-iterk-mipgap", + help="lagranger iterk mipgap (default None)", + dest="lagranger_iterk_mipgap", + type=float, + default=None, + ) + + parser.add_argument( + "--lagranger-rho-rescale-factors-json", + help="json file: rho rescale factors (default None)", + dest="lagranger_rho_rescale_factors_json", + type=str, + default=None, + ) return parser def xhatlooper_args(inparser): parser = inparser - parser.add_argument('--with-xhatlooper', - help="have an xhatlooper spoke (default)", - dest='with_xhatlooper', - action='store_true') - parser.add_argument('--no-xhatlooper', - help="do not have an xhatlooper spoke", - dest='with_xhatlooper', - action='store_false') + parser.add_argument( + "--with-xhatlooper", + help="have an xhatlooper spoke (default)", + dest="with_xhatlooper", + action="store_true", + ) + parser.add_argument( + "--no-xhatlooper", + help="do not have an xhatlooper spoke", + dest="with_xhatlooper", + action="store_false", + ) parser.set_defaults(with_xhatlooper=False) - parser.add_argument("--xhat-scen-limit", - help="scenario limit xhat looper to try (default 3)", - dest="xhat_scen_limit", - type=int, - default=3) + parser.add_argument( + "--xhat-scen-limit", + help="scenario limit xhat looper to try (default 3)", + dest="xhat_scen_limit", + type=int, + default=3, + ) return parser def xhatshuffle_args(inparser): parser = inparser - parser.add_argument('--with-xhatshuffle', - help="have an xhatshuffle spoke (default)", - dest='with_xhatshuffle', - action='store_true') - parser.add_argument('--no-xhatshuffle', - help="do not have an xhatshuffle spoke", - dest='with_xhatshuffle', - action='store_false') + parser.add_argument( + "--with-xhatshuffle", + help="have an xhatshuffle spoke (default)", + dest="with_xhatshuffle", + action="store_true", + ) + parser.add_argument( + "--no-xhatshuffle", + help="do not have an xhatshuffle spoke", + dest="with_xhatshuffle", + action="store_false", + ) parser.set_defaults(with_xhatshuffle=True) - parser.add_argument('--add-reversed-shuffle', - help="using also the 
reversed shuffling (multistage only, default True)", - dest = 'add_reversed_shuffle', - action='store_true') + parser.add_argument( + "--add-reversed-shuffle", + help="using also the reversed shuffling (multistage only, default True)", + dest="add_reversed_shuffle", + action="store_true", + ) parser.set_defaults(add_reversed_shuffle=True) - parser.add_argument('--xhatshuffle-iter-step', - help="step in shuffled list between 2 scenarios to try (default None)", - dest="xhatshuffle_iter_step", - type=int, - default=None) + parser.add_argument( + "--xhatshuffle-iter-step", + help="step in shuffled list between 2 scenarios to try (default None)", + dest="xhatshuffle_iter_step", + type=int, + default=None, + ) return parser @@ -486,14 +613,18 @@ def xhatshuffle_args(inparser): def xhatspecific_args(inparser): # we will not try to get the specification from the command line parser = inparser - parser.add_argument('--with-xhatspecific', - help="have an xhatspecific spoke (default)", - dest='with_xhatspecific', - action='store_true') - parser.add_argument('--no-xhatspecific', - help="do not have an xhatspecific spoke", - dest='with_xhatspecific', - action='store_false') + parser.add_argument( + "--with-xhatspecific", + help="have an xhatspecific spoke (default)", + dest="with_xhatspecific", + action="store_true", + ) + parser.add_argument( + "--no-xhatspecific", + help="do not have an xhatspecific spoke", + dest="with_xhatspecific", + action="store_false", + ) parser.set_defaults(with_xhatspecific=False) return parser @@ -502,73 +633,95 @@ def xhatspecific_args(inparser): def xhatlshaped_args(inparser): # we will not try to get the specification from the command line parser = inparser - parser.add_argument('--with-xhatlshaped', - help="have an xhatlshaped spoke (default)", - dest='with_xhatlshaped', - action='store_true') - parser.add_argument('--no-xhatlshaped', - help="do not have an xhatlshaped spoke", - dest='with_xhatlshaped', - action='store_false') + parser.add_argument( + "--with-xhatlshaped", + help="have an xhatlshaped spoke (default)", + dest="with_xhatlshaped", + action="store_true", + ) + parser.add_argument( + "--no-xhatlshaped", + help="do not have an xhatlshaped spoke", + dest="with_xhatlshaped", + action="store_false", + ) parser.set_defaults(with_xhatlshaped=True) return parser + def slammax_args(inparser): # we will not try to get the specification from the command line parser = inparser - parser.add_argument('--with-slammax', - help="have an slammax spoke (default)", - dest='with_slammax', - action='store_true') - parser.add_argument('--no-slammax', - help="do not have an slammax spoke", - dest='with_slammax', - action='store_false') + parser.add_argument( + "--with-slammax", + help="have an slammax spoke (default)", + dest="with_slammax", + action="store_true", + ) + parser.add_argument( + "--no-slammax", + help="do not have an slammax spoke", + dest="with_slammax", + action="store_false", + ) parser.set_defaults(with_slammax=True) return parser + def slammin_args(inparser): # we will not try to get the specification from the command line parser = inparser - parser.add_argument('--with-slammin', - help="have an slammin spoke (default)", - dest='with_slammin', - action='store_true') - parser.add_argument('--no-slammin', - help="do not have an slammin spoke", - dest='with_slammin', - action='store_false') + parser.add_argument( + "--with-slammin", + help="have an slammin spoke (default)", + dest="with_slammin", + action="store_true", + ) + parser.add_argument( + "--no-slammin", + 
help="do not have an slammin spoke", + dest="with_slammin", + action="store_false", + ) parser.set_defaults(with_slammin=True) return parser + def cross_scenario_cuts_args(inparser): # we will not try to get the specification from the command line parser = inparser - parser.add_argument('--with-cross-scenario-cuts', - help="have a cross scenario cuts spoke (default)", - dest='with_cross_scenario_cuts', - action='store_true') - parser.add_argument('--no-cross-scenario-cuts', - help="do not have a cross scenario cuts spoke", - dest='with_cross_scenario_cuts', - action='store_false') + parser.add_argument( + "--with-cross-scenario-cuts", + help="have a cross scenario cuts spoke (default)", + dest="with_cross_scenario_cuts", + action="store_true", + ) + parser.add_argument( + "--no-cross-scenario-cuts", + help="do not have a cross scenario cuts spoke", + dest="with_cross_scenario_cuts", + action="store_false", + ) parser.set_defaults(with_cross_scenario_cuts=True) - parser.add_argument("--cross-scenario-iter-cnt", - help="cross scen check bound improve iterations " - "(default 4)", - dest="cross_scenario_iter_cnt", - type=int, - default=4) - - parser.add_argument("--eta-bounds-mipgap", - help="mipgap for determining eta bounds for cross " - "scenario cuts (default 0.01)", - dest="eta_bounds_mipgap", - type=float, - default=0.01) + parser.add_argument( + "--cross-scenario-iter-cnt", + help="cross scen check bound improve iterations " "(default 4)", + dest="cross_scenario_iter_cnt", + type=int, + default=4, + ) + + parser.add_argument( + "--eta-bounds-mipgap", + help="mipgap for determining eta bounds for cross " + "scenario cuts (default 0.01)", + dest="eta_bounds_mipgap", + type=float, + default=0.01, + ) return parser diff --git a/mpisppy/utils/callbacks/termination/solver_callbacks.py b/mpisppy/utils/callbacks/termination/solver_callbacks.py index 360186220..bd9bc8ae1 100644 --- a/mpisppy/utils/callbacks/termination/solver_callbacks.py +++ b/mpisppy/utils/callbacks/termination/solver_callbacks.py @@ -12,13 +12,14 @@ # are (mostly) defined in the context of MIPs. If we want to expand to LPs # and other non-branch-and-bound contexts, additional work is required. 
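A minimal wiring sketch for these callbacks, under stated assumptions: the code below only verifies that the user callback takes three positional arguments; their meaning is assumed here to be elapsed runtime, incumbent objective, and best bound, and returning True is assumed to request termination.

    from pyomo.environ import SolverFactory
    from mpisppy.utils.callbacks.termination.termination_callbacks import (
        set_termination_callback,
    )

    def stop_after_ten_seconds(runtime, best_obj, best_bound):
        # assumed semantics: ask the solver to stop once 10 seconds elapse
        return runtime > 10

    solver = SolverFactory("gurobi_persistent")  # must be a supported persistent solver
    set_termination_callback(solver, stop_after_ten_seconds)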
-def check_user_termination_callback_signature(user_termination_callback): +def check_user_termination_callback_signature(user_termination_callback): import inspect + return len(inspect.signature(user_termination_callback).parameters) == 3 + def set_cplex_callback(solver, user_termination_callback): - cplex = solver._cplex cplex_model = solver._solver_model @@ -39,7 +40,6 @@ def __call__(self): def set_gurobi_callback(solver, user_termination_callback): - gurobi_model = solver._solver_model gurobi_model._terminate_function = user_termination_callback @@ -62,7 +62,6 @@ def gurobi_callback(gurobi_model, where): def set_xpress_callback(solver, user_termination_callback): - xpress_problem = solver._solver_model def cbchecktime_callback(xpress_problem, termination_callback): diff --git a/mpisppy/utils/callbacks/termination/termination_callbacks.py b/mpisppy/utils/callbacks/termination/termination_callbacks.py index c736e7d57..f03380022 100644 --- a/mpisppy/utils/callbacks/termination/termination_callbacks.py +++ b/mpisppy/utils/callbacks/termination/termination_callbacks.py @@ -20,6 +20,7 @@ XpressPersistent: tc.set_xpress_callback, } + def supports_termination_callback(solver_instance): """ Determines if this module supports a solver instance @@ -57,7 +58,7 @@ def set_termination_callback(solver_instance, termination_callback): if not tc.check_user_termination_callback_signature(termination_callback): raise RuntimeError( "Provided user termination callback did not match expected signature with 3 positional arguments" - ) + ) try: _termination_callback_solvers_to_setters[solver_instance.__class__]( diff --git a/mpisppy/utils/callbacks/termination/tests/test_termination.py b/mpisppy/utils/callbacks/termination/tests/test_termination.py index f5162b679..fa365d754 100644 --- a/mpisppy/utils/callbacks/termination/tests/test_termination.py +++ b/mpisppy/utils/callbacks/termination/tests/test_termination.py @@ -17,6 +17,7 @@ import time + class _TestTermination: def __init__(self): self.model = model @@ -39,14 +40,13 @@ def solve(self): class CPLEXTermination(_TestTermination): - _solver_name = "cplex_persistent" def _set_time_limit(self): self._solver.options["timelimit"] = 20 + class GurobiTermination(_TestTermination): - _solver_name = "gurobi_persistent" def _set_time_limit(self): @@ -54,7 +54,6 @@ def _set_time_limit(self): class XpressTermination(_TestTermination): - _solver_name = "xpress_persistent" def _set_time_limit(self): @@ -66,7 +65,6 @@ def _set_time_limit(self): reason="cplex_persistent not available", ) def test_cplex_termination_callback(): - st = time.time() cplextest = CPLEXTermination() try: @@ -76,35 +74,32 @@ def test_cplex_termination_callback(): end = time.time() assert end - st < 5 - + @pytest.mark.skipif( not SolverFactory("gurobi_persistent").available(exception_flag=False), reason="gurobi_persistent not available", ) def test_gurobi_termination_callback(): - st = time.time() gurobitest = GurobiTermination() gurobitest.solve() end = time.time() assert end - st < 5 - + @pytest.mark.skipif( not SolverFactory("xpress_persistent").available(exception_flag=False), reason="xpress_persistent not available", ) def test_xpress_termination_callback(): - st = time.time() xpresstest = XpressTermination() xpresstest.solve() end = time.time() assert end - st < 5 - + def test_unsupported(): - cbc = SolverFactory("cbc") assert not supports_termination_callback("xpress_persistent") diff --git a/mpisppy/utils/cfg_vanilla.py b/mpisppy/utils/cfg_vanilla.py index 57ae43469..e29b5772a 100644 
--- a/mpisppy/utils/cfg_vanilla.py +++ b/mpisppy/utils/cfg_vanilla.py @@ -6,10 +6,10 @@ # All rights reserved. Please see the files COPYRIGHT.md and LICENSE.md for # full copyright and license information. ############################################################################### -""" **** cfg version of vanilla that uses the Pyomo configuration system - Plain versions of dictionaries that can be modified for each example as needed. - ASSUME the corresponding args have been set up. - IDIOM: we feel free to have unused dictionary entries.""" +"""**** cfg version of vanilla that uses the Pyomo configuration system +Plain versions of dictionaries that can be modified for each example as needed. +ASSUME the corresponding args have been set up. +IDIOM: we feel free to have unused dictionary entries.""" import copy @@ -43,10 +43,12 @@ from mpisppy.utils.wxbarreader import WXBarReader from mpisppy.utils.wxbarwriter import WXBarWriter + def _hasit(cfg, argname): # aside: Config objects act like a dict or an object TBD: so why the and? return cfg.get(argname) is not None and cfg[argname] is not None + def shared_options(cfg): shoptions = { "solver_name": cfg.solver_name, @@ -59,8 +61,8 @@ def shared_options(cfg): "iter0_solver_options": dict(), "iterk_solver_options": dict(), "tee-rank0-solves": cfg.tee_rank0_solves, - "trace_prefix" : cfg.trace_prefix, - "presolve" : cfg.presolve, + "trace_prefix": cfg.trace_prefix, + "presolve": cfg.presolve, } if _hasit(cfg, "max_solver_threads"): shoptions["iter0_solver_options"]["threads"] = cfg.max_solver_threads @@ -74,29 +76,35 @@ def shared_options(cfg): return shoptions -def add_multistage_options(cylinder_dict,all_nodenames,branching_factors): + +def add_multistage_options(cylinder_dict, all_nodenames, branching_factors): cylinder_dict = copy.deepcopy(cylinder_dict) if branching_factors is not None: if hasattr(cylinder_dict["opt_kwargs"], "options"): - cylinder_dict["opt_kwargs"]["options"]["branching_factors"] = branching_factors + cylinder_dict["opt_kwargs"]["options"]["branching_factors"] = ( + branching_factors + ) if all_nodenames is None: - all_nodenames = sputils.create_nodenames_from_branching_factors(branching_factors) + all_nodenames = sputils.create_nodenames_from_branching_factors( + branching_factors + ) if all_nodenames is not None: cylinder_dict["opt_kwargs"]["all_nodenames"] = all_nodenames return cylinder_dict + def ph_hub( - cfg, - scenario_creator, - scenario_denouement, - all_scenario_names, - scenario_creator_kwargs=None, - ph_extensions=None, - extension_kwargs=None, - ph_converger=None, - rho_setter=None, - variable_probability=None, - all_nodenames=None, + cfg, + scenario_creator, + scenario_denouement, + all_scenario_names, + scenario_creator_kwargs=None, + ph_extensions=None, + extension_kwargs=None, + ph_converger=None, + rho_setter=None, + variable_probability=None, + all_nodenames=None, ): shoptions = shared_options(cfg) options = copy.deepcopy(shoptions) @@ -108,9 +116,13 @@ def ph_hub( hub_dict = { "hub_class": PHHub, - "hub_kwargs": {"options": {"rel_gap": cfg.rel_gap, - "abs_gap": cfg.abs_gap, - "max_stalled_iters": cfg.max_stalled_iters}}, + "hub_kwargs": { + "options": { + "rel_gap": cfg.rel_gap, + "abs_gap": cfg.abs_gap, + "max_stalled_iters": cfg.max_stalled_iters, + } + }, "opt_class": PH, "opt_kwargs": { "options": options, @@ -123,15 +135,16 @@ def ph_hub( "extensions": ph_extensions, "extension_kwargs": extension_kwargs, "ph_converger": ph_converger, - "all_nodenames": all_nodenames - } + "all_nodenames": 
all_nodenames, + }, } add_wxbar_read_write(hub_dict, cfg) add_ph_tracking(hub_dict, cfg) return hub_dict -def aph_hub(cfg, +def aph_hub( + cfg, scenario_creator, scenario_denouement, all_scenario_names, @@ -143,34 +156,37 @@ def aph_hub(cfg, all_nodenames=None, ): # TBD: March 2023: multiple extensions needs work - hub_dict = ph_hub(cfg, - scenario_creator, - scenario_denouement, - all_scenario_names, - scenario_creator_kwargs=scenario_creator_kwargs, - ph_extensions=ph_extensions, - extension_kwargs=extension_kwargs, - rho_setter=rho_setter, - variable_probability=variable_probability, - all_nodenames = all_nodenames, - ) - - hub_dict['hub_class'] = APHHub - hub_dict['opt_class'] = APH - - hub_dict['opt_kwargs']['options']['APHgamma'] = cfg.aph_gamma - hub_dict['opt_kwargs']['options']['APHnu'] = cfg.aph_nu - hub_dict['opt_kwargs']['options']['async_frac_needed'] = cfg.aph_frac_needed - hub_dict['opt_kwargs']['options']['dispatch_frac'] = cfg.aph_dispatch_frac - hub_dict['opt_kwargs']['options']['async_sleep_secs'] = cfg.aph_sleep_seconds + hub_dict = ph_hub( + cfg, + scenario_creator, + scenario_denouement, + all_scenario_names, + scenario_creator_kwargs=scenario_creator_kwargs, + ph_extensions=ph_extensions, + extension_kwargs=extension_kwargs, + rho_setter=rho_setter, + variable_probability=variable_probability, + all_nodenames=all_nodenames, + ) + + hub_dict["hub_class"] = APHHub + hub_dict["opt_class"] = APH + + hub_dict["opt_kwargs"]["options"]["APHgamma"] = cfg.aph_gamma + hub_dict["opt_kwargs"]["options"]["APHnu"] = cfg.aph_nu + hub_dict["opt_kwargs"]["options"]["async_frac_needed"] = cfg.aph_frac_needed + hub_dict["opt_kwargs"]["options"]["dispatch_frac"] = cfg.aph_dispatch_frac + hub_dict["opt_kwargs"]["options"]["async_sleep_secs"] = cfg.aph_sleep_seconds return hub_dict -def extension_adder(hub_dict,ext_class): +def extension_adder(hub_dict, ext_class): # TBD March 2023: this is not really good enough - if "extensions" not in hub_dict["opt_kwargs"] or \ - hub_dict["opt_kwargs"]["extensions"] is None: + if ( + "extensions" not in hub_dict["opt_kwargs"] + or hub_dict["opt_kwargs"]["extensions"] is None + ): hub_dict["opt_kwargs"]["extensions"] = ext_class elif hub_dict["opt_kwargs"]["extensions"] == MultiExtension: if hub_dict["opt_kwargs"]["extension_kwargs"] is None: @@ -178,52 +194,63 @@ def extension_adder(hub_dict,ext_class): if ext_class not in hub_dict["opt_kwargs"]["extension_kwargs"]["ext_classes"]: hub_dict["opt_kwargs"]["extension_kwargs"]["ext_classes"].append(ext_class) elif hub_dict["opt_kwargs"]["extensions"] != ext_class: - #ext_class is the second extension + # ext_class is the second extension if "extensions_kwargs" not in hub_dict["opt_kwargs"]: hub_dict["opt_kwargs"]["extension_kwargs"] = {} - hub_dict["opt_kwargs"]["extension_kwargs"]["ext_classes"] = \ - [hub_dict["opt_kwargs"]["extensions"], ext_class] + hub_dict["opt_kwargs"]["extension_kwargs"]["ext_classes"] = [ + hub_dict["opt_kwargs"]["extensions"], + ext_class, + ] hub_dict["opt_kwargs"]["extensions"] = MultiExtension return hub_dict -def add_fixer(hub_dict, - cfg, - ): - hub_dict = extension_adder(hub_dict,Fixer) - hub_dict["opt_kwargs"]["options"]["fixeroptions"] = {"verbose":False, - "boundtol": cfg.fixer_tol, - "id_fix_list_fct": cfg.id_fix_list_fct} +def add_fixer( + hub_dict, + cfg, +): + hub_dict = extension_adder(hub_dict, Fixer) + hub_dict["opt_kwargs"]["options"]["fixeroptions"] = { + "verbose": False, + "boundtol": cfg.fixer_tol, + "id_fix_list_fct": cfg.id_fix_list_fct, + } return 
hub_dict -def add_cross_scenario_cuts(hub_dict, - cfg, - ): - #WARNING: Do not use without a cross_scenario_cuts spoke + +def add_cross_scenario_cuts( + hub_dict, + cfg, +): + # WARNING: Do not use without a cross_scenario_cuts spoke hub_dict = extension_adder(hub_dict, CrossScenarioExtension) - hub_dict["opt_kwargs"]["options"]["cross_scen_options"]\ - = {"check_bound_improve_iterations" : cfg.cross_scenario_iter_cnt} + hub_dict["opt_kwargs"]["options"]["cross_scen_options"] = { + "check_bound_improve_iterations": cfg.cross_scenario_iter_cnt + } return hub_dict -def add_reduced_costs_fixer(hub_dict, - cfg, - ): - #WARNING: Do not use without a reduced_costs_spoke spoke + +def add_reduced_costs_fixer( + hub_dict, + cfg, +): + # WARNING: Do not use without a reduced_costs_spoke spoke hub_dict = extension_adder(hub_dict, ReducedCostsFixer) hub_dict["opt_kwargs"]["options"]["rc_options"] = { - "verbose": cfg.rc_verbose, - "debug": cfg.rc_debug, - "use_rc_fixer": cfg.rc_fixer, - "zero_rc_tol": cfg.rc_zero_tol, - "fix_fraction_target_iter0": cfg.rc_fix_fraction_iter0, - "fix_fraction_target_iterK": cfg.rc_fix_fraction_iterk, - "use_rc_bt": cfg.rc_bound_tightening, - "rc_bound_tol": cfg.rc_bound_tol, - } + "verbose": cfg.rc_verbose, + "debug": cfg.rc_debug, + "use_rc_fixer": cfg.rc_fixer, + "zero_rc_tol": cfg.rc_zero_tol, + "fix_fraction_target_iter0": cfg.rc_fix_fraction_iter0, + "fix_fraction_target_iterK": cfg.rc_fix_fraction_iterk, + "use_rc_bt": cfg.rc_bound_tightening, + "rc_bound_tol": cfg.rc_bound_tol, + } return hub_dict + def add_wxbar_read_write(hub_dict, cfg): """ Add the wxbar read and write extensions to the hub_dict @@ -232,73 +259,89 @@ def add_wxbar_read_write(hub_dict, cfg): At the moment, the options are not stored in a extension options dict() but are 'loose' in the hub options dict """ - if _hasit(cfg, 'init_W_fname') or _hasit(cfg, 'init_Xbar_fname'): + if _hasit(cfg, "init_W_fname") or _hasit(cfg, "init_Xbar_fname"): hub_dict = extension_adder(hub_dict, WXBarReader) hub_dict["opt_kwargs"]["options"].update( - {"init_W_fname" : cfg.init_W_fname, - "init_Xbar_fname" : cfg.init_Xbar_fname, - "init_separate_W_files" : cfg.init_separate_W_files - }) - if _hasit(cfg, 'W_fname') or _hasit(cfg, 'Xbar_fname'): + { + "init_W_fname": cfg.init_W_fname, + "init_Xbar_fname": cfg.init_Xbar_fname, + "init_separate_W_files": cfg.init_separate_W_files, + } + ) + if _hasit(cfg, "W_fname") or _hasit(cfg, "Xbar_fname"): hub_dict = extension_adder(hub_dict, WXBarWriter) hub_dict["opt_kwargs"]["options"].update( - {"W_fname" : cfg.W_fname, - "Xbar_fname" : cfg.Xbar_fname, - "separate_W_files" : cfg.separate_W_files - }) + { + "W_fname": cfg.W_fname, + "Xbar_fname": cfg.Xbar_fname, + "separate_W_files": cfg.separate_W_files, + } + ) return hub_dict + def add_ph_tracking(cylinder_dict, cfg, spoke=False): - """ Manage the phtracker extension and bridge gap between config and ph options dict - Args: - cylinder_dict (dict): the hub or spoke dictionary - cfg (dict): the configuration dictionary - spoke (bool, optional): Whether the cylinder is a spoke. Defaults to False. 
- Returns: - cylinder_dict (dict): the updated hub or spoke dictionary - - for cfg.track_* flags, the ints are mapped as followed: - - 0: do not track - 1: track for all cylinders - 2: track for hub only - 3: track for spokes only - 4: track and plot for all cylinders - 5: track and plot for hub - 6: track and plot for spokes - - If 'ph_track_progress' is True in the cfg dictionary, this function adds the - ph tracking extension to the cylinder dict with the specified tracking options. + """Manage the phtracker extension and bridge gap between config and ph options dict + Args: + cylinder_dict (dict): the hub or spoke dictionary + cfg (dict): the configuration dictionary + spoke (bool, optional): Whether the cylinder is a spoke. Defaults to False. + Returns: + cylinder_dict (dict): the updated hub or spoke dictionary + + for cfg.track_* flags, the ints are mapped as followed: + + 0: do not track + 1: track for all cylinders + 2: track for hub only + 3: track for spokes only + 4: track and plot for all cylinders + 5: track and plot for hub + 6: track and plot for spokes + + If 'ph_track_progress' is True in the cfg dictionary, this function adds the + ph tracking extension to the cylinder dict with the specified tracking options. """ - if _hasit(cfg, 'ph_track_progress') and cfg.ph_track_progress: + if _hasit(cfg, "ph_track_progress") and cfg.ph_track_progress: from mpisppy.extensions.phtracker import PHTracker + cylinder_dict = extension_adder(cylinder_dict, PHTracker) phtrackeroptions = {"results_folder": cfg.tracking_folder} - t_vars = ['convergence', 'xbars', 'duals', 'nonants', 'scen_gaps', 'reduced_costs'] + t_vars = [ + "convergence", + "xbars", + "duals", + "nonants", + "scen_gaps", + "reduced_costs", + ] for t_var in t_vars: - if _hasit(cfg, f'track_{t_var}'): - trval = cfg[f'track_{t_var}'] - if ((trval in {1, 4} or \ - (not spoke and trval in {2, 5}) or \ - (spoke and trval in {3, 6}))): - phtrackeroptions[f'track_{t_var}'] = True + if _hasit(cfg, f"track_{t_var}"): + trval = cfg[f"track_{t_var}"] + if ( + trval in {1, 4} + or (not spoke and trval in {2, 5}) + or (spoke and trval in {3, 6}) + ): + phtrackeroptions[f"track_{t_var}"] = True if trval in {4, 5, 6}: - phtrackeroptions[f'plot_{t_var}'] = True + phtrackeroptions[f"plot_{t_var}"] = True # because convergence maps to multiple tracking options - if phtrackeroptions.get('track_convergence'): - phtrackeroptions['track_bounds'] = True - phtrackeroptions['track_gaps'] = True - if phtrackeroptions.get('plot_convergence'): - phtrackeroptions['plot_bounds'] = True - phtrackeroptions['plot_gaps'] = True + if phtrackeroptions.get("track_convergence"): + phtrackeroptions["track_bounds"] = True + phtrackeroptions["track_gaps"] = True + if phtrackeroptions.get("plot_convergence"): + phtrackeroptions["plot_bounds"] = True + phtrackeroptions["plot_gaps"] = True cylinder_dict["opt_kwargs"]["options"]["phtracker_options"] = phtrackeroptions return cylinder_dict + def fwph_spoke( cfg, scenario_creator, @@ -338,7 +381,7 @@ def fwph_spoke( "scenario_creator_kwargs": scenario_creator_kwargs, "scenario_denouement": scenario_denouement, "all_nodenames": all_nodenames, - "rho_setter" : rho_setter, + "rho_setter": rho_setter, }, } return fw_dict @@ -346,19 +389,19 @@ def fwph_spoke( # The next function is to provide some standardization def _PHBase_spoke_foundation( - spoke_class, - cfg, - scenario_creator, - scenario_denouement, - all_scenario_names, - scenario_creator_kwargs=None, - rho_setter=None, - all_nodenames=None, - ph_extensions=None, - 
): + spoke_class, + cfg, + scenario_creator, + scenario_denouement, + all_scenario_names, + scenario_creator_kwargs=None, + rho_setter=None, + all_nodenames=None, + ph_extensions=None, +): # only the shared options shoptions = shared_options(cfg) - my_options = copy.deepcopy(shoptions) # extra safe... + my_options = copy.deepcopy(shoptions) # extra safe... spoke_dict = { "spoke_class": spoke_class, "opt_class": PHBase, @@ -367,8 +410,8 @@ def _PHBase_spoke_foundation( "all_scenario_names": all_scenario_names, "scenario_creator": scenario_creator, "scenario_creator_kwargs": scenario_creator_kwargs, - 'scenario_denouement': scenario_denouement, - } + "scenario_denouement": scenario_denouement, + }, } if all_nodenames is not None: spoke_dict["opt_kwargs"]["all_nodenames"] = all_nodenames @@ -379,17 +422,18 @@ def _PHBase_spoke_foundation( return spoke_dict + def _Xhat_Eval_spoke_foundation( - spoke_class, - cfg, - scenario_creator, - scenario_denouement, - all_scenario_names, - scenario_creator_kwargs=None, - rho_setter=None, - all_nodenames=None, - ph_extensions=None, - ): + spoke_class, + cfg, + scenario_creator, + scenario_denouement, + all_scenario_names, + scenario_creator_kwargs=None, + rho_setter=None, + all_nodenames=None, + ph_extensions=None, +): spoke_dict = _PHBase_spoke_foundation( spoke_class, cfg, @@ -399,7 +443,8 @@ def _Xhat_Eval_spoke_foundation( scenario_creator_kwargs=scenario_creator_kwargs, rho_setter=rho_setter, all_nodenames=all_nodenames, - ph_extensions=ph_extensions) + ph_extensions=ph_extensions, + ) spoke_dict["opt_class"] = Xhat_Eval if ph_extensions is not None: spoke_dict["opt_kwargs"]["ph_extensions"] = ph_extensions @@ -427,11 +472,13 @@ def lagrangian_spoke( all_nodenames=all_nodenames, ) if cfg.lagrangian_iter0_mipgap is not None: - lagrangian_spoke["opt_kwargs"]["options"]["iter0_solver_options"]\ - ["mipgap"] = cfg.lagrangian_iter0_mipgap + lagrangian_spoke["opt_kwargs"]["options"]["iter0_solver_options"]["mipgap"] = ( + cfg.lagrangian_iter0_mipgap + ) if cfg.lagrangian_iterk_mipgap is not None: - lagrangian_spoke["opt_kwargs"]["options"]["iterk_solver_options"]\ - ["mipgap"] = cfg.lagrangian_iterk_mipgap + lagrangian_spoke["opt_kwargs"]["options"]["iterk_solver_options"]["mipgap"] = ( + cfg.lagrangian_iterk_mipgap + ) add_ph_tracking(lagrangian_spoke, cfg, spoke=True) return lagrangian_spoke @@ -471,7 +518,7 @@ def lagranger_spoke( all_scenario_names, scenario_creator_kwargs=None, rho_setter=None, - all_nodenames = None, + all_nodenames=None, ): lagranger_spoke = _PHBase_spoke_foundation( LagrangerOuterBound, @@ -484,15 +531,17 @@ def lagranger_spoke( all_nodenames=all_nodenames, ) if cfg.lagranger_iter0_mipgap is not None: - lagranger_spoke["opt_kwargs"]["options"]["iter0_solver_options"]\ - ["mipgap"] = cfg.lagranger_iter0_mipgap + lagranger_spoke["opt_kwargs"]["options"]["iter0_solver_options"]["mipgap"] = ( + cfg.lagranger_iter0_mipgap + ) if cfg.lagranger_iterk_mipgap is not None: - lagranger_spoke["opt_kwargs"]["options"]["iterk_solver_options"]\ - ["mipgap"] = cfg.lagranger_iterk_mipgap + lagranger_spoke["opt_kwargs"]["options"]["iterk_solver_options"]["mipgap"] = ( + cfg.lagranger_iterk_mipgap + ) if cfg.lagranger_rho_rescale_factors_json is not None: - lagranger_spoke["opt_kwargs"]["options"]\ - ["lagranger_rho_rescale_factors_json"]\ - = cfg.lagranger_rho_rescale_factors_json + lagranger_spoke["opt_kwargs"]["options"][ + "lagranger_rho_rescale_factors_json" + ] = cfg.lagranger_rho_rescale_factors_json add_ph_tracking(lagranger_spoke, cfg, 
spoke=True) return lagranger_spoke @@ -517,14 +566,17 @@ def subgradient_spoke( all_nodenames=all_nodenames, ) if cfg.subgradient_iter0_mipgap is not None: - subgradient_spoke["opt_kwargs"]["options"]["iter0_solver_options"]\ - ["mipgap"] = cfg.subgradient_iter0_mipgap + subgradient_spoke["opt_kwargs"]["options"]["iter0_solver_options"]["mipgap"] = ( + cfg.subgradient_iter0_mipgap + ) if cfg.subgradient_iterk_mipgap is not None: - subgradient_spoke["opt_kwargs"]["options"]["iterk_solver_options"]\ - ["mipgap"] = cfg.subgradient_iterk_mipgap + subgradient_spoke["opt_kwargs"]["options"]["iterk_solver_options"]["mipgap"] = ( + cfg.subgradient_iterk_mipgap + ) if cfg.subgradient_rho_multiplier is not None: - subgradient_spoke["opt_kwargs"]["options"]["subgradient_rho_multiplier"]\ - = cfg.subgradient_rho_multiplier + subgradient_spoke["opt_kwargs"]["options"]["subgradient_rho_multiplier"] = ( + cfg.subgradient_rho_multiplier + ) add_ph_tracking(subgradient_spoke, cfg, spoke=True) return subgradient_spoke @@ -538,9 +590,8 @@ def xhatlooper_spoke( scenario_creator_kwargs=None, ph_extensions=None, ): - xhatlooper_dict = _Xhat_Eval_spoke_foundation( - XhatLooperInnerBound, + XhatLooperInnerBound, cfg, scenario_creator, scenario_denouement, @@ -549,26 +600,30 @@ def xhatlooper_spoke( ph_extensions=ph_extensions, ) - xhatlooper_dict["opt_kwargs"]["options"]['bundles_per_rank'] = 0 # no bundles for xhat + xhatlooper_dict["opt_kwargs"]["options"]["bundles_per_rank"] = ( + 0 # no bundles for xhat + ) xhatlooper_dict["opt_kwargs"]["options"]["xhat_looper_options"] = { - "xhat_solver_options": xhatlooper_dict["opt_kwargs"]["options"]["iterk_solver_options"], + "xhat_solver_options": xhatlooper_dict["opt_kwargs"]["options"][ + "iterk_solver_options" + ], "scen_limit": cfg.xhat_scen_limit, "dump_prefix": "delme", "csvname": "looper.csv", } - + return xhatlooper_dict def xhatxbar_spoke( - cfg, - scenario_creator, - scenario_denouement, - all_scenario_names, - scenario_creator_kwargs=None, - variable_probability=None, - ph_extensions=None, - all_nodenames=None, + cfg, + scenario_creator, + scenario_denouement, + all_scenario_names, + scenario_creator_kwargs=None, + variable_probability=None, + ph_extensions=None, + all_nodenames=None, ): xhatxbar_dict = _Xhat_Eval_spoke_foundation( XhatXbarInnerBound, @@ -581,13 +636,17 @@ def xhatxbar_spoke( all_nodenames=all_nodenames, ) - xhatxbar_dict["opt_kwargs"]["options"]['bundles_per_rank'] = 0 # no bundles for xhat + xhatxbar_dict["opt_kwargs"]["options"]["bundles_per_rank"] = ( + 0 # no bundles for xhat + ) xhatxbar_dict["opt_kwargs"]["options"]["xhat_xbar_options"] = { - "xhat_solver_options": xhatxbar_dict["opt_kwargs"]["options"]["iterk_solver_options"], + "xhat_solver_options": xhatxbar_dict["opt_kwargs"]["options"][ + "iterk_solver_options" + ], "dump_prefix": "delme", "csvname": "looper.csv", } - + xhatxbar_dict["opt_kwargs"]["variable_probability"] = variable_probability return xhatxbar_dict @@ -602,9 +661,8 @@ def xhatshuffle_spoke( scenario_creator_kwargs=None, ph_extensions=None, ): - xhatshuffle_dict = _Xhat_Eval_spoke_foundation( - XhatShuffleInnerBound, + XhatShuffleInnerBound, cfg, scenario_creator, scenario_denouement, @@ -613,16 +671,24 @@ def xhatshuffle_spoke( scenario_creator_kwargs=scenario_creator_kwargs, ph_extensions=ph_extensions, ) - xhatshuffle_dict["opt_kwargs"]["options"]['bundles_per_rank'] = 0 # no bundles for xhat + xhatshuffle_dict["opt_kwargs"]["options"]["bundles_per_rank"] = ( + 0 # no bundles for xhat + ) 
xhatshuffle_dict["opt_kwargs"]["options"]["xhat_looper_options"] = { - "xhat_solver_options": xhatshuffle_dict["opt_kwargs"]["options"]["iterk_solver_options"], + "xhat_solver_options": xhatshuffle_dict["opt_kwargs"]["options"][ + "iterk_solver_options" + ], "dump_prefix": "delme", "csvname": "looper.csv", } if _hasit(cfg, "add_reversed_shuffle"): - xhatshuffle_dict["opt_kwargs"]["options"]["xhat_looper_options"]["reverse"] = cfg.add_reversed_shuffle + xhatshuffle_dict["opt_kwargs"]["options"]["xhat_looper_options"]["reverse"] = ( + cfg.add_reversed_shuffle + ) if _hasit(cfg, "add_reversed_shuffle"): - xhatshuffle_dict["opt_kwargs"]["options"]["xhatshuffle_iter_step"] = cfg.xhatshuffle_iter_step + xhatshuffle_dict["opt_kwargs"]["options"]["xhatshuffle_iter_step"] = ( + cfg.xhatshuffle_iter_step + ) return xhatshuffle_dict @@ -635,11 +701,10 @@ def xhatspecific_spoke( scenario_dict, all_nodenames=None, scenario_creator_kwargs=None, - ph_extensions=None, + ph_extensions=None, ): - xhatspecific_dict = _Xhat_Eval_spoke_foundation( - XhatSpecificInnerBound, + XhatSpecificInnerBound, cfg, scenario_creator, scenario_denouement, @@ -647,9 +712,12 @@ def xhatspecific_spoke( scenario_creator_kwargs=scenario_creator_kwargs, ph_extensions=ph_extensions, ) - xhatspecific_dict["opt_kwargs"]["options"]['bundles_per_rank'] = 0 # no bundles for xhat + xhatspecific_dict["opt_kwargs"]["options"]["bundles_per_rank"] = ( + 0 # no bundles for xhat + ) return xhatspecific_dict + def xhatlshaped_spoke( cfg, scenario_creator, @@ -658,7 +726,6 @@ def xhatlshaped_spoke( scenario_creator_kwargs=None, ph_extensions=None, ): - xhatlshaped_dict = _Xhat_Eval_spoke_foundation( XhatLShapedInnerBound, cfg, @@ -668,10 +735,13 @@ def xhatlshaped_spoke( scenario_creator_kwargs=scenario_creator_kwargs, ph_extensions=ph_extensions, ) - xhatlshaped_dict["opt_kwargs"]["options"]['bundles_per_rank'] = 0 # no bundles for xhat + xhatlshaped_dict["opt_kwargs"]["options"]["bundles_per_rank"] = ( + 0 # no bundles for xhat + ) return xhatlshaped_dict + def slammax_spoke( cfg, scenario_creator, @@ -680,7 +750,6 @@ def slammax_spoke( scenario_creator_kwargs=None, ph_extensions=None, ): - slammax_dict = _Xhat_Eval_spoke_foundation( SlamMaxHeuristic, cfg, @@ -690,9 +759,12 @@ def slammax_spoke( scenario_creator_kwargs=scenario_creator_kwargs, ph_extensions=ph_extensions, ) - slammax_dict["opt_kwargs"]["options"]['bundles_per_rank'] = 0 # no bundles for slamming + slammax_dict["opt_kwargs"]["options"]["bundles_per_rank"] = ( + 0 # no bundles for slamming + ) return slammax_dict + def slammin_spoke( cfg, scenario_creator, @@ -710,7 +782,9 @@ def slammin_spoke( scenario_creator_kwargs=scenario_creator_kwargs, ph_extensions=ph_extensions, ) - slammin_dict["opt_kwargs"]["options"]['bundles_per_rank'] = 0 # no bundles for slamming + slammin_dict["opt_kwargs"]["options"]["bundles_per_rank"] = ( + 0 # no bundles for slamming + ) return slammin_dict @@ -722,20 +796,20 @@ def cross_scenario_cuts_spoke( scenario_creator_kwargs=None, all_nodenames=None, ): - if _hasit(cfg, "max_solver_threads"): - sp_solver_options = {"threads":cfg.max_solver_threads} + sp_solver_options = {"threads": cfg.max_solver_threads} else: sp_solver_options = dict() if _hasit(cfg, "eta_bounds_mipgap"): sp_solver_options["mipgap"] = cfg.eta_bounds_mipgap - ls_options = { "root_solver" : cfg.solver_name, - "sp_solver": cfg.solver_name, - "sp_solver_options" : sp_solver_options, - "verbose": cfg.verbose, - } + ls_options = { + "root_solver": cfg.solver_name, + "sp_solver": 
cfg.solver_name, + "sp_solver_options": sp_solver_options, + "verbose": cfg.verbose, + } cut_spoke = { "spoke_class": CrossScenarioCutSpoke, "opt_class": LShapedMethod, @@ -745,12 +819,13 @@ def cross_scenario_cuts_spoke( "scenario_creator": scenario_creator, "scenario_creator_kwargs": scenario_creator_kwargs, "scenario_denouement": scenario_denouement, - "all_nodenames": all_nodenames - }, - } + "all_nodenames": all_nodenames, + }, + } return cut_spoke + # run PH with smaller rho to compute LB def ph_ob_spoke( cfg, @@ -771,24 +846,21 @@ def ph_ob_spoke( "all_scenario_names": all_scenario_names, "scenario_creator": scenario_creator, "scenario_creator_kwargs": scenario_creator_kwargs, - 'scenario_denouement': scenario_denouement, + "scenario_denouement": scenario_denouement, "rho_setter": rho_setter, "all_nodenames": all_nodenames, "variable_probability": variable_probability, - } + }, } if cfg.ph_ob_rho_rescale_factors_json is not None: - ph_ob_spoke["opt_kwargs"]["options"]\ - ["ph_ob_rho_rescale_factors_json"]\ - = cfg.ph_ob_rho_rescale_factors_json - ph_ob_spoke["opt_kwargs"]["options"]["ph_ob_initial_rho_rescale_factor"]\ - = cfg.ph_ob_initial_rho_rescale_factor + ph_ob_spoke["opt_kwargs"]["options"]["ph_ob_rho_rescale_factors_json"] = ( + cfg.ph_ob_rho_rescale_factors_json + ) + ph_ob_spoke["opt_kwargs"]["options"]["ph_ob_initial_rho_rescale_factor"] = ( + cfg.ph_ob_initial_rho_rescale_factor + ) if cfg.ph_ob_gradient_rho: - ph_ob_spoke["opt_kwargs"]["options"]\ - ["ph_ob_gradient_rho"]\ - = dict() - ph_ob_spoke["opt_kwargs"]["options"]\ - ["ph_ob_gradient_rho"]["cfg"]\ - = cfg + ph_ob_spoke["opt_kwargs"]["options"]["ph_ob_gradient_rho"] = dict() + ph_ob_spoke["opt_kwargs"]["options"]["ph_ob_gradient_rho"]["cfg"] = cfg return ph_ob_spoke diff --git a/mpisppy/utils/config.py b/mpisppy/utils/config.py index 397b1179a..6502595aa 100644 --- a/mpisppy/utils/config.py +++ b/mpisppy/utils/config.py @@ -11,7 +11,7 @@ # supporting member functions. # NOTE: the xxxx_args() naming convention is used by amalgamator.py -""" Notes +"""Notes The default for all 'with' options is False and we are dropping the with_ (and we are dropping the `no` side that was in baseparsers.py) (so we are also dropping the use of with_) @@ -50,16 +50,23 @@ import argparse import pyomo.common.config as pyofig + # class to inherit from ConfigDict with a name field class Config(pyofig.ConfigDict): # remember that the parent uses slots - #=============== - def add_to_config(self, name, description, domain, default, - argparse=True, - complain=False, - argparse_args=None): - """ Add an arg to the self dict. + # =============== + def add_to_config( + self, + name, + description, + domain, + default, + argparse=True, + complain=False, + argparse_args=None, + ): + """Add an arg to the self dict. 
Args: name (str): the argument name, underscore seperated description (str): free text description @@ -74,20 +81,21 @@ def add_to_config(self, name, description, domain, default, print(f"Duplicate {name} will not be added to self.") # raise RuntimeError(f"Trying to add duplicate {name} to self.") else: - c = self.declare(name, pyofig.ConfigValue( - description = description, - domain = domain, - default = default)) + c = self.declare( + name, + pyofig.ConfigValue( + description=description, domain=domain, default=default + ), + ) if argparse: if argparse_args is not None: c.declare_as_argument(**argparse_args) else: c.declare_as_argument() - - #=============== + # =============== def add_and_assign(self, name, description, domain, default, value, complain=False): - """ Add an arg to the self dict and assign it a value + """Add an arg to the self dict and assign it a value Args: name (str): the argument name, underscore separated description (str): free text description @@ -98,16 +106,17 @@ def add_and_assign(self, name, description, domain, default, value, complain=Fal """ if name in self: if complain: - print(f"Duplicate {name} will not be added to self by add_and_assign {value}.") + print( + f"Duplicate {name} will not be added to self by add_and_assign {value}." + ) # raise RuntimeError(f"Trying to add duplicate {name} to self.") else: self.add_to_config(name, description, domain, default, argparse=False) self[name] = value - - #=============== + # =============== def dict_assign(self, name, description, domain, default, value): - """ mimic dict assignment + """mimic dict assignment Args: name (str): the argument name, underscore separated description (str): free text description @@ -120,10 +129,9 @@ def dict_assign(self, name, description, domain, default, value): else: self[name] = value - - #=============== + # =============== def quick_assign(self, name, domain, value): - """ mimic dict assignment with fewer args + """mimic dict assignment with fewer args Args: name (str): the argument name, underscore separated domain (type): see pyomo config docs @@ -131,10 +139,9 @@ def quick_assign(self, name, domain, value): """ self.dict_assign(name, f"field for {name}", domain, None, value) - - #=============== + # =============== def get(self, name, ifmissing=None): - """ replcate the behavior of dict get""" + """replcate the behavior of dict get""" if name in self: return self[name] else: @@ -142,115 +149,149 @@ def get(self, name, ifmissing=None): def add_solver_specs(self, prefix=""): sstr = f"{prefix}_solver" if prefix != "" else "solver" - self.add_to_config(f"{sstr}_name", - description= "solver name (default None)", - domain = str, - default=None) + self.add_to_config( + f"{sstr}_name", + description="solver name (default None)", + domain=str, + default=None, + ) - self.add_to_config(f"{sstr}_options", - description= "solver options; space delimited with = for values (default None)", - domain = str, - default=None) + self.add_to_config( + f"{sstr}_options", + description="solver options; space delimited with = for values (default None)", + domain=str, + default=None, + ) def _common_args(self): - raise RuntimeError("_common_args is no longer used. See comments at top of config.py") + raise RuntimeError( + "_common_args is no longer used. 
See comments at top of config.py" + ) def popular_args(self): - self.add_to_config("max_iterations", - description="hub max iiterations (default 1)", - domain=int, - default=1) + self.add_to_config( + "max_iterations", + description="hub max iiterations (default 1)", + domain=int, + default=1, + ) self.add_solver_specs(prefix="") - self.add_to_config("seed", - description="Seed for random numbers (default is 1134)", - domain=int, - default=1134) - - self.add_to_config("default_rho", - description="Global rho for PH (default None)", - domain=float, - default=None) - - self.add_to_config("bundles_per_rank", - description="bundles per rank (default 0 (no bundles))", - domain=int, - default=0) - - self.add_to_config('verbose', - description="verbose output", - domain=bool, - default=False) - - self.add_to_config('display_progress', - description="display progress at each iteration", - domain=bool, - default=False) - - self.add_to_config('display_convergence_detail', - description="display non-anticipative variable convergence statistics at each iteration", - domain=bool, - default=False) - - self.add_to_config("max_solver_threads", - description="Limit on threads per solver (default None)", - domain=int, - default=None) - - self.add_to_config("intra_hub_conv_thresh", - description="Within hub convergence threshold (default 1e-10)", - domain=float, - default=1e-10) - - self.add_to_config("trace_prefix", - description="Prefix for bound spoke trace files. If None " - "bound spoke trace files are not written.", - domain=str, - default=None) - - self.add_to_config("tee_rank0_solves", - description="Some cylinders support tee of rank 0 solves." - "(With multiple cylinders this could be confusing.)", - domain=bool, - default=False) - - self.add_to_config("auxilliary", - description="Free text for use by hackers (default '').", - domain=str, - default='') - - self.add_to_config("presolve", - description="Run the distributed presolver. 
" - "Currently only does distributed feasibility-based bounds tightening.", - domain=bool, - default=False) + self.add_to_config( + "seed", + description="Seed for random numbers (default is 1134)", + domain=int, + default=1134, + ) - def ph_args(self): - self.add_to_config("linearize_binary_proximal_terms", - description="For PH, linearize the proximal terms for " - "all binary nonanticipative variables", - domain=bool, - default=False) + self.add_to_config( + "default_rho", + description="Global rho for PH (default None)", + domain=float, + default=None, + ) + self.add_to_config( + "bundles_per_rank", + description="bundles per rank (default 0 (no bundles))", + domain=int, + default=0, + ) - self.add_to_config("linearize_proximal_terms", - description="For PH, linearize the proximal terms for " - "all nonanticipative variables", - domain=bool, - default=False) + self.add_to_config( + "verbose", description="verbose output", domain=bool, default=False + ) + self.add_to_config( + "display_progress", + description="display progress at each iteration", + domain=bool, + default=False, + ) - self.add_to_config("proximal_linearization_tolerance", - description="For PH, when linearizing proximal terms, " - "a cut will be added if the proximal term approximation " - "is looser than this value (default 1e-1)", - domain=float, - default=1.e-1) + self.add_to_config( + "display_convergence_detail", + description="display non-anticipative variable convergence statistics at each iteration", + domain=bool, + default=False, + ) - def make_parser(self, progname=None, num_scens_reqd=False): - raise RuntimeError("make_parser is no longer used. See comments at top of config.py") + self.add_to_config( + "max_solver_threads", + description="Limit on threads per solver (default None)", + domain=int, + default=None, + ) + + self.add_to_config( + "intra_hub_conv_thresh", + description="Within hub convergence threshold (default 1e-10)", + domain=float, + default=1e-10, + ) + + self.add_to_config( + "trace_prefix", + description="Prefix for bound spoke trace files. If None " + "bound spoke trace files are not written.", + domain=str, + default=None, + ) + + self.add_to_config( + "tee_rank0_solves", + description="Some cylinders support tee of rank 0 solves." + "(With multiple cylinders this could be confusing.)", + domain=bool, + default=False, + ) + + self.add_to_config( + "auxilliary", + description="Free text for use by hackers (default '').", + domain=str, + default="", + ) + + self.add_to_config( + "presolve", + description="Run the distributed presolver. " + "Currently only does distributed feasibility-based bounds tightening.", + domain=bool, + default=False, + ) + + def ph_args(self): + self.add_to_config( + "linearize_binary_proximal_terms", + description="For PH, linearize the proximal terms for " + "all binary nonanticipative variables", + domain=bool, + default=False, + ) + + self.add_to_config( + "linearize_proximal_terms", + description="For PH, linearize the proximal terms for " + "all nonanticipative variables", + domain=bool, + default=False, + ) + self.add_to_config( + "proximal_linearization_tolerance", + description="For PH, when linearizing proximal terms, " + "a cut will be added if the proximal term approximation " + "is looser than this value (default 1e-1)", + domain=float, + default=1.0e-1, + ) + + def make_parser(self, progname=None, num_scens_reqd=False): + raise RuntimeError( + "make_parser is no longer used. 
See comments at top of config.py" + ) def num_scens_optional(self): self.add_to_config( @@ -267,55 +308,64 @@ def num_scens_required(self): description="Number of scenarios (default None)", domain=int, default=None, - argparse_args = {"required": True} + argparse_args={"required": True}, ) - def _basic_multistage(self, progname=None, num_scens_reqd=False): - raise RuntimeError("_basic_multistage is no longer used. See comments at top of config.py") + raise RuntimeError( + "_basic_multistage is no longer used. See comments at top of config.py" + ) def add_branching_factors(self): - self.add_to_config("branching_factors", - description="Spaces delimited branching factors (e.g., 2 2)", - domain=pyofig.ListOf(int, pyofig.PositiveInt), - default=None) - + self.add_to_config( + "branching_factors", + description="Spaces delimited branching factors (e.g., 2 2)", + domain=pyofig.ListOf(int, pyofig.PositiveInt), + default=None, + ) def make_multistage_parser(self, progname=None): - raise RuntimeError("make_multistage_parser is no longer used. See comments at top of config.py") + raise RuntimeError( + "make_multistage_parser is no longer used. See comments at top of config.py" + ) def multistage(self): self.add_branching_factors() self.popular_args() - #### EF #### def EF_base(self): self.add_solver_specs(prefix="EF") - self.add_to_config("EF_mipgap", - description="mip gap option for the solver if needed (default None)", - domain=float, - default=None) - self.add_to_config("tee_EF", - description="Show log output if solving the extensive form directly", - domain=bool, - default=False) + self.add_to_config( + "EF_mipgap", + description="mip gap option for the solver if needed (default None)", + domain=float, + default=None, + ) + self.add_to_config( + "tee_EF", + description="Show log output if solving the extensive form directly", + domain=bool, + default=False, + ) # Some EF programs only do EF and don't check this. - self.add_to_config("EF", - description="Solve the extensive form directly; ignore most other directives.", - domain=bool, - default=False) - + self.add_to_config( + "EF", + description="Solve the extensive form directly; ignore most other directives.", + domain=bool, + default=False, + ) def EF2(self): self.EF_base() - self.add_to_config("num_scens", - description="Number of scenarios (default None)", - domain=int, - default=None) - + self.add_to_config( + "num_scens", + description="Number of scenarios (default None)", + domain=int, + default=None, + ) def EF_multistage(self): self.EF_base() @@ -326,440 +376,531 @@ def EF_multistage(self): def two_sided_args(self): # add commands to and also return the result - self.add_to_config("rel_gap", - description="relative termination gap (default 0.05)", - domain=float, - default=0.05) - - self.add_to_config("abs_gap", - description="absolute termination gap (default 0)", - domain=float, - default=0.) 
- - self.add_to_config("max_stalled_iters", - description="maximum iterations with no reduction in gap (default 100)", - domain=int, - default=100) + self.add_to_config( + "rel_gap", + description="relative termination gap (default 0.05)", + domain=float, + default=0.05, + ) + self.add_to_config( + "abs_gap", + description="absolute termination gap (default 0)", + domain=float, + default=0.0, + ) + self.add_to_config( + "max_stalled_iters", + description="maximum iterations with no reduction in gap (default 100)", + domain=int, + default=100, + ) def mip_options(self): + self.add_to_config( + "iter0_mipgap", + description="mip gap option for iteration 0 (default None)", + domain=float, + default=None, + ) - self.add_to_config("iter0_mipgap", - description="mip gap option for iteration 0 (default None)", - domain=float, - default=None) - - self.add_to_config("iterk_mipgap", - description="mip gap option non-zero iterations (default None)", - domain=float, - default=None) - + self.add_to_config( + "iterk_mipgap", + description="mip gap option non-zero iterations (default None)", + domain=float, + default=None, + ) def aph_args(self): - - self.add_to_config('aph_gamma', - description='Gamma parameter associated with asychronous projective hedging (default 1.0)', - domain=float, - default=1.0) - self.add_to_config('aph_nu', - description='Nu parameter associated with asychronous projective hedging (default 1.0)', - domain=float, - default=1.0) - self.add_to_config('aph_frac_needed', - description='Fraction of sub-problems required before computing projective step (default 1.0)', - domain=float, - default=1.0) - self.add_to_config('aph_dispatch_frac', - description='Fraction of sub-problems to dispatch at each step of asychronous projective hedging (default 1.0)', - domain=float, - default=1.0) - self.add_to_config('aph_sleep_seconds', - description='Spin-lock sleep time for APH (default 0.01)', - domain=float, - default=0.01) - + self.add_to_config( + "aph_gamma", + description="Gamma parameter associated with asychronous projective hedging (default 1.0)", + domain=float, + default=1.0, + ) + self.add_to_config( + "aph_nu", + description="Nu parameter associated with asychronous projective hedging (default 1.0)", + domain=float, + default=1.0, + ) + self.add_to_config( + "aph_frac_needed", + description="Fraction of sub-problems required before computing projective step (default 1.0)", + domain=float, + default=1.0, + ) + self.add_to_config( + "aph_dispatch_frac", + description="Fraction of sub-problems to dispatch at each step of asychronous projective hedging (default 1.0)", + domain=float, + default=1.0, + ) + self.add_to_config( + "aph_sleep_seconds", + description="Spin-lock sleep time for APH (default 0.01)", + domain=float, + default=0.01, + ) def fixer_args(self): + self.add_to_config( + "fixer", + description="have an integer fixer extension", + domain=bool, + default=False, + ) - self.add_to_config('fixer', - description="have an integer fixer extension", - domain=bool, - default=False) - - self.add_to_config("fixer_tol", - description="fixer bounds tolerance (default 1e-4)", - domain=float, - default=1e-2) - + self.add_to_config( + "fixer_tol", + description="fixer bounds tolerance (default 1e-4)", + domain=float, + default=1e-2, + ) def gapper_args(self): - - self.add_to_config('mipgaps_json', - description="path to json file with a mipgap schedule for PH iterations", - domain=str, - default=None) - + self.add_to_config( + "mipgaps_json", + description="path to json file with a 
mipgap schedule for PH iterations", + domain=str, + default=None, + ) def fwph_args(self): + self.add_to_config( + "fwph", description="have an fwph spoke", domain=bool, default=False + ) - self.add_to_config('fwph', - description="have an fwph spoke", - domain=bool, - default=False) - - self.add_to_config("fwph_iter_limit", - description="maximum fwph iterations (default 10)", - domain=int, - default=10) - - self.add_to_config("fwph_weight", - description="fwph weight (default 0)", - domain=float, - default=0.0) - - self.add_to_config("fwph_conv_thresh", - description="fwph convergence threshold (default 1e-4)", - domain=float, - default=1e-4) + self.add_to_config( + "fwph_iter_limit", + description="maximum fwph iterations (default 10)", + domain=int, + default=10, + ) - self.add_to_config("fwph_stop_check_tol", - description="fwph tolerance for Gamma^t (default 1e-4)", - domain=float, - default=1e-4) + self.add_to_config( + "fwph_weight", + description="fwph weight (default 0)", + domain=float, + default=0.0, + ) - self.add_to_config("fwph_mipgap", - description="mip gap option FW subproblems iterations (default None)", - domain=float, - default=None) + self.add_to_config( + "fwph_conv_thresh", + description="fwph convergence threshold (default 1e-4)", + domain=float, + default=1e-4, + ) + self.add_to_config( + "fwph_stop_check_tol", + description="fwph tolerance for Gamma^t (default 1e-4)", + domain=float, + default=1e-4, + ) + self.add_to_config( + "fwph_mipgap", + description="mip gap option FW subproblems iterations (default None)", + domain=float, + default=None, + ) def lagrangian_args(self): + self.add_to_config( + "lagrangian", + description="have a lagrangian spoke", + domain=bool, + default=False, + ) - self.add_to_config('lagrangian', - description="have a lagrangian spoke", - domain=bool, - default=False) - - self.add_to_config("lagrangian_iter0_mipgap", - description="lgr. iter0 solver option mipgap (default None)", - domain=float, - default=None) - - self.add_to_config("lagrangian_iterk_mipgap", - description="lgr. iterk solver option mipgap (default None)", - domain=float, - default=None) + self.add_to_config( + "lagrangian_iter0_mipgap", + description="lgr. iter0 solver option mipgap (default None)", + domain=float, + default=None, + ) + self.add_to_config( + "lagrangian_iterk_mipgap", + description="lgr. 
iterk solver option mipgap (default None)", + domain=float, + default=None, + ) def reduced_costs_args(self): + self.add_to_config( + "reduced_costs", + description="have a reduced costs spoke", + domain=bool, + default=False, + ) - self.add_to_config('reduced_costs', - description="have a reduced costs spoke", - domain=bool, - default=False) - - self.add_to_config('rc_verbose', - description="verbose output for reduced costs", - domain=bool, - default=False) - - self.add_to_config('rc_debug', - description="debug output for reduced costs", - domain=bool, - default=False) - - self.add_to_config('rc_fixer', - description="use the reduced cost fixer", - domain=bool, - default=False) - - self.add_to_config('rc_zero_tol', - description="vars with rc below tol will never be fixed", - domain=float, - default=1e-4) - - self.add_to_config('rc_fix_fraction_iter0', - description="target fix fraction for rc fixer in first iteration", - domain=float, - default=0.0) - - self.add_to_config('rc_fix_fraction_iterk', - description="target fix fraction for rc fixer in subsequent iterations", - domain=float, - default=0.8) - - self.add_to_config('rc_bound_tightening', - description="use reduced cost bound tightening", - domain=bool, - default=True) - - self.add_to_config('rc_bound_tol', - description="tol to consider vars at bound", - domain=float, - default=1e-6) + self.add_to_config( + "rc_verbose", + description="verbose output for reduced costs", + domain=bool, + default=False, + ) + self.add_to_config( + "rc_debug", + description="debug output for reduced costs", + domain=bool, + default=False, + ) - def lagranger_args(self): + self.add_to_config( + "rc_fixer", + description="use the reduced cost fixer", + domain=bool, + default=False, + ) - self.add_to_config('lagranger', - description="have a special lagranger spoke", - domain=bool, - default=False) + self.add_to_config( + "rc_zero_tol", + description="vars with rc below tol will never be fixed", + domain=float, + default=1e-4, + ) - self.add_to_config("lagranger_iter0_mipgap", - description="lagranger iter0 mipgap (default None)", - domain=float, - default=None) + self.add_to_config( + "rc_fix_fraction_iter0", + description="target fix fraction for rc fixer in first iteration", + domain=float, + default=0.0, + ) - self.add_to_config("lagranger_iterk_mipgap", - description="lagranger iterk mipgap (default None)", - domain=float, - default=None) + self.add_to_config( + "rc_fix_fraction_iterk", + description="target fix fraction for rc fixer in subsequent iterations", + domain=float, + default=0.8, + ) - self.add_to_config("lagranger_rho_rescale_factors_json", - description="json file: rho rescale factors (default None)", - domain=str, - default=None) + self.add_to_config( + "rc_bound_tightening", + description="use reduced cost bound tightening", + domain=bool, + default=True, + ) + self.add_to_config( + "rc_bound_tol", + description="tol to consider vars at bound", + domain=float, + default=1e-6, + ) - def subgradient_args(self): + def lagranger_args(self): + self.add_to_config( + "lagranger", + description="have a special lagranger spoke", + domain=bool, + default=False, + ) - self.add_to_config('subgradient', - description="have a subgradient spoke", - domain=bool, - default=False) + self.add_to_config( + "lagranger_iter0_mipgap", + description="lagranger iter0 mipgap (default None)", + domain=float, + default=None, + ) - self.add_to_config("subgradient_iter0_mipgap", - description="lgr. 
iter0 solver option mipgap (default None)", - domain=float, - default=None) + self.add_to_config( + "lagranger_iterk_mipgap", + description="lagranger iterk mipgap (default None)", + domain=float, + default=None, + ) - self.add_to_config("subgradient_iterk_mipgap", - description="lgr. iterk solver option mipgap (default None)", - domain=float, - default=None) + self.add_to_config( + "lagranger_rho_rescale_factors_json", + description="json file: rho rescale factors (default None)", + domain=str, + default=None, + ) - self.add_to_config("subgradient_rho_multiplier", - description="rescale rho (update step size) by this factor", - domain=float, - default=None) + def subgradient_args(self): + self.add_to_config( + "subgradient", + description="have a subgradient spoke", + domain=bool, + default=False, + ) + self.add_to_config( + "subgradient_iter0_mipgap", + description="lgr. iter0 solver option mipgap (default None)", + domain=float, + default=None, + ) - def ph_ob_args(self): + self.add_to_config( + "subgradient_iterk_mipgap", + description="lgr. iterk solver option mipgap (default None)", + domain=float, + default=None, + ) - self.add_to_config("ph_ob", - description="use PH to compute outer bound", - domain=bool, - default=False) - self.add_to_config("ph_ob_rho_rescale_factors_json", - description="json file with {iternum: rho rescale factor} (default None)", - domain=str, - default=None) - self.add_to_config("ph_ob_initial_rho_rescale_factor", - description="Used to rescale rho initially (will be done regardless of other rescaling (default 0.1)", - domain=float, - default=0.1) - self.add_to_config("ph_ob_gradient_rho", - description="use gradient-based rho in PH OB", - domain=bool, - default=False) + self.add_to_config( + "subgradient_rho_multiplier", + description="rescale rho (update step size) by this factor", + domain=float, + default=None, + ) + def ph_ob_args(self): + self.add_to_config( + "ph_ob", + description="use PH to compute outer bound", + domain=bool, + default=False, + ) + self.add_to_config( + "ph_ob_rho_rescale_factors_json", + description="json file with {iternum: rho rescale factor} (default None)", + domain=str, + default=None, + ) + self.add_to_config( + "ph_ob_initial_rho_rescale_factor", + description="Used to rescale rho initially (will be done regardless of other rescaling (default 0.1)", + domain=float, + default=0.1, + ) + self.add_to_config( + "ph_ob_gradient_rho", + description="use gradient-based rho in PH OB", + domain=bool, + default=False, + ) def xhatlooper_args(self): + self.add_to_config( + "xhatlooper", + description="have an xhatlooper spoke", + domain=bool, + default=False, + ) - self.add_to_config('xhatlooper', - description="have an xhatlooper spoke", - domain=bool, - default=False) - - self.add_to_config("xhat_scen_limit", - description="scenario limit xhat looper to try (default 3)", - domain=int, - default=3) + self.add_to_config( + "xhat_scen_limit", + description="scenario limit xhat looper to try (default 3)", + domain=int, + default=3, + ) def xhatshuffle_args(self): + self.add_to_config( + "xhatshuffle", + description="have an xhatshuffle spoke", + domain=bool, + default=False, + ) - self.add_to_config('xhatshuffle', - description="have an xhatshuffle spoke", - domain=bool, - default=False) - - self.add_to_config('add_reversed_shuffle', - description="using also the reversed shuffling (multistage only, default True)", - domain=bool, - default=False) - - self.add_to_config('xhatshuffle_iter_step', - description="step in shuffled list 
between 2 scenarios to try (default None)", - domain=int, - default=None) + self.add_to_config( + "add_reversed_shuffle", + description="using also the reversed shuffling (multistage only, default True)", + domain=bool, + default=False, + ) + self.add_to_config( + "xhatshuffle_iter_step", + description="step in shuffled list between 2 scenarios to try (default None)", + domain=int, + default=None, + ) def mult_rho_args(self): + self.add_to_config( + "mult_rho", + description="Have mult_rho extension (default False)", + domain=bool, + default=False, + ) - self.add_to_config('mult_rho', - description="Have mult_rho extension (default False)", - domain=bool, - default=False) - - self.add_to_config('mult_rho_convergence_tolerance', - description="rhomult does nothing with convergence below this (default 1e-4)", - domain=float, - default=1e-4) + self.add_to_config( + "mult_rho_convergence_tolerance", + description="rhomult does nothing with convergence below this (default 1e-4)", + domain=float, + default=1e-4, + ) - self.add_to_config('mult_rho_update_stop_iteration', - description="stop doing rhomult rho updates after this iteration (default None)", - domain=int, - default=None) + self.add_to_config( + "mult_rho_update_stop_iteration", + description="stop doing rhomult rho updates after this iteration (default None)", + domain=int, + default=None, + ) - self.add_to_config('mult_rho_update_start_iteration', - description="start doing rhomult rho updates on this iteration (default 2)", - domain=int, - default=2) + self.add_to_config( + "mult_rho_update_start_iteration", + description="start doing rhomult rho updates on this iteration (default 2)", + domain=int, + default=2, + ) def mult_rho_to_dict(self): assert hasattr(self, "mult_rho") - return {"mult_rho": self.mult_rho, - "convergence_tolerance": self.mult_rho_convergence_tolerance, - "rho_update_stop_iteration": self.mult_rho_update_stop_iteration, - "rho_update_start_iteration": self.mult_rho_update_start_iteration, - "verbose": False} - - + return { + "mult_rho": self.mult_rho, + "convergence_tolerance": self.mult_rho_convergence_tolerance, + "rho_update_stop_iteration": self.mult_rho_update_stop_iteration, + "rho_update_start_iteration": self.mult_rho_update_start_iteration, + "verbose": False, + } def xhatspecific_args(self): # we will not try to get the specification from the command line - self.add_to_config('xhatspecific', - description="have an xhatspecific spoke", - domain=bool, - default=False) - + self.add_to_config( + "xhatspecific", + description="have an xhatspecific spoke", + domain=bool, + default=False, + ) def xhatxbar_args(self): - - self.add_to_config('xhatxbar', - description="have an xhatxbar spoke", - domain=bool, - default=False) - - - + self.add_to_config( + "xhatxbar", description="have an xhatxbar spoke", domain=bool, default=False + ) def xhatlshaped_args(self): # we will not try to get the specification from the command line - self.add_to_config('xhatlshaped', - description="have an xhatlshaped spoke", - domain=bool, - default=False) + self.add_to_config( + "xhatlshaped", + description="have an xhatlshaped spoke", + domain=bool, + default=False, + ) def wtracker_args(self): + self.add_to_config( + "wtracker", + description="Use a wtracker extension", + domain=bool, + default=False, + ) - self.add_to_config('wtracker', - description="Use a wtracker extension", - domain=bool, - default=False) - - self.add_to_config('wtracker_file_prefix', - description="prefix for rank by rank wtracker files (default '')", - 
domain=str,
-                           default='')
-
-        self.add_to_config('wtracker_wlen',
-                           description="max length of iteration window for wtracker (default 20)",
-                           domain=int,
-                           default=20)
+        self.add_to_config(
+            "wtracker_file_prefix",
+            description="prefix for rank by rank wtracker files (default '')",
+            domain=str,
+            default="",
+        )

-        self.add_to_config('wtracker_reportlen',
-                           description="max length of long reports for wtracker (default 100)",
-                           domain=int,
-                           default=100)
+        self.add_to_config(
+            "wtracker_wlen",
+            description="max length of iteration window for wtracker (default 20)",
+            domain=int,
+            default=20,
+        )

-        self.add_to_config('wtracker_stdevthresh',
-                           description="Ignore moving std dev below this value (default None)",
-                           domain=float,
-                           default=None)
+        self.add_to_config(
+            "wtracker_reportlen",
+            description="max length of long reports for wtracker (default 100)",
+            domain=int,
+            default=100,
+        )

+        self.add_to_config(
+            "wtracker_stdevthresh",
+            description="Ignore moving std dev below this value (default None)",
+            domain=float,
+            default=None,
+        )

     def slammax_args(self):
         # we will not try to get the specification from the command line
-        self.add_to_config('slammax',
-                           description="have a slammax spoke",
-                           domain=bool,
-                           default=False)
-
+        self.add_to_config(
+            "slammax", description="have a slammax spoke", domain=bool, default=False
+        )

     def slammin_args(self):
         # we will not try to get the specification from the command line
-        self.add_to_config('slammin',
-                           description="have a slammin spoke",
-                           domain=bool,
-                           default=False)
-
+        self.add_to_config(
+            "slammin", description="have a slammin spoke", domain=bool, default=False
+        )

     def cross_scenario_cuts_args(self):
         # we will not try to get the specification from the command line
-        self.add_to_config('cross_scenario_cuts',
-                           description="have a cross scenario cuts spoke",
-                           domain=bool,
-                           default=False)
-
-        self.add_to_config("cross_scenario_iter_cnt",
-                           description="cross scen check bound improve iterations "
-                                       "(default 4)",
-                           domain=int,
-                           default=4)
-
-        self.add_to_config("eta_bounds_mipgap",
-                           description="mipgap for determining eta bounds for cross "
-                                       "scenario cuts (default 0.01)",
-                           domain=float,
-                           default=0.01)
-
+        self.add_to_config(
+            "cross_scenario_cuts",
+            description="have a cross scenario cuts spoke",
+            domain=bool,
+            default=False,
+        )
+
+        self.add_to_config(
+            "cross_scenario_iter_cnt",
+            description="cross scen check bound improve iterations " "(default 4)",
+            domain=int,
+            default=4,
+        )
+
+        self.add_to_config(
+            "eta_bounds_mipgap",
+            description="mipgap for determining eta bounds for cross "
+            "scenario cuts (default 0.01)",
+            domain=float,
+            default=0.01,
+        )
+
     # note: grad_rho_args was subsumed by gradient_args
     def gradient_args(self):
-
-        self.add_to_config("xhatpath",
-                           description="path to npy file with xhat",
-                           domain=str,
-                           default='')
-        self.add_to_config("grad_cost_file_out",
-                           description="name of the gradient cost file for output (will be csv)",
-                           domain=str,
-                           default='')
-        self.add_to_config("grad_cost_file_in",
-                           description="path to csv file with (grad based?) costs",
-                           domain=str,
-                           default='')
-        self.add_to_config("grad_rho_file_out",
-                           description="name of the gradient rho output file (must be csv)",
-                           domain=str,
-                           default='')
-        self.add_to_config("rho_file_in",
-                           description="name of the (gradient) rho input file (must be csv)",
-                           domain=str,
-                           default='')
-        self.add_to_config("grad_display_rho",
-                           description="display rho during gradient calcs (default True)",
-                           domain=bool,
-                           default=True)
+        self.add_to_config(
+            "xhatpath", description="path to npy file with xhat", domain=str, default=""
+        )
+        self.add_to_config(
+            "grad_cost_file_out",
+            description="name of the gradient cost file for output (will be csv)",
+            domain=str,
+            default="",
+        )
+        self.add_to_config(
+            "grad_cost_file_in",
+            description="path to csv file with (grad based?) costs",
+            domain=str,
+            default="",
+        )
+        self.add_to_config(
+            "grad_rho_file_out",
+            description="name of the gradient rho output file (must be csv)",
+            domain=str,
+            default="",
+        )
+        self.add_to_config(
+            "rho_file_in",
+            description="name of the (gradient) rho input file (must be csv)",
+            domain=str,
+            default="",
+        )
+        self.add_to_config(
+            "grad_display_rho",
+            description="display rho during gradient calcs (default True)",
+            domain=bool,
+            default=True,
+        )
         # likely unused presently
-#        self.add_to_config("grad_pd_thresh",
-#                           description="threshold for dual/primal during gradient calcs",
-#                           domain=float,
-#                           default=0.1)
-
-        self.add_to_config('grad_rho_setter',
-                           description="use rho setter from a rho file",
-                           domain=bool,
-                           default=False)
+        # self.add_to_config("grad_pd_thresh",
+        #                           description="threshold for dual/primal during gradient calcs",
+        #                           domain=float,
+        #                           default=0.1)
+
+        self.add_to_config(
+            "grad_rho_setter",
+            description="use rho setter from a rho file",
+            domain=bool,
+            default=False,
+        )
         """ all occurrences of rho_path converted to grad_rho_file July 2024
         self.add_to_config("rho_path",
@@ -767,121 +908,163 @@ def gradient_args(self):
                            domain=str,
                            default='')
         """
-        self.add_to_config("grad_order_stat",
-                           description="order statistic for rho: must be between 0 (the min) and 1 (the max); 0.5 is the average",
-                           domain=float,
-                           default=-1.0)
-        self.add_to_config("grad_rho_relative_bound",
-                           description="factor that bounds rho/cost",
-                           domain=float,
-                           default=1e3)
-
-    def dynamic_gradient_args(self): # AKA adaptive
+        self.add_to_config(
+            "grad_order_stat",
+            description="order statistic for rho: must be between 0 (the min) and 1 (the max); 0.5 is the average",
+            domain=float,
+            default=-1.0,
+        )
+        self.add_to_config(
+            "grad_rho_relative_bound",
+            description="factor that bounds rho/cost",
+            domain=float,
+            default=1e3,
+        )
+    def dynamic_gradient_args(self):  # AKA adaptive
         self.gradient_args()
-        self.add_to_config('grad_dynamic_primal_crit',
-                           description="Use dynamic gradient-based primal criterion for update",
-                           domain=bool,
-                           default=False)
-
-        self.add_to_config('grad_dynamic_dual_crit',
-                           description="Use dynamic gradient-based dual criterion for update",
-                           domain=bool,
-                           default=False)
-
-        self.add_to_config("grad_dynamic_primal_thresh",
-                           description="primal threshold for diff during gradient calcs",
-                           domain=float,
-                           default=0.1)
-
-        self.add_to_config("grad_dynamic_dual_thresh",
-                           description="dual threshold for abs norm during gradient calcs",
-                           domain=float,
-                           default=0.1)
+        self.add_to_config(
+            "grad_dynamic_primal_crit",
+            description="Use dynamic gradient-based primal criterion for update",
+            domain=bool,
+            default=False,
+        )
+
+        self.add_to_config(
+            "grad_dynamic_dual_crit",
+            description="Use dynamic gradient-based dual criterion for update",
+            domain=bool,
+            default=False,
+        )
+
+        self.add_to_config(
+            "grad_dynamic_primal_thresh",
+            description="primal threshold for diff during gradient calcs",
+            domain=float,
+            default=0.1,
+        )
+
+        self.add_to_config(
+            "grad_dynamic_dual_thresh",
+            description="dual threshold for abs norm during gradient calcs",
+            domain=float,
+            default=0.1,
+        )

     def converger_args(self):
-        self.add_to_config("use_norm_rho_converger",
-                           description="Use the norm rho converger",
-                           domain=bool,
-                           default=False)
-        self.add_to_config("primal_dual_converger",
-                           description="Use the primal dual converger",
-                           domain=bool,
-                           default=False)
-        self.add_to_config("primal_dual_converger_tol",
-                           description="Tolerance for primal dual converger (default 1e-2)",
-                           domain=float,
-                           default=1e-2)
+        self.add_to_config(
+            "use_norm_rho_converger",
+            description="Use the norm rho converger",
+            domain=bool,
+            default=False,
+        )
+        self.add_to_config(
+            "primal_dual_converger",
+            description="Use the primal dual converger",
+            domain=bool,
+            default=False,
+        )
+        self.add_to_config(
+            "primal_dual_converger_tol",
+            description="Tolerance for primal dual converger (default 1e-2)",
+            domain=float,
+            default=1e-2,
+        )

     def tracking_args(self):
-        self.add_to_config("tracking_folder",
-                           description="Path of results folder (default results)",
-                           domain=str,
-                           default="results")
-        self.add_to_config("ph_track_progress",
-                           description="Adds tracking extension to all"
-                           " ph opt cylinders (default False). Use --track_*"
-                           " to specify what and how to track."
-                           " See mpisppy.utils.cfg_vanilla.add_ph_tracking for details",
-                           domain=bool,
-                           default=False)
-        self.add_to_config("track_convergence",
-                           description="Adds convergence tracking ie"
-                           " gaps and bounds (default 0)",
-                           domain=int,
-                           default=0)
-        self.add_to_config("track_xbars",
-                           description="Adds xbar tracking (default 0)",
-                           domain=int,
-                           default=0)
-        self.add_to_config("track_duals",
-                           description="Adds w tracking (default 0)",
-                           domain=int,
-                           default=0)
-        self.add_to_config("track_nonants",
-                           description="Adds nonant tracking (default 0)",
-                           domain=int,
-                           default=0)
-        self.add_to_config('track_scen_gaps',
-                           description="Adds scenario gap tracking (default 0)",
-                           domain=int,
-                           default=0)
-        self.add_to_config("track_reduced_costs",
-                           description="Adds reduced costs tracking (default 0)",
-                           domain=int,
-                           default=0)
-
+        self.add_to_config(
+            "tracking_folder",
+            description="Path of results folder (default results)",
+            domain=str,
+            default="results",
+        )
+        self.add_to_config(
+            "ph_track_progress",
+            description="Adds tracking extension to all"
+            " ph opt cylinders (default False). Use --track_*"
+            " to specify what and how to track."
+ " See mpisppy.utils.cfg_vanilla.add_ph_tracking for details", + domain=bool, + default=False, + ) + self.add_to_config( + "track_convergence", + description="Adds convergence tracking ie" " gaps and bounds (default 0)", + domain=int, + default=0, + ) + self.add_to_config( + "track_xbars", + description="Adds xbar tracking (default 0)", + domain=int, + default=0, + ) + self.add_to_config( + "track_duals", + description="Adds w tracking (default 0)", + domain=int, + default=0, + ) + self.add_to_config( + "track_nonants", + description="Adds nonant tracking (default 0)", + domain=int, + default=0, + ) + self.add_to_config( + "track_scen_gaps", + description="Adds scenario gap tracking (default 0)", + domain=int, + default=0, + ) + self.add_to_config( + "track_reduced_costs", + description="Adds reduced costs tracking (default 0)", + domain=int, + default=0, + ) def wxbar_read_write_args(self): - self.add_to_config("init_W_fname", - description="Path of initial W file (default None)", - domain=str, - default=None) - self.add_to_config("init_Xbar_fname", - description="Path of initial Xbar file (default None)", - domain=str, - default=None) - self.add_to_config("init_separate_W_files", - description="If True, W is read from separate files (default False)", - domain=bool, - default=False) - self.add_to_config("W_fname", - description="Path of final W file (default None)", - domain=str, - default=None) - self.add_to_config("Xbar_fname", - description="Path of final Xbar file (default None)", - domain=str, - default=None) - self.add_to_config("separate_W_files", - description="If True, writes W to separate files (default False)", - domain=bool, - default=False) - - - #================ - def create_parser(self,progname=None): + self.add_to_config( + "init_W_fname", + description="Path of initial W file (default None)", + domain=str, + default=None, + ) + self.add_to_config( + "init_Xbar_fname", + description="Path of initial Xbar file (default None)", + domain=str, + default=None, + ) + self.add_to_config( + "init_separate_W_files", + description="If True, W is read from separate files (default False)", + domain=bool, + default=False, + ) + self.add_to_config( + "W_fname", + description="Path of final W file (default None)", + domain=str, + default=None, + ) + self.add_to_config( + "Xbar_fname", + description="Path of final Xbar file (default None)", + domain=str, + default=None, + ) + self.add_to_config( + "separate_W_files", + description="If True, writes W to separate files (default False)", + domain=bool, + default=False, + ) + + # ================ + def create_parser(self, progname=None): # seldom used if len(self) == 0: raise RuntimeError("create parser called before Config is populated") @@ -889,7 +1072,7 @@ def create_parser(self,progname=None): self.initialize_argparse(parser) return parser - #================ + # ================ def parse_command_line(self, progname=None): # often used, but the return value less so if len(self) == 0: @@ -899,13 +1082,14 @@ def parse_command_line(self, progname=None): args = self.import_argparse(args) return args -#================= + +# ================= if __name__ == "__main__": # a place for ad hoc testing by developers config = Config() - config.popular_args() # populates self + config.popular_args() # populates self config.display() - for i,j in config.items(): + for i, j in config.items(): print(i, j) print(dir(config)) print(config._all_slots) @@ -915,13 +1099,15 @@ def parse_command_line(self, progname=None): # most codes do not use 
create_parser; they use parse_command_line instead
     parser = config.create_parser("tester")
     parser.add_argument(
-        "num_scens", help="Number of scenarios", type=int,
-    )
+        "num_scens",
+        help="Number of scenarios",
+        type=int,
+    )

-    args=parser.parse_args(['3', '--max-iterations', '99', '--solver-name', 'cplex'])
+    args = parser.parse_args(["3", "--max-iterations", "99", "--solver-name", "cplex"])

     args = config.import_argparse(args)

     config.display()
-    #parser.parse_args(['--help'])
+    # parser.parse_args(['--help'])
diff --git a/mpisppy/utils/find_rho.py b/mpisppy/utils/find_rho.py
index ec7226197..7270f9e1c 100644
--- a/mpisppy/utils/find_rho.py
+++ b/mpisppy/utils/find_rho.py
@@ -25,7 +25,6 @@
 import mpisppy.phbase as phbase

-
 # Could also pass, e.g., sys.stdout instead of a filename
 """mpisppy.log.setup_logger("mpisppy.utils.find_rho",
                          "findrho.log",
@@ -35,8 +34,8 @@
 ############################################################################

-class Find_Rho():
-    """ Interface to compute rhos from Ws for a given ph object and write them in a file
+class Find_Rho:
+    """Interface to compute rhos from Ws for a given ph object and write them in a file
        DLW July 2024 ? Is it from W's or from costs?

     Args:
@@ -54,24 +53,23 @@ def __init__(self, ph_object, cfg):
         self.cfg = cfg
         self.c = dict()

-        if cfg.get("grad_cost_file_in", ifmissing='') == '':
+        if cfg.get("grad_cost_file_in", ifmissing="") == "":
             raise RuntimeError("Find_Rho constructor called without grad_cost_file_in")
         else:
-            if (not os.path.exists(self.cfg.grad_cost_file_in)):
-                raise RuntimeError(f'Could not find file {self.cfg.grad_cost_file_in}')
-            with open(self.cfg.grad_cost_file_in, 'r') as f:
+            if not os.path.exists(self.cfg.grad_cost_file_in):
+                raise RuntimeError(f"Could not find file {self.cfg.grad_cost_file_in}")
+            with open(self.cfg.grad_cost_file_in, "r") as f:
                 for line in f:
-                    if (line.startswith('#')):
+                    if line.startswith("#"):
                         continue
-                    line = line.split(',')
+                    line = line.split(",")
                     sname = line[0]
-                    vname = ','.join(line[1:-1])
-                    cval = float(line[-1])
+                    vname = ",".join(line[1:-1])
+                    cval = float(line[-1])
                     self.c[(sname, vname)] = cval

-
     def _w_denom(self, s, node):
-        """ Computes the denominator for w-based rho. This denominator is scenario dependent.
+        """Computes the denominator for w-based rho. This denominator is scenario dependent.

         Args:
            s (Pyomo Concrete Model): scenario
@@ -81,11 +79,16 @@ def _w_denom(self, s, node):
            w_denom (numpy array): denominator

         """
-        assert node.name == "ROOT", "gradient-based compute rho only works for two stage for now"
+        assert (
+            node.name == "ROOT"
+        ), "gradient-based compute rho only works for two stage for now"
         nlen = s._mpisppy_data.nlens[node.name]
-        xbar_array = np.array([s._mpisppy_model.xbars[(node.name,j)]._value for j in range(nlen)])
-        nonants_array = np.fromiter((v._value for v in node.nonant_vardata_list),
-                                    dtype='d', count=nlen)
+        xbar_array = np.array(
+            [s._mpisppy_model.xbars[(node.name, j)]._value for j in range(nlen)]
+        )
+        nonants_array = np.fromiter(
+            (v._value for v in node.nonant_vardata_list), dtype="d", count=nlen
+        )
         w_denom = np.abs(nonants_array - xbar_array)
         denom_max = np.max(w_denom)
         for i in range(len(w_denom)):
@@ -93,9 +96,8 @@ def _w_denom(self, s, node):
                 w_denom[i] = max(denom_max, self.ph_object.E1_tolerance)
         return w_denom

-
     def _prox_denom(self, s, node):
-        """ Computes the denominator corresponding to the proximal term. This denominator is scenario dependent.
+        """Computes the denominator corresponding to the proximal term. This denominator is scenario dependent.

         Args:
            s (Pyomo Concrete Model): scenario
@@ -107,13 +109,15 @@
         """
         assert node.name == "ROOT", "compute rho only works for two stage for now"
         nlen = s._mpisppy_data.nlens[node.name]
-        xbar_array = np.array([s._mpisppy_model.xbars[(node.name,j)]._value for j in range(nlen)])
-        nonants_array = np.fromiter((v._value for v in node.nonant_vardata_list),
-                                    dtype='d', count=nlen)
+        xbar_array = np.array(
+            [s._mpisppy_model.xbars[(node.name, j)]._value for j in range(nlen)]
+        )
+        nonants_array = np.fromiter(
+            (v._value for v in node.nonant_vardata_list), dtype="d", count=nlen
+        )
         prox_denom = 2 * np.square(nonants_array - xbar_array)
         return prox_denom

-
     def _grad_denom(self):
         """Computes the scenario independent denominator in the WW heuristic.

@@ -127,30 +131,34 @@ def _grad_denom(self):
         for node in scenario._mpisppy_node_list:
             assert node.name == "ROOT", "compute rho only works for two stage for now"
         nlen0 = scenario._mpisppy_data.nlens["ROOT"]
-        g_denom = np.zeros(nlen0, dtype='d')
-        xbar_array = np.array([scenario._mpisppy_model.xbars[("ROOT",j)]._value for j in range(nlen0)])
+        g_denom = np.zeros(nlen0, dtype="d")
+        xbar_array = np.array(
+            [scenario._mpisppy_model.xbars[("ROOT", j)]._value for j in range(nlen0)]
+        )
         denom = 0
-        for k,s in self.ph_object.local_scenarios.items():
+        for k, s in self.ph_object.local_scenarios.items():
             nlens = s._mpisppy_data.nlens
             for node in s._mpisppy_node_list:
                 ndn = node.name
                 nlen = nlens[ndn]
-                nonants_array = np.fromiter((v._value for v in node.nonant_vardata_list),
-                                            dtype='d', count=nlen)
+                nonants_array = np.fromiter(
+                    (v._value for v in node.nonant_vardata_list), dtype="d", count=nlen
+                )
                 probs = s._mpisppy_data.prob_coeff[ndn] * np.ones(nlen)
                 # using 1 as a default value because the variable values are converged if the diff is 0, so ...
                 denom += probs * np.maximum(np.abs(nonants_array - xbar_array), 1.0)
         print(f"{denom=}")
-        self.ph_object.comms["ROOT"].Allreduce([denom, MPI.DOUBLE],
-                                               [g_denom, MPI.DOUBLE],
-                                               op=MPI.SUM)
+        self.ph_object.comms["ROOT"].Allreduce(
+            [denom, MPI.DOUBLE], [g_denom, MPI.DOUBLE], op=MPI.SUM
+        )
         if self.ph_object.cylinder_rank == 0:
-            g_denom = np.maximum(np.ones(len(g_denom))/self.cfg.grad_rho_relative_bound, g_denom)
+            g_denom = np.maximum(
+                np.ones(len(g_denom)) / self.cfg.grad_rho_relative_bound, g_denom
+            )
             return g_denom

-
     def compute_rho(self, indep_denom=False):
-        """ Computes rhos for each scenario and each variable using the WW heuristic
+        """Computes rhos for each scenario and each variable using the WW heuristic
         and first order condition.

         Returns:
@@ -160,101 +168,142 @@ def compute_rho(self, indep_denom=False):
         vnames = OrderedSet(vname for (sname, vname) in self.c.keys())
         k0, s0 = list(self.ph_object.local_scenarios.items())[0]
         # TBD: drop vname_to_idx and use the map already provided
-        vname_to_idx = {var.name : ndn_i[1] for ndn_i, var in s0._mpisppy_data.nonant_indices.items()}
-        cost = {k : np.array([self.c[k, vname]
-                              for vname in vnames])
-                for k in local_snames}
+        vname_to_idx = {
+            var.name: ndn_i[1] for ndn_i, var in s0._mpisppy_data.nonant_indices.items()
+        }
+        cost = {
+            k: np.array([self.c[k, vname] for vname in vnames]) for k in local_snames
+        }
         if indep_denom:
             grad_denom = self._grad_denom()
             loc_denom = {k: grad_denom for k in local_snames}
         else:
             # two-stage only for now
-            loc_denom = {k: self._w_denom(s, s._mpisppy_node_list[0])
-                         for k, s in self.ph_object.local_scenarios.items()}
-        prob_list = [s._mpisppy_data.prob_coeff["ROOT"]
-                     for s in self.ph_object.local_scenarios.values()]
+            loc_denom = {
+                k: self._w_denom(s, s._mpisppy_node_list[0])
+                for k, s in self.ph_object.local_scenarios.items()
+            }
+        prob_list = [
+            s._mpisppy_data.prob_coeff["ROOT"]
+            for s in self.ph_object.local_scenarios.values()
+        ]
         w = dict()
         for k, scenario in self.ph_object.local_scenarios.items():
-            w[k] = np.array([scenario._mpisppy_model.W[ndn_i]._value
-                             for ndn_i in scenario._mpisppy_data.nonant_indices])
+            w[k] = np.array(
+                [
+                    scenario._mpisppy_model.W[ndn_i]._value
+                    for ndn_i in scenario._mpisppy_data.nonant_indices
+                ]
+            )

-        rho = {k : np.abs(np.divide(cost[k] - w[k], loc_denom[k])) for k in local_snames}
+        rho = {k: np.abs(np.divide(cost[k] - w[k], loc_denom[k])) for k in local_snames}

-        local_rhos = {vname: [rho_list[idx] for _, rho_list in rho.items()]
-                      for vname, idx in vname_to_idx.items()}
+        local_rhos = {
+            vname: [rho_list[idx] for _, rho_list in rho.items()]
+            for vname, idx in vname_to_idx.items()
+        }
         # Compute a scenario independent rho from a list of rhos using a triangular distribution.
         alpha = self.cfg.grad_order_stat
-        assert (alpha >= 0 and alpha <= 1), f"For grad_order_stat 0 is the min, 0.5 the average, 1 the max; {alpha=} is invalid."
+        assert (
+            alpha >= 0 and alpha <= 1
+        ), f"For grad_order_stat 0 is the min, 0.5 the average, 1 the max; {alpha=} is invalid."
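(Aside: the branching that follows interpolates between the min, the probability-weighted mean, and the max of the per-scenario rhos, linearly on each half of [0, 1]. A minimal stand-alone Python sketch of that interpolation; the function name is illustrative and not part of mpi-sppy:

def interp_rho(alpha, rho_min, rho_mean, rho_max):
    # grad_order_stat semantics: 0 -> min, 0.5 -> mean, 1 -> max
    if alpha <= 0.5:
        # linear from the min (alpha=0) up to the mean (alpha=0.5)
        return rho_min + 2 * alpha * (rho_mean - rho_min)
    # linear from the mean (alpha=0.5) up to the max (alpha=1)
    return (2 * rho_mean - rho_max) + 2 * alpha * (rho_max - rho_mean)

# For example, interp_rho(0.25, 1.0, 2.0, 5.0) == 1.5
# and interp_rho(0.75, 1.0, 2.0, 5.0) == 3.5.

End of aside.)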
vcnt = len(local_rhos) # variable count - rho_mins = np.empty(vcnt, dtype='d') # global - rho_maxes = np.empty(vcnt, dtype='d') - rho_means = np.empty(vcnt, dtype='d') - - local_rho_mins = np.fromiter((min(rho_vals) for rho_vals in local_rhos.values()), dtype='d') - local_rho_maxes = np.fromiter((max(rho_vals) for rho_vals in local_rhos.values()), dtype='d') + rho_mins = np.empty(vcnt, dtype="d") # global + rho_maxes = np.empty(vcnt, dtype="d") + rho_means = np.empty(vcnt, dtype="d") + + local_rho_mins = np.fromiter( + (min(rho_vals) for rho_vals in local_rhos.values()), dtype="d" + ) + local_rho_maxes = np.fromiter( + (max(rho_vals) for rho_vals in local_rhos.values()), dtype="d" + ) local_prob = np.sum(prob_list) - local_wgted_means = np.fromiter((np.dot(rho_vals, prob_list) * local_prob for rho_vals in local_rhos.values()), dtype='d') - - self.ph_object.comms["ROOT"].Allreduce([local_rho_mins, MPI.DOUBLE], - [rho_mins, MPI.DOUBLE], - op=MPI.MIN) - self.ph_object.comms["ROOT"].Allreduce([local_rho_maxes, MPI.DOUBLE], - [rho_maxes, MPI.DOUBLE], - op=MPI.MAX) - - self.ph_object.comms["ROOT"].Allreduce([local_wgted_means, MPI.DOUBLE], - [rho_means, MPI.DOUBLE], - op=MPI.SUM) + local_wgted_means = np.fromiter( + ( + np.dot(rho_vals, prob_list) * local_prob + for rho_vals in local_rhos.values() + ), + dtype="d", + ) + + self.ph_object.comms["ROOT"].Allreduce( + [local_rho_mins, MPI.DOUBLE], [rho_mins, MPI.DOUBLE], op=MPI.MIN + ) + self.ph_object.comms["ROOT"].Allreduce( + [local_rho_maxes, MPI.DOUBLE], [rho_maxes, MPI.DOUBLE], op=MPI.MAX + ) + + self.ph_object.comms["ROOT"].Allreduce( + [local_wgted_means, MPI.DOUBLE], [rho_means, MPI.DOUBLE], op=MPI.SUM + ) if alpha == 0.5: - rhos = {vname: float(rho_mean) for vname, rho_mean in zip(local_rhos.keys(), rho_means)} + rhos = { + vname: float(rho_mean) + for vname, rho_mean in zip(local_rhos.keys(), rho_means) + } elif alpha == 0.0: - rhos = {vname: float(rho_min) for vname, rho_min in zip(local_rhos.keys(), rho_mins)} + rhos = { + vname: float(rho_min) + for vname, rho_min in zip(local_rhos.keys(), rho_mins) + } elif alpha == 1.0: - rhos = {vname: float(rho_max) for vname, rho_max in zip(local_rhos.keys(), rho_maxes)} + rhos = { + vname: float(rho_max) + for vname, rho_max in zip(local_rhos.keys(), rho_maxes) + } elif alpha < 0.5: - rhos = {vname: float(rho_min + alpha * 2 * (rho_mean - rho_min))\ - for vname, rho_min, rho_mean in zip(local_rhos.keys(), rho_mins, rho_means)} + rhos = { + vname: float(rho_min + alpha * 2 * (rho_mean - rho_min)) + for vname, rho_min, rho_mean in zip( + local_rhos.keys(), rho_mins, rho_means + ) + } elif alpha > 0.5: - rhos = {vname: float(2 * rho_mean - rho_max) + alpha * 2 * (rho_max - rho_mean)\ - for vname, rho_mean, rho_max in zip(local_rhos.keys(), rho_means, rho_maxes)} + rhos = { + vname: float(2 * rho_mean - rho_max) + alpha * 2 * (rho_max - rho_mean) + for vname, rho_mean, rho_max in zip( + local_rhos.keys(), rho_means, rho_maxes + ) + } else: raise RuntimeError("Coding error.") return rhos - def write_grad_rho(self): - """ Write the computed rhos in the file --grad-rho-file-out. - Note: this file was originally opened for append access + """Write the computed rhos in the file --grad-rho-file-out. 
+ Note: this file was originally opened for append access """ - if self.cfg.grad_rho_file_out == '': + if self.cfg.grad_rho_file_out == "": raise RuntimeError("write_grad_rho called without grad_rho_file_out") else: rho_data = self.compute_rho() if self.ph_object.cylinder_rank == 0: - with open(self.cfg.grad_rho_file_out, 'w') as file: + with open(self.cfg.grad_rho_file_out, "w") as file: writer = csv.writer(file) - writer.writerow(['#Rho values']) - for (vname, rho) in rho_data.items(): + writer.writerow(["#Rho values"]) + for vname, rho in rho_data.items(): writer.writerow([vname, rho]) - comm = self.ph_object.comms['ROOT'] - comm.Barrier() + comm = self.ph_object.comms["ROOT"] + comm.Barrier() -class Set_Rho(): - """ Interface to set the computed rhos in PH. +class Set_Rho: + """Interface to set the computed rhos in PH. Args: cfg (Config): config object """ + def __init__(self, cfg): self.cfg = cfg def rho_setter(self, scenario): - """ rho setter to be used in the PH algorithm + """rho setter to be used in the PH algorithm Args: scenario (Pyomo Concrete Model): scenario @@ -265,13 +314,15 @@ def rho_setter(self, scenario): """ assert self.cfg is not None, "you have to give the rho_setter a cfg" - assert self.cfg.rho_file_in != '', "use --rho-file-in to give the path of your rhos file" + assert ( + self.cfg.rho_file_in != "" + ), "use --rho-file-in to give the path of your rhos file" rhofile = self.cfg.rho_file_in rho_list = list() with open(rhofile) as infile: reader = csv.reader(infile) for row in reader: - if (row[0].startswith('#')): + if row[0].startswith("#"): continue else: fullname = row[0] @@ -279,15 +330,17 @@ def rho_setter(self, scenario): if vo is not None: rho_list.append((id(vo), float(row[1]))) else: - raise RuntimeError(f"rho values from {rhofile} found Var {fullname} " - f"that is not found in the scenario given (name={scenario._name})") + raise RuntimeError( + f"rho values from {rhofile} found Var {fullname} " + f"that is not found in the scenario given (name={scenario._name})" + ) return rho_list + # ====================================================================== - #====================================================================== def _parser_setup(): - """ Set up config object and return it, but don't parse + """Set up config object and return it, but don't parse Returns: cfg (Config): config object @@ -301,13 +354,13 @@ def _parser_setup(): cfg.num_scens_required() cfg.popular_args() cfg.two_sided_args() - cfg.ph_args() + cfg.ph_args() return cfg def get_rho_from_W(mname, original_cfg): - """ Creates a ph object from cfg and using the module functions. Then computes rhos from the Ws. + """Creates a ph object from cfg and using the module functions. Then computes rhos from the Ws. 
Args:
        mname (str): module name
        original_cfg (Config object): config object
@@ -316,42 +369,48 @@ def get_rho_from_W(mname, original_cfg):
               to the call to Find_Rho.rhos() (and perhaps other things)

     """
-    assert original_cfg.grad_rho_file != ''
+    assert original_cfg.grad_rho_file != ""
     try:
         model_module = importlib.import_module(mname)
     except Exception:
         raise RuntimeError(f"Could not import module: {mname}")
     cfg = copy.deepcopy(original_cfg)
-    cfg.max_iterations = 0 #we only need x0 here
+    cfg.max_iterations = 0  # we only need x0 here

-    #create ph_object via vanilla
+    # create ph_object via vanilla
     scenario_creator = model_module.scenario_creator
     scenario_denouement = model_module.scenario_denouement
-    scen_names_creator_args = inspect.getargspec(model_module.scenario_names_creator).args #partition requires to do that
-    if scen_names_creator_args[0] == 'cfg':
+    scen_names_creator_args = inspect.getfullargspec(
+        model_module.scenario_names_creator
+    ).args  # partitioning requires this
+    if scen_names_creator_args[0] == "cfg":
         all_scenario_names = model_module.scenario_names_creator(cfg)
-    else :
+    else:
         all_scenario_names = model_module.scenario_names_creator(cfg.num_scens)
     scenario_creator_kwargs = model_module.kw_creator(cfg)
     variable_probability = None
-    if hasattr(model_module, '_variable_probability'):
+    if hasattr(model_module, "_variable_probability"):
         variable_probability = model_module._variable_probability
     beans = (cfg, scenario_creator, scenario_denouement, all_scenario_names)
-    hub_dict = vanilla.ph_hub(*beans,
-                              scenario_creator_kwargs=scenario_creator_kwargs,
-                              ph_extensions=WXBarWriter,
-                              variable_probability=variable_probability)
+    hub_dict = vanilla.ph_hub(
+        *beans,
+        scenario_creator_kwargs=scenario_creator_kwargs,
+        ph_extensions=WXBarWriter,
+        variable_probability=variable_probability,
+    )
     list_of_spoke_dict = list()
     wheel = WheelSpinner(hub_dict, list_of_spoke_dict)
-    wheel.spin() #TODO: steal only what's needed in WheelSpinner
+    wheel.spin()  # TODO: steal only what's needed in WheelSpinner
     if wheel.strata_rank == 0:  # don't do this for bound ranks
         ph_object = wheel.spcomm.opt
-    #==============================================================================
+    # ==============================================================================
     # Compute rhos
     Find_Rho(ph_object, cfg).rhos()


 if __name__ == "__main__":
-    print("call find_rho.get_rho_from_W(modulename, cfg) and use --grad_cost_file_in --rho-file to compute and write rhos")
+    print(
+        "call find_rho.get_rho_from_W(modulename, cfg) and use --grad_cost_file_in --rho-file to compute and write rhos"
+    )
diff --git a/mpisppy/utils/gradient.py b/mpisppy/utils/gradient.py
index d86b6d6c5..dfec9cbe9 100644
--- a/mpisppy/utils/gradient.py
+++ b/mpisppy/utils/gradient.py
@@ -30,10 +30,11 @@
                       level=logging.CRITICAL)
 logger = logging.getLogger("mpisppy.utils.find_grad")"""

+
 ############################################################################
-class Find_Grad():
+class Find_Grad:
     """Interface to compute and write gradient cost
-
+
     Args:
        ph_object (PHBase): ph object
        cfg (Config): config object
@@ -43,22 +44,20 @@ class Find_Grad():

     """

-    def __init__(self,
-                 ph_object,
-                 cfg):
+    def __init__(self, ph_object, cfg):
         self.ph_object = ph_object
         self.cfg = cfg
         self.c = dict()

-    #======================================================================
+    # ======================================================================
     def compute_grad(self, sname, scenario):
-        """ Computes gradient cost for a given scenario
-
+        """Computes gradient cost for a given scenario
+
         Args:
            sname (str): the scenario name
            scenario (Pyomo Concrete Model): scenario
-
+
         Returns:
            grad_dict (dict): a dictionary {nonant index: -gradient}

@@ -72,9 +71,9 @@ def compute_grad(self, sname, scenario):
             if var_data.domain is pyo.Binary:
                 all_discrete_vars.append((var_data, pyo.Binary))
             elif var_data.domain is pyo.Integers:
-                all_discrete_vars.append((var_data, pyo.Integers))
-
-        relax_int = pyo.TransformationFactory('core.relax_integer_vars')
+                all_discrete_vars.append((var_data, pyo.Integers))
+
+        relax_int = pyo.TransformationFactory("core.relax_integer_vars")
         relax_int.apply_to(scenario)
         nlp = PyomoNLP(scenario)
@@ -83,23 +82,26 @@ def compute_grad(self, sname, scenario):
         except Exception:
             raise RuntimeError("Cannot compute the gradient")
         grad = nlp.evaluate_grad_objective()
-        grad_dict = {ndn_i: -grad[ndn_i[1]]
-                     for ndn_i, var in scenario._mpisppy_data.nonant_indices.items()}
+        grad_dict = {
+            ndn_i: -grad[ndn_i[1]]
+            for ndn_i, var in scenario._mpisppy_data.nonant_indices.items()
+        }
-        for (var_data, var_domain) in all_discrete_vars:
+        for var_data, var_domain in all_discrete_vars:
             var_data.domain = var_domain
         return grad_dict

-
     def find_grad_cost(self):
-        """ Computes gradient cost for all scenarios.
-
+        """Computes gradient cost for all scenarios.
+
         ASSUMES:
         The cfg object should contain an xhat path corresponding to the xhat file.

         """
-        assert self.cfg.xhatpath != '', "to compute gradient cost, you have to give an xhat path using --xhatpath"
+        assert (
+            self.cfg.xhatpath != ""
+        ), "to compute gradient cost, you have to give an xhat path using --xhatpath"
         self.ph_object.disable_W_and_prox()
         xhatfile = self.cfg.xhatpath
@@ -107,54 +109,55 @@
         self.ph_object._save_nonants()
         self.ph_object._fix_nonants(xhat)
         self.ph_object.solve_loop()
-        for (sname, scenario) in self.ph_object.local_scenarios.items():
+        for sname, scenario in self.ph_object.local_scenarios.items():
             for node in scenario._mpisppy_node_list:
                 for v in node.nonant_vardata_list:
                     v.unfix()
-        grad_dict = {sname: self.compute_grad(sname, scenario)
-                     for sname, scenario in self.ph_object.local_scenarios.items()}
-        local_costs = {(sname, var.name): grad_dict[sname][node.name, ix]
-                       for (sname, scenario) in self.ph_object.local_scenarios.items()
-                       for node in scenario._mpisppy_node_list
-                       for (ix, var) in enumerate(node.nonant_vardata_list)}
-        comm = self.ph_object.comms['ROOT']
+        grad_dict = {
+            sname: self.compute_grad(sname, scenario)
+            for sname, scenario in self.ph_object.local_scenarios.items()
+        }
+        local_costs = {
+            (sname, var.name): grad_dict[sname][node.name, ix]
+            for (sname, scenario) in self.ph_object.local_scenarios.items()
+            for node in scenario._mpisppy_node_list
+            for (ix, var) in enumerate(node.nonant_vardata_list)
+        }
+        comm = self.ph_object.comms["ROOT"]
         costs = comm.gather(local_costs, root=0)
-        if (self.ph_object.cylinder_rank == 0):
-            self.c = {key: val
-                      for cost in costs
-                      for key, val in cost.items()}
+        if self.ph_object.cylinder_rank == 0:
+            self.c = {key: val for cost in costs for key, val in cost.items()}
         comm.Barrier()
         self.ph_object._restore_nonants()
         self.ph_object.reenable_W_and_prox()

-
     def write_grad_cost(self):
-        """ Writes gradient cost for all scenarios.
+        """Writes gradient cost for all scenarios.

-        ASSUMES:
+        ASSUMES:
         The cfg object should contain an xhat path corresponding to the xhat file.
""" self.find_grad_cost() - if (self.ph_object.cylinder_rank == 0): - with open(self.cfg.grad_cost_file_out, 'w') as f: + if self.ph_object.cylinder_rank == 0: + with open(self.cfg.grad_cost_file_out, "w") as f: writer = csv.writer(f) - writer.writerow(['#grad cost values']) - for (key, val) in self.c.items(): + writer.writerow(["#grad cost values"]) + for key, val in self.c.items(): sname, vname = key[0], key[1] - row = ','.join([sname, vname, str(val)]) + '\n' + row = ",".join([sname, vname, str(val)]) + "\n" f.write(row) # all ranks rely on the existence of this file. # barrier perhaps should be imposed at the caller level. - comm = self.ph_object.comms['ROOT'] + comm = self.ph_object.comms["ROOT"] comm.Barrier() -#==================================================================================== + # ==================================================================================== -# TBD from JPW- why are these two here at all? SHOULD NOT BE IN THE "GRADIENT" FILE -# (partial answer, the ph_object is needed; but maybe the code could be untangled) + # TBD from JPW- why are these two here at all? SHOULD NOT BE IN THE "GRADIENT" FILE + # (partial answer, the ph_object is needed; but maybe the code could be untangled) def find_grad_rho(self): """computes rho based on cost file for all variables (does not write ????). @@ -166,32 +169,33 @@ def find_grad_rho(self): return find_rho.Find_Rho(self.ph_object, self.cfg).compute_rho() def write_grad_rho(self): - """Writes gradient rho for all variables. (Does not compute it) + """Writes gradient rho for all variables. (Does not compute it) ASSUMES: The cfg object should contain a grad_cost_file_in. """ - if self.cfg.grad_rho_file_out == '': - raise RuntimeError("write_grad_rho without grad_rho_file_out") - else: - rho_data = self.find_grad_rho() - if self.ph_object.cylinder_rank == 0: - with open(self.cfg.grad_rho_file_out, 'w', newline='') as file: - writer = csv.writer(file) - writer.writerow(['#grad rho values']) - for (vname, rho) in rho_data.items(): - writer.writerow([vname, rho_data[vname]]) - - # barrier added to avoid race-conditions involving other ranks reading. - comm = self.ph_object.comms['ROOT'] - comm.Barrier() + if self.cfg.grad_rho_file_out == "": + raise RuntimeError("write_grad_rho without grad_rho_file_out") + else: + rho_data = self.find_grad_rho() + if self.ph_object.cylinder_rank == 0: + with open(self.cfg.grad_rho_file_out, "w", newline="") as file: + writer = csv.writer(file) + writer.writerow(["#grad rho values"]) + for vname, rho in rho_data.items(): + writer.writerow([vname, rho_data[vname]]) + + # barrier added to avoid race-conditions involving other ranks reading. + comm = self.ph_object.comms["ROOT"] + comm.Barrier() + ################################################################################### def _parser_setup(): - """ Set up config object and return it, but don't parse + """Set up config object and return it, but don't parse Returns: cfg (Config): config object @@ -207,54 +211,62 @@ def _parser_setup(): cfg.popular_args() cfg.two_sided_args() cfg.ph_args() - + cfg.gradient_args() return cfg def grad_cost_and_rho(mname, original_cfg): - """ Creates a ph object from cfg and using the module 'mname' functions. Then computes the corresponding grad cost and rho. + """Creates a ph object from cfg and using the module 'mname' functions. Then computes the corresponding grad cost and rho. 
Args: mname (str): module name original_cfg (Config object): config object """ - if (original_cfg.grad_rho_file_out == '') and (original_cfg.grad_cost_file_out == ''): - raise RuntimeError ("Presently, grad-rho-file-out and grad-cost-file cannot both be empty") + if (original_cfg.grad_rho_file_out == "") and ( + original_cfg.grad_cost_file_out == "" + ): + raise RuntimeError( + "Presently, grad-rho-file-out and grad-cost-file cannot both be empty" + ) try: model_module = importlib.import_module(mname) except Exception: raise RuntimeError(f"Could not import module: {mname}") cfg = copy.deepcopy(original_cfg) - cfg.max_iterations = 0 #we only need x0 here + cfg.max_iterations = 0 # we only need x0 here - #create ph_object via vanilla + # create ph_object via vanilla scenario_creator = model_module.scenario_creator scenario_denouement = model_module.scenario_denouement - scen_names_creator_args = inspect.getfullargspec(model_module.scenario_names_creator).args #partition requires to do that - if scen_names_creator_args[0] == 'cfg': + scen_names_creator_args = inspect.getfullargspec( + model_module.scenario_names_creator + ).args # partition requires to do that + if scen_names_creator_args[0] == "cfg": all_scenario_names = model_module.scenario_names_creator(cfg) - else : + else: all_scenario_names = model_module.scenario_names_creator(cfg.num_scens) scenario_creator_kwargs = model_module.kw_creator(cfg) variable_probability = None - if hasattr(model_module, '_variable_probability'): + if hasattr(model_module, "_variable_probability"): variable_probability = model_module._variable_probability beans = (cfg, scenario_creator, scenario_denouement, all_scenario_names) - hub_dict = vanilla.ph_hub(*beans, - scenario_creator_kwargs=scenario_creator_kwargs, - ph_extensions=WXBarWriter, - variable_probability=variable_probability) + hub_dict = vanilla.ph_hub( + *beans, + scenario_creator_kwargs=scenario_creator_kwargs, + ph_extensions=WXBarWriter, + variable_probability=variable_probability, + ) list_of_spoke_dict = list() wheel = WheelSpinner(hub_dict, list_of_spoke_dict) - wheel.spin() #TODO: steal only what's needed in WheelSpinner + wheel.spin() # TODO: steal only what's needed in WheelSpinner if wheel.strata_rank == 0: # don't do this for bound ranks ph_object = wheel.spcomm.opt - - #============================================================================== + + # ============================================================================== # Compute grad cost and rhos Find_Grad(ph_object, cfg).write_grad_cost() Find_Grad(ph_object, cfg).write_grad_rho() @@ -263,5 +275,3 @@ def grad_cost_and_rho(mname, original_cfg): if __name__ == "__main__": #### not valid as of July 2024: print("call gradient.grad_cost_and_rho(modulename, cfg) and use --xhatpath --grad-cost-file --grad-rho-file to compute and write gradient cost and rho") print("no main") - - diff --git a/mpisppy/utils/listener_util/demo_listener_util.py b/mpisppy/utils/listener_util/demo_listener_util.py index c44ccd18f..6a2a66f70 100644 --- a/mpisppy/utils/listener_util/demo_listener_util.py +++ b/mpisppy/utils/listener_util/demo_listener_util.py @@ -26,9 +26,11 @@ rank = fullcomm.Get_rank() n_proc = fullcomm.Get_size() -logging.basicConfig(level=logging.DEBUG, - format='(%(threadName)-10s) %(message)s', - ) +logging.basicConfig( + level=logging.DEBUG, + format="(%(threadName)-10s) %(message)s", +) + def worker_bee(synchronizer, buzz=1e6, sting=100.0): """ @@ -45,14 +47,14 @@ def worker_bee(synchronizer, buzz=1e6, sting=100.0): but in 
this example we print). Note: To make this fun, we are randomly distribute the load. - For even more fun, we occasionally try to + For even more fun, we occasionally try to reduce the listner sleep time but the listener might not see it since it might sleep through the whole thing. This is subtle. - + We are going to track the time that each rank last reported just to show - one way to do it. To spell out the way we will do it: - allocate n_proc of the vector to seconds_since_start and each rank - will put its seconds_since_start into its spot; hence, the sum will be + one way to do it. To spell out the way we will do it: + allocate n_proc of the vector to seconds_since_start and each rank + will put its seconds_since_start into its spot; hence, the sum will be the report (since the others will have contributed zero). The number of times a rank has contributed to the sum could be tracked in an analogous way. @@ -61,79 +63,95 @@ def worker_bee(synchronizer, buzz=1e6, sting=100.0): local_sum_sofar = 0 local_iters_sofar = 0 global_iters_sofar = 0 - old_sleep = np.zeros(1, dtype='d') + old_sleep = np.zeros(1, dtype="d") old_sleep[0] = synchronizer.sleep_secs[0] # We will send/receive sum (1), iters (1) and secs (n_proc) # and we are going to concatenat all "vectors" into one. - local_concat = {"FirstReduce": {"ROOT": np.zeros(2 + n_proc, dtype='d')}} - global_concat = {"FirstReduce": {"ROOT": \ - np.zeros(len(local_concat["FirstReduce"]["ROOT"]), \ - dtype='d')}} + local_concat = {"FirstReduce": {"ROOT": np.zeros(2 + n_proc, dtype="d")}} + global_concat = { + "FirstReduce": { + "ROOT": np.zeros(len(local_concat["FirstReduce"]["ROOT"]), dtype="d") + } + } # In this trivial example, we are going enable the side gig and # nothing will disable it. Normally, the listener side gig would # be expected to disable it. 
# Gratuitous call to enable the side_gig - synchronizer.compute_global_data(local_concat, - global_concat, - rednames=["FirstReduce"], - enable_side_gig = True) + synchronizer.compute_global_data( + local_concat, global_concat, rednames=["FirstReduce"], enable_side_gig=True + ) while global_iters_sofar < buzz: # attempt at less sleep, maybe (but don't do it twice in a row) - if np.random.uniform() > 0.1 and old_sleep[0] \ - == synchronizer.sleep_secs[0]: + if np.random.uniform() > 0.1 and old_sleep[0] == synchronizer.sleep_secs[0]: old_sleep[0] = synchronizer.sleep_secs[0] synchronizer.sleep_secs[0] /= 10 - logging.debug ("TRYING to reduce sleep to {} from rank={}".\ - format(synchronizer.sleep_secs[0], rank)) + logging.debug( + "TRYING to reduce sleep to {} from rank={}".format( + synchronizer.sleep_secs[0], rank + ) + ) elif old_sleep[0] != synchronizer.sleep_secs[0]: synchronizer.sleep_secs[0] = old_sleep[0] - logging.debug ("putting sleep back to {} from rank={}".\ - format(synchronizer.sleep_secs[0], rank)) + logging.debug( + "putting sleep back to {} from rank={}".format( + synchronizer.sleep_secs[0], rank + ) + ) localiterstodo = int(np.random.uniform() * buzz / n_proc) if rank == 0: - logging.debug("**rank 0: iterstodo="+str(localiterstodo)) + logging.debug("**rank 0: iterstodo=" + str(localiterstodo)) for i in range(localiterstodo): local_sum_sofar += np.random.normal(0, sting) - + local_iters_sofar += localiterstodo if rank == 0: - logging.debug("rank 0: iterstodo {} iters sofar {} sum_so_far {}="\ - .format(localiterstodo, local_iters_sofar, local_sum_sofar)) + logging.debug( + "rank 0: iterstodo {} iters sofar {} sum_so_far {}=".format( + localiterstodo, local_iters_sofar, local_sum_sofar + ) + ) local_concat["FirstReduce"]["ROOT"][0] = local_iters_sofar local_concat["FirstReduce"]["ROOT"][1] = local_sum_sofar - local_concat["FirstReduce"]["ROOT"][2+rank] \ - = (dt.datetime.now() - startdt).total_seconds() + local_concat["FirstReduce"]["ROOT"][2 + rank] = ( + dt.datetime.now() - startdt + ).total_seconds() # Only do "FirstReduce". - synchronizer.compute_global_data(local_concat, - global_concat, - rednames=["FirstReduce"]) + synchronizer.compute_global_data( + local_concat, global_concat, rednames=["FirstReduce"] + ) global_iters_sofar = global_concat["FirstReduce"]["ROOT"][0] global_sum = global_concat["FirstReduce"]["ROOT"][1] if rank == 0: - logging.debug(" rank 0: global_iters {} global_sum_so_far {}"\ - .format(global_iters_sofar, global_sum)) + logging.debug( + " rank 0: global_iters {} global_sum_so_far {}".format( + global_iters_sofar, global_sum + ) + ) # tell the listener threads to shut down synchronizer.quitting = 1 if rank == 0: - print ("Rank 0 termination") - print ("Based on {} iterations, the average was {}".\ - format(global_iters_sofar, global_sum / global_iters_sofar)) - print ("In case you are curious:\n rank \t last report *in* (sec)") + print("Rank 0 termination") + print( + "Based on {} iterations, the average was {}".format( + global_iters_sofar, global_sum / global_iters_sofar + ) + ) + print("In case you are curious:\n rank \t last report *in* (sec)") for r in range(n_proc): - print (r, "\t", global_concat["FirstReduce"]["ROOT"][2+r]) - + print(r, "\t", global_concat["FirstReduce"]["ROOT"][2 + r]) -#================================= -def side_gig(synchro, msg = None): - """ Demonstrate a listener side gig. This will be called by the listener. + +# ================================= +def side_gig(synchro, msg=None): + """Demonstrate a listener side gig. 
This will be called by the listener. This is a small, silly function. Usually, the side-gig will check to see if it should do something or just pass. dlw babble: we can *look* at synchronizer.global_data for @@ -142,11 +160,11 @@ def side_gig(synchro, msg = None): This is either "hackish" or "c-like" depending on your point of view. Args: snchro (object): the Synchronizer object where the listener lives - msg (str): just to show keyword arg; normally, this would be + msg (str): just to show keyword arg; normally, this would be something (e.g., an object) that is modified """ if synchro._rank == 0: - print (" (rank 0) ^*^* side_gig msg=", str(msg)) + print(" (rank 0) ^*^* side_gig msg=", str(msg)) logging.debug("enter side gig on rank %d" % rank) # Just to demonstrate how to do it, we will just return if nothing @@ -158,14 +176,16 @@ def side_gig(synchro, msg = None): # but in most applications, this function will be in an object. prevredname = "FirstReduce" allthesame = True - if len(side_gig.prev_red_prev_concat) == 0: # first time + if len(side_gig.prev_red_prev_concat) == 0: # first time for cname, clen in synchro.Lens[prevredname].items(): - side_gig.prev_red_prev_concat[cname] = np.zeros(clen, dtype='d') - allthesame = False # in case they are actually all zero + side_gig.prev_red_prev_concat[cname] = np.zeros(clen, dtype="d") + allthesame = False # in case they are actually all zero for cname in synchro.Lens[prevredname]: - if not np.array_equal(side_gig.prev_red_prev_concat[cname], - synchro.global_data[prevredname][cname]): + if not np.array_equal( + side_gig.prev_red_prev_concat[cname], + synchro.global_data[prevredname][cname], + ): allthesame = False break if allthesame: @@ -180,87 +200,97 @@ def side_gig(synchro, msg = None): # this particular side_gig is very silly lastsideresult = synchro.global_data["SecondReduce"]["ROOT"] - logging.debug("In case you are curious, before this listener call to"\ - +"side_gig, the value of the secondreduce was {} on rank {}"\ - .format(lastsideresult, rank)) - + logging.debug( + "In case you are curious, before this listener call to" + + "side_gig, the value of the secondreduce was {} on rank {}".format( + lastsideresult, rank + ) + ) + # dst, src - np.copyto(side_gig.prev_red_prev_concat[cname], - synchro.global_data[prevredname][cname]) - + np.copyto( + side_gig.prev_red_prev_concat[cname], synchro.global_data[prevredname][cname] + ) + # For the second reduce, we are going to sum the ranks, which is silly. - + # Normally, the concats would be created once in an __init__, btw. - local_concat = {"SecondReduce": {"ROOT": np.zeros(1, dtype='d')}} + local_concat = {"SecondReduce": {"ROOT": np.zeros(1, dtype="d")}} local_concat["SecondReduce"]["ROOT"][0] = rank # We can (and should) do a dangerous put in the side_gig because # the listener will have the lock. If the worker gets to its compute_global # then it will have to wait for the lock. synchro._unsafe_put_local_data("SecondReduce", local_concat) - # global data for the second reduce will be updated when we return to the # listener and # it will be available for the next get (we don't need a compute). # But it is not available now because the reduction has not been done yet. 
-side_gig.prev_red_prev_concat = {}#[cname]=synchro.global_data[prevredname][cname] - + +side_gig.prev_red_prev_concat = {} # [cname]=synchro.global_data[prevredname][cname] + ####### Main ####### -usemsg = "usage: python demo_listener_util.py iters sleep seed; e.g.,\n" + \ - "python demo_listener_util.py 1e5 0.5 1134" +usemsg = ( + "usage: python demo_listener_util.py iters sleep seed; e.g.,\n" + + "python demo_listener_util.py 1e5 0.5 1134" +) if len(sys.argv) != 4: - raise RuntimeError(usemsg) + raise RuntimeError(usemsg) try: iters = int(sys.argv[1]) except Exception: - raise RuntimeError(sys.argv[1]+" is not a valid iters\n"+usemsg) + raise RuntimeError(sys.argv[1] + " is not a valid iters\n" + usemsg) try: sleep = float(sys.argv[2]) except Exception: - raise RuntimeError(sys.argv[2]+" is not a valid sleep\n"+usemsg) + raise RuntimeError(sys.argv[2] + " is not a valid sleep\n" + usemsg) try: seed = int(sys.argv[3]) except Exception: - raise RuntimeError(sys.argv[3]+" is not a valid seed\n"+usemsg) + raise RuntimeError(sys.argv[3] + " is not a valid seed\n" + usemsg) -sting = 1 # standard devation +sting = 1 # standard devation np.random.seed(seed) # Note: at this point the first reduce is the only reduce -Lens = collections.OrderedDict({"FirstReduce": {"ROOT": 2+n_proc}}) +Lens = collections.OrderedDict({"FirstReduce": {"ROOT": 2 + n_proc}}) if rank == 0: logging.debug("iters %d, sleep %f, seed %d" % (iters, sleep, seed)) # "ROOT" is required to be the name of the global comm -synchronizer = listener_util.Synchronizer(comms = {"ROOT": fullcomm}, - Lens = Lens, - work_fct = worker_bee, - rank = rank, - sleep_secs = sleep, - asynch = True) +synchronizer = listener_util.Synchronizer( + comms={"ROOT": fullcomm}, + Lens=Lens, + work_fct=worker_bee, + rank=rank, + sleep_secs=sleep, + asynch=True, +) args = [synchronizer] kwargs = {"buzz": iters, "sting": sting} synchronizer.run(args, kwargs) ### now demo the use of a listener side gig between two reductions ### if rank == 0: - print ("testing side gig") + print("testing side gig") logging.debug("testing side gig on rank %d" % rank) kwargs = {"msg": "Oh wow, the side gig is running"} -Lens = collections.OrderedDict({"FirstReduce": {"ROOT": 2+n_proc}, - "SecondReduce": {"ROOT": 1}}) -listener_gigs = {"FirstReduce": (side_gig, kwargs), - "SecondReduce": None} - -synchronizer = listener_util.Synchronizer(comms = {"ROOT": fullcomm}, - Lens = Lens, - work_fct = worker_bee, - rank = rank, - sleep_secs = sleep, - asynch = True, - listener_gigs = listener_gigs) +Lens = collections.OrderedDict( + {"FirstReduce": {"ROOT": 2 + n_proc}, "SecondReduce": {"ROOT": 1}} +) +listener_gigs = {"FirstReduce": (side_gig, kwargs), "SecondReduce": None} + +synchronizer = listener_util.Synchronizer( + comms={"ROOT": fullcomm}, + Lens=Lens, + work_fct=worker_bee, + rank=rank, + sleep_secs=sleep, + asynch=True, + listener_gigs=listener_gigs, +) args = [synchronizer] kwargs = {"buzz": iters, "sting": sting} synchronizer.run(args, kwargs) diff --git a/mpisppy/utils/listener_util/doc/conf.py b/mpisppy/utils/listener_util/doc/conf.py index 2da411b51..e0de4affc 100644 --- a/mpisppy/utils/listener_util/doc/conf.py +++ b/mpisppy/utils/listener_util/doc/conf.py @@ -28,7 +28,7 @@ # add these directories to sys.path here. If the directory is relative to the # documentation root, use os.path.abspath to make it absolute, like shown here. 
# -sys.path.insert(0, os.path.abspath('.')) +sys.path.insert(0, os.path.abspath(".")) # -- General configuration ------------------------------------------------ @@ -40,45 +40,47 @@ # Add any Sphinx extension module names here, as strings. They can be # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom # ones. -extensions = ['sphinx.ext.autodoc', - 'sphinx.ext.coverage', - 'sphinx.ext.mathjax', - 'sphinx.ext.viewcode', - 'sphinx.ext.napoleon', - 'sphinx.ext.ifconfig', - 'sphinx.ext.inheritance_diagram', - 'sphinx.ext.autosummary', - 'sphinx.ext.doctest'] - #'sphinx.ext.githubpages'] +extensions = [ + "sphinx.ext.autodoc", + "sphinx.ext.coverage", + "sphinx.ext.mathjax", + "sphinx.ext.viewcode", + "sphinx.ext.napoleon", + "sphinx.ext.ifconfig", + "sphinx.ext.inheritance_diagram", + "sphinx.ext.autosummary", + "sphinx.ext.doctest", +] +#'sphinx.ext.githubpages'] viewcode_import = True -#napoleon_include_private_with_doc = True +# napoleon_include_private_with_doc = True # Add any paths that contain templates here, relative to this directory. -templates_path = ['_templates'] +templates_path = ["_templates"] # The suffix(es) of source filenames. # You can specify multiple suffix as a list of string: # # source_suffix = ['.rst', '.md'] -source_suffix = '.rst' +source_suffix = ".rst" # The root toctree document. -root_doc = 'index' +root_doc = "index" # General information about the project. -project = u'PySP' -copyright = u'2019, Sandia National Laboratories' -author = u'David L. Woodruff' +project = "PySP" +copyright = "2019, Sandia National Laboratories" +author = "David L. Woodruff" # The version info for the project you're documenting, acts as replacement for # |version| and |release|, also used in various other places throughout the # built documents. # # The short X.Y version. -version = '0.1' +version = "0.1" # The full version, including alpha/beta/rc tags. -release = '0.1' +release = "0.1" # The language for content autogenerated by Sphinx. Refer to documentation # for a list of supported languages. @@ -90,10 +92,10 @@ # List of patterns, relative to source directory, that match files and # directories to ignore when looking for source files. # This patterns also effect to html_static_path and html_extra_path -exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store'] +exclude_patterns = ["_build", "Thumbs.db", ".DS_Store"] # The name of the Pygments (syntax highlighting) style to use. -pygments_style = 'sphinx' +pygments_style = "sphinx" # If true, `todo` and `todoList` produce output, else they produce nothing. todo_include_todos = False @@ -105,27 +107,30 @@ # The theme to use for HTML and HTML Help pages. See the documentation for # a list of builtin themes. 
# -#html_theme = 'alabaster' -on_rtd = os.environ.get('READTHEDOCS', None) == 'True' +# html_theme = 'alabaster' +on_rtd = os.environ.get("READTHEDOCS", None) == "True" if not on_rtd: # only import and set the theme if we're building docs locally import sphinx_rtd_theme - html_theme = 'sphinx_rtd_theme' + + html_theme = "sphinx_rtd_theme" html_theme_path = [sphinx_rtd_theme.get_html_theme_path()] + # Override default css to get a larger width for local build def setup(app): - app.add_stylesheet('theme_overrides.css') + app.add_stylesheet("theme_overrides.css") + html_context = { - 'css_files': [ - '_static/theme_overrides.css', + "css_files": [ + "_static/theme_overrides.css", ], } else: html_context = { - 'css_files': [ - 'https://media.readthedocs.org/css/sphinx_rtd_theme.css', - 'https://media.readthedocs.org/css/readthedocs-doc-embed.css', - '_static/theme_overrides.css', + "css_files": [ + "https://media.readthedocs.org/css/sphinx_rtd_theme.css", + "https://media.readthedocs.org/css/readthedocs-doc-embed.css", + "_static/theme_overrides.css", ], } @@ -138,15 +143,15 @@ def setup(app): # Add any paths that contain custom static files (such as style sheets) here, # relative to this directory. They are copied after the builtin static files, # so a file named "default.css" will overwrite the builtin "default.css". -#html_static_path = ['_static'] +# html_static_path = ['_static'] -#html_favicon = "../logos/pyomo/favicon.ico" +# html_favicon = "../logos/pyomo/favicon.ico" # -- Options for HTMLHelp output ------------------------------------------ # Output file base name for HTML help builder. -htmlhelp_basename = 'listener_util' +htmlhelp_basename = "listener_util" # -- Options for LaTeX output --------------------------------------------- @@ -155,15 +160,12 @@ def setup(app): # The paper size ('letterpaper' or 'a4paper'). # # 'papersize': 'letterpaper', - # The font size ('10pt', '11pt' or '12pt'). # # 'pointsize': '10pt', - # Additional stuff for the LaTeX preamble. # # 'preamble': '', - # Latex figure (float) alignment # # 'figure_align': 'htbp', @@ -173,8 +175,13 @@ def setup(app): # (source start file, target name, title, # author, documentclass [howto, manual, or own class]). latex_documents = [ - (root_doc, 'listener_util.tex', 'listener_util Documentation', - 'listener_util', 'manual'), + ( + root_doc, + "listener_util.tex", + "listener_util Documentation", + "listener_util", + "manual", + ), ] @@ -182,10 +189,7 @@ def setup(app): # One entry per manual page. List of tuples # (source start file, name, description, authors, manual section). 
-man_pages = [ - (root_doc, 'listener_util', 'listener_util Documentation', - [author], 1) -] +man_pages = [(root_doc, "listener_util", "listener_util Documentation", [author], 1)] # -- Options for Texinfo output ------------------------------------------- @@ -194,10 +198,16 @@ def setup(app): # (source start file, target name, title, author, # dir menu entry, description, category) texinfo_documents = [ - (root_doc, 'listener_util', 'listener_util Documentation', - author, 'listener_util', 'One line description of project.', - 'Miscellaneous'), + ( + root_doc, + "listener_util", + "listener_util Documentation", + author, + "listener_util", + "One line description of project.", + "Miscellaneous", + ), ] -#autodoc_member_order = 'bysource' -#autodoc_member_order = 'groupwise' +# autodoc_member_order = 'bysource' +# autodoc_member_order = 'groupwise' diff --git a/mpisppy/utils/listener_util/listener_util.py b/mpisppy/utils/listener_util/listener_util.py index 366c60aaf..b996c5126 100644 --- a/mpisppy/utils/listener_util/listener_util.py +++ b/mpisppy/utils/listener_util/listener_util.py @@ -16,6 +16,7 @@ To avoid errors from Pyomo use, solve_keyword_args["use_signal_handling"] = False """ + import numpy as np import collections import mpisppy.MPI as mpi @@ -30,8 +31,8 @@ class Synchronizer(object): Args: comms (dict of mpi comms): keys are strings: mpi comms There must be a key "ROOT" that is the global comm - Lens (sorted dict of dict of ints): - the keys are reduction step names (defines them), + Lens (sorted dict of dict of ints): + the keys are reduction step names (defines them), then comms' keys. Contains length of vectors to sum work_fct (function): the main worker function (args can be supplied when run is called.) @@ -55,19 +56,20 @@ class Synchronizer(object): As of Python 3.7 async is a reserved word. Using asynch instead. """ - def __init__(self, comms, Lens, work_fct, rank, - sleep_secs, asynch=False, listener_gigs = None): + def __init__( + self, comms, Lens, work_fct, rank, sleep_secs, asynch=False, listener_gigs=None + ): self.asynch = asynch self.comms = comms self.Lens = Lens self._rank = rank - self.sleep_secs = np.zeros(1, dtype='d') + self.sleep_secs = np.zeros(1, dtype="d") self.sleep_secs[0] = sleep_secs - self.sleep_touse = np.zeros(1, dtype='d') # min over ranks + self.sleep_touse = np.zeros(1, dtype="d") # min over ranks # The side gigs are very dangerous. 
self.listener_gigs = listener_gigs - self.enable_side_gig = False # TBD should be gigs & indexed by reduction - + self.enable_side_gig = False # TBD should be gigs & indexed by reduction + self.global_quitting = 0 self.quitting = 0 self.work_fct = work_fct @@ -79,26 +81,28 @@ def __init__(self, comms, Lens, work_fct, rank, self.local_data[redname] = {} self.global_data[redname] = {} for commname, ell in self.Lens[redname].items(): - assert(commname in self.comms) - self.local_data[redname][commname] = np.zeros(ell, dtype='d') - self.global_data[redname][commname] = np.zeros(ell, dtype='d') + assert commname in self.comms + self.local_data[redname][commname] = np.zeros(ell, dtype="d") + self.global_data[redname][commname] = np.zeros(ell, dtype="d") self.data_lock = threading.Lock() def run(self, args, kwargs): if self.asynch: # THE WORKER - wthread = threading.Thread(name=self.work_fct.__name__, - target=self.work_fct, - args = args, - kwargs = kwargs) + wthread = threading.Thread( + name=self.work_fct.__name__, + target=self.work_fct, + args=args, + kwargs=kwargs, + ) ## ph = threading.Thread(name='ph_main', target=ph_main) - #ph.setDaemon(True) + # ph.setDaemon(True) listenargs = [self._rank, self] - listener = threading.Thread(name='listener', - target=Synchronizer.listener_daemon, - args=listenargs) - #listener.setDaemon(True) + listener = threading.Thread( + name="listener", target=Synchronizer.listener_daemon, args=listenargs + ) + # listener.setDaemon(True) listener.start() wthread.start() @@ -109,28 +113,33 @@ def run(self, args, kwargs): ###=====================### def _check_Lens(self, local_data_in, global_data_out, redname, cname): - """ Essentially local to compute_global_data - """ - if len(local_data_in[redname][cname]) \ - != self.Lens[redname][cname]: + """Essentially local to compute_global_data""" + if len(local_data_in[redname][cname]) != self.Lens[redname][cname]: self.global_quitting = 1 - print ("\nERROR listener_util:cname={} len in={} Lens={}".\ - format(cname, len(local_data_in[redname][cname]), - self.Lens[redname][cname])) - if len(global_data_out[redname][cname]) \ - != self.Lens[redname][cname]: + print( + "\nERROR listener_util:cname={} len in={} Lens={}".format( + cname, len(local_data_in[redname][cname]), self.Lens[redname][cname] + ) + ) + if len(global_data_out[redname][cname]) != self.Lens[redname][cname]: self.global_quitting = 1 - print ("\nERROR listener_util:cname={} len out={} Lens={}".\ - format(cname, len(global_data_out[redname][cname]), - self.Lens[redname][cname])) + print( + "\nERROR listener_util:cname={} len out={} Lens={}".format( + cname, + len(global_data_out[redname][cname]), + self.Lens[redname][cname], + ) + ) - ######################################################################### - def compute_global_data(self, local_data_in, - global_data_out, - enable_side_gig = False, - rednames=None, - keep_up=False): + def compute_global_data( + self, + local_data_in, + global_data_out, + enable_side_gig=False, + rednames=None, + keep_up=False, + ): """ Data processing. Cache local_data_in so it will be reduced later by the listener. Copy out the data that @@ -141,7 +150,7 @@ def compute_global_data(self, local_data_in, Args: local_data_in (dict): data computed locally global_data_out (dict): global version (often a sum over locals) - enable_side_gig (boolean): sets a flag that allows the + enable_side_gig (boolean): sets a flag that allows the listener to run side gigs. 
It is intended to be a run once authorization and the side gig code itself disables it. rednames (list of str): optional list of reductions to report @@ -153,9 +162,9 @@ def compute_global_data(self, local_data_in, Note np.copy is dst, src """ if self.asynch: - logging.debug('Starting comp_glob update on Rank %d' % self._rank) - self.data_lock.acquire() - logging.debug('Lock aquired by comp_glob on Rank %d' % self._rank) + logging.debug("Starting comp_glob update on Rank %d" % self._rank) + self.data_lock.acquire() + logging.debug("Lock aquired by comp_glob on Rank %d" % self._rank) if rednames is None: reds = self.Lens.keys() else: @@ -164,44 +173,52 @@ def compute_global_data(self, local_data_in, for cname in self.Lens[redname].keys(): if self.Lens[redname][cname] == 0: continue - self._check_Lens(local_data_in, global_data_out, - redname, cname) + self._check_Lens(local_data_in, global_data_out, redname, cname) if keep_up: """ Capture the new data that will be summed. Note: if you want this to be sparse, the caller should probably do it. """ - global_data_out[redname][cname] \ - = self.global_data[redname][cname] \ - - self.local_data[redname][cname] \ + global_data_out[redname][cname] = ( + self.global_data[redname][cname] + - self.local_data[redname][cname] + local_data_in[redname][cname] + ) # The listener might sleep a long time, so update global - np.copyto(self.global_data[redname][cname], - global_data_out[redname][cname]) # dst, scr + np.copyto( + self.global_data[redname][cname], + global_data_out[redname][cname], + ) # dst, scr else: - np.copyto(global_data_out[redname][cname], - self.global_data[redname][cname]) + np.copyto( + global_data_out[redname][cname], + self.global_data[redname][cname], + ) # The next copy is done after global, # so that keep_up can have the previous local_data.(dst, src) - np.copyto(self.local_data[redname][cname], - local_data_in[redname][cname]) + np.copyto( + self.local_data[redname][cname], local_data_in[redname][cname] + ) self.data_lock.release() - logging.debug('Lock released on Rank %d' % self._rank) - logging.debug('Ending update on Rank %d' % self._rank) + logging.debug("Lock released on Rank %d" % self._rank) + logging.debug("Ending update on Rank %d" % self._rank) if enable_side_gig: if self.enable_side_gig: raise RuntimeError("side gig already enabled.") else: - self.enable_side_gig = True # the side_gig must disable - else: # synchronous + self.enable_side_gig = True # the side_gig must disable + else: # synchronous for redname in self.Lens.keys(): for cname in self.Lens[redname].keys(): if self.Lens[redname][cname] == 0: continue comm = self.comms[cname] - comm.Allreduce([local_data_in[cname], mpi.DOUBLE], - [global_data_out[cname], mpi.DOUBLE], - op=mpi.SUM) + comm.Allreduce( + [local_data_in[cname], mpi.DOUBLE], + [global_data_out[cname], mpi.DOUBLE], + op=mpi.SUM, + ) + #################### def get_global_data(self, global_data_out): """ @@ -213,20 +230,22 @@ def get_global_data(self, global_data_out): As of March 2019, not used internally (duplicates code in compute) """ if self.asynch: - logging.debug('Enter get_global_data') - logging.debug(' get_glob wants lock on Rank %d' % self._rank) - self.data_lock.acquire() - logging.debug('Lock acquired by get_glob on Rank %d' % self._rank) + logging.debug("Enter get_global_data") + logging.debug(" get_glob wants lock on Rank %d" % self._rank) + self.data_lock.acquire() + logging.debug("Lock acquired by get_glob on Rank %d" % self._rank) for redname in self.Lens.keys(): for cname in 
self.Lens[redname].keys(): if self.Lens[redname][cname] == 0: continue # dst, src - np.copyto(global_data_out[redname][cname], - self.global_data[redname][cname]) + np.copyto( + global_data_out[redname][cname], + self.global_data[redname][cname], + ) self.data_lock.release() - logging.debug('Lock released on Rank %d' % self._rank) - logging.debug('Leave get_global_data') + logging.debug("Lock released on Rank %d" % self._rank) + logging.debug("Leave get_global_data") else: raise RuntimeError("get_global_data called for sycnhronous") @@ -243,14 +262,14 @@ def _unsafe_get_global_data(self, redname, global_data_out): As of March 2019, not used internally (duplicates code in compute) """ if self.asynch: - logging.debug('Enter _usafe_get_global_data, redname={}'\ - .format(redname)) + logging.debug("Enter _usafe_get_global_data, redname={}".format(redname)) for cname in self.Lens[redname].keys(): if self.Lens[redname][cname] == 0: continue - np.copyto(global_data_out[redname][cname], - self.global_data[redname][cname]) # dst, src - logging.debug('Leave _unsafe_get_global_data') + np.copyto( + global_data_out[redname][cname], self.global_data[redname][cname] + ) # dst, src + logging.debug("Leave _unsafe_get_global_data") else: raise RuntimeError("_unsafe_get_global_data called for sycnhronous") @@ -267,72 +286,76 @@ def _unsafe_put_local_data(self, redname, local_data_in): As of March 2019, not used internally (duplicates code in compute) """ if self.asynch: - logging.debug('Enter _usafe_put_local_data, redname={}'\ - .format(redname)) + logging.debug("Enter _usafe_put_local_data, redname={}".format(redname)) for cname in self.Lens[redname].keys(): if self.Lens[redname][cname] == 0: continue - np.copyto(self.local_data[redname][cname], - local_data_in[redname][cname],) # dst, src - logging.debug('Leave _unsafe_put_locobal_data') + np.copyto( + self.local_data[redname][cname], + local_data_in[redname][cname], + ) # dst, src + logging.debug("Leave _unsafe_put_locobal_data") else: raise RuntimeError("_unsafe_put_local_data called for sycnhronous") - @staticmethod def listener_daemon(rank, synchronizer): # both args added by DLW March 2019 # listener side_gigs added by DLW March 2019. - logging.debug('Starting Listener on Rank %d' % rank) + logging.debug("Starting Listener on Rank %d" % rank) while synchronizer.global_quitting == 0: # IDEA (Bill): Add a Barrier here??? 
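The keep_up branch of compute_global_data above implements a rolling correction: the caller's view of the global sum is patched by swapping out its stale contribution for its fresh one, without waiting for the listener's next Allreduce. A small numeric illustration of that update (the values are made up):

import numpy as np

global_prev = np.array([10.0])  # last reduced sum across all ranks
local_prev = np.array([2.0])    # this rank's term inside that sum
local_new = np.array([5.0])     # this rank's fresh value
# same arithmetic as the keep_up branch: drop the stale term, add the fresh one
global_out = global_prev - local_prev + local_new  # -> array([13.])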
synchronizer.data_lock.acquire() - logging.debug('Locked; starting AllReduce on Rank %d' % rank) + logging.debug("Locked; starting AllReduce on Rank %d" % rank) for redname in synchronizer.Lens.keys(): for cname in synchronizer.Lens[redname].keys(): if synchronizer.Lens[redname][cname] == 0: continue comm = synchronizer.comms[cname] - logging.debug(' redname %s cname %s pre-reduce on rank %d' \ - % (redname, cname, rank)) - comm.Allreduce([synchronizer.local_data[redname][cname], - mpi.DOUBLE], - [synchronizer.global_data[redname][cname], - mpi.DOUBLE], - op=mpi.SUM) - logging.debug(' post-reduce %s on rank %d' % (redname,rank)) - if synchronizer.enable_side_gig \ - and synchronizer.listener_gigs is not None \ - and synchronizer.listener_gigs[redname] is not None: + logging.debug( + " redname %s cname %s pre-reduce on rank %d" + % (redname, cname, rank) + ) + comm.Allreduce( + [synchronizer.local_data[redname][cname], mpi.DOUBLE], + [synchronizer.global_data[redname][cname], mpi.DOUBLE], + op=mpi.SUM, + ) + logging.debug(" post-reduce %s on rank %d" % (redname, rank)) + if ( + synchronizer.enable_side_gig + and synchronizer.listener_gigs is not None + and synchronizer.listener_gigs[redname] is not None + ): args = [synchronizer] fct, kwargs = synchronizer.listener_gigs[redname] if kwargs is not None: fct(*args, **kwargs) else: fct(*args) - logging.debug('Still Locked; ending AllReduces on Rank %d' % rank) + logging.debug("Still Locked; ending AllReduces on Rank %d" % rank) synchronizer.global_quitting = synchronizer.comms["ROOT"].allreduce( - synchronizer.quitting, op=mpi.SUM) - sleep_touse = np.zeros(1, dtype='d') + synchronizer.quitting, op=mpi.SUM + ) + sleep_touse = np.zeros(1, dtype="d") sleep_touse[0] = synchronizer.sleep_secs[0] - synchronizer.comms["ROOT"].Allreduce([synchronizer.sleep_secs, - mpi.DOUBLE], - [sleep_touse, mpi.DOUBLE], - op=mpi.MIN) + synchronizer.comms["ROOT"].Allreduce( + [synchronizer.sleep_secs, mpi.DOUBLE], + [sleep_touse, mpi.DOUBLE], + op=mpi.MIN, + ) - logging.debug(' releasing lock on Rank %d' % rank) + logging.debug(" releasing lock on Rank %d" % rank) synchronizer.data_lock.release() - logging.debug(' sleep for %f on Rank %d' % (sleep_touse, rank)) + logging.debug(" sleep for %f on Rank %d" % (sleep_touse, rank)) try: time.sleep(sleep_touse[0]) except: print("sleep_touse={}".format(sleep_touse)) raise - logging.debug('Exiting listener on Rank %d' % rank) - - -#synchronizer = Synchronizer(CropsLen, comm, sync=True) -#synchronizer.run() + logging.debug("Exiting listener on Rank %d" % rank) +# synchronizer = Synchronizer(CropsLen, comm, sync=True) +# synchronizer.run() diff --git a/mpisppy/utils/lshaped_cuts.py b/mpisppy/utils/lshaped_cuts.py index 6a6d3e037..e3f0a3423 100644 --- a/mpisppy/utils/lshaped_cuts.py +++ b/mpisppy/utils/lshaped_cuts.py @@ -19,21 +19,21 @@ solver_dual_sign_convention = dict() -solver_dual_sign_convention['ipopt'] = -1 -solver_dual_sign_convention['gurobi'] = -1 -solver_dual_sign_convention['gurobi_direct'] = -1 -solver_dual_sign_convention['gurobi_persistent'] = -1 -solver_dual_sign_convention['cplex'] = -1 -solver_dual_sign_convention['cplex_direct'] = -1 -solver_dual_sign_convention['cplexdirect'] = -1 -solver_dual_sign_convention['cplex_persistent'] = -1 -solver_dual_sign_convention['glpk'] = -1 -solver_dual_sign_convention['cbc'] = -1 -solver_dual_sign_convention['xpress_direct'] = -1 -solver_dual_sign_convention['xpress_persistent'] = -1 +solver_dual_sign_convention["ipopt"] = -1 +solver_dual_sign_convention["gurobi"] = -1 
+solver_dual_sign_convention["gurobi_direct"] = -1 +solver_dual_sign_convention["gurobi_persistent"] = -1 +solver_dual_sign_convention["cplex"] = -1 +solver_dual_sign_convention["cplex_direct"] = -1 +solver_dual_sign_convention["cplexdirect"] = -1 +solver_dual_sign_convention["cplex_persistent"] = -1 +solver_dual_sign_convention["glpk"] = -1 +solver_dual_sign_convention["cbc"] = -1 +solver_dual_sign_convention["xpress_direct"] = -1 +solver_dual_sign_convention["xpress_persistent"] = -1 -@declare_custom_block(name='LShapedCutGenerator') +@declare_custom_block(name="LShapedCutGenerator") class LShapedCutGeneratorData(bc.BendersCutGeneratorData): def __init__(self, component): super().__init__(component) @@ -43,29 +43,46 @@ def __init__(self, component): def set_ls(self, ls): self.ls = ls self.global_subproblem_count = len(self.ls.all_scenario_names) - self._subproblem_ndx_map = dict.fromkeys(range(len(self.ls.local_scenario_names))) + self._subproblem_ndx_map = dict.fromkeys( + range(len(self.ls.local_scenario_names)) + ) for s in self._subproblem_ndx_map.keys(): - self._subproblem_ndx_map[s] = self.ls.all_scenario_names.index(self.ls.local_scenario_names[s]) + self._subproblem_ndx_map[s] = self.ls.all_scenario_names.index( + self.ls.local_scenario_names[s] + ) # print(self._subproblem_ndx_map) self.all_root_etas = list(self.ls.root.eta.values()) def global_num_subproblems(self): return self.global_subproblem_count - def add_subproblem(self, subproblem_fn, subproblem_fn_kwargs, root_eta, subproblem_solver='gurobi_persistent', - relax_subproblem_cons=False, subproblem_solver_options=None): + def add_subproblem( + self, + subproblem_fn, + subproblem_fn_kwargs, + root_eta, + subproblem_solver="gurobi_persistent", + relax_subproblem_cons=False, + subproblem_solver_options=None, + ): # print(self._subproblem_ndx_map) # self.all_root_etas.append(root_eta) # self.global_subproblem_count += 1 - if subproblem_fn_kwargs['scenario_name'] in self.ls.local_scenario_names: + if subproblem_fn_kwargs["scenario_name"] in self.ls.local_scenario_names: # self.local_subproblem_count += 1 self.root_etas.append(root_eta) subproblem, complicating_vars_map = subproblem_fn(**subproblem_fn_kwargs) self.subproblems.append(subproblem) self.complicating_vars_maps.append(complicating_vars_map) - bc._setup_subproblem(subproblem, root_vars=[complicating_vars_map[i] for i in self.root_vars if - i in complicating_vars_map], - relax_subproblem_cons=relax_subproblem_cons) + bc._setup_subproblem( + subproblem, + root_vars=[ + complicating_vars_map[i] + for i in self.root_vars + if i in complicating_vars_map + ], + relax_subproblem_cons=relax_subproblem_cons, + ) # self._subproblem_ndx_map[self.local_subproblem_count - 1] = self.global_subproblem_count - 1 @@ -73,7 +90,9 @@ def add_subproblem(self, subproblem_fn, subproblem_fn_kwargs, root_eta, subprobl subproblem_solver = pe.SolverFactory(subproblem_solver) self.subproblem_solvers.append(subproblem_solver) if isinstance(subproblem_solver, PersistentSolver): - set_instance_retry(subproblem, subproblem_solver, subproblem_fn_kwargs['scenario_name']) + set_instance_retry( + subproblem, subproblem_solver, subproblem_fn_kwargs["scenario_name"] + ) if subproblem_solver_options: - for k,v in subproblem_solver_options.items(): + for k, v in subproblem_solver_options.items(): subproblem_solver.options[k] = v diff --git a/mpisppy/utils/pickle_bundle.py b/mpisppy/utils/pickle_bundle.py index 06bac6d39..a84124160 100644 --- a/mpisppy/utils/pickle_bundle.py +++ 
b/mpisppy/utils/pickle_bundle.py @@ -15,11 +15,12 @@ # BTW: ssn (not in this repo) uses this as of March 2022 import os -import dill +import dill from mpisppy import global_toc + def dill_pickle(model, fname): - """ serialize model using dill to file name""" + """serialize model using dill to file name""" global_toc(f"about to pickle to {fname}") with open(fname, "wb") as f: dill.dump(model, f) @@ -27,8 +28,8 @@ def dill_pickle(model, fname): def dill_unpickle(fname): - """ load a model from fname""" - + """load a model from fname""" + global_toc(f"about to unpickle {fname}") with open(fname, "rb") as f: m = dill.load(f) @@ -37,37 +38,56 @@ def dill_unpickle(fname): def pickle_bundle_config(cfg): - """ Add command line options for creation and use of "proper" bundles + """Add command line options for creation and use of "proper" bundles args: cfg (Config): the Config object to which we add""" - cfg.add_to_config('pickle_bundles_dir', - description="Write bundles to a dill pickle files in this dir (default None)", - domain=str, - default=None) - - cfg.add_to_config('unpickle_bundles_dir', - description="Read bundles from a dill pickle files in this dir; (default None)", - domain=str, - default=None) - cfg.add_to_config("scenarios_per_bundle", - description="Used for `proper` bundles only (default None)", - domain=int, - default=None) + cfg.add_to_config( + "pickle_bundles_dir", + description="Write bundles to a dill pickle files in this dir (default None)", + domain=str, + default=None, + ) + + cfg.add_to_config( + "unpickle_bundles_dir", + description="Read bundles from a dill pickle files in this dir; (default None)", + domain=str, + default=None, + ) + cfg.add_to_config( + "scenarios_per_bundle", + description="Used for `proper` bundles only (default None)", + domain=int, + default=None, + ) + def check_args(cfg): - """ make sure the pickle bundle args make sense""" - assert(cfg.pickle_bundles_dir is None or cfg.unpickle_bundles_dir is None) + """make sure the pickle bundle args make sense""" + assert cfg.pickle_bundles_dir is None or cfg.unpickle_bundles_dir is None if cfg.scenarios_per_bundle is None: - raise RuntimeError("For proper bundles, --scenarios-per-bundle must be specified") + raise RuntimeError( + "For proper bundles, --scenarios-per-bundle must be specified" + ) if cfg.get("bundles_per_rank") is not None and cfg.bundles_per_rank != 0: - raise RuntimeError("For proper bundles, --scenarios-per-bundle must be specified " - "and --bundles-per-rank cannot be") + raise RuntimeError( + "For proper bundles, --scenarios-per-bundle must be specified " + "and --bundles-per-rank cannot be" + ) if cfg.pickle_bundles_dir is not None and not os.path.isdir(cfg.pickle_bundles_dir): - raise RuntimeError(f"Directory to pickle into not found: {cfg.pickle_bundles_dir}") - if cfg.unpickle_bundles_dir is not None and not os.path.isdir(cfg.unpickle_bundles_dir): - raise RuntimeError(f"Directory to load pickle files from not found: {cfg.unpickle_bundles_dir}") + raise RuntimeError( + f"Directory to pickle into not found: {cfg.pickle_bundles_dir}" + ) + if cfg.unpickle_bundles_dir is not None and not os.path.isdir( + cfg.unpickle_bundles_dir + ): + raise RuntimeError( + f"Directory to load pickle files from not found: {cfg.unpickle_bundles_dir}" + ) + def have_proper_bundles(cfg): - """ boolean to indicate we have pickled bundles""" - return (hasattr(cfg, "pickle_bundles_dir") and cfg.pickle_bundles_dir is not None)\ - or (hasattr(cfg, "unpickle_bundles_dir") and cfg.unpickle_bundles_dir is not 
None) + """boolean to indicate we have pickled bundles""" + return ( + hasattr(cfg, "pickle_bundles_dir") and cfg.pickle_bundles_dir is not None + ) or (hasattr(cfg, "unpickle_bundles_dir") and cfg.unpickle_bundles_dir is not None) diff --git a/mpisppy/utils/prox_approx.py b/mpisppy/utils/prox_approx.py index c11edb5bd..9f780f1bb 100644 --- a/mpisppy/utils/prox_approx.py +++ b/mpisppy/utils/prox_approx.py @@ -10,18 +10,25 @@ from math import isclose from pyomo.core.expr.numeric_expr import LinearExpression + # helpers for distance from y = x**2 def _f(val, x_pnt, y_pnt): - return (( val - x_pnt )**2 + ( val**2 - y_pnt )**2)/2. + return ((val - x_pnt) ** 2 + (val**2 - y_pnt) ** 2) / 2.0 + + def _df(val, x_pnt, y_pnt): - #return 2*(val - x_pnt) + 4*(val**2 - y_pnt)*val - return val*(1 - 2*y_pnt + 2*val*val) - x_pnt + # return 2*(val - x_pnt) + 4*(val**2 - y_pnt)*val + return val * (1 - 2 * y_pnt + 2 * val * val) - x_pnt + + def _d2f(val, x_pnt, y_pnt): - return 1 + 6*val*val - 2*y_pnt + return 1 + 6 * val * val - 2 * y_pnt + def _newton_step(val, x_pnt, y_pnt): return val - _df(val, x_pnt, y_pnt) / _d2f(val, x_pnt, y_pnt) + class ProxApproxManager: __slots__ = () @@ -31,10 +38,12 @@ def __new__(cls, xvar, xvarsqrd, xbar, xsqvar_cuts, ndn_i): else: return ProxApproxManagerContinuous(xvar, xvarsqrd, xbar, xsqvar_cuts, ndn_i) + class _ProxApproxManager: - ''' + """ A helper class to manage proximal approximations - ''' + """ + __slots__ = () def __init__(self, xvar, xvarsqrd, xbar, xsqvar_cuts, ndn_i): @@ -57,29 +66,29 @@ def _store_bounds(self): self.ub = self.xvar.ub def add_cut(self, val, persistent_solver=None): - ''' + """ create a cut at val - ''' + """ pass def check_tol_add_cut(self, tolerance, persistent_solver=None): - ''' + """ add a cut if the tolerance is not satified - ''' + """ x_pnt = self.xvar.value x_bar = self.xbar.value y_pnt = self.xvarsqrd.value f_val = x_pnt**2 - #print(f"y-distance: {actual_val - measured_val})") + # print(f"y-distance: {actual_val - measured_val})") if y_pnt is None: self.add_cut(x_pnt, persistent_solver) if not isclose(x_pnt, x_bar, abs_tol=1e-6): - self.add_cut(2*x_bar - x_pnt, persistent_solver) + self.add_cut(2 * x_bar - x_pnt, persistent_solver) return True if (f_val - y_pnt) > tolerance: - ''' + """ In this case, we project the point x_pnt, y_pnt onto the curve y = x**2 by finding the minimum distance between y = x**2 and x_pnt, y_pnt. @@ -87,68 +96,71 @@ def check_tol_add_cut(self, tolerance, persistent_solver=None): This involves solving a cubic equation, so instead we start at x_pnt, y_pnt and run newtons algorithm to get an approximate good-enough solution. 
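To see the Newton helpers above in action, here is a standalone check of the iteration; the helpers are restated so the snippet runs on its own, and the starting point (0.5, 1.0) is arbitrary:

from math import isclose

def _df(val, x_pnt, y_pnt):
    return val * (1 - 2 * y_pnt + 2 * val * val) - x_pnt

def _d2f(val, x_pnt, y_pnt):
    return 1 + 6 * val * val - 2 * y_pnt

def _newton_step(val, x_pnt, y_pnt):
    return val - _df(val, x_pnt, y_pnt) / _d2f(val, x_pnt, y_pnt)

# project the point (x_pnt, y_pnt) toward the curve y = x**2, starting at x_pnt
this_val, x_pnt, y_pnt = 0.5, 0.5, 1.0
next_val = _newton_step(this_val, x_pnt, y_pnt)
while not isclose(this_val, next_val, rel_tol=1e-6, abs_tol=1e-6):
    this_val = next_val
    next_val = _newton_step(this_val, x_pnt, y_pnt)
# next_val is now a stationary point of the squared distance
# ((v - x_pnt)**2 + (v**2 - y_pnt)**2) / 2 -- "good enough", per the docstring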
- ''' + """ this_val = x_pnt - #print(f"initial distance: {_f(this_val, x_pnt, y_pnt)**(0.5)}") - #print(f"this_val: {this_val}") + # print(f"initial distance: {_f(this_val, x_pnt, y_pnt)**(0.5)}") + # print(f"this_val: {this_val}") next_val = _newton_step(this_val, x_pnt, y_pnt) while not isclose(this_val, next_val, rel_tol=1e-6, abs_tol=1e-6): - #print(f"newton step distance: {_f(next_val, x_pnt, y_pnt)**(0.5)}") - #print(f"next_val: {next_val}") + # print(f"newton step distance: {_f(next_val, x_pnt, y_pnt)**(0.5)}") + # print(f"next_val: {next_val}") this_val = next_val next_val = _newton_step(this_val, x_pnt, y_pnt) self.add_cut(next_val, persistent_solver) if not isclose(next_val, x_bar, abs_tol=1e-6): - self.add_cut(2*x_bar - next_val, persistent_solver) + self.add_cut(2 * x_bar - next_val, persistent_solver) return True return False -class ProxApproxManagerContinuous(_ProxApproxManager): +class ProxApproxManagerContinuous(_ProxApproxManager): def add_cut(self, val, persistent_solver=None): - ''' + """ create a cut at val using a taylor approximation - ''' + """ # handled by bound if val == 0: return 0 # f'(a) = 2*val # f(a) - f'(a)a = val*val - 2*val*val - f_p_a = 2*val - const = -(val*val) + f_p_a = 2 * val + const = -(val * val) ## f(x) >= f(a) + f'(a)(x - a) ## f(x) >= f'(a) x + (f(a) - f'(a)a) ## (0 , f(x) - f'(a) x - (f(a) - f'(a)a) , None) - expr = LinearExpression( linear_coefs=[1, -f_p_a], - linear_vars=[self.xvarsqrd, self.xvar], - constant=-const ) + expr = LinearExpression( + linear_coefs=[1, -f_p_a], + linear_vars=[self.xvarsqrd, self.xvar], + constant=-const, + ) self.cuts[self.var_index, self.cut_index] = (0, expr, None) if persistent_solver is not None: persistent_solver.add_constraint(self.cuts[self.var_index, self.cut_index]) self.cut_index += 1 - #print(f"added continuous cut for {self.xvar.name} at {val}, lb: {self.xvar.lb}, ub: {self.xvar.ub}") + # print(f"added continuous cut for {self.xvar.name} at {val}, lb: {self.xvar.lb}, ub: {self.xvar.ub}") return 1 + def _compute_mb(val): ## [(n+1)^2 - n^2] = 2n+1 ## [(n+1) - n] = 1 ## -> m = 2n+1 - m = 2*val+1 + m = 2 * val + 1 ## b = n^2 - (2n+1)*n ## = -n^2 - n ## = -n (n+1) - b = -val*(val+1) - return m,b + b = -val * (val + 1) + return m, b -class ProxApproxManagerDiscrete(_ProxApproxManager): +class ProxApproxManagerDiscrete(_ProxApproxManager): def add_cut(self, val, persistent_solver=None): - ''' + """ create up to two cuts at val, exploiting integrality - ''' + """ val = int(round(val)) ## cuts are indexed by the x-value to the right @@ -158,59 +170,64 @@ def add_cut(self, val, persistent_solver=None): ## So, a cut to the RIGHT of the point 3 is the cut for (3,4), ## which is indexed by 4 - if (*self.var_index, val+1) not in self.cuts and val < self.ub: - m,b = _compute_mb(val) - expr = LinearExpression( linear_coefs=[1, -m], - linear_vars=[self.xvarsqrd, self.xvar], - constant=-b ) - #print(f"adding cut for {(val, val+1)}") - self.cuts[self.var_index, val+1] = (0, expr, None) + if (*self.var_index, val + 1) not in self.cuts and val < self.ub: + m, b = _compute_mb(val) + expr = LinearExpression( + linear_coefs=[1, -m], + linear_vars=[self.xvarsqrd, self.xvar], + constant=-b, + ) + # print(f"adding cut for {(val, val+1)}") + self.cuts[self.var_index, val + 1] = (0, expr, None) if persistent_solver is not None: - persistent_solver.add_constraint(self.cuts[self.var_index, val+1]) + persistent_solver.add_constraint(self.cuts[self.var_index, val + 1]) cuts_added += 1 ## Similarly, a cut to the LEFT of the point 3 is 
the cut for (2,3), ## which is indexed by 3 if (*self.var_index, val) not in self.cuts and val > self.lb: - m,b = _compute_mb(val-1) - expr = LinearExpression( linear_coefs=[1, -m], - linear_vars=[self.xvarsqrd, self.xvar], - constant=-b ) - #print(f"adding cut for {(val-1, val)}") + m, b = _compute_mb(val - 1) + expr = LinearExpression( + linear_coefs=[1, -m], + linear_vars=[self.xvarsqrd, self.xvar], + constant=-b, + ) + # print(f"adding cut for {(val-1, val)}") self.cuts[self.var_index, val] = (0, expr, None) if persistent_solver is not None: persistent_solver.add_constraint(self.cuts[self.var_index, val]) cuts_added += 1 - #print(f"added {cuts_added} integer cut(s) for {self.xvar.name} at {val}, lb: {self.xvar.lb}, ub: {self.xvar.ub}") + # print(f"added {cuts_added} integer cut(s) for {self.xvar.name} at {val}, lb: {self.xvar.lb}, ub: {self.xvar.ub}") return cuts_added -if __name__ == '__main__': + +if __name__ == "__main__": import pyomo.environ as pyo m = pyo.ConcreteModel() bounds = (-100, 100) - m.x = pyo.Var(bounds = bounds) - #m.x = pyo.Var(within=pyo.Integers, bounds = bounds) + m.x = pyo.Var(bounds=bounds) + # m.x = pyo.Var(within=pyo.Integers, bounds = bounds) m.xsqrd = pyo.Var(within=pyo.NonNegativeReals) m.zero = pyo.Param(initialize=-73.2, mutable=True) ## ( x - zero )^2 = x^2 - 2 x zero + zero^2 - m.obj = pyo.Objective( expr = m.xsqrd - 2*m.zero*m.x + m.zero**2 ) + m.obj = pyo.Objective(expr=m.xsqrd - 2 * m.zero * m.x + m.zero**2) m.xsqrdobj = pyo.Constraint([0], pyo.Integers) - s = pyo.SolverFactory('xpress_persistent') + s = pyo.SolverFactory("xpress_persistent") prox_manager = ProxApproxManager(m.x, m.xsqrd, m.zero, m.xsqrdobj, 0) s.set_instance(m) m.pprint() new_cuts = True iter_cnt = 0 while new_cuts: - s.solve(m,tee=False) + s.solve(m, tee=False) print(f"x: {pyo.value(m.x):.2e}, obj: {pyo.value(m.obj):.2e}") new_cuts = prox_manager.check_tol_add_cut(1e-1, persistent_solver=s) - #m.pprint() + # m.pprint() iter_cnt += 1 print(f"cuts: {len(m.xsqrdobj)}, iters: {iter_cnt}") diff --git a/mpisppy/utils/pysp_model/archivereader.py b/mpisppy/utils/pysp_model/archivereader.py index 3e13275fc..d6067ba84 100644 --- a/mpisppy/utils/pysp_model/archivereader.py +++ b/mpisppy/utils/pysp_model/archivereader.py @@ -18,21 +18,29 @@ # This file was originally part of PyUtilib, available: https://github.com/PyUtilib/pyutilib # Copied without modification from pyutilib/misc/archivereader.py -__all__ = ['ArchiveReaderFactory', 'ArchiveReader', 'ZipArchiveReader', - 'TarArchiveReader', 'DirArchiveReader', 'FileArchiveReader', - 'GzipFileArchiveReader', 'BZ2FileArchiveReader'] +__all__ = [ + "ArchiveReaderFactory", + "ArchiveReader", + "ZipArchiveReader", + "TarArchiveReader", + "DirArchiveReader", + "FileArchiveReader", + "GzipFileArchiveReader", + "BZ2FileArchiveReader", +] import os import tempfile import shutil import posixpath from pyomo.common.dependencies import attempt_import + zipfile, zipfile_available = attempt_import("zipfile") tarfile, tarfile_available = attempt_import("tarfile") gzip, gzip_available = attempt_import("gzip") bz2, bz2_available = attempt_import("bz2") -_sep = '/' +_sep = "/" _WindowsError = None try: @@ -58,9 +66,13 @@ def ArchiveReaderFactory(dirname, **kwds): if not os.path.exists(ArchiveReader.normalize_name(dirname)): - raise IOError("Cannot find file or directory `" + dirname + - "'\nPath expanded to: '" + ArchiveReader.normalize_name( - dirname) + "'") + raise IOError( + "Cannot find file or directory `" + + dirname + + "'\nPath expanded to: '" + + 
ArchiveReader.normalize_name(dirname) + + "'" + ) if ArchiveReader.isDir(dirname): return DirArchiveReader(dirname, **kwds) elif zipfile_available and ArchiveReader.isZip(dirname): @@ -74,13 +86,14 @@ def ArchiveReaderFactory(dirname, **kwds): elif ArchiveReader.isFile(dirname): return FileArchiveReader(dirname, **kwds) else: - raise ValueError("ArchiveReaderFactory was given an " - "unrecognized archive type with " - "name '%s'" % dirname) + raise ValueError( + "ArchiveReaderFactory was given an " + "unrecognized archive type with " + "name '%s'" % dirname + ) class ArchiveReader: - @staticmethod def isDir(name): return os.path.isdir(ArchiveReader.normalize_name(name)) @@ -102,11 +115,13 @@ def isTar(name): @staticmethod def isArchivedFile(name): - return (not (ArchiveReader.isDir(name) or ArchiveReader.isFile(name))) and \ - (tarfile_available and ArchiveReader.isTar(name)) or \ - (zipfile_available and ArchiveReader.isZip(name)) or \ - (gzip_available and ArchiveReader.isGzipFile(name)) or \ - (bz2_available and ArchiveReader.isBZ2File(name)) + return ( + (not (ArchiveReader.isDir(name) or ArchiveReader.isFile(name))) + and (tarfile_available and ArchiveReader.isTar(name)) + or (zipfile_available and ArchiveReader.isZip(name)) + or (gzip_available and ArchiveReader.isGzipFile(name)) + or (bz2_available and ArchiveReader.isBZ2File(name)) + ) @staticmethod def isGzipFile(name): @@ -151,26 +166,28 @@ def normalize_name(filename): @staticmethod def _posix_name(filename): if filename is not None: - return filename.replace('\\', '/') + return filename.replace("\\", "/") def __init__(self, name, *args, **kwds): posixabsname = self.normalize_name(name) if not os.path.exists(posixabsname): - raise IOError("cannot find file or directory `" + posixabsname + - "'") + raise IOError("cannot find file or directory `" + posixabsname + "'") self._abspath = os.path.dirname(posixabsname) self._basename = os.path.basename(posixabsname) self._archive_name = posixabsname - subdir = kwds.pop('subdir', None) - if (subdir is not None) and (subdir.strip() == ''): + subdir = kwds.pop("subdir", None) + if (subdir is not None) and (subdir.strip() == ""): subdir = None - maxdepth = kwds.pop('maxdepth', None) - self._filter = kwds.pop('filter', None) + maxdepth = kwds.pop("maxdepth", None) + self._filter = kwds.pop("filter", None) - self._subdir = posixpath.normpath(ArchiveReader._posix_name(subdir))+_sep \ - if (subdir is not None) else None + self._subdir = ( + posixpath.normpath(ArchiveReader._posix_name(subdir)) + _sep + if (subdir is not None) + else None + ) self._maxdepth = maxdepth if (self._maxdepth is not None) and (self._maxdepth < 0): @@ -186,8 +203,7 @@ def __init__(self, name, *args, **kwds): self._init(*args, **kwds) if self._filter is not None: - self._names_list = [_f for _f in self._names_list \ - if not self._filter(_f)] + self._names_list = [_f for _f in self._names_list if not self._filter(_f)] def name(self): return self._archive_name @@ -204,8 +220,7 @@ def __exit__(self, type, value, traceback): def close(self): if self._handler is not None: self._handler.close() - if (self._workdir is not None) and \ - (os.path.exists(self._workdir)): + if (self._workdir is not None) and (os.path.exists(self._workdir)): shutil.rmtree(self._workdir, True) self._workdir = None @@ -214,14 +229,12 @@ def clear_extractions(self): # prior to extracting them and we don't know what they # contained for name in self._extractions: - if os.path.exists(name) and \ - (not os.path.isdir(name)): + if os.path.exists(name) 
and (not os.path.isdir(name)): os.remove(name) self._extractions.clear() # If the directories existed in the workdir, then they will be # taken care of here - if (self._workdir is not None) and \ - (os.path.exists(self._workdir)): + if (self._workdir is not None) and (os.path.exists(self._workdir)): for root, dirs, files in os.walk(self._workdir): while len(dirs): shutil.rmtree(posixpath.join(root, dirs.pop()), True) @@ -239,8 +252,10 @@ def _validate_name(self, name): if (name is None) and (len(self._names_list) == 0): raise KeyError("The archive is empty!") elif (name is None) and (len(self._names_list) > 1): - raise KeyError("A name argument is required when " - "the archive has more than one member") + raise KeyError( + "A name argument is required when " + "the archive has more than one member" + ) elif name is None: name = self._names_list[0] @@ -258,14 +273,17 @@ def _validate_name(self, name): # checkname = name + _sep if (self._maxdepth is None) or ( - checkname.count(_sep) <= self._maxdepth + 1): + checkname.count(_sep) <= self._maxdepth + 1 + ): for othername in self._fulldepth_names_list: if othername.startswith(checkname): self._artificial_dirs.add(name) self._names_list.append(name) return None, name - msg = ("There is no item named '%s' in " - "the archive %s" % (name, self._basename)) + msg = "There is no item named '%s' in " "the archive %s" % ( + name, + self._basename, + ) if self._subdir is not None: msg += ", subdirectory: " + self._subdir raise KeyError(msg) @@ -281,8 +299,9 @@ def _openImp(self, name, *args, **kwds): def extract(self, member=None, path=None, recursive=False, *args, **kwds): absolute_name, relative_name = self._validate_name(member) - dst, children = self._extractImp(absolute_name, relative_name, path, - recursive, *args, **kwds) + dst, children = self._extractImp( + absolute_name, relative_name, path, recursive, *args, **kwds + ) self._extractions.add(dst) if not recursive: assert len(children) == 0 @@ -290,16 +309,10 @@ def extract(self, member=None, path=None, recursive=False, *args, **kwds): self._extractions.update(children) return dst - def _extractImp(self, absolute_name, relative_name, path, recursive, *args, - **kwds): + def _extractImp(self, absolute_name, relative_name, path, recursive, *args, **kwds): raise NotImplementedError("This method has not been " "implemented") - def extractall(self, - path=None, - members=None, - recursive=False, - *args, - **kwds): + def extractall(self, path=None, members=None, recursive=False, *args, **kwds): names = None if members is not None: names = set(members) @@ -311,8 +324,9 @@ def extractall(self, dsts = [] while names: absolute_name, relative_name = self._validate_name(names.pop()) - dst, children = self._extractImp(absolute_name, relative_name, path, - recursive, *args, **kwds) + dst, children = self._extractImp( + absolute_name, relative_name, path, recursive, *args, **kwds + ) if not recursive: assert len(children) == 0 dsts.append(dst) @@ -329,13 +343,11 @@ def extractall(self, # raising an exception @staticmethod def _copytree(src, dst, ignores=None, maxdepth=None): - assert os.path.exists(src) and os.path.isdir(src) if not os.path.exists(dst): os.makedirs(dst) if (maxdepth is None) or (maxdepth > 0): - if maxdepth is not None: maxdepth -= 1 @@ -352,7 +364,8 @@ def _copytree(src, dst, ignores=None, maxdepth=None): dstname = posixpath.join(dst, name) if os.path.isdir(srcname): ArchiveReader._copytree( - srcname, dstname, ignores=ignores, maxdepth=maxdepth) + srcname, dstname, ignores=ignores, 
maxdepth=maxdepth + ) else: shutil.copy2(srcname, dstname) try: @@ -363,28 +376,28 @@ def _copytree(src, dst, ignores=None, maxdepth=None): class _ziptar_base(ArchiveReader): - @staticmethod def _fixnames(names, subdir, maxdepth): names_list = [posixpath.normpath(name) for name in names] subdir_depth = 0 if subdir is not None: - names_list = [name for name in names_list \ - if name.startswith(subdir)] + names_list = [name for name in names_list if name.startswith(subdir)] subdir_depth = subdir.count(_sep) fulldepth_names_list = list(names_list) if maxdepth is not None: - names_list = [name for name in fulldepth_names_list \ - if (name.count(_sep) - subdir_depth) <= maxdepth] + names_list = [ + name + for name in fulldepth_names_list + if (name.count(_sep) - subdir_depth) <= maxdepth + ] if subdir is not None: - names_list = [name.replace(subdir,'') \ - for name in names_list] - fulldepth_names_list = [name.replace(subdir,'') \ - for name in fulldepth_names_list] + names_list = [name.replace(subdir, "") for name in names_list] + fulldepth_names_list = [ + name.replace(subdir, "") for name in fulldepth_names_list + ] return names_list, fulldepth_names_list, subdir_depth def _extractImp(self, absolute_name, relative_name, path, recursive): - use_handler = True if absolute_name is None: # This case implies that this was an artificially @@ -392,8 +405,11 @@ def _extractImp(self, absolute_name, relative_name, path, recursive): # archive even though it technically exists # (a rare but possible edge case) use_handler = False - absolute_name = relative_name if ( - self._subdir is None) else self._subdir + relative_name + absolute_name = ( + relative_name + if (self._subdir is None) + else self._subdir + relative_name + ) tmp_dst = posixpath.join(self._workdir, absolute_name) @@ -408,7 +424,6 @@ def _extractImp(self, absolute_name, relative_name, path, recursive): dst = tmp_dst if path is not None: - dst = posixpath.join(path, relative_name) if os.path.isdir(tmp_dst): # updated the timestamp if it exists, otherwise @@ -424,15 +439,18 @@ def _extractImp(self, absolute_name, relative_name, path, recursive): children = [] if os.path.isdir(dst) and recursive: - new_names = [relname for relname in self._names_list \ - if relname.startswith(relative_name)] + new_names = [ + relname + for relname in self._names_list + if relname.startswith(relative_name) + ] if relative_name in new_names: new_names.remove(relative_name) for childname in new_names: - absolute_childname, relative_childname = self._validate_name( - childname) + absolute_childname, relative_childname = self._validate_name(childname) childdst, recursives = self._extractImp( - absolute_childname, relative_childname, path, False) + absolute_childname, relative_childname, path, False + ) assert len(recursives) == 0 children.append(childdst) @@ -440,21 +458,22 @@ def _extractImp(self, absolute_name, relative_name, path, recursive): class ZipArchiveReader(_ziptar_base): - def _init(self, *args, **kwds): if zipfile is None: raise ImportError("zipfile support is disabled") - assert (self._abspath is not None) - assert (self._basename is not None) - assert (self._archive_name is not None) + assert self._abspath is not None + assert self._basename is not None + assert self._archive_name is not None if not self.isZip(self._archive_name): - raise TypeError("Unrecognized zipfile format for file: %s" % - (self._archive_name)) + raise TypeError( + "Unrecognized zipfile format for file: %s" % (self._archive_name) + ) self._handler = 
zipfile.ZipFile(self._archive_name, *args, **kwds) - self._names_list, self._fulldepth_names_list, self._subdir_depth = \ + self._names_list, self._fulldepth_names_list, self._subdir_depth = ( self._fixnames(self._handler.namelist(), self._subdir, self._maxdepth) + ) def _openImp(self, absolute_name, relative_name, *args, **kwds): f = None @@ -465,27 +484,30 @@ def _openImp(self, absolute_name, relative_name, *args, **kwds): except KeyError: # when this method is called we have already verified the name # existed in the list, so this must be a directory - raise IOError("Failed to open with zipfile, this must be a " - "directory: %s" % (absolute_name)) + raise IOError( + "Failed to open with zipfile, this must be a " + "directory: %s" % (absolute_name) + ) return f class TarArchiveReader(_ziptar_base): - def _init(self, *args, **kwds): if not tarfile_available: raise ImportError("tarfile support is disabled") - assert (self._abspath is not None) - assert (self._basename is not None) - assert (self._archive_name is not None) + assert self._abspath is not None + assert self._basename is not None + assert self._archive_name is not None if not self.isTar(self._archive_name): - raise TypeError("Unrecognized tarfile format for file: %s" % - (self._archive_name)) + raise TypeError( + "Unrecognized tarfile format for file: %s" % (self._archive_name) + ) self._handler = tarfile.open(self._archive_name, *args, **kwds) - self._names_list, self._fulldepth_names_list, self._subdir_depth = \ + self._names_list, self._fulldepth_names_list, self._subdir_depth = ( self._fixnames(self._handler.getnames(), self._subdir, self._maxdepth) + ) def _openImp(self, absolute_name, relative_name, *args, **kwds): f = None @@ -494,39 +516,44 @@ def _openImp(self, absolute_name, relative_name, *args, **kwds): if f is None: # when this method is called we have already verified the name # existed in the list, so this must be a directory - raise IOError("Failed to open with tarfile, this must be a " - "directory: %s" % (absolute_name)) + raise IOError( + "Failed to open with tarfile, this must be a " + "directory: %s" % (absolute_name) + ) return f class DirArchiveReader(ArchiveReader): - def _init(self, *args, **kwds): - assert (self._abspath is not None) - assert (self._basename is not None) - assert (self._archive_name is not None) + assert self._abspath is not None + assert self._basename is not None + assert self._archive_name is not None if kwds: - raise ValueError("Unexpected keyword options found " - "while initializing '%s':\n\t%s" % - (type(self).__name__, - ','.join(sorted(kwds.keys())))) + raise ValueError( + "Unexpected keyword options found " + "while initializing '%s':\n\t%s" + % (type(self).__name__, ",".join(sorted(kwds.keys()))) + ) if args: - raise ValueError("Unexpected arguments found " - "while initializing '%s':\n\t%s" % - (type(self).__name__, ','.join(args))) + raise ValueError( + "Unexpected arguments found " + "while initializing '%s':\n\t%s" % (type(self).__name__, ",".join(args)) + ) if not self.isDir(self._archive_name): - raise TypeError("Path not recognized as a directory: %s" % - (self._archive_name)) + raise TypeError( + "Path not recognized as a directory: %s" % (self._archive_name) + ) rootdir = self._archive_name if self._subdir is not None: rootdir = posixpath.join(rootdir, self._subdir) if not os.path.exists(rootdir): raise IOError( - "Subdirectory '%s' does not exists in root directory: %s" % - (self._subdir, self._archive_name)) + "Subdirectory '%s' does not exists in root directory: 
%s" + % (self._subdir, self._archive_name) + ) self._names_list = self._walk(rootdir, maxdepth=self._maxdepth + 1) else: self._names_list = self._walk(rootdir, maxdepth=self._maxdepth) @@ -539,8 +566,8 @@ def _walk(rootdir, maxdepth=None): prefix = posixpath.relpath(ArchiveReader._posix_name(root), rootdir) if prefix.endswith(_sep): prefix = prefix[:-1] - if prefix == '.': - prefix = '' + if prefix == ".": + prefix = "" for dname in dirs: names_list.append(posixpath.join(prefix, dname)) if maxdepth is not None and prefix.count(_sep) >= maxdepth: @@ -550,7 +577,6 @@ def _walk(rootdir, maxdepth=None): return names_list def _extractImp(self, absolute_name, relative_name, path, recursive): - assert absolute_name is not None if path is not None: @@ -584,32 +610,34 @@ def _extractImp(self, absolute_name, relative_name, path, recursive): def _openImp(self, absolute_name, relative_name, *args, **kwds): assert absolute_name is not None return open( - posixpath.join(self._archive_name, absolute_name), 'rb', *args, ** - kwds) + posixpath.join(self._archive_name, absolute_name), "rb", *args, **kwds + ) class FileArchiveReader(ArchiveReader): - _handler_class = open def _extract_name(self, name): return name def _init(self): - assert (self._abspath is not None) - assert (self._basename is not None) - assert (self._archive_name is not None) + assert self._abspath is not None + assert self._basename is not None + assert self._archive_name is not None if self._subdir is not None: - raise ValueError("'subdir' keyword option is not handled by " - "'%s'" % (type(self).__name__)) + raise ValueError( + "'subdir' keyword option is not handled by " + "'%s'" % (type(self).__name__) + ) if self._maxdepth is not None: - raise ValueError("'maxdepth' keyword option is not handled by " - "'%s'" % (type(self).__name__)) + raise ValueError( + "'maxdepth' keyword option is not handled by " + "'%s'" % (type(self).__name__) + ) if not self.isFile(self._archive_name): - raise TypeError("Path does not point to a file: %s" % - (self._archive_name)) + raise TypeError("Path does not point to a file: %s" % (self._archive_name)) extract_name = self._extract_name(self._basename) if extract_name is not None: self._names_list = [extract_name] @@ -624,14 +652,16 @@ def _extractImp(self, absolute_name, relative_name, path, recursive): assert absolute_name == self._extract_name assert relative_name == self._extract_name if recursive: - raise ValueError("Recursive extraction does not make " - "sense for compressed file archive types") + raise ValueError( + "Recursive extraction does not make " + "sense for compressed file archive types" + ) if path is not None: dst = posixpath.join(path, relative_name) else: dst = posixpath.join(self._workdir, absolute_name) - with open(dst, 'wb') as dstf: - handler = self._handler_class(self._archive_name, 'rb') + with open(dst, "wb") as dstf: + handler = self._handler_class(self._archive_name, "rb") shutil.copyfileobj(handler, dstf) handler.close() @@ -639,11 +669,10 @@ def _extractImp(self, absolute_name, relative_name, path, recursive): def _openImp(self, absolute_name, relative_name, *args, **kwds): assert absolute_name == self._extract_name - return self._handler_class(self._archive_name, 'rb', *args, **kwds) + return self._handler_class(self._archive_name, "rb", *args, **kwds) class GzipFileArchiveReader(FileArchiveReader): - _handler_class = gzip.GzipFile if gzip_available else None def _extract_name(self, name): @@ -657,11 +686,12 @@ def _extract_name(self, name): def __init__(self, *args, 
**kwds): if not gzip_available: raise ImportError("gzip support is disabled") - self._suffix = kwds.pop('suffix', '.gz') + self._suffix = kwds.pop("suffix", ".gz") super(GzipFileArchiveReader, self).__init__(*args, **kwds) if not self.isGzipFile(self._archive_name): - raise TypeError("Unrecognized gzip format for file: %s" % - (self._archive_name)) + raise TypeError( + "Unrecognized gzip format for file: %s" % (self._archive_name) + ) def _extractImp(self, absolute_name, relative_name, path, recursive): if self._extract_name is None: @@ -669,30 +699,32 @@ def _extractImp(self, absolute_name, relative_name, path, recursive): "Extraction disabled. File suffix %s does not " "match expected suffix for compression type %s. " "The default suffix can be changed by passing " - "'suffix=.' into the ArchiveReader constructor." % ( - os.path.splitext(self._basename)[1], self._suffix)) - return FileArchiveReader._extractImp(self, absolute_name, relative_name, - path, recursive) + "'suffix=.' into the ArchiveReader constructor." + % (os.path.splitext(self._basename)[1], self._suffix) + ) + return FileArchiveReader._extractImp( + self, absolute_name, relative_name, path, recursive + ) class BZ2FileArchiveReader(FileArchiveReader): - _handler_class = bz2.BZ2File if bz2_available else None def _extract_name(self, name): # see the man page for bzip2 basename, ext = os.path.splitext(name) - if ext in ('.bz2', '.bz'): + if ext in (".bz2", ".bz"): return basename - elif ext in ('.tbz2', '.tbz'): - return basename + '.tar' + elif ext in (".tbz2", ".tbz"): + return basename + ".tar" else: - return name + '.out' + return name + ".out" def __init__(self, *args, **kwds): if not bz2_available: raise ImportError("bz2 support is disabled") super(BZ2FileArchiveReader, self).__init__(*args, **kwds) if not self.isBZ2File(self._archive_name): - raise TypeError("Unrecognized bzip2 format for file: %s" % - (self._archive_name)) + raise TypeError( + "Unrecognized bzip2 format for file: %s" % (self._archive_name) + ) diff --git a/mpisppy/utils/pysp_model/instance_factory.py b/mpisppy/utils/pysp_model/instance_factory.py index 8f7b229d4..e44eb070c 100644 --- a/mpisppy/utils/pysp_model/instance_factory.py +++ b/mpisppy/utils/pysp_model/instance_factory.py @@ -10,8 +10,8 @@ # # Pyomo: Python Optimization Modeling Objects # Copyright 2017 National Technology and Engineering Solutions of Sandia, LLC -# Under the terms of Contract DE-NA0003525 with National Technology and -# Engineering Solutions of Sandia, LLC, the U.S. Government retains certain +# Under the terms of Contract DE-NA0003525 with National Technology and +# Engineering Solutions of Sandia, LLC, the U.S. Government retains certain # rights in this software. # This software is distributed under the 3-clause BSD License. 
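Before the diff moves fully into instance_factory.py: the archivereader module reformatted above is normally driven through ArchiveReaderFactory, which dispatches on the path type. A hypothetical usage sketch follows; the file and subdirectory names are invented, and the context-manager form assumes the usual PyUtilib-style __enter__/__exit__ pairing (only __exit__ is visible in the diff).

from mpisppy.utils.pysp_model.archivereader import ArchiveReaderFactory

# Returns a DirArchiveReader, ZipArchiveReader, TarArchiveReader,
# GzipFileArchiveReader, BZ2FileArchiveReader, or FileArchiveReader,
# depending on what "scenarios.zip" turns out to be.
with ArchiveReaderFactory("scenarios.zip", subdir="ScenarioData") as archive:
    # extract one member to the reader's workdir and get the extracted path back
    dat_path = archive.extract("ScenarioStructure.dat")
    # or unpack everything below the subdir into a chosen directory
    archive.extractall(path="/tmp/unpacked")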
# ___________________________________________________________________________ @@ -19,7 +19,7 @@ # This file was originally part of PySP and Pyomo, available: https://github.com/Pyomo/pysp # Copied with modification from pysp/scenariotree/instance_factory.py -__all__ = ('ScenarioTreeInstanceFactory',) +__all__ = ("ScenarioTreeInstanceFactory",) import os import posixpath @@ -28,32 +28,26 @@ import copy import logging -from .archivereader import (ArchiveReaderFactory, - ArchiveReader) +from .archivereader import ArchiveReaderFactory, ArchiveReader from pyomo.dataportal import DataPortal -from pyomo.core import (Block, - AbstractModel) +from pyomo.core import Block, AbstractModel from pyomo.core.base.block import _BlockData from pyomo.common.dependencies import yaml, yaml_available, yaml_load_args from pyomo.common.gc_manager import PauseGC from pyomo.common.fileutils import import_file -from .tree_structure_model import \ - (CreateAbstractScenarioTreeModel, - ScenarioTreeModelFromNetworkX) -from .tree_structure import \ - ScenarioTree - -from pyomo.common.dependencies import ( - networkx, networkx_available as has_networkx +from .tree_structure_model import ( + CreateAbstractScenarioTreeModel, + ScenarioTreeModelFromNetworkX, ) +from .tree_structure import ScenarioTree + +from pyomo.common.dependencies import networkx, networkx_available as has_networkx logger = logging.getLogger("mpisppy.utils.pysp_model") -def _extract_pathspec( - pathspec, - default_basename, - archives=None): + +def _extract_pathspec(pathspec, default_basename, archives=None): """Obtain a file location from a pathspec. Extracts a file location from the provided input @@ -91,8 +85,9 @@ def _extract_pathspec( the next time it is called. """ - logger.debug("expanding pathspec %s to %s" - % (pathspec, os.path.expanduser(pathspec))) + logger.debug( + "expanding pathspec %s to %s" % (pathspec, os.path.expanduser(pathspec)) + ) pathspec = os.path.expanduser(pathspec) if archives is None: @@ -107,58 +102,65 @@ def _extract_pathspec( if not os.path.exists(pathspec): logger.debug("pathspec does not exist, normalizing name") - (normalized_location, _, archive_subdir) = \ - ArchiveReader.normalize_name(pathspec).rpartition(',') + (normalized_location, _, archive_subdir) = ArchiveReader.normalize_name( + pathspec + ).rpartition(",") if default_basename is not None: extension = os.path.splitext(default_basename)[1].strip() - assert extension != '' + assert extension != "" if archive_subdir.endswith(extension): - logger.debug("recognized extension type '%s' appears " - "after comma, treating as file" % (extension)) + logger.debug( + "recognized extension type '%s' appears " + "after comma, treating as file" % (extension) + ) basename = os.path.basename(archive_subdir) archive_subdir = os.path.dirname(archive_subdir).strip() - if archive_subdir == '': + if archive_subdir == "": archive_subdir = None else: logger.debug("pathspec exists, normalizing name") - normalized_location = \ - ArchiveReader.normalize_name(pathspec) + normalized_location = ArchiveReader.normalize_name(pathspec) - logger.debug("normalized pathspec: (%s, %s, %s)" - % (normalized_location, archive_subdir, basename)) + logger.debug( + "normalized pathspec: (%s, %s, %s)" + % (normalized_location, archive_subdir, basename) + ) if ArchiveReader.isArchivedFile(normalized_location): logger.debug("pathspec defines a recognized archive type") - for prev_archive_inputs, prev_archive, prev_unarchived_dir \ - in archives: - if (normalized_location == \ - prev_archive_inputs[0]) 
and \ - ((prev_archive_inputs[1] is None) or \ - ((archive_subdir is not None) and \ - (archive_subdir.startswith(prev_archive_inputs[1]+'/')))): + for prev_archive_inputs, prev_archive, prev_unarchived_dir in archives: + if (normalized_location == prev_archive_inputs[0]) and ( + (prev_archive_inputs[1] is None) + or ( + (archive_subdir is not None) + and (archive_subdir.startswith(prev_archive_inputs[1] + "/")) + ) + ): logger.debug("pathspec matches previous archive") unarchived_dir = prev_unarchived_dir if archive_subdir is not None: if prev_archive_inputs[1] is not None: unarchived_dir = posixpath.join( unarchived_dir, - os.path.relpath(archive_subdir, - start=prev_archive_inputs[1])) + os.path.relpath( + archive_subdir, start=prev_archive_inputs[1] + ), + ) else: - unarchived_dir = posixpath.join(unarchived_dir, - archive_subdir) + unarchived_dir = posixpath.join(unarchived_dir, archive_subdir) logger.debug("unarchived directory: %s" % (unarchived_dir)) break - else: # if no break occurs in previous for-loop - archive = ArchiveReaderFactory( - normalized_location, - subdir=archive_subdir) + else: # if no break occurs in previous for-loop + archive = ArchiveReaderFactory(normalized_location, subdir=archive_subdir) unarchived_dir = archive.normalize_name( - tempfile.mkdtemp(prefix='pysp_unarchived')) - archives.append(((normalized_location, archive_subdir), - archive, - unarchived_dir)) - logger.debug("New archive opened. Temporary archive " - "extraction directory: %s" % (unarchived_dir)) + tempfile.mkdtemp(prefix="pysp_unarchived") + ) + archives.append( + ((normalized_location, archive_subdir), archive, unarchived_dir) + ) + logger.debug( + "New archive opened. Temporary archive " + "extraction directory: %s" % (unarchived_dir) + ) archive.extractall(path=unarchived_dir) if basename is not None: filename = posixpath.join(unarchived_dir, basename) @@ -170,8 +172,7 @@ def _extract_pathspec( else: logger.debug("pathspec defines a standard path") if archive_subdir is not None: - unarchived_dir = posixpath.join(normalized_location, - archive_subdir) + unarchived_dir = posixpath.join(normalized_location, archive_subdir) else: unarchived_dir = normalized_location @@ -187,6 +188,7 @@ def _extract_pathspec( return filename, archives + def _find_reference_model_or_callback(src): """Tries to find a single reference model or callback for generating scenario models.""" @@ -196,10 +198,12 @@ def _find_reference_model_or_callback(src): dir_module = dir(module) if "pysp_instance_creation_callback" in dir_module: callback = getattr(module, "pysp_instance_creation_callback") - if not hasattr(callback,"__call__"): - raise TypeError("'pysp_instance_creation_callback' " - "object found in source '%s' is not " - "callable" % (src)) + if not hasattr(callback, "__call__"): + raise TypeError( + "'pysp_instance_creation_callback' " + "object found in source '%s' is not " + "callable" % (src) + ) else: matching_names = [] for attr_name in dir_module: @@ -208,14 +212,16 @@ def _find_reference_model_or_callback(src): reference_model = obj matching_names.append(attr_name) if len(matching_names) > 1: - raise ValueError("Multiple objects found in source '%s' " - "that could be a reference model. Make " - "sure there is only one Pyomo model in " - "the source file. Object names: %s" - % (str(matching_names))) + raise ValueError( + "Multiple objects found in source '%s' " + "that could be a reference model. Make " + "sure there is only one Pyomo model in " + "the source file. 
Object names: %s" % (str(matching_names)) + ) return module, reference_model, callback + def _find_scenariotree(src=None, module=None): """Tries to find a single reference model or callback for generating scenario models.""" @@ -230,14 +236,15 @@ def _find_scenariotree(src=None, module=None): dir_module = dir(module) if "pysp_scenario_tree_model_callback" in dir_module: callback = getattr(module, "pysp_scenario_tree_model_callback") - if not hasattr(callback,"__call__"): - raise TypeError("'pysp_scenario_tree_model_callback' " - "object found in source '%s' is not " - "callable" % (src)) + if not hasattr(callback, "__call__"): + raise TypeError( + "'pysp_scenario_tree_model_callback' " + "object found in source '%s' is not " + "callable" % (src) + ) attrs = [(None, callback())] else: - attrs = [(attr_name,getattr(module, attr_name)) - for attr_name in dir_module] + attrs = [(attr_name, getattr(module, attr_name)) for attr_name in dir_module] matching_names = [] for attr_name, obj in attrs: if isinstance(obj, ScenarioTree): @@ -246,27 +253,24 @@ def _find_scenariotree(src=None, module=None): elif isinstance(obj, (_BlockData, Block)): scenario_tree_model = obj matching_names.append(attr_name) - elif has_networkx and \ - isinstance(obj, networkx.DiGraph): + elif has_networkx and isinstance(obj, networkx.DiGraph): scenario_tree_model = obj matching_names.append(attr_name) if len(matching_names) > 1: - raise ValueError("Multiple objects found in source '%s' " - "that could act as a scenario tree " - "specification. Make sure there is only " - "one Pyomo model, ScenarioTree, or " - "networkx.DiGraph object in the source " - "file. Object names: %s" - % (str(matching_names))) + raise ValueError( + "Multiple objects found in source '%s' " + "that could act as a scenario tree " + "specification. Make sure there is only " + "one Pyomo model, ScenarioTree, or " + "networkx.DiGraph object in the source " + "file. Object names: %s" % (str(matching_names)) + ) return module, scenario_tree_object, scenario_tree_model -class ScenarioTreeInstanceFactory: - def __init__(self, - model, - scenario_tree, - data=None): +class ScenarioTreeInstanceFactory: + def __init__(self, model, scenario_tree, data=None): """Class to help manage construction of scenario tree models. 
This class is designed to help manage the various input formats @@ -327,36 +331,32 @@ def __init__(self, self._closed = False def _init(self, model, scenario_tree, data): - self._model_filename = None self._model_module = None self._model_object = None self._model_callback = None if isinstance(model, str): logger.debug("A model filename was provided.") - self._model_filename, self._archives = \ - _extract_pathspec(model, - "ReferenceModel.py", - archives=self._archives) + self._model_filename, self._archives = _extract_pathspec( + model, "ReferenceModel.py", archives=self._archives + ) if not os.path.exists(self._model_filename): - logger.error("Failed to extract reference model python file " - "from path specification: %s" - % (model)) - raise IOError("path does not exist: %s" - % (self._model_filename)) + logger.error( + "Failed to extract reference model python file " + "from path specification: %s" % (model) + ) + raise IOError("path does not exist: %s" % (self._model_filename)) assert self._model_filename is not None assert self._model_filename.endswith(".py") - (self._model_module, - self._model_object, - self._model_callback) = \ + (self._model_module, self._model_object, self._model_callback) = ( _find_reference_model_or_callback(self._model_filename) - if (self._model_object is None) and \ - (self._model_callback is None): + ) + if (self._model_object is None) and (self._model_callback is None): raise AttributeError( "No reference Pyomo model or " "'pysp_instance_creation_callback' " - "function object found in src: %s" - % (self._model_filename)) + "function object found in src: %s" % (self._model_filename) + ) elif hasattr(model, "__call__"): logger.debug("A model callback function was provided.") self._model_callback = model @@ -365,7 +365,8 @@ def _init(self, model, scenario_tree, data): raise TypeError( "model argument object has incorrect type: %s. " "Must be a string type, a callback, or a Pyomo model." 
- % (type(model))) + % (type(model)) + ) logger.debug("A model object was provided.") self._model_object = model @@ -376,150 +377,166 @@ def _init(self, model, scenario_tree, data): for scenario in scenario_tree.scenarios: if scenario.instance is not None: raise ValueError( - "The scenario tree can not be linked with instances") + "The scenario tree can not be linked with instances" + ) if hasattr(scenario_tree, "_scenario_instance_factory"): del scenario_tree._scenario_instance_factory self._scenario_tree = scenario_tree - elif has_networkx and \ - isinstance(scenario_tree, networkx.DiGraph): + elif has_networkx and isinstance(scenario_tree, networkx.DiGraph): self._scenario_tree_model = scenario_tree elif isinstance(scenario_tree, str): - logger.debug("scenario tree input is a string, attempting " - "to load file specification: %s" - % (scenario_tree)) + logger.debug( + "scenario tree input is a string, attempting " + "to load file specification: %s" % (scenario_tree) + ) self._scenario_tree_filename = None if not scenario_tree.endswith(".py"): - self._scenario_tree_filename, self._archives = \ - _extract_pathspec(scenario_tree, - "ScenarioStructure.dat", - archives=self._archives) + self._scenario_tree_filename, self._archives = _extract_pathspec( + scenario_tree, "ScenarioStructure.dat", archives=self._archives + ) if not os.path.exists(self._scenario_tree_filename): - logger.debug("Failed to extract scenario tree structure " - ".dat file from path specification: %s" - % (scenario_tree)) + logger.debug( + "Failed to extract scenario tree structure " + ".dat file from path specification: %s" % (scenario_tree) + ) self._scenario_tree_filename = None if self._scenario_tree_filename is None: - self._scenario_tree_filename, self._archives = \ - _extract_pathspec(scenario_tree, - "ScenarioStructure.py", - archives=self._archives) + self._scenario_tree_filename, self._archives = _extract_pathspec( + scenario_tree, "ScenarioStructure.py", archives=self._archives + ) if not os.path.exists(self._scenario_tree_filename): - logger.debug("Failed to locate scenario tree structure " - ".py file with path specification: %s" - % (scenario_tree)) + logger.debug( + "Failed to locate scenario tree structure " + ".py file with path specification: %s" % (scenario_tree) + ) self._scenario_tree_filename = None if self._scenario_tree_filename is None: - raise ValueError("Failed to extract scenario tree structure " - "file with .dat or .py extension from path " - "specification: %s" % (scenario_tree)) + raise ValueError( + "Failed to extract scenario tree structure " + "file with .dat or .py extension from path " + "specification: %s" % (scenario_tree) + ) elif self._scenario_tree_filename.endswith(".py"): if self._scenario_tree_filename == self._model_filename: # try not to clobber the model import - (self._scenario_tree_module, - self._scenario_tree, - self._scenario_tree_model) = \ - _find_scenariotree(module=self._model_module) + ( + self._scenario_tree_module, + self._scenario_tree, + self._scenario_tree_model, + ) = _find_scenariotree(module=self._model_module) else: - (self._scenario_tree_module, - self._scenario_tree, - self._scenario_tree_model) = \ - _find_scenariotree(src=self._scenario_tree_filename) - if (self._scenario_tree is None) and \ - (self._scenario_tree_model is None): + ( + self._scenario_tree_module, + self._scenario_tree, + self._scenario_tree_model, + ) = _find_scenariotree(src=self._scenario_tree_filename) + if (self._scenario_tree is None) and ( + self._scenario_tree_model is None + 
): raise AttributeError( "No scenario tree or " "'pysp_scenario_tree_model_callback' " - "function found in src: %s" - % (self._scenario_tree_filename)) + "function found in src: %s" % (self._scenario_tree_filename) + ) elif self._scenario_tree_filename.endswith(".dat"): - self._scenario_tree_model = \ - CreateAbstractScenarioTreeModel().\ - create_instance(filename=self._scenario_tree_filename) + self._scenario_tree_model = ( + CreateAbstractScenarioTreeModel().create_instance( + filename=self._scenario_tree_filename + ) + ) else: assert False elif scenario_tree is None: if self._model_module is not None: self._scenario_tree_filename = self._model_filename - (self._scenario_tree_module, - self._scenario_tree, - self._scenario_tree_model) = \ - _find_scenariotree(module=self._model_module) - if (self._scenario_tree is None) and \ - (self._scenario_tree_model is None): + ( + self._scenario_tree_module, + self._scenario_tree, + self._scenario_tree_model, + ) = _find_scenariotree(module=self._model_module) + if (self._scenario_tree is None) and ( + self._scenario_tree_model is None + ): raise ValueError( "No input was provided for the scenario tree " "and no callback or scenario tree object was " - "found with the model") + "found with the model" + ) else: raise ValueError( "No input was provided for the scenario tree " "but there is no module to search for a " "'pysp_scenario_tree_model_callback' function " - "or a ScenarioTree object.") + "or a ScenarioTree object." + ) else: self._scenario_tree_model = scenario_tree if self._scenario_tree is None: - if (not isinstance(self._scenario_tree_model, - (_BlockData, Block))) and \ - ((not has_networkx) or \ - (not isinstance(self._scenario_tree_model, - networkx.DiGraph))): + if (not isinstance(self._scenario_tree_model, (_BlockData, Block))) and ( + (not has_networkx) + or (not isinstance(self._scenario_tree_model, networkx.DiGraph)) + ): raise TypeError( "scenario tree model object has incorrect type: %s. " "Must be a string type, Pyomo model, or a " - "networkx.DiGraph object." % (type(scenario_tree))) + "networkx.DiGraph object." % (type(scenario_tree)) + ) if isinstance(self._scenario_tree_model, (_BlockData, Block)): if not self._scenario_tree_model.is_constructed(): - raise ValueError( - "scenario tree model is not constructed") + raise ValueError("scenario tree model is not constructed") self._data_directory = None if data is None: if self.scenario_tree_directory() is not None: - logger.debug("data directory is set to the scenario tree " - "directory: %s" - % (self.scenario_tree_directory())) + logger.debug( + "data directory is set to the scenario tree " + "directory: %s" % (self.scenario_tree_directory()) + ) self._data_directory = self.scenario_tree_directory() elif self.model_directory() is not None: - logger.debug("data directory is set to the reference model " - "directory: %s" - % (self.model_directory())) + logger.debug( + "data directory is set to the reference model " + "directory: %s" % (self.model_directory()) + ) self._data_directory = self.model_directory() else: - if (self._model_callback is None) and \ - isinstance(self._model_object, AbstractModel) and \ - (not self._model_object.is_constructed()): + if ( + (self._model_callback is None) + and isinstance(self._model_object, AbstractModel) + and (not self._model_object.is_constructed()) + ): raise ValueError( "A data location is required since no model " "callback was provided and no other location could " - "be inferred.") + "be inferred." 
+ ) logger.debug("no data directory is required") else: - logger.debug("data location is provided, attempting " - "to load specification: %s" - % (data)) - self._data_directory, self._archives = \ - _extract_pathspec(data, - None, - archives=self._archives) + logger.debug( + "data location is provided, attempting " + "to load specification: %s" % (data) + ) + self._data_directory, self._archives = _extract_pathspec( + data, None, archives=self._archives + ) if not os.path.exists(self._data_directory): - logger.error("Failed to extract data directory " - "from path specification: %s" - % (data)) - raise IOError("path does not exist: %s" - % (self._data_directory)) + logger.error( + "Failed to extract data directory " + "from path specification: %s" % (data) + ) + raise IOError("path does not exist: %s" % (self._data_directory)) def __getstate__(self): self.close() raise NotImplementedError("Do not deepcopy or serialize this class") - def __setstate__(self,d): + def __setstate__(self, d): self.close() raise NotImplementedError("Do not deepcopy or serialize this class") def close(self): - for _,archive,tmpdir in self._archives: + for _, archive, tmpdir in self._archives: if os.path.exists(tmpdir): shutil.rmtree(tmpdir, True) archive.close() @@ -555,17 +572,21 @@ def data_directory(self): # # construct a scenario instance - just like it sounds! # - def construct_scenario_instance(self, - scenario_name, - scenario_tree, - profile_memory=False, - output_instance_construction_time=False, - compile_instance=False, - verbose=False): + def construct_scenario_instance( + self, + scenario_name, + scenario_tree, + profile_memory=False, + output_instance_construction_time=False, + compile_instance=False, + verbose=False, + ): assert not self._closed if not scenario_tree.contains_scenario(scenario_name): - raise ValueError("ScenarioTree does not contain scenario " - "with name %s." % (scenario_name)) + raise ValueError( + "ScenarioTree does not contain scenario " + "with name %s." % (scenario_name) + ) scenario = scenario_tree.get_scenario(scenario_name) node_name_list = [n._name for n in scenario._node_list] @@ -576,15 +597,14 @@ def construct_scenario_instance(self, scenario_instance = None try: - if self._model_callback is not None: - assert self._model_object is None try: _scenario_tree_arg = None # new callback signature - if (self._scenario_tree_filename is not None) and \ - self._scenario_tree_filename.endswith('.dat'): + if ( + self._scenario_tree_filename is not None + ) and self._scenario_tree_filename.endswith(".dat"): # we started with a .dat file, so # send the PySP scenario tree _scenario_tree_arg = scenario_tree @@ -596,32 +616,33 @@ def construct_scenario_instance(self, else: # send the PySP scenario tree _scenario_tree_arg = scenario_tree - scenario_instance = self._model_callback(_scenario_tree_arg, - scenario_name, - node_name_list) + scenario_instance = self._model_callback( + _scenario_tree_arg, scenario_name, node_name_list + ) except TypeError: # old callback signature # TODO: - #logger.warning( + # logger.warning( # "DEPRECATED: The 'pysp_instance_creation_callback' function " # "signature has changed. 
An additional argument should be " # "added to the beginning of the arguments list that will be " # "set to the user provided scenario tree object when called " # "by PySP (e.g., a Pyomo scenario tree model instance, " # "a networkx tree, or a PySP ScenarioTree object.") - scenario_instance = self._model_callback(scenario_name, - node_name_list) + scenario_instance = self._model_callback( + scenario_name, node_name_list + ) elif self._model_object is not None: - - if (not isinstance(self._model_object, AbstractModel)) or \ - (self._model_object.is_constructed()): + if (not isinstance(self._model_object, AbstractModel)) or ( + self._model_object.is_constructed() + ): scenario_instance = self._model_object.clone() elif scenario_tree._scenario_based_data: assert self.data_directory() is not None - scenario_data_filename = \ - os.path.join(self.data_directory(), - str(scenario_name)) + scenario_data_filename = os.path.join( + self.data_directory(), str(scenario_name) + ) # JPW: The following is a hack to support # initialization of block instances, which # don't work with .dat files at the @@ -630,97 +651,106 @@ def construct_scenario_instance(self, # and expanded into the node-based data read # logic (where yaml is completely ignored at # the moment). - if os.path.exists(scenario_data_filename+'.dat'): - scenario_data_filename = \ - scenario_data_filename + ".dat" + if os.path.exists(scenario_data_filename + ".dat"): + scenario_data_filename = scenario_data_filename + ".dat" data = None - elif os.path.exists(scenario_data_filename+'.yaml'): + elif os.path.exists(scenario_data_filename + ".yaml"): if not yaml_available: raise ValueError( "Found yaml data file for scenario '%s' " "but the PyYAML module is not available" - % (scenario_name)) - scenario_data_filename = \ - scenario_data_filename+".yaml" + % (scenario_name) + ) + scenario_data_filename = scenario_data_filename + ".yaml" with open(scenario_data_filename) as f: data = yaml.load(f, **yaml_load_args) else: raise RuntimeError( "Cannot find a data file for scenario '%s' " "in directory: %s\nRecognized formats: .dat, " - ".yaml" % (scenario_name, self.data_directory())) + ".yaml" % (scenario_name, self.data_directory()) + ) if verbose: - print("Data for scenario=%s loads from file=%s" - % (scenario_name, scenario_data_filename)) + print( + "Data for scenario=%s loads from file=%s" + % (scenario_name, scenario_data_filename) + ) if data is None: - scenario_instance = \ - self._model_object.create_instance( - filename=scenario_data_filename, - profile_memory=profile_memory, - report_timing=output_instance_construction_time) + scenario_instance = self._model_object.create_instance( + filename=scenario_data_filename, + profile_memory=profile_memory, + report_timing=output_instance_construction_time, + ) else: - scenario_instance = \ - self._model_object.create_instance( - data, - profile_memory=profile_memory, - report_timing=output_instance_construction_time) + scenario_instance = self._model_object.create_instance( + data, + profile_memory=profile_memory, + report_timing=output_instance_construction_time, + ) else: assert self.data_directory() is not None data_files = [] for node_name in node_name_list: - node_data_filename = \ - os.path.join(self.data_directory(), - str(node_name)+".dat") + node_data_filename = os.path.join( + self.data_directory(), str(node_name) + ".dat" + ) if not os.path.exists(node_data_filename): raise RuntimeError( "Cannot find a data file for scenario tree " "node '%s' in directory: %s\nRecognized " - "formats: 
.dat" % (node_name, - self.data_directory())) + "formats: .dat" % (node_name, self.data_directory()) + ) data_files.append(node_data_filename) scenario_data = DataPortal(model=self._model_object) for data_file in data_files: if verbose: - print("Node data for scenario=%s partially " - "loading from file=%s" - % (scenario_name, data_file)) + print( + "Node data for scenario=%s partially " + "loading from file=%s" % (scenario_name, data_file) + ) scenario_data.load(filename=data_file) scenario_instance = self._model_object.create_instance( scenario_data, profile_memory=profile_memory, - report_timing=output_instance_construction_time) + report_timing=output_instance_construction_time, + ) else: - raise RuntimeError("Unable to construct scenario instance. " - "Neither a reference model or callback " - "is defined.") + raise RuntimeError( + "Unable to construct scenario instance. " + "Neither a reference model nor a callback " + "is defined." + ) # name each instance with the scenario name scenario_instance._name = scenario_name if compile_instance: - from pyomo.repn.beta.matrix import \ compile_block_linear_constraints + from pyomo.repn.beta.matrix import compile_block_linear_constraints + compile_block_linear_constraints( scenario_instance, "_PySP_compiled_linear_constraints", - verbose=verbose) + verbose=verbose, + ) except: - logger.error("Failed to create model instance for scenario=%s" - % (scenario_name)) + logger.error( + "Failed to create model instance for scenario=%s" % (scenario_name) + ) raise return scenario_instance def construct_instances_for_scenario_tree( - self, - scenario_tree, - profile_memory=False, - output_instance_construction_time=False, - compile_scenario_instances=False, - verbose=False): + self, + scenario_tree, + profile_memory=False, + output_instance_construction_time=False, + compile_scenario_instances=False, + verbose=False, + ): assert not self._closed if scenario_tree._scenario_based_data: @@ -732,7 +762,6 @@ def construct_instances_for_scenario_tree( scenario_instances = {} for scenario in scenario_tree._scenarios: - # the construction of instances takes little overhead in terms # of memory potentially lost in the garbage-collection sense # (mainly only that due to parsing and instance @@ -744,37 +773,37 @@ def construct_instances_for_scenario_tree( # instances have been created. 
scenario_instance = None with PauseGC(): - scenario_instance = \ - self.construct_scenario_instance( - scenario._name, - scenario_tree, - profile_memory=profile_memory, - output_instance_construction_time=output_instance_construction_time, - compile_instance=compile_scenario_instances, - verbose=verbose) + scenario_instance = self.construct_scenario_instance( + scenario._name, + scenario_tree, + profile_memory=profile_memory, + output_instance_construction_time=output_instance_construction_time, + compile_instance=compile_scenario_instances, + verbose=verbose, + ) scenario_instances[scenario._name] = scenario_instance assert scenario_instance.local_name == scenario.name return scenario_instances - def generate_scenario_tree(self, - downsample_fraction=1.0, - include_scenarios=None, - bundles=None, - random_bundles=None, - random_seed=None, - verbose=True): - + def generate_scenario_tree( + self, + downsample_fraction=1.0, + include_scenarios=None, + bundles=None, + random_bundles=None, + random_seed=None, + verbose=True, + ): scenario_tree_model = self._scenario_tree_model if scenario_tree_model is not None: - if has_networkx and \ - isinstance(scenario_tree_model, networkx.DiGraph): - scenario_tree_model = \ - ScenarioTreeModelFromNetworkX(scenario_tree_model) + if has_networkx and isinstance(scenario_tree_model, networkx.DiGraph): + scenario_tree_model = ScenarioTreeModelFromNetworkX(scenario_tree_model) else: - assert isinstance(scenario_tree_model, (_BlockData, Block)), \ - str(scenario_tree_model)+" "+str(type(scenario_tree_model)) + assert isinstance(scenario_tree_model, (_BlockData, Block)), ( + str(scenario_tree_model) + " " + str(type(scenario_tree_model)) + ) if bundles is not None: if isinstance(bundles, str): @@ -782,9 +811,11 @@ def generate_scenario_tree(self, raise ValueError( "A bundles file can not be used when the " "scenario tree input was not a Pyomo " - "model or ScenarioStructure.dat file.") - logger.debug("attempting to locate bundle file for input: %s" - % (bundles)) + "model or ScenarioStructure.dat file." + ) + logger.debug( + "attempting to locate bundle file for input: %s" % (bundles) + ) # we interpret the scenario bundle specification in one of # two ways. if the supplied name is a file, it is used # directly. otherwise, it is interpreted as the root of a @@ -792,24 +823,26 @@ def generate_scenario_tree(self, # directory. orig_input = bundles if not bundles.endswith(".dat"): - bundles = bundles+".dat" + bundles = bundles + ".dat" bundles = os.path.expanduser(bundles) if not os.path.exists(bundles): if self.data_directory() is None: raise ValueError( "Could not locate bundle .dat file from input " "'%s'. Path does not exist and there is no data " - "directory to search in." % (orig_input)) + "directory to search in." 
% (orig_input) + ) bundles = os.path.join(self.data_directory(), bundles) if not os.path.exists(bundles): - raise ValueError("Could not locate bundle .dat file " - "from input '%s' as absolute path or " - "relative to data directory: %s" - % (orig_input, self.data_directory())) + raise ValueError( + "Could not locate bundle .dat file " + "from input '%s' as absolute path or " + "relative to data directory: %s" + % (orig_input, self.data_directory()) + ) if verbose: - print("Scenario tree bundle specification filename=%s" - % (bundles)) + print("Scenario tree bundle specification filename=%s" % (bundles)) scenario_tree_model = scenario_tree_model.clone() scenario_tree_model.Bundling = True @@ -827,8 +860,10 @@ def generate_scenario_tree(self, # construct the scenario tree # if scenario_tree_model is not None: - scenario_tree = ScenarioTree(scenariotreeinstance=scenario_tree_model, - scenariobundlelist=include_scenarios) + scenario_tree = ScenarioTree( + scenariotreeinstance=scenario_tree_model, + scenariobundlelist=include_scenarios, + ) else: assert self._scenario_tree is not None if include_scenarios is None: @@ -837,18 +872,15 @@ def generate_scenario_tree(self, # note: any bundles will be lost if self._scenario_tree.contains_bundles(): raise ValueError( - "Can not compress a scenario tree that " - "contains bundles") + "Can not compress a scenario tree that contains bundles" + ) scenario_tree = self._scenario_tree.make_compressed( - include_scenarios, - normalize=True) + include_scenarios, normalize=True + ) # compress/down-sample the scenario tree, if requested - if (downsample_fraction is not None) and \ - (downsample_fraction < 1.0): - scenario_tree.downsample(downsample_fraction, - random_seed, - verbose) + if (downsample_fraction is not None) and (downsample_fraction < 1.0): + scenario_tree.downsample(downsample_fraction, random_seed, verbose) # # create bundles from a dict, if requested @@ -856,39 +888,42 @@ def generate_scenario_tree(self, if bundles is not None: if not isinstance(bundles, str): if verbose: - print("Adding bundles to scenario tree from " - "user-specified dict") + print("Adding bundles to scenario tree from user-specified dict") if scenario_tree.contains_bundles(): if verbose: - print("Scenario tree already contains bundles. " - "All existing bundles will be removed.") + print( + "Scenario tree already contains bundles. " + "All existing bundles will be removed." + ) for bundle in list(scenario_tree.bundles): scenario_tree.remove_bundle(bundle.name) for bundle_name in bundles: - scenario_tree.add_bundle(bundle_name, - bundles[bundle_name]) + scenario_tree.add_bundle(bundle_name, bundles[bundle_name]) # # create random bundles, if requested # - if (random_bundles is not None) and \ - (random_bundles > 0): + if (random_bundles is not None) and (random_bundles > 0): if bundles is not None: - raise ValueError("Cannot specify both random " - "bundles and a bundles specification") + raise ValueError( + "Cannot specify both random bundles and a bundles specification" + ) num_scenarios = len(scenario_tree._scenarios) if random_bundles > num_scenarios: - raise ValueError("Cannot create more random bundles " - "than there are scenarios!") + raise ValueError( + "Cannot create more random bundles than there are scenarios!" 
+ ) if verbose: - print("Creating "+str(random_bundles)+ - " random bundles using seed=" - +str(random_seed)) - - scenario_tree.create_random_bundles(random_bundles, - random_seed) + print( + "Creating " + + str(random_bundles) + + " random bundles using seed=" + + str(random_seed) + ) + + scenario_tree.create_random_bundles(random_bundles, random_seed) scenario_tree._scenario_instance_factory = self diff --git a/mpisppy/utils/pysp_model/phutils.py b/mpisppy/utils/pysp_model/phutils.py index 4b6c01f75..dcfca038a 100644 --- a/mpisppy/utils/pysp_model/phutils.py +++ b/mpisppy/utils/pysp_model/phutils.py @@ -10,8 +10,8 @@ # # Pyomo: Python Optimization Modeling Objects # Copyright 2017 National Technology and Engineering Solutions of Sandia, LLC -# Under the terms of Contract DE-NA0003525 with National Technology and -# Engineering Solutions of Sandia, LLC, the U.S. Government retains certain +# Under the terms of Contract DE-NA0003525 with National Technology and +# Engineering Solutions of Sandia, LLC, the U.S. Government retains certain # rights in this software. # This software is distributed under the 3-clause BSD License. # ___________________________________________________________________________ @@ -20,11 +20,8 @@ # Copied with modification from pysp/phutils.py - class BasicSymbolMap: - def __init__(self): - # maps object id()s to their assigned symbol. self.byObject = {} @@ -37,13 +34,12 @@ def getByObjectDictionary(self): def updateSymbols(self, data_stream): # check if the input is a generator / iterator, # if so, we need to copy since we use it twice - if hasattr(data_stream, '__iter__') and \ - not hasattr(data_stream, '__len__'): + if hasattr(data_stream, "__iter__") and not hasattr(data_stream, "__len__"): data_stream = list(data_stream) - self.byObject.update((id(obj), label) for obj,label in data_stream) - self.bySymbol.update((label,obj) for obj,label in data_stream) + self.byObject.update((id(obj), label) for obj, label in data_stream) + self.bySymbol.update((label, obj) for obj, label in data_stream) - def createSymbol(self, obj ,label): + def createSymbol(self, obj, label): self.byObject[id(obj)] = label self.bySymbol[label] = obj @@ -59,9 +55,11 @@ def getObject(self, label): def pprint(self, **kwds): print("BasicSymbolMap:") - lines = [repr(label)+" <-> "+obj.name+" (id="+str(id(obj))+")" - for label, obj in self.bySymbol.items()] - print('\n'.join(sorted(lines))) + lines = [ + repr(label) + " <-> " + obj.name + " (id=" + str(id(obj)) + ")" + for label, obj in self.bySymbol.items() + ] + print("\n".join(sorted(lines))) print("") @@ -70,24 +68,26 @@ def pprint(self, **kwds): # _nontuple = (str, int, float) -def indexToString(index): + +def indexToString(index): if index is None: - return '' + return "" # if the input type is a string or an int, then this isn't a tuple! # TODO: Why aren't we just checking for tuple? if isinstance(index, _nontuple): - return "["+str(index)+"]" + return "[" + str(index) + "]" result = "[" - for i in range(0,len(index)): + for i in range(0, len(index)): result += str(index[i]) if i != len(index) - 1: result += "," result += "]" return result + # # a simple utility to determine if a variable name contains an index specification. # in other words, is the reference to a complete variable (e.g., "foo") - which may @@ -95,15 +95,19 @@ def indexToString(index): # or "foo[1,*]". 
# -def isVariableNameIndexed(variable_name): - left_bracket_count = variable_name.count('[') - right_bracket_count = variable_name.count(']') +def isVariableNameIndexed(variable_name): + left_bracket_count = variable_name.count("[") + right_bracket_count = variable_name.count("]") if (left_bracket_count == 1) and (right_bracket_count == 1): return True elif (left_bracket_count == 1) or (right_bracket_count == 1): - raise ValueError("Illegally formed variable name="+variable_name+"; if indexed, variable names must contain matching left and right brackets") + raise ValueError( + "Illegally formed variable name=" + + variable_name + + "; if indexed, variable names must contain matching left and right brackets" + ) else: return False @@ -116,36 +120,36 @@ def isVariableNameIndexed(variable_name): # if the conversion works! # -def extractVariableNameAndIndex(variable_name): +def extractVariableNameAndIndex(variable_name): if not isVariableNameIndexed(variable_name): raise ValueError( "Non-indexed variable name passed to " - "function extractVariableNameAndIndex()") + "function extractVariableNameAndIndex()" + ) - pieces = variable_name.split('[') + pieces = variable_name.split("[") name = pieces[0].strip() - full_index = pieces[1].rstrip(']') + full_index = pieces[1].rstrip("]") # even nested tuples in pyomo are "flattened" into # one-dimensional tuples. to accomplish flattening # replace all parens in the string with commas and # proceed with the split. - full_index = full_index.replace("(",",").replace(")",",") - indices = full_index.split(',') + full_index = full_index.replace("(", ",").replace(")", ",") + indices = full_index.split(",") return_index = () for index in indices: - # unlikely, but strip white-space from the string. - index=index.strip() + index = index.strip() # if the tuple contains nested tuples, then the nested # tuples have single quotes - "'" characters - around # strings. remove these, as otherwise you have an # illegal index. - index = index.replace("\'","") + index = index.replace("'", "") # if the index is an integer, make it one! transformed_index = None @@ -169,8 +173,8 @@ def extractVariableNameAndIndex(variable_name): # 1 if slices are specified, e.g., [*,1]. # -def extractComponentIndices(component, index_template): +def extractComponentIndices(component, index_template): component_index_dimension = component.dim() # do special handling for the case where the component is @@ -179,11 +183,12 @@ def extractComponentIndices(component, index_template): # commonly, given that the empty string is hard to specify # in the scenario tree input data - a single wildcard character. if component_index_dimension == 0: - if (index_template != '') and (index_template != "*"): - raise RuntimeError( - "Index template=%r specified for scalar object=%s" - % (index_template, component.name)) - return [None] + if (index_template != "") and (index_template != "*"): + raise RuntimeError( + "Index template=%r specified for scalar object=%s" + % (index_template, component.name) + ) + return [None] # from this point on, we're dealing with an indexed component. 
if index_template == "": @@ -199,40 +204,43 @@ def extractComponentIndices(component, index_template): raise RuntimeError( "The dimension of index template=%s (%s) does not match " "the dimension of component=%s (%s)" - % (index_template, - len(index_template), - component.name, - component_index_dimension)) + % ( + index_template, + len(index_template), + component.name, + component_index_dimension, + ) + ) # cache for efficiency - iterator_range = [i for i,match_str in enumerate(index_template) - if match_str != '*'] + iterator_range = [ + i for i, match_str in enumerate(index_template) if match_str != "*" + ] if len(iterator_range) == 0: return list(component) elif len(iterator_range) == component_index_dimension: - if (len(index_template) == 1) and \ - (index_template[0] in component): + if (len(index_template) == 1) and (index_template[0] in component): return index_template elif index_template in component: return [index_template] else: raise ValueError( "The index %s is not valid for component named: %s" - % (str(tuple(index_template)), component.name)) + % (str(tuple(index_template)), component.name) + ) result = [] for index in component: - # if the input index is not a tuple, make it one for processing # purposes. however, return the original index always. if component_index_dimension == 1: - modified_index = (index,) + modified_index = (index,) else: - modified_index = index + modified_index = index - match_found = True # until proven otherwise + match_found = True  # until proven otherwise for i in iterator_range: if index_template[i] != modified_index[i]: match_found = False diff --git a/mpisppy/utils/pysp_model/pysp_model.py b/mpisppy/utils/pysp_model/pysp_model.py index d7ffd16f7..ad185b274 100644 --- a/mpisppy/utils/pysp_model/pysp_model.py +++ b/mpisppy/utils/pysp_model/pysp_model.py @@ -15,19 +15,22 @@ logger = logging.getLogger("mpisppy.utils.pysp_model") + def _get_cost_expression(model, cost_variable): return model.find_component(cost_variable[0])[cost_variable[1]] + def _yeild_componentdata_from_indexlist(component, index_list): for index in index_list: if type(index) is tuple: - indices = tuple(idx if idx != '*' else slice(None) for idx in index) + indices = tuple(idx if idx != "*" else slice(None) for idx in index) yield from component.__getitem__(indices) - elif index == '*' or index == '': + elif index == "*" or index == "": yield from component.values() else: yield component.__getitem__(index) + def _get_nonant_list_from_templates(model, templates): nonant_list = [] for name, index_list in templates.items(): @@ -37,17 +40,32 @@ def _get_nonant_list_from_templates(model, templates): nonant_list.append(vardata) elif isinstance(component, pyo.Block): for blockdata in _yeild_componentdata_from_indexlist(component, index_list): - for vardata in blockdata.component_data_objects(ctype=pyo.Var, active=True, descend_into=True): + for vardata in blockdata.component_data_objects( + ctype=pyo.Var, active=True, descend_into=True + ): nonant_list.append(vardata) else: - raise RuntimeError("Cannot extract non-anticipative variables from component {component.name}") + raise RuntimeError( + f"Cannot extract non-anticipative variables from component {component.name}" + ) return nonant_list + def _get_nonant_list(model, pysp_node): - return _get_nonant_list_from_templates(model, {**pysp_node.stage._variable_templates, **pysp_node._variable_templates}) + return _get_nonant_list_from_templates( + model, {**pysp_node.stage._variable_templates, **pysp_node._variable_templates} + ) + def 
_get_derived_nonant_list(model, pysp_node): - return _get_nonant_list_from_templates(model, {**pysp_node.stage._derived_variable_templates, **pysp_node._derived_variable_templates}) + return _get_nonant_list_from_templates( + model, + { + **pysp_node.stage._derived_variable_templates, + **pysp_node._derived_variable_templates, + }, + ) + def _get_nodenames(root_node): root_node._mpisppy_stage = 1 @@ -57,6 +75,7 @@ def _get_nodenames(root_node): _add_next_stage(root_node, nodenames) return nodenames + def _add_next_stage(node, nodenames): for idx, child in enumerate(node.children): child._mpisppy_name = node._mpisppy_name + f"_{idx}" @@ -67,7 +86,7 @@ def _add_next_stage(node, nodenames): class PySPModel: - """A class for instantiating PySP models for use in mpisppy. + """A class for instantiating PySP models for use in mpisppy. Args: model: The reference scenario model. Can be set @@ -114,18 +133,18 @@ class PySPModel: scenario_denouement (fct): A blank scenario_denouement function for use in mpisppy """ - def __init__(self, - model, - scenario_tree, - data_dir=None): - self._scenario_tree_instance_factory = \ ScenarioTreeInstanceFactory(model, scenario_tree, data=data_dir) - self._pysp_scenario_tree = self._scenario_tree_instance_factory.generate_scenario_tree() + def __init__(self, model, scenario_tree, data_dir=None): + self._scenario_tree_instance_factory = ScenarioTreeInstanceFactory( + model, scenario_tree, data=data_dir + ) + self._pysp_scenario_tree = ( + self._scenario_tree_instance_factory.generate_scenario_tree() + ) ## get the things out of the tree model we need self._all_scenario_names = [s.name for s in self._pysp_scenario_tree.scenarios] - + ## check for more than two stages ## gripe if we see bundles if self._pysp_scenario_tree.bundles: @@ -136,7 +155,8 @@ def __init__(self, def scenario_creator_callback(self, scenario_name, **kwargs): ## first, get the model out model = self._scenario_tree_instance_factory.construct_scenario_instance( - scenario_name, self._pysp_scenario_tree) + scenario_name, self._pysp_scenario_tree + ) tree_scenario = self._pysp_scenario_tree.get_scenario(scenario_name) @@ -146,40 +166,52 @@ def scenario_creator_callback(self, scenario_name, **kwargs): if node.is_leaf_node(): raise Exception("Unexpected leaf node") - node._mpisppy_cost_expression = _get_cost_expression(model, node._cost_variable) + node._mpisppy_cost_expression = _get_cost_expression( + model, node._cost_variable + ) node._mpisppy_nonant_list = _get_nonant_list(model, node) node._mpisppy_nonant_ef_suppl_list = _get_derived_nonant_list(model, node) ## add the things mpisppy expects to the model model._mpisppy_probability = tree_scenario.probability - model._mpisppy_node_list = [mpisppyScenarioNode( - name=node._mpisppy_name, - cond_prob=node.conditional_probability, - stage=node._mpisppy_stage, - cost_expression=node._mpisppy_cost_expression, - nonant_list=node._mpisppy_nonant_list, - scen_model=None, - nonant_ef_suppl_list=node._mpisppy_nonant_ef_suppl_list, - parent_name=node._mpisppy_parent_name, - ) - for node in non_leaf_nodes - ] - - for _ in model.component_data_objects(pyo.Objective, active=True, descend_into=True): + model._mpisppy_node_list = [ + mpisppyScenarioNode( + name=node._mpisppy_name, + cond_prob=node.conditional_probability, + stage=node._mpisppy_stage, + cost_expression=node._mpisppy_cost_expression, + nonant_list=node._mpisppy_nonant_list, + scen_model=None, + nonant_ef_suppl_list=node._mpisppy_nonant_ef_suppl_list, + parent_name=node._mpisppy_parent_name, + ) + 
for node in non_leaf_nodes + ] + + for _ in model.component_data_objects( + pyo.Objective, active=True, descend_into=True + ): break - else: # no break - print("Provided model has no objective; using PySP auto-generated objective") + else: # no break + print( + "Provided model has no objective; using PySP auto-generated objective" + ) # attach PySP objective leaf_node = tree_scenario.node_list[-1] - leaf_node._mpisppy_cost_expression = _get_cost_expression(model, leaf_node._cost_variable) + leaf_node._mpisppy_cost_expression = _get_cost_expression( + model, leaf_node._cost_variable + ) if hasattr(model, "_PySPModel_objective"): raise RuntimeError("provided model has attribute _PySPModel_objective") - model._PySPModel_objective = pyo.Objective(expr=\ - pyo.quicksum(node._mpisppy_cost_expression for node in tree_scenario.node_list)) + model._PySPModel_objective = pyo.Objective( + expr=pyo.quicksum( + node._mpisppy_cost_expression for node in tree_scenario.node_list + ) + ) return model @@ -194,7 +226,7 @@ def close(self): @property def scenario_creator(self): - return lambda *args,**kwargs : self.scenario_creator_callback(*args,**kwargs) + return lambda *args, **kwargs: self.scenario_creator_callback(*args, **kwargs) @property def all_scenario_names(self): @@ -206,4 +238,4 @@ def all_nodenames(self): @property def scenario_denouement(self): - return lambda *args,**kwargs: None + return lambda *args, **kwargs: None diff --git a/mpisppy/utils/pysp_model/tests/test_archivereader.py b/mpisppy/utils/pysp_model/tests/test_archivereader.py index c4dafc03e..dbacf20fd 100644 --- a/mpisppy/utils/pysp_model/tests/test_archivereader.py +++ b/mpisppy/utils/pysp_model/tests/test_archivereader.py @@ -20,76 +20,75 @@ import pyomo.common.unittest as unittest -from mpisppy.utils.pysp_model.archivereader import \ - ArchiveReaderFactory, DirArchiveReader, FileArchiveReader, \ - TarArchiveReader, ZipArchiveReader, BZ2FileArchiveReader, \ - GzipFileArchiveReader, tarfile_available, zipfile_available, \ - gzip_available, bz2_available +from mpisppy.utils.pysp_model.archivereader import ( + ArchiveReaderFactory, + DirArchiveReader, + FileArchiveReader, + TarArchiveReader, + ZipArchiveReader, + BZ2FileArchiveReader, + GzipFileArchiveReader, + tarfile_available, + zipfile_available, + gzip_available, + bz2_available, +) currdir = os.path.dirname(os.path.abspath(__file__)) -testdatadir = os.path.join(currdir, 'archivereader') +testdatadir = os.path.join(currdir, "archivereader") class TestArchiveReaderFactory(unittest.TestCase): - def test_ArchiveReaderFactory_dir(self): - archive = ArchiveReaderFactory( - os.path.join(testdatadir, 'archive_flat')) + archive = ArchiveReaderFactory(os.path.join(testdatadir, "archive_flat")) self.assertTrue(isinstance(archive, DirArchiveReader)) @unittest.skipUnless(tarfile_available, "tarfile support is disabled") def test_ArchiveReaderFactory_tar(self): - archive = ArchiveReaderFactory( - os.path.join(testdatadir, 'archive_flat.tar')) + archive = ArchiveReaderFactory(os.path.join(testdatadir, "archive_flat.tar")) self.assertTrue(isinstance(archive, TarArchiveReader)) @unittest.skipUnless(tarfile_available, "tarfile support is disabled") def test_ArchiveReaderFactory_targz(self): - archive = ArchiveReaderFactory( - os.path.join(testdatadir, 'archive_flat.tar.gz')) + archive = ArchiveReaderFactory(os.path.join(testdatadir, "archive_flat.tar.gz")) self.assertTrue(isinstance(archive, TarArchiveReader)) @unittest.skipUnless(tarfile_available, "tarfile support is disabled") def 
test_ArchiveReaderFactory_tgz(self): - archive = ArchiveReaderFactory( - os.path.join(testdatadir, 'archive_flat.tgz')) + archive = ArchiveReaderFactory(os.path.join(testdatadir, "archive_flat.tgz")) self.assertTrue(isinstance(archive, TarArchiveReader)) @unittest.skipUnless(zipfile_available, "zipfile support is disabled") def test_ArchiveReaderFactory_zip(self): - archive = ArchiveReaderFactory( - os.path.join(testdatadir, 'archive_flat.zip')) + archive = ArchiveReaderFactory(os.path.join(testdatadir, "archive_flat.zip")) self.assertTrue(isinstance(archive, ZipArchiveReader)) def test_ArchiveReaderFactory_file(self): - archive = ArchiveReaderFactory(os.path.join(testdatadir, 'fileC.txt')) + archive = ArchiveReaderFactory(os.path.join(testdatadir, "fileC.txt")) self.assertTrue(isinstance(archive, FileArchiveReader)) @unittest.skipUnless(gzip_available, "gzip support is disabled") def test_ArchiveReaderFactory_gzip(self): - archive = ArchiveReaderFactory( - os.path.join(testdatadir, 'fileC.txt.gz')) + archive = ArchiveReaderFactory(os.path.join(testdatadir, "fileC.txt.gz")) self.assertTrue(isinstance(archive, GzipFileArchiveReader)) @unittest.skipUnless(bz2_available, "bz2 support is disabled") def test_ArchiveReaderFactory_bzip2(self): - archive = ArchiveReaderFactory( - os.path.join(testdatadir, 'fileC.txt.bz2')) + archive = ArchiveReaderFactory(os.path.join(testdatadir, "fileC.txt.bz2")) self.assertTrue(isinstance(archive, BZ2FileArchiveReader)) def test_ArchiveReaderFactory_nonexist(self): try: - ArchiveReaderFactory( - os.path.join(testdatadir, '_does_not_exist_')) + ArchiveReaderFactory(os.path.join(testdatadir, "_does_not_exist_")) except IOError: pass else: self.fail( - "ArchiveReaderFactory should raise exception with nonexistent archive") + "ArchiveReaderFactory should raise exception with nonexistent archive" + ) class _TestArchiveReaderBaseNested(object): - archive_class = None archive_name = None archive_class_kwds = {} @@ -103,58 +102,64 @@ def tearDown(self): def test_default(self): a = self._a = self.archive_class( - os.path.join(testdatadir, self.archive_name), - **self.archive_class_kwds) - tmpdir = a.extract('directory', recursive=True) + os.path.join(testdatadir, self.archive_name), **self.archive_class_kwds + ) + tmpdir = a.extract("directory", recursive=True) self.assertEqual(os.path.exists(tmpdir), True) - self.assertEqual( - os.path.exists(os.path.join(tmpdir, 'fileA.txt')), True) - self.assertEqual( - os.path.exists(os.path.join(tmpdir, 'fileB.txt')), True) + self.assertEqual(os.path.exists(os.path.join(tmpdir, "fileA.txt")), True) + self.assertEqual(os.path.exists(os.path.join(tmpdir, "fileB.txt")), True) a.clear_extractions() self.assertEqual(os.path.exists(tmpdir), False) - self.assertEqual( - os.path.exists(os.path.join(tmpdir, 'fileA.txt')), False) - self.assertEqual( - os.path.exists(os.path.join(tmpdir, 'fileB.txt')), False) + self.assertEqual(os.path.exists(os.path.join(tmpdir, "fileA.txt")), False) + self.assertEqual(os.path.exists(os.path.join(tmpdir, "fileB.txt")), False) - tmpdir = a.extract('directory', recursive=True) + tmpdir = a.extract("directory", recursive=True) self.assertEqual(os.path.exists(tmpdir), True) - self.assertEqual( - os.path.exists(os.path.join(tmpdir, 'fileA.txt')), True) - self.assertEqual( - os.path.exists(os.path.join(tmpdir, 'fileB.txt')), True) + self.assertEqual(os.path.exists(os.path.join(tmpdir, "fileA.txt")), True) + self.assertEqual(os.path.exists(os.path.join(tmpdir, "fileB.txt")), True) a.clear_extractions() 
self.assertEqual(os.path.exists(tmpdir), False) - self.assertEqual( - os.path.exists(os.path.join(tmpdir, 'fileA.txt')), False) - self.assertEqual( - os.path.exists(os.path.join(tmpdir, 'fileB.txt')), False) + self.assertEqual(os.path.exists(os.path.join(tmpdir, "fileA.txt")), False) + self.assertEqual(os.path.exists(os.path.join(tmpdir, "fileB.txt")), False) tmpnames = a.extractall() for name in tmpnames: self.assertEqual(os.path.exists(name), True) - names = [a.normalize_name(name).replace( - a.normalize_name(a._workdir) + '/', '') for name in tmpnames] + names = [ + a.normalize_name(name).replace(a.normalize_name(a._workdir) + "/", "") + for name in tmpnames + ] self.assertEqual( sorted(names), - sorted(['directory', posixpath.join('directory', 'fileA.txt'), - posixpath.join('directory', 'fileB.txt')])) + sorted( + [ + "directory", + posixpath.join("directory", "fileA.txt"), + posixpath.join("directory", "fileB.txt"), + ] + ), + ) a.clear_extractions() for name in tmpnames: self.assertEqual(os.path.exists(name), False) self.assertEqual( sorted(a.getnames()), - sorted(['directory', posixpath.join('directory', 'fileA.txt'), - posixpath.join('directory', 'fileB.txt')])) - f = a.open(posixpath.join('directory', 'fileA.txt')) + sorted( + [ + "directory", + posixpath.join("directory", "fileA.txt"), + posixpath.join("directory", "fileB.txt"), + ] + ), + ) + f = a.open(posixpath.join("directory", "fileA.txt")) try: - self.assertEqual(f.read().strip().decode(), 'this is fileA') + self.assertEqual(f.read().strip().decode(), "this is fileA") finally: f.close() - f = a.open(posixpath.join('directory', 'fileB.txt')) + f = a.open(posixpath.join("directory", "fileB.txt")) try: - self.assertEqual(f.read().strip().decode(), 'this is fileB') + self.assertEqual(f.read().strip().decode(), "this is fileB") finally: f.close() a.close() @@ -163,77 +168,87 @@ def test_maxdepth0(self): a = self._a = self.archive_class( os.path.join(testdatadir, self.archive_name), maxdepth=0, - **self.archive_class_kwds) - tmpdir = a.extract('directory', recursive=True) + **self.archive_class_kwds, + ) + tmpdir = a.extract("directory", recursive=True) self.assertEqual(os.path.exists(tmpdir), True) - self.assertEqual( - os.path.exists(os.path.join(tmpdir, 'fileA.txt')), False) - self.assertEqual( - os.path.exists(os.path.join(tmpdir, 'fileB.txt')), False) + self.assertEqual(os.path.exists(os.path.join(tmpdir, "fileA.txt")), False) + self.assertEqual(os.path.exists(os.path.join(tmpdir, "fileB.txt")), False) a.clear_extractions() self.assertEqual(os.path.exists(tmpdir), False) - self.assertEqual( - os.path.exists(os.path.join(tmpdir, 'fileA.txt')), False) - self.assertEqual( - os.path.exists(os.path.join(tmpdir, 'fileB.txt')), False) + self.assertEqual(os.path.exists(os.path.join(tmpdir, "fileA.txt")), False) + self.assertEqual(os.path.exists(os.path.join(tmpdir, "fileB.txt")), False) tmpnames = a.extractall() for name in tmpnames: self.assertEqual(os.path.exists(name), True) - names = [a.normalize_name(name).replace( - a.normalize_name(a._workdir) + '/', '') for name in tmpnames] - self.assertEqual(sorted(names), sorted(['directory'])) + names = [ + a.normalize_name(name).replace(a.normalize_name(a._workdir) + "/", "") + for name in tmpnames + ] + self.assertEqual(sorted(names), sorted(["directory"])) a.clear_extractions() for name in tmpnames: self.assertEqual(os.path.exists(name), False) - self.assertEqual(sorted(a.getnames()), sorted(['directory'])) + self.assertEqual(sorted(a.getnames()), sorted(["directory"])) a.close() def 
test_maxdepth1(self): a = self._a = self.archive_class( os.path.join(testdatadir, self.archive_name), maxdepth=1, - **self.archive_class_kwds) + **self.archive_class_kwds, + ) - tmpdir = a.extract('directory', recursive=True) + tmpdir = a.extract("directory", recursive=True) self.assertEqual(os.path.exists(tmpdir), True) - self.assertEqual( - os.path.exists(os.path.join(tmpdir, 'fileA.txt')), True) - self.assertEqual( - os.path.exists(os.path.join(tmpdir, 'fileB.txt')), True) + self.assertEqual(os.path.exists(os.path.join(tmpdir, "fileA.txt")), True) + self.assertEqual(os.path.exists(os.path.join(tmpdir, "fileB.txt")), True) a.clear_extractions() self.assertEqual(os.path.exists(tmpdir), False) - self.assertEqual( - os.path.exists(os.path.join(tmpdir, 'fileA.txt')), False) - self.assertEqual( - os.path.exists(os.path.join(tmpdir, 'fileB.txt')), False) + self.assertEqual(os.path.exists(os.path.join(tmpdir, "fileA.txt")), False) + self.assertEqual(os.path.exists(os.path.join(tmpdir, "fileB.txt")), False) tmpnames = a.extractall() for name in tmpnames: self.assertEqual(os.path.exists(name), True) - names = [a.normalize_name(name).replace( - a.normalize_name(a._workdir) + '/', '') for name in tmpnames] + names = [ + a.normalize_name(name).replace(a.normalize_name(a._workdir) + "/", "") + for name in tmpnames + ] self.assertEqual( sorted(names), - sorted(['directory', posixpath.join('directory', 'fileA.txt'), - posixpath.join('directory', 'fileB.txt')])) + sorted( + [ + "directory", + posixpath.join("directory", "fileA.txt"), + posixpath.join("directory", "fileB.txt"), + ] + ), + ) a.clear_extractions() for name in tmpnames: self.assertEqual(os.path.exists(name), False) self.assertEqual( sorted(a.getnames()), - sorted(['directory', posixpath.join('directory', 'fileA.txt'), - posixpath.join('directory', 'fileB.txt')])) - f = a.open(posixpath.join('directory', 'fileA.txt')) + sorted( + [ + "directory", + posixpath.join("directory", "fileA.txt"), + posixpath.join("directory", "fileB.txt"), + ] + ), + ) + f = a.open(posixpath.join("directory", "fileA.txt")) try: - self.assertEqual(f.read().strip().decode(), 'this is fileA') + self.assertEqual(f.read().strip().decode(), "this is fileA") finally: f.close() - f = a.open(posixpath.join('directory', 'fileB.txt')) + f = a.open(posixpath.join("directory", "fileB.txt")) try: - self.assertEqual(f.read().strip().decode(), 'this is fileB') + self.assertEqual(f.read().strip().decode(), "this is fileB") finally: f.close() a.close() @@ -241,35 +256,39 @@ def test_maxdepth1(self): def test_subdir_maxdepth0(self): a = self._a = self.archive_class( os.path.join(testdatadir, self.archive_name), - subdir='directory', + subdir="directory", maxdepth=0, - **self.archive_class_kwds) + **self.archive_class_kwds, + ) tmpnames = a.extractall() for name in tmpnames: self.assertEqual(os.path.exists(name), True) - names = [a.normalize_name(name).replace( - posixpath.join(a.normalize_name(a._workdir), a._subdir), '') - for name in tmpnames] - self.assertEqual(sorted(names), sorted(['fileA.txt', 'fileB.txt'])) + names = [ + a.normalize_name(name).replace( + posixpath.join(a.normalize_name(a._workdir), a._subdir), "" + ) + for name in tmpnames + ] + self.assertEqual(sorted(names), sorted(["fileA.txt", "fileB.txt"])) a.clear_extractions() for name in tmpnames: self.assertEqual(os.path.exists(name), False) a.close() a = self._a = self.archive_class( os.path.join(testdatadir, self.archive_name), - subdir='directory', + subdir="directory", maxdepth=0, - **self.archive_class_kwds) - 
self.assertEqual( - sorted(a.getnames()), sorted(['fileA.txt', 'fileB.txt'])) - f = a.open('fileA.txt') + **self.archive_class_kwds, + ) + self.assertEqual(sorted(a.getnames()), sorted(["fileA.txt", "fileB.txt"])) + f = a.open("fileA.txt") try: - self.assertEqual(f.read().strip().decode(), 'this is fileA') + self.assertEqual(f.read().strip().decode(), "this is fileA") finally: f.close() - f = a.open('fileB.txt') + f = a.open("fileB.txt") try: - self.assertEqual(f.read().strip().decode(), 'this is fileB') + self.assertEqual(f.read().strip().decode(), "this is fileB") finally: f.close() a.close() @@ -277,91 +296,85 @@ def test_subdir_maxdepth0(self): def test_subdir_maxdepth1(self): a = self._a = self.archive_class( os.path.join(testdatadir, self.archive_name), - subdir='directory', + subdir="directory", maxdepth=1, - **self.archive_class_kwds) + **self.archive_class_kwds, + ) tmpnames = a.extractall() for name in tmpnames: self.assertEqual(os.path.exists(name), True) - names = [a.normalize_name(name).replace( - posixpath.join(a.normalize_name(a._workdir), a._subdir), '') - for name in tmpnames] - self.assertEqual(sorted(names), sorted(['fileA.txt', 'fileB.txt'])) + names = [ + a.normalize_name(name).replace( + posixpath.join(a.normalize_name(a._workdir), a._subdir), "" + ) + for name in tmpnames + ] + self.assertEqual(sorted(names), sorted(["fileA.txt", "fileB.txt"])) a.clear_extractions() for name in tmpnames: self.assertEqual(os.path.exists(name), False) - self.assertEqual( - sorted(a.getnames()), sorted(['fileA.txt', 'fileB.txt'])) - f = a.open('fileA.txt') + self.assertEqual(sorted(a.getnames()), sorted(["fileA.txt", "fileB.txt"])) + f = a.open("fileA.txt") try: - self.assertEqual(f.read().strip().decode(), 'this is fileA') + self.assertEqual(f.read().strip().decode(), "this is fileA") finally: f.close() - f = a.open('fileB.txt') + f = a.open("fileB.txt") try: - self.assertEqual(f.read().strip().decode(), 'this is fileB') + self.assertEqual(f.read().strip().decode(), "this is fileB") finally: f.close() a.close() def dir_svn_junk_filter(x): - return (fnmatch.fnmatch(x,"*.junk") or \ - fnmatch.fnmatch(x,"*.svn*")) + return fnmatch.fnmatch(x, "*.junk") or fnmatch.fnmatch(x, "*.svn*") class TestDirArchiveReader(_TestArchiveReaderBaseNested, unittest.TestCase): - archive_class = DirArchiveReader - archive_name = 'archive_directory' - archive_class_kwds = {'filter': dir_svn_junk_filter} + archive_name = "archive_directory" + archive_class_kwds = {"filter": dir_svn_junk_filter} class TestDirArchiveReaderWin(_TestArchiveReaderBaseNested, unittest.TestCase): - archive_class = DirArchiveReader - archive_name = 'win_archive_directory' - archive_class_kwds = {'filter': dir_svn_junk_filter} + archive_name = "win_archive_directory" + archive_class_kwds = {"filter": dir_svn_junk_filter} @unittest.skipUnless(tarfile_available, "tarfile support is disabled") class TestTarArchiveReader1(_TestArchiveReaderBaseNested, unittest.TestCase): - archive_class = TarArchiveReader - archive_name = 'archive_directory.tgz' + archive_name = "archive_directory.tgz" @unittest.skipUnless(tarfile_available, "tarfile support is disabled") class TestTarArchiveReader2(_TestArchiveReaderBaseNested, unittest.TestCase): - archive_class = TarArchiveReader - archive_name = 'archive_directory.tar.gz' + archive_name = "archive_directory.tar.gz" @unittest.skipUnless(tarfile_available, "tarfile support is disabled") class TestTarArchiveReader3(_TestArchiveReaderBaseNested, unittest.TestCase): - archive_class = TarArchiveReader - 
archive_name = 'archive_directory.tar' + archive_name = "archive_directory.tar" @unittest.skipUnless(zipfile_available, "zipfile support is disabled") class TestZipArchiveReader(_TestArchiveReaderBaseNested, unittest.TestCase): - archive_class = ZipArchiveReader - archive_name = 'archive_directory.zip' + archive_name = "archive_directory.zip" @unittest.skipUnless(zipfile_available, "zipfile support is disabled") class TestZipArchiveReaderWin(_TestArchiveReaderBaseNested, unittest.TestCase): - archive_class = ZipArchiveReader - archive_name = 'win_archive_directory.zip' + archive_name = "win_archive_directory.zip" class _TestFileArchiveReaderBase(object): - archive_class = None archive_name = None @@ -373,28 +386,27 @@ def tearDown(self): self._a.close() def test1(self): - a = self._a = self.archive_class( - os.path.join(testdatadir, self.archive_name)) - f = a.open('fileC.txt') + a = self._a = self.archive_class(os.path.join(testdatadir, self.archive_name)) + f = a.open("fileC.txt") try: - self.assertEqual(f.read().strip().decode(), 'this is fileC') + self.assertEqual(f.read().strip().decode(), "this is fileC") finally: f.close() f = a.open() try: - self.assertEqual(f.read().strip().decode(), 'this is fileC') + self.assertEqual(f.read().strip().decode(), "this is fileC") finally: f.close() dst = a.extract() self.assertEqual(os.path.exists(dst), True) - with open(dst, 'rb') as f: - self.assertEqual(f.read().strip().decode(), 'this is fileC') + with open(dst, "rb") as f: + self.assertEqual(f.read().strip().decode(), "this is fileC") a.clear_extractions() self.assertEqual(os.path.exists(dst), False) - dst = a.extract('fileC.txt') + dst = a.extract("fileC.txt") self.assertEqual(os.path.exists(dst), True) - with open(dst, 'rb') as f: - self.assertEqual(f.read().strip().decode(), 'this is fileC') + with open(dst, "rb") as f: + self.assertEqual(f.read().strip().decode(), "this is fileC") a.clear_extractions() self.assertEqual(os.path.exists(dst), False) a.close() @@ -402,22 +414,19 @@ def test1(self): @unittest.skipUnless(gzip_available, "gzip support is disabled") class TestGzipFileArchiveReader(_TestFileArchiveReaderBase, unittest.TestCase): - archive_class = GzipFileArchiveReader - archive_name = 'fileC.txt.gz' + archive_name = "fileC.txt.gz" @unittest.skipUnless(bz2_available, "bz2 support is disabled") class TestBZ2FileArchiveReader(_TestFileArchiveReaderBase, unittest.TestCase): - archive_class = BZ2FileArchiveReader - archive_name = 'fileC.txt.bz2' + archive_name = "fileC.txt.bz2" class TestFileArchiveReader(_TestFileArchiveReaderBase, unittest.TestCase): - archive_class = FileArchiveReader - archive_name = 'fileC.txt' + archive_name = "fileC.txt" if __name__ == "__main__": diff --git a/mpisppy/utils/pysp_model/tests/test_instancefactory.py b/mpisppy/utils/pysp_model/tests/test_instancefactory.py index d678662e5..b39616ee2 100644 --- a/mpisppy/utils/pysp_model/tests/test_instancefactory.py +++ b/mpisppy/utils/pysp_model/tests/test_instancefactory.py @@ -28,12 +28,11 @@ from pyomo.common.dependencies import networkx_available as has_networkx from pyomo.common.dependencies import yaml_available -from mpisppy.utils.pysp_model.instance_factory import \ - ScenarioTreeInstanceFactory -from mpisppy.utils.pysp_model.tree_structure_model import \ - CreateAbstractScenarioTreeModel -from mpisppy.utils.pysp_model.tree_structure import \ - ScenarioTree +from mpisppy.utils.pysp_model.instance_factory import ScenarioTreeInstanceFactory +from mpisppy.utils.pysp_model.tree_structure_model import ( + 
CreateAbstractScenarioTreeModel, +) +from mpisppy.utils.pysp_model.tree_structure import ScenarioTree from pyomo.common.fileutils import import_file @@ -42,14 +41,16 @@ testdatadir = join(thisdir, "testdata") reference_test_model = None + + def setUpModule(): global reference_test_model reference_test_model = import_file( - join(testdatadir, "reference_test_model.py")).model + join(testdatadir, "reference_test_model.py") + ).model class Test(unittest.TestCase): - @classmethod def setUpClass(cls): pass @@ -69,86 +70,95 @@ def setUp(self): del sys.modules["both_callbacks"] def _get_testfname_prefix(self): - class_name, test_name = self.id().split('.')[-2:] - return join(thisdir, class_name+"."+test_name) + class_name, test_name = self.id().split(".")[-2:] + return join(thisdir, class_name + "." + test_name) def _check_factory(self, factory): scenario_tree = factory.generate_scenario_tree() - instances = factory.construct_instances_for_scenario_tree(scenario_tree, - verbose=True) + instances = factory.construct_instances_for_scenario_tree( + scenario_tree, verbose=True + ) self.assertEqual(len(instances), 3) self.assertEqual(instances["s1"].p(), 1) self.assertEqual(instances["s2"].p(), 2) self.assertEqual(instances["s3"].p(), 3) - instances = factory.construct_instances_for_scenario_tree(scenario_tree, - compile_scenario_instances=True, - verbose=True) + instances = factory.construct_instances_for_scenario_tree( + scenario_tree, compile_scenario_instances=True, verbose=True + ) self.assertEqual(len(instances), 3) self.assertEqual(instances["s1"].p(), 1) self.assertEqual(instances["s2"].p(), 2) self.assertEqual(instances["s3"].p(), 3) - self.assertEqual(factory.construct_scenario_instance("s1", scenario_tree, verbose=True).p(), 1) + self.assertEqual( + factory.construct_scenario_instance("s1", scenario_tree, verbose=True).p(), + 1, + ) with self.assertRaises(ValueError): factory.construct_scenario_instance("s0", scenario_tree, verbose=True) with self.assertRaises(ValueError): - scenario_tree = factory.generate_scenario_tree(random_bundles=1000, verbose=True) + scenario_tree = factory.generate_scenario_tree( + random_bundles=1000, verbose=True + ) with self.assertRaises(ValueError): - scenario_tree = factory.generate_scenario_tree(random_bundles=2, bundles={}, verbose=True) + scenario_tree = factory.generate_scenario_tree( + random_bundles=2, bundles={}, verbose=True + ) scenario_tree = factory.generate_scenario_tree(random_bundles=2, verbose=True) self.assertEqual(scenario_tree.contains_bundles(), True) self.assertEqual(len(scenario_tree._scenario_bundles), 2) - scenario_tree = factory.generate_scenario_tree(downsample_fraction=0.1, verbose=True) - scenario_tree = factory.generate_scenario_tree(bundles={'b1': ['s1'], - 'b2': ['s2'], - 'b3': ['s3']}, - verbose=True) + scenario_tree = factory.generate_scenario_tree( + downsample_fraction=0.1, verbose=True + ) + scenario_tree = factory.generate_scenario_tree( + bundles={"b1": ["s1"], "b2": ["s2"], "b3": ["s3"]}, verbose=True + ) self.assertEqual(scenario_tree.contains_bundles(), True) self.assertEqual(len(scenario_tree.bundles), 3) if factory._scenario_tree is not None: self.assertTrue(factory._scenario_tree_model is None) with self.assertRaises(ValueError): scenario_tree = factory.generate_scenario_tree( - bundles=join(testdatadir, "bundles.dat"), - verbose=True) + bundles=join(testdatadir, "bundles.dat"), verbose=True + ) else: scenario_tree = factory.generate_scenario_tree( - bundles=join(testdatadir, "bundles.dat"), - verbose=True) + 
bundles=join(testdatadir, "bundles.dat"), verbose=True + ) self.assertEqual(scenario_tree.contains_bundles(), True) self.assertEqual(len(scenario_tree.bundles), 3) scenario_tree = factory.generate_scenario_tree( - bundles=join(testdatadir, "bundles"), - verbose=True) + bundles=join(testdatadir, "bundles"), verbose=True + ) self.assertEqual(scenario_tree.contains_bundles(), True) self.assertEqual(len(scenario_tree.bundles), 3) - if (factory.data_directory() is not None) and \ - exists(join(factory.data_directory(), "bundles.dat")): + if (factory.data_directory() is not None) and exists( + join(factory.data_directory(), "bundles.dat") + ): scenario_tree = factory.generate_scenario_tree( - bundles="bundles.dat", - verbose=True) + bundles="bundles.dat", verbose=True + ) self.assertEqual(scenario_tree.contains_bundles(), True) self.assertEqual(len(scenario_tree.bundles), 3) scenario_tree = factory.generate_scenario_tree( - bundles="bundles", - verbose=True) + bundles="bundles", verbose=True + ) self.assertEqual(scenario_tree.contains_bundles(), True) self.assertEqual(len(scenario_tree.bundles), 3) with self.assertRaises(ValueError): scenario_tree = factory.generate_scenario_tree( - bundles="bundles.notexists", - verbose=True) + bundles="bundles.notexists", verbose=True + ) # model: name of .py file with model # scenario_tree: name of .dat file def test_init1(self): self.assertTrue("reference_test_model" not in sys.modules) with ScenarioTreeInstanceFactory( - model=join(testdatadir, - "reference_test_model.py"), - scenario_tree=join(testdatadir, - "reference_test_scenario_tree.dat")) as factory: + model=join(testdatadir, "reference_test_model.py"), + scenario_tree=join(testdatadir, "reference_test_scenario_tree.dat"), + ) as factory: self.assertEqual(len(factory._archives), 0) self.assertTrue(factory.model_directory() is not None) self.assertTrue(factory.scenario_tree_directory() is not None) @@ -160,8 +170,8 @@ def test_init1(self): def test_init1_default(self): self.assertTrue("ReferenceModel" not in sys.modules) with ScenarioTreeInstanceFactory( - model=testdatadir, - scenario_tree=testdatadir) as factory: + model=testdatadir, scenario_tree=testdatadir + ) as factory: self.assertEqual(len(factory._archives), 0) self.assertTrue(factory.model_directory() is not None) self.assertTrue(factory.scenario_tree_directory() is not None) @@ -173,10 +183,9 @@ def test_init1_default(self): def test_init2(self): self.assertTrue("reference_test_model" not in sys.modules) with ScenarioTreeInstanceFactory( - model=join(testdatadir, - "archive_test.tgz,reference_test_model.py"), - scenario_tree=join(testdatadir, - "reference_test_scenario_tree.dat")) as factory: + model=join(testdatadir, "archive_test.tgz,reference_test_model.py"), + scenario_tree=join(testdatadir, "reference_test_scenario_tree.dat"), + ) as factory: self.assertEqual(len(factory._archives), 1) self.assertTrue(factory.model_directory() is not None) self.assertTrue(factory.scenario_tree_directory() is not None) @@ -188,8 +197,8 @@ def test_init2(self): def test_init2_default(self): self.assertTrue("ReferenceModel" not in sys.modules) with ScenarioTreeInstanceFactory( - model=join(testdatadir, "archive_test.tgz,"), - scenario_tree=testdatadir) as factory: + model=join(testdatadir, "archive_test.tgz,"), scenario_tree=testdatadir + ) as factory: self.assertEqual(len(factory._archives), 1) self.assertTrue(factory.model_directory() is not None) self.assertTrue(factory.scenario_tree_directory() is not None) @@ -201,10 +210,11 @@ def 
test_init2_default(self): def test_init3(self): self.assertTrue("reference_test_model" not in sys.modules) with ScenarioTreeInstanceFactory( - model=join(testdatadir, - "reference_test_model.py"), - scenario_tree=join(testdatadir, - "archive_test.tgz,reference_test_scenario_tree.dat")) as factory: + model=join(testdatadir, "reference_test_model.py"), + scenario_tree=join( + testdatadir, "archive_test.tgz,reference_test_scenario_tree.dat" + ), + ) as factory: self.assertEqual(len(factory._archives), 1) self.assertTrue(factory.model_directory() is not None) self.assertTrue(factory.scenario_tree_directory() is not None) @@ -216,9 +226,8 @@ def test_init3(self): def test_init3_default(self): self.assertTrue("ReferenceModel" not in sys.modules) with ScenarioTreeInstanceFactory( - model=testdatadir, - scenario_tree=join(testdatadir, - "archive_test.tgz")) as factory: + model=testdatadir, scenario_tree=join(testdatadir, "archive_test.tgz") + ) as factory: self.assertEqual(len(factory._archives), 1) self.assertTrue(factory.model_directory() is not None) self.assertTrue(factory.scenario_tree_directory() is not None) @@ -229,14 +238,14 @@ def test_init3_default(self): # scenario_tree: name of copy of archive with .dat file def test_init4(self): self.assertTrue("reference_test_model" not in sys.modules) - archive_copy = self._get_testfname_prefix()+".archive_copy.tgz" - shutil.copyfile(join(testdatadir, "archive_test.tgz"), - archive_copy) + archive_copy = self._get_testfname_prefix() + ".archive_copy.tgz" + shutil.copyfile(join(testdatadir, "archive_test.tgz"), archive_copy) with ScenarioTreeInstanceFactory( - model=join(testdatadir, - "archive_test.tgz,reference_test_model.py"), - scenario_tree=join(testdatadir, - archive_copy+",reference_test_scenario_tree.dat")) as factory: + model=join(testdatadir, "archive_test.tgz,reference_test_model.py"), + scenario_tree=join( + testdatadir, archive_copy + ",reference_test_scenario_tree.dat" + ), + ) as factory: self.assertEqual(len(factory._archives), 2) self.assertTrue(factory.model_directory() is not None) self.assertTrue(factory.scenario_tree_directory() is not None) @@ -249,13 +258,12 @@ def test_init4(self): # scenario_tree: name of copy of archive with ScenarioStructure.dat file def test_init4_default(self): self.assertTrue("ReferenceModel" not in sys.modules) - archive_copy = self._get_testfname_prefix()+".archive_copy.tgz" - shutil.copyfile(join(testdatadir, "archive_test.tgz"), - archive_copy) + archive_copy = self._get_testfname_prefix() + ".archive_copy.tgz" + shutil.copyfile(join(testdatadir, "archive_test.tgz"), archive_copy) with ScenarioTreeInstanceFactory( - model=join(testdatadir, "archive_test.tgz,"), - scenario_tree=join(testdatadir, - archive_copy+",")) as factory: + model=join(testdatadir, "archive_test.tgz,"), + scenario_tree=join(testdatadir, archive_copy + ","), + ) as factory: self.assertEqual(len(factory._archives), 2) self.assertTrue(factory.model_directory() is not None) self.assertTrue(factory.scenario_tree_directory() is not None) @@ -269,10 +277,11 @@ def test_init4_default(self): def test_init5(self): self.assertTrue("reference_test_model" not in sys.modules) with ScenarioTreeInstanceFactory( - model=join(testdatadir, - "archive_test.tgz,reference_test_model.py"), - scenario_tree=join(testdatadir, - "archive_test.tgz,reference_test_scenario_tree.dat")) as factory: + model=join(testdatadir, "archive_test.tgz,reference_test_model.py"), + scenario_tree=join( + testdatadir, "archive_test.tgz,reference_test_scenario_tree.dat" + ), 
+ ) as factory: self.assertEqual(len(factory._archives), 1) self.assertTrue(factory.model_directory() is not None) self.assertTrue(factory.scenario_tree_directory() is not None) @@ -285,8 +294,9 @@ def test_init5(self): def test_init5_default(self): self.assertTrue("ReferenceModel" not in sys.modules) with ScenarioTreeInstanceFactory( - model=join(testdatadir, "archive_test.tgz,"), - scenario_tree=join(testdatadir, "archive_test.tgz,")) as factory: + model=join(testdatadir, "archive_test.tgz,"), + scenario_tree=join(testdatadir, "archive_test.tgz,"), + ) as factory: self.assertEqual(len(factory._archives), 1) self.assertTrue(factory.model_directory() is not None) self.assertTrue(factory.scenario_tree_directory() is not None) @@ -298,39 +308,42 @@ def test_init5_default(self): # scenario_tree: Pyomo scenario tree model def test_init6(self): self.assertTrue("reference_test_model" not in sys.modules) - scenario_tree_model = CreateAbstractScenarioTreeModel().\ - create_instance( - join(testdatadir, "reference_test_scenario_tree.dat")) + scenario_tree_model = CreateAbstractScenarioTreeModel().create_instance( + join(testdatadir, "reference_test_scenario_tree.dat") + ) with ScenarioTreeInstanceFactory( - model=join(testdatadir, "reference_test_model.py"), - scenario_tree=scenario_tree_model) as factory: + model=join(testdatadir, "reference_test_model.py"), + scenario_tree=scenario_tree_model, + ) as factory: self.assertTrue(factory.model_directory() is not None) self.assertTrue(factory.scenario_tree_directory() is None) self._check_factory(factory) with self.assertRaises(TypeError): with ScenarioTreeInstanceFactory( - model=join(testdatadir, "reference_test_model.py"), - scenario_tree=int): + model=join(testdatadir, "reference_test_model.py"), scenario_tree=int + ): pass with self.assertRaises(ValueError): with ScenarioTreeInstanceFactory( - model=join(testdatadir, "reference_test_model.py"), - scenario_tree=None): + model=join(testdatadir, "reference_test_model.py"), scenario_tree=None + ): pass with self.assertRaises(TypeError): with ScenarioTreeInstanceFactory( - model=None, - scenario_tree=scenario_tree_model): + model=None, scenario_tree=scenario_tree_model + ): pass with self.assertRaises(IOError): with ScenarioTreeInstanceFactory( - model=join(testdatadir, "reference_test_model_does_not_exist.py"), - scenario_tree=scenario_tree_model): + model=join(testdatadir, "reference_test_model_does_not_exist.py"), + scenario_tree=scenario_tree_model, + ): pass with self.assertRaises(ValueError): with ScenarioTreeInstanceFactory( - model=join(testdatadir, "reference_test_model.py"), - scenario_tree=CreateAbstractScenarioTreeModel()): + model=join(testdatadir, "reference_test_model.py"), + scenario_tree=CreateAbstractScenarioTreeModel(), + ): pass self.assertEqual(len(factory._archives), 0) self.assertTrue("reference_test_model" in sys.modules) @@ -339,26 +352,27 @@ def test_init6(self): # scenario_tree: Pyomo scenario tree model def test_init7(self): self.assertTrue("reference_test_model" not in sys.modules) - scenario_tree_model = CreateAbstractScenarioTreeModel().\ - create_instance( - join(testdatadir, "reference_test_scenario_tree.dat")) + scenario_tree_model = CreateAbstractScenarioTreeModel().create_instance( + join(testdatadir, "reference_test_scenario_tree.dat") + ) with self.assertRaises(ValueError): with ScenarioTreeInstanceFactory( - model=reference_test_model, - scenario_tree=scenario_tree_model) as factory: + model=reference_test_model, scenario_tree=scenario_tree_model + ) as factory: 
pass with ScenarioTreeInstanceFactory( - model=reference_test_model, - scenario_tree=scenario_tree_model, - data=testdatadir) as factory: + model=reference_test_model, + scenario_tree=scenario_tree_model, + data=testdatadir, + ) as factory: self.assertTrue(factory.model_directory() is None) self.assertTrue(factory.scenario_tree_directory() is None) self._check_factory(factory) self.assertEqual(factory._closed, True) with ScenarioTreeInstanceFactory( - model=reference_test_model, - scenario_tree=join(testdatadir, - "reference_test_scenario_tree.dat")) as factory: + model=reference_test_model, + scenario_tree=join(testdatadir, "reference_test_scenario_tree.dat"), + ) as factory: self.assertTrue(factory.model_directory() is None) self.assertTrue(factory.scenario_tree_directory() is not None) self._check_factory(factory) @@ -370,10 +384,10 @@ def test_init8(self): self.assertTrue("reference_test_model" not in sys.modules) self.assertTrue("reference_test_scenario_tree_model" not in sys.modules) with ScenarioTreeInstanceFactory( - model=reference_test_model, - scenario_tree=join(testdatadir, - "reference_test_scenario_tree_model.py"), - data=testdatadir) as factory: + model=reference_test_model, + scenario_tree=join(testdatadir, "reference_test_scenario_tree_model.py"), + data=testdatadir, + ) as factory: self.assertTrue(factory.model_directory() is None) self.assertTrue(factory.scenario_tree_directory() is not None) self.assertTrue(factory._scenario_tree_module is not None) @@ -388,10 +402,9 @@ def test_init8(self): def test_init9(self): self.assertTrue("reference_test_model_with_callback" not in sys.modules) with ScenarioTreeInstanceFactory( - model=join(testdatadir, - "reference_test_model_with_callback.py"), - scenario_tree=join(testdatadir, - "reference_test_scenario_tree.dat")) as factory: + model=join(testdatadir, "reference_test_model_with_callback.py"), + scenario_tree=join(testdatadir, "reference_test_scenario_tree.dat"), + ) as factory: self.assertEqual(len(factory._archives), 0) self.assertTrue(factory.model_directory() is not None) self.assertTrue(factory.scenario_tree_directory() is not None) @@ -405,15 +418,14 @@ def test_init9(self): @unittest.skipIf(not yaml_available, "PyYAML is not available") def test_init10(self): with ScenarioTreeInstanceFactory( - model=testdatadir, - scenario_tree=join(testdatadir, - "reference_test_scenario_tree.dat"), - data=join(testdatadir, "yaml_data")) as factory: + model=testdatadir, + scenario_tree=join(testdatadir, "reference_test_scenario_tree.dat"), + data=join(testdatadir, "yaml_data"), + ) as factory: self.assertEqual(len(factory._archives), 0) self.assertTrue(factory.model_directory() is not None) self.assertTrue(factory.scenario_tree_directory() is not None) - self.assertTrue(factory.data_directory(), - join(testdatadir, "yaml_data")) + self.assertTrue(factory.data_directory(), join(testdatadir, "yaml_data")) self._check_factory(factory) self.assertEqual(len(factory._archives), 0) @@ -422,14 +434,15 @@ def test_init10(self): # data: name of directory with .dat files def test_init11(self): self.assertTrue("reference_test_model" not in sys.modules) - scenario_tree_model = CreateAbstractScenarioTreeModel().\ - create_instance( - join(testdatadir, "reference_test_scenario_tree.dat")) + scenario_tree_model = CreateAbstractScenarioTreeModel().create_instance( + join(testdatadir, "reference_test_scenario_tree.dat") + ) scenario_tree_model.ScenarioBasedData = False with ScenarioTreeInstanceFactory( - model=reference_test_model, - 
scenario_tree=scenario_tree_model, - data=testdatadir) as factory: + model=reference_test_model, + scenario_tree=scenario_tree_model, + data=testdatadir, + ) as factory: self.assertTrue(factory.model_directory() is None) self.assertTrue(factory.scenario_tree_directory() is None) self._check_factory(factory) @@ -440,9 +453,10 @@ def test_init11(self): # scenario_tree: Pyomo scenario tree model def test_init12(self): self.assertTrue("reference_test_model" not in sys.modules) - scenario_tree_model = CreateAbstractScenarioTreeModel().\ - create_instance( - join(testdatadir, "reference_test_scenario_tree.dat")) + scenario_tree_model = CreateAbstractScenarioTreeModel().create_instance( + join(testdatadir, "reference_test_scenario_tree.dat") + ) + def scenario_model_callback(scenario_tree, scenario_name, node_list): self.assertIs(scenario_tree, scenario_tree_model) instance = reference_test_model.create_instance() @@ -454,9 +468,10 @@ def scenario_model_callback(scenario_tree, scenario_name, node_list): assert scenario_name == "s3" instance.p = 3.0 return instance + with ScenarioTreeInstanceFactory( - model=scenario_model_callback, - scenario_tree=scenario_tree_model) as factory: + model=scenario_model_callback, scenario_tree=scenario_tree_model + ) as factory: self.assertTrue(factory.model_directory() is None) self.assertTrue(factory.scenario_tree_directory() is None) self._check_factory(factory) @@ -467,19 +482,19 @@ def scenario_model_callback(scenario_tree, scenario_name, node_list): # scenario_tree: Pyomo scenario tree model def test_init13(self): model = reference_test_model.create_instance() - scenario_tree_model = CreateAbstractScenarioTreeModel().\ - create_instance( - join(testdatadir, "reference_test_scenario_tree.dat")) + scenario_tree_model = CreateAbstractScenarioTreeModel().create_instance( + join(testdatadir, "reference_test_scenario_tree.dat") + ) with ScenarioTreeInstanceFactory( - model=model, - scenario_tree=scenario_tree_model) as factory: + model=model, scenario_tree=scenario_tree_model + ) as factory: self.assertTrue(factory.model_directory() is None) self.assertTrue(factory.scenario_tree_directory() is None) scenario_tree = factory.generate_scenario_tree() instances = factory.construct_instances_for_scenario_tree( - scenario_tree, - verbose=True) + scenario_tree, verbose=True + ) self.assertEqual(len(instances), 3) self.assertEqual(instances["s1"].p(), model.p()) self.assertEqual(instances["s2"].p(), model.p()) @@ -493,21 +508,21 @@ def test_init13(self): # data: name of a directory with .dat files def test_init14(self): self.assertTrue("reference_test_model" not in sys.modules) - scenario_tree_model = CreateAbstractScenarioTreeModel().\ - create_instance( - join(testdatadir, "reference_test_scenario_tree.dat")) + scenario_tree_model = CreateAbstractScenarioTreeModel().create_instance( + join(testdatadir, "reference_test_scenario_tree.dat") + ) with ScenarioTreeInstanceFactory( - model=reference_test_model, - scenario_tree=scenario_tree_model, - data=testdatadir) as factory: + model=reference_test_model, + scenario_tree=scenario_tree_model, + data=testdatadir, + ) as factory: scenario_tree = factory.generate_scenario_tree() self.assertEqual(factory._closed, True) self.assertEqual(len(factory._archives), 0) # start with a scenario tree (not a scenario tree model) with ScenarioTreeInstanceFactory( - model=reference_test_model, - scenario_tree=scenario_tree, - data=testdatadir) as factory: + model=reference_test_model, scenario_tree=scenario_tree, data=testdatadir + ) as 
factory: self._check_factory(factory) self.assertEqual(factory._closed, True) self.assertEqual(len(factory._archives), 0) @@ -519,10 +534,9 @@ def test_init14(self): def test_init15(self): self.assertTrue("reference_test_model" not in sys.modules) with ScenarioTreeInstanceFactory( - model=join(testdatadir, - "reference_test_model.py"), - scenario_tree=join(testdatadir, - "ScenarioStructure.py")) as factory: + model=join(testdatadir, "reference_test_model.py"), + scenario_tree=join(testdatadir, "ScenarioStructure.py"), + ) as factory: self.assertEqual(len(factory._archives), 0) self.assertTrue(factory.model_directory() is not None) self.assertTrue(factory.scenario_tree_directory() is not None) @@ -535,12 +549,10 @@ def test_init15(self): def test_init16(self): self.assertTrue("reference_test_model" not in sys.modules) self.assertTrue("ScenarioStructure" not in sys.modules) - nx_tree = import_file(os.path.join(testdatadir, - "ScenarioStructure.py")).G + nx_tree = import_file(os.path.join(testdatadir, "ScenarioStructure.py")).G with ScenarioTreeInstanceFactory( - model=join(testdatadir, - "reference_test_model.py"), - scenario_tree=nx_tree) as factory: + model=join(testdatadir, "reference_test_model.py"), scenario_tree=nx_tree + ) as factory: self.assertEqual(len(factory._archives), 0) self.assertTrue(factory.model_directory() is not None) self.assertTrue(factory.scenario_tree_directory() is None) @@ -554,12 +566,10 @@ def test_init16(self): def test_init17(self): self.assertTrue("reference_test_model" not in sys.modules) self.assertTrue("ScenarioStructure" not in sys.modules) - nx_tree = import_file(os.path.join(testdatadir, - "ScenarioStructure.py")).G + nx_tree = import_file(os.path.join(testdatadir, "ScenarioStructure.py")).G with ScenarioTreeInstanceFactory( - model=join(testdatadir, - "reference_test_model.py"), - scenario_tree=nx_tree) as factory: + model=join(testdatadir, "reference_test_model.py"), scenario_tree=nx_tree + ) as factory: self.assertEqual(len(factory._archives), 0) self.assertTrue(factory.model_directory() is not None) self.assertTrue(factory.scenario_tree_directory() is None) @@ -596,8 +606,8 @@ def test_init17(self): def test_init18(self): self.assertTrue("reference_test_model" not in sys.modules) self.assertTrue("ScenarioStructure" not in sys.modules) - nx_tree = import_file(os.path.join(testdatadir, - "ScenarioStructure.py")).G + nx_tree = import_file(os.path.join(testdatadir, "ScenarioStructure.py")).G + def scenario_model_callback(scenario_tree, scenario_name, node_list): self.assertIs(scenario_tree, nx_tree) instance = reference_test_model.create_instance() @@ -609,9 +619,10 @@ def scenario_model_callback(scenario_tree, scenario_name, node_list): assert scenario_name == "s3" instance.p = 3.0 return instance + with ScenarioTreeInstanceFactory( - model=scenario_model_callback, - scenario_tree=nx_tree) as factory: + model=scenario_model_callback, scenario_tree=nx_tree + ) as factory: self.assertTrue(factory.model_directory() is None) self.assertTrue(factory.scenario_tree_directory() is None) self._check_factory(factory) @@ -622,6 +633,7 @@ def scenario_model_callback(scenario_tree, scenario_name, node_list): # scenario_tree: name of .dat file def test_init19(self): self.assertTrue("reference_test_model" not in sys.modules) + def scenario_model_callback(scenario_tree, scenario_name, node_list): self.assertTrue(isinstance(scenario_tree, ScenarioTree)) instance = reference_test_model.create_instance() @@ -633,10 +645,11 @@ def scenario_model_callback(scenario_tree, 
scenario_name, node_list): assert scenario_name == "s3" instance.p = 3.0 return instance + with ScenarioTreeInstanceFactory( - model=scenario_model_callback, - scenario_tree=join(testdatadir, - "reference_test_scenario_tree.dat")) as factory: + model=scenario_model_callback, + scenario_tree=join(testdatadir, "reference_test_scenario_tree.dat"), + ) as factory: self.assertTrue(factory.model_directory() is None) self.assertTrue(factory.scenario_tree_directory() is not None) self._check_factory(factory) @@ -647,6 +660,7 @@ def scenario_model_callback(scenario_tree, scenario_name, node_list): # scenario_tree: PySP scenario tree def test_init20(self): self.assertTrue("reference_test_model" not in sys.modules) + def scenario_model_callback(scenario_tree, scenario_name, node_list): self.assertTrue(isinstance(scenario_tree, ScenarioTree)) instance = reference_test_model.create_instance() @@ -658,14 +672,15 @@ def scenario_model_callback(scenario_tree, scenario_name, node_list): assert scenario_name == "s3" instance.p = 3.0 return instance + with ScenarioTreeInstanceFactory( - model=scenario_model_callback, - scenario_tree=join(testdatadir, - "reference_test_scenario_tree.dat")) as factory: + model=scenario_model_callback, + scenario_tree=join(testdatadir, "reference_test_scenario_tree.dat"), + ) as factory: my_scenario_tree = factory.generate_scenario_tree() with ScenarioTreeInstanceFactory( - model=scenario_model_callback, - scenario_tree=my_scenario_tree) as factory: + model=scenario_model_callback, scenario_tree=my_scenario_tree + ) as factory: self.assertTrue(factory.model_directory() is None) self.assertTrue(factory.scenario_tree_directory() is None) self._check_factory(factory) @@ -678,10 +693,9 @@ def scenario_model_callback(scenario_tree, scenario_name, node_list): def test_init21(self): self.assertTrue("both_callbacks" not in sys.modules) with ScenarioTreeInstanceFactory( - model=join(testdatadir, - "both_callbacks.py"), - scenario_tree=join(testdatadir, - "both_callbacks.py")) as factory: + model=join(testdatadir, "both_callbacks.py"), + scenario_tree=join(testdatadir, "both_callbacks.py"), + ) as factory: self.assertEqual(len(factory._archives), 0) self.assertTrue(factory.model_directory() is not None) self.assertTrue(factory.scenario_tree_directory() is not None) @@ -695,9 +709,8 @@ def test_init21(self): def test_init22(self): self.assertTrue("both_callbacks" not in sys.modules) with ScenarioTreeInstanceFactory( - model=join(testdatadir, - "both_callbacks.py"), - scenario_tree=None) as factory: + model=join(testdatadir, "both_callbacks.py"), scenario_tree=None + ) as factory: self.assertEqual(len(factory._archives), 0) self.assertTrue(factory.model_directory() is not None) self.assertTrue(factory.scenario_tree_directory() is not None) @@ -705,5 +718,6 @@ def test_init22(self): self.assertEqual(len(factory._archives), 0) self.assertTrue("both_callbacks" in sys.modules) + if __name__ == "__main__": unittest.main() diff --git a/mpisppy/utils/pysp_model/tests/test_scenariotree.py b/mpisppy/utils/pysp_model/tests/test_scenariotree.py index 79149da82..54a34d2c1 100644 --- a/mpisppy/utils/pysp_model/tests/test_scenariotree.py +++ b/mpisppy/utils/pysp_model/tests/test_scenariotree.py @@ -10,8 +10,8 @@ # # Pyomo: Python Optimization Modeling Objects # Copyright 2017 National Technology and Engineering Solutions of Sandia, LLC -# Under the terms of Contract DE-NA0003525 with National Technology and -# Engineering Solutions of Sandia, LLC, the U.S. 
Government retains certain +# Under the terms of Contract DE-NA0003525 with National Technology and +# Engineering Solutions of Sandia, LLC, the U.S. Government retains certain # rights in this software. # This software is distributed under the 3-clause BSD License. # ___________________________________________________________________________ @@ -21,42 +21,39 @@ import pyomo.common.unittest as unittest -from mpisppy.utils.pysp_model.tree_structure_model import \ - (ScenarioTreeModelFromNetworkX, - CreateConcreteTwoStageScenarioTreeModel) +from mpisppy.utils.pysp_model.tree_structure_model import ( + ScenarioTreeModelFromNetworkX, + CreateConcreteTwoStageScenarioTreeModel, +) from mpisppy.utils.pysp_model.tree_structure import ScenarioTree -from mpisppy.utils.pysp_model.pysp_model import _get_nonant_list, _get_derived_nonant_list -from pyomo.core import (ConcreteModel, - Set, - Var, - Expression, - Objective, - Block, - value) -from pyomo.common.dependencies import ( - networkx, networkx_available as has_networkx +from mpisppy.utils.pysp_model.pysp_model import ( + _get_nonant_list, + _get_derived_nonant_list, ) +from pyomo.core import ConcreteModel, Set, Var, Expression, Objective, Block, value +from pyomo.common.dependencies import networkx, networkx_available as has_networkx + def _get_names(iterable): return [_.name for _ in iterable] -class TestScenarioTree(unittest.TestCase): +class TestScenarioTree(unittest.TestCase): def _get_block_model(self): model = ConcreteModel() - model.s = Set(initialize=[1,2]) + model.s = Set(initialize=[1, 2]) b = Block(concrete=True) - b.s = Set(initialize=[1,2]) + b.s = Set(initialize=[1, 2]) b.x = Var() b.X = Var(model.s) model.b1 = b.clone() model.b2 = b.clone() model.b3 = b.clone() model.b4 = b.clone() - model.B1 = Block(model.s, rule=lambda _,i: b.clone()) - model.B2 = Block(model.s, rule=lambda _,i: b.clone()) - model.B3 = Block(model.s, rule=lambda _,i: b.clone()) - model.B4 = Block(model.s, rule=lambda _,i: b.clone()) + model.B1 = Block(model.s, rule=lambda _, i: b.clone()) + model.B2 = Block(model.s, rule=lambda _, i: b.clone()) + model.B3 = Block(model.s, rule=lambda _, i: b.clone()) + model.B4 = Block(model.s, rule=lambda _, i: b.clone()) model.FirstStageCost = Expression(expr=0.0) model.SecondStageCost = Expression(expr=0.0) model.obj = Objective(expr=0.0) @@ -64,12 +61,12 @@ def _get_block_model(self): def test_indexedblock_noindextemplate(self): st_model = CreateConcreteTwoStageScenarioTreeModel(1) - st_model.StageVariables['Stage1'].add("B1") - st_model.StageDerivedVariables['Stage1'].add("B2") - st_model.NodeVariables['RootNode'].add("B3") - st_model.NodeDerivedVariables['RootNode'].add("B4") - st_model.StageCost['Stage1'] = "FirstStageCost" - st_model.StageCost['Stage2'] = "SecondStageCost" + st_model.StageVariables["Stage1"].add("B1") + st_model.StageDerivedVariables["Stage1"].add("B2") + st_model.NodeVariables["RootNode"].add("B3") + st_model.NodeDerivedVariables["RootNode"].add("B4") + st_model.StageCost["Stage1"] = "FirstStageCost" + st_model.StageCost["Stage2"] = "SecondStageCost" scenario_tree = ScenarioTree(scenariotreeinstance=st_model) self.assertEqual(len(scenario_tree.stages), 2) @@ -85,42 +82,49 @@ def test_indexedblock_noindextemplate(self): assert len(root_derived_nonant_names) == 12 for name in ( - "B1[1].x", "B1[2].x", - "B3[1].x", "B3[2].x", - ): + "B1[1].x", + "B1[2].x", + "B3[1].x", + "B3[2].x", + ): assert name in root_nonant_names for name in ( - "B1[1].X", "B1[2].X", - "B3[1].X", "B3[2].X", - ): + "B1[1].X", + 
"B1[2].X", + "B3[1].X", + "B3[2].X", + ): var = model.find_component(name) for vardata in var.values(): assert vardata.name in root_nonant_names for name in ( - "B2[1].x", "B2[2].x", - "B4[1].x", "B4[2].x", - ): + "B2[1].x", + "B2[2].x", + "B4[1].x", + "B4[2].x", + ): assert name in root_derived_nonant_names for name in ( - "B2[1].X", "B2[2].X", - "B4[1].X", "B4[2].X", - ): + "B2[1].X", + "B2[2].X", + "B4[1].X", + "B4[2].X", + ): var = model.find_component(name) for vardata in var.values(): assert vardata.name in root_derived_nonant_names - def test_indexedblock_wildcardtemplate(self): st_model = CreateConcreteTwoStageScenarioTreeModel(1) - st_model.StageVariables['Stage1'].add("B1[*]") - st_model.StageDerivedVariables['Stage1'].add("B2[*]") - st_model.NodeVariables['RootNode'].add("B3[*]") - st_model.NodeDerivedVariables['RootNode'].add("B4[*]") - st_model.StageCost['Stage1'] = "FirstStageCost" - st_model.StageCost['Stage2'] = "SecondStageCost" + st_model.StageVariables["Stage1"].add("B1[*]") + st_model.StageDerivedVariables["Stage1"].add("B2[*]") + st_model.NodeVariables["RootNode"].add("B3[*]") + st_model.NodeDerivedVariables["RootNode"].add("B4[*]") + st_model.StageCost["Stage1"] = "FirstStageCost" + st_model.StageCost["Stage2"] = "SecondStageCost" scenario_tree = ScenarioTree(scenariotreeinstance=st_model) self.assertEqual(len(scenario_tree.stages), 2) @@ -136,42 +140,49 @@ def test_indexedblock_wildcardtemplate(self): assert len(root_derived_nonant_names) == 12 for name in ( - "B1[1].x", "B1[2].x", - "B3[1].x", "B3[2].x", - ): + "B1[1].x", + "B1[2].x", + "B3[1].x", + "B3[2].x", + ): assert name in root_nonant_names for name in ( - "B1[1].X", "B1[2].X", - "B3[1].X", "B3[2].X", - ): + "B1[1].X", + "B1[2].X", + "B3[1].X", + "B3[2].X", + ): var = model.find_component(name) for vardata in var.values(): assert vardata.name in root_nonant_names for name in ( - "B2[1].x", "B2[2].x", - "B4[1].x", "B4[2].x", - ): + "B2[1].x", + "B2[2].x", + "B4[1].x", + "B4[2].x", + ): assert name in root_derived_nonant_names for name in ( - "B2[1].X", "B2[2].X", - "B4[1].X", "B4[2].X", - ): + "B2[1].X", + "B2[2].X", + "B4[1].X", + "B4[2].X", + ): var = model.find_component(name) for vardata in var.values(): assert vardata.name in root_derived_nonant_names - def test_singletonblock_wildcardtemplate(self): st_model = CreateConcreteTwoStageScenarioTreeModel(1) - st_model.StageVariables['Stage1'].add("b1[*]") - st_model.StageDerivedVariables['Stage1'].add("b2[*]") - st_model.NodeVariables['RootNode'].add("b3[*]") - st_model.NodeDerivedVariables['RootNode'].add("b4[*]") - st_model.StageCost['Stage1'] = "FirstStageCost" - st_model.StageCost['Stage2'] = "SecondStageCost" + st_model.StageVariables["Stage1"].add("b1[*]") + st_model.StageDerivedVariables["Stage1"].add("b2[*]") + st_model.NodeVariables["RootNode"].add("b3[*]") + st_model.NodeDerivedVariables["RootNode"].add("b4[*]") + st_model.StageCost["Stage1"] = "FirstStageCost" + st_model.StageCost["Stage2"] = "SecondStageCost" scenario_tree = ScenarioTree(scenariotreeinstance=st_model) self.assertEqual(len(scenario_tree.stages), 2) @@ -199,15 +210,14 @@ def test_singletonblock_wildcardtemplate(self): for vardata in var.values(): assert vardata.name in root_derived_nonant_names - def test_singletonblock_noindextemplate(self): st_model = CreateConcreteTwoStageScenarioTreeModel(1) - st_model.StageVariables['Stage1'].add("b1") - st_model.StageDerivedVariables['Stage1'].add("b2") - st_model.NodeVariables['RootNode'].add("b3") - 
st_model.NodeDerivedVariables['RootNode'].add("b4") - st_model.StageCost['Stage1'] = "FirstStageCost" - st_model.StageCost['Stage2'] = "SecondStageCost" + st_model.StageVariables["Stage1"].add("b1") + st_model.StageDerivedVariables["Stage1"].add("b2") + st_model.NodeVariables["RootNode"].add("b3") + st_model.NodeDerivedVariables["RootNode"].add("b4") + st_model.StageCost["Stage1"] = "FirstStageCost" + st_model.StageCost["Stage2"] = "SecondStageCost" scenario_tree = ScenarioTree(scenariotreeinstance=st_model) self.assertEqual(len(scenario_tree.stages), 2) @@ -235,15 +245,14 @@ def test_singletonblock_noindextemplate(self): for vardata in var.values(): assert vardata.name in root_derived_nonant_names - def test_singletonvar_noindextemplate(self): st_model = CreateConcreteTwoStageScenarioTreeModel(1) - st_model.StageVariables['Stage1'].add("x") - st_model.StageDerivedVariables['Stage1'].add("y") - st_model.NodeVariables['RootNode'].add("z") - st_model.NodeDerivedVariables['RootNode'].add("q") - st_model.StageCost['Stage1'] = "FirstStageCost" - st_model.StageCost['Stage2'] = "SecondStageCost" + st_model.StageVariables["Stage1"].add("x") + st_model.StageDerivedVariables["Stage1"].add("y") + st_model.NodeVariables["RootNode"].add("z") + st_model.NodeDerivedVariables["RootNode"].add("q") + st_model.StageCost["Stage1"] = "FirstStageCost" + st_model.StageCost["Stage2"] = "SecondStageCost" scenario_tree = ScenarioTree(scenariotreeinstance=st_model) self.assertEqual(len(scenario_tree.stages), 2) @@ -262,7 +271,7 @@ def test_singletonvar_noindextemplate(self): root = scenario_tree.findRootNode() root_nonant_names = _get_names(_get_nonant_list(model, root)) root_derived_nonant_names = _get_names(_get_derived_nonant_list(model, root)) - assert len(root_nonant_names) == 2 + assert len(root_nonant_names) == 2 assert len(root_derived_nonant_names) == 2 for name in ("x", "z"): @@ -271,15 +280,14 @@ def test_singletonvar_noindextemplate(self): for name in ("y", "q"): assert name in root_derived_nonant_names - def test_singletonvar_wildcardtemplate(self): st_model = CreateConcreteTwoStageScenarioTreeModel(1) - st_model.StageVariables['Stage1'].add("x[*]") - st_model.StageDerivedVariables['Stage1'].add("y[*]") - st_model.NodeVariables['RootNode'].add("z[*]") - st_model.NodeDerivedVariables['RootNode'].add("q[*]") - st_model.StageCost['Stage1'] = "FirstStageCost" - st_model.StageCost['Stage2'] = "SecondStageCost" + st_model.StageVariables["Stage1"].add("x[*]") + st_model.StageDerivedVariables["Stage1"].add("y[*]") + st_model.NodeVariables["RootNode"].add("z[*]") + st_model.NodeDerivedVariables["RootNode"].add("q[*]") + st_model.StageCost["Stage1"] = "FirstStageCost" + st_model.StageCost["Stage2"] = "SecondStageCost" scenario_tree = ScenarioTree(scenariotreeinstance=st_model) self.assertEqual(len(scenario_tree.stages), 2) @@ -307,15 +315,14 @@ def test_singletonvar_wildcardtemplate(self): for name in ("y", "q"): assert name in root_derived_nonant_names - def test_multiindexedvar_singlewildcardtemplate(self): st_model = CreateConcreteTwoStageScenarioTreeModel(1) - st_model.StageVariables['Stage1'].add("x[*,* ]") - st_model.StageDerivedVariables['Stage1'].add("y[ *,*]") - st_model.NodeVariables['RootNode'].add("z[*,*]") - st_model.NodeDerivedVariables['RootNode'].add("q[ * , * ]") - st_model.StageCost['Stage1'] = "FirstStageCost" - st_model.StageCost['Stage2'] = "SecondStageCost" + st_model.StageVariables["Stage1"].add("x[*,* ]") + st_model.StageDerivedVariables["Stage1"].add("y[ *,*]") + 
st_model.NodeVariables["RootNode"].add("z[*,*]") + st_model.NodeDerivedVariables["RootNode"].add("q[ * , * ]") + st_model.StageCost["Stage1"] = "FirstStageCost" + st_model.StageCost["Stage2"] = "SecondStageCost" scenario_tree = ScenarioTree(scenariotreeinstance=st_model) self.assertEqual(len(scenario_tree.stages), 2) @@ -323,7 +330,7 @@ def test_multiindexedvar_singlewildcardtemplate(self): self.assertEqual(len(scenario_tree.scenarios), 1) model = ConcreteModel() - model.s = Set(initialize=[(1,'a'),(2,'b'),(3,'c')]) + model.s = Set(initialize=[(1, "a"), (2, "b"), (3, "c")]) model.x = Var(model.s) model.y = Var(model.s) model.z = Var(model.s) @@ -349,15 +356,14 @@ def test_multiindexedvar_singlewildcardtemplate(self): var = indexed_var[index] assert var.name in root_derived_nonant_names - def test_indexedvar_indextemplate(self): st_model = CreateConcreteTwoStageScenarioTreeModel(1) - st_model.StageVariables['Stage1'].add("x[*]") - st_model.StageDerivedVariables['Stage1'].add("y[*]") - st_model.NodeVariables['RootNode'].add("z[*]") - st_model.NodeDerivedVariables['RootNode'].add("q[*]") - st_model.StageCost['Stage1'] = "FirstStageCost" - st_model.StageCost['Stage2'] = "SecondStageCost" + st_model.StageVariables["Stage1"].add("x[*]") + st_model.StageDerivedVariables["Stage1"].add("y[*]") + st_model.NodeVariables["RootNode"].add("z[*]") + st_model.NodeDerivedVariables["RootNode"].add("q[*]") + st_model.StageCost["Stage1"] = "FirstStageCost" + st_model.StageCost["Stage2"] = "SecondStageCost" scenario_tree = ScenarioTree(scenariotreeinstance=st_model) self.assertEqual(len(scenario_tree.stages), 2) @@ -365,7 +371,7 @@ def test_indexedvar_indextemplate(self): self.assertEqual(len(scenario_tree.scenarios), 1) model = ConcreteModel() - model.s = Set(initialize=[1,2,3]) + model.s = Set(initialize=[1, 2, 3]) model.x = Var(model.s) model.y = Var(model.s) model.z = Var(model.s) @@ -391,15 +397,14 @@ def test_indexedvar_indextemplate(self): var = indexed_var[index] assert var.name in root_derived_nonant_names - def test_indexedvar_noindextemplate(self): st_model = CreateConcreteTwoStageScenarioTreeModel(1) - st_model.StageVariables['Stage1'].add("x") - st_model.StageDerivedVariables['Stage1'].add("y") - st_model.NodeVariables['RootNode'].add("z") - st_model.NodeDerivedVariables['RootNode'].add("q") - st_model.StageCost['Stage1'] = "FirstStageCost" - st_model.StageCost['Stage2'] = "SecondStageCost" + st_model.StageVariables["Stage1"].add("x") + st_model.StageDerivedVariables["Stage1"].add("y") + st_model.NodeVariables["RootNode"].add("z") + st_model.NodeDerivedVariables["RootNode"].add("q") + st_model.StageCost["Stage1"] = "FirstStageCost" + st_model.StageCost["Stage2"] = "SecondStageCost" scenario_tree = ScenarioTree(scenariotreeinstance=st_model) self.assertEqual(len(scenario_tree.stages), 2) @@ -407,7 +412,7 @@ def test_indexedvar_noindextemplate(self): self.assertEqual(len(scenario_tree.scenarios), 1) model = ConcreteModel() - model.s = Set(initialize=[1,2,3]) + model.s = Set(initialize=[1, 2, 3]) model.x = Var(model.s) model.y = Var(model.s) model.z = Var(model.s) @@ -436,7 +441,6 @@ def test_indexedvar_noindextemplate(self): @unittest.skipIf(not has_networkx, "Requires networkx module") class TestScenarioTreeFromNetworkX(unittest.TestCase): - def test_empty(self): G = networkx.DiGraph() with self.assertRaises(networkx.NetworkXPointlessConcept): @@ -481,9 +485,7 @@ def test_missing_node_name(self): G.add_node("C") G.add_edge("R", "C", weight=1) with self.assertRaises(KeyError): - 
ScenarioTreeModelFromNetworkX( - G, - node_name_attribute="name") + ScenarioTreeModelFromNetworkX(G, node_name_attribute="name") def test_missing_scenario_name(self): G = networkx.DiGraph() @@ -491,9 +493,7 @@ def test_missing_scenario_name(self): G.add_node("C") G.add_edge("R", "C", weight=1) with self.assertRaises(KeyError): - ScenarioTreeModelFromNetworkX( - G, - scenario_name_attribute="name") + ScenarioTreeModelFromNetworkX(G, scenario_name_attribute="name") def test_missing_weight(self): G = networkx.DiGraph() @@ -505,9 +505,13 @@ def test_missing_weight(self): def test_bad_weight1(self): G = networkx.DiGraph() - G.add_node("R",) - G.add_node("C",) - G.add_edge("R", "C",weight=0.8) + G.add_node( + "R", + ) + G.add_node( + "C", + ) + G.add_edge("R", "C", weight=0.8) with self.assertRaises(ValueError): ScenarioTreeModelFromNetworkX(G) @@ -523,12 +527,13 @@ def test_bad_weight2(self): def test_bad_custom_stage_names1(self): G = networkx.DiGraph() - G.add_node("R",) + G.add_node( + "R", + ) G.add_node("C1") G.add_edge("R", "C1", weight=1.0) with self.assertRaises(ValueError): - ScenarioTreeModelFromNetworkX( - G, stage_names=["Stage1"]) + ScenarioTreeModelFromNetworkX(G, stage_names=["Stage1"]) def test_bad_custom_stage_names2(self): G = networkx.DiGraph() @@ -536,8 +541,7 @@ def test_bad_custom_stage_names2(self): G.add_node("C1") G.add_edge("R", "C1", weight=1.0) with self.assertRaises(ValueError): - ScenarioTreeModelFromNetworkX( - G, stage_names=["Stage1","Stage1"]) + ScenarioTreeModelFromNetworkX(G, stage_names=["Stage1", "Stage1"]) def test_two_stage(self): G = networkx.DiGraph() @@ -547,24 +551,16 @@ def test_two_stage(self): G.add_node("Child2") G.add_edge("Root", "Child2", weight=0.2) model = ScenarioTreeModelFromNetworkX(G) + self.assertEqual(sorted(list(model.Stages)), sorted(["Stage1", "Stage2"])) self.assertEqual( - sorted(list(model.Stages)), - sorted(["Stage1", "Stage2"])) - self.assertEqual( - sorted(list(model.Nodes)), - sorted(["Root", "Child1", "Child2"])) - self.assertEqual( - sorted(list(model.Children["Root"])), - sorted(["Child1", "Child2"])) - self.assertEqual( - sorted(list(model.Children["Child1"])), - sorted([])) + sorted(list(model.Nodes)), sorted(["Root", "Child1", "Child2"]) + ) self.assertEqual( - sorted(list(model.Children["Child2"])), - sorted([])) - self.assertEqual( - sorted(list(model.Scenarios)), - sorted(["Child1", "Child2"])) + sorted(list(model.Children["Root"])), sorted(["Child1", "Child2"]) + ) + self.assertEqual(sorted(list(model.Children["Child1"])), sorted([])) + self.assertEqual(sorted(list(model.Children["Child2"])), sorted([])) + self.assertEqual(sorted(list(model.Scenarios)), sorted(["Child1", "Child2"])) self.assertEqual(value(model.ConditionalProbability["Root"]), 1.0) self.assertEqual(value(model.ConditionalProbability["Child1"]), 0.8) self.assertEqual(value(model.ConditionalProbability["Child2"]), 0.2) @@ -578,39 +574,22 @@ def test_two_stage(self): def test_two_stage_more_node_attributes(self): G = networkx.DiGraph() - G.add_node("Root", - cost="c1", - variables=["x"], - derived_variables=["y"]) - G.add_node("Child1", - cost="c2", - variables=["q"], - derived_variables=["z"]) + G.add_node("Root", cost="c1", variables=["x"], derived_variables=["y"]) + G.add_node("Child1", cost="c2", variables=["q"], derived_variables=["z"]) G.add_edge("Root", "Child1", weight=0.8) - G.add_node("Child2", - cost="c2", - variables=["q"], - derived_variables=["z"]) + G.add_node("Child2", cost="c2", variables=["q"], derived_variables=["z"]) 
G.add_edge("Root", "Child2", weight=0.2) model = ScenarioTreeModelFromNetworkX(G) + self.assertEqual(sorted(list(model.Stages)), sorted(["Stage1", "Stage2"])) self.assertEqual( - sorted(list(model.Stages)), - sorted(["Stage1", "Stage2"])) - self.assertEqual( - sorted(list(model.Nodes)), - sorted(["Root", "Child1", "Child2"])) - self.assertEqual( - sorted(list(model.Children["Root"])), - sorted(["Child1", "Child2"])) + sorted(list(model.Nodes)), sorted(["Root", "Child1", "Child2"]) + ) self.assertEqual( - sorted(list(model.Children["Child1"])), - sorted([])) - self.assertEqual( - sorted(list(model.Children["Child2"])), - sorted([])) - self.assertEqual( - sorted(list(model.Scenarios)), - sorted(["Child1", "Child2"])) + sorted(list(model.Children["Root"])), sorted(["Child1", "Child2"]) + ) + self.assertEqual(sorted(list(model.Children["Child1"])), sorted([])) + self.assertEqual(sorted(list(model.Children["Child2"])), sorted([])) + self.assertEqual(sorted(list(model.Scenarios)), sorted(["Child1", "Child2"])) self.assertEqual(value(model.ConditionalProbability["Root"]), 1.0) self.assertEqual(value(model.ConditionalProbability["Child1"]), 0.8) self.assertEqual(value(model.ConditionalProbability["Child2"]), 0.2) @@ -650,26 +629,19 @@ def test_two_stage_custom_names(self): G, edge_probability_attribute="probability", node_name_attribute="label", - stage_names=["T1","T2"], - scenario_name_attribute="scenario") - self.assertEqual( - sorted(list(model.Stages)), - sorted(["T1", "T2"])) - self.assertEqual( - sorted(list(model.Nodes)), - sorted(["Root", "Child1", "Child2"])) - self.assertEqual( - sorted(list(model.Children["Root"])), - sorted(["Child1", "Child2"])) - self.assertEqual( - sorted(list(model.Children["Child1"])), - sorted([])) - self.assertEqual( - sorted(list(model.Children["Child2"])), - sorted([])) - self.assertEqual( - sorted(list(model.Scenarios)), - sorted(["S1", "S2"])) + stage_names=["T1", "T2"], + scenario_name_attribute="scenario", + ) + self.assertEqual(sorted(list(model.Stages)), sorted(["T1", "T2"])) + self.assertEqual( + sorted(list(model.Nodes)), sorted(["Root", "Child1", "Child2"]) + ) + self.assertEqual( + sorted(list(model.Children["Root"])), sorted(["Child1", "Child2"]) + ) + self.assertEqual(sorted(list(model.Children["Child1"])), sorted([])) + self.assertEqual(sorted(list(model.Children["Child2"])), sorted([])) + self.assertEqual(sorted(list(model.Scenarios)), sorted(["S1", "S2"])) self.assertEqual(value(model.ConditionalProbability["Root"]), 1.0) self.assertEqual(value(model.ConditionalProbability["Child1"]), 0.8) self.assertEqual(value(model.ConditionalProbability["Child2"]), 0.2) @@ -682,71 +654,44 @@ def test_two_stage_custom_names(self): ScenarioTree(scenariotreeinstance=model) def test_multi_stage(self): - G = networkx.balanced_tree(3,2,networkx.DiGraph()) - model = ScenarioTreeModelFromNetworkX( - G, - edge_probability_attribute=None) + G = networkx.balanced_tree(3, 2, networkx.DiGraph()) + model = ScenarioTreeModelFromNetworkX(G, edge_probability_attribute=None) self.assertEqual( - sorted(list(model.Stages)), - sorted(["Stage1", "Stage2", "Stage3"])) + sorted(list(model.Stages)), sorted(["Stage1", "Stage2", "Stage3"]) + ) self.assertEqual( sorted(list(model.Nodes)), - sorted([0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12])) - self.assertEqual( - sorted(list(model.Children[0])), - sorted([1,2,3])) - self.assertEqual( - sorted(list(model.Children[1])), - sorted([4,5,6])) - self.assertEqual( - sorted(list(model.Children[2])), - sorted([7,8,9])) - self.assertEqual( - 
sorted(list(model.Children[3])), - sorted([10,11,12])) - self.assertEqual( - sorted(list(model.Children[4])), - sorted([])) - self.assertEqual( - sorted(list(model.Children[5])), - sorted([])) - self.assertEqual( - sorted(list(model.Children[6])), - sorted([])) - self.assertEqual( - sorted(list(model.Children[7])), - sorted([])) - self.assertEqual( - sorted(list(model.Children[8])), - sorted([])) - self.assertEqual( - sorted(list(model.Children[9])), - sorted([])) - self.assertEqual( - sorted(list(model.Children[10])), - sorted([])) - self.assertEqual( - sorted(list(model.Children[11])), - sorted([])) - self.assertEqual( - sorted(list(model.Children[12])), - sorted([])) - self.assertEqual( - sorted(list(model.Scenarios)), - sorted([4, 5, 6, 7, 8, 9, 10, 11, 12])) + sorted([0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12]), + ) + self.assertEqual(sorted(list(model.Children[0])), sorted([1, 2, 3])) + self.assertEqual(sorted(list(model.Children[1])), sorted([4, 5, 6])) + self.assertEqual(sorted(list(model.Children[2])), sorted([7, 8, 9])) + self.assertEqual(sorted(list(model.Children[3])), sorted([10, 11, 12])) + self.assertEqual(sorted(list(model.Children[4])), sorted([])) + self.assertEqual(sorted(list(model.Children[5])), sorted([])) + self.assertEqual(sorted(list(model.Children[6])), sorted([])) + self.assertEqual(sorted(list(model.Children[7])), sorted([])) + self.assertEqual(sorted(list(model.Children[8])), sorted([])) + self.assertEqual(sorted(list(model.Children[9])), sorted([])) + self.assertEqual(sorted(list(model.Children[10])), sorted([])) + self.assertEqual(sorted(list(model.Children[11])), sorted([])) + self.assertEqual(sorted(list(model.Children[12])), sorted([])) + self.assertEqual( + sorted(list(model.Scenarios)), sorted([4, 5, 6, 7, 8, 9, 10, 11, 12]) + ) self.assertEqual(value(model.ConditionalProbability[0]), 1.0) - self.assertAlmostEqual(value(model.ConditionalProbability[1]), 1.0/3) - self.assertAlmostEqual(value(model.ConditionalProbability[2]), 1.0/3) - self.assertAlmostEqual(value(model.ConditionalProbability[3]), 1.0/3) - self.assertAlmostEqual(value(model.ConditionalProbability[4]), 1.0/3) - self.assertAlmostEqual(value(model.ConditionalProbability[5]), 1.0/3) - self.assertAlmostEqual(value(model.ConditionalProbability[6]), 1.0/3) - self.assertAlmostEqual(value(model.ConditionalProbability[7]), 1.0/3) - self.assertAlmostEqual(value(model.ConditionalProbability[8]), 1.0/3) - self.assertAlmostEqual(value(model.ConditionalProbability[9]), 1.0/3) - self.assertAlmostEqual(value(model.ConditionalProbability[10]), 1.0/3) - self.assertAlmostEqual(value(model.ConditionalProbability[11]), 1.0/3) - self.assertAlmostEqual(value(model.ConditionalProbability[12]), 1.0/3) + self.assertAlmostEqual(value(model.ConditionalProbability[1]), 1.0 / 3) + self.assertAlmostEqual(value(model.ConditionalProbability[2]), 1.0 / 3) + self.assertAlmostEqual(value(model.ConditionalProbability[3]), 1.0 / 3) + self.assertAlmostEqual(value(model.ConditionalProbability[4]), 1.0 / 3) + self.assertAlmostEqual(value(model.ConditionalProbability[5]), 1.0 / 3) + self.assertAlmostEqual(value(model.ConditionalProbability[6]), 1.0 / 3) + self.assertAlmostEqual(value(model.ConditionalProbability[7]), 1.0 / 3) + self.assertAlmostEqual(value(model.ConditionalProbability[8]), 1.0 / 3) + self.assertAlmostEqual(value(model.ConditionalProbability[9]), 1.0 / 3) + self.assertAlmostEqual(value(model.ConditionalProbability[10]), 1.0 / 3) + self.assertAlmostEqual(value(model.ConditionalProbability[11]), 1.0 / 3) + 
self.assertAlmostEqual(value(model.ConditionalProbability[12]), 1.0 / 3) model.StageCost["Stage1"] = "c1" model.StageCost["Stage2"] = "c2" model.StageCost["Stage3"] = "c3" @@ -769,33 +714,17 @@ def test_unbalanced(self): G.add_node("01") G.add_edge("0", "00") G.add_edge("0", "01") - model = ScenarioTreeModelFromNetworkX( - G, - edge_probability_attribute=None) - self.assertEqual( - sorted(list(model.Stages)), - sorted(["Stage1", "Stage2", "Stage3"])) - self.assertEqual( - sorted(list(model.Nodes)), - sorted(["R","0","1","00","01"])) - self.assertEqual( - sorted(list(model.Children["R"])), - sorted(["0", "1"])) - self.assertEqual( - sorted(list(model.Children["0"])), - sorted(["00","01"])) - self.assertEqual( - sorted(list(model.Children["1"])), - sorted([])) - self.assertEqual( - sorted(list(model.Children["00"])), - sorted([])) - self.assertEqual( - sorted(list(model.Children["01"])), - sorted([])) - self.assertEqual( - sorted(list(model.Scenarios)), - sorted(["00", "01", "1"])) + model = ScenarioTreeModelFromNetworkX(G, edge_probability_attribute=None) + self.assertEqual( + sorted(list(model.Stages)), sorted(["Stage1", "Stage2", "Stage3"]) + ) + self.assertEqual(sorted(list(model.Nodes)), sorted(["R", "0", "1", "00", "01"])) + self.assertEqual(sorted(list(model.Children["R"])), sorted(["0", "1"])) + self.assertEqual(sorted(list(model.Children["0"])), sorted(["00", "01"])) + self.assertEqual(sorted(list(model.Children["1"])), sorted([])) + self.assertEqual(sorted(list(model.Children["00"])), sorted([])) + self.assertEqual(sorted(list(model.Children["01"])), sorted([])) + self.assertEqual(sorted(list(model.Scenarios)), sorted(["00", "01", "1"])) self.assertEqual(value(model.ConditionalProbability["R"]), 1.0) self.assertEqual(value(model.ConditionalProbability["0"]), 0.5) self.assertEqual(value(model.ConditionalProbability["1"]), 0.5) @@ -815,37 +744,31 @@ def test_bundles(self): G = networkx.DiGraph() G.add_node("r") for i in range(4): - G.add_node("u"+str(i), bundle=i%2) - G.add_edge("r", "u"+str(i)) - model = ScenarioTreeModelFromNetworkX( - G, - edge_probability_attribute=None) - self.assertEqual( - sorted(list(model.Stages)), - sorted(["Stage1", "Stage2"])) + G.add_node("u" + str(i), bundle=i % 2) + G.add_edge("r", "u" + str(i)) + model = ScenarioTreeModelFromNetworkX(G, edge_probability_attribute=None) + self.assertEqual(sorted(list(model.Stages)), sorted(["Stage1", "Stage2"])) self.assertEqual( - sorted(list(model.Nodes)), - sorted(["r", "u0", "u1", "u2", "u3"])) + sorted(list(model.Nodes)), sorted(["r", "u0", "u1", "u2", "u3"]) + ) self.assertEqual( - sorted(list(model.Children["r"])), - sorted(["u0", "u1", "u2", "u3"])) + sorted(list(model.Children["r"])), sorted(["u0", "u1", "u2", "u3"]) + ) for i in range(4): - self.assertEqual( - sorted(list(model.Children["u"+str(i)])), - sorted([])) + self.assertEqual(sorted(list(model.Children["u" + str(i)])), sorted([])) self.assertEqual( - sorted(list(model.Scenarios)), - sorted(["u0", "u1", "u2", "u3"])) + sorted(list(model.Scenarios)), sorted(["u0", "u1", "u2", "u3"]) + ) self.assertEqual(value(model.ConditionalProbability["r"]), 1.0) for i in range(4): - self.assertEqual(value(model.ConditionalProbability["u"+str(i)]), - 0.25) + self.assertEqual(value(model.ConditionalProbability["u" + str(i)]), 0.25) self.assertEqual(model.Bundling.value, True) self.assertEqual(list(model.Bundles), [0, 1]) for k, bundle_name in enumerate(model.Bundles): - self.assertEqual(list(model.BundleScenarios[bundle_name]), - ["u"+str(i) for i in range(4) - if i%2 
== k]) + self.assertEqual( + list(model.BundleScenarios[bundle_name]), + ["u" + str(i) for i in range(4) if i % 2 == k], + ) model.StageCost["Stage1"] = "c1" model.StageCost["Stage2"] = "c2" model.StageVariables["Stage1"].add("x") @@ -855,25 +778,20 @@ def test_bundles_incomplete(self): G = networkx.DiGraph() G.add_node("r") for i in range(4): - G.add_node("u"+str(i), bundle="B") - G.add_edge("r", "u"+str(i)) - model = ScenarioTreeModelFromNetworkX( - G, - edge_probability_attribute=None) + G.add_node("u" + str(i), bundle="B") + G.add_edge("r", "u" + str(i)) + model = ScenarioTreeModelFromNetworkX(G, edge_probability_attribute=None) self.assertEqual(model.Bundling.value, True) self.assertEqual(list(model.Bundles), ["B"]) - self.assertEqual(list(model.BundleScenarios["B"]), - ["u"+str(i) for i in range(4)]) + self.assertEqual( + list(model.BundleScenarios["B"]), ["u" + str(i) for i in range(4)] + ) G.nodes["u0"]["bundle"] = None with self.assertRaises(ValueError): - ScenarioTreeModelFromNetworkX( - G, - edge_probability_attribute=None) + ScenarioTreeModelFromNetworkX(G, edge_probability_attribute=None) del G.nodes["u0"]["bundle"] with self.assertRaises(ValueError): - ScenarioTreeModelFromNetworkX( - G, - edge_probability_attribute=None) + ScenarioTreeModelFromNetworkX(G, edge_probability_attribute=None) if __name__ == "__main__": diff --git a/mpisppy/utils/pysp_model/tests/testdata/ScenarioStructure.py b/mpisppy/utils/pysp_model/tests/testdata/ScenarioStructure.py index 6aaaf2e3f..a1140a391 100644 --- a/mpisppy/utils/pysp_model/tests/testdata/ScenarioStructure.py +++ b/mpisppy/utils/pysp_model/tests/testdata/ScenarioStructure.py @@ -9,24 +9,13 @@ import networkx G = networkx.DiGraph() -G.add_node("root", - variables=["x"], - cost="cost[1]") +G.add_node("root", variables=["x"], cost="cost[1]") -G.add_node("s1", - cost="cost[2]") -G.add_edge("root", - "s1", - weight=0.33333333) +G.add_node("s1", cost="cost[2]") +G.add_edge("root", "s1", weight=0.33333333) -G.add_node("s2", - cost="cost[2]") -G.add_edge("root", - "s2", - weight=0.33333334) +G.add_node("s2", cost="cost[2]") +G.add_edge("root", "s2", weight=0.33333334) -G.add_node("s3", - cost="cost[2]") -G.add_edge("root", - "s3", - weight=0.33333333) +G.add_node("s3", cost="cost[2]") +G.add_edge("root", "s3", weight=0.33333333) diff --git a/mpisppy/utils/pysp_model/tests/testdata/both_callbacks.py b/mpisppy/utils/pysp_model/tests/testdata/both_callbacks.py index 87bf3e29c..cf1c5d234 100644 --- a/mpisppy/utils/pysp_model/tests/testdata/both_callbacks.py +++ b/mpisppy/utils/pysp_model/tests/testdata/both_callbacks.py @@ -10,8 +10,8 @@ # # Pyomo: Python Optimization Modeling Objects # Copyright 2017 National Technology and Engineering Solutions of Sandia, LLC -# Under the terms of Contract DE-NA0003525 with National Technology and -# Engineering Solutions of Sandia, LLC, the U.S. Government retains certain +# Under the terms of Contract DE-NA0003525 with National Technology and +# Engineering Solutions of Sandia, LLC, the U.S. Government retains certain # rights in this software. # This software is distributed under the 3-clause BSD License. 
# ___________________________________________________________________________ @@ -21,19 +21,32 @@ model = AbstractModel() model.x = Var() model.p = Param(mutable=True, initialize=1.0) + + def cost_rule(model, i): if i == 1: return model.x else: return 0.0 -model.cost = Expression([1,2], rule=cost_rule) + + +model.cost = Expression([1, 2], rule=cost_rule) + + def o_rule(model): return model.x + + model.o = Objective(rule=o_rule) + + def c_rule(model): return model.x >= model.p + + model.c = Constraint(rule=c_rule) + def pysp_instance_creation_callback(scenario_tree, scenario_name, node_names): instance = model.create_instance() if scenario_name == "s1": @@ -45,29 +58,19 @@ def pysp_instance_creation_callback(scenario_tree, scenario_name, node_names): instance.p.value = 3.0 return instance + def pysp_scenario_tree_model_callback(): import networkx G = networkx.DiGraph() - G.add_node("root", - variables=["x"], - cost="cost[1]") - - G.add_node("s1", - cost="cost[2]") - G.add_edge("root", - "s1", - weight=0.33333333) - - G.add_node("s2", - cost="cost[2]") - G.add_edge("root", - "s2", - weight=0.33333334) - - G.add_node("s3", - cost="cost[2]") - G.add_edge("root", - "s3", - weight=0.33333333) + G.add_node("root", variables=["x"], cost="cost[1]") + + G.add_node("s1", cost="cost[2]") + G.add_edge("root", "s1", weight=0.33333333) + + G.add_node("s2", cost="cost[2]") + G.add_edge("root", "s2", weight=0.33333334) + + G.add_node("s3", cost="cost[2]") + G.add_edge("root", "s3", weight=0.33333333) return G diff --git a/mpisppy/utils/pysp_model/tests/testdata/reference_test_model.py b/mpisppy/utils/pysp_model/tests/testdata/reference_test_model.py index bbd5037e3..02b6864ac 100644 --- a/mpisppy/utils/pysp_model/tests/testdata/reference_test_model.py +++ b/mpisppy/utils/pysp_model/tests/testdata/reference_test_model.py @@ -10,8 +10,8 @@ # # Pyomo: Python Optimization Modeling Objects # Copyright 2017 National Technology and Engineering Solutions of Sandia, LLC -# Under the terms of Contract DE-NA0003525 with National Technology and -# Engineering Solutions of Sandia, LLC, the U.S. Government retains certain +# Under the terms of Contract DE-NA0003525 with National Technology and +# Engineering Solutions of Sandia, LLC, the U.S. Government retains certain # rights in this software. # This software is distributed under the 3-clause BSD License. # ___________________________________________________________________________ @@ -21,15 +21,27 @@ model = AbstractModel() model.x = Var() model.p = Param(mutable=True, initialize=1.0) + + def cost_rule(model, i): if i == 1: return model.x else: return 0.0 -model.cost = Expression([1,2], rule=cost_rule) + + +model.cost = Expression([1, 2], rule=cost_rule) + + def o_rule(model): return model.x + + model.o = Objective(rule=o_rule) + + def c_rule(model): return model.x >= model.p + + model.c = Constraint(rule=c_rule) diff --git a/mpisppy/utils/pysp_model/tests/testdata/reference_test_model_with_callback.py b/mpisppy/utils/pysp_model/tests/testdata/reference_test_model_with_callback.py index c2d8820da..8f2a44f4f 100644 --- a/mpisppy/utils/pysp_model/tests/testdata/reference_test_model_with_callback.py +++ b/mpisppy/utils/pysp_model/tests/testdata/reference_test_model_with_callback.py @@ -10,8 +10,8 @@ # # Pyomo: Python Optimization Modeling Objects # Copyright 2017 National Technology and Engineering Solutions of Sandia, LLC -# Under the terms of Contract DE-NA0003525 with National Technology and -# Engineering Solutions of Sandia, LLC, the U.S. 
Government retains certain +# Under the terms of Contract DE-NA0003525 with National Technology and +# Engineering Solutions of Sandia, LLC, the U.S. Government retains certain # rights in this software. # This software is distributed under the 3-clause BSD License. # ___________________________________________________________________________ @@ -21,19 +21,32 @@ model = AbstractModel() model.x = Var() model.p = Param(mutable=True, initialize=1.0) + + def cost_rule(model, i): if i == 1: return model.x else: return 0.0 -model.cost = Expression([1,2], rule=cost_rule) + + +model.cost = Expression([1, 2], rule=cost_rule) + + def o_rule(model): return model.x + + model.o = Objective(rule=o_rule) + + def c_rule(model): return model.x >= model.p + + model.c = Constraint(rule=c_rule) + def pysp_instance_creation_callback(scenario_tree, scenario_name, node_names): instance = model.create_instance() if scenario_name == "s1": diff --git a/mpisppy/utils/pysp_model/tests/testdata/reference_test_scenario_tree_model.py b/mpisppy/utils/pysp_model/tests/testdata/reference_test_scenario_tree_model.py index 1df6694fe..8d64e9286 100644 --- a/mpisppy/utils/pysp_model/tests/testdata/reference_test_scenario_tree_model.py +++ b/mpisppy/utils/pysp_model/tests/testdata/reference_test_scenario_tree_model.py @@ -10,8 +10,8 @@ # # Pyomo: Python Optimization Modeling Objects # Copyright 2017 National Technology and Engineering Solutions of Sandia, LLC -# Under the terms of Contract DE-NA0003525 with National Technology and -# Engineering Solutions of Sandia, LLC, the U.S. Government retains certain +# Under the terms of Contract DE-NA0003525 with National Technology and +# Engineering Solutions of Sandia, LLC, the U.S. Government retains certain # rights in this software. # This software is distributed under the 3-clause BSD License. # ___________________________________________________________________________ @@ -22,8 +22,9 @@ import os import tempfile -from mpisppy.utils.pysp_model.tree_structure_model import \ - CreateAbstractScenarioTreeModel +from mpisppy.utils.pysp_model.tree_structure_model import ( + CreateAbstractScenarioTreeModel, +) with tempfile.NamedTemporaryFile(mode="w", suffix=".dat", delete=False) as f: f.write(""" diff --git a/mpisppy/utils/pysp_model/tree_structure.py b/mpisppy/utils/pysp_model/tree_structure.py index 08fe48a61..df11ffa34 100644 --- a/mpisppy/utils/pysp_model/tree_structure.py +++ b/mpisppy/utils/pysp_model/tree_structure.py @@ -10,8 +10,8 @@ # # Pyomo: Python Optimization Modeling Objects # Copyright 2017 National Technology and Engineering Solutions of Sandia, LLC -# Under the terms of Contract DE-NA0003525 with National Technology and -# Engineering Solutions of Sandia, LLC, the U.S. Government retains certain +# Under the terms of Contract DE-NA0003525 with National Technology and +# Engineering Solutions of Sandia, LLC, the U.S. Government retains certain # rights in this software. # This software is distributed under the 3-clause BSD License. 
# ___________________________________________________________________________ @@ -19,11 +19,13 @@ # This file was originally part of PySP and Pyomo, available: https://github.com/Pyomo/pysp # Copied with modification from pysp/scenariotree/tree_structure.py -__all__ = ('ScenarioTreeNode', - 'ScenarioTreeStage', - 'Scenario', - 'ScenarioTreeBundle', - 'ScenarioTree') +__all__ = ( + "ScenarioTreeNode", + "ScenarioTreeStage", + "Scenario", + "ScenarioTreeBundle", + "ScenarioTree", +) import sys import random @@ -32,28 +34,30 @@ try: from collections import OrderedDict -except ImportError: #pragma:nocover +except ImportError: # pragma:nocover from ordereddict import OrderedDict from pyomo.common.collections import ComponentMap -from pyomo.core import (value, ComponentUID) -from .phutils import (indexToString, - isVariableNameIndexed, - extractVariableNameAndIndex, - ) +from pyomo.core import value, ComponentUID +from .phutils import ( + indexToString, + isVariableNameIndexed, + extractVariableNameAndIndex, +) logger = logging.getLogger("mpisppy.utils.pysp_model") CUID_repr_version = 1 + class _CUIDLabeler: def __init__(self): self._cuid_map = ComponentMap() def update_cache(self, block): self._cuid_map.update( - ComponentUID.generate_cuid_string_map( - block, repr_version=CUID_repr_version)) + ComponentUID.generate_cuid_string_map(block, repr_version=CUID_repr_version) + ) def clear_cache(self): self._cuid_map = {} @@ -66,16 +70,14 @@ def __call__(self, obj): self._cuid_map[obj] = cuid return cuid -class ScenarioTreeNode: - """ Constructor - """ +class ScenarioTreeNode: + """Constructor""" VARIABLE_FIXED = 0 VARIABLE_FREED = 1 def __init__(self, name, conditional_probability, stage): - # self-explanatory! self._name = name @@ -152,7 +154,6 @@ def __init__(self, name, conditional_probability, stage): # node variables ids that are fixed (along with the value to fix) self._fixed = {} - @property def name(self): return self._name @@ -186,7 +187,6 @@ def probability(self): # last stage in the scenario tree. # def is_leaf_node(self): - return self._stage.is_last_stage() # @@ -198,11 +198,9 @@ def is_derived_variable(self, variable_name, variable_index): class ScenarioTreeStage: + """Constructor""" - """ Constructor - """ def __init__(self): - self._name = "" # a collection of ScenarioTreeNode objects associated with this stage. @@ -245,15 +243,13 @@ def scenario_tree(self): # the scenario tree. # def is_last_stage(self): - return self == self._scenario_tree._stages[-1] + class Scenario: + """Constructor""" - """ Constructor - """ def __init__(self): - self._name = None # allows for construction of node list self._leaf_node = None @@ -333,11 +329,8 @@ def node_stage_index(self, tree_node): return self._node_list.index(tree_node) - class ScenarioTreeBundle: - def __init__(self): - self._name = None self._scenario_names = [] # This is a compressed scenario tree, just for the bundle. @@ -362,22 +355,20 @@ def scenario_tree(self): def probability(self): return self._probability -class ScenarioTree: +class ScenarioTree: # a utility to construct scenario bundles. 
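(Editor's aside: the `bundles` argument consumed by the reformatted method below is simply a mapping from bundle name to member scenario names, and each bundle's probability is the sum of its members' probabilities. A minimal sketch in plain Python, with hypothetical scenario names and probabilities:

    from collections import OrderedDict

    bundles = OrderedDict()
    bundles["B1"] = ["s1", "s2"]
    bundles["B2"] = ["s3", "s4"]
    scenario_probability = {"s1": 0.25, "s2": 0.25, "s3": 0.25, "s4": 0.25}
    # mirrors the probability accumulation in _construct_scenario_bundles
    bundle_probability = sum(scenario_probability[s] for s in bundles["B1"])
    assert bundle_probability == 0.5
)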
def _construct_scenario_bundles(self, bundles): - for bundle_name in bundles: - scenario_list = [] bundle_probability = 0.0 for scenario_name in bundles[bundle_name]: scenario_list.append(scenario_name) - bundle_probability += \ - self._scenario_map[scenario_name].probability + bundle_probability += self._scenario_map[scenario_name].probability - scenario_tree_for_bundle = self.make_compressed(scenario_list, - normalize=True) + scenario_tree_for_bundle = self.make_compressed( + scenario_list, normalize=True + ) scenario_tree_for_bundle.validate() @@ -396,24 +387,25 @@ def _construct_scenario_bundles(self, bundles): # _stages and _stage_map attributes. # - def _construct_stages(self, - stage_names, - stage_variable_names, - stage_cost_variable_names, - stage_derived_variable_names): - + def _construct_stages( + self, + stage_names, + stage_variable_names, + stage_cost_variable_names, + stage_derived_variable_names, + ): # construct the stage objects, which will leave them # largely uninitialized - no variable information, in particular. for stage_name in stage_names: - new_stage = ScenarioTreeStage() new_stage._name = stage_name new_stage._scenario_tree = self for variable_string in stage_variable_names[stage_name]: if isVariableNameIndexed(variable_string): - variable_name, match_template = \ - extractVariableNameAndIndex(variable_string) + variable_name, match_template = extractVariableNameAndIndex( + variable_string + ) else: variable_name = variable_string match_template = "" @@ -425,14 +417,17 @@ def _construct_stages(self, if stage_name in stage_derived_variable_names: for variable_string in stage_derived_variable_names[stage_name]: if isVariableNameIndexed(variable_string): - variable_name, match_template = \ - extractVariableNameAndIndex(variable_string) + variable_name, match_template = extractVariableNameAndIndex( + variable_string + ) else: variable_name = variable_string match_template = "" if variable_name not in new_stage._derived_variable_templates: new_stage._derived_variable_templates[variable_name] = [] - new_stage._derived_variable_templates[variable_name].append(match_template) + new_stage._derived_variable_templates[variable_name].append( + match_template + ) # de-reference is required to access the parameter value # TBD March 2020: make it so the stages always know their cost names. @@ -440,8 +435,9 @@ def _construct_stages(self, cost_variable_string = stage_cost_variable_names[stage_name].value if cost_variable_string is not None: if isVariableNameIndexed(cost_variable_string): - cost_variable_name, cost_variable_index = \ + cost_variable_name, cost_variable_index = ( extractVariableNameAndIndex(cost_variable_string) + ) else: cost_variable_name = cost_variable_string cost_variable_index = None @@ -456,15 +452,13 @@ def _construct_stages(self, scenariotreeinstance - the pyomo model specifying all scenario tree (text) data. scenariobundlelist - a list of scenario names to retain, i.e., cull the rest to create a reduced tree! 
""" - def __init__(self, - scenariotreeinstance=None, - scenariobundlelist=None): + def __init__(self, scenariotreeinstance=None, scenariobundlelist=None): # some arbitrary identifier self._name = None # should be called once for each variable blended across a node - #self._id_labeler = CounterLabeler() + # self._id_labeler = CounterLabeler() self._id_labeler = _CUIDLabeler() # @@ -505,27 +499,38 @@ def __init__(self, node_variable_ids = scenariotreeinstance.NodeVariables stage_cost_variable_ids = scenariotreeinstance.StageCost node_cost_variable_ids = scenariotreeinstance.NodeCost - if any(scenariotreeinstance.StageCostVariable[i].value is not None - for i in scenariotreeinstance.StageCostVariable): - logger.warning("DEPRECATED: The 'StageCostVariable' scenario tree " - "model parameter has been renamed to 'StageCost'. " - "Please update your scenario tree structure model.") - if any(stage_cost_variable_ids[i].value is not None - for i in stage_cost_variable_ids): - raise ValueError("The 'StageCostVariable' and 'StageCost' " - "parameters can not both be used on a scenario " - "tree structure model.") + if any( + scenariotreeinstance.StageCostVariable[i].value is not None + for i in scenariotreeinstance.StageCostVariable + ): + logger.warning( + "DEPRECATED: The 'StageCostVariable' scenario tree " + "model parameter has been renamed to 'StageCost'. " + "Please update your scenario tree structure model." + ) + if any( + stage_cost_variable_ids[i].value is not None + for i in stage_cost_variable_ids + ): + raise ValueError( + "The 'StageCostVariable' and 'StageCost' " + "parameters can not both be used on a scenario " + "tree structure model." + ) else: stage_cost_variable_ids = scenariotreeinstance.StageCostVariable - if any(stage_cost_variable_ids[i].value is not None - for i in stage_cost_variable_ids) and \ - any(node_cost_variable_ids[i].value is not None - for i in node_cost_variable_ids): + if any( + stage_cost_variable_ids[i].value is not None + for i in stage_cost_variable_ids + ) and any( + node_cost_variable_ids[i].value is not None for i in node_cost_variable_ids + ): raise ValueError( "The 'StageCost' and 'NodeCost' parameters " "can not both be used on a scenario tree " - "structure model.") + "structure model." + ) stage_derived_variable_ids = scenariotreeinstance.StageDerivedVariables node_derived_variable_ids = scenariotreeinstance.NodeDerivedVariables scenario_ids = scenariotreeinstance.Scenarios @@ -540,13 +545,15 @@ def __init__(self, if not stage_ids.isordered(): raise ValueError( "An ordered set of stage IDs must be supplied in " - "the ScenarioTree constructor") + "the ScenarioTree constructor" + ) for node_id in node_ids: node_stage_id = node_stage_ids[node_id].value if node_stage_id != stage_ids.last(): - if (len(stage_variable_ids[node_stage_id]) == 0) and \ - (len(node_variable_ids[node_id]) == 0): + if (len(stage_variable_ids[node_stage_id]) == 0) and ( + len(node_variable_ids[node_id]) == 0 + ): raise ValueError( "Scenario tree node %s, belonging to stage %s, " "has not been declared with any variables. " @@ -555,7 +562,8 @@ def __init__(self, "is declared with at least one variable string " "template (e.g., x, x[*]) on the scenario tree " "or in ScenarioStructure.dat." 
- % (node_id, node_stage_id, node_stage_id, node_id)) + % (node_id, node_stage_id, node_stage_id, node_id) + ) # # construct the actual tree objects @@ -563,36 +571,40 @@ def __init__(self, # construct the stage objects w/o any linkages first; link them up # with tree nodes after these have been fully constructed. - self._construct_stages(stage_ids, - stage_variable_ids, - stage_cost_variable_ids, - stage_derived_variable_ids) + self._construct_stages( + stage_ids, + stage_variable_ids, + stage_cost_variable_ids, + stage_derived_variable_ids, + ) # construct the tree node objects themselves in a first pass, # and then link them up in a second pass to form the tree. # can't do a single pass because the objects may not exist. for tree_node_name in node_ids: - if tree_node_name not in node_stage_ids: - raise ValueError("No stage is assigned to tree node=%s" - % (tree_node_name)) + raise ValueError( + "No stage is assigned to tree node=%s" % (tree_node_name) + ) stage_name = value(node_stage_ids[tree_node_name]) if stage_name not in self._stage_map: - raise ValueError("Unknown stage=%s assigned to tree node=%s" - % (stage_name, tree_node_name)) + raise ValueError( + "Unknown stage=%s assigned to tree node=%s" + % (stage_name, tree_node_name) + ) node_stage = self._stage_map[stage_name] new_tree_node = ScenarioTreeNode( - tree_node_name, - value(node_probability_map[tree_node_name]), - node_stage) + tree_node_name, value(node_probability_map[tree_node_name]), node_stage + ) # extract the node variable match templates for variable_string in node_variable_ids[tree_node_name]: if isVariableNameIndexed(variable_string): - variable_name, match_template = \ - extractVariableNameAndIndex(variable_string) + variable_name, match_template = extractVariableNameAndIndex( + variable_string + ) else: variable_name = variable_string match_template = "" @@ -604,28 +616,31 @@ def __init__(self, if cost_variable_string is not None: assert node_stage._cost_variable is None if isVariableNameIndexed(cost_variable_string): - cost_variable_name, cost_variable_index = \ + cost_variable_name, cost_variable_index = ( extractVariableNameAndIndex(cost_variable_string) + ) else: cost_variable_name = cost_variable_string cost_variable_index = None else: assert node_stage._cost_variable is not None - cost_variable_name, cost_variable_index = \ - node_stage._cost_variable + cost_variable_name, cost_variable_index = node_stage._cost_variable new_tree_node._cost_variable = (cost_variable_name, cost_variable_index) # extract the node derived variable match templates for variable_string in node_derived_variable_ids[tree_node_name]: if isVariableNameIndexed(variable_string): - variable_name, match_template = \ - extractVariableNameAndIndex(variable_string) + variable_name, match_template = extractVariableNameAndIndex( + variable_string + ) else: variable_name = variable_string match_template = "" if variable_name not in new_tree_node._derived_variable_templates: new_tree_node._derived_variable_templates[variable_name] = [] - new_tree_node._derived_variable_templates[variable_name].append(match_template) + new_tree_node._derived_variable_templates[variable_name].append( + match_template + ) self._tree_nodes.append(new_tree_node) self._tree_node_map[tree_node_name] = new_tree_node @@ -648,13 +663,13 @@ def __init__(self, "Multiple parents specified for tree node=%s; " "existing parent node=%s; conflicting parent " "node=%s" - % (child_id, - child_node._parent.name, - this_node.name)) + % (child_id, child_node._parent.name, 
this_node.name) + ) else: - raise ValueError("Unknown child tree node=%s specified " - "for tree node=%s" - % (child_id, this_node.name)) + raise ValueError( + "Unknown child tree node=%s specified " + "for tree node=%s" % (child_id, this_node.name) + ) # at this point, the scenario tree nodes and the stages are set - no # two-pass logic necessary when constructing scenarios. @@ -663,17 +678,20 @@ def __init__(self, new_scenario._name = scenario_name if scenario_name not in scenario_leaf_ids: - raise ValueError("No leaf tree node specified for scenario=%s" - % (scenario_name)) + raise ValueError( + "No leaf tree node specified for scenario=%s" % (scenario_name) + ) else: scenario_leaf_node_name = value(scenario_leaf_ids[scenario_name]) if scenario_leaf_node_name not in self._tree_node_map: - raise ValueError("Uknown tree node=%s specified as leaf " - "of scenario=%s" % - (scenario_leaf_node_name, scenario_name)) + raise ValueError( + "Uknown tree node=%s specified as leaf " + "of scenario=%s" % (scenario_leaf_node_name, scenario_name) + ) else: - new_scenario._leaf_node = \ - self._tree_node_map[scenario_leaf_node_name] + new_scenario._leaf_node = self._tree_node_map[ + scenario_leaf_node_name + ] current_node = new_scenario._leaf_node while current_node is not None: @@ -722,8 +740,9 @@ def __init__(self, if value(scenariotreeinstance.Bundling[None]): bundles = OrderedDict() for bundle_name in scenariotreeinstance.Bundles: - bundles[bundle_name] = \ - list(scenariotreeinstance.BundleScenarios[bundle_name]) + bundles[bundle_name] = list( + scenariotreeinstance.BundleScenarios[bundle_name] + ) self._construct_scenario_bundles(bundles) @property @@ -813,17 +832,12 @@ def get_node(self, name): # (and conditional probabilities) are renormalized. # - def compress(self, - scenario_bundle_list, - normalize=True): - + def compress(self, scenario_bundle_list, normalize=True): # scan for and mark all referenced scenarios and # tree nodes in the bundle list - all stages will # obviously remain. try: - for scenario_name in scenario_bundle_list: - scenario = self._scenario_map[scenario_name] scenario.retain = True @@ -833,10 +847,11 @@ def compress(self, node.retain = True except KeyError: - raise ValueError("Scenario=%s selected for " - "bundling not present in " - "scenario tree" - % (scenario_name)) + raise ValueError( + "Scenario=%s selected for " + "bundling not present in " + "scenario tree" % (scenario_name) + ) # scan for any non-retained scenarios and tree nodes. scenarios_to_delete = [] @@ -890,15 +905,14 @@ def compress(self, # Handle re-normalization of probabilities if requested # if normalize: - # re-normalize the conditional probabilities of the # children at each tree node (leaf-to-root stage order). for stage in reversed(self._stages[:-1]): - for tree_node in stage._tree_nodes: - norm_factor = sum(child_tree_node._conditional_probability - for child_tree_node - in tree_node._children) + norm_factor = sum( + child_tree_node._conditional_probability + for child_tree_node in tree_node._children + ) # the user may specify that the probability of a # scenario is 0.0, and while odd, we should allow the # edge case. 
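(Editor's aside: between these two hunks it may help to see the renormalization arithmetic in isolation. After culling, the surviving children's conditional probabilities are rescaled to sum to 1, then absolute probabilities are rebuilt root-to-leaf, as the next hunk shows. A standalone sketch in plain Python, not the mpi-sppy API; node names are hypothetical:

    # conditional probabilities of the children that survived compression
    children = {"n0": 0.3, "n1": 0.2}
    norm_factor = sum(children.values())  # 0.5
    if norm_factor == 0.0:
        conditional = {name: 0.0 for name in children}  # allowed edge case
    else:
        conditional = {name: p / norm_factor for name, p in children.items()}
    # root-to-leaf pass: absolute = parent's absolute * conditional
    parent_probability = 1.0
    absolute = {name: parent_probability * p for name, p in conditional.items()}
    assert abs(sum(absolute.values()) - 1.0) < 1e-9
)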
@@ -912,14 +926,14 @@ def compress(self, # update absolute probabilities (root-to-leaf stage order) for stage in self._stages[1:]: for tree_node in stage._tree_nodes: - tree_node._probability = \ - tree_node._parent._probability * \ - tree_node._conditional_probability + tree_node._probability = ( + tree_node._parent._probability + * tree_node._conditional_probability + ) # update scenario probabilities for scenario in self._scenarios: - scenario._probability = \ - scenario._leaf_node._probability + scenario._probability = scenario._leaf_node._probability # now that we've culled the scenarios, cull the bundles. do # this in two passes. in the first pass, we identify the names @@ -930,7 +944,7 @@ def compress(self, # indices of the objects in the scenario tree bundle list bundles_to_delete = [] - for i in range(0,len(self._scenario_bundles)): + for i in range(0, len(self._scenario_bundles)): scenario_bundle = self._scenario_bundles[i] for scenario_name in scenario_bundle._scenario_names: if scenario_name not in self._scenario_map: @@ -941,8 +955,9 @@ def compress(self, deleted_bundle = self._scenario_bundles.pop(i) del self._scenario_bundle_map[deleted_bundle.name] - sum_bundle_probabilities = \ - sum(bundle._probability for bundle in self._scenario_bundles) + sum_bundle_probabilities = sum( + bundle._probability for bundle in self._scenario_bundles + ) for bundle in self._scenario_bundles: bundle._probability /= sum_bundle_probabilities @@ -955,10 +970,7 @@ def compress(self, # # *** Bundles are ignored. The compressed tree will not have them *** # - def make_compressed(self, - scenario_bundle_list, - normalize=False): - + def make_compressed(self, scenario_bundle_list, normalize=False): compressed_tree = ScenarioTree() compressed_tree._scenario_based_data = self._scenario_based_data # @@ -969,16 +981,20 @@ def make_compressed(self, # and the reference to the scenario tree compressed_tree_stage = ScenarioTreeStage() compressed_tree_stage._name = stage.name - compressed_tree_stage._variable_templates = copy.deepcopy(stage._variable_templates) - compressed_tree_stage._derived_variable_templates = \ - copy.deepcopy(stage._derived_variable_templates) + compressed_tree_stage._variable_templates = copy.deepcopy( + stage._variable_templates + ) + compressed_tree_stage._derived_variable_templates = copy.deepcopy( + stage._derived_variable_templates + ) compressed_tree_stage._cost_variable = copy.deepcopy(stage._cost_variable) # add the stage object to the compressed tree compressed_tree._stages.append(compressed_tree_stage) compressed_tree._stages[-1]._scenario_tree = compressed_tree - compressed_tree._stage_map = \ - dict((stage.name, stage) for stage in compressed_tree._stages) + compressed_tree._stage_map = dict( + (stage.name, stage) for stage in compressed_tree._stages + ) # # Copy Scenario and Node Data @@ -997,11 +1013,14 @@ def make_compressed(self, compressed_tree_node = ScenarioTreeNode( full_tree_node.name, full_tree_node._conditional_probability, - compressed_tree._stage_map[full_tree_node._stage.name]) - compressed_tree_node._variable_templates = \ - copy.deepcopy(full_tree_node._variable_templates) - compressed_tree_node._derived_variable_templates = \ - copy.deepcopy(full_tree_node._derived_variable_templates) + compressed_tree._stage_map[full_tree_node._stage.name], + ) + compressed_tree_node._variable_templates = copy.deepcopy( + full_tree_node._variable_templates + ) + compressed_tree_node._derived_variable_templates = copy.deepcopy( + full_tree_node._derived_variable_templates + 
) compressed_tree_node._scenarios.append(compressed_tree_scenario) compressed_tree_node._stage._tree_nodes.append(compressed_tree_node) compressed_tree_node._probability = full_tree_node._probability @@ -1011,22 +1030,25 @@ def make_compressed(self, compressed_tree_scenario._node_list.append(compressed_tree_node) compressed_tree_scenario._leaf_node = compressed_tree_node compressed_tree._tree_nodes.append(compressed_tree_node) - compressed_tree._tree_node_map[compressed_tree_node.name] = \ + compressed_tree._tree_node_map[compressed_tree_node.name] = ( compressed_tree_node + ) previous_compressed_tree_node = compressed_tree_node full_tree_node = full_tree_node._parent while full_tree_node.name not in compressed_tree._tree_node_map: - ### copy the node compressed_tree_node = ScenarioTreeNode( full_tree_node.name, full_tree_node._conditional_probability, - compressed_tree._stage_map[full_tree_node.stage.name]) - compressed_tree_node._variable_templates = \ - copy.deepcopy(full_tree_node._variable_templates) - compressed_tree_node._derived_variable_templates = \ - copy.deepcopy(full_tree_node._derived_variable_templates) + compressed_tree._stage_map[full_tree_node.stage.name], + ) + compressed_tree_node._variable_templates = copy.deepcopy( + full_tree_node._variable_templates + ) + compressed_tree_node._derived_variable_templates = copy.deepcopy( + full_tree_node._derived_variable_templates + ) compressed_tree_node._probability = full_tree_node._probability compressed_tree_node._cost_variable = full_tree_node._cost_variable compressed_tree_node._scenarios.append(compressed_tree_scenario) @@ -1035,8 +1057,9 @@ def make_compressed(self, compressed_tree_scenario._node_list.append(compressed_tree_node) compressed_tree._tree_nodes.append(compressed_tree_node) - compressed_tree._tree_node_map[compressed_tree_node.name] = \ + compressed_tree._tree_node_map[compressed_tree_node.name] = ( compressed_tree_node + ) previous_compressed_tree_node._parent = compressed_tree_node compressed_tree_node._children.append(previous_compressed_tree_node) previous_compressed_tree_node = compressed_tree_node @@ -1049,8 +1072,9 @@ def make_compressed(self, # traverse the remaining nodes up to the root and update the # tree structure elements if full_tree_node is not None: - compressed_tree_node = \ - compressed_tree._tree_node_map[full_tree_node.name] + compressed_tree_node = compressed_tree._tree_node_map[ + full_tree_node.name + ] previous_compressed_tree_node._parent = compressed_tree_node compressed_tree_node._scenarios.append(compressed_tree_scenario) compressed_tree_node._children.append(previous_compressed_tree_node) @@ -1064,49 +1088,54 @@ def make_compressed(self, # makes sure this list is in root to leaf order compressed_tree_scenario._node_list.reverse() - assert compressed_tree_scenario._node_list[-1] is \ - compressed_tree_scenario._leaf_node - assert compressed_tree_scenario._node_list[0] is \ - compressed_tree_root + assert ( + compressed_tree_scenario._node_list[-1] + is compressed_tree_scenario._leaf_node + ) + assert compressed_tree_scenario._node_list[0] is compressed_tree_root # initialize solution related dictionaries for compressed_tree_node in compressed_tree_scenario._node_list: - compressed_tree_scenario._stage_costs[compressed_tree_node._stage.name] = None + compressed_tree_scenario._stage_costs[ + compressed_tree_node._stage.name + ] = None compressed_tree_scenario._x[compressed_tree_node.name] = {} compressed_tree_scenario._w[compressed_tree_node.name] = {} 
compressed_tree_scenario._rho[compressed_tree_node.name] = {} compressed_tree_scenario._fixed[compressed_tree_node.name] = set() compressed_tree_scenario._stale[compressed_tree_node.name] = set() - compressed_tree._scenario_map = \ - dict((scenario.name, scenario) for scenario in compressed_tree._scenarios) + compressed_tree._scenario_map = dict( + (scenario.name, scenario) for scenario in compressed_tree._scenarios + ) # # Handle re-normalization of probabilities if requested # if normalize: - # update conditional probabilities (leaf-to-root stage order) for compressed_tree_stage in reversed(compressed_tree._stages[:-1]): - for compressed_tree_node in compressed_tree_stage._tree_nodes: - norm_factor = \ - sum(compressed_tree_child_node._conditional_probability - for compressed_tree_child_node - in compressed_tree_node._children) + norm_factor = sum( + compressed_tree_child_node._conditional_probability + for compressed_tree_child_node in compressed_tree_node._children + ) # the user may specify that the probability of a # scenario is 0.0, and while odd, we should allow the # edge case. if norm_factor == 0.0: - for compressed_tree_child_node in \ - compressed_tree_node._children: + for ( + compressed_tree_child_node + ) in compressed_tree_node._children: compressed_tree_child_node._conditional_probability = 0.0 else: - for compressed_tree_child_node in \ - compressed_tree_node._children: - compressed_tree_child_node.\ - _conditional_probability /= norm_factor + for ( + compressed_tree_child_node + ) in compressed_tree_node._children: + compressed_tree_child_node._conditional_probability /= ( + norm_factor + ) assert abs(compressed_tree_root._probability - 1.0) < 1e-5 assert abs(compressed_tree_root._conditional_probability - 1.0) < 1e-5 @@ -1114,14 +1143,16 @@ def make_compressed(self, # update absolute probabilities (root-to-leaf stage order) for compressed_tree_stage in compressed_tree._stages[1:]: for compressed_tree_node in compressed_tree_stage._tree_nodes: - compressed_tree_node._probability = \ - compressed_tree_node._parent._probability * \ - compressed_tree_node._conditional_probability + compressed_tree_node._probability = ( + compressed_tree_node._parent._probability + * compressed_tree_node._conditional_probability + ) # update scenario probabilities for compressed_tree_scenario in compressed_tree._scenarios: - compressed_tree_scenario._probability = \ + compressed_tree_scenario._probability = ( compressed_tree_scenario._leaf_node._probability + ) return compressed_tree @@ -1136,29 +1167,34 @@ def make_compressed(self, # # def add_bundle(self, name, scenario_bundle_list): - if name in self._scenario_bundle_map: - raise ValueError("Cannot add a new bundle with name '%s', a bundle " - "with that name already exists." % (name)) + raise ValueError( + "Cannot add a new bundle with name '%s', a bundle " + "with that name already exists." 
% (name) + ) - bundle_scenario_tree = self.make_compressed(scenario_bundle_list, - normalize=True) + bundle_scenario_tree = self.make_compressed( + scenario_bundle_list, normalize=True + ) bundle = ScenarioTreeBundle() bundle._name = name bundle._scenario_names = scenario_bundle_list bundle._scenario_tree = bundle_scenario_tree # make sure this is computed with the un-normalized bundle scenarios - bundle._probability = sum(self._scenario_map[scenario_name]._probability - for scenario_name in scenario_bundle_list) + bundle._probability = sum( + self._scenario_map[scenario_name]._probability + for scenario_name in scenario_bundle_list + ) self._scenario_bundle_map[name] = bundle self._scenario_bundles.append(bundle) def remove_bundle(self, name): - if name not in self._scenario_bundle_map: - raise KeyError("Cannot remove bundle with name '%s', no bundle " - "with that name exists." % (name)) + raise KeyError( + "Cannot remove bundle with name '%s', no bundle " + "with that name exists." % (name) + ) bundle = self._scenario_bundle_map[name] del self._scenario_bundle_map[name] self._scenario_bundles.remove(bundle) @@ -1169,35 +1205,34 @@ def remove_bundle(self, name): # def downsample(self, fraction_to_retain, random_seed, verbose=False): - random_state = random.getstate() random.seed(random_seed) try: - number_to_retain = \ - max(int(round(float(len(self._scenarios)*fraction_to_retain))), 1) - random_list=random.sample(range(len(self._scenarios)), number_to_retain) + number_to_retain = max( + int(round(float(len(self._scenarios) * fraction_to_retain))), 1 + ) + random_list = random.sample(range(len(self._scenarios)), number_to_retain) scenario_bundle_list = [] for i in range(number_to_retain): scenario_bundle_list.append(self._scenarios[random_list[i]].name) if verbose: - print("Downsampling scenario tree - retained %s " - "scenarios: %s" - % (len(scenario_bundle_list), - str(scenario_bundle_list))) + print( + "Downsampling scenario tree - retained %s " + "scenarios: %s" + % (len(scenario_bundle_list), str(scenario_bundle_list)) + ) - self.compress(scenario_bundle_list) # do the downsampling + self.compress(scenario_bundle_list) # do the downsampling finally: random.setstate(random_state) - # # returns the root node of the scenario tree # def findRootNode(self): - for tree_node in self._tree_nodes: if tree_node._parent is None: return tree_node @@ -1209,7 +1244,6 @@ def findRootNode(self): # def computeIdentifierMaxLengths(self): - self._max_scenario_id_length = 0 for scenario in self._scenarios: if len(str(scenario.name)) > self._max_scenario_id_length: @@ -1220,7 +1254,6 @@ def computeIdentifierMaxLengths(self): # def validate(self): - # for any node, the sum of conditional probabilities of the children should sum to 1. for tree_node in self._tree_nodes: sum_probabilities = 0.0 @@ -1228,12 +1261,12 @@ def validate(self): for child in tree_node._children: sum_probabilities += child._conditional_probability if abs(1.0 - sum_probabilities) > 0.000001: - raise ValueError("ScenarioTree validation failed. " - "Reason: child conditional " - "probabilities for tree node=%s " - " sum to %s" - % (tree_node.name, - sum_probabilities)) + raise ValueError( + "ScenarioTree validation failed. 
" + "Reason: child conditional " + "probabilities for tree node=%s " + " sum to %s" % (tree_node.name, sum_probabilities) + ) # ensure that there is only one root node in the tree num_roots = 0 @@ -1244,25 +1277,25 @@ def validate(self): root_ids.append(tree_node.name) if num_roots != 1: - raise ValueError("ScenarioTree validation failed. " - "Reason: illegal set of root " - "nodes detected: " + str(root_ids)) + raise ValueError( + "ScenarioTree validation failed. " + "Reason: illegal set of root " + "nodes detected: " + str(root_ids) + ) # there must be at least one scenario passing through each tree node. for tree_node in self._tree_nodes: if len(tree_node._scenarios) == 0: - raise ValueError("ScenarioTree validation failed. " - "Reason: there are no scenarios " - "associated with tree node=%s" - % (tree_node.name)) + raise ValueError( + "ScenarioTree validation failed. " + "Reason: there are no scenarios " + "associated with tree node=%s" % (tree_node.name) + ) return False return True - def create_random_bundles(self, - num_bundles, - random_seed): - + def create_random_bundles(self, num_bundles, random_seed): random_state = random.getstate() random.seed(random_seed) try: @@ -1275,19 +1308,19 @@ def create_random_bundles(self, # scenario tree instance, which should already be there # (because it is defined in the abstract model). however, we # don't have a "clear" method on a set, so... - bundle_names = ["Bundle"+str(i) - for i in range(1, num_bundles+1)] + bundle_names = ["Bundle" + str(i) for i in range(1, num_bundles + 1)] bundles = OrderedDict() for i in range(num_bundles): bundles[bundle_names[i]] = [] scenario_index = 0 - while (scenario_index < num_scenarios): + while scenario_index < num_scenarios: for bundle_index in range(num_bundles): - if (scenario_index == num_scenarios): + if scenario_index == num_scenarios: break bundles[bundle_names[bundle_index]].append( - self._scenarios[sequence[scenario_index]].name) + self._scenarios[sequence[scenario_index]].name + ) scenario_index += 1 self._construct_scenario_bundles(bundles) @@ -1300,7 +1333,6 @@ def create_random_bundles(self, # def pprint(self): - print("Scenario Tree Detail") print("----------------------------------------------------") @@ -1318,7 +1350,10 @@ def pprint(self): else: print("\tParent=" + "None") if tree_node._conditional_probability is not None: - print("\tConditional probability=%4.4f" % tree_node._conditional_probability) + print( + "\tConditional probability=%4.4f" + % tree_node._conditional_probability + ) else: print("\tConditional probability=" + "***Undefined***") print("\tChildren:") @@ -1337,17 +1372,21 @@ def pprint(self): print("\tVariables: ") for variable_name in sorted(tree_node._variable_templates.keys()): match_templates = tree_node._variable_templates[variable_name] - sys.stdout.write("\t\t "+variable_name+" : ") + sys.stdout.write("\t\t " + variable_name + " : ") for match_template in match_templates: - sys.stdout.write(indexToString(match_template)+' ') + sys.stdout.write(indexToString(match_template) + " ") print("") if len(tree_node._derived_variable_templates) > 0: print("\tDerived Variables: ") - for variable_name in sorted(tree_node._derived_variable_templates.keys()): - match_templates = tree_node._derived_variable_templates[variable_name] - sys.stdout.write("\t\t "+variable_name+" : ") + for variable_name in sorted( + tree_node._derived_variable_templates.keys() + ): + match_templates = tree_node._derived_variable_templates[ + variable_name + ] + sys.stdout.write("\t\t " + 
variable_name + " : ") for match_template in match_templates: - sys.stdout.write(indexToString(match_template)+' ') + sys.stdout.write(indexToString(match_template) + " ") print("") print("") print("----------------------------------------------------") @@ -1362,17 +1401,17 @@ def pprint(self): print("\tVariables: ") for variable_name in sorted(stage._variable_templates.keys()): match_templates = stage._variable_templates[variable_name] - sys.stdout.write("\t\t "+variable_name+" : ") + sys.stdout.write("\t\t " + variable_name + " : ") for match_template in match_templates: - sys.stdout.write(indexToString(match_template)+' ') + sys.stdout.write(indexToString(match_template) + " ") print("") if len(stage._derived_variable_templates) > 0: print("\tDerived Variables: ") for variable_name in sorted(stage._derived_variable_templates.keys()): match_templates = stage._derived_variable_templates[variable_name] - sys.stdout.write("\t\t "+variable_name+" : ") + sys.stdout.write("\t\t " + variable_name + " : ") for match_template in match_templates: - sys.stdout.write(indexToString(match_template)+' ') + sys.stdout.write(indexToString(match_template) + " ") print("") print("\tCost Variable: ") if stage._cost_variable is not None: @@ -1406,50 +1445,54 @@ def pprint(self): for bundle_name in sorted(self._scenario_bundle_map.keys()): scenario_bundle = self._scenario_bundle_map[bundle_name] print("\tName=%s" % (bundle_name)) - print("\tProbability=%4.4f" % scenario_bundle._probability ) + print("\tProbability=%4.4f" % scenario_bundle._probability) sys.stdout.write("\tScenarios: ") for scenario_name in sorted(scenario_bundle._scenario_names): - sys.stdout.write(str(scenario_name)+' ') + sys.stdout.write(str(scenario_name) + " ") sys.stdout.write("\n") print("") print("----------------------------------------------------") - # # Save the tree structure in DOT file format # Nodes are labeled with absolute probabilities and # edges are labeled with conditional probabilities # def save_to_dot(self, filename): - def _visit_node(node): - f.write("%s%s [label=\"%s\"];\n" - % (node.name, - id(node), - str(node.name)+("\n(%.6g)" % (node._probability)))) + f.write( + '%s%s [label="%s"];\n' + % ( + node.name, + id(node), + str(node.name) + ("\n(%.6g)" % (node._probability)), + ) + ) for child_node in node._children: _visit_node(child_node) - f.write("%s%s -> %s%s [label=\"%.6g\"];\n" - % (node.name, - id(node), - child_node.name, - id(child_node), - child_node._conditional_probability)) + f.write( + '%s%s -> %s%s [label="%.6g"];\n' + % ( + node.name, + id(node), + child_node.name, + id(child_node), + child_node._conditional_probability, + ) + ) if len(node._children) == 0: assert len(node._scenarios) == 1 scenario = node._scenarios[0] - f.write("%s%s [label=\"%s\"];\n" - % (scenario.name, - id(scenario), - "scenario\n"+str(scenario.name))) - f.write("%s%s -> %s%s [style=dashed];\n" - % (node.name, - id(node), - scenario.name, - id(scenario))) - - with open(filename, 'w') as f: - + f.write( + '%s%s [label="%s"];\n' + % (scenario.name, id(scenario), "scenario\n" + str(scenario.name)) + ) + f.write( + "%s%s -> %s%s [style=dashed];\n" + % (node.name, id(node), scenario.name, id(scenario)) + ) + + with open(filename, "w") as f: f.write("digraph ScenarioTree {\n") root_node = self.findRootNode() _visit_node(root_node) diff --git a/mpisppy/utils/pysp_model/tree_structure_model.py b/mpisppy/utils/pysp_model/tree_structure_model.py index 1a667d252..8f6e2973d 100644 --- a/mpisppy/utils/pysp_model/tree_structure_model.py +++ 
b/mpisppy/utils/pysp_model/tree_structure_model.py @@ -10,8 +10,8 @@ # # Pyomo: Python Optimization Modeling Objects # Copyright 2017 National Technology and Engineering Solutions of Sandia, LLC -# Under the terms of Contract DE-NA0003525 with National Technology and -# Engineering Solutions of Sandia, LLC, the U.S. Government retains certain +# Under the terms of Contract DE-NA0003525 with National Technology and +# Engineering Solutions of Sandia, LLC, the U.S. Government retains certain # rights in this software. # This software is distributed under the 3-clause BSD License. # ___________________________________________________________________________ @@ -26,11 +26,17 @@ from pyomo.common.dependencies import attempt_import # The code below conforms to the networkx>=2.0 API -networkx, networkx_available = attempt_import('networkx', minimum_version="2.0") +networkx, networkx_available = attempt_import("networkx", minimum_version="2.0") + def CreateAbstractScenarioTreeModel(): from pyomo.core import ( - AbstractModel, Set, Param, Boolean, Any, UnitInterval, + AbstractModel, + Set, + Param, + Boolean, + Any, + UnitInterval, ) model = AbstractModel() @@ -41,43 +47,22 @@ def CreateAbstractScenarioTreeModel(): model.Stages = Set(ordered=True) model.Nodes = Set(ordered=True) - model.NodeStage = Param(model.Nodes, - within=model.Stages, - mutable=True) - model.Children = Set(model.Nodes, - within=model.Nodes, - initialize=[], - ordered=True) - model.ConditionalProbability = Param(model.Nodes, - within=UnitInterval, - mutable=True) + model.NodeStage = Param(model.Nodes, within=model.Stages, mutable=True) + model.Children = Set(model.Nodes, within=model.Nodes, initialize=[], ordered=True) + model.ConditionalProbability = Param(model.Nodes, within=UnitInterval, mutable=True) model.Scenarios = Set(ordered=True) - model.ScenarioLeafNode = Param(model.Scenarios, - within=model.Nodes, - mutable=True) - - model.StageVariables = Set(model.Stages, - initialize=[], - ordered=True) - - model.NodeVariables = Set(model.Nodes, - initialize=[], - ordered=True) - - model.StageCost = Param(model.Stages, - within=Any, - mutable=True, - default=None) - model.NodeCost = Param(model.Nodes, - within=Any, - mutable=True, - default=None) + model.ScenarioLeafNode = Param(model.Scenarios, within=model.Nodes, mutable=True) + + model.StageVariables = Set(model.Stages, initialize=[], ordered=True) + + model.NodeVariables = Set(model.Nodes, initialize=[], ordered=True) + + model.StageCost = Param(model.Stages, within=Any, mutable=True, default=None) + model.NodeCost = Param(model.Nodes, within=Any, mutable=True, default=None) # DEPRECATED - model.StageCostVariable = Param(model.Stages, - within=Any, - mutable=True) + model.StageCostVariable = Param(model.Stages, within=Any, mutable=True) # it is often the case that a subset of the stage variables are strictly "derived" # variables, in that their values are computable once the values of other variables @@ -85,12 +70,8 @@ def CreateAbstractScenarioTreeModel(): # as it is unnecessary to post non-anticipativity constraints for these variables. # further, attempting to force non-anticipativity - either implicitly or explicitly - # on these variables can cause issues with decomposition algorithms. 
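(Editor's aside: the comment above is the only in-source explanation of derived variables, so a short illustration of the Set declarations below may help. On a concrete instance of this tree model, derived variables are registered per stage or per node so that no non-anticipativity constraints are generated for them. A hedged sketch using the two-stage helper defined later in this file; the component names are hypothetical:

    from mpisppy.utils.pysp_model.tree_structure_model import (
        CreateConcreteTwoStageScenarioTreeModel,
    )

    m = CreateConcreteTwoStageScenarioTreeModel(3)
    m.StageCost["Stage1"] = "FirstStageCost"
    m.StageCost["Stage2"] = "SecondStageCost"
    m.StageVariables["Stage1"].add("x")
    # "z" is computable once "x" is known, so it is declared derived and
    # excluded from non-anticipativity constraints
    m.StageDerivedVariables["Stage1"].add("z")
)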
- model.StageDerivedVariables = Set(model.Stages, - initialize=[], - ordered=True) - model.NodeDerivedVariables = Set(model.Nodes, - initialize=[], - ordered=True) + model.StageDerivedVariables = Set(model.Stages, initialize=[], ordered=True) + model.NodeDerivedVariables = Set(model.Nodes, initialize=[], ordered=True) # scenario data can be populated in one of two ways. the first is "scenario-based", # in which a single .dat file contains all of the data for each scenario. the .dat @@ -99,22 +80,18 @@ def CreateAbstractScenarioTreeModel(): # tree. the node-based method is more compact, but the scenario-based method is # often more natural when parameter data is generated via simulation. the default # is scenario-based. - model.ScenarioBasedData = Param(within=Boolean, - default=True, - mutable=True) + model.ScenarioBasedData = Param(within=Boolean, default=True, mutable=True) # do we bundle, and if so, how? - model.Bundling = Param(within=Boolean, - default=False, - mutable=True) + model.Bundling = Param(within=Boolean, default=False, mutable=True) # bundle names model.Bundles = Set(ordered=True) - model.BundleScenarios = Set(model.Bundles, - ordered=True) + model.BundleScenarios = Set(model.Bundles, ordered=True) return model + # # Generates a simple two-stage scenario tree model with the requested # number of scenarios. It is up to the user to include the remaining @@ -131,30 +108,32 @@ def CreateAbstractScenarioTreeModel(): def CreateConcreteTwoStageScenarioTreeModel(num_scenarios): m = CreateAbstractScenarioTreeModel() m = m.create_instance() - m.Stages.add('Stage1') - m.Stages.add('Stage2') - m.Nodes.add('RootNode') - for i in range(1, num_scenarios+1): - m.Nodes.add('LeafNode_Scenario'+str(i)) - m.Scenarios.add('Scenario'+str(i)) - m.NodeStage['RootNode'] = 'Stage1' - m.ConditionalProbability['RootNode'] = 1.0 + m.Stages.add("Stage1") + m.Stages.add("Stage2") + m.Nodes.add("RootNode") + for i in range(1, num_scenarios + 1): + m.Nodes.add("LeafNode_Scenario" + str(i)) + m.Scenarios.add("Scenario" + str(i)) + m.NodeStage["RootNode"] = "Stage1" + m.ConditionalProbability["RootNode"] = 1.0 for node in m.Nodes: - if node != 'RootNode': - m.NodeStage[node] = 'Stage2' - m.Children['RootNode'].add(node) + if node != "RootNode": + m.NodeStage[node] = "Stage2" + m.Children["RootNode"].add(node) m.Children[node].clear() - m.ConditionalProbability[node] = 1.0/num_scenarios - m.ScenarioLeafNode[node.replace('LeafNode_','')] = node + m.ConditionalProbability[node] = 1.0 / num_scenarios + m.ScenarioLeafNode[node.replace("LeafNode_", "")] = node return m + def ScenarioTreeModelFromNetworkX( - tree, - node_name_attribute=None, - edge_probability_attribute='weight', - stage_names=None, - scenario_name_attribute=None): + tree, + node_name_attribute=None, + edge_probability_attribute="weight", + stage_names=None, + scenario_name_attribute=None, +): """ Create a scenario tree model from a networkx tree. 
The height of the tree must be at least 1 (meaning at least @@ -245,58 +224,53 @@ def ScenarioTreeModelFromNetworkX( """ if not networkx.is_tree(tree): - raise TypeError( - "Graph object is not a tree " - "(see networkx.is_tree)") + raise TypeError("Graph object is not a tree " "(see networkx.is_tree)") if not networkx.is_directed(tree): - raise TypeError( - "Graph object is not directed " - "(see networkx.is_directed)") + raise TypeError("Graph object is not directed " "(see networkx.is_directed)") if not networkx.is_branching(tree): - raise TypeError( - "Grapn object is not a branching " - "(see networkx.is_branching") + raise TypeError("Grapn object is not a branching " "(see networkx.is_branching") in_degree_items = tree.in_degree() # Prior to networkx ~2.0, in_degree() returned a dictionary. # Now it is a view on items, so only call .items() for the old case - if hasattr(in_degree_items, 'items'): + if hasattr(in_degree_items, "items"): in_degree_items = in_degree_items.items() - root = [u for u,d in in_degree_items if d == 0] + root = [u for u, d in in_degree_items if d == 0] assert len(root) == 1 root = root[0] num_stages = networkx.eccentricity(tree, v=root) + 1 if num_stages < 2: - raise ValueError( - "The number of stages must be at least 2") + raise ValueError("The number of stages must be at least 2") m = CreateAbstractScenarioTreeModel() m = m.create_instance() if stage_names is not None: unique_stage_names = set() - for cnt, stage_name in enumerate(stage_names,1): + for cnt, stage_name in enumerate(stage_names, 1): m.Stages.add(stage_name) unique_stage_names.add(stage_name) if cnt != num_stages: raise ValueError( "incorrect number of stages names (%s), should be %s" - % (cnt, num_stages)) + % (cnt, num_stages) + ) if len(unique_stage_names) != cnt: raise ValueError("all stage names were not unique") else: for i in range(num_stages): - m.Stages.add('Stage'+str(i+1)) + m.Stages.add("Stage" + str(i + 1)) node_to_name = {} node_to_scenario = {} scenario_bundle = {} + def _setup(u, succ): if node_name_attribute is not None: if node_name_attribute not in tree.nodes[u]: raise KeyError( "node '%s' missing node name " - "attribute: '%s'" - % (u, node_name_attribute)) + "attribute: '%s'" % (u, node_name_attribute) + ) node_name = tree.nodes[u][node_name_attribute] else: node_name = u @@ -311,17 +285,16 @@ def _setup(u, succ): if scenario_name_attribute not in tree.nodes[u]: raise KeyError( "node '%s' missing scenario name " - "attribute: '%s'" - % (u, scenario_name_attribute)) + "attribute: '%s'" % (u, scenario_name_attribute) + ) scenario_name = tree.nodes[u][scenario_name_attribute] else: scenario_name = u node_to_scenario[u] = scenario_name m.Scenarios.add(scenario_name) - scenario_bundle[scenario_name] = \ - tree.nodes[u].get('bundle', None) - _setup(root, - networkx.dfs_successors(tree, root)) + scenario_bundle[scenario_name] = tree.nodes[u].get("bundle", None) + + _setup(root, networkx.dfs_successors(tree, root)) def _add_node(u, stage, succ, pred): node_name = node_to_name[u] @@ -332,19 +305,20 @@ def _add_node(u, stage, succ, pred): assert u in pred # prior to networkx ~2.0, we used a .edge attribute on DiGraph, # which no longer exists. 
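(Editor's aside, for readers unfamiliar with the networkx API shift mentioned in the comment above: since networkx 2.0, edge attributes are reached through `G.edges[u, v]`, which is the branch the `hasattr(tree, "edge")` test below falls through to. A minimal sketch, assuming networkx >= 2.0:

    import networkx

    G = networkx.DiGraph()
    G.add_edge("root", "s1", weight=0.5)
    edge = G.edges["root", "s1"]  # pre-2.0 code used G.edge["root"]["s1"]
    assert edge["weight"] == 0.5
)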
- if hasattr(tree, 'edge'): + if hasattr(tree, "edge"): edge = tree.edge[pred[u]][u] else: - edge = tree.edges[pred[u],u] + edge = tree.edges[pred[u], u] probability = None if edge_probability_attribute is not None: if edge_probability_attribute not in edge: raise KeyError( "edge '(%s, %s)' missing probability attribute: '%s'" - % (pred[u], u, edge_probability_attribute)) + % (pred[u], u, edge_probability_attribute) + ) probability = edge[edge_probability_attribute] else: - probability = 1.0/len(succ[pred[u]]) + probability = 1.0 / len(succ[pred[u]]) m.ConditionalProbability[node_name] = probability # get node variables if "variables" in tree.nodes[u]: @@ -363,18 +337,18 @@ def _add_node(u, stage, succ, pred): if u in succ: child_names = [] for v in succ[u]: - child_names.append( - _add_node(v, stage+1, succ, pred)) + child_names.append(_add_node(v, stage + 1, succ, pred)) total_probability = 0.0 for child_name in child_names: m.Children[node_name].add(child_name) - total_probability += \ - pyomo.core.value(m.ConditionalProbability[child_name]) + total_probability += pyomo.core.value( + m.ConditionalProbability[child_name] + ) if abs(total_probability - 1.0) > 1e-5: raise ValueError( "edge probabilities leaving node '%s' " - "do not sum to 1 (total=%r)" - % (u, total_probability)) + "do not sum to 1 (total=%r)" % (u, total_probability) + ) else: # a leaf node scenario_name = node_to_scenario[u] @@ -383,24 +357,27 @@ def _add_node(u, stage, succ, pred): return node_name - _add_node(root, - 1, - networkx.dfs_successors(tree, root), - networkx.dfs_predecessors(tree, root)) + _add_node( + root, + 1, + networkx.dfs_successors(tree, root), + networkx.dfs_predecessors(tree, root), + ) if any(_b is not None for _b in scenario_bundle.values()): if any(_b is None for _b in scenario_bundle.values()): - raise ValueError("Incomplete bundle specification. " - "All scenarios require a bundle " - "identifier.") + raise ValueError( + "Incomplete bundle specification. " + "All scenarios require a bundle " + "identifier." 
+ ) m.Bundling.value = True bundle_scenarios = {} for bundle_name in sorted(set(scenario_bundle.values())): m.Bundles.add(bundle_name) bundle_scenarios[bundle_name] = [] for scenario_name in m.Scenarios: - bundle_scenarios[scenario_bundle[scenario_name]].\ - append(scenario_name) + bundle_scenarios[scenario_bundle[scenario_name]].append(scenario_name) for bundle_name in m.Bundles: for scenario_name in sorted(bundle_scenarios[bundle_name]): m.BundleScenarios[bundle_name].add(scenario_name) diff --git a/mpisppy/utils/rho_utils.py b/mpisppy/utils/rho_utils.py index f99f16e52..75d6ccdee 100644 --- a/mpisppy/utils/rho_utils.py +++ b/mpisppy/utils/rho_utils.py @@ -9,8 +9,9 @@ import pandas as pd + def rhos_to_csv(s, filename): - """ write the rho values to a csv "fullname", rho + """write the rho values to a csv "fullname", rho Args: s (ConcreteModel): the scenario Pyomo model filenaame (str): file to which to write @@ -22,15 +23,15 @@ def rhos_to_csv(s, filename): fullname = vdata.name f.write(f'"{fullname}",{rho._value}\n') - + def rho_list_from_csv(s, filename): - """ read rho values from a file and return a list suitable for rho_setter + """read rho values from a file and return a list suitable for rho_setter Args: s (ConcreteModel): scenario whence the id values come filename (str): name of the csv file to read (fullname, rho) Returns: retlist (list of (id, rho) tuples); list suitable for rho_setter - """ + """ rhodf = pd.read_csv(filename) retlist = list() for idx, row in rhodf.iterrows(): @@ -39,6 +40,8 @@ def rho_list_from_csv(s, filename): if vo is not None: retlist.append((id(vo), row["rho"])) else: - raise RuntimeError(f"rho values from {filename} found Var {fullname} " - f"that is not found in the scenario given (name={s._name})") - return retlist + raise RuntimeError( + f"rho values from {filename} found Var {fullname} " + f"that is not found in the scenario given (name={s._name})" + ) + return retlist diff --git a/mpisppy/utils/solver_spec.py b/mpisppy/utils/solver_spec.py index 1cb1fca72..b0b6725ec 100644 --- a/mpisppy/utils/solver_spec.py +++ b/mpisppy/utils/solver_spec.py @@ -39,8 +39,9 @@ """ + def solver_specification(cfg, prefix="", name_required=True): - """ Look through cfg to find the soler_name and solver_options. + """Look through cfg to find the soler_name and solver_options. 
Args: cfg (Config): options, typically from the command line @@ -52,11 +53,13 @@ def solver_specification(cfg, prefix="", name_required=True): solver_name (str): the solver name (or None) solver_options (dict): the options dictionary created from the string """ - - if isinstance(prefix, (list,tuple)): + + if isinstance(prefix, (list, tuple)): root_list = prefix else: - root_list = [prefix, ] + root_list = [ + prefix, + ] idx_list = list() for sroot in root_list: @@ -66,7 +69,9 @@ def solver_specification(cfg, prefix="", name_required=True): solver_name = cfg[name_idx] options_idx = "solver_options" if sroot == "" else f"{sroot}_solver_options" ostr = cfg.get(options_idx) - solver_options = sputils.option_string_to_dict(ostr) # will return None for None + solver_options = sputils.option_string_to_dict( + ostr + ) # will return None for None break else: if name_required: diff --git a/mpisppy/utils/sputils.py b/mpisppy/utils/sputils.py index b33639cf2..367359da2 100644 --- a/mpisppy/utils/sputils.py +++ b/mpisppy/utils/sputils.py @@ -25,100 +25,128 @@ global_rank = MPI.COMM_WORLD.Get_rank() + def not_good_enough_results(results): - return (results is None) or (len(results.solution) == 0) or \ - (results.solution(0).status == SolutionStatus.infeasible) or \ - (results.solver.termination_condition == TerminationCondition.infeasible) or \ - (results.solver.termination_condition == TerminationCondition.infeasibleOrUnbounded) or \ - (results.solver.termination_condition == TerminationCondition.unbounded) + return ( + (results is None) + or (len(results.solution) == 0) + or (results.solution(0).status == SolutionStatus.infeasible) + or (results.solver.termination_condition == TerminationCondition.infeasible) + or ( + results.solver.termination_condition + == TerminationCondition.infeasibleOrUnbounded + ) + or (results.solver.termination_condition == TerminationCondition.unbounded) + ) + +_spin_the_wheel_move_msg = ( + "spin_the_wheel should now be used as the class " + "mpisppy.spin_the_wheel.WheelSpinner using the method `spin()`. Output " + "writers are now methods of the class WheelSpinner." +) -_spin_the_wheel_move_msg = \ - "spin_the_wheel should now be used as the class "\ - "mpisppy.spin_the_wheel.WheelSpinner using the method `spin()`. Output "\ - "writers are now methods of the class WheelSpinner." def spin_the_wheel(hub_dict, list_of_spoke_dict, comm_world=None): raise RuntimeError( - _spin_the_wheel_move_msg + \ - " See the example code below for a fix:\n" - ''' + _spin_the_wheel_move_msg + " See the example code below for a fix:\n" + """ from mpisppy.spin_the_wheel import WheelSpinner ws = WheelSpinner(hub_dict, list_of_spoke_dict) ws.spin(comm_world=comm_world) - ''' + """ ) + def first_stage_nonant_npy_serializer(file_name, scenario, bundling): # write just the nonants for ROOT in an npy file (e.g. 
for Conf Int) root = scenario._mpisppy_node_list[0] assert root.name == "ROOT" - root_nonants = np.fromiter((pyo.value(var) for var in root.nonant_vardata_list), float) + root_nonants = np.fromiter( + (pyo.value(var) for var in root.nonant_vardata_list), float + ) np.save(file_name, root_nonants) -def first_stage_nonant_writer( file_name, scenario, bundling ): - with open(file_name, 'w') as f: + +def first_stage_nonant_writer(file_name, scenario, bundling): + with open(file_name, "w") as f: root = scenario._mpisppy_node_list[0] assert root.name == "ROOT" for var in root.nonant_vardata_list: var_name = var.name if bundling: - dot_index = var_name.find('.') + dot_index = var_name.find(".") assert dot_index >= 0 - var_name = var_name[(dot_index+1):] + var_name = var_name[(dot_index + 1) :] f.write(f"{var_name},{pyo.value(var)}\n") -def scenario_tree_solution_writer( directory_name, scenario_name, scenario, bundling ): - with open(os.path.join(directory_name, scenario_name+'.csv'), 'w') as f: + +def scenario_tree_solution_writer(directory_name, scenario_name, scenario, bundling): + with open(os.path.join(directory_name, scenario_name + ".csv"), "w") as f: for var in scenario.component_data_objects( - ctype=(pyo.Var, pyo.Expression), - descend_into=True, - active=True, - sort=True): + ctype=(pyo.Var, pyo.Expression), descend_into=True, active=True, sort=True + ): var_name = var.name if bundling: - dot_index = var_name.find('.') + dot_index = var_name.find(".") assert dot_index >= 0 - var_name = var_name[(dot_index+1):] + var_name = var_name[(dot_index + 1) :] f.write(f"{var_name},{pyo.value(var)}\n") - -def write_spin_the_wheel_first_stage_solution(spcomm, opt_dict, solution_file_name, - first_stage_solution_writer=first_stage_nonant_writer): + + +def write_spin_the_wheel_first_stage_solution( + spcomm, + opt_dict, + solution_file_name, + first_stage_solution_writer=first_stage_nonant_writer, +): raise RuntimeError(_spin_the_wheel_move_msg) -def write_spin_the_wheel_tree_solution(spcomm, opt_dict, solution_directory_name, - scenario_tree_solution_writer=scenario_tree_solution_writer): + +def write_spin_the_wheel_tree_solution( + spcomm, + opt_dict, + solution_directory_name, + scenario_tree_solution_writer=scenario_tree_solution_writer, +): raise RuntimeError(_spin_the_wheel_move_msg) + def local_nonant_cache(spcomm): raise RuntimeError(_spin_the_wheel_move_msg) + ### a few Pyomo-related utilities ### def get_objs(scenario_instance, allow_none=False): - """ return the list of objective functions for scenario_instance""" - scenario_objs = scenario_instance.component_data_objects(pyo.Objective, - active=True, descend_into=True) + """return the list of objective functions for scenario_instance""" + scenario_objs = scenario_instance.component_data_objects( + pyo.Objective, active=True, descend_into=True + ) scenario_objs = list(scenario_objs) if (len(scenario_objs) == 0) and not allow_none: - raise RuntimeError(f"Scenario {scenario_instance.name} has no active " - "objective functions.") - if (len(scenario_objs) > 1): - print("WARNING: Scenario", scenario_instance.name, "has multiple active " - "objectives. Selecting the first objective.") + raise RuntimeError( + f"Scenario {scenario_instance.name} has no active " "objective functions." + ) + if len(scenario_objs) > 1: + print( + "WARNING: Scenario", + scenario_instance.name, + "has multiple active " "objectives. 
Selecting the first objective.",
+        )
     return scenario_objs


 def stash_ref_objs(scenario_instance):
     """Stash a reference to active objs so
-    Reactivate_obj can use the reference to reactivate them/it later.
+    reactivate_objs can use the reference to reactivate them/it later.
     """
     scenario_instance._mpisppy_data.obj_list = get_objs(scenario_instance)


 def deact_objs(scenario_instance):
-    """ Deactivate objs
+    """Deactivate objs
     Args:
         scenario_instance (Pyomo ConcreteModel): the scenario
     Returns:
@@ -132,44 +160,49 @@ def deact_objs(scenario_instance):


 def reactivate_objs(scenario_instance):
-    """ Reactivate ojbs stashed by stash_ref_objs """
+    """Reactivate objs stashed by stash_ref_objs"""
     if not hasattr(scenario_instance._mpisppy_data, "obj_list"):
         raise RuntimeError("reactivate_objs called with prior call to stash_ref_objs")
     for obj in scenario_instance._mpisppy_data.obj_list:
         obj.activate()

-
-def create_EF(scenario_names, scenario_creator, scenario_creator_kwargs=None,
-              EF_name=None, suppress_warnings=False,
-              nonant_for_fixed_vars=True):
-    """ Create a ConcreteModel of the extensive form.
-    Args:
-        scenario_names (list of str):
-            Names for each scenario to be passed to the scenario_creator
-            function.
-        scenario_creator (callable):
-            Function which takes a scenario name as its first argument and
-            returns a concrete model corresponding to that scenario.
-        scenario_creator_kwargs (dict, optional):
-            Options to pass to `scenario_creator`.
-        EF_name (str, optional):
-            Name of the ConcreteModel of the EF.
-        suppress_warnings (boolean, optional):
-            If true, do not display warnings. Default False.
-        nonant_for_fixed_vars (bool--optional): If True, enforces
-            non-anticipativity constraints for all variables, including
-            those which have been fixed. Default is True.
+def create_EF(
+    scenario_names,
+    scenario_creator,
+    scenario_creator_kwargs=None,
+    EF_name=None,
+    suppress_warnings=False,
+    nonant_for_fixed_vars=True,
+):
+    """Create a ConcreteModel of the extensive form.

-    Returns:
-        EF_instance (ConcreteModel):
-            ConcreteModel of extensive form with explicit
-            non-anticipativity constraints.
-
-    Note:
-        If any of the scenarios produced by scenario_creator do not have a
-        ._mpisppy_probability attribute, this function displays a warning, and assumes
-        that all scenarios are equally likely.
+    Args:
+        scenario_names (list of str):
+            Names for each scenario to be passed to the scenario_creator
+            function.
+        scenario_creator (callable):
+            Function which takes a scenario name as its first argument and
+            returns a concrete model corresponding to that scenario.
+        scenario_creator_kwargs (dict, optional):
+            Options to pass to `scenario_creator`.
+        EF_name (str, optional):
+            Name of the ConcreteModel of the EF.
+        suppress_warnings (boolean, optional):
+            If true, do not display warnings. Default False.
+        nonant_for_fixed_vars (bool--optional): If True, enforces
+            non-anticipativity constraints for all variables, including
+            those which have been fixed. Default is True.
+
+    Returns:
+        EF_instance (ConcreteModel):
+            ConcreteModel of extensive form with explicit
+            non-anticipativity constraints.
+
+    Note:
+        If any of the scenarios produced by scenario_creator do not have a
+        ._mpisppy_probability attribute, this function displays a warning, and assumes
+        that all scenarios are equally likely.
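A minimal sketch of the objective stash/restore helpers above; it assumes `scenario` already carries the `_mpisppy_data` block that mpi-sppy attaches, and the temporary objective is illustrative:

```python
import pyomo.environ as pyo
import mpisppy.utils.sputils as sputils

sputils.stash_ref_objs(scenario)  # remember the active objective(s)
sputils.deact_objs(scenario)      # deactivate them
scenario.tmp_obj = pyo.Objective(expr=0.0)  # e.g., probe feasibility only
# ... solve with the temporary objective ...
scenario.tmp_obj.deactivate()
sputils.reactivate_objs(scenario)  # restore the stashed objective(s)
```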
""" if scenario_creator_kwargs is None: scenario_creator_kwargs = dict() @@ -178,17 +211,19 @@ def create_EF(scenario_names, scenario_creator, scenario_creator_kwargs=None, for name in scenario_names } - if (len(scen_dict) == 0): + if len(scen_dict) == 0: raise RuntimeError("create_EF() received empty scenario list") - elif (len(scen_dict) == 1): + elif len(scen_dict) == 1: scenario_instance = list(scen_dict.values())[0] scenario_instance._ef_scenario_names = list(scen_dict.keys()) if not suppress_warnings: print("WARNING: passed single scenario to create_EF()") # special code to patch in ref_vars scenario_instance.ref_vars = dict() - scenario_instance._nlens = {node.name: len(node.nonant_vardata_list) - for node in scenario_instance._mpisppy_node_list} + scenario_instance._nlens = { + node.name: len(node.nonant_vardata_list) + for node in scenario_instance._mpisppy_node_list + } for node in scenario_instance._mpisppy_node_list: ndn = node.name @@ -196,70 +231,76 @@ def create_EF(scenario_names, scenario_creator, scenario_creator_kwargs=None, v = node.nonant_vardata_list[i] if (ndn, i) not in scenario_instance.ref_vars: scenario_instance.ref_vars[(ndn, i)] = v - # patch in EF_Obj - scenario_objs = deact_objs(scenario_instance) - obj = scenario_objs[0] + # patch in EF_Obj + scenario_objs = deact_objs(scenario_instance) + obj = scenario_objs[0] sense = pyo.minimize if obj.is_minimizing() else pyo.maximize scenario_instance.EF_Obj = pyo.Objective(expr=obj.expr, sense=sense) return scenario_instance #### special return for single scenario # Check if every scenario has a specified probability - probs_specified = \ - all(hasattr(scen, '_mpisppy_probability') for scen in scen_dict.values()) - uniform_specified = \ - probs_specified and all(scen._mpisppy_probability == "uniform" for scen in scen_dict.values()) + probs_specified = all( + hasattr(scen, "_mpisppy_probability") for scen in scen_dict.values() + ) + uniform_specified = probs_specified and all( + scen._mpisppy_probability == "uniform" for scen in scen_dict.values() + ) if not probs_specified or uniform_specified: for scen in scen_dict.values(): scen._mpisppy_probability = 1 / len(scen_dict) if not suppress_warnings and not uniform_specified: - print('WARNING: At least one scenario is missing _mpisppy_probability attribute.', - 'Assuming equally-likely scenarios...') + print( + "WARNING: At least one scenario is missing _mpisppy_probability attribute.", + "Assuming equally-likely scenarios...", + ) - EF_instance = _create_EF_from_scen_dict(scen_dict, - EF_name=EF_name, - nonant_for_fixed_vars=nonant_for_fixed_vars) + EF_instance = _create_EF_from_scen_dict( + scen_dict, EF_name=EF_name, nonant_for_fixed_vars=nonant_for_fixed_vars + ) return EF_instance -def _create_EF_from_scen_dict(scen_dict, EF_name=None, - nonant_for_fixed_vars=True): - """ Create a ConcreteModel of the extensive form from a scenario - dictionary. - Args: - scen_dict (dict): Dictionary whose keys are scenario names and - values are ConcreteModel objects corresponding to each - scenario. - EF_name (str--optional): Name of the resulting EF model. - nonant_for_fixed_vars (bool--optional): If True, enforces - non-anticipativity constraints for all variables, including - those which have been fixed. Default is True. +def _create_EF_from_scen_dict(scen_dict, EF_name=None, nonant_for_fixed_vars=True): + """Create a ConcreteModel of the extensive form from a scenario + dictionary. 
-    Returns:
-        EF_instance (ConcreteModel): ConcreteModel of extensive form with
-            explicity non-anticipativity constraints.
-
-    Notes:
-        The non-anticipativity constraints are enforced by creating
-        "reference variables" at each node in the scenario tree (excluding
-        leaves) and enforcing that all the variables for each scenario at
-        that node are equal to the reference variables.
-
-        This function is called directly when creating bundles for PH.
-
-        Does NOT assume that each scenario is equally likely. Raises an
-        AttributeError if a scenario object is encountered which does not
-        have a ._mpisppy_probability attribute.
-
-        Added the flag nonant_for_fixed_vars because original code only
-        enforced non-anticipativity for non-fixed vars, which is not always
-        desirable in the context of bundling. This allows for more
-        fine-grained control.
+    Args:
+        scen_dict (dict): Dictionary whose keys are scenario names and
+            values are ConcreteModel objects corresponding to each
+            scenario.
+        EF_name (str--optional): Name of the resulting EF model.
+        nonant_for_fixed_vars (bool--optional): If True, enforces
+            non-anticipativity constraints for all variables, including
+            those which have been fixed. Default is True.
+
+    Returns:
+        EF_instance (ConcreteModel): ConcreteModel of extensive form with
+            explicit non-anticipativity constraints.
+
+    Notes:
+        The non-anticipativity constraints are enforced by creating
+        "reference variables" at each node in the scenario tree (excluding
+        leaves) and enforcing that all the variables for each scenario at
+        that node are equal to the reference variables.
+
+        This function is called directly when creating bundles for PH.
+
+        Does NOT assume that each scenario is equally likely. Raises an
+        AttributeError if a scenario object is encountered which does not
+        have a ._mpisppy_probability attribute.
+
+        Added the flag nonant_for_fixed_vars because original code only
+        enforced non-anticipativity for non-fixed vars, which is not always
+        desirable in the context of bundling. This allows for more
+        fine-grained control.
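A minimal sketch of the direct bundle-building path mentioned in the Notes; the names are illustrative, and each scenario model must already carry a numeric `_mpisppy_probability`, since this function does not assume uniform probabilities:

```python
from mpisppy.utils.sputils import _create_EF_from_scen_dict

scen_dict = {name: scenario_creator(name) for name in ["Scen0", "Scen1"]}
bundle = _create_EF_from_scen_dict(scen_dict, EF_name="Bundle0",
                                   nonant_for_fixed_vars=True)
```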
""" is_min, clear = _models_have_same_sense(scen_dict) - if (not clear): - raise RuntimeError('Cannot build the extensive form out of models ' - 'with different objective senses') + if not clear: + raise RuntimeError( + "Cannot build the extensive form out of models " + "with different objective senses" + ) sense = pyo.minimize if is_min else pyo.maximize EF_instance = pyo.ConcreteModel(name=EF_name) EF_instance.EF_Obj = pyo.Objective(expr=0.0, sense=sense) @@ -267,24 +308,30 @@ def _create_EF_from_scen_dict(scen_dict, EF_name=None, # we don't strictly need these here, but it allows for eliding # of single scenarios and bundles when convenient EF_instance._mpisppy_data = pyo.Block(name="For non-Pyomo mpi-sppy data") - EF_instance._mpisppy_model = pyo.Block(name="For mpi-sppy Pyomo additions to the scenario model") + EF_instance._mpisppy_model = pyo.Block( + name="For mpi-sppy Pyomo additions to the scenario model" + ) EF_instance._mpisppy_data.scenario_feasible = None EF_instance._ef_scenario_names = [] EF_instance._mpisppy_probability = 0 - for (sname, scenario_instance) in scen_dict.items(): + for sname, scenario_instance in scen_dict.items(): EF_instance.add_component(sname, scenario_instance) EF_instance._ef_scenario_names.append(sname) # Now deactivate the scenario instance Objective scenario_objs = deact_objs(scenario_instance) - obj_func = scenario_objs[0] # Select the first objective + obj_func = scenario_objs[0] # Select the first objective try: - EF_instance.EF_Obj.expr += scenario_instance._mpisppy_probability * obj_func.expr - EF_instance._mpisppy_probability += scenario_instance._mpisppy_probability + EF_instance.EF_Obj.expr += ( + scenario_instance._mpisppy_probability * obj_func.expr + ) + EF_instance._mpisppy_probability += scenario_instance._mpisppy_probability except AttributeError as e: - raise AttributeError("Scenario " + sname + " has no specified " - "probability. Specify a value for the attribute " - " _mpisppy_probability and try again.") from e + raise AttributeError( + "Scenario " + sname + " has no specified " + "probability. Specify a value for the attribute " + " _mpisppy_probability and try again." + ) from e # Normalization does nothing when solving the full EF, but is required for # appropraite scaling of EFs used as bundles. EF_instance.EF_Obj.expr /= EF_instance._mpisppy_probability @@ -292,33 +339,39 @@ def _create_EF_from_scen_dict(scen_dict, EF_name=None, # For each node in the scenario tree, we need to collect the # nonanticipative vars and create the constraints for them, # which we do using a reference variable. - ref_vars = dict() # keys are _nonant_indices (i.e. a node name and a - # variable number) + ref_vars = dict() # keys are _nonant_indices (i.e. 
a node name and a + # variable number) ref_suppl_vars = dict() - EF_instance._nlens = dict() + EF_instance._nlens = dict() - nonant_constr = pyo.Constraint(pyo.Any, name='_C_EF_') - EF_instance.add_component('_C_EF_', nonant_constr) + nonant_constr = pyo.Constraint(pyo.Any, name="_C_EF_") + EF_instance.add_component("_C_EF_", nonant_constr) - nonant_constr_suppl = pyo.Constraint(pyo.Any, name='_C_EF_suppl') - EF_instance.add_component('_C_EF_suppl', nonant_constr_suppl) + nonant_constr_suppl = pyo.Constraint(pyo.Any, name="_C_EF_suppl") + EF_instance.add_component("_C_EF_suppl", nonant_constr_suppl) - for (sname, s) in scen_dict.items(): - nlens = {node.name: len(node.nonant_vardata_list) - for node in s._mpisppy_node_list} - - for (node_name, num_nonant_vars) in nlens.items(): # copy nlens to EF - if (node_name in EF_instance._nlens.keys() and - num_nonant_vars != EF_instance._nlens[node_name]): - raise RuntimeError("Number of non-anticipative variables is " - "not consistent at node " + node_name + " in scenario " + - sname) + for sname, s in scen_dict.items(): + nlens = { + node.name: len(node.nonant_vardata_list) for node in s._mpisppy_node_list + } + + for node_name, num_nonant_vars in nlens.items(): # copy nlens to EF + if ( + node_name in EF_instance._nlens.keys() + and num_nonant_vars != EF_instance._nlens[node_name] + ): + raise RuntimeError( + "Number of non-anticipative variables is " + "not consistent at node " + node_name + " in scenario " + sname + ) EF_instance._nlens[node_name] = num_nonant_vars - nlens_ef_suppl = {node.name: len(node.nonant_ef_suppl_vardata_list) - for node in s._mpisppy_node_list} + nlens_ef_suppl = { + node.name: len(node.nonant_ef_suppl_vardata_list) + for node in s._mpisppy_node_list + } for node in s._mpisppy_node_list: ndn = node.name @@ -331,10 +384,12 @@ def _create_EF_from_scen_dict(scen_dict, EF_name=None, # Add a non-anticipativity constraint, except in the case when # the variable is fixed and nonant_for_fixed_vars=False. elif (nonant_for_fixed_vars) or (not v.is_fixed()): - expr = LinearExpression(linear_coefs=[1,-1], - linear_vars=[v,ref_vars[(ndn,i)]], - constant=0.) - nonant_constr[(ndn,i,sname)] = (expr, 0.0) + expr = LinearExpression( + linear_coefs=[1, -1], + linear_vars=[v, ref_vars[(ndn, i)]], + constant=0.0, + ) + nonant_constr[(ndn, i, sname)] = (expr, 0.0) for i in range(nlens_ef_suppl[ndn]): v = node.nonant_ef_suppl_vardata_list[i] @@ -345,58 +400,66 @@ def _create_EF_from_scen_dict(scen_dict, EF_name=None, # Add a non-anticipativity constraint, expect in the case when # the variable is fixed and nonant_for_fixed_vars=False. elif (nonant_for_fixed_vars) or (not v.is_fixed()): - expr = LinearExpression(linear_coefs=[1,-1], - linear_vars=[v,ref_suppl_vars[(ndn,i)]], - constant=0.) - nonant_constr_suppl[(ndn,i,sname)] = (expr, 0.0) + expr = LinearExpression( + linear_coefs=[1, -1], + linear_vars=[v, ref_suppl_vars[(ndn, i)]], + constant=0.0, + ) + nonant_constr_suppl[(ndn, i, sname)] = (expr, 0.0) EF_instance.ref_vars = ref_vars EF_instance.ref_suppl_vars = ref_suppl_vars - + return EF_instance + def _models_have_same_sense(models): - ''' Check if every model in the provided dict has the same objective sense. + """Check if every model in the provided dict has the same objective sense. - Input: - models (dict) -- Keys are scenario names, values are Pyomo - ConcreteModel objects. - Returns: - is_minimizing (bool) -- True if and only if minimizing. None if the - check fails. 
- check (bool) -- True only if all the models have the same sense (or - no models were provided) - Raises: - ValueError -- If any of the models has either none or multiple - active objectives. - ''' - if (len(models) == 0): + Input: + models (dict) -- Keys are scenario names, values are Pyomo + ConcreteModel objects. + Returns: + is_minimizing (bool) -- True if and only if minimizing. None if the + check fails. + check (bool) -- True only if all the models have the same sense (or + no models were provided) + Raises: + ValueError -- If any of the models has either none or multiple + active objectives. + """ + if len(models) == 0: return True, True - senses = [find_active_objective(scenario).is_minimizing() - for scenario in models.values()] + senses = [ + find_active_objective(scenario).is_minimizing() for scenario in models.values() + ] sense = senses[0] check = all(val == sense for val in senses) - if (check): + if check: return (sense == pyo.minimize), check return None, check + def is_persistent(solver): - return isinstance(solver, - pyo.pyomo.solvers.plugins.solvers.persistent_solver.PersistentSolver) - + return isinstance( + solver, pyo.pyomo.solvers.plugins.solvers.persistent_solver.PersistentSolver + ) + + def ef_scenarios(ef): - """ An iterator to give the scenario sub-models in an ef + """An iterator to give the scenario sub-models in an ef Args: ef (ConcreteModel): the full extensive form model Yields: scenario name, scenario instance (str, ConcreteModel) - """ + """ for sname in ef._ef_scenario_names: yield (sname, getattr(ef, sname)) + def ef_nonants(ef): - """ An iterator to give representative Vars subject to non-anticipitivity + """An iterator to give representative Vars subject to non-anticipitivity Args: ef (ConcreteModel): the full extensive form model @@ -406,106 +469,113 @@ def ef_nonants(ef): Note: not on an EF object because not all ef's are part of an EF object """ - for (ndn,i), var in ef.ref_vars.items(): + for (ndn, i), var in ef.ref_vars.items(): yield (ndn, var, pyo.value(var)) - + def ef_nonants_csv(ef, filename): - """ Dump the nonant vars from an ef to a csv file; truly a dump... + """Dump the nonant vars from an ef to a csv file; truly a dump... Args: ef (ConcreteModel): the full extensive form model filename (str): the full name of the csv output file """ with open(filename, "w") as outfile: outfile.write("Node, EF_VarName, Value\n") - for (ndname, varname, varval) in ef_nonants(ef): + for ndname, varname, varval in ef_nonants(ef): outfile.write("{}, {}, {}\n".format(ndname, varname, varval)) - -def nonant_cache_from_ef(ef,verbose=False): - """ Populate a nonant_cache from an ef. Also works with multi-stage + +def nonant_cache_from_ef(ef, verbose=False): + """Populate a nonant_cache from an ef. 
Also works with multi-stage Args: ef (mpi-sppy ef): a solved ef Returns: nonant_cache (dict of numpy arrays): a special structure for nonant values - """ + """ nonant_cache = dict() - nodenames = set([ndn for (ndn,i) in ef.ref_vars]) + nodenames = set([ndn for (ndn, i) in ef.ref_vars]) for ndn in sorted(nodenames): - nonant_cache[ndn]=[] + nonant_cache[ndn] = [] i = 0 - while ((ndn,i) in ef.ref_vars): - xvar = pyo.value(ef.ref_vars[(ndn,i)]) + while (ndn, i) in ef.ref_vars: + xvar = pyo.value(ef.ref_vars[(ndn, i)]) nonant_cache[ndn].append(xvar) if verbose: print("barfoo", i, xvar) - i+=1 + i += 1 return nonant_cache def ef_ROOT_nonants_npy_serializer(ef, filename): - """ write the root node nonants to be ready by a numpy load + """write the root node nonants to be ready by a numpy load Args: ef (ConcreteModel): the full extensive form model filename (str): the full name of the .npy output file """ - root_nonants = np.fromiter((v for ndn,var,v in ef_nonants(ef) if ndn == "ROOT"), float) + root_nonants = np.fromiter( + (v for ndn, var, v in ef_nonants(ef) if ndn == "ROOT"), float + ) np.save(filename, root_nonants) -def write_ef_first_stage_solution(ef, - solution_file_name, - first_stage_solution_writer=first_stage_nonant_writer): - """ + +def write_ef_first_stage_solution( + ef, solution_file_name, first_stage_solution_writer=first_stage_nonant_writer +): + """ Write a solution file, if a solution is available, to the solution_file_name provided Args: - ef : A Concrete Model of the Extensive Form (output of create_EF). + ef : A Concrete Model of the Extensive Form (output of create_EF). We assume it has already been solved. solution_file_name : filename to write the solution to first_stage_solution_writer (optional) : custom first stage solution writer function - + NOTE: This utility is replicating WheelSpinner.write_first_stage_solution for EF """ if global_rank == 0: - representative_scenario = getattr(ef,ef._ef_scenario_names[0]) - first_stage_solution_writer(solution_file_name, - representative_scenario, - bundling=False) - -def write_ef_tree_solution(ef, solution_directory_name, - scenario_tree_solution_writer=scenario_tree_solution_writer): - """ Write a tree solution directory, if available, to the solution_directory_name provided + representative_scenario = getattr(ef, ef._ef_scenario_names[0]) + first_stage_solution_writer( + solution_file_name, representative_scenario, bundling=False + ) + + +def write_ef_tree_solution( + ef, + solution_directory_name, + scenario_tree_solution_writer=scenario_tree_solution_writer, +): + """Write a tree solution directory, if available, to the solution_directory_name provided Args: - ef : A Concrete Model of the Extensive Form (output of create_EF). + ef : A Concrete Model of the Extensive Form (output of create_EF). We assume it has already been solved. solution_file_name : filename to write the solution to scenario_tree_solution_writer (optional) : custom scenario solution writer function - + NOTE: This utility is replicating WheelSpinner.write_tree_solution for EF """ - if global_rank==0: + if global_rank == 0: os.makedirs(solution_directory_name, exist_ok=True) for scenario_name, scenario in ef_scenarios(ef): - scenario_tree_solution_writer(solution_directory_name, - scenario_name, - scenario, - bundling=False) - + scenario_tree_solution_writer( + solution_directory_name, scenario_name, scenario, bundling=False + ) + def extract_num(string): - ''' Given a string, extract the longest contiguous - integer from the right-hand side of the string. 
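For reference, a sketch of the EF solution writers above; it assumes `ef` was built by `create_EF` and has already been solved, and the output names are illustrative:

```python
from mpisppy.utils.sputils import (
    first_stage_nonant_npy_serializer,
    write_ef_first_stage_solution,
    write_ef_tree_solution,
)

write_ef_first_stage_solution(ef, "first_stage.csv")  # var,value rows
write_ef_first_stage_solution(
    ef,
    "root_nonants.npy",
    first_stage_solution_writer=first_stage_nonant_npy_serializer,
)
write_ef_tree_solution(ef, "ef_solution")  # one csv per scenario
```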
+ """Given a string, extract the longest contiguous + integer from the right-hand side of the string. + + Example: + scenario324 -> 324 - Example: - scenario324 -> 324 + TODO: Add Exception Handling + """ + return int(re.compile(r"(\d+)$").search(string).group(1)) - TODO: Add Exception Handling - ''' - return int(re.compile(r'(\d+)$').search(string).group(1)) -def node_idx(node_path,branching_factors): - ''' +def node_idx(node_path, branching_factors): + """ Computes a unique id for a given node in a scenario tree. It follows the path to the node, computing the unique id for each ascendant. @@ -520,22 +590,23 @@ def node_idx(node_path,branching_factors): ------- node_idx Node unique id. - + NOTE: Does not work with unbalanced trees. - ''' - if node_path == []: #ROOT node + """ + if node_path == []: # ROOT node return 0 else: - stage_id = 0 #node unique id among stage t+1 nodes. + stage_id = 0 # node unique id among stage t+1 nodes. for t in range(len(node_path)): - stage_id = node_path[t]+branching_factors[t]*stage_id - node_idx = _nodenum_before_stage(len(node_path),branching_factors)+stage_id + stage_id = node_path[t] + branching_factors[t] * stage_id + node_idx = _nodenum_before_stage(len(node_path), branching_factors) + stage_id return node_idx -def _extract_node_idx(nodename,branching_factors): + +def _extract_node_idx(nodename, branching_factors): """ - + Parameters ---------- @@ -550,21 +621,22 @@ def _extract_node_idx(nodename,branching_factors): A unique integer that can be used as a key to designate this scenario. """ - if nodename =='ROOT': + if nodename == "ROOT": return 0 else: - to_list = [int(x) for x in re.findall(r'\d+',nodename)] - return node_idx(to_list,branching_factors) + to_list = [int(x) for x in re.findall(r"\d+", nodename)] + return node_idx(to_list, branching_factors) + def parent_ndn(nodename): - if nodename == 'ROOT': + if nodename == "ROOT": return None else: - return re.search('(.+)_(\d+)',nodename).group(1) + return re.search("(.+)_(\d+)", nodename).group(1) + - def option_string_to_dict(ostr): - """ Convert a string to the standard dict for solver options. + """Convert a string to the standard dict for solver options. Intended for use in the calling program; not internal use here. Args: @@ -574,6 +646,7 @@ def option_string_to_dict(ostr): solver_options (dict): solver options """ + def convert_value_string_to_number(s): try: return int(s) @@ -596,13 +669,15 @@ def convert_value_string_to_number(s): option_key = this_option_pieces[0] solver_options[option_key] = None else: - raise RuntimeError("Illegally formed subsolve directive"\ - + " option=%s detected" % this_option_string) + raise RuntimeError( + "Illegally formed subsolve directive" + + " option=%s detected" % this_option_string + ) return solver_options def option_dict_to_string(odict): - """ Convert a standard dict for solver options to a string. + """Convert a standard dict for solver options to a string. Args: odict (dict): options dict for Pyomo @@ -628,8 +703,8 @@ def option_dict_to_string(odict): # Various utilities related to scenario rank maps (some may not be in use) -def scens_to_ranks(scen_count, n_proc, rank, branching_factors = None): - """ Determine the rank assignments that are made in spbase. +def scens_to_ranks(scen_count, n_proc, rank, branching_factors=None): + """Determine the rank assignments that are made in spbase. 
NOTE: Callers to this should call _scentree.scen_names_to_ranks Args: scen_count (int): number of scenarios @@ -652,7 +727,7 @@ def scens_to_ranks(scen_count, n_proc, rank, branching_factors = None): ) # for now, we are treating two-stage as a special case - if (branching_factors is None): + if branching_factors is None: avg = scen_count / n_proc slices = [list(range(int(i * avg), int((i + 1) * avg))) for i in range(n_proc)] return slices, None @@ -661,133 +736,152 @@ def scens_to_ranks(scen_count, n_proc, rank, branching_factors = None): # indecision as of May 2020 (delete this comment DLW) # just make sure things are consistent with what xhat will do... # TBD: streamline - all_scenario_names = ["ID"+str(i) for i in range(scen_count)] + all_scenario_names = ["ID" + str(i) for i in range(scen_count)] tree = _ScenTree(branching_factors, all_scenario_names) - scenario_names_to_ranks, slices, = tree.scen_name_to_rank(n_proc, rank) + ( + scenario_names_to_ranks, + slices, + ) = tree.scen_name_to_rank(n_proc, rank) return slices, scenario_names_to_ranks -def _nodenum_before_stage(t,branching_factors): - #How many nodes in a tree of stage 1,2,...,t ? - #Only works with branching factors + +def _nodenum_before_stage(t, branching_factors): + # How many nodes in a tree of stage 1,2,...,t ? + # Only works with branching factors return int(sum(np.prod(branching_factors[0:i]) for i in range(t))) + def find_leaves(all_nodenames): - #Take a list of all nodenames from a tree, and find the leaves of it. - #WARNING: We do NOT check that the tree is well constructed - - if all_nodenames is None or all_nodenames == ['ROOT']: - return {'ROOT':False} # 2 stage problem: no leaf nodes in all_nodenames - #A leaf is simply a root with no child n°0 + # Take a list of all nodenames from a tree, and find the leaves of it. + # WARNING: We do NOT check that the tree is well constructed + + if all_nodenames is None or all_nodenames == ["ROOT"]: + return {"ROOT": False} # 2 stage problem: no leaf nodes in all_nodenames + # A leaf is simply a root with no child n°0 is_leaf = dict() for ndn in all_nodenames: - if ndn+"_0" in all_nodenames: + if ndn + "_0" in all_nodenames: is_leaf[ndn] = False else: is_leaf[ndn] = True return is_leaf - - -class _TreeNode(): - #Create the subtree generated by a node, with associated scenarios + + +class _TreeNode: + # Create the subtree generated by a node, with associated scenarios # stages are 1-based, everything else is 0-based # scenario lists are stored as (first, last) indices in all_scenarios - #This is also checking that the nodes from all_nodenames are well-named. + # This is also checking that the nodes from all_nodenames are well-named. 
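An illustration of the node-naming convention that `find_leaves` relies on: a node is a leaf exactly when no child named `<node>_0` is present in the list:

```python
from mpisppy.utils.sputils import find_leaves

names = ["ROOT", "ROOT_0", "ROOT_1",
         "ROOT_0_0", "ROOT_0_1", "ROOT_1_0", "ROOT_1_1"]
print(find_leaves(names))
# {'ROOT': False, 'ROOT_0': False, 'ROOT_1': False,
#  'ROOT_0_0': True, 'ROOT_0_1': True, 'ROOT_1_0': True, 'ROOT_1_1': True}
```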
def __init__(self, Parent, scenfirst, scenlast, desc_leaf_dict, name): - #desc_leaf_dict is the output of find_leaves - self.scenfirst = scenfirst #id of the first scenario with this node - self.scenlast = scenlast #id of the last scenario with this node + # desc_leaf_dict is the output of find_leaves + self.scenfirst = scenfirst # id of the first scenario with this node + self.scenlast = scenlast # id of the last scenario with this node self.name = name - numscens = scenlast - scenfirst + 1 #number of scenarios with this node + numscens = scenlast - scenfirst + 1 # number of scenarios with this node self.is_leaf = False if Parent is None: - assert(self.name == "ROOT") + assert self.name == "ROOT" self.stage = 1 else: self.stage = Parent.stage + 1 - if len(desc_leaf_dict)==1 and list(desc_leaf_dict.keys()) == ['ROOT']: - #2-stage problem, we don't create leaf nodes + if len(desc_leaf_dict) == 1 and list(desc_leaf_dict.keys()) == ["ROOT"]: + # 2-stage problem, we don't create leaf nodes self.kids = [] - elif name+"_0" not in desc_leaf_dict: + elif name + "_0" not in desc_leaf_dict: self.is_leaf = True self.kids = [] else: - if len(desc_leaf_dict) < numscens: - raise RuntimeError(f"There are more scenarios ({numscens}) than remaining leaves, for the node {name}") + if len(desc_leaf_dict) < numscens: + raise RuntimeError( + f"There are more scenarios ({numscens}) than remaining leaves, for the node {name}" + ) # make children first = scenfirst self.kids = list() - child_regex = re.compile(name+'_\d*\Z') - child_list = [x for x in desc_leaf_dict if child_regex.match(x) ] + child_regex = re.compile(name + "_\d*\Z") + child_list = [x for x in desc_leaf_dict if child_regex.match(x)] for i in range(len(desc_leaf_dict)): - childname = name+f"_{i}" + childname = name + f"_{i}" if childname not in desc_leaf_dict: if len(child_list) != i: - raise RuntimeError("The all_nodenames argument is giving an inconsistent tree." - f"The node {name} has {len(child_list)} children, but {childname} is not one of them.") + raise RuntimeError( + "The all_nodenames argument is giving an inconsistent tree." + f"The node {name} has {len(child_list)} children, but {childname} is not one of them." + ) break - childdesc_regex = re.compile(childname+'(_\d*)*\Z') - child_leaf_dict = {ndn:desc_leaf_dict[ndn] for ndn in desc_leaf_dict \ - if childdesc_regex.match(ndn)} - #We determine the number of children of this node + childdesc_regex = re.compile(childname + "(_\d*)*\Z") + child_leaf_dict = { + ndn: desc_leaf_dict[ndn] + for ndn in desc_leaf_dict + if childdesc_regex.match(ndn) + } + # We determine the number of children of this node child_scens_num = sum(child_leaf_dict.values()) - last = first+child_scens_num - 1 - self.kids.append(_TreeNode(self, first, last, - child_leaf_dict, childname)) + last = first + child_scens_num - 1 + self.kids.append( + _TreeNode(self, first, last, child_leaf_dict, childname) + ) first += child_scens_num if last != scenlast: print("numscens, last, scenlast", numscens, last, scenlast) - raise RuntimeError(f"Tree node did not initialize correctly for node {name}") - + raise RuntimeError( + f"Tree node did not initialize correctly for node {name}" + ) def stage_max(self): - #Return the number of stages of a subtree. - #Also check that all the subtrees have the same number of stages - #i.e. that the leaves are always on the same stage. + # Return the number of stages of a subtree. + # Also check that all the subtrees have the same number of stages + # i.e. 
that the leaves are always on the same stage. if self.is_leaf: return 1 else: leaves = [child.stage_max() for child in self.kids] if leaves.count(leaves[0]) != len(leaves): - maxstage = max(leaves)+ self.stage - minstage = min(leaves)+ self.stage - raise RuntimeError("The all_nodenames argument is giving an inconsistent tree. " - f"The node {self.name} has descendant leaves with stages going from {minstage} to {maxstage}") - return 1+leaves[0] - - - - -class _ScenTree(): + maxstage = max(leaves) + self.stage + minstage = min(leaves) + self.stage + raise RuntimeError( + "The all_nodenames argument is giving an inconsistent tree. " + f"The node {self.name} has descendant leaves with stages going from {minstage} to {maxstage}" + ) + return 1 + leaves[0] + + +class _ScenTree: def __init__(self, all_nodenames, ScenNames): if all_nodenames is None: - all_nodenames = ['ROOT'] #2 stage problem: no leaf nodes + all_nodenames = ["ROOT"] # 2 stage problem: no leaf nodes self.ScenNames = ScenNames self.NumScens = len(ScenNames) first = 0 last = self.NumScens - 1 desc_leaf_dict = find_leaves(all_nodenames) self.rootnode = _TreeNode(None, first, last, desc_leaf_dict, "ROOT") + def _nonleaves(nd): if nd.is_leaf: return [] else: retval = [nd] for child in nd.kids: - retval+=_nonleaves(child) + retval += _nonleaves(child) return retval + self.nonleaves = _nonleaves(self.rootnode) - - self.NumStages = \ - 2 if all_nodenames == ['ROOT'] else self.rootnode.stage_max() - self.NonLeafTerminals = \ - [nd for nd in self.nonleaves if nd.stage == self.NumStages-1] - + + self.NumStages = 2 if all_nodenames == ["ROOT"] else self.rootnode.stage_max() + self.NonLeafTerminals = [ + nd for nd in self.nonleaves if nd.stage == self.NumStages - 1 + ] + self.NumLeaves = len(desc_leaf_dict) - len(self.nonleaves) - if self.NumStages>2 and self.NumLeaves != self.NumScens: - raise RuntimeError("The all_nodenames argument is giving an inconsistent tree." - f"There are {self.NumLeaves} leaves for this tree, but {self.NumScens} scenarios are given.") + if self.NumStages > 2 and self.NumLeaves != self.NumScens: + raise RuntimeError( + "The all_nodenames argument is giving an inconsistent tree." + f"There are {self.NumLeaves} leaves for this tree, but {self.NumScens} scenarios are given." 
+ ) + def scen_names_to_ranks(self, n_proc): - """ + """ Args: n_proc: number of ranks in the cylinder (i.e., intra) @@ -813,7 +907,11 @@ def scen_names_to_ranks(self, n_proc): if n_proc == 1: for nd in self.nonleaves: scenario_names_to_rank[nd.name] = {s: 0 for s in self.ScenNames} - return scenario_names_to_rank, [list(range(self.NumScens))], [0]*self.NumScens + return ( + scenario_names_to_rank, + [list(range(self.NumScens))], + [0] * self.NumScens, + ) scen_count = len(self.ScenNames) avg = scen_count / n_proc @@ -822,17 +920,22 @@ def scen_names_to_ranks(self, n_proc): slices = [list(range(int(i * avg), int((i + 1) * avg))) for i in range(n_proc)] # scenario index -> rank - list_of_ranks_per_scenario_idx = [ rank for rank, scen_idxs in enumerate(slices) for _ in scen_idxs ] + list_of_ranks_per_scenario_idx = [ + rank for rank, scen_idxs in enumerate(slices) for _ in scen_idxs + ] + + scenario_names_to_rank["ROOT"] = { + s: rank for s, rank in zip(self.ScenNames, list_of_ranks_per_scenario_idx) + } - scenario_names_to_rank["ROOT"] = { s: rank for s,rank in zip(self.ScenNames, list_of_ranks_per_scenario_idx) } - def _recurse_do_node(node): for child in node.kids: - first_scen_idx = child.scenfirst last_scen_idx = child.scenlast - ranks_in_node = list_of_ranks_per_scenario_idx[first_scen_idx:last_scen_idx+1] + ranks_in_node = list_of_ranks_per_scenario_idx[ + first_scen_idx : last_scen_idx + 1 + ] minimum_rank_in_node = ranks_in_node[0] # IMPORTANT: @@ -841,11 +944,16 @@ def _recurse_do_node(node): # will then be offset by the minimum rank. As the ranks within each node are # contiguous, this is enough to infer the rank each scenario will have in this # node's comm - within_comm_ranks_in_node = [(rank-minimum_rank_in_node) for rank in ranks_in_node] + within_comm_ranks_in_node = [ + (rank - minimum_rank_in_node) for rank in ranks_in_node + ] - scenarios_in_nodes = self.ScenNames[first_scen_idx:last_scen_idx+1] + scenarios_in_nodes = self.ScenNames[first_scen_idx : last_scen_idx + 1] - scenario_names_to_rank[child.name] = { s : rank for s,rank in zip(scenarios_in_nodes, within_comm_ranks_in_node) } + scenario_names_to_rank[child.name] = { + s: rank + for s, rank in zip(scenarios_in_nodes, within_comm_ranks_in_node) + } if child not in self.NonLeafTerminals: _recurse_do_node(child) @@ -853,11 +961,13 @@ def _recurse_do_node(node): _recurse_do_node(self.rootnode) return scenario_names_to_rank, slices, list_of_ranks_per_scenario_idx - - + + ######## Utility to attach the one and only node to a two-stage scenario ####### -def attach_root_node(model, firstobj, varlist, nonant_ef_suppl_list=None, do_uniform=True): - """ Create a root node as a list to attach to a scenario model +def attach_root_node( + model, firstobj, varlist, nonant_ef_suppl_list=None, do_uniform=True +): + """Create a root node as a list to attach to a scenario model Args: model (ConcreteModel): model to which this will be attached firstobj (Pyomo Expression): First stage cost (e.g. 
model.FC) @@ -867,23 +977,35 @@ def attach_root_node(model, firstobj, varlist, nonant_ef_suppl_list=None, do_uni (important for bundling) do_uniform (boolean): controls a side-effect to deal with missing probs - Note: + Note: attaches a list consisting of one scenario node to the model """ model._mpisppy_node_list = [ - scenario_tree.ScenarioNode("ROOT", 1.0, 1, firstobj, varlist, model, - nonant_ef_suppl_list = nonant_ef_suppl_list) + scenario_tree.ScenarioNode( + "ROOT", + 1.0, + 1, + firstobj, + varlist, + model, + nonant_ef_suppl_list=nonant_ef_suppl_list, + ) ] if do_uniform: # Avoid a warning per scenario if not hasattr(model, "_mpisppy_probability"): model._mpisppy_probability = "uniform" - + ### utilities to check the slices and the map ### -def check4losses(numscens, branching_factors, - scenario_names_to_rank,slices,list_of_ranks_per_scenario_idx): - """ Check the data structures; gag and die if it looks bad. +def check4losses( + numscens, + branching_factors, + scenario_names_to_rank, + slices, + list_of_ranks_per_scenario_idx, +): + """Check the data structures; gag and die if it looks bad. Args: numscens (int): number of scenarios branching_factors (list of int): branching factors @@ -914,10 +1036,13 @@ def check4losses(numscens, branching_factors, raise RuntimeError("Internal error: slices is not correct") # not stage presence... - stagepresents = {stage: [False for _ in range(numscens)] for stage in range(len(branching_factors))} + stagepresents = { + stage: [False for _ in range(numscens)] + for stage in range(len(branching_factors)) + } # loop over the entire structure, marking those found as present for nodename, scenlist in scenario_names_to_rank.items(): - stagenum = nodename.count('_') + stagenum = nodename.count("_") for s in scenlist: snum = int(s[8:]) stagepresents[stagenum][snum] = True @@ -931,27 +1056,32 @@ def check4losses(numscens, branching_factors, raise RuntimeError("Internal error: scenario_name_to_rank") print("check4losses: OK") - + def disable_tictoc_output(): - f = open(os.devnull,"w") + f = open(os.devnull, "w") tt_timer._ostream = f + def reenable_tictoc_output(): # Primarily to re-enable after a disable tt_timer._ostream.close() tt_timer._ostream = sys.stdout - + def find_active_objective(pyomomodel): # return the only active objective or raise and error - obj = list(pyomomodel.component_data_objects( - Objective, active=True, descend_into=True)) + obj = list( + pyomomodel.component_data_objects(Objective, active=True, descend_into=True) + ) if len(obj) != 1: - raise RuntimeError("Could not identify exactly one active " - "Objective for model '%s' (found %d objectives)" - % (pyomomodel.name, len(obj))) + raise RuntimeError( + "Could not identify exactly one active " + "Objective for model '%s' (found %d objectives)" + % (pyomomodel.name, len(obj)) + ) return obj[0] + def create_nodenames_from_branching_factors(BFS): """ This function creates the node names of a tree without creating the whole tree. 
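A sketch of `attach_root_node` inside a two-stage `scenario_creator`; the model, its variables, and the cost expression are illustrative assumptions:

```python
import pyomo.environ as pyo
import mpisppy.utils.sputils as sputils

def scenario_creator(scenario_name):
    model = pyo.ConcreteModel(scenario_name)
    model.x = pyo.Var(within=pyo.NonNegativeReals)  # first-stage decision
    model.y = pyo.Var(within=pyo.NonNegativeReals)  # recourse decision
    model.FC = pyo.Expression(expr=2.0 * model.x)   # first-stage cost
    model.obj = pyo.Objective(expr=model.FC + 3.0 * model.y)
    # one ROOT node; also sets _mpisppy_probability to "uniform" by default
    sputils.attach_root_node(model, model.FC, [model.x])
    return model
```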
@@ -968,43 +1098,45 @@ def create_nodenames_from_branching_factors(BFS): """ stage_nodes = ["ROOT"] - nodenames = ['ROOT'] - if len(BFS)==1 : #2stage - return(nodenames) - for bf in BFS[:(len(BFS))]: + nodenames = ["ROOT"] + if len(BFS) == 1: # 2stage + return nodenames + for bf in BFS[: (len(BFS))]: old_stage_nodes = stage_nodes stage_nodes = [] for k in range(len(old_stage_nodes)): - stage_nodes += ['%s_%i'%(old_stage_nodes[k],b) for b in range(bf)] + stage_nodes += ["%s_%i" % (old_stage_nodes[k], b) for b in range(bf)] nodenames += stage_nodes return nodenames + def get_branching_factors_from_nodenames(all_nodenames): - #WARNING: Do not work with unbalanced trees + # WARNING: Do not work with unbalanced trees staget_node = "ROOT" branching_factors = [] - while staget_node+"_0" in all_nodenames: - child_regex = re.compile(staget_node+'_\d*\Z') - child_list = [x for x in all_nodenames if child_regex.match(x) ] - + while staget_node + "_0" in all_nodenames: + child_regex = re.compile(staget_node + "_\d*\Z") + child_list = [x for x in all_nodenames if child_regex.match(x)] + branching_factors.append(len(child_list)) staget_node += "_0" - if len(branching_factors)==1: - #2stage + if len(branching_factors) == 1: + # 2stage return None else: return branching_factors - + + def number_of_nodes(branching_factors): - #How many nodes does a tree with a given branching_factors have ? - last_node_stage_num = [i-1 for i in branching_factors] + # How many nodes does a tree with a given branching_factors have ? + last_node_stage_num = [i - 1 for i in branching_factors] return node_idx(last_node_stage_num, branching_factors) - + if __name__ == "__main__": - branching_factors = [2,2,2,3] + branching_factors = [2, 2, 2, 3] numscens = np.prod(branching_factors) - scennames = ["Scenario"+str(i) for i in range(numscens)] + scennames = ["Scenario" + str(i) for i in range(numscens)] testtree = _ScenTree(branching_factors, scennames) print("nonleaves:") for nd in testtree.nonleaves: @@ -1015,7 +1147,7 @@ def number_of_nodes(branching_factors): n_proc = 8 sntr, slices, ranks_per_scenario = testtree.scen_names_to_ranks(n_proc) print("map:") - for ndn,v in sntr.items(): + for ndn, v in sntr.items(): print(ndn, v) print(f"slices: {slices}") check4losses(numscens, branching_factors, sntr, slices, ranks_per_scenario) diff --git a/mpisppy/utils/stoch_admmWrapper.py b/mpisppy/utils/stoch_admmWrapper.py index e7183065a..ade47753c 100644 --- a/mpisppy/utils/stoch_admmWrapper.py +++ b/mpisppy/utils/stoch_admmWrapper.py @@ -6,88 +6,101 @@ # All rights reserved. Please see the files COPYRIGHT.md and LICENSE.md for # full copyright and license information. 
############################################################################### -#creating the class stoch_admmWrapper +# creating the class stoch_admmWrapper import mpisppy.utils.sputils as sputils import pyomo.environ as pyo import mpisppy.scenario_tree as scenario_tree import numpy as np + def _consensus_vars_number_creator(consensus_vars): """associates to each consensus vars the number of time it appears Args: - consensus_vars (dict): dictionary which keys are the subproblems and values are the list of consensus variables + consensus_vars (dict): dictionary which keys are the subproblems and values are the list of consensus variables present in the subproblem Returns: - consensus_vars_number (dict): dictionary whose keys are the consensus variables + consensus_vars_number (dict): dictionary whose keys are the consensus variables and values are the number of subproblems the variable is linked to. """ - consensus_vars_number={} + consensus_vars_number = {} for subproblem in consensus_vars: for var_stage_tuple in consensus_vars[subproblem]: var = var_stage_tuple[0] - if var not in consensus_vars_number: # instanciates consensus_vars_number[var] + if ( + var not in consensus_vars_number + ): # instanciates consensus_vars_number[var] consensus_vars_number[var] = 0 consensus_vars_number[var] += 1 return consensus_vars_number -class Stoch_AdmmWrapper(): #add scenario_tree - """ Defines an interface to all strata (hubs and spokes) - Args: - options (dict): options - all_scenario_names (list): all scenario names - scenario_creator (fct): returns a concrete model with special things - consensus_vars (dict): dictionary which keys are the subproblems and values are the list of consensus variables - present in the subproblem - n_cylinder (int): number of cylinders that will ultimately be used - mpicomm (MPI comm): creates communication - scenario_creator_kwargs (dict): kwargs passed directly to scenario_creator. - verbose (boolean): if True gives extra debugging information - - Attributes: - local_scenarios (dict of scenario objects): concrete models with - extra data, key is name - local_scenario_names (list): names of locals +class Stoch_AdmmWrapper: # add scenario_tree + """Defines an interface to all strata (hubs and spokes) + + Args: + options (dict): options + all_scenario_names (list): all scenario names + scenario_creator (fct): returns a concrete model with special things + consensus_vars (dict): dictionary which keys are the subproblems and values are the list of consensus variables + present in the subproblem + n_cylinder (int): number of cylinders that will ultimately be used + mpicomm (MPI comm): creates communication + scenario_creator_kwargs (dict): kwargs passed directly to scenario_creator. 
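An illustration of `_consensus_vars_number_creator` as defined above; the subproblem and variable names are hypothetical:

```python
consensus_vars = {
    "Region1": [("flow[1,2]", 1)],
    "Region2": [("flow[1,2]", 1), ("flow[2,3]", 1)],
    "Region3": [("flow[2,3]", 1)],
}
print(_consensus_vars_number_creator(consensus_vars))
# {'flow[1,2]': 2, 'flow[2,3]': 2}
```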
+ verbose (boolean): if True gives extra debugging information + + Attributes: + local_scenarios (dict of scenario objects): concrete models with + extra data, key is name + local_scenario_names (list): names of locals """ - def __init__(self, - options, - all_admm_stoch_subproblem_scenario_names, - split_admm_stoch_subproblem_scenario_name, - admm_subproblem_names, - stoch_scenario_names, - scenario_creator, #supplied by the user/ modeller, used only here - consensus_vars, - n_cylinders, - mpicomm, - scenario_creator_kwargs=None, - verbose=None, - BFs=None, + + def __init__( + self, + options, + all_admm_stoch_subproblem_scenario_names, + split_admm_stoch_subproblem_scenario_name, + admm_subproblem_names, + stoch_scenario_names, + scenario_creator, # supplied by the user/ modeller, used only here + consensus_vars, + n_cylinders, + mpicomm, + scenario_creator_kwargs=None, + verbose=None, + BFs=None, ): assert len(options) == 0, "no options supported by stoch_admmWrapper" # We need local_scenarios self.local_admm_stoch_subproblem_scenarios = {} - scen_tree = sputils._ScenTree(["ROOT"], all_admm_stoch_subproblem_scenario_names) - assert mpicomm.Get_size() % n_cylinders == 0, \ - f"{mpicomm.Get_size()=} and {n_cylinders=}, but {mpicomm.Get_size() % n_cylinders=} should be 0" + scen_tree = sputils._ScenTree( + ["ROOT"], all_admm_stoch_subproblem_scenario_names + ) + assert ( + mpicomm.Get_size() % n_cylinders == 0 + ), f"{mpicomm.Get_size()=} and {n_cylinders=}, but {mpicomm.Get_size() % n_cylinders=} should be 0" ranks_per_cylinder = mpicomm.Get_size() // n_cylinders - - scenario_names_to_rank, _rank_slices, _scenario_slices =\ - scen_tree.scen_names_to_ranks(ranks_per_cylinder) + + scenario_names_to_rank, _rank_slices, _scenario_slices = ( + scen_tree.scen_names_to_ranks(ranks_per_cylinder) + ) cylinder_rank = mpicomm.Get_rank() // n_cylinders - + # taken from spbase self.local_admm_stoch_subproblem_scenarios_names = [ - all_admm_stoch_subproblem_scenario_names[i] for i in _rank_slices[cylinder_rank] + all_admm_stoch_subproblem_scenario_names[i] + for i in _rank_slices[cylinder_rank] ] for sname in self.local_admm_stoch_subproblem_scenarios_names: s = scenario_creator(sname, **scenario_creator_kwargs) self.local_admm_stoch_subproblem_scenarios[sname] = s # we are not collecting instantiation time - self.split_admm_stoch_subproblem_scenario_name = split_admm_stoch_subproblem_scenario_name + self.split_admm_stoch_subproblem_scenario_name = ( + split_admm_stoch_subproblem_scenario_name + ) self.consensus_vars = consensus_vars self.verbose = verbose self.consensus_vars_number = _consensus_vars_number_creator(consensus_vars) @@ -95,24 +108,29 @@ def __init__(self, self.stoch_scenario_names = stoch_scenario_names self.BFs = BFs self.number_admm_subproblems = len(self.admm_subproblem_names) - self.all_nodenames = self.create_node_names(num_admm_subproblems=len(admm_subproblem_names), num_stoch_scens=len(stoch_scenario_names)) + self.all_nodenames = self.create_node_names( + num_admm_subproblems=len(admm_subproblem_names), + num_stoch_scens=len(stoch_scenario_names), + ) self.assign_variable_probs(verbose=self.verbose) - def create_node_names(self, num_admm_subproblems, num_stoch_scens): - if self.BFs is not None: # already multi-stage problem initially - self.BFs.append(num_admm_subproblems) # Adds the last stage with admm_subproblems + if self.BFs is not None: # already multi-stage problem initially + self.BFs.append( + num_admm_subproblems + ) # Adds the last stage with admm_subproblems 
all_nodenames = sputils.create_nodenames_from_branching_factors(self.BFs) - else: # 2-stage problem initially + else: # 2-stage problem initially all_node_names_0 = ["ROOT"] all_node_names_1 = ["ROOT_" + str(i) for i in range(num_stoch_scens)] - all_node_names_2 = [parent + "_" + str(j) \ - for parent in all_node_names_1 \ - for j in range(num_admm_subproblems)] + all_node_names_2 = [ + parent + "_" + str(j) + for parent in all_node_names_1 + for j in range(num_admm_subproblems) + ] all_nodenames = all_node_names_0 + all_node_names_1 + all_node_names_2 return all_nodenames - def var_prob_list(self, s): """Associates probabilities to variables and raises exceptions if the model doesn't match the dictionary consensus_vars @@ -126,25 +144,36 @@ def var_prob_list(self, s): """ return self.varprob_dict[s] - def assign_variable_probs(self, verbose=False): self.varprob_dict = {} - #we collect the consensus variables - all_consensus_vars = {var_stage_tuple[0]: var_stage_tuple[1] for admm_subproblem_names in self.consensus_vars for var_stage_tuple in self.consensus_vars[admm_subproblem_names]} + # we collect the consensus variables + all_consensus_vars = { + var_stage_tuple[0]: var_stage_tuple[1] + for admm_subproblem_names in self.consensus_vars + for var_stage_tuple in self.consensus_vars[admm_subproblem_names] + } error_list1 = [] error_list2 = [] - for sname,s in self.local_admm_stoch_subproblem_scenarios.items(): + for sname, s in self.local_admm_stoch_subproblem_scenarios.items(): if verbose: - print(f"stoch_admmWrapper.assign_variable_probs is processing scenario: {sname}") - admm_subproblem_name = self.split_admm_stoch_subproblem_scenario_name(sname)[0] + print( + f"stoch_admmWrapper.assign_variable_probs is processing scenario: {sname}" + ) + admm_subproblem_name = self.split_admm_stoch_subproblem_scenario_name( + sname + )[0] # varlist[stage] will contain the variables at each stage - depth = len(s._mpisppy_node_list)+1 + depth = len(s._mpisppy_node_list) + 1 varlist = [[] for _ in range(depth)] - assert hasattr(s,"_mpisppy_probability"), f"the scenario {sname} doesn't have any _mpisppy_probability attribute" - if s._mpisppy_probability == "uniform": #if there is a two-stage scenario defined by sputils.attach_root_node - s._mpisppy_probability = 1/len(self.stoch_scenario_names) + assert hasattr( + s, "_mpisppy_probability" + ), f"the scenario {sname} doesn't have any _mpisppy_probability attribute" + if ( + s._mpisppy_probability == "uniform" + ): # if there is a two-stage scenario defined by sputils.attach_root_node + s._mpisppy_probability = 1 / len(self.stoch_scenario_names) self.varprob_dict[s] = list() for vstr in all_consensus_vars.keys(): @@ -154,85 +183,111 @@ def assign_variable_probs(self, verbose=False): if var_stage_tuple in self.consensus_vars[admm_subproblem_name]: if v is not None: # variables that should be on the model - if stage == depth: - # The node is a stochastic scenario, the probability at this node has not yet been defined - cond_prob = 1. 
+ if stage == depth: + # The node is a stochastic scenario, the probability at this node has not yet been defined + cond_prob = 1.0 else: - prob_node = np.prod([s._mpisppy_node_list[ancestor_stage-1].cond_prob for ancestor_stage in range(1,stage+1)]) + prob_node = np.prod( + [ + s._mpisppy_node_list[ancestor_stage - 1].cond_prob + for ancestor_stage in range(1, stage + 1) + ] + ) # conditional probability of the scenario at the node (without considering the leaves as probabilities) - cond_prob = s._mpisppy_probability/prob_node - self.varprob_dict[s].append((id(v),cond_prob/(self.consensus_vars_number[vstr]))) + cond_prob = s._mpisppy_probability / prob_node + self.varprob_dict[s].append( + (id(v), cond_prob / (self.consensus_vars_number[vstr])) + ) # s._mpisppy_probability has not yet been divided by the number of subproblems, it the probability of the # stochastic scenario else: - error_list1.append((sname,vstr)) + error_list1.append((sname, vstr)) else: if v is None: # This var will not be indexed but that might not matter?? # Going to replace the brackets - v2str = vstr.replace("[","__").replace("]","__") # To distinguish the consensus_vars fixed at 0 + v2str = vstr.replace("[", "__").replace( + "]", "__" + ) # To distinguish the consensus_vars fixed at 0 v = pyo.Var() - + ### Lines equivalent to setattr(s, v2str, v) without warning - #s.del_component(v2str) UNUSEFUL - s.add_component(v2str, v) - #is the consensus variable should be earlier, then its not added in the good place, or is it? + # s.del_component(v2str) UNUSEFUL + s.add_component(v2str, v) + # is the consensus variable should be earlier, then its not added in the good place, or is it? v.fix(0) - self.varprob_dict[s].append((id(v),0)) + self.varprob_dict[s].append((id(v), 0)) else: - error_list2.append((sname,vstr)) - varlist[stage-1].append(v) + error_list2.append((sname, vstr)) + varlist[stage - 1].append(v) # Create the new scenario tree node for admm_consensus - assert hasattr(s,"_mpisppy_node_list"), f"the scenario {sname} doesn't have any _mpisppy_node_list attribute" + assert hasattr( + s, "_mpisppy_node_list" + ), f"the scenario {sname} doesn't have any _mpisppy_node_list attribute" parent = s._mpisppy_node_list[-1] - admm_subproblem_name, stoch_scenario_name = self.split_admm_stoch_subproblem_scenario_name(sname) + admm_subproblem_name, stoch_scenario_name = ( + self.split_admm_stoch_subproblem_scenario_name(sname) + ) num_scen = self.stoch_scenario_names.index(stoch_scenario_name) if self.BFs is not None: - node_num = num_scen % self.BFs[-2] #BFs has already been updated to include the last rank with the leaves + node_num = ( + num_scen % self.BFs[-2] + ) # BFs has already been updated to include the last rank with the leaves else: node_num = num_scen - node_name = parent.name + '_' + str(node_num) - - #could be more efficient with a dictionary rather than index - s._mpisppy_node_list.append(scenario_tree.ScenarioNode( - node_name, - 1/self.number_admm_subproblems, #branching probability at this node - parent.stage + 1, # The stage is the stage of the previous leaves - pyo.Expression(expr=0), # The cost is spread on the branches which are the subproblems - varlist[depth-1], - s) + node_name = parent.name + "_" + str(node_num) + + # could be more efficient with a dictionary rather than index + s._mpisppy_node_list.append( + scenario_tree.ScenarioNode( + node_name, + 1 + / self.number_admm_subproblems, # branching probability at this node + parent.stage + 1, # The stage is the stage of the previous leaves + 
pyo.Expression( + expr=0 + ), # The cost is spread on the branches which are the subproblems + varlist[depth - 1], + s, + ) ) s._mpisppy_probability /= self.number_admm_subproblems # underscores have a special meaning in the tree for stage in range(1, depth): - #print(f"{stage, varlist[stage-1], sname=}") - old_node = s._mpisppy_node_list[stage-1] - s._mpisppy_node_list[stage-1] = scenario_tree.ScenarioNode( - old_node.name, - old_node.cond_prob, - old_node.stage, - old_node.cost_expression * self.number_admm_subproblems, - varlist[stage-1], - s) - - self.local_admm_stoch_subproblem_scenarios[sname] = s # I don't know whether this changes anything + # print(f"{stage, varlist[stage-1], sname=}") + old_node = s._mpisppy_node_list[stage - 1] + s._mpisppy_node_list[stage - 1] = scenario_tree.ScenarioNode( + old_node.name, + old_node.cond_prob, + old_node.stage, + old_node.cost_expression * self.number_admm_subproblems, + varlist[stage - 1], + s, + ) - if len(error_list1) + len(error_list2) > 0: - raise RuntimeError (f"for each pair (scenario, variable) of the following list, the variable appears" - f"in consensus_vars, but not in the model:\n {error_list1} \n" - f"for each pair (scenario, variable) of the following list, the variable appears " - f"in the model, but not in consensus var: \n {error_list2}") + self.local_admm_stoch_subproblem_scenarios[sname] = ( + s # I don't know whether this changes anything + ) + if len(error_list1) + len(error_list2) > 0: + raise RuntimeError( + f"for each pair (scenario, variable) of the following list, the variable appears " + f"in consensus_vars, but not in the model:\n {error_list1} \n" + f"for each pair (scenario, variable) of the following list, the variable appears " + f"in the model, but not in consensus_vars: \n {error_list2}" + ) def admmWrapper_scenario_creator(self, admm_stoch_subproblem_scenario_name): - scenario = self.local_admm_stoch_subproblem_scenarios[admm_stoch_subproblem_scenario_name] + scenario = self.local_admm_stoch_subproblem_scenarios[ + admm_stoch_subproblem_scenario_name + ] # Although every stage is already multiplied earlier, we must still multiply the overall objective function # Grabs the objective function and multiplies its value by the number of admm subproblems to compensate for the probabilities obj = sputils.find_active_objective(scenario) obj.expr = obj.expr * self.number_admm_subproblems - return scenario \ No newline at end of file + return scenario diff --git a/mpisppy/utils/wtracker.py b/mpisppy/utils/wtracker.py index 04898ebb9..7a115f25a 100644 --- a/mpisppy/utils/wtracker.py +++ b/mpisppy/utils/wtracker.py @@ -21,7 +21,8 @@ import mpisppy.opt.ph import mpisppy.MPI as MPI -class WTracker(): + +class WTracker: """ Args: PHB (PHBase): the object that has all the scenarios and _PHIter @@ -29,39 +30,42 @@ class WTracker(): - A utility to track W and compute interesting statistics and/or log the w values - We are going to take a comm as input so we can do a gather if we want to, so be careful if you use this with APH (you might want to call it from a lagrangian spoke) - + """ - def __init__(self, PHB): self.local_Ws = dict() # [iteration][sname](list of W vals) self.PHB = PHB # get the nonant variable names, bound by index to W vals arbitrary_scen = PHB.local_scenarios[list(PHB.local_scenarios.keys())[0]] - self.varnames = [var.name for node in arbitrary_scen._mpisppy_node_list - for (ix, var) in enumerate(node.nonant_vardata_list)] - if hasattr(self.PHB, '_PHIter'): + self.varnames = [ + var.name + for node in
arbitrary_scen._mpisppy_node_list + for (ix, var) in enumerate(node.nonant_vardata_list) + ] + if hasattr(self.PHB, "_PHIter"): self.ph_iter = self.PHB._PHIter - elif hasattr(self.PHB, 'A_iter'): + elif hasattr(self.PHB, "A_iter"): self.ph_iter = self.PHB.A_iter else: raise RuntimeError("WTracker created before its PHBase object has _PHIter") - def grab_local_Ws(self): - """ Get the W values from the PHB that we are following - NOTE: we assume this is called only once per iteration + """Get the W values from the PHB that we are following + NOTE: we assume this is called only once per iteration """ - if hasattr(self.PHB, '_PHIter'): #update the iter value + if hasattr(self.PHB, "_PHIter"): # update the iter value self.ph_iter = self.PHB._PHIter - elif hasattr(self.PHB, 'A_iter'): + elif hasattr(self.PHB, "A_iter"): self.ph_iter = self.PHB.A_iter - scenario_Ws = {sname: [w._value for w in scenario._mpisppy_model.W.values()] - for (sname, scenario) in self.PHB.local_scenarios.items()} + scenario_Ws = { + sname: [w._value for w in scenario._mpisppy_model.W.values()] + for (sname, scenario) in self.PHB.local_scenarios.items() + } self.local_Ws[self.ph_iter] = scenario_Ws def compute_moving_stats(self, wlen, offsetback=0): - """ Use self.local_Ws to compute moving mean and stdev + """Use self.local_Ws to compute moving mean and stdev ASSUMES grab_local_Ws is called before this Args: wlen (int): desired window length @@ -75,20 +79,28 @@ def compute_moving_stats(self, wlen, offsetback=0): li = cI - offsetback fi = max(1, li - wlen) if li - fi < wlen: - return (f"WARNING: Not enough iterations ({cI}) for window len {wlen} and" - f" offsetback {offsetback}\n") + return ( + f"WARNING: Not enough iterations ({cI}) for window len {wlen} and" + f" offsetback {offsetback}\n" + ) else: window_stats = dict() wlist = dict() for idx, varname in enumerate(self.varnames): for sname in self.PHB.local_scenario_names: - wlist[(varname, sname)] = [self.local_Ws[i][sname][idx] for i in range(fi, li+1)] - window_stats[(varname, sname)] = (np.mean(wlist[(varname, sname)]), np.std(wlist[(varname, sname)])) + wlist[(varname, sname)] = [ + self.local_Ws[i][sname][idx] for i in range(fi, li + 1) + ] + window_stats[(varname, sname)] = ( + np.mean(wlist[(varname, sname)]), + np.std(wlist[(varname, sname)]), + ) return wlist, window_stats - - def report_by_moving_stats(self, wlen, reportlen=None, stdevthresh=None, file_prefix=''): - """ Compute window_stats then sort by "badness" to write to three files + def report_by_moving_stats( + self, wlen, reportlen=None, stdevthresh=None, file_prefix="" + ): + """Compute window_stats then sort by "badness" to write to three files ASSUMES grab_local_Ws is called before this Args: wlen (int): desired window length @@ -97,22 +109,31 @@ def report_by_moving_stats(self, wlen, reportlen=None, stdevthresh=None, file_pr NOTE: For large problems, this will create a lot of garbage for the collector """ - fname = f"{file_prefix}_summary_iter{self.ph_iter}_rank{self.PHB.global_rank}.txt" - stname = f"{file_prefix}_stdev_iter{self.ph_iter}_rank{self.PHB.global_rank}.csv" + fname = ( + f"{file_prefix}_summary_iter{self.ph_iter}_rank{self.PHB.global_rank}.txt" + ) + stname = ( + f"{file_prefix}_stdev_iter{self.ph_iter}_rank{self.PHB.global_rank}.csv" + ) cvname = f"{file_prefix}_cv_iter{self.ph_iter}_rank{self.PHB.global_rank}.csv" if self.PHB.cylinder_rank == 0: - print(f"Writing (a) W tracker report(s) to files with names like {fname}, {stname}, and {cvname}") + print( + f"Writing (a) W tracker 
report(s) to files with names like {fname}, {stname}, and {cvname}" + ) with open(fname, "w") as fil: fil.write(f"Moving Stats W Report at iteration {self.ph_iter}\n") - fil.write(f" {len(self.varnames)} nonants\n" - f" {len(self.PHB.local_scenario_names)} scenarios\n") - + fil.write( + f" {len(self.varnames)} nonants\n" + f" {len(self.PHB.local_scenario_names)} scenarios\n" + ) + total_traces = len(self.varnames) * len(self.PHB.local_scenario_names) wstats = self.compute_moving_stats(wlen)[1] if not isinstance(wstats, str): - Wsdf = pd.DataFrame.from_dict(wstats, orient='index', - columns=["mean", "stdev"]) + Wsdf = pd.DataFrame.from_dict( + wstats, orient="index", columns=["mean", "stdev"] + ) stt = stdevthresh if stdevthresh is not None else self.PHB.E1_tolerance @@ -121,32 +142,45 @@ def report_by_moving_stats(self, wlen, reportlen=None, stdevthresh=None, file_pr total_stdev = Wsdf["stdev"].sum() with open(fname, "a") as fil: - fil.write(f" {goodcnt} of {total_traces} have windowed stdev (unscaled) below {stt}\n") + fil.write( + f" {goodcnt} of {total_traces} have windowed stdev (unscaled) below {stt}\n" + ) fil.write(f" sum of stdev={total_stdev}\n") - fil.write(f"Sorted by windowed stdev, row limit={reportlen}, window len={wlen} in {stname}\n") + fil.write( + f"Sorted by windowed stdev, row limit={reportlen}, window len={wlen} in {stname}\n" + ) by_stdev = Wsdf.sort_values(by="stdev", ascending=False) - by_stdev[0:reportlen].to_csv(path_or_buf=stname, header=True, index=True, index_label=None, mode='w') + by_stdev[0:reportlen].to_csv( + path_or_buf=stname, header=True, index=True, index_label=None, mode="w" + ) # scaled - Wsdf["absCV"] = np.where(Wsdf["mean"] != 0, Wsdf["stdev"]/abs(Wsdf["mean"]), np.nan) + Wsdf["absCV"] = np.where( + Wsdf["mean"] != 0, Wsdf["stdev"] / abs(Wsdf["mean"]), np.nan + ) goodcnt = len(Wsdf[Wsdf["absCV"] <= stt]) mean_absCV = Wsdf["absCV"].mean() stdev_absCV = Wsdf["absCV"].std() zeroWcnt = len(Wsdf[Wsdf["mean"] == 0]) with open(fname, "a") as fil: - fil.write(f" {goodcnt} of ({total_traces} less meanzero {zeroWcnt}) have windowed absCV below {stt}\n") + fil.write( + f" {goodcnt} of ({total_traces} less meanzero {zeroWcnt}) have windowed absCV below {stt}\n" + ) fil.write(f" mean absCV={mean_absCV}, stdev={stdev_absCV}\n") - fil.write(f" Sorted by windowed abs CV, row limit={reportlen}, window len={wlen} in {cvname}\n") + fil.write( + f" Sorted by windowed abs CV, row limit={reportlen}, window len={wlen} in {cvname}\n" + ) by_absCV = Wsdf.sort_values(by="absCV", ascending=False) - by_absCV[0:reportlen].to_csv(path_or_buf=cvname, header=True, index=True, index_label=None, mode='w') + by_absCV[0:reportlen].to_csv( + path_or_buf=cvname, header=True, index=True, index_label=None, mode="w" + ) else: # not enough data with open(fname, "a") as fil: - fil.write(wstats) # warning string - + fil.write(wstats) # warning string def W_diff(self): - """ Compute the norm of the difference between to consecutive Ws / num_scenarios. + """Compute the norm of the difference between to consecutive Ws / num_scenarios. 
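To make the windowed statistics above concrete, here is a minimal numpy sketch (the W trace is invented) of the per-(variable, scenario) mean, stdev, and the scaled absCV measure that report_by_moving_stats sorts by:

import numpy as np

w_trace = np.array([1.9, 2.1, 2.0, 2.05, 1.95])  # hypothetical W values over one window
mean, stdev = np.mean(w_trace), np.std(w_trace)
abs_cv = stdev / abs(mean) if mean != 0 else float("nan")  # mirrors the np.where guard above
print(mean, stdev, abs_cv)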
Returns: global_diff (float): difference between two consecutive Ws @@ -159,9 +193,11 @@ def W_diff(self): local_diff = np.zeros(1) varcount = 0 local_diff[0] = 0 - for (sname, scenario) in self.PHB.local_scenarios.items(): - local_wdiffs = [w - w1 - for w, w1 in zip(self.local_Ws[cI][sname], self.local_Ws[cI-1][sname])] + for sname, scenario in self.PHB.local_scenarios.items(): + local_wdiffs = [ + w - w1 + for w, w1 in zip(self.local_Ws[cI][sname], self.local_Ws[cI - 1][sname]) + ] for wdiff in local_wdiffs: local_diff[0] += abs(wdiff) varcount += 1 @@ -171,23 +207,24 @@ return global_diff[0] / self.PHB.n_proc def check_cross_zero(self, wlen, offsetback=0): - """NOTE: we assume this is called after grab_local_Ws - """ + """NOTE: we assume this is called after grab_local_Ws""" cI = self.ph_iter li = cI - offsetback fi = max(1, li - wlen) if li - fi < wlen: - return (f"WTRACKER WARNING: Not enough iterations ({cI}) for window len {wlen} and" - f" offsetback {offsetback}\n") + return ( + f"WTRACKER WARNING: Not enough iterations ({cI}) for window len {wlen} and" + f" offsetback {offsetback}\n" + ) else: print(f"{np.shape(self.local_Ws)}") - for i in range(fi+1, li+1): + for i in range(fi + 1, li + 1): for sname, _ in self.PHB.local_scenarios.items(): sgn_curr_iter = np.sign(np.array(self.local_Ws[fi][sname])) - sgn_last_iter = np.sign(np.array(self.local_Ws[fi-1][sname])) - if np.all(sgn_curr_iter!=sgn_last_iter): - return (f"WTRACKER BAD: Ws crossed zero, sensed at iter {i}") - return (f"WTRACKER GOOD: Ws did not cross zero over the past {wlen} iters") + sgn_last_iter = np.sign(np.array(self.local_Ws[fi - 1][sname])) + if np.all(sgn_curr_iter != sgn_last_iter): + return f"WTRACKER BAD: Ws crossed zero, sensed at iter {i}" + return f"WTRACKER GOOD: Ws did not cross zero over the past {wlen} iters" def check_w_stdev(self, wlen, stdevthresh, offsetback=0): _, window_stats = self.compute_moving_stats(wlen) @@ -195,23 +232,26 @@ li = cI - offsetback fi = max(1, li - wlen) if li - fi < wlen: - return (f"WTRACKER WARNING: Not enough iterations ({cI}) for window len {wlen} and" - f" offsetback {offsetback}\n") + return ( + f"WTRACKER WARNING: Not enough iterations ({cI}) for window len {wlen} and" + f" offsetback {offsetback}\n" + ) else: for idx, varname in enumerate(self.varnames): for sname in self.PHB.local_scenario_names: - if np.abs(window_stats[(varname, sname)][1]) >= np.abs(stdevthresh * window_stats[(varname, sname)][0]): #stdev scaled by mean - return (f"WTRACKER BAD: at least one scaled stdev is bigger than {stdevthresh}") - return (f"WTRACKER GOOD: all scaled stdev are smaller than {stdevthresh}") + if np.abs(window_stats[(varname, sname)][1]) >= np.abs( + stdevthresh * window_stats[(varname, sname)][0] + ): # stdev scaled by mean + return f"WTRACKER BAD: at least one scaled stdev is bigger than {stdevthresh}" + return f"WTRACKER GOOD: all scaled stdevs are smaller than {stdevthresh}" if __name__ == "__main__": # for ad hoc developer testing solver_name = "cplex" - + import mpisppy.tests.examples.farmer as farmer + options = {} options["asynchronousPH"] = False options["solver_name"] = solver_name @@ -229,7 +269,6 @@ def check_w_stdev(self, wlen, stdevthresh, offsetback=0): options["iter0_solver_options"] = {"mipgap": 0.001} options["iterk_solver_options"] = {"mipgap": 0.00001} - options["PHIterLimit"] = 1 ph = mpisppy.opt.ph.PH( options, diff --git a/mpisppy/utils/wxbarreader.py
b/mpisppy/utils/wxbarreader.py index a3f4e5901..186eb8b6a 100644 --- a/mpisppy/utils/wxbarreader.py +++ b/mpisppy/utils/wxbarreader.py @@ -6,69 +6,71 @@ # All rights reserved. Please see the files COPYRIGHT.md and LICENSE.md for # full copyright and license information. ############################################################################### -''' An extension to initialize PH weights and/or PH xbar values from csv files. +"""An extension to initialize PH weights and/or PH xbar values from csv files. - To use, specify either or both of the following keys to the options dict: +To use, specify either or both of the following keys to the options dict: - "init_W_fname" : - "init_Xbar_fname": + "init_W_fname" : + "init_Xbar_fname": - If neither option is specified, this extension does nothing (i.e. is - silent--does not raise a warning/error). If only one is specified, then - only those values are initialized. The specified files should be - formatted as follows: +If neither option is specified, this extension does nothing (i.e. is +silent--does not raise a warning/error). If only one is specified, then +only those values are initialized. The specified files should be +formatted as follows: - (W values) csv with rows: scenario_name, variable_name, weight_value - (x values) csv with rows: variable_name, variable_value + (W values) csv with rows: scenario_name, variable_name, weight_value + (x values) csv with rows: variable_name, variable_value - Rows that begin with "#" are treated as comments. If the files are missing - any values, raises an error. Extra values are ignored. +Rows that begin with "#" are treated as comments. If the files are missing +any values, raises an error. Extra values are ignored. - TODO: - Check with bundles. +TODO: + Check with bundles. 
- Written: DLW, July 2019 - Modified: DTM, Aug 2019 -''' +Written: DLW, July 2019 +Modified: DTM, Aug 2019 +""" import mpisppy.utils.wxbarutils -import os # For checking if files exist +import os # For checking if files exist import mpisppy.extensions.extension import mpisppy.MPI as MPI n_proc = MPI.COMM_WORLD.Get_size() rank = MPI.COMM_WORLD.Get_rank() + class WXBarReader(mpisppy.extensions.extension.Extension): - """ Extension class for reading W values - """ - def __init__(self, ph): + """Extension class for reading W values""" - ''' Do a bunch of checking if files exist ''' + def __init__(self, ph): + """Do a bunch of checking if files exist""" w_fname, x_fname, sep_files = None, None, False - if ('init_separate_W_files' in ph.options): - sep_files = ph.options['init_separate_W_files'] - - if ('init_W_fname' in ph.options): - w_fname = ph.options['init_W_fname'] - if (not os.path.exists(w_fname)): - if (rank == 0): - if (sep_files): - print('Cannot find path', w_fname) + if "init_separate_W_files" in ph.options: + sep_files = ph.options["init_separate_W_files"] + + if "init_W_fname" in ph.options: + w_fname = ph.options["init_W_fname"] + if not os.path.exists(w_fname): + if rank == 0: + if sep_files: + print("Cannot find path", w_fname) else: - print('Cannot find file', w_fname) + print("Cannot find file", w_fname) quit() - if ('init_Xbar_fname' in ph.options): - x_fname = ph.options['init_Xbar_fname'] - if (not os.path.exists(x_fname)): - if (rank == 0): - print('Cannot find file', x_fname) + if "init_Xbar_fname" in ph.options: + x_fname = ph.options["init_Xbar_fname"] + if not os.path.exists(x_fname): + if rank == 0: + print("Cannot find file", x_fname) quit() - if (x_fname is None and w_fname is None and rank==0): - print('Warning: no input files provided to WXBarReader. ' - 'W and Xbar will be initialized to their default values.') + if x_fname is None and w_fname is None and rank == 0: + print( + "Warning: no input files provided to WXBarReader. " + "W and Xbar will be initialized to their default values." + ) self.PHB = ph self.cylinder_rank = rank @@ -79,24 +81,23 @@ def __init__(self, ph): def pre_iter0(self): pass - def post_iter0(self): pass def miditer(self): - ''' Called before the solveloop is called ''' + """Called before the solveloop is called""" if self.PHB._PHIter == 1: if self.w_fname: mpisppy.utils.wxbarutils.set_W_from_file( - self.w_fname, self.PHB, self.cylinder_rank, - sep_files=self.sep_files) - self.PHB._reenable_W() # This makes a big difference. + self.w_fname, self.PHB, self.cylinder_rank, sep_files=self.sep_files + ) + self.PHB._reenable_W() # This makes a big difference. if self.x_fname: mpisppy.utils.wxbarutils.set_xbar_from_file(self.x_fname, self.PHB) self.PHB._reenable_prox() def enditer(self): - ''' Called after the solve loop ''' + """Called after the solve loop""" pass def post_everything(self): diff --git a/mpisppy/utils/wxbartester.py b/mpisppy/utils/wxbartester.py index 15cc421c7..c714aaa71 100644 --- a/mpisppy/utils/wxbartester.py +++ b/mpisppy/utils/wxbartester.py @@ -6,8 +6,7 @@ # All rights reserved. Please see the files COPYRIGHT.md and LICENSE.md for # full copyright and license information. 
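Before the test driver below, a hedged usage sketch for WXBarReader (the file names and row contents are invented for illustration): the extension is configured entirely through the options dict, with csv rows in the formats the module docstring above describes:

options = {
    "init_W_fname": "weights.csv",  # rows like: Scenario1,DispatchVar[1],0.25
    "init_Xbar_fname": "xbars.csv",  # rows like: DispatchVar[1],17.5
    "init_separate_W_files": False,  # True would make init_W_fname a directory
}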
############################################################################### -''' Driver script to test the w/xbar read/write extensions using UC -''' +"""Driver script to test the w/xbar read/write extensions using UC""" import mpisppy.tests.examples.uc.uc_funcs as uc_funcs # this version is locked to three scenarios @@ -16,66 +15,70 @@ from mpisppy.opt.ph import PH -def nonsense(arg1, arg2, arg3): # Empty scenario_denouement + +def nonsense(arg1, arg2, arg3): # Empty scenario_denouement pass + def read_test(): - scen_count = 3 - scenario_creator = uc_funcs.pysp2_callback + scen_count = 3 + scenario_creator = uc_funcs.pysp2_callback scenario_denouement = nonsense - scenario_rhosetter = uc_funcs.scenario_rhos + scenario_rhosetter = uc_funcs.scenario_rhos PH_options = { - 'solver_name': 'gurobi', - 'PHIterLimit': 2, - 'defaultPHrho': 1, - 'convthresh': 1e-6, - 'verbose': False, - 'display_timing': False, - 'display_progress': False, - 'iter0_solver_options': dict(), - 'iterk_solver_options': dict(), - 'init_W_fname': 'david/weights.csv', # Path to the weight files - 'init_separate_W_files': False, - 'init_Xbar_fname': 'david/somexbars.csv', - 'extensions':WXBarReader, - 'rho_setter':scenario_rhosetter, + "solver_name": "gurobi", + "PHIterLimit": 2, + "defaultPHrho": 1, + "convthresh": 1e-6, + "verbose": False, + "display_timing": False, + "display_progress": False, + "iter0_solver_options": dict(), + "iterk_solver_options": dict(), + "init_W_fname": "david/weights.csv", # Path to the weight files + "init_separate_W_files": False, + "init_Xbar_fname": "david/somexbars.csv", + "extensions": WXBarReader, + "rho_setter": scenario_rhosetter, } - names = ['Scenario' + str(i+1) for i in range(scen_count)] + names = ["Scenario" + str(i + 1) for i in range(scen_count)] ph = PH(PH_options, names, scenario_creator, scenario_denouement) conv, obj, bound = ph.ph_main() + def write_test(): - scen_count = 3 - scenario_creator = uc_funcs.pysp2_callback + scen_count = 3 + scenario_creator = uc_funcs.pysp2_callback scenario_denouement = nonsense - scenario_rhosetter = uc_funcs.scenario_rhos + scenario_rhosetter = uc_funcs.scenario_rhos PH_options = { - 'solver_name': 'gurobi', - 'PHIterLimit': 2, - 'defaultPHrho': 1, - 'convthresh': 1e-6, - 'verbose': False, - 'display_timing': False, - 'display_progress': False, - 'iter0_solver_options': dict(), - 'iterk_solver_options': dict(), - 'W_fname': 'david/weights.csv', - 'separate_W_files': False, - 'Xbar_fname': 'somexbars.csv', - 'extensions':WXBarReader, - 'rho_setter':scenario_rhosetter, + "solver_name": "gurobi", + "PHIterLimit": 2, + "defaultPHrho": 1, + "convthresh": 1e-6, + "verbose": False, + "display_timing": False, + "display_progress": False, + "iter0_solver_options": dict(), + "iterk_solver_options": dict(), + "W_fname": "david/weights.csv", + "separate_W_files": False, + "Xbar_fname": "somexbars.csv", + "extensions": WXBarReader, + "rho_setter": scenario_rhosetter, } - names = ['Scenario' + str(i+1) for i in range(scen_count)] + names = ["Scenario" + str(i + 1) for i in range(scen_count)] ph = PH(PH_options, names, scenario_creator, scenario_denouement) conv, obj, bound = ph.ph_main() -if __name__=='__main__': + +if __name__ == "__main__": read_test() diff --git a/mpisppy/utils/wxbarutils.py b/mpisppy/utils/wxbarutils.py index f8eca3531..25c415e59 100644 --- a/mpisppy/utils/wxbarutils.py +++ b/mpisppy/utils/wxbarutils.py @@ -6,46 +6,47 @@ # All rights reserved. 
Please see the files COPYRIGHT.md and LICENSE.md for # full copyright and license information. ############################################################################### -''' Utilities for reading and writing W and - x-bar values in and out of csv files. - - Written: DLW July 2019 - Modified: DTM Aug 2019 - - Could stand to be re-factored a bit. - - When reading/writing the weight files, there are two options. - - 1. All weights are stored in a single main file. - - Reading: Set "init_W_fname" to the location of the main file - containing all of the weights. - - Writing: Set "W_fname" to the location of the main file that will - contain all the weights. - - 2. Each scenario's weights are stored in separate, individual files. - - Reading: Set "init_W_fname" to the directory containing the weight - files. This directory must contain one file per scenario, each - named _weights.csv. Set "init_separate_W_files" to - True in the options dictionary. - - Writing: Set "W_fname" to the directory that will contain the - weight files (this directory will be created if it does not already - exist). The weight files will be corretly named in accordance with - the naming convention above. Set "separate_W_files" to True in the - options dictionary. -''' +"""Utilities for reading and writing W and +x-bar values in and out of csv files. + +Written: DLW July 2019 +Modified: DTM Aug 2019 + +Could stand to be re-factored a bit. + +When reading/writing the weight files, there are two options. + + 1. All weights are stored in a single main file. + + Reading: Set "init_W_fname" to the location of the main file + containing all of the weights. + + Writing: Set "W_fname" to the location of the main file that will + contain all the weights. + + 2. Each scenario's weights are stored in separate, individual files. + + Reading: Set "init_W_fname" to the directory containing the weight + files. This directory must contain one file per scenario, each + named _weights.csv. Set "init_separate_W_files" to + True in the options dictionary. + + Writing: Set "W_fname" to the directory that will contain the + weight files (this directory will be created if it does not already + exist). The weight files will be correctly named in accordance with + the naming convention above. Set "separate_W_files" to True in the + options dictionary. +""" import os import numpy as np import pyomo.environ as pyo -''' W utilities ''' +""" W utilities """ + def write_W_to_file(PHB, fname, sep_files=False): - ''' + """ Args: PHB (PHBase object) -- Where the W values live fname (str) -- name of file to which we write. @@ -58,39 +59,40 @@ file. This can apparently be accomplished using Collective MPI I/O (see https://mpi4py.readthedocs.io/en/stable/tutorial.html#mpi-io), but I am lazy and doing this for now.
- ''' + """ - if (sep_files): - for (sname, scenario) in PHB.local_scenarios.items(): - scenario_Ws = {var.name: pyo.value(scenario._mpisppy_model.W[node.name, ix]) + if sep_files: + for sname, scenario in PHB.local_scenarios.items(): + scenario_Ws = { + var.name: pyo.value(scenario._mpisppy_model.W[node.name, ix]) for node in scenario._mpisppy_node_list - for (ix, var) in enumerate(node.nonant_vardata_list)} - scenario_fname = os.path.join(fname, sname + '_weights.csv') - with open(scenario_fname, 'w') as f: - for (vname, val) in scenario_Ws.items(): - row = ','.join([vname, str(val)]) + '\n' + for (ix, var) in enumerate(node.nonant_vardata_list) + } + scenario_fname = os.path.join(fname, sname + "_weights.csv") + with open(scenario_fname, "w") as f: + for vname, val in scenario_Ws.items(): + row = ",".join([vname, str(val)]) + "\n" f.write(row) else: - local_Ws = {(sname, var.name): pyo.value(scenario._mpisppy_model.W[node.name, ix]) - for (sname, scenario) in PHB.local_scenarios.items() - for node in scenario._mpisppy_node_list - for (ix, var) in enumerate(node.nonant_vardata_list)} - comm = PHB.comms['ROOT'] + local_Ws = { + (sname, var.name): pyo.value(scenario._mpisppy_model.W[node.name, ix]) + for (sname, scenario) in PHB.local_scenarios.items() + for node in scenario._mpisppy_node_list + for (ix, var) in enumerate(node.nonant_vardata_list) + } + comm = PHB.comms["ROOT"] Ws = comm.gather(local_Ws, root=0) - if (PHB.cylinder_rank == 0): - with open(fname, 'a') as f: + if PHB.cylinder_rank == 0: + with open(fname, "a") as f: for W in Ws: - for (key, val) in W.items(): + for key, val in W.items(): sname, vname = key[0], key[1] - row = ','.join([sname, vname, str(val)]) + '\n' + row = ",".join([sname, vname, str(val)]) + "\n" f.write(row) - - - def set_W_from_file(fname, PHB, rank, sep_files=False, disable_check=False): - ''' + """ Args: fname (str) -- if sep_files=False, file containing the dual weights. Otherwise, path of the directory containing the dual weight files @@ -101,180 +103,202 @@ def set_W_from_file(fname, PHB, rank, sep_files=False, disable_check=False): individual files, one per scenario. The files must be contained in the same directory, and must be named _weights.csv for each scenario name . - + Notes: Calls _check_W, which ensures that all required values were specified, - and that the specified weights satisfy the dual feasibility condition + and that the specified weights satisfy the dual feasibility condition sum_{s\in S} p_s * w_s = 0. 
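A tiny numeric illustration of that dual-feasibility condition (hypothetical two-scenario data; the 1e-7 tolerance matches the _check_W code further below):

probs = {"Scenario1": 0.5, "Scenario2": 0.5}  # scenario probabilities
w_vals = {"Scenario1": 4.0, "Scenario2": -4.0}  # weights for one variable
dual = sum(probs[s] * w_vals[s] for s in probs)
assert abs(dual) <= 1e-7  # the probability-weighted sum of the weights is (numerically) zero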
- ''' - scenario_names_local = list(PHB.local_scenarios.keys()) + """ + scenario_names_local = list(PHB.local_scenarios.keys()) scenario_names_global = PHB.all_scenario_names - if (sep_files): + if sep_files: w_val_dict = dict() for sname in scenario_names_local: - scenario_fname = os.path.join(fname, sname + '_weights.csv') + scenario_fname = os.path.join(fname, sname + "_weights.csv") w_val_dict[sname] = _parse_W_csv_single(scenario_fname) else: - w_val_dict = _parse_W_csv(fname, scenario_names_local, - scenario_names_global, rank) + w_val_dict = _parse_W_csv( + fname, scenario_names_local, scenario_names_global, rank + ) if not disable_check: _check_W(w_val_dict, PHB, rank) - mp = {(sname, var.name): (node.name, ix) - for (sname, scenario) in PHB.local_scenarios.items() - for node in scenario._mpisppy_node_list - for (ix,var) in enumerate(node.nonant_vardata_list)} + mp = { + (sname, var.name): (node.name, ix) + for (sname, scenario) in PHB.local_scenarios.items() + for node in scenario._mpisppy_node_list + for (ix, var) in enumerate(node.nonant_vardata_list) + } - for (sname, d) in w_val_dict.items(): + for sname, d in w_val_dict.items(): for vname in d.keys(): scenario = PHB.local_scenarios[sname] node_name, ix = mp[sname, vname] scenario._mpisppy_model.W[node_name, ix] = w_val_dict[sname][vname] + def _parse_W_csv_single(fname): - ''' Read a file containing the weights for a single scenario. The file must - be formatted as + """Read a file containing the weights for a single scenario. The file must + be formatted as - variable_name,variable_value + variable_name,variable_value - (comma separated). Lines beginning with a "#" are treated as comments - and ignored. - ''' - if (not os.path.exists(fname)): - raise RuntimeError('Could not find file {fn}'.format(fn=fname)) + (comma separated). Lines beginning with a "#" are treated as comments + and ignored. + """ + if not os.path.exists(fname): + raise RuntimeError("Could not find file {fn}".format(fn=fname)) results = dict() - with open(fname, 'r') as f: + with open(fname, "r") as f: for line in f: - if (line.startswith('#')): + if line.startswith("#"): continue - line = line.split(',') - vname = ','.join(line[:-1]) - wval = float(line[-1]) + line = line.split(",") + vname = ",".join(line[:-1]) + wval = float(line[-1]) results[vname] = wval return results + def _parse_W_csv(fname, scenario_names_local, scenario_names_global, rank): - ''' Read a csv file containing weight information. - - Args: - fname (str) -- Filename of csv file to read - scenario_names_local (list of str) -- List of local scenario names - scenario_names_global (list of str) -- List of global scenario - names (i.e. all the scenario names in the entire model across - all ranks--each PHBase object stores this information). - - Return: - results (dict) -- Doubly-nested dict mapping - results[scenario_name][var_name] --> weight value (float) - - Notes: - This function is only called if sep_files=False, i.e., if all of - the weights are stored in a single root file. The file must be - formatted as: - - scenario_name,variable_name,weight_value - - Rows that begin with a "#" character are treated as comments. - The variable names _may_ contain commas (confusing, but simpler for - the user) - - Raises a RuntimeError if there are any missing scenarios. Prints a - warning if there are any extra scenarios. 
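Because variable names may themselves contain commas, the parser keeps the first field as the scenario name and the last field as the weight, rejoining everything in between; a standalone sketch with an invented row:

line = "Scenario1,Gen[1,2],0.5".split(",")
sname = line[0]
vname = ",".join(line[1:-1])  # reassemble a variable name that contained a comma
wval = float(line[-1])
assert (sname, vname, wval) == ("Scenario1", "Gen[1,2]", 0.5)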
- - When this function returns, we are certain that - results.keys() == PHB.local_scenarios.keys() - - When run in parallel, this method requires multiple ranks to open - and read from the same file simultaneously. Apparently there are - safer ways to do this using MPI collective communication, but since - all we're doing here is reading files, I'm being lazy and doing it - this way. - ''' + """Read a csv file containing weight information. + + Args: + fname (str) -- Filename of csv file to read + scenario_names_local (list of str) -- List of local scenario names + scenario_names_global (list of str) -- List of global scenario + names (i.e. all the scenario names in the entire model across + all ranks--each PHBase object stores this information). + + Return: + results (dict) -- Doubly-nested dict mapping + results[scenario_name][var_name] --> weight value (float) + + Notes: + This function is only called if sep_files=False, i.e., if all of + the weights are stored in a single root file. The file must be + formatted as: + + scenario_name,variable_name,weight_value + + Rows that begin with a "#" character are treated as comments. + The variable names _may_ contain commas (confusing, but simpler for + the user) + + Raises a RuntimeError if there are any missing scenarios. Prints a + warning if there are any extra scenarios. + + When this function returns, we are certain that + results.keys() == PHB.local_scenarios.keys() + + When run in parallel, this method requires multiple ranks to open + and read from the same file simultaneously. Apparently there are + safer ways to do this using MPI collective communication, but since + all we're doing here is reading files, I'm being lazy and doing it + this way. + """ results = dict() seen = {name: False for name in scenario_names_local} - with open(fname, 'r') as f: + with open(fname, "r") as f: for line in f: - if (line.startswith('#')): + if line.startswith("#"): continue - line = line.split(',') + line = line.split(",") sname = line[0] - vname = ','.join(line[1:-1]) - wval = float(line[-1]) - - if (sname not in scenario_names_global): - if (rank == 0): - print('WARNING: Ignoring unknown scenario name', sname) + vname = ",".join(line[1:-1]) + wval = float(line[-1]) + + if sname not in scenario_names_global: + if rank == 0: + print("WARNING: Ignoring unknown scenario name", sname) continue - if (sname not in scenario_names_local): + if sname not in scenario_names_local: continue - if (sname in results): + if sname in results: results[sname][vname] = wval else: seen[sname] = True results[sname] = {vname: wval} - missing = [name for (name,is_seen) in seen.items() if not is_seen] - if (missing): - raise RuntimeError('rank ' + str(rank) +' could not find the following ' - 'scenarios in the provided weight file: ' + ', '.join(missing)) - + missing = [name for (name, is_seen) in seen.items() if not is_seen] + if missing: + raise RuntimeError( + "rank " + str(rank) + " could not find the following " + "scenarios in the provided weight file: " + ", ".join(missing) + ) + return results - + + def _check_W(w_val_dict, PHB, rank): - ''' + """ Args: - w_val_dict (dict) -- doubly-nested dict mapping + w_val_dict (dict) -- doubly-nested dict mapping w_val_dict[scenario_name][variable_name] = weight value. PHB (PHBase object) -- PHBase object rank (int) -- local rank Notes: Checks for three conditions: - + 1. Missing variables --> raises a RuntimeError 2. Extra variables --> prints a warning 3. 
Dual feasibility --> raises a RuntimeError - ''' + """ # By this point, we are certain that # w_val_dict.keys() == PHB.local_scenarios.keys() - for (sname, scenario) in PHB.local_scenarios.items(): - vn_model = set([var.name for node in scenario._mpisppy_node_list - for var in node.nonant_vardata_list]) + for sname, scenario in PHB.local_scenarios.items(): + vn_model = set( + [ + var.name + for node in scenario._mpisppy_node_list + for var in node.nonant_vardata_list + ] + ) vn_provided = set(w_val_dict[sname].keys()) diff = vn_model.difference(vn_provided) - if (diff): - raise RuntimeError(sname + ' is missing ' - 'the following variables: ' + ', '.join(list(diff))) + if diff: + raise RuntimeError( + sname + " is missing " + "the following variables: " + ", ".join(list(diff)) + ) diff = vn_provided.difference(vn_model) - if (diff): - print('Removing unknown variables:', ', '.join(list(diff))) + if diff: + print("Removing unknown variables:", ", ".join(list(diff))) for vname in diff: w_val_dict[sname].pop(vname, None) - - # At this point, we are sure that every local + + # At this point, we are sure that every local # scenario has the same set of variables - probs = {name: model._mpisppy_probability for (name, model) in - PHB.local_scenarios.items()} + probs = { + name: model._mpisppy_probability + for (name, model) in PHB.local_scenarios.items() + } checks = dict() - for vname in vn_model: # Ensured vn_model = vn_provided - checks[vname] = sum(probs[name] * w_val_dict[name][vname] - for name in PHB.local_scenarios.keys()) + for vname in vn_model: # Ensured vn_model = vn_provided + checks[vname] = sum( + probs[name] * w_val_dict[name][vname] for name in PHB.local_scenarios.keys() + ) - checks = PHB.comms['ROOT'].gather(checks, root=0) - if (rank == 0): + checks = PHB.comms["ROOT"].gather(checks, root=0) + if rank == 0: for vname in vn_model: dual = sum(c[vname] for c in checks) - if (abs(dual) > 1e-7): - raise RuntimeError('Provided weights do not satisfy ' - 'dual feasibility: \sum_{scenarios} prob(s) * w(s) != 0. ' - 'Error on variable ' + vname) + if abs(dual) > 1e-7: + raise RuntimeError( + "Provided weights do not satisfy " + "dual feasibility: \sum_{scenarios} prob(s) * w(s) != 0. " + "Error on variable " + vname + ) + + +""" X-bar utilities """ -''' X-bar utilities ''' def write_xbar_to_file(PHB, fname): - ''' + """ Args: PHB (PHBase object) -- Where the W values live fname (str) -- name of file to which we write. @@ -282,21 +306,24 @@ def write_xbar_to_file(PHB, fname): Notes: Each scenario maintains its own copy of xbars. We only need to write one of them to the file (i.e. no parallelism required). 
- ''' - if (PHB.cylinder_rank != 0): + """ + if PHB.cylinder_rank != 0: return sname = list(PHB.local_scenarios.keys())[0] scenario = PHB.local_scenarios[sname] - xbars = {var.name: pyo.value(scenario._mpisppy_model.xbars[node.name, ix]) - for node in scenario._mpisppy_node_list - for (ix, var) in enumerate(node.nonant_vardata_list)} - with open(fname, 'a') as f: - for (var_name, val) in xbars.items(): - row = ','.join([var_name, str(val)]) + '\n' + xbars = { + var.name: pyo.value(scenario._mpisppy_model.xbars[node.name, ix]) + for node in scenario._mpisppy_node_list + for (ix, var) in enumerate(node.nonant_vardata_list) + } + with open(fname, "a") as f: + for var_name, val in xbars.items(): + row = ",".join([var_name, str(val)]) + "\n" f.write(row) + def set_xbar_from_file(fname, PHB): - ''' Read all of the csv files in a directory and use them to populate + """Read a csv file and use it to populate _xbars and _xsqbars Args: @@ -306,77 +333,88 @@ Notes: Raises a RuntimeError if the provided file is missing any values for xbar (i.e. does not assume a default value for missing variables). - ''' + """ xbar_val_dict = _parse_xbar_csv(fname) - if (PHB.cylinder_rank == 0): + if PHB.cylinder_rank == 0: _check_xbar(xbar_val_dict, PHB) - for (sname, scenario) in PHB.local_scenarios.items(): + for sname, scenario in PHB.local_scenarios.items(): for node in scenario._mpisppy_node_list: - for (ix,var) in enumerate(node.nonant_vardata_list): + for ix, var in enumerate(node.nonant_vardata_list): val = xbar_val_dict[var.name] scenario._mpisppy_model.xbars[node.name, ix] = val scenario._mpisppy_model.xsqbars[node.name, ix] = val * val + def _parse_xbar_csv(fname): - ''' Read a csv file containing weight information. - - Args: - fname (str) -- Filename of csv file to read - Return: - results (dict) -- Dict mapping var_name --> variable value (float) - - Notes: - The file must be formatted as: - - variable_name,value - - Rows that begin with a "#" character are treated as comments. - The variable names _may_ contain commas (confusing, but simpler for - the user) - - When run in parallel, this method requires multiple ranks to open - and read from the same file simultaneously. Apparently there are - safer ways to do this using MPI collective communication, but since - all we're doing here is reading files, I'm being lazy and doing it - this way. - ''' + """Read a csv file containing x-bar information. + + Args: + fname (str) -- Filename of csv file to read + Return: + results (dict) -- Dict mapping var_name --> variable value (float) + + Notes: + The file must be formatted as: + + variable_name,value + + Rows that begin with a "#" character are treated as comments. + The variable names _may_ contain commas (confusing, but simpler for + the user) + + When run in parallel, this method requires multiple ranks to open + and read from the same file simultaneously. Apparently there are + safer ways to do this using MPI collective communication, but since + all we're doing here is reading files, I'm being lazy and doing it + this way.
+ """ results = dict() - with open(fname, 'r') as f: + with open(fname, "r") as f: for line in f: - if (line.startswith('#')): + if line.startswith("#"): continue - line = line.split(',') - vname = ','.join(line[:-1]) - val = float(line[-1]) - + line = line.split(",") + vname = ",".join(line[:-1]) + val = float(line[-1]) + results[vname] = val return results + def _check_xbar(xbar_val_dict, PHB): - ''' Make sure that a value was provided for every non-anticipative - variable. If any extra variable values were provided in the input file, - this function prints a warning. - ''' + """Make sure that a value was provided for every non-anticipative + variable. If any extra variable values were provided in the input file, + this function prints a warning. + """ sname = list(PHB.local_scenarios.keys())[0] scenario = PHB.local_scenarios[sname] - var_names = set([var.name for node in scenario._mpisppy_node_list - for var in node.nonant_vardata_list]) + var_names = set( + [ + var.name + for node in scenario._mpisppy_node_list + for var in node.nonant_vardata_list + ] + ) provided_vars = set(xbar_val_dict.keys()) set1 = var_names.difference(provided_vars) - if (set1): - raise RuntimeError('Could not find the following required variable ' - 'values in the provided input file: ' + ', '.join([v for v in set1])) + if set1: + raise RuntimeError( + "Could not find the following required variable " + "values in the provided input file: " + ", ".join([v for v in set1]) + ) set2 = provided_vars.difference(var_names) - if (set2): - print('Ignoring the following variables values provided in the ' - 'input file: ' + ', '.join([v for v in set2])) + if set2: + print( + "Ignoring the following variables values provided in the " + "input file: " + ", ".join([v for v in set2]) + ) def ROOT_xbar_npy_serializer(PHB, fname): - """ Write the root node xbar to be read by a numpy load. + """Write the root node xbar to be read by a numpy load. Args: PHB (PHBase object) -- Where the W values live fname (str) -- name of file to which we write. @@ -384,17 +422,20 @@ def ROOT_xbar_npy_serializer(PHB, fname): """ arbitrary_scen = PHB.local_scenarios[list(PHB.local_scenarios.keys())[0]] root_nlen = arbitrary_scen._mpisppy_data.nlens["ROOT"] - root_xbar_list = [pyo.value(arbitrary_scen._mpisppy_model.xbars["ROOT", ix]) for ix in range(root_nlen)] + root_xbar_list = [ + pyo.value(arbitrary_scen._mpisppy_model.xbars["ROOT", ix]) + for ix in range(root_nlen) + ] np.savetxt(fname, root_xbar_list) def fix_ef_ROOT_nonants(ef, root_nonants): - """ modify ef to have fixed values for the root nonants + """modify ef to have fixed values for the root nonants Args: ef (Pyomo ConcreteModel for an EF): the extensive form to modify root_nonants(list): the nonant values for the root node nonants """ - varlist = [var for (ndn,i), var in ef.ref_vars.items() if ndn == "ROOT"] + varlist = [var for (ndn, i), var in ef.ref_vars.items() if ndn == "ROOT"] assert len(varlist) == len(root_nonants) for var, vval in zip(varlist, root_nonants): var.fix(vval) diff --git a/mpisppy/utils/wxbarwriter.py b/mpisppy/utils/wxbarwriter.py index 46bfeb56a..248404bca 100644 --- a/mpisppy/utils/wxbarwriter.py +++ b/mpisppy/utils/wxbarwriter.py @@ -6,104 +6,111 @@ # All rights reserved. Please see the files COPYRIGHT.md and LICENSE.md for # full copyright and license information. ############################################################################### -''' An extension to save PH weights and/or PH xbar values to csv files. 
+"""An extension to save PH weights and/or PH xbar values to csv files. - To use, specify either or both of the following keys to the options dict: +To use, specify either or both of the following keys to the options dict: - "W_fname" : - "Xbar_fname": + "W_fname" : + "Xbar_fname": - If neither option is specified, this extension does nothing (prints a - warning). If only one is specified, then only those values are saved. - The resulting files will be output in the format: +If neither option is specified, this extension does nothing (prints a +warning). If only one is specified, then only those values are saved. +The resulting files will be output in the format: - (W values) csv with rows: scenario_name, variable_name, weight_value - (x values) csv with rows: variable_name, variable_value + (W values) csv with rows: scenario_name, variable_name, weight_value + (x values) csv with rows: variable_name, variable_value - This format is consistent with the input required by WXbarReader. +This format is consistent with the input required by WXbarReader. - TODO: - Check with bundles +TODO: + Check with bundles - Written: DLW, July 2019 - Modified: DTM, Aug 2019 -''' +Written: DLW, July 2019 +Modified: DTM, Aug 2019 +""" import os import mpisppy.utils.wxbarutils import mpisppy.extensions.extension -import mpisppy.MPI as MPI - +import mpisppy.MPI as MPI + n_proc = MPI.COMM_WORLD.Get_size() rank = MPI.COMM_WORLD.Get_rank() + class WXBarWriter(mpisppy.extensions.extension.Extension): - """ Extension class for writing the W values - """ + """Extension class for writing the W values""" + def __init__(self, ph): # Check a bunch of files w_fname, w_grad_fname, x_fname, sep_files = None, None, None, False - if ('W_fname' in ph.options): # W_fname is a path if separate_W_files=True - w_fname = ph.options['W_fname'] - if ('Xbar_fname' in ph.options): - x_fname = ph.options['Xbar_fname'] - if ('separate_W_files' in ph.options): - sep_files = ph.options['separate_W_files'] - - if (x_fname is None and w_fname is None and rank==0): - print('Warning: no output files provided to WXBarWriter. ' - 'No values will be saved.') - - if (w_fname and (not sep_files) and os.path.exists(w_fname) and rank==0): - print('Warning: specified W_fname ({fn})'.format(fn=w_fname) + - ' already exists. Results will be appended to this file.') - elif (w_fname and sep_files and (not os.path.exists(w_fname)) and rank==0): - print('Warning: path {path} does not exist. Creating...'.format( - path=w_fname)) + if "W_fname" in ph.options: # W_fname is a path if separate_W_files=True + w_fname = ph.options["W_fname"] + if "Xbar_fname" in ph.options: + x_fname = ph.options["Xbar_fname"] + if "separate_W_files" in ph.options: + sep_files = ph.options["separate_W_files"] + + if x_fname is None and w_fname is None and rank == 0: + print( + "Warning: no output files provided to WXBarWriter. " + "No values will be saved." + ) + + if w_fname and (not sep_files) and os.path.exists(w_fname) and rank == 0: + print( + "Warning: specified W_fname ({fn})".format(fn=w_fname) + + " already exists. Results will be appended to this file." + ) + elif w_fname and sep_files and (not os.path.exists(w_fname)) and rank == 0: + print( + "Warning: path {path} does not exist. Creating...".format(path=w_fname) + ) os.makedirs(w_fname, exist_ok=True) - if (x_fname and os.path.exists(x_fname) and rank==0): - print('Warning: specified Xbar_fname ({fn})'.format(fn=x_fname) + - ' already exists. 
Results will be appended to this file.') + if x_fname and os.path.exists(x_fname) and rank == 0: + print( + "Warning: specified Xbar_fname ({fn})".format(fn=x_fname) + + " already exists. Results will be appended to this file." + ) self.PHB = ph self.cylinder_rank = rank self.w_fname = w_fname self.w_grad_fname = w_grad_fname self.x_fname = x_fname - self.sep_files = sep_files # Write separate files for each - # scenario's dual weights + self.sep_files = sep_files # Write separate files for each + # scenario's dual weights def pre_iter0(self): pass def post_iter0(self): pass - + def miditer(self): pass def enditer(self): - """ if (self.w_fname): - fname = f'fname{self.PHB._PHIter}.csv' - mpisppy.utils.wxbarutils.write_W_to_file(self.PHB, w_fname, - sep_files=self.sep_files)""" - pass - + """if (self.w_fname): + fname = f'fname{self.PHB._PHIter}.csv' + mpisppy.utils.wxbarutils.write_W_to_file(self.PHB, w_fname, + sep_files=self.sep_files)""" + pass def post_everything(self): - if (self.w_fname): + if self.w_fname: fname = self.w_fname - mpisppy.utils.wxbarutils.write_W_to_file(self.PHB, fname, - sep_files=self.sep_files) - if (self.w_grad_fname): + mpisppy.utils.wxbarutils.write_W_to_file( + self.PHB, fname, sep_files=self.sep_files + ) + if self.w_grad_fname: # TODO: finish implementing? - #grad_fname = 'grad_fname.csv' - #mpisppy.utils.wxbarutils.write_W_grad_to_file(self.PHB, grad_fname, - #sep_files=self.sep_files) + # grad_fname = 'grad_fname.csv' + # mpisppy.utils.wxbarutils.write_W_grad_to_file(self.PHB, grad_fname, + # sep_files=self.sep_files) pass - if (self.x_fname): + if self.x_fname: mpisppy.utils.wxbarutils.write_xbar_to_file(self.PHB, self.x_fname) - diff --git a/mpisppy/utils/xhat_eval.py b/mpisppy/utils/xhat_eval.py index ac826fe31..fa02fd626 100644 --- a/mpisppy/utils/xhat_eval.py +++ b/mpisppy/utils/xhat_eval.py @@ -19,20 +19,21 @@ import mpisppy.utils.sputils as sputils import mpisppy.spopt - + fullcomm = MPI.COMM_WORLD global_rank = fullcomm.Get_rank() # Could also pass, e.g., sys.stdout instead of a filename -mpisppy.log.setup_logger("mpisppy.utils.xhat_eval", - "xhateval.log", - level=logging.CRITICAL) +mpisppy.log.setup_logger( + "mpisppy.utils.xhat_eval", "xhateval.log", level=logging.CRITICAL +) logger = logging.getLogger("mpisppy.utils.xhat_eval") + ############################################################################ class Xhat_Eval(mpisppy.spopt.SPOpt): - """ See SPOpt for list of args. 
""" - + """See SPOpt for list of args.""" + def __init__( self, options, @@ -44,8 +45,7 @@ def __init__( scenario_creator_kwargs=None, variable_probability=None, ph_extensions=None, - ): - + ): super().__init__( options, all_scenario_names, @@ -57,10 +57,9 @@ def __init__( scenario_creator_kwargs=scenario_creator_kwargs, variable_probability=variable_probability, ) - - self.verbose = self.options['verbose'] - self._subproblems_solvers_created = False + self.verbose = self.options["verbose"] + self._subproblems_solvers_created = False def _lazy_create_solvers(self): if self._subproblems_solvers_created: @@ -68,46 +67,57 @@ def _lazy_create_solvers(self): self._create_solvers(presolve=False) self._subproblems_solvers_created = True - - #====================================================================== - def solve_one(self, solver_options, k, s, - dtiming=False, - gripe=False, - tee=False, - verbose=False, - disable_pyomo_signal_handling=False, - update_objective=True, - compute_val_at_nonant=False): - + # ====================================================================== + def solve_one( + self, + solver_options, + k, + s, + dtiming=False, + gripe=False, + tee=False, + verbose=False, + disable_pyomo_signal_handling=False, + update_objective=True, + compute_val_at_nonant=False, + ): self._lazy_create_solvers() - pyomo_solve_time = super().solve_one(solver_options, k, s, - dtiming=dtiming, - gripe=gripe, - tee=tee, - verbose=verbose, - disable_pyomo_signal_handling=disable_pyomo_signal_handling, - update_objective=update_objective) - + pyomo_solve_time = super().solve_one( + solver_options, + k, + s, + dtiming=dtiming, + gripe=gripe, + tee=tee, + verbose=verbose, + disable_pyomo_signal_handling=disable_pyomo_signal_handling, + update_objective=update_objective, + ) if compute_val_at_nonant: objfct = self.saved_objectives[k] if self.verbose: - print ("caller", inspect.stack()[1][3]) - print ("E_Obj Scenario {}, prob={}, Obj={}, ObjExpr={}"\ - .format(k, s._mpisppy_probability, pyo.value(objfct), objfct.expr)) + print("caller", inspect.stack()[1][3]) + print( + "E_Obj Scenario {}, prob={}, Obj={}, ObjExpr={}".format( + k, s._mpisppy_probability, pyo.value(objfct), objfct.expr + ) + ) self.objs_dict[k] = pyo.value(objfct) - return(pyomo_solve_time) - - - def solve_loop(self, solver_options=None, - use_scenarios_not_subproblems=False, - dtiming=False, - gripe=False, - disable_pyomo_signal_handling=False, - tee=False, - verbose=False, - compute_val_at_nonant=False): - """ Loop over self.local_subproblems and solve them in a manner + return pyomo_solve_time + + def solve_loop( + self, + solver_options=None, + use_scenarios_not_subproblems=False, + dtiming=False, + gripe=False, + disable_pyomo_signal_handling=False, + tee=False, + verbose=False, + compute_val_at_nonant=False, + ): + """Loop over self.local_subproblems and solve them in a manner dicated by the arguments. In addition to changing the Var values in the scenarios, update _PySP_feas_indictor for each. @@ -119,13 +129,13 @@ def solve_loop(self, solver_options=None, use_scenarios_not_subproblems (boolean): for use by bounds dtiming (boolean): indicates that timing should be reported gripe (boolean): output a message if a solve fails - disable_pyomo_signal_handling (boolean): set to true for asynch, + disable_pyomo_signal_handling (boolean): set to true for asynch, ignored for persistent solvers. 
tee (boolean): show solver output to screen if possible verbose (boolean): indicates verbose output compute_val_at_nonant (boolean): indicate that self.objs_dict should be created and computed. - + NOTE: I am not sure what happens with solver_options None for a persistent solver. Do options persist? @@ -133,9 +143,11 @@ def solve_loop(self, solver_options=None, NOTE: set_objective takes care of W and prox changes. """ self._lazy_create_solvers() - def _vb(msg): + + def _vb(msg): if verbose and self.cylinder_rank == 0: - print ("(rank0) " + msg) + print("(rank0) " + msg) + logger.debug(" early solve_loop for rank={}".format(self.cylinder_rank)) # note that when there is no bundling, scenarios are subproblems @@ -143,38 +155,48 @@ def _vb(msg): s_source = self.local_scenarios else: s_source = self.local_subproblems - + if compute_val_at_nonant: - self.objs_dict={} - - for k,s in s_source.items(): + self.objs_dict = {} + + for k, s in s_source.items(): if tee: print(f"Tee solve for {k} on global rank {self.global_rank}") - logger.debug(" in loop solve_loop k={}, rank={}".format(k, self.cylinder_rank)) - - pyomo_solve_time = self.solve_one(solver_options, k, s, - dtiming=dtiming, - verbose=verbose, - tee=tee, - gripe=gripe, + logger.debug( + " in loop solve_loop k={}, rank={}".format(k, self.cylinder_rank) + ) + + pyomo_solve_time = self.solve_one( + solver_options, + k, + s, + dtiming=dtiming, + verbose=verbose, + tee=tee, + gripe=gripe, disable_pyomo_signal_handling=disable_pyomo_signal_handling, - compute_val_at_nonant=compute_val_at_nonant) + compute_val_at_nonant=compute_val_at_nonant, + ) if dtiming: all_pyomo_solve_times = self.mpicomm.gather(pyomo_solve_time, root=0) if self.cylinder_rank == 0: print("Pyomo solve times (seconds):") - print("\tmin=%4.2f mean=%4.2f max=%4.2f" % - (np.min(all_pyomo_solve_times), - np.mean(all_pyomo_solve_times), - np.max(all_pyomo_solve_times))) - - #====================================================================== + print( + "\tmin=%4.2f mean=%4.2f max=%4.2f" + % ( + np.min(all_pyomo_solve_times), + np.mean(all_pyomo_solve_times), + np.max(all_pyomo_solve_times), + ) + ) + + # ====================================================================== def Eobjective(self, verbose=False, fct=None): - """ Compute the expected value of the composition of the objective function + """Compute the expected value of the composition of the objective function and a given function fct across all scenarios. - Note: + Note: Assumes the optimization is done beforehand, therefore DOES NOT CHECK FEASIBILITY or NON-ANTICIPATIVITY! This method uses whatever the current value of the objective @@ -183,49 +205,49 @@ Args: verbose (boolean, optional): If True, displays verbose output. Default False. - + fct (function, optional): A function R-->R^p, such as x|-->(x,x^2,x^3). Default is None If fct is None, Eobjective returns the expected value. - + Returns: float or numpy.array: - The expected objective function value. + The expected objective function value. If fct is R-->R, returns a float.
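For the vector-valued case, a hypothetical two-scenario sketch (not code from this diff) of how a p=2 fct yields two moments at once, from which a variance estimate follows:

import numpy as np

fct = lambda x: np.array([x, x**2])  # per-scenario transform
objs = {"s1": 10.0, "s2": 14.0}  # invented objs_dict values
probs = {"s1": 0.5, "s2": 0.5}  # invented scenario probabilities
Eobj = sum(probs[k] * fct(v) for k, v in objs.items())
variance = Eobj[1] - Eobj[0] ** 2  # E[obj^2] - E[obj]^2 = 4.0 here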
If fct is R-->R^p with p>1, returns a np.array of length p """ self._lazy_create_solvers() if fct is None: return super().Eobjective(verbose=verbose) - + if not hasattr(self, "objs_dict"): - raise RuntimeError("Values of the objective functions for each scenario"+ - " at xhat have to be computed before running Eobjective") - + raise RuntimeError( + "Values of the objective functions for each scenario" + + " at xhat have to be computed before running Eobjective" + ) local_Eobjs = [] - for k,s in self.local_scenarios.items(): + for k, s in self.local_scenarios.items(): if k not in self.objs_dict: raise RuntimeError(f"No value has been calculated for the scenario {k}") local_Eobjs.append(s._mpisppy_probability * fct(self.objs_dict[k])) local_Eobjs = np.array(local_Eobjs) - local_Eobj = np.array([np.sum(local_Eobjs,axis=0)]) + local_Eobj = np.array([np.sum(local_Eobjs, axis=0)]) global_Eobj = np.zeros(len(local_Eobj)) self.mpicomm.Allreduce(local_Eobj, global_Eobj, op=MPI.SUM) - if len(global_Eobj)==1: + if len(global_Eobj) == 1: global_Eobj = global_Eobj[0] return global_Eobj - - - #============== - def evaluate_one(self, nonant_cache,scenario_name,s): - """ Evaluate xhat for one scenario. + + # ============== + def evaluate_one(self, nonant_cache, scenario_name, s): + """Evaluate xhat for one scenario. Args: nonant_cache(numpy vector): special numpy vector with nonant values (see spopt) scenario_name(str): TODO - + Returns: Eobj (float or None): Expected value (or None if infeasible) @@ -235,24 +257,28 @@ def evaluate_one(self, nonant_cache,scenario_name,s): self._fix_nonants(nonant_cache) if not hasattr(self, "objs_dict"): self.objs_dict = {} - - solver_options = self.options["solver_options"] if "solver_options" in self.options else None + solver_options = ( + self.options["solver_options"] if "solver_options" in self.options else None + ) k = scenario_name - self.solve_one(solver_options,k, s, - dtiming=False, - verbose=self.verbose, - tee=False, - gripe=True, - compute_val_at_nonant=True - ) - + self.solve_one( + solver_options, + k, + s, + dtiming=False, + verbose=self.verbose, + tee=False, + gripe=True, + compute_val_at_nonant=True, + ) + obj = self.objs_dict[k] - + return obj - + def evaluate(self, nonant_cache, fct=None): - """ Do the optimization and compute the expected value of the composition of the objective function + """Do the optimization and compute the expected value of the composition of the objective function and a given function fct across all scenarios. Args: @@ -268,74 +294,74 @@ def evaluate(self, nonant_cache, fct=None): self._lazy_create_solvers() self._fix_nonants(nonant_cache) - solver_options = self.options["solver_options"] if "solver_options" in self.options else None - - self.solve_loop(solver_options=solver_options, - use_scenarios_not_subproblems=True, - gripe=True, - tee=False, - verbose=self.verbose, - compute_val_at_nonant=True - ) - - Eobj = self.Eobjective(self.verbose,fct=fct) - + solver_options = ( + self.options["solver_options"] if "solver_options" in self.options else None + ) + + self.solve_loop( + solver_options=solver_options, + use_scenarios_not_subproblems=True, + gripe=True, + tee=False, + verbose=self.verbose, + compute_val_at_nonant=True, + ) + + Eobj = self.Eobjective(self.verbose, fct=fct) + return Eobj - - - - def fix_nonants_upto_stage(self,t,cache): - """ Fix the Vars subject to non-anticipativity at given values for stages 1 to t. 
+ + def fix_nonants_upto_stage(self, t, cache): + """Fix the Vars subject to non-anticipativity at given values for stages 1 to t. Loop over the scenarios to restore, but loop over subproblems to alert persistent solvers. Args: cache (ndn dict of list or numpy vector): values at which to fix - WARNING: + WARNING: We are counting on Pyomo indices not to change order between when the cache_list is created and used. NOTE: You probably want to call _save_nonants right before calling this """ self._lazy_create_solvers() - for k,s in self.local_scenarios.items(): - + for k, s in self.local_scenarios.items(): persistent_solver = None - if (sputils.is_persistent(s._solver_plugin)): + if sputils.is_persistent(s._solver_plugin): persistent_solver = s._solver_plugin nlens = s._mpisppy_data.nlens for node in s._mpisppy_node_list: - if node.stage<=t: + if node.stage <= t: ndn = node.name if ndn not in cache: - raise RuntimeError("Could not find {} in {}"\ - .format(ndn, cache)) + raise RuntimeError("Could not find {} in {}".format(ndn, cache)) if cache[ndn] is None: - raise RuntimeError("Empty cache for scen={}, node={}".format(k, ndn)) + raise RuntimeError( + "Empty cache for scen={}, node={}".format(k, ndn) + ) if len(cache[ndn]) != nlens[ndn]: - raise RuntimeError("Needed {} nonant Vars for {}, got {}"\ - .format(nlens[ndn], ndn, len(cache[ndn]))) - for i in range(nlens[ndn]): + raise RuntimeError( + "Needed {} nonant Vars for {}, got {}".format( + nlens[ndn], ndn, len(cache[ndn]) + ) + ) + for i in range(nlens[ndn]): this_vardata = node.nonant_vardata_list[i] this_vardata._value = cache[ndn][i] this_vardata.fix() if persistent_solver is not None: persistent_solver.update_var(this_vardata) - - - - - #====================================================================== + + # ====================================================================== def _fix_nonants_at_value(self): - """ Fix the Vars subject to non-anticipativity at their current values. - Loop over the scenarios to restore, but loop over subproblems - to alert persistent solvers. + """Fix the Vars subject to non-anticipativity at their current values. + Loop over the scenarios to restore, but loop over subproblems + to alert persistent solvers. 
""" - for k,s in self.local_scenarios.items(): - + for k, s in self.local_scenarios.items(): persistent_solver = None if not self.bundling: - if (sputils.is_persistent(s._solver_plugin)): + if sputils.is_persistent(s._solver_plugin): persistent_solver = s._solver_plugin for var in s._mpisppy_data.nonant_indices.values(): @@ -345,8 +371,8 @@ def _fix_nonants_at_value(self): if self.bundling: # we might need to update persistent solvers rank_local = self.cylinder_rank - for k,s in self.local_subproblems.items(): - if (sputils.is_persistent(s._solver_plugin)): + for k, s in self.local_subproblems.items(): + if sputils.is_persistent(s._solver_plugin): persistent_solver = s._solver_plugin else: break # all solvers should be the same @@ -376,20 +402,19 @@ def calculate_incumbent(self, fix_nonants=True, verbose=False): if fix_nonants: self._fix_nonants_at_value() - self.solve_loop(solver_options=self.current_solver_options, - verbose=verbose) + self.solve_loop(solver_options=self.current_solver_options, verbose=verbose) infeasP = self.infeas_prob() - if infeasP != 0.: + if infeasP != 0.0: return None else: if verbose and self.cylinder_rank == 0: print(" Feasible xhat found") return self.Eobjective(verbose=verbose) - - if __name__ == "__main__": - print("For an example of the use of Xhat_Eval see, e.g., zhat4xhat.py, " - "which is in the confidence_intervals directory.") + print( + "For an example of the use of Xhat_Eval see, e.g., zhat4xhat.py, " + "which is in the confidence_intervals directory." + ) diff --git a/paperruns/larger_uc/todat.py b/paperruns/larger_uc/todat.py index 9e98b17d3..cd2b2814c 100644 --- a/paperruns/larger_uc/todat.py +++ b/paperruns/larger_uc/todat.py @@ -20,87 +20,86 @@ num_scenarios = 250 -columns = scenario_data.iloc[:,1:num_scenarios+1] +columns = scenario_data.iloc[:, 1 : num_scenarios + 1] OUTDIR = "NewInstance" os.mkdir(OUTDIR) -copyfile("RootNode.dat", OUTDIR+os.sep+"RootNode.dat") +copyfile("RootNode.dat", OUTDIR + os.sep + "RootNode.dat") -for scenario_index in range(0,num_scenarios): +for scenario_index in range(0, num_scenarios): + outfile = open(OUTDIR + os.sep + "Node" + str(scenario_index + 1) + ".dat", "w") - outfile = open(OUTDIR+os.sep+"Node"+str(scenario_index+1)+".dat",'w') - - this_column = columns.iloc[:,scenario_index:scenario_index+1].T.to_numpy().reshape((25,)) - print(scenario_index,"THIS COLUMN=",this_column) - print("AVERAGE WIND=",np.average(this_column)) + this_column = ( + columns.iloc[:, scenario_index : scenario_index + 1].T.to_numpy().reshape((25,)) + ) + print(scenario_index, "THIS COLUMN=", this_column) + print("AVERAGE WIND=", np.average(this_column)) print("param MinNondispatchablePower := ", file=outfile) - - for hour in range(0,24): - wind_this_hour = 0.0 # this_column[hour] - print("WIND", hour+1, wind_this_hour, file=outfile) - for hour in range(24,48): - wind_this_hour = 0.0 # this_column[23] - print("WIND", hour+1, wind_this_hour, file=outfile) + + for hour in range(0, 24): + wind_this_hour = 0.0 # this_column[hour] + print("WIND", hour + 1, wind_this_hour, file=outfile) + for hour in range(24, 48): + wind_this_hour = 0.0 # this_column[23] + print("WIND", hour + 1, wind_this_hour, file=outfile) print(";", file=outfile) print("\n", file=outfile) print("param MaxNondispatchablePower := ", file=outfile) - - for hour in range(0,24): + + for hour in range(0, 24): wind_this_hour = this_column[hour] * scale_factor - print("WIND", hour+1, wind_this_hour, file=outfile) - for hour in range(24,48): + print("WIND", hour + 1, wind_this_hour, 
file=outfile) + for hour in range(24, 48): wind_this_hour = this_column[23] * scale_factor - print("WIND", hour+1, wind_this_hour, file=outfile) + print("WIND", hour + 1, wind_this_hour, file=outfile) print(";", file=outfile) print("\n", file=outfile) outfile.close() -copyfile("ScenarioStructureBase.dat", OUTDIR+os.sep+"ScenarioStructure.dat") +copyfile("ScenarioStructureBase.dat", OUTDIR + os.sep + "ScenarioStructure.dat") -s_outfile = open(OUTDIR+os.sep+"ScenarioStructure.dat",'a') +s_outfile = open(OUTDIR + os.sep + "ScenarioStructure.dat", "a") print("\nset Stages := FirstStage SecondStage ;\n", file=s_outfile) print("set Nodes := ", file=s_outfile) print("RootNode", file=s_outfile) -for i in range(1,num_scenarios+1): - print("Node"+str(i), file=s_outfile) +for i in range(1, num_scenarios + 1): + print("Node" + str(i), file=s_outfile) print(";\n", file=s_outfile) print("param NodeStage := ", file=s_outfile) print("RootNode FirstStage", file=s_outfile) -for i in range(1,num_scenarios+1): - print("Node"+str(i)+" SecondStage", file=s_outfile) +for i in range(1, num_scenarios + 1): + print("Node" + str(i) + " SecondStage", file=s_outfile) print(";\n", file=s_outfile) print("set Children[RootNode] := ", file=s_outfile) -for i in range(1,num_scenarios+1): - print("Node"+str(i), file=s_outfile) +for i in range(1, num_scenarios + 1): + print("Node" + str(i), file=s_outfile) print(";\n", file=s_outfile) print("param ConditionalProbability :=", file=s_outfile) print("RootNode 1.0", file=s_outfile) -for i in range(1,num_scenarios+1): - print("Node"+str(i)+" "+str(1.0/float(num_scenarios)), file=s_outfile) +for i in range(1, num_scenarios + 1): + print("Node" + str(i) + " " + str(1.0 / float(num_scenarios)), file=s_outfile) print(";\n", file=s_outfile) print("set Scenarios := ", file=s_outfile) -for i in range(1,num_scenarios+1): - print("Scenario"+str(i), file=s_outfile) +for i in range(1, num_scenarios + 1): + print("Scenario" + str(i), file=s_outfile) print(";\n", file=s_outfile) print("param ScenarioLeafNode := ", file=s_outfile) -for i in range(1,num_scenarios+1): - print("Scenario"+str(i)+" Node"+str(i), file=s_outfile) +for i in range(1, num_scenarios + 1): + print("Scenario" + str(i) + " Node" + str(i), file=s_outfile) print(";\n", file=s_outfile) - -s_outfile.close() - +s_outfile.close() diff --git a/paperruns/larger_uc/uc_cylinders.py b/paperruns/larger_uc/uc_cylinders.py index ef8cc7e31..9e17385df 100644 --- a/paperruns/larger_uc/uc_cylinders.py +++ b/paperruns/larger_uc/uc_cylinders.py @@ -34,18 +34,19 @@ def _parse_args(): parser = baseparsers.xhatlooper_args(parser) parser = baseparsers.xhatshuffle_args(parser) parser = baseparsers.cross_scenario_cuts_args(parser) - parser.add_argument("--ph-mipgaps-json", - help="json file with mipgap schedule (default None)", - dest="ph_mipgaps_json", - type=str, - default=None) - + parser.add_argument( + "--ph-mipgaps-json", + help="json file with mipgap schedule (default None)", + dest="ph_mipgaps_json", + type=str, + default=None, + ) + args = parser.parse_args() return args def main(): - args = _parse_args() num_scen = args.num_scens @@ -58,11 +59,12 @@ def main(): fixer_tol = args.fixer_tol with_cross_scenario_cuts = args.with_cross_scenario_cuts - scensavail = [3,50,100,250,500,750,1000] + scensavail = [3, 50, 100, 250, 500, 750, 1000] if num_scen not in scensavail: - raise RuntimeError("num-scen was {}, but must be in {}".\ - format(num_scen, scensavail)) - + raise RuntimeError( + "num-scen was {}, but must be in {}".format(num_scen, 
scensavail) + ) + scenario_creator_kwargs = { "scenario_count": num_scen, "path": str(num_scen) + "scenarios_wind", @@ -71,7 +73,7 @@ def main(): scenario_denouement = uc.scenario_denouement all_scenario_names = [f"Scenario{i+1}" for i in range(num_scen)] rho_setter = uc._rho_setter - + # Things needed for vanilla cylinders beans = (args, scenario_creator, scenario_denouement, all_scenario_names) @@ -90,11 +92,12 @@ def main(): multi_ext = {"ext_classes": [Gapper]} if with_cross_scenario_cuts: multi_ext["ext_classes"].append(CrossScenarioExtension) - + hub_dict["opt_kwargs"]["extension_kwargs"] = multi_ext if with_cross_scenario_cuts: - hub_dict["opt_kwargs"]["options"]["cross_scen_options"]\ - = {"check_bound_improve_iterations" : args.cross_scenario_iter_cnt} + hub_dict["opt_kwargs"]["options"]["cross_scen_options"] = { + "check_bound_improve_iterations": args.cross_scenario_iter_cnt + } if with_fixer: hub_dict["opt_kwargs"]["options"]["fixeroptions"] = { @@ -110,14 +113,14 @@ def main(): mipgapdict = None hub_dict["opt_kwargs"]["options"]["gapperoptions"] = { "verbose": args.with_verbose, - "mipgapdict": mipgapdict - } - + "mipgapdict": mipgapdict, + } + if args.default_rho is None: # since we are using a rho_setter anyway - hub_dict.opt_kwargs.options["defaultPHrho"] = 1 + hub_dict.opt_kwargs.options["defaultPHrho"] = 1 ### end ph spoke ### - + # FWPH spoke if with_fwph: fw_spoke = vanilla.fwph_spoke( @@ -145,7 +148,7 @@ def main(): *beans, scenario_creator_kwargs=scenario_creator_kwargs, ) - + # cross scenario cut spoke if with_cross_scenario_cuts: cross_scenario_cuts_spoke = vanilla.cross_scenario_cuts_spoke( diff --git a/paperruns/larger_uc/uc_funcs.py b/paperruns/larger_uc/uc_funcs.py index a81c16352..9b7862364 100644 --- a/paperruns/larger_uc/uc_funcs.py +++ b/paperruns/larger_uc/uc_funcs.py @@ -29,25 +29,28 @@ def pysp_instance_creation_callback(scenario_name, path=None, scenario_count=Non if path is None: raise ValueError("UC scenario creator requires a path kwarg") - #print("Building instance for scenario =", scenario_name) + # print("Building instance for scenario =", scenario_name) scennum = sputils.extract_num(scenario_name) uc_model_params = pdp.get_uc_model() scenario_data = DataPortal(model=uc_model_params) - scenario_data.load(filename=path+os.sep+"RootNode.dat") - scenario_data.load(filename=path+os.sep+"Node"+str(scennum)+".dat") + scenario_data.load(filename=path + os.sep + "RootNode.dat") + scenario_data.load(filename=path + os.sep + "Node" + str(scennum) + ".dat") - scenario_params = uc_model_params.create_instance(scenario_data, - report_timing=False, - name=scenario_name) + scenario_params = uc_model_params.create_instance( + scenario_data, report_timing=False, name=scenario_name + ) - scenario_md = md.ModelData(pdp.create_model_data_dict_params(scenario_params, keep_names=True)) + scenario_md = md.ModelData( + pdp.create_model_data_dict_params(scenario_params, keep_names=True) + ) ## TODO: use the "power_balance_constraints" for now. 
In the future, networks should be ## handled with a custom callback -- also consider other base models - scenario_instance = uc.create_tight_unit_commitment_model(scenario_md, - network_constraints='power_balance_constraints') + scenario_instance = uc.create_tight_unit_commitment_model( + scenario_md, network_constraints="power_balance_constraints" + ) # hold over string attribute from Egret, # causes warning wth LShaped/Benders @@ -55,17 +58,21 @@ def pysp_instance_creation_callback(scenario_name, path=None, scenario_count=Non return scenario_instance + def scenario_creator(scenario_name, path=None, scenario_count=None): return pysp2_callback(scenario_name, path=path, scenario_count=scenario_count) + def pysp2_callback(scenario_name, path=None, scenario_count=None): - ''' The callback needs to create an instance and then attach - the PySP nodes to it in a list _mpisppy_node_list ordered by stages. - Optionally attach _PHrho. Standard (1.0) PySP signature for now... - ''' + """The callback needs to create an instance and then attach + the PySP nodes to it in a list _mpisppy_node_list ordered by stages. + Optionally attach _PHrho. Standard (1.0) PySP signature for now... + """ instance = pysp_instance_creation_callback( - scenario_name, path=path, scenario_count=scenario_count, + scenario_name, + path=path, + scenario_count=scenario_count, ) # now attach the one and only tree node (ROOT is a reserved word) @@ -80,100 +87,117 @@ def pysp2_callback(scenario_name, path=None, scenario_count=None): [instance.UnitStart, instance.UnitStop, instance.StartupIndicator], )] """ - sputils.attach_root_node(instance, - instance.StageCost["Stage_1"], - [instance.UnitOn], - nonant_ef_suppl_list = [instance.UnitStart, - instance.UnitStop, - instance.StartupIndicator]) + sputils.attach_root_node( + instance, + instance.StageCost["Stage_1"], + [instance.UnitOn], + nonant_ef_suppl_list=[ + instance.UnitStart, + instance.UnitStop, + instance.StartupIndicator, + ], + ) return instance + def scenario_denouement(rank, scenario_name, scenario): -# print("First stage cost for scenario",scenario_name,"is",pyo.value(scenario.StageCost["FirstStage"])) -# print("Second stage cost for scenario",scenario_name,"is",pyo.value(scenario.StageCost["SecondStage"])) + # print("First stage cost for scenario",scenario_name,"is",pyo.value(scenario.StageCost["FirstStage"])) + # print("Second stage cost for scenario",scenario_name,"is",pyo.value(scenario.StageCost["SecondStage"])) pass -def scenario_rhosa(scenario_instance): +def scenario_rhosa(scenario_instance): return scenario_rhos(scenario_instance) -def _rho_setter(scenario_instance): +def _rho_setter(scenario_instance): return scenario_rhos(scenario_instance) + def scenario_rhos(scenario_instance, rho_scale_factor=0.1): computed_rhos = [] for t in scenario_instance.TimePeriods: for g in scenario_instance.ThermalGenerators: - min_power = pyo.value(scenario_instance.MinimumPowerOutput[g,t]) - max_power = pyo.value(scenario_instance.MaximumPowerOutput[g,t]) + min_power = pyo.value(scenario_instance.MinimumPowerOutput[g, t]) + max_power = pyo.value(scenario_instance.MaximumPowerOutput[g, t]) avg_power = min_power + ((max_power - min_power) / 2.0) - min_cost = pyo.value(scenario_instance.MinimumProductionCost[g,t]) + min_cost = pyo.value(scenario_instance.MinimumProductionCost[g, t]) - avg_cost = scenario_instance.ComputeProductionCosts(scenario_instance, g, t, avg_power) + min_cost - #max_cost = scenario_instance.ComputeProductionCosts(scenario_instance, g, t, max_power) + min_cost 
+ avg_cost = ( + scenario_instance.ComputeProductionCosts( + scenario_instance, g, t, avg_power + ) + + min_cost + ) + # max_cost = scenario_instance.ComputeProductionCosts(scenario_instance, g, t, max_power) + min_cost computed_rho = rho_scale_factor * avg_cost - computed_rhos.append((id(scenario_instance.UnitOn[g,t]), computed_rho)) - + computed_rhos.append((id(scenario_instance.UnitOn[g, t]), computed_rho)) + return computed_rhos -def scenario_rhos_trial_from_file(scenario_instance, rho_scale_factor=0.01, - fname=None): - ''' First computes the standard rho values (as computed by scenario_rhos() - above). Then reads rho values from the specified file (raises error if - no file specified) which is a csv formatted (var_name,rho_value). If - the rho_value specified in the file is strictly positive, it replaces - the value computed by scenario_rhos(). - - DTM: I wrote this function to test some specific things--I don't think - this will have a general purpose use, and can probably be deleted. - ''' - if (fname is None): - raise RuntimeError('Please provide an "fname" kwarg to ' - 'the "rho_setter_kwargs" option in options') - computed_rhos = scenario_rhos(scenario_instance, - rho_scale_factor=rho_scale_factor) + +def scenario_rhos_trial_from_file(scenario_instance, rho_scale_factor=0.01, fname=None): + """First computes the standard rho values (as computed by scenario_rhos() + above). Then reads rho values from the specified file (raises error if + no file specified) which is a csv formatted (var_name,rho_value). If + the rho_value specified in the file is strictly positive, it replaces + the value computed by scenario_rhos(). + + DTM: I wrote this function to test some specific things--I don't think + this will have a general purpose use, and can probably be deleted. + """ + if fname is None: + raise RuntimeError( + 'Please provide an "fname" kwarg to ' + 'the "rho_setter_kwargs" option in options' + ) + computed_rhos = scenario_rhos(scenario_instance, rho_scale_factor=rho_scale_factor) try: trial_rhos = _get_saved_rhos(fname) except Exception: - raise RuntimeError('Formatting issue in specified rho file ' + fname + - '. Format should be (variable_name,rho_value) for ' - 'each row, with no blank lines, and no ' - 'extra/commented lines') - + raise RuntimeError( + "Formatting issue in specified rho file " + + fname + + ". Format should be (variable_name,rho_value) for " + "each row, with no blank lines, and no " + "extra/commented lines" + ) + index = 0 for b in sorted(scenario_instance.Buses): for t in sorted(scenario_instance.TimePeriods): for g in sorted(scenario_instance.ThermalGeneratorsAtBus[b]): - var = scenario_instance.UnitOn[g,t] + var = scenario_instance.UnitOn[g, t] try: trial_rho = trial_rhos[var.name] except KeyError: - raise RuntimeError(var.name + ' is missing from ' - 'the specified rho file ' + fname) - if (trial_rho >= 1e-14): - print('Using a trial rho') + raise RuntimeError( + var.name + " is missing from " "the specified rho file " + fname + ) + if trial_rho >= 1e-14: + print("Using a trial rho") computed_rhos[index] = (id(var), trial_rho) index += 1 - + return computed_rhos + def _get_saved_rhos(fname): - ''' Return a dict of trial rho values, indexed by variable name. 
- ''' + """Return a dict of trial rho values, indexed by variable name.""" rhos = dict() - with open(fname, 'r') as f: + with open(fname, "r") as f: for line in f: - line = line.split(',') - vname = ','.join(line[:-1]) + line = line.split(",") + vname = ",".join(line[:-1]) rho = float(line[-1]) rhos[vname] = rho return rhos + def id_fix_list_fct(scenario_instance): - """ specify tuples used by the fixer. + """specify tuples used by the fixer. Args: s (ConcreteModel): the sizes instance. @@ -193,15 +217,21 @@ def id_fix_list_fct(scenario_instance): for b in sorted(scenario_instance.Buses): for t in sorted(scenario_instance.TimePeriods): for g in sorted(scenario_instance.ThermalGeneratorsAtBus[b]): - - iter0tuples.append(fixer.Fixer_tuple(scenario_instance.UnitOn[g,t], - th=0.01, nb=None, lb=0, ub=None)) - - iterktuples.append(fixer.Fixer_tuple(scenario_instance.UnitOn[g,t], - th=0.01, nb=None, lb=6, ub=6)) + iter0tuples.append( + fixer.Fixer_tuple( + scenario_instance.UnitOn[g, t], th=0.01, nb=None, lb=0, ub=None + ) + ) + + iterktuples.append( + fixer.Fixer_tuple( + scenario_instance.UnitOn[g, t], th=0.01, nb=None, lb=6, ub=6 + ) + ) return iter0tuples, iterktuples + def write_solution(spcomm, opt_dict, solution_dir): from mpisppy.cylinders.xhatshufflelooper_bounder import XhatShuffleInnerBound from mpisppy.extensions.xhatclosest import XhatClosest @@ -225,12 +255,17 @@ def write_solution(spcomm, opt_dict, solution_dir): # do some checks, to make sure the solution we print will be nonantipative if best_strata_rank != 0: - assert opt_dict["spoke_class"] in (XhatShuffleInnerBound, ) - else: # this is the hub, TODO: also could check for XhatSpecific - assert opt_dict["opt_class"] in (PH, ) + assert opt_dict["spoke_class"] in (XhatShuffleInnerBound,) + else: # this is the hub, TODO: also could check for XhatSpecific + assert opt_dict["opt_class"] in (PH,) assert XhatClosest in opt_dict["opt_kwargs"]["extension_kwargs"]["ext_classes"] - assert "keep_solution" in opt_dict["opt_kwargs"]["options"]["xhat_closest_options"] - assert opt_dict["opt_kwargs"]["options"]["xhat_closest_options"]["keep_solution"] is True + assert ( + "keep_solution" in opt_dict["opt_kwargs"]["options"]["xhat_closest_options"] + ) + assert ( + opt_dict["opt_kwargs"]["options"]["xhat_closest_options"]["keep_solution"] + is True + ) ## if we've passed the above checks, the scenarios should have the tree solution @@ -243,7 +278,7 @@ def write_solution(spcomm, opt_dict, solution_dir): spcomm.cylinder_comm.Barrier() for sname, s in spcomm.opt.local_scenarios.items(): - file_name = os.path.join(solution_dir, sname+'.json') + file_name = os.path.join(solution_dir, sname + ".json") mds = uc._save_uc_results(s, relaxed=False) mds.write(file_name) diff --git a/run-mpitests.py b/run-mpitests.py index e30c0709d..242d058e4 100644 --- a/run-mpitests.py +++ b/run-mpitests.py @@ -10,6 +10,6 @@ from runtests.mpi import Tester import os.path -if __name__ == '__main__': +if __name__ == "__main__": tester = Tester(os.path.join(os.path.abspath(__file__)), ".") tester.main(sys.argv[1:]) diff --git a/setup.py b/setup.py index 4f6a0ef88..c3c30549a 100644 --- a/setup.py +++ b/setup.py @@ -10,7 +10,7 @@ import sys # We raise an error if trying to install with python2 -if sys.version[0] == '2': +if sys.version[0] == "2": print("Error: This package must be installed with python3") sys.exit(1) @@ -25,25 +25,25 @@ # intentionally leaving out mpi4py to help readthedocs setup( - name='mpi-sppy', - version='0.12.2.dev0', + name="mpi-sppy", + 
version="0.12.2.dev0", description="mpi-sppy", long_description=long_description, - url='https://github.com/Pyomo/mpi-sppy', - author='David Woodruff', - author_email='dlwoodruff@ucdavis.edu', + url="https://github.com/Pyomo/mpi-sppy", + author="David Woodruff", + author_email="dlwoodruff@ucdavis.edu", packages=packages, - python_requires='>=3.8', + python_requires=">=3.8", install_requires=[ - 'sortedcollections', - 'numpy<2', - 'scipy', - 'pyomo>=6.4', + "sortedcollections", + "numpy<2", + "scipy", + "pyomo>=6.4", ], extras_require={ - 'doc': [ - 'sphinx_rtd_theme', - 'sphinx', + "doc": [ + "sphinx_rtd_theme", + "sphinx", ] }, )
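The dtiming branch of solve_loop (xhat_eval diff above) gathers each rank's Pyomo solve time to rank 0 and prints min/mean/max. The same pattern in bare mpi4py, as a minimal sketch in which the timed work is just a placeholder:

import numpy as np
from mpi4py import MPI

comm = MPI.COMM_WORLD
t0 = MPI.Wtime()
# ... per-rank work (e.g., a subproblem solve) goes here ...
elapsed = MPI.Wtime() - t0

all_times = comm.gather(elapsed, root=0)  # a list on rank 0, None elsewhere
if comm.Get_rank() == 0:
    print("solve times (seconds):")
    print("\tmin=%4.2f mean=%4.2f max=%4.2f"
          % (np.min(all_times), np.mean(all_times), np.max(all_times)))

Run with, e.g., mpiexec -np 4 python demo.py (the script name here is hypothetical).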
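The fct hook of Eobjective composes a function R-->R^p with the per-scenario objective values before the probability-weighted sum, so a choice such as x|-->(x, x**2) recovers the mean and second moment (hence the variance) of the scenario objective distribution. A serial sketch of that reduction with made-up scenario data; in Xhat_Eval the local sums are finished with an MPI Allreduce:

import numpy as np

objs_dict = {"Scenario1": 10.0, "Scenario2": 14.0, "Scenario3": 30.0}  # made up
probs = {"Scenario1": 0.5, "Scenario2": 0.3, "Scenario3": 0.2}         # made up

def fct(x):
    return np.array([x, x**2])  # R --> R^2

local_Eobjs = np.array([probs[k] * fct(v) for k, v in objs_dict.items()])
Eobj = local_Eobjs.sum(axis=0)        # what the Allreduce would produce
mean, second_moment = Eobj
print(mean, second_moment - mean**2)  # 15.2 and the variance 57.76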
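The guarded lookup that appears in both evaluate_one and evaluate, solver_options = self.options["solver_options"] if "solver_options" in self.options else None, is behavior-for-behavior the same as dict.get, which would also stay on one line after formatting; a quick check of the equivalence:

options = {"solver_options": {"mipgap": 0.01}}
assert options.get("solver_options") == {"mipgap": 0.01}
assert {}.get("solver_options") is None  # absent key -> None, matching the ternary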
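fix_nonants_upto_stage and _fix_nonants_at_value both follow the persistent-solver discipline visible throughout the diff: after changing a Var (here, fixing it), call update_var so the change reaches the solver's in-memory model. A minimal standalone Pyomo sketch; it assumes a Gurobi installation, but any Pyomo persistent interface behaves the same way:

import pyomo.environ as pyo

m = pyo.ConcreteModel()
m.x = pyo.Var(bounds=(0, 10))
m.y = pyo.Var(bounds=(0, 10))
m.obj = pyo.Objective(expr=m.x + m.y, sense=pyo.maximize)

opt = pyo.SolverFactory("gurobi_persistent")
opt.set_instance(m)

m.x.fix(3.0)         # fixing alone does not reach the solver...
opt.update_var(m.x)  # ...this pushes the new fixed status/bounds over
opt.solve()
print(pyo.value(m.obj))  # 13.0: x held at 3, y at its upper bound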
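scenario_rhos prices rho for each UnitOn[g, t] binary at rho_scale_factor times the cost of running the generator at the midpoint of its output range. With illustrative numbers and a linear stand-in for Egret's ComputeProductionCosts (the real routine evaluates the unit's production-cost curve):

rho_scale_factor = 0.1
min_power, max_power = 40.0, 200.0
avg_power = min_power + (max_power - min_power) / 2.0  # 120.0
min_cost = 300.0  # cost of running at min_power (made up)
marginal = 15.0   # cost per MW above the minimum (made up)
avg_cost = marginal * (avg_power - min_power) + min_cost  # stand-in
computed_rho = rho_scale_factor * avg_cost
print(computed_rho)  # 150.0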
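The rho file consumed by scenario_rhos_trial_from_file is plain CSV, one variable_name,rho_value row per line, and indexed Pyomo names such as UnitOn[G1,1] contain commas themselves, which is why _get_saved_rhos rejoins everything but the last field into the name. A self-contained round trip with made-up contents:

import os, tempfile

fname = os.path.join(tempfile.mkdtemp(), "rhos.csv")  # path is illustrative
with open(fname, "w") as f:
    f.write("UnitOn[G1,1],12.5\nUnitOn[G1,2],37.25\n")

rhos = dict()
with open(fname, "r") as f:
    for line in f:
        line = line.split(",")
        vname = ",".join(line[:-1])
        rhos[vname] = float(line[-1])

assert rhos == {"UnitOn[G1,1]": 12.5, "UnitOn[G1,2]": 37.25}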