From 43938d9ebebf12c439beaceaacff3e2f823f2d92 Mon Sep 17 00:00:00 2001
From: vfisikop
Date: Fri, 16 Feb 2024 17:22:47 +0200
Subject: [PATCH] Build volestipy with lpsolve

---
 .github/workflows/ubuntu.yml |  5 +++++
 README.md                    | 43 +++++++++++++++++++++---------------
 dingo/bindings/bindings.h    |  1 -
 setup.py                     | 38 +++++++++++++++++++++++++++++--
 volesti                      |  2 +-
 5 files changed, 67 insertions(+), 22 deletions(-)

diff --git a/.github/workflows/ubuntu.yml b/.github/workflows/ubuntu.yml
index 89279f0e..342c6005 100644
--- a/.github/workflows/ubuntu.yml
+++ b/.github/workflows/ubuntu.yml
@@ -32,6 +32,11 @@ jobs:
           wget -O boost_1_76_0.tar.bz2 https://boostorg.jfrog.io/artifactory/main/release/1.76.0/source/boost_1_76_0.tar.bz2;
           tar xjf boost_1_76_0.tar.bz2;
           rm boost_1_76_0.tar.bz2;
+      - name: Download and extract the lp_solve library
+        run: |
+          wget https://sourceforge.net/projects/lpsolve/files/lpsolve/5.5.2.11/lp_solve_5.5.2.11_source.tar.gz
+          tar xzvf lp_solve_5.5.2.11_source.tar.gz
+          rm lp_solve_5.5.2.11_source.tar.gz
      - name: Install dependencies
        run: |
          sudo apt-get install libsuitesparse-dev;
diff --git a/README.md b/README.md
index 9d0e5c03..486271ad 100644
--- a/README.md
+++ b/README.md
@@ -30,6 +30,13 @@ tar xjf boost_1_76_0.tar.bz2
 rm boost_1_76_0.tar.bz2
 ```
 
+You will also need to download and extract the lp_solve library:
+```
+wget https://sourceforge.net/projects/lpsolve/files/lpsolve/5.5.2.11/lp_solve_5.5.2.11_source.tar.gz
+tar xzvf lp_solve_5.5.2.11_source.tar.gz
+rm lp_solve_5.5.2.11_source.tar.gz
+```
+
 Then, you need to install the dependencies for the PySPQR library; for Debian/Ubuntu Linux, run
 
 ```
@@ -48,14 +55,14 @@ To exploit the fast implementations of dingo, you have to install the [Gurobi so
 pip3 install -i https://pypi.gurobi.com gurobipy
 ```
 
-Then, you will need a [license](https://www.gurobi.com/downloads/end-user-license-agreement-academic/). For more information, we refer to the Gurobi [download center](https://www.gurobi.com/downloads/). 
+Then, you will need a [license](https://www.gurobi.com/downloads/end-user-license-agreement-academic/). For more information, we refer to the Gurobi [download center](https://www.gurobi.com/downloads/).
 
 
 
 
 ## Unit tests
 
-Now, you can run the unit tests by the following commands: 
+Now, you can run the unit tests with the following commands:
 ```
 python3 tests/fba.py
 python3 tests/full_dimensional.py
@@ -72,14 +79,14 @@ python3 tests/fast_implementation_test.py
 
 ## Tutorial
 
-You can have a look at our [Google Colab notebook](https://colab.research.google.com/github/GeomScale/dingo/blob/develop/tutorials/dingo_tutorial.ipynb) 
+You can have a look at our [Google Colab notebook](https://colab.research.google.com/github/GeomScale/dingo/blob/develop/tutorials/dingo_tutorial.ipynb)
 on how to use `dingo`.
 
 
 
 ## Documentation
 
-It quite simple to use dingo in your code. In general, dingo provides two classes: 
+It is quite simple to use dingo in your code. In general, dingo provides two classes:
 - `metabolic_network` represents a metabolic network
 - `polytope_sampler` can be used to sample from the flux space of a metabolic network or from a general convex polytope.
 
@@ -94,35 +101,35 @@ sampler = PolytopeSampler(model)
 steady_states = sampler.generate_steady_states()
 ```
 
-`dingo` can also load a model given in `.sbml` format using the following command, 
+`dingo` can also load a model given in `.sbml` format using the following command,
 
 ```python
 model = MetabolicNetwork.from_sbml('path/to/model_file.sbml')
 ```
 
-The output variable `steady_states` is a `numpy` array that contains the steady states of the model column-wise. You could ask from the `sampler` for more statistical guarantees on sampling, 
+The output variable `steady_states` is a `numpy` array that contains the steady states of the model column-wise. You can ask the `sampler` for more statistical guarantees on sampling,
 
 ```python
 steady_states = sampler.generate_steady_states(ess=2000, psrf = True)
 ```
 
-The `ess` stands for the effective sample size (ESS) (default value is `1000`) and the `psrf` is a flag to request an upper bound equal to 1.1 for the value of the *potential scale reduction factor* of each marginal flux (default option is `False`). 
+The `ess` stands for the effective sample size (ESS) (default value is `1000`) and the `psrf` is a flag to request an upper bound equal to 1.1 for the value of the *potential scale reduction factor* of each marginal flux (default option is `False`).
 
 You could also ask for parallel MMCS algorithm,
 
 ```python
-steady_states = sampler.generate_steady_states(ess=2000, psrf = True, 
+steady_states = sampler.generate_steady_states(ess=2000, psrf = True,
                                                parallel_mmcs = True, num_threads = 2)
 ```
 
-The default option is to run the sequential [Multiphase Monte Carlo Sampling algorithm](https://arxiv.org/abs/2012.05503) (MMCS) algorithm. 
+The default option is to run the sequential [Multiphase Monte Carlo Sampling](https://arxiv.org/abs/2012.05503) (MMCS) algorithm.
 
-**Tip**: After the first run of MMCS algorithm the polytope stored in object `sampler` is usually more rounded than the initial one. Thus, the function `generate_steady_states()` becomes more efficient from run to run. 
+**Tip**: After the first run of the MMCS algorithm, the polytope stored in the `sampler` object is usually more rounded than the initial one. Thus, the function `generate_steady_states()` becomes more efficient from run to run.
 
 
 #### Rounding the polytope
 
-`dingo` provides three methods to round a polytope: (i) Bring the polytope to John position by apllying to it the transformation that maps the largest inscribed ellipsoid of the polytope to the unit ball, (ii) Bring the polytope to near-isotropic position by using uniform sampling with Billiard Walk, (iii) Apply to the polytope the transformation that maps the smallest enclosing ellipsoid of a uniform sample from the interior of the polytope to the unit ball. 
+`dingo` provides three methods to round a polytope: (i) Bring the polytope to John position by applying the transformation that maps its largest inscribed ellipsoid to the unit ball, (ii) Bring the polytope to near-isotropic position by using uniform sampling with Billiard Walk, (iii) Apply to the polytope the transformation that maps the smallest enclosing ellipsoid of a uniform sample from the interior of the polytope to the unit ball.
 
 ```python
 from dingo import MetabolicNetwork, PolytopeSampler
@@ -152,7 +159,7 @@ steady_states = map_samples_to_steady_states(samples, N, N_shift, Tr, Tr_shift)
 
 #### Other MCMC sampling methods
 
-To use any other MCMC sampling method that `dingo` provides you can use the following piece of code: 
+To use any other MCMC sampling method that `dingo` provides, you can use the following piece of code:
 
 ```python
 sampler = polytope_sampler(model)
@@ -160,7 +167,7 @@ steady_states = sampler.generate_steady_states_no_multiphase() #default paramete
 ```
 
 The MCMC methods that dingo (through `volesti` library) provides are the following: (i) 'cdhr': Coordinate Directions Hit-and-Run, (ii) 'rdhr': Random Directions Hit-and-Run,
-(iii) 'billiard_walk', (iv) 'ball_walk', (v) 'dikin_walk', (vi) 'john_walk', (vii) 'vaidya_walk'. 
+(iii) 'billiard_walk', (iv) 'ball_walk', (v) 'dikin_walk', (vi) 'john_walk', (vii) 'vaidya_walk'.
 
 
 
@@ -175,7 +182,7 @@ sampler = polytope_sampler(model)
 #set fast mode to use gurobi library
 sampler.set_fast_mode()
 
-#set slow mode to use scipy functions 
+#set slow mode to use scipy functions
 sampler.set_slow_mode()
 ```
 
@@ -196,7 +203,7 @@ max_biomass_flux_vector = fva_output[2]
 max_biomass_objective = fva_output[3]
 ```
 
-The output of FVA method is tuple that contains `numpy` arrays. The vectors `min_fluxes` and `max_fluxes` contains the minimum and the maximum values of each flux. The vector `max_biomass_flux_vector` is the optimal flux vector according to the biomass objective function and `max_biomass_objective` is the value of that optimal solution. 
+The output of the FVA method is a tuple that contains `numpy` arrays. The vectors `min_fluxes` and `max_fluxes` contain the minimum and the maximum values of each flux. The vector `max_biomass_flux_vector` is the optimal flux vector according to the biomass objective function, and `max_biomass_objective` is the value of that optimal solution.
 
 To apply FBA method,
 
@@ -207,7 +214,7 @@ max_biomass_flux_vector = fba_output[0]
 max_biomass_objective = fba_output[1]
 ```
 
-while the output vectors are the same with the previous example. 
+while the output vectors are the same as in the previous example.
 
 
 
@@ -240,7 +247,7 @@ model.biomass_function(obj_fun)
 # apply FVA using the new objective function
 fva_output = model.fva()
 
-# sample from the flux space by restricting 
+# sample from the flux space by restricting
 # the fluxes according to the new objective function
 sampler = polytope_sampler(model)
 steady_states = sampler.generate_steady_states()
@@ -283,7 +290,7 @@ model = MetabolicNetwork.from_json('path/to/e_coli_core.json')
 sampler = PolytopeSampler(model)
 steady_states = sampler.generate_steady_states(ess = 3000)
 
-# plot the copula between the 13th (PPC) and the 14th (ACONTa) reaction in e-coli 
+# plot the copula between the 13th (PPC) and the 14th (ACONTa) reaction in e-coli
 reactions = model.reactions
 
 data_flux2=[steady_states[12],reactions[12]]
diff --git a/dingo/bindings/bindings.h b/dingo/bindings/bindings.h
index aa4cd66c..8aa29740 100644
--- a/dingo/bindings/bindings.h
+++ b/dingo/bindings/bindings.h
@@ -13,7 +13,6 @@
 #ifndef VOLESTIBINDINGS_H
 #define VOLESTIBINDINGS_H
 
-#define DISABLE_LPSOLVE
 #define DISABLE_NLP_ORACLES
 
 #include // from SOB volume - exactly the same for CG and CB methods
diff --git a/setup.py b/setup.py
index 42f08b5b..c3a90e1e 100755
--- a/setup.py
+++ b/setup.py
@@ -2,6 +2,7 @@
 # dingo is part of GeomScale project
 
 # Copyright (c) 2021 Apostolos Chalkis
+# Copyright (c) 2024 Vissarion Fisikopoulos
 
 # Licensed under GNU LGPL.3, see LICENCE file
 
@@ -27,6 +28,8 @@
 source_directory_list = ["dingo", join("dingo", "bindings")]
 
 compiler_args = ["-std=c++11", "-O3", "-DBOOST_NO_AUTO_PTR", "-ldl", "-lm", "-fopenmp"]
+lp_solve_compiler_args = ["-DYY_NEVER_INTERACTIVE", "-DLoadInverseLib=0", "-DLoadLanguageLib=0",
+"-DRoleIsExternalInvEngine", "-DINVERSE_ACTIVE=3", "-DLoadableBlasLib=0"]
 
 link_args = ["-O3", "-fopenmp"]
 
@@ -38,6 +41,12 @@
     join("eigen"),
     join("boost_1_76_0"),
     join("boost_1_76_0", "boost"),
+    join("lp_solve_5.5"),
+    join("lp_solve_5.5", "bfp"),
+    join("lp_solve_5.5", "bfp", "bfp_LUSOL"),
+    join("lp_solve_5.5", "bfp", "bfp_LUSOL", "LUSOL"),
+    join("lp_solve_5.5", "colamd"),
+    join("lp_solve_5.5", "shared"),
     join("volesti", "external"),
     join("volesti", "external", "minimum_ellipsoid"),
     # include and add the directories on the "include" directory
@@ -49,7 +58,32 @@
     join("volesti", "include", "cartesian_geom"),
 ]
 
-src_files = ["dingo/volestipy.pyx", "dingo/bindings/bindings.cpp"]
+src_files = ["lp_solve_5.5/bfp/bfp_LUSOL/lp_LUSOL.c"
+            , "lp_solve_5.5/bfp/bfp_LUSOL/LUSOL/lusol.c"
+            , "lp_solve_5.5/colamd/colamd.c"
+            , "lp_solve_5.5/ini.c"
+            , "lp_solve_5.5/shared/commonlib.c"
+            , "lp_solve_5.5/shared/mmio.c"
+            , "lp_solve_5.5/shared/myblas.c"
+            , "lp_solve_5.5/lp_crash.c"
+            , "lp_solve_5.5/lp_Hash.c"
+            , "lp_solve_5.5/lp_lib.c"
+            , "lp_solve_5.5/lp_matrix.c"
+            , "lp_solve_5.5/lp_MDO.c"
+            , "lp_solve_5.5/lp_mipbb.c"
+            , "lp_solve_5.5/lp_MPS.c"
+            , "lp_solve_5.5/lp_params.c"
+            , "lp_solve_5.5/lp_presolve.c"
+            , "lp_solve_5.5/lp_price.c"
+            , "lp_solve_5.5/lp_pricePSE.c"
+            , "lp_solve_5.5/lp_report.c"
+            , "lp_solve_5.5/lp_scale.c"
+            , "lp_solve_5.5/lp_simplex.c"
+            , "lp_solve_5.5/lp_SOS.c"
+            , "lp_solve_5.5/lp_utils.c"
+            , "lp_solve_5.5/lp_wlp.c"
+            , "dingo/volestipy.pyx"
+            , "dingo/bindings/bindings.cpp"]
 
 
 # Return the directory that contains the NumPy *.h header files.
 # Extension modules that need to compile against NumPy should use this
@@ -61,7 +95,7 @@
     language="c++",
     sources=src_files,
     include_dirs=extra_include_dirs + extra_volesti_include_dirs,
-    extra_compile_args=compiler_args,
+    extra_compile_args=compiler_args + lp_solve_compiler_args,
     extra_link_args=link_args,
 )
 print("The Extension function is OK.")
diff --git a/volesti b/volesti
index ad049d53..93bbc500 160000
--- a/volesti
+++ b/volesti
@@ -1 +1 @@
-Subproject commit ad049d53996aba072f95d6f9d9810f3ffb4e76e1
+Subproject commit 93bbc5008b8ec4682fd20702e0531e4dc889e963
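
A quick way to exercise a build of this branch is a short sampling run through the Python API documented in the README hunks above. The snippet below is a minimal, hypothetical smoke test, not part of the patch: it assumes the extension has been built with boost and lp_solve unpacked as described, and that `'path/to/e_coli_core.json'` is replaced by a real model file. It only uses calls shown in the README (`MetabolicNetwork.from_json`, `PolytopeSampler`, `generate_steady_states`).

```python
# Hypothetical smoke test for an lp_solve-enabled build (not part of the patch).
# Assumes dingo was built from this branch and that the JSON model path below
# points to an existing file, e.g. the e_coli_core model used in the README.
from dingo import MetabolicNetwork, PolytopeSampler

model = MetabolicNetwork.from_json('path/to/e_coli_core.json')
sampler = PolytopeSampler(model)

# A short run is enough to confirm the compiled bindings load and sample.
steady_states = sampler.generate_steady_states(ess=1000)

# Steady states are returned column-wise in a numpy array (see README above).
print(steady_states.shape)
```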