From 5790d7d7639d4a35bf432c92f8d879298d843ca8 Mon Sep 17 00:00:00 2001 From: kapil-agnihotri Date: Wed, 4 Sep 2024 16:17:50 +0200 Subject: [PATCH] refactor: use model classes and also refactor original config implementation to use model classes --- .gitignore | 2 + configuration.json | 2 +- ...-robustness-on-weights-no-sensitivity.json | 34 ++ ...ata-input-sample-from-ranking-service.json | 34 ++ input_files/toy_example/car_data.csv | 2 +- mcda/configuration/config.py | 2 +- mcda/mcda_ranking_run.py | 170 +++++++ mcda/mcda_run.py | 170 ++----- mcda/mcda_with_robustness.py | 19 +- mcda/mcda_without_robustness.py | 11 +- mcda/utils/application_enums.py | 40 ++ mcda/utils/utils_for_main.py | 301 +++++++------ mcda/utils/utils_for_parallelization.py | 4 +- mcda/utils/utils_for_plotting.py | 2 +- models/__init__.py | 25 + models/base_model_.py | 69 +++ models/configuration.py | 218 +++++++++ models/configuration_modified_input.py | 218 +++++++++ ...ion_modified_input_monte_carlo_sampling.py | 101 +++++ ...configuration_modified_input_robustness.py | 132 ++++++ ...onfiguration_modified_input_sensitivity.py | 138 ++++++ models/configuration_monte_carlo_sampling.py | 101 +++++ models/configuration_robustness.py | 132 ++++++ models/configuration_sensitivity.py | 138 ++++++ models/convert_csv_to_json_body.py | 96 ++++ models/error.py | 96 ++++ models/exact_indicator_values.py | 66 +++ models/inline_response200.py | 64 +++ models/inline_response2001.py | 36 ++ models/inline_response2002.py | 36 ++ models/log_normal_indicator_values.py | 96 ++++ models/normal_indicator_values.py | 96 ++++ models/poisson_indicator_values.py | 66 +++ models/sensitivity_mcda.py | 151 +++++++ models/sensitivity_pro_mcda.py | 145 ++++++ models/sensitivity_ranked_alternatives.py | 37 ++ .../sensitivity_ranked_alternatives_inner.py | 426 ++++++++++++++++++ models/simple_mcda.py | 126 ++++++ models/simple_pro_mcda.py | 126 ++++++ models/uniform_indicator_values.py | 96 ++++ tests/unit_tests/test_aggregation.py | 4 +- tests/unit_tests/test_config.py | 2 +- tests/unit_tests/test_mcda_run.py | 18 +- tests/unit_tests/test_mcda_with_robustness.py | 41 +- .../test_mcda_without_robustness.py | 70 ++- tests/unit_tests/test_normalization.py | 4 +- tests/unit_tests/test_utils_for_main.py | 28 +- .../test_utils_for_parallelization.py | 16 +- 48 files changed, 3675 insertions(+), 332 deletions(-) create mode 100644 input_files/toy_example/car-data-from-ranking-service-robustness-on-weights-no-sensitivity.json create mode 100644 input_files/toy_example/car-data-input-sample-from-ranking-service.json create mode 100644 mcda/mcda_ranking_run.py create mode 100644 mcda/utils/application_enums.py create mode 100644 models/__init__.py create mode 100644 models/base_model_.py create mode 100644 models/configuration.py create mode 100644 models/configuration_modified_input.py create mode 100644 models/configuration_modified_input_monte_carlo_sampling.py create mode 100644 models/configuration_modified_input_robustness.py create mode 100644 models/configuration_modified_input_sensitivity.py create mode 100644 models/configuration_monte_carlo_sampling.py create mode 100644 models/configuration_robustness.py create mode 100644 models/configuration_sensitivity.py create mode 100644 models/convert_csv_to_json_body.py create mode 100644 models/error.py create mode 100644 models/exact_indicator_values.py create mode 100644 models/inline_response200.py create mode 100644 models/inline_response2001.py create mode 100644 models/inline_response2002.py 
create mode 100644 models/log_normal_indicator_values.py create mode 100644 models/normal_indicator_values.py create mode 100644 models/poisson_indicator_values.py create mode 100644 models/sensitivity_mcda.py create mode 100644 models/sensitivity_pro_mcda.py create mode 100644 models/sensitivity_ranked_alternatives.py create mode 100644 models/sensitivity_ranked_alternatives_inner.py create mode 100644 models/simple_mcda.py create mode 100644 models/simple_pro_mcda.py create mode 100644 models/uniform_indicator_values.py diff --git a/.gitignore b/.gitignore index a2eb120..bb9192e 100644 --- a/.gitignore +++ b/.gitignore @@ -1,3 +1,5 @@ .DS_Store .idea .ipynb_checkpoints/ +__pycache__/ +output_files \ No newline at end of file diff --git a/configuration.json b/configuration.json index 3549107..d1f6600 100644 --- a/configuration.json +++ b/configuration.json @@ -22,4 +22,4 @@ "output_directory_path": "toy_example" -} \ No newline at end of file +} diff --git a/input_files/toy_example/car-data-from-ranking-service-robustness-on-weights-no-sensitivity.json b/input_files/toy_example/car-data-from-ranking-service-robustness-on-weights-no-sensitivity.json new file mode 100644 index 0000000..aa19565 --- /dev/null +++ b/input_files/toy_example/car-data-from-ranking-service-robustness-on-weights-no-sensitivity.json @@ -0,0 +1,34 @@ +{ + "inputMatrix": { + "Fuel Efficiency":{"A":30.0,"B":25.0,"C":35.0,"D":40.0,"E":38.0,"F":40.0}, + "Safety Rating":{"A":5.0,"B":4.0,"C":5.0,"D":3.0,"E":1.0,"F":4.0}, + "Price":{"A":25000.0,"B":30000.0,"C":20000.0,"D":24000.0,"E":15000.0,"F":28000.0}, + "Cargo 'Space":{"A":15.0,"B":20.0,"C":10.0,"D":30.0,"E":10.0,"F":15.0}, + "Acceleration":{"A":8.0,"B":6.0,"C":10.0,"D":7.0,"E":10.0,"F":9.0}, + "Warranty":{"A":3.0,"B":5.0,"C":2.0,"D":1.0,"E":3.0,"F":4.0} + }, + "weights": [ + 0.8, 0.9, 0.5, 0.5, 0.1, 0.7 + ], + "polarity": [ + "+","+","-","+","+","+" + ], + "sensitivity": { + "sensitivityOn": "yes", + "normalization": "minmax", + "aggregation": "weighted_sum" + }, + "robustness": { + "robustness": "weights", + "onWeightsLevel": "all", + "givenWeights": [ + 0.8, 0.9, 0.5, 0.5, 0.1, 0.7 + ] + }, + "monteCarloSampling": { + "monteCarloRuns": 10000, + "marginalDistributions": [ + "normal", "normal", "normal", "normal", "normal", "normal" + ] + } +} \ No newline at end of file diff --git a/input_files/toy_example/car-data-input-sample-from-ranking-service.json b/input_files/toy_example/car-data-input-sample-from-ranking-service.json new file mode 100644 index 0000000..643d70a --- /dev/null +++ b/input_files/toy_example/car-data-input-sample-from-ranking-service.json @@ -0,0 +1,34 @@ +{ + "inputMatrix": { + "Fuel Efficiency":{"A":30.0,"B":25.0,"C":35.0,"D":40.0,"E":38.0,"F":40.0}, + "Safety Rating":{"A":5.0,"B":4.0,"C":5.0,"D":3.0,"E":1.0,"F":4.0}, + "Price":{"A":25000.0,"B":30000.0,"C":20000.0,"D":24000.0,"E":15000.0,"F":28000.0}, + "Cargo 'Space":{"A":15.0,"B":20.0,"C":10.0,"D":30.0,"E":10.0,"F":15.0}, + "Acceleration":{"A":8.0,"B":6.0,"C":10.0,"D":7.0,"E":10.0,"F":9.0}, + "Warranty":{"A":3.0,"B":5.0,"C":2.0,"D":1.0,"E":3.0,"F":4.0} + }, + "weights": [ + 0.8, 0.9, 0.5, 0.5, 0.1, 0.7 + ], + "polarity": [ + "+","+","-","+","+","+" + ], + "sensitivity": { + "sensitivityOn": "no", + "normalization": "minmax", + "aggregation": "weighted_sum" + }, + "robustness": { + "robustness": "none", + "onWeightsLevel": "none", + "givenWeights": [ + 0.8, 0.9, 0.5, 0.5, 0.1, 0.7 + ] + }, + "monteCarloSampling": { + "monteCarloRuns": 10000, + "marginalDistributions": [ + "normal", "normal", 
"normal", "normal", "normal", "normal" + ] + } +} \ No newline at end of file diff --git a/input_files/toy_example/car_data.csv b/input_files/toy_example/car_data.csv index d0fdf80..e695ae5 100644 --- a/input_files/toy_example/car_data.csv +++ b/input_files/toy_example/car_data.csv @@ -4,4 +4,4 @@ B,25,4,30000,20,6,5 C,35,5,20000,10,10,2 D,40,3,24000,30,7,1 E,38,1,15000,10,10,3 -F,40,4,28000,15,9,4 +F,40,4,28000,15,9,4 \ No newline at end of file diff --git a/mcda/configuration/config.py b/mcda/configuration/config.py index 7b69db4..40a0736 100644 --- a/mcda/configuration/config.py +++ b/mcda/configuration/config.py @@ -71,7 +71,7 @@ def __init__(self, input_config: dict): # keys_of_dict_values = self._keys_of_dict_values self._validate(input_config, valid_keys, str_values, - int_values, list_values, dict_values) + int_values, list_values, dict_values) self._config = copy.deepcopy(input_config) def _validate(self, input_config, valid_keys, str_values, int_values, list_values, dict_values): diff --git a/mcda/mcda_ranking_run.py b/mcda/mcda_ranking_run.py new file mode 100644 index 0000000..8b77834 --- /dev/null +++ b/mcda/mcda_ranking_run.py @@ -0,0 +1,170 @@ +#! /usr/bin/env python3 + +""" +This script serves as the main entry point for running all pieces of functionality in a consequential way by +following the settings given in the configuration file 'configuration.json'. + +Usage (from root directory): + $ python3 -m mcda.mcda_run -c configuration.json +""" + +import time +from ProMCDA.mcda.utils.application_enums import * +from ProMCDA.mcda.utils.utils_for_main import * +from ProMCDA.mcda.utils.utils_for_plotting import * +from ProMCDA.mcda.utils.utils_for_parallelization import * +from ProMCDA.models.configuration import Configuration + +log = logging.getLogger(__name__) + +FORMATTER: str = '%(levelname)s: %(asctime)s - %(name)s - %(message)s' +logging.basicConfig(stream=sys.stdout, level=logging.DEBUG, format=FORMATTER) +logger = logging.getLogger("ProMCDA") + +# randomly assign seed if not specified as environment variable +RANDOM_SEED = os.environ.get('random_seed') if os.environ.get('random_seed') else 67 +NUM_CORES = os.environ.get('num_cores') if os.environ.get('num_cores') else 1 + + +# noinspection PyTypeChecker +def main_using_model(input_config: dict) -> dict: + """ + Execute the ProMCDA (Probabilistic Multi-Criteria Decision Analysis) process. + + Parameters: + - input_config : Configuration parameters for the ProMCDA process. + + Raises: + - ValueError: If there are issues with the input matrix, weights, or indicators. + + This function performs the ProMCDA process based on the provided configuration. + It handles various aspects such as the sensitivity analysis and the robustness analysis. + The results are saved in output files, and plots are generated to visualize the scores and rankings. + + Note: Ensure that the input matrix, weights, polarities and indicators (with or without uncertainty) + are correctly specified in the input configuration. 
+ + :param input_config: dict + :return: None + """ + is_robustness_indicators = 0 + is_robustness_weights = 0 + f_norm = None + f_agg = None + marginal_pdf = [] + + # Extracting relevant configuration values + config = Configuration.from_dict(input_config) + input_matrix = pd.DataFrame(config.input_matrix) + index_column_name = input_matrix.index.name + index_column_values = input_matrix.index.tolist() + polar = config.polarity + robustness = config.robustness.robustness + mc_runs = config.monte_carlo_sampling.monte_carlo_runs + + f_agg, f_norm, is_robustness_indicators, is_robustness_weights, marginal_pdf = verify_input(config, f_agg, f_norm, + is_robustness_indicators, + is_robustness_weights, + marginal_pdf, mc_runs, + robustness) + + # Check the input matrix for duplicated rows in the alternatives, rescale negative indicator values and + # drop the column containing the alternatives + input_matrix_no_alternatives = check_input_matrix(input_matrix) + + if is_robustness_indicators == 0: + num_indicators = input_matrix_no_alternatives.shape[1] + # Process indicators and weights based on input parameters in the configuration + polar, weights = get_polar_and_weights(config, input_matrix_no_alternatives, is_robustness_indicators, + is_robustness_weights, mc_runs, num_indicators, polar) + return run_mcda_without_indicator_uncertainty(config, index_column_name, index_column_values, + input_matrix_no_alternatives, weights, f_norm, f_agg, + is_robustness_weights) + else: + num_non_exact_and_non_poisson = len(marginal_pdf) - marginal_pdf.count('exact') - marginal_pdf.count('poisson') + num_indicators = (input_matrix_no_alternatives.shape[1] - num_non_exact_and_non_poisson) + polar, weights = get_polar_and_weights(config, input_matrix_no_alternatives, is_robustness_indicators, + is_robustness_weights, mc_runs, num_indicators, polar) + return run_mcda_with_indicator_uncertainty(config, input_matrix_no_alternatives, index_column_name, + index_column_values, mc_runs, RANDOM_SEED, + config.sensitivity.sensitivity_on, f_agg, f_norm, + weights, polar, marginal_pdf) + + +def get_polar_and_weights(config, input_matrix_no_alternatives, is_robustness_indicators, is_robustness_weights, + mc_runs, num_indicators, polar): + # Process indicators and weights based on input parameters in the configuration + polar, weights = process_indicators_and_weights(config, input_matrix_no_alternatives, is_robustness_indicators, + is_robustness_weights, polar, mc_runs, num_indicators) + try: + check_indicator_weights_polarities(num_indicators, polar, config) + except ValueError as e: + logging.error(str(e), stack_info=True) + raise + return polar, weights + + +def verify_input(config, f_agg, f_norm, is_robustness_indicators, is_robustness_weights, marginal_pdf, mc_runs, + robustness): + # Check for sensitivity-related configuration errors + if config.sensitivity.sensitivity_on == SensitivityAnalysis.NO.value: + f_norm = config.sensitivity.normalization + f_agg = config.sensitivity.aggregation + check_valid_values(config.sensitivity.normalization, SensitivityNormalization, + 'The available normalization functions are: minmax, target, standardized, rank.') + check_valid_values(config.sensitivity.aggregation, SensitivityAggregation, + """The available aggregation functions are: weighted_sum, geometric, harmonic, minimum. 
+ Watch the correct spelling in the configuration file.""") + logger.info("ProMCDA will only use one pair of norm/agg functions: " + f_norm + '/' + f_agg) + else: + logger.info("ProMCDA will use a set of different pairs of norm/agg functions") + + # Check for robustness-related configuration errors + if robustness == RobustnessAnalysis.NONE.value: + logger.info("ProMCDA will run without uncertainty on the indicators or weights") + logger.info("Read input matrix without uncertainties!") + else: + check_config_error((config.robustness.robustness == RobustnessAnalysis.NONE.value and + config.robustness.on_weights_level != RobustnessWightLevels.NONE.value), + 'Robustness analysis is set to none but a weights level is specified! Please clarify.') + + check_config_error((config.robustness.robustness == RobustnessAnalysis.WEIGHTS.value and + config.robustness.on_weights_level == RobustnessWightLevels.NONE.value), + 'Robustness analysis is requested on the weights: but on all or single? Please clarify.') + + check_config_error((config.robustness.robustness == RobustnessAnalysis.INDICATORS.value and + config.robustness.on_weights_level != RobustnessWightLevels.NONE.value), + 'Robustness analysis is requested: but on weights or indicators? Please clarify.') + + # Check settings for robustness analysis on weights or indicators + if config.robustness.robustness == RobustnessAnalysis.WEIGHTS.value and config.robustness.on_weights_level != RobustnessWightLevels.NONE.value: + logger.info(f"""ProMCDA will consider uncertainty on the weights. + Number of Monte Carlo runs: {mc_runs} + The random seed used is: {RANDOM_SEED}""") + is_robustness_weights = 1 + + if config.robustness.robustness == RobustnessAnalysis.INDICATORS.value and config.robustness.on_weights_level == RobustnessWightLevels.NONE.value: + logger.info(f"""ProMCDA will consider uncertainty on the indicators.
+ Number of Monte Carlo runs: {mc_runs} + The random seed used is: {RANDOM_SEED}""") + is_robustness_indicators = 1 + + marginal_pdf = config.monte_carlo_sampling.marginal_distributions + logger.info("Read input matrix with uncertainty of the indicators!") + return f_agg, f_norm, is_robustness_indicators, is_robustness_weights, marginal_pdf + + +def read_matrix_from_file(column_names_list: list[str], file_from_stream) -> dict: + result_dict = utils_for_main.read_matrix_from_file(file_from_stream).to_dict() + if len(column_names_list) != len(result_dict.keys()): + return {"error": "Number of provided column names does not match the CSV columns"}, 400 + return {col: result_dict[col] for col in column_names_list if col in result_dict} + + +if __name__ == '__main__': + t = time.time() + config_path = parse_args() + input_config = get_config(config_path) + main_using_model(input_config) + elapsed = time.time() - t + logger.info("All calculations finished in seconds {}".format(elapsed)) diff --git a/mcda/mcda_run.py b/mcda/mcda_run.py index 6337882..29c8af9 100644 --- a/mcda/mcda_run.py +++ b/mcda/mcda_run.py @@ -7,14 +7,13 @@ Usage (from root directory): $ python3 -m mcda.mcda_run -c configuration.json """ - +import json import time -import logging -from mcda.configuration.config import Config -from mcda.utils.utils_for_main import * -from mcda.utils.utils_for_plotting import * -from mcda.utils.utils_for_parallelization import * +from ProMCDA.mcda import mcda_ranking_run +from ProMCDA.mcda.configuration.config import Config +from ProMCDA.mcda.utils.utils_for_main import * +from ProMCDA.mcda.utils.utils_for_parallelization import * log = logging.getLogger(__name__) @@ -22,6 +21,7 @@ logging.basicConfig(stream=sys.stdout, level=logging.DEBUG, format=FORMATTER) logger = logging.getLogger("ProMCDA") + # noinspection PyTypeChecker def main(input_config: dict): """ @@ -43,128 +43,54 @@ :param input_config: dict :return: None """ - is_sensitivity = None - is_robustness = None - is_robustness_indicators = 0 - is_robustness_weights = 0 - f_norm = None - f_agg = None - marginal_pdf = [] - num_unique = [] + ranking_input_config = config_dict_to_configuration_model(input_config) + mcda_ranking_run.main_using_model(ranking_input_config) - t = time.time() +def config_dict_to_configuration_model(input_config): # Extracting relevant configuration values config = Config(input_config) input_matrix = read_matrix(config.input_matrix_path) - index_column_name = input_matrix.index.name - index_column_values = input_matrix.index.tolist() - polar = config.polarity_for_each_indicator - is_sensitivity = config.sensitivity['sensitivity_on'] - is_robustness = config.robustness['robustness_on'] - mc_runs = config.monte_carlo_sampling["monte_carlo_runs"] - random_seed = config.monte_carlo_sampling["random_seed"] - - # Check for sensitivity-related configuration errors - if is_sensitivity == "no": - f_norm = config.sensitivity['normalization'] - f_agg = config.sensitivity['aggregation'] - check_config_error(f_norm not in ['minmax', 'target', 'standardized', 'rank'], - 'The available normalization functions are: minmax, target, standardized, rank.') - check_config_error(f_agg not in ['weighted_sum', 'geometric', 'harmonic', 'minimum'], - 'The available aggregation functions are: weighted_sum, geometric, harmonic, minimum.'
- '\nWatch the correct spelling in the configuration file.') - logger.info("ProMCDA will only use one pair of norm/agg functions: " + f_norm + '/' + f_agg) - else: - logger.info("ProMCDA will use a set of different pairs of norm/agg functions") - - # Check for robustness-related configuration errors - if is_robustness == "no": - logger.info("ProMCDA will run without uncertainty on the indicators or weights") - logger.info("Read input matrix without uncertainties at {}".format(config.input_matrix_path)) - else: - check_config_error((config.robustness["on_single_weights"] == "no" and - config.robustness["on_all_weights"] == "no" and - config.robustness["on_indicators"] == "no"), - 'Robustness analysis is requested but where is not defined: weights or indicators? Please clarify.') - - check_config_error((config.robustness["on_single_weights"] == "yes" and - config.robustness["on_all_weights"] == "yes" and - config.robustness["on_indicators"] == "no"), - 'Robustness analysis is requested on the weights: but on all or one at a time? Please clarify.') - - check_config_error(((config.robustness["on_single_weights"] == "yes" and - config.robustness["on_all_weights"] == "yes" and - config.robustness["on_indicators"] == "yes") or - (config.robustness["on_single_weights"] == "yes" and - config.robustness["on_all_weights"] == "no" and - config.robustness["on_indicators"] == "yes") or - (config.robustness["on_single_weights"] == "no" and - config.robustness["on_all_weights"] == "yes" and - config.robustness["on_indicators"] == "yes")), - 'Robustness analysis is requested: but on weights or indicators? Please clarify.') - - # Check seetings for robustness analysis on weights or indicators - condition_robustness_on_weights = ( - (config.robustness['on_single_weights'] == 'yes' and - config.robustness['on_all_weights'] == 'no' and - config.robustness['on_indicators'] == 'no') or - (config.robustness['on_single_weights'] == 'no' and - config.robustness['on_all_weights'] == 'yes' and - config.robustness['on_indicators'] == 'no') - ) - condition_robustness_on_indicators = ( - (config.robustness['on_single_weights'] == 'no' and - config.robustness['on_all_weights'] == 'no' and - config.robustness['on_indicators'] == 'yes') - ) - - - is_robustness_weights, is_robustness_indicators = \ - check_config_setting(condition_robustness_on_weights, - condition_robustness_on_indicators, - mc_runs, random_seed) - - marginal_pdf = config.monte_carlo_sampling["marginal_distribution_for_each_indicator"] - logger.info("Read input matrix with uncertainty of the indicators at {}".format( - config.input_matrix_path)) - - # Check the input matrix for duplicated rows in the alternatives, rescale negative indicator values and - # drop the column containing the alternatives - input_matrix_no_alternatives = check_input_matrix(input_matrix) - if is_robustness_indicators == 0: - num_indicators = input_matrix_no_alternatives.shape[1] - else: - num_non_exact_and_non_poisson = len(marginal_pdf) - marginal_pdf.count('exact') - marginal_pdf.count('poisson') - num_indicators = (input_matrix_no_alternatives.shape[1] - num_non_exact_and_non_poisson) - - # Process indicators and weights based on input parameters in the configuration - polar, weights = process_indicators_and_weights(config, input_matrix_no_alternatives, is_robustness_indicators, - is_robustness_weights, polar, mc_runs, num_indicators) - - # Check the number of indicators, weights, and polarities - try: - check_indicator_weights_polarities(num_indicators, polar, config) - except 
ValueError as e: - logging.error(str(e), stack_info=True) - raise - - # If there is no uncertainty of the indicators: - if is_robustness_indicators == 0: - run_mcda_without_indicator_uncertainty(input_config, index_column_name, index_column_values, - input_matrix_no_alternatives, weights, f_norm, f_agg, - is_robustness_weights) - # else (i.e. there is uncertainty): - else: - run_mcda_with_indicator_uncertainty(input_config, input_matrix_no_alternatives, index_column_name, - index_column_values, mc_runs, random_seed, is_sensitivity, f_agg, f_norm, - weights, polar, marginal_pdf) - - logger.info("ProMCDA finished calculations: check the output files") - elapsed = time.time() - t - logger.info("All calculations finished in seconds {}".format(elapsed)) + # input_matrix = input_matrix.dropna() + robustness = "none" + on_weights_level = "none" + if config.robustness["robustness_on"] == "yes" and config.robustness["on_single_weights"] == "yes": + robustness = "weights" + on_weights_level = "single" + elif config.robustness["robustness_on"] == "yes" and config.robustness["on_all_weights"] == "yes": + robustness = "weights" + on_weights_level = "all" + elif config.robustness["robustness_on"] == "yes" and config.robustness["on_indicators"] == "yes": + robustness = "indicators" + input_json = input_matrix.to_json() + os.environ['NUM_CORES'] = input_config["monte_carlo_sampling"]["num_cores"] + os.environ['RANDOM_SEED'] = input_config["monte_carlo_sampling"]["random_seed"] + ranking_input_config = { + "inputMatrix": input_matrix, + "weights": input_config["robustness"]["given_weights"], + "polarity": config.polarity_for_each_indicator, + "sensitivity": { + "sensitivityOn": input_config["sensitivity"]["sensitivity_on"], + "normalization": input_config["sensitivity"]["normalization"], + "aggregation": input_config["sensitivity"]["aggregation"] + }, + "robustness": { + "robustness": robustness, + "onWeightsLevel": on_weights_level, + "givenWeights": config.robustness["given_weights"] + }, + "monteCarloSampling": { + "monteCarloRuns": config.monte_carlo_sampling["monte_carlo_runs"], + "marginalDistributions": config.monte_carlo_sampling["marginal_distribution_for_each_indicator"] + } + } + return ranking_input_config + if __name__ == '__main__': + t = time.time() config_path = parse_args() input_config = get_config(config_path) - main(input_config=input_config) + main(input_config) + elapsed = time.time() - t + logger.info("All calculations finished in seconds {}".format(elapsed)) diff --git a/mcda/mcda_with_robustness.py b/mcda/mcda_with_robustness.py index 449d0cb..02f4c50 100644 --- a/mcda/mcda_with_robustness.py +++ b/mcda/mcda_with_robustness.py @@ -6,7 +6,8 @@ import pandas as pd import numpy as np -from mcda.configuration.config import Config +from ProMCDA.mcda.utils.application_enums import MonteCarloMarginalDistributions +from ProMCDA.models.configuration import Configuration log = logging.getLogger(__name__) @@ -29,7 +30,7 @@ class MCDAWithRobustness: """ - def __init__(self, config: Config, input_matrix: pd.DataFrame(), is_exact_pdf_mask=None, is_poisson_pdf_mask=None, + def __init__(self, config: Configuration, input_matrix: pd.DataFrame(), is_exact_pdf_mask=None, is_poisson_pdf_mask=None, random_seed=None): self.is_exact_pdf_mask = is_exact_pdf_mask self.is_poisson_pdf_mask = is_poisson_pdf_mask @@ -99,8 +100,8 @@ def create_n_randomly_sampled_matrices(self) -> List[pd.DataFrame]: :return list_random_matrix: List[pd.DataFrame] """ - marginal_pdf = 
self._config.monte_carlo_sampling["marginal_distribution_for_each_indicator"] - num_runs = self._config.monte_carlo_sampling["monte_carlo_runs"] # N + marginal_pdf = self._config.monte_carlo_sampling.marginal_distributions + num_runs = self._config.monte_carlo_sampling.monte_carlo_runs input_matrix = self._input_matrix # (AxnI) is_exact_pdf_mask = self.is_exact_pdf_mask is_poisson_pdf_mask = self.is_poisson_pdf_mask @@ -136,19 +137,19 @@ def create_n_randomly_sampled_matrices(self) -> List[pd.DataFrame]: distribution_type = marginal_pdf[i] - if distribution_type == 'exact': + if distribution_type == MonteCarloMarginalDistributions.EXACT.value: samples = self.repeat_series_to_create_df( parameter1, num_runs).T - elif distribution_type == 'normal': + elif distribution_type == MonteCarloMarginalDistributions.NORMAL.value: samples = np.random.normal( loc=parameter1, scale=parameter2, size=(num_runs, len(parameter1))) - elif distribution_type == 'uniform': + elif distribution_type == MonteCarloMarginalDistributions.UNIFORM.value: samples = np.random.uniform( low=parameter1, high=parameter2, size=(num_runs, len(parameter1))) - elif distribution_type == 'lnorm': + elif distribution_type == MonteCarloMarginalDistributions.LOGNORMAL.value: samples = np.random.lognormal( mean=parameter1, sigma=parameter2, size=(num_runs, len(parameter1))) - elif distribution_type == 'poisson': + elif distribution_type == MonteCarloMarginalDistributions.POISSON.value: samples = np.random.poisson( lam=parameter1, size=(num_runs, len(parameter1))) else: diff --git a/mcda/mcda_without_robustness.py b/mcda/mcda_without_robustness.py index 2eb52a4..458c025 100644 --- a/mcda/mcda_without_robustness.py +++ b/mcda/mcda_without_robustness.py @@ -3,9 +3,10 @@ import logging import pandas as pd -from mcda.configuration.config import Config -from mcda.mcda_functions.normalization import Normalization -from mcda.mcda_functions.aggregation import Aggregation +from ProMCDA.mcda.configuration.config import Config +from ProMCDA.mcda.mcda_functions.normalization import Normalization +from ProMCDA.mcda.mcda_functions.aggregation import Aggregation +from ProMCDA.models.configuration import Configuration log = logging.getLogger(__name__) @@ -23,7 +24,7 @@ class MCDAWithoutRobustness: However, it's possible to have randomly sampled weights. """ - def __init__(self, config: Config, input_matrix: pd.DataFrame): + def __init__(self, config: Configuration, input_matrix: pd.DataFrame): self.normalized_indicators = None self.weights = None self._config = copy.deepcopy(config) @@ -50,7 +51,7 @@ def normalize_indicators(self, method=None) -> dict: and another with the range (0.1, +inf). 
""" norm = Normalization(self._input_matrix, - self._config.polarity_for_each_indicator) + self._config.polarity) normalized_indicators = {} diff --git a/mcda/utils/application_enums.py b/mcda/utils/application_enums.py new file mode 100644 index 0000000..ba7dda0 --- /dev/null +++ b/mcda/utils/application_enums.py @@ -0,0 +1,40 @@ +from enum import Enum + + +class RobustnessAnalysis(Enum): + NONE = "none" + INDICATORS = "indicators" + WEIGHTS = "weights" + + +class RobustnessWightLevels(Enum): + NONE = "none" + ALL = "all" + SINGLE = "single" + + +class SensitivityAnalysis(Enum): + YES = "yes" + NO = "no" + + +class SensitivityNormalization(Enum): + MINMAX = "minmax" + STANDARDIZED = "standardized" + TARGET = "target" + rank = "RANK" + + +class SensitivityAggregation(Enum): + WEIGHTED_SUM = "weighted_sum" + GEOMETRIC = "geometric" + HARMONIC = "harmonic" + MINIMUM = "minimum" + + +class MonteCarloMarginalDistributions(Enum): + NORMAL = "normal" + LOGNORMAL = "lognormal" + EXACT = "exact" + POISSON = "poisson" + UNIFORM = "uniform" \ No newline at end of file diff --git a/mcda/utils/utils_for_main.py b/mcda/utils/utils_for_main.py index ea6f789..3aad3ab 100644 --- a/mcda/utils/utils_for_main.py +++ b/mcda/utils/utils_for_main.py @@ -1,4 +1,5 @@ - +import copy +import io import os import argparse import json @@ -6,6 +7,7 @@ import random import logging import sys +from pprint import pprint from typing import Union, Any, List, Tuple from typing import Optional @@ -15,20 +17,20 @@ from sklearn import preprocessing from sklearn.preprocessing import MinMaxScaler -import mcda.utils.utils_for_parallelization as utils_for_parallelization -import mcda.utils.utils_for_plotting as utils_for_plotting -from mcda.configuration.config import Config -from mcda.mcda_without_robustness import MCDAWithoutRobustness -from mcda.mcda_with_robustness import MCDAWithRobustness +import ProMCDA.mcda.utils.utils_for_parallelization as utils_for_parallelization +import ProMCDA.mcda.utils.utils_for_plotting as utils_for_plotting +from ProMCDA.mcda.mcda_without_robustness import MCDAWithoutRobustness +from ProMCDA.mcda.mcda_with_robustness import MCDAWithRobustness +from ProMCDA.mcda.utils.application_enums import RobustnessAnalysis, RobustnessWightLevels, SensitivityAnalysis +from ProMCDA.models.configuration import Configuration DEFAULT_INPUT_DIRECTORY_PATH = './input_files' # present in the root directory of ProMCDA DEFAULT_OUTPUT_DIRECTORY_PATH = './output_files' # present in the root directory of ProMCDA -CUSTOM_INPUT_PATH = os.environ.get('PROMCDA_INPUT_DIRECTORY_PATH') # check if an environmental variable is set -CUSTOM_OUTPUT_PATH = os.environ.get('PROMCDA_OUTPUT_DIRECTORY_PATH') # check if an environmental variable is set - -input_directory_path = CUSTOM_INPUT_PATH if CUSTOM_INPUT_PATH else DEFAULT_INPUT_DIRECTORY_PATH -output_directory_path = CUSTOM_OUTPUT_PATH if CUSTOM_OUTPUT_PATH else DEFAULT_OUTPUT_DIRECTORY_PATH +input_directory_path = os.environ.get('PROMCDA_INPUT_DIRECTORY_PATH') if os.environ.get( + 'PROMCDA_INPUT_DIRECTORY_PATH') else DEFAULT_INPUT_DIRECTORY_PATH +output_directory_path = os.environ.get('PROMCDA_OUTPUT_DIRECTORY_PATH') if os.environ.get( + 'PROMCDA_OUTPUT_DIRECTORY_PATH') else DEFAULT_OUTPUT_DIRECTORY_PATH log = logging.getLogger(__name__) @@ -39,6 +41,14 @@ logger = logging.getLogger("ProMCDA") +def check_valid_values(value, enum_class, error_message): + try: + enum_class(value) + except: + logger.error('Error Message', stack_info=True) + raise ValueError(error_message) + + def 
check_config_error(condition: bool, error_message: str): """ Check a condition and raise a ValueError with a specified error message if the condition is True. @@ -60,46 +70,7 @@ def check_config_error(condition: bool, error_message: str): raise ValueError(error_message) -def check_config_setting(condition_robustness_on_weights: bool, condition_robustness_on_indicators: bool, mc_runs: int, - random_seed: int) -> (int, int): - """ - Checks configuration settings and logs information messages. - - Returns: - - is_robustness_weights, is_robustness_indicators, booleans indicating if robustness is considered - on weights or indicators. - - Example: - ```python - is_robustness_weights, is_robustness_indicators = check_config_setting(True, False, 1000, 42) - ``` - - :param condition_robustness_on_weights: bool - :param condition_robustness_on_indicators: bool - :param mc_runs: int - :param random_seed: int - :return: (is_robustness_weights, is_robustness_indicators) - :rtype: Tuple[int, int] - """ - is_robustness_weights = 0 - is_robustness_indicators = 0 - - if condition_robustness_on_weights: - logger.info("ProMCDA will consider uncertainty on the weights.") - logger.info("Number of Monte Carlo runs: {}".format(mc_runs)) - logger.info("The random seed used is: {}".format(random_seed)) - is_robustness_weights = 1 - - elif condition_robustness_on_indicators: - logger.info("ProMCDA will consider uncertainty on the indicators.") - logger.info("Number of Monte Carlo runs: {}".format(mc_runs)) - logger.info("The random seed used is: {}".format(random_seed)) - is_robustness_indicators = 1 - - return is_robustness_weights, is_robustness_indicators - - -def process_indicators_and_weights(config: dict, input_matrix: pd.DataFrame, +def process_indicators_and_weights(config: Configuration, input_matrix: pd.DataFrame, is_robustness_indicators: int, is_robustness_weights: int, polar: List[str], mc_runs: int, num_indicators: int) \ -> Tuple[List[str], Union[list, List[list], dict]]: @@ -171,10 +142,11 @@ def process_indicators_and_weights(config: dict, input_matrix: pd.DataFrame, def _handle_polarities_and_weights(is_robustness_indicators: int, is_robustness_weights: int, num_unique, - col_to_drop_indexes: np.ndarray, polar: List[str], config: dict, mc_runs: int, + col_to_drop_indexes: np.ndarray, polar: List[str], config: Configuration, + mc_runs: int, num_indicators: int) \ -> Union[Tuple[List[str], list, None, None], Tuple[List[str], None, List[List], None], - Tuple[List[str], None, None, dict]]: + Tuple[List[str], None, None, dict]]: """ Manage polarities and weights based on the specified robustness settings, ensuring that the appropriate adjustments and normalizations are applied before returning the necessary data structures. 
@@ -190,7 +162,7 @@ def _handle_polarities_and_weights(is_robustness_indicators: int, is_robustness_ # Managing weights if is_robustness_weights == 0: - fixed_weights = config.robustness["given_weights"] + fixed_weights = config.robustness.given_weights if any(value == 1 for value in num_unique): fixed_weights = pop_indexed_elements(col_to_drop_indexes, fixed_weights) norm_fixed_weights = check_norm_sum_weights(fixed_weights) @@ -209,7 +181,7 @@ def _handle_polarities_and_weights(is_robustness_indicators: int, is_robustness_ # Return None for norm_fixed_weights and one of the other two cases of randomness -def _handle_robustness_weights(config: dict, mc_runs: int, num_indicators: int) \ +def _handle_robustness_weights(config: Configuration, mc_runs: int, num_indicators: int) \ -> Tuple[Union[List[list], None], Union[dict, None]]: """ Handle the generation and normalization of random weights based on the specified settings @@ -222,20 +194,17 @@ def _handle_robustness_weights(config: dict, mc_runs: int, num_indicators: int) logger.error('Error Message', stack_info=True) raise ValueError('The number of MC runs should be larger than 0 for a robustness analysis') - if config.robustness["on_single_weights"] == "no" and config.robustness["on_all_weights"] == "yes": + if (config.robustness.robustness == RobustnessAnalysis.WEIGHTS.value + and config.robustness.on_weights_level == RobustnessWightLevels.ALL.value): random_weights = randomly_sample_all_weights(num_indicators, mc_runs) - for weights in random_weights: - weights = check_norm_sum_weights(weights) - norm_random_weights.append(weights) + norm_random_weights = [check_norm_sum_weights(weights) for weights in random_weights] return norm_random_weights, None # Return norm_random_weights, and None for rand_weight_per_indicator - elif config.robustness["on_single_weights"] == "yes" and config.robustness["on_all_weights"] == "no": + elif (config.robustness.robustness == RobustnessAnalysis.WEIGHTS.value + and config.robustness.on_weights_level == RobustnessWightLevels.SINGLE.value): i = 0 while i < num_indicators: random_weights = randomly_sample_ix_weight(num_indicators, i, mc_runs) - norm_random_weight = [] - for weights in random_weights: - weights = check_norm_sum_weights(weights) - norm_random_weight.append(weights) + norm_random_weight = [check_norm_sum_weights(weights) for weights in random_weights] rand_weight_per_indicator["indicator_{}".format(i + 1)] = norm_random_weight i += 1 return None, rand_weight_per_indicator # Return None for norm_random_weights, and rand_weight_per_indicator @@ -258,7 +227,7 @@ def _handle_no_robustness_indicators(input_matrix: pd.DataFrame): logger.info("Number of indicators: {}".format(num_indicators)) -def check_indicator_weights_polarities(num_indicators: int, polar: List[str], config: dict): +def check_indicator_weights_polarities(num_indicators: int, polar: List[str], config: Configuration): """ Check the consistency of indicators, polarities, and fixed weights in a configuration. @@ -284,8 +253,8 @@ def check_indicator_weights_polarities(num_indicators: int, polar: List[str], co raise ValueError('The number of polarities does not correspond to the no. 
of indicators') # Check the number of fixed weights if "on_all_weights" is set to "no" - if (config.robustness["on_all_weights"] == "no") and ( - num_indicators != len(config.robustness["given_weights"])): + if (config.robustness.on_weights_level != RobustnessWightLevels.ALL.value and + num_indicators != len(config.robustness.given_weights)): raise ValueError('The no. of fixed weights does not correspond to the no. of indicators') @@ -345,6 +314,33 @@ def ensure_directory_exists(path): raise # Re-raise the exception to propagate it to the caller +def read_matrix_from_file(file_from_stream) -> pd.DataFrame: + """ + Read an input file from a stream and return it as a DataFrame. + Set the 'Alternatives' column as index column. + + Note: a default path is assigned to the input_matrix_path in mcda_run.py, and it is used + unless a custom path is set in an environmental variable in the environment. + + Parameters: + - input_matrix_path (str): path to the CSV file containing the input matrix. + + Raises: + - Exception: If an error occurs during the file reading or DataFrame creation. + + :param file_from_stream: str + :rtype: pd.DataFrame + """ + try: + matrix = pd.read_csv(io.StringIO(file_from_stream.stream.read().decode("utf-8")), sep="[,;:]", decimal='.', engine='python') + data_types = {col: 'float64' for col in matrix.columns[1:]} + matrix = matrix.astype(data_types) + alternatives_column_name = matrix.columns[0] + matrix = matrix.set_index(alternatives_column_name) + return matrix + except Exception as e: + print(e) + def read_matrix(input_matrix_path: str) -> pd.DataFrame: """ @@ -443,7 +439,7 @@ def get_config(config_path: str) -> dict: print(e) -def save_df(df: pd.DataFrame, folder_path: str, filename: str): +def save_df(df: pd.DataFrame, folder_path: str, filename: str) -> {}: """ Save a DataFrame to a CSV file with a timestamped filename. @@ -484,10 +480,13 @@ def save_df(df: pd.DataFrame, folder_path: str, filename: str): try: df.to_csv(path_or_buf=full_output_path, index=False) + df_modified = df.set_index(df.columns[0]) + return df_modified[df.columns[1]].to_dict() except IOError as e: logging.error(f"Error while writing data frame into a CSV file: {e}") -def save_dict(dictionary: dict, folder_path: str, filename: str): + +def save_dict(dictionary: dict, folder_path: str, filename: str) -> {}: """ Save a dictionary to a binary file using pickle with a timestamped filename. @@ -529,7 +528,7 @@ def save_dict(dictionary: dict, folder_path: str, filename: str): logging.error(f"Error while dumping the dictionary into a pickle file: {e}") -def save_config(config: dict, folder_path: str, filename: str): +def save_config(config: Configuration, folder_path: str, filename: str): """ Save a configuration dictionary to a JSON file with a timestamped filename. 
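A hypothetical usage sketch for the read_matrix_from_file stream helper added to utils_for_main above: it expects an uploaded-file object exposing a binary .stream (for example a Flask/werkzeug FileStorage); the route, app wiring and form-field name below are assumptions for illustration only.

    # Illustrative only: feed an uploaded CSV to the stream-based reader.
    from flask import Flask, request
    import ProMCDA.mcda.utils.utils_for_main as utils_for_main

    app = Flask(__name__)

    @app.route("/matrix", methods=["POST"])
    def upload_matrix():
        uploaded_file = request.files["matrix"]  # werkzeug FileStorage, exposes the .stream read by the helper
        matrix = utils_for_main.read_matrix_from_file(uploaded_file)  # DataFrame indexed by the alternatives column
        return matrix.to_dict()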
@@ -567,7 +566,7 @@ def save_config(config: dict, folder_path: str, filename: str): try: with open(full_output_path, 'w') as fp: - json.dump(config, fp) + json.dump(config.to_dict(), fp) except IOError as e: logging.error(f"Error while dumping the configuration into a JSON file: {e}") @@ -731,7 +730,8 @@ def pop_indexed_elements(indexes: np.ndarray, original_list: list) -> list: return new_list -def check_parameters_pdf(input_matrix: pd.DataFrame, config: dict, for_testing=False) -> Union[List[bool], None]: +def check_parameters_pdf(input_matrix: pd.DataFrame, config: Configuration, for_testing=False) -> Union[ + List[bool], None]: """ Check conditions on parameters based on the type of probability distribution function (PDF) for each indicator and raise logging information in case of any problem. @@ -756,12 +756,10 @@ def check_parameters_pdf(input_matrix: pd.DataFrame, config: dict, for_testing=F :param for_testing: bool :return: Union[list, None] """ - config = Config(config) - satisfies_condition = False problem_logged = False - marginal_pdf = config.monte_carlo_sampling["marginal_distribution_for_each_indicator"] + marginal_pdf = config.monte_carlo_sampling.marginal_distributions is_exact_pdf_mask = check_if_pdf_is_exact(marginal_pdf) is_poisson_pdf_mask = check_if_pdf_is_poisson(marginal_pdf) is_uniform_pdf_mask = check_if_pdf_is_uniform(marginal_pdf) @@ -877,10 +875,10 @@ def check_if_pdf_is_uniform(marginal_pdf: list) -> list: return uniform_pdf_mask -def run_mcda_without_indicator_uncertainty(input_config: dict, index_column_name: str, index_column_values: list, +def run_mcda_without_indicator_uncertainty(config: Configuration, index_column_name: str, index_column_values: list, input_matrix: pd.DataFrame, weights: Union[list, List[list], dict], - f_norm: str, f_agg: str, is_robustness_weights: int): + f_norm: str, f_agg: str, is_robustness_weights: int) -> dict: """ Runs ProMCDA without uncertainty on the indicators, i.e. without performing a robustness analysis. 
@@ -911,33 +909,35 @@ def run_mcda_without_indicator_uncertainty(input_config: dict, index_column_name all_weights_score_stds = pd.DataFrame all_weights_score_means_normalized = pd.DataFrame iterative_random_w_score_means_normalized = {} - iterative_random_weights_statistics = {} iterative_random_w_score_means = {} iterative_random_w_score_stds = {} logger.info("Start ProMCDA without robustness of the indicators") - config = Config(input_config) - is_sensitivity = config.sensitivity['sensitivity_on'] - is_robustness = config.robustness['robustness_on'] + is_sensitivity = config.sensitivity.sensitivity_on == SensitivityAnalysis.YES.value + is_robustness = config.robustness.robustness != RobustnessAnalysis.NONE.value mcda_no_uncert = MCDAWithoutRobustness(config, input_matrix) normalized_indicators = mcda_no_uncert.normalize_indicators() if is_sensitivity == "yes" \ else mcda_no_uncert.normalize_indicators(f_norm) - if is_robustness == "no": + if not is_robustness: scores = mcda_no_uncert.aggregate_indicators(normalized_indicators, weights) \ - if is_sensitivity == "yes" \ + if is_sensitivity \ else mcda_no_uncert.aggregate_indicators(normalized_indicators, weights, f_agg) normalized_scores = rescale_minmax(scores) - elif config.robustness["on_all_weights"] == "yes" and config.robustness["robustness_on"] == "yes": + elif (config.robustness.robustness == RobustnessAnalysis.WEIGHTS.value + and config.robustness.on_weights_level == RobustnessWightLevels.ALL.value): # ALL RANDOMLY SAMPLED WEIGHTS (MCDA runs num_samples times) all_weights_score_means, all_weights_score_stds, \ all_weights_score_means_normalized, all_weights_score_stds_normalized = \ - _compute_scores_for_all_random_weights(normalized_indicators, is_sensitivity, weights, f_agg) - elif (config.robustness["on_single_weights"] == "yes") and (config.robustness["robustness_on"] == "yes"): + _compute_scores_for_all_random_weights(normalized_indicators, config.sensitivity.sensitivity_on, weights, + f_agg) + elif (config.robustness.robustness == RobustnessAnalysis.WEIGHTS.value + and config.robustness.on_weights_level == RobustnessWightLevels.SINGLE.value): # ONE RANDOMLY SAMPLED WEIGHT A TIME (MCDA runs (num_samples * num_indicators) times) - iterative_random_weights_statistics: dict = _compute_scores_for_single_random_weight( - normalized_indicators, weights, is_sensitivity, index_column_name, index_column_values, f_agg, input_matrix) + iterative_random_weights_statistics = _compute_scores_for_single_random_weight( + normalized_indicators, weights, config.sensitivity.sensitivity_on, index_column_name, index_column_values, + f_agg, input_matrix) iterative_random_w_score_means = iterative_random_weights_statistics['score_means'] iterative_random_w_score_stds = iterative_random_weights_statistics['score_stds'] iterative_random_w_score_means_normalized = iterative_random_weights_statistics['score_means_normalized'] @@ -945,14 +945,14 @@ def run_mcda_without_indicator_uncertainty(input_config: dict, index_column_name ranks = _compute_ranks(scores, index_column_name, index_column_values, all_weights_score_means, iterative_random_w_score_means) - _save_output_files(scores=scores, normalized_scores=normalized_scores, ranks=ranks, - score_means=all_weights_score_means, score_stds=all_weights_score_stds, - score_means_normalized=all_weights_score_means_normalized, - iterative_random_w_score_means=iterative_random_w_score_means, - iterative_random_w_score_means_normalized=iterative_random_w_score_means_normalized, - 
iterative_random_w_score_stds=iterative_random_w_score_stds, - index_column_name=index_column_name, index_column_values=index_column_values, - input_config=input_config) + response = _save_output_files(scores=scores, normalized_scores=normalized_scores, ranks=ranks, + score_means=all_weights_score_means, score_stds=all_weights_score_stds, + score_means_normalized=all_weights_score_means_normalized, + iterative_random_w_score_means=iterative_random_w_score_means, + iterative_random_w_score_means_normalized=iterative_random_w_score_means_normalized, + iterative_random_w_score_stds=iterative_random_w_score_stds, + index_column_name=index_column_name, index_column_values=index_column_values, + config=config) _plot_and_save_charts(scores=scores, normalized_scores=normalized_scores, score_means=all_weights_score_means, score_stds=all_weights_score_stds, @@ -960,14 +960,15 @@ def run_mcda_without_indicator_uncertainty(input_config: dict, index_column_name iterative_random_w_score_means=iterative_random_w_score_means, iterative_random_w_score_stds=iterative_random_w_score_stds, iterative_random_w_score_means_normalized=iterative_random_w_score_means_normalized, - input_matrix=input_matrix, config=input_config, + input_matrix=input_matrix, config=config, is_robustness_weights=is_robustness_weights) + return response -def run_mcda_with_indicator_uncertainty(input_config: dict, input_matrix: pd.DataFrame, index_column_name: str, +def run_mcda_with_indicator_uncertainty(config: Configuration, input_matrix: pd.DataFrame, index_column_name: str, index_column_values: list, mc_runs: int, random_seed: int, is_sensitivity: str, f_agg: str, f_norm: str, weights: Union[List[list], List[pd.DataFrame], dict], - polar: List[str], marginal_pdf: List[str]) -> None: + polar: List[str], marginal_pdf: List[str]) -> dict: """ Runs ProMCDA with uncertainty on the indicators, i.e. with a robustness analysis. 
@@ -995,7 +996,6 @@ def run_mcda_with_indicator_uncertainty(input_config: dict, input_matrix: pd.Dat :return: None """ logger.info("Start ProMCDA with uncertainty on the indicators") - config = Config(input_config) is_robustness_indicators = True all_indicators_scores_normalized = [] @@ -1007,7 +1007,7 @@ def run_mcda_with_indicator_uncertainty(input_config: dict, input_matrix: pd.Dat logger.info("The number of Monte-Carlo runs is only {}".format(mc_runs)) logger.info("A meaningful number of Monte-Carlo runs is equal or larger than 1000") - check_parameters_pdf(input_matrix, input_config) + check_parameters_pdf(input_matrix, config) is_exact_pdf_mask = check_if_pdf_is_exact(marginal_pdf) is_poisson_pdf_mask = check_if_pdf_is_poisson(marginal_pdf) @@ -1015,9 +1015,11 @@ def run_mcda_with_indicator_uncertainty(input_config: dict, input_matrix: pd.Dat n_random_input_matrices = mcda_with_uncert.create_n_randomly_sampled_matrices() if is_sensitivity == "yes": - n_normalized_input_matrices = utils_for_parallelization.parallelize_normalization(n_random_input_matrices, polar) + n_normalized_input_matrices = utils_for_parallelization.parallelize_normalization(n_random_input_matrices, + polar) else: - n_normalized_input_matrices = utils_for_parallelization.parallelize_normalization(n_random_input_matrices, polar, f_norm) + n_normalized_input_matrices = utils_for_parallelization.parallelize_normalization(n_random_input_matrices, + polar, f_norm) args_for_parallel_agg = [(weights, normalized_indicators) for normalized_indicators in n_normalized_input_matrices] @@ -1038,16 +1040,16 @@ def run_mcda_with_indicator_uncertainty(input_config: dict, input_matrix: pd.Dat ranks = all_indicators_scores_means.rank(pct=True) - _save_output_files(scores=None, normalized_scores=None, - ranks=ranks, - score_means=all_indicators_scores_means, - score_stds=all_indicators_scores_stds, - score_means_normalized=all_indicators_means_scores_normalized, - iterative_random_w_score_means=None, - iterative_random_w_score_means_normalized=None, - iterative_random_w_score_stds=None, - input_config=input_config, - index_column_name=index_column_name, index_column_values=index_column_values) + response = _save_output_files(scores=None, normalized_scores=None, + ranks=ranks, + score_means=all_indicators_scores_means, + score_stds=all_indicators_scores_stds, + score_means_normalized=all_indicators_means_scores_normalized, + iterative_random_w_score_means=None, + iterative_random_w_score_means_normalized=None, + iterative_random_w_score_stds=None, + config=config, + index_column_name=index_column_name, index_column_values=index_column_values) _plot_and_save_charts(scores=None, normalized_scores=None, score_means=all_indicators_scores_means, score_stds=all_indicators_scores_stds, @@ -1055,9 +1057,11 @@ def run_mcda_with_indicator_uncertainty(input_config: dict, input_matrix: pd.Dat iterative_random_w_score_means=None, iterative_random_w_score_stds=None, iterative_random_w_score_means_normalized=None, - input_matrix=input_matrix, config=input_config, + input_matrix=input_matrix, config=config, is_robustness_indicators=is_robustness_indicators) + return response + def _compute_scores_for_all_random_weights(indicators: dict, is_sensitivity: str, weights: Union[List[str], List[pd.DataFrame], dict, None], @@ -1182,39 +1186,57 @@ def _save_output_files(scores: Optional[pd.DataFrame], iterative_random_w_score_means: Optional[dict], iterative_random_w_score_stds: Optional[pd.DataFrame], iterative_random_w_score_means_normalized: 
Optional[dict], - input_config: dict, + config: Configuration, index_column_name: str, - index_column_values: list) -> None: + index_column_values: list) -> dict: """ Save output files based of the computed scores, ranks, and configuration data. """ - config = Config(input_config) - full_output_path = os.path.join(output_directory_path, config.output_file_path) - logger.info("Saving results in {}".format(full_output_path)) - check_path_exists(config.output_file_path) - + output_filepath = "toy_example" + # full_output_path = os.path.join(output_directory_path, output_filepath ) + logger.info("Saving results in {}".format(output_filepath)) + # check_path_exists(output_filepath) + response = {} if scores is not None and not scores.empty: scores.insert(0, index_column_name, index_column_values) normalized_scores.insert(0, index_column_name, index_column_values) ranks.insert(0, index_column_name, index_column_values) + scores_response = save_df(scores, output_filepath, 'scores.csv') + normalized_response = save_df(normalized_scores, output_filepath, 'normalized_scores.csv') + ranks_response = save_df(ranks, output_filepath, 'ranks.csv') + response = { + "rawScores": scores_response, + "normalizedScores": normalized_response, + "ranks": ranks_response + } - save_df(scores, config.output_file_path, 'scores.csv') - save_df(normalized_scores, config.output_file_path, 'normalized_scores.csv') - save_df(ranks, config.output_file_path, 'ranks.csv') elif score_means is not None and not score_means.empty: score_means.insert(0, index_column_name, index_column_values) score_stds.insert(0, index_column_name, index_column_values) score_means_normalized.insert(0, index_column_name, index_column_values) - save_df(score_means, config.output_file_path, 'score_means.csv') - save_df(score_stds, config.output_file_path, 'score_stds.csv') - save_df(score_means_normalized, config.output_file_path, 'score_means_normalized.csv') + raw_scores_averages = save_df(score_means, output_filepath, 'score_means.csv') + raw_scores_standard_deviations = save_df(score_stds, output_filepath, 'score_stds.csv') + normalized_scores_averages = save_df(score_means_normalized, output_filepath, 'score_means_normalized.csv') + response = { + "rawScoresAverages": raw_scores_averages, + "rawScoresStandardDeviations": raw_scores_standard_deviations, + "normalizedScoresAverages": normalized_scores_averages + } elif iterative_random_w_score_means is not None: - save_dict(iterative_random_w_score_means, config.output_file_path, 'score_means.pkl') - save_dict(iterative_random_w_score_stds, config.output_file_path, 'score_stds.pkl') - save_dict(iterative_random_w_score_means_normalized, config.output_file_path, 'score_means_normalized.pkl') + raw_scores_averages = save_dict(iterative_random_w_score_means, output_filepath, 'score_means.pkl') + raw_scores_standard_deviations = save_dict(iterative_random_w_score_stds.to_dict(), output_filepath, + 'score_stds.pkl') + normalized_scores_averages = save_dict(iterative_random_w_score_means_normalized, output_filepath, + 'score_means_normalized.pkl') + response = { + "rawScoresAverages": raw_scores_averages, + "rawScoresStandardDeviations": raw_scores_standard_deviations, + "normalizedScoresAverages": normalized_scores_averages + } + save_config(config, output_filepath, 'configuration.json') - save_config(input_config, config.output_file_path, 'configuration.json') + return response def _plot_and_save_charts(scores: Optional[pd.DataFrame], @@ -1226,21 +1248,20 @@ def _plot_and_save_charts(scores: 
Optional[pd.DataFrame], iterative_random_w_score_stds: Optional[dict], iterative_random_w_score_means_normalized: Optional[dict], input_matrix: pd.DataFrame, - config: dict, + config: Configuration, is_robustness_weights=None, is_robustness_indicators=None) -> None: """ Generate plots based on the computed scores and save them. """ - config = Config(config) num_indicators = input_matrix.shape[1] - + output_file_path = "toy_example" if scores is not None and not scores.empty: plot_no_norm_scores = utils_for_plotting.plot_non_norm_scores_without_uncert(scores) - utils_for_plotting.save_figure(plot_no_norm_scores, config.output_file_path, "MCDA_rough_scores.png") + utils_for_plotting.save_figure(plot_no_norm_scores, output_file_path, "MCDA_rough_scores.png") plot_norm_scores = utils_for_plotting.plot_norm_scores_without_uncert(normalized_scores) - utils_for_plotting.save_figure(plot_norm_scores, config.output_file_path, "MCDA_norm_scores.png") + utils_for_plotting.save_figure(plot_norm_scores, output_file_path, "MCDA_norm_scores.png") elif score_means is not None and not score_means.empty: if is_robustness_weights is not None and is_robustness_weights == 1: @@ -1252,8 +1273,8 @@ def _plot_and_save_charts(scores: Optional[pd.DataFrame], chart_mean_scores_norm = utils_for_plotting.plot_mean_scores(score_means_normalized, "not_plot_std", "indicators", score_stds) - utils_for_plotting.save_figure(chart_mean_scores, config.output_file_path, "MCDA_rough_scores.png") - utils_for_plotting.save_figure(chart_mean_scores_norm, config.output_file_path, "MCDA_norm_scores.png") + utils_for_plotting.save_figure(chart_mean_scores, output_file_path, "MCDA_rough_scores.png") + utils_for_plotting.save_figure(chart_mean_scores_norm, output_file_path, "MCDA_norm_scores.png") elif iterative_random_w_score_means is not None: images = [] @@ -1274,5 +1295,5 @@ def _plot_and_save_charts(scores: Optional[pd.DataFrame], images.append(plot_weight_mean_scores) images_norm.append(plot_weight_mean_scores_norm) - utils_for_plotting.combine_images(images, config.output_file_path, "MCDA_one_weight_randomness_rough_scores.png") - utils_for_plotting.combine_images(images_norm, config.output_file_path, "MCDA_one_weight_randomness_norm_scores.png") + utils_for_plotting.combine_images(images, output_file_path, "MCDA_one_weight_randomness_rough_scores.png") + utils_for_plotting.combine_images(images_norm, output_file_path, "MCDA_one_weight_randomness_norm_scores.png") diff --git a/mcda/utils/utils_for_parallelization.py b/mcda/utils/utils_for_parallelization.py index 73da4ce..902df22 100644 --- a/mcda/utils/utils_for_parallelization.py +++ b/mcda/utils/utils_for_parallelization.py @@ -1,5 +1,5 @@ -from mcda.mcda_functions.aggregation import Aggregation -from mcda.mcda_functions.normalization import Normalization +from ProMCDA.mcda.mcda_functions.aggregation import Aggregation +from ProMCDA.mcda.mcda_functions.normalization import Normalization import sys import logging import pandas as pd diff --git a/mcda/utils/utils_for_plotting.py b/mcda/utils/utils_for_plotting.py index 2e9f516..c3f943c 100644 --- a/mcda/utils/utils_for_plotting.py +++ b/mcda/utils/utils_for_plotting.py @@ -9,7 +9,7 @@ import plotly.io as pio from PIL import Image -import mcda.utils.utils_for_main as utils_for_main +import ProMCDA.mcda.utils.utils_for_main as utils_for_main DEFAULT_OUTPUT_DIRECTORY_PATH = './output_files' # root directory of ProMCDA diff --git a/models/__init__.py b/models/__init__.py new file mode 100644 index 0000000..3f26e07 --- /dev/null +++ 
b/models/__init__.py @@ -0,0 +1,25 @@ +# coding: utf-8 + +# flake8: noqa +from __future__ import absolute_import +# import models into model package +from swagger_server.models.configuration import Configuration +from swagger_server.models.configuration_monte_carlo_sampling import ConfigurationMonteCarloSampling +from swagger_server.models.configuration_robustness import ConfigurationRobustness +from swagger_server.models.configuration_sensitivity import ConfigurationSensitivity +from swagger_server.models.convert_csv_to_json_body import ConvertCsvToJsonBody +from swagger_server.models.error import Error +from swagger_server.models.exact_indicator_values import ExactIndicatorValues +from swagger_server.models.inline_response200 import InlineResponse200 +from swagger_server.models.inline_response2001 import InlineResponse2001 +from swagger_server.models.inline_response2002 import InlineResponse2002 +from swagger_server.models.log_normal_indicator_values import LogNormalIndicatorValues +from swagger_server.models.normal_indicator_values import NormalIndicatorValues +from swagger_server.models.poisson_indicator_values import PoissonIndicatorValues +from swagger_server.models.sensitivity_mcda import SensitivityMCDA +from swagger_server.models.sensitivity_pro_mcda import SensitivityProMCDA +from swagger_server.models.sensitivity_ranked_alternatives import SensitivityRankedAlternatives +from swagger_server.models.sensitivity_ranked_alternatives_inner import SensitivityRankedAlternativesInner +from swagger_server.models.simple_mcda import SimpleMCDA +from swagger_server.models.simple_pro_mcda import SimpleProMCDA +from swagger_server.models.uniform_indicator_values import UniformIndicatorValues diff --git a/models/base_model_.py b/models/base_model_.py new file mode 100644 index 0000000..97999c3 --- /dev/null +++ b/models/base_model_.py @@ -0,0 +1,69 @@ +import pprint + +import six +import typing + +from swagger_server import util + +T = typing.TypeVar('T') + + +class Model(object): + # swaggerTypes: The key is attribute name and the + # value is attribute type. + swagger_types = {} + + # attributeMap: The key is attribute name and the + # value is json key in definition. 
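+    # Generated subclasses populate both dictionaries; `to_dict` below walks
+    # `swagger_types` and recursively serializes nested models, while
+    # `from_dict` delegates to `swagger_server.util.deserialize_model`.
+    # Hypothetical round trip with a generated subclass (illustration only):
+    #     cfg = Configuration.from_dict(body)  # body: dict parsed from JSON
+    #     as_dict = cfg.to_dict()              # keyed by snake_case attribute names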
+ attribute_map = {} + + @classmethod + def from_dict(cls: typing.Type[T], dikt) -> T: + """Returns the dict as a model""" + return util.deserialize_model(dikt, cls) + + def to_dict(self): + """Returns the model properties as a dict + + :rtype: dict + """ + result = {} + + for attr, _ in six.iteritems(self.swagger_types): + value = getattr(self, attr) + if isinstance(value, list): + result[attr] = list(map( + lambda x: x.to_dict() if hasattr(x, "to_dict") else x, + value + )) + elif hasattr(value, "to_dict"): + result[attr] = value.to_dict() + elif isinstance(value, dict): + result[attr] = dict(map( + lambda item: (item[0], item[1].to_dict()) + if hasattr(item[1], "to_dict") else item, + value.items() + )) + else: + result[attr] = value + + return result + + def to_str(self): + """Returns the string representation of the model + + :rtype: str + """ + return pprint.pformat(self.to_dict()) + + def __repr__(self): + """For `print` and `pprint`""" + return self.to_str() + + def __eq__(self, other): + """Returns true if both objects are equal""" + return self.__dict__ == other.__dict__ + + def __ne__(self, other): + """Returns true if both objects are not equal""" + return not self == other diff --git a/models/configuration.py b/models/configuration.py new file mode 100644 index 0000000..ccbcb7d --- /dev/null +++ b/models/configuration.py @@ -0,0 +1,218 @@ +# coding: utf-8 + +from __future__ import absolute_import +from datetime import date, datetime # noqa: F401 + +from typing import List, Dict # noqa: F401 + +from swagger_server.models.base_model_ import Model +from swagger_server.models.configuration_monte_carlo_sampling import ConfigurationMonteCarloSampling # noqa: F401,E501 +from swagger_server.models.configuration_robustness import ConfigurationRobustness # noqa: F401,E501 +from swagger_server.models.configuration_sensitivity import ConfigurationSensitivity # noqa: F401,E501 +from swagger_server import util + + +class Configuration(Model): + """NOTE: This class is auto generated by the swagger code generator program. + + Do not edit the class manually. + """ + def __init__(self, input_matrix: Dict[str, Dict[str, float]]=None, weights: List[float]=None, polarity: List[str]=None, sensitivity: ConfigurationSensitivity=None, robustness: ConfigurationRobustness=None, monte_carlo_sampling: ConfigurationMonteCarloSampling=None): # noqa: E501 + """Configuration - a model defined in Swagger + + :param input_matrix: The input_matrix of this Configuration. # noqa: E501 + :type input_matrix: Dict[str, Dict[str, float]] + :param weights: The weights of this Configuration. # noqa: E501 + :type weights: List[float] + :param polarity: The polarity of this Configuration. # noqa: E501 + :type polarity: List[str] + :param sensitivity: The sensitivity of this Configuration. # noqa: E501 + :type sensitivity: ConfigurationSensitivity + :param robustness: The robustness of this Configuration. # noqa: E501 + :type robustness: ConfigurationRobustness + :param monte_carlo_sampling: The monte_carlo_sampling of this Configuration. 
# noqa: E501 + :type monte_carlo_sampling: ConfigurationMonteCarloSampling + """ + self.swagger_types = { + 'input_matrix': Dict[str, Dict[str, float]], + 'weights': List[float], + 'polarity': List[str], + 'sensitivity': ConfigurationSensitivity, + 'robustness': ConfigurationRobustness, + 'monte_carlo_sampling': ConfigurationMonteCarloSampling + } + + self.attribute_map = { + 'input_matrix': 'inputMatrix', + 'weights': 'weights', + 'polarity': 'polarity', + 'sensitivity': 'sensitivity', + 'robustness': 'robustness', + 'monte_carlo_sampling': 'monteCarloSampling' + } + self._input_matrix = input_matrix + self._weights = weights + self._polarity = polarity + self._sensitivity = sensitivity + self._robustness = robustness + self._monte_carlo_sampling = monte_carlo_sampling + + @classmethod + def from_dict(cls, dikt) -> 'Configuration': + """Returns the dict as a model + + :param dikt: A dict. + :type: dict + :return: The Configuration of this Configuration. # noqa: E501 + :rtype: Configuration + """ + return util.deserialize_model(dikt, cls) + + @property + def input_matrix(self) -> Dict[str, Dict[str, float]]: + """Gets the input_matrix of this Configuration. + + An array where the columns represent the indicators and the rows their values for different alternatives. # noqa: E501 + + :return: The input_matrix of this Configuration. + :rtype: Dict[str, Dict[str, float]] + """ + return self._input_matrix + + @input_matrix.setter + def input_matrix(self, input_matrix: Dict[str, Dict[str, float]]): + """Sets the input_matrix of this Configuration. + + An array where the columns represent the indicators and the rows their values for different alternatives. # noqa: E501 + + :param input_matrix: The input_matrix of this Configuration. + :type input_matrix: Dict[str, Dict[str, float]] + """ + if input_matrix is None: + raise ValueError("Invalid value for `input_matrix`, must not be `None`") # noqa: E501 + + self._input_matrix = input_matrix + + @property + def weights(self) -> List[float]: + """Gets the weights of this Configuration. + + The weights associated to the indicators. # noqa: E501 + + :return: The weights of this Configuration. + :rtype: List[float] + """ + return self._weights + + @weights.setter + def weights(self, weights: List[float]): + """Sets the weights of this Configuration. + + The weights associated to the indicators. # noqa: E501 + + :param weights: The weights of this Configuration. + :type weights: List[float] + """ + if weights is None: + raise ValueError("Invalid value for `weights`, must not be `None`") # noqa: E501 + + self._weights = weights + + @property + def polarity(self) -> List[str]: + """Gets the polarity of this Configuration. + + The polarity of an indicator indicates whether its rising value adds positively or negatively to the MCDA scores. # noqa: E501 + + :return: The polarity of this Configuration. + :rtype: List[str] + """ + return self._polarity + + @polarity.setter + def polarity(self, polarity: List[str]): + """Sets the polarity of this Configuration. + + The polarity of an indicator indicates whether its rising value adds positively or negatively to the MCDA scores. # noqa: E501 + + :param polarity: The polarity of this Configuration. 
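+            Each entry must be "+" or "-", as enforced by `allowed_values`
+            below.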
+ :type polarity: List[str] + """ + allowed_values = ["+", "-"] # noqa: E501 + if not set(polarity).issubset(set(allowed_values)): + raise ValueError( + "Invalid values for `polarity` [{0}], must be a subset of [{1}]" # noqa: E501 + .format(", ".join(map(str, set(polarity) - set(allowed_values))), # noqa: E501 + ", ".join(map(str, allowed_values))) + ) + + self._polarity = polarity + + @property + def sensitivity(self) -> ConfigurationSensitivity: + """Gets the sensitivity of this Configuration. + + + :return: The sensitivity of this Configuration. + :rtype: ConfigurationSensitivity + """ + return self._sensitivity + + @sensitivity.setter + def sensitivity(self, sensitivity: ConfigurationSensitivity): + """Sets the sensitivity of this Configuration. + + + :param sensitivity: The sensitivity of this Configuration. + :type sensitivity: ConfigurationSensitivity + """ + if sensitivity is None: + raise ValueError("Invalid value for `sensitivity`, must not be `None`") # noqa: E501 + + self._sensitivity = sensitivity + + @property + def robustness(self) -> ConfigurationRobustness: + """Gets the robustness of this Configuration. + + + :return: The robustness of this Configuration. + :rtype: ConfigurationRobustness + """ + return self._robustness + + @robustness.setter + def robustness(self, robustness: ConfigurationRobustness): + """Sets the robustness of this Configuration. + + + :param robustness: The robustness of this Configuration. + :type robustness: ConfigurationRobustness + """ + if robustness is None: + raise ValueError("Invalid value for `robustness`, must not be `None`") # noqa: E501 + + self._robustness = robustness + + @property + def monte_carlo_sampling(self) -> ConfigurationMonteCarloSampling: + """Gets the monte_carlo_sampling of this Configuration. + + + :return: The monte_carlo_sampling of this Configuration. + :rtype: ConfigurationMonteCarloSampling + """ + return self._monte_carlo_sampling + + @monte_carlo_sampling.setter + def monte_carlo_sampling(self, monte_carlo_sampling: ConfigurationMonteCarloSampling): + """Sets the monte_carlo_sampling of this Configuration. + + + :param monte_carlo_sampling: The monte_carlo_sampling of this Configuration. + :type monte_carlo_sampling: ConfigurationMonteCarloSampling + """ + if monte_carlo_sampling is None: + raise ValueError("Invalid value for `monte_carlo_sampling`, must not be `None`") # noqa: E501 + + self._monte_carlo_sampling = monte_carlo_sampling diff --git a/models/configuration_modified_input.py b/models/configuration_modified_input.py new file mode 100644 index 0000000..07accc8 --- /dev/null +++ b/models/configuration_modified_input.py @@ -0,0 +1,218 @@ +# coding: utf-8 + +from __future__ import absolute_import +from datetime import date, datetime # noqa: F401 + +from typing import List, Dict # noqa: F401 + +from swagger_server.models.base_model_ import Model +from swagger_server.models.configuration_modified_input_monte_carlo_sampling import ConfigurationModifiedInputMonteCarloSampling # noqa: F401,E501 +from swagger_server.models.configuration_modified_input_robustness import ConfigurationModifiedInputRobustness # noqa: F401,E501 +from swagger_server.models.configuration_modified_input_sensitivity import ConfigurationModifiedInputSensitivity # noqa: F401,E501 +from swagger_server import util + + +class ConfigurationModifiedInput(Model): + """NOTE: This class is auto generated by the swagger code generator program. + + Do not edit the class manually. 
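+
+    Minimal usage sketch (illustration only; `body` is assumed to be a dict
+    parsed from the request JSON, and the generated `swagger_server` package
+    is assumed to be importable):
+
+        config = ConfigurationModifiedInput.from_dict(body)
+        weights = config.weights
+        matrix = config.input_matrix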
+ """ + def __init__(self, input_matrix: Dict[str, Dict[str, float]]=None, weights: List[float]=None, polarity: List[str]=None, sensitivity: ConfigurationModifiedInputSensitivity=None, robustness: ConfigurationModifiedInputRobustness=None, monte_carlo_sampling: ConfigurationModifiedInputMonteCarloSampling=None): # noqa: E501 + """ConfigurationModifiedInput - a model defined in Swagger + + :param input_matrix: The input_matrix of this ConfigurationModifiedInput. # noqa: E501 + :type input_matrix: Dict[str, Dict[str, float]] + :param weights: The weights of this ConfigurationModifiedInput. # noqa: E501 + :type weights: List[float] + :param polarity: The polarity of this ConfigurationModifiedInput. # noqa: E501 + :type polarity: List[str] + :param sensitivity: The sensitivity of this ConfigurationModifiedInput. # noqa: E501 + :type sensitivity: ConfigurationModifiedInputSensitivity + :param robustness: The robustness of this ConfigurationModifiedInput. # noqa: E501 + :type robustness: ConfigurationModifiedInputRobustness + :param monte_carlo_sampling: The monte_carlo_sampling of this ConfigurationModifiedInput. # noqa: E501 + :type monte_carlo_sampling: ConfigurationModifiedInputMonteCarloSampling + """ + self.swagger_types = { + 'input_matrix': Dict[str, Dict[str, float]], + 'weights': List[float], + 'polarity': List[str], + 'sensitivity': ConfigurationModifiedInputSensitivity, + 'robustness': ConfigurationModifiedInputRobustness, + 'monte_carlo_sampling': ConfigurationModifiedInputMonteCarloSampling + } + + self.attribute_map = { + 'input_matrix': 'inputMatrix', + 'weights': 'weights', + 'polarity': 'polarity', + 'sensitivity': 'sensitivity', + 'robustness': 'robustness', + 'monte_carlo_sampling': 'monteCarloSampling' + } + self._input_matrix = input_matrix + self._weights = weights + self._polarity = polarity + self._sensitivity = sensitivity + self._robustness = robustness + self._monte_carlo_sampling = monte_carlo_sampling + + @classmethod + def from_dict(cls, dikt) -> 'ConfigurationModifiedInput': + """Returns the dict as a model + + :param dikt: A dict. + :type: dict + :return: The ConfigurationModifiedInput of this ConfigurationModifiedInput. # noqa: E501 + :rtype: ConfigurationModifiedInput + """ + return util.deserialize_model(dikt, cls) + + @property + def input_matrix(self) -> Dict[str, Dict[str, float]]: + """Gets the input_matrix of this ConfigurationModifiedInput. + + An array where the columns represent the indicators and the rows their values for different alternatives. # noqa: E501 + + :return: The input_matrix of this ConfigurationModifiedInput. + :rtype: Dict[str, Dict[str, float]] + """ + return self._input_matrix + + @input_matrix.setter + def input_matrix(self, input_matrix: Dict[str, Dict[str, float]]): + """Sets the input_matrix of this ConfigurationModifiedInput. + + An array where the columns represent the indicators and the rows their values for different alternatives. # noqa: E501 + + :param input_matrix: The input_matrix of this ConfigurationModifiedInput. + :type input_matrix: Dict[str, Dict[str, float]] + """ + if input_matrix is None: + raise ValueError("Invalid value for `input_matrix`, must not be `None`") # noqa: E501 + + self._input_matrix = input_matrix + + @property + def weights(self) -> List[float]: + """Gets the weights of this ConfigurationModifiedInput. + + The weights associated to the indicators. # noqa: E501 + + :return: The weights of this ConfigurationModifiedInput. 
+ :rtype: List[float] + """ + return self._weights + + @weights.setter + def weights(self, weights: List[float]): + """Sets the weights of this ConfigurationModifiedInput. + + The weights associated to the indicators. # noqa: E501 + + :param weights: The weights of this ConfigurationModifiedInput. + :type weights: List[float] + """ + if weights is None: + raise ValueError("Invalid value for `weights`, must not be `None`") # noqa: E501 + + self._weights = weights + + @property + def polarity(self) -> List[str]: + """Gets the polarity of this ConfigurationModifiedInput. + + The polarity of an indicator indicates whether its rising value adds positively or negatively to the MCDA scores. # noqa: E501 + + :return: The polarity of this ConfigurationModifiedInput. + :rtype: List[str] + """ + return self._polarity + + @polarity.setter + def polarity(self, polarity: List[str]): + """Sets the polarity of this ConfigurationModifiedInput. + + The polarity of an indicator indicates whether its rising value adds positively or negatively to the MCDA scores. # noqa: E501 + + :param polarity: The polarity of this ConfigurationModifiedInput. + :type polarity: List[str] + """ + allowed_values = ["+", "-"] # noqa: E501 + if not set(polarity).issubset(set(allowed_values)): + raise ValueError( + "Invalid values for `polarity` [{0}], must be a subset of [{1}]" # noqa: E501 + .format(", ".join(map(str, set(polarity) - set(allowed_values))), # noqa: E501 + ", ".join(map(str, allowed_values))) + ) + + self._polarity = polarity + + @property + def sensitivity(self) -> ConfigurationModifiedInputSensitivity: + """Gets the sensitivity of this ConfigurationModifiedInput. + + + :return: The sensitivity of this ConfigurationModifiedInput. + :rtype: ConfigurationModifiedInputSensitivity + """ + return self._sensitivity + + @sensitivity.setter + def sensitivity(self, sensitivity: ConfigurationModifiedInputSensitivity): + """Sets the sensitivity of this ConfigurationModifiedInput. + + + :param sensitivity: The sensitivity of this ConfigurationModifiedInput. + :type sensitivity: ConfigurationModifiedInputSensitivity + """ + if sensitivity is None: + raise ValueError("Invalid value for `sensitivity`, must not be `None`") # noqa: E501 + + self._sensitivity = sensitivity + + @property + def robustness(self) -> ConfigurationModifiedInputRobustness: + """Gets the robustness of this ConfigurationModifiedInput. + + + :return: The robustness of this ConfigurationModifiedInput. + :rtype: ConfigurationModifiedInputRobustness + """ + return self._robustness + + @robustness.setter + def robustness(self, robustness: ConfigurationModifiedInputRobustness): + """Sets the robustness of this ConfigurationModifiedInput. + + + :param robustness: The robustness of this ConfigurationModifiedInput. + :type robustness: ConfigurationModifiedInputRobustness + """ + if robustness is None: + raise ValueError("Invalid value for `robustness`, must not be `None`") # noqa: E501 + + self._robustness = robustness + + @property + def monte_carlo_sampling(self) -> ConfigurationModifiedInputMonteCarloSampling: + """Gets the monte_carlo_sampling of this ConfigurationModifiedInput. + + + :return: The monte_carlo_sampling of this ConfigurationModifiedInput. + :rtype: ConfigurationModifiedInputMonteCarloSampling + """ + return self._monte_carlo_sampling + + @monte_carlo_sampling.setter + def monte_carlo_sampling(self, monte_carlo_sampling: ConfigurationModifiedInputMonteCarloSampling): + """Sets the monte_carlo_sampling of this ConfigurationModifiedInput. 
+ + + :param monte_carlo_sampling: The monte_carlo_sampling of this ConfigurationModifiedInput. + :type monte_carlo_sampling: ConfigurationModifiedInputMonteCarloSampling + """ + if monte_carlo_sampling is None: + raise ValueError("Invalid value for `monte_carlo_sampling`, must not be `None`") # noqa: E501 + + self._monte_carlo_sampling = monte_carlo_sampling diff --git a/models/configuration_modified_input_monte_carlo_sampling.py b/models/configuration_modified_input_monte_carlo_sampling.py new file mode 100644 index 0000000..ade3e73 --- /dev/null +++ b/models/configuration_modified_input_monte_carlo_sampling.py @@ -0,0 +1,101 @@ +# coding: utf-8 + +from __future__ import absolute_import +from datetime import date, datetime # noqa: F401 + +from typing import List, Dict # noqa: F401 + +from swagger_server.models.base_model_ import Model +from swagger_server import util + + +class ConfigurationModifiedInputMonteCarloSampling(Model): + """NOTE: This class is auto generated by the swagger code generator program. + + Do not edit the class manually. + """ + def __init__(self, monte_carlo_runs: int=None, marginal_distributions: List[str]=None): # noqa: E501 + """ConfigurationModifiedInputMonteCarloSampling - a model defined in Swagger + + :param monte_carlo_runs: The monte_carlo_runs of this ConfigurationModifiedInputMonteCarloSampling. # noqa: E501 + :type monte_carlo_runs: int + :param marginal_distributions: The marginal_distributions of this ConfigurationModifiedInputMonteCarloSampling. # noqa: E501 + :type marginal_distributions: List[str] + """ + self.swagger_types = { + 'monte_carlo_runs': int, + 'marginal_distributions': List[str] + } + + self.attribute_map = { + 'monte_carlo_runs': 'monteCarloRuns', + 'marginal_distributions': 'marginalDistributions' + } + self._monte_carlo_runs = monte_carlo_runs + self._marginal_distributions = marginal_distributions + + @classmethod + def from_dict(cls, dikt) -> 'ConfigurationModifiedInputMonteCarloSampling': + """Returns the dict as a model + + :param dikt: A dict. + :type: dict + :return: The ConfigurationModifiedInput_monteCarloSampling of this ConfigurationModifiedInputMonteCarloSampling. # noqa: E501 + :rtype: ConfigurationModifiedInputMonteCarloSampling + """ + return util.deserialize_model(dikt, cls) + + @property + def monte_carlo_runs(self) -> int: + """Gets the monte_carlo_runs of this ConfigurationModifiedInputMonteCarloSampling. + + Number of Monte Carlo iterations. # noqa: E501 + + :return: The monte_carlo_runs of this ConfigurationModifiedInputMonteCarloSampling. + :rtype: int + """ + return self._monte_carlo_runs + + @monte_carlo_runs.setter + def monte_carlo_runs(self, monte_carlo_runs: int): + """Sets the monte_carlo_runs of this ConfigurationModifiedInputMonteCarloSampling. + + Number of Monte Carlo iterations. # noqa: E501 + + :param monte_carlo_runs: The monte_carlo_runs of this ConfigurationModifiedInputMonteCarloSampling. + :type monte_carlo_runs: int + """ + if monte_carlo_runs is None: + raise ValueError("Invalid value for `monte_carlo_runs`, must not be `None`") # noqa: E501 + + self._monte_carlo_runs = monte_carlo_runs + + @property + def marginal_distributions(self) -> List[str]: + """Gets the marginal_distributions of this ConfigurationModifiedInputMonteCarloSampling. + + The marginal distributions describing the indicators. # noqa: E501 + + :return: The marginal_distributions of this ConfigurationModifiedInputMonteCarloSampling. 
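+            Each entry is one of "normal", "lognormal", "exact", "poisson"
+            or "uniform", as enforced by `allowed_values` in the setter.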
+ :rtype: List[str] + """ + return self._marginal_distributions + + @marginal_distributions.setter + def marginal_distributions(self, marginal_distributions: List[str]): + """Sets the marginal_distributions of this ConfigurationModifiedInputMonteCarloSampling. + + The marginal distributions describing the indicators. # noqa: E501 + + :param marginal_distributions: The marginal_distributions of this ConfigurationModifiedInputMonteCarloSampling. + :type marginal_distributions: List[str] + """ + allowed_values = ["normal", "lognormal", "exact", "poisson", "uniform"] # noqa: E501 + if not set(marginal_distributions).issubset(set(allowed_values)): + raise ValueError( + "Invalid values for `marginal_distributions` [{0}], must be a subset of [{1}]" # noqa: E501 + .format(", ".join(map(str, set(marginal_distributions) - set(allowed_values))), # noqa: E501 + ", ".join(map(str, allowed_values))) + ) + + self._marginal_distributions = marginal_distributions diff --git a/models/configuration_modified_input_robustness.py b/models/configuration_modified_input_robustness.py new file mode 100644 index 0000000..3940e29 --- /dev/null +++ b/models/configuration_modified_input_robustness.py @@ -0,0 +1,132 @@ +# coding: utf-8 + +from __future__ import absolute_import +from datetime import date, datetime # noqa: F401 + +from typing import List, Dict # noqa: F401 + +from swagger_server.models.base_model_ import Model +from swagger_server import util + + +class ConfigurationModifiedInputRobustness(Model): + """NOTE: This class is auto generated by the swagger code generator program. + + Do not edit the class manually. + """ + def __init__(self, robustness: str=None, on_weights_level: str=None, given_weights: List[float]=None): # noqa: E501 + """ConfigurationModifiedInputRobustness - a model defined in Swagger + + :param robustness: The robustness of this ConfigurationModifiedInputRobustness. # noqa: E501 + :type robustness: str + :param on_weights_level: The on_weights_level of this ConfigurationModifiedInputRobustness. # noqa: E501 + :type on_weights_level: str + :param given_weights: The given_weights of this ConfigurationModifiedInputRobustness. # noqa: E501 + :type given_weights: List[float] + """ + self.swagger_types = { + 'robustness': str, + 'on_weights_level': str, + 'given_weights': List[float] + } + + self.attribute_map = { + 'robustness': 'robustness', + 'on_weights_level': 'onWeightsLevel', + 'given_weights': 'givenWeights' + } + self._robustness = robustness + self._on_weights_level = on_weights_level + self._given_weights = given_weights + + @classmethod + def from_dict(cls, dikt) -> 'ConfigurationModifiedInputRobustness': + """Returns the dict as a model + + :param dikt: A dict. + :type: dict + :return: The ConfigurationModifiedInput_robustness of this ConfigurationModifiedInputRobustness. # noqa: E501 + :rtype: ConfigurationModifiedInputRobustness + """ + return util.deserialize_model(dikt, cls) + + @property + def robustness(self) -> str: + """Gets the robustness of this ConfigurationModifiedInputRobustness. + + A robustness analysis is performed ['yes'] or not ['no']. # noqa: E501 + + :return: The robustness of this ConfigurationModifiedInputRobustness. + :rtype: str + """ + return self._robustness + + @robustness.setter + def robustness(self, robustness: str): + """Sets the robustness of this ConfigurationModifiedInputRobustness. + + A robustness analysis is performed ['yes'] or not ['no']. # noqa: E501 + + :param robustness: The robustness of this ConfigurationModifiedInputRobustness. 
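+            Accepted values are "none", "indicators" and "weights", as
+            enforced by `allowed_values` below.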
+ :type robustness: str + """ + allowed_values = ["none", "indicators", "weights"] # noqa: E501 + if robustness not in allowed_values: + raise ValueError( + "Invalid value for `robustness` ({0}), must be one of {1}" + .format(robustness, allowed_values) + ) + + self._robustness = robustness + + @property + def on_weights_level(self) -> str: + """Gets the on_weights_level of this ConfigurationModifiedInputRobustness. + + Where uncertainties are introduced. # noqa: E501 + + :return: The on_weights_level of this ConfigurationModifiedInputRobustness. + :rtype: str + """ + return self._on_weights_level + + @on_weights_level.setter + def on_weights_level(self, on_weights_level: str): + """Sets the on_weights_level of this ConfigurationModifiedInputRobustness. + + Where uncertainties are introduced. # noqa: E501 + + :param on_weights_level: The on_weights_level of this ConfigurationModifiedInputRobustness. + :type on_weights_level: str + """ + allowed_values = ["none", "all", "single"] # noqa: E501 + if on_weights_level not in allowed_values: + raise ValueError( + "Invalid value for `on_weights_level` ({0}), must be one of {1}" + .format(on_weights_level, allowed_values) + ) + + self._on_weights_level = on_weights_level + + @property + def given_weights(self) -> List[float]: + """Gets the given_weights of this ConfigurationModifiedInputRobustness. + + Values assigned to the weights associated with the indicators. # noqa: E501 + + :return: The given_weights of this ConfigurationModifiedInputRobustness. + :rtype: List[float] + """ + return self._given_weights + + @given_weights.setter + def given_weights(self, given_weights: List[float]): + """Sets the given_weights of this ConfigurationModifiedInputRobustness. + + Values assigned to the weights associated with the indicators. # noqa: E501 + + :param given_weights: The given_weights of this ConfigurationModifiedInputRobustness. + :type given_weights: List[float] + """ + + self._given_weights = given_weights diff --git a/models/configuration_modified_input_sensitivity.py b/models/configuration_modified_input_sensitivity.py new file mode 100644 index 0000000..bb9c4c3 --- /dev/null +++ b/models/configuration_modified_input_sensitivity.py @@ -0,0 +1,138 @@ +# coding: utf-8 + +from __future__ import absolute_import +from datetime import date, datetime # noqa: F401 + +from typing import List, Dict # noqa: F401 + +from swagger_server.models.base_model_ import Model +from swagger_server import util + + +class ConfigurationModifiedInputSensitivity(Model): + """NOTE: This class is auto generated by the swagger code generator program. + + Do not edit the class manually. + """ + def __init__(self, sensitivity_on: str=None, normalization: str=None, aggregation: str=None): # noqa: E501 + """ConfigurationModifiedInputSensitivity - a model defined in Swagger + + :param sensitivity_on: The sensitivity_on of this ConfigurationModifiedInputSensitivity. # noqa: E501 + :type sensitivity_on: str + :param normalization: The normalization of this ConfigurationModifiedInputSensitivity. # noqa: E501 + :type normalization: str + :param aggregation: The aggregation of this ConfigurationModifiedInputSensitivity. 
# noqa: E501 + :type aggregation: str + """ + self.swagger_types = { + 'sensitivity_on': str, + 'normalization': str, + 'aggregation': str + } + + self.attribute_map = { + 'sensitivity_on': 'sensitivityOn', + 'normalization': 'normalization', + 'aggregation': 'aggregation' + } + self._sensitivity_on = sensitivity_on + self._normalization = normalization + self._aggregation = aggregation + + @classmethod + def from_dict(cls, dikt) -> 'ConfigurationModifiedInputSensitivity': + """Returns the dict as a model + + :param dikt: A dict. + :type: dict + :return: The ConfigurationModifiedInput_sensitivity of this ConfigurationModifiedInputSensitivity. # noqa: E501 + :rtype: ConfigurationModifiedInputSensitivity + """ + return util.deserialize_model(dikt, cls) + + @property + def sensitivity_on(self) -> str: + """Gets the sensitivity_on of this ConfigurationModifiedInputSensitivity. + + A sensitivity analysis is performed ['yes'] or not ['no']. # noqa: E501 + + :return: The sensitivity_on of this ConfigurationModifiedInputSensitivity. + :rtype: str + """ + return self._sensitivity_on + + @sensitivity_on.setter + def sensitivity_on(self, sensitivity_on: str): + """Sets the sensitivity_on of this ConfigurationModifiedInputSensitivity. + + A sensitivity analysis is performed ['yes'] or not ['no']. # noqa: E501 + + :param sensitivity_on: The sensitivity_on of this ConfigurationModifiedInputSensitivity. + :type sensitivity_on: str + """ + allowed_values = ["yes", "no"] # noqa: E501 + if sensitivity_on not in allowed_values: + raise ValueError( + "Invalid value for `sensitivity_on` ({0}), must be one of {1}" + .format(sensitivity_on, allowed_values) + ) + + self._sensitivity_on = sensitivity_on + + @property + def normalization(self) -> str: + """Gets the normalization of this ConfigurationModifiedInputSensitivity. + + The function used for normalizing the indicator values. # noqa: E501 + + :return: The normalization of this ConfigurationModifiedInputSensitivity. + :rtype: str + """ + return self._normalization + + @normalization.setter + def normalization(self, normalization: str): + """Sets the normalization of this ConfigurationModifiedInputSensitivity. + + The function used for normalizing the indicator values. # noqa: E501 + + :param normalization: The normalization of this ConfigurationModifiedInputSensitivity. + :type normalization: str + """ + allowed_values = ["minmax", "standardized", "target", "rank"] # noqa: E501 + if normalization not in allowed_values: + raise ValueError( + "Invalid value for `normalization` ({0}), must be one of {1}" + .format(normalization, allowed_values) + ) + + self._normalization = normalization + + @property + def aggregation(self) -> str: + """Gets the aggregation of this ConfigurationModifiedInputSensitivity. + + The function used for aggregating the indicator values. # noqa: E501 + + :return: The aggregation of this ConfigurationModifiedInputSensitivity. + :rtype: str + """ + return self._aggregation + + @aggregation.setter + def aggregation(self, aggregation: str): + """Sets the aggregation of this ConfigurationModifiedInputSensitivity. + + The function used for aggregating the indicator values. # noqa: E501 + + :param aggregation: The aggregation of this ConfigurationModifiedInputSensitivity. 
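+            One of "weighted_sum", "geometric", "harmonic" or "minimum", as
+            enforced by `allowed_values` below.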
+ :type aggregation: str + """ + allowed_values = ["weighted_sum", "geometric", "harmonic", "minimum"] # noqa: E501 + if aggregation not in allowed_values: + raise ValueError( + "Invalid value for `aggregation` ({0}), must be one of {1}" + .format(aggregation, allowed_values) + ) + + self._aggregation = aggregation diff --git a/models/configuration_monte_carlo_sampling.py b/models/configuration_monte_carlo_sampling.py new file mode 100644 index 0000000..307eb09 --- /dev/null +++ b/models/configuration_monte_carlo_sampling.py @@ -0,0 +1,101 @@ +# coding: utf-8 + +from __future__ import absolute_import +from datetime import date, datetime # noqa: F401 + +from typing import List, Dict # noqa: F401 + +from swagger_server.models.base_model_ import Model +from swagger_server import util + + +class ConfigurationMonteCarloSampling(Model): + """NOTE: This class is auto generated by the swagger code generator program. + + Do not edit the class manually. + """ + def __init__(self, monte_carlo_runs: int=None, marginal_distributions: List[str]=None): # noqa: E501 + """ConfigurationMonteCarloSampling - a model defined in Swagger + + :param monte_carlo_runs: The monte_carlo_runs of this ConfigurationMonteCarloSampling. # noqa: E501 + :type monte_carlo_runs: int + :param marginal_distributions: The marginal_distributions of this ConfigurationMonteCarloSampling. # noqa: E501 + :type marginal_distributions: List[str] + """ + self.swagger_types = { + 'monte_carlo_runs': int, + 'marginal_distributions': List[str] + } + + self.attribute_map = { + 'monte_carlo_runs': 'monteCarloRuns', + 'marginal_distributions': 'marginalDistributions' + } + self._monte_carlo_runs = monte_carlo_runs + self._marginal_distributions = marginal_distributions + + @classmethod + def from_dict(cls, dikt) -> 'ConfigurationMonteCarloSampling': + """Returns the dict as a model + + :param dikt: A dict. + :type: dict + :return: The Configuration_monteCarloSampling of this ConfigurationMonteCarloSampling. # noqa: E501 + :rtype: ConfigurationMonteCarloSampling + """ + return util.deserialize_model(dikt, cls) + + @property + def monte_carlo_runs(self) -> int: + """Gets the monte_carlo_runs of this ConfigurationMonteCarloSampling. + + Number of Monte Carlo iterations. # noqa: E501 + + :return: The monte_carlo_runs of this ConfigurationMonteCarloSampling. + :rtype: int + """ + return self._monte_carlo_runs + + @monte_carlo_runs.setter + def monte_carlo_runs(self, monte_carlo_runs: int): + """Sets the monte_carlo_runs of this ConfigurationMonteCarloSampling. + + Number of Monte Carlo iterations. # noqa: E501 + + :param monte_carlo_runs: The monte_carlo_runs of this ConfigurationMonteCarloSampling. + :type monte_carlo_runs: int + """ + if monte_carlo_runs is None: + raise ValueError("Invalid value for `monte_carlo_runs`, must not be `None`") # noqa: E501 + + self._monte_carlo_runs = monte_carlo_runs + + @property + def marginal_distributions(self) -> List[str]: + """Gets the marginal_distributions of this ConfigurationMonteCarloSampling. + + The marginal distributions describing the indicators. # noqa: E501 + + :return: The marginal_distributions of this ConfigurationMonteCarloSampling. + :rtype: List[str] + """ + return self._marginal_distributions + + @marginal_distributions.setter + def marginal_distributions(self, marginal_distributions: List[str]): + """Sets the marginal_distributions of this ConfigurationMonteCarloSampling. + + The marginal distributions describing the indicators. 
# noqa: E501 + + :param marginal_distributions: The marginal_distributions of this ConfigurationMonteCarloSampling. + :type marginal_distributions: List[str] + """ + allowed_values = ["normal", "lognormal", "exact", "poisson", "uniform"] # noqa: E501 + if not set(marginal_distributions).issubset(set(allowed_values)): + raise ValueError( + "Invalid values for `marginal_distributions` [{0}], must be a subset of [{1}]" # noqa: E501 + .format(", ".join(map(str, set(marginal_distributions) - set(allowed_values))), # noqa: E501 + ", ".join(map(str, allowed_values))) + ) + + self._marginal_distributions = marginal_distributions diff --git a/models/configuration_robustness.py b/models/configuration_robustness.py new file mode 100644 index 0000000..06118d3 --- /dev/null +++ b/models/configuration_robustness.py @@ -0,0 +1,132 @@ +# coding: utf-8 + +from __future__ import absolute_import +from datetime import date, datetime # noqa: F401 + +from typing import List, Dict # noqa: F401 + +from swagger_server.models.base_model_ import Model +from swagger_server import util + + +class ConfigurationRobustness(Model): + """NOTE: This class is auto generated by the swagger code generator program. + + Do not edit the class manually. + """ + def __init__(self, robustness: str=None, on_weights_level: str=None, given_weights: List[float]=None): # noqa: E501 + """ConfigurationRobustness - a model defined in Swagger + + :param robustness: The robustness of this ConfigurationRobustness. # noqa: E501 + :type robustness: str + :param on_weights_level: The on_weights_level of this ConfigurationRobustness. # noqa: E501 + :type on_weights_level: str + :param given_weights: The given_weights of this ConfigurationRobustness. # noqa: E501 + :type given_weights: List[float] + """ + self.swagger_types = { + 'robustness': str, + 'on_weights_level': str, + 'given_weights': List[float] + } + + self.attribute_map = { + 'robustness': 'robustness', + 'on_weights_level': 'onWeightsLevel', + 'given_weights': 'givenWeights' + } + self._robustness = robustness + self._on_weights_level = on_weights_level + self._given_weights = given_weights + + @classmethod + def from_dict(cls, dikt) -> 'ConfigurationRobustness': + """Returns the dict as a model + + :param dikt: A dict. + :type: dict + :return: The Configuration_robustness of this ConfigurationRobustness. # noqa: E501 + :rtype: ConfigurationRobustness + """ + return util.deserialize_model(dikt, cls) + + @property + def robustness(self) -> str: + """Gets the robustness of this ConfigurationRobustness. + + A robustness analysis is performed ['yes'] or not ['no']. # noqa: E501 + + :return: The robustness of this ConfigurationRobustness. + :rtype: str + """ + return self._robustness + + @robustness.setter + def robustness(self, robustness: str): + """Sets the robustness of this ConfigurationRobustness. + + A robustness analysis is performed ['yes'] or not ['no']. # noqa: E501 + + :param robustness: The robustness of this ConfigurationRobustness. + :type robustness: str + """ + allowed_values = ["none", "indicators", "weights"] # noqa: E501 + if robustness not in allowed_values: + raise ValueError( + "Invalid value for `robustness` ({0}), must be one of {1}" + .format(robustness, allowed_values) + ) + + self._robustness = robustness + + @property + def on_weights_level(self) -> str: + """Gets the on_weights_level of this ConfigurationRobustness. + + Where uncertainties are introduced. # noqa: E501 + + :return: The on_weights_level of this ConfigurationRobustness. 
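+            One of "none", "all" or "single", as enforced by `allowed_values`
+            in the setter.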
+ :rtype: str + """ + return self._on_weights_level + + @on_weights_level.setter + def on_weights_level(self, on_weights_level: str): + """Sets the on_weights_level of this ConfigurationRobustness. + + Where uncertainties are introduced. # noqa: E501 + + :param on_weights_level: The on_weights_level of this ConfigurationRobustness. + :type on_weights_level: str + """ + allowed_values = ["none", "all", "single"] # noqa: E501 + if on_weights_level not in allowed_values: + raise ValueError( + "Invalid value for `on_weights_level` ({0}), must be one of {1}" + .format(on_weights_level, allowed_values) + ) + + self._on_weights_level = on_weights_level + + @property + def given_weights(self) -> List[float]: + """Gets the given_weights of this ConfigurationRobustness. + + Values assigned to the weights associated with the indicators. # noqa: E501 + + :return: The given_weights of this ConfigurationRobustness. + :rtype: List[float] + """ + return self._given_weights + + @given_weights.setter + def given_weights(self, given_weights: List[float]): + """Sets the given_weights of this ConfigurationRobustness. + + Values assigned to the weights associated with the indicators. # noqa: E501 + + :param given_weights: The given_weights of this ConfigurationRobustness. + :type given_weights: List[float] + """ + + self._given_weights = given_weights diff --git a/models/configuration_sensitivity.py b/models/configuration_sensitivity.py new file mode 100644 index 0000000..f23c467 --- /dev/null +++ b/models/configuration_sensitivity.py @@ -0,0 +1,138 @@ +# coding: utf-8 + +from __future__ import absolute_import +from datetime import date, datetime # noqa: F401 + +from typing import List, Dict # noqa: F401 + +from swagger_server.models.base_model_ import Model +from swagger_server import util + + +class ConfigurationSensitivity(Model): + """NOTE: This class is auto generated by the swagger code generator program. + + Do not edit the class manually. + """ + def __init__(self, sensitivity_on: str=None, normalization: str=None, aggregation: str=None): # noqa: E501 + """ConfigurationSensitivity - a model defined in Swagger + + :param sensitivity_on: The sensitivity_on of this ConfigurationSensitivity. # noqa: E501 + :type sensitivity_on: str + :param normalization: The normalization of this ConfigurationSensitivity. # noqa: E501 + :type normalization: str + :param aggregation: The aggregation of this ConfigurationSensitivity. # noqa: E501 + :type aggregation: str + """ + self.swagger_types = { + 'sensitivity_on': str, + 'normalization': str, + 'aggregation': str + } + + self.attribute_map = { + 'sensitivity_on': 'sensitivityOn', + 'normalization': 'normalization', + 'aggregation': 'aggregation' + } + self._sensitivity_on = sensitivity_on + self._normalization = normalization + self._aggregation = aggregation + + @classmethod + def from_dict(cls, dikt) -> 'ConfigurationSensitivity': + """Returns the dict as a model + + :param dikt: A dict. + :type: dict + :return: The Configuration_sensitivity of this ConfigurationSensitivity. # noqa: E501 + :rtype: ConfigurationSensitivity + """ + return util.deserialize_model(dikt, cls) + + @property + def sensitivity_on(self) -> str: + """Gets the sensitivity_on of this ConfigurationSensitivity. + + A sensitivity analysis is performed ['yes'] or not ['no']. # noqa: E501 + + :return: The sensitivity_on of this ConfigurationSensitivity. 
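+            Either "yes" or "no", as enforced by `allowed_values` in the
+            setter.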
+ :rtype: str + """ + return self._sensitivity_on + + @sensitivity_on.setter + def sensitivity_on(self, sensitivity_on: str): + """Sets the sensitivity_on of this ConfigurationSensitivity. + + A sensitivity analysis is performed ['yes'] or not ['no']. # noqa: E501 + + :param sensitivity_on: The sensitivity_on of this ConfigurationSensitivity. + :type sensitivity_on: str + """ + allowed_values = ["yes", "no"] # noqa: E501 + if sensitivity_on not in allowed_values: + raise ValueError( + "Invalid value for `sensitivity_on` ({0}), must be one of {1}" + .format(sensitivity_on, allowed_values) + ) + + self._sensitivity_on = sensitivity_on + + @property + def normalization(self) -> str: + """Gets the normalization of this ConfigurationSensitivity. + + The function used for normalizing the indicator values. # noqa: E501 + + :return: The normalization of this ConfigurationSensitivity. + :rtype: str + """ + return self._normalization + + @normalization.setter + def normalization(self, normalization: str): + """Sets the normalization of this ConfigurationSensitivity. + + The function used for normalizing the indicator values. # noqa: E501 + + :param normalization: The normalization of this ConfigurationSensitivity. + :type normalization: str + """ + allowed_values = ["minmax", "standardized", "target", "rank"] # noqa: E501 + if normalization not in allowed_values: + raise ValueError( + "Invalid value for `normalization` ({0}), must be one of {1}" + .format(normalization, allowed_values) + ) + + self._normalization = normalization + + @property + def aggregation(self) -> str: + """Gets the aggregation of this ConfigurationSensitivity. + + The function used for aggregating the indicator values. # noqa: E501 + + :return: The aggregation of this ConfigurationSensitivity. + :rtype: str + """ + return self._aggregation + + @aggregation.setter + def aggregation(self, aggregation: str): + """Sets the aggregation of this ConfigurationSensitivity. + + The function used for aggregating the indicator values. # noqa: E501 + + :param aggregation: The aggregation of this ConfigurationSensitivity. + :type aggregation: str + """ + allowed_values = ["weighted_sum", "geometric", "harmonic", "minimum"] # noqa: E501 + if aggregation not in allowed_values: + raise ValueError( + "Invalid value for `aggregation` ({0}), must be one of {1}" + .format(aggregation, allowed_values) + ) + + self._aggregation = aggregation diff --git a/models/convert_csv_to_json_body.py b/models/convert_csv_to_json_body.py new file mode 100644 index 0000000..63c839d --- /dev/null +++ b/models/convert_csv_to_json_body.py @@ -0,0 +1,96 @@ +# coding: utf-8 + +from __future__ import absolute_import +from datetime import date, datetime # noqa: F401 + +from typing import List, Dict # noqa: F401 + +from swagger_server.models.base_model_ import Model +from swagger_server import util + + +class ConvertCsvToJsonBody(Model): + """NOTE: This class is auto generated by the swagger code generator program. + + Do not edit the class manually. + """ + def __init__(self, file: str=None, column_names: str=None): # noqa: E501 + """ConvertCsvToJsonBody - a model defined in Swagger + + :param file: The file of this ConvertCsvToJsonBody. # noqa: E501 + :type file: str + :param column_names: The column_names of this ConvertCsvToJsonBody. 
# noqa: E501 + :type column_names: str + """ + self.swagger_types = { + 'file': str, + 'column_names': str + } + + self.attribute_map = { + 'file': 'file', + 'column_names': 'columnNames' + } + self._file = file + self._column_names = column_names + + @classmethod + def from_dict(cls, dikt) -> 'ConvertCsvToJsonBody': + """Returns the dict as a model + + :param dikt: A dict. + :type: dict + :return: The convertCsvToJson_body of this ConvertCsvToJsonBody. # noqa: E501 + :rtype: ConvertCsvToJsonBody + """ + return util.deserialize_model(dikt, cls) + + @property + def file(self) -> str: + """Gets the file of this ConvertCsvToJsonBody. + + The file to upload # noqa: E501 + + :return: The file of this ConvertCsvToJsonBody. + :rtype: str + """ + return self._file + + @file.setter + def file(self, file: str): + """Sets the file of this ConvertCsvToJsonBody. + + The file to upload # noqa: E501 + + :param file: The file of this ConvertCsvToJsonBody. + :type file: str + """ + if file is None: + raise ValueError("Invalid value for `file`, must not be `None`") # noqa: E501 + + self._file = file + + @property + def column_names(self) -> str: + """Gets the column_names of this ConvertCsvToJsonBody. + + A list of ordered comma separated column names to send with the file # noqa: E501 + + :return: The column_names of this ConvertCsvToJsonBody. + :rtype: str + """ + return self._column_names + + @column_names.setter + def column_names(self, column_names: str): + """Sets the column_names of this ConvertCsvToJsonBody. + + A list of ordered comma separated column names to send with the file # noqa: E501 + + :param column_names: The column_names of this ConvertCsvToJsonBody. + :type column_names: str + """ + if column_names is None: + raise ValueError("Invalid value for `column_names`, must not be `None`") # noqa: E501 + + self._column_names = column_names diff --git a/models/error.py b/models/error.py new file mode 100644 index 0000000..5f17e07 --- /dev/null +++ b/models/error.py @@ -0,0 +1,96 @@ +# coding: utf-8 + +from __future__ import absolute_import +from datetime import date, datetime # noqa: F401 + +from typing import List, Dict # noqa: F401 + +from swagger_server.models.base_model_ import Model +from swagger_server import util + + +class Error(Model): + """NOTE: This class is auto generated by the swagger code generator program. + + Do not edit the class manually. + """ + def __init__(self, code: int=None, message: str=None): # noqa: E501 + """Error - a model defined in Swagger + + :param code: The code of this Error. # noqa: E501 + :type code: int + :param message: The message of this Error. # noqa: E501 + :type message: str + """ + self.swagger_types = { + 'code': int, + 'message': str + } + + self.attribute_map = { + 'code': 'code', + 'message': 'message' + } + self._code = code + self._message = message + + @classmethod + def from_dict(cls, dikt) -> 'Error': + """Returns the dict as a model + + :param dikt: A dict. + :type: dict + :return: The Error of this Error. # noqa: E501 + :rtype: Error + """ + return util.deserialize_model(dikt, cls) + + @property + def code(self) -> int: + """Gets the code of this Error. + + Error code # noqa: E501 + + :return: The code of this Error. + :rtype: int + """ + return self._code + + @code.setter + def code(self, code: int): + """Sets the code of this Error. + + Error code # noqa: E501 + + :param code: The code of this Error. 
+ :type code: int + """ + if code is None: + raise ValueError("Invalid value for `code`, must not be `None`") # noqa: E501 + + self._code = code + + @property + def message(self) -> str: + """Gets the message of this Error. + + Error description # noqa: E501 + + :return: The message of this Error. + :rtype: str + """ + return self._message + + @message.setter + def message(self, message: str): + """Sets the message of this Error. + + Error description # noqa: E501 + + :param message: The message of this Error. + :type message: str + """ + if message is None: + raise ValueError("Invalid value for `message`, must not be `None`") # noqa: E501 + + self._message = message diff --git a/models/exact_indicator_values.py b/models/exact_indicator_values.py new file mode 100644 index 0000000..8d5f254 --- /dev/null +++ b/models/exact_indicator_values.py @@ -0,0 +1,66 @@ +# coding: utf-8 + +from __future__ import absolute_import +from datetime import date, datetime # noqa: F401 + +from typing import List, Dict # noqa: F401 + +from swagger_server.models.base_model_ import Model +from swagger_server import util + + +class ExactIndicatorValues(Model): + """NOTE: This class is auto generated by the swagger code generator program. + + Do not edit the class manually. + """ + def __init__(self, val: float=None): # noqa: E501 + """ExactIndicatorValues - a model defined in Swagger + + :param val: The val of this ExactIndicatorValues. # noqa: E501 + :type val: float + """ + self.swagger_types = { + 'val': float + } + + self.attribute_map = { + 'val': 'val' + } + self._val = val + + @classmethod + def from_dict(cls, dikt) -> 'ExactIndicatorValues': + """Returns the dict as a model + + :param dikt: A dict. + :type: dict + :return: The ExactIndicatorValues of this ExactIndicatorValues. # noqa: E501 + :rtype: ExactIndicatorValues + """ + return util.deserialize_model(dikt, cls) + + @property + def val(self) -> float: + """Gets the val of this ExactIndicatorValues. + + The exact value of the indicator for a specific alternative. # noqa: E501 + + :return: The val of this ExactIndicatorValues. + :rtype: float + """ + return self._val + + @val.setter + def val(self, val: float): + """Sets the val of this ExactIndicatorValues. + + The exact value of the indicator for a specific alternative. # noqa: E501 + + :param val: The val of this ExactIndicatorValues. + :type val: float + """ + if val is None: + raise ValueError("Invalid value for `val`, must not be `None`") # noqa: E501 + + self._val = val diff --git a/models/inline_response200.py b/models/inline_response200.py new file mode 100644 index 0000000..6ad1651 --- /dev/null +++ b/models/inline_response200.py @@ -0,0 +1,64 @@ +# coding: utf-8 + +from __future__ import absolute_import +from datetime import date, datetime # noqa: F401 + +from typing import List, Dict # noqa: F401 + +from swagger_server.models.base_model_ import Model +from swagger_server import util + + +class InlineResponse200(Model): + """NOTE: This class is auto generated by the swagger code generator program. + + Do not edit the class manually. + """ + def __init__(self, input_matrix: Dict[str, Dict[str, float]]=None): # noqa: E501 + """InlineResponse200 - a model defined in Swagger + + :param input_matrix: The input_matrix of this InlineResponse200. 
# noqa: E501 + :type input_matrix: Dict[str, Dict[str, float]] + """ + self.swagger_types = { + 'input_matrix': Dict[str, Dict[str, float]] + } + + self.attribute_map = { + 'input_matrix': 'inputMatrix' + } + self._input_matrix = input_matrix + + @classmethod + def from_dict(cls, dikt) -> 'InlineResponse200': + """Returns the dict as a model + + :param dikt: A dict. + :type: dict + :return: The inline_response_200 of this InlineResponse200. # noqa: E501 + :rtype: InlineResponse200 + """ + return util.deserialize_model(dikt, cls) + + @property + def input_matrix(self) -> Dict[str, Dict[str, float]]: + """Gets the input_matrix of this InlineResponse200. + + An array where the columns represent the indicators and the rows their values for different alternatives. # noqa: E501 + + :return: The input_matrix of this InlineResponse200. + :rtype: Dict[str, Dict[str, float]] + """ + return self._input_matrix + + @input_matrix.setter + def input_matrix(self, input_matrix: Dict[str, Dict[str, float]]): + """Sets the input_matrix of this InlineResponse200. + + An array where the columns represent the indicators and the rows their values for different alternatives. # noqa: E501 + + :param input_matrix: The input_matrix of this InlineResponse200. + :type input_matrix: Dict[str, Dict[str, float]] + """ + + self._input_matrix = input_matrix diff --git a/models/inline_response2001.py b/models/inline_response2001.py new file mode 100644 index 0000000..001b27b --- /dev/null +++ b/models/inline_response2001.py @@ -0,0 +1,36 @@ +# coding: utf-8 + +from __future__ import absolute_import +from datetime import date, datetime # noqa: F401 + +from typing import List, Dict # noqa: F401 + +from swagger_server.models.base_model_ import Model +from swagger_server import util + + +class InlineResponse2001(Model): + """NOTE: This class is auto generated by the swagger code generator program. + + Do not edit the class manually. + """ + def __init__(self): # noqa: E501 + """InlineResponse2001 - a model defined in Swagger + + """ + self.swagger_types = { + } + + self.attribute_map = { + } + + @classmethod + def from_dict(cls, dikt) -> 'InlineResponse2001': + """Returns the dict as a model + + :param dikt: A dict. + :type: dict + :return: The inline_response_200_1 of this InlineResponse2001. # noqa: E501 + :rtype: InlineResponse2001 + """ + return util.deserialize_model(dikt, cls) diff --git a/models/inline_response2002.py b/models/inline_response2002.py new file mode 100644 index 0000000..e13d1f1 --- /dev/null +++ b/models/inline_response2002.py @@ -0,0 +1,36 @@ +# coding: utf-8 + +from __future__ import absolute_import +from datetime import date, datetime # noqa: F401 + +from typing import List, Dict # noqa: F401 + +from swagger_server.models.base_model_ import Model +from swagger_server import util + + +class InlineResponse2002(Model): + """NOTE: This class is auto generated by the swagger code generator program. + + Do not edit the class manually. + """ + def __init__(self): # noqa: E501 + """InlineResponse2002 - a model defined in Swagger + + """ + self.swagger_types = { + } + + self.attribute_map = { + } + + @classmethod + def from_dict(cls, dikt) -> 'InlineResponse2002': + """Returns the dict as a model + + :param dikt: A dict. + :type: dict + :return: The inline_response_200_2 of this InlineResponse2002. 
# noqa: E501 + :rtype: InlineResponse2002 + """ + return util.deserialize_model(dikt, cls) diff --git a/models/log_normal_indicator_values.py b/models/log_normal_indicator_values.py new file mode 100644 index 0000000..6cf4d06 --- /dev/null +++ b/models/log_normal_indicator_values.py @@ -0,0 +1,96 @@ +# coding: utf-8 + +from __future__ import absolute_import +from datetime import date, datetime # noqa: F401 + +from typing import List, Dict # noqa: F401 + +from swagger_server.models.base_model_ import Model +from swagger_server import util + + +class LogNormalIndicatorValues(Model): + """NOTE: This class is auto generated by the swagger code generator program. + + Do not edit the class manually. + """ + def __init__(self, avg: float=None, std: float=None): # noqa: E501 + """LogNormalIndicatorValues - a model defined in Swagger + + :param avg: The avg of this LogNormalIndicatorValues. # noqa: E501 + :type avg: float + :param std: The std of this LogNormalIndicatorValues. # noqa: E501 + :type std: float + """ + self.swagger_types = { + 'avg': float, + 'std': float + } + + self.attribute_map = { + 'avg': 'avg', + 'std': 'std' + } + self._avg = avg + self._std = std + + @classmethod + def from_dict(cls, dikt) -> 'LogNormalIndicatorValues': + """Returns the dict as a model + + :param dikt: A dict. + :type: dict + :return: The LogNormalIndicatorValues of this LogNormalIndicatorValues. # noqa: E501 + :rtype: LogNormalIndicatorValues + """ + return util.deserialize_model(dikt, cls) + + @property + def avg(self) -> float: + """Gets the avg of this LogNormalIndicatorValues. + + Logarithm of the average value of the indicator estimated for a specific alternative. # noqa: E501 + + :return: The avg of this LogNormalIndicatorValues. + :rtype: float + """ + return self._avg + + @avg.setter + def avg(self, avg: float): + """Sets the avg of this LogNormalIndicatorValues. + + Logarithm of the average value of the indicator estimated for a specific alternative. # noqa: E501 + + :param avg: The avg of this LogNormalIndicatorValues. + :type avg: float + """ + if avg is None: + raise ValueError("Invalid value for `avg`, must not be `None`") # noqa: E501 + + self._avg = avg + + @property + def std(self) -> float: + """Gets the std of this LogNormalIndicatorValues. + + Logarithm of the standard deviation value of the indicator estimated for a specific alternative. # noqa: E501 + + :return: The std of this LogNormalIndicatorValues. + :rtype: float + """ + return self._std + + @std.setter + def std(self, std: float): + """Sets the std of this LogNormalIndicatorValues. + + Logarithm of the standard deviation value of the indicator estimated for a specific alternative. # noqa: E501 + + :param std: The std of this LogNormalIndicatorValues. + :type std: float + """ + if std is None: + raise ValueError("Invalid value for `std`, must not be `None`") # noqa: E501 + + self._std = std diff --git a/models/normal_indicator_values.py b/models/normal_indicator_values.py new file mode 100644 index 0000000..f056da4 --- /dev/null +++ b/models/normal_indicator_values.py @@ -0,0 +1,96 @@ +# coding: utf-8 + +from __future__ import absolute_import +from datetime import date, datetime # noqa: F401 + +from typing import List, Dict # noqa: F401 + +from swagger_server.models.base_model_ import Model +from swagger_server import util + + +class NormalIndicatorValues(Model): + """NOTE: This class is auto generated by the swagger code generator program. + + Do not edit the class manually. 
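+
+    Illustrative payload (hypothetical values):
+
+        NormalIndicatorValues.from_dict({"avg": 30.0, "std": 2.5})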
+ """ + def __init__(self, avg: float=None, std: float=None): # noqa: E501 + """NormalIndicatorValues - a model defined in Swagger + + :param avg: The avg of this NormalIndicatorValues. # noqa: E501 + :type avg: float + :param std: The std of this NormalIndicatorValues. # noqa: E501 + :type std: float + """ + self.swagger_types = { + 'avg': float, + 'std': float + } + + self.attribute_map = { + 'avg': 'avg', + 'std': 'std' + } + self._avg = avg + self._std = std + + @classmethod + def from_dict(cls, dikt) -> 'NormalIndicatorValues': + """Returns the dict as a model + + :param dikt: A dict. + :type: dict + :return: The NormalIndicatorValues of this NormalIndicatorValues. # noqa: E501 + :rtype: NormalIndicatorValues + """ + return util.deserialize_model(dikt, cls) + + @property + def avg(self) -> float: + """Gets the avg of this NormalIndicatorValues. + + Average value of the indicator estimated for a specific alternative. # noqa: E501 + + :return: The avg of this NormalIndicatorValues. + :rtype: float + """ + return self._avg + + @avg.setter + def avg(self, avg: float): + """Sets the avg of this NormalIndicatorValues. + + Average value of the indicator estimated for a specific alternative. # noqa: E501 + + :param avg: The avg of this NormalIndicatorValues. + :type avg: float + """ + if avg is None: + raise ValueError("Invalid value for `avg`, must not be `None`") # noqa: E501 + + self._avg = avg + + @property + def std(self) -> float: + """Gets the std of this NormalIndicatorValues. + + Standard deviation value of the indicator estimated for a specific alternative. # noqa: E501 + + :return: The std of this NormalIndicatorValues. + :rtype: float + """ + return self._std + + @std.setter + def std(self, std: float): + """Sets the std of this NormalIndicatorValues. + + Standard deviation value of the indicator estimated for a specific alternative. # noqa: E501 + + :param std: The std of this NormalIndicatorValues. + :type std: float + """ + if std is None: + raise ValueError("Invalid value for `std`, must not be `None`") # noqa: E501 + + self._std = std diff --git a/models/poisson_indicator_values.py b/models/poisson_indicator_values.py new file mode 100644 index 0000000..d254131 --- /dev/null +++ b/models/poisson_indicator_values.py @@ -0,0 +1,66 @@ +# coding: utf-8 + +from __future__ import absolute_import +from datetime import date, datetime # noqa: F401 + +from typing import List, Dict # noqa: F401 + +from swagger_server.models.base_model_ import Model +from swagger_server import util + + +class PoissonIndicatorValues(Model): + """NOTE: This class is auto generated by the swagger code generator program. + + Do not edit the class manually. + """ + def __init__(self, rate: float=None): # noqa: E501 + """PoissonIndicatorValues - a model defined in Swagger + + :param rate: The rate of this PoissonIndicatorValues. # noqa: E501 + :type rate: float + """ + self.swagger_types = { + 'rate': float + } + + self.attribute_map = { + 'rate': 'rate' + } + self._rate = rate + + @classmethod + def from_dict(cls, dikt) -> 'PoissonIndicatorValues': + """Returns the dict as a model + + :param dikt: A dict. + :type: dict + :return: The PoissonIndicatorValues of this PoissonIndicatorValues. # noqa: E501 + :rtype: PoissonIndicatorValues + """ + return util.deserialize_model(dikt, cls) + + @property + def rate(self) -> float: + """Gets the rate of this PoissonIndicatorValues. + + The average number of events occurring in a fixed interval of time or space for a specific alternative. 
# noqa: E501 + + :return: The rate of this PoissonIndicatorValues. + :rtype: float + """ + return self._rate + + @rate.setter + def rate(self, rate: float): + """Sets the rate of this PoissonIndicatorValues. + + The average number of events occurring in a fixed interval of time or space for a specific alternative. # noqa: E501 + + :param rate: The rate of this PoissonIndicatorValues. + :type rate: float + """ + if rate is None: + raise ValueError("Invalid value for `rate`, must not be `None`") # noqa: E501 + + self._rate = rate diff --git a/models/sensitivity_mcda.py b/models/sensitivity_mcda.py new file mode 100644 index 0000000..d17e5a8 --- /dev/null +++ b/models/sensitivity_mcda.py @@ -0,0 +1,151 @@ +# coding: utf-8 + +from __future__ import absolute_import +from datetime import date, datetime # noqa: F401 + +from typing import List, Dict # noqa: F401 + +from swagger_server.models.base_model_ import Model +from swagger_server.models.sensitivity_ranked_alternatives import SensitivityRankedAlternatives # noqa: F401,E501 +from swagger_server import util + + +class SensitivityMCDA(Model): + """NOTE: This class is auto generated by the swagger code generator program. + + Do not edit the class manually. + """ + def __init__(self, name: str=None, raw_scores: SensitivityRankedAlternatives=None, normalized_scores: SensitivityRankedAlternatives=None, ranks: SensitivityRankedAlternatives=None): # noqa: E501 + """SensitivityMCDA - a model defined in Swagger + + :param name: The name of this SensitivityMCDA. # noqa: E501 + :type name: str + :param raw_scores: The raw_scores of this SensitivityMCDA. # noqa: E501 + :type raw_scores: SensitivityRankedAlternatives + :param normalized_scores: The normalized_scores of this SensitivityMCDA. # noqa: E501 + :type normalized_scores: SensitivityRankedAlternatives + :param ranks: The ranks of this SensitivityMCDA. # noqa: E501 + :type ranks: SensitivityRankedAlternatives + """ + self.swagger_types = { + 'name': str, + 'raw_scores': SensitivityRankedAlternatives, + 'normalized_scores': SensitivityRankedAlternatives, + 'ranks': SensitivityRankedAlternatives + } + + self.attribute_map = { + 'name': 'name', + 'raw_scores': 'rawScores', + 'normalized_scores': 'normalizedScores', + 'ranks': 'ranks' + } + self._name = name + self._raw_scores = raw_scores + self._normalized_scores = normalized_scores + self._ranks = ranks + + @classmethod + def from_dict(cls, dikt) -> 'SensitivityMCDA': + """Returns the dict as a model + + :param dikt: A dict. + :type: dict + :return: The SensitivityMCDA of this SensitivityMCDA. # noqa: E501 + :rtype: SensitivityMCDA + """ + return util.deserialize_model(dikt, cls) + + @property + def name(self) -> str: + """Gets the name of this SensitivityMCDA. + + The name of the alternatives. # noqa: E501 + + :return: The name of this SensitivityMCDA. + :rtype: str + """ + return self._name + + @name.setter + def name(self, name: str): + """Sets the name of this SensitivityMCDA. + + The name of the alternatives. # noqa: E501 + + :param name: The name of this SensitivityMCDA. + :type name: str + """ + if name is None: + raise ValueError("Invalid value for `name`, must not be `None`") # noqa: E501 + + self._name = name + + @property + def raw_scores(self) -> SensitivityRankedAlternatives: + """Gets the raw_scores of this SensitivityMCDA. + + + :return: The raw_scores of this SensitivityMCDA. 
+ :rtype: SensitivityRankedAlternatives + """ + return self._raw_scores + + @raw_scores.setter + def raw_scores(self, raw_scores: SensitivityRankedAlternatives): + """Sets the raw_scores of this SensitivityMCDA. + + + :param raw_scores: The raw_scores of this SensitivityMCDA. + :type raw_scores: SensitivityRankedAlternatives + """ + if raw_scores is None: + raise ValueError("Invalid value for `raw_scores`, must not be `None`") # noqa: E501 + + self._raw_scores = raw_scores + + @property + def normalized_scores(self) -> SensitivityRankedAlternatives: + """Gets the normalized_scores of this SensitivityMCDA. + + + :return: The normalized_scores of this SensitivityMCDA. + :rtype: SensitivityRankedAlternatives + """ + return self._normalized_scores + + @normalized_scores.setter + def normalized_scores(self, normalized_scores: SensitivityRankedAlternatives): + """Sets the normalized_scores of this SensitivityMCDA. + + + :param normalized_scores: The normalized_scores of this SensitivityMCDA. + :type normalized_scores: SensitivityRankedAlternatives + """ + if normalized_scores is None: + raise ValueError("Invalid value for `normalized_scores`, must not be `None`") # noqa: E501 + + self._normalized_scores = normalized_scores + + @property + def ranks(self) -> SensitivityRankedAlternatives: + """Gets the ranks of this SensitivityMCDA. + + + :return: The ranks of this SensitivityMCDA. + :rtype: SensitivityRankedAlternatives + """ + return self._ranks + + @ranks.setter + def ranks(self, ranks: SensitivityRankedAlternatives): + """Sets the ranks of this SensitivityMCDA. + + + :param ranks: The ranks of this SensitivityMCDA. + :type ranks: SensitivityRankedAlternatives + """ + if ranks is None: + raise ValueError("Invalid value for `ranks`, must not be `None`") # noqa: E501 + + self._ranks = ranks diff --git a/models/sensitivity_pro_mcda.py b/models/sensitivity_pro_mcda.py new file mode 100644 index 0000000..a146684 --- /dev/null +++ b/models/sensitivity_pro_mcda.py @@ -0,0 +1,145 @@ +# coding: utf-8 + +from __future__ import absolute_import +from datetime import date, datetime # noqa: F401 + +from typing import List, Dict # noqa: F401 + +from swagger_server.models.base_model_ import Model +from swagger_server.models.sensitivity_ranked_alternatives import SensitivityRankedAlternatives # noqa: F401,E501 +from swagger_server import util + + +class SensitivityProMCDA(Model): + """NOTE: This class is auto generated by the swagger code generator program. + + Do not edit the class manually. + """ + def __init__(self, name: str=None, raw_scores: SensitivityRankedAlternatives=None, avg_scores_std: SensitivityRankedAlternatives=None, normalized_scores: SensitivityRankedAlternatives=None): # noqa: E501 + """SensitivityProMCDA - a model defined in Swagger + + :param name: The name of this SensitivityProMCDA. # noqa: E501 + :type name: str + :param raw_scores: The raw_scores of this SensitivityProMCDA. # noqa: E501 + :type raw_scores: SensitivityRankedAlternatives + :param avg_scores_std: The avg_scores_std of this SensitivityProMCDA. # noqa: E501 + :type avg_scores_std: SensitivityRankedAlternatives + :param normalized_scores: The normalized_scores of this SensitivityProMCDA. 
# noqa: E501 + :type normalized_scores: SensitivityRankedAlternatives + """ + self.swagger_types = { + 'name': str, + 'raw_scores': SensitivityRankedAlternatives, + 'avg_scores_std': SensitivityRankedAlternatives, + 'normalized_scores': SensitivityRankedAlternatives + } + + self.attribute_map = { + 'name': 'name', + 'raw_scores': 'rawScores', + 'avg_scores_std': 'avgScoresStd', + 'normalized_scores': 'normalizedScores' + } + self._name = name + self._raw_scores = raw_scores + self._avg_scores_std = avg_scores_std + self._normalized_scores = normalized_scores + + @classmethod + def from_dict(cls, dikt) -> 'SensitivityProMCDA': + """Returns the dict as a model + + :param dikt: A dict. + :type: dict + :return: The SensitivityProMCDA of this SensitivityProMCDA. # noqa: E501 + :rtype: SensitivityProMCDA + """ + return util.deserialize_model(dikt, cls) + + @property + def name(self) -> str: + """Gets the name of this SensitivityProMCDA. + + The name of the alternatives. # noqa: E501 + + :return: The name of this SensitivityProMCDA. + :rtype: str + """ + return self._name + + @name.setter + def name(self, name: str): + """Sets the name of this SensitivityProMCDA. + + The name of the alternatives. # noqa: E501 + + :param name: The name of this SensitivityProMCDA. + :type name: str + """ + + self._name = name + + @property + def raw_scores(self) -> SensitivityRankedAlternatives: + """Gets the raw_scores of this SensitivityProMCDA. + + + :return: The raw_scores of this SensitivityProMCDA. + :rtype: SensitivityRankedAlternatives + """ + return self._raw_scores + + @raw_scores.setter + def raw_scores(self, raw_scores: SensitivityRankedAlternatives): + """Sets the raw_scores of this SensitivityProMCDA. + + + :param raw_scores: The raw_scores of this SensitivityProMCDA. + :type raw_scores: SensitivityRankedAlternatives + """ + + self._raw_scores = raw_scores + + @property + def avg_scores_std(self) -> SensitivityRankedAlternatives: + """Gets the avg_scores_std of this SensitivityProMCDA. + + + :return: The avg_scores_std of this SensitivityProMCDA. + :rtype: SensitivityRankedAlternatives + """ + return self._avg_scores_std + + @avg_scores_std.setter + def avg_scores_std(self, avg_scores_std: SensitivityRankedAlternatives): + """Sets the avg_scores_std of this SensitivityProMCDA. + + + :param avg_scores_std: The avg_scores_std of this SensitivityProMCDA. + :type avg_scores_std: SensitivityRankedAlternatives + """ + if avg_scores_std is None: + raise ValueError("Invalid value for `avg_scores_std`, must not be `None`") # noqa: E501 + + self._avg_scores_std = avg_scores_std + + @property + def normalized_scores(self) -> SensitivityRankedAlternatives: + """Gets the normalized_scores of this SensitivityProMCDA. + + + :return: The normalized_scores of this SensitivityProMCDA. + :rtype: SensitivityRankedAlternatives + """ + return self._normalized_scores + + @normalized_scores.setter + def normalized_scores(self, normalized_scores: SensitivityRankedAlternatives): + """Sets the normalized_scores of this SensitivityProMCDA. + + + :param normalized_scores: The normalized_scores of this SensitivityProMCDA. 
+ :type normalized_scores: SensitivityRankedAlternatives + """ + + self._normalized_scores = normalized_scores diff --git a/models/sensitivity_ranked_alternatives.py b/models/sensitivity_ranked_alternatives.py new file mode 100644 index 0000000..45d62e5 --- /dev/null +++ b/models/sensitivity_ranked_alternatives.py @@ -0,0 +1,37 @@ +# coding: utf-8 + +from __future__ import absolute_import +from datetime import date, datetime # noqa: F401 + +from typing import List, Dict # noqa: F401 + +from swagger_server.models.base_model_ import Model +from swagger_server.models.sensitivity_ranked_alternatives_inner import SensitivityRankedAlternativesInner # noqa: F401,E501 +from swagger_server import util + + +class SensitivityRankedAlternatives(Model): + """NOTE: This class is auto generated by the swagger code generator program. + + Do not edit the class manually. + """ + def __init__(self): # noqa: E501 + """SensitivityRankedAlternatives - a model defined in Swagger + + """ + self.swagger_types = { + } + + self.attribute_map = { + } + + @classmethod + def from_dict(cls, dikt) -> 'SensitivityRankedAlternatives': + """Returns the dict as a model + + :param dikt: A dict. + :type: dict + :return: The SensitivityRankedAlternatives of this SensitivityRankedAlternatives. # noqa: E501 + :rtype: SensitivityRankedAlternatives + """ + return util.deserialize_model(dikt, cls) diff --git a/models/sensitivity_ranked_alternatives_inner.py b/models/sensitivity_ranked_alternatives_inner.py new file mode 100644 index 0000000..018f747 --- /dev/null +++ b/models/sensitivity_ranked_alternatives_inner.py @@ -0,0 +1,426 @@ +# coding: utf-8 + +from __future__ import absolute_import +from datetime import date, datetime # noqa: F401 + +from typing import List, Dict # noqa: F401 + +from swagger_server.models.base_model_ import Model +from swagger_server import util + + +class SensitivityRankedAlternativesInner(Model): + """NOTE: This class is auto generated by the swagger code generator program. + + Do not edit the class manually. + """ + def __init__(self, ws_minmax: float=None, ws_target: float=None, ws_standardized_any: float=None, ws_rank: float=None, geom_minmax: float=None, geom_target: float=None, geom_standardized: float=None, geom_rank: float=None, harm_minmax: float=None, harm_target: float=None, harm_standardized: float=None, harm_rank: float=None, min_standardized_any: float=None): # noqa: E501 + """SensitivityRankedAlternativesInner - a model defined in Swagger + + :param ws_minmax: The ws_minmax of this SensitivityRankedAlternativesInner. # noqa: E501 + :type ws_minmax: float + :param ws_target: The ws_target of this SensitivityRankedAlternativesInner. # noqa: E501 + :type ws_target: float + :param ws_standardized_any: The ws_standardized_any of this SensitivityRankedAlternativesInner. # noqa: E501 + :type ws_standardized_any: float + :param ws_rank: The ws_rank of this SensitivityRankedAlternativesInner. # noqa: E501 + :type ws_rank: float + :param geom_minmax: The geom_minmax of this SensitivityRankedAlternativesInner. # noqa: E501 + :type geom_minmax: float + :param geom_target: The geom_target of this SensitivityRankedAlternativesInner. # noqa: E501 + :type geom_target: float + :param geom_standardized: The geom_standardized of this SensitivityRankedAlternativesInner. # noqa: E501 + :type geom_standardized: float + :param geom_rank: The geom_rank of this SensitivityRankedAlternativesInner. # noqa: E501 + :type geom_rank: float + :param harm_minmax: The harm_minmax of this SensitivityRankedAlternativesInner. 
# noqa: E501 + :type harm_minmax: float + :param harm_target: The harm_target of this SensitivityRankedAlternativesInner. # noqa: E501 + :type harm_target: float + :param harm_standardized: The harm_standardized of this SensitivityRankedAlternativesInner. # noqa: E501 + :type harm_standardized: float + :param harm_rank: The harm_rank of this SensitivityRankedAlternativesInner. # noqa: E501 + :type harm_rank: float + :param min_standardized_any: The min_standardized_any of this SensitivityRankedAlternativesInner. # noqa: E501 + :type min_standardized_any: float + """ + self.swagger_types = { + 'ws_minmax': float, + 'ws_target': float, + 'ws_standardized_any': float, + 'ws_rank': float, + 'geom_minmax': float, + 'geom_target': float, + 'geom_standardized': float, + 'geom_rank': float, + 'harm_minmax': float, + 'harm_target': float, + 'harm_standardized': float, + 'harm_rank': float, + 'min_standardized_any': float + } + + self.attribute_map = { + 'ws_minmax': 'wsMinmax', + 'ws_target': 'wsTarget', + 'ws_standardized_any': 'wsStandardizedAny', + 'ws_rank': 'wsRank', + 'geom_minmax': 'geomMinmax', + 'geom_target': 'geomTarget', + 'geom_standardized': 'geomStandardized', + 'geom_rank': 'geomRank', + 'harm_minmax': 'harmMinmax', + 'harm_target': 'harmTarget', + 'harm_standardized': 'harmStandardized', + 'harm_rank': 'harmRank', + 'min_standardized_any': 'minStandardizedAny' + } + self._ws_minmax = ws_minmax + self._ws_target = ws_target + self._ws_standardized_any = ws_standardized_any + self._ws_rank = ws_rank + self._geom_minmax = geom_minmax + self._geom_target = geom_target + self._geom_standardized = geom_standardized + self._geom_rank = geom_rank + self._harm_minmax = harm_minmax + self._harm_target = harm_target + self._harm_standardized = harm_standardized + self._harm_rank = harm_rank + self._min_standardized_any = min_standardized_any + + @classmethod + def from_dict(cls, dikt) -> 'SensitivityRankedAlternativesInner': + """Returns the dict as a model + + :param dikt: A dict. + :type: dict + :return: The SensitivityRankedAlternatives_inner of this SensitivityRankedAlternativesInner. # noqa: E501 + :rtype: SensitivityRankedAlternativesInner + """ + return util.deserialize_model(dikt, cls) + + @property + def ws_minmax(self) -> float: + """Gets the ws_minmax of this SensitivityRankedAlternativesInner. + + Minmax normalization is combined with the weighted-sum aggregation function. # noqa: E501 + + :return: The ws_minmax of this SensitivityRankedAlternativesInner. + :rtype: float + """ + return self._ws_minmax + + @ws_minmax.setter + def ws_minmax(self, ws_minmax: float): + """Sets the ws_minmax of this SensitivityRankedAlternativesInner. + + Minmax normalization is combined with the weighted-sum aggregation function. # noqa: E501 + + :param ws_minmax: The ws_minmax of this SensitivityRankedAlternativesInner. + :type ws_minmax: float + """ + if ws_minmax is None: + raise ValueError("Invalid value for `ws_minmax`, must not be `None`") # noqa: E501 + + self._ws_minmax = ws_minmax + + @property + def ws_target(self) -> float: + """Gets the ws_target of this SensitivityRankedAlternativesInner. + + Target normalization is combined with the weighted-sum aggregation function. # noqa: E501 + + :return: The ws_target of this SensitivityRankedAlternativesInner. + :rtype: float + """ + return self._ws_target + + @ws_target.setter + def ws_target(self, ws_target: float): + """Sets the ws_target of this SensitivityRankedAlternativesInner. + + Target normalization is combined with the weighted-sum aggregation function.
# noqa: E501 + + :param ws_target: The ws_target of this SensitivityRankedAlternativesInner. + :type ws_target: float + """ + if ws_target is None: + raise ValueError("Invalid value for `ws_target`, must not be `None`") # noqa: E501 + + self._ws_target = ws_target + + @property + def ws_standardized_any(self) -> float: + """Gets the ws_standardized_any of this SensitivityRankedAlternativesInner. + + Standardized normalization is combined with the weighted-sum aggregation function. # noqa: E501 + + :return: The ws_standardized_any of this SensitivityRankedAlternativesInner. + :rtype: float + """ + return self._ws_standardized_any + + @ws_standardized_any.setter + def ws_standardized_any(self, ws_standardized_any: float): + """Sets the ws_standardized_any of this SensitivityRankedAlternativesInner. + + Standardized normalization is combined with the weighted-sum aggregation function. # noqa: E501 + + :param ws_standardized_any: The ws_standardized_any of this SensitivityRankedAlternativesInner. + :type ws_standardized_any: float + """ + if ws_standardized_any is None: + raise ValueError("Invalid value for `ws_standardized_any`, must not be `None`") # noqa: E501 + + self._ws_standardized_any = ws_standardized_any + + @property + def ws_rank(self) -> float: + """Gets the ws_rank of this SensitivityRankedAlternativesInner. + + Rank normalization is combined with the weighted-sum aggregation function. # noqa: E501 + + :return: The ws_rank of this SensitivityRankedAlternativesInner. + :rtype: float + """ + return self._ws_rank + + @ws_rank.setter + def ws_rank(self, ws_rank: float): + """Sets the ws_rank of this SensitivityRankedAlternativesInner. + + Rank normalization is combined with the weighted-sum aggregation function. # noqa: E501 + + :param ws_rank: The ws_rank of this SensitivityRankedAlternativesInner. + :type ws_rank: float + """ + if ws_rank is None: + raise ValueError("Invalid value for `ws_rank`, must not be `None`") # noqa: E501 + + self._ws_rank = ws_rank + + @property + def geom_minmax(self) -> float: + """Gets the geom_minmax of this SensitivityRankedAlternativesInner. + + Minmax normalization is combined with the geometric aggregation function. # noqa: E501 + + :return: The geom_minmax of this SensitivityRankedAlternativesInner. + :rtype: float + """ + return self._geom_minmax + + @geom_minmax.setter + def geom_minmax(self, geom_minmax: float): + """Sets the geom_minmax of this SensitivityRankedAlternativesInner. + + Minmax normalization is combined with the geometric aggregation function. # noqa: E501 + + :param geom_minmax: The geom_minmax of this SensitivityRankedAlternativesInner. + :type geom_minmax: float + """ + if geom_minmax is None: + raise ValueError("Invalid value for `geom_minmax`, must not be `None`") # noqa: E501 + + self._geom_minmax = geom_minmax + + @property + def geom_target(self) -> float: + """Gets the geom_target of this SensitivityRankedAlternativesInner. + + Target normalization is combined with the geometric aggregation function. # noqa: E501 + + :return: The geom_target of this SensitivityRankedAlternativesInner. + :rtype: float + """ + return self._geom_target + + @geom_target.setter + def geom_target(self, geom_target: float): + """Sets the geom_target of this SensitivityRankedAlternativesInner. + + Target normalization is combined with the geometric aggregation function. # noqa: E501 + + :param geom_target: The geom_target of this SensitivityRankedAlternativesInner.
+ :type geom_target: float + """ + if geom_target is None: + raise ValueError("Invalid value for `geom_target`, must not be `None`") # noqa: E501 + + self._geom_target = geom_target + + @property + def geom_standardized(self) -> float: + """Gets the geom_standardized of this SensitivityRankedAlternativesInner. + + Standardized normalization is combined with the geometric aggregation function. # noqa: E501 + + :return: The geom_standardized of this SensitivityRankedAlternativesInner. + :rtype: float + """ + return self._geom_standardized + + @geom_standardized.setter + def geom_standardized(self, geom_standardized: float): + """Sets the geom_standardized of this SensitivityRankedAlternativesInner. + + Standardized normalization is combined with the geometric aggregation function. # noqa: E501 + + :param geom_standardized: The geom_standardized of this SensitivityRankedAlternativesInner. + :type geom_standardized: float + """ + if geom_standardized is None: + raise ValueError("Invalid value for `geom_standardized`, must not be `None`") # noqa: E501 + + self._geom_standardized = geom_standardized + + @property + def geom_rank(self) -> float: + """Gets the geom_rank of this SensitivityRankedAlternativesInner. + + Rank normalization is combined with the geometric aggregation function. # noqa: E501 + + :return: The geom_rank of this SensitivityRankedAlternativesInner. + :rtype: float + """ + return self._geom_rank + + @geom_rank.setter + def geom_rank(self, geom_rank: float): + """Sets the geom_rank of this SensitivityRankedAlternativesInner. + + Rank normalization is combined with the geometric aggregation function. # noqa: E501 + + :param geom_rank: The geom_rank of this SensitivityRankedAlternativesInner. + :type geom_rank: float + """ + if geom_rank is None: + raise ValueError("Invalid value for `geom_rank`, must not be `None`") # noqa: E501 + + self._geom_rank = geom_rank + + @property + def harm_minmax(self) -> float: + """Gets the harm_minmax of this SensitivityRankedAlternativesInner. + + Minmax normalization is combined with the harmonic aggregation function. # noqa: E501 + + :return: The harm_minmax of this SensitivityRankedAlternativesInner. + :rtype: float + """ + return self._harm_minmax + + @harm_minmax.setter + def harm_minmax(self, harm_minmax: float): + """Sets the harm_minmax of this SensitivityRankedAlternativesInner. + + Minmax normalization is combined with the harmonic aggregation function. # noqa: E501 + + :param harm_minmax: The harm_minmax of this SensitivityRankedAlternativesInner. + :type harm_minmax: float + """ + if harm_minmax is None: + raise ValueError("Invalid value for `harm_minmax`, must not be `None`") # noqa: E501 + + self._harm_minmax = harm_minmax + + @property + def harm_target(self) -> float: + """Gets the harm_target of this SensitivityRankedAlternativesInner. + + Target normalization is combined with the harmonic aggregation function. # noqa: E501 + + :return: The harm_target of this SensitivityRankedAlternativesInner. + :rtype: float + """ + return self._harm_target + + @harm_target.setter + def harm_target(self, harm_target: float): + """Sets the harm_target of this SensitivityRankedAlternativesInner. + + Target normalization is combined with the harmonic aggregation function. # noqa: E501 + + :param harm_target: The harm_target of this SensitivityRankedAlternativesInner.
+ :type harm_target: float + """ + if harm_target is None: + raise ValueError("Invalid value for `harm_target`, must not be `None`") # noqa: E501 + + self._harm_target = harm_target + + @property + def harm_standardized(self) -> float: + """Gets the harm_standardized of this SensitivityRankedAlternativesInner. + + Standardized normalization is combined with the harmonic aggregation function. # noqa: E501 + + :return: The harm_standardized of this SensitivityRankedAlternativesInner. + :rtype: float + """ + return self._harm_standardized + + @harm_standardized.setter + def harm_standardized(self, harm_standardized: float): + """Sets the harm_standardized of this SensitivityRankedAlternativesInner. + + Standardized normalization is combined with the harmonic aggregation function. # noqa: E501 + + :param harm_standardized: The harm_standardized of this SensitivityRankedAlternativesInner. + :type harm_standardized: float + """ + if harm_standardized is None: + raise ValueError("Invalid value for `harm_standardized`, must not be `None`") # noqa: E501 + + self._harm_standardized = harm_standardized + + @property + def harm_rank(self) -> float: + """Gets the harm_rank of this SensitivityRankedAlternativesInner. + + Rank normalization is combined with the harmonic aggregation function. # noqa: E501 + + :return: The harm_rank of this SensitivityRankedAlternativesInner. + :rtype: float + """ + return self._harm_rank + + @harm_rank.setter + def harm_rank(self, harm_rank: float): + """Sets the harm_rank of this SensitivityRankedAlternativesInner. + + Rank normalization is combined with the harmonic aggregation function. # noqa: E501 + + :param harm_rank: The harm_rank of this SensitivityRankedAlternativesInner. + :type harm_rank: float + """ + if harm_rank is None: + raise ValueError("Invalid value for `harm_rank`, must not be `None`") # noqa: E501 + + self._harm_rank = harm_rank + + @property + def min_standardized_any(self) -> float: + """Gets the min_standardized_any of this SensitivityRankedAlternativesInner. + + Standardized normalization is combined with the minimum aggregation function. # noqa: E501 + + :return: The min_standardized_any of this SensitivityRankedAlternativesInner. + :rtype: float + """ + return self._min_standardized_any + + @min_standardized_any.setter + def min_standardized_any(self, min_standardized_any: float): + """Sets the min_standardized_any of this SensitivityRankedAlternativesInner. + + Standardized normalization is combined with the minimum aggregation function. # noqa: E501 + + :param min_standardized_any: The min_standardized_any of this SensitivityRankedAlternativesInner. + :type min_standardized_any: float + """ + if min_standardized_any is None: + raise ValueError("Invalid value for `min_standardized_any`, must not be `None`") # noqa: E501 + + self._min_standardized_any = min_standardized_any diff --git a/models/simple_mcda.py b/models/simple_mcda.py new file mode 100644 index 0000000..0cb2f45 --- /dev/null +++ b/models/simple_mcda.py @@ -0,0 +1,126 @@ +# coding: utf-8 + +from __future__ import absolute_import +from datetime import date, datetime # noqa: F401 + +from typing import List, Dict # noqa: F401 + +from swagger_server.models.base_model_ import Model +from swagger_server import util + + +class SimpleMCDA(Model): + """NOTE: This class is auto generated by the swagger code generator program. + + Do not edit the class manually.
+ """ + def __init__(self, raw_scores: Dict[str, float]=None, normalized_scores: Dict[str, float]=None, ranks: Dict[str, float]=None): # noqa: E501 + """SimpleMCDA - a model defined in Swagger + + :param raw_scores: The raw_scores of this SimpleMCDA. # noqa: E501 + :type raw_scores: Dict[str, float] + :param normalized_scores: The normalized_scores of this SimpleMCDA. # noqa: E501 + :type normalized_scores: Dict[str, float] + :param ranks: The ranks of this SimpleMCDA. # noqa: E501 + :type ranks: Dict[str, float] + """ + self.swagger_types = { + 'raw_scores': Dict[str, float], + 'normalized_scores': Dict[str, float], + 'ranks': Dict[str, float] + } + + self.attribute_map = { + 'raw_scores': 'rawScores', + 'normalized_scores': 'normalizedScores', + 'ranks': 'ranks' + } + self._raw_scores = raw_scores + self._normalized_scores = normalized_scores + self._ranks = ranks + + @classmethod + def from_dict(cls, dikt) -> 'SimpleMCDA': + """Returns the dict as a model + + :param dikt: A dict. + :type: dict + :return: The SimpleMCDA of this SimpleMCDA. # noqa: E501 + :rtype: SimpleMCDA + """ + return util.deserialize_model(dikt, cls) + + @property + def raw_scores(self) -> Dict[str, float]: + """Gets the raw_scores of this SimpleMCDA. + + The scores generated via MCDA and linked to the alternatives. # noqa: E501 + + :return: The raw_scores of this SimpleMCDA. + :rtype: Dict[str, float] + """ + return self._raw_scores + + @raw_scores.setter + def raw_scores(self, raw_scores: Dict[str, float]): + """Sets the raw_scores of this SimpleMCDA. + + The scores generated via MCDA and linked to the alternatives. # noqa: E501 + + :param raw_scores: The raw_scores of this SimpleMCDA. + :type raw_scores: Dict[str, float] + """ + if raw_scores is None: + raise ValueError("Invalid value for `raw_scores`, must not be `None`") # noqa: E501 + + self._raw_scores = raw_scores + + @property + def normalized_scores(self) -> Dict[str, float]: + """Gets the normalized_scores of this SimpleMCDA. + + The scores generated via MCDA and linked to the alternatives normalized between 0 and 1. # noqa: E501 + + :return: The normalized_scores of this SimpleMCDA. + :rtype: Dict[str, float] + """ + return self._normalized_scores + + @normalized_scores.setter + def normalized_scores(self, normalized_scores: Dict[str, float]): + """Sets the normalized_scores of this SimpleMCDA. + + The scores generated via MCDA and linked to the alternatives normalized between 0 and 1. # noqa: E501 + + :param normalized_scores: The normalized_scores of this SimpleMCDA. + :type normalized_scores: Dict[str, float] + """ + if normalized_scores is None: + raise ValueError("Invalid value for `normalized_scores`, must not be `None`") # noqa: E501 + + self._normalized_scores = normalized_scores + + @property + def ranks(self) -> Dict[str, float]: + """Gets the ranks of this SimpleMCDA. + + The ranks generated via MCDA and linked to the alternatives. # noqa: E501 + + :return: The ranks of this SimpleMCDA. + :rtype: Dict[str, float] + """ + return self._ranks + + @ranks.setter + def ranks(self, ranks: Dict[str, float]): + """Sets the ranks of this SimpleMCDA. + + The ranks generated via MCDA and linked to the alternatives. # noqa: E501 + + :param ranks: The ranks of this SimpleMCDA. 
+ :type ranks: Dict[str, float] + """ + if ranks is None: + raise ValueError("Invalid value for `ranks`, must not be `None`") # noqa: E501 + + self._ranks = ranks diff --git a/models/simple_pro_mcda.py b/models/simple_pro_mcda.py new file mode 100644 index 0000000..ca0d9c5 --- /dev/null +++ b/models/simple_pro_mcda.py @@ -0,0 +1,126 @@ +# coding: utf-8 + +from __future__ import absolute_import +from datetime import date, datetime # noqa: F401 + +from typing import List, Dict # noqa: F401 + +from swagger_server.models.base_model_ import Model +from swagger_server import util + + +class SimpleProMCDA(Model): + """NOTE: This class is auto generated by the swagger code generator program. + + Do not edit the class manually. + """ + def __init__(self, raw_scores_averages: Dict[str, float]=None, raw_scores_standard_deviations: Dict[str, float]=None, normalized_scores_averages: Dict[str, float]=None): # noqa: E501 + """SimpleProMCDA - a model defined in Swagger + + :param raw_scores_averages: The raw_scores_averages of this SimpleProMCDA. # noqa: E501 + :type raw_scores_averages: Dict[str, float] + :param raw_scores_standard_deviations: The raw_scores_standard_deviations of this SimpleProMCDA. # noqa: E501 + :type raw_scores_standard_deviations: Dict[str, float] + :param normalized_scores_averages: The normalized_scores_averages of this SimpleProMCDA. # noqa: E501 + :type normalized_scores_averages: Dict[str, float] + """ + self.swagger_types = { + 'raw_scores_averages': Dict[str, float], + 'raw_scores_standard_deviations': Dict[str, float], + 'normalized_scores_averages': Dict[str, float] + } + + self.attribute_map = { + 'raw_scores_averages': 'rawScoresAverages', + 'raw_scores_standard_deviations': 'rawScoresStandardDeviations', + 'normalized_scores_averages': 'normalizedScoresAverages' + } + self._raw_scores_averages = raw_scores_averages + self._raw_scores_standard_deviations = raw_scores_standard_deviations + self._normalized_scores_averages = normalized_scores_averages + + @classmethod + def from_dict(cls, dikt) -> 'SimpleProMCDA': + """Returns the dict as a model + + :param dikt: A dict. + :type: dict + :return: The SimpleProMCDA of this SimpleProMCDA. # noqa: E501 + :rtype: SimpleProMCDA + """ + return util.deserialize_model(dikt, cls) + + @property + def raw_scores_averages(self) -> Dict[str, float]: + """Gets the raw_scores_averages of this SimpleProMCDA. + + The averages of the scores generated via ProMCDA and linked to the alternatives. # noqa: E501 + + :return: The raw_scores_averages of this SimpleProMCDA. + :rtype: Dict[str, float] + """ + return self._raw_scores_averages + + @raw_scores_averages.setter + def raw_scores_averages(self, raw_scores_averages: Dict[str, float]): + """Sets the raw_scores_averages of this SimpleProMCDA. + + The averages of the scores generated via ProMCDA and linked to the alternatives. # noqa: E501 + + :param raw_scores_averages: The raw_scores_averages of this SimpleProMCDA. + :type raw_scores_averages: Dict[str, float] + """ + if raw_scores_averages is None: + raise ValueError("Invalid value for `raw_scores_averages`, must not be `None`") # noqa: E501 + + self._raw_scores_averages = raw_scores_averages + + @property + def raw_scores_standard_deviations(self) -> Dict[str, float]: + """Gets the raw_scores_standard_deviations of this SimpleProMCDA. + + The standard deviations of the scores generated via ProMCDA and linked to the alternatives. # noqa: E501 + + :return: The raw_scores_standard_deviations of this SimpleProMCDA.
+ :rtype: Dict[str, float] + """ + return self._raw_scores_standard_deviations + + @raw_scores_standard_deviations.setter + def raw_scores_standard_deviations(self, raw_scores_standard_deviations: Dict[str, float]): + """Sets the raw_scores_standard_deviations of this SimpleProMCDA. + + The standard deviations of the scores generated via ProMCDA and linked to the alternatives. # noqa: E501 + + :param raw_scores_standard_deviations: The raw_scores_standard_deviations of this SimpleProMCDA. + :type raw_scores_standard_deviations: Dict[str, float] + """ + if raw_scores_standard_deviations is None: + raise ValueError("Invalid value for `raw_scores_standard_deviations`, must not be `None`") # noqa: E501 + + self._raw_scores_standard_deviations = raw_scores_standard_deviations + + @property + def normalized_scores_averages(self) -> Dict[str, float]: + """Gets the normalized_scores_averages of this SimpleProMCDA. + + The averages of the scores generated via ProMCDA and linked to the alternatives normalized between 0 and 1. # noqa: E501 + + :return: The normalized_scores_averages of this SimpleProMCDA. + :rtype: Dict[str, float] + """ + return self._normalized_scores_averages + + @normalized_scores_averages.setter + def normalized_scores_averages(self, normalized_scores_averages: Dict[str, float]): + """Sets the normalized_scores_averages of this SimpleProMCDA. + + The averages of the scores generated via ProMCDA and linked to the alternatives normalized between 0 and 1. # noqa: E501 + + :param normalized_scores_averages: The normalized_scores_averages of this SimpleProMCDA. + :type normalized_scores_averages: Dict[str, float] + """ + if normalized_scores_averages is None: + raise ValueError("Invalid value for `normalized_scores_averages`, must not be `None`") # noqa: E501 + + self._normalized_scores_averages = normalized_scores_averages diff --git a/models/uniform_indicator_values.py b/models/uniform_indicator_values.py new file mode 100644 index 0000000..3d2f9e4 --- /dev/null +++ b/models/uniform_indicator_values.py @@ -0,0 +1,96 @@ +# coding: utf-8 + +from __future__ import absolute_import +from datetime import date, datetime # noqa: F401 + +from typing import List, Dict # noqa: F401 + +from swagger_server.models.base_model_ import Model +from swagger_server import util + + +class UniformIndicatorValues(Model): + """NOTE: This class is auto generated by the swagger code generator program. + + Do not edit the class manually. + """ + def __init__(self, min: float=None, max: float=None): # noqa: E501 + """UniformIndicatorValues - a model defined in Swagger + + :param min: The min of this UniformIndicatorValues. # noqa: E501 + :type min: float + :param max: The max of this UniformIndicatorValues. # noqa: E501 + :type max: float + """ + self.swagger_types = { + 'min': float, + 'max': float + } + + self.attribute_map = { + 'min': 'min', + 'max': 'max' + } + self._min = min + self._max = max + + @classmethod + def from_dict(cls, dikt) -> 'UniformIndicatorValues': + """Returns the dict as a model + + :param dikt: A dict. + :type: dict + :return: The UniformIndicatorValues of this UniformIndicatorValues. # noqa: E501 + :rtype: UniformIndicatorValues + """ + return util.deserialize_model(dikt, cls) + + @property + def min(self) -> float: + """Gets the min of this UniformIndicatorValues. + + The minimum value of the uniform distribution of the indicator estimated for a specific alternative. # noqa: E501 + + :return: The min of this UniformIndicatorValues.
+ :rtype: float + """ + return self._min + + @min.setter + def min(self, min: float): + """Sets the min of this UniformIndicatorValues. + + The minimum value of the uniform distribution of the indicator estimated for a specific alternative. # noqa: E501 + + :param min: The min of this UniformIndicatorValues. + :type min: float + """ + if min is None: + raise ValueError("Invalid value for `min`, must not be `None`") # noqa: E501 + + self._min = min + + @property + def max(self) -> float: + """Gets the max of this UniformIndicatorValues. + + The maximum value of the uniform distribution of the indicator estimated for a specific alternative. # noqa: E501 + + :return: The max of this UniformIndicatorValues. + :rtype: float + """ + return self._max + + @max.setter + def max(self, max: float): + """Sets the max of this UniformIndicatorValues. + + The maximum value of the uniform distribution of the indicator estimated for a specific alternative. # noqa: E501 + + :param max: The max of this UniformIndicatorValues. + :type max: float + """ + if max is None: + raise ValueError("Invalid value for `max`, must not be `None`") # noqa: E501 + + self._max = max diff --git a/tests/unit_tests/test_aggregation.py b/tests/unit_tests/test_aggregation.py index 154af15..7dda30e 100644 --- a/tests/unit_tests/test_aggregation.py +++ b/tests/unit_tests/test_aggregation.py @@ -3,8 +3,8 @@ from pandas.testing import assert_series_equal -from mcda.utils.utils_for_main import * -from mcda.mcda_functions.aggregation import Aggregation +from ProMCDA.mcda.utils.utils_for_main import * +from ProMCDA.mcda.mcda_functions.aggregation import Aggregation current_directory = os.path.dirname(os.path.abspath(__file__)) resources_directory = os.path.join(current_directory, '..', 'resources') diff --git a/tests/unit_tests/test_config.py b/tests/unit_tests/test_config.py index 5df4830..6fc6dfd 100644 --- a/tests/unit_tests/test_config.py +++ b/tests/unit_tests/test_config.py @@ -1,7 +1,7 @@ import unittest import pytest -from mcda.configuration.config import Config +from ProMCDA.mcda.configuration.config import Config class TestConfig(unittest.TestCase): diff --git a/tests/unit_tests/test_mcda_run.py b/tests/unit_tests/test_mcda_run.py index 44f2b95..408b173 100644 --- a/tests/unit_tests/test_mcda_run.py +++ b/tests/unit_tests/test_mcda_run.py @@ -5,7 +5,7 @@ from unittest.mock import patch -from mcda.mcda_run import main +from ProMCDA.mcda.mcda_run import main tests_directory = os.path.abspath(os.path.join(os.path.dirname(__file__), "..")) # Get the tests root directory @@ -63,7 +63,7 @@ def get_incorrect_config_2(): "monte_carlo_runs": 0, "num_cores": 4, "random_seed": 42, - "marginal_distribution_for_each_indicator": ["norm", "exact", "lnorm", "exact", "poisson", "exact"]}, + "marginal_distribution_for_each_indicator": ["norm", "exact", "lognormal", "exact", "poisson", "exact"]}, "output_directory_path": "/path/to/output" } @@ -115,7 +115,7 @@ def get_correct_config(): "num_cores": 1, "random_seed": 42, "marginal_distribution_for_each_indicator": ['uniform', 'exact', 'normal', 'normal', 'exact', - 'lnorm']}, + 'lognormal']}, "output_directory_path": "/path/to/output" } @@ -123,12 +123,12 @@ def setUp(self): # Set up logger in the test self.logger = logging.getLogger(__name__) - @patch('mcda.utils.utils_for_main.check_path_exists', side_effect=None) - @patch('mcda.utils.utils_for_main.save_config', side_effect=None) - @patch('mcda.utils.utils_for_main._plot_and_save_charts', side_effect=None) - 
@patch('mcda.utils.utils_for_main._save_output_files', side_effect=None) - @patch('mcda.utils.utils_for_main.save_df', side_effect=None) - @patch('mcda.utils.utils_for_plotting.save_figure', side_effect=None) + @patch('ProMCDA.mcda.utils.utils_for_main.check_path_exists', side_effect=None) + @patch('ProMCDA.mcda.utils.utils_for_main.save_config', side_effect=None) + @patch('ProMCDA.mcda.utils.utils_for_main._plot_and_save_charts', side_effect=None) + @patch('ProMCDA.mcda.utils.utils_for_main._save_output_files', side_effect=None) + @patch('ProMCDA.mcda.utils.utils_for_main.save_df', side_effect=None) + @patch('ProMCDA.mcda.utils.utils_for_plotting.save_figure', side_effect=None) def test_main_continue(self, mock_check_path_exists, mock_save_config, mock_plot_and_save_charts, diff --git a/tests/unit_tests/test_mcda_with_robustness.py b/tests/unit_tests/test_mcda_with_robustness.py index c91ec7a..9c34fc3 100644 --- a/tests/unit_tests/test_mcda_with_robustness.py +++ b/tests/unit_tests/test_mcda_with_robustness.py @@ -1,9 +1,13 @@ +import tempfile + import pandas as pd import numpy as np import unittest -from mcda.mcda_with_robustness import MCDAWithRobustness -from mcda.configuration.config import Config +from ProMCDA.mcda import mcda_run +from ProMCDA.mcda.mcda_with_robustness import MCDAWithRobustness +from ProMCDA.mcda.configuration.config import Config +from ProMCDA.models.configuration import Configuration class TestMCDA_with_robustness(unittest.TestCase): @@ -100,7 +104,7 @@ def get_list_random_input_matrices() -> list[pd.DataFrame]: config = TestMCDA_with_robustness.get_test_config() config = Config(config) mcda_with_robustness = MCDAWithRobustness(config, input_matrix, is_exact_pdf_mask, is_poisson_pdf_mask) - output_list = mcda_with_robustness.convert_list(out_list) + output_list = MCDAWithRobustness.convert_list(out_list) return output_list @@ -127,7 +131,17 @@ def test_repeat_series_to_create_df(self): def test_convert_list(self): # Given input_matrix = self.get_input_matrix() + + temp_path = None + with tempfile.NamedTemporaryFile(delete=False, suffix='.csv') as tmp_file: + temp_path = tmp_file.name + + # Step 2: Store the DataFrame to the temporary file + input_matrix.to_csv(temp_path, index=True, columns=input_matrix.columns) + config = TestMCDA_with_robustness.get_test_config() + config["input_matrix_path"] = temp_path + config_based_on_model = Configuration.from_dict(mcda_run.config_dict_to_configuration_model(config)) input_list = TestMCDA_with_robustness.get_input_list() expected_output_list = TestMCDA_with_robustness.get_expected_out_list() is_exact_pdf_mask = (1, 0, 0, 0) @@ -135,7 +149,7 @@ def test_convert_list(self): # When config = Config(config) - mcda_with_robustness = MCDAWithRobustness(config, input_matrix, is_exact_pdf_mask, is_poisson_pdf_mask) + mcda_with_robustness = MCDAWithRobustness(config_based_on_model, input_matrix, is_exact_pdf_mask, is_poisson_pdf_mask) output_list = mcda_with_robustness.convert_list(input_list) # Then @@ -148,17 +162,26 @@ def test_convert_list(self): def test_create_n_randomly_sampled_matrices(self): # Given input_matrix = self.get_input_matrix() - input_matrix_rescale = self.get_input_matrix_rescale() + temp_path = None + with tempfile.NamedTemporaryFile(delete=False, suffix='.csv') as tmp_file: + temp_path = tmp_file.name + + # Step 2: Store the DataFrame to the temporary file + input_matrix.to_csv(temp_path, index=True, columns=input_matrix.columns) + config = TestMCDA_with_robustness.get_test_config() + 
config["input_matrix_path"] = temp_path + input_matrix_rescale = self.get_input_matrix_rescale() + config_based_on_model = Configuration.from_dict(mcda_run.config_dict_to_configuration_model(config)) is_exact_pdf_mask = (1, 0, 0, 0) is_poisson_pdf_mask = (0, 0, 0, 1) # When - config = Config(config) - mcda_with_robustness = MCDAWithRobustness(config, input_matrix, + config_based_on_model = Configuration.from_dict(mcda_run.config_dict_to_configuration_model(config)) + mcda_with_robustness = MCDAWithRobustness(config_based_on_model, input_matrix, is_exact_pdf_mask, is_poisson_pdf_mask) n_random_matrices = mcda_with_robustness.create_n_randomly_sampled_matrices() - mcda_with_robustness_rescale = MCDAWithRobustness(config, input_matrix_rescale, + mcda_with_robustness_rescale = MCDAWithRobustness(config_based_on_model, input_matrix_rescale, is_exact_pdf_mask, is_poisson_pdf_mask) n_random_matrices_rescale = mcda_with_robustness_rescale.create_n_randomly_sampled_matrices() @@ -166,7 +189,7 @@ def test_create_n_randomly_sampled_matrices(self): # Then assert isinstance(n_random_matrices, list) - assert len(n_random_matrices) == config.monte_carlo_sampling["monte_carlo_runs"] + assert len(n_random_matrices) == config_based_on_model.monte_carlo_sampling.monte_carlo_runs for df1, df2 in zip(n_random_matrices, exp_n_random_matrices): assert df1.shape == (4, 4) assert df1.values.all() == df2.values.all() diff --git a/tests/unit_tests/test_mcda_without_robustness.py b/tests/unit_tests/test_mcda_without_robustness.py index 687ab22..c86494e 100644 --- a/tests/unit_tests/test_mcda_without_robustness.py +++ b/tests/unit_tests/test_mcda_without_robustness.py @@ -1,14 +1,16 @@ import os +import tempfile import unittest import pandas as pd from unittest import TestCase - -from mcda.mcda_without_robustness import MCDAWithoutRobustness -from mcda.configuration.config import Config -from mcda.mcda_functions.aggregation import Aggregation -import mcda.utils.utils_for_main as utils_for_main -import mcda.utils.utils_for_parallelization as utils_for_parallelization +from ProMCDA.mcda import mcda_run +from ProMCDA.mcda.mcda_without_robustness import MCDAWithoutRobustness +from ProMCDA.mcda.configuration.config import Config +from ProMCDA.mcda.mcda_functions.aggregation import Aggregation +import ProMCDA.mcda.utils.utils_for_main as utils_for_main +import ProMCDA.mcda.utils.utils_for_parallelization as utils_for_parallelization +from ProMCDA.models.configuration import Configuration current_directory = os.path.dirname(os.path.abspath(__file__)) resources_directory = os.path.join(current_directory, '..', 'resources') @@ -124,11 +126,19 @@ def get_list_of_df(): def test_normalize_indicators(self): # Given config_general = TestMCDA_without_robustness.get_test_config() - config_general = Config(config_general) input_matrix = TestMCDA_without_robustness.get_input_matrix() + temp_path = None + with tempfile.NamedTemporaryFile(delete=False, suffix='.csv') as tmp_file: + temp_path = tmp_file.name + + # Step 2: Store the DataFrame to the temporary file + input_matrix.to_csv(temp_path, index=True, columns=input_matrix.columns) + config_general["input_matrix_path"] = temp_path + config_general = Configuration.from_dict(mcda_run.config_dict_to_configuration_model(config_general)) config_simple_mcda = TestMCDA_without_robustness.get_test_config_simple_mcda() - config_simple_mcda = Config(config_simple_mcda) + config_simple_mcda["input_matrix_path"] = temp_path + config_simple_mcda = 
Configuration.from_dict(mcda_run.config_dict_to_configuration_model(config_simple_mcda)) # When mcda_no_uncert_general = MCDAWithoutRobustness(config_general, input_matrix) @@ -163,25 +173,31 @@ def test_normalize_indicators(self): def test_aggregate_indicators(self): # Given config = TestMCDA_without_robustness.get_test_config() - config = Config(config) input_matrix = TestMCDA_without_robustness.get_input_matrix() - + temp_path = None + with tempfile.NamedTemporaryFile(delete=False, suffix='.csv') as tmp_file: + temp_path = tmp_file.name + + # Step 2: Store the DataFrame to the temporary file + input_matrix.to_csv(temp_path, index=True, columns=input_matrix.columns) + config["input_matrix_path"] = temp_path + config = Configuration.from_dict(mcda_run.config_dict_to_configuration_model(config)) config_simple_mcda = TestMCDA_without_robustness.get_test_config_simple_mcda() - config_simple_mcda = Config(config_simple_mcda) + config_simple_mcda["input_matrix_path"] = temp_path + config_simple_mcda = Configuration.from_dict(mcda_run.config_dict_to_configuration_model(config_simple_mcda)) # When - weights = config.robustness["given_weights"] + weights = config.robustness.given_weights mcda_no_uncert = MCDAWithoutRobustness(config, input_matrix) normalized_indicators = mcda_no_uncert.normalize_indicators() mcda_no_uncert_simple_mcdaa = MCDAWithoutRobustness(config_simple_mcda, input_matrix) normalized_indicators_simple_mcda = mcda_no_uncert_simple_mcdaa.normalize_indicators( - config_simple_mcda.sensitivity['normalization']) + config_simple_mcda.sensitivity.normalization) res = mcda_no_uncert.aggregate_indicators(normalized_indicators, weights) res_simple_mcda = mcda_no_uncert_simple_mcdaa.aggregate_indicators(normalized_indicators_simple_mcda, weights, - config_simple_mcda.sensitivity[ - 'aggregation']) + config_simple_mcda.sensitivity.aggregation) col_names = ['ws-minmax_01', 'ws-target_01', 'ws-standardized_any', 'ws-rank', 'geom-minmax_without_zero', 'geom-target_without_zero', 'geom-standardized_without_zero', @@ -204,24 +220,34 @@ def test_aggregate_indicators(self): def test_aggregate_indicators_in_parallel(self): # Given config = TestMCDA_without_robustness.get_test_config_randomness() - config = Config(config) input_matrix = TestMCDA_without_robustness.get_input_matrix() - weights = config.robustness["given_weights"] + temp_path = None + with tempfile.NamedTemporaryFile(delete=False, suffix='.csv') as tmp_file: + temp_path = tmp_file.name + + # Step 2: Store the DataFrame to the temporary file + input_matrix.to_csv(temp_path, index=True, columns=input_matrix.columns) + + config["input_matrix_path"] = temp_path + config_based_on_model = Configuration.from_dict(mcda_run.config_dict_to_configuration_model(config)) + weights = config_based_on_model.robustness.given_weights agg = Aggregation(weights) config_randomness_simple_mcda = TestMCDA_without_robustness.get_test_config_randomness_simple_mcda() - config_randomness_simple_mcda = Config(config_randomness_simple_mcda) + config_randomness_simple_mcda["input_matrix_path"] = temp_path + config_randomness_simple_mcda = ( + Configuration.from_dict(mcda_run.config_dict_to_configuration_model(config_randomness_simple_mcda))) # When - mcda_no_uncert = MCDAWithoutRobustness(config, input_matrix) + mcda_no_uncert = MCDAWithoutRobustness(config_based_on_model, input_matrix) normalized_indicators = mcda_no_uncert.normalize_indicators() res = utils_for_parallelization.aggregate_indicators_in_parallel(agg, normalized_indicators) - 
mcda_no_uncert_simple_mcda = MCDAWithoutRobustness(config_randomness_simple_mcda, input_matrix) + mcda_no_uncert_simple_mcda = MCDAWithoutRobustness(config_based_on_model, input_matrix) normalized_indicators = mcda_no_uncert_simple_mcda.normalize_indicators( - config_randomness_simple_mcda.sensitivity['normalization']) + config_randomness_simple_mcda.sensitivity.normalization) res_simple_mcda = utils_for_parallelization.aggregate_indicators_in_parallel(agg, normalized_indicators, - config_randomness_simple_mcda.sensitivity['aggregation']) + config_randomness_simple_mcda.sensitivity.aggregation) col_names = ['ws-minmax_01', 'ws-target_01', 'ws-standardized_any', 'ws-rank', 'geom-minmax_without_zero', 'geom-target_without_zero', 'geom-standardized_without_zero', diff --git a/tests/unit_tests/test_normalization.py b/tests/unit_tests/test_normalization.py index 8224415..7df11e0 100644 --- a/tests/unit_tests/test_normalization.py +++ b/tests/unit_tests/test_normalization.py @@ -4,8 +4,8 @@ from pandas.testing import assert_frame_equal from numpy.testing import assert_almost_equal -from mcda.utils.utils_for_main import * -from mcda.mcda_functions.normalization import Normalization +from ProMCDA.mcda.utils.utils_for_main import * +from ProMCDA.mcda.mcda_functions.normalization import Normalization current_directory = os.path.dirname(os.path.abspath(__file__)) resources_directory = os.path.join(current_directory, '..', 'resources') diff --git a/tests/unit_tests/test_utils_for_main.py b/tests/unit_tests/test_utils_for_main.py index 0d665dc..6452935 100644 --- a/tests/unit_tests/test_utils_for_main.py +++ b/tests/unit_tests/test_utils_for_main.py @@ -1,9 +1,11 @@ +import tempfile import unittest from unittest import TestCase from unittest.mock import patch -from mcda.utils.utils_for_main import * -from mcda.utils.utils_for_main import _check_and_rescale_negative_indicators +from ProMCDA.mcda import mcda_run +from ProMCDA.mcda.utils.utils_for_main import * +from ProMCDA.mcda.utils.utils_for_main import _check_and_rescale_negative_indicators class TestUtils(unittest.TestCase): @@ -137,12 +139,34 @@ def test_pop_indexed_elements(self): isinstance(out_list, list) TestCase.assertListEqual(self, out_list, expected_list) + def test_print_input_parameters_pdf(self): + # Given + input_matrix_1 = TestUtils.get_input_matrix_1() + input_matrix_2 = TestUtils.get_input_matrix_2() + input_matrix_3 = TestUtils.get_input_matrix_3() + input_matrix_alternatives = TestUtils.get_input_matrix_alternatives() + car_data = read_matrix("ProMCDA/input_files/toy_example/car_data.csv") + pprint(f"input_matrix_1:\n {input_matrix_1.to_json()}") + pprint(f"input_matrix_2:\n {input_matrix_2.to_json()}") + pprint(f"input_matrix_alternatives:\n {input_matrix_alternatives.to_json()}") + pprint(f"car_data:\n {car_data}") + pprint(f"car_data:\n {car_data.to_json()}") + + def test_check_parameters_pdf(self): # Given input_matrix_1 = TestUtils.get_input_matrix_1() input_matrix_2 = TestUtils.get_input_matrix_2() input_matrix_3 = TestUtils.get_input_matrix_3() config = TestUtils.get_test_config() + temp_path = None + with tempfile.NamedTemporaryFile(delete=False, suffix='.csv') as tmp_file: + temp_path = tmp_file.name + + # Step 2: Store the DataFrame to the temporary file + input_matrix_1.to_csv(temp_path, index=True, columns=input_matrix_1.columns) + config["input_matrix_path"] = temp_path + config = Configuration.from_dict(mcda_run.config_dict_to_configuration_model(config)) # When
are_parameters_correct_1 = check_parameters_pdf(input_matrix_1, config, True) diff --git a/tests/unit_tests/test_utils_for_parallelization.py b/tests/unit_tests/test_utils_for_parallelization.py index 36d1a02..479299d 100644 --- a/tests/unit_tests/test_utils_for_parallelization.py +++ b/tests/unit_tests/test_utils_for_parallelization.py @@ -1,10 +1,12 @@ +import tempfile import unittest from statistics import mean, stdev from pandas.testing import assert_frame_equal -from mcda.mcda_without_robustness import * -from mcda.utils.utils_for_parallelization import * +from ProMCDA.mcda import mcda_run +from ProMCDA.mcda.mcda_without_robustness import * +from ProMCDA.mcda.utils.utils_for_parallelization import * class TestUtilsForParallelization(unittest.TestCase): @@ -74,7 +76,15 @@ def get_output_dict() -> list[dict]: data = {'0': [1, 1, 2, 3], '1': [4, 5, 6, 7], '2': [8, 9, 10, 11], '3': [12, 13, 14, 15], '4': [16, 17, 18, 19]} df = pd.DataFrame(data=data) config = TestUtilsForParallelization.get_test_config() - config = Config(config) + temp_path = None + with tempfile.NamedTemporaryFile(delete=False, suffix='.csv') as tmp_file: + temp_path = tmp_file.name + + # Step 2: Store the DataFrame to the temporary file + df.to_csv(temp_path, index=True, columns=df.columns) + config["input_matrix_path"] = temp_path + config = Configuration.from_dict(mcda_run.config_dict_to_configuration_model(config)) + mcda_no_var = MCDAWithoutRobustness(config, df) df_norm = mcda_no_var.normalize_indicators()