From 9e89de8c8eee0dc78e93941ce96f7e1c42801111 Mon Sep 17 00:00:00 2001 From: Konstantinos Papadopoulos Date: Wed, 15 Feb 2017 13:02:15 +0200 Subject: [PATCH 1/8] References and modules rename --- stress_test/{nb_generator.py => nbemu.py} | 32 ++--- ...ator_exceptions.py => nbemu_exceptions.py} | 0 stress_test/nstat_orchestrator.py | 2 +- stress_test/{emulator.py => sbemu.py} | 112 +++++++++--------- ...ator_exceptions.py => sbemu_exceptions.py} | 0 stress_test/test_run.py | 8 +- 6 files changed, 77 insertions(+), 77 deletions(-) rename stress_test/{nb_generator.py => nbemu.py} (91%) rename stress_test/{nb_generator_exceptions.py => nbemu_exceptions.py} (100%) rename stress_test/{emulator.py => sbemu.py} (90%) rename stress_test/{emulator_exceptions.py => sbemu_exceptions.py} (100%) diff --git a/stress_test/nb_generator.py b/stress_test/nbemu.py similarity index 91% rename from stress_test/nb_generator.py rename to stress_test/nbemu.py index 02d3e7f3..bad4149c 100644 --- a/stress_test/nb_generator.py +++ b/stress_test/nbemu.py @@ -10,7 +10,7 @@ import gevent import logging import os -import stress_test.nb_generator_exceptions +import stress_test.nbemu_exceptions import sys import time import traceback @@ -100,7 +100,7 @@ def _error_handling(self, error_message, error_num=1): format(exc_obj, self.name, exc_type, exc_tb.tb_lineno)) if self.traceback_enabled: traceback.print_exc() - raise(stress_test.nb_generator_exceptions.NBGenError) + raise(stress_test.nbemu_exceptions.NBGenError) def init_ssh(self): """ @@ -127,8 +127,8 @@ def init_ssh(self): self.ip, int(self.ssh_port), self.ssh_user, self.ssh_pass, 10) except: - raise(stress_test.nb_generator_exceptions.NBGenNodeConnectionError) - except stress_test.nb_generator_exceptions.NBGenError as e: + raise(stress_test.nbemu_exceptions.NBGenNodeConnectionError) + except stress_test.nbemu_exceptions.NBGenError as e: self._error_handling(e.err_msg, e.err_code) def build(self): @@ -161,17 +161,17 @@ def build(self): logging.info("[NB_generator] Successful building") else: self.status = 'NOT_BUILT' - raise(stress_test.nb_generator_exceptions.NBGenBuildError( + raise(stress_test.nbemu_exceptions.NBGenBuildError( '[NB_generator] Failure during running. Build handler ' 'exited with no zero exit status. \n ' 'Handler output: {0}'.format(cmd_output), exit_status)) - except stress_test.nb_generator_exceptions.NBGenError as e: + except stress_test.nbemu_exceptions.NBGenError as e: self._error_handling(e.err_msg, e.err_code) except: - raise(stress_test.nb_generator_exceptions.NBGenBuildError( + raise(stress_test.nbemu_exceptions.NBGenBuildError( '[NB_generator] Build handler was not executed at all. ' 'Failure running the handler.')) - except stress_test.nb_generator_exceptions.NBGenError as e: + except stress_test.nbemu_exceptions.NBGenError as e: self._error_handling(e.err_msg, e.err_code) def clean(self): @@ -204,17 +204,17 @@ def clean(self): logging.info("[NB_generator] Successful clean") else: self.status = 'NOT_CLEANED' - raise(stress_test.nb_generator_exceptions.NBGenCleanError( + raise(stress_test.nbemu_exceptions.NBGenCleanError( '[NB_generator] Failure during running. Clean handler ' 'exited with no zero exit status. 
\n ' 'Handler output: {0}'.format(cmd_output), exit_status)) - except stress_test.nb_generator_exceptions.NBGenError as e: + except stress_test.nbemu_exceptions.NBGenError as e: self._error_handling(e.err_msg, e.err_code) except: - raise(stress_test.nb_generator_exceptions.NBGenCleanError( + raise(stress_test.nbemu_exceptions.NBGenCleanError( '[NB_generator] Clean handler was not executed at all. ' 'Failure running the handler.')) - except stress_test.nb_generator_exceptions.NBGenError as e: + except stress_test.nbemu_exceptions.NBGenError as e: self._error_handling(e.err_msg, e.err_code) def run(self): @@ -265,15 +265,15 @@ def run(self): logging.info("[NB_generator] up and running") else: self.status = 'NB_GEN_NOT_RUNNING' - raise(stress_test.nb_generator_exceptions.NBGenRunError( + raise(stress_test.nbemu_exceptions.NBGenRunError( '[NB_generator] Failure during running. {0}'. format(cmd_output), exit_status)) return cmd_output - except stress_test.nb_generator_exceptions.NBGenError as e: + except stress_test.nbemu_exceptions.NBGenError as e: self._error_handling(e.err_msg, e.err_code) except: - raise(stress_test.nb_generator_exceptions.NBGenRunError) - except stress_test.nb_generator_exceptions.NBGenError as e: + raise(stress_test.nbemu_exceptions.NBGenRunError) + except stress_test.nbemu_exceptions.NBGenError as e: self._error_handling(e.err_msg, e.err_code) def __del__(self): diff --git a/stress_test/nb_generator_exceptions.py b/stress_test/nbemu_exceptions.py similarity index 100% rename from stress_test/nb_generator_exceptions.py rename to stress_test/nbemu_exceptions.py diff --git a/stress_test/nstat_orchestrator.py b/stress_test/nstat_orchestrator.py index 4063f772..2201f242 100644 --- a/stress_test/nstat_orchestrator.py +++ b/stress_test/nstat_orchestrator.py @@ -13,7 +13,7 @@ import argparse import json import stress_test.controller -import stress_test.emulator +import stress_test.sbemu import stress_test.test_type def main(): diff --git a/stress_test/emulator.py b/stress_test/sbemu.py similarity index 90% rename from stress_test/emulator.py rename to stress_test/sbemu.py index f1ef3370..4f4590c5 100644 --- a/stress_test/emulator.py +++ b/stress_test/sbemu.py @@ -12,7 +12,7 @@ import logging import os import re -import stress_test.emulator_exceptions +import stress_test.sbemu_exceptions import sys import time import traceback @@ -94,7 +94,7 @@ def _error_handling(self, error_message, error_num=1): format(exc_obj, self.name, exc_type, exc_tb.tb_lineno)) if self.traceback_enabled: traceback.print_exc() - raise(stress_test.emulator_exceptions.SBEmuError) + raise(stress_test.sbemu_exceptions.SBEmuError) def init_ssh(self): """ @@ -121,8 +121,8 @@ def init_ssh(self): self.ip, int(self.ssh_port), self.ssh_user, self.ssh_pass, 10) except: - raise(stress_test.emulator_exceptions.SBEmuNodeConnectionError) - except stress_test.emulator_exceptions.SBEmuError as e: + raise(stress_test.sbemu_exceptions.SBEmuNodeConnectionError) + except stress_test.sbemu_exceptions.SBEmuError as e: self._error_handling(e.err_msg, e.err_code) def build(self): @@ -155,14 +155,14 @@ def build(self): logging.info("[SB-Emulator] Successful building") else: self.status = 'NOT_BUILT' - raise(stress_test.emulator_exceptions.SBEmuBuildError( + raise(stress_test.sbemu_exceptions.SBEmuBuildError( '[SB-Emulator] Failure during building: {0}'. 
format(cmd_output), exit_status)) - except stress_test.emulator_exceptions.SBEmuError as e: + except stress_test.sbemu_exceptions.SBEmuError as e: self._error_handling(e.err_msg, e.err_code) except: - raise(stress_test.emulator_exceptions.SBEmuBuildError) - except stress_test.emulator_exceptions.SBEmuError as e: + raise(stress_test.sbemu_exceptions.SBEmuBuildError) + except stress_test.sbemu_exceptions.SBEmuError as e: self._error_handling(e.err_msg, e.err_code) def clean(self): @@ -195,15 +195,15 @@ def clean(self): logging.info("[SB-Emulator] Successful clean") else: self.status = 'NOT_CLEANED' - raise(stress_test.emulator_exceptions. + raise(stress_test.sbemu_exceptions. SBEmuCleanupError( '[SB-Emulator] Failure during cleaning: {0}'. format(cmd_output), exit_status)) - except stress_test.emulator_exceptions.SBEmuError as e: + except stress_test.sbemu_exceptions.SBEmuError as e: self._error_handling(e.err_msg, e.err_code) except: - raise(stress_test.emulator_exceptions.SBEmuCleanupError) - except stress_test.emulator_exceptions.SBEmuError as e: + raise(stress_test.sbemu_exceptions.SBEmuCleanupError) + except stress_test.sbemu_exceptions.SBEmuError as e: self._error_handling(e.err_msg, e.err_code) def __del__(self): @@ -337,14 +337,14 @@ def run(self, ctrl_ip, ctrl_sb_port, prefix='[MTCBench.run_handler]', logging.info('{0} Successful started'.format(prefix)) else: self.status = 'NOT_STARTED' - raise(stress_test.emulator_exceptions.MTCbenchRunError( + raise(stress_test.sbemu_exceptions.MTCbenchRunError( '{0} Failure during starting: {1}'. format(prefix, cmd_output), exit_status)) - except stress_test.emulator_exceptions.SBEmuError as e: + except stress_test.sbemu_exceptions.SBEmuError as e: self._error_handling(e.err_msg, e.err_code) except: - raise(stress_test.emulator_exceptions.MTCbenchRunError) - except stress_test.emulator_exceptions.SBEmuError as e: + raise(stress_test.sbemu_exceptions.MTCbenchRunError) + except stress_test.sbemu_exceptions.SBEmuError as e: self._error_handling(e.err_msg, e.err_code) @@ -483,15 +483,15 @@ def __generate_config(self, cntrl_of_port, cntrl_ip): 'w') as config_json_file: json.dump(config_data, config_json_file) if not util.file_ops.file_exists(self.__multinet_config_file_local_path): - raise(stress_test.emulator_exceptions.MultinetConfGenerateError( + raise(stress_test.sbemu_exceptions.MultinetConfGenerateError( '[Multinet] Config local file has not been created', 2)) - except stress_test.emulator_exceptions.SBEmuError as e: + except stress_test.sbemu_exceptions.SBEmuError as e: self._error_handling(e.err_msg, e.err_code) except: raise( - stress_test.emulator_exceptions.MultinetConfGenerateError) - except stress_test.emulator_exceptions.SBEmuError as e: + stress_test.sbemu_exceptions.MultinetConfGenerateError) + except stress_test.sbemu_exceptions.SBEmuError as e: self._error_handling(e.err_msg, e.err_code) def __parse_output(self, multinet_handler_name, multinet_output): @@ -517,7 +517,7 @@ def __parse_output(self, multinet_handler_name, multinet_output): r'INFO:root:\[{0}\]\[response data\].*'. format(multinet_handler_name), multinet_output) if regex_result is None: - raise(stress_test.emulator_exceptions.MultinetOutputParsingError( + raise(stress_test.sbemu_exceptions.MultinetOutputParsingError( 'Failed to get results from {0} multinet handler.'. 
format(multinet_handler_name), 2)) else: @@ -527,12 +527,12 @@ def __parse_output(self, multinet_handler_name, multinet_output): multinet_result = \ sum([list(json.loads(v).values())[0] for v in json.loads(json_result)]) return multinet_result - except stress_test.emulator_exceptions.SBEmuError as e: + except stress_test.sbemu_exceptions.SBEmuError as e: self._error_handling(e.err_msg, e.err_code) except: raise( - stress_test.emulator_exceptions.MultinetOutputParsingError) - except stress_test.emulator_exceptions.SBEmuError as e: + stress_test.sbemu_exceptions.MultinetOutputParsingError) + except stress_test.sbemu_exceptions.SBEmuError as e: self._error_handling(e.err_msg, e.err_code) def deploy(self, cntrl_ip, cntrl_of_port): @@ -577,14 +577,14 @@ def deploy(self, cntrl_ip, cntrl_of_port): logging.info('[Multinet] Successful deployed') else: self.status = 'NOT_DEPLOYED' - raise(stress_test.emulator_exceptions.MultinetDeployError( + raise(stress_test.sbemu_exceptions.MultinetDeployError( '[Multinet] Failure during deploying: {0}'. format(cmd_output), exit_status)) - except stress_test.emulator_exceptions.SBEmuError as e: + except stress_test.sbemu_exceptions.SBEmuError as e: self._error_handling(e.err_msg, e.err_code) except: - raise(stress_test.emulator_exceptions.MultinetDeployError) - except stress_test.emulator_exceptions.SBEmuError as e: + raise(stress_test.sbemu_exceptions.MultinetDeployError) + except stress_test.sbemu_exceptions.SBEmuError as e: self._error_handling(e.err_msg, e.err_code) def get_switches(self, new_ssh_conn=None): @@ -631,14 +631,14 @@ def get_switches(self, new_ssh_conn=None): cmd_output) else: self.status = 'NOT_GOT_SWITCHES' - raise(stress_test.emulator_exceptions.MultinetGetSwitchesError( + raise(stress_test.sbemu_exceptions.MultinetGetSwitchesError( '[Multinet] Failure during getting switches: {0}'. format(cmd_output), exit_status)) - except stress_test.emulator_exceptions.SBEmuError as e: + except stress_test.sbemu_exceptions.SBEmuError as e: self._error_handling(e.err_msg, e.err_code) except: - raise(stress_test.emulator_exceptions.MultinetGetSwitchesError) - except stress_test.emulator_exceptions.SBEmuError as e: + raise(stress_test.sbemu_exceptions.MultinetGetSwitchesError) + except stress_test.sbemu_exceptions.SBEmuError as e: self._error_handling(e.err_msg, e.err_code) def get_flows(self, new_ssh_conn=None): @@ -686,14 +686,14 @@ def get_flows(self, new_ssh_conn=None): cmd_output) else: self.status = 'NOT_GOT_FLOWS' - raise(stress_test.emulator_exceptions.MultinetGetFlowsError( + raise(stress_test.sbemu_exceptions.MultinetGetFlowsError( '[Multinet] Failure during getting flows: {0}'. format(cmd_output), exit_status)) - except stress_test.emulator_exceptions.SBEmuError as e: + except stress_test.sbemu_exceptions.SBEmuError as e: self._error_handling(e.err_msg, e.err_code) except: - raise(stress_test.emulator_exceptions.MultinetGetFlowsError) - except stress_test.emulator_exceptions.SBEmuError as e: + raise(stress_test.sbemu_exceptions.MultinetGetFlowsError) + except stress_test.sbemu_exceptions.SBEmuError as e: self._error_handling(e.err_msg, e.err_code) def init_topos(self): @@ -729,14 +729,14 @@ def init_topos(self): 'of Mininet topos') else: self.status = 'TOPOS_NOT_INITIALIZED' - raise(stress_test.emulator_exceptions.MultinetInitToposError( + raise(stress_test.sbemu_exceptions.MultinetInitToposError( '[Multinet] Failure during topos initialization: {0}'. 
format(cmd_output), exit_status)) - except stress_test.emulator_exceptions.SBEmuError as e: + except stress_test.sbemu_exceptions.SBEmuError as e: self._error_handling(e.err_msg, e.err_code) except: - raise(stress_test.emulator_exceptions.MultinetInitToposError) - except stress_test.emulator_exceptions.SBEmuError as e: + raise(stress_test.sbemu_exceptions.MultinetInitToposError) + except stress_test.sbemu_exceptions.SBEmuError as e: self._error_handling(e.err_msg, e.err_code) def start_topos(self): @@ -772,14 +772,14 @@ def start_topos(self): 'of Mininet topos') else: self.status = 'TOPOS_NOT_STARTED' - raise(stress_test.emulator_exceptions.MultinetStartToposError( + raise(stress_test.sbemu_exceptions.MultinetStartToposError( '[Multinet] Failure during the starting of topos: {0}'. format(cmd_output), exit_status)) - except stress_test.emulator_exceptions.SBEmuError as e: + except stress_test.sbemu_exceptions.SBEmuError as e: self._error_handling(e.err_msg, e.err_code) except: - raise(stress_test.emulator_exceptions.MultinetStartToposError) - except stress_test.emulator_exceptions.SBEmuError as e: + raise(stress_test.sbemu_exceptions.MultinetStartToposError) + except stress_test.sbemu_exceptions.SBEmuError as e: self._error_handling(e.err_msg, e.err_code) def stop_topos(self): @@ -814,14 +814,14 @@ def stop_topos(self): logging.info('[Multinet] Successful stop of Mininet topos') else: self.status = 'TOPOS_NOT_STOPPED' - raise(stress_test.emulator_exceptions.MultinetStopToposError( + raise(stress_test.sbemu_exceptions.MultinetStopToposError( '[Multinet] Failure during the stopping of topos'. format(cmd_output), exit_status)) - except stress_test.emulator_exceptions.SBEmuError as e: + except stress_test.sbemu_exceptions.SBEmuError as e: self._error_handling(e.err_msg, e.err_code) except: - raise(stress_test.emulator_exceptions.MultinetStopToposError) - except stress_test.emulator_exceptions.SBEmuError as e: + raise(stress_test.sbemu_exceptions.MultinetStopToposError) + except stress_test.sbemu_exceptions.SBEmuError as e: self._error_handling(e.err_msg, e.err_code) def cleanup(self): @@ -857,14 +857,14 @@ def cleanup(self): 'topos') else: self.status = 'TOPOS_NOT_CLEANED' - raise(stress_test.emulator_exceptions.MultinetCleanupError( + raise(stress_test.sbemu_exceptions.MultinetCleanupError( '[Multinet] Failure during the cleanup of topos: {0}'. format(cmd_output), exit_status)) - except stress_test.emulator_exceptions.SBEmuError as e: + except stress_test.sbemu_exceptions.SBEmuError as e: self._error_handling(e.err_msg, e.err_code) except: - raise(stress_test.emulator_exceptions.MultinetCleanupError) - except stress_test.emulator_exceptions.SBEmuError as e: + raise(stress_test.sbemu_exceptions.MultinetCleanupError) + except stress_test.sbemu_exceptions.SBEmuError as e: self._error_handling(e.err_msg, e.err_code) def generate_traffic(self): @@ -900,16 +900,16 @@ def generate_traffic(self): 'from switches') else: self.status = 'TRAFFIC_DOWN' - raise(stress_test.emulator_exceptions. + raise(stress_test.sbemu_exceptions. MultinetTraffigGenError( '[Multinet] Failure during traffic generation ' 'from switches: {0}'. 
format(cmd_output), exit_status)) - except stress_test.emulator_exceptions.SBEmuError as e: + except stress_test.sbemu_exceptions.SBEmuError as e: self._error_handling(e.err_msg, e.err_code) except: - raise(stress_test.emulator_exceptions.MultinetTraffigGenError) - except stress_test.emulator_exceptions.SBEmuError as e: + raise(stress_test.sbemu_exceptions.MultinetTraffigGenError) + except stress_test.sbemu_exceptions.SBEmuError as e: self._error_handling(e.err_msg, e.err_code) def __del__(self): diff --git a/stress_test/emulator_exceptions.py b/stress_test/sbemu_exceptions.py similarity index 100% rename from stress_test/emulator_exceptions.py rename to stress_test/sbemu_exceptions.py diff --git a/stress_test/test_run.py b/stress_test/test_run.py index e86e9996..8cf3f46c 100644 --- a/stress_test/test_run.py +++ b/stress_test/test_run.py @@ -12,9 +12,9 @@ import logging import stress_test.controller import stress_test.controller_exceptions -import stress_test.emulator +import stress_test.sbemu import stress_test.monitor -import stress_test.nb_generator +import stress_test.nbemu import stress_test.oftraf import sys import time @@ -47,7 +47,7 @@ def __init__(self, args, json_conf, test_type): # SB EMULATOR preparation # ---------------------------------------------------------------------- - self.sb_emu = stress_test.emulator.SBEmu.new(args.sb_emu_base_dir, + self.sb_emu = stress_test.sbemu.SBEmu.new(args.sb_emu_base_dir, json_conf) self.sb_emu.init_ssh() self.sb_emu.build() @@ -55,7 +55,7 @@ def __init__(self, args, json_conf, test_type): # NB EMULATOR preparation # ---------------------------------------------------------------------- if 'nb_emulator_name' in json_conf: - self.nb_emu = stress_test.nb_generator.NBgen( + self.nb_emu = stress_test.nbemu.NBgen( args.nb_emu_base_dir, json_conf, self.ctrl, From 2188484ae6e25c4f0d3433b4c2713e9b416f6c21 Mon Sep 17 00:00:00 2001 From: Konstantinos Papadopoulos Date: Wed, 15 Feb 2017 13:17:13 +0200 Subject: [PATCH 2/8] Folder rename --- emulators/{nb_generator => nbemu}/build.sh | 0 emulators/{nb_generator => nbemu}/clean.sh | 0 stress_test/monitor.py | 34 +++++++-------- stress_test/nbemu.py | 51 +++++++++++----------- stress_test/oftraf.py | 8 ++-- 5 files changed, 46 insertions(+), 47 deletions(-) rename emulators/{nb_generator => nbemu}/build.sh (100%) rename emulators/{nb_generator => nbemu}/clean.sh (100%) diff --git a/emulators/nb_generator/build.sh b/emulators/nbemu/build.sh similarity index 100% rename from emulators/nb_generator/build.sh rename to emulators/nbemu/build.sh diff --git a/emulators/nb_generator/clean.sh b/emulators/nbemu/clean.sh similarity index 100% rename from emulators/nb_generator/clean.sh rename to emulators/nbemu/clean.sh diff --git a/stress_test/monitor.py b/stress_test/monitor.py index 818424ab..820168f6 100644 --- a/stress_test/monitor.py +++ b/stress_test/monitor.py @@ -742,20 +742,20 @@ def __poll_flows_ds(self, t_start, expected_flows): while True: if (time.time() - t_discovery_start) > \ self.nbgen.flows_ds_discovery_deadline: - logging.info('[NB_generator] [Poll_flows thread] Deadline of ' + logging.info('[NB_emulator] [Poll_flows thread] Deadline of ' '{0} seconds passed' .format(self.nbgen.flows_ds_discovery_deadline)) self.nbgen.e2e_installation_time = -1.0 self.nbgen_queue.put({'end_to_end_flows_operation_time': -1.0}, block=True) - logging.info('[NB_generator] [Poll_flows thread] End to End ' + logging.info('[NB_emulator] [Poll_flows thread] End to End ' 'installation time monitor FAILED') return else: 
new_ssh = self.controller.init_ssh() oper_ds_found_flows = self.controller.get_oper_flows(new_ssh) - logging.debug('[NB_generator] [Poll_flows thread] Found {0}' + logging.debug('[NB_emulator] [Poll_flows thread] Found {0}' ' flows at inventory'. format(oper_ds_found_flows)) if (oper_ds_found_flows - previous_discovered_flows) != 0: @@ -763,14 +763,14 @@ def __poll_flows_ds(self, t_start, expected_flows): previous_discovered_flows = oper_ds_found_flows if oper_ds_found_flows == expected_flows: time_interval = time.time() - t_start - logging.debug('[NB_generator] [Poll_flows thread] ' + logging.debug('[NB_emulator] [Poll_flows thread] ' 'Flow-Master {0} flows found in {1} seconds' .format(expected_flows, time_interval)) self.nbgen.e2e_installation_time = time_interval self.nbgen_queue.put( {'end_to_end_flows_operation_time': time_interval}, block=True) - logging.info('[NB_generator] [Poll_flows thread] ' + logging.info('[NB_emulator] [Poll_flows thread] ' 'End to End installation time is: {0}' .format(self.nbgen.e2e_installation_time)) return @@ -795,19 +795,19 @@ def __poll_flows_ds_confirm(self, expected_flows): while True: if (time.time() - t_discovery_start) > \ self.nbgen.flows_ds_discovery_deadline: - logging.info('[NB_generator] [Poll_flows_confirm thread] ' + logging.info('[NB_emulator] [Poll_flows_confirm thread] ' ' Deadline of {0} seconds passed' .format(self.flows_ds_discovery_deadline)) self.nbgen.confirm_time = -1.0 self.nbgen_queue.put({'confirm_time': -1.0}, block=True) - logging.info('[NB_generator] [Poll_flows_confirm thread] ' + logging.info('[NB_emulator] [Poll_flows_confirm thread] ' 'Confirmation time monitoring FAILED') return else: new_ssh = self.controller.init_ssh() oper_ds_found_flows = self.controller.get_oper_flows(new_ssh) - logging.debug('[NB_generator] [Poll_flows_confirm thread] ' + logging.debug('[NB_emulator] [Poll_flows_confirm thread] ' 'Found {0} flows at inventory' .format(oper_ds_found_flows)) if (oper_ds_found_flows - previous_discovered_flows) != 0: @@ -815,14 +815,14 @@ def __poll_flows_ds_confirm(self, expected_flows): previous_discovered_flows = oper_ds_found_flows if oper_ds_found_flows == expected_flows: time_interval = time.time() - t_start - logging.debug('[NB_generator] [Poll_flows_confirm thread] ' + logging.debug('[NB_emulator] [Poll_flows_confirm thread] ' 'Flow-Master {0} flows found in {1} seconds' .format(expected_flows, time_interval)) self.nbgen.confirm_time = time_interval self.nbgen_queue.put({'confirm_time': time_interval}, block=True) - logging.info('[NB_generator] [Poll_flows_confirm thread] ' + logging.info('[NB_emulator] [Poll_flows_confirm thread] ' 'Confirmation time is: {0}' .format(self.nbgen.confirm_time)) @@ -851,20 +851,20 @@ def __poll_flows_switches(self, t_start, expected_flows): while True: if (time.time() - t_discovery_start) > \ self.nbgen.flows_ds_discovery_deadline: - logging.info('[NB_generator] [Poll_flows_switches thread] ' + logging.info('[NB_emulator] [Poll_flows_switches thread] ' 'Deadline of {0} seconds passed' .format(self.flows_ds_discovery_deadline)) self.nbgen.discover_flows_on_switches_time = -1.0 self.nbgen_queue.put({'switch_operation_time': -1.0}, block=True) - logging.info('[NB_generator] [Poll_flows_switches thread] ' + logging.info('[NB_emulator] [Poll_flows_switches thread] ' 'Discovering flows on switches FAILED') return else: new_ssh = self.sbemu.init_ssh() discovered_flows = self.sbemu.get_flows(new_ssh) - logging.debug('[NB_generator] [Poll_flows_switches thread] ' + 
logging.debug('[NB_emulator] [Poll_flows_switches thread] ' 'Found {0} flows at topology switches' .format(discovered_flows)) if (discovered_flows - previous_discovered_flows) != 0: @@ -872,14 +872,14 @@ def __poll_flows_switches(self, t_start, expected_flows): previous_discovered_flows = discovered_flows if discovered_flows == expected_flows: time_interval = time.time() - t_start - logging.debug('[NB_generator] [Poll_flows_switches thread]' + logging.debug('[NB_emulator] [Poll_flows_switches thread]' ' expected flows = {0} \n ' 'discovered flows = {1}' .format(expected_flows, discovered_flows)) self.discover_flows_on_switches_time = time_interval self.nbgen_queue.put( {'switch_operation_time': time_interval}, block=True) - logging.info('[NB_generator] [Poll_flows_switches thread] ' + logging.info('[NB_emulator] [Poll_flows_switches thread] ' 'Time to discover flows on switches is: {0}' .format(self.nbgen. discover_flows_on_switches_time)) @@ -924,7 +924,7 @@ def monitor_threads_run(self, t_start, total_failed_flows, :type expected_flows: int :type flow_delete_flag: boolean """ - logging.info('[NB_generator] Start polling measurements') + logging.info('[NB_emulator] Start polling measurements') monitor_ds = gevent.spawn(self.__poll_flows_ds, t_start, expected_flows) @@ -940,7 +940,7 @@ def monitor_threads_run(self, t_start, total_failed_flows, controller_time = self.__controller_time(t_start) discovered_flows = self.sbemu.get_flows() flow_measurement_latency_interval = time.time() - time_start - logging.info('[NB_generator] Flows measurement latency ' + logging.info('[NB_emulator] Flows measurement latency ' 'interval: {0} sec. | Discovered flows: {1}' .format(flow_measurement_latency_interval, discovered_flows)) diff --git a/stress_test/nbemu.py b/stress_test/nbemu.py index bad4149c..6c1d87dc 100644 --- a/stress_test/nbemu.py +++ b/stress_test/nbemu.py @@ -6,7 +6,6 @@ """ NB-Generator Class- All NB-Generator-related functionality is here""" -# import emulators.nb_generator import gevent import logging import os @@ -81,14 +80,14 @@ def __init__(self, nb_gen_base_dir, test_config, controller, sbemu, def _error_handling(self, error_message, error_num=1): """ - Handles custom errors of nb_generator + Handles custom errors of nb_emulator :param error_message: message of the handled error :param error_num: error number of the handled error, used to define subcases of raised errors. :type error_message: str :type error_num: int - :raises nb_generator_exceptions.NBGenError: to terminate execution of + :raises nb_emulator_exceptions.NBGenError: to terminate execution of test after error handling """ exc_type, exc_obj, exc_tb = sys.exc_info() @@ -104,12 +103,12 @@ def _error_handling(self, error_message, error_num=1): def init_ssh(self): """ - Initializes a new SSH client object, with the nb_generator node and \ + Initializes a new SSH client object, with the nb_emulator node and \ assigns it to the protected attribute _ssh_conn. If a connection \ already exists it returns a new SSH client object to the \ controller node. 
- :raises nb_generator_exceptions.NBGenNodeConnectionError: if ssh \ + :raises nb_emulator_exceptions.NBGenNodeConnectionError: if ssh \ connection establishment fails """ logging.info( @@ -136,9 +135,9 @@ def build(self): Wrapper to the NB-Generator build handler :raises IOError: if the handler does not exist on the remote host - :raises nb_generator_exceptions.NBGenBuildError: if build process fails + :raises nb_emulator_exceptions.NBGenBuildError: if build process fails """ - logging.info('[NB_generator] Building') + logging.info('[NB_emulator] Building') self.status = 'BUILDING' try: try: @@ -148,28 +147,28 @@ def build(self): self.status = 'NOT_BUILT' raise(IOError( '{0} build handler does not exist'. - format('[nb_generator.build]'))) + format('[nb_emulator.build]'))) else: util.netutil.make_remote_file_executable( self.ip, self.ssh_port, self.ssh_user, self.ssh_pass, self.build_hnd) exit_status, cmd_output = util.netutil.ssh_run_command( self._ssh_conn, ' '.join([self.build_hnd]), - '[NB_generator.build_handler]') + '[NB_emulator.build_handler]') if exit_status == 0: self.status = 'BUILT' - logging.info("[NB_generator] Successful building") + logging.info("[NB_emulator] Successful building") else: self.status = 'NOT_BUILT' raise(stress_test.nbemu_exceptions.NBGenBuildError( - '[NB_generator] Failure during running. Build handler ' + '[NB_emulator] Failure during running. Build handler ' 'exited with no zero exit status. \n ' 'Handler output: {0}'.format(cmd_output), exit_status)) except stress_test.nbemu_exceptions.NBGenError as e: self._error_handling(e.err_msg, e.err_code) except: raise(stress_test.nbemu_exceptions.NBGenBuildError( - '[NB_generator] Build handler was not executed at all. ' + '[NB_emulator] Build handler was not executed at all. ' 'Failure running the handler.')) except stress_test.nbemu_exceptions.NBGenError as e: self._error_handling(e.err_msg, e.err_code) @@ -179,9 +178,9 @@ def clean(self): Wrapper to the NB-Generator clean handler :raises IOError: if the handler does not exist on the remote host - :raises nb_generator_exceptions.NBGenCleanError: if clean process fails + :raises nb_emulator_exceptions.NBGenCleanError: if clean process fails """ - logging.info('[NB_generator] Cleaning') + logging.info('[NB_emulator] Cleaning') self.status = 'CLEANING' try: try: @@ -191,28 +190,28 @@ def clean(self): self.status = 'NOT_CLEANED' raise(IOError( '{0} clean handler does not exist'. - format('[nb_generator.clean]'))) + format('[nb_emulator.clean]'))) else: util.netutil.make_remote_file_executable( self.ip, self.ssh_port, self.ssh_user, self.ssh_pass, self.clean_hnd) exit_status, cmd_output = util.netutil.ssh_run_command( self._ssh_conn, self.clean_hnd, - '[NB_generator.clean_handler]') + '[NB_emulator.clean_handler]') if exit_status == 0: self.status = 'CLEANED' - logging.info("[NB_generator] Successful clean") + logging.info("[NB_emulator] Successful clean") else: self.status = 'NOT_CLEANED' raise(stress_test.nbemu_exceptions.NBGenCleanError( - '[NB_generator] Failure during running. Clean handler ' + '[NB_emulator] Failure during running. Clean handler ' 'exited with no zero exit status. \n ' 'Handler output: {0}'.format(cmd_output), exit_status)) except stress_test.nbemu_exceptions.NBGenError as e: self._error_handling(e.err_msg, e.err_code) except: raise(stress_test.nbemu_exceptions.NBGenCleanError( - '[NB_generator] Clean handler was not executed at all. ' + '[NB_emulator] Clean handler was not executed at all. 
' 'Failure running the handler.')) except stress_test.nbemu_exceptions.NBGenError as e: self._error_handling(e.err_msg, e.err_code) @@ -224,10 +223,10 @@ def run(self): :returns: Returns the combined stdout - stderr of the executed command :rtype: str :raises IOError: if the handler does not exist on the remote host - :raises nb_generator_exceptions.NBGenRunError: if running \ - nb_generator fails + :raises nb_emulator_exceptions.NBGenRunError: if running \ + nb_emulator fails """ - logging.info("[NB_generator] Run handler") + logging.info("[NB_emulator] Run handler") self.status = 'STARTED' try: try: @@ -237,7 +236,7 @@ def run(self): self.status = 'NB_GEN_NOT_RUNNING' raise(IOError( '{0} run handler does not exist'. - format('[nb_generator.run]'))) + format('[nb_emulator.run]'))) else: util.netutil.make_remote_file_executable( self.ip, self.ssh_port, self.ssh_user, self.ssh_pass, @@ -258,15 +257,15 @@ def run(self): str(self.controller.restconf_pass), str(self.flows_per_request), str(self.log_level)]), - '[NB_generator] run_handler') + '[NB_emulator] run_handler') if exit_status == 0: self.status = 'NB_GEN_RUNNING' - logging.info("[NB_generator] up and running") + logging.info("[NB_emulator] up and running") else: self.status = 'NB_GEN_NOT_RUNNING' raise(stress_test.nbemu_exceptions.NBGenRunError( - '[NB_generator] Failure during running. {0}'. + '[NB_emulator] Failure during running. {0}'. format(cmd_output), exit_status)) return cmd_output except stress_test.nbemu_exceptions.NBGenError as e: diff --git a/stress_test/oftraf.py b/stress_test/oftraf.py index 3b66622f..e7590b28 100644 --- a/stress_test/oftraf.py +++ b/stress_test/oftraf.py @@ -108,7 +108,7 @@ def build(self): self.status = 'NOT_BUILT' raise(IOError( '{0} build handler does not exist'. - format('[nb_generator.build]'))) + format('[oftraf.build]'))) else: util.netutil.make_remote_file_executable( self.ip, self.ssh_port, self.ssh_user, self.ssh_pass, @@ -119,7 +119,7 @@ def build(self): '[oftraf.build_handler]') if exit_status == 0: self.status = 'BUILT' - logging.info("[Oftraf] Successful building") + logging.info("[OFTraf] Successful building") else: self.status = 'NOT_BUILT' raise(stress_test.oftraf_exceptions.OftrafBuildError( @@ -152,7 +152,7 @@ def clean(self): self.status = 'NOT_CLEANED' raise(IOError( '{0} clean handler does not exist'. - format('[nb_generator.build]'))) + format('[oftraf.build]'))) else: util.netutil.make_remote_file_executable( self.ip, self.ssh_port, self.ssh_user, self.ssh_pass, @@ -198,7 +198,7 @@ def start(self): self.status = 'NOT_STARTED' raise(IOError( '{0} start handler does not exist'. 
- format('[nb_generator.build]'))) + format('[oftraf.build]'))) else: util.netutil.make_remote_file_executable( self.ip, self.ssh_port, self.ssh_user, self.ssh_pass, From d55bc72ffc0410de6c1901874350449a2c768331 Mon Sep 17 00:00:00 2001 From: Konstantinos Papadopoulos Date: Wed, 15 Feb 2017 13:49:29 +0200 Subject: [PATCH 3/8] New docs added --- docs/Makefile | 8 ++++---- docs/conf.rst | 7 +++++++ docs/index.rst | 7 ++++--- docs/make.bat | 4 ++-- docs/stress_test.rst | 44 +++++++++++++++++++++--------------------- stress_test/nbemu.py | 16 --------------- stress_test/oftraf.py | 19 +----------------- stress_test/sbemu.py | 45 ------------------------------------------- 8 files changed, 40 insertions(+), 110 deletions(-) create mode 100644 docs/conf.rst diff --git a/docs/Makefile b/docs/Makefile index 43c39ddb..af169ee5 100644 --- a/docs/Makefile +++ b/docs/Makefile @@ -93,9 +93,9 @@ qthelp: @echo @echo "Build finished; now you can run "qcollectiongenerator" with the" \ ".qhcp project file in $(BUILDDIR)/qthelp, like this:" - @echo "# qcollectiongenerator $(BUILDDIR)/qthelp/nstatsrc.qhcp" + @echo "# qcollectiongenerator $(BUILDDIR)/qthelp/nstat_src.qhcp" @echo "To view the help file:" - @echo "# assistant -collectionFile $(BUILDDIR)/qthelp/nstatsrc.qhc" + @echo "# assistant -collectionFile $(BUILDDIR)/qthelp/nstat_src.qhc" .PHONY: applehelp applehelp: @@ -112,8 +112,8 @@ devhelp: @echo @echo "Build finished." @echo "To view the help file:" - @echo "# mkdir -p $$HOME/.local/share/devhelp/nstatsrc" - @echo "# ln -s $(BUILDDIR)/devhelp $$HOME/.local/share/devhelp/nstatsrc" + @echo "# mkdir -p $$HOME/.local/share/devhelp/nstat_src" + @echo "# ln -s $(BUILDDIR)/devhelp $$HOME/.local/share/devhelp/nstat_src" @echo "# devhelp" .PHONY: epub diff --git a/docs/conf.rst b/docs/conf.rst new file mode 100644 index 00000000..b852516d --- /dev/null +++ b/docs/conf.rst @@ -0,0 +1,7 @@ +conf module +=========== + +.. automodule:: conf + :members: + :undoc-members: + :show-inheritance: diff --git a/docs/index.rst b/docs/index.rst index 647d430e..dd945363 100644 --- a/docs/index.rst +++ b/docs/index.rst @@ -1,15 +1,16 @@ -.. nstat.src documentation master file, created by - sphinx-quickstart on Sun Jan 29 11:26:04 2017. +.. nstat_src documentation master file, created by + sphinx-quickstart on Wed Feb 15 13:38:32 2017. You can adapt this file completely to your liking, but it should at least contain the root `toctree` directive. -Welcome to NSTAT's documentation! +Welcome to nstat_src's documentation! ===================================== .. toctree:: :maxdepth: 4 :caption: Contents: + conf stress_test util diff --git a/docs/make.bat b/docs/make.bat index ade9c1af..cfb7ebc5 100644 --- a/docs/make.bat +++ b/docs/make.bat @@ -131,9 +131,9 @@ if "%1" == "qthelp" ( echo. echo.Build finished; now you can run "qcollectiongenerator" with the ^ .qhcp project file in %BUILDDIR%/qthelp, like this: - echo.^> qcollectiongenerator %BUILDDIR%\qthelp\nstatsrc.qhcp + echo.^> qcollectiongenerator %BUILDDIR%\qthelp\nstat_src.qhcp echo.To view the help file: - echo.^> assistant -collectionFile %BUILDDIR%\qthelp\nstatsrc.ghc + echo.^> assistant -collectionFile %BUILDDIR%\qthelp\nstat_src.ghc goto end ) diff --git a/docs/stress_test.rst b/docs/stress_test.rst index fc49f5a6..f26dafb8 100644 --- a/docs/stress_test.rst +++ b/docs/stress_test.rst @@ -20,22 +20,6 @@ stress_test.controller_exceptions module :undoc-members: :show-inheritance: -stress_test.emulator module ---------------------------- - -.. 
automodule:: stress_test.emulator - :members: - :undoc-members: - :show-inheritance: - -stress_test.emulator_exceptions module --------------------------------------- - -.. automodule:: stress_test.emulator_exceptions - :members: - :undoc-members: - :show-inheritance: - stress_test.html_generation module ---------------------------------- @@ -52,18 +36,18 @@ stress_test.monitor module :undoc-members: :show-inheritance: -stress_test.nb_generator module -------------------------------- +stress_test.nbemu module +------------------------ -.. automodule:: stress_test.nb_generator +.. automodule:: stress_test.nbemu :members: :undoc-members: :show-inheritance: -stress_test.nb_generator_exceptions module ------------------------------------------- +stress_test.nbemu_exceptions module +----------------------------------- -.. automodule:: stress_test.nb_generator_exceptions +.. automodule:: stress_test.nbemu_exceptions :members: :undoc-members: :show-inheritance: @@ -116,6 +100,22 @@ stress_test.report_spec_templates module :undoc-members: :show-inheritance: +stress_test.sbemu module +------------------------ + +.. automodule:: stress_test.sbemu + :members: + :undoc-members: + :show-inheritance: + +stress_test.sbemu_exceptions module +----------------------------------- + +.. automodule:: stress_test.sbemu_exceptions + :members: + :undoc-members: + :show-inheritance: + stress_test.test_run module --------------------------- diff --git a/stress_test/nbemu.py b/stress_test/nbemu.py index 6c1d87dc..3a33c2a7 100644 --- a/stress_test/nbemu.py +++ b/stress_test/nbemu.py @@ -56,10 +56,7 @@ def __init__(self, nb_gen_base_dir, test_config, controller, sbemu, self.get_oper_ds_flows_hnd = ( self.base_dir + test_config['nb_emulator_get_oper_ds_handler']) - - self.status = 'UNKNOWN' self._ssh_conn = None - self.flow_delete_flag = test_config['flow_delete_flag'] self.flows_per_request = test_config['flows_per_request'] self.log_level = log_level @@ -138,13 +135,11 @@ def build(self): :raises nb_emulator_exceptions.NBGenBuildError: if build process fails """ logging.info('[NB_emulator] Building') - self.status = 'BUILDING' try: try: if not util.netutil.isfile(self.ip, self.ssh_port, self.ssh_user, self.ssh_pass, [self.build_hnd]): - self.status = 'NOT_BUILT' raise(IOError( '{0} build handler does not exist'. format('[nb_emulator.build]'))) @@ -156,10 +151,8 @@ def build(self): self._ssh_conn, ' '.join([self.build_hnd]), '[NB_emulator.build_handler]') if exit_status == 0: - self.status = 'BUILT' logging.info("[NB_emulator] Successful building") else: - self.status = 'NOT_BUILT' raise(stress_test.nbemu_exceptions.NBGenBuildError( '[NB_emulator] Failure during running. Build handler ' 'exited with no zero exit status. \n ' @@ -181,13 +174,11 @@ def clean(self): :raises nb_emulator_exceptions.NBGenCleanError: if clean process fails """ logging.info('[NB_emulator] Cleaning') - self.status = 'CLEANING' try: try: if not util.netutil.isfile(self.ip, self.ssh_port, self.ssh_user, self.ssh_pass, [self.clean_hnd]): - self.status = 'NOT_CLEANED' raise(IOError( '{0} clean handler does not exist'. format('[nb_emulator.clean]'))) @@ -199,10 +190,8 @@ def clean(self): self._ssh_conn, self.clean_hnd, '[NB_emulator.clean_handler]') if exit_status == 0: - self.status = 'CLEANED' logging.info("[NB_emulator] Successful clean") else: - self.status = 'NOT_CLEANED' raise(stress_test.nbemu_exceptions.NBGenCleanError( '[NB_emulator] Failure during running. Clean handler ' 'exited with no zero exit status. 
\n ' @@ -227,13 +216,11 @@ def run(self): nb_emulator fails """ logging.info("[NB_emulator] Run handler") - self.status = 'STARTED' try: try: if not util.netutil.isfile(self.ip, self.ssh_port, self.ssh_user, self.ssh_pass, [self.run_hnd]): - self.status = 'NB_GEN_NOT_RUNNING' raise(IOError( '{0} run handler does not exist'. format('[nb_emulator.run]'))) @@ -258,12 +245,9 @@ def run(self): str(self.flows_per_request), str(self.log_level)]), '[NB_emulator] run_handler') - if exit_status == 0: - self.status = 'NB_GEN_RUNNING' logging.info("[NB_emulator] up and running") else: - self.status = 'NB_GEN_NOT_RUNNING' raise(stress_test.nbemu_exceptions.NBGenRunError( '[NB_emulator] Failure during running. {0}'. format(cmd_output), exit_status)) diff --git a/stress_test/oftraf.py b/stress_test/oftraf.py index e7590b28..8797b2de 100644 --- a/stress_test/oftraf.py +++ b/stress_test/oftraf.py @@ -43,7 +43,6 @@ def __init__(self, controller, test_config): self.ssh_pass = controller.ssh_pass self.of_port = controller.of_port - self.status = 'UNKNOWN' self._ssh_conn = controller.init_ssh() self.traceback_enabled = False @@ -95,7 +94,6 @@ def build(self): :raises oftraf_exceptions.OftrafBuildError: if build process fails """ logging.info('[Oftraf] Building') - self.status = 'BUILDING' try: try: oftraf_path = str(self.get_oftraf_path()) @@ -105,7 +103,6 @@ def build(self): self.ssh_user, self.ssh_pass, [build_hnd]): - self.status = 'NOT_BUILT' raise(IOError( '{0} build handler does not exist'. format('[oftraf.build]'))) @@ -118,10 +115,8 @@ def build(self): ' '.join([build_hnd]), '[oftraf.build_handler]') if exit_status == 0: - self.status = 'BUILT' logging.info("[OFTraf] Successful building") else: - self.status = 'NOT_BUILT' raise(stress_test.oftraf_exceptions.OftrafBuildError( 'Build process exited with non zero exit code. ' 'Command-line output: {0} \n Exit status code: {1}'. @@ -141,7 +136,6 @@ def clean(self): :raises oftraf_exceptions.OftrafCleanError: if clean process fails """ logging.info('[Oftraf] Cleaning') - self.status = 'CLEANING' try: try: oftraf_path = self.get_oftraf_path() @@ -149,7 +143,6 @@ def clean(self): if not util.netutil.isfile(self.ip, self.ssh_port, self.ssh_user, self.ssh_pass, [clean_hnd]): - self.status = 'NOT_CLEANED' raise(IOError( '{0} clean handler does not exist'. format('[oftraf.build]'))) @@ -162,10 +155,8 @@ def clean(self): ' '.join([clean_hnd]), '[oftraf.clean_handler]') if exit_status == 0: - self.status = 'CLEANED' logging.info("[Oftraf] Successful cleaning") else: - self.status = 'NOT_CLEANED' raise(stress_test.oftraf_exceptions.OftrafCleanError( 'clean process exited with non zero exit code. ' 'Command-line output: {0} \n Exit status code: {1}'. @@ -187,7 +178,6 @@ def start(self): :raises oftraf_exceptions.OftrafStartError: if start process fails """ logging.info('[Oftraf] Starting') - self.status = 'STARTING' try: try: oftraf_path = self.get_oftraf_path() @@ -195,7 +185,6 @@ def start(self): if not util.netutil.isfile(self.ip, self.ssh_port, self.ssh_user, self.ssh_pass, [start_hnd]): - self.status = 'NOT_STARTED' raise(IOError( '{0} start handler does not exist'. format('[oftraf.build]'))) @@ -213,10 +202,8 @@ def start(self): lines_queue=None, print_flag=True, block_flag=True, getpty_flag=True) if exit_status == 0: - self.status = 'STARTED' logging.info("[Oftraf] Successful starting") else: - self.status = 'NOT_STARTED' raise(stress_test.oftraf_exceptions.OftrafStartError( 'Start process exited with non zero exit code. 
' 'Command-line output: {0} \n Exit status code: {1}'. @@ -235,7 +222,6 @@ def stop(self): :raises oftraf_exceptions.OftrafStopError: if stop process fails """ logging.info('[Oftraf] Starting') - self.status = 'STOPPING' try: try: oftraf_path = self.get_oftraf_path() @@ -243,10 +229,9 @@ def stop(self): if not util.netutil.isfile(self.ip, self.ssh_port, self.ssh_user, self.ssh_pass, [stop_hnd]): - self.status = 'NOT_STOPPED' raise(IOError( '{0} stop handler does not exist'. - format('[nb_generator.build]'))) + format('[oftraf.build]'))) else: util.netutil.make_remote_file_executable( self.ip, self.ssh_port, self.ssh_user, self.ssh_pass, @@ -259,10 +244,8 @@ def stop(self): str(self.rest_server_port)]), '[oftraf.stop_handler]') if exit_status == 0: - self.status = 'STOPED' logging.info("[Oftraf] Successful stopping") else: - self.status = 'NOT_STOPPED' raise(stress_test.oftraf_exceptions.OftrafStopError( 'Stop process exited with non zero exit code. ' 'Command-line output: {0} \n Exit status code: {1}'. diff --git a/stress_test/sbemu.py b/stress_test/sbemu.py index 4f4590c5..86cb97fc 100644 --- a/stress_test/sbemu.py +++ b/stress_test/sbemu.py @@ -45,7 +45,6 @@ def __init__(self, sb_emu_base_dir, test_config): test_config['sb_emulator_build_handler']) self.clean_hnd = (self.base_dir + test_config['sb_emulator_clean_handler']) - self.status = 'UNKNOWN' self._ssh_conn = None util.file_ops.check_filelist([self.build_hnd, self.clean_hnd]) @@ -133,13 +132,11 @@ def build(self): :raises emulator_exceptions.SBEmuBuildError: build fails """ logging.info('[SB-Emulator] Building') - self.status = 'BUILDING' try: try: if not util.netutil.isfile(self.ip, self.ssh_port, self.ssh_user, self.ssh_pass, [self.build_hnd]): - self.status = 'NOT_BUILT' raise(IOError( '{0} build handler does not exist'. format('[SB-Emulator.build_handler]'))) @@ -151,10 +148,8 @@ def build(self): self._ssh_conn, ' '.join([self.build_hnd]), '[SB-Emulator.build_handler]') if exit_status == 0: - self.status = 'BUILT' logging.info("[SB-Emulator] Successful building") else: - self.status = 'NOT_BUILT' raise(stress_test.sbemu_exceptions.SBEmuBuildError( '[SB-Emulator] Failure during building: {0}'. format(cmd_output), exit_status)) @@ -173,13 +168,11 @@ def clean(self): :raises emulator_exceptions.SBEmuCleanupError: if cleanup process fails """ logging.info('[SB-Emulator] Cleaning') - self.status = 'CLEANING' try: try: if not util.netutil.isfile(self.ip, self.ssh_port, self.ssh_user, self.ssh_pass, [self.clean_hnd]): - self.status = 'NOT_CLEANED' raise(IOError( '{0} clean handler does not exist'. format('[SB-Emulator.clean_handler]'))) @@ -191,10 +184,8 @@ def clean(self): self._ssh_conn, self.clean_hnd, '[SB-Emulator.clean_handler]') if exit_status == 0: - self.status = 'CLEANED' logging.info("[SB-Emulator] Successful clean") else: - self.status = 'NOT_CLEANED' raise(stress_test.sbemu_exceptions. SBEmuCleanupError( '[SB-Emulator] Failure during cleaning: {0}'. 
@@ -307,13 +298,11 @@ def run(self, ctrl_ip, ctrl_sb_port, prefix='[MTCBench.run_handler]', error """ logging.info('{0} Starting'.format(prefix)) - self.status = 'STARTING' try: try: if not util.netutil.isfile(self.ip, self.ssh_port, self.ssh_user, self.ssh_pass, [self.run_hnd]): - self.status = 'NOT_STARTED' raise(IOError( '{0} run handler does not exist'.format(prefix))) else: @@ -333,10 +322,8 @@ def run(self, ctrl_ip, ctrl_sb_port, prefix='[MTCBench.run_handler]', prefix, lines_queue, print_flag, block_flag, getpty_flag) if exit_status == 0: - self.status = 'STARTED' logging.info('{0} Successful started'.format(prefix)) else: - self.status = 'NOT_STARTED' raise(stress_test.sbemu_exceptions.MTCbenchRunError( '{0} Failure during starting: {1}'. format(prefix, cmd_output), exit_status)) @@ -548,7 +535,6 @@ def deploy(self, cntrl_ip, cntrl_of_port): deploy error """ logging.info('[Multinet] Deploy') - self.status = 'DEPLOYING' try: try: self.__generate_config(cntrl_of_port, cntrl_ip) @@ -560,7 +546,6 @@ def deploy(self, cntrl_ip, cntrl_of_port): if not util.netutil.isfile(self.ip, self.ssh_port, self.ssh_user, self.ssh_pass, [self.deploy_hnd]): - self.status = 'NOT_DEPLOYED' raise(IOError( '[Multinet] Deploy handler does not exist')) else: @@ -573,10 +558,8 @@ def deploy(self, cntrl_ip, cntrl_of_port): self.__multinet_config_file_remote_path]), '[Multinet.deploy_handler]') if exit_status == 0: - self.status = 'DEPLOYED' logging.info('[Multinet] Successful deployed') else: - self.status = 'NOT_DEPLOYED' raise(stress_test.sbemu_exceptions.MultinetDeployError( '[Multinet] Failure during deploying: {0}'. format(cmd_output), exit_status)) @@ -600,13 +583,11 @@ def get_switches(self, new_ssh_conn=None): fails to run successfully and return a valid result """ logging.info('[Multinet] get_switches') - self.status = 'GETTING_SWITCHES' try: try: if not util.netutil.isfile(self.ip, self.ssh_port, self.ssh_user, self.ssh_pass, [self.get_switches_hnd]): - self.status = 'NOT_GOT_SWITCHES' raise(IOError( '[Multinet] Get_switches handler does not exist')) else: @@ -625,12 +606,10 @@ def get_switches(self, new_ssh_conn=None): if new_ssh_conn is not None: used_ssh_conn.close() if exit_status == 0: - self.status = 'GOT_SWITCHES' logging.info("[Multinet] Successful got switches") return self.__parse_output('get_switches_topology_handler', cmd_output) else: - self.status = 'NOT_GOT_SWITCHES' raise(stress_test.sbemu_exceptions.MultinetGetSwitchesError( '[Multinet] Failure during getting switches: {0}'. format(cmd_output), exit_status)) @@ -654,13 +633,11 @@ def get_flows(self, new_ssh_conn=None): run successfully """ logging.info('[Multinet] get_flows') - self.status = 'GETTING_FLOWS' try: try: if not util.netutil.isfile(self.ip, self.ssh_port, self.ssh_user, self.ssh_pass, [self.get_flows_hnd]): - self.status = 'NOT_GOT_FLOWS' raise(IOError( '[Multinet] Get_flows handler does not exist')) else: @@ -680,12 +657,10 @@ def get_flows(self, new_ssh_conn=None): if new_ssh_conn is not None: used_ssh_conn.close() if exit_status == 0: - self.status = 'GOT_FLOWS' logging.info("[Multinet] Successful got flows") return self.__parse_output('get_flows_topology_handler', cmd_output) else: - self.status = 'NOT_GOT_FLOWS' raise(stress_test.sbemu_exceptions.MultinetGetFlowsError( '[Multinet] Failure during getting flows: {0}'. 
format(cmd_output), exit_status)) @@ -705,13 +680,11 @@ def init_topos(self): initialization fails """ logging.info('[Multinet] init_topos') - self.status = 'INIT_MININET_TOPOS' try: try: if not util.netutil.isfile(self.ip, self.ssh_port, self.ssh_user, self.ssh_pass, [self.init_topos_hnd]): - self.status = 'TOPOS_NOT_INITIALIZED' raise(IOError( '[Multinet] Init_topos handler does not exist')) else: @@ -724,11 +697,9 @@ def init_topos(self): self.__multinet_config_file_remote_path]), '[Multinet.init_topos_hnd]') if exit_status == 0: - self.status = 'TOPOS_INITIALIZED' logging.info('[Multinet] Successful initialization ' 'of Mininet topos') else: - self.status = 'TOPOS_NOT_INITIALIZED' raise(stress_test.sbemu_exceptions.MultinetInitToposError( '[Multinet] Failure during topos initialization: {0}'. format(cmd_output), exit_status)) @@ -748,13 +719,11 @@ def start_topos(self): topology handler fails """ logging.info('[Multinet] start_topos') - self.status = 'START_MININET_TOPOS' try: try: if not util.netutil.isfile(self.ip, self.ssh_port, self.ssh_user, self.ssh_pass, [self.start_topos_hnd]): - self.status = 'TOPOS_NOT_STARTED' raise(IOError( '[Multinet] Start_topos handler does not exist')) else: @@ -767,11 +736,9 @@ def start_topos(self): self.__multinet_config_file_remote_path]), '[Multinet.start_topos_hnd]') if exit_status == 0: - self.status = 'TOPOS_STARTED' logging.info('[Multinet] Successful start ' 'of Mininet topos') else: - self.status = 'TOPOS_NOT_STARTED' raise(stress_test.sbemu_exceptions.MultinetStartToposError( '[Multinet] Failure during the starting of topos: {0}'. format(cmd_output), exit_status)) @@ -791,13 +758,11 @@ def stop_topos(self): handler fails """ logging.info('[Multinet] stop_topos') - self.status = 'STOP_MININET_TOPOS' try: try: if not util.netutil.isfile(self.ip, self.ssh_port, self.ssh_user, self.ssh_pass, [self.stop_topos_hnd]): - self.status = 'TOPOS_NOT_STOPPED' raise(IOError( '[Multinet] Stop_topos handler does not exist')) else: @@ -810,10 +775,8 @@ def stop_topos(self): self.__multinet_config_file_remote_path]), '[Multinet.stop_topos_hnd]') if exit_status == 0: - self.status = 'TOPOS_STOPPED' logging.info('[Multinet] Successful stop of Mininet topos') else: - self.status = 'TOPOS_NOT_STOPPED' raise(stress_test.sbemu_exceptions.MultinetStopToposError( '[Multinet] Failure during the stopping of topos'. format(cmd_output), exit_status)) @@ -833,13 +796,11 @@ def cleanup(self): handler fails """ logging.info('[Multinet] cleanup') - self.status = 'CLEANUP_MININET' try: try: if not util.netutil.isfile(self.ip, self.ssh_port, self.ssh_user, self.ssh_pass, [self.cleanup_hnd]): - self.status = 'TOPOS_NOT_CLEANED' raise(IOError( '[Multinet] Cleanup handler does not exist')) else: @@ -852,11 +813,9 @@ def cleanup(self): self.__multinet_config_file_remote_path]), '[Multinet.cleanup_hnd]') if exit_status == 0: - self.status = 'TOPOS_CLEANED' logging.info('[Multinet] Successful cleanup of Mininet ' 'topos') else: - self.status = 'TOPOS_NOT_CLEANED' raise(stress_test.sbemu_exceptions.MultinetCleanupError( '[Multinet] Failure during the cleanup of topos: {0}'. 
format(cmd_output), exit_status)) @@ -876,13 +835,11 @@ def generate_traffic(self): traffic generator handler fails to run successfully """ logging.info('[Multinet] traffic gen') - self.status = 'CREATE_TRAFFIC' try: try: if not util.netutil.isfile(self.ip, self.ssh_port, self.ssh_user, self.ssh_pass, [self.traffic_gen_hnd]): - self.status = 'TRAFFIC_DOWN' raise(IOError('[Multinet] Traffic_generator handler ' 'does not exist')) else: @@ -895,11 +852,9 @@ def generate_traffic(self): self.__multinet_config_file_remote_path]), '[Multinet.generate_traffic_hnd]') if exit_status == 0: - self.status = 'TRAFFIC_UP' logging.info('[Multinet] Successful traffic generation ' 'from switches') else: - self.status = 'TRAFFIC_DOWN' raise(stress_test.sbemu_exceptions. MultinetTraffigGenError( '[Multinet] Failure during traffic generation ' From 106970cfd38b4d5f332bb0e98faaf94f9f04bdca Mon Sep 17 00:00:00 2001 From: Konstantinos Papadopoulos Date: Wed, 15 Feb 2017 13:58:15 +0200 Subject: [PATCH 4/8] Code cleanup fixes --- stress_test/nbemu.py | 2 +- stress_test/nbemu_exceptions.py | 5 ++++- stress_test/sbemu.py | 2 +- 3 files changed, 6 insertions(+), 3 deletions(-) diff --git a/stress_test/nbemu.py b/stress_test/nbemu.py index 3a33c2a7..165e415d 100644 --- a/stress_test/nbemu.py +++ b/stress_test/nbemu.py @@ -18,7 +18,7 @@ class NBgen: """ - All NB-generator related functionality is here + NorthBound emulator class """ def __init__(self, nb_gen_base_dir, test_config, controller, sbemu, diff --git a/stress_test/nbemu_exceptions.py b/stress_test/nbemu_exceptions.py index 8d0dd736..a04b0c03 100644 --- a/stress_test/nbemu_exceptions.py +++ b/stress_test/nbemu_exceptions.py @@ -61,7 +61,10 @@ def __init__(self, additional_error_info='', err_code=1): class NBGenCleanError(NBGenError): - + """ + Contains the exception handling concerning the North-Bound Emulator + cleaning functionality. + """ def __init__(self, additional_error_info='', err_code=1): """ NB generator clean failure. 
diff --git a/stress_test/sbemu.py b/stress_test/sbemu.py
index 86cb97fc..dcbe7f5e 100644
--- a/stress_test/sbemu.py
+++ b/stress_test/sbemu.py
@@ -22,7 +22,7 @@
 
 class SBEmu:
     """
-    All South-bound related functionality is here
+    Southbound emulator super class
     """
     def __init__(self, sb_emu_base_dir, test_config):
         """

From f5d44862cab2fa98df39c1948c3ed365edd8c755 Mon Sep 17 00:00:00 2001
From: Konstantinos Papadopoulos
Date: Wed, 15 Feb 2017 14:08:36 +0200
Subject: [PATCH 5/8] Moved multinet/mtcbench under sbemu directory

---
 .codeclimate.yml                         | 3 +--
 emulators/{ => sbemu}/mt_cbench/build.sh | 0
 emulators/{ => sbemu}/mt_cbench/clean.sh | 0
 emulators/{ => sbemu}/multinet/build.sh  | 0
 emulators/{ => sbemu}/multinet/clean.sh  | 0
 5 files changed, 1 insertion(+), 2 deletions(-)
 rename emulators/{ => sbemu}/mt_cbench/build.sh (100%)
 rename emulators/{ => sbemu}/mt_cbench/clean.sh (100%)
 rename emulators/{ => sbemu}/multinet/build.sh (100%)
 rename emulators/{ => sbemu}/multinet/clean.sh (100%)

diff --git a/.codeclimate.yml b/.codeclimate.yml
index 436eb71e..a0413d18 100644
--- a/.codeclimate.yml
+++ b/.codeclimate.yml
@@ -17,6 +17,5 @@ ratings:
 - "**.md"
 exclude_paths:
 - "jenkins_jobs/*"
-- "emulators/cbench/*"
-- "emulators/mt_cbench/*"
+- "emulators/sbemu/mtcbench/*"
 - "controllers/*"
diff --git a/emulators/mt_cbench/build.sh b/emulators/sbemu/mt_cbench/build.sh
similarity index 100%
rename from emulators/mt_cbench/build.sh
rename to emulators/sbemu/mt_cbench/build.sh
diff --git a/emulators/mt_cbench/clean.sh b/emulators/sbemu/mt_cbench/clean.sh
similarity index 100%
rename from emulators/mt_cbench/clean.sh
rename to emulators/sbemu/mt_cbench/clean.sh
diff --git a/emulators/multinet/build.sh b/emulators/sbemu/multinet/build.sh
similarity index 100%
rename from emulators/multinet/build.sh
rename to emulators/sbemu/multinet/build.sh
diff --git a/emulators/multinet/clean.sh b/emulators/sbemu/multinet/clean.sh
similarity index 100%
rename from emulators/multinet/clean.sh
rename to emulators/sbemu/multinet/clean.sh

From d0edb80c6f198863a0b0c266fb2a8d411bc1c779 Mon Sep 17 00:00:00 2001
From: Konstantinos Papadopoulos
Date: Wed, 15 Feb 2017 14:10:15 +0200
Subject: [PATCH 6/8] Rename of nstat orchestrator

---
 stress_test/{nstat_orchestrator.py => nstat.py} | 0
 1 file changed, 0 insertions(+), 0 deletions(-)
 rename stress_test/{nstat_orchestrator.py => nstat.py} (100%)

diff --git a/stress_test/nstat_orchestrator.py b/stress_test/nstat.py
similarity index 100%
rename from stress_test/nstat_orchestrator.py
rename to stress_test/nstat.py

From a32957c2e180f8ade5ad4b1e833db6f35ce0120d Mon Sep 17 00:00:00 2001
From: Konstantinos Papadopoulos
Date: Wed, 15 Feb 2017 14:13:18 +0200
Subject: [PATCH 7/8] Adaptations for nstat_orchestrator rename

---
 .travis.yml               |  2 +-
 stress_test/report_gen.py |  2 +-
 stress_test/test_type.py  | 16 ++++++++--------
 3 files changed, 10 insertions(+), 10 deletions(-)

diff --git a/.travis.yml b/.travis.yml
index 7f37f590..591ef18f 100644
--- a/.travis.yml
+++ b/.travis.yml
@@ -29,7 +29,7 @@ script:
 # - coverage run --omit='/usr/local/lib/python3.4/*' --include='./util/*.py' --parallel-mode ./util/unittests/test_html.py
 # - coverage run --omit='/usr/local/lib/python3.4/*' --include='./util/*.py' --parallel-mode ./util/unittests/test_process.py
 # - coverage run --omit='/usr/local/lib/python3.4/*' --include='./util/*.py' --parallel-mode ./util/unittests/test_netutil.py
-  - coverage run --omit='/usr/local/lib/python3.4/*' --include='./util/*.py' --parallel-mode
./stress_test/nstat_orchestrator.py --help + - coverage run --omit='/usr/local/lib/python3.4/*' --include='./util/*.py' --parallel-mode ./stress_test/nstat.py --help - coverage combine after_success: coveralls diff --git a/stress_test/report_gen.py b/stress_test/report_gen.py index b8008e1f..beecb998 100644 --- a/stress_test/report_gen.py +++ b/stress_test/report_gen.py @@ -204,7 +204,7 @@ def generate_html_report(self): """ # Generate html report and move it within test output dir # --------------------------------------------------------------------- - logging.info('[nstat_orchestrator] Generating html report') + logging.info('[nstat] Generating html report') stress_test.html_generation.generate_html(self.report_spec, self.args.html_report) shutil.move(self.args.html_report, self.args.output_dir) diff --git a/stress_test/test_type.py b/stress_test/test_type.py index 1600b569..53ac1cd2 100755 --- a/stress_test/test_type.py +++ b/stress_test/test_type.py @@ -101,7 +101,7 @@ def test_selector(self, args): # run the test if nstat_test_type_run == 'sb_active_scalability_mtcbench': if not args.bypass_test: - logging.info('[nstat_orchestrator] running test: {0}'. + logging.info('[nstat] running test: {0}'. format(nstat_test_type_run)) self.total_samples = \ nstat_test_run.sb_active_scalability_mtcbench_run( @@ -123,7 +123,7 @@ def test_selector(self, args): elif nstat_test_type_run == 'sb_active_stability_mtcbench': if not args.bypass_test: - logging.info('[nstat_orchestrator] running test:{0}'. + logging.info('[nstat] running test:{0}'. format(nstat_test_type_run)) self.total_samples = \ nstat_test_run.sb_active_stability_mtcbench_run( @@ -145,7 +145,7 @@ def test_selector(self, args): elif nstat_test_type_run == 'sb_active_scalability_multinet': if not args.bypass_test: - logging.info('[nstat_orchestrator] running test: {0}'. + logging.info('[nstat] running test: {0}'. format(nstat_test_type_run)) self.total_samples = \ nstat_test_run.sb_active_scalability_multinet_run( @@ -167,7 +167,7 @@ def test_selector(self, args): elif nstat_test_type_run == 'sb_idle_scalability_mtcbench': if not args.bypass_test: - logging.info('[nstat_orchestrator] running test: {0}'. + logging.info('[nstat] running test: {0}'. format(nstat_test_type_run)) self.total_samples = \ nstat_test_run.sb_idle_scalability_mtcbench_run( @@ -189,7 +189,7 @@ def test_selector(self, args): elif nstat_test_type_run == 'sb_idle_scalability_multinet': if not args.bypass_test: - logging.info('[nstat_orchestrator] running test: {0}'. + logging.info('[nstat] running test: {0}'. format(nstat_test_type_run)) self.total_samples = \ nstat_test_run.sb_idle_scalability_multinet_run( @@ -210,7 +210,7 @@ def test_selector(self, args): format(self.test_type)) elif nstat_test_type_run == 'sb_idle_stability_multinet': if not args.bypass_test: - logging.info('[nstat_orchestrator] running test: {0}'. + logging.info('[nstat] running test: {0}'. format(nstat_test_type_run)) self.total_samples = \ nstat_test_run.sb_idle_stability_multinet_run( @@ -232,7 +232,7 @@ def test_selector(self, args): elif nstat_test_type_run == 'nb_active_scalability_multinet': if not args.bypass_test: - logging.info('[nstat_orchestrator] running test: {0}'. + logging.info('[nstat] running test: {0}'. format(nstat_test_type_run)) self.total_samples = \ nstat_test_run.nb_active_scalability_multinet_run( @@ -252,5 +252,5 @@ def test_selector(self, args): logging.error('[{0}] Fail to generate test report.'. 
                              format(self.test_type))
         else:
-            logging.error('[nstat_orchestrator] not valid test configuration')
+            logging.error('[nstat] not valid test configuration')
             exit(0)

From 4ad2cf9d961e1c3fd6fae9272d8d6c2dabc36700 Mon Sep 17 00:00:00 2001
From: Konstantinos Papadopoulos
Date: Wed, 15 Feb 2017 14:14:34 +0200
Subject: [PATCH 8/8] MTCbench emulator folder rename

---
 emulators/sbemu/{mt_cbench => mtcbench}/build.sh | 0
 emulators/sbemu/{mt_cbench => mtcbench}/clean.sh | 0
 2 files changed, 0 insertions(+), 0 deletions(-)
 rename emulators/sbemu/{mt_cbench => mtcbench}/build.sh (100%)
 rename emulators/sbemu/{mt_cbench => mtcbench}/clean.sh (100%)

diff --git a/emulators/sbemu/mt_cbench/build.sh b/emulators/sbemu/mtcbench/build.sh
similarity index 100%
rename from emulators/sbemu/mt_cbench/build.sh
rename to emulators/sbemu/mtcbench/build.sh
diff --git a/emulators/sbemu/mt_cbench/clean.sh b/emulators/sbemu/mtcbench/clean.sh
similarity index 100%
rename from emulators/sbemu/mt_cbench/clean.sh
rename to emulators/sbemu/mtcbench/clean.sh
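Taken together, this series renames stress_test/nb_generator.py to stress_test/nbemu.py, stress_test/emulator.py to stress_test/sbemu.py, the stress_test/nstat_orchestrator.py entry point to stress_test/nstat.py, and moves the MTCBench/Multinet handler scripts under emulators/sbemu/. A minimal sketch of what a caller such as stress_test/test_run.py imports after the series follows; the helper name prepare_sb_emulator and the base_dir/json_conf parameters are illustrative placeholders, not names taken from this patch set.

    # Hypothetical helper mirroring the SB-emulator preparation steps of
    # stress_test/test_run.py after this rename series.
    import stress_test.sbemu as sbemu   # formerly stress_test.emulator
    # (the NB side moved the same way: stress_test.nb_generator -> stress_test.nbemu)

    def prepare_sb_emulator(base_dir, json_conf):
        # SBEmu.new() is the factory used by test_run.py; it presumably selects
        # the concrete SB emulator (e.g. Multinet) from the JSON test config.
        sb_emu = sbemu.SBEmu.new(base_dir, json_conf)
        sb_emu.init_ssh()   # open the SSH session to the emulator node
        sb_emu.build()      # runs the relocated emulators/sbemu/*/build.sh handler
        return sb_emu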