From 7b5352a81bcbd75fd96449b3dd2a4fc5a4c0cea9 Mon Sep 17 00:00:00 2001 From: Graham Dumpleton Date: Sun, 25 May 2014 14:45:59 +1000 Subject: [PATCH 01/14] Fix header file include guard. --- src/server/wsgi_server.h | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/server/wsgi_server.h b/src/server/wsgi_server.h index 3dd09001..f1b323d3 100644 --- a/src/server/wsgi_server.h +++ b/src/server/wsgi_server.h @@ -1,5 +1,5 @@ -#ifndef WSGI_SERVER -#define WSGI_SERVER +#ifndef WSGI_SERVER_H +#define WSGI_SERVER_H /* ------------------------------------------------------------------------- */ From 0ea8ed172dc523e97b497aa836a72bf5149c657a Mon Sep 17 00:00:00 2001 From: Graham Dumpleton Date: Sun, 25 May 2014 14:47:04 +1000 Subject: [PATCH 02/14] Defer daemon process creation until after scoreboard creation on initial startup of Apache. --- src/server/mod_wsgi.c | 40 ++++++++++++++++++++++++++++++++++++++-- src/server/wsgi_apache.h | 2 ++ 2 files changed, 40 insertions(+), 2 deletions(-) diff --git a/src/server/mod_wsgi.c b/src/server/mod_wsgi.c index 920cc5df..1ba07793 100644 --- a/src/server/mod_wsgi.c +++ b/src/server/mod_wsgi.c @@ -9186,6 +9186,13 @@ static int wsgi_start_daemons(apr_pool_t *p) return OK; } +static apr_pool_t *wsgi_pconf_pool = NULL; + +static int wsgi_deferred_start_daemons(apr_pool_t *p, ap_scoreboard_e sb_type) +{ + return wsgi_start_daemons(wsgi_pconf_pool); +} + static apr_status_t wsgi_socket_connect_un(apr_socket_t *sock, struct sockaddr_un *sa) { @@ -11204,10 +11211,39 @@ static int wsgi_hook_init(apr_pool_t *pconf, apr_pool_t *ptemp, if (!wsgi_python_after_fork) wsgi_python_init(pconf); - /* Startup separate named daemon processes. */ + /* + * Startup separate named daemon processes. This is + * a bit tricky as we only want to do this after the + * scoreboard has been created. On the initial server + * startup though, this hook function is called prior + * to the MPM being run, which means the scoreboard + * hasn't been created yet. In that case we need to + * defer process creation until after that, which we + * can only do by hooking into the pre_mpm hook after + * scoreboard creation has been done. On a server + * restart, the scoreboard will be preserved, so we + * can do it here, which is just as well as the pre_mpm + * hook isn't run on a restart. + */ #if defined(MOD_WSGI_WITH_DAEMONS) - status = wsgi_start_daemons(pconf); + if (!ap_scoreboard_image) { + /* + * Need to remember the pool we were given here as + * the pre_mpm hook functions get given a different + * pool which isn't the one we want and if we use + * that then Apache will crash when it is being + * shutdown. So our pre_mpm hook will use the pool + * we have remembered here. 
+ */ + + wsgi_pconf_pool = pconf; + + ap_hook_pre_mpm(wsgi_deferred_start_daemons, NULL, NULL, + APR_HOOK_REALLY_LAST); + } + else + status = wsgi_start_daemons(pconf); #endif return status; diff --git a/src/server/wsgi_apache.h b/src/server/wsgi_apache.h index d03b359f..0d4bf7a3 100644 --- a/src/server/wsgi_apache.h +++ b/src/server/wsgi_apache.h @@ -86,6 +86,8 @@ APR_DECLARE_OPTIONAL_FN(char *, ssl_var_lookup, (apr_pool_t *, #include "http_request.h" #include "util_script.h" #include "util_md5.h" +#include "mpm_common.h" +#include "scoreboard.h" #ifdef APLOG_USE_MODULE APLOG_USE_MODULE(wsgi); From 6078ae4c39df3f687af2633b1196a2a087e558f0 Mon Sep 17 00:00:00 2001 From: Graham Dumpleton Date: Sun, 25 May 2014 15:53:16 +1000 Subject: [PATCH 03/14] Expose Apache server and status information and adjust how mod_wsgi process status provided. --- src/server/wsgi_interp.c | 40 +++++++--- src/server/wsgi_metrics.c | 152 +++++++++++++++++++++++++++++++++++++- src/server/wsgi_metrics.h | 4 +- tests/environ.wsgi | 6 ++ 4 files changed, 188 insertions(+), 14 deletions(-) diff --git a/src/server/wsgi_interp.c b/src/server/wsgi_interp.c index 7edb04f3..ebd7ba9b 100644 --- a/src/server/wsgi_interp.c +++ b/src/server/wsgi_interp.c @@ -339,6 +339,8 @@ InterpreterObject *newInterpreterObject(const char *name) int is_threaded = 0; int is_forked = 0; + const char *str = NULL; + /* Create handle for interpreter and local data. */ self = PyObject_New(InterpreterObject, &Interpreter_Type); @@ -1066,8 +1068,8 @@ InterpreterObject *newInterpreterObject(const char *name) PyModule_AddObject(module, "threads_per_process", object); #endif - PyModule_AddObject(module, "thread_utilization", PyCFunction_New( - &wsgi_get_utilization_method[0], NULL)); + PyModule_AddObject(module, "process_status", PyCFunction_New( + &wsgi_process_status_method[0], NULL)); /* Done with the 'mod_wsgi' module. */ @@ -1140,13 +1142,6 @@ InterpreterObject *newInterpreterObject(const char *name) * the number of processes and threads available. */ -#if PY_MAJOR_VERSION >= 3 - PyModule_AddObject(module, "mpm_name", PyUnicode_DecodeLatin1(MPM_NAME, - strlen(MPM_NAME), NULL)); -#else - PyModule_AddObject(module, "mpm_name", PyString_FromString(MPM_NAME)); -#endif - ap_mpm_query(AP_MPMQ_IS_THREADED, &is_threaded); if (is_threaded != AP_MPMQ_NOT_SUPPORTED) { ap_mpm_query(AP_MPMQ_MAX_THREADS, &max_threads); @@ -1168,6 +1163,33 @@ InterpreterObject *newInterpreterObject(const char *name) object = PyLong_FromLong(max_threads); PyModule_AddObject(module, "threads_per_process", object); + str = ap_get_server_description(); +#if PY_MAJOR_VERSION >= 3 + object = PyUnicode_DecodeLatin1(str, strlen(str), NULL); +#else + object = PyString_FromString(str); +#endif + PyModule_AddObject(module, "description", object); + + str = MPM_NAME; +#if PY_MAJOR_VERSION >= 3 + object = PyUnicode_DecodeLatin1(str, strlen(str), NULL); +#else + object = PyString_FromString(str); +#endif + PyModule_AddObject(module, "mpm_name", object); + + str = ap_get_server_built(); +#if PY_MAJOR_VERSION >= 3 + object = PyUnicode_DecodeLatin1(str, strlen(str), NULL); +#else + object = PyString_FromString(str); +#endif + PyModule_AddObject(module, "build_date", object); + + PyModule_AddObject(module, "server_status", PyCFunction_New( + &wsgi_apache_server_status_method[0], NULL)); + /* Done with the 'apache' module. 
*/ Py_DECREF(module); diff --git a/src/server/wsgi_metrics.c b/src/server/wsgi_metrics.c index 22ecbf12..d37deb65 100644 --- a/src/server/wsgi_metrics.c +++ b/src/server/wsgi_metrics.c @@ -78,13 +78,157 @@ double wsgi_end_request(void) return wsgi_utilization_time(-1); } -static PyObject *wsgi_get_thread_utilization(PyObject *self, PyObject *args) +static PyObject *wsgi_process_status(void) { - return PyFloat_FromDouble(wsgi_utilization_time(0)); + PyObject *result = NULL; + + PyObject *object = NULL; + + result = PyDict_New(); + + object = PyFloat_FromDouble(wsgi_utilization_time(0)); + PyDict_SetItemString(result, "thread_utilization", object); + Py_DECREF(object); + + return result; +} + +PyMethodDef wsgi_process_status_method[] = { + { "process_status", (PyCFunction)wsgi_process_status, + METH_NOARGS, 0 }, + { NULL }, +}; + +/* ------------------------------------------------------------------------- */ + +static PyObject *wsgi_apache_server_status(void) +{ + PyObject *result = NULL; + + PyObject *object = NULL; + + apr_time_t now_time; + apr_interval_time_t up_time; + + ap_generation_t mpm_generation; + + int j, i, res; + int ready; + int busy; + unsigned long count; + unsigned long lres; + apr_off_t bytes; + apr_off_t bcount, kbcount; + worker_score *ws_record; + process_score *ps_record; + + int server_limit = 0; + int thread_limit = 0; + + /* Scoreboard is not available in inetd mode. Give up now. */ + + if (!ap_exists_scoreboard_image()) + return PyDict_New(); + + ap_mpm_query(AP_MPMQ_HARD_LIMIT_THREADS, &thread_limit); + ap_mpm_query(AP_MPMQ_HARD_LIMIT_DAEMONS, &server_limit); + + now_time = apr_time_now(); + up_time = (apr_uint32_t)apr_time_sec( + now_time - ap_scoreboard_image->global->restart_time); + +#if defined(AP_MPMQ_GENERATION) + ap_mpm_query(AP_MPMQ_GENERATION, &mpm_generation); +#else + mpm_generation = ap_my_generation; +#endif + + ready = 0; + busy = 0; + count = 0; + bcount = 0; + kbcount = 0; + + for (i = 0; i < server_limit; ++i) { + ps_record = ap_get_scoreboard_process(i); + for (j = 0; j < thread_limit; ++j) { + int indx = (i * thread_limit) + j; + +#if AP_MODULE_MAGIC_AT_LEAST(20071023,0) + ws_record = ap_get_scoreboard_worker_from_indexes(i, j); +#else + ws_record = ap_get_scoreboard_worker(i, j); +#endif + res = ws_record->status; + + if (!ps_record->quiescing + && ps_record->pid) { + if (res == SERVER_READY) { + if (ps_record->generation == mpm_generation) + ready++; + } + else if (res != SERVER_DEAD && + res != SERVER_STARTING && + res != SERVER_IDLE_KILL) { + busy++; + } + } + + lres = ws_record->access_count; + bytes = ws_record->bytes_served; + + if (lres != 0 || (res != SERVER_READY && res != SERVER_DEAD)) { + count += lres; + bcount += bytes; + + if (bcount >= 1024) { + kbcount += (bcount >> 10); + bcount = bcount & 0x3ff; + } + } + } + } + + /* + * Generate the dictionary for the server status from the + * calculated values. 
+ */ + + result = PyDict_New(); + + object = PyInt_FromLong(now_time); + PyDict_SetItemString(result, "time", object); + Py_DECREF(object); + + object = PyInt_FromLong(up_time); + PyDict_SetItemString(result, "uptime", object); + Py_DECREF(object); + + object = PyInt_FromLong(mpm_generation); + PyDict_SetItemString(result, "generation", object); + Py_DECREF(object); + + object = PyInt_FromLong(count); + PyDict_SetItemString(result, "total_accesses", object); + Py_DECREF(object); + + object = PyInt_FromLong(kbcount); + PyDict_SetItemString(result, "total_kbytes", object); + Py_DECREF(object); + + object = PyInt_FromLong(busy); + PyDict_SetItemString(result, "busy_workers", object); + Py_DECREF(object); + + object = PyInt_FromLong(ready); + PyDict_SetItemString(result, "idle_workers", object); + Py_DECREF(object); + + return result; } -PyMethodDef wsgi_get_utilization_method[] = { - { "thread_utilization", (PyCFunction)wsgi_get_thread_utilization, +PyMethodDef wsgi_apache_server_status_method[] = { + { "server_status", (PyCFunction)wsgi_apache_server_status, METH_NOARGS, 0 }, { NULL }, }; diff --git a/src/server/wsgi_metrics.h b/src/server/wsgi_metrics.h index 034f910e..367bb137 100644 --- a/src/server/wsgi_metrics.h +++ b/src/server/wsgi_metrics.h @@ -31,11 +31,13 @@ extern int wsgi_dump_stack_traces; extern apr_thread_mutex_t* wsgi_monitor_lock; -extern PyMethodDef wsgi_get_utilization_method[]; +extern PyMethodDef wsgi_process_status_method[]; extern double wsgi_start_request(void); extern double wsgi_end_request(void); +extern PyMethodDef wsgi_apache_server_status_method[]; + /* ------------------------------------------------------------------------- */ #endif diff --git a/tests/environ.wsgi b/tests/environ.wsgi index 3dc83e24..1302b77c 100644 --- a/tests/environ.wsgi +++ b/tests/environ.wsgi @@ -34,13 +34,19 @@ def application(environ, start_response): file=output) print('mod_wsgi.threads_per_process: %s' % mod_wsgi.threads_per_process, file=output) + print('mod_wsgi.process_status: %s' % mod_wsgi.process_status(), + file=output) print(file=output) + print('apache.description: %s' % apache.description, file=output) + print('apache.build_date: %s' % apache.build_date, file=output) print('apache.mpm_name: %s' % apache.mpm_name, file=output) print('apache.maximum_processes: %s' % apache.maximum_processes, file=output) print('apache.threads_per_process: %s' % apache.threads_per_process, file=output) + print('apache.server_status: %s' % apache.server_status(), + file=output) print(file=output) print('PATH: %s' % sys.path, file=output) From 629bcbe460058f0a5d9077169bd5ffbecc8daf61 Mon Sep 17 00:00:00 2001 From: Graham Dumpleton Date: Thu, 5 Jun 2014 14:41:50 +1000 Subject: [PATCH 04/14] Added New Relic platform plugin support. 
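
When --with-newrelic-platform (or the umbrella --with-newrelic
option) is used, a 'server-metrics.py' script is now generated in the
server root and pulled into the dedicated 'express' daemon process
group via WSGIImportScript. The generated script simply starts the
sampler, as per the SERVER_METRICS_SCRIPT template (the host/port
shown here are illustrative):

    from mod_wsgi.server.newrelic.main import start

    start('localhost:8000')

The sampler polls the Apache scoreboard once a second, aggregates
worker and request metrics, and reports them to the New Relic
platform API for each one minute period. Enabling the platform plugin
also implies --server-metrics, so the extended scoreboard is exported
to the WSGI application.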
--- .gitignore | 1 + setup.py | 2 +- src/server/__init__.py | 175 +++++- src/server/management/commands/runmodwsgi.py | 2 +- src/server/mod_wsgi.c | 3 +- src/server/newrelic/__init__.py | 0 src/server/newrelic/interface.py | 185 ++++++ src/server/newrelic/main.py | 62 ++ src/server/newrelic/sampler.py | 605 +++++++++++++++++++ src/server/wsgi_interp.c | 4 +- src/server/wsgi_metrics.c | 330 +++++++--- src/server/wsgi_metrics.h | 1 + src/server/wsgi_python.h | 76 +++ tests/environ.wsgi | 10 +- tox.ini | 3 + 15 files changed, 1331 insertions(+), 128 deletions(-) create mode 100644 src/server/newrelic/__init__.py create mode 100644 src/server/newrelic/interface.py create mode 100644 src/server/newrelic/main.py create mode 100644 src/server/newrelic/sampler.py diff --git a/.gitignore b/.gitignore index 183f8d84..da5f3fb6 100644 --- a/.gitignore +++ b/.gitignore @@ -22,3 +22,4 @@ lib apxs libtool docs/_build +newrelic.ini diff --git a/setup.py b/setup.py index 09ad4040..9604c040 100644 --- a/setup.py +++ b/setup.py @@ -179,7 +179,7 @@ def _version(): ], packages = ['mod_wsgi', 'mod_wsgi.server', 'mod_wsgi.server.management', 'mod_wsgi.server.management.commands', 'mod_wsgi.docs', - 'mod_wsgi.images'], + 'mod_wsgi.images', 'mod_wsgi.server.newrelic'], package_dir = {'mod_wsgi': 'src', 'mod_wsgi.docs': 'docs/_build/html', 'mod_wsgi.images': 'images'}, package_data = {'mod_wsgi.docs': _documentation(), diff --git a/src/server/__init__.py b/src/server/__init__.py index 286e9ba0..189e95a9 100644 --- a/src/server/__init__.py +++ b/src/server/__init__.py @@ -1,4 +1,4 @@ -from __future__ import print_function, division +from __future__ import print_function, division, absolute_import import os import sys @@ -126,7 +126,7 @@ def find_mimetypes(): LoadModule dir_module '%(modules_directory)s/mod_dir.so' LoadModule wsgi_module '%(mod_wsgi_so)s' - + LoadModule status_module '%(modules_directory)s/mod_status.so' @@ -200,8 +200,11 @@ def find_mimetypes(): WSGICallableObject '%(callable_object)s' WSGIPassAuthorization On - + ExtendedStatus On + + + SetHandler server-status Order deny,allow @@ -334,10 +337,18 @@ def find_mimetypes(): Include '%(filename)s' """ +APACHE_TOOLS_CONFIG = """ +WSGIDaemonProcess express display-name=%%{GROUP} threads=1 +""" + +APACHE_METRICS_CONFIG = """ +WSGIImportScript '%(server_root)s/server-metrics.py' \\ + process-group=express application-group=%%{GLOBAL} +""" + APACHE_WDB_CONFIG = """ -WSGIDaemonProcess wdb-server display-name=%%{GROUP} threads=1 WSGIImportScript '%(server_root)s/wdb-server.py' \\ - process-group=wdb-server application-group=%%{GLOBAL} + process-group=express application-group=%%{GLOBAL} """ def generate_apache_config(options): @@ -377,6 +388,12 @@ def generate_apache_config(options): print(APACHE_INCLUDE_CONFIG % dict(filename=filename), file=fp) + if options['with_newrelic_platform'] or options['with_wdb']: + print(APACHE_TOOLS_CONFIG % options, file=fp) + + if options['with_newrelic_platform']: + print(APACHE_METRICS_CONFIG % options, file=fp) + if options['with_wdb']: print(APACHE_WDB_CONFIG % options, file=fp) @@ -419,7 +436,7 @@ def _modified(path): if mtime != _times[path]: return True - except: + except Exception: # If any exception occured, likely that file has been # been removed just before stat(), so force a restart. @@ -430,7 +447,7 @@ def _modified(path): def _monitor(): global _files - while 1: + while True: # Check modification times on all files in sys.modules. 
for module in list(sys.modules.values()): @@ -455,7 +472,8 @@ def _monitor(): try: return _queue.get(timeout=_interval) - except: + + except queue.Empty: pass _thread = threading.Thread(target=_monitor) @@ -464,7 +482,7 @@ def _monitor(): def _exiting(): try: _queue.put(True) - except: + except Exception: pass _thread.join() @@ -507,7 +525,7 @@ def __init__(self, script, callable_object='application', try: self.mtime = os.path.getmtime(script) - except: + except Exception: self.mtime = None if with_newrelic: @@ -539,7 +557,7 @@ def setup_wdb(self): def reload_required(self, environ): try: mtime = os.path.getmtime(self.script) - except: + except Exception: mtime = None return mtime != self.mtime @@ -564,7 +582,7 @@ def __call__(self, environ, start_response): script = '%(script)s' callable_object = '%(callable_object)s' -with_newrelic = %(with_newrelic)s +with_newrelic = %(with_newrelic_agent)s with_wdb = %(with_wdb)s handler = mod_wsgi.server.ApplicationHandler(script, callable_object, @@ -630,6 +648,17 @@ def generate_wsgi_handler_script(options): with open(path, 'w') as fp: print(WSGI_DEFAULT_SCRIPT % options, file=fp) +SERVER_METRICS_SCRIPT = """ +from mod_wsgi.server.newrelic.main import start + +start('%(host)s:%(port)s') +""" + +def generate_server_metrics_script(options): + path = os.path.join(options['server_root'], 'server-metrics.py') + with open(path, 'w') as fp: + print(SERVER_METRICS_SCRIPT % options, file=fp) + WDB_SERVER_SCRIPT = """ from wdb_server import server try: @@ -704,7 +733,7 @@ def generate_wdb_server_script(options): """ def generate_control_scripts(options): - path = os.path.join(options['server_root'], 'server-admin') + path = os.path.join(options['server_root'], 'apachectl') with open(path, 'w') as fp: print(WSGI_CONTROL_SCRIPT.lstrip() % options, file=fp) @@ -715,6 +744,12 @@ def generate_control_scripts(options): if options['envvars_script']: print(APACHE_ENVVARS_FILE.lstrip() % options, file=fp) +def check_percentage(option, opt_str, value, parser): + if value is not None and value < 0 or value > 1: + raise optparse.OptionValueError('%s option value needs to be within ' + 'the range 0 to 1.' % opt_str) + setattr(parser.values, option.dest, value) + option_list = ( optparse.make_option('--host', default=None, metavar='IP-ADDRESS', help='The specific host (IP address) interface on which ' @@ -736,9 +771,28 @@ def generate_control_scripts(options): optparse.make_option('--max-clients', type='int', default=None, metavar='NUMBER', help='The maximum number of simultaneous ' 'client connections that will be accepted. This will default ' - 'to being 1.25 times the total number of threads in the ' + 'to being 1.5 times the total number of threads in the ' 'request thread pools across all process handling requests.'), + optparse.make_option('--initial-workers', type='float', default=None, + metavar='NUMBER', action='callback', callback=check_percentage, + help='The initial number of workers to create on startup ' + 'expressed as a percentage of the maximum number of clients. ' + 'The value provided should be between 0 and 1. The default is ' + 'dependent on the type of MPM being used.'), + optparse.make_option('--minimum-spare-workers', type='float', + default=None, metavar='NUMBER', action='callback', + callback=check_percentage, help='The minimum number of spare ' + 'workers to maintain expressed as a percentage of the maximum ' + 'number of clients. The value provided should be between 0 and ' + '1. 
The default is dependent on the type of MPM being used.'), + optparse.make_option('--maximum-spare-workers', type='float', + default=None, metavar='NUMBER', action='callback', + callback=check_percentage, help='The maximum number of spare ' + 'workers to maintain expressed as a percentage of the maximum ' + 'number of clients. The value provided should be between 0 and ' + '1. The default is dependent on the type of MPM being used.'), + optparse.make_option('--limit-request-body', type='int', default=10485760, metavar='NUMBER', help='The maximum number of bytes which are ' 'allowed in a request body. Defaults to 10485760 (10MB).'), @@ -868,10 +922,18 @@ def generate_control_scripts(options): 'to be made over the same connection. Defaults to 0, indicating ' 'that keep alive connections are disabled.'), + optparse.make_option('--server-metrics', action='store_true', + default=False, help='Flag indicating whether extended web server ' + 'status will be available within the WSGI application. Defaults ' + 'to being disabled meaning that only the state of each worker ' + 'will be available. Will be automatically enabled as a side ' + 'effect of enabling server status URL or New Relic server level ' + 'monitoring.'), optparse.make_option('--server-status', action='store_true', default=False, help='Flag indicating whether web server status ' 'will be available at the /server-status sub URL. Defaults to ' - 'being disabled'), + 'being disabled.'), + optparse.make_option('--include-file', action='append', dest='include_files', metavar='FILE-PATH', help='Specify the ' 'path to an additional web server configuration file to be ' @@ -934,9 +996,18 @@ def generate_control_scripts(options): 'file used by the web server.'), optparse.make_option('--with-newrelic', action='store_true', - default=False, help='Flag indicating whether New Relic ' - 'performance monitoring should be enabled for the WSGI ' - 'application.'), + default=False, help='Flag indicating whether all New Relic ' + 'performance monitoring features should be enabled.'), + + optparse.make_option('--with-newrelic-agent', action='store_true', + default=False, help='Flag indicating whether the New Relic ' + 'Python agent should be enabled for reporting application server ' + 'metrics.'), + optparse.make_option('--with-newrelic-platform', action='store_true', + default=False, help='Flag indicating whether the New Relic ' + 'platform plugin should be enabled for reporting server level ' + 'metrics.'), + optparse.make_option('--with-wdb', action='store_true', default=False, help='Flag indicating whether the wdb interactive debugger ' 'should be enabled for the WSGI application.'), @@ -1062,8 +1133,18 @@ def _cmd_setup_server(args, options): options['keep_alive'] = options['keep_alive_timeout'] != 0 + if options['with_newrelic']: + options['with_newrelic_agent'] = True + options['with_newrelic_platform'] = True + + if options['with_newrelic_platform']: + options['server_metrics'] = True + generate_wsgi_handler_script(options) + if options['with_newrelic_platform']: + generate_server_metrics_script(options) + if options['with_wdb']: generate_wdb_server_script(options) @@ -1072,13 +1153,50 @@ def _cmd_setup_server(args, options): if options['max_clients'] is not None: max_clients = max(options['max_clients'], max_clients) else: - max_clients = int(1.25 * max_clients) + max_clients = int(1.5 * max_clients) + + initial_workers = options['initial_workers'] + min_spare_workers = options['minimum_spare_workers'] + max_spare_workers = 
options['maximum_spare_workers'] + + if initial_workers is None: + prefork_initial_workers = 0.02 + else: + prefork_initial_workers = initial_workers + + if min_spare_workers is None: + prefork_min_spare_workers = prefork_initial_workers + else: + prefork_min_spare_workers = min_spare_workers + + if max_spare_workers is None: + prefork_max_spare_workers = 0.05 + else: + prefork_max_spare_workers = max_spare_workers options['prefork_max_clients'] = max_clients options['prefork_server_limit'] = max_clients - options['prefork_start_servers'] = max(1, int(0.1 * max_clients)) - options['prefork_min_spare_servers'] = options['prefork_start_servers'] - options['prefork_max_spare_servers'] = max(1, int(0.4 * max_clients)) + options['prefork_start_servers'] = max(1, int( + prefork_initial_workers * max_clients)) + options['prefork_min_spare_servers'] = max(1, int( + prefork_min_spare_workers * max_clients)) + options['prefork_max_spare_servers'] = max(1, int( + prefork_max_spare_workers * max_clients)) + + if initial_workers is None: + worker_initial_workers = 0.2 + else: + worker_initial_workers = initial_workers + + if min_spare_workers is None: + worker_min_spare_workers = worker_initial_workers + else: + worker_min_spare_workers = min_spare_workers + + if max_spare_workers is None: + worker_max_spare_workers = 0.6 + else: + worker_max_spare_workers = max_spare_workers options['worker_max_clients'] = max_clients @@ -1098,15 +1216,15 @@ def _cmd_setup_server(args, options): options['worker_max_clients'] = (options['worker_server_limit'] * options['worker_threads_per_child']) - options['worker_start_servers'] = max(1, int(0.1 * - options['worker_server_limit'])) + options['worker_start_servers'] = max(1, + int(worker_initial_workers * options['worker_server_limit'])) options['worker_min_spare_threads'] = max( options['worker_threads_per_child'], - int(0.2 * options['worker_server_limit']) * + int(worker_min_spare_workers * options['worker_server_limit']) * options['worker_threads_per_child']) options['worker_max_spare_threads'] = max( options['worker_threads_per_child'], - int(0.4 * options['worker_server_limit']) * + int(worker_max_spare_workers * options['worker_server_limit']) * options['worker_threads_per_child']) options['httpd_conf'] = os.path.join(options['server_root'], 'httpd.conf') @@ -1138,7 +1256,10 @@ def _cmd_setup_server(args, options): options['url'] = 'http://%s:%s/' % (options['host'], options['port']) + if options['server_metrics']: + options['httpd_arguments_list'].append('-DWSGI_SERVER_METRICS') if options['server_status']: + options['httpd_arguments_list'].append('-DWSGI_SERVER_METRICS') options['httpd_arguments_list'].append('-DWSGI_SERVER_STATUS') if options['access_log']: options['httpd_arguments_list'].append('-DWSGI_ACCESS_LOG') @@ -1183,7 +1304,7 @@ def cmd_start_server(params): options = cmd_setup_server(params, usage) - executable = os.path.join(options['server_root'], 'server-admin') + executable = os.path.join(options['server_root'], 'apachectl') name = executable.ljust(len(options['process_name'])) os.execl(executable, name, 'start', '-DNO_DETACH') diff --git a/src/server/management/commands/runmodwsgi.py b/src/server/management/commands/runmodwsgi.py index c1b64b37..1248526f 100644 --- a/src/server/management/commands/runmodwsgi.py +++ b/src/server/management/commands/runmodwsgi.py @@ -46,6 +46,6 @@ def handle(self, *args, **options): options = mod_wsgi.server._cmd_setup_server(args, options) - executable = os.path.join(options['server_root'], 'wsgi-server') + 
executable = os.path.join(options['server_root'], 'apachectl') name = executable.ljust(len(options['process_name'])) os.execl(executable, name, 'start', '-DNO_DETACH') diff --git a/src/server/mod_wsgi.c b/src/server/mod_wsgi.c index 1ba07793..06315867 100644 --- a/src/server/mod_wsgi.c +++ b/src/server/mod_wsgi.c @@ -9212,7 +9212,8 @@ static apr_status_t wsgi_socket_connect_un(apr_socket_t *sock, do { rv = connect(rawsock, (struct sockaddr*)sa, - sizeof(*sa) + strlen(sa->sun_path)); + APR_OFFSETOF(struct sockaddr_un, sun_path) + + strlen(sa->sun_path) + 1); } while (rv == -1 && errno == EINTR); if ((rv == -1) && (errno == EINPROGRESS || errno == EALREADY) diff --git a/src/server/newrelic/__init__.py b/src/server/newrelic/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/src/server/newrelic/interface.py b/src/server/newrelic/interface.py new file mode 100644 index 00000000..c9e788cf --- /dev/null +++ b/src/server/newrelic/interface.py @@ -0,0 +1,185 @@ +import zlib +import sys +import socket +import os +import types +import json +import httplib +import logging + +_logger = logging.getLogger(__name__) + +# Python 3 compatibility helpers. + +PY2 = sys.version_info[0] == 2 +PY3 = sys.version_info[0] == 3 + +if PY3: + def b(s): + return s.encode('latin-1') +else: + def b(s): + return s + +# Helpers for json encoding and decoding. + +def json_encode(obj, **kwargs): + _kwargs = {} + + if type(b'') is type(''): + _kwargs['encoding'] = 'latin-1' + + def _encode(o): + if isinstance(o, bytes): + return o.decode('latin-1') + elif isinstance(o, types.GeneratorType): + return list(o) + elif hasattr(o, '__iter__'): + return list(iter(o)) + raise TypeError(repr(o) + ' is not JSON serializable') + + _kwargs['default'] = _encode + _kwargs['separators'] = (',', ':') + + _kwargs.update(kwargs) + + return json.dumps(obj, **_kwargs) + +def json_decode(s, **kwargs): + return json.loads(s, **kwargs) + +# Platform plugin interface. 
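+#
+# The plugin talks to the platform API by POSTing a JSON document
+# which, as constructed by send_metrics() below, has roughly the
+# following shape (all values shown here are illustrative only):
+#
+#   {"agent": {"host": "web-1", "pid": 1234, "version": "1.0.0"},
+#    "components": [{"name": "Apache/mod_wsgi", "guid": "...",
+#        "duration": 60, "metrics": {"Component/...": {...}}}]}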
+
+class Interface(object):
+
+    class NetworkInterfaceException(Exception): pass
+    class DiscardDataForRequest(NetworkInterfaceException): pass
+    class RetryDataForRequest(NetworkInterfaceException): pass
+    class ServerIsUnavailable(RetryDataForRequest): pass
+
+    USER_AGENT = 'ModWsgi-PythonPlugin/%s (Python %s %s)' % (
+            '1.0.0', sys.version.split()[0], sys.platform)
+
+    HOST = 'platform-api.newrelic.com'
+    URL = '/platform/v1/metrics'
+
+    def __init__(self, license_key):
+        self.license_key = license_key
+
+    def send_request(self, payload=()):
+        headers = {}
+        config = {}
+
+        license_key = self.license_key
+
+        if not self.license_key:
+            license_key = 'INVALID LICENSE KEY'
+
+        headers['User-Agent'] = self.USER_AGENT
+        headers['Content-Encoding'] = 'identity'
+        headers['X-License-Key'] = license_key
+
+        try:
+            data = json_encode(payload)
+
+        except Exception as exc:
+            _logger.exception('Error encoding data for JSON payload '
+                    'with payload of %r.', payload)
+
+            raise Interface.DiscardDataForRequest(str(exc))
+
+        if len(data) > 64*1024:
+            headers['Content-Encoding'] = 'deflate'
+            level = (len(data) < 2000000) and 1 or 9
+            data = zlib.compress(b(data), level)
+
+        try:
+            connection = httplib.HTTPSConnection(self.HOST, timeout=30.0)
+            connection.request('POST', self.URL, data, headers)
+            response = connection.getresponse()
+            content = response.read()
+
+        except httplib.HTTPException as exc:
+            raise Interface.RetryDataForRequest(str(exc))
+
+        finally:
+            connection.close()
+
+        if response.status != 200:
+            _logger.debug('Received a non 200 HTTP response from the data '
+                    'collector where headers=%r, status=%r and content=%r.',
+                    headers, response.status, content)
+
+        if response.status == 400:
+            if headers['Content-Encoding'] == 'deflate':
+                data = zlib.decompress(data)
+
+            _logger.error('Data collector is indicating that a bad '
+                    'request has been submitted for headers of %r and '
+                    'payload of %r with response of %r.', headers, data,
+                    content)
+
+            raise Interface.DiscardDataForRequest()
+
+        elif response.status == 403:
+            _logger.error('Data collector is indicating that the license '
+                    'key %r is not valid.', license_key)
+
+            raise Interface.DiscardDataForRequest()
+
+        elif response.status == 413:
+            _logger.warning('Data collector is indicating that a request '
+                    'was received where the request content size was over '
+                    'the maximum allowed size limit. The length of the '
+                    'request content was %d.', len(data))
+
+            raise Interface.DiscardDataForRequest()
+
+        elif response.status in (503, 504):
+            _logger.warning('Data collector is unavailable.')
+
+            raise Interface.ServerIsUnavailable()
+
+        elif response.status != 200:
+            _logger.warning('An unexpected HTTP response was received '
+                    'from the data collector of %r. The payload for '
+                    'the request was %r.', response.status, payload)
+
+            raise Interface.DiscardDataForRequest()
+
+        try:
+            if PY3:
+                content = content.decode('UTF-8')
+
+            result = json_decode(content)
+
+        except Exception as exc:
+            _logger.exception('Error decoding data for JSON payload '
+                    'with payload of %r.', content)
+
+            raise Interface.DiscardDataForRequest(str(exc))
+
+        if 'status' in result:
+            return result['status']
+
+        error_message = result['error']
+
+        raise Interface.DiscardDataForRequest(error_message)
+
+    def send_metrics(self, name, guid, version, duration, metrics):
+        agent = {}
+        agent['host'] = socket.gethostname()
+        agent['pid'] = os.getpid()
+        agent['version'] = version or '0.0.0'
+
+        component = {}
+        component['name'] = name
+        component['guid'] = guid
+        component['duration'] = duration
+        component['metrics'] = metrics
+
+        payload = {}
+        payload['agent'] = agent
+        payload['components'] = [component]
+
+        return self.send_request(payload)
diff --git a/src/server/newrelic/main.py b/src/server/newrelic/main.py
new file mode 100644
index 00000000..c336e3f0
--- /dev/null
+++ b/src/server/newrelic/main.py
@@ -0,0 +1,62 @@
+import os
+import logging
+
+try:
+    from ConfigParser import RawConfigParser, NoOptionError
+except ImportError:
+    from configparser import RawConfigParser, NoOptionError
+
+from .interface import Interface
+from .sampler import Sampler
+
+import apache
+
+LOG_LEVEL = {
+    'CRITICAL': logging.CRITICAL,
+    'ERROR': logging.ERROR,
+    'WARNING': logging.WARNING,
+    'INFO': logging.INFO,
+    'DEBUG': logging.DEBUG,
+}
+
+LOG_FORMAT = '%(asctime)s (%(process)d/%(threadName)s) ' \
+        '%(name)s %(levelname)s - %(message)s'
+
+def start(name):
+    if apache.scoreboard() is None:
+        return
+
+    config_object = RawConfigParser()
+
+    config_file = os.environ.get('NEW_RELIC_CONFIG_FILE')
+
+    if config_file:
+        config_object.read([config_file])
+
+    def option(name, section='newrelic', type=None, **kwargs):
+        try:
+            getter = 'get%s' % (type or '')
+            return getattr(config_object, getter)(section, name)
+        except NoOptionError:
+            if 'default' in kwargs:
+                return kwargs['default']
+            else:
+                raise
+
+    log_level = os.environ.get('NEW_RELIC_LOG_LEVEL', 'INFO').upper()
+    log_level = option('log_level', default=log_level).upper()
+
+    if log_level in LOG_LEVEL:
+        log_level = LOG_LEVEL[log_level]
+    else:
+        log_level = logging.INFO
+
+    logging.basicConfig(level=log_level, format=LOG_FORMAT)
+
+    license_key = os.environ.get('NEW_RELIC_LICENSE_KEY')
+    license_key = option('license_key', default=license_key)
+
+    interface = Interface(license_key)
+    sampler = Sampler(interface, name)
+
+    sampler.start()
diff --git a/src/server/newrelic/sampler.py b/src/server/newrelic/sampler.py
new file mode 100644
index 00000000..1d178a45
--- /dev/null
+++ b/src/server/newrelic/sampler.py
@@ -0,0 +1,605 @@
+import threading
+import atexit
+import os
+import sys
+import json
+import socket
+import time
+import math
+
+try:
+    import Queue as queue
+except ImportError:
+    import queue
+
+import apache
+
+SERVER_READY = '_'
+SERVER_STARTING = 'S'
+SERVER_BUSY_READ = 'R'
+SERVER_BUSY_WRITE = 'W'
+SERVER_BUSY_KEEPALIVE = 'K'
+SERVER_BUSY_LOG = 'L'
+SERVER_BUSY_DNS = 'D'
+SERVER_CLOSING = 'C'
+SERVER_GRACEFUL = 'G'
+SERVER_IDLE_KILL = 'I'
+SERVER_DEAD = '.'
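+
+# These single character worker states mirror the SERVER_* status
+# values Apache records in its scoreboard (httpd's scoreboard.h); they
+# are matched against the 'status' field of each worker dict returned
+# by apache.scoreboard().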
+
+STATUS_FLAGS = {
+    SERVER_READY: 'Ready',
+    SERVER_STARTING: 'Starting',
+    SERVER_BUSY_READ: 'Read',
+    SERVER_BUSY_WRITE: 'Write',
+    SERVER_BUSY_KEEPALIVE: 'Keepalive',
+    SERVER_BUSY_LOG: 'Logging',
+    SERVER_BUSY_DNS: 'DNS lookup',
+    SERVER_CLOSING: 'Closing',
+    SERVER_GRACEFUL: 'Graceful',
+    SERVER_IDLE_KILL: 'Dying',
+    SERVER_DEAD: 'Dead'
+}
+
+class Sample(dict):
+
+    def __init__(self, count=0, total=0.0, min=0.0, max=0.0,
+            sum_of_squares=0.0):
+        self.count = count
+        self.total = total
+        self.min = min
+        self.max = max
+        self.sum_of_squares = sum_of_squares
+
+    def __setattr__(self, name, value):
+        self[name] = value
+
+    def __getattr__(self, name):
+        return self[name]
+
+    def merge_stats(self, other):
+        self.total += other.total
+        self.min = self.count and min(self.min, other.min) or other.min
+        self.max = max(self.max, other.max)
+        self.sum_of_squares += other.sum_of_squares
+        self.count += other.count
+
+    def merge_value(self, value):
+        self.total += value
+        self.min = self.count and min(self.min, value) or value
+        self.max = max(self.max, value)
+        self.sum_of_squares += value ** 2
+        self.count += 1
+
+class Samples(object):
+
+    def __init__(self):
+        self.samples = {}
+
+    def __iter__(self):
+        return iter(self.samples.items())
+
+    def __nonzero__(self):
+        return bool(self.samples)
+
+    def sample_name(self, name):
+        return 'Component/' + name
+
+    def _assign_value(self, name, value):
+        if isinstance(value, Sample):
+            sample = value
+            self.samples[name] = sample
+        else:
+            sample = Sample()
+            self.samples[name] = sample
+            sample.merge_value(value)
+
+        return sample
+
+    def assign_value(self, name, value):
+        name = self.sample_name(name)
+
+        return self._assign_value(name, value)
+
+    def _merge_value(self, name, value):
+        sample = self.samples.get(name)
+
+        if sample is None:
+            sample = Sample()
+            self.samples[name] = sample
+
+        if isinstance(value, Sample):
+            sample.merge_stats(value)
+        else:
+            sample.merge_value(value)
+
+        return sample
+
+    def merge_value(self, name, value):
+        name = self.sample_name(name)
+
+        return self._merge_value(name, value)
+
+    def fetch_sample(self, name):
+        name = self.sample_name(name)
+
+        sample = self.samples.get(name)
+
+        if sample is None:
+            sample = Sample()
+            self.samples[name] = sample
+
+        return sample
+
+    def merge_samples(self, samples):
+        for name, sample in samples:
+            self._merge_value(name, sample)
+
+    def assign_samples(self, samples):
+        for name, sample in samples:
+            self._assign_value(name, sample)
+
+    def clear_samples(self):
+        self.samples.clear()
+
+class Sampler(object):
+
+    guid = 'au.com.dscpl.wsgi.mod_wsgi'
+    version = '1.0.0'
+
+    def __init__(self, interface, name):
+        self.interface = interface
+        self.name = name
+
+        self.running = False
+        self.lock = threading.Lock()
+
+        self.period_start = 0
+        self.access_count = 0
+        self.bytes_served = 0
+
+        self.request_samples = []
+
+        self.metric_data = Samples()
+
+        self.report_queue = queue.Queue()
+
+        self.report_thread = threading.Thread(target=self.report_main_loop)
+        self.report_thread.setDaemon(True)
+
+        self.report_start = 0
+        self.report_metrics = Samples()
+
+        self.monitor_queue = queue.Queue()
+
+        self.monitor_thread = threading.Thread(target=self.monitor_main_loop)
+        self.monitor_thread.setDaemon(True)
+
+        self.monitor_count = 0
+
+    def upload_report(self, start, end, metrics):
+        try:
+            self.interface.send_metrics(self.name, self.guid, self.version,
+                    end-start, metrics.samples)
+
+        except self.interface.RetryDataForRequest:
+            return True
+
+        except Exception:
+            pass
+
+        return False
+
+    def 
generate_request_metrics(self, harvest_data): + metrics = Samples() + + # Chart as 'Throughput'. + + metrics.merge_value('Requests/Throughput[|requests]', + Sample(count=harvest_data['access_count'], + total=harvest_data['access_count'])) + + # Calculate from the set of sampled requests the average + # and percentile metrics. + + requests = harvest_data['request_samples'] + + if requests: + for request in requests: + # Chart as 'Average'. + + metrics.merge_value('Requests/Response Time[seconds|request]', + request['duration']) + + requests.sort(key=lambda e: e['duration']) + + total = sum([x['duration'] for x in requests]) + + # Chart as 'Average'. + + metrics.merge_value('Requests/Percentiles/Average[seconds]', + total/len(requests)) + + idx50 = int(0.50 * len(requests)) + metrics.merge_value('Requests/Percentiles/Median[seconds]', + requests[idx50]['duration']) + + idx95 = int(0.95 * len(requests)) + metrics.merge_value('Requests/Percentiles/95%[seconds]', + requests[idx95]['duration']) + + idx99 = int(0.99 * len(requests)) + metrics.merge_value('Requests/Percentiles/99%[seconds]', + requests[idx99]['duration']) + + # Chart as 'Rate'. + + metrics.merge_value('Requests/Bytes Served[bytes]', + harvest_data['bytes_served']) + + return metrics + + def generate_process_metrics(self, harvest_data): + metrics = Samples() + + # Chart as 'Count'. Round to Integer. + + metrics.merge_value('Processes/Instances[|processes]', + Sample(count=math.ceil(float( + harvest_data['processes_running']) / + harvest_data['sample_count']))) + + metrics.merge_value('Processes/Lifecycle/Starting[|processes]', + Sample(count=harvest_data['processes_started'])) + + metrics.merge_value('Processes/Lifecycle/Stopping[|processes]', + Sample(count=harvest_data['processes_stopped'])) + + metrics.merge_value('Workers/Availability/Idle[|workers]', + Sample(count=math.ceil(float( + harvest_data['idle_workers']) / + harvest_data['sample_count']))) + metrics.merge_value('Workers/Availability/Busy[|workers]', + Sample(count=math.ceil(float( + harvest_data['busy_workers']) / + harvest_data['sample_count']))) + + # Chart as 'Percentage'. + + metrics.merge_value('Workers/Utilization[server]', + (float(harvest_data['busy_workers']) / + harvest_data['sample_count']) / ( + harvest_data['server_limit']*harvest_data['thread_limit'])) + + total = 0 + for value in harvest_data['worker_status'].values(): + value = float(value)/harvest_data['sample_count'] + total += value + + if total: + for key, value in harvest_data['worker_status'].items(): + if key != SERVER_DEAD and value != 0: + label = STATUS_FLAGS.get(key, 'Unknown') + + # Chart as 'Average'. Round to Integer. + + value = float(value)/harvest_data['sample_count'] + + metrics.merge_value('Workers/Status/%s[workers]' % + label, (value/total)*total) + + return metrics + + def report_main_loop(self): + # We need a set of cached metrics for the case where + # we fail in uploading the metric data and need to + # retain it for the next attempt to upload data. + + retries = 0 + retained_start = 0 + retained = Samples() + + # We simply wait to be passed the metric data to be + # reported for the current sample period. + + while True: + harvest_data = self.report_queue.get() + + # If samples is None then we are being told to + # exit as the process is being shutdown. Otherwise + # we should be passed the cumulative metric data + # and the set of sampled requests. 
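+            #
+            # The harvest_data dictionary is the one accumulated by the
+            # monitor thread in monitor_main_loop() below; among other
+            # keys it carries 'period_start', 'period_end', 'metrics',
+            # 'request_samples', 'access_count' and 'bytes_served'.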
+
+            if harvest_data is None:
+                return
+
+            start = harvest_data['period_start']
+            end = harvest_data['period_end']
+
+            metrics = harvest_data['metrics']
+
+            # Add metric to track how many Apache server instances
+            # are reporting for each sample period.
+
+            # Chart as 'Count'. Round to Integer.
+
+            metrics.merge_value('Server/Instances[|servers]', 0)
+
+            # Generate percentile metrics for request samples.
+
+            metrics.merge_samples(self.generate_request_metrics(harvest_data))
+            metrics.merge_samples(self.generate_process_metrics(harvest_data))
+
+            # If we had metrics from a previous reporting period
+            # because we couldn't upload the metric data, we need
+            # to merge the data from the current reporting period
+            # with that for the previous period.
+
+            if retained:
+                start = retained_start
+                retained.merge_samples(metrics)
+                metrics = retained
+
+            # Now attempt to upload the metric data.
+
+            retry = self.upload_report(start, end, metrics)
+
+            # If a failure occurred but the failure type was such that
+            # we could try again to upload the data, then retain the
+            # metrics. If we have too many failed attempts though, we
+            # give up.
+
+            if retry:
+                retries += 1
+
+                if retries == 5:
+                    retries = 0
+
+                else:
+                    retained = metrics
+
+            else:
+                retries = 0
+
+            if retries == 0:
+                retained_start = 0
+                retained.clear_samples()
+
+            else:
+                retained_start = start
+                retained = metrics
+
+    def generate_scoreboard(self, sample_start=None):
+        busy_workers = 0
+        idle_workers = 0
+        access_count = 0
+        bytes_served = 0
+
+        active_processes = 0
+
+        scoreboard = apache.scoreboard()
+
+        if sample_start is None:
+            sample_start = scoreboard['current_time']
+
+        scoreboard['request_samples'] = request_samples = []
+
+        for process in scoreboard['processes']:
+            process['active_workers'] = 0
+
+            for worker in process['workers']:
+                status = worker['status']
+
+                if not process['quiescing'] and process['pid']:
+                    if (status == SERVER_READY and process['generation'] ==
+                            scoreboard['running_generation']):
+
+                        process['active_workers'] += 1
+                        idle_workers += 1
+
+                    elif status not in (SERVER_DEAD, SERVER_STARTING,
+                            SERVER_IDLE_KILL):
+
+                        process['active_workers'] += 1
+                        busy_workers += 1
+
+                count = worker['access_count']
+
+                if count or status not in (SERVER_READY, SERVER_DEAD):
+                    access_count += count
+                    bytes_served += worker['bytes_served']
+
+                current_time = scoreboard['current_time']
+
+                start_time = worker['start_time']
+                stop_time = worker['stop_time']
+
+                if (stop_time > start_time and sample_start < stop_time
+                        and stop_time <= current_time):
+
+                    duration = stop_time - start_time
+                    thread_num = worker['thread_num']
+
+                    request_samples.append(dict(start_time=start_time,
+                            duration=duration, thread_num=thread_num))
+
+            if process['active_workers']:
+                active_processes += 1
+
+        scoreboard['busy_workers'] = busy_workers
+        scoreboard['idle_workers'] = idle_workers
+        scoreboard['access_count'] = access_count
+        scoreboard['bytes_served'] = bytes_served
+
+        scoreboard['active_processes'] = active_processes
+
+        return scoreboard
+
+    def record_process_statistics(self, scoreboard, harvest_data):
+        current_active_processes = scoreboard['active_processes']
+        previous_active_processes = harvest_data['active_processes']
+
+        harvest_data['active_processes'] = current_active_processes
+        harvest_data['processes_running'] += current_active_processes
+
+        if current_active_processes > previous_active_processes:
+            harvest_data['processes_started'] += (current_active_processes -
+                    previous_active_processes)
+
+        elif current_active_processes < 
previous_active_processes: + harvest_data['processes_stopped'] += (previous_active_processes - + current_active_processes) + + harvest_data['idle_workers'] += scoreboard['idle_workers'] + harvest_data['busy_workers'] += scoreboard['busy_workers'] + + for process in scoreboard['processes']: + for worker in process['workers']: + harvest_data['worker_status'][worker['status']] += 1 + + def monitor_main_loop(self): + scoreboard = self.generate_scoreboard() + + harvest_start = scoreboard['current_time'] + sample_start = harvest_start + sample_duration = 0.0 + + access_count = scoreboard['access_count'] + bytes_served = scoreboard['bytes_served'] + + harvest_data = {} + + harvest_data['sample_count'] = 0 + harvest_data['period_start'] = harvest_start + + harvest_data['metrics'] = Samples() + + harvest_data['request_samples'] = [] + + harvest_data['active_processes'] = 0 + + harvest_data['processes_running'] = 0 + harvest_data['processes_started'] = 0 + harvest_data['processes_stopped'] = 0 + + harvest_data['idle_workers'] = 0 + harvest_data['busy_workers'] = 0 + + harvest_data['server_limit'] = scoreboard['server_limit'] + harvest_data['thread_limit'] = scoreboard['thread_limit'] + + harvest_data['worker_status'] = {} + + for status in STATUS_FLAGS.keys(): + harvest_data['worker_status'][status] = 0 + + harvest_data['access_count'] = 0 + harvest_data['bytes_served'] = 0 + + # Chart as 'Count'. Round to Integer. + + harvest_data['metrics'].merge_value('Server/Restarts[|servers]', 0) + + start = time.time() + end = start + 60.0 + + while True: + try: + # We want to collect metrics on a regular second + # interval so we need to align the timeout value. + + now = time.time() + start += 1.0 + timeout = start - now + + return self.monitor_queue.get(timeout=timeout) + + except queue.Empty: + pass + + harvest_data['sample_count'] += 1 + + scoreboard = self.generate_scoreboard(sample_start) + + harvest_end = scoreboard['current_time'] + sample_end = harvest_end + + sample_duration = sample_end - sample_start + + self.record_process_statistics(scoreboard, harvest_data) + + harvest_data['request_samples'].extend( + scoreboard['request_samples']) + + access_count_delta = scoreboard['access_count'] + access_count_delta -= access_count + access_count = scoreboard['access_count'] + + harvest_data['access_count'] += access_count_delta + + bytes_served_delta = scoreboard['bytes_served'] + bytes_served_delta -= bytes_served + bytes_served = scoreboard['bytes_served'] + + harvest_data['bytes_served'] += bytes_served_delta + + now = time.time() + + if now >= end: + harvest_data['period_end'] = harvest_end + + self.report_queue.put(harvest_data) + + harvest_start = harvest_end + metrics = Samples() + end += 60.0 + + _harvest_data = {} + + _harvest_data['sample_count'] = 0 + _harvest_data['period_start'] = harvest_start + + _harvest_data['metrics'] = Samples() + + _harvest_data['request_samples'] = [] + + _harvest_data['active_processes'] = ( + harvest_data['active_processes']) + + _harvest_data['processes_running'] = 0 + _harvest_data['processes_started'] = 0 + _harvest_data['processes_stopped'] = 0 + + _harvest_data['idle_workers'] = 0 + _harvest_data['busy_workers'] = 0 + + _harvest_data['server_limit'] = scoreboard['server_limit'] + _harvest_data['thread_limit'] = scoreboard['thread_limit'] + + _harvest_data['worker_status'] = {} + + for status in STATUS_FLAGS.keys(): + _harvest_data['worker_status'][status] = 0 + + _harvest_data['access_count'] = 0 + _harvest_data['bytes_served'] = 0 + + harvest_data = 
_harvest_data + + sample_start = sample_end + + def terminate(self): + try: + self.report_queue.put(None) + self.monitor_queue.put(None) + except Exception: + pass + + self.monitor_thread.join() + self.report_thread.join() + + def start(self): + with self.lock: + if not self.running: + self.running = True + atexit.register(self.terminate) + self.monitor_thread.start() + self.report_thread.start() diff --git a/src/server/wsgi_interp.c b/src/server/wsgi_interp.c index ebd7ba9b..406974b1 100644 --- a/src/server/wsgi_interp.c +++ b/src/server/wsgi_interp.c @@ -1187,8 +1187,8 @@ InterpreterObject *newInterpreterObject(const char *name) #endif PyModule_AddObject(module, "build_date", object); - PyModule_AddObject(module, "server_status", PyCFunction_New( - &wsgi_apache_server_status_method[0], NULL)); + PyModule_AddObject(module, "scoreboard", PyCFunction_New( + &wsgi_apache_scoreboard_method[0], NULL)); /* Done with the 'apache' module. */ diff --git a/src/server/wsgi_metrics.c b/src/server/wsgi_metrics.c index d37deb65..9ee86982 100644 --- a/src/server/wsgi_metrics.c +++ b/src/server/wsgi_metrics.c @@ -101,134 +101,274 @@ PyMethodDef wsgi_process_status_method[] = { /* ------------------------------------------------------------------------- */ -static PyObject *wsgi_apache_server_status(void) +WSGI_STATIC_INTERNED_STRING(server_limit); +WSGI_STATIC_INTERNED_STRING(thread_limit); +WSGI_STATIC_INTERNED_STRING(running_generation); +WSGI_STATIC_INTERNED_STRING(restart_time); +WSGI_STATIC_INTERNED_STRING(current_time); +WSGI_STATIC_INTERNED_STRING(running_time); +WSGI_STATIC_INTERNED_STRING(process_num); +WSGI_STATIC_INTERNED_STRING(pid); +WSGI_STATIC_INTERNED_STRING(generation); +WSGI_STATIC_INTERNED_STRING(quiescing); +WSGI_STATIC_INTERNED_STRING(workers); +WSGI_STATIC_INTERNED_STRING(thread_num); +WSGI_STATIC_INTERNED_STRING(status); +WSGI_STATIC_INTERNED_STRING(access_count); +WSGI_STATIC_INTERNED_STRING(bytes_served); +WSGI_STATIC_INTERNED_STRING(start_time); +WSGI_STATIC_INTERNED_STRING(stop_time); +WSGI_STATIC_INTERNED_STRING(last_used); +WSGI_STATIC_INTERNED_STRING(client); +WSGI_STATIC_INTERNED_STRING(request); +WSGI_STATIC_INTERNED_STRING(vhost); +WSGI_STATIC_INTERNED_STRING(processes); + +static PyObject *wsgi_status_flags[SERVER_NUM_STATUS]; + +#define WSGI_CREATE_STATUS_FLAG(name, val) \ + wsgi_status_flags[name] = wsgi_PyString_InternFromString(val) + +static PyObject *wsgi_apache_scoreboard(void) { - PyObject *result = NULL; + PyObject *scoreboard_dict = NULL; - PyObject *object = NULL; + PyObject *process_list = NULL; - apr_time_t now_time; - apr_interval_time_t up_time; + PyObject *object = NULL; - ap_generation_t mpm_generation; + apr_time_t current_time; + apr_interval_time_t running_time; - int j, i, res; - int ready; - int busy; - unsigned long count; - unsigned long lres; - apr_off_t bytes; - apr_off_t bcount, kbcount; + global_score *gs_record; worker_score *ws_record; process_score *ps_record; - int server_limit = 0; - int thread_limit = 0; - - /* Scoreboard is not available in inetd mode. Give up now. */ - - if (!ap_exists_scoreboard_image()) - return PyDict_New(); + int j, i; + + static int init_static = 0; + + /* Initialise interned strings the first time. 
*/ + + if (!init_static) { + WSGI_CREATE_INTERNED_STRING_ID(server_limit); + WSGI_CREATE_INTERNED_STRING_ID(thread_limit); + WSGI_CREATE_INTERNED_STRING_ID(running_generation); + WSGI_CREATE_INTERNED_STRING_ID(restart_time); + WSGI_CREATE_INTERNED_STRING_ID(current_time); + WSGI_CREATE_INTERNED_STRING_ID(running_time); + WSGI_CREATE_INTERNED_STRING_ID(process_num); + WSGI_CREATE_INTERNED_STRING_ID(pid); + WSGI_CREATE_INTERNED_STRING_ID(generation); + WSGI_CREATE_INTERNED_STRING_ID(quiescing); + WSGI_CREATE_INTERNED_STRING_ID(workers); + WSGI_CREATE_INTERNED_STRING_ID(thread_num); + WSGI_CREATE_INTERNED_STRING_ID(status); + WSGI_CREATE_INTERNED_STRING_ID(access_count); + WSGI_CREATE_INTERNED_STRING_ID(bytes_served); + WSGI_CREATE_INTERNED_STRING_ID(start_time); + WSGI_CREATE_INTERNED_STRING_ID(stop_time); + WSGI_CREATE_INTERNED_STRING_ID(last_used); + WSGI_CREATE_INTERNED_STRING_ID(client); + WSGI_CREATE_INTERNED_STRING_ID(request); + WSGI_CREATE_INTERNED_STRING_ID(vhost); + WSGI_CREATE_INTERNED_STRING_ID(processes); + + WSGI_CREATE_STATUS_FLAG(SERVER_DEAD, "."); + WSGI_CREATE_STATUS_FLAG(SERVER_READY, "_"); + WSGI_CREATE_STATUS_FLAG(SERVER_STARTING, "S"); + WSGI_CREATE_STATUS_FLAG(SERVER_BUSY_READ, "R"); + WSGI_CREATE_STATUS_FLAG(SERVER_BUSY_WRITE, "W"); + WSGI_CREATE_STATUS_FLAG(SERVER_BUSY_KEEPALIVE, "K"); + WSGI_CREATE_STATUS_FLAG(SERVER_BUSY_LOG, "L"); + WSGI_CREATE_STATUS_FLAG(SERVER_BUSY_DNS, "D"); + WSGI_CREATE_STATUS_FLAG(SERVER_CLOSING, "C"); + WSGI_CREATE_STATUS_FLAG(SERVER_GRACEFUL, "G"); + WSGI_CREATE_STATUS_FLAG(SERVER_IDLE_KILL, "I"); + + init_static = 1; + } - ap_mpm_query(AP_MPMQ_HARD_LIMIT_THREADS, &thread_limit); - ap_mpm_query(AP_MPMQ_HARD_LIMIT_DAEMONS, &server_limit); + /* Scoreboard needs to exist. */ - now_time = apr_time_now(); - up_time = (apr_uint32_t)apr_time_sec( - now_time - ap_scoreboard_image->global->restart_time); + if (!ap_exists_scoreboard_image()) { + Py_INCREF(Py_None); -#if defined(AP_MPMQ_GENERATION) - ap_mpm_query(AP_MPMQ_GENERATION, &mpm_generation); -#else - mpm_generation = ap_my_generation; -#endif + return Py_None; + } - ready = 0; - busy = 0; - count = 0; - bcount = 0; - kbcount = 0; + gs_record = ap_get_scoreboard_global(); - for (i = 0; i < server_limit; ++i) { - ps_record = ap_get_scoreboard_process(i); - for (j = 0; j < thread_limit; ++j) { - int indx = (i * thread_limit) + j; + if (!gs_record) { + Py_INCREF(Py_None); -#if AP_MODULE_MAGIC_AT_LEAST(20071023,0) - ws_record = ap_get_scoreboard_worker_from_indexes(i, j); -#else - ws_record = ap_get_scoreboard_worker(i, j); -#endif - res = ws_record->status; - - if (!ps_record->quiescing - && ps_record->pid) { - if (res == SERVER_READY) { - if (ps_record->generation == mpm_generation) - ready++; - } - else if (res != SERVER_DEAD && - res != SERVER_STARTING && - res != SERVER_IDLE_KILL) { - busy++; - } - } - - lres = ws_record->access_count; - bytes = ws_record->bytes_served; - - if (lres != 0 || (res != SERVER_READY && res != SERVER_DEAD)) { - count += lres; - bcount += bytes; - - if (bcount >= 1024) { - kbcount += (bcount >> 10); - bcount = bcount & 0x3ff; - } - } - } + return Py_None; } - /* - * Generate the dictionary for the server status from the - * calculated values. - */ + /* Return everything in a dictionary. Start with global. 
*/ - result = PyDict_New(); + scoreboard_dict = PyDict_New(); - object = PyInt_FromLong(now_time); - PyDict_SetItemString(result, "time", object); + object = wsgi_PyInt_FromLong(gs_record->server_limit); + PyDict_SetItem(scoreboard_dict, + WSGI_INTERNED_STRING(server_limit), object); Py_DECREF(object); - object = PyInt_FromLong(up_time); - PyDict_SetItemString(result, "uptime", object); + object = wsgi_PyInt_FromLong(gs_record->thread_limit); + PyDict_SetItem(scoreboard_dict, + WSGI_INTERNED_STRING(thread_limit), object); Py_DECREF(object); - object = PyInt_FromLong(mpm_generation); - PyDict_SetItemString(result, "generation", object); + object = wsgi_PyInt_FromLong(gs_record->running_generation); + PyDict_SetItem(scoreboard_dict, + WSGI_INTERNED_STRING(running_generation), object); Py_DECREF(object); - object = PyInt_FromLong(count); - PyDict_SetItemString(result, "total_accesses", object); + object = PyFloat_FromDouble(apr_time_sec(( + double)gs_record->restart_time)); + PyDict_SetItem(scoreboard_dict, + WSGI_INTERNED_STRING(restart_time), object); Py_DECREF(object); - object = PyInt_FromLong(kbcount); - PyDict_SetItemString(result, "total_kbytes", object); - Py_DECREF(object); + current_time = apr_time_now(); - object = PyInt_FromLong(busy); - PyDict_SetItemString(result, "busy_workers", object); + object = PyFloat_FromDouble(apr_time_sec((double)current_time)); + PyDict_SetItem(scoreboard_dict, + WSGI_INTERNED_STRING(current_time), object); Py_DECREF(object); - object = PyInt_FromLong(ready); - PyDict_SetItemString(result, "idle_workers", object); + running_time = (apr_uint32_t)apr_time_sec((double) + current_time - ap_scoreboard_image->global->restart_time); + + object = wsgi_PyInt_FromLongLong(running_time); + PyDict_SetItem(scoreboard_dict, + WSGI_INTERNED_STRING(running_time), object); Py_DECREF(object); - return result; + /* Now add in the processes/workers. 
*/ + + process_list = PyList_New(0); + + for (i = 0; i < gs_record->server_limit; ++i) { + PyObject *process_dict = NULL; + PyObject *worker_list = NULL; + + ps_record = ap_get_scoreboard_process(i); + + process_dict = PyDict_New(); + PyList_Append(process_list, process_dict); + + object = wsgi_PyInt_FromLong(i); + PyDict_SetItem(process_dict, + WSGI_INTERNED_STRING(process_num), object); + Py_DECREF(object); + + object = wsgi_PyInt_FromLong(ps_record->pid); + PyDict_SetItem(process_dict, + WSGI_INTERNED_STRING(pid), object); + Py_DECREF(object); + + object = wsgi_PyInt_FromLong(ps_record->generation); + PyDict_SetItem(process_dict, + WSGI_INTERNED_STRING(generation), object); + Py_DECREF(object); + + object = PyBool_FromLong(ps_record->quiescing); + PyDict_SetItem(process_dict, + WSGI_INTERNED_STRING(quiescing), object); + Py_DECREF(object); + + worker_list = PyList_New(0); + PyDict_SetItem(process_dict, + WSGI_INTERNED_STRING(workers), worker_list); + + for (j = 0; j < gs_record->thread_limit; ++j) { + PyObject *worker_dict = NULL; + +#if AP_MODULE_MAGIC_AT_LEAST(20071023,0) + ws_record = ap_get_scoreboard_worker_from_indexes(i, j); +#else + ws_record = ap_get_scoreboard_worker(i, j); +#endif + + worker_dict = PyDict_New(); + + PyList_Append(worker_list, worker_dict); + + object = wsgi_PyInt_FromLong(ws_record->thread_num); + PyDict_SetItem(worker_dict, + WSGI_INTERNED_STRING(thread_num), object); + Py_DECREF(object); + + object = wsgi_PyInt_FromLong(ws_record->generation); + PyDict_SetItem(worker_dict, + WSGI_INTERNED_STRING(generation), object); + Py_DECREF(object); + + object = wsgi_status_flags[ws_record->status]; + PyDict_SetItem(worker_dict, + WSGI_INTERNED_STRING(status), object); + + object = wsgi_PyInt_FromLong(ws_record->access_count); + PyDict_SetItem(worker_dict, + WSGI_INTERNED_STRING(access_count), object); + Py_DECREF(object); + + object = wsgi_PyInt_FromUnsignedLongLong(ws_record->bytes_served); + PyDict_SetItem(worker_dict, + WSGI_INTERNED_STRING(bytes_served), object); + Py_DECREF(object); + + object = PyFloat_FromDouble(apr_time_sec( + (double)ws_record->start_time)); + PyDict_SetItem(worker_dict, + WSGI_INTERNED_STRING(start_time), object); + Py_DECREF(object); + + object = PyFloat_FromDouble(apr_time_sec( + (double)ws_record->stop_time)); + PyDict_SetItem(worker_dict, + WSGI_INTERNED_STRING(stop_time), object); + Py_DECREF(object); + + object = wsgi_PyInt_FromLongLong(ws_record->last_used); + PyDict_SetItem(worker_dict, + WSGI_INTERNED_STRING(last_used), object); + Py_DECREF(object); + + object = wsgi_PyString_FromString(ws_record->client); + PyDict_SetItem(worker_dict, + WSGI_INTERNED_STRING(client), object); + Py_DECREF(object); + + object = wsgi_PyString_FromString(ws_record->request); + PyDict_SetItem(worker_dict, + WSGI_INTERNED_STRING(request), object); + Py_DECREF(object); + + object = wsgi_PyString_FromString(ws_record->vhost); + PyDict_SetItem(worker_dict, + WSGI_INTERNED_STRING(vhost), object); + Py_DECREF(object); + + Py_DECREF(worker_dict); + } + + Py_DECREF(worker_list); + Py_DECREF(process_dict); + } + + PyDict_SetItem(scoreboard_dict, + WSGI_INTERNED_STRING(processes), process_list); + Py_DECREF(process_list); + + return scoreboard_dict; } -PyMethodDef wsgi_apache_server_status_method[] = { - { "server_status", (PyCFunction)wsgi_apache_server_status, +/* ------------------------------------------------------------------------- */ + +PyMethodDef wsgi_apache_scoreboard_method[] = { + { "scoreboard", (PyCFunction)wsgi_apache_scoreboard, METH_NOARGS, 0 }, 
{ NULL }, }; diff --git a/src/server/wsgi_metrics.h b/src/server/wsgi_metrics.h index 367bb137..7a51bf65 100644 --- a/src/server/wsgi_metrics.h +++ b/src/server/wsgi_metrics.h @@ -37,6 +37,7 @@ extern double wsgi_start_request(void); extern double wsgi_end_request(void); extern PyMethodDef wsgi_apache_server_status_method[]; +extern PyMethodDef wsgi_apache_scoreboard_method[]; /* ------------------------------------------------------------------------- */ diff --git a/src/server/wsgi_python.h b/src/server/wsgi_python.h index 0e48ee3c..8c3841a5 100644 --- a/src/server/wsgi_python.h +++ b/src/server/wsgi_python.h @@ -108,6 +108,82 @@ /* ------------------------------------------------------------------------- */ +#if PY_MAJOR_VERSION >= 3 +#define wsgi_PyString_InternFromString(str) \ + PyUnicode_InternFromString(str) +#else +#define wsgi_PyString_InternFromString(str) \ + PyString_InternFromString(str) +#endif + +#if PY_MAJOR_VERSION >= 3 +#define wsgi_PyString_FromString(str) \ + PyUnicode_DecodeLatin1(str, strlen(str), NULL) +#else +#define wsgi_PyString_FromString(str) \ + PyString_FromString(str) +#endif + +#ifdef HAVE_LONG_LONG +#define wsgi_PyInt_FromLongLong(val) \ + PyLong_FromLongLong(val) +#else +#if PY_MAJOR_VERSION >= 3 +#define wsgi_PyInt_FromLongLong(val) \ + PyLong_FromLong(val) +#else +#define wsgi_PyInt_FromLongLong(val) \ + PyInt_FromLong(val) +#endif +#endif + +#ifdef HAVE_LONG_LONG +#define wsgi_PyInt_FromUnsignedLongLong(val) \ + PyLong_FromUnsignedLongLong(val) +#else +#if PY_MAJOR_VERSION >= 3 +#define wsgi_PyInt_FromUnsignedLongLong(val) \ + PyLong_FromLong(val) +#else +#define wsgi_PyInt_FromUnsignedLongLong(val) \ + PyInt_FromLong(val) +#endif +#endif + +#if PY_MAJOR_VERSION >= 3 +#define wsgi_PyInt_FromLong(val) \ + PyLong_FromLong(val) +#else +#define wsgi_PyInt_FromLong(val) \ + PyInt_FromLong(val) +#endif + +#if PY_MAJOR_VERSION >= 3 +#define wsgi_PyInt_FromUnsignedLong(val) \ + PyLong_FromUnsignedLong(val) +#else +#define wsgi_PyInt_FromUnsignedLong(val) \ + PyInt_FromUnsignedLong(val) +#endif + +/* ------------------------------------------------------------------------- */ + +#define WSGI_STATIC_INTERNED_STRING(name) \ + static PyObject *wsgi_id_##name + +#define WSGI_CREATE_INTERNED_STRING(name, val) \ + if (wsgi_id_##name) ; else wsgi_id_##name = \ + wsgi_PyString_InternFromString(val) + +#define WSGI_CREATE_INTERNED_STRING_ID(name) \ + if (wsgi_id_##name) ; else wsgi_id_##name = \ + wsgi_PyString_InternFromString(#name) + +#define WSGI_INTERNED_STRING(name) \ + wsgi_id_##name + +/* ------------------------------------------------------------------------- */ + #endif /* vi: set sw=4 expandtab : */ diff --git a/tests/environ.wsgi b/tests/environ.wsgi index 1302b77c..ac131a88 100644 --- a/tests/environ.wsgi +++ b/tests/environ.wsgi @@ -45,10 +45,18 @@ def application(environ, start_response): file=output) print('apache.threads_per_process: %s' % apache.threads_per_process, file=output) - print('apache.server_status: %s' % apache.server_status(), + print('apache.scoreboard: %s' % apache.scoreboard(), file=output) print(file=output) + scoreboard = apache.scoreboard() + + for process in scoreboard['processes']: + for worker in process['workers']: + print(worker['status'], file=output, end='') + print(file=output) + print(file=output) + print('PATH: %s' % sys.path, file=output) print(file=output) diff --git a/tox.ini b/tox.ini index ea952efb..6cbcac40 100644 --- a/tox.ini +++ b/tox.ini @@ -1,2 +1,5 @@ [tox] envlist = py26,py27,py33 + +[testenv] +deps = newrelic 
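For reference, the scoreboard dictionary constructed above can be consumed from a WSGI application along the following lines. This is a minimal sketch rather than part of the patch series: the ``summarise_scoreboard`` helper is hypothetical, it assumes ``mod_status`` is loaded so that ``apache.scoreboard()`` returns a dictionary rather than ``None``, and it uses only keys interned in wsgi_metrics.c above ('processes', 'workers', 'pid' and 'status'):

    import apache

    def summarise_scoreboard():
        scoreboard = apache.scoreboard()

        # None is returned when mod_status is not loaded or the
        # scoreboard is otherwise unavailable.
        if scoreboard is None:
            return 'scoreboard not available'

        lines = []

        for process in scoreboard['processes']:
            # Worker status values are the single character flags seen
            # on the Apache server status page, e.g. '_', 'R', 'W', '.'.
            flags = ''.join(worker['status'] for worker in process['workers'])
            lines.append('pid=%d %s' % (process['pid'], flags))

        return '\n'.join(lines)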
From a5d1ef52bed880a35d2184b36e6398a5fd6572e2 Mon Sep 17 00:00:00 2001 From: Graham Dumpleton Date: Thu, 5 Jun 2014 21:42:05 +1000 Subject: [PATCH 05/14] Convert time value to seconds. --- src/server/wsgi_metrics.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/src/server/wsgi_metrics.c b/src/server/wsgi_metrics.c index 9ee86982..1e162fef 100644 --- a/src/server/wsgi_metrics.c +++ b/src/server/wsgi_metrics.c @@ -331,7 +331,8 @@ static PyObject *wsgi_apache_scoreboard(void) WSGI_INTERNED_STRING(stop_time), object); Py_DECREF(object); - object = wsgi_PyInt_FromLongLong(ws_record->last_used); + object = PyFloat_FromDouble(apr_time_sec( + (double)ws_record->last_used)); PyDict_SetItem(worker_dict, WSGI_INTERNED_STRING(last_used), object); Py_DECREF(object); From 6e1f30a3e8db926e16f9d63da2240ba3eccaa542 Mon Sep 17 00:00:00 2001 From: Graham Dumpleton Date: Thu, 5 Jun 2014 21:42:26 +1000 Subject: [PATCH 06/14] Python 3 fixes for New Relic platform plugin. --- src/server/newrelic/interface.py | 6 +++++- src/server/newrelic/sampler.py | 6 +----- 2 files changed, 6 insertions(+), 6 deletions(-) diff --git a/src/server/newrelic/interface.py b/src/server/newrelic/interface.py index c9e788cf..a9db15fd 100644 --- a/src/server/newrelic/interface.py +++ b/src/server/newrelic/interface.py @@ -4,9 +4,13 @@ import os import types import json -import httplib import logging +try: + import http.client as httplib +except ImportError: + import httplib + _logger = logging.getLogger(__name__) # Python 3 compatibility helpers. diff --git a/src/server/newrelic/sampler.py b/src/server/newrelic/sampler.py index 1d178a45..74a5e40f 100644 --- a/src/server/newrelic/sampler.py +++ b/src/server/newrelic/sampler.py @@ -78,9 +78,6 @@ def __init__(self): def __iter__(self): return iter(self.samples.items()) - def __nonzero__(self): - return bool(self.samples) - def sample_name(self, name): return 'Component/' + name @@ -334,7 +331,7 @@ def report_main_loop(self): # to merge the data from the current reporting period # with that for the previous period. - if retained: + if retained.samples: start = retained_start retained.merge_samples(metrics) metrics = retained @@ -549,7 +546,6 @@ def monitor_main_loop(self): self.report_queue.put(harvest_data) harvest_start = harvest_end - metrics = Samples() end += 60.0 _harvest_data = {} From 1396103d69a11c0ffcee5f6ae3f2666211a3ded0 Mon Sep 17 00:00:00 2001 From: Graham Dumpleton Date: Thu, 5 Jun 2014 21:55:26 +1000 Subject: [PATCH 07/14] Increment version to 4.2.0. --- src/server/wsgi_version.h | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/src/server/wsgi_version.h b/src/server/wsgi_version.h index d25e4793..a97c5900 100644 --- a/src/server/wsgi_version.h +++ b/src/server/wsgi_version.h @@ -24,9 +24,9 @@ /* Module version information. */ #define MOD_WSGI_MAJORVERSION_NUMBER 4 -#define MOD_WSGI_MINORVERSION_NUMBER 1 -#define MOD_WSGI_MICROVERSION_NUMBER 3 -#define MOD_WSGI_VERSION_STRING "4.1.3" +#define MOD_WSGI_MINORVERSION_NUMBER 2 +#define MOD_WSGI_MICROVERSION_NUMBER 0 +#define MOD_WSGI_VERSION_STRING "4.2.0" /* ------------------------------------------------------------------------- */ From 2222b4b64aa0e4f89c72ce8881004c95015c1675 Mon Sep 17 00:00:00 2001 From: Graham Dumpleton Date: Thu, 5 Jun 2014 22:35:35 +1000 Subject: [PATCH 08/14] Add release notes for 4.2.0. 
--- docs/release-notes/index.rst | 2 ++ docs/release-notes/version-4.2.0.rst | 42 ++++++++++++++++++++++++++++ 2 files changed, 44 insertions(+) create mode 100644 docs/release-notes/version-4.2.0.rst diff --git a/docs/release-notes/index.rst b/docs/release-notes/index.rst index 00cce03d..45ff52dc 100644 --- a/docs/release-notes/index.rst +++ b/docs/release-notes/index.rst @@ -5,6 +5,8 @@ Release Notes .. toctree:: :maxdepth: 2 + version-4.2.0.rst + version-4.1.3.rst version-4.1.2.rst version-4.1.1.rst diff --git a/docs/release-notes/version-4.2.0.rst b/docs/release-notes/version-4.2.0.rst new file mode 100644 index 00000000..3c892754 --- /dev/null +++ b/docs/release-notes/version-4.2.0.rst @@ -0,0 +1,42 @@ +============= +Version 4.2.0 +============= + +Version 4.2.0 of mod_wsgi can be obtained from: + + https://github.com/GrahamDumpleton/mod_wsgi/archive/4.2.0.tar.gz + +Known Issues +------------ + +1. The makefiles for building mod_wsgi on Windows are currently broken and +need updating. As most new changes relate to mod_wsgi daemon mode, which is +not supported under Windows, you should keep using the last available +binary for version 3.X on Windows instead. + +New Features +------------ + +1. Added ``apache.scoreboard()`` function which provides access to a +dictionary of data derived from the Apache worker scoreboard. In effect this +provides access to the same information that is used to create the Apache +server status page. + +Note that if ``mod_status`` is not loaded into Apache, or the compile time +configuration of Apache prohibits the scoreboard from being available, this +function will return ``None``. + +Also be aware that only partial information about worker status, and no +information about requests, will be returned if the ``ExtendedStatus`` +directive is not also set to ``On``. + +Although ``mod_status`` needs to be loaded, it is not necessary to enable +any URL to expose the server status page. + +2. Added a platform plugin for New Relic which will report server status +information up to New Relic if the ``--with-newrelic`` option is supplied +when running mod_wsgi express. + +That same agent also enables the New Relic Python agent. If you only want +one or the other, you can instead use the ``--with-newrelic-agent`` and +``--with-newrelic-platform`` options. From 5000a6e1f0a4b480328a7469b4f572d3bd883bbc Mon Sep 17 00:00:00 2001 From: Graham Dumpleton Date: Fri, 6 Jun 2014 15:16:16 +1000 Subject: [PATCH 09/14] Require server metrics to be enabled and change API naming. 
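In practical terms, after this patch both entry points return ``None`` unless metrics have been enabled, so callers must guard for that. A minimal sketch of defensive use (illustrative only; the ``collect_metrics`` helper is hypothetical and assumes metrics were enabled via the new ``WSGIServerMetrics`` directive or the ``server-metrics=On`` daemon option introduced below):

    import mod_wsgi
    import apache

    def collect_metrics():
        process = mod_wsgi.server_metrics()   # per process data
        scoreboard = apache.server_metrics()  # scoreboard data

        if process is None or scoreboard is None:
            return None

        workers = sum(len(p['workers']) for p in scoreboard['processes'])

        return {
            'thread_utilization': process['thread_utilization'],
            'workers': workers,
        }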
--- src/server/__init__.py | 22 +++++++++------- src/server/mod_wsgi.c | 40 +++++++++++++++++++++++++++++ src/server/newrelic/main.py | 2 +- src/server/newrelic/sampler.py | 2 +- src/server/wsgi_daemon.h | 1 + src/server/wsgi_interp.c | 8 +++--- src/server/wsgi_metrics.c | 46 ++++++++++++++++++++++++++++------ src/server/wsgi_metrics.h | 5 ++-- src/server/wsgi_server.c | 2 ++ src/server/wsgi_server.h | 2 ++ tests/environ.wsgi | 17 +++++++------ 11 files changed, 114 insertions(+), 33 deletions(-) diff --git a/src/server/__init__.py b/src/server/__init__.py index 189e95a9..8ce52cf1 100644 --- a/src/server/__init__.py +++ b/src/server/__init__.py @@ -173,7 +173,8 @@ def find_mimetypes(): shutdown-timeout=%(shutdown_timeout)s \\ send-buffer-size=%(send_buffer_size)s \\ receive-buffer-size=%(receive_buffer_size)s \\ - header-buffer-size=%(header_buffer_size)s + header-buffer-size=%(header_buffer_size)s \\ + server-metrics=%(daemon_server_metrics_flag)s WSGIDaemonProcess %(host)s:%(port)s \\ @@ -195,7 +196,8 @@ def find_mimetypes(): shutdown-timeout=%(shutdown_timeout)s \\ send-buffer-size=%(send_buffer_size)s \\ receive-buffer-size=%(receive_buffer_size)s \\ - header-buffer-size=%(header_buffer_size)s + header-buffer-size=%(header_buffer_size)s \\ + server-metrics=%(daemon_server_metrics_flag)s WSGICallableObject '%(callable_object)s' WSGIPassAuthorization On @@ -338,7 +340,7 @@ def find_mimetypes(): """ APACHE_TOOLS_CONFIG = """ -WSGIDaemonProcess express display-name=%%{GROUP} threads=1 +WSGIDaemonProcess express display-name=%%{GROUP} threads=1 server-metrics=On """ APACHE_METRICS_CONFIG = """ @@ -923,12 +925,9 @@ def check_percentage(option, opt_str, value, parser): 'that keep alive connections are disabled.'), optparse.make_option('--server-metrics', action='store_true', - default=False, help='Flag indicating whether extended web server ' - 'status will be available within the WSGI application. Defaults ' - 'to being disabled meaning that only the state of each worker ' - 'will be available. Will be automatically enabled as a side ' - 'effect of enabling server status URL or New Relic server level ' - 'monitoring.'), + default=False, help='Flag indicating whether internal server ' + 'metrics will be available within the WSGI application. ' + 'Defaults to being disabled.'), optparse.make_option('--server-status', action='store_true', default=False, help='Flag indicating whether web server status ' 'will be available at the /server-status sub URL. 
Defaults to ' @@ -1133,6 +1132,11 @@ def _cmd_setup_server(args, options): options['keep_alive'] = options['keep_alive_timeout'] != 0 + if options['server_metrics']: + options['daemon_server_metrics_flag'] = 'On' + else: + options['daemon_server_metrics_flag'] = 'Off' + if options['with_newrelic']: options['with_newrelic_agent'] = True options['with_newrelic_platform'] = True diff --git a/src/server/mod_wsgi.c b/src/server/mod_wsgi.c index 9be59f67..0865faed 100644 --- a/src/server/mod_wsgi.c +++ b/src/server/mod_wsgi.c @@ -4996,6 +4996,28 @@ static const char *wsgi_add_handler_script(cmd_parms *cmd, void *mconfig, return NULL; } +static const char *wsgi_set_server_metrics(cmd_parms *cmd, void *mconfig, + const char *f) { + const char *error = NULL; + WSGIServerConfig *sconfig = NULL; + + error = ap_check_cmd_context(cmd, GLOBAL_ONLY); + if (error != NULL) + return error; + + sconfig = ap_get_module_config(cmd->server->module_config, &wsgi_module); + + if (strcasecmp(f, "Off") == 0) + sconfig->server_metrics = 0; + else if (strcasecmp(f, "On") == 0) + sconfig->server_metrics = 1; + else + return "WSGIServerMetrics must be one of: Off | On"; + + return NULL; +} + static const char *wsgi_set_newrelic_config_file( cmd_parms *cmd, void *mconfig, const char *f) { @@ -6305,6 +6327,8 @@ static const char *wsgi_add_daemon_process(cmd_parms *cmd, void *mconfig, int groups_count = 0; gid_t *groups = NULL; + int server_metrics = 0; + const char *newrelic_config_file = NULL; const char *newrelic_environment = NULL; @@ -6617,6 +6641,17 @@ static const char *wsgi_add_daemon_process(cmd_parms *cmd, void *mconfig, if (virtual_memory_limit < 0) return "Invalid virtual memory limit for WSGI daemon process."; } + else if (!strcmp(option, "server-metrics")) { + if (!*value) + return "Invalid server metrics flag for WSGI daemon process."; + + if (strcasecmp(value, "Off") == 0) + server_metrics = 0; + else if (strcasecmp(value, "On") == 0) + server_metrics = 1; + else + return "Invalid server metrics flag for WSGI daemon process."; + } else if (!strcmp(option, "newrelic-config-file")) { newrelic_config_file = value; } @@ -6734,6 +6769,8 @@ static const char *wsgi_add_daemon_process(cmd_parms *cmd, void *mconfig, entry->memory_limit = memory_limit; entry->virtual_memory_limit = virtual_memory_limit; + entry->server_metrics = server_metrics; + entry->newrelic_config_file = newrelic_config_file; entry->newrelic_environment = newrelic_environment; @@ -13425,6 +13462,9 @@ static const command_rec wsgi_commands[] = AP_INIT_RAW_ARGS("WSGIHandlerScript", wsgi_add_handler_script, NULL, ACCESS_CONF|RSRC_CONF, "Location of WSGI handler script file."), + AP_INIT_TAKE1("WSGIServerMetrics", wsgi_set_server_metrics, + NULL, RSRC_CONF, "Enable/Disable access to server metrics."), + AP_INIT_TAKE1("WSGINewRelicConfigFile", wsgi_set_newrelic_config_file, NULL, RSRC_CONF, "New Relic monitoring agent configuration file."), AP_INIT_TAKE1("WSGINewRelicEnvironment", wsgi_set_newrelic_environment, diff --git a/src/server/newrelic/main.py b/src/server/newrelic/main.py index c336e3f0..afa8972e 100644 --- a/src/server/newrelic/main.py +++ b/src/server/newrelic/main.py @@ -23,7 +23,7 @@ '%(name)s %(levelname)s - %(message)s' def start(name): - if apache.scoreboard() is None: + if apache.server_metrics() is None: return config_object = RawConfigParser() diff --git a/src/server/newrelic/sampler.py b/src/server/newrelic/sampler.py index 74a5e40f..73eab835 100644 --- a/src/server/newrelic/sampler.py +++ b/src/server/newrelic/sampler.py @@ 
-372,7 +372,7 @@ def generate_scoreboard(self, sample_start=None): active_processes = 0 - scoreboard = apache.scoreboard() + scoreboard = apache.server_metrics() if sample_start is None: sample_start = scoreboard['current_time'] diff --git a/src/server/wsgi_daemon.h b/src/server/wsgi_daemon.h index 3507536c..1601f30a 100644 --- a/src/server/wsgi_daemon.h +++ b/src/server/wsgi_daemon.h @@ -128,6 +128,7 @@ typedef struct { int listener_fd; const char* mutex_path; apr_proc_mutex_t* mutex; + int server_metrics; const char *newrelic_config_file; const char *newrelic_environment; } WSGIProcessGroup; diff --git a/src/server/wsgi_interp.c b/src/server/wsgi_interp.c index 406974b1..0f132eed 100644 --- a/src/server/wsgi_interp.c +++ b/src/server/wsgi_interp.c @@ -1068,8 +1068,8 @@ InterpreterObject *newInterpreterObject(const char *name) PyModule_AddObject(module, "threads_per_process", object); #endif - PyModule_AddObject(module, "process_status", PyCFunction_New( - &wsgi_process_status_method[0], NULL)); + PyModule_AddObject(module, "server_metrics", PyCFunction_New( + &wsgi_process_server_metrics_method[0], NULL)); /* Done with the 'mod_wsgi' module. */ @@ -1187,8 +1187,8 @@ InterpreterObject *newInterpreterObject(const char *name) #endif PyModule_AddObject(module, "build_date", object); - PyModule_AddObject(module, "scoreboard", PyCFunction_New( - &wsgi_apache_scoreboard_method[0], NULL)); + PyModule_AddObject(module, "server_metrics", PyCFunction_New( + &wsgi_apache_server_metrics_method[0], NULL)); /* Done with the 'apache' module. */ diff --git a/src/server/wsgi_metrics.c b/src/server/wsgi_metrics.c index 1e162fef..0f1c0b3d 100644 --- a/src/server/wsgi_metrics.c +++ b/src/server/wsgi_metrics.c @@ -21,6 +21,8 @@ #include "wsgi_metrics.h" #include "wsgi_apache.h" +#include "wsgi_daemon.h" +#include "wsgi_server.h" /* ------------------------------------------------------------------------- */ @@ -78,12 +80,27 @@ double wsgi_end_request(void) return wsgi_utilization_time(-1); } -static PyObject *wsgi_process_status(void) +static PyObject *wsgi_process_server_metrics(void) { PyObject *result = NULL; PyObject *object = NULL; + if (!wsgi_daemon_pool) { + if (!wsgi_server_config->server_metrics) { + Py_INCREF(Py_None); + + return Py_None; + } + } + else { + if (!wsgi_daemon_process->group->server_metrics) { + Py_INCREF(Py_None); + + return Py_None; + } + } + result = PyDict_New(); object = PyFloat_FromDouble(wsgi_utilization_time(0)); @@ -93,8 +110,8 @@ static PyObject *wsgi_process_status(void) return result; } -PyMethodDef wsgi_process_status_method[] = { - { "process_status", (PyCFunction)wsgi_process_status, +PyMethodDef wsgi_process_server_metrics_method[] = { + { "server_metrics", (PyCFunction)wsgi_process_server_metrics, METH_NOARGS, 0 }, { NULL }, }; @@ -129,7 +146,7 @@ static PyObject *wsgi_status_flags[SERVER_NUM_STATUS]; #define WSGI_CREATE_STATUS_FLAG(name, val) \ wsgi_status_flags[name] = wsgi_PyString_InternFromString(val) -static PyObject *wsgi_apache_scoreboard(void) +static PyObject *wsgi_apache_server_metrics(void) { PyObject *scoreboard_dict = NULL; @@ -189,7 +206,7 @@ static PyObject *wsgi_apache_scoreboard(void) init_static = 1; } - /* Scoreboard needs to exist. */ + /* Scoreboard needs to exist and server metrics enabled. 
*/ if (!ap_exists_scoreboard_image()) { Py_INCREF(Py_None); @@ -197,6 +214,21 @@ static PyObject *wsgi_apache_scoreboard(void) return Py_None; } + if (!wsgi_daemon_pool) { + if (!wsgi_server_config->server_metrics) { + Py_INCREF(Py_None); + + return Py_None; + } + } + else { + if (!wsgi_daemon_process->group->server_metrics) { + Py_INCREF(Py_None); + + return Py_None; + } + } + gs_record = ap_get_scoreboard_global(); if (!gs_record) { @@ -368,8 +400,8 @@ static PyObject *wsgi_apache_scoreboard(void) /* ------------------------------------------------------------------------- */ -PyMethodDef wsgi_apache_scoreboard_method[] = { - { "scoreboard", (PyCFunction)wsgi_apache_scoreboard, +PyMethodDef wsgi_apache_server_metrics_method[] = { + { "server_metrics", (PyCFunction)wsgi_apache_server_metrics, METH_NOARGS, 0 }, { NULL }, }; diff --git a/src/server/wsgi_metrics.h b/src/server/wsgi_metrics.h index 7a51bf65..6f2008de 100644 --- a/src/server/wsgi_metrics.h +++ b/src/server/wsgi_metrics.h @@ -31,13 +31,12 @@ extern int wsgi_dump_stack_traces; extern apr_thread_mutex_t* wsgi_monitor_lock; -extern PyMethodDef wsgi_process_status_method[]; +extern PyMethodDef wsgi_process_server_metrics_method[]; extern double wsgi_start_request(void); extern double wsgi_end_request(void); -extern PyMethodDef wsgi_apache_server_status_method[]; -extern PyMethodDef wsgi_apache_scoreboard_method[]; +extern PyMethodDef wsgi_apache_server_metrics_method[]; /* ------------------------------------------------------------------------- */ diff --git a/src/server/wsgi_server.c b/src/server/wsgi_server.c index 2d6041e8..0d602ca4 100644 --- a/src/server/wsgi_server.c +++ b/src/server/wsgi_server.c @@ -122,6 +122,8 @@ WSGIServerConfig *newWSGIServerConfig(apr_pool_t *p) object->enable_sendfile = -1; + object->server_metrics = -1; + object->newrelic_config_file = NULL; object->newrelic_environment = NULL; diff --git a/src/server/wsgi_server.h b/src/server/wsgi_server.h index f1b323d3..c58b980c 100644 --- a/src/server/wsgi_server.h +++ b/src/server/wsgi_server.h @@ -107,6 +107,8 @@ typedef struct { apr_hash_t *handler_scripts; + int server_metrics; + const char *newrelic_config_file; const char *newrelic_environment; } WSGIServerConfig; diff --git a/tests/environ.wsgi b/tests/environ.wsgi index ac131a88..e31bd838 100644 --- a/tests/environ.wsgi +++ b/tests/environ.wsgi @@ -34,7 +34,7 @@ def application(environ, start_response): file=output) print('mod_wsgi.threads_per_process: %s' % mod_wsgi.threads_per_process, file=output) - print('mod_wsgi.process_status: %s' % mod_wsgi.process_status(), + print('mod_wsgi.server_metrics: %s' % mod_wsgi.server_metrics(), file=output) print(file=output) @@ -45,17 +45,18 @@ def application(environ, start_response): file=output) print('apache.threads_per_process: %s' % apache.threads_per_process, file=output) - print('apache.scoreboard: %s' % apache.scoreboard(), + print('apache.server_metrics: %s' % apache.server_metrics(), file=output) print(file=output) - scoreboard = apache.scoreboard() + scoreboard = apache.server_metrics() - for process in scoreboard['processes']: - for worker in process['workers']: - print(worker['status'], file=output, end='') - print(file=output) - print(file=output) + if scoreboard: + for process in scoreboard['processes']: + for worker in process['workers']: + print(worker['status'], file=output, end='') + print(file=output) + print(file=output) print('PATH: %s' % sys.path, file=output) print(file=output) From 0e935de66af4ed216e7c0a55d57c5f5063292835 Mon Sep 17 
00:00:00 2001 From: Graham Dumpleton Date: Sun, 8 Jun 2014 16:23:32 +1000 Subject: [PATCH 10/14] Expand to version number triplet in WSGI environ and modules. --- src/server/mod_wsgi.c | 20 +++++++++++++++++++- src/server/wsgi_interp.c | 10 ++++++---- 2 files changed, 25 insertions(+), 5 deletions(-) diff --git a/src/server/mod_wsgi.c b/src/server/mod_wsgi.c index 0865faed..10a9194b 100644 --- a/src/server/mod_wsgi.c +++ b/src/server/mod_wsgi.c @@ -2195,7 +2195,13 @@ static PyObject *Adapter_environ(AdapterObject *self) PyDict_SetItemString(vars, "wsgi.file_wrapper", (PyObject *)&Stream_Type); - /* Add mod_wsgi version information. */ + /* Add Apache and mod_wsgi version information. */ + + object = Py_BuildValue("(iii)", AP_SERVER_MAJORVERSION_NUMBER, + AP_SERVER_MINORVERSION_NUMBER, + AP_SERVER_PATCHLEVEL_NUMBER); + PyDict_SetItemString(vars, "apache.version", object); + Py_DECREF(object); object = Py_BuildValue("(iii)", MOD_WSGI_MAJORVERSION_NUMBER, MOD_WSGI_MINORVERSION_NUMBER, @@ -11714,6 +11720,18 @@ static PyObject *Auth_environ(AuthObject *self, const char *group) Py_DECREF(object); } + object = Py_BuildValue("(iii)", AP_SERVER_MAJORVERSION_NUMBER, + AP_SERVER_MINORVERSION_NUMBER, + AP_SERVER_PATCHLEVEL_NUMBER); + PyDict_SetItemString(vars, "apache.version", object); + Py_DECREF(object); + + object = Py_BuildValue("(iii)", MOD_WSGI_MAJORVERSION_NUMBER, + MOD_WSGI_MINORVERSION_NUMBER, + MOD_WSGI_MICROVERSION_NUMBER); + PyDict_SetItemString(vars, "mod_wsgi.version", object); + Py_DECREF(object); + #if PY_MAJOR_VERSION >= 3 object = PyUnicode_FromString(""); #else diff --git a/src/server/wsgi_interp.c b/src/server/wsgi_interp.c index 0f132eed..7dd460c8 100644 --- a/src/server/wsgi_interp.c +++ b/src/server/wsgi_interp.c @@ -981,9 +981,10 @@ InterpreterObject *newInterpreterObject(const char *name) * 'mod_wsgi' module. */ - PyModule_AddObject(module, "version", Py_BuildValue("(ii)", + PyModule_AddObject(module, "version", Py_BuildValue("(iii)", MOD_WSGI_MAJORVERSION_NUMBER, - MOD_WSGI_MINORVERSION_NUMBER)); + MOD_WSGI_MINORVERSION_NUMBER, + MOD_WSGI_MICROVERSION_NUMBER)); /* Add type object for file wrapper. */ @@ -1133,9 +1134,10 @@ InterpreterObject *newInterpreterObject(const char *name) * module. */ - PyModule_AddObject(module, "version", Py_BuildValue("(ii)", + PyModule_AddObject(module, "version", Py_BuildValue("(iii)", AP_SERVER_MAJORVERSION_NUMBER, - AP_SERVER_MINORVERSION_NUMBER)); + AP_SERVER_MINORVERSION_NUMBER, + AP_SERVER_PATCHLEVEL_NUMBER)); /* * Add information about the Apache MPM configuration and From 70d48e7b6474ee9b2078859957237a1f1e2db873 Mon Sep 17 00:00:00 2001 From: Graham Dumpleton Date: Sun, 8 Jun 2014 16:26:27 +1000 Subject: [PATCH 11/14] Split services into separate sub interpreters so isolated. 
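The intent is that each imported service script now runs in its own application group, and thus its own sub interpreter, so module globals are no longer shared between the two services or with applications running in the ``%{GLOBAL}`` interpreter. An illustrative probe that could be added to either script (not part of this commit):

    import mod_wsgi

    # Reports 'server-metrics' or 'wdb-server' after this change, where
    # previously both scripts shared the global application group.
    print('application group: %r' % mod_wsgi.application_group)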
--- src/server/__init__.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/server/__init__.py b/src/server/__init__.py index 8ce52cf1..aa08b3ca 100644 --- a/src/server/__init__.py +++ b/src/server/__init__.py @@ -345,12 +345,12 @@ def find_mimetypes(): APACHE_METRICS_CONFIG = """ WSGIImportScript '%(server_root)s/server-metrics.py' \\ - process-group=express application-group=%%{GLOBAL} + process-group=express application-group=server-metrics """ APACHE_WDB_CONFIG = """ WSGIImportScript '%(server_root)s/wdb-server.py' \\ - process-group=express application-group=%%{GLOBAL} + process-group=express application-group=wdb-server """ def generate_apache_config(options): From f73220e664912133a129753ba0ca5986d581b5ac Mon Sep 17 00:00:00 2001 From: Graham Dumpleton Date: Sun, 8 Jun 2014 16:28:40 +1000 Subject: [PATCH 12/14] Split New Relic platform plugin out into separate Python package called mod_wsgi-metrics. --- setup.py | 3 +- src/server/__init__.py | 18 +- src/server/newrelic/__init__.py | 0 src/server/newrelic/interface.py | 189 ---------- src/server/newrelic/main.py | 62 ---- src/server/newrelic/sampler.py | 601 ------------------------------- src/server/wsgi_interp.c | 8 +- src/server/wsgi_metrics.c | 14 +- src/server/wsgi_metrics.h | 4 +- tests/environ.wsgi | 26 +- tox.ini | 4 +- 11 files changed, 49 insertions(+), 880 deletions(-) delete mode 100644 src/server/newrelic/__init__.py delete mode 100644 src/server/newrelic/interface.py delete mode 100644 src/server/newrelic/main.py delete mode 100644 src/server/newrelic/sampler.py diff --git a/setup.py b/setup.py index 309ad3cf..872f4377 100644 --- a/setup.py +++ b/setup.py @@ -179,7 +179,7 @@ def _version(): ], packages = ['mod_wsgi', 'mod_wsgi.server', 'mod_wsgi.server.management', 'mod_wsgi.server.management.commands', 'mod_wsgi.docs', - 'mod_wsgi.images', 'mod_wsgi.server.newrelic'], + 'mod_wsgi.images'], package_dir = {'mod_wsgi': 'src', 'mod_wsgi.docs': 'docs/_build/html', 'mod_wsgi.images': 'images'}, package_data = {'mod_wsgi.docs': _documentation(), @@ -187,4 +187,5 @@ def _version(): ext_modules = [extension], entry_points = { 'console_scripts': ['mod_wsgi-express = mod_wsgi.server:main'],}, + install_requires=['mod_wsgi-metrics >= 1.0.0'], ) diff --git a/src/server/__init__.py b/src/server/__init__.py index aa08b3ca..c64ef4f6 100644 --- a/src/server/__init__.py +++ b/src/server/__init__.py @@ -651,9 +651,23 @@ def generate_wsgi_handler_script(options): print(WSGI_DEFAULT_SCRIPT % options, file=fp) SERVER_METRICS_SCRIPT = """ -from mod_wsgi.server.newrelic.main import start +import logging -start('%(host)s:%(port)s') +logging.basicConfig(level=logging.INFO, + format='%%(name)s (pid=%%(process)d, level=%%(levelname)s): %%(message)s') + +_logger = logging.getLogger(__name__) + +try: + from mod_wsgi.metrics.newrelic import Agent + + agent = Agent() + agent.start() + +except ImportError: + _logger.fatal('The module mod_wsgi.metrics.newrelic is not available. ' + 'The New Relic platform plugin has been disabled. 
Install the ' + '"mod_wsgi-metrics" package.') """ def generate_server_metrics_script(options): diff --git a/src/server/newrelic/__init__.py b/src/server/newrelic/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/src/server/newrelic/interface.py b/src/server/newrelic/interface.py deleted file mode 100644 index a9db15fd..00000000 --- a/src/server/newrelic/interface.py +++ /dev/null @@ -1,189 +0,0 @@ -import zlib -import sys -import socket -import os -import types -import json -import logging - -try: - import http.client as httplib -except ImportError: - import httplib - -_logger = logging.getLogger(__name__) - -# Python 3 compatibility helpers. - -PY2 = sys.version_info[0] == 2 -PY3 = sys.version_info[0] == 3 - -if PY3: - def b(s): - return s.encode('latin-1') -else: - def b(s): - return s - -# Helpers for json encoding and decoding. - -def json_encode(obj, **kwargs): - _kwargs = {} - - if type(b'') is type(''): - _kwargs['encoding'] = 'latin-1' - - def _encode(o): - if isinstance(o, bytes): - return o.decode('latin-1') - elif isinstance(o, types.GeneratorType): - return list(o) - elif hasattr(o, '__iter__'): - return list(iter(o)) - raise TypeError(repr(o) + ' is not JSON serializable') - - _kwargs['default'] = _encode - _kwargs['separators'] = (',', ':') - - _kwargs.update(kwargs) - - return json.dumps(obj, **_kwargs) - -def json_decode(s, **kwargs): - return json.loads(s, **kwargs) - -# Platform plugin interface. - -class Interface(object): - - class NetworkInterfaceException(Exception): pass - class DiscardDataForRequest(NetworkInterfaceException): pass - class RetryDataForRequest(NetworkInterfaceException): pass - class ServerIsUnavailable(RetryDataForRequest): pass - - USER_AGENT = 'ModWsgi-PythonPlugin/%s (Python %s %s)' % ( - '1.0.0', sys.version.split()[0], sys.platform) - - HOST = 'platform-api.newrelic.com' - URL = '/platform/v1/metrics' - - def __init__(self, license_key): - self.license_key = license_key - - def send_request(self, payload=()): - headers = {} - config = {} - - license_key = self.license_key - - if not self.license_key: - license_key = 'INVALID LICENSE KEY' - - headers['User-Agent'] = self.USER_AGENT - headers['Content-Encoding'] = 'identity' - headers['X-License-Key'] = license_key - - try: - data = json_encode(payload) - - except Exception as exc: - _logger.exception('Error encoding data for JSON payload ' - 'with payload of %r.', payload) - - raise Interface.DiscardDataForRequest(str(exc)) - - if len(data) > 64*1024: - headers['Content-Encoding'] = 'deflate' - level = (len(data) < 2000000) and 1 or 9 - data = zlib.compress(b(data), level) - - try: - connection = httplib.HTTPSConnection(self.HOST, timeout=30.0) - connection.request('POST', self.URL, data, headers) - response = connection.getresponse() - content = response.read() - - except httplib.HTTPException as exc: - raise Interface.RetryDataForRequest(str(exc)) - - finally: - connection.close() - - if response.status != 200: - _logger.debug('Received a non 200 HTTP response from the data ' - 'collector where headers=%r, status=%r and content=%r.', - headers, response.status, content) - - if response.status == 400: - if headers['Content-Encoding'] == 'deflate': - data = zlib.decompress(data) - - _logger.error('Data collector is indicating that a bad ' - 'request has been submitted for headers of %r and ' - 'payload of %r with response of %r.', headers, data, - content) - - raise Interface.DiscardDataForRequest() - - elif response.status == 403: - _logger.error('Data collector is 
indicating that the license ' - 'key %r is not valid.', license_key) - - raise Interface.DiscardDataForRequest() - - elif response.status == 413: - _logger.warning('Data collector is indicating that a request ' - 'was received where the request content size was over ' - 'the maximum allowed size limit. The length of the ' - 'request content was %d.', len(data)) - - raise Interface.DiscardDataForRequest() - - elif response.status in (503, 504): - _logger.warning('Data collector is unavailable.') - - raise Interface.ServerIsUnavailable() - - elif response.status != 200: - _logger.warning('An unexpected HTTP response was received ' - 'from the data collector of %r. The payload for ' - 'the request was %r.', respnse.status, payload) - - raise Interface.DiscardDataForRequest() - - try: - if PY3: - content = content.decode('UTF-8') - - result = json_decode(content) - - except Exception as exc: - _logger.exception('Error decoding data for JSON payload ' - 'with payload of %r.', content) - - raise Interface.DiscardDataForRequest(str(exc)) - - if 'status' in result: - return result['status'] - - error_message = result['error'] - - raise Interface.DiscardDataForRequest(error_message) - - def send_metrics(self, name, guid, version, duration, metrics): - agent = {} - agent['host'] = socket.gethostname() - agent['pid'] = os.getpid() - agent['version'] = version or '0.0.0.' - - component = {} - component['name'] = name - component['guid'] = guid - component['duration'] = duration - component['metrics'] = metrics - - payload = {} - payload['agent'] = agent - payload['components'] = [component] - - return self.send_request(payload) diff --git a/src/server/newrelic/main.py b/src/server/newrelic/main.py deleted file mode 100644 index afa8972e..00000000 --- a/src/server/newrelic/main.py +++ /dev/null @@ -1,62 +0,0 @@ -import os -import logging - -try: - from ConfigParser import RawConfigParser, NoOptionError -except ImportError: - from configparser import RawConfigParser, NoOptionError - -from .interface import Interface -from .sampler import Sampler - -import apache - -LOG_LEVEL = { - 'CRITICAL': logging.CRITICAL, - 'ERROR': logging.ERROR, - 'WARNING': logging.WARNING, - 'INFO': logging.INFO, - 'DEBUG': logging.DEBUG, -} - -LOG_FORMAT = '%(asctime)s (%(process)d/%(threadName)s) ' \ - '%(name)s %(levelname)s - %(message)s' - -def start(name): - if apache.server_metrics() is None: - return - - config_object = RawConfigParser() - - config_file = os.environ.get('NEW_RELIC_CONFIG_FILE') - - if config_file: - config_object.read([config_file]) - - def option(name, section='newrelic', type=None, **kwargs): - try: - getter = 'get%s' % (type or '') - return getattr(config_object, getter)(section, name) - except NoOptionError: - if 'default' in kwargs: - return kwargs['default'] - else: - raise - - log_level = os.environ.get('NEW_RELIC_LOG_LEVEL', 'INFO').upper() - log_level = option('log_level', default=log_level).upper() - - if log_level in LOG_LEVEL: - log_level = LOG_LEVEL[log_level] - else: - log_level = logging.INFO - - logging.basicConfig(level=log_level, format=LOG_FORMAT) - - license_key = os.environ.get('NEW_RELIC_LICENSE_KEY') - license_key = option('license_key', default=license_key) - - interface = Interface(license_key) - sampler = Sampler(interface, name) - - sampler.start() diff --git a/src/server/newrelic/sampler.py b/src/server/newrelic/sampler.py deleted file mode 100644 index 73eab835..00000000 --- a/src/server/newrelic/sampler.py +++ /dev/null @@ -1,601 +0,0 @@ -import threading -import atexit 
-import os -import sys -import json -import socket -import time -import math - -try: - import Queue as queue -except ImportError: - import queue - -import apache - -SERVER_READY = '_' -SERVER_STARTING = 'S' -SERVER_BUSY_READ = 'R' -SERVER_BUSY_WRITE = 'W' -SERVER_BUST_KEEPALIVE = 'K' -SERVER_BUSY_LOG = 'L' -SERVER_BUSY_DNS = 'D' -SERVER_CLOSING = 'C' -SERVER_GRACEFUL = 'G' -SERVER_IDLE_KILL = 'I' -SERVER_DEAD = '.' - -STATUS_FLAGS = { - SERVER_READY: 'Ready', - SERVER_STARTING: 'Starting', - SERVER_BUSY_READ: 'Read', - SERVER_BUSY_WRITE: 'Write', - SERVER_BUST_KEEPALIVE: 'Keepalive', - SERVER_BUSY_LOG: 'Logging', - SERVER_BUSY_DNS: 'DNS lookup', - SERVER_CLOSING: 'Closing', - SERVER_GRACEFUL: 'Graceful', - SERVER_IDLE_KILL: 'Dying', - SERVER_DEAD: 'Dead' -} - -class Sample(dict): - - def __init__(self, count=0, total=0.0, min=0.0, max=0.0, - sum_of_squares=0.0): - self.count = count - self.total = total - self.min = min - self.max = max - self.sum_of_squares = sum_of_squares - - def __setattr__(self, name, value): - self[name] = value - - def __getattr__(self, name): - return self[name] - - def merge_stats(self, other): - self.total += other.total - self.min = self.count and min(self.min, other.min) or other.min - self.max = max(self.max, other.max) - self.sum_of_squares += other.sum_of_squares - self.count += other.count - - def merge_value(self, value): - self.total += value - self.min = self.count and min(self.min, value) or value - self.max = max(self.max, value) - self.sum_of_squares += value ** 2 - self.count += 1 - -class Samples(object): - - def __init__(self): - self.samples = {} - - def __iter__(self): - return iter(self.samples.items()) - - def sample_name(self, name): - return 'Component/' + name - - def _assign_value(self, value): - if isinstance(value, Sample): - sample = value - self.samples[name] = sample - else: - sample = Sample() - self.samples[name] = sample - sample.merge_value(value) - - return sample - - def assign_value(self, value): - name = self.sample_name(name) - - return self._assign_value(name) - - def _merge_value(self, name, value): - sample = self.samples.get(name) - - if sample is None: - sample = Sample() - self.samples[name] = sample - - if isinstance(value, Sample): - sample.merge_stats(value) - else: - sample.merge_value(value) - - return sample - - def merge_value(self, name, value): - name = self.sample_name(name) - - return self._merge_value(name, value) - - def fetch_sample(self, name): - name = self.sample_name(name) - - sample = self.samples.get(name) - - if sample is None: - sample = Sample() - self.samples[name] = sample - - return sample - - def merge_samples(self, samples): - for name, sample in samples: - self._merge_value(name, sample) - - def assign_samples(self, samples): - for name, sample in samples: - self._assign_value(name, sample) - - def clear_samples(self): - self.samples.clear() - -class Sampler(object): - - guid = 'au.com.dscpl.wsgi.mod_wsgi' - version = '1.0.0' - - def __init__(self, interface, name): - self.interface = interface - self.name = name - - self.running = False - self.lock = threading.Lock() - - self.period_start = 0 - self.access_count = 0 - self.bytes_served = 0 - - self.request_samples = [] - - self.metric_data = Samples() - - self.report_queue = queue.Queue() - - self.report_thread = threading.Thread(target=self.report_main_loop) - self.report_thread.setDaemon(True) - - self.report_start = 0 - self.report_metrics = Samples() - - self.monitor_queue = queue.Queue() - - self.monitor_thread = 
threading.Thread(target=self.monitor_main_loop) - self.monitor_thread.setDaemon(True) - - self.monitor_count = 0 - - def upload_report(self, start, end, metrics): - try: - self.interface.send_metrics(self.name, self.guid, self.version, - end-start, metrics.samples) - - except self.interface.RetryDataForRequest: - return True - - except Exception: - pass - - return False - - def generate_request_metrics(self, harvest_data): - metrics = Samples() - - # Chart as 'Throughput'. - - metrics.merge_value('Requests/Throughput[|requests]', - Sample(count=harvest_data['access_count'], - total=harvest_data['access_count'])) - - # Calculate from the set of sampled requests the average - # and percentile metrics. - - requests = harvest_data['request_samples'] - - if requests: - for request in requests: - # Chart as 'Average'. - - metrics.merge_value('Requests/Response Time[seconds|request]', - request['duration']) - - requests.sort(key=lambda e: e['duration']) - - total = sum([x['duration'] for x in requests]) - - # Chart as 'Average'. - - metrics.merge_value('Requests/Percentiles/Average[seconds]', - total/len(requests)) - - idx50 = int(0.50 * len(requests)) - metrics.merge_value('Requests/Percentiles/Median[seconds]', - requests[idx50]['duration']) - - idx95 = int(0.95 * len(requests)) - metrics.merge_value('Requests/Percentiles/95%[seconds]', - requests[idx95]['duration']) - - idx99 = int(0.99 * len(requests)) - metrics.merge_value('Requests/Percentiles/99%[seconds]', - requests[idx99]['duration']) - - # Chart as 'Rate'. - - metrics.merge_value('Requests/Bytes Served[bytes]', - harvest_data['bytes_served']) - - return metrics - - def generate_process_metrics(self, harvest_data): - metrics = Samples() - - # Chart as 'Count'. Round to Integer. - - metrics.merge_value('Processes/Instances[|processes]', - Sample(count=math.ceil(float( - harvest_data['processes_running']) / - harvest_data['sample_count']))) - - metrics.merge_value('Processes/Lifecycle/Starting[|processes]', - Sample(count=harvest_data['processes_started'])) - - metrics.merge_value('Processes/Lifecycle/Stopping[|processes]', - Sample(count=harvest_data['processes_stopped'])) - - metrics.merge_value('Workers/Availability/Idle[|workers]', - Sample(count=math.ceil(float( - harvest_data['idle_workers']) / - harvest_data['sample_count']))) - metrics.merge_value('Workers/Availability/Busy[|workers]', - Sample(count=math.ceil(float( - harvest_data['busy_workers']) / - harvest_data['sample_count']))) - - # Chart as 'Percentage'. - - metrics.merge_value('Workers/Utilization[server]', - (float(harvest_data['busy_workers']) / - harvest_data['sample_count']) / ( - harvest_data['server_limit']*harvest_data['thread_limit'])) - - total = 0 - for value in harvest_data['worker_status'].values(): - value = float(value)/harvest_data['sample_count'] - total += value - - if total: - for key, value in harvest_data['worker_status'].items(): - if key != SERVER_DEAD and value != 0: - label = STATUS_FLAGS.get(key, 'Unknown') - - # Chart as 'Average'. Round to Integer. - - value = float(value)/harvest_data['sample_count'] - - metrics.merge_value('Workers/Status/%s[workers]' % - label, (value/total)*total) - - return metrics - - def report_main_loop(self): - # We need a set of cached metrics for the case where - # we fail in uploading the metric data and need to - # retain it for the next attempt to upload data. 
- - retries = 0 - retained_start = 0 - retained = Samples() - - # We simply wait to be passed the metric data to be - # reported for the current sample period. - - while True: - harvest_data = self.report_queue.get() - - # If samples is None then we are being told to - # exit as the process is being shutdown. Otherwise - # we should be passed the cumulative metric data - # and the set of sampled requests. - - if harvest_data is None: - return - - start = harvest_data['period_start'] - end = harvest_data['period_end'] - - metrics = harvest_data['metrics'] - - # Add metric to track how many Apache server instances - # are reporting for each sample period. - - # Chart as 'Count'. Round to Integer. - - metrics.merge_value('Server/Instances[|servers]', 0) - - # Generate percentiles metrics for request samples. - - metrics.merge_samples(self.generate_request_metrics(harvest_data)) - metrics.merge_samples(self.generate_process_metrics(harvest_data)) - - # If we had metrics from a previous reporting period - # because we couldn't upload the metric data, we need - # to merge the data from the current reporting period - # with that for the previous period. - - if retained.samples: - start = retained_start - retained.merge_samples(metrics) - metrics = retained - - # Now attempt to upload the metric data. - - retry = self.upload_report(start, end, metrics) - - # If a failure occurred but failure type was such that we - # could try again to upload the data, then retain them. If - # have two many failed attempts though we give up. - - if retry: - retries += 1 - - if retries == 5: - retries = 0 - - else: - retained = metrics - - else: - retries = 0 - - if retries == 0: - retained_start = 0 - retained.clear_samples() - - else: - retained_start = start - retained = metrics - - def generate_scoreboard(self, sample_start=None): - busy_workers = 0 - idle_workers = 0 - access_count = 0 - bytes_served = 0 - - active_processes = 0 - - scoreboard = apache.server_metrics() - - if sample_start is None: - sample_start = scoreboard['current_time'] - - scoreboard['request_samples'] = request_samples = [] - - for process in scoreboard['processes']: - process['active_workers'] = 0 - - for worker in process['workers']: - status = worker['status'] - - if not process['quiescing'] and process['pid']: - if (status == SERVER_READY and process['generation'] == - scoreboard['running_generation']): - - process['active_workers'] += 1 - idle_workers += 1 - - elif status not in (SERVER_DEAD, SERVER_STARTING, - SERVER_IDLE_KILL): - - process['active_workers'] += 1 - busy_workers += 1 - - count = worker['access_count'] - - if count or status not in (SERVER_READY, SERVER_DEAD): - access_count += count - bytes_served += worker['bytes_served'] - - current_time = scoreboard['current_time'] - - start_time = worker['start_time'] - stop_time = worker['stop_time'] - - if (stop_time > start_time and sample_start < stop_time - and stop_time <= current_time): - - duration = stop_time - start_time - thread_num = worker['thread_num'] - - request_samples.append(dict(start_time=start_time, - duration=duration, thread_num=thread_num)) - - if process['active_workers']: - active_processes += 1 - - scoreboard['busy_workers'] = busy_workers - scoreboard['idle_workers'] = idle_workers - scoreboard['access_count'] = access_count - scoreboard['bytes_served'] = bytes_served - - scoreboard['active_processes'] = active_processes - - return scoreboard - - def record_process_statistics(self, scoreboard, harvest_data): - current_active_processes = 
scoreboard['active_processes'] - previous_active_processes = harvest_data['active_processes'] - - harvest_data['active_processes'] = current_active_processes - harvest_data['processes_running'] += current_active_processes - - if current_active_processes > previous_active_processes: - harvest_data['processes_started'] += (current_active_processes - - previous_active_processes) - - elif current_active_processes < previous_active_processes: - harvest_data['processes_stopped'] += (previous_active_processes - - current_active_processes) - - harvest_data['idle_workers'] += scoreboard['idle_workers'] - harvest_data['busy_workers'] += scoreboard['busy_workers'] - - for process in scoreboard['processes']: - for worker in process['workers']: - harvest_data['worker_status'][worker['status']] += 1 - - def monitor_main_loop(self): - scoreboard = self.generate_scoreboard() - - harvest_start = scoreboard['current_time'] - sample_start = harvest_start - sample_duration = 0.0 - - access_count = scoreboard['access_count'] - bytes_served = scoreboard['bytes_served'] - - harvest_data = {} - - harvest_data['sample_count'] = 0 - harvest_data['period_start'] = harvest_start - - harvest_data['metrics'] = Samples() - - harvest_data['request_samples'] = [] - - harvest_data['active_processes'] = 0 - - harvest_data['processes_running'] = 0 - harvest_data['processes_started'] = 0 - harvest_data['processes_stopped'] = 0 - - harvest_data['idle_workers'] = 0 - harvest_data['busy_workers'] = 0 - - harvest_data['server_limit'] = scoreboard['server_limit'] - harvest_data['thread_limit'] = scoreboard['thread_limit'] - - harvest_data['worker_status'] = {} - - for status in STATUS_FLAGS.keys(): - harvest_data['worker_status'][status] = 0 - - harvest_data['access_count'] = 0 - harvest_data['bytes_served'] = 0 - - # Chart as 'Count'. Round to Integer. - - harvest_data['metrics'].merge_value('Server/Restarts[|servers]', 0) - - start = time.time() - end = start + 60.0 - - while True: - try: - # We want to collect metrics on a regular second - # interval so we need to align the timeout value. 
- - now = time.time() - start += 1.0 - timeout = start - now - - return self.monitor_queue.get(timeout=timeout) - - except queue.Empty: - pass - - harvest_data['sample_count'] += 1 - - scoreboard = self.generate_scoreboard(sample_start) - - harvest_end = scoreboard['current_time'] - sample_end = harvest_end - - sample_duration = sample_end - sample_start - - self.record_process_statistics(scoreboard, harvest_data) - - harvest_data['request_samples'].extend( - scoreboard['request_samples']) - - access_count_delta = scoreboard['access_count'] - access_count_delta -= access_count - access_count = scoreboard['access_count'] - - harvest_data['access_count'] += access_count_delta - - bytes_served_delta = scoreboard['bytes_served'] - bytes_served_delta -= bytes_served - bytes_served = scoreboard['bytes_served'] - - harvest_data['bytes_served'] += bytes_served_delta - - now = time.time() - - if now >= end: - harvest_data['period_end'] = harvest_end - - self.report_queue.put(harvest_data) - - harvest_start = harvest_end - end += 60.0 - - _harvest_data = {} - - _harvest_data['sample_count'] = 0 - _harvest_data['period_start'] = harvest_start - - _harvest_data['metrics'] = Samples() - - _harvest_data['request_samples'] = [] - - _harvest_data['active_processes'] = ( - harvest_data['active_processes']) - - _harvest_data['processes_running'] = 0 - _harvest_data['processes_started'] = 0 - _harvest_data['processes_stopped'] = 0 - - _harvest_data['idle_workers'] = 0 - _harvest_data['busy_workers'] = 0 - - _harvest_data['server_limit'] = scoreboard['server_limit'] - _harvest_data['thread_limit'] = scoreboard['thread_limit'] - - _harvest_data['worker_status'] = {} - - for status in STATUS_FLAGS.keys(): - _harvest_data['worker_status'][status] = 0 - - _harvest_data['access_count'] = 0 - _harvest_data['bytes_served'] = 0 - - harvest_data = _harvest_data - - sample_start = sample_end - - def terminate(self): - try: - self.report_queue.put(None) - self.monitor_queue.put(None) - except Exception: - pass - - self.monitor_thread.join() - self.report_thread.join() - - def start(self): - with self.lock: - if not self.running: - self.running = True - atexit.register(self.terminate) - self.monitor_thread.start() - self.report_thread.start() diff --git a/src/server/wsgi_interp.c b/src/server/wsgi_interp.c index 7dd460c8..f74eb9e5 100644 --- a/src/server/wsgi_interp.c +++ b/src/server/wsgi_interp.c @@ -1070,7 +1070,10 @@ InterpreterObject *newInterpreterObject(const char *name) #endif PyModule_AddObject(module, "server_metrics", PyCFunction_New( - &wsgi_process_server_metrics_method[0], NULL)); + &wsgi_server_metrics_method[0], NULL)); + + PyModule_AddObject(module, "process_metrics", PyCFunction_New( + &wsgi_process_metrics_method[0], NULL)); /* Done with the 'mod_wsgi' module. */ @@ -1189,9 +1192,6 @@ InterpreterObject *newInterpreterObject(const char *name) #endif PyModule_AddObject(module, "build_date", object); - PyModule_AddObject(module, "server_metrics", PyCFunction_New( - &wsgi_apache_server_metrics_method[0], NULL)); - /* Done with the 'apache' module. 
*/ Py_DECREF(module); diff --git a/src/server/wsgi_metrics.c b/src/server/wsgi_metrics.c index 0f1c0b3d..cee20311 100644 --- a/src/server/wsgi_metrics.c +++ b/src/server/wsgi_metrics.c @@ -80,7 +80,7 @@ double wsgi_end_request(void) return wsgi_utilization_time(-1); } -static PyObject *wsgi_process_server_metrics(void) +static PyObject *wsgi_process_metrics(void) { PyObject *result = NULL; @@ -104,14 +104,14 @@ static PyObject *wsgi_process_server_metrics(void) result = PyDict_New(); object = PyFloat_FromDouble(wsgi_utilization_time(0)); - PyDict_SetItemString(result, "thread_utilization", object); + PyDict_SetItemString(result, "utilization", object); Py_DECREF(object); return result; } -PyMethodDef wsgi_process_server_metrics_method[] = { - { "server_metrics", (PyCFunction)wsgi_process_server_metrics, +PyMethodDef wsgi_process_metrics_method[] = { + { "process_metrics", (PyCFunction)wsgi_process_metrics, METH_NOARGS, 0 }, { NULL }, }; @@ -146,7 +146,7 @@ static PyObject *wsgi_status_flags[SERVER_NUM_STATUS]; #define WSGI_CREATE_STATUS_FLAG(name, val) \ wsgi_status_flags[name] = wsgi_PyString_InternFromString(val) -static PyObject *wsgi_apache_server_metrics(void) +static PyObject *wsgi_server_metrics(void) { PyObject *scoreboard_dict = NULL; @@ -400,8 +400,8 @@ static PyObject *wsgi_apache_server_metrics(void) /* ------------------------------------------------------------------------- */ -PyMethodDef wsgi_apache_server_metrics_method[] = { - { "server_metrics", (PyCFunction)wsgi_apache_server_metrics, +PyMethodDef wsgi_server_metrics_method[] = { + { "server_metrics", (PyCFunction)wsgi_server_metrics, METH_NOARGS, 0 }, { NULL }, }; diff --git a/src/server/wsgi_metrics.h b/src/server/wsgi_metrics.h index 6f2008de..2143806b 100644 --- a/src/server/wsgi_metrics.h +++ b/src/server/wsgi_metrics.h @@ -31,12 +31,12 @@ extern int wsgi_dump_stack_traces; extern apr_thread_mutex_t* wsgi_monitor_lock; -extern PyMethodDef wsgi_process_server_metrics_method[]; +extern PyMethodDef wsgi_process_metrics_method[]; extern double wsgi_start_request(void); extern double wsgi_end_request(void); -extern PyMethodDef wsgi_apache_server_metrics_method[]; +extern PyMethodDef wsgi_server_metrics_method[]; /* ------------------------------------------------------------------------- */ diff --git a/tests/environ.wsgi b/tests/environ.wsgi index e31bd838..6ee17239 100644 --- a/tests/environ.wsgi +++ b/tests/environ.wsgi @@ -24,6 +24,10 @@ def application(environ, start_response): print('GID: %s' % os.getgid(), file=output) print(file=output) + print('apache.version: %r' % (apache.version,), file=output) + print('mod_wsgi.version: %r' % (mod_wsgi.version,), file=output) + print(file=output) + print('mod_wsgi.process_group: %s' % mod_wsgi.process_group, file=output) print('mod_wsgi.application_group: %s' % mod_wsgi.application_group, @@ -34,10 +38,21 @@ def application(environ, start_response): file=output) print('mod_wsgi.threads_per_process: %s' % mod_wsgi.threads_per_process, file=output) + print('mod_wsgi.process_metrics: %s' % mod_wsgi.process_metrics(), + file=output) print('mod_wsgi.server_metrics: %s' % mod_wsgi.server_metrics(), file=output) print(file=output) + metrics = mod_wsgi.server_metrics() + + if metrics: + for process in metrics['processes']: + for worker in process['workers']: + print(worker['status'], file=output, end='') + print(file=output) + print(file=output) + print('apache.description: %s' % apache.description, file=output) print('apache.build_date: %s' % apache.build_date, file=output) 
print('apache.mpm_name: %s' % apache.mpm_name, file=output) @@ -45,19 +60,8 @@ def application(environ, start_response): file=output) print('apache.threads_per_process: %s' % apache.threads_per_process, file=output) - print('apache.server_metrics: %s' % apache.server_metrics(), - file=output) print(file=output) - scoreboard = apache.server_metrics() - - if scoreboard: - for process in scoreboard['processes']: - for worker in process['workers']: - print(worker['status'], file=output, end='') - print(file=output) - print(file=output) - print('PATH: %s' % sys.path, file=output) print(file=output) diff --git a/tox.ini b/tox.ini index 6cbcac40..e1cf3c3c 100644 --- a/tox.ini +++ b/tox.ini @@ -2,4 +2,6 @@ envlist = py26,py27,py33 [testenv] -deps = newrelic +deps = + newrelic + mod_wsgi-metrics From a8ad9446af06930b749213c56efd6dfd82dd4a7b Mon Sep 17 00:00:00 2001 From: Graham Dumpleton Date: Sun, 8 Jun 2014 18:16:14 +1000 Subject: [PATCH 13/14] Reference to mod_wsgi-metrics in README file. --- README.rst | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/README.rst b/README.rst index d9907044..99f21c24 100644 --- a/README.rst +++ b/README.rst @@ -187,6 +187,12 @@ agent configuration file. mod_wsgi-express wsgi.py --with-newrelic +When using this option, if you have also installed the ``mod_wsgi-metrics`` +Python package, then additional metrics about Apache and mod_wsgi will also +be reported via the New Relic Platform API. These will appear as a separate +set of dashboards under 'mod_wsgi' in the left hand side navigation bar of +the New Relic UI. + New Relic provides a free Lite tier so there is no excuse for not using it. Learn about what your Python web application is really doing. [1]_ From db1f0f78a5b786aa5ab31d2a88242a9be735d26e Mon Sep 17 00:00:00 2001 From: Graham Dumpleton Date: Sun, 8 Jun 2014 18:28:49 +1000 Subject: [PATCH 14/14] Updates to release notes for 4.2.0. --- docs/release-notes/version-4.2.0.rst | 14 +++++++++----- 1 file changed, 9 insertions(+), 5 deletions(-) diff --git a/docs/release-notes/version-4.2.0.rst b/docs/release-notes/version-4.2.0.rst index 3c892754..a932e12b 100644 --- a/docs/release-notes/version-4.2.0.rst +++ b/docs/release-notes/version-4.2.0.rst @@ -17,7 +17,7 @@ binary for version 3.X on Windows instead. New Features ------------ -1. Added ``apache.scoreboard()`` function which provides access to a +1. Added ``mod_wsgi.server_metrics()`` function which provides access to a dictionary of data derived from the Apache worker scoreboard. In effect this provides access to the same information that is used to create the Apache server status page. @@ -33,10 +33,14 @@ directive is not also set to ``On``. Although ``mod_status`` needs to be loaded, it is not necessary to enable any URL to expose the server status page. -2. Added a platform plugin for New Relic which will report server status -information up to New Relic if the ``--with-newrelic`` option is supplied -when running mod_wsgi express. +2. Added support for a platform plugin for New Relic to ``mod_wsgi-express`` +which will report server status information up to New Relic if the +``--with-newrelic`` option is supplied when running ``mod_wsgi-express``. -That same agent also enables the New Relic Python agent. If you only want +That same option also enables the New Relic Python agent. If you only want one or the other, you can instead use the ``--with-newrelic-agent`` and ``--with-newrelic-platform`` options. 
+ +The feature of ``mod_wsgi-express`` for reporting data up to the New Relic +Platform is dependent upon the separate ``mod_wsgi-metrics`` package being +installed.
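
As an aside, the net effect of the series on the in-process API can be summarised with the following sketch. It is illustrative only and assumes a mod_wsgi 4.2.0 build with metrics enabled: ``mod_wsgi.process_metrics()`` returns per process data, including the renamed 'utilization' key, while ``mod_wsgi.server_metrics()`` returns the scoreboard derived dictionary, with either returning ``None`` when metrics are not enabled.

    import mod_wsgi

    def application(environ, start_response):
        process = mod_wsgi.process_metrics()
        server = mod_wsgi.server_metrics()

        lines = []

        if process is not None:
            lines.append('utilization: %s' % process['utilization'])

        if server is not None:
            for entry in server['processes']:
                lines.append(''.join(w['status'] for w in entry['workers']))

        body = ('\n'.join(lines) or 'metrics not enabled').encode('UTF-8')

        start_response('200 OK', [('Content-Type', 'text/plain')])

        return [body]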