diff --git a/.gitignore b/.gitignore
index ed1fbcc..07c7194 100644
--- a/.gitignore
+++ b/.gitignore
@@ -4,6 +4,8 @@ cover/
 __pycache__/
 *.py[cod]
+*.swp
+
 # C extensions
 *.so
diff --git a/.travis.yml b/.travis.yml
index 73435c3..4b65559 100644
--- a/.travis.yml
+++ b/.travis.yml
@@ -8,14 +8,24 @@ addons:
 services:
   - postgresql
 before_install:
+  - sudo [ $(ip addr show | grep "inet6 ::1" | wc -l) -lt "1" ] && sudo sed -i '/^::1/d' /etc/hosts && sudo sed -i '/^127.0.1.1/d' /etc/hosts
   - sudo mkdir /etc/dbod
   - sudo mkdir /var/log/dbod
   - sudo chown travis /var/log/dbod
   - sudo cp static/api.cfg /etc/dbod
+  - wget https://github.com/begriffs/postgrest/releases/download/v0.3.1.1/postgrest-0.3.1.1-ubuntu.tar.xz
+  - tar xf postgrest-0.3.1.1-ubuntu.tar.xz
 install:
   - pip install -r requirements.txt
   - pip install coveralls
+  - python setup.py install
+before_script:
+  - psql -c 'CREATE DATABASE dbod;' -U postgres
+  - psql -d dbod -f dbod/tests/db_test.sql -U postgres
+  - ./postgrest postgres://postgres@localhost/dbod -a postgres -s api &
+  - sleep 5
 script:
-  - nosetests --with-coverage --cover-package=dbod --cover-html
+  - curl -g http://localhost:3000
+  - nosetests --with-coverage --cover-package=dbod --cover-html -s
 after_success: coveralls
diff --git a/bin/dbod-api b/bin/dbod-api
index 59019a9..d79e0f2 100755
--- a/bin/dbod-api
+++ b/bin/dbod-api
@@ -12,49 +12,8 @@ DB On Demand metadata REST API server
 """
 
-import ConfigParser
-import sys, traceback
-from tornado.options import parse_command_line, options
-from tornado.httpserver import HTTPServer
-from tornado.ioloop import IOLoop
-from dbod.api.handlers import *
-from dbod.config import config
-
-import logging
-
-def main():
-    """ Main body """
-
-    # Set up log file and level.
-    options.log_file_prefix = config.get('logging','path')
-    options.logging = 'debug'
-    options.log_to_stderr = False
-
-    # Parse server command line and set up logging defaults, if necessary
-    parse_command_line()
-
-    logging.info("Defining application (url, handler) pairs")
-    application = tornado.web.Application([
-        (r"/", DocHandler),
-        (r"/api/v1/entity/([^/]+)", EntityHandler),
-        (r"/api/v1/entity/alias/([^/]+)", FunctionalAliasHandler),
-        (r"/api/v1/host/([^/]+)", HostHandler),
-        (r"/api/v1/host/aliases/([^/]+)", HostAliases),
-        (r"/api/v1/metadata/(?P<class>[^\/]+)/?(?P<name>[^\/]+)?", Metadata),
-        (r"/api/v1/rundeck/resources.xml", RundeckResources),
-        ], debug=True)
-
-    logging.info("Configuring HTTP server")
-    http_server = HTTPServer(application,
-            ssl_options = {
-                "certfile" : config.get('ssl', 'hostcert') ,
-                "keyfile" : config.get('ssl', 'hostkey'),
-                })
-
-    http_server.listen(config.get('server', 'port'))
-
-    logging.info("Starting application")
-    IOLoop.instance().start()
+from dbod.api.api import Application
 
 if __name__ == "__main__":
-    main()
+    Application()
+
diff --git a/dbod/api/api.py b/dbod/api/api.py
new file mode 100644
index 0000000..82df5ca
--- /dev/null
+++ b/dbod/api/api.py
@@ -0,0 +1,71 @@
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+
+# Copyright (C) 2015, CERN
+# This software is distributed under the terms of the GNU General Public
+# Licence version 3 (GPL Version 3), copied verbatim in the file "LICENSE".
+# In applying this license, CERN does not waive the privileges and immunities
+# granted to it by virtue of its status as Intergovernmental Organization
+# or submit itself to any jurisdiction.
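+
+# Usage sketch (illustrative): instantiating Application, as bin/dbod-api
+# does above, loads the configuration, registers the URL handlers below
+# and starts the Tornado IOLoop:
+#
+#     from dbod.api.api import Application
+#     Application()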
+
+import ConfigParser
+import sys, traceback
+import tornado.web
+import logging
+
+from tornado.options import parse_command_line, options, define
+from tornado.httpserver import HTTPServer
+from tornado.ioloop import IOLoop
+
+from dbod.api.base import DocHandler
+from dbod.api.rundeck import RundeckResources, RundeckJobs
+from dbod.api.metadata import Metadata
+from dbod.api.functionalalias import FunctionalAliasHandler
+from dbod.api.hostaliases import HostAliases
+from dbod.api.entity import Entity
+from dbod.config import config
+
+handlers = [
+    (r"/", DocHandler),
+    (r"/api/v1/entity/([^/]+)", Entity),
+    (r"/api/v1/host/aliases/([^/]+)", HostAliases),
+    (r"/api/v1/entity/alias/([^/]*)", FunctionalAliasHandler),
+    (r"/api/v1/metadata/(?P<class>[^\/]+)/?(?P<name>[^\/]+)?", Metadata),
+    (r"/api/v1/rundeck/resources.xml", RundeckResources),
+    (r"/api/v1/rundeck/job/(?P<job>[^\/]+)/?(?P<entity>[^\/]+)?", RundeckJobs),
+    ]
+
+class Application():
+    def __init__(self):
+        # Set up log file and level.
+        options.log_file_prefix = config.get('logging', 'path')
+        options.logging = config.get('logging', 'level')
+        options.log_to_stderr = config.getboolean('logging', 'stderr')
+
+        # Port and arguments
+        port = config.get('server', 'port')
+        define('port', default=port, help='Port to be used')
+        parse_command_line()
+
+        # Defining handlers
+        logging.info("Defining application (url, handler) pairs")
+        application = tornado.web.Application(handlers, debug=config.getboolean('tornado', 'debug'))
+
+        # Configuring server and SSL
+        logging.info("Configuring HTTP server")
+        if (config.has_section('ssl')):
+            http_server = HTTPServer(application,
+                    ssl_options = {
+                        "certfile" : config.get('ssl', 'hostcert') ,
+                        "keyfile" : config.get('ssl', 'hostkey'),
+                        })
+        else:
+            http_server = HTTPServer(application)
+            logging.info("Host certificate undefined, SSL is DISABLED")
+
+        # Listening port
+        http_server.listen(options.port)
+
+        # Starting
+        logging.info("Starting application")
+        tornado.ioloop.IOLoop.instance().start()
diff --git a/dbod/api/base.py b/dbod/api/base.py
new file mode 100644
index 0000000..126564b
--- /dev/null
+++ b/dbod/api/base.py
@@ -0,0 +1,80 @@
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+
+# Copyright (C) 2015, CERN
+# This software is distributed under the terms of the GNU General Public
+# Licence version 3 (GPL Version 3), copied verbatim in the file "LICENSE".
+# In applying this license, CERN does not waive the privileges and immunities
+# granted to it by virtue of its status as Intergovernmental Organization
+# or submit itself to any jurisdiction.
+
+"""
+REST API Server for the DB On Demand System
+"""
+
+import tornado.web
+import base64
+import functools
+import logging
+
+from dbod.config import config
+
+# HTTP API status codes
+OK = 200
+CREATED = 201       # Request fulfilled resulting in creation of new resource
+NO_CONTENT = 204    # Successful delete
+NOT_FOUND = 404
+UNAUTHORIZED = 401
+BAD_REQUEST = 400
+
+# Basic HTTP Authentication decorator
+def http_basic_auth(fun):
+    """Decorator for extracting HTTP basic authentication user/password pairs
+    from the request headers and matching them to configured credentials.
+    It will generate an HTTP UNAUTHORIZED (Error code 401) if the request is
+    not using HTTP basic authentication.
+
+    Example:
+        @http_basic_auth
+        def get(self, user, pwd)
+    """
+    @functools.wraps(fun)
+    def wrapper(*args, **kwargs):
+        """ Decorator wrapper """
+        self = args[0]
+        try:
+            # Try
+            auth = self.request.headers.get('Authorization')
+            scheme, _, token = auth.partition(' ')
+            if scheme.lower() == 'basic':
+                # Decode user and password
+                user, _, pwd = base64.decodestring(token).partition(':')
+                if user == config.get('api','user') and pwd == config.get('api','pass'):
+                    return fun(*args, **kwargs)
+                else:
+                    # Raise UNAUTHORIZED HTTP Error (401)
+                    logging.error("Unauthorized access from: %s",
+                            self.request.headers)
+                    raise tornado.web.HTTPError(UNAUTHORIZED)
+
+            else:
+                # We only support basic authentication
+                logging.error("Authentication scheme not recognized")
+                return "Authentication scheme not recognized"
+        except AttributeError:
+            # Raise UNAUTHORIZED HTTP Error (401) if the request is not
+            # using authentication (auth will be None and partition() will fail)
+            logging.error("Unauthorized access from: %s",
+                    self.request.headers)
+            raise tornado.web.HTTPError(UNAUTHORIZED)
+
+    return wrapper
+
+class DocHandler(tornado.web.RequestHandler):
+    """Generates the API endpoint documentation"""
+    def get(self):
+        logging.info("Generating API endpoints doc")
+        response = """Please use :
+        http://hostname:port/api/v1/entity/NAME
+        http://hostname:port/api/v1/host/HOSTNAME
""" + self.write(response) diff --git a/dbod/api/dbops.py b/dbod/api/dbops.py deleted file mode 100644 index 111a369..0000000 --- a/dbod/api/dbops.py +++ /dev/null @@ -1,186 +0,0 @@ -#!/usr/bin/env python -# -*- coding: utf-8 -*- - -# Copyright (C) 2015, CERN -# This software is distributed under the terms of the GNU General Public -# Licence version 3 (GPL Version 3), copied verbatim in the file "COPYING". -# In applying this license, CERN does not waive the privileges and immunities -# granted to it by virtue of its status as Intergovernmental Organization -# or submit itself to any jurisdiction. - -""" -This file contains all database related code -""" - -from psycopg2 import connect, DatabaseError, pool, errorcodes -import sys, traceback, logging - -from dbod.config import config - -try: - POOL = pool.ThreadedConnectionPool( - 5, # Min. # of connections - 20, # Max. # of connections - database = config.get('database', 'database'), - user = config.get('database', 'user'), - host = config.get('database', 'host'), - port = config.get('database', 'port'), - password = config.get('database', 'password')) -except DatabaseError as dberr: - logging.error("PG Error: %s", errorcodes.lookup(dberr.pgcode[:2])) - logging.error("PG Error: %s", errorcodes.lookup(dberr.pgcode)) - -def get_metadata(entity): - """Returns a JSON object containing all the metadata for a certain entity""" - try: - entity = str(entity) - with POOL.getconn() as conn: - with conn.cursor() as curs: - curs.execute("""select data from metadata where db_name = %s""", - (entity, )) - res = curs.fetchone() - return res[0] if res else None - except DatabaseError as dberr: - logging.error("PG Error: %s", dberr.pgerror) - logging.error("PG Error lookup: %s", errorcodes.lookup(dberr.pgcode)) - return None - finally: - POOL.putconn(conn) - - -def insert_metadata(entity, metadata): - """Creates an entry in the metadata table""" - try: - with POOL.getconn() as conn: - with conn.cursor() as curs: - logging.debug("Creating metadata entry for %s", entity) - logging.debug("Metadata: %s", metadata) - curs.execute("""insert into metadata(db_name, data) values(%s, %s)""", - (entity, metadata, )) - logging.debug('DB query: %s', curs.query) - conn.commit() - return curs.rowcount == 1 - except DatabaseError as dberr: - logging.error("PG Error: %s", dberr.pgerror) - logging.error("PG Error lookup: %s", errorcodes.lookup(dberr.pgcode)) - return None - finally: - POOL.putconn(conn) - -def update_metadata(entity, metadata): - """Updates the JSON object containing all the metadata for an entity""" - try: - with POOL.getconn() as conn: - with conn.cursor() as curs: - logging.debug("Updating metadata entry for %s", entity) - logging.debug("Metadata: %s", metadata) - curs.execute("""update metadata set data =%s where db_name = %s""", - (metadata, entity,)) - logging.debug('DB query: %s', curs.query) - conn.commit() - return curs.rowcount == 1 - except DatabaseError as dberr: - logging.error("PG Error: %s", dberr.pgerror) - logging.error("PG Error lookup: %s", errorcodes.lookup(dberr.pgcode)) - return None - finally: - POOL.putconn(conn) - -def delete_metadata(entity): - """Deletes the metadata entry for an entity""" - try: - with POOL.getconn() as conn: - with conn.cursor() as curs: - curs.execute("""delete from metadata where db_name = %s""", - (entity, )) - logging.debug('DB query: %s', curs.query) - conn.commit() - return curs.rowcount == 1 - except DatabaseError as dberr: - logging.error("PG Error: %s", dberr.pgerror) - logging.error("PG Error lookup: 
%s", errorcodes.lookup(dberr.pgcode)) - return None - finally: - POOL.putconn(conn) - -def host_metadata(host): - """Returns a JSON object containing the metadata for all the entities - residing on a host""" - try: - with POOL.getconn() as conn: - with conn.cursor() as curs: - curs.execute("""select db_name, data - from ( - select db_name, json_array_elements(data->'hosts') host, data - from metadata) - as foo - where trim(foo.host::text, '"') = %s""", (host, )) - res = curs.fetchall() # Either a list of tuples or empty list - return res if res else None - except DatabaseError as dberr: - logging.error("PG Error: %s", errorcodes.lookup(dberr.pgcode[:2])) - logging.error("PG Error: %s", errorcodes.lookup(dberr.pgcode)) - return None - finally: - POOL.putconn(conn) - -# Functional aliases related methods -# The assumption for the first implementation is that the database -# table contains empty entries for pre-created dnsnames that are -# considered valid - -def next_dnsname(): - """Returns the next dnsname which can be used for a newly created - instance, if any.""" - try: - with POOL.getconn() as conn: - with conn.cursor() as curs: - curs.execute("""select dns_name - from functional_aliases - where db_name is NULL order by dns_name limit 1""") - logging.debug('DB query: %s', curs.query) - return curs.fetchone() # First unused dnsname or None - except DatabaseError as dberr: - logging.error("PG Error: %s", errorcodes.lookup(dberr.pgcode[:2])) - logging.error("PG Error: %s", errorcodes.lookup(dberr.pgcode)) - return None - finally: - POOL.putconn(conn) - -def update_functional_alias(dnsname, db_name, alias): - """Updates a dnsname record with its db_name and alias""" - try: - with POOL.getconn() as conn: - with conn.cursor() as curs: - logging.debug("Updating functional alias record (%s, %s, %s)", - dnsname, db_name, alias) - curs.execute("""update functional_aliases - set db_name = %s, alias = %s where dns_name = %s""", - (db_name, alias, dnsname,)) - logging.debug('DB query: %s', curs.query) - conn.commit() - # Return True if the record was updated succesfully - return curs.rowcount == 1 - except DatabaseError as dberr: - logging.error("PG Error: %s", errorcodes.lookup(dberr.pgcode[:2])) - logging.error("PG Error: %s", errorcodes.lookup(dberr.pgcode)) - return None - finally: - POOL.putconn(conn) - -def get_functional_alias(db_name): - """Returns the funcional alias and dnsname for a certain database""" - try: - with POOL.getconn() as conn: - with conn.cursor() as curs: - curs.execute("""select dns_name, alias - from functional_aliases - where db_name = %s""", (db_name,)) - logging.debug('DB query: %s', curs.query) - return curs.fetchone() - except DatabaseError as dberr: - logging.error("PG Error: %s", errorcodes.lookup(dberr.pgcode[:2])) - logging.error("PG Error: %s", errorcodes.lookup(dberr.pgcode)) - return None - finally: - POOL.putconn(conn) diff --git a/dbod/api/entity.py b/dbod/api/entity.py new file mode 100644 index 0000000..37f2cd9 --- /dev/null +++ b/dbod/api/entity.py @@ -0,0 +1,154 @@ +#!/usr/bin/env python +# -*- coding: utf-8 -*- + +# Copyright (C) 2015, CERN +# This software is distributed under the terms of the GNU General Public +# Licence version 3 (GPL Version 3), copied verbatim in the file "LICENSE". +# In applying this license, CERN does not waive the privileges and immunities +# granted to it by virtue of its status as Intergovernmental Organization +# or submit itself to any jurisdiction. 
+
+"""
+REST API Server for the DB On Demand System
+"""
+
+import tornado.web
+import logging
+import requests
+import json
+import urllib
+
+from dbod.api.base import *
+from dbod.config import config
+
+class Entity(tornado.web.RequestHandler):
+    def get(self, name):
+        """Returns an instance by db_name"""
+        response = requests.get(config.get('postgrest', 'instance_url') + "?db_name=eq." + name)
+        if response.ok:
+            data = response.json()
+            if data:
+                self.write({'response' : data})
+                self.set_status(OK)
+            else:
+                logging.error("Entity metadata not found: " + name)
+                raise tornado.web.HTTPError(NOT_FOUND)
+        else:
+            logging.error("Error fetching entity: " + name)
+            raise tornado.web.HTTPError(response.status_code)
+
+    def post(self, instance):
+        """Inserts a new instance in the database"""
+        entity = json.loads(self.request.body)
+
+        if "port" not in entity or "volumes" not in entity:
+            logging.error("Port or volumes not defined for entity: " + instance)
+            raise tornado.web.HTTPError(BAD_REQUEST)
+
+        # Get the port
+        port = entity["port"]
+        del entity["port"]
+
+        # Get the volumes
+        volumes = entity["volumes"]
+        del entity["volumes"]
+
+        # Insert the entity in the database using PostgREST
+        response = requests.post(config.get('postgrest', 'instance_url'), json=entity, headers={'Prefer': 'return=representation'})
+        if response.ok:
+            entid = json.loads(response.text)["id"]
+            logging.debug("Created entity with id: " + str(entid))
+
+            # Add entity id to volumes
+            for volume in volumes:
+                volume["instance_id"] = entid
+
+            # Insert the volumes in the database using PostgREST
+            response = requests.post(config.get('postgrest', 'volume_url'), json=volumes)
+            if response.ok:
+                response = requests.post(config.get('postgrest', 'attribute_url'), json={'instance_id': entid, 'name': 'port', 'value': port})
+                if response.ok:
+                    self.set_status(CREATED)
+                else:
+                    logging.error("Error inserting the port attribute: " + response.text)
+                    self.__delete_instance__(entid)
+                    raise tornado.web.HTTPError(response.status_code)
+            else:
+                logging.error("Error creating the volumes: " + response.text)
+                self.__delete_instance__(entid)
+                raise tornado.web.HTTPError(response.status_code)
+        else:
+            logging.error("Error creating the entity: " + response.text)
+            raise tornado.web.HTTPError(response.status_code)
+
+    def put(self, instance):
+        """Updates an instance"""
+        entity = json.loads(self.request.body)
+        entid = self.__get_instance_id__(instance)
+
+        # Check if the port is changed
+        if "port" in entity:
+            port = {"value":entity["port"]}
+            del entity["port"]
+            response = requests.patch(config.get('postgrest', 'attribute_url') + "?instance_id=eq." + str(entid) + "&name=eq.port", json=port)
+            if response.ok:
+                self.set_status(response.status_code)
+            else:
+                logging.error("Error updating port on instance: " + instance)
+                raise tornado.web.HTTPError(response.status_code)
+
+        # Check if the volumes are changed
+        if "volumes" in entity:
+            volumes = entity["volumes"]
+            for volume in volumes:
+                volume["instance_id"] = entid
+            del entity["volumes"]
+
+            # Delete current volumes
+            response = requests.delete(config.get('postgrest', 'volume_url') + "?instance_id=eq." + str(entid))
+            if response.ok:
+                response = requests.post(config.get('postgrest', 'volume_url'), json=volumes)
+                if response.ok:
+                    self.set_status(response.status_code)
+                else:
+                    logging.error("Error adding volumes for instance: " + str(entid))
+                    raise tornado.web.HTTPError(response.status_code)
+            else:
+                logging.error("Error deleting old volumes for instance: " + str(entid))
+                raise tornado.web.HTTPError(response.status_code)
+
+        if entity:
+            response = requests.patch(config.get('postgrest', 'instance_url') + "?db_name=eq." + instance, json=entity)
+            if response.ok:
+                self.set_status(response.status_code)
+            else:
+                logging.error("Instance not found: " + instance)
+                raise tornado.web.HTTPError(response.status_code)
+        else:
+            self.set_status(NO_CONTENT)
+
+    def delete(self, instance):
+        """Deletes an instance by name"""
+        entid = self.__get_instance_id__(instance)
+        if entid:
+            logging.debug("Deleting instance id: " + str(entid))
+            self.__delete_instance__(entid)
+            self.set_status(NO_CONTENT)
+        else:
+            logging.error("Instance not found: " + instance)
+            raise tornado.web.HTTPError(NOT_FOUND)
+
+    def __get_instance_id__(self, instance):
+        response = requests.get(config.get('postgrest', 'instance_url') + "?db_name=eq." + instance)
+        if response.ok:
+            data = response.json()
+            if data:
+                return data[0]["id"]
+            else:
+                return None
+        else:
+            return None
+
+    def __delete_instance__(self, inst_id):
+        requests.delete(config.get('postgrest', 'attribute_url') + "?instance_id=eq." + str(inst_id))
+        requests.delete(config.get('postgrest', 'volume_url') + "?instance_id=eq." + str(inst_id))
+        requests.delete(config.get('postgrest', 'instance_url') + "?id=eq." + str(inst_id))
+
diff --git a/dbod/api/functionalalias.py b/dbod/api/functionalalias.py
new file mode 100644
index 0000000..eb21694
--- /dev/null
+++ b/dbod/api/functionalalias.py
@@ -0,0 +1,188 @@
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+
+# Copyright (C) 2015, CERN
+# This software is distributed under the terms of the GNU General Public
+# Licence version 3 (GPL Version 3), copied verbatim in the file "LICENSE".
+# In applying this license, CERN does not waive the privileges and immunities
+# granted to it by virtue of its status as Intergovernmental Organization
+# or submit itself to any jurisdiction.
+
+"""
+REST API Server for the DB On Demand System
+"""
+
+import logging
+import json
+from ast import literal_eval
+from sys import exc_info
+import requests
+import tornado.web
+import tornado.escape
+from dbod.api.base import NOT_FOUND, BAD_REQUEST
+from dbod.config import config
+
+class FunctionalAliasHandler(tornado.web.RequestHandler):
+    '''The handler for the entity/alias/<db_name> endpoint'''
+    url = config.get('postgrest', 'functional_alias_url')
+    if not url:
+        logging.error("Internal entity/alias endpoint not configured")
+        raise tornado.web.HTTPError(NOT_FOUND)
+
+    def data_received(self, *arg, **kwargs):
+        '''Abstract method which handles streamed request data.'''
+        # No need for implementation
+        pass
+
+    def get(self, db_name, *args):
+        '''Returns db_name's alias and dns name'''
+        #self.set_header('Content-Type', 'application/json')
+
+        logging.debug(args)
+        logging.debug('Arguments:' + str(self.request.arguments))
+        composed_url = self.url + '?db_name=eq.'
+ db_name + '&select=dns_name,alias' + logging.debug('Requesting ' + composed_url) + response = requests.get(composed_url) + if response.ok: + data = response.json() + if data: + self.write({'response' : data}) + self.set_status(response.status_code) + else: + logging.error("Functional alias not found") + raise tornado.web.HTTPError(NOT_FOUND) + else: + logging.error("Error fetching functional alias of: " + db_name) + raise tornado.web.HTTPError(response.status_code) + + + def post(self, *args): + '''Updates a row with db_name and the alias. The dns_name is already there.''' + + def next_dnsname(): + '''Returns the next dnsname which can be used for a newly created + instance, if any''' + + #self.set_header('Content-Type', 'application/json') + #LIMIT is not working in postgrest but it uses some headers for that as well + headers = {'Range-Unit': 'items', 'Range': '0-0'} + # select the next available dns_name with db_name and alias assigned to NULL + query_select = '?select=dns_name&order=dns_name.asc&' + query_filter = 'db_name=is.null&alias=is.null&dns_name=isnot.null' + composed_url = self.url + query_select + query_filter + try: + response_dns = requests.get(composed_url, headers=headers) + response_dns_dict = literal_eval(response_dns.text)[0] + if response_dns.ok: + return response_dns_dict['dns_name'] + else: + return None + except: + error_msg = exc_info()[0] + logging.error(error_msg) + return None + + + def check_if_exists(column, value): + '''It checks if the inserted data already exists in the functional_aliases table''' + query = '?%s=eq.%s&select=%s' %(column, value, column) + composed_url = self.url + query + response = requests.get(composed_url) + if response.ok and literal_eval(response.text): + logging.error("A(n) %s with the value of %s already exists in the functional_aliases table" %(column,value)) + return True + else: + + return False + #self.set_header("Content-Type", 'application/json') + self.set_header('Prefer', 'return=representation') + logging.debug(args) + logging.debug('Arguments:' + str(self.request.arguments)) + try: + functional_alias = json.loads(self.get_argument('functional_alias')) + logging.debug(str(len(functional_alias)) + " Argument(s) given:") + logging.debug(functional_alias) + except: + logging.error("Argument not recognized or not defined.") + logging.error("Try adding header 'Content-Type:application/x-www-form-urlencoded'") + logging.error("The right format should be: functional_alias={'':''}") + raise tornado.web.HTTPError(NOT_FOUND) + + dns_name = next_dnsname() + logging.info("dns_name picked: " + str(dns_name)) + + if dns_name: + logging.debug("dns_name picked: " + str(dns_name)) + headers = {'Prefer': 'return=representation'} + db_name = str(functional_alias.keys()[0]) + alias = str(functional_alias.values()[0]) + insert_data = {"db_name": db_name, + "alias": alias} + logging.debug("Data to insert: " + str(insert_data)) + + #if not check_if_exists('db_name', db_name) and not check_if_exists('alias', alias): + composed_url = self.url + '?dns_name=eq.' + dns_name + logging.debug('Requesting insertion: ' + composed_url) + + response = requests.patch(composed_url, json=insert_data, headers=headers) + + if response.ok: + data = response.json() + #if data: + logging.info('Success. 
Data inserted in the functional_aliases table:')
+                logging.info(data)
+                self.set_status(response.status_code)
+                #else:
+                #    logging.error("Empty data")
+                #    raise tornado.web.HTTPError(response.status_code)
+            else:
+                logging.error("Unsuccessful insertion")
+                self.set_status(response.status_code)
+
+        else:
+            logging.error("No dns_name available in the functional_aliases table")
+            raise tornado.web.HTTPError(BAD_REQUEST)
+
+    def delete(self, db_name, *args):
+        '''Removes the functional alias association for an entity by setting
+        the db_name and alias fields to NULL. If the functional alias does
+        not exist, it does nothing.'''
+
+        def get_dns(db_name):
+            '''Get the dns_name given the db_name'''
+            composed_url = self.url + '?db_name=eq.' + db_name + '&select=dns_name'
+            response = requests.get(composed_url)
+            if response.ok:
+                try:
+                    dns_name_dict = literal_eval(response.text)[0]
+                    return dns_name_dict['dns_name']
+                except IndexError:
+                    return None
+            else:
+                return None
+
+
+        logging.debug(args)
+        logging.debug('Arguments:' + str(self.request.arguments))
+
+        dns_name = get_dns(db_name)
+        logging.debug(dns_name)
+        if dns_name:
+            headers = {'Prefer': 'return=representation', 'Content-Type': 'application/json'}
+            composed_url = self.url + '?dns_name=eq.' + dns_name
+            logging.debug('Requesting deletion: ' + composed_url)
+            delete_data = '{"db_name": null, "alias": null}'
+            logging.debug("dns_name to be retained: " + dns_name)
+            response = requests.patch(composed_url, json=json.loads(delete_data), headers=headers)
+
+            if response.ok:
+                data = response.json()
+                logging.info("Delete success of: " + str(data))
+                self.set_status(response.status_code)
+            else:
+                logging.error("Unsuccessful deletion")
+                raise tornado.web.HTTPError(response.status_code)
+
+        else:
+            logging.info("db_name not found. Nothing to do")
+            self.set_status(BAD_REQUEST)
diff --git a/dbod/api/handlers.py b/dbod/api/handlers.py
deleted file mode 100644
index 07041d9..0000000
--- a/dbod/api/handlers.py
+++ /dev/null
@@ -1,284 +0,0 @@
-#!/usr/bin/env python
-# -*- coding: utf-8 -*-
-
-# Copyright (C) 2015, CERN
-# This software is distributed under the terms of the GNU General Public
-# Licence version 3 (GPL Version 3), copied verbatim in the file "LICENSE".
-# In applying this license, CERN does not waive the privileges and immunities
-# granted to it by virtue of its status as Intergovernmental Organization
-# or submit itself to any jurisdiction.
-
-"""
-REST API Server for the DB On Demand System
-"""
-
-from dbod.api.dbops import *
-from dbod.config import config
-import tornado.web
-import tornado.log
-import base64
-import functools
-import logging
-import json
-
-
-# HTTP API status codes
-OK = 200
-CREATED = 201       # Request fulfilled resulting in creation of new resource
-NO_CONTENT = 204    # Succesfull delete
-NOT_FOUND = 404
-UNAUTHORIZED = 401
-
-# Basic HTTP Authentication decorator
-def http_basic_auth(fun):
-    """Decorator for extracting HTTP basic authentication user/password pairs
-    from the request headers and matching them to configurated credentials.
-    It will generate an HTTP UNAUTHORIZED (Error code 401) if the request is
-    not using HTTP basic authentication.
-
-    Example:
-        @http_basic_auth
-        def get(self, user, pwd)
-    """
-    @functools.wraps(fun)
-    def wrapper(*args, **kwargs):
-        """ Decorator wrapper """
-        self = args[0]
-        try:
-            # Try
-            auth = self.request.headers.get('Authorization')
-            scheme, _, token = auth.partition(' ')
-            if scheme.lower() == 'basic':
-                # Decode user and password
-                user, _, pwd = base64.decodestring(token).partition(':')
-                if user == config.get('api','user') and pwd == config.get('api','pass'):
-                    return fun(*args, **kwargs)
-                else:
-                    # Raise UNAUTHORIZED HTTP Error (401)
-                    logging.error("Unauthorized access from: %s",
-                            self.request.headers)
-                    raise tornado.web.HTTPError(UNAUTHORIZED)
-
-            else:
-                # We only support basic authentication
-                logging.error("Authentication scheme not recognized")
-                return "Authentication scheme not recognized"
-        except AttributeError:
-            # Raise UNAUTHORIZED HTTP Error (401) if the request is not
-            # using autentication (auth will be None and partition() will fail
-            logging.error("Unauthorized access from: %s",
-                    self.request.headers)
-            raise tornado.web.HTTPError(UNAUTHORIZED)
-
-    return wrapper
-
-class DocHandler(tornado.web.RequestHandler):
-    """Generates the API endpoint documentation"""
-    def get(self):
-        logging.info("Generating API endpoints doc")
-        response = """Please use :
-        http://hostname:port/api/v1/entity/NAME
-        http://hostname:port/api/v1/host/HOSTNAME
""" - self.write(response) - -class EntityHandler(tornado.web.RequestHandler): - - def get(self, entity): - """Returns metadata for a certain entity""" - response = get_metadata(entity) - if response: - logging.debug(response) - self.write(response) - else: - logging.warning("Entity not found: %s", entity) - raise tornado.web.HTTPError(NOT_FOUND) - - @http_basic_auth - def post(self, entity): - """Returns metadata for a certain entity""" - try: - metadata = self.get_argument('metadata') - response = insert_metadata(entity, metadata) - if response: - logging.debug("Metadata successfully created for %s: %s", - entity, metadata) - self.set_status(CREATED) - self.finish() - except tornado.web.MissingArgumentError as err: - logging.error("Missing 'metadata' argument in request!") - raise tornado.web.MissingArgumentError() - - @http_basic_auth - def put(self, entity): - """Returns metadata for a certain entity""" - try: - metadata = self.get_argument('metadata') - response = update_metadata(entity, metadata) - if response: - logging.debug("Metadata successfully updated for %s: %s", - entity, metadata) - self.set_status(CREATED) - self.finish() - except tornado.web.MissingArgumentError as err: - logging.error("Missing 'metadata' argument in request!") - raise tornado.web.MissingArgumentError() - - @http_basic_auth - def delete(self, entity): - """Returns metadata for a certain entity""" - response = delete_metadata(entity) - if response: - self.set_status(NO_CONTENT) - self.finish() - else: - logging.warning("Entity not found: %s", entity) - raise tornado.web.HTTPError(NOT_FOUND) - -class HostHandler(tornado.web.RequestHandler): - def get(self, host): - """Returns an object containing the metadata for all the entities - on a certain host""" - response = host_metadata(host) - if response: - logging.debug(response) - self.write(json.dumps(response)) - else: - logging.warning("Metadata not found for host: %s", host) - raise tornado.web.HTTPError(NOT_FOUND) - -class FunctionalAliasHandler(tornado.web.RequestHandler): - def get(self, entity): - """Returns the functional alias association for an entity""" - response = get_functional_alias(entity) - if response: - logging.debug(response) - self.write(json.dumps(response)) - else: - logging.error("Functional alias not found for entity: %s", entity) - raise tornado.web.HTTPError(NOT_FOUND) - - @http_basic_auth - def post(self, entity): - """Creates a functional alias association for an entity""" - dnsname = next_dnsname() - if dnsname: - try: - alias = self.get_argument('alias') - response = update_functional_alias(dnsname[0], entity, alias) - if response: - logging.debug("Functional alias (%s) successfully added for %s", - alias, entity) - self.set_status(CREATED) - self.write(json.dumps(dnsname)) - except tornado.web.MissingArgumentError as err: - logging.error("Missing 'alias' argument in request!") - raise tornado.web.MissingArgumentError() - else: - logging.error("No available dnsnames found!") - raise tornado.web.HTTPError(NOT_FOUND) - - @http_basic_auth - def delete(self, entity): - """Removes the functional alias association for an entity. 
-    If the functional alias doesn't exist it doesn't do anything"""
-        dnsname_full = get_functional_alias(entity)
-        if dnsname_full:
-            dnsname = dnsname_full[0]
-            response = update_functional_alias(dnsname, None, None)
-            if response:
-                logging.debug("Functional alias successfully removed for %s",
-                        entity)
-                self.set_status(NO_CONTENT)
-                self.finish()
-        else:
-            logging.error("Functional alias not found for entity: %s", entity)
-            raise tornado.web.HTTPError(NOT_FOUND)
-
-
-class RundeckResources(tornado.web.RequestHandler):
-    def get(self):
-        """Returns an valid resources.xml file to import target entities in
-        Rundeck"""
-
-        import requests
-        url = config.get('postgrest', 'rundeck_resources_url')
-        if url:
-            response = requests.get(url)
-            if response.ok:
-                data = json.loads(response.text)
-                d = {}
-                for entry in data:
-                    d[entry[u'db_name']] = entry
-                self.set_header('Content-Type', 'text/xml')
-                # Page Header
-                self.write('<?xml version="1.0" encoding="UTF-8"?>\n')
-                self.write('<project>\n')
-                for instance in sorted(d.keys()):
-                    body = d[instance]
-                    text = ('<node name="%s" hostname="%s" username="%s" category="%s" db_type="%s" port="%s" tags="%s"/>\n' %
-                            ( instance, # Name
-                              body.get(u'hostname'),
-                              body.get(u'username'),
-                              body.get(u'category'),
-                              body.get(u'db_type'),
-                              body.get(u'port'),
-                              body.get(u'tags')
-                              ))
-                    self.write(text)
-                self.write('</project>\n')
-            else:
-                logging.error("Error fetching Rundeck resources.xml")
-                raise tornado.web.HTTPError(NOT_FOUND)
-        else:
-            logging.error("Internal Rundeck resources endpoint not configured")
-
-
-class HostAliases(tornado.web.RequestHandler):
-    def get(self, host):
-        """list of ip-aliases registered in a host"""
-        import requests
-        url = config.get('postgrest', 'host_aliases_url')
-        if url:
-            composed_url = url + '?host=eq.' + host
-            logging.debug('Requesting ' + composed_url )
-            response = requests.get(composed_url)
-            if response.ok:
-                data = json.loads(response.text)
-                d = data.pop()
-                self.write(d.get('aliases'))
-            else:
-                logging.error("Error fetching aliases in host: " + host)
-                raise tornado.web.HTTPError(NOT_FOUND)
-        else:
-            logging.error("Internal host aliases endpoint not configured")
-
-class Metadata(tornado.web.RequestHandler):
-    def get(self, **args):
-        """Returns entity metadata"""
-        import requests
-        url = config.get('postgrest', 'entity_metadata_url')
-        name = args.get('name')
-        etype = args.get('class')
-        if url:
-            if etype == u'entity':
-                composed_url = url + '?db_name=eq.' + name
-            else:
-                composed_url = url + '?host=eq.' + name
-            logging.debug('Requesting ' + composed_url )
-            response = requests.get(composed_url)
-            if response.ok:
-                data = json.loads(response.text)
-                if data != []:
-                    if etype == u'entity':
-                        d = data.pop()
-                        self.write(d)
-                    else:
-                        self.write(json.dumps(data))
-                else:
-                    logging.error("Entity metadata not found: " + name)
-                    raise tornado.web.HTTPError(NOT_FOUND)
-            else:
-                logging.error("Error fetching entity metadata: " + name)
-                raise tornado.web.HTTPError(response.status_code)
-        else:
-            logging.error("Internal entity metadata endpoint not configured")
diff --git a/dbod/api/hostaliases.py b/dbod/api/hostaliases.py
new file mode 100644
index 0000000..abb0276
--- /dev/null
+++ b/dbod/api/hostaliases.py
@@ -0,0 +1,37 @@
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+
+# Copyright (C) 2015, CERN
+# This software is distributed under the terms of the GNU General Public
+# Licence version 3 (GPL Version 3), copied verbatim in the file "LICENSE".
+# In applying this license, CERN does not waive the privileges and immunities
+# granted to it by virtue of its status as Intergovernmental Organization
+# or submit itself to any jurisdiction.
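+
+# Usage sketch (illustrative; host and port depend on the deployment):
+#
+#   curl http://hostname:port/api/v1/host/aliases/host01
+#
+# proxies the PostgREST view configured as postgrest.host_aliases_url.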
+
+"""
+REST API Server for the DB On Demand System
+"""
+
+import tornado.web
+import logging
+import json
+import requests
+
+from dbod.api.base import *
+from dbod.config import config
+
+class HostAliases(tornado.web.RequestHandler):
+    def get(self, host):
+        """Returns the list of ip-aliases registered in a host"""
+        url = config.get('postgrest', 'host_aliases_url')
+        if url:
+            composed_url = url + '?host=eq.' + host
+            logging.debug('Requesting ' + composed_url)
+            response = requests.get(composed_url)
+            if response.ok:
+                data = response.json()
+                self.write({'response' : data})
+            else:
+                logging.error("Error fetching aliases in host: " + host)
+                raise tornado.web.HTTPError(NOT_FOUND)
+        else:
+            logging.error("Internal host aliases endpoint not configured")
diff --git a/dbod/api/metadata.py b/dbod/api/metadata.py
new file mode 100644
index 0000000..56cbf9e
--- /dev/null
+++ b/dbod/api/metadata.py
@@ -0,0 +1,51 @@
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+
+# Copyright (C) 2015, CERN
+# This software is distributed under the terms of the GNU General Public
+# Licence version 3 (GPL Version 3), copied verbatim in the file "LICENSE".
+# In applying this license, CERN does not waive the privileges and immunities
+# granted to it by virtue of its status as Intergovernmental Organization
+# or submit itself to any jurisdiction.
+
+"""
+REST API Server for the DB On Demand System
+"""
+
+import tornado.web
+import logging
+import requests
+
+from dbod.api.base import *
+from dbod.config import config
+
+class Metadata(tornado.web.RequestHandler):
+    def get(self, **args):
+        """Returns entity metadata"""
+        self.set_header("Content-Type", 'application/json')
+        url = config.get('postgrest', 'entity_metadata_url')
+        name = args.get('name')
+        etype = args.get('class')
+        if url and name:
+            if etype == u'entity':
+                composed_url = url + '?db_name=eq.' + name
+            elif etype == u'host':
+                composed_url = url + '?host=eq.' + name
+            else:
+                logging.error("Unsupported endpoint")
+                raise tornado.web.HTTPError(NOT_FOUND)
+            logging.debug('Requesting ' + composed_url)
+            response = requests.get(composed_url, verify=False)
+            if response.ok:
+                data = response.json()
+                if data:
+                    self.write({'response' : data})
+                else:
+                    logging.error("Entity metadata not found: " + name)
+                    raise tornado.web.HTTPError(NOT_FOUND)
+            else:
+                logging.error("Error fetching entity metadata: " + name)
+                raise tornado.web.HTTPError(response.status_code)
+        else:
+            logging.error("Internal entity metadata endpoint not configured")
+            raise tornado.web.HTTPError(NOT_FOUND)
diff --git a/dbod/api/rundeck.py b/dbod/api/rundeck.py
new file mode 100644
index 0000000..edeb7f1
--- /dev/null
+++ b/dbod/api/rundeck.py
@@ -0,0 +1,117 @@
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+
+# Copyright (C) 2015, CERN
+# This software is distributed under the terms of the GNU General Public
+# Licence version 3 (GPL Version 3), copied verbatim in the file "LICENSE".
+# In applying this license, CERN does not waive the privileges and immunities
+# granted to it by virtue of its status as Intergovernmental Organization
+# or submit itself to any jurisdiction.
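+
+# Endpoint sketch (illustrative; routes defined in dbod/api/api.py):
+#
+#   GET  /api/v1/rundeck/resources.xml       export instances as a Rundeck
+#                                            resource model
+#   POST /api/v1/rundeck/job/<job>/<entity>  run a configured Rundeck job
+#                                            against an entity and poll for
+#                                            its output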
+
+"""
+REST API Server for the DB On Demand System
+"""
+
+import tornado.web
+import logging
+import json
+import requests
+import time
+
+from dbod.api.base import *
+from dbod.config import config
+
+class RundeckResources(tornado.web.RequestHandler):
+    def get(self):
+        """Returns a valid resources.xml file to import target entities in
+        Rundeck"""
+        url = config.get('postgrest', 'rundeck_resources_url')
+        if url:
+            response = requests.get(url)
+            if response.ok:
+                data = json.loads(response.text)
+                d = {}
+                for entry in data:
+                    d[entry[u'db_name']] = entry
+                self.set_header('Content-Type', 'text/xml')
+                # Page Header
+                self.write('<?xml version="1.0" encoding="UTF-8"?>\n')
+                self.write('<project>\n')
+                for instance in sorted(d.keys()):
+                    body = d[instance]
+                    text = ('<node name="%s" hostname="%s" username="%s" category="%s" db_type="%s" port="%s" tags="%s"/>\n' %
+                            ( instance, # Name
+                              body.get(u'hostname'),
+                              body.get(u'username'),
+                              body.get(u'category'),
+                              body.get(u'db_type'),
+                              body.get(u'port'),
+                              body.get(u'tags')
+                              ))
+                    self.write(text)
+                self.write('</project>\n')
+            else:
+                logging.error("Error fetching Rundeck resources.xml")
+                raise tornado.web.HTTPError(NOT_FOUND)
+        else:
+            logging.error("Internal Rundeck resources endpoint not configured")
+
+class RundeckJobs(tornado.web.RequestHandler):
+    def get(self, **args):
+        """Returns the output of a job execution"""
+        job = args.get('job')
+        response = self.__get_output__(job)
+        if response.ok:
+            self.set_header("Content-Type", 'application/json')
+            self.write({'response' : json.loads(response.text)})
+        else:
+            logging.error("Error reading the job: " + job)
+            raise tornado.web.HTTPError(response.status_code)
+
+    def post(self, **args):
+        """Executes a new Rundeck job and returns the output"""
+        job = args.get('job')
+        entity = args.get('entity')
+        response_run = self.__run_job__(job, entity)
+        if response_run.ok:
+            data = json.loads(response_run.text)
+            exid = str(data["id"])
+            timeout = 20
+            while timeout > 0:
+                response_output = self.__get_output__(exid)
+                if response_output.ok:
+                    output = json.loads(response_output.text)
+                    logging.debug(output)
+                    if output["execState"] != "running":
+                        if output["execState"] == "succeeded":
+                            self.set_header("Content-Type", 'application/json')
+                            self.write({'response' : json.loads(response_output.text)})
+                            timeout = 0
+                        else:
+                            logging.error("The job completed with errors: " + exid)
+                            raise tornado.web.HTTPError(NOT_FOUND)
+                    else:
+                        timeout -= 1
+                        time.sleep(0.500)
+                else:
+                    logging.error("Error reading the job: " + exid)
+                    raise tornado.web.HTTPError(response_output.status_code)
+        else:
+            logging.error("Error running the job: " + job)
+            raise tornado.web.HTTPError(response_run.status_code)
+
+    def __get_output__(self, execution):
+        """Returns the output of a job execution"""
+        api_job_output = config.get('rundeck', 'api_job_output').format(execution)
+        return requests.get(api_job_output, headers={'Authorization': config.get('rundeck', 'api_authorization')}, verify=False)
+
+    def __run_job__(self, job, entity):
+        """Executes a new Rundeck job and returns the output"""
+        jobid = config.get('rundeck-jobs', job)
+        if jobid:
+            run_job_url = config.get('rundeck', 'api_run_job').format(jobid)
+            return requests.post(run_job_url, headers={'Authorization': config.get('rundeck', 'api_authorization')}, verify=False, data = {'filter':'name: ' + entity})
+
diff --git a/dbod/tests/db_test.sql b/dbod/tests/db_test.sql
new file mode 100644
index 0000000..e07913d
--- /dev/null
+++ b/dbod/tests/db_test.sql
@@ -0,0 +1,290 @@
+-- Copyright (C) 2015, CERN
+-- This software is distributed under the terms of the GNU General Public
+-- Licence version 3
(GPL Version 3), copied verbatim in the file "COPYING". +-- In applying this license, CERN does not waive the privileges and immunities +-- granted to it by virtue of its status as Intergovernmental Organization +-- or submit itself to any jurisdiction. + +------------------------------------------------ +-- Create the structure for the test database -- +------------------------------------------------ + +CREATE SCHEMA public; + +-- DOD_COMMAND_DEFINITION +CREATE TABLE public.dod_command_definition ( + command_name varchar(64) NOT NULL, + type varchar(64) NOT NULL, + exec varchar(2048), + category varchar(20), + PRIMARY KEY (command_name, type, category) +); + +-- DOD_COMMAND_PARAMS +CREATE TABLE public.dod_command_params ( + username varchar(32) NOT NULL, + db_name varchar(128) NOT NULL, + command_name varchar(64) NOT NULL, + type varchar(64) NOT NULL, + creation_date date NOT NULL, + name varchar(64) NOT NULL, + value text, + category varchar(20), + PRIMARY KEY (username, db_name, command_name, type, creation_date, name) +); + +-- DOD_INSTANCE_CHANGES +CREATE TABLE public.dod_instance_changes ( + username varchar(32) NOT NULL, + db_name varchar(128) NOT NULL, + attribute varchar(32) NOT NULL, + change_date date NOT NULL, + requester varchar(32) NOT NULL, + old_value varchar(1024), + new_value varchar(1024), + PRIMARY KEY (username, db_name, attribute, change_date) +); + +-- DOD_INSTANCES +CREATE TABLE public.dod_instances ( + id serial, + username varchar(32) NOT NULL, + db_name varchar(128) NOT NULL, + e_group varchar(256), + category varchar(32) NOT NULL, + creation_date date NOT NULL, + expiry_date date, + db_type varchar(32) NOT NULL, + db_size int, + no_connections int, + project varchar(128), + description varchar(1024), + version varchar(128), + master varchar(32), + slave varchar(32), + host varchar(128), + state varchar(32), + status varchar(32), + CONSTRAINT dod_instances_pkey PRIMARY KEY (id), + CONSTRAINT dod_instances_dbname UNIQUE (db_name) +); + +-- DOD_JOBS +CREATE TABLE public.dod_jobs ( + username varchar(32) NOT NULL, + db_name varchar(128) NOT NULL, + command_name varchar(64) NOT NULL, + type varchar(64) NOT NULL, + creation_date date NOT NULL, + completion_date date, + requester varchar(32) NOT NULL, + admin_action int NOT NULL, + state varchar(32) NOT NULL, + log text, + result varchar(2048), + email_sent date, + category varchar(20), + PRIMARY KEY (username, db_name, command_name, type, creation_date) +); + +-- DOD_UPGRADES +CREATE TABLE public.dod_upgrades ( + db_type varchar(32) NOT NULL, + category varchar(32) NOT NULL, + version_from varchar(128) NOT NULL, + version_to varchar(128) NOT NULL, + PRIMARY KEY (db_type, category, version_from) +); + +-- VOLUME +CREATE TABLE public.volume ( + id serial, + instance_id integer NOT NULL, + file_mode char(4) NOT NULL, + owner varchar(32) NOT NULL, + vgroup varchar(32) NOT NULL, + server varchar(63) NOT NULL, + mount_options varchar(256) NOT NULL, + mounting_path varchar(256) NOT NULL, + PRIMARY KEY (id) +); + +-- HOST +CREATE TABLE public.host ( + id serial, + name varchar(63) NOT NULL, + memory integer NOT NULL, + PRIMARY KEY (id) +); + +-- ATTRIBUTE +CREATE TABLE public.attribute ( + id serial, + instance_id integer NOT NULL, + name varchar(32) NOT NULL, + value varchar(250) NOT NULL, + PRIMARY KEY (id) +); + +-- FUNCTIONAL ALIASES +CREATE TABLE public.functional_aliases ( + dns_name character varying(256) NOT NULL, + db_name character varying(8), + alias character varying(256), + CONSTRAINT functional_aliases_pkey 
PRIMARY KEY (dns_name), + CONSTRAINT db_name_con UNIQUE (db_name) +); + +-- Insert test data for instances +INSERT INTO public.dod_instances (username, db_name, e_group, category, creation_date, expiry_date, db_type, db_size, no_connections, project, description, version, master, slave, host, state, status) +VALUES ('user01', 'dbod01', 'testgroupA', 'TEST', now(), NULL, 'MYSQL', 100, 100, 'API', 'Test instance 1', '5.6.17', NULL, NULL, 'host01', 'RUNNING', 1), + ('user01', 'dbod02', 'testgroupB', 'PROD', now(), NULL, 'PG', 50, 500, 'API', 'Test instance 2', '9.4.4', NULL, NULL, 'host03', 'RUNNING', 1), + ('user02', 'dbod03', 'testgroupB', 'TEST', now(), NULL, 'MYSQL', 100, 200, 'WEB', 'Expired instance 1', '5.5', NULL, NULL, 'host01', 'RUNNING', 0), + ('user03', 'dbod04', 'testgroupA', 'PROD', now(), NULL, 'PG', 250, 10, 'LCC', 'Test instance 3', '9.4.5', NULL, NULL, 'host01', 'RUNNING', 1), + ('user04', 'dbod05', 'testgroupC', 'TEST', now(), NULL, 'MYSQL', 300, 200, 'WEB', 'Test instance 4', '5.6.17', NULL, NULL, 'host01', 'RUNNING', 1); + +-- Insert test data for volumes +INSERT INTO public.volume (instance_id, file_mode, owner, vgroup, server, mount_options, mounting_path) +VALUES (1, '0755', 'TSM', 'ownergroup', 'NAS-server', 'rw,bg,hard', '/MNT/data1'), + (1, '0755', 'TSM', 'ownergroup', 'NAS-server', 'rw', '/MNT/bin'), + (2, '0755', 'TSM', 'ownergroup', 'NAS-server', 'rw,bg,hard', '/MNT/data2'), + (4, '0755', 'TSM', 'ownergroup', 'NAS-server', 'rw,bg,hard,tcp', '/MNT/data4'), + (5, '0755', 'TSM', 'ownergroup', 'NAS-server', 'rw,bg,hard', '/MNT/data5'), + (5, '0755', 'TSM', 'ownergroup', 'NAS-server', 'rw', '/MNT/bin'); + +-- Insert test data for attributes +INSERT INTO public.attribute (instance_id, name, value) +VALUES (1, 'port', '5501'), + (2, 'port', '6603'), + (3, 'port', '5510'), + (4, 'port', '6601'), + (5, 'port', '5500'); + +-- Insert test data for hosts +INSERT INTO public.host (name, memory) +VALUES ('host01', 12), + ('host02', 24), + ('host03', 64), + ('host04', 256); + +-- Insert test data for database aliases +INSERT INTO public.functional_aliases (dns_name, db_name, alias) +VALUES ('db-dbod-dns01','dbod_01','dbod-dbod-01.cern.ch'), + ('db-dbod-dns02','dbod_02','dbod-dbod-02.cern.ch'), + ('db-dbod-dns03','dbod_03','dbod-dbod-03.cern.ch'), + ('db-dbod-dns04','dbod_04','dbod-dbod-04.cern.ch'), + ('db-dbod-dns05', NULL, NULL); + +-- Schema API +CREATE SCHEMA api; + +-- Dod_instances view +CREATE OR REPLACE VIEW api.dod_instances AS +SELECT id, username, db_name, e_group, category, creation_date, expiry_date, db_type, db_size, no_connections, project, description, version, master, slave, host, state, status +FROM dod_instances; + +-- Job stats view +CREATE OR REPLACE VIEW api.job_stats AS +SELECT db_name, command_name, COUNT(*) as COUNT, ROUND(AVG(completion_date - creation_date) * 24*60*60) AS mean_duration +FROM dod_jobs GROUP BY command_name, db_name; + +-- Command stats view +CREATE OR REPLACE VIEW api.command_stats AS +SELECT command_name, COUNT(*) AS COUNT, ROUND(AVG(completion_date - creation_date) * 24*60*60) AS mean_duration +FROM dod_jobs GROUP BY command_name; + +-- Get hosts function +CREATE OR REPLACE FUNCTION get_hosts(host_ids INTEGER[]) +RETURNS VARCHAR[] AS $$ +DECLARE + hosts VARCHAR := ''; +BEGIN + SELECT ARRAY (SELECT name FROM host WHERE id = ANY(host_ids)) INTO hosts; + RETURN hosts; +END +$$ LANGUAGE plpgsql; + +-- Get volumes function +CREATE OR REPLACE FUNCTION get_volumes(pid INTEGER) +RETURNS JSON[] AS $$ +DECLARE + volumes JSON[]; +BEGIN + SELECT 
ARRAY (SELECT row_to_json(t) FROM (SELECT * FROM public.volume WHERE instance_id = pid) t) INTO volumes; + return volumes; +END +$$ LANGUAGE plpgsql; + +-- Get port function +CREATE OR REPLACE FUNCTION get_attribute(attr_name VARCHAR, inst_id INTEGER) +RETURNS VARCHAR AS $$ +DECLARE + res VARCHAR; +BEGIN + SELECT value FROM public.attribute A WHERE A.instance_id = inst_id AND A.name = attr_name INTO res; + return res; +END +$$ LANGUAGE plpgsql; + + +-- Get directories function +CREATE OR REPLACE FUNCTION get_directories(inst_name VARCHAR, type VARCHAR, version VARCHAR, port VARCHAR) +RETURNS TABLE (basedir VARCHAR, bindir VARCHAR, datadir VARCHAR, logdir VARCHAR, socket VARCHAR) AS $$ +BEGIN + IF type = 'MYSQL' THEN + RETURN QUERY SELECT + ('/usr/local/mysql/mysql-' || version)::VARCHAR basedir, + ('/usr/local/mysql/mysql-' || version || '/bin')::VARCHAR bindir, + ('/ORA/dbs03/' || upper(inst_name) || '/mysql')::VARCHAR datadir, + ('/ORA/dbs02/' || upper(inst_name) || '/mysql')::VARCHAR logdir, + ('/var/lib/mysql/mysql.sock.' || lower(inst_name) || '.' || port)::VARCHAR socket; + ELSIF type = 'PG' THEN + RETURN QUERY SELECT + ('/usr/local/pgsql/pgsql-' || version)::VARCHAR basedir, + ('/usr/local/mysql/mysql-' || version || '/bin')::VARCHAR bindir, + ('/ORA/dbs03/' || upper(inst_name) || '/data')::VARCHAR datadir, + ('/ORA/dbs02/' || upper(inst_name) || '/pg_xlog')::VARCHAR logdir, + ('/var/lib/pgsql/')::VARCHAR socket; + END IF; +END +$$ LANGUAGE plpgsql; + +CREATE VIEW api.instance AS +SELECT * FROM public.dod_instances; + +CREATE VIEW api.volume AS +SELECT * FROM public.volume; + +CREATE VIEW api.attribute AS +SELECT * FROM public.attribute; + +CREATE VIEW api.host AS +SELECT * FROM public.host; + +-- Metadata View +CREATE OR REPLACE VIEW api.metadata AS +SELECT id, username, db_name, category, db_type, version, host, get_attribute('port', id) port, get_volumes volumes, d.* +FROM dod_instances, get_volumes(id), get_directories(db_name, db_type, version, get_attribute('port', id)) d; + +-- Rundeck instances View +CREATE OR REPLACE VIEW api.rundeck_instances AS +SELECT public.dod_instances.db_name, + public.functional_aliases.alias hostname, + public.get_attribute('port', public.dod_instances.id) port, + 'dbod' username, + public.dod_instances.db_type db_type, + public.dod_instances.category category, + db_type || ',' || category tags +FROM public.dod_instances JOIN public.functional_aliases ON +public.dod_instances.db_name = public.functional_aliases.db_name; + +-- Host aliases View +CREATE OR REPLACE VIEW api.host_aliases AS +SELECT host, string_agg('dbod-' || db_name || 'domain', E',') aliases +FROM dod_instances +GROUP BY host; + +-- Functional aliases view +CREATE OR REPLACE VIEW api.functional_aliases AS +SELECT * +FROM functional_aliases; diff --git a/dbod/tests/db_test_isotirop.sql b/dbod/tests/db_test_isotirop.sql new file mode 100644 index 0000000..8ec0643 --- /dev/null +++ b/dbod/tests/db_test_isotirop.sql @@ -0,0 +1,280 @@ +-- Copyright (C) 2015, CERN +-- This software is distributed under the terms of the GNU General Public +-- Licence version 3 (GPL Version 3), copied verbatim in the file "COPYING". +-- In applying this license, CERN does not waive the privileges and immunities +-- granted to it by virtue of its status as Intergovernmental Organization +-- or submit itself to any jurisdiction. 
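+
+-- Example query (illustrative) against the api schema once this fixture is
+-- loaded, similar to what the API issues through PostgREST:
+--
+--   SELECT db_name, port FROM api.metadata WHERE db_name = 'dbod01';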
+
+------------------------------------------------
+-- Create the structure for the test database --
+------------------------------------------------
+
+-- DOD_COMMAND_DEFINITION
+CREATE TABLE public.dod_command_definition (
+    command_name varchar(64) NOT NULL,
+    type varchar(64) NOT NULL,
+    exec varchar(2048),
+    category varchar(20),
+    PRIMARY KEY (command_name, type, category)
+);
+
+-- DOD_COMMAND_PARAMS
+CREATE TABLE public.dod_command_params (
+    username varchar(32) NOT NULL,
+    db_name varchar(128) NOT NULL,
+    command_name varchar(64) NOT NULL,
+    type varchar(64) NOT NULL,
+    creation_date date NOT NULL,
+    name varchar(64) NOT NULL,
+    value text,
+    category varchar(20),
+    PRIMARY KEY (db_name)
+);
+
+-- DOD_INSTANCE_CHANGES
+CREATE TABLE public.dod_instance_changes (
+    username varchar(32) NOT NULL,
+    db_name varchar(128) NOT NULL,
+    attribute varchar(32) NOT NULL,
+    change_date date NOT NULL,
+    requester varchar(32) NOT NULL,
+    old_value varchar(1024),
+    new_value varchar(1024),
+    PRIMARY KEY (db_name)
+);
+
+-- DOD_INSTANCES
+CREATE TABLE public.dod_instances (
+    username varchar(32) NOT NULL,
+    db_name varchar(128) NOT NULL,
+    e_group varchar(256),
+    category varchar(32) NOT NULL,
+    creation_date date NOT NULL,
+    expiry_date date,
+    db_type varchar(32) NOT NULL,
+    db_size int NOT NULL,
+    no_connections int,
+    project varchar(128),
+    description varchar(1024),
+    version varchar(128),
+    master varchar(32),
+    slave varchar(32),
+    host varchar(128),
+    state varchar(32),
+    status varchar(32),
+    id int,
+    PRIMARY KEY (id)
+);
+
+-- DOD_JOBS
+CREATE TABLE public.dod_jobs (
+    username varchar(32) NOT NULL,
+    db_name varchar(128) NOT NULL,
+    command_name varchar(64) NOT NULL,
+    type varchar(64) NOT NULL,
+    creation_date date NOT NULL,
+    completion_date date,
+    requester varchar(32) NOT NULL,
+    admin_action int NOT NULL,
+    state varchar(32) NOT NULL,
+    log text,
+    result varchar(2048),
+    email_sent date,
+    category varchar(20),
+    PRIMARY KEY (username, db_name, command_name, type, creation_date)
+);
+
+-- DOD_UPGRADES
+CREATE TABLE public.dod_upgrades (
+    db_type varchar(32) NOT NULL,
+    category varchar(32) NOT NULL,
+    version_from varchar(128) NOT NULL,
+    version_to varchar(128) NOT NULL,
+    PRIMARY KEY (db_type, category, version_from)
+);
+
+-- VOLUME
+CREATE TABLE public.volume (
+    id serial,
+    instance_id integer NOT NULL,
+    file_mode char(4) NOT NULL,
+    owner varchar(32) NOT NULL,
+    vgroup varchar(32) NOT NULL,
+    server varchar(63) NOT NULL,
+    mount_options varchar(256) NOT NULL,
+    mounting_path varchar(256) NOT NULL
+);
+
+-- HOST
+CREATE TABLE public.host (
+    id serial,
+    name varchar(63) NOT NULL,
+    memory integer NOT NULL
+);
+
+-- ATTRIBUTE
+CREATE TABLE public.attribute (
+    id serial,
+    instance_id integer NOT NULL,
+    name varchar(32) NOT NULL,
+    value varchar(250) NOT NULL
+);
+
+-- FUNCTIONAL ALIASES
+CREATE TABLE public.functional_aliases
+(
+    dns_name character varying(256) NOT NULL,
+    db_name character varying(8),
+    alias character varying(256),
+    CONSTRAINT functional_aliases_pkey PRIMARY KEY (dns_name),
+    CONSTRAINT db_name_con UNIQUE (db_name)
+);
+
+-- Insert test data for instances
+INSERT INTO public.dod_instances (username, db_name, e_group, category, creation_date, expiry_date, db_type, db_size, no_connections, project, description, version, master, slave, host, state, status, id)
+VALUES ('user01', 'dbod01', 'testgroupA', 'TEST', now(), NULL, 'MYSQL', 100, 100, 'API', 'Test instance 1', '5.6.17', NULL, NULL, 'host01', 'RUNNING', 1, 1),
+       ('user01', 'dbod02',
'testgroupB', 'PROD', now(), NULL, 'PG', 50, 500, 'API', 'Test instance 2', '9.4.4', NULL, NULL, 'host03', 'RUNNING', 1, 2), + ('user02', 'dbod03', 'testgroupB', 'TEST', now(), NULL, 'MYSQL', 100, 200, 'WEB', 'Expired instance 1', '5.5', NULL, NULL, 'host01', 'RUNNING', 0, 3), + ('user03', 'dbod04', 'testgroupA', 'PROD', now(), NULL, 'PG', 250, 10, 'LCC', 'Test instance 3', '9.4.5', NULL, NULL, 'host01', 'RUNNING', 1, 4), + ('user04', 'dbod05', 'testgroupC', 'TEST', now(), NULL, 'MYSQL', 300, 200, 'WEB', 'Test instance 4', '5.6.17', NULL, NULL, 'host01', 'RUNNING', 1, 5); + +-- Insert test data for volumes +INSERT INTO public.volume (instance_id, file_mode, owner, vgroup, server, mount_options, mounting_path) +VALUES (1, '0755', 'TSM', 'ownergroup', 'NAS-server', 'rw,bg,hard', '/MNT/data1'), + (2, '0755', 'TSM', 'ownergroup', 'NAS-server', 'rw,bg,hard', '/MNT/data2'), + (4, '0755', 'TSM', 'ownergroup', 'NAS-server', 'rw,bg,hard,tcp', '/MNT/data4'), + (5, '0755', 'TSM', 'ownergroup', 'NAS-server', 'rw,bg,hard', '/MNT/data5'); + +-- Insert test data for attributes +INSERT INTO public.attribute (instance_id, name, value) +VALUES (1, 'port', '5501'), + (2, 'port', '6603'), + (3, 'port', '5510'), + (4, 'port', '6601'), + (5, 'port', '5500'); + +-- Insert test data for hosts +INSERT INTO public.host (name, memory) +VALUES ('host01', 12), + ('host02', 24), + ('host03', 64), + ('host04', 256); + +INSERT INTO public.functional_aliases (dns_name, db_name, alias) +VALUES ('db-dbod-dns01','dbod_01','dbod-dbod-01.cern.ch'), + ('db-dbod-dns02','dbod_02','dbod-dbod-02.cern.ch'), + ('db-dbod-dns03','dbod_03','dbod-dbod-03.cern.ch'), + ('db-dbod-dns04','dbod_04','dbod-dbod-04.cern.ch'), + ('db-dbod-dns05', NULL, NULL), + ('db-dbod-dns06', NULL, NULL), + ('db-dbod-dns07', NULL, NULL), + ('db-dbod-dns08', NULL, NULL); + +-- Schema API +CREATE SCHEMA api; + +-- Job stats view +CREATE OR REPLACE VIEW api.job_stats AS +SELECT db_name, command_name, COUNT(*) as COUNT, ROUND(AVG(completion_date - creation_date) * 24*60*60) AS mean_duration +FROM dod_jobs GROUP BY command_name, db_name; + +-- Command stats view +CREATE OR REPLACE VIEW api.command_stats AS +SELECT command_name, COUNT(*) AS COUNT, ROUND(AVG(completion_date - creation_date) * 24*60*60) AS mean_duration +FROM dod_jobs GROUP BY command_name; + +-- Get hosts function +CREATE OR REPLACE FUNCTION get_hosts(host_ids INTEGER[]) +RETURNS VARCHAR[] AS $$ +DECLARE + hosts VARCHAR := ''; +BEGIN + SELECT ARRAY (SELECT name FROM host WHERE id = ANY(host_ids)) INTO hosts; + RETURN hosts; +END +$$ LANGUAGE plpgsql; + +-- Get volumes function +CREATE OR REPLACE FUNCTION get_volumes(pid INTEGER) +RETURNS JSON[] AS $$ +DECLARE + volumes JSON[]; +BEGIN + SELECT ARRAY (SELECT row_to_json(t) FROM (SELECT * FROM public.volume WHERE instance_id = pid) t) INTO volumes; + return volumes; +END +$$ LANGUAGE plpgsql; + +-- Get port function +CREATE OR REPLACE FUNCTION get_attribute(attr_name VARCHAR, inst_id INTEGER) +RETURNS VARCHAR AS $$ +DECLARE + res VARCHAR; +BEGIN + SELECT value FROM public.attribute A WHERE A.instance_id = inst_id AND A.name = attr_name INTO res; + return res; +END +$$ LANGUAGE plpgsql; + + +-- Get directories function +CREATE OR REPLACE FUNCTION get_directories(inst_name VARCHAR, type VARCHAR, version VARCHAR, port VARCHAR) +RETURNS TABLE (basedir VARCHAR, bindir VARCHAR, datadir VARCHAR, logdir VARCHAR, socket VARCHAR) AS $$ +BEGIN + IF type = 'MYSQL' THEN + RETURN QUERY SELECT + ('/usr/local/mysql/mysql-' || version)::VARCHAR basedir, + 
+
+-- Get directories function
+CREATE OR REPLACE FUNCTION get_directories(inst_name VARCHAR, type VARCHAR, version VARCHAR, port VARCHAR)
+RETURNS TABLE (basedir VARCHAR, bindir VARCHAR, datadir VARCHAR, logdir VARCHAR, socket VARCHAR) AS $$
+BEGIN
+    IF type = 'MYSQL' THEN
+        RETURN QUERY SELECT
+            ('/usr/local/mysql/mysql-' || version)::VARCHAR basedir,
+            ('/usr/local/mysql/mysql-' || version || '/bin')::VARCHAR bindir,
+            ('/ORA/dbs03/' || upper(inst_name) || '/mysql')::VARCHAR datadir,
+            ('/ORA/dbs02/' || upper(inst_name) || '/mysql')::VARCHAR logdir,
+            ('/var/lib/mysql/mysql.sock.' || lower(inst_name) || '.' || port)::VARCHAR socket;
+    ELSIF type = 'PG' THEN
+        RETURN QUERY SELECT
+            ('/usr/local/pgsql/pgsql-' || version)::VARCHAR basedir,
+            ('/usr/local/pgsql/pgsql-' || version || '/bin')::VARCHAR bindir,
+            ('/ORA/dbs03/' || upper(inst_name) || '/data')::VARCHAR datadir,
+            ('/ORA/dbs02/' || upper(inst_name) || '/pg_xlog')::VARCHAR logdir,
+            ('/var/lib/pgsql/')::VARCHAR socket;
+    END IF;
+END
+$$ LANGUAGE plpgsql;
+
+CREATE VIEW api.instance AS
+SELECT * FROM public.dod_instances;
+
+CREATE VIEW api.volume AS
+SELECT * FROM public.volume;
+
+CREATE VIEW api.attribute AS
+SELECT * FROM public.attribute;
+
+CREATE VIEW api.host AS
+SELECT * FROM public.host;
+
+-- Metadata View
+CREATE OR REPLACE VIEW api.metadata AS
+SELECT id, username, db_name, category, db_type, version, host, get_attribute('port', id) port, get_volumes volumes, d.*
+FROM dod_instances, get_volumes(id), get_directories(db_name, db_type, version, get_attribute('port', id)) d;
+
+-- Rundeck instances View
+CREATE OR REPLACE VIEW api.rundeck_instances AS
+SELECT public.dod_instances.db_name,
+       public.functional_aliases.alias hostname,
+       public.get_attribute('port', public.dod_instances.id) port,
+       'dbod' username,
+       public.dod_instances.db_type db_type,
+       public.dod_instances.category category,
+       db_type || ',' || category tags
+FROM public.dod_instances JOIN public.functional_aliases ON
+public.dod_instances.db_name = public.functional_aliases.db_name;
+
+-- Host aliases View
+CREATE OR REPLACE VIEW api.host_aliases AS
+SELECT host, string_agg('dbod-' || db_name || 'domain', E',') aliases
+FROM dod_instances
+GROUP BY host;
+
+-- Functional aliases view
+CREATE OR REPLACE VIEW api.functional_aliases AS
+SELECT *
+FROM functional_aliases;
diff --git a/dbod/tests/entity_test.py b/dbod/tests/entity_test.py
new file mode 100644
index 0000000..24633f9
--- /dev/null
+++ b/dbod/tests/entity_test.py
@@ -0,0 +1,242 @@
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+
+# Copyright (C) 2015, CERN
+# This software is distributed under the terms of the GNU General Public
+# Licence version 3 (GPL Version 3), copied verbatim in the file "COPYING".
+# In applying this license, CERN does not waive the privileges and immunities
+# granted to it by virtue of its status as Intergovernmental Organization
+# or submit itself to any jurisdiction.
+
+import tornado.web
+import json
+import sys
+import logging
+
+from tornado.testing import AsyncHTTPTestCase
+from timeout_decorator import timeout
+
+from dbod.api.api import *
+
+logging.basicConfig(stream=sys.stdout, level=logging.DEBUG)
+
+class EntityTest(AsyncHTTPTestCase):
+    def get_app(self):
+        return tornado.web.Application(handlers, debug=True)
+
+    @timeout(5)
+    def test_create_entity(self):
+        ### Creation of a new instance with a valid payload
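+        # Clean up any leftover "testdb" instance from a previous run so the
+        # creation below starts from a known state.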
+        response = self.fetch("/api/v1/entity/testdb", method='DELETE')
+
+        entity = """{
+        "username": "testuser", "category": "TEST", "creation_date":"2016-07-20",
+        "version": "5.6.17", "db_type": "MYSQL", "port": "5505", "host": "testhost", "db_name": "testdb",
+        "volumes": [
+            {"vgroup": "ownergroup", "file_mode": "0755", "server": "NAS-server", "mount_options": "rw,bg,hard",
+            "owner": "TSM", "mounting_path": "/MNT/data1"},
+            {"vgroup": "ownergroup", "file_mode": "0755", "server": "NAS-server", "mount_options": "rw,bg,hard",
+            "owner": "TSM", "mounting_path": "/MNT/bin"}
+        ]}"""
+
+        # Create the instance
+        response = self.fetch("/api/v1/entity/create", method='POST', body=entity)
+        self.assertEquals(response.code, 201)
+
+        # Check the metadata for this new instance
+        response = self.fetch("/api/v1/metadata/entity/testdb")
+        self.assertEquals(response.code, 200)
+        data = json.loads(response.body)["response"]
+        self.assertEquals(data[0]["db_name"], "testdb")
+        self.assertEquals(len(data[0]["volumes"]), 2)
+        self.assertEquals(data[0]["port"], "5505")  # Reminder: the port is saved as a String in DB
+
+        # Delete the created instance
+        response = self.fetch("/api/v1/entity/testdb", method='DELETE')
+        self.assertEquals(response.code, 204)
+
+        # Check again, the metadata should be empty
+        response = self.fetch("/api/v1/metadata/entity/testdb")
+        self.assertEquals(response.code, 404)
+
+    @timeout(5)
+    def test_create_existing_entity(self):
+        ### Creation of an entity that already exists
+        entity = """{
+        "username": "testuser", "category": "TEST", "creation_date":"2016-07-20",
+        "version": "5.6.17", "db_type": "MYSQL", "port": "5505", "host": "testhost", "db_name": "dbod01",
+        "volumes": [
+            {"vgroup": "ownergroup", "file_mode": "0755", "server": "NAS-server", "mount_options": "rw,bg,hard",
+            "owner": "TSM", "mounting_path": "/MNT/data1"},
+            {"vgroup": "ownergroup", "file_mode": "0755", "server": "NAS-server", "mount_options": "rw,bg,hard",
+            "owner": "TSM", "mounting_path": "/MNT/bin"}
+        ]}"""
+
+        # Create the instance
+        response = self.fetch("/api/v1/entity/create", method='POST', body=entity)
+        self.assertEquals(response.code, 409)
+
+    @timeout(5)
+    def test_create_entity_invalid_fields(self):
+        ### Creation of an entity with a missing required field (db_type)
+        entity = """{
+        "username": "testuser", "category": "TEST", "creation_date":"2016-07-20",
+        "version": "5.6.17", "port": "5505", "host": "testhost", "db_name": "very_long_name",
+        "volumes": [
+            {"vgroup": "ownergroup", "file_mode": "0755", "server": "NAS-server", "mount_options": "rw,bg,hard",
+            "owner": "TSM", "mounting_path": "/MNT/data1"},
+            {"vgroup": "ownergroup", "file_mode": "0755", "server": "NAS-server", "mount_options": "rw,bg,hard",
+            "owner": "TSM", "mounting_path": "/MNT/bin"}
+        ]}"""
+
+        # Create the instance
+        response = self.fetch("/api/v1/entity/create", method='POST', body=entity)
+        self.assertEquals(response.code, 400)
+
+    @timeout(5)
+    def test_create_entity_no_port(self):
+        ### Creation of an entity without a port
+        entity = """{
+        "username": "testuser", "category": "TEST", "creation_date":"2016-07-20",
+        "version": "5.6.17", "db_type": "MYSQL", "host": "testhost", "db_name": "very_long_name",
+        "volumes": [
+            {"vgroup": "ownergroup", "file_mode": "0755", "server": "NAS-server", "mount_options": "rw,bg,hard",
+            "owner": "TSM", "mounting_path": "/MNT/data1"},
+            {"vgroup": "ownergroup", "file_mode": "0755", "server": "NAS-server", "mount_options": "rw,bg,hard",
+            "owner": "TSM", "mounting_path": "/MNT/bin"}
+        ]}"""
+
+        # Create the instance
+        response = self.fetch("/api/v1/entity/create", method='POST', body=entity)
+        self.assertEquals(response.code, 400)
+
+    @timeout(5)
+    def test_create_entity_no_volumes(self):
+        ### Creation of an entity without volumes
+        entity = """{
+        "username": "testuser", "category": "TEST", "creation_date":"2016-07-20",
+        "version": "5.6.17", "db_type": "MYSQL", "port": "5505", "host": "testhost", "db_name": "very_long_name"}"""
+
+        # Create the instance
+        response = self.fetch("/api/v1/entity/create", method='POST', body=entity)
+        self.assertEquals(response.code, 400)
+
+    @timeout(5)
+    def test_edit_entity_username(self):
+        ### Edit the username correctly
+        entity = """{"username": "newuser"}"""
+        restore = """{"username": "user01"}"""
+
+        # Edit the instance
+        response = self.fetch("/api/v1/entity/dbod01", method='PUT', body=entity)
+        self.assertEquals(response.code, 204)
+
+        # Check the metadata for this instance
+        response = self.fetch("/api/v1/metadata/entity/dbod01")
+        self.assertEquals(response.code, 200)
+        data = json.loads(response.body)["response"]
+        self.assertEquals(data[0]["username"], "newuser")
+
+        # Restore the instance
+        response = self.fetch("/api/v1/entity/dbod01", method='PUT', body=restore)
+        self.assertEquals(response.code, 204)
+
+    @timeout(5)
+    def test_edit_entity_dbname(self):
+        ### Edit the dbname correctly
+        entity = """{"db_name": "newdb01"}"""
+        restore = """{"db_name": "dbod01"}"""
+
+        # Edit the instance
+        response = self.fetch("/api/v1/entity/dbod01", method='PUT', body=entity)
+        self.assertEquals(response.code, 204)
+
+        # Check the metadata for this instance
+        response = self.fetch("/api/v1/metadata/entity/newdb01")
+        self.assertEquals(response.code, 200)
+        data = json.loads(response.body)["response"]
+        self.assertEquals(data[0]["db_name"], "newdb01")
+
+        # Restore the instance
+        response = self.fetch("/api/v1/entity/newdb01", method='PUT', body=restore)
+        self.assertEquals(response.code, 204)
+
+    @timeout(5)
+    def test_edit_entity_port(self):
+        ### Edit the port correctly
+        entity = """{"port": "3005"}"""
+        restore = """{"port": "5501"}"""
+
+        # Edit the instance
+        response = self.fetch("/api/v1/entity/dbod01", method='PUT', body=entity)
+        self.assertEquals(response.code, 204)
+
+        # Check the metadata for this instance
+        response = self.fetch("/api/v1/metadata/entity/dbod01")
+        self.assertEquals(response.code, 200)
+        data = json.loads(response.body)["response"]
+        self.assertEquals(data[0]["port"], "3005")
+
+        # Restore the instance
+        response = self.fetch("/api/v1/entity/dbod01", method='PUT', body=restore)
+        self.assertEquals(response.code, 204)
+
+    @timeout(5)
+    def test_edit_entity_port_and_host(self):
+        ### Edit the host and port correctly
+        entity = """{"port": "3005", "host": "newhost"}"""
+        restore = """{"port": "5501", "host": "host01"}"""
+
+        # Edit the instance
+        response = self.fetch("/api/v1/entity/dbod01", method='PUT', body=entity)
+        self.assertEquals(response.code, 204)
+
+        # Check the metadata for this instance
+        response = self.fetch("/api/v1/metadata/entity/dbod01")
+        self.assertEquals(response.code, 200)
+        data = json.loads(response.body)["response"]
+        self.assertEquals(data[0]["port"], "3005")
+        self.assertEquals(data[0]["host"], "newhost")
+
+        # Restore the instance
+        response = self.fetch("/api/v1/entity/dbod01", method='PUT', body=restore)
+        self.assertEquals(response.code, 204)
"file_mode": "0755", "server": "NAS-server", "mount_options": "rw,bg,hard", + "owner": "TSM", "mounting_path": "/MNT/data1"}, + {"vgroup": "testgroup", "file_mode": "0755", "server": "NAS-server", "mount_options": "rw,bg,hard", + "owner": "TSM", "mounting_path": "/MNT/test"}, + {"vgroup": "testgroup", "file_mode": "0755", "server": "NAS-server", "mount_options": "rw,bg,hard", + "owner": "TSM", "mounting_path": "/MNT/bin"} + ]}""" + restore = """{"volumes": [ + {"vgroup": "ownergroup", "file_mode": "0755", "server": "NAS-server", "mount_options": "rw,bg,hard", + "owner": "TSM", "mounting_path": "/MNT/data1"}, + {"vgroup": "ownergroup", "file_mode": "0755", "server": "NAS-server", "mount_options": "rw", + "owner": "TSM", "mounting_path": "/MNT/bin"} + ]}""" + + # Edit the instance + response = self.fetch("/api/v1/entity/dbod01", method='PUT', body=entity) + self.assertEquals(response.code, 204) + + # Check the metadata for this instance + response = self.fetch("/api/v1/metadata/entity/dbod01") + self.assertEquals(response.code, 200) + data = json.loads(response.body)["response"] + self.assertEquals(len(data[0]["volumes"]), 3) + self.assertEquals(data[0]["volumes"][0]["vgroup"], "testgroup") + self.assertEquals(data[0]["volumes"][1]["vgroup"], "testgroup") + self.assertEquals(data[0]["volumes"][2]["vgroup"], "testgroup") + self.assertEquals(data[0]["volumes"][1]["mounting_path"], "/MNT/test") + + # Restore the instance + response = self.fetch("/api/v1/entity/dbod01", method='PUT', body=restore) + self.assertEquals(response.code, 204) + + \ No newline at end of file diff --git a/dbod/tests/functionalalias_test.py b/dbod/tests/functionalalias_test.py new file mode 100644 index 0000000..a19baf4 --- /dev/null +++ b/dbod/tests/functionalalias_test.py @@ -0,0 +1,137 @@ +#!/usr/bin/env python +'''Testing functional alias endpoint''' +# -*- coding: utf-8 -*- + +# Copyright (C) 2015, CERN +# This software is distributed under the terms of the GNU General Public +# Licence version 3 (GPL Version 3), copied verbatim in the file "COPYING". +# In applying this license, CERN does not waive the privileges and immunities +# granted to it by virtue of its status as Intergovernmental Organization +# or submit itself to any jurisdiction. 
+
+import json
+from timeout_decorator import timeout
+import tornado.web
+
+from tornado.testing import AsyncHTTPTestCase
+
+from dbod.api.api import handlers
+
+class FunctionalAliasTest(AsyncHTTPTestCase):
+    '''Tests for the functional alias endpoint, run with nosetests'''
+    headers = {'Content-Type': 'application/x-www-form-urlencoded',
+               'Prefer': 'return=representation',
+               'Accept': 'text/json'}
+
+    def get_app(self):
+        return tornado.web.Application(handlers)
+
+    @timeout(5)
+    def test_get_single_alias_by_name(self):
+        '''test getting the alias data for an existing db_name'''
+        db_name_test = 'dbod_01'
+        response = self.fetch("/api/v1/entity/alias/%s" %(db_name_test))
+        data = json.loads(response.body)["response"]
+        self.assertEquals(response.code, 200)
+        self.assertEquals(len(data), 1)
+        self.assertEquals(data[0]["alias"], "dbod-dbod-01.cern.ch")
+        self.assertTrue(data[0]["dns_name"] != None)
+
+    @timeout(5)
+    def test_novalid_db(self):
+        '''test when the given db does not exist'''
+        response = self.fetch("/api/v1/entity/alias/some_db")
+        self.assertEquals(response.code, 404)
+
+    @timeout(5)
+    def test_invalid_endpoint(self):
+        '''test when the given endpoint does not exist'''
+        response = self.fetch("/api/v1/entity/something/some_db")
+        self.assertEquals(response.code, 404)
+
+    @timeout(5)
+    def test_post_valid_request(self):
+        '''test when the arguments are valid and the dns_name is available'''
+        body = 'functional_alias={"dbod_42": "dbod-dbod-42.cern.ch"}'
+        response = self.fetch("/api/v1/entity/alias/",
+                              method="POST",
+                              body=body,
+                              headers=self.headers)
+        self.assertEquals(response.code, 200)
+        self.fetch("/api/v1/entity/alias/dbod_42",
+                   method="DELETE")
+
+    @timeout(5)
+    def test_post_duplicate(self):
+        '''test a request to insert a db_name which already exists'''
+        body = 'functional_alias={"dbod_01": "dbod-dbod-01.cern.ch"}'
+        response = self.fetch("/api/v1/entity/alias/",
+                              method="POST",
+                              body=body,
+                              headers=self.headers)
+        self.assertEquals(response.code, 409)
+
+    @timeout(5)
+    def test_post_no_dns(self):
+        '''test when there is no dns_name available'''
+        body = 'functional_alias={"dbod_42": "dbod-dbod-42.cern.ch"}'
+        self.fetch("/api/v1/entity/alias/",
+                   method="POST",
+                   body=body,
+                   headers=self.headers)
+        body = 'functional_alias={"dbod_42": "dbod-dbod-42.cern.ch"}'
+        response = self.fetch("/api/v1/entity/alias/",
+                              method="POST",
+                              body=body,
+                              headers=self.headers)
+        self.assertEquals(response.code, 400)
+        self.fetch("/api/v1/entity/alias/dbod_42",
+                   method="DELETE")
+
+    @timeout(5)
+    def test_post_no_valid_argument(self):
+        '''test when the name of the provided argument is not valid'''
+        body = 'something={"dbod_42": "dbod-dbod-42.cern.ch"}'
+        response = self.fetch("/api/v1/entity/alias/",
+                              method="POST",
+                              body=body,
+                              headers=self.headers)
+        self.assertEquals(response.code, 404)
+
+    @timeout(5)
+    def test_post_no_valid_headers(self):
+        '''test when the required headers are missing'''
+        body = 'something={"dbod_42": "dbod-dbod-42.cern.ch"}'
+        response = self.fetch("/api/v1/entity/alias/",
+                              method="POST",
+                              body=body)
+        self.assertEquals(response.code, 404)
+
+    @timeout(5)
+    def test_post_bad_argument(self):
+        '''test when the argument value is not valid JSON'''
+        body = 'functional_alias={dbod_42: dbod-dbod-42.cern.ch}'
+        response = self.fetch("/api/v1/entity/alias/",
+                              method="POST",
+                              body=body,
+                              headers=self.headers)
+        self.assertEquals(response.code, 404)
+
+    @timeout(5)
+    def test_delete_valid_request(self):
+        '''test a valid request to delete a previously inserted db_name'''
+        body = 'functional_alias={"dbod_42": "dbod-dbod-42.cern.ch"}'
+        response = self.fetch("/api/v1/entity/alias/",
+                              method="POST",
+                              body=body,
+                              headers=self.headers)
+        response = self.fetch("/api/v1/entity/alias/dbod_42",
+                              method="DELETE")
+        self.assertEquals(response.code, 200)
+
+    @timeout(5)
+    def test_delete_invalid_dbname(self):
+        '''test when the given db_name does not exist'''
+        response = self.fetch("/api/v1/entity/alias/dbod_42",
+                              method="DELETE")
+        self.assertEquals(response.code, 400)
diff --git a/dbod/tests/metadata_test.py b/dbod/tests/metadata_test.py
new file mode 100644
index 0000000..0b30b79
--- /dev/null
+++ b/dbod/tests/metadata_test.py
@@ -0,0 +1,71 @@
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+
+# Copyright (C) 2015, CERN
+# This software is distributed under the terms of the GNU General Public
+# Licence version 3 (GPL Version 3), copied verbatim in the file "COPYING".
+# In applying this license, CERN does not waive the privileges and immunities
+# granted to it by virtue of its status as Intergovernmental Organization
+# or submit itself to any jurisdiction.
+
+import tornado.web
+import json
+
+from tornado.testing import AsyncHTTPTestCase
+from timeout_decorator import timeout
+
+from dbod.api.api import *
+
+class MetadataTest(AsyncHTTPTestCase):
+    def get_app(self):
+        return tornado.web.Application(handlers)
+
+    @timeout(5)
+    def test_single_instance_by_name(self):
+        response = self.fetch("/api/v1/metadata/entity/dbod01")
+        data = json.loads(response.body)["response"]
+        self.assertEquals(response.code, 200)
+        self.assertEquals(len(data), 1)
+        self.assertEquals(data[0]["db_name"], "dbod01")
+        self.assertTrue(data[0]["volumes"] != None)
+        self.assertTrue(data[0]["host"] != None)
+
+    @timeout(5)
+    def test_no_instance_by_name(self):
+        response = self.fetch("/api/v1/metadata/entity/invalid")
+        self.assertEquals(response.code, 404)
+
+    @timeout(5)
+    def test_single_instance_by_host(self):
+        response = self.fetch("/api/v1/metadata/host/host03")
+        data = json.loads(response.body)["response"]
+        self.assertEquals(response.code, 200)
+        self.assertEquals(len(data), 1)
+        self.assertTrue(data[0]["volumes"] != None)
+        self.assertEquals(data[0]["host"], "host03")
+
+    @timeout(5)
+    def test_multiple_instances_by_host(self):
+        response = self.fetch("/api/v1/metadata/host/host01")
+        data = json.loads(response.body)["response"]
+        self.assertEquals(response.code, 200)
+        self.assertEquals(len(data), 4)
+        seen = []  # db_names seen so far; each instance must appear only once
+        for i in range(4):
+            self.assertEquals(data[i]["host"], "host01")
+            self.assertTrue(data[i]["volumes"] != None)
+            self.assertNotIn(data[i]["db_name"], seen)
+            seen.append(data[i]["db_name"])
+        self.assertEquals(len(seen), 4)
+
+    @timeout(5)
+    def test_no_instance_by_host(self):
+        response = self.fetch("/api/v1/metadata/host/invalid")
+        self.assertEquals(response.code, 404)
+
+    @timeout(5)
+    def test_invalid_class(self):
+        response = self.fetch("/api/v1/metadata/invalid/invalid")
+        self.assertEquals(response.code, 404)
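+
+# Note: the payloads checked above are expected to come from the api.metadata
+# view created in dbod/tests/db_test.sql, served through PostgREST.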
diff --git a/dbod/tests/db_tests.py b/dbod/tests/rundeck_test.py
similarity index 63%
rename from dbod/tests/db_tests.py
rename to dbod/tests/rundeck_test.py
index 4d3b452..752afa1 100644
--- a/dbod/tests/db_tests.py
+++ b/dbod/tests/rundeck_test.py
@@ -1,5 +1,3 @@
-#!/usr/bin/env python
-# -*- coding: utf-8 -*-
 
 # Copyright (C) 2015, CERN
 # This software is distributed under the terms of the GNU General Public
@@ -8,17 +6,27 @@
 # granted to it by virtue of its status as Intergovernmental Organization
 # or submit itself to any jurisdiction.
 
-import nose
+import unittest
 from types import *
 import json
 import logging
 import sys
+import requests
 
 logging.basicConfig(stream=sys.stdout, level=logging.DEBUG)
 
-from dbod.api.dbops import *
+# Placeholder test class for the Rundeck endpoints; actual tests still to come.
+class Rundeck(unittest.TestCase):
+    @classmethod
+    def setUpClass(cls):
+        pass
 
-def empty():
-    assert(True)
+    @classmethod
+    def tearDownClass(cls):
+        pass
+
+    def setUp(self):
+        pass
 
-if __name__ == "__main__":
-    pass
+    def tearDown(self):
+        pass
+
\ No newline at end of file
diff --git a/requirements.txt b/requirements.txt
index ca4de0f..e3e6942 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -1,6 +1,6 @@
 configparser
 nose
-psycopg2
-tornado
+tornado==4.2
 virtualenv
 requests
+timeout-decorator
\ No newline at end of file
diff --git a/setup.py b/setup.py
index 3c4df63..95907d6 100644
--- a/setup.py
+++ b/setup.py
@@ -29,7 +29,6 @@
 requires=[
     'ConfigParser',
     'tornado',
-    'psycopg2',
     'nose',
     'mock',
     'requests',
diff --git a/sql/views.sql b/sql/views.sql
new file mode 100644
index 0000000..18fcb38
--- /dev/null
+++ b/sql/views.sql
@@ -0,0 +1,96 @@
+
+-- Drop the view and functions so that they can be recreated
+DROP VIEW api.test_metadata;
+DROP FUNCTION get_hosts(INTEGER[]);
+DROP FUNCTION get_volumes(INTEGER);
+
+-- Job stats view
+CREATE OR REPLACE VIEW job_stats AS
+SELECT db_name, command_name, COUNT(*) AS count, ROUND(AVG(completion_date - creation_date) * 24*60*60) AS mean_duration
+FROM dod_jobs GROUP BY command_name, db_name;
+
+-- Command stats view
+CREATE OR REPLACE VIEW command_stats AS
+SELECT command_name, COUNT(*) AS count, ROUND(AVG(completion_date - creation_date) * 24*60*60) AS mean_duration
+FROM dod_jobs GROUP BY command_name;
+
+-- Get hosts function
+CREATE OR REPLACE FUNCTION get_hosts(host_ids INTEGER[])
+RETURNS VARCHAR[] AS $$
+DECLARE
+    hosts VARCHAR[];  -- array type, matching the ARRAY(...) result below
+BEGIN
+    SELECT ARRAY (SELECT name FROM host WHERE id = ANY(host_ids)) INTO hosts;
+    RETURN hosts;
+END
+$$ LANGUAGE plpgsql;
+
+-- Get volumes function
+CREATE OR REPLACE FUNCTION get_volumes(pid INTEGER)
+RETURNS JSON[] AS $$
+DECLARE
+    volumes JSON[];
+BEGIN
+    SELECT ARRAY (SELECT row_to_json(t) FROM (SELECT * FROM public.volume WHERE instance_id = pid) t) INTO volumes;
+    RETURN volumes;
+END
+$$ LANGUAGE plpgsql;
+
+-- Get attribute function
+CREATE OR REPLACE FUNCTION get_attribute(attr_name VARCHAR, inst_id INTEGER)
+RETURNS VARCHAR AS $$
+DECLARE
+    res VARCHAR;
+BEGIN
+    SELECT value FROM public.attribute A WHERE A.instance_id = inst_id AND A.name = attr_name INTO res;
+    RETURN res;
+END
+$$ LANGUAGE plpgsql;
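+
+-- The attr_name/inst_id parameter names deliberately differ from the columns
+-- of the attribute table, so the references in the WHERE clause above are not
+-- ambiguous. Illustrative call (hypothetical id): SELECT get_attribute('port', 42);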
+
+-- Get directories function
+CREATE OR REPLACE FUNCTION get_directories(inst_name VARCHAR, type VARCHAR, version VARCHAR, port VARCHAR)
+RETURNS TABLE (basedir VARCHAR, bindir VARCHAR, datadir VARCHAR, logdir VARCHAR, socket VARCHAR) AS $$
+BEGIN
+    IF type = 'MYSQL' THEN
+        RETURN QUERY SELECT
+            ('/usr/local/mysql/mysql-' || version)::VARCHAR basedir,
+            ('/usr/local/mysql/mysql-' || version || '/bin')::VARCHAR bindir,
+            ('/ORA/dbs03/' || upper(inst_name) || '/mysql')::VARCHAR datadir,
+            ('/ORA/dbs02/' || upper(inst_name) || '/mysql')::VARCHAR logdir,
+            ('/var/lib/mysql/mysql.sock.' || lower(inst_name) || '.' || port)::VARCHAR socket;
+    ELSIF type = 'PG' THEN
+        RETURN QUERY SELECT
+            ('/usr/local/pgsql/pgsql-' || version)::VARCHAR basedir,
+            ('/usr/local/pgsql/pgsql-' || version || '/bin')::VARCHAR bindir,
+            ('/ORA/dbs03/' || upper(inst_name) || '/data')::VARCHAR datadir,
+            ('/ORA/dbs02/' || upper(inst_name) || '/pg_xlog')::VARCHAR logdir,
+            ('/var/lib/pgsql/')::VARCHAR socket;
+    END IF;
+END
+$$ LANGUAGE plpgsql;
+
+
+-- Metadata View
+CREATE OR REPLACE VIEW api.metadata AS
+SELECT id, username, db_name, category, db_type, version, host, get_attribute('port', id) port, get_volumes volumes, d.*
+FROM dod_instances, get_volumes(id), get_directories(db_name, db_type, version, get_attribute('port', id)) d;
+
+-- Rundeck instances View
+CREATE OR REPLACE VIEW api.rundeck_instances AS
+SELECT public.dod_instances.db_name,
+       public.functional_aliases.alias hostname,
+       public.get_attribute('port', public.dod_instances.id) port,
+       'dbod' username,
+       public.dod_instances.db_type db_type,
+       public.dod_instances.category category,
+       db_type || ',' || category tags
+FROM public.dod_instances JOIN public.functional_aliases ON
+public.dod_instances.db_name = public.functional_aliases.db_name;
+
+-- Host aliases View
+CREATE OR REPLACE VIEW api.host_aliases AS
+SELECT host, array_agg('dbod-' || db_name || '.cern.ch') aliases
+FROM dod_instances
+GROUP BY host;
+
diff --git a/static/api.cfg b/static/api.cfg
index f36194b..785d0e3 100644
--- a/static/api.cfg
+++ b/static/api.cfg
@@ -1,22 +1,35 @@
 [server]
 port=5443
-[database]
-user=travis
-host=localhost
-port=5432
-database=travis
-password=travis
+
 [cache]
 path=/etc/dbod/cache/metadata.json
-[ssl]
-hostcert=/etc/dbod/hostcert.pem
-hostkey=/etc/dbod/hostkey.pem
+
 [logging]
 path=/var/log/dbod/api.log
+level=debug
+stderr=true
+
+[tornado]
+debug=true
+
 [api]
 user=api-user
-password=api-password
+pass=api-password
+
 [postgrest]
 rundeck_resources_url=http://localhost:3000/rundeck_instances
 host_aliases_url=http://localhost:3000/host_aliases
 entity_metadata_url=http://localhost:3000/metadata
+instance_url=http://localhost:3000/instance
+volume_url=http://localhost:3000/volume
+attribute_url=http://localhost:3000/attribute
+functional_alias_url=http://localhost:3000/functional_aliases
+
+[rundeck]
+api_run_job = https://rundeck/api/14/job/{0}/run?format=json
+api_job_output = https://rundeck/api/14/execution/{0}/output?format=json
+api_authorization = Basic abcdefghijklm
+
+[rundeck-jobs]
+get-snapshots = d4072a88-b7fc-4e0b-bd0a-06e2d97e16dd
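+
+# Illustrative note (not part of the original file): the {0} placeholders in
+# [rundeck] are expected to be filled with a job or execution id, e.g.
+#   https://rundeck/api/14/job/d4072a88-b7fc-4e0b-bd0a-06e2d97e16dd/run?format=json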