Update tox.ini files from release-tools gold copy
All OpenStack Charms now contain identical tox.ini files,
which are not to be modified or made unique within each
charm repo.

This ensures consistency across charm repos in tox target
naming, approach and purpose, and gives charm developers
and testers a more uniform experience.

Also create empty dirs with .keep files where necessary.
Some classic charms have actions and/or lib dirs, and some
do not. In all classic charms, flake8 will now check those
dirs to ensure lint coverage of existing or future content.
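As a purely illustrative sketch (the authoritative content lives in the
release-tools gold copy, so the real target may differ), the lint target in a
classic charm's tox.ini takes roughly this shape, with actions and lib listed
so flake8 covers them even when they only hold a .keep file:

    [testenv:pep8]
    basepython = python2.7
    deps = -r{toxinidir}/test-requirements.txt
    commands = flake8 {posargs} hooks unit_tests tests actions lib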

Fix Amulet test connection check and resync charm-helpers
to pull in rabbitmq-server amulet test helper update.

Add osplatform to the charm-helpers sync yaml to meet new
import requirements in the charm-helpers core and fetch
modules.

Disable precise tests until the removal of the precise
nrpe charm is resolved in the charm-store.

Disable SSL tests for <= trusty targets from xenial hosts.

Related-Bug: 1625044

Change-Id: I7d9308d222928e64a1a34b1b209d9bfd25738446
Ryan Beisner authored and javacruft committed Sep 19, 2016
1 parent 87afc44 commit 5dee299
Showing 54 changed files with 4,770 additions and 398 deletions.
1 change: 1 addition & 0 deletions charm-helpers-hooks.yaml
@@ -4,6 +4,7 @@ include:
- fetch
- core
- cli
- osplatform
- contrib.charmsupport
- contrib.openstack|inc=*
- contrib.storage
2 changes: 2 additions & 0 deletions charm-helpers-tests.yaml
@@ -3,3 +3,5 @@ branch: lp:charm-helpers
include:
- contrib.amulet
- contrib.openstack.amulet
- core
- osplatform
42 changes: 33 additions & 9 deletions hooks/charmhelpers/contrib/charmsupport/nrpe.py
@@ -38,6 +38,7 @@
)

from charmhelpers.core.host import service
from charmhelpers.core import host

# This module adds compatibility with the nrpe-external-master and plain nrpe
# subordinate charms. To use it in your charm:
@@ -108,6 +109,13 @@
# def local_monitors_relation_changed():
# update_nrpe_config()
#
# 4.a If your charm is a subordinate charm set primary=False
#
# from charmsupport.nrpe import NRPE
# (...)
# def update_nrpe_config():
# nrpe_compat = NRPE(primary=False)
#
# 5. ln -s hooks.py nrpe-external-master-relation-changed
# ln -s hooks.py local-monitors-relation-changed

@@ -220,9 +228,10 @@ class NRPE(object):
nagios_exportdir = '/var/lib/nagios/export'
nrpe_confdir = '/etc/nagios/nrpe.d'

def __init__(self, hostname=None):
def __init__(self, hostname=None, primary=True):
super(NRPE, self).__init__()
self.config = config()
self.primary = primary
self.nagios_context = self.config['nagios_context']
if 'nagios_servicegroups' in self.config and self.config['nagios_servicegroups']:
self.nagios_servicegroups = self.config['nagios_servicegroups']
@@ -238,6 +247,12 @@ def __init__(self, hostname=None):
else:
self.hostname = "{}-{}".format(self.nagios_context, self.unit_name)
self.checks = []
# Iff in an nrpe-external-master relation hook, set primary status
relation = relation_ids('nrpe-external-master')
if relation:
log("Setting charm primary status {}".format(primary))
for rid in relation_ids('nrpe-external-master'):
relation_set(relation_id=rid, relation_settings={'primary': self.primary})

def add_check(self, *args, **kwargs):
self.checks.append(Check(*args, **kwargs))
@@ -332,16 +347,25 @@ def add_init_service_checks(nrpe, services, unit_name):
:param str unit_name: Unit name to use in check description
"""
for svc in services:
# Don't add a check for these services from neutron-gateway
if svc in ['ext-port', 'os-charm-phy-nic-mtu']:
next

upstart_init = '/etc/init/%s.conf' % svc
sysv_init = '/etc/init.d/%s' % svc
if os.path.exists(upstart_init):
# Don't add a check for these services from neutron-gateway
if svc not in ['ext-port', 'os-charm-phy-nic-mtu']:
nrpe.add_check(
shortname=svc,
description='process check {%s}' % unit_name,
check_cmd='check_upstart_job %s' % svc
)

if host.init_is_systemd():
nrpe.add_check(
shortname=svc,
description='process check {%s}' % unit_name,
check_cmd='check_systemd.py %s' % svc
)
elif os.path.exists(upstart_init):
nrpe.add_check(
shortname=svc,
description='process check {%s}' % unit_name,
check_cmd='check_upstart_job %s' % svc
)
elif os.path.exists(sysv_init):
cronpath = '/etc/cron.d/nagios-service-check-%s' % svc
cron_file = ('*/5 * * * * root '
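For orientation, a hedged sketch of how a charm hook might drive these helpers
after this change; the hostname/unit-name lookups and the service name are
illustrative assumptions, not part of this diff:

    from charmhelpers.contrib.charmsupport import nrpe

    def update_nrpe_config():
        hostname = nrpe.get_nagios_hostname()
        current_unit = nrpe.get_nagios_unit_name()
        # A subordinate charm passes primary=False so the principal keeps the
        # primary role on the nrpe-external-master relation.
        nrpe_compat = nrpe.NRPE(hostname=hostname, primary=False)
        # On systemd hosts add_init_service_checks now emits check_systemd.py
        # checks; upstart hosts fall back to check_upstart_job, and sysvinit
        # hosts to a cron-driven check.
        nrpe.add_init_service_checks(nrpe_compat, ['my-service'], current_unit)
        nrpe_compat.write()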
119 changes: 118 additions & 1 deletion hooks/charmhelpers/contrib/openstack/amulet/utils.py
@@ -83,6 +83,56 @@ def validate_endpoint_data(self, endpoints, admin_port, internal_port,
if not found:
return 'endpoint not found'

def validate_v3_endpoint_data(self, endpoints, admin_port, internal_port,
public_port, expected):
"""Validate keystone v3 endpoint data.
Validate the v3 endpoint data which has changed from v2. The
ports are used to find the matching endpoint.
The new v3 endpoint data looks like:
[<Endpoint enabled=True,
id=0432655fc2f74d1e9fa17bdaa6f6e60b,
interface=admin,
links={u'self': u'<RESTful URL of this endpoint>'},
region=RegionOne,
region_id=RegionOne,
service_id=17f842a0dc084b928e476fafe67e4095,
url=http://10.5.6.5:9312>,
<Endpoint enabled=True,
id=6536cb6cb92f4f41bf22b079935c7707,
interface=admin,
links={u'self': u'<RESTful url of this endpoint>'},
region=RegionOne,
region_id=RegionOne,
service_id=72fc8736fb41435e8b3584205bb2cfa3,
url=http://10.5.6.6:35357/v3>,
... ]
"""
self.log.debug('Validating v3 endpoint data...')
self.log.debug('actual: {}'.format(repr(endpoints)))
found = []
for ep in endpoints:
self.log.debug('endpoint: {}'.format(repr(ep)))
if ((admin_port in ep.url and ep.interface == 'admin') or
(internal_port in ep.url and ep.interface == 'internal') or
(public_port in ep.url and ep.interface == 'public')):
found.append(ep.interface)
# note we ignore the links member.
actual = {'id': ep.id,
'region': ep.region,
'region_id': ep.region_id,
'interface': self.not_null,
'url': ep.url,
'service_id': ep.service_id, }
ret = self._validate_dict_data(expected, actual)
if ret:
return 'unexpected endpoint data - {}'.format(ret)

if len(found) != 3:
return 'Unexpected number of endpoints found'
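A hedged, self-contained sketch of what the new validator checks. In a real
amulet test the endpoint list comes from a keystone v3 client; here namedtuple
stand-ins carry the same attributes, with sample IDs borrowed from the
docstrings in this diff:

    from collections import namedtuple

    import amulet
    from charmhelpers.contrib.openstack.amulet.utils import (
        OpenStackAmuletUtils,
        DEBUG,
    )

    u = OpenStackAmuletUtils(DEBUG)
    Endpoint = namedtuple('Endpoint', ['id', 'region', 'region_id',
                                       'interface', 'url', 'service_id'])
    svc_id = '17f842a0dc084b928e476fafe67e4095'
    endpoints = [
        Endpoint('0432655fc2f74d1e9fa17bdaa6f6e60b', 'RegionOne', 'RegionOne',
                 'admin', 'http://10.5.6.5:35357/v3', svc_id),
        Endpoint('6536cb6cb92f4f41bf22b079935c7707', 'RegionOne', 'RegionOne',
                 'internal', 'http://10.5.6.5:5000/v3', svc_id),
        Endpoint('f629388955bc407f8b11d8b7ca168086', 'RegionOne', 'RegionOne',
                 'public', 'http://10.5.6.5:5000/v3', svc_id),
    ]
    expected = {'id': u.not_null,
                'region': 'RegionOne',
                'region_id': 'RegionOne',
                'interface': u.not_null,
                'url': u.not_null,
                'service_id': svc_id}
    # Ports are matched as substrings of each endpoint URL.
    ret = u.validate_v3_endpoint_data(endpoints, '35357', '5000', '5000',
                                      expected)
    if ret:
        amulet.raise_status(amulet.FAIL, msg=ret)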

def validate_svc_catalog_endpoint_data(self, expected, actual):
"""Validate service catalog endpoint data.
@@ -100,6 +150,72 @@ def validate_svc_catalog_endpoint_data(self, expected, actual):
return "endpoint {} does not exist".format(k)
return ret

def validate_v3_svc_catalog_endpoint_data(self, expected, actual):
"""Validate the keystone v3 catalog endpoint data.
Validate a list of dictinaries that make up the keystone v3 service
catalogue.
It is in the form of:
{u'identity': [{u'id': u'48346b01c6804b298cdd7349aadb732e',
u'interface': u'admin',
u'region': u'RegionOne',
u'region_id': u'RegionOne',
u'url': u'http://10.5.5.224:35357/v3'},
{u'id': u'8414f7352a4b47a69fddd9dbd2aef5cf',
u'interface': u'public',
u'region': u'RegionOne',
u'region_id': u'RegionOne',
u'url': u'http://10.5.5.224:5000/v3'},
{u'id': u'd5ca31440cc24ee1bf625e2996fb6a5b',
u'interface': u'internal',
u'region': u'RegionOne',
u'region_id': u'RegionOne',
u'url': u'http://10.5.5.224:5000/v3'}],
u'key-manager': [{u'id': u'68ebc17df0b045fcb8a8a433ebea9e62',
u'interface': u'public',
u'region': u'RegionOne',
u'region_id': u'RegionOne',
u'url': u'http://10.5.5.223:9311'},
{u'id': u'9cdfe2a893c34afd8f504eb218cd2f9d',
u'interface': u'internal',
u'region': u'RegionOne',
u'region_id': u'RegionOne',
u'url': u'http://10.5.5.223:9311'},
{u'id': u'f629388955bc407f8b11d8b7ca168086',
u'interface': u'admin',
u'region': u'RegionOne',
u'region_id': u'RegionOne',
u'url': u'http://10.5.5.223:9312'}]}
Note, that an added complication is that the order of admin, public,
internal against 'interface' in each region.
Thus, the function sorts the expected and actual lists using the
interface key as a sort key, prior to the comparison.
"""
self.log.debug('Validating v3 service catalog endpoint data...')
self.log.debug('actual: {}'.format(repr(actual)))
for k, v in six.iteritems(expected):
if k in actual:
l_expected = sorted(v, key=lambda x: x['interface'])
l_actual = sorted(actual[k], key=lambda x: x['interface'])
if len(l_actual) != len(l_expected):
return ("endpoint {} has differing number of interfaces "
" - expected({}), actual({})"
.format(k, len(l_expected), len(l_actual)))
for i_expected, i_actual in zip(l_expected, l_actual):
self.log.debug("checking interface {}"
.format(i_expected['interface']))
ret = self._validate_dict_data(i_expected, i_actual)
if ret:
return self.endpoint_error(k, ret)
else:
return "endpoint {} does not exist".format(k)
return ret
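And a matching hedged sketch for the catalog validator, reusing the 'identity'
sample data from the docstring above; the helper sorts both sides by interface,
so the ordering of the actual entries does not matter:

    from charmhelpers.contrib.openstack.amulet.utils import (
        OpenStackAmuletUtils,
        DEBUG,
    )

    u = OpenStackAmuletUtils(DEBUG)
    expected = {'identity': [{'id': u.not_null,
                              'interface': 'admin',
                              'region': 'RegionOne',
                              'region_id': 'RegionOne',
                              'url': u.not_null},
                             {'id': u.not_null,
                              'interface': 'internal',
                              'region': 'RegionOne',
                              'region_id': 'RegionOne',
                              'url': u.not_null},
                             {'id': u.not_null,
                              'interface': 'public',
                              'region': 'RegionOne',
                              'region_id': 'RegionOne',
                              'url': u.not_null}]}
    actual = {'identity': [{'id': '48346b01c6804b298cdd7349aadb732e',
                            'interface': 'admin',
                            'region': 'RegionOne',
                            'region_id': 'RegionOne',
                            'url': 'http://10.5.5.224:35357/v3'},
                           {'id': '8414f7352a4b47a69fddd9dbd2aef5cf',
                            'interface': 'public',
                            'region': 'RegionOne',
                            'region_id': 'RegionOne',
                            'url': 'http://10.5.5.224:5000/v3'},
                           {'id': 'd5ca31440cc24ee1bf625e2996fb6a5b',
                            'interface': 'internal',
                            'region': 'RegionOne',
                            'region_id': 'RegionOne',
                            'url': 'http://10.5.5.224:5000/v3'}]}
    ret = u.validate_v3_svc_catalog_endpoint_data(expected, actual)
    assert ret is None  # matches once both lists are sorted by 'interface'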

def validate_tenant_data(self, expected, actual):
"""Validate tenant data.
@@ -928,7 +1044,8 @@ def connect_amqp_by_unit(self, sentry_unit, ssl=False,
retry_delay=5,
socket_timeout=1)
connection = pika.BlockingConnection(parameters)
assert connection.server_properties['product'] == 'RabbitMQ'
assert connection.is_open is True
assert connection.is_closing is False
self.log.debug('Connect OK')
return connection
except Exception as e:
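The connection check above now asserts on the connection state directly
instead of inspecting server_properties. A minimal standalone sketch of the
same pika check, with host and credentials as placeholder assumptions:

    import pika

    credentials = pika.PlainCredentials('testuser', 'changeme')
    parameters = pika.ConnectionParameters(host='10.5.6.10', port=5672,
                                           credentials=credentials,
                                           retry_delay=5,
                                           socket_timeout=1)
    connection = pika.BlockingConnection(parameters)
    # The updated assertions: the connection is open and not shutting down.
    assert connection.is_open is True
    assert connection.is_closing is False
    connection.close()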
4 changes: 4 additions & 0 deletions hooks/charmhelpers/contrib/openstack/neutron.py
@@ -245,6 +245,10 @@ def neutron_plugins():
'networking_plumgrid.neutron.plugins.plugin.NeutronPluginPLUMgridV2')
plugins['plumgrid']['server_packages'].remove(
'neutron-plugin-plumgrid')
if release >= 'mitaka':
plugins['nsx']['server_packages'].remove('neutron-plugin-vmware')
plugins['nsx']['server_packages'].append('python-vmware-nsx')
plugins['nsx']['config'] = '/etc/neutron/nsx.ini'
return plugins


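A hedged sketch of how a charm might read the updated plugin map; note that
neutron_plugins() consults charm config and os_release, so this only works
inside a hook context, and the print is purely illustrative:

    from charmhelpers.contrib.openstack.neutron import neutron_plugins

    plugins = neutron_plugins()
    nsx = plugins['nsx']
    # On mitaka or later, server_packages now lists python-vmware-nsx instead
    # of neutron-plugin-vmware, and config points at /etc/neutron/nsx.ini.
    print(nsx['server_packages'], nsx['config'])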
29 changes: 28 additions & 1 deletion hooks/charmhelpers/contrib/openstack/utils.py
@@ -51,7 +51,8 @@
relation_set,
service_name,
status_set,
hook_name
hook_name,
application_version_set,
)

from charmhelpers.contrib.storage.linux.lvm import (
@@ -1889,3 +1890,29 @@ def config_flags_parser(config_flags):
flags[key.strip(post_strippers)] = value.rstrip(post_strippers)

return flags


def os_application_version_set(package):
'''Set version of application for Juju 2.0 and later'''
import apt_pkg as apt
cache = apt_cache()
application_version = None
application_codename = os_release(package)

try:
pkg = cache[package]
if not pkg.current_ver:
juju_log('Package {} is not currently installed.'.format(package),
DEBUG)
else:
application_version = apt.upstream_version(pkg.current_ver.ver_str)
except:
juju_log('Package {} has no installation candidate.'.format(package),
DEBUG)

# NOTE(jamespage) if not able to figure out package version, fallback to
# openstack codename version detection.
if not application_version:
application_version_set(application_codename)
else:
application_version_set(application_version)
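A hedged sketch of calling the new helper from a charm hook; the package name
and hook wiring are illustrative assumptions:

    from charmhelpers.contrib.openstack.utils import os_application_version_set

    def config_changed():
        # ... render configs, restart services ...
        # Report the installed package version to Juju 2.0+, falling back to
        # the OpenStack codename when the package version cannot be determined.
        os_application_version_set('neutron-server')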
6 changes: 6 additions & 0 deletions hooks/charmhelpers/contrib/storage/linux/ceph.py
@@ -87,6 +87,7 @@
DEFAULT_PGS_PER_OSD_TARGET = 100
DEFAULT_POOL_WEIGHT = 10.0
LEGACY_PG_COUNT = 200
DEFAULT_MINIMUM_PGS = 2


def validator(value, valid_type, valid_range=None):
@@ -266,6 +267,11 @@ def get_pgs(self, pool_size, percent_data=DEFAULT_POOL_WEIGHT):
target_pgs_per_osd = config('pgs-per-osd') or DEFAULT_PGS_PER_OSD_TARGET
num_pg = (target_pgs_per_osd * osd_count * percent_data) // pool_size

# NOTE: ensure a sane minimum number of PGS otherwise we don't get any
# reasonable data distribution in minimal OSD configurations
if num_pg < DEFAULT_MINIMUM_PGS:
num_pg = DEFAULT_MINIMUM_PGS

# The CRUSH algorithm has a slight optimization for placement groups
# with powers of 2 so find the nearest power of 2. If the nearest
# power of 2 is more than 25% below the original value, the next
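A small worked example of why the floor matters, using illustrative numbers
(3 OSDs, 3 replicas, a pool weighted at 1% of the data, 100 target PGs per
OSD):

    DEFAULT_MINIMUM_PGS = 2

    target_pgs_per_osd = 100
    osd_count = 3
    percent_data = 1 / 100.0   # pool weight expressed as a fraction
    pool_size = 3              # replica count

    num_pg = (target_pgs_per_osd * osd_count * percent_data) // pool_size  # 1.0
    if num_pg < DEFAULT_MINIMUM_PGS:
        num_pg = DEFAULT_MINIMUM_PGS  # clamp so tiny pools still spread data
    print(num_pg)  # 2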
14 changes: 14 additions & 0 deletions hooks/charmhelpers/core/hookenv.py
@@ -843,6 +843,20 @@ def inner_translate_exc2(*args, **kwargs):
return inner_translate_exc1


def application_version_set(version):
"""Charm authors may trigger this command from any hook to output what
version of the application is running. This could be a package version,
for instance postgres version 9.5. It could also be a build number or
version control revision identifier, for instance git sha 6fb7ba68. """

cmd = ['application-version-set']
cmd.append(version)
try:
subprocess.check_call(cmd)
except OSError:
log("Application Version: {}".format(version))


@translate_exc(from_exc=OSError, to_exc=NotImplementedError)
def is_leader():
"""Does the current unit hold the juju leadership
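The docstring's own examples, as a minimal hedged sketch; any version string
works, and on Juju versions without the application-version-set tool the call
simply degrades to a log line:

    from charmhelpers.core.hookenv import application_version_set

    application_version_set('9.5')       # e.g. a package version
    application_version_set('6fb7ba68')  # or a VCS revision identifier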