
Merge branch 'testing'
mickenordin committed Feb 26, 2025
2 parents a9c7ee1 + 48354f4 commit 7c5fe66
Showing 14 changed files with 128 additions and 71 deletions.
101 changes: 60 additions & 41 deletions files/scriptherder/cleanup_scriptherder
@@ -1,7 +1,59 @@
#!/usr/bin/env python3

import re
import os
import re


def decide_interval(tokens):
# Default to weekly
interval = "weekly"
if tokens[0][0] == "@":
word = tokens[0]
if word == "@reboot":
# reboot once every half year?
interval = "halfyear"
elif word == "@yearly" or word == "@annually":
interval = "yearly"
elif word == "@monthly":
interval = "monthly"
elif word == "@weekly":
interval = "weekly"
elif word == "@daily" or word == "@midnight" or word == "@hourly":
interval = "daily"
else:
if tokens[1].startswith('*') and (
tokens[2] == tokens[3] == tokens[4] == "*"
):
interval = "hourly"
elif tokens[2] == tokens[3] == tokens[4] == "*":
interval = "daily"
elif tokens[3] != "*":
interval = "yearly"
elif tokens[2] != "*":
interval = "monthly"
elif tokens[4] != "*":
interval = "weekly"
return interval


def decide_days_to_save(interval):
min_logs_to_save = 7
if interval == "yearly":
days_to_save = min_logs_to_save * 365
elif interval == "halfyear":
days_to_save = min_logs_to_save * 180
elif interval == "monthly":
days_to_save = min_logs_to_save * 31
elif interval == "weekly":
days_to_save = min_logs_to_save * 7
elif interval == "daily":
days_to_save = min_logs_to_save * 1
else:
# "hourly" If a job runs more than once per hour,
# like "*/5" or even "*" we want to keep even less
days_to_save = 2
return days_to_save


with os.popen("/usr/bin/crontab -l") as pipe:
for line in pipe:
@@ -16,52 +68,16 @@ with os.popen("/usr/bin/crontab -l") as pipe:
continue

tokens = line.split()
# Default to weekly
interval = "weekly"
if tokens[0][0] == "@":
word = tokens[0]
if word == "@reboot":
# reboot once every half year?
interval = "halfyear"
elif word == "@yearly" or word == "@annually":
interval = "yearly"
elif word == "@monthly":
interval = "monthly"
elif word == "@weekly":
interval = "weekly"
elif word == "@daily" or word == "@midnight" or word == "@hourly":
interval = "daily"
else:
if tokens[2] == tokens[3] == tokens[4] == "*":
interval = "daily"
elif tokens[3] != "*":
interval = "yearly"
elif tokens[2] != "*":
interval = "monthly"
elif tokens[4] != "*":
interval = "weekly"

min_logs_to_save = 7
if interval == "yearly":
days_to_save = min_logs_to_save * 365
elif interval == "halfyear":
days_to_save = min_logs_to_save * 180
elif interval == "monthly":
days_to_save = min_logs_to_save * 31
elif interval == "weekly":
days_to_save = min_logs_to_save * 7
else:
# "daily":
days_to_save = min_logs_to_save * 1

interval = decide_interval(tokens)
days_to_save = decide_days_to_save(interval)
position = 0
for arg in tokens:
position += 1
if arg == "--name":
break

# Make sure --name isn't the last argument
if position > len(tokens):
if position < len(tokens):
job_name = tokens[position].replace("-", "_")
else:
continue
@@ -70,6 +86,9 @@
job_name = tokens[position].replace("-", "_")

if job_name and days_to_save:
command = "find /var/cache/scriptherder -type f "
command += f"-mtime +{days_to_save} -name {job_name}__* "
command += "-delete"
os.system(
f"find /var/cache/scriptherder -type f -mtime +{days_to_save} -name {job_name}__* -print0 | xargs -0 rm -f"
command
)
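
For illustration only (not part of this commit): a minimal Python sketch of how the new decide_interval() and decide_days_to_save() helpers could be exercised against a few sample crontab entries. The sample lines and job names are assumptions; the functions are taken as defined in the file above.

# Illustrative sketch, not part of the commit: assumes decide_interval() and
# decide_days_to_save() from cleanup_scriptherder are available in scope.
samples = [
    "@reboot /usr/local/bin/scriptherder --mode wrap --name boot_check -- /usr/bin/true",
    "*/5 * * * * scriptherder --mode wrap --name frequent_job -- /usr/bin/true",
    "0 3 * * * scriptherder --mode wrap --name nightly_job -- /usr/bin/true",
    "0 4 1 * * scriptherder --mode wrap --name monthly_report -- /usr/bin/true",
]

for line in samples:
    tokens = line.split()
    interval = decide_interval(tokens)            # "halfyear", "hourly", "daily", "monthly"
    days_to_save = decide_days_to_save(interval)  # 1260, 2, 7, 217 for the lines above
    print(f"{interval:>8}: keep logs for {days_to_save} days  <- {line}")
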
1 change: 1 addition & 0 deletions files/xrootd/Authfile
@@ -0,0 +1 @@
u lasse /xrootd-test/ rl
1 change: 1 addition & 0 deletions files/xrootd/grid-mapfile
@@ -0,0 +1 @@
"/C=SE/ST=Some-State/L=Stockholm/O=Sunet/CN=Lars Delhage/[email protected]" lasse
11 changes: 4 additions & 7 deletions functions/lb/load_balancer/get_all_backend_ips.pp
@@ -2,20 +2,17 @@
function sunet::lb::load_balancer::get_all_backend_ips(
Hash[String, Hash] $config,
) >> Array[String] {
if has_key($config['load_balancer'], 'websites') {
if 'websites' in $config['load_balancer'] {
$websites = $config['load_balancer']['websites']
} elsif has_key($config['load_balancer'], 'websites2') {
# name used during migration
$websites = $config['load_balancer']['websites2']
} else {
fail('Load balancer config contains neither "websites" nor "websites2"')
fail('Load balancer config does not contain "websites"')
}

$all_ips = map($websites) | $instance_name, $v1 | {
if has_key($v1, 'backends') {
if 'backends' in $v1 {
map($v1['backends']) | $backend_name, $v2 | {
map($v2) | $backend_fqdn, $v3 | {
has_key($v3, 'ips') ? {
'ips' in $v3 ? {
true => $v3['ips'],
false => []
}
4 changes: 2 additions & 2 deletions functions/lb/load_balancer/get_all_frontend_ips.pp
@@ -2,13 +2,13 @@
function sunet::lb::load_balancer::get_all_frontend_ips(
Hash[String, Any] $config,
) >> Array[String] {
if ! has_key($config, 'frontends') {
if ! 'frontends' in $config {
fail('Website config contains no frontends section')
}
$all_ips = map($config['frontends']) | $frontend_fqdn, $v | {
# k should be a frontend FQDN and $v a hash with ips in it:
# $v = {ips => [192.0.2.1]}}
(is_hash($v) and has_key($v, 'ips')) ? {
($v =~ Hash and 'ips' in $v) ? {
true => $v['ips'],
false => []
}
2 changes: 1 addition & 1 deletion functions/lb/load_balancer/get_config.pp
@@ -4,7 +4,7 @@ function sunet::lb::load_balancer::get_config(
String $name,
$default = undef
) {
has_key($config['load_balancer'], $name) ? {
$name in $config['load_balancer'] ? {
true => $config['load_balancer'][$name],
false => $default,
}
7 changes: 6 additions & 1 deletion manifests/dehydrated/client_define.pp
@@ -14,7 +14,12 @@
'root' => '/root',
default => "/home/${user}"
}
ensure_resource('file', "${home}/.ssh", { ensure => 'directory' })
ensure_resource('file', "${home}/.ssh", {
ensure => directory,
mode => '0700',
owner => $user,
group => $user,
});
ensure_resource('file', '/etc/dehydrated', { ensure => directory })
ensure_resource('file', '/etc/dehydrated/certs', { ensure => directory })
ensure_resource('file', '/usr/bin/le-ssl-compat.sh', {
17 changes: 12 additions & 5 deletions manifests/lb/load_balancer.pp
@@ -35,6 +35,9 @@
ensure_resource('sunet::misc::create_dir', ['/etc/bgp', $confdir, $scriptdir],
{ owner => 'root', group => 'root', mode => '0755' })

ensure_resource('sunet::misc::create_dir', [ "${confdir}/ssl"],
{ owner => 'root', group => 'haproxy', mode => '0750' })

$websites = $config['load_balancer']['websites']

sunet::lb::load_balancer::configure_websites { 'websites':
@@ -63,17 +66,21 @@
],
group => 'ssl-cert',
}
if has_key($facts['tls_certificates'], $facts['networking']['fqdn']) and has_key($facts['tls_certificates'][$::fqdn], 'infra_cert') {
$infra_cert = $facts['tls_certificates'][$::fqdn]['infra_cert']
$infra_key = $facts['tls_certificates'][$::fqdn]['infra_key']

$fqdn = $facts['networking']['fqdn']

if $fqdn in $facts['tls_certificates'] and 'infra_cert' in $facts['tls_certificates'][$fqdn] {
$infra_cert = $facts['tls_certificates'][$fqdn]['infra_cert']
$infra_key = $facts['tls_certificates'][$fqdn]['infra_key']

# Create a haproxy cert bundle from the infracert, to be used as client certificate when connecting to backends
ensure_resource(sunet::misc::certbundle, "${facts['networking']['fqdn']}_haproxy", {
ensure_resource(sunet::misc::certbundle, "${fqdn}_haproxy", {
bundle => [
"cert=${infra_cert}",
"key=${infra_key}",
'out=private/infra_haproxy.crt',
'out=/opt/frontend/config/ssl/infra_haproxy.crt',
],
group => 'haproxy',
})
}
}
18 changes: 9 additions & 9 deletions manifests/lb/load_balancer/website.pp
@@ -16,11 +16,13 @@
$haproxy_template_dir = lookup('haproxy_template_dir', undef, undef, $instance)

# Figure out what certificate to pass to the haproxy container
if ! has_key($config, 'tls_certificate_bundle') {
if has_key($facts['tls_certificates'], 'snakeoil') {
if 'tls_certificate_bundle' in $config {
$tls_certificate_bundle = $config['tls_certificate_bundle']
} else {
if 'snakeoil' in $facts['tls_certificates'] {
$snakeoil = $facts['tls_certificates']['snakeoil']['bundle']
}
if has_key($facts['tls_certificates'], $site_name) {
if $site_name in $facts['tls_certificates'] {
# Site name found in tls_certificates - good start
$_tls_certificate_bundle = pick(
$facts['tls_certificates'][$site_name]['haproxy'],
@@ -49,8 +51,6 @@
if $snakeoil and $tls_certificate_bundle == $snakeoil {
notice("Using snakeoil certificate for instance ${instance} (site ${site_name})")
}
} else {
$tls_certificate_bundle = $config['tls_certificate_bundle']
}

if $tls_certificate_bundle {
@@ -188,11 +188,11 @@

if $::facts['sunet_nftables_enabled'] != 'yes' {
# OLD way
if has_key($config, 'allow_ports') {
if 'allow_ports' in $config {
each($config['frontends']) | $k, $v | {
# k should be a frontend FQDN and $v a hash with ips in it:
# $v = {ips => [192.0.2.1]}}
if is_hash($v) and has_key($v, 'ips') {
if $v =~ Hash and 'ips' in $v {
sunet::misc::ufw_allow { "allow_ports_to_${instance}_frontend_${k}":
from => 'any',
to => $v['ips'],
@@ -232,7 +232,7 @@
})
}

if has_key($config, 'letsencrypt_server') and $config['letsencrypt_server'] != $facts['networking']['fqdn'] {
if 'letsencrypt_server' in $config and $config['letsencrypt_server'] != $facts['networking']['fqdn'] {
sunet::dehydrated::client_define { $name :
domain => $name,
server => $config['letsencrypt_server'],
@@ -246,7 +246,7 @@
each($config['backends']) | $k, $v | {
# k should be a backend name (like 'default') and v a hash with its backends:
# $v = {host.example.org => {ips => [192.0.2.1]}}
if is_hash($v) {
if $v =~ Hash {
each($v) | $name, $params | {
sunet::lb::api::instance { "api_${instance}_${k}_${name}":
site_name => $site_name,
16 changes: 14 additions & 2 deletions manifests/xrootd.pp
@@ -69,13 +69,25 @@
ensure => file,
content => template("sunet/xrootd/xrootd-${role}.cfg.erb"),
}
file { '/opt/xrootd/config/Authfile':
ensure => file,
content => file('sunet/xrootd/Authfile'),
}
file { '/opt/xrootd/grid-security/grid-mapfile':
ensure => file,
content => file('sunet/xrootd/grid-mapfile'),
}
file { '/opt/xrootd/grid-security/certificates/ca.pem':
ensure => file,
content => file("sunet/xrootd/ca.crt"),
content => file('sunet/xrootd/ca.crt'),
}
file { '/opt/xrootd/grid-security/certificates/2b1f9a7d.0':
ensure => link,
target => 'ca.pem'
}
file { '/opt/xrootd/grid-security/xrd/xrdcert.pem':
ensure => file,
content => file("sunet/xrootd/wildcard.drive.test.sunet.se.crt"),
content => file('sunet/xrootd/wildcard.drive.test.sunet.se.crt'),
}
file { '/opt/xrootd/grid-security/xrd/xrdkey.pem':
ensure => file,
2 changes: 1 addition & 1 deletion templates/lb/docker-compose_template.erb
@@ -124,7 +124,7 @@ services:
<% end -%>
<% end -%>
environment:
- 'HOSTFQDN=<%= @fqdn %>'
- 'HOSTFQDN=<%= @networking['fqdn'] %>'
- 'INSTANCE=<%= @instance %>'
- 'SITENAME=<%= @site_name %>'
- 'STATUSFN=/dev/shm/haproxy-status' # need to be writable by user fe-monitor, and match the healthcheck
6 changes: 6 additions & 0 deletions templates/lb/exabgp/exabgp.conf_neighbor.erb
@@ -19,9 +19,15 @@
family {
<% if @is_v4_peer -%>
ipv4 unicast;

<% end -%>
<% if @is_v6_peer -%>
ipv6 unicast;

<% end -%>
}

api {
processes [ watch-service ];
}
}
2 changes: 1 addition & 1 deletion templates/xrootd/xrootd-manager.cfg.erb
@@ -65,7 +65,7 @@ s3.url_style path
s3.trace all
xrd.trace all
cms.trace all
xrootd.trace all dump
xrootd.trace all
# Setting up S3 plugin
ofs.osslib libXrdS3.so

11 changes: 10 additions & 1 deletion templates/xrootd/xrootd-server.cfg.erb
@@ -12,6 +12,15 @@ xrd.tls /etc/grid-security/xrd/xrdcert.pem /etc/grid-security/xrd/xrdkey.pem
xrd.tlsca certdir /etc/grid-security/certificates refresh 8h
xrootd.tls capable all -data

# Authentication and authorization
xrootd.seclib libXrdSec.so
sec.protocol gsi -ca:1 -showdn:true -d:1 -crl:ignore -gmapopt:trymap -gridmap:/etc/grid-security/grid-mapfile

acc.audit deny
acc.authdb /etc/xrootd/Authfile
acc.authrefresh 60
ofs.authorize

# Upon last testing, the plugin did not yet work in async mode
xrootd.async off

@@ -65,7 +74,7 @@ s3.url_style path
s3.trace all
xrd.trace all
cms.trace all
xrootd.trace all dump
xrootd.trace all
# Setting up S3 plugin
ofs.osslib libXrdS3.so
