diff --git a/.github/workflows/tests.yml b/.github/workflows/tests.yml
index 8a6e639..4a466dd 100644
--- a/.github/workflows/tests.yml
+++ b/.github/workflows/tests.yml
@@ -65,24 +65,23 @@ jobs:
 
       - name: Deploy the test VMs
         run: |
-          cd ansible
           if [ "${{ matrix.os }}" = "centos/9-Stream" ]; then
-            cp hosts.yaml.example.centos hosts.yaml
+            inventory_file=inventories/baremetal-centos.yaml
           else
-            cp hosts.yaml.example hosts.yaml
+            inventory_file=inventories/baremetal.yaml
           fi
 
-          ansible-playbook tasks/update-packages.yaml
+          ansible-playbook -i $inventory_file playbooks/update_packages.yaml
 
           if [ "${{ matrix.os }}" = "ubuntu/20.04" ]; then
             # Ubuntu 20.04's OVN is too old.
-            sed -i "s/ovn_release:.*/ovn_release: \"ppa\"/g" hosts.yaml
+            sed -i "s/ovn_release:.*/ovn_release: \"ppa\"/g" $inventory_file
           elif [ "${{ matrix.os }}" = "debian/12" ]; then
             # ZFS on Debian needs compiling which is slow, use btrfs instead
-            sed -i "s/driver: zfs/driver: btrfs/g" hosts.yaml
+            sed -i "s/driver: zfs/driver: btrfs/g" $inventory_file
           fi
 
-          ansible-playbook deploy.yaml
+          ansible-playbook -i $inventory_file -v deploy.yaml
 
       - name: Post deployment validation
         run: |
diff --git a/.gitignore b/.gitignore
index a295864..95dcef7 100644
--- a/.gitignore
+++ b/.gitignore
@@ -1,2 +1,3 @@
 *.pyc
 __pycache__
+data/*
diff --git a/.zed/tasks.json b/.zed/tasks.json
index d161a18..6e89a5e 100644
--- a/.zed/tasks.json
+++ b/.zed/tasks.json
@@ -6,8 +6,8 @@ "shell": "system"
   },
   {
     "label": "Deploy dev cluster",
-    "command": "ansible-playbook deploy.yaml",
-    "cwd": "$ZED_WORKTREE_ROOT/ansible",
+    "command": "ansible-playbook -i inventories/baremetal.yaml deploy.yaml",
+    "cwd": "$ZED_WORKTREE_ROOT",
     "shell": "system"
   }
 ]
diff --git a/README.md b/README.md
index b4c39b2..2ce965a 100644
--- a/README.md
+++ b/README.md
@@ -34,9 +34,9 @@ tofu apply -target=module.baremetal
 ```
 
 ### Run the Ansible Playbook
-Go to the ansible directory:
+Go to the repository root directory:
 ```
-cd ../ansible/
+cd ../
 ```
 
 NOTE: If you need the same version of Ansible this was tested with:
@@ -47,15 +47,11 @@ pipenv shell
 ansible-galaxy install -r ansible_requirements.yml
 ```
 
-Copy the example inventory file:
-```
-cp hosts.yaml.example hosts.yaml
-```
 NOTE: If you are connecting to a remote Incus host you will need to change the
 `ansible_incus_remote` variable to match the name of the Incus remote (see:
 `incus remote list` for a list of remote names to use).
 Run the Playbooks:
 ```
-ansible-playbook deploy.yaml
+ansible-playbook -i inventories/baremetal.yaml deploy.yaml
 ```
 NOTE: When re-deploying the same cluster (e.g. following a `terraform destroy`),
@@ -65,9 +61,9 @@ connection to the previously deployed systems which will cause the deployment
 to get stuck.
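The stale state referred to here is the material the playbooks generate locally on the controller: the Ceph cluster keyrings and monitor map under `data/ceph/`, the per-cluster `host_id.yaml` tracking file under `data/lvmcluster/`, and the OVN PKI material under `data/ovn/`. With this change these directories move from `ansible/data/` to `data/` at the repository root, which is also why `data/*` is now ignored. Removing them, as shown below, lets the next deployment regenerate everything for the freshly provisioned machines: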
``` -rm ansible/data/ceph/* -rm ansible/data/lvmcluster/* -rm ansible/data/ovn/* +rm data/ceph/* +rm data/lvmcluster/* +rm data/ovn/* ``` ### Test a VM and Container on the new Incus cluster diff --git a/ansible/ansible.cfg b/ansible.cfg similarity index 85% rename from ansible/ansible.cfg rename to ansible.cfg index da763df..748974b 100644 --- a/ansible/ansible.cfg +++ b/ansible.cfg @@ -1,9 +1,10 @@ [defaults] -inventory = hosts.yaml error_on_undefined_vars = false timeout = 30 forks = 10 connection_plugins = plugins/connection/ +stdout_callback = yaml + [inventory] enable_plugins = yaml diff --git a/ansible/.gitignore b/ansible/.gitignore deleted file mode 100644 index 620494d..0000000 --- a/ansible/.gitignore +++ /dev/null @@ -1,3 +0,0 @@ -data/* -hosts.yaml -Pipfile.lock \ No newline at end of file diff --git a/ansible/Pipfile b/ansible/Pipfile deleted file mode 100644 index 26f7e06..0000000 --- a/ansible/Pipfile +++ /dev/null @@ -1,14 +0,0 @@ -[[source]] -url = "https://pypi.org/simple" -verify_ssl = true -name = "pypi" - -[packages] -jmespath = "*" -ansible-core = "==2.18.1" - -[dev-packages] - -[requires] -python_version = "3.13" -python_full_version = "3.13.1" \ No newline at end of file diff --git a/ansible/README.md b/ansible/README.md deleted file mode 100644 index ac85065..0000000 --- a/ansible/README.md +++ /dev/null @@ -1,74 +0,0 @@ -# Variables -## Ceph - - - `ceph_disks`: List of disks to disks to include in the Ceph cluster (type: object) - - `data`: Path to the disk, recommended to be a /dev/disk/by-id/ path (type: string) - - `db`: Path to a disk or partition to use for the RocksDB database, recommended to be a /dev/disk/by-id/ path (type: string) - - `ceph_fsid`: UUID of the Ceph cluster (use `uuidgen` or similar to generate) (**required**, type: string) - - `ceph_ip_address`: Override for the server's IP address (used to generate ceph.conf) (type: string) - - `ceph_keyrings`: List of keyrings to deploy on the system (type: list of string, default: ["client"]) - - `ceph_network_private`: CIDR subnet of the backend network (type: string) - - `ceph_network_public`: CIDR subnet of the consumer facing network (type: string) - - `ceph_rbd_cache`: Amount of memory for caching of librbd client requests (type: string) - - `ceph_rbd_cache_max`: Maximum amount of memory to be used for librbd client request caching (type: string) - - `ceph_rbd_cache_target`: Ideal amount of memory used for librbd client request caching (type: string) - - `ceph_release`: Ceph release to deploy, can be `distro` to use distribution version (type: string, default: `reef`) - - `ceph_roles`: List of roles the server should have in the Ceph cluster (**required**, type: list of string): - - `client`: Ceph client, gets ceph.conf and keyring - - `mds`: Ceph Metadata Server, used for exporting distributed filesystems (CephFS) - - `mgr`: Ceph Manager server, used to process background management tasks and services - - `mon`: Ceph Monitor server, provides the core Ceph API used by all other services - - `osd`: Ceph Object Storage Daemon, used to export disks to the cluster - - `rbd-mirror`: Ceph Rados Block Device mirroring server, used for cross-cluster replication - - `rgw`: A RADOS (object) Gateway, used to expose an S3 API on top of Ceph objects - -## Incus - - `incus_name`: Name identifier for the deployment (**required**, type: string) - - `incus_init`: Initial configuration data (type: dict) - - `config`: Dict of config keys - - `clients`: Dict of client certificates to trust - - `type`: Type of 
certificate, typically `client` or `metrics` (**required**, type: string) - - `certificate`: PEM encoded certificate (**required**, type: string) - - `network`: Dict of networks - - `name`: Name of the network (**required**, type: string) - - `type`: Type of network (**required**, type: string) - - `default`: Whether to include in the default profile (type: bool, default: False) - - `config`: Dict of global config keys - - `local_config`: Dict of server-specific config keys - - `storage`: Dict of storage pools - - `name`: Name of the storage pool (**required**, type: string) - - `driver`: Storage pool driver (**required**, type: string) - - `default`: Whether to include in the default profile (type: bool, default: False) - - `config`: Dict of global config keys - - `local_config`: Dict of server-specific config keys - - `incus_ip_address`: Override for the server's IP address (used cluster and client traffic) (type: string) - - `incus_release`: Incus release to deploy, can be one of `daily`, `stable` or `lts-6.0` (type: string, default: `stable`) - - `incus_roles`: Operation mode for the deployed Incus system (**required**, type: string) - - `standalone` - - `cluster` - - `ui`: Whether to serve the Incus UI - -## Netplan -Netplan doesn't make use of configuration variables, but if you wish to replace the network configuration of a server, you can do so by putting a file in `data/netplan/HOSTNAME.yaml`. - -## NVME - - `nvme_targets`: List of NVME over TCP targets (IPs) (type: list of strings) - -## LVM cluster - - `lvmcluster_metadata_size`: PV metadata size (default to 10MB) - - `lvmcluster_name`: Name identifier for the deployment (**required**, type: string) - - `lvmcluster_vgs`: Dict of VG name to storage device path - -## OVN - - - `ovn_az_name`: OVN availability zone name (**required** if using OVN IC, type: string) - - `ovn_clients`: List of certificates to generate for OVN clients (type: list of string) - - `ovn_ip_address`: Override for the server's IP address (used for tunnels and DB traffic) (type: string) - - `ovn_name`: OVN deployment name (**required**, type: string) - - `ovn_release`: OVN release to deploy, can be `distro` or `ppa` (type: string, default: `distro`) - - `ovn_roles`: List of roles the server should have in the OVN cluster (**required**, type: list of string): - - `central`: OVN API server, runs NorthBound and SouthBound database and northd daemon - - `host`: OVN client / controller, runs OpenVswitch and ovn-controller - - `ic`: OVN Inter-Connection server, runs the `ovn-ic` daemon - - `ic-db`: OVN Inter-Connection NorthBound and SouthBound database server - - `ic-gateway`: OVN Inter-Connection traffic gateway diff --git a/ansible/ansible_requirements.yml b/ansible/ansible_requirements.yml deleted file mode 100644 index 2319af2..0000000 --- a/ansible/ansible_requirements.yml +++ /dev/null @@ -1,9 +0,0 @@ ---- -# Install Collections and Roles with Ansible Galaxy -# ansible-galaxy install -r ansible_requirements.yml - -collections: - - name: community.crypto - - name: community.general - -roles: diff --git a/ansible/books/ceph.yaml b/ansible/books/ceph.yaml deleted file mode 100644 index 9fd9e6f..0000000 --- a/ansible/books/ceph.yaml +++ /dev/null @@ -1,562 +0,0 @@ ---- -- name: Ceph - Add package repository (apt) - hosts: all - order: shuffle - gather_facts: yes - gather_subset: - - "distribution_release" - vars: - task_release: "{{ ceph_release | default('squid') }}" - task_roles: "{{ ceph_roles | default([]) }}" - any_errors_fatal: true - tasks: - - name: Check 
if distribution is supported - meta: end_play - when: 'ansible_distribution not in ("Ubuntu", "Debian")' - - - name: Create keyring path - file: - path: /etc/apt/keyrings/ - mode: 0755 - state: directory - when: 'task_roles|length > 0 and task_release != "distro"' - - - name: Add ceph GPG key - copy: - src: ../files/ceph/ceph.asc - dest: /etc/apt/keyrings/ansible-ceph.asc - notify: Update apt - when: 'task_roles|length > 0 and task_release != "distro"' - - - name: Get local architecture - shell: dpkg --print-architecture - register: dpkg_architecture - changed_when: false - check_mode: no - when: 'task_roles|length > 0 and task_release != "distro"' - - - name: Add ceph package sources - template: - src: ../files/ceph/ceph.sources.tpl - dest: /etc/apt/sources.list.d/ansible-ceph.sources - notify: Update apt - when: 'task_roles|length > 0 and task_release != "distro"' - - handlers: - - name: Update apt - apt: - force_apt_get: yes - update_cache: yes - cache_valid_time: 0 - -- name: Ceph - Add package repository (rpm) - hosts: all - order: shuffle - gather_facts: yes - gather_subset: - - "distribution_release" - vars: - task_release: "{{ ceph_release | default('squid') }}" - task_roles: "{{ ceph_roles | default([]) }}" - any_errors_fatal: true - tasks: - - name: Check if distribution is supported - meta: end_play - when: 'ansible_distribution != "CentOS"' - - - name: Import ceph GPG key - ansible.builtin.rpm_key: - state: present - key: https://download.ceph.com/keys/release.asc - when: 'task_roles|length > 0 and task_release != "distro"' - - - name: Configure ceph stable community repository - ansible.builtin.yum_repository: - name: ceph_stable - description: Ceph Stable repo - state: present - baseurl: "https://download.ceph.com/rpm-{{ task_release }}/el{{ ansible_facts['distribution_major_version'] }}/$basearch" - file: ceph_stable - priority: 2 - when: 'task_roles|length > 0 and task_release != "distro"' - - - name: Configure ceph stable noarch community repository - ansible.builtin.yum_repository: - name: ceph_stable_noarch - description: Ceph Stable noarch repo - state: present - baseurl: "https://download.ceph.com/rpm-{{ task_release }}/el{{ ansible_facts['distribution_major_version'] }}/noarch" - file: ceph_stable - priority: 2 - when: 'task_roles|length > 0 and task_release != "distro"' - -- name: Ceph - Install packages - hosts: all - order: shuffle - gather_facts: yes - gather_subset: - - "distribution_release" - vars: - task_release: "{{ ceph_release | default('squid') }}" - task_roles: "{{ ceph_roles | default([]) }}" - any_errors_fatal: true - tasks: - - name: Install ceph-common - ansible.builtin.package: - name: - - ceph-common - state: present - when: '"client" in task_roles' - - - name: Install ceph-mon - ansible.builtin.package: - name: - - ceph-mon - state: present - when: '"mon" in task_roles' - - - name: Install ceph-mgr dependencies - ansible.builtin.package: - name: - - python3-distutils - state: present - when: 'ansible_distribution in ("Ubuntu", "Debian") and "mgr" in task_roles and task_release != "distro"' - - - name: Install ceph-mgr - ansible.builtin.package: - name: - - ceph-mgr - state: present - when: '"mgr" in task_roles' - - - name: Install ceph-mds - ansible.builtin.package: - name: - - ceph-mds - state: present - when: '"mds" in task_roles' - - - name: Install ceph-osd - ansible.builtin.package: - name: - - ceph-osd - - python3-packaging - state: present - when: '"osd" in task_roles' - - - name: Install ceph-volume - ansible.builtin.package: - name: - - 
ceph-volume - state: present - when: '"osd" in task_roles and (task_release != "distro" or ansible_distribution_release not in ("bookworm", "focal"))' - - - name: Install ceph-rbd-mirror - ansible.builtin.package: - name: - - rbd-mirror - state: present - when: '"rbd-mirror" in task_roles' - - - name: Install radosgw (deb) - ansible.builtin.package: - name: - - radosgw - state: present - when: 'ansible_distribution in ("Debian", "Ubuntu") and "rgw" in task_roles' - - - name: Install radosgw (rpm) - ansible.builtin.package: - name: - - ceph-radosgw - state: present - when: 'ansible_distribution == "CentOS" and "rgw" in task_roles' - -- name: Ceph - Generate cluster keys and maps - hosts: all - order: shuffle - gather_facts: yes - gather_subset: - - "default_ipv4" - - "default_ipv6" - vars: - task_fsid: "{{ ceph_fsid | default('') }}" - task_bootstrap_osd_keyring: ../data/ceph/cluster.{{ task_fsid }}.bootstrap-osd.keyring - task_client_admin_keyring: ../data/ceph/cluster.{{ task_fsid }}.client.admin.keyring - task_mon_keyring: ../data/ceph/cluster.{{ task_fsid }}.mon.keyring - task_mon_map: ../data/ceph/cluster.{{ task_fsid }}.mon.map - task_release: "{{ ceph_release | default('squid') }}" - task_roles: "{{ ceph_roles | default([]) }}" - - task_release_majors: - luminous: 12 - mimic: 13 - nautilus: 14 - octopus: 15 - pacific: 16 - quincy: 17 - reef: 18 - squid: 19 - any_errors_fatal: true - tasks: - - name: Generate mon keyring - delegate_to: 127.0.0.1 - shell: - cmd: ceph-authtool --create-keyring {{ task_mon_keyring }} --gen-key -n mon. --cap mon 'allow *' - creates: '{{ task_mon_keyring }}' - throttle: 1 - when: 'task_fsid' - - - name: Generate client.admin keyring - delegate_to: 127.0.0.1 - shell: - cmd: ceph-authtool --create-keyring {{ task_client_admin_keyring }} --gen-key -n client.admin --cap mon 'allow *' --cap osd 'allow *' --cap mds 'allow *' --cap mgr 'allow *' - creates: '{{ task_client_admin_keyring }}' - throttle: 1 - notify: Add key to client.admin keyring - when: 'task_fsid' - - - name: Generate bootstrap-osd keyring - delegate_to: 127.0.0.1 - shell: - cmd: ceph-authtool --create-keyring {{ task_bootstrap_osd_keyring }} --gen-key -n client.bootstrap-osd --cap mon 'profile bootstrap-osd' --cap mgr 'allow r' - creates: '{{ task_bootstrap_osd_keyring }}' - throttle: 1 - notify: Add key to bootstrap-osd keyring - when: 'task_fsid' - - - name: Generate mon map - delegate_to: 127.0.0.1 - shell: - cmd: monmaptool --create{% if task_release_majors[task_release] | default(None) %} --set-min-mon-release={{ task_release_majors[task_release] }}{% endif %} --fsid {{ task_fsid }} {{ task_mon_map }} - creates: '{{ task_mon_map }}' - throttle: 1 - notify: Add nodes to mon map - when: 'task_fsid' - - handlers: - - name: Add key to client.admin keyring - delegate_to: 127.0.0.1 - shell: - cmd: ceph-authtool {{ task_mon_keyring }} --import-keyring {{ task_client_admin_keyring }} - - - name: Add key to bootstrap-osd keyring - delegate_to: 127.0.0.1 - shell: - cmd: ceph-authtool {{ task_mon_keyring }} --import-keyring {{ task_bootstrap_osd_keyring }} - - - name: Add nodes to mon map - delegate_to: 127.0.0.1 - shell: - cmd: monmaptool --add {{ item.name }} {{ item.ip }} {{ task_mon_map }} - loop: "{{ lookup('template', '../files/ceph/ceph.monitors.tpl') | from_yaml | default([]) }}" - -- name: Ceph - Set up config and keyrings - hosts: all - order: shuffle - gather_facts: yes - gather_subset: - - "default_ipv4" - - "default_ipv6" - vars: - task_fsid: "{{ ceph_fsid | default('') }}" - 
task_keyrings: "{{ ceph_keyrings | default(['admin']) }}" - task_network_public: "{{ ceph_network_public | default('') }}" - task_network_private: "{{ ceph_network_private | default('') }}" - task_roles: "{{ ceph_roles | default([]) }}" - task_bootstrap_osd_keyring: ../data/ceph/cluster.{{ task_fsid }}.bootstrap-osd.keyring - task_client_admin_keyring: ../data/ceph/cluster.{{ task_fsid }}.client.admin.keyring - task_mon_keyring: ../data/ceph/cluster.{{ task_fsid }}.mon.keyring - task_mon_map: ../data/ceph/cluster.{{ task_fsid }}.mon.map - task_rbd_cache: "{{ ceph_rbd_cache | default('128Mi') }}" - task_rbd_cache_max: "{{ ceph_rbd_cache_max | default('96Mi') }}" - task_rbd_cache_target: "{{ ceph_rbd_cache_target | default('64Mi') }}" - any_errors_fatal: true - tasks: - - name: Transfer the cluster configuration - template: - src: ../files/ceph/ceph.conf.tpl - dest: /etc/ceph/ceph.conf - notify: Restart Ceph - when: 'task_roles|length > 0' - - - name: Create main storage directory - file: - path: /var/lib/ceph - owner: ceph - group: ceph - mode: 0750 - state: directory - when: 'task_roles|length > 0 and (task_roles|length > 1 or task_roles[0] != "client")' - - - name: Create monitor bootstrap path - file: - path: /var/lib/ceph/bootstrap-mon - owner: ceph - group: ceph - mode: 0770 - state: directory - when: '"mon" in task_roles' - - - name: Create OSD bootstrap path - file: - path: /var/lib/ceph/bootstrap-osd - owner: ceph - group: ceph - mode: 0770 - state: directory - when: '"osd" in task_roles' - - - name: Transfer main admin keyring - copy: - src: '{{ task_client_admin_keyring }}' - dest: /etc/ceph/ceph.client.admin.keyring - owner: ceph - group: ceph - mode: 0660 - notify: Restart Ceph - when: '("client" in task_roles and "admin" in task_keyrings) or "mon" in task_roles' - - - name: Transfer additional client keyrings - copy: - src: '../data/ceph/cluster.{{ task_fsid }}.client.{{ item }}.keyring' - dest: '/etc/ceph/ceph.client.{{ item }}.keyring' - owner: ceph - group: ceph - mode: 0660 - with_items: - '{{ task_keyrings | difference(["admin"]) }}' - when: '"client" in task_roles' - - - name: Transfer bootstrap mon keyring - copy: - src: '{{ task_mon_keyring }}' - dest: /var/lib/ceph/bootstrap-mon/ceph.keyring - owner: ceph - group: ceph - mode: 0660 - when: '"mon" in task_roles' - - - name: Transfer bootstrap mon map - copy: - src: '{{ task_mon_map }}' - dest: /var/lib/ceph/bootstrap-mon/ceph.monmap - owner: ceph - group: ceph - mode: 0660 - when: '"mon" in task_roles' - - - name: Transfer bootstrap OSD keyring - copy: - src: '{{ task_bootstrap_osd_keyring }}' - dest: /var/lib/ceph/bootstrap-osd/ceph.keyring - owner: ceph - group: ceph - mode: 0660 - when: '"osd" in task_roles' - - handlers: - - name: Restart Ceph - systemd: - name: ceph.target - state: restarted - -- name: Ceph - Deploy mon - hosts: all - order: shuffle - gather_facts: no - vars: - task_fsid: "{{ ceph_fsid | default('') }}" - task_roles: "{{ ceph_roles | default([]) }}" - any_errors_fatal: true - tasks: - - name: Bootstrap Ceph mon - shell: - cmd: sudo -u ceph ceph-mon --mkfs -i {{ inventory_hostname_short }} --monmap /var/lib/ceph/bootstrap-mon/ceph.monmap --keyring /var/lib/ceph/bootstrap-mon/ceph.keyring - creates: /var/lib/ceph/mon/ceph-{{ inventory_hostname_short }}/keyring - notify: - - Enable msgr2 - - Disable insecure_global_id_reclaim - when: '"mon" in task_roles' - - - name: Enable and start Ceph mon - systemd: - enabled: yes - name: ceph-mon@{{ inventory_hostname_short }} - state: started - when: '"mon" in 
task_roles' - - handlers: - - name: Enable msgr2 - shell: - cmd: ceph mon enable-msgr2 - - - name: Disable insecure_global_id_reclaim - shell: - cmd: ceph config set global auth_allow_insecure_global_id_reclaim false - -- name: Ceph - Deploy osd - hosts: all - order: shuffle - gather_facts: no - vars: - task_disks: "{{ ceph_disks | default([]) }}" - task_fsid: "{{ ceph_fsid | default('') }}" - task_roles: "{{ ceph_roles | default([]) }}" - any_errors_fatal: true - tasks: - - name: Bootstrap Ceph OSD - shell: - cmd: ceph-volume lvm create --data /dev/disk/by-id/{{ item.data }}{% if "db" in item %} --block.db /dev/disk/by-id/{{ item.db }}{% endif %} - creates: /var/lib/ceph/osd/.{{ item.data }}.created - loop: '{{ task_disks }}' - when: '"osd" in task_roles' - - - name: Bootstrap Ceph OSD (stamp) - shell: - cmd: touch /var/lib/ceph/osd/.{{ item.data }}.created - creates: /var/lib/ceph/osd/.{{ item.data }}.created - loop: '{{ task_disks }}' - when: '"osd" in task_roles' - -- name: Ceph - Deploy mgr - hosts: all - order: shuffle - gather_facts: no - vars: - task_fsid: "{{ ceph_fsid | default('') }}" - task_roles: "{{ ceph_roles | default([]) }}" - any_errors_fatal: true - tasks: - - name: Create /var/lib/ceph/mgr/ceph-{{ inventory_hostname_short }} - file: - path: /var/lib/ceph/mgr/ceph-{{ inventory_hostname_short }} - owner: ceph - group: ceph - mode: 0770 - state: directory - register: deploy_mgr - when: '"mgr" in task_roles' - - - name: Create mgr keyring - delegate_to: "{{ lookup('template', '../files/ceph/ceph.monitors.names.tpl') | from_yaml | first }}" - shell: - cmd: ceph auth get-or-create mgr.{{ inventory_hostname_short }} mon 'allow profile mgr' osd 'allow *' mds 'allow *' - register: mgr_keyring - when: '"mgr" in task_roles and deploy_mgr.changed' - - - name: Transfer mgr keyring - copy: - content: "{{ mgr_keyring.stdout }}\n" - dest: "/var/lib/ceph/mgr/ceph-{{ inventory_hostname_short }}/keyring" - owner: ceph - group: ceph - mode: 0660 - when: '"mgr" in task_roles and deploy_mgr.changed' - - - name: Enable ceph mgr - systemd: - enabled: yes - name: ceph-mgr@{{ inventory_hostname_short }} - state: started - when: '"mgr" in task_roles' - -- name: Ceph - Deploy mds - hosts: all - order: shuffle - gather_facts: no - vars: - task_fsid: "{{ ceph_fsid | default('') }}" - task_roles: "{{ ceph_roles | default([]) }}" - any_errors_fatal: true - tasks: - - name: Create /var/lib/ceph/mds/ceph-{{ inventory_hostname_short }} - file: - path: /var/lib/ceph/mds/ceph-{{ inventory_hostname_short }} - owner: ceph - group: ceph - mode: 0770 - state: directory - register: deploy_mds - when: '"mds" in task_roles' - - - name: Create mds keyring - delegate_to: "{{ lookup('template', '../files/ceph/ceph.monitors.names.tpl') | from_yaml | first }}" - shell: - cmd: ceph auth get-or-create mds.{{ inventory_hostname_short }} mon 'profile mds' mgr 'profile mds' mds 'allow *' osd 'allow *' - register: mds_keyring - when: '"mds" in task_roles and deploy_mds.changed' - - - name: Transfer mds keyring - copy: - content: "{{ mds_keyring.stdout }}\n" - dest: "/var/lib/ceph/mds/ceph-{{ inventory_hostname_short }}/keyring" - owner: ceph - group: ceph - mode: 0660 - when: '"mds" in task_roles and deploy_mds.changed' - - - name: Enable ceph mds - systemd: - enabled: yes - name: ceph-mds@{{ inventory_hostname_short }} - state: started - when: '"mds" in task_roles' - -- name: Ceph - Deploy rgw - hosts: all - order: shuffle - gather_facts: no - vars: - task_fsid: "{{ ceph_fsid | default('') }}" - task_roles: "{{ ceph_roles 
| default([]) }}" - any_errors_fatal: true - tasks: - - name: Create /var/lib/ceph/radosgw/ceph-rgw.{{ inventory_hostname_short }} - file: - path: /var/lib/ceph/radosgw/ceph-rgw.{{ inventory_hostname_short }} - owner: ceph - group: ceph - mode: 0770 - state: directory - register: deploy_rgw - when: '"rgw" in task_roles' - - - name: Create Ceph rgw keyring - delegate_to: "{{ lookup('template', '../files/ceph/ceph.monitors.names.tpl') | from_yaml | first }}" - shell: - cmd: ceph auth get-or-create client.rgw.{{ inventory_hostname_short }} mon 'allow rw' osd 'allow rwx' - register: rgw_keyring - when: '"rgw" in task_roles and deploy_rgw.changed' - - - name: Transfer rgw keyring - copy: - content: "{{ rgw_keyring.stdout }}\n" - dest: "/var/lib/ceph/radosgw/ceph-rgw.{{ inventory_hostname_short }}/keyring" - owner: ceph - group: ceph - mode: 0660 - when: '"rgw" in task_roles and deploy_rgw.changed' - - - name: Enable ceph rgw - systemd: - enabled: yes - name: ceph-radosgw@rgw.{{ inventory_hostname_short }} - state: started - when: '"rgw" in task_roles' - -- name: Ceph - Deploy rbd-mirror - hosts: all - order: shuffle - gather_facts: no - vars: - task_fsid: "{{ ceph_fsid | default('') }}" - task_roles: "{{ ceph_roles | default([]) }}" - any_errors_fatal: true - tasks: - - name: Enable ceph rbd-mirror - systemd: - enabled: yes - name: ceph-rbd-mirror@admin - state: started - when: '"rbd-mirror" in task_roles' diff --git a/ansible/books/environment.yaml b/ansible/books/environment.yaml deleted file mode 100644 index e2a6e4b..0000000 --- a/ansible/books/environment.yaml +++ /dev/null @@ -1,40 +0,0 @@ ---- -- name: Time synchronization (NTP) - hosts: all - order: shuffle - gather_facts: yes - gather_subset: - - "distribution_release" - any_errors_fatal: true - tasks: - - name: Install EPEL repo (rpm) - yum: - name: epel-release - state: present - when: 'ansible_distribution == "CentOS"' - - - name: Install systemd-timesyncd - ansible.builtin.package: - name: - - systemd-timesyncd - state: present - - - name: Enable systemd-timesyncd - systemd: - enabled: yes - name: systemd-timesyncd - state: started - register: enable - - - name: Wait 5s for NTP sync - ansible.builtin.wait_for: - timeout: 5 - delegate_to: localhost - when: enable.changed - - - name: Verify NTP successfully synced - shell: - cmd: timedatectl timesync-status - register: ntp_status_output - failed_when: '"Packet count: 0" in ntp_status_output.stdout' - changed_when: false diff --git a/ansible/books/incus.yaml b/ansible/books/incus.yaml deleted file mode 100644 index 98eb3ce..0000000 --- a/ansible/books/incus.yaml +++ /dev/null @@ -1,292 +0,0 @@ ---- -- name: Incus - Add package repository (apt) - hosts: all - order: shuffle - gather_facts: yes - gather_subset: - - "distribution_release" - vars: - task_release: "{{ incus_release | default('stable') }}" - task_roles: "{{ incus_roles | default([]) }}" - any_errors_fatal: true - tasks: - - name: Check if distribution is supported - meta: end_play - when: 'ansible_distribution not in ("Ubuntu", "Debian")' - - - name: Create apt keyring path - file: - path: /etc/apt/keyrings/ - mode: 0755 - state: directory - when: 'task_roles|length > 0 and task_release != "distro"' - - - name: Add Zabbly repository key - copy: - src: ../files/incus/zabbly.asc - dest: /etc/apt/keyrings/ansible-zabbly.asc - notify: Update apt - when: 'task_roles|length > 0 and task_release != "distro"' - - - name: Get DPKG architecture - shell: dpkg --print-architecture - register: dpkg_architecture - changed_when: false - 
check_mode: no - when: 'task_roles|length > 0 and task_release != "distro"' - - - name: Add Zabbly package source - template: - src: ../files/incus/incus.sources.tpl - dest: /etc/apt/sources.list.d/ansible-zabbly-incus-{{ task_release }}.sources - notify: Update apt - when: 'task_roles|length > 0 and task_release != "distro"' - - handlers: - - name: Update apt - apt: - force_apt_get: yes - update_cache: yes - cache_valid_time: 0 - -- name: Incus - Add package repository (rpm) - hosts: all - order: shuffle - gather_facts: yes - gather_subset: - - "distribution_release" - vars: - task_release: "{{ incus_release | default('stable') }}" - task_roles: "{{ incus_roles | default([]) }}" - any_errors_fatal: true - tasks: - - name: Check if distribution is supported - meta: end_play - when: 'ansible_distribution != "CentOS"' - - - name: Add COPR repository - community.general.copr: - chroot: "epel-9-x86_64" - name: "neil/incus" - state: enabled - when: 'task_roles|length > 0 and task_release != "distro"' - -- name: Incus - Install packages and bootstrap - hosts: all - order: shuffle - gather_facts: yes - gather_subset: - - "default_ipv4" - - "default_ipv6" - - "distribution_release" - vars: - task_init: "{{ incus_init | default('{}') }}" - task_ip_address: "{{ incus_ip_address | default(ansible_default_ipv6['address'] | default(ansible_default_ipv4['address'])) }}" - task_name: "{{ incus_name | default('') }}" - task_roles: "{{ incus_roles | default([]) }}" - - task_ovn_northbound: "{{ lookup('template', '../files/ovn/ovn-central.servers.tpl') | from_yaml | map('regex_replace', '^(.*)$', 'ssl:[\\1]:6641') | join(',') }}" - task_servers: "{{ lookup('template', '../files/incus/incus.servers.tpl') | from_yaml | sort }}" - any_errors_fatal: true - tasks: - - name: Install the Incus package (deb) - apt: - name: - - incus - install_recommends: no - state: present - register: install_deb - when: 'ansible_distribution in ("Debian", "Ubuntu") and task_roles | length > 0' - - - name: Install the Incus package (rpm) - ansible.builtin.package: - name: - - incus - state: present - register: install_rpm - when: 'ansible_distribution == "CentOS" and task_roles | length > 0' - - - name: Install the Incus UI package (deb) - apt: - name: - - incus-ui-canonical - install_recommends: no - state: present - when: 'ansible_distribution in ("Debian", "Ubuntu") and "ui" in task_roles' - - - name: Install btrfs tools - ansible.builtin.package: - name: - - btrfs-progs - state: present - when: "task_roles | length > 0 and 'btrfs' in task_init['storage'] | dict2items | json_query('[].value.driver')" - - - name: Install ceph tools - ansible.builtin.package: - name: - - ceph-common - state: present - when: "task_roles | length > 0 and 'ceph' in task_init['storage'] | dict2items | json_query('[].value.driver')" - - - name: Install LVM tools - ansible.builtin.package: - name: - - lvm2 - state: present - when: "task_roles | length > 0 and 'lvm' in task_init['storage'] | dict2items | json_query('[].value.driver')" - - - name: Install ZFS dependencies - ansible.builtin.package: - name: - - zfs-dkms - state: present - when: "task_roles | length > 0 and 'zfs' in task_init['storage'] | dict2items | json_query('[].value.driver') and ansible_distribution == 'Debian'" - - - name: Install ZFS tools - ansible.builtin.package: - name: - - zfsutils-linux - state: present - when: "task_roles | length > 0 and 'zfs' in task_init['storage'] | dict2items | json_query('[].value.driver')" - - - name: Set uid allocation - shell: - cmd: "usermod root 
--add-subuids 10000000-1009999999" - when: '(install_deb.changed or install_rpm.changed) and ansible_distribution == "CentOS"' - - - name: Set gid allocation - shell: - cmd: "usermod root --add-subgids 10000000-1009999999" - when: '(install_deb.changed or install_rpm.changed) and ansible_distribution == "CentOS"' - - - name: Enable incus socket unit - systemd: - enabled: true - name: incus.socket - state: started - when: 'install_deb.changed or install_rpm.changed' - - - name: Enable incus service unit - systemd: - enabled: true - name: incus.service - state: started - when: 'install_deb.changed or install_rpm.changed' - - - name: Enable incus startup unit - systemd: - enabled: true - name: incus-startup.service - state: started - when: 'install_deb.changed or install_rpm.changed' - - - name: Set client listen address - shell: - cmd: "incus --force-local config set core.https_address {{ task_ip_address }}" - when: '(install_deb.changed or install_rpm.changed) and ("standalone" in task_roles or ("cluster" in task_roles and task_servers[0] == inventory_hostname))' - - - name: Set cluster listen address - shell: - cmd: "incus --force-local config set cluster.https_address {{ task_ip_address }}" - when: '(install_deb.changed or install_rpm.changed) and "cluster" in task_roles and task_servers[0] == inventory_hostname' - - - name: Set OVN NorthBound database - shell: - cmd: "incus --force-local config set network.ovn.northbound_connection={{ task_ovn_northbound }} network.ovn.client_cert=\"{{ lookup('file', '../data/ovn/'+ovn_name+'/'+inventory_hostname+'.crt') }}\" network.ovn.client_key=\"{{ lookup('file', '../data/ovn/'+ovn_name+'/'+inventory_hostname+'.key') }}\" network.ovn.ca_cert=\"{{ lookup('file', '../data/ovn/'+ovn_name+'/ca.crt') }}\"" - notify: Restart Incus - when: '(install_deb.changed or install_rpm.changed) and task_ovn_northbound and ("standalone" in task_roles or ("cluster" in task_roles and task_servers[0] == inventory_hostname))' - - - name: Add networks - shell: - cmd: "incus network create {{ item.key }} --type={{ item.value.type }}{% for k in item.value.local_config | default([]) %} {{ k }}={{ item.value.local_config[k] }}{% endfor %}{% for k in item.value.config | default([]) %} {{ k }}={{ item.value.config[k] }}{% endfor %}" - loop: "{{ task_init['network'] | dict2items }}" - when: '(install_deb.changed or install_rpm.changed) and ("standalone" in task_roles or ("cluster" in task_roles and task_servers[0] == inventory_hostname))' - - - name: Set network description - shell: - cmd: "incus network set --property {{ item.key }} description=\"{{ item.value.description }}\"" - loop: "{{ task_init['network'] | dict2items }}" - when: '(install_deb.changed or install_rpm.changed) and ("standalone" in task_roles or ("cluster" in task_roles and task_servers[0] == inventory_hostname)) and item.value.description | default(None)' - - - name: Add storage pools - shell: - cmd: "incus storage create {{ item.key }} {{ item.value.driver }}{% for k in item.value.local_config | default([]) %} {{ k }}={{ item.value.local_config[k] }}{% endfor %}{% for k in item.value.config | default([]) %} {{ k }}={{ item.value.config[k] }}{% endfor %}" - loop: "{{ task_init['storage'] | dict2items }}" - when: '(install_deb.changed or install_rpm.changed) and ("standalone" in task_roles or ("cluster" in task_roles and task_servers[0] == inventory_hostname))' - - - name: Set storage pool description - shell: - cmd: "incus storage set --property {{ item.key }} description=\"{{ item.value.description }}\"" - 
loop: "{{ task_init['storage'] | dict2items }}" - when: '(install_deb.changed or install_rpm.changed) and ("standalone" in task_roles or ("cluster" in task_roles and task_servers[0] == inventory_hostname)) and item.value.description | default(None)' - - - name: Add storage pool to default profile - shell: - cmd: "incus profile device add default root disk path=/ pool={{ item }}" - loop: "{{ task_init['storage'] | dict2items | json_query('[?value.default].key') }}" - when: '(install_deb.changed or install_rpm.changed) and ("standalone" in task_roles or ("cluster" in task_roles and task_servers[0] == inventory_hostname))' - - - name: Add network to default profile - shell: - cmd: "incus profile device add default eth0 nic network={{ item }} name=eth0" - loop: "{{ task_init['network'] | dict2items | json_query('[?value.default].key') }}" - when: '(install_deb.changed or install_rpm.changed) and ("standalone" in task_roles or ("cluster" in task_roles and task_servers[0] == inventory_hostname))' - - - name: Bootstrap the cluster - shell: - cmd: "incus --force-local cluster enable {{ inventory_hostname }}" - when: '(install_deb.changed or install_rpm.changed) and "cluster" in task_roles and task_servers[0] == inventory_hostname' - - - name: Create join tokens - delegate_to: "{{ task_servers[0] }}" - shell: - cmd: "incus --force-local --quiet cluster add {{ inventory_hostname }}" - register: cluster_add - when: '(install_deb.changed or install_rpm.changed) and "cluster" in task_roles and task_servers[0] != inventory_hostname' - - - name: Wait 5s to avoid token use before valid - ansible.builtin.wait_for: - timeout: 5 - delegate_to: localhost - when: 'cluster_add.changed' - - - name: Join the cluster - throttle: 1 - shell: - cmd: "incus --force-local admin init --preseed" - stdin: |- - cluster: - enabled: true - cluster_address: "{{ task_ip_address }}" - cluster_token: "{{ cluster_add.stdout }}" - server_address: "{{ task_ip_address }}" - member_config: {% for pool in task_init.storage %}{% for key in task_init.storage[pool].local_config | default([]) %} - - - entity: storage-pool - name: {{ pool }} - key: {{ key }} - value: {{ task_init.storage[pool].local_config[key] }}{% endfor %}{% endfor %}{% for network in task_init.network %}{% for key in task_init.network[network].local_config | default([]) %} - - - entity: network - name: {{ network }} - key: {{ key }} - value: {{ task_init.network[network].local_config[key] }}{% endfor %}{% endfor %} - when: 'cluster_add.changed' - - - name: Apply additional configuration - shell: - cmd: "incus config set {{ item.key }}=\"{{ item.value }}\"" - loop: "{{ task_init['config'] | default({}) | dict2items }}" - when: '(install_deb.changed or install_rpm.changed) and ("standalone" in task_roles or ("cluster" in task_roles and task_servers[0] == inventory_hostname))' - - - name: Load client certificates - shell: - cmd: "incus config trust add-certificate --name \"{{ item.key }}\" --type={{ item.value.type | default('client') }} -" - stdin: "{{ item.value.certificate }}" - loop: "{{ task_init['clients'] | default({}) | dict2items }}" - when: '(install_deb.changed or install_rpm.changed) and ("standalone" in task_roles or ("cluster" in task_roles and task_servers[0] == inventory_hostname))' - handlers: - - name: Restart Incus - systemd: - name: incus.service - state: restarted diff --git a/ansible/books/local.early.yaml b/ansible/books/local.early.yaml deleted file mode 100644 index 4d44274..0000000 --- a/ansible/books/local.early.yaml +++ /dev/null @@ -1,6 +0,0 
@@ -# Playbook that's run prior to any of the incus-deploy books. -- name: Local - Run some early tasks - hosts: all - order: shuffle - gather_facts: no - any_errors_fatal: true diff --git a/ansible/books/local.late.yaml b/ansible/books/local.late.yaml deleted file mode 100644 index 428ff9b..0000000 --- a/ansible/books/local.late.yaml +++ /dev/null @@ -1,6 +0,0 @@ -# Playbook that's run after all of the incus-deploy books. -- name: Local - Run some late tasks - hosts: all - order: shuffle - gather_facts: no - any_errors_fatal: true diff --git a/ansible/books/lvmcluster.yaml b/ansible/books/lvmcluster.yaml deleted file mode 100644 index 52851ad..0000000 --- a/ansible/books/lvmcluster.yaml +++ /dev/null @@ -1,108 +0,0 @@ ---- -- name: LVM Cluster - Generate configuration - hosts: all - order: shuffle - gather_facts: no - vars: - task_name: "{{ lvmcluster_name | default('') }}" - any_errors_fatal: true - tasks: - - name: Create cluster directory - delegate_to: 127.0.0.1 - file: - path: "../data/lvmcluster/{{ task_name }}" - mode: 0755 - state: directory - throttle: 1 - when: 'task_name' - register: create - - - name: Create cluster host_id tracking - delegate_to: 127.0.0.1 - throttle: 1 - copy: - content: "{}" - dest: "../data/lvmcluster/{{ task_name }}/host_id.yaml" - mode: 0644 - when: "create.changed" - - - name: Update cluster host_id tracking - delegate_to: 127.0.0.1 - throttle: 1 - template: - src: "../files/lvmcluster/host_id.yaml.tpl" - dest: "../data/lvmcluster/{{ task_name }}/host_id.yaml" - when: 'task_name' - vars: - task_host_ids: "{{ lookup('file', '../data/lvmcluster/' + task_name + '/host_id.yaml') | from_yaml }}" - -- name: LVM Cluster - Install packages and host config - hosts: all - order: shuffle - gather_facts: yes - gather_subset: - - "distribution_release" - vars: - task_name: "{{ lvmcluster_name | default('') }}" - task_host_ids: "{{ lookup('file', '../data/lvmcluster/' + task_name + '/host_id.yaml') | from_yaml }}" - any_errors_fatal: true - tasks: - - name: Install the LVM packages - ansible.builtin.package: - name: - - lvm2 - - lvm2-lockd - - sanlock - state: present - when: 'task_name' - - - name: Configure for LVM cluster - template: - src: ../files/lvmcluster/lvmlocal.conf.tpl - dest: /etc/lvm/lvmlocal.conf - when: 'task_name' - - - name: Enable the lvmlockd unit - systemd: - enabled: yes - name: lvmlockd - state: started - when: 'task_name' - - - name: Enable the sanlock unit - systemd: - enabled: yes - name: sanlock - state: started - when: 'task_name' - -- name: LVM Cluster - Create VGs - hosts: all - order: shuffle - gather_facts: no - vars: - task_metadata_size: "{{ lvmcluster_metadata_size | default('10m') }}" - task_vgs: "{{ lvmcluster_vgs | default({}) }}" - any_errors_fatal: true - tasks: - - name: Check for existing VGs - shell: - cmd: "vgs {{ item }}" - register: check - loop: "{{ task_vgs.keys() }}" - run_once: true - changed_when: false - failed_when: "check.rc not in (0, 5)" - - - name: Create the VG (first server) - shell: - cmd: "vgcreate --shared {{ item.item }} {{ task_vgs[item.item] }} --metadatasize={{ task_metadata_size }}" - when: "item.rc == 5" - loop: "{{ check.results }}" - run_once: true - register: create - - - name: Ensure lock manager is running - shell: - cmd: "vgchange --lock-start" - when: "create.changed" diff --git a/ansible/books/netplan.yaml b/ansible/books/netplan.yaml deleted file mode 100644 index f37de33..0000000 --- a/ansible/books/netplan.yaml +++ /dev/null @@ -1,47 +0,0 @@ ---- -- name: Netplan - Override system 
configuration - hosts: all - order: shuffle - gather_facts: yes - gather_subset: - - "distribution_release" - any_errors_fatal: true - tasks: - - name: Check if distribution is supported - meta: end_play - when: 'ansible_distribution not in ("Ubuntu", "Debian")' - - - name: Check if a Netplan configuration exists - local_action: stat path=../data/netplan/{{ inventory_hostname }}.yaml - register: main_file - - - name: Ensure netplan is installed - apt: - name: - - netplan.io - state: present - when: main_file.stat.exists - - - name: Remove existing configuration - file: - path: "/etc/netplan/{{ item }}" - state: absent - loop: - - 00-snapd-config.yaml - - 00-installer-config.yaml - - 10-lxc.yaml - - 50-cloud-init.yaml - when: main_file.stat.exists - notify: Apply netplan - - - name: Transfer netplan configuration - copy: - src: ../data/netplan/{{ inventory_hostname }}.yaml - dest: /etc/netplan/00-ansible-main.yaml - mode: 0600 - when: main_file.stat.exists - notify: Apply netplan - - handlers: - - name: Apply netplan - shell: netplan apply diff --git a/ansible/books/nvme.yaml b/ansible/books/nvme.yaml deleted file mode 100644 index 2d9ec1f..0000000 --- a/ansible/books/nvme.yaml +++ /dev/null @@ -1,33 +0,0 @@ ---- -- name: NVME - Install packages and host config - hosts: all - order: shuffle - gather_facts: yes - gather_subset: - - "distribution_release" - vars: - task_targets: "{{ nvme_targets | default([]) }}" - any_errors_fatal: true - tasks: - - name: Install the NVME packages - ansible.builtin.package: - name: - - nvme-cli - state: present - when: 'task_targets | length > 0' - - - name: Configure NVME discovery - template: - src: ../files/nvme/discovery.conf.tpl - dest: /etc/nvme/discovery.conf - when: 'task_targets | length > 0' - notify: - - Discover NVME targets - - Connect NVME targets - - handlers: - - name: Discover NVME targets - shell: nvme discover - - - name: Connect NVME targets - shell: nvme connect-all diff --git a/ansible/books/ovn.yaml b/ansible/books/ovn.yaml deleted file mode 100644 index 5bdc8bc..0000000 --- a/ansible/books/ovn.yaml +++ /dev/null @@ -1,418 +0,0 @@ ---- -- name: OVN - Generate PKI certificates (central) - hosts: all - order: shuffle - gather_facts: no - vars: - task_clients: "{{ ovn_clients | default([]) }}" - task_name: "{{ ovn_name | default('') }}" - task_pki_path: "../data/ovn/{{ task_name }}/" - task_roles: "{{ ovn_roles | default([]) }}" - any_errors_fatal: true - tasks: - - name: Create cluster directory - delegate_to: 127.0.0.1 - file: - path: "../data/ovn/{{ task_name }}" - mode: 0755 - state: directory - throttle: 1 - when: '"central" in task_roles or "host" in task_roles' - - - name: Create CA private key - delegate_to: 127.0.0.1 - community.crypto.openssl_privatekey: - path: "{{ task_pki_path }}/ca.key" - register: ca_key - throttle: 1 - when: '"central" in task_roles or "host" in task_roles' - - - name: Create CA signing request - delegate_to: 127.0.0.1 - community.crypto.openssl_csr_pipe: - privatekey_path: "{{ task_pki_path }}/ca.key" - common_name: "OVN CA for {{ task_name }}" - use_common_name_for_san: false - basic_constraints: - - 'CA:TRUE' - basic_constraints_critical: true - key_usage: - - keyCertSign - key_usage_critical: true - register: ca_csr - when: "ca_key.changed" - throttle: 1 - - - name: Issue CA certificate - delegate_to: 127.0.0.1 - community.crypto.x509_certificate: - path: "{{ task_pki_path }}/ca.crt" - csr_content: "{{ ca_csr.csr }}" - privatekey_path: "{{ task_pki_path }}/ca.key" - provider: selfsigned - when: 
"ca_csr.changed" - throttle: 1 - - - name: Create server keys - delegate_to: 127.0.0.1 - community.crypto.openssl_privatekey: - path: "{{ task_pki_path }}/{{ inventory_hostname }}.key" - register: cert_key - when: 'task_roles | length > 0' - - - name: Create server signing request - delegate_to: 127.0.0.1 - community.crypto.openssl_csr_pipe: - privatekey_path: "{{ task_pki_path }}/{{ inventory_hostname }}.key" - common_name: "OVN certificate for {{ inventory_hostname }}" - use_common_name_for_san: false - register: cert_csr - when: "cert_key.changed" - - - name: Issue server certificate - delegate_to: 127.0.0.1 - community.crypto.x509_certificate: - path: "{{ task_pki_path }}/{{ inventory_hostname }}.crt" - csr_content: "{{ cert_csr.csr }}" - ownca_path: "{{ task_pki_path }}/ca.crt" - ownca_privatekey_path: "{{ task_pki_path }}/ca.key" - ownca_not_after: "+3650d" - ownca_not_before: "-1d" - provider: ownca - when: "cert_csr.changed" - throttle: 1 - - - name: Create client keys - delegate_to: 127.0.0.1 - community.crypto.openssl_privatekey: - path: "{{ task_pki_path }}/{{ item }}.key" - register: client_key - when: 'task_roles | length > 0' - loop: "{{ task_clients }}" - throttle: 1 - - - name: Create client signing request - delegate_to: 127.0.0.1 - community.crypto.openssl_csr_pipe: - privatekey_path: "{{ task_pki_path }}/{{ item.item }}.key" - common_name: "OVN client certificate for {{ item.item }}" - use_common_name_for_san: false - register: client_csr - loop: "{{ client_key.results }}" - when: "client_key.changed" - - - name: Issue client certificate - delegate_to: 127.0.0.1 - community.crypto.x509_certificate: - path: "{{ task_pki_path }}/{{ item.item.item }}.crt" - csr_content: "{{ item.csr }}" - ownca_path: "{{ task_pki_path }}/ca.crt" - ownca_privatekey_path: "{{ task_pki_path }}/ca.key" - ownca_not_after: "+3650d" - ownca_not_before: "-1d" - provider: ownca - loop: "{{ client_csr.results }}" - when: "client_csr.changed" - throttle: 1 - -- name: OVN - Add package repository - hosts: all - order: shuffle - gather_facts: yes - gather_subset: - - "distribution_release" - vars: - task_release: "{{ ovn_release | default('distro') }}" - task_roles: "{{ ovn_roles | default([]) }}" - any_errors_fatal: true - tasks: - - name: Check if distribution is supported - meta: end_play - when: 'ansible_distribution not in ("Ubuntu", "Debian")' - - - name: Create apt keyring path - file: - path: /etc/apt/keyrings/ - mode: 0755 - state: directory - when: 'task_roles|length > 0 and task_release != "distro"' - - - name: Add PPA GPG key - copy: - src: ../files/ovn/ovn-ppa.asc - dest: /etc/apt/keyrings/ansible-ovn-ppa.asc - notify: Update apt - when: 'task_roles|length > 0 and task_release == "ppa"' - - - name: Get DPKG architecture - shell: dpkg --print-architecture - register: dpkg_architecture - changed_when: false - check_mode: no - when: 'task_roles|length > 0 and task_release != "distro"' - - - name: Add OVN PPA package source - template: - src: ../files/ovn/ovn-ppa.sources.tpl - dest: /etc/apt/sources.list.d/ansible-ovn-ppa.sources - notify: Update apt - when: 'task_roles|length > 0 and task_release == "ppa"' - - handlers: - - name: Update apt - apt: - force_apt_get: yes - update_cache: yes - cache_valid_time: 0 - -- name: OVN - Install packages - hosts: all - order: shuffle - gather_facts: yes - gather_subset: - - "default_ipv4" - - "default_ipv6" - vars: - task_ip_address: "{{ ovn_ip_address | default(ansible_default_ipv6['address'] | default(ansible_default_ipv4['address'])) }}" - task_name: 
"{{ ovn_name | default('') }}" - task_release: "{{ ovn_release | default('distro') }}" - task_roles: "{{ ovn_roles | default([]) }}" - - task_central_northbound: "{{ lookup('template', '../files/ovn/ovn-central.servers.tpl') | from_yaml | map('regex_replace', '^(.*)$', 'ssl:[\\1]:6641') | join(',') }}" - task_central_southbound: "{{ lookup('template', '../files/ovn/ovn-central.servers.tpl') | from_yaml | map('regex_replace', '^(.*)$', 'ssl:[\\1]:6642') | join(',') }}" - task_ic_northbound: "{{ lookup('template', '../files/ovn/ovn-ic.servers.tpl') | from_yaml | map('regex_replace', '^(.*)$', 'ssl:[\\1]:6645') | join(',') }}" - task_ic_southbound: "{{ lookup('template', '../files/ovn/ovn-ic.servers.tpl') | from_yaml | map('regex_replace', '^(.*)$', 'ssl:[\\1]:6646') | join(',') }}" - any_errors_fatal: true - tasks: - - name: Check if distribution is supported - meta: end_play - when: 'ansible_distribution not in ("Ubuntu", "Debian")' - - - name: Install the OVN central package - apt: - name: - - ovn-central - install_recommends: no - state: present - when: '"central" in task_roles' - - - name: Install the OVN IC database package - apt: - name: - - ovn-ic-db - install_recommends: no - state: present - when: '"ic-db" in task_roles' - - - name: Install the OVN IC package - apt: - name: - - ovn-ic - install_recommends: no - state: present - when: '"ic" in task_roles' - - - name: Install the OVN host package - apt: - name: - - ovn-host - install_recommends: no - state: present - notify: - - Configure OVS - - Enable OVN IC gateway - when: '"host" in task_roles' - - handlers: - - name: Configure OVS - shell: ovs-vsctl set open_vswitch . external_ids:hostname={{ inventory_hostname }} external_ids:ovn-remote={{ task_central_southbound }} external_ids:ovn-encap-type=geneve external_ids:ovn-encap-ip={{ task_ip_address }} - - - name: Enable OVN IC gateway - shell: - cmd: "ovs-vsctl set open_vswitch . 
external_ids:ovn-is-interconn=true" - when: '"ic-gateway" in task_roles' - -- name: OVN - Set up daemon configuration - hosts: all - order: shuffle - gather_facts: yes - gather_subset: - - "default_ipv4" - - "default_ipv6" - vars: - task_ip_address: "{{ ovn_ip_address | default(ansible_default_ipv6['address'] | default(ansible_default_ipv4['address'])) }}" - task_az_name: "{{ ovn_az_name | default('') }}" - task_name: "{{ ovn_name | default('') }}" - task_roles: "{{ ovn_roles | default([]) }}" - - task_central_northbound: "{{ lookup('template', '../files/ovn/ovn-central.servers.tpl') | from_yaml | map('regex_replace', '^(.*)$', 'ssl:[\\1]:6641') | join(',') }}" - task_central_southbound: "{{ lookup('template', '../files/ovn/ovn-central.servers.tpl') | from_yaml | map('regex_replace', '^(.*)$', 'ssl:[\\1]:6642') | join(',') }}" - task_ic_northbound: "{{ lookup('template', '../files/ovn/ovn-ic.servers.tpl') | from_yaml | map('regex_replace', '^(.*)$', 'ssl:[\\1]:6645') | join(',') }}" - task_ic_southbound: "{{ lookup('template', '../files/ovn/ovn-ic.servers.tpl') | from_yaml | map('regex_replace', '^(.*)$', 'ssl:[\\1]:6646') | join(',') }}" - task_pki_path: "../data/ovn/{{ task_name }}/" - any_errors_fatal: true - tasks: - - name: Check if distribution is supported - meta: end_play - when: 'ansible_distribution not in ("Ubuntu", "Debian")' - - - name: Create OVN config directory - file: - path: /etc/ovn - mode: 0755 - state: directory - when: 'task_roles | length > 0' - - - name: Transfer OVN CA certificate - copy: - src: "{{ task_pki_path }}/ca.crt" - dest: /etc/ovn/{{ task_name }}.ca.crt - mode: 0644 - when: 'task_roles | length > 0' - - - name: Transfer OVN server certificate - copy: - src: "{{ task_pki_path }}/{{ inventory_hostname }}.crt" - dest: /etc/ovn/{{ task_name }}.server.crt - mode: 0644 - when: 'task_roles | length > 0' - - - name: Transfer OVN server key - copy: - src: "{{ task_pki_path }}/{{ inventory_hostname }}.key" - dest: /etc/ovn/{{ task_name }}.server.key - mode: 0600 - when: 'task_roles | length > 0' - notify: - - Configure OVN central northbound DB for SSL (certs) - - Configure OVN central northbound DB for SSL (ports) - - Configure OVN central southbound DB for SSL (certs) - - Configure OVN central southbound DB for SSL (ports) - - Configure OVN IC northbound DB for SSL (certs) - - Configure OVN IC northbound DB for SSL (ports) - - Configure OVN IC southbound DB for SSL (certs) - - Configure OVN IC southbound DB for SSL (ports) - - - name: Configure OVN central database - template: - src: ../files/ovn/ovn-central.tpl - dest: /etc/default/ovn-central - notify: - - Restart OVN central - - Configure OVN AZ name - - Enable OVN IC route sharing - when: '"central" in task_roles' - - - name: Configure OVN host - template: - src: ../files/ovn/ovn-host.tpl - dest: /etc/default/ovn-host - notify: - - Restart OVN host - when: '"host" in task_roles' - - - name: Create OVN IC override directory - file: - path: /etc/systemd/system/ovn-ic.service.d - mode: 0755 - state: directory - when: '"ic" in task_roles' - - - name: Transfer OVN IC override - copy: - content: | - [Service] - EnvironmentFile=-/etc/default/ovn-ic - ExecStart= - ExecStart=/usr/share/ovn/scripts/ovn-ctl start_ic --no-monitor $OVN_CTL_OPTS - dest: /etc/systemd/system/ovn-ic.service.d/ansible.conf - notify: Restart OVN IC - when: '"ic" in task_roles' - - - name: Configure OVN IC database - template: - src: ../files/ovn/ovn-ic.tpl - dest: /etc/default/ovn-ic - notify: - - Restart OVN IC databases - - Restart OVN IC - 
when: '"ic" in task_roles or "ic-db" in task_roles' - - - name: Transfer OVN aliases - template: - src: ../files/ovn/alias.sh.tpl - dest: /etc/ovn/alias.sh - when: 'task_roles | length > 0' - handlers: - - name: Configure OVN central northbound DB for SSL (certs) - shell: - cmd: "ovn-nbctl set-ssl /etc/ovn/{{ task_name }}.server.key /etc/ovn/{{ task_name }}.server.crt /etc/ovn/{{ task_name }}.ca.crt" - when: '"central" in task_roles' - - - name: Configure OVN central northbound DB for SSL (ports) - shell: - cmd: "ovn-nbctl set-connection pssl:6641:[::]" - when: '"central" in task_roles' - - - name: Configure OVN central southbound DB for SSL (certs) - shell: - cmd: "ovn-sbctl set-ssl /etc/ovn/{{ task_name }}.server.key /etc/ovn/{{ task_name }}.server.crt /etc/ovn/{{ task_name }}.ca.crt" - when: '"central" in task_roles' - - - name: Configure OVN central southbound DB for SSL (ports) - shell: - cmd: "ovn-sbctl set-connection pssl:6642:[::]" - when: '"central" in task_roles' - - - name: Configure OVN IC northbound DB for SSL (certs) - shell: - cmd: "ovn-ic-nbctl set-ssl /etc/ovn/{{ task_name }}.server.key /etc/ovn/{{ task_name }}.server.crt /etc/ovn/{{ task_name }}.ca.crt" - when: '"ic-db" in task_roles' - - - name: Configure OVN IC northbound DB for SSL (ports) - shell: - cmd: "ovn-ic-nbctl set-connection pssl:6645:[::]" - when: '"ic-db" in task_roles' - - - name: Configure OVN IC southbound DB for SSL (certs) - shell: - cmd: "ovn-ic-sbctl set-ssl /etc/ovn/{{ task_name }}.server.key /etc/ovn/{{ task_name }}.server.crt /etc/ovn/{{ task_name }}.ca.crt" - when: '"ic-db" in task_roles' - - - name: Configure OVN IC southbound DB for SSL (ports) - shell: - cmd: "ovn-ic-sbctl set-connection pssl:6646:[::]" - when: '"ic-db" in task_roles' - - - name: Restart OVN central - systemd: - name: ovn-central.service - state: restarted - - - name: Restart OVN host - systemd: - name: ovn-host.service - state: restarted - - - name: Restart OVN IC - systemd: - daemon_reload: true - name: ovn-ic.service - state: restarted - when: '"ic" in task_roles' - - - name: Restart OVN IC databases - systemd: - name: ovn-ic-db.service - state: restarted - when: '"ic-db" in task_roles' - - - name: Configure OVN AZ name - shell: - cmd: "ovn-nbctl --db={{ task_central_northbound }} -c /etc/ovn/{{ task_name }}.server.crt -p /etc/ovn/{{ task_name }}.server.key -C /etc/ovn/{{ task_name }}.ca.crt set NB_Global . name={{ task_az_name }}" - when: '"central" in task_roles and task_az_name' - - - name: Enable OVN IC route sharing - shell: - cmd: "ovn-nbctl --db={{ task_central_northbound }} -c /etc/ovn/{{ task_name }}.server.crt -p /etc/ovn/{{ task_name }}.server.key -C /etc/ovn/{{ task_name }}.ca.crt set NB_Global . 
options:ic-route-adv=true options:ic-route-learn=true" - when: '"central" in task_roles and task_az_name' diff --git a/ansible/deploy.yaml b/ansible/deploy.yaml deleted file mode 100644 index 9de2a28..0000000 --- a/ansible/deploy.yaml +++ /dev/null @@ -1,9 +0,0 @@ -- import_playbook: books/local.early.yaml -- import_playbook: books/netplan.yaml -- import_playbook: books/environment.yaml -- import_playbook: books/nvme.yaml -- import_playbook: books/ceph.yaml -- import_playbook: books/lvmcluster.yaml -- import_playbook: books/ovn.yaml -- import_playbook: books/incus.yaml -- import_playbook: books/local.late.yaml diff --git a/ansible/files/incus/incus.servers.tpl b/ansible/files/incus/incus.servers.tpl deleted file mode 100644 index fa87357..0000000 --- a/ansible/files/incus/incus.servers.tpl +++ /dev/null @@ -1,5 +0,0 @@ -{% for host in vars['ansible_play_hosts'] | sort %} -{% if hostvars[host]['incus_name'] == task_name and "cluster" in hostvars[host]['incus_roles'] %} -- {{ host }} -{% endif %} -{% endfor %} diff --git a/ansible/files/ovn/alias.sh.tpl b/ansible/files/ovn/alias.sh.tpl deleted file mode 100644 index ea6baf7..0000000 --- a/ansible/files/ovn/alias.sh.tpl +++ /dev/null @@ -1,9 +0,0 @@ -# Managed by Ansible, do not modify. -alias ovn-nbctl="/usr/bin/ovn-nbctl --db={{ task_central_northbound }} -c /etc/ovn/{{ task_name }}.server.crt -p /etc/ovn/{{ task_name }}.server.key -C /etc/ovn/{{ task_name }}.ca.crt" -alias ovn-sbctl="/usr/bin/ovn-sbctl --db={{ task_central_southbound }} -c /etc/ovn/{{ task_name }}.server.crt -p /etc/ovn/{{ task_name }}.server.key -C /etc/ovn/{{ task_name }}.ca.crt" -{% if task_ic_northbound %} -alias ovn-ic-nbctl="/usr/bin/ovn-ic-nbctl --db={{ task_ic_northbound }} -c /etc/ovn/{{ task_name }}.server.crt -p /etc/ovn/{{ task_name }}.server.key -C /etc/ovn/{{ task_name }}.ca.crt" -{% endif %} -{% if task_ic_southbound %} -alias ovn-ic-sbctl="/usr/bin/ovn-ic-sbctl --db={{ task_ic_southbound }} -c /etc/ovn/{{ task_name }}.server.crt -p /etc/ovn/{{ task_name }}.server.key -C /etc/ovn/{{ task_name }}.ca.crt" -{% endif %} diff --git a/ansible/files/ovn/ovn-central.tpl b/ansible/files/ovn/ovn-central.tpl deleted file mode 100644 index 9ab4a9d..0000000 --- a/ansible/files/ovn/ovn-central.tpl +++ /dev/null @@ -1,22 +0,0 @@ -{% set servers = lookup('template', '../files/ovn/ovn-central.servers.tpl') | from_yaml -%} -# Managed by Ansible, do not modify. - -# This is a POSIX shell fragment -*- sh -*- - -# OVN_CTL_OPTS: Extra options to pass to ovs-ctl. This is, for example, -# a suitable place to specify --ovn-northd-wrapper=valgrind. 
- -OVN_CTL_OPTS="\ - --db-nb-create-insecure-remote=no \ - --db-sb-create-insecure-remote=no \ - --db-nb-addr=[{{ task_ip_address }}] \ - --db-sb-addr=[{{ task_ip_address }}] \ - --db-nb-cluster-local-addr=[{{ task_ip_address }}] \ - --db-sb-cluster-local-addr=[{{ task_ip_address }}] \ - --ovn-northd-ssl-key=/etc/ovn/{{ task_name }}.server.key \ - --ovn-northd-ssl-cert=/etc/ovn/{{ task_name }}.server.crt \ - --ovn-northd-ssl-ca-cert=/etc/ovn/{{ task_name }}.ca.crt \ - --ovn-northd-nb-db={{ task_central_northbound }} \ - --ovn-northd-sb-db={{ task_central_southbound }}{% if task_ip_address != servers[0] %} \ - --db-nb-cluster-remote-addr=[{{ servers[0] }}] \ - --db-sb-cluster-remote-addr=[{{ servers[0] }}]{% endif %}" diff --git a/ansible/files/ovn/ovn-ic.tpl b/ansible/files/ovn/ovn-ic.tpl deleted file mode 100644 index ce3c84f..0000000 --- a/ansible/files/ovn/ovn-ic.tpl +++ /dev/null @@ -1,24 +0,0 @@ -{% set servers = lookup('template', '../files/ovn/ovn-ic.servers.tpl') | from_yaml -%} -# Managed by Ansible, do not modify. - -# This is a POSIX shell fragment -*- sh -*- - -# OVN_CTL_OPTS: Extra options to pass to ovs-ctl. This is, for example, -# a suitable place to specify --ovn-northd-wrapper=valgrind. - -OVN_CTL_OPTS="\ - --db-ic-nb-create-insecure-remote=no \ - --db-ic-sb-create-insecure-remote=no \ - --db-ic-nb-addr=[{{ task_ip_address }}] \ - --db-ic-sb-addr=[{{ task_ip_address }}] \ - --db-ic-nb-cluster-local-addr=[{{ task_ip_address }}] \ - --db-ic-sb-cluster-local-addr=[{{ task_ip_address }}] \ - --ovn-ic-ssl-key=/etc/ovn/{{ task_name }}.server.key \ - --ovn-ic-ssl-cert=/etc/ovn/{{ task_name }}.server.crt \ - --ovn-ic-ssl-ca-cert=/etc/ovn/{{ task_name }}.ca.crt \ - --ovn-northd-nb-db={{ task_central_northbound }} \ - --ovn-northd-sb-db={{ task_central_southbound }} \ - --ovn-ic-nb-db={{ task_ic_northbound }} \ - --ovn-ic-sb-db={{ task_ic_southbound }}{% if task_ip_address != servers[0] %} \ - --db-ic-nb-cluster-remote-addr=[{{ servers[0] }}] - --db-ic-sb-cluster-remote-addr=[{{ servers[0] }}]{% endif %}" diff --git a/ansible/data/ceph/.placeholder b/data/ceph/.placeholder similarity index 100% rename from ansible/data/ceph/.placeholder rename to data/ceph/.placeholder diff --git a/ansible/data/lvmcluster/.placeholder b/data/lvmcluster/.placeholder similarity index 100% rename from ansible/data/lvmcluster/.placeholder rename to data/lvmcluster/.placeholder diff --git a/ansible/data/ovn/.placeholder b/data/ovn/.placeholder similarity index 100% rename from ansible/data/ovn/.placeholder rename to data/ovn/.placeholder diff --git a/deploy.yaml b/deploy.yaml new file mode 100644 index 0000000..c1c8d8c --- /dev/null +++ b/deploy.yaml @@ -0,0 +1,74 @@ +--- + +- name: Netplan role + hosts: all + order: shuffle + gather_facts: yes + gather_subset: + - "distribution_release" + roles: + - netplan + +- name: Environment role + hosts: all + order: shuffle + gather_facts: yes + gather_subset: + - "distribution_release" + roles: + - environment + +- name: NVMe role + hosts: all + order: shuffle + gather_facts: yes + gather_subset: + - "distribution_release" + roles: + - nvme + +- name: Ceph role + hosts: all + order: shuffle + gather_facts: yes + gather_subset: + - "distribution_release" + - "default_ipv4" + - "default_ipv6" + + roles: + - ceph + +- name: LVM Cluster role + hosts: all + order: shuffle + gather_facts: yes + gather_subset: + - "distribution_release" + - "default_ipv4" + - "default_ipv6" + roles: + - lvmcluster + +- name: OVN role + hosts: all + order: shuffle + gather_facts: yes + 
gather_subset: + - "distribution_release" + - "default_ipv4" + - "default_ipv6" + roles: + - ovn + +- name: Incus role + hosts: all + order: shuffle + gather_facts: yes + gather_subset: + - "distribution_release" + - "default_ipv4" + - "default_ipv6" + + roles: + - incus diff --git a/galaxy.yml b/galaxy.yml new file mode 100644 index 0000000..e1a2a93 --- /dev/null +++ b/galaxy.yml @@ -0,0 +1,71 @@ +#SPDX-License-Identifier: MIT-0 +### REQUIRED +# The namespace of the collection. This can be a company/brand/organization or product namespace under which all +# content lives. May only contain alphanumeric lowercase characters and underscores. Namespaces cannot start with +# underscores or numbers and cannot contain consecutive underscores +namespace: lxc + +# The name of the collection. Has the same character restrictions as 'namespace' +name: incus_deploy + +# The version of the collection. Must be compatible with semantic versioning +version: 0.0.0-dev0 + +# The path to the Markdown (.md) readme file. This path is relative to the root of the collection +readme: README.md + +# A list of the collection's content authors. Can be just the name or in the format 'Full Name (url) +# @nicks:irc/im.site#channel' +authors: +- Stéphane Graber +- Incus-deploy contributors + +### OPTIONAL but strongly recommended +# A short summary description of the collection +description: This is a collection of Ansible playbooks, Terraform configurations and scripts to deploy and operate Incus clusters. + +# Either a single license or a list of licenses for content inside of a collection. Ansible Galaxy currently only +# accepts L(SPDX,https://spdx.org/licenses/) licenses. This key is mutually exclusive with 'license_file' +license: +- Apache-2.0 + +# The path to the license file for the collection. This path is relative to the root of the collection. This key is +# mutually exclusive with 'license' +#license_file: '' + +# A list of tags you want to associate with the collection for indexing/searching. A tag name has the same character +# requirements as 'namespace' and 'name' +tags: + - incus + +# Collections that this collection requires to be installed for it to be usable. The key of the dict is the +# collection label 'namespace.name'. The value is a version range +# L(specifiers,https://python-semanticversion.readthedocs.io/en/latest/#requirement-specification). Multiple version +# range specifiers can be set and are separated by ',' +dependencies: {} + +# The URL of the originating SCM repository +repository: https://github.com/lxc/incus-deploy + +# The URL to any online docs +documentation: https://linuxcontainers.org/incus/ + +# The URL to the homepage of the collection/project +homepage: https://linuxcontainers.org/incus/ + +# The URL to the collection issue tracker +issues: https://github.com/lxc/incus-deploy/issues + +# A list of file glob-like patterns used to filter any files or directories that should not be included in the build +# artifact. A pattern is matched from the relative path of the file or directory of the collection directory. This +# uses 'fnmatch' to match the files or directories. Some directories and files like 'galaxy.yml', '*.pyc', '*.retry', +# and '.git' are always filtered. Mutually exclusive with 'manifest' +build_ignore: [] + +# A dict controlling use of manifest directives used in building the collection artifact. The key 'directives' is a +# list of MANIFEST.in style +# L(directives,https://packaging.python.org/en/latest/guides/using-manifest-in/#manifest-in-commands). 
The key +# 'omit_default_directives' is a boolean that controls whether the default directives are used. Mutually exclusive +# with 'build_ignore' +# manifest: null + diff --git a/ansible/hosts.yaml.example.centos b/inventories/baremetal-centos.yaml similarity index 100% rename from ansible/hosts.yaml.example.centos rename to inventories/baremetal-centos.yaml diff --git a/ansible/hosts.yaml.example b/inventories/baremetal.yaml similarity index 100% rename from ansible/hosts.yaml.example rename to inventories/baremetal.yaml diff --git a/meta/runtime.yml b/meta/runtime.yml new file mode 100644 index 0000000..62ced21 --- /dev/null +++ b/meta/runtime.yml @@ -0,0 +1,53 @@ +#SPDX-License-Identifier: Apache-2.0 +--- +# Collections must specify a minimum required ansible version to upload +# to galaxy +# requires_ansible: '>=2.9.10' + +# Content that Ansible needs to load from another location or that has +# been deprecated/removed +# plugin_routing: +# action: +# redirected_plugin_name: +# redirect: ns.col.new_location +# deprecated_plugin_name: +# deprecation: +# removal_version: "4.0.0" +# warning_text: | +# See the porting guide on how to update your playbook to +# use ns.col.another_plugin instead. +# removed_plugin_name: +# tombstone: +# removal_version: "2.0.0" +# warning_text: | +# See the porting guide on how to update your playbook to +# use ns.col.another_plugin instead. +# become: +# cache: +# callback: +# cliconf: +# connection: +# doc_fragments: +# filter: +# httpapi: +# inventory: +# lookup: +# module_utils: +# modules: +# netconf: +# shell: +# strategy: +# terminal: +# test: +# vars: + +# Python import statements that Ansible needs to load from another location +# import_redirection: +# ansible_collections.ns.col.plugins.module_utils.old_location: +# redirect: ansible_collections.ns.col.plugins.module_utils.new_location + +# Groups of actions/modules that take a common set of options +# action_groups: +# group_name: +# - module1 +# - module2 diff --git a/ansible/tasks/update-packages.yaml b/playbooks/update_packages.yaml similarity index 100% rename from ansible/tasks/update-packages.yaml rename to playbooks/update_packages.yaml diff --git a/ansible/plugins/.placeholder b/plugins/.placeholder similarity index 100% rename from ansible/plugins/.placeholder rename to plugins/.placeholder diff --git a/plugins/README.md b/plugins/README.md new file mode 100644 index 0000000..615f703 --- /dev/null +++ b/plugins/README.md @@ -0,0 +1,31 @@ +# Collections Plugins Directory + +This directory can be used to ship various plugins inside an Ansible collection. Each plugin is placed in a folder that +is named after the type of plugin it is in. It can also include the `module_utils` and `modules` directory that +would contain module utils and modules respectively. + +Here is an example directory of the majority of plugins currently supported by Ansible: + +``` +└── plugins + ├── action + ├── become + ├── cache + ├── callback + ├── cliconf + ├── connection + ├── filter + ├── httpapi + ├── inventory + ├── lookup + ├── module_utils + ├── modules + ├── netconf + ├── shell + ├── strategy + ├── terminal + ├── test + └── vars +``` + +A full list of plugin types can be found at [Working With Plugins](https://docs.ansible.com/ansible-core/2.18/plugins/plugins.html). 
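The collection also ships its own `incus` connection plugin (the `plugins/connection/incus.py` rename just below). As a minimal sketch of how a host could opt into it, assuming the plugin resolves by its file name and using a purely hypothetical host entry:

```yaml
# Hypothetical inventory fragment -- "server01" is a placeholder host.
# It selects the bundled incus connection plugin by name instead of SSH.
all:
  hosts:
    server01:
      ansible_connection: incus
```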
diff --git a/ansible/plugins/connection/incus.py b/plugins/connection/incus.py similarity index 100% rename from ansible/plugins/connection/incus.py rename to plugins/connection/incus.py diff --git a/roles/ceph/README.md b/roles/ceph/README.md new file mode 100644 index 0000000..08e63f5 --- /dev/null +++ b/roles/ceph/README.md @@ -0,0 +1,24 @@ +# Ceph Role +## Variables + + - `ceph_disks`: List of disks to include in the Ceph cluster (type: list of object) + - `data`: Path to the disk, recommended to be a /dev/disk/by-id/ path (type: string) + - `db`: Path to a disk or partition to use for the RocksDB database, recommended to be a /dev/disk/by-id/ path (type: string) + - `ceph_fsid`: UUID of the Ceph cluster (use `uuidgen` or similar to generate) (**required**, type: string) + - `ceph_ip_address`: Override for the server's IP address (used to generate ceph.conf) (type: string) + - `ceph_keyrings`: List of keyrings to deploy on the system (type: list of string, default: ["admin"]) + - `ceph_network_private`: CIDR subnet of the backend network (type: string) + - `ceph_network_public`: CIDR subnet of the consumer facing network (type: string) + - `ceph_rbd_cache`: Amount of memory for caching of librbd client requests (type: string) + - `ceph_rbd_cache_max`: Maximum amount of memory to be used for librbd client request caching (type: string) + - `ceph_rbd_cache_target`: Ideal amount of memory used for librbd client request caching (type: string) + - `ceph_release`: Ceph release to deploy, can be `distro` to use the distribution version (type: string, default: `squid`) + - `ceph_roles`: List of roles the server should have in the Ceph cluster (**required**, type: list of string): + - `client`: Ceph client, gets ceph.conf and keyring + - `mds`: Ceph Metadata Server, used for exporting distributed filesystems (CephFS) + - `mgr`: Ceph Manager server, used to process background management tasks and services + - `mon`: Ceph Monitor server, provides the core Ceph API used by all other services + - `osd`: Ceph Object Storage Daemon, used to export disks to the cluster + - `rbd-mirror`: Ceph Rados Block Device mirroring server, used for cross-cluster replication + - `rgw`: A RADOS (object) Gateway, used to expose an S3 API on top of Ceph objects + diff --git a/roles/ceph/defaults/main.yaml b/roles/ceph/defaults/main.yaml new file mode 100644 index 0000000..abe4aa6 --- /dev/null +++ b/roles/ceph/defaults/main.yaml @@ -0,0 +1,11 @@ +--- +ceph_release: 'squid' +ceph_roles: [] +ceph_fsid: '' +ceph_keyrings: ['admin'] +ceph_network_public: '' +ceph_network_private: '' +ceph_rbd_cache: '128Mi' +ceph_rbd_cache_max: '96Mi' +ceph_rbd_cache_target: '64Mi' +ceph_disks: [] diff --git a/ansible/files/ceph/ceph.asc b/roles/ceph/files/ceph.asc similarity index 100% rename from ansible/files/ceph/ceph.asc rename to roles/ceph/files/ceph.asc diff --git a/roles/ceph/handlers/main.yaml b/roles/ceph/handlers/main.yaml new file mode 100644 index 0000000..956eafd --- /dev/null +++ b/roles/ceph/handlers/main.yaml @@ -0,0 +1,36 @@ +--- +- name: Update apt + apt: + force_apt_get: yes + update_cache: yes + cache_valid_time: 0 + +- name: Add key to client.admin keyring + delegate_to: 127.0.0.1 + shell: + cmd: ceph-authtool {{ ceph_mon_keyring }} --import-keyring {{ ceph_client_admin_keyring }} + +- name: Add key to bootstrap-osd keyring + delegate_to: 127.0.0.1 + shell: + cmd: ceph-authtool {{ ceph_mon_keyring }} --import-keyring {{ ceph_bootstrap_osd_keyring }} + +- name: Add nodes to mon map + delegate_to: 127.0.0.1 + shell: +
cmd: monmaptool --add {{ item.name }} {{ item.ip }} {{ ceph_mon_map }} + loop: "{{ lookup('template', 'ceph.monitors.j2') | from_yaml | default([]) }}" + +- name: Restart Ceph + systemd: + name: ceph.target + state: restarted + +- name: Enable msgr2 + shell: + cmd: ceph mon enable-msgr2 + +- name: Disable insecure_global_id_reclaim + shell: + cmd: ceph config set global auth_allow_insecure_global_id_reclaim false + diff --git a/roles/ceph/tasks/config.yaml b/roles/ceph/tasks/config.yaml new file mode 100644 index 0000000..370098c --- /dev/null +++ b/roles/ceph/tasks/config.yaml @@ -0,0 +1,86 @@ +--- + +- name: Transfer the cluster configuration + template: + src: ceph.conf.j2 + dest: /etc/ceph/ceph.conf + notify: Restart Ceph + when: 'ceph_roles|length > 0' + +- name: Create main storage directory + file: + path: /var/lib/ceph + owner: ceph + group: ceph + mode: 0750 + state: directory + when: 'ceph_roles|length > 0 and (ceph_roles|length > 1 or ceph_roles[0] != "client")' + +- name: Create monitor bootstrap path + file: + path: /var/lib/ceph/bootstrap-mon + owner: ceph + group: ceph + mode: 0770 + state: directory + when: '"mon" in ceph_roles' + +- name: Create OSD bootstrap path + file: + path: /var/lib/ceph/bootstrap-osd + owner: ceph + group: ceph + mode: 0770 + state: directory + when: '"osd" in ceph_roles' + +- name: Transfer main admin keyring + copy: + src: '{{ ceph_client_admin_keyring }}' + dest: /etc/ceph/ceph.client.admin.keyring + owner: ceph + group: ceph + mode: 0660 + notify: Restart Ceph + when: '("client" in ceph_roles and "admin" in ceph_keyrings) or "mon" in ceph_roles' + +- name: Transfer additional client keyrings + copy: + src: 'data/ceph/cluster.{{ ceph_fsid }}.client.{{ item }}.keyring' + dest: '/etc/ceph/ceph.client.{{ item }}.keyring' + owner: ceph + group: ceph + mode: 0660 + with_items: + '{{ ceph_keyrings | difference(["admin"]) }}' + when: '"client" in ceph_roles' + +- name: Transfer bootstrap mon keyring + copy: + src: '{{ ceph_mon_keyring }}' + dest: /var/lib/ceph/bootstrap-mon/ceph.keyring + owner: ceph + group: ceph + mode: 0660 + when: '"mon" in ceph_roles' + +- name: Transfer bootstrap mon map + copy: + src: '{{ ceph_mon_map }}' + dest: /var/lib/ceph/bootstrap-mon/ceph.monmap + owner: ceph + group: ceph + mode: 0660 + when: '"mon" in ceph_roles' + +- name: Transfer bootstrap OSD keyring + copy: + src: '{{ ceph_bootstrap_osd_keyring }}' + dest: /var/lib/ceph/bootstrap-osd/ceph.keyring + owner: ceph + group: ceph + mode: 0660 + when: '"osd" in ceph_roles' + +- name: Run all notified handlers + meta: flush_handlers diff --git a/roles/ceph/tasks/deploy_mds.yaml b/roles/ceph/tasks/deploy_mds.yaml new file mode 100644 index 0000000..5daac57 --- /dev/null +++ b/roles/ceph/tasks/deploy_mds.yaml @@ -0,0 +1,31 @@ +--- +- name: Create /var/lib/ceph/mds/ceph-{{ inventory_hostname_short }} + file: + path: /var/lib/ceph/mds/ceph-{{ inventory_hostname_short }} + owner: ceph + group: ceph + mode: 0770 + state: directory + register: deploy_mds + +- name: Create mds keyring + delegate_to: "{{ lookup('template', 'ceph.monitors.names.j2') | from_yaml | first }}" + shell: + cmd: ceph auth get-or-create mds.{{ inventory_hostname_short }} mon 'profile mds' mgr 'profile mds' mds 'allow *' osd 'allow *' + register: mds_keyring + when: 'deploy_mds.changed' + +- name: Transfer mds keyring + copy: + content: "{{ mds_keyring.stdout }}\n" + dest: "/var/lib/ceph/mds/ceph-{{ inventory_hostname_short }}/keyring" + owner: ceph + group: ceph + mode: 0660 + when: 'deploy_mds.changed' + +- 
name: Enable ceph mds + systemd: + enabled: yes + name: ceph-mds@{{ inventory_hostname_short }} + state: started diff --git a/roles/ceph/tasks/deploy_mgr.yaml b/roles/ceph/tasks/deploy_mgr.yaml new file mode 100644 index 0000000..fff7e60 --- /dev/null +++ b/roles/ceph/tasks/deploy_mgr.yaml @@ -0,0 +1,31 @@ +--- +- name: Create /var/lib/ceph/mgr/ceph-{{ inventory_hostname_short }} + file: + path: /var/lib/ceph/mgr/ceph-{{ inventory_hostname_short }} + owner: ceph + group: ceph + mode: 0770 + state: directory + register: deploy_mgr + +- name: Create mgr keyring + delegate_to: "{{ lookup('template', 'ceph.monitors.names.j2') | from_yaml | first }}" + shell: + cmd: ceph auth get-or-create mgr.{{ inventory_hostname_short }} mon 'allow profile mgr' osd 'allow *' mds 'allow *' + register: mgr_keyring + when: 'deploy_mgr.changed' + +- name: Transfer mgr keyring + copy: + content: "{{ mgr_keyring.stdout }}\n" + dest: "/var/lib/ceph/mgr/ceph-{{ inventory_hostname_short }}/keyring" + owner: ceph + group: ceph + mode: 0660 + when: 'deploy_mgr.changed' + +- name: Enable ceph mgr + systemd: + enabled: yes + name: ceph-mgr@{{ inventory_hostname_short }} + state: started diff --git a/roles/ceph/tasks/deploy_mon.yaml b/roles/ceph/tasks/deploy_mon.yaml new file mode 100644 index 0000000..3a89e07 --- /dev/null +++ b/roles/ceph/tasks/deploy_mon.yaml @@ -0,0 +1,17 @@ +--- +- name: Bootstrap Ceph mon + shell: + cmd: sudo -u ceph ceph-mon --mkfs -i {{ inventory_hostname_short }} --monmap /var/lib/ceph/bootstrap-mon/ceph.monmap --keyring /var/lib/ceph/bootstrap-mon/ceph.keyring + creates: /var/lib/ceph/mon/ceph-{{ inventory_hostname_short }}/keyring + notify: + - Enable msgr2 + - Disable insecure_global_id_reclaim + +- name: Enable and start Ceph mon + systemd: + enabled: yes + name: ceph-mon@{{ inventory_hostname_short }} + state: started + +- name: Run all notified handlers + meta: flush_handlers diff --git a/roles/ceph/tasks/deploy_osd.yaml b/roles/ceph/tasks/deploy_osd.yaml new file mode 100644 index 0000000..4d702ac --- /dev/null +++ b/roles/ceph/tasks/deploy_osd.yaml @@ -0,0 +1,12 @@ +--- +- name: Bootstrap Ceph OSD + shell: + cmd: ceph-volume lvm create --data /dev/disk/by-id/{{ item.data }}{% if "db" in item %} --block.db /dev/disk/by-id/{{ item.db }}{% endif %} + creates: /var/lib/ceph/osd/.{{ item.data }}.created + loop: '{{ ceph_disks }}' + +- name: Bootstrap Ceph OSD (stamp) + shell: + cmd: touch /var/lib/ceph/osd/.{{ item.data }}.created + creates: /var/lib/ceph/osd/.{{ item.data }}.created + loop: '{{ ceph_disks }}' diff --git a/roles/ceph/tasks/deploy_rbd_mirror.yaml b/roles/ceph/tasks/deploy_rbd_mirror.yaml new file mode 100644 index 0000000..35764ea --- /dev/null +++ b/roles/ceph/tasks/deploy_rbd_mirror.yaml @@ -0,0 +1,6 @@ +--- +- name: Enable ceph rbd-mirror + systemd: + enabled: yes + name: ceph-rbd-mirror@admin + state: started diff --git a/roles/ceph/tasks/deploy_rgw.yaml b/roles/ceph/tasks/deploy_rgw.yaml new file mode 100644 index 0000000..6563cdb --- /dev/null +++ b/roles/ceph/tasks/deploy_rgw.yaml @@ -0,0 +1,31 @@ +--- +- name: Create /var/lib/ceph/radosgw/ceph-rgw.{{ inventory_hostname_short }} + file: + path: /var/lib/ceph/radosgw/ceph-rgw.{{ inventory_hostname_short }} + owner: ceph + group: ceph + mode: 0770 + state: directory + register: deploy_rgw + +- name: Create Ceph rgw keyring + delegate_to: "{{ lookup('template', 'ceph.monitors.names.j2') | from_yaml | first }}" + shell: + cmd: ceph auth get-or-create client.rgw.{{ inventory_hostname_short }} mon 'allow rw' osd 'allow rwx' + 
register: rgw_keyring + when: 'deploy_rgw.changed' + +- name: Transfer rgw keyring + copy: + content: "{{ rgw_keyring.stdout }}\n" + dest: "/var/lib/ceph/radosgw/ceph-rgw.{{ inventory_hostname_short }}/keyring" + owner: ceph + group: ceph + mode: 0660 + when: 'deploy_rgw.changed' + +- name: Enable ceph rgw + systemd: + enabled: yes + name: ceph-radosgw@rgw.{{ inventory_hostname_short }} + state: started diff --git a/roles/ceph/tasks/generate.yaml b/roles/ceph/tasks/generate.yaml new file mode 100644 index 0000000..a284d4a --- /dev/null +++ b/roles/ceph/tasks/generate.yaml @@ -0,0 +1,38 @@ +--- +- name: Generate mon keyring + delegate_to: 127.0.0.1 + shell: + cmd: ceph-authtool --create-keyring {{ ceph_mon_keyring }} --gen-key -n mon. --cap mon 'allow *' + creates: '{{ ceph_mon_keyring }}' + throttle: 1 + when: 'ceph_fsid' + +- name: Generate client.admin keyring + delegate_to: 127.0.0.1 + shell: + cmd: ceph-authtool --create-keyring {{ ceph_client_admin_keyring }} --gen-key -n client.admin --cap mon 'allow *' --cap osd 'allow *' --cap mds 'allow *' --cap mgr 'allow *' + creates: '{{ ceph_client_admin_keyring }}' + throttle: 1 + notify: Add key to client.admin keyring + when: 'ceph_fsid' + +- name: Generate bootstrap-osd keyring + delegate_to: 127.0.0.1 + shell: + cmd: ceph-authtool --create-keyring {{ ceph_bootstrap_osd_keyring }} --gen-key -n client.bootstrap-osd --cap mon 'profile bootstrap-osd' --cap mgr 'allow r' + creates: '{{ ceph_bootstrap_osd_keyring }}' + throttle: 1 + notify: Add key to bootstrap-osd keyring + when: 'ceph_fsid' + +- name: Generate mon map + delegate_to: 127.0.0.1 + shell: + cmd: monmaptool --create{% if ceph_release_majors[ceph_release] | default(None) %} --set-min-mon-release={{ ceph_release_majors[ceph_release] }}{% endif %} --fsid {{ ceph_fsid }} {{ ceph_mon_map }} + creates: '{{ ceph_mon_map }}' + throttle: 1 + notify: Add nodes to mon map + when: 'ceph_fsid' + +- name: Run all notified handlers + meta: flush_handlers diff --git a/roles/ceph/tasks/install.yaml b/roles/ceph/tasks/install.yaml new file mode 100644 index 0000000..4aa7288 --- /dev/null +++ b/roles/ceph/tasks/install.yaml @@ -0,0 +1,72 @@ +--- + +- name: Install ceph-common + ansible.builtin.package: + name: + - ceph-common + state: present + when: '"client" in ceph_roles' + +- name: Install ceph-mon + ansible.builtin.package: + name: + - ceph-mon + state: present + when: '"mon" in ceph_roles' + +- name: Install ceph-mgr dependencies + ansible.builtin.package: + name: + - python3-distutils + state: present + when: 'ansible_distribution in ("Ubuntu", "Debian") and "mgr" in ceph_roles and ceph_release != "distro"' + +- name: Install ceph-mgr + ansible.builtin.package: + name: + - ceph-mgr + state: present + when: '"mgr" in ceph_roles' + +- name: Install ceph-mds + ansible.builtin.package: + name: + - ceph-mds + state: present + when: '"mds" in ceph_roles' + +- name: Install ceph-osd + ansible.builtin.package: + name: + - ceph-osd + - python3-packaging + state: present + when: '"osd" in ceph_roles' + +- name: Install ceph-volume + ansible.builtin.package: + name: + - ceph-volume + state: present + when: '"osd" in ceph_roles and (ceph_release != "distro" or ansible_distribution_release not in ("bookworm", "focal"))' + +- name: Install ceph-rbd-mirror + ansible.builtin.package: + name: + - rbd-mirror + state: present + when: '"rbd-mirror" in ceph_roles' + +- name: Install radosgw (deb) + ansible.builtin.package: + name: + - radosgw + state: present + when: 'ansible_distribution in ("Debian", "Ubuntu") 
and "rgw" in ceph_roles' + +- name: Install radosgw (rpm) + ansible.builtin.package: + name: + - ceph-radosgw + state: present + when: 'ansible_distribution == "CentOS" and "rgw" in ceph_roles' diff --git a/roles/ceph/tasks/main.yaml b/roles/ceph/tasks/main.yaml new file mode 100644 index 0000000..bab9d97 --- /dev/null +++ b/roles/ceph/tasks/main.yaml @@ -0,0 +1,41 @@ +--- +- name: Add package repository (apt) + import_tasks: repo_apt.yaml + when: 'ansible_distribution in ("Ubuntu", "Debian")' + +- name: Add package repository (rpm) + import_tasks: repo_rpm.yaml + when: 'ansible_distribution == "CentOS"' + +- name: Install packages + import_tasks: install.yaml + +- name: Generate cluster keys and maps + import_tasks: generate.yaml + +- name: Set up config and keyrings + import_tasks: config.yaml + +- name: Deploy mon + import_tasks: deploy_mon.yaml + when: '"mon" in ceph_roles' + +- name: Deploy osd + import_tasks: deploy_osd.yaml + when: '"osd" in ceph_roles' + +- name: Deploy mgr + import_tasks: deploy_mgr.yaml + when: '"mgr" in ceph_roles' + +- name: Deploy mds + import_tasks: deploy_mds.yaml + when: '"mds" in ceph_roles' + +- name: Deploy rgw + import_tasks: deploy_rgw.yaml + when: '"rgw" in ceph_roles' + +- name: Deploy rbd-mirror + import_tasks: deploy_rbd_mirror.yaml + when: '"rbd-mirror" in ceph_roles' diff --git a/roles/ceph/tasks/repo_apt.yaml b/roles/ceph/tasks/repo_apt.yaml new file mode 100644 index 0000000..a1b6fd7 --- /dev/null +++ b/roles/ceph/tasks/repo_apt.yaml @@ -0,0 +1,31 @@ +--- +- name: Create keyring path + file: + path: /etc/apt/keyrings/ + mode: 0755 + state: directory + when: 'ceph_roles|length > 0 and ceph_release != "distro"' + +- name: Add ceph GPG key + copy: + src: ceph.asc + dest: /etc/apt/keyrings/ansible-ceph.asc + notify: Update apt + when: 'ceph_roles|length > 0 and ceph_release != "distro"' + +- name: Get local architecture + shell: dpkg --print-architecture + register: dpkg_architecture + changed_when: false + check_mode: no + when: 'ceph_roles|length > 0 and ceph_release != "distro"' + +- name: Add ceph package sources + template: + src: ceph.sources.j2 + dest: /etc/apt/sources.list.d/ansible-ceph.sources + notify: Update apt + when: 'ceph_roles|length > 0 and ceph_release != "distro"' + +- name: Run all notified handlers + meta: flush_handlers diff --git a/roles/ceph/tasks/repo_rpm.yaml b/roles/ceph/tasks/repo_rpm.yaml new file mode 100644 index 0000000..55352ff --- /dev/null +++ b/roles/ceph/tasks/repo_rpm.yaml @@ -0,0 +1,27 @@ +--- + +- name: Import ceph GPG key + ansible.builtin.rpm_key: + state: present + key: https://download.ceph.com/keys/release.asc + when: 'ceph_roles|length > 0 and ceph_release != "distro"' + +- name: Configure ceph stable community repository + ansible.builtin.yum_repository: + name: ceph_stable + description: Ceph Stable repo + state: present + baseurl: "https://download.ceph.com/rpm-{{ ceph_release }}/el{{ ansible_facts['distribution_major_version'] }}/$basearch" + file: ceph_stable + priority: 2 + when: 'ceph_roles|length > 0 and ceph_release != "distro"' + +- name: Configure ceph stable noarch community repository + ansible.builtin.yum_repository: + name: ceph_stable_noarch + description: Ceph Stable noarch repo + state: present + baseurl: "https://download.ceph.com/rpm-{{ ceph_release }}/el{{ ansible_facts['distribution_major_version'] }}/noarch" + file: ceph_stable + priority: 2 + when: 'ceph_roles|length > 0 and ceph_release != "distro"' diff --git a/ansible/files/ceph/ceph.conf.tpl 
b/roles/ceph/templates/ceph.conf.j2 similarity index 59% rename from ansible/files/ceph/ceph.conf.tpl rename to roles/ceph/templates/ceph.conf.j2 index df54f72..56dbb7f 100644 --- a/ansible/files/ceph/ceph.conf.tpl +++ b/roles/ceph/templates/ceph.conf.j2 @@ -1,16 +1,16 @@ -{% set monitors = lookup('template', '../files/ceph/ceph.monitors.tpl') | from_yaml | default([]) %} +{% set monitors = lookup('template', 'ceph.monitors.j2') | from_yaml | default([]) %} {% set addresses = monitors | map(attribute='ip') | map('regex_replace', '^(.*)$', '[\\1]:6789') | sort | join(',') -%} {% set names = monitors | map(attribute='name') | sort | join(',') -%} # Managed by Ansible, do not modify. [global] -fsid = {{ task_fsid }} +fsid = {{ ceph_fsid }} mon_initial_members = {{ names }} mon_host = {{ addresses }} -{% if task_network_public %} -public_network = {{ task_network_public }} +{% if ceph_network_public %} +public_network = {{ ceph_network_public }} {% endif %} -{% if task_network_private %} -private_network = {{ task_network_private }} +{% if ceph_network_private %} +private_network = {{ ceph_network_private }} {% endif %} auth allow insecure global id reclaim = false {% if ansible_default_ipv6['address'] | default("") %} @@ -23,7 +23,7 @@ ms bind ipv4 = true [client] rbd_cache = true -rbd_cache_size = {{ task_rbd_cache }} +rbd_cache_size = {{ ceph_rbd_cache }} rbd_cache_writethrough_until_flush = false -rbd_cache_max_dirty = {{ task_rbd_cache_max }} -rbd_cache_target_dirty = {{ task_rbd_cache_target }} +rbd_cache_max_dirty = {{ ceph_rbd_cache_max }} +rbd_cache_target_dirty = {{ ceph_rbd_cache_target }} diff --git a/ansible/files/ceph/ceph.monitors.tpl b/roles/ceph/templates/ceph.monitors.j2 similarity index 81% rename from ansible/files/ceph/ceph.monitors.tpl rename to roles/ceph/templates/ceph.monitors.j2 index d2ab862..7a91d07 100644 --- a/ansible/files/ceph/ceph.monitors.tpl +++ b/roles/ceph/templates/ceph.monitors.j2 @@ -1,5 +1,5 @@ {% for host in groups['all'] %} -{% if hostvars[host]['ceph_fsid'] == task_fsid and "mon" in hostvars[host]['ceph_roles'] %} +{% if hostvars[host]['ceph_fsid'] == ceph_fsid and "mon" in hostvars[host]['ceph_roles'] %} - name: "{{ host }}" ip: "{{ hostvars[host]['ceph_ip_address'] | default(hostvars[host]['ansible_default_ipv6']['address'] | default(hostvars[host]['ansible_default_ipv4']['address'])) }}" {% endif %} diff --git a/ansible/files/ceph/ceph.monitors.names.tpl b/roles/ceph/templates/ceph.monitors.names.j2 similarity index 82% rename from ansible/files/ceph/ceph.monitors.names.tpl rename to roles/ceph/templates/ceph.monitors.names.j2 index f4cd279..4e2710a 100644 --- a/ansible/files/ceph/ceph.monitors.names.tpl +++ b/roles/ceph/templates/ceph.monitors.names.j2 @@ -1,6 +1,6 @@ {%- set found = namespace(count=0) %} {%- for host in vars['ansible_play_hosts'] %} -{% if 'ceph_fsid' in hostvars[host] and hostvars[host]['ceph_fsid'] == task_fsid and 'ceph_roles' in hostvars[host] and "mon" in hostvars[host]['ceph_roles'] %} +{% if 'ceph_fsid' in hostvars[host] and hostvars[host]['ceph_fsid'] == ceph_fsid and 'ceph_roles' in hostvars[host] and "mon" in hostvars[host]['ceph_roles'] %} {% set found.count = found.count + 1 %} - "{{ host }}" {% endif %} diff --git a/ansible/files/ceph/ceph.sources.tpl b/roles/ceph/templates/ceph.sources.j2 similarity index 77% rename from ansible/files/ceph/ceph.sources.tpl rename to roles/ceph/templates/ceph.sources.j2 index 6266389..3846d78 100644 --- a/ansible/files/ceph/ceph.sources.tpl +++ b/roles/ceph/templates/ceph.sources.j2 
@@ -1,6 +1,6 @@ # Managed by Ansible, do not modify. Types: deb -URIs: https://download.ceph.com/debian-{{ task_release }} +URIs: https://download.ceph.com/debian-{{ ceph_release }} Suites: {{ ansible_distribution_release }} Components: main Architectures: {{ dpkg_architecture.stdout }} diff --git a/roles/ceph/vars/main.yaml b/roles/ceph/vars/main.yaml new file mode 100644 index 0000000..6f2dbff --- /dev/null +++ b/roles/ceph/vars/main.yaml @@ -0,0 +1,15 @@ +--- +ceph_bootstrap_osd_keyring: data/ceph/cluster.{{ ceph_fsid }}.bootstrap-osd.keyring +ceph_client_admin_keyring: data/ceph/cluster.{{ ceph_fsid }}.client.admin.keyring +ceph_mon_keyring: data/ceph/cluster.{{ ceph_fsid }}.mon.keyring +ceph_mon_map: data/ceph/cluster.{{ ceph_fsid }}.mon.map + +ceph_release_majors: + luminous: 12 + mimic: 13 + nautilus: 14 + octopus: 15 + pacific: 16 + quincy: 17 + reef: 18 + squid: 19 diff --git a/roles/environment/defaults/main.yaml b/roles/environment/defaults/main.yaml new file mode 100644 index 0000000..e69de29 diff --git a/roles/environment/handlers/main.yaml b/roles/environment/handlers/main.yaml new file mode 100644 index 0000000..ed97d53 --- /dev/null +++ b/roles/environment/handlers/main.yaml @@ -0,0 +1 @@ +--- diff --git a/roles/environment/tasks/main.yaml b/roles/environment/tasks/main.yaml new file mode 100644 index 0000000..ad361d2 --- /dev/null +++ b/roles/environment/tasks/main.yaml @@ -0,0 +1,40 @@ +--- + +- name: Run all notified handlers + meta: flush_handlers + +# Time synchronization (NTP) +- name: Install EPEL repo (rpm) + yum: + name: epel-release + state: present + when: 'ansible_distribution == "CentOS"' + +- name: Install systemd-timesyncd + ansible.builtin.package: + name: + - systemd-timesyncd + state: present + +- name: Enable systemd-timesyncd + systemd: + enabled: yes + name: systemd-timesyncd + state: started + register: enable + +- name: Wait 5s for NTP sync + ansible.builtin.wait_for: + timeout: 5 + delegate_to: localhost + when: enable.changed + +- name: Verify NTP successfully synced + shell: + cmd: timedatectl timesync-status + register: ntp_status_output + failed_when: '"Packet count: 0" in ntp_status_output.stdout' + changed_when: false +- name: Run all notified handlers + meta: flush_handlers + diff --git a/roles/environment/vars/main.yaml b/roles/environment/vars/main.yaml new file mode 100644 index 0000000..ed97d53 --- /dev/null +++ b/roles/environment/vars/main.yaml @@ -0,0 +1 @@ +--- diff --git a/roles/incus/README.md b/roles/incus/README.md new file mode 100644 index 0000000..6e15ac2 --- /dev/null +++ b/roles/incus/README.md @@ -0,0 +1,27 @@ +# Incus Role +## Variables + - `incus_name`: Name identifier for the deployment (**required**, type: string) + - `incus_init`: Initial configuration data (type: dict) + - `config`: Dict of config keys + - `clients`: Dict of client certificates to trust + - `type`: Type of certificate, typically `client` or `metrics` (**required**, type: string) + - `certificate`: PEM encoded certificate (**required**, type: string) + - `network`: Dict of networks + - `name`: Name of the network (**required**, type: string) + - `type`: Type of network (**required**, type: string) + - `default`: Whether to include in the default profile (type: bool, default: False) + - `config`: Dict of global config keys + - `local_config`: Dict of server-specific config keys + - `storage`: Dict of storage pools + - `name`: Name of the storage pool (**required**, type: string) + - `driver`: Storage pool driver (**required**, type: string) + - `default`: 
Whether to include in the default profile (type: bool, default: False) + - `config`: Dict of global config keys + - `local_config`: Dict of server-specific config keys + - `incus_ip_address`: Override for the server's IP address (used for cluster and client traffic) (type: string) + - `incus_release`: Incus release to deploy, can be one of `daily`, `stable` or `lts-6.0` (type: string, default: `stable`) + - `incus_roles`: List of roles for the deployed Incus system, controlling its operation mode (**required**, type: list of string) + - `standalone` + - `cluster` + - `ui`: Whether to serve the Incus UI + diff --git a/roles/incus/defaults/main.yaml b/roles/incus/defaults/main.yaml new file mode 100644 index 0000000..62a8594 --- /dev/null +++ b/roles/incus/defaults/main.yaml @@ -0,0 +1,6 @@ +--- +incus_release: 'stable' +incus_roles: [] +incus_init: '{}' +incus_ip_address: "{{ ansible_default_ipv6['address'] | default(ansible_default_ipv4['address']) }}" +incus_name: '' diff --git a/ansible/files/incus/zabbly.asc b/roles/incus/files/zabbly.asc similarity index 100% rename from ansible/files/incus/zabbly.asc rename to roles/incus/files/zabbly.asc diff --git a/roles/incus/handlers/main.yaml b/roles/incus/handlers/main.yaml new file mode 100644 index 0000000..e3e57d3 --- /dev/null +++ b/roles/incus/handlers/main.yaml @@ -0,0 +1,11 @@ +--- +- name: Update apt + apt: + force_apt_get: yes + update_cache: yes + cache_valid_time: 0 + +- name: Restart Incus + systemd: + name: incus.service + state: restarted diff --git a/roles/incus/tasks/bootstrap.yaml b/roles/incus/tasks/bootstrap.yaml new file mode 100644 index 0000000..13a6abe --- /dev/null +++ b/roles/incus/tasks/bootstrap.yaml @@ -0,0 +1,41 @@ +--- +- name: Bootstrap the cluster + shell: + cmd: "incus --force-local cluster enable {{ inventory_hostname }}" + when: '(install_deb.changed or install_rpm.changed) and "cluster" in incus_roles and incus_servers[0] == inventory_hostname' + +- name: Create join tokens + delegate_to: "{{ incus_servers[0] }}" + shell: + cmd: "incus --force-local --quiet cluster add {{ inventory_hostname }}" + register: cluster_add + when: '(install_deb.changed or install_rpm.changed) and "cluster" in incus_roles and incus_servers[0] != inventory_hostname' + +- name: Wait 5s to avoid token use before valid + ansible.builtin.wait_for: + timeout: 5 + delegate_to: localhost + when: 'cluster_add.changed' + +- name: Join the cluster + throttle: 1 + shell: + cmd: "incus --force-local admin init --preseed" + stdin: |- + cluster: + enabled: true + cluster_address: "{{ incus_ip_address }}" + cluster_token: "{{ cluster_add.stdout }}" + server_address: "{{ incus_ip_address }}" + member_config: {% for pool in incus_init.storage %}{% for key in incus_init.storage[pool].local_config | default([]) %} + + - entity: storage-pool + name: {{ pool }} + key: {{ key }} + value: {{ incus_init.storage[pool].local_config[key] }}{% endfor %}{% endfor %}{% for network in incus_init.network %}{% for key in incus_init.network[network].local_config | default([]) %} + + - entity: network + name: {{ network }} + key: {{ key }} + value: {{ incus_init.network[network].local_config[key] }}{% endfor %}{% endfor %} + when: 'cluster_add.changed' diff --git a/roles/incus/tasks/config.yaml b/roles/incus/tasks/config.yaml new file mode 100644 index 0000000..de3ed34 --- /dev/null +++ b/roles/incus/tasks/config.yaml @@ -0,0 +1,16 @@ +--- +- name: Set client listen address + shell: + cmd: "incus --force-local config set core.https_address {{ incus_ip_address }}" + when: '(install_deb.changed or
install_rpm.changed) and ("standalone" in incus_roles or ("cluster" in incus_roles and incus_servers[0] == inventory_hostname))' + +- name: Set cluster listen address + shell: + cmd: "incus --force-local config set cluster.https_address {{ incus_ip_address }}" + when: '(install_deb.changed or install_rpm.changed) and "cluster" in incus_roles and incus_servers[0] == inventory_hostname' + +- name: Set OVN NorthBound database + shell: + cmd: "incus --force-local config set network.ovn.northbound_connection={{ incus_ovn_northbound }} network.ovn.client_cert=\"{{ lookup('file', 'data/ovn/'+ovn_name+'/'+inventory_hostname+'.crt') }}\" network.ovn.client_key=\"{{ lookup('file', 'data/ovn/'+ovn_name+'/'+inventory_hostname+'.key') }}\" network.ovn.ca_cert=\"{{ lookup('file', 'data/ovn/'+ovn_name+'/ca.crt') }}\"" + notify: Restart Incus + when: '(install_deb.changed or install_rpm.changed) and incus_ovn_northbound and ("standalone" in incus_roles or ("cluster" in incus_roles and incus_servers[0] == inventory_hostname))' diff --git a/roles/incus/tasks/final.yaml b/roles/incus/tasks/final.yaml new file mode 100644 index 0000000..6f4006a --- /dev/null +++ b/roles/incus/tasks/final.yaml @@ -0,0 +1,17 @@ +--- + +- name: Apply additional configuration + shell: + cmd: "incus config set {{ item.key }}=\"{{ item.value }}\"" + loop: "{{ incus_init['config'] | default({}) | dict2items }}" + when: '(install_deb.changed or install_rpm.changed) and ("standalone" in incus_roles or ("cluster" in incus_roles and incus_servers[0] == inventory_hostname))' + +- name: Load client certificates + shell: + cmd: "incus config trust add-certificate --name \"{{ item.key }}\" --type={{ item.value.type | default('client') }} -" + stdin: "{{ item.value.certificate }}" + loop: "{{ incus_init['clients'] | default({}) | dict2items }}" + when: '(install_deb.changed or install_rpm.changed) and ("standalone" in incus_roles or ("cluster" in incus_roles and incus_servers[0] == inventory_hostname))' +- name: Run all notified handlers + meta: flush_handlers + diff --git a/roles/incus/tasks/install.yaml b/roles/incus/tasks/install.yaml new file mode 100644 index 0000000..ef58bdd --- /dev/null +++ b/roles/incus/tasks/install.yaml @@ -0,0 +1,91 @@ +--- +- name: Install the Incus package (deb) + apt: + name: + - incus + install_recommends: no + state: present + register: install_deb + when: 'ansible_distribution in ("Debian", "Ubuntu") and incus_roles | length > 0' + +- name: Install the Incus package (rpm) + ansible.builtin.package: + name: + - incus + state: present + register: install_rpm + when: 'ansible_distribution == "CentOS" and incus_roles | length > 0' + +- name: Install the Incus UI package (deb) + apt: + name: + - incus-ui-canonical + install_recommends: no + state: present + when: 'ansible_distribution in ("Debian", "Ubuntu") and "ui" in incus_roles' + +- name: Install btrfs tools + ansible.builtin.package: + name: + - btrfs-progs + state: present + when: "incus_roles | length > 0 and 'btrfs' in incus_init['storage'] | dict2items | json_query('[].value.driver')" + +- name: Install ceph tools + ansible.builtin.package: + name: + - ceph-common + state: present + when: "incus_roles | length > 0 and 'ceph' in incus_init['storage'] | dict2items | json_query('[].value.driver')" + +- name: Install LVM tools + ansible.builtin.package: + name: + - lvm2 + state: present + when: "incus_roles | length > 0 and 'lvm' in incus_init['storage'] | dict2items | json_query('[].value.driver')" + +- name: Install ZFS dependencies + 
ansible.builtin.package: + name: + - zfs-dkms + state: present + when: "incus_roles | length > 0 and 'zfs' in incus_init['storage'] | dict2items | json_query('[].value.driver') and ansible_distribution == 'Debian'" + +- name: Install ZFS tools + ansible.builtin.package: + name: + - zfsutils-linux + state: present + when: "incus_roles | length > 0 and 'zfs' in incus_init['storage'] | dict2items | json_query('[].value.driver')" + +- name: Set uid allocation + shell: + cmd: "usermod root --add-subuids 10000000-1009999999" + when: '(install_deb.changed or install_rpm.changed) and ansible_distribution == "CentOS"' + +- name: Set gid allocation + shell: + cmd: "usermod root --add-subgids 10000000-1009999999" + when: '(install_deb.changed or install_rpm.changed) and ansible_distribution == "CentOS"' + +- name: Enable incus socket unit + systemd: + enabled: true + name: incus.socket + state: started + when: 'install_deb.changed or install_rpm.changed' + +- name: Enable incus service unit + systemd: + enabled: true + name: incus.service + state: started + when: 'install_deb.changed or install_rpm.changed' + +- name: Enable incus startup unit + systemd: + enabled: true + name: incus-startup.service + state: started + when: 'install_deb.changed or install_rpm.changed' diff --git a/roles/incus/tasks/main.yaml b/roles/incus/tasks/main.yaml new file mode 100644 index 0000000..85a1f50 --- /dev/null +++ b/roles/incus/tasks/main.yaml @@ -0,0 +1,30 @@ +--- + +- name: Run all notified handlers + meta: flush_handlers + +- name: Add package repository (apt) + import_tasks: repo_apt.yaml + when: 'ansible_distribution in ("Ubuntu", "Debian")' + +- name: Add package repository (rpm) + import_tasks: repo_rpm.yaml + when: 'ansible_distribution == "CentOS"' + +- name: Install packages and bootstrap + import_tasks: install.yaml + +- name: Configure + import_tasks: config.yaml + +- name: Networks + import_tasks: networks.yaml + +- name: Storage + import_tasks: storage.yaml + +- name: Bootstrap + import_tasks: bootstrap.yaml + +- name: Final configuration + import_tasks: final.yaml diff --git a/roles/incus/tasks/networks.yaml b/roles/incus/tasks/networks.yaml new file mode 100644 index 0000000..f6bb028 --- /dev/null +++ b/roles/incus/tasks/networks.yaml @@ -0,0 +1,18 @@ +--- +- name: Add networks + shell: + cmd: "incus network create {{ item.key }} --type={{ item.value.type }}{% for k in item.value.local_config | default([]) %} {{ k }}={{ item.value.local_config[k] }}{% endfor %}{% for k in item.value.config | default([]) %} {{ k }}={{ item.value.config[k] }}{% endfor %}" + loop: "{{ incus_init['network'] | dict2items }}" + when: '(install_deb.changed or install_rpm.changed) and ("standalone" in incus_roles or ("cluster" in incus_roles and incus_servers[0] == inventory_hostname))' + +- name: Set network description + shell: + cmd: "incus network set --property {{ item.key }} description=\"{{ item.value.description }}\"" + loop: "{{ incus_init['network'] | dict2items }}" + when: '(install_deb.changed or install_rpm.changed) and ("standalone" in incus_roles or ("cluster" in incus_roles and incus_servers[0] == inventory_hostname)) and item.value.description | default(None)' + +- name: Add network to default profile + shell: + cmd: "incus profile device add default eth0 nic network={{ item }} name=eth0" + loop: "{{ incus_init['network'] | dict2items | json_query('[?value.default].key') }}" + when: '(install_deb.changed or install_rpm.changed) and ("standalone" in incus_roles or ("cluster" in incus_roles and incus_servers[0]
== inventory_hostname))' diff --git a/roles/incus/tasks/repo_apt.yaml b/roles/incus/tasks/repo_apt.yaml new file mode 100644 index 0000000..0883746 --- /dev/null +++ b/roles/incus/tasks/repo_apt.yaml @@ -0,0 +1,34 @@ +--- + + +- name: Create apt keyring path + file: + path: /etc/apt/keyrings/ + mode: 0755 + state: directory + when: 'incus_roles|length > 0 and incus_release != "distro"' + +- name: Add Zabbly repository key + copy: + src: zabbly.asc + dest: /etc/apt/keyrings/ansible-zabbly.asc + notify: Update apt + when: 'incus_roles|length > 0 and incus_release != "distro"' + +- name: Get DPKG architecture + shell: dpkg --print-architecture + register: dpkg_architecture + changed_when: false + check_mode: no + when: 'incus_roles|length > 0 and incus_release != "distro"' + +- name: Add Zabbly package source + template: + src: incus.sources.j2 + dest: /etc/apt/sources.list.d/ansible-zabbly-incus-{{ incus_release }}.sources + notify: Update apt + when: 'incus_roles|length > 0 and incus_release != "distro"' + + +- name: Run all notified handlers + meta: flush_handlers diff --git a/roles/incus/tasks/repo_rpm.yaml b/roles/incus/tasks/repo_rpm.yaml new file mode 100644 index 0000000..8ce751f --- /dev/null +++ b/roles/incus/tasks/repo_rpm.yaml @@ -0,0 +1,12 @@ +--- + +- name: Add COPR repository + community.general.copr: + chroot: "epel-9-x86_64" + name: "neil/incus" + state: enabled + when: 'incus_roles|length > 0 and incus_release != "distro"' + + +- name: Run all notified handlers + meta: flush_handlers diff --git a/roles/incus/tasks/storage.yaml b/roles/incus/tasks/storage.yaml new file mode 100644 index 0000000..b3824f2 --- /dev/null +++ b/roles/incus/tasks/storage.yaml @@ -0,0 +1,18 @@ +--- +- name: Add storage pools + shell: + cmd: "incus storage create {{ item.key }} {{ item.value.driver }}{% for k in item.value.local_config | default([]) %} {{ k }}={{ item.value.local_config[k] }}{% endfor %}{% for k in item.value.config | default([]) %} {{ k }}={{ item.value.config[k] }}{% endfor %}" + loop: "{{ incus_init['storage'] | dict2items }}" + when: '(install_deb.changed or install_rpm.changed) and ("standalone" in incus_roles or ("cluster" in incus_roles and incus_servers[0] == inventory_hostname))' + +- name: Set storage pool description + shell: + cmd: "incus storage set --property {{ item.key }} description=\"{{ item.value.description }}\"" + loop: "{{ incus_init['storage'] | dict2items }}" + when: '(install_deb.changed or install_rpm.changed) and ("standalone" in incus_roles or ("cluster" in incus_roles and incus_servers[0] == inventory_hostname)) and item.value.description | default(None)' + +- name: Add storage pool to default profile + shell: + cmd: "incus profile device add default root disk path=/ pool={{ item }}" + loop: "{{ incus_init['storage'] | dict2items | json_query('[?value.default].key') }}" + when: '(install_deb.changed or install_rpm.changed) and ("standalone" in incus_roles or ("cluster" in incus_roles and incus_servers[0] == inventory_hostname))' diff --git a/roles/incus/templates/incus.servers.j2 b/roles/incus/templates/incus.servers.j2 new file mode 100644 index 0000000..fc3905c --- /dev/null +++ b/roles/incus/templates/incus.servers.j2 @@ -0,0 +1,5 @@ +{% for host in vars['ansible_play_hosts'] | sort %} +{% if hostvars[host]['incus_name'] == incus_name and "cluster" in hostvars[host]['incus_roles'] %} +- {{ host }} +{% endif %} +{% endfor %} diff --git a/ansible/files/incus/incus.sources.tpl b/roles/incus/templates/incus.sources.j2 similarity index 78% rename from 
ansible/files/incus/incus.sources.tpl rename to roles/incus/templates/incus.sources.j2 index 874c8e3..823ee31 100644 --- a/ansible/files/incus/incus.sources.tpl +++ b/roles/incus/templates/incus.sources.j2 @@ -1,7 +1,7 @@ # Managed by Ansible, do not modify. Enabled: yes Types: deb -URIs: https://pkgs.zabbly.com/incus/{{ task_release }}/ +URIs: https://pkgs.zabbly.com/incus/{{ incus_release }}/ Suites: {{ ansible_distribution_release }} Components: main Architectures: {{ dpkg_architecture.stdout }} diff --git a/roles/incus/vars/main.yaml b/roles/incus/vars/main.yaml new file mode 100644 index 0000000..6fd529c --- /dev/null +++ b/roles/incus/vars/main.yaml @@ -0,0 +1,4 @@ +--- + +incus_ovn_northbound: "{{ lookup('template', '../ovn/templates/ovn-central.servers.j2') | from_yaml | map('regex_replace', '^(.*)$', 'ssl:[\\1]:6641') | join(',') }}" +incus_servers: "{{ lookup('template', 'incus.servers.j2') | from_yaml | sort }}" diff --git a/roles/lvmcluster/README.md b/roles/lvmcluster/README.md new file mode 100644 index 0000000..04ff426 --- /dev/null +++ b/roles/lvmcluster/README.md @@ -0,0 +1,6 @@ +# LVM cluster Role +## Variables + - `lvmcluster_metadata_size`: PV metadata size (type: string, default: `10m`) + - `lvmcluster_name`: Name identifier for the deployment (**required**, type: string) + - `lvmcluster_vgs`: Dict of VG name to storage device path + diff --git a/roles/lvmcluster/defaults/main.yaml b/roles/lvmcluster/defaults/main.yaml new file mode 100644 index 0000000..5017306 --- /dev/null +++ b/roles/lvmcluster/defaults/main.yaml @@ -0,0 +1,4 @@ +--- +lvmcluster_name: '' +lvmcluster_metadata_size: '10m' +lvmcluster_vgs: {} diff --git a/roles/lvmcluster/handlers/main.yaml b/roles/lvmcluster/handlers/main.yaml new file mode 100644 index 0000000..ed97d53 --- /dev/null +++ b/roles/lvmcluster/handlers/main.yaml @@ -0,0 +1 @@ +--- diff --git a/roles/lvmcluster/tasks/main.yaml b/roles/lvmcluster/tasks/main.yaml new file mode 100644 index 0000000..a2730bf --- /dev/null +++ b/roles/lvmcluster/tasks/main.yaml @@ -0,0 +1,98 @@ +--- + +- name: Run all notified handlers + meta: flush_handlers + +# LVM Cluster - Generate configuration +- name: Create cluster directory + delegate_to: 127.0.0.1 + file: + path: "data/lvmcluster/{{ lvmcluster_name }}" + mode: 0755 + state: directory + throttle: 1 + when: 'lvmcluster_name' + register: create + +- name: Create cluster host_id tracking + delegate_to: 127.0.0.1 + throttle: 1 + copy: + content: "{}" + dest: "data/lvmcluster/{{ lvmcluster_name }}/host_id.yaml" + mode: 0644 + when: "create.changed" + +- name: Update cluster host_id tracking + delegate_to: 127.0.0.1 + throttle: 1 + template: + src: "host_id.yaml.j2" + dest: "data/lvmcluster/{{ lvmcluster_name }}/host_id.yaml" + when: 'lvmcluster_name' + vars: + lvmcluster_host_ids: "{{ lookup('file', 'data/lvmcluster/' + lvmcluster_name + '/host_id.yaml') | from_yaml }}" + + +- name: Run all notified handlers + meta: flush_handlers + +# LVM Cluster - Install packages and host config +- name: Install the LVM packages + ansible.builtin.package: + name: + - lvm2 + - lvm2-lockd + - sanlock + state: present + when: 'lvmcluster_name' + +- name: Configure for LVM cluster + template: + src: lvmlocal.conf.j2 + dest: /etc/lvm/lvmlocal.conf + when: 'lvmcluster_name' + +- name: Enable the lvmlockd unit + systemd: + enabled: yes + name: lvmlockd + state: started + when: 'lvmcluster_name' + +- name: Enable the sanlock unit + systemd: + enabled: yes + name: sanlock + state: started + when: 'lvmcluster_name' + + +- name: Run all
notified handlers + meta: flush_handlers + +# LVM Cluster - Create VGs +- name: Check for existing VGs + shell: + cmd: "vgs {{ item }}" + register: check + loop: "{{ lvmcluster_vgs.keys() }}" + run_once: true + changed_when: false + failed_when: "check.rc not in (0, 5)" + +- name: Create the VG (first server) + shell: + cmd: "vgcreate --shared {{ item.item }} {{ lvmcluster_vgs[item.item] }} --metadatasize={{ lvmcluster_metadata_size }}" + when: "item.rc == 5" + loop: "{{ check.results }}" + run_once: true + register: create + +- name: Ensure lock manager is running + shell: + cmd: "vgchange --lock-start" + when: "create.changed" +- name: Run all notified handlers + meta: flush_handlers + diff --git a/ansible/files/lvmcluster/host_id.yaml.tpl b/roles/lvmcluster/templates/host_id.yaml.j2 similarity index 75% rename from ansible/files/lvmcluster/host_id.yaml.tpl rename to roles/lvmcluster/templates/host_id.yaml.j2 index 519b2f9..ecd632a 100644 --- a/ansible/files/lvmcluster/host_id.yaml.tpl +++ b/roles/lvmcluster/templates/host_id.yaml.j2 @@ -1,13 +1,13 @@ {% set ns = namespace() %} {% set ns.next = 1 %} -{% for key, value in task_host_ids.items() %} +{% for key, value in lvmcluster_host_ids.items() %} {% if value >= ns.next %} {% set ns.next = value + 1 %} {% endif %} {{ key }}: {{ value }} {% endfor %} {% for host in vars['ansible_play_hosts'] %} -{% if not host in task_host_ids %} +{% if not host in lvmcluster_host_ids %} {{ host }}: {{ ns.next }} {% set ns.next = ns.next + 1 %} {% endif %} diff --git a/ansible/files/lvmcluster/lvmlocal.conf.tpl b/roles/lvmcluster/templates/lvmlocal.conf.j2 similarity index 55% rename from ansible/files/lvmcluster/lvmlocal.conf.tpl rename to roles/lvmcluster/templates/lvmlocal.conf.j2 index fc32040..50a336b 100644 --- a/ansible/files/lvmcluster/lvmlocal.conf.tpl +++ b/roles/lvmcluster/templates/lvmlocal.conf.j2 @@ -1,6 +1,6 @@ # Managed by Ansible, do not modify. -# Cluster is {{ task_name }} +# Cluster is {{ lvmcluster_name }} global { use_lvmlockd = 1 @@ -11,5 +11,5 @@ devices { } local { - host_id = {{ task_host_ids[inventory_hostname] }} + host_id = {{ lvmcluster_host_ids[inventory_hostname] }} } diff --git a/roles/lvmcluster/vars/main.yaml b/roles/lvmcluster/vars/main.yaml new file mode 100644 index 0000000..7024804 --- /dev/null +++ b/roles/lvmcluster/vars/main.yaml @@ -0,0 +1,2 @@ +--- +lvmcluster_host_ids: "{{ lookup('file', 'data/lvmcluster/' + lvmcluster_name + '/host_id.yaml') | from_yaml }}" diff --git a/roles/netplan/README.md b/roles/netplan/README.md new file mode 100644 index 0000000..52ebb74 --- /dev/null +++ b/roles/netplan/README.md @@ -0,0 +1,4 @@ +# Netplan Role +## Variables +Netplan doesn't make use of configuration variables, but if you wish to replace the network configuration of a server, you can do so by putting a file in `data/netplan/HOSTNAME.yaml`. 
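Since the role simply copies `data/netplan/HOSTNAME.yaml` onto the target as `/etc/netplan/00-ansible-main.yaml`, that file is ordinary netplan YAML. A minimal sketch of such an override, with a hypothetical hostname and interface name (neither is taken from this repository):

```yaml
# data/netplan/server01.yaml -- illustrative only; adjust the interface
# name and addressing to match the target machine.
network:
  version: 2
  ethernets:
    enp5s0:
      dhcp4: true
      dhcp6: true
```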
diff --git a/roles/netplan/defaults/main.yaml b/roles/netplan/defaults/main.yaml
new file mode 100644
index 0000000..e69de29
diff --git a/roles/netplan/handlers/main.yaml b/roles/netplan/handlers/main.yaml
new file mode 100644
index 0000000..a8b8dde
--- /dev/null
+++ b/roles/netplan/handlers/main.yaml
@@ -0,0 +1,3 @@
+---
+- name: Apply netplan
+  shell: netplan apply
diff --git a/roles/netplan/tasks/main.yaml b/roles/netplan/tasks/main.yaml
new file mode 100644
index 0000000..3767ded
--- /dev/null
+++ b/roles/netplan/tasks/main.yaml
@@ -0,0 +1,44 @@
+---
+
+- name: Run all notified handlers
+  meta: flush_handlers
+
+# Netplan - Override system configuration
+- name: Check if distribution is supported
+  meta: end_play
+  when: 'ansible_distribution not in ("Ubuntu", "Debian")'
+
+- name: Check if a Netplan configuration exists
+  local_action: stat path=data/netplan/{{ inventory_hostname }}.yaml
+  register: main_file
+
+- name: Ensure netplan is installed
+  apt:
+    name:
+      - netplan.io
+    state: present
+  when: main_file.stat.exists
+
+- name: Remove existing configuration
+  file:
+    path: "/etc/netplan/{{ item }}"
+    state: absent
+  loop:
+    - 00-snapd-config.yaml
+    - 00-installer-config.yaml
+    - 10-lxc.yaml
+    - 50-cloud-init.yaml
+  when: main_file.stat.exists
+  notify: Apply netplan
+
+- name: Transfer netplan configuration
+  copy:
+    src: data/netplan/{{ inventory_hostname }}.yaml
+    dest: /etc/netplan/00-ansible-main.yaml
+    mode: 0600
+  when: main_file.stat.exists
+  notify: Apply netplan
+
+- name: Run all notified handlers
+  meta: flush_handlers
+
diff --git a/roles/netplan/vars/main.yaml b/roles/netplan/vars/main.yaml
new file mode 100644
index 0000000..ed97d53
--- /dev/null
+++ b/roles/netplan/vars/main.yaml
@@ -0,0 +1 @@
+---
diff --git a/roles/nvme/README.md b/roles/nvme/README.md
new file mode 100644
index 0000000..add8247
--- /dev/null
+++ b/roles/nvme/README.md
@@ -0,0 +1,4 @@
+# NVME Role
+## Variables
+ - `nvme_targets`: List of NVME over TCP targets (IPs) (type: list of strings)
+
diff --git a/roles/nvme/defaults/main.yaml b/roles/nvme/defaults/main.yaml
new file mode 100644
index 0000000..948f0eb
--- /dev/null
+++ b/roles/nvme/defaults/main.yaml
@@ -0,0 +1,2 @@
+---
+nvme_targets: []
diff --git a/roles/nvme/handlers/main.yaml b/roles/nvme/handlers/main.yaml
new file mode 100644
index 0000000..88eb622
--- /dev/null
+++ b/roles/nvme/handlers/main.yaml
@@ -0,0 +1,6 @@
+---
+- name: Discover NVME targets
+  shell: nvme discover
+
+- name: Connect NVME targets
+  shell: nvme connect-all
diff --git a/roles/nvme/tasks/main.yaml b/roles/nvme/tasks/main.yaml
new file mode 100644
index 0000000..fa20bc6
--- /dev/null
+++ b/roles/nvme/tasks/main.yaml
@@ -0,0 +1,25 @@
+---
+
+- name: Run all notified handlers
+  meta: flush_handlers
+
+# NVME - Install packages and host config
+- name: Install the NVME packages
+  ansible.builtin.package:
+    name:
+      - nvme-cli
+    state: present
+  when: 'nvme_targets | length > 0'
+
+- name: Configure NVME discovery
+  template:
+    src: discovery.conf.j2
+    dest: /etc/nvme/discovery.conf
+  when: 'nvme_targets | length > 0'
+  notify:
+    - Discover NVME targets
+    - Connect NVME targets
+
+- name: Run all notified handlers
+  meta: flush_handlers
+
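Enabling the role on a host then only requires a list of target addresses in the inventory, for example (placeholder IPs):

```
nvme_targets:
  - 192.0.2.21
  - 192.0.2.22
```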
diff --git a/ansible/files/nvme/discovery.conf.tpl b/roles/nvme/templates/discovery.conf.j2
similarity index 89%
rename from ansible/files/nvme/discovery.conf.tpl
rename to roles/nvme/templates/discovery.conf.j2
index 7f84558..4854e42 100644
--- a/ansible/files/nvme/discovery.conf.tpl
+++ b/roles/nvme/templates/discovery.conf.j2
@@ -4,6 +4,6 @@
 #
 # Example:
 # --transport= --traddr= --trsvcid= --host-traddr= --host-iface=
-{% for target in task_targets %}
+{% for target in nvme_targets %}
 --transport=tcp --traddr={{ target }}
 {% endfor %}
diff --git a/roles/nvme/vars/main.yaml b/roles/nvme/vars/main.yaml
new file mode 100644
index 0000000..ed97d53
--- /dev/null
+++ b/roles/nvme/vars/main.yaml
@@ -0,0 +1 @@
+---
diff --git a/roles/ovn/README.md b/roles/ovn/README.md
new file mode 100644
index 0000000..6b72f78
--- /dev/null
+++ b/roles/ovn/README.md
@@ -0,0 +1,14 @@
+# OVN Role
+## Variables
+
+ - `ovn_az_name`: OVN availability zone name (**required** if using OVN IC, type: string)
+ - `ovn_clients`: List of certificates to generate for OVN clients (type: list of string)
+ - `ovn_ip_address`: Override for the server's IP address (used for tunnels and DB traffic) (type: string)
+ - `ovn_name`: OVN deployment name (**required**, type: string)
+ - `ovn_release`: OVN release to deploy, can be `distro` or `ppa` (type: string, default: `distro`)
+ - `ovn_roles`: List of roles the server should have in the OVN cluster (**required**, type: list of string):
+   - `central`: OVN API server, runs NorthBound and SouthBound database and northd daemon
+   - `host`: OVN client / controller, runs Open vSwitch and ovn-controller
+   - `ic`: OVN Inter-Connection server, runs the `ovn-ic` daemon
+   - `ic-db`: OVN Inter-Connection NorthBound and SouthBound database server
+   - `ic-gateway`: OVN Inter-Connection gateway, marks the host as an interconnect gateway
diff --git a/roles/ovn/defaults/main.yaml b/roles/ovn/defaults/main.yaml
new file mode 100644
index 0000000..67aeccb
--- /dev/null
+++ b/roles/ovn/defaults/main.yaml
@@ -0,0 +1,7 @@
+---
+ovn_clients: []
+ovn_name: ''
+ovn_roles: []
+ovn_release: 'distro'
+ovn_ip_address: "{{ ansible_default_ipv6['address'] | default(ansible_default_ipv4['address']) }}"
+ovn_az_name: ''
diff --git a/ansible/files/ovn/ovn-ppa.asc b/roles/ovn/files/ovn-ppa.asc
similarity index 100%
rename from ansible/files/ovn/ovn-ppa.asc
rename to roles/ovn/files/ovn-ppa.asc
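For orientation, a group_vars sketch for a small OVN deployment might look like this; the deployment name, role list and client list are purely illustrative:

```
ovn_name: "ovn1"
ovn_roles:
  - central
  - host
ovn_clients:
  - incus
# Only needed when interconnecting availability zones:
# ovn_az_name: "az1"
```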
diff --git a/roles/ovn/handlers/main.yaml b/roles/ovn/handlers/main.yaml
new file mode 100644
index 0000000..aa77eed
--- /dev/null
+++ b/roles/ovn/handlers/main.yaml
@@ -0,0 +1,87 @@
+---
+- name: Update apt
+  apt:
+    force_apt_get: yes
+    update_cache: yes
+    cache_valid_time: 0
+
+- name: Configure OVS
+  shell: ovs-vsctl set open_vswitch . external_ids:hostname={{ inventory_hostname }} external_ids:ovn-remote={{ ovn_central_southbound }} external_ids:ovn-encap-type=geneve external_ids:ovn-encap-ip={{ ovn_ip_address }}
+
+- name: Enable OVN IC gateway
+  shell:
+    cmd: "ovs-vsctl set open_vswitch . external_ids:ovn-is-interconn=true"
+  when: '"ic-gateway" in ovn_roles'
+
+- name: Configure OVN central northbound DB for SSL (certs)
+  shell:
+    cmd: "ovn-nbctl set-ssl /etc/ovn/{{ ovn_name }}.server.key /etc/ovn/{{ ovn_name }}.server.crt /etc/ovn/{{ ovn_name }}.ca.crt"
+  when: '"central" in ovn_roles'
+
+- name: Configure OVN central northbound DB for SSL (ports)
+  shell:
+    cmd: "ovn-nbctl set-connection pssl:6641:[::]"
+  when: '"central" in ovn_roles'
+
+- name: Configure OVN central southbound DB for SSL (certs)
+  shell:
+    cmd: "ovn-sbctl set-ssl /etc/ovn/{{ ovn_name }}.server.key /etc/ovn/{{ ovn_name }}.server.crt /etc/ovn/{{ ovn_name }}.ca.crt"
+  when: '"central" in ovn_roles'
+
+- name: Configure OVN central southbound DB for SSL (ports)
+  shell:
+    cmd: "ovn-sbctl set-connection pssl:6642:[::]"
+  when: '"central" in ovn_roles'
+
+- name: Configure OVN IC northbound DB for SSL (certs)
+  shell:
+    cmd: "ovn-ic-nbctl set-ssl /etc/ovn/{{ ovn_name }}.server.key /etc/ovn/{{ ovn_name }}.server.crt /etc/ovn/{{ ovn_name }}.ca.crt"
+  when: '"ic-db" in ovn_roles'
+
+- name: Configure OVN IC northbound DB for SSL (ports)
+  shell:
+    cmd: "ovn-ic-nbctl set-connection pssl:6645:[::]"
+  when: '"ic-db" in ovn_roles'
+
+- name: Configure OVN IC southbound DB for SSL (certs)
+  shell:
+    cmd: "ovn-ic-sbctl set-ssl /etc/ovn/{{ ovn_name }}.server.key /etc/ovn/{{ ovn_name }}.server.crt /etc/ovn/{{ ovn_name }}.ca.crt"
+  when: '"ic-db" in ovn_roles'
+
+- name: Configure OVN IC southbound DB for SSL (ports)
+  shell:
+    cmd: "ovn-ic-sbctl set-connection pssl:6646:[::]"
+  when: '"ic-db" in ovn_roles'
+
+- name: Restart OVN central
+  systemd:
+    name: ovn-central.service
+    state: restarted
+
+- name: Restart OVN host
+  systemd:
+    name: ovn-host.service
+    state: restarted
+
+- name: Restart OVN IC
+  systemd:
+    daemon_reload: true
+    name: ovn-ic.service
+    state: restarted
+  when: '"ic" in ovn_roles'
+
+- name: Restart OVN IC databases
+  systemd:
+    name: ovn-ic-db.service
+    state: restarted
+  when: '"ic-db" in ovn_roles'
+
+- name: Configure OVN AZ name
+  shell:
+    cmd: "ovn-nbctl --db={{ ovn_central_northbound }} -c /etc/ovn/{{ ovn_name }}.server.crt -p /etc/ovn/{{ ovn_name }}.server.key -C /etc/ovn/{{ ovn_name }}.ca.crt set NB_Global . name={{ ovn_az_name }}"
+  when: '"central" in ovn_roles and ovn_az_name'
+
+- name: Enable OVN IC route sharing
+  shell:
+    cmd: "ovn-nbctl --db={{ ovn_central_northbound }} -c /etc/ovn/{{ ovn_name }}.server.crt -p /etc/ovn/{{ ovn_name }}.server.key -C /etc/ovn/{{ ovn_name }}.ca.crt set NB_Global . options:ic-route-adv=true options:ic-route-learn=true"
+  when: '"central" in ovn_roles and ovn_az_name'
diff --git a/roles/ovn/tasks/main.yaml b/roles/ovn/tasks/main.yaml
new file mode 100644
index 0000000..bc9b7bd
--- /dev/null
+++ b/roles/ovn/tasks/main.yaml
@@ -0,0 +1,291 @@
+---
+
+- name: Run all notified handlers
+  meta: flush_handlers
+
+# OVN - Generate PKI certificates (central)
+- name: Create cluster directory
+  delegate_to: 127.0.0.1
+  file:
+    path: "data/ovn/{{ ovn_name }}"
+    mode: 0755
+    state: directory
+  throttle: 1
+  when: '"central" in ovn_roles or "host" in ovn_roles'
+
+- name: Create CA private key
+  delegate_to: 127.0.0.1
+  community.crypto.openssl_privatekey:
+    path: "{{ ovn_pki_path }}/ca.key"
+  register: ca_key
+  throttle: 1
+  when: '"central" in ovn_roles or "host" in ovn_roles'
+
+- name: Create CA signing request
+  delegate_to: 127.0.0.1
+  community.crypto.openssl_csr_pipe:
+    privatekey_path: "{{ ovn_pki_path }}/ca.key"
+    common_name: "OVN CA for {{ ovn_name }}"
+    use_common_name_for_san: false
+    basic_constraints:
+      - 'CA:TRUE'
+    basic_constraints_critical: true
+    key_usage:
+      - keyCertSign
+    key_usage_critical: true
+  register: ca_csr
+  when: "ca_key.changed"
+  throttle: 1
+
+- name: Issue CA certificate
+  delegate_to: 127.0.0.1
+  community.crypto.x509_certificate:
+    path: "{{ ovn_pki_path }}/ca.crt"
+    csr_content: "{{ ca_csr.csr }}"
+    privatekey_path: "{{ ovn_pki_path }}/ca.key"
+    provider: selfsigned
+  when: "ca_csr.changed"
+  throttle: 1
+
+- name: Create server keys
+  delegate_to: 127.0.0.1
+  community.crypto.openssl_privatekey:
+    path: "{{ ovn_pki_path }}/{{ inventory_hostname }}.key"
+  register: cert_key
+  when: 'ovn_roles | length > 0'
+
+- name: Create server signing request
+  delegate_to: 127.0.0.1
+  community.crypto.openssl_csr_pipe:
+    privatekey_path: "{{ ovn_pki_path }}/{{ inventory_hostname }}.key"
+    common_name: "OVN certificate for {{ inventory_hostname }}"
+    use_common_name_for_san: false
+  register: cert_csr
+  when: "cert_key.changed"
+
+- name: Issue server certificate
+  delegate_to: 127.0.0.1
+  community.crypto.x509_certificate:
+    path: "{{ ovn_pki_path }}/{{ inventory_hostname }}.crt"
+    csr_content: "{{ cert_csr.csr }}"
+    ownca_path: "{{ ovn_pki_path }}/ca.crt"
+    ownca_privatekey_path: "{{ ovn_pki_path }}/ca.key"
+    ownca_not_after: "+3650d"
+    ownca_not_before: "-1d"
+    provider: ownca
+  when: "cert_csr.changed"
+  throttle: 1
+
+- name: Create client keys
+  delegate_to: 127.0.0.1
+  community.crypto.openssl_privatekey:
+    path: "{{ ovn_pki_path }}/{{ item }}.key"
+  register: client_key
+  when: 'ovn_roles | length > 0'
+  loop: "{{ ovn_clients }}"
+  throttle: 1
+
+- name: Create client signing request
+  delegate_to: 127.0.0.1
+  community.crypto.openssl_csr_pipe:
+    privatekey_path: "{{ ovn_pki_path }}/{{ item.item }}.key"
+    common_name: "OVN client certificate for {{ item.item }}"
+    use_common_name_for_san: false
+  register: client_csr
+  loop: "{{ client_key.results }}"
+  when: "client_key.changed"
+
+- name: Issue client certificate
+  delegate_to: 127.0.0.1
+  community.crypto.x509_certificate:
+    path: "{{ ovn_pki_path }}/{{ item.item.item }}.crt"
+    csr_content: "{{ item.csr }}"
+    ownca_path: "{{ ovn_pki_path }}/ca.crt"
+    ownca_privatekey_path: "{{ ovn_pki_path }}/ca.key"
+    ownca_not_after: "+3650d"
+    ownca_not_before: "-1d"
+    provider: ownca
+  loop: "{{ client_csr.results }}"
+  when: "client_csr.changed"
+  throttle: 1
+
+
+- name: Run all notified handlers
+  meta: flush_handlers
+
+# OVN - Add package repository
+- name: Check if distribution is supported
+  meta: end_play
+  when: 'ansible_distribution not in ("Ubuntu", "Debian")'
+
+- name: Create apt keyring path
+  file:
+    path: /etc/apt/keyrings/
+    mode: 0755
+    state: directory
+  when: 'ovn_roles|length > 0 and ovn_release != "distro"'
+
+- name: Add PPA GPG key
+  copy:
+    src: ovn-ppa.asc
+    dest: /etc/apt/keyrings/ansible-ovn-ppa.asc
+  notify: Update apt
+  when: 'ovn_roles|length > 0 and ovn_release == "ppa"'
+
+- name: Get DPKG architecture
+  shell: dpkg --print-architecture
+  register: dpkg_architecture
+  changed_when: false
+  check_mode: no
+  when: 'ovn_roles|length > 0 and ovn_release != "distro"'
+
+- name: Add OVN PPA package source
+  template:
+    src: ovn-ppa.sources.j2
+    dest: /etc/apt/sources.list.d/ansible-ovn-ppa.sources
+  notify: Update apt
+  when: 'ovn_roles|length > 0 and ovn_release == "ppa"'
+
+
+- name: Run all notified handlers
+  meta: flush_handlers
+
+# OVN - Install packages
+- name: Check if distribution is supported
+  meta: end_play
+  when: 'ansible_distribution not in ("Ubuntu", "Debian")'
+
+- name: Install the OVN central package
+  apt:
+    name:
+      - ovn-central
+    install_recommends: no
+    state: present
+  when: '"central" in ovn_roles'
+
+- name: Install the OVN IC database package
+  apt:
+    name:
+      - ovn-ic-db
+    install_recommends: no
+    state: present
+  when: '"ic-db" in ovn_roles'
+
+- name: Install the OVN IC package
+  apt:
+    name:
+      - ovn-ic
+    install_recommends: no
+    state: present
+  when: '"ic" in ovn_roles'
+
+- name: Install the OVN host package
+  apt:
+    name:
+      - ovn-host
+    install_recommends: no
+    state: present
+  notify:
+    - Configure OVS
+    - Enable OVN IC gateway
+  when: '"host" in ovn_roles'
+
+
+- name: Run all notified handlers
+  meta: flush_handlers
+
+# OVN - Set up daemon configuration
+- name: Check if distribution is supported
+  meta: end_play
+  when: 'ansible_distribution not in ("Ubuntu", "Debian")'
+
+- name: Create OVN config directory
+  file:
+    path: /etc/ovn
+    mode: 0755
+    state: directory
+  when: 'ovn_roles | length > 0'
+
+- name: Transfer OVN CA certificate
+  copy:
+    src: "{{ ovn_pki_path }}/ca.crt"
+    dest: /etc/ovn/{{ ovn_name }}.ca.crt
+    mode: 0644
+  when: 'ovn_roles | length > 0'
+
+- name: Transfer OVN server certificate
+  copy:
+    src: "{{ ovn_pki_path }}/{{ inventory_hostname }}.crt"
+    dest: /etc/ovn/{{ ovn_name }}.server.crt
+    mode: 0644
+  when: 'ovn_roles | length > 0'
+
+- name: Transfer OVN server key
+  copy:
+    src: "{{ ovn_pki_path }}/{{ inventory_hostname }}.key"
+    dest: /etc/ovn/{{ ovn_name }}.server.key
+    mode: 0600
+  when: 'ovn_roles | length > 0'
+  notify:
+    - Configure OVN central northbound DB for SSL (certs)
+    - Configure OVN central northbound DB for SSL (ports)
+    - Configure OVN central southbound DB for SSL (certs)
+    - Configure OVN central southbound DB for SSL (ports)
+    - Configure OVN IC northbound DB for SSL (certs)
+    - Configure OVN IC northbound DB for SSL (ports)
+    - Configure OVN IC southbound DB for SSL (certs)
+    - Configure OVN IC southbound DB for SSL (ports)
+
+- name: Configure OVN central database
+  template:
+    src: ovn-central.j2
+    dest: /etc/default/ovn-central
+  notify:
+    - Restart OVN central
+    - Configure OVN AZ name
+    - Enable OVN IC route sharing
+  when: '"central" in ovn_roles'
+
+- name: Configure OVN host
+  template:
+    src: ovn-host.j2
+    dest: /etc/default/ovn-host
+  notify:
+    - Restart OVN host
+  when: '"host" in ovn_roles'
+
+- name: Create OVN IC override directory
+  file:
+    path: /etc/systemd/system/ovn-ic.service.d
+    mode: 0755
+    state: directory
+  when: '"ic" in ovn_roles'
+
+- name: Transfer OVN IC override
+  copy:
+    content: |
+      [Service]
+      EnvironmentFile=-/etc/default/ovn-ic
+      ExecStart=
+      ExecStart=/usr/share/ovn/scripts/ovn-ctl start_ic --no-monitor $OVN_CTL_OPTS
+    dest: /etc/systemd/system/ovn-ic.service.d/ansible.conf
+  notify: Restart OVN IC
+  when: '"ic" in ovn_roles'
+
+- name: Configure OVN IC database
+  template:
+    src: ovn-ic.j2
+    dest: /etc/default/ovn-ic
+  notify:
+    - Restart OVN IC databases
+    - Restart OVN IC
+  when: '"ic" in ovn_roles or "ic-db" in ovn_roles'
+
+- name: Transfer OVN aliases
+  template:
+    src: alias.sh.j2
+    dest: /etc/ovn/alias.sh
+  when: 'ovn_roles | length > 0'
+- name: Run all notified handlers
+  meta: flush_handlers
+
diff --git a/roles/ovn/templates/alias.sh.j2 b/roles/ovn/templates/alias.sh.j2
new file mode 100644
index 0000000..ede8420
--- /dev/null
+++ b/roles/ovn/templates/alias.sh.j2
@@ -0,0 +1,9 @@
+# Managed by Ansible, do not modify.
+alias ovn-nbctl="/usr/bin/ovn-nbctl --db={{ ovn_central_northbound }} -c /etc/ovn/{{ ovn_name }}.server.crt -p /etc/ovn/{{ ovn_name }}.server.key -C /etc/ovn/{{ ovn_name }}.ca.crt"
+alias ovn-sbctl="/usr/bin/ovn-sbctl --db={{ ovn_central_southbound }} -c /etc/ovn/{{ ovn_name }}.server.crt -p /etc/ovn/{{ ovn_name }}.server.key -C /etc/ovn/{{ ovn_name }}.ca.crt"
+{% if ovn_ic_northbound %}
+alias ovn-ic-nbctl="/usr/bin/ovn-ic-nbctl --db={{ ovn_ic_northbound }} -c /etc/ovn/{{ ovn_name }}.server.crt -p /etc/ovn/{{ ovn_name }}.server.key -C /etc/ovn/{{ ovn_name }}.ca.crt"
+{% endif %}
+{% if ovn_ic_southbound %}
+alias ovn-ic-sbctl="/usr/bin/ovn-ic-sbctl --db={{ ovn_ic_southbound }} -c /etc/ovn/{{ ovn_name }}.server.crt -p /etc/ovn/{{ ovn_name }}.server.key -C /etc/ovn/{{ ovn_name }}.ca.crt"
+{% endif %}
diff --git a/roles/ovn/templates/ovn-central.j2 b/roles/ovn/templates/ovn-central.j2
new file mode 100644
index 0000000..a3b51cc
--- /dev/null
+++ b/roles/ovn/templates/ovn-central.j2
@@ -0,0 +1,22 @@
+{% set servers = lookup('template', 'ovn-central.servers.j2') | from_yaml -%}
+# Managed by Ansible, do not modify.
+
+# This is a POSIX shell fragment -*- sh -*-
+
+# OVN_CTL_OPTS: Extra options to pass to ovs-ctl. This is, for example,
+# a suitable place to specify --ovn-northd-wrapper=valgrind.
+
+OVN_CTL_OPTS="\
+  --db-nb-create-insecure-remote=no \
+  --db-sb-create-insecure-remote=no \
+  --db-nb-addr=[{{ ovn_ip_address }}] \
+  --db-sb-addr=[{{ ovn_ip_address }}] \
+  --db-nb-cluster-local-addr=[{{ ovn_ip_address }}] \
+  --db-sb-cluster-local-addr=[{{ ovn_ip_address }}] \
+  --ovn-northd-ssl-key=/etc/ovn/{{ ovn_name }}.server.key \
+  --ovn-northd-ssl-cert=/etc/ovn/{{ ovn_name }}.server.crt \
+  --ovn-northd-ssl-ca-cert=/etc/ovn/{{ ovn_name }}.ca.crt \
+  --ovn-northd-nb-db={{ ovn_central_northbound }} \
+  --ovn-northd-sb-db={{ ovn_central_southbound }}{% if ovn_ip_address != servers[0] %} \
+  --db-nb-cluster-remote-addr=[{{ servers[0] }}] \
+  --db-sb-cluster-remote-addr=[{{ servers[0] }}]{% endif %}"
diff --git a/ansible/files/ovn/ovn-central.servers.tpl b/roles/ovn/templates/ovn-central.servers.j2
similarity index 100%
rename from ansible/files/ovn/ovn-central.servers.tpl
rename to roles/ovn/templates/ovn-central.servers.j2
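To make the clustering bootstrap in `ovn-central.j2` concrete: any server whose `ovn_ip_address` is not the first entry rendered by `ovn-central.servers.j2` also gets the cluster-remote-addr options pointing at that first server, so it joins the existing cluster instead of bootstrapping a new one. With hypothetical servers `2001:db8::1` (first) and `2001:db8::2`, the file rendered on the second server would end roughly like this:

```
  --ovn-northd-nb-db=ssl:[2001:db8::1]:6641,ssl:[2001:db8::2]:6641 \
  --ovn-northd-sb-db=ssl:[2001:db8::1]:6642,ssl:[2001:db8::2]:6642 \
  --db-nb-cluster-remote-addr=[2001:db8::1] \
  --db-sb-cluster-remote-addr=[2001:db8::1]"
```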
diff --git a/ansible/files/ovn/ovn-host.tpl b/roles/ovn/templates/ovn-host.j2
similarity index 56%
rename from ansible/files/ovn/ovn-host.tpl
rename to roles/ovn/templates/ovn-host.j2
index ec3d889..bb88604 100644
--- a/ansible/files/ovn/ovn-host.tpl
+++ b/roles/ovn/templates/ovn-host.j2
@@ -5,6 +5,6 @@
 # OVN_CTL_OPTS: Extra options to pass to ovs-ctl. This is, for example,
 # a suitable place to specify --ovn-controller-wrapper=valgrind.
 OVN_CTL_OPTS="\
-  --ovn-controller-ssl-key=/etc/ovn/{{ task_name }}.server.key \
-  --ovn-controller-ssl-cert=/etc/ovn/{{ task_name }}.server.crt \
-  --ovn-controller-ssl-ca-cert=/etc/ovn/{{ task_name }}.ca.crt"
+  --ovn-controller-ssl-key=/etc/ovn/{{ ovn_name }}.server.key \
+  --ovn-controller-ssl-cert=/etc/ovn/{{ ovn_name }}.server.crt \
+  --ovn-controller-ssl-ca-cert=/etc/ovn/{{ ovn_name }}.ca.crt"
diff --git a/roles/ovn/templates/ovn-ic.j2 b/roles/ovn/templates/ovn-ic.j2
new file mode 100644
index 0000000..0904ce6
--- /dev/null
+++ b/roles/ovn/templates/ovn-ic.j2
@@ -0,0 +1,24 @@
+{% set servers = lookup('template', 'ovn-ic.servers.j2') | from_yaml -%}
+# Managed by Ansible, do not modify.
+
+# This is a POSIX shell fragment -*- sh -*-
+
+# OVN_CTL_OPTS: Extra options to pass to ovs-ctl. This is, for example,
+# a suitable place to specify --ovn-northd-wrapper=valgrind.
+
+OVN_CTL_OPTS="\
+  --db-ic-nb-create-insecure-remote=no \
+  --db-ic-sb-create-insecure-remote=no \
+  --db-ic-nb-addr=[{{ ovn_ip_address }}] \
+  --db-ic-sb-addr=[{{ ovn_ip_address }}] \
+  --db-ic-nb-cluster-local-addr=[{{ ovn_ip_address }}] \
+  --db-ic-sb-cluster-local-addr=[{{ ovn_ip_address }}] \
+  --ovn-ic-ssl-key=/etc/ovn/{{ ovn_name }}.server.key \
+  --ovn-ic-ssl-cert=/etc/ovn/{{ ovn_name }}.server.crt \
+  --ovn-ic-ssl-ca-cert=/etc/ovn/{{ ovn_name }}.ca.crt \
+  --ovn-northd-nb-db={{ ovn_central_northbound }} \
+  --ovn-northd-sb-db={{ ovn_central_southbound }} \
+  --ovn-ic-nb-db={{ ovn_ic_northbound }} \
+  --ovn-ic-sb-db={{ ovn_ic_southbound }}{% if ovn_ip_address != servers[0] %} \
+  --db-ic-nb-cluster-remote-addr=[{{ servers[0] }}] \
+  --db-ic-sb-cluster-remote-addr=[{{ servers[0] }}]{% endif %}"
diff --git a/ansible/files/ovn/ovn-ic.servers.tpl b/roles/ovn/templates/ovn-ic.servers.j2
similarity index 100%
rename from ansible/files/ovn/ovn-ic.servers.tpl
rename to roles/ovn/templates/ovn-ic.servers.j2
diff --git a/ansible/files/ovn/ovn-ppa.sources.tpl b/roles/ovn/templates/ovn-ppa.sources.j2
similarity index 100%
rename from ansible/files/ovn/ovn-ppa.sources.tpl
rename to roles/ovn/templates/ovn-ppa.sources.j2
diff --git a/roles/ovn/vars/main.yaml b/roles/ovn/vars/main.yaml
new file mode 100644
index 0000000..e75de40
--- /dev/null
+++ b/roles/ovn/vars/main.yaml
@@ -0,0 +1,7 @@
+---
+ovn_pki_path: "data/ovn/{{ ovn_name }}/"
+
+ovn_central_northbound: "{{ lookup('template', 'ovn-central.servers.j2') | from_yaml | map('regex_replace', '^(.*)$', 'ssl:[\\1]:6641') | join(',') }}"
+ovn_central_southbound: "{{ lookup('template', 'ovn-central.servers.j2') | from_yaml | map('regex_replace', '^(.*)$', 'ssl:[\\1]:6642') | join(',') }}"
+ovn_ic_northbound: "{{ lookup('template', 'ovn-ic.servers.j2') | from_yaml | map('regex_replace', '^(.*)$', 'ssl:[\\1]:6645') | join(',') }}"
+ovn_ic_southbound: "{{ lookup('template', 'ovn-ic.servers.j2') | from_yaml | map('regex_replace', '^(.*)$', 'ssl:[\\1]:6646') | join(',') }}"
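As a worked example of these lookups (with hypothetical addresses): if `ovn-central.servers.j2` renders to the list `["2001:db8::1", "2001:db8::2"]`, the connection strings evaluate to:

```
ovn_central_northbound: "ssl:[2001:db8::1]:6641,ssl:[2001:db8::2]:6641"
ovn_central_southbound: "ssl:[2001:db8::1]:6642,ssl:[2001:db8::2]:6642"
```

which is the comma-separated `--db=` string consumed by the aliases and the `/etc/default/ovn-*` templates above.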