diff --git a/.github/workflows/greenboot-rs.yaml b/.github/workflows/greenboot-rs.yaml
new file mode 100644
index 00000000..64300b84
--- /dev/null
+++ b/.github/workflows/greenboot-rs.yaml
@@ -0,0 +1,225 @@
+---
+name: greenboot rs integration test
+
+on:
+  issue_comment:
+    types:
+      - created
+
+jobs:
+  pr-info:
+    if: ${{ github.event.issue.pull_request &&
+            (startsWith(github.event.comment.body, '/greenboot-rs-test-all') ||
+             startsWith(github.event.comment.body, '/greenboot-rs-test-39')) }}
+    runs-on: ubuntu-latest
+    steps:
+      - name: Query author repository permissions
+        uses: octokit/request-action@v2.x
+        id: user_permission
+        with:
+          route: GET /repos/${{ github.repository }}/collaborators/${{ github.event.sender.login }}/permission
+        env:
+          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
+
+      # restrict running of tests to users with admin or write permission for the repository
+      # see https://docs.github.com/en/free-pro-team@latest/rest/reference/repos#get-repository-permissions-for-a-user
+      - name: Check if user has correct permissions
+        if: contains('admin write', fromJson(steps.user_permission.outputs.data).permission)
+        id: check_user_perm
+        run: |
+          echo "User '${{ github.event.sender.login }}' has permission '${{ fromJson(steps.user_permission.outputs.data).permission }}' allowed values: 'admin', 'write'"
+          echo "allowed_user=true" >> $GITHUB_OUTPUT
+
+      - name: Get information for pull request
+        uses: octokit/request-action@v2.x
+        id: pr-api
+        with:
+          route: GET /repos/${{ github.repository }}/pulls/${{ github.event.issue.number }}
+        env:
+          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
+
+    outputs:
+      allowed_user: ${{ steps.check_user_perm.outputs.allowed_user }}
+      sha: ${{ fromJson(steps.pr-api.outputs.data).head.sha }}
+
+  comment-info:
+    needs: pr-info
+    if: ${{ needs.pr-info.outputs.allowed_user == 'true' }}
+    runs-on: ubuntu-latest
+    steps:
+      - name: PR comment analysis
+        id: comment-analysis
+        run: |
+          comment_content="${{ github.event.comment.body }}"
+          comment_array=($comment_content)
+          comment_arg_len=${#comment_array[@]}
+
+          echo ${comment_array[@]}
+          echo $comment_arg_len
+
+          # Default to the osbuild/images and osbuild-composer main branches
+          IMAGES_REPO="osbuild/images"
+          IMAGES_BRANCH="main"
+          OSBUILD_COMPOSER_REPO="osbuild/osbuild-composer"
+          OSBUILD_COMPOSER_BRANCH="main"
+
+          for item in "${comment_array[@]}"; do
+              if [[ "$item" =~ "/images:" ]]; then
+                  IMAGES_REPO="$(echo $item | cut -d: -f1)"
+                  IMAGES_BRANCH="$(echo $item | cut -d: -f2)"
+              fi
+              if [[ "$item" =~ "/osbuild-composer:" ]]; then
+                  OSBUILD_COMPOSER_REPO="$(echo $item | cut -d: -f1)"
+                  OSBUILD_COMPOSER_BRANCH="$(echo $item | cut -d: -f2)"
+              fi
+          done
+
+          echo $IMAGES_REPO
+          echo $IMAGES_BRANCH
+          echo $OSBUILD_COMPOSER_REPO
+          echo $OSBUILD_COMPOSER_BRANCH
+
+          echo "images_repo=$IMAGES_REPO" >> $GITHUB_OUTPUT
+          echo "images_branch=$IMAGES_BRANCH" >> $GITHUB_OUTPUT
+          echo "osbuild-composer_repo=$OSBUILD_COMPOSER_REPO" >> $GITHUB_OUTPUT
+          echo "osbuild-composer_branch=$OSBUILD_COMPOSER_BRANCH" >> $GITHUB_OUTPUT
+
+    outputs:
+      images_repo: ${{ steps.comment-analysis.outputs.images_repo }}
+      images_branch: ${{ steps.comment-analysis.outputs.images_branch }}
+      osbuild-composer_repo: ${{ steps.comment-analysis.outputs.osbuild-composer_repo }}
+      osbuild-composer_branch: ${{ steps.comment-analysis.outputs.osbuild-composer_branch }}
+
+  pre-greenboot-rs-39:
+    needs: pr-info
+    if: ${{ needs.pr-info.outputs.allowed_user == 'true' &&
+            (startsWith(github.event.comment.body, '/greenboot-rs-test-all') ||
+             startsWith(github.event.comment.body, '/greenboot-rs-test-39')) }}
+    runs-on: ubuntu-latest
+    env:
+      STATUS_NAME: greenboot-rs-39
+
+    steps:
+      - name: Create in-progress status
+        uses: octokit/request-action@v2.x
+        with:
+          route: 'POST /repos/${{ github.repository }}/statuses/${{ needs.pr-info.outputs.sha }}'
+          context: ${{ env.STATUS_NAME }}
+          state: pending
+          description: 'Deploy runner'
+          target_url: 'https://github.com/${{ github.repository }}/actions/runs/${{ github.run_id }}'
+        env:
+          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
+
+  greenboot-rs-39:
+    needs: [pr-info, comment-info, pre-greenboot-rs-39]
+    if: ${{ needs.pr-info.outputs.allowed_user == 'true' &&
+            (startsWith(github.event.comment.body, '/greenboot-rs-test-all') ||
+             startsWith(github.event.comment.body, '/greenboot-rs-test-39')) }}
+    runs-on: [kite, x86_64, gcp, fedora-39, large]
+    env:
+      STATUS_NAME: greenboot-rs-39
+
+    steps:
+      - name: Create in-progress status
+        uses: octokit/request-action@v2.x
+        with:
+          route: 'POST /repos/${{ github.repository }}/statuses/${{ needs.pr-info.outputs.sha }}'
+          context: ${{ env.STATUS_NAME }}
+          state: pending
+          target_url: 'https://github.com/${{ github.repository }}/actions/runs/${{ github.run_id }}'
+        env:
+          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
+
+      - name: Install required packages
+        run: sudo dnf install -y make gcc openssl openssl-devel findutils golang git tpm2-tss-devel swtpm swtpm-tools clevis clevis-luks cryptsetup cryptsetup-devel clang-devel cracklib-dicts rust cargo rust-packaging rpmdevtools python3-docutils createrepo_c libassuan-devel krb5-devel gpgme-devel go-rpm-macros
+
+      - name: Clone repository
+        uses: actions/checkout@v3
+        with:
+          ref: ${{ needs.pr-info.outputs.sha }}
+          fetch-depth: 0
+
+      - name: Build greenboot RPM packages
+        run: make rpm
+        working-directory: greenboot
+
+      - name: Prepare greenboot
+        run: |
+          sudo mkdir -p /var/www/html/source
+          sudo cp ./rpmbuild/RPMS/x86_64/* /var/www/html/source/ 2>/dev/null || :
+          sudo createrepo_c /var/www/html/source
+          sudo restorecon -Rv /var/www/html/source
+          sudo ls -al /var/www/html/source
+
+      # Only run when the PR has an images or osbuild-composer dependency
+      - name: Checkout images code
+        if: ${{ needs.comment-info.outputs.images_branch != 'main' || needs.comment-info.outputs.osbuild-composer_branch != 'main' }}
+        uses: actions/checkout@v3
+        with:
+          repository: ${{ needs.comment-info.outputs.images_repo }}
+          ref: ${{ needs.comment-info.outputs.images_branch }}
+          path: images
+
+      # Only run when the PR has an images or osbuild-composer dependency
+      - name: Checkout osbuild-composer code
+        if: ${{ needs.comment-info.outputs.osbuild-composer_branch != 'main' || needs.comment-info.outputs.images_branch != 'main' }}
+        run: git clone -b ${{ needs.comment-info.outputs.osbuild-composer_branch }} https://github.com/${{ needs.comment-info.outputs.osbuild-composer_repo }}
+        # uses: actions/checkout@v3
+        # with:
+        #   repository: ${{ needs.comment-info.outputs.osbuild-composer_repo }}
+        #   ref: ${{ needs.comment-info.outputs.osbuild-composer_branch }}
+        #   path: osbuild-composer
+
+      - name: Build osbuild-composer
+        if: ${{ needs.comment-info.outputs.osbuild-composer_branch != 'main' || needs.comment-info.outputs.images_branch != 'main' }}
+        run: |
+          ls -al
+          pwd
+          git status
+          ls -a ../images
+          git -C ../images status
+          go clean -modcache
+          go mod tidy
+          go mod edit -replace github.com/osbuild/images=../images
+          GOPROXY=direct GOSUMDB=off ./tools/prepare-source.sh
+
+          git config --global user.name "greenboot bot"
+          git config --global user.email "greenboot-bot@greenboot.com"
user.email "greenboot-bot@greenboot.com" + git status + git add -A + git commit -m "new build for greenboot test" + + make rpm + + sudo cp rpmbuild/RPMS/x86_64/* /var/www/html/source/ + sudo ls -al /var/www/html/source/ + sudo createrepo_c /var/www/html/source + sudo restorecon -Rv /var/www/html/source + + sudo tee "/etc/yum.repos.d/source.repo" > /dev/null << EOF + [source] + name = source + baseurl = file:///var/www/html/source/ + enabled = 1 + gpgcheck = 0 + priority = 5 + EOF + + sudo dnf info osbuild osbuild-composer + working-directory: ./osbuild-composer + + - name: Run greenboot-rs.sh test + run: ./greenboot-rs.sh + working-directory: tests + timeout-minutes: 100 + + - name: Set result status + if: always() + uses: octokit/request-action@v2.x + with: + route: 'POST /repos/${{ github.repository }}/statuses/${{ needs.pr-info.outputs.sha }}' + context: ${{ env.STATUS_NAME }} + state: ${{ job.status }} + target_url: 'https://github.com/${{ github.repository }}/actions/runs/${{ github.run_id }}' + env: + GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} diff --git a/test/ansible.cfg b/test/ansible.cfg new file mode 100644 index 00000000..2ffc410a --- /dev/null +++ b/test/ansible.cfg @@ -0,0 +1,8 @@ +[defaults] +timeout = 30 +# human-readable stdout/stderr results display +stdout_callback = yaml + +[ssh_connection] +scp_if_ssh=True +pipelining=False diff --git a/test/check-ostree.yaml b/test/check-ostree.yaml new file mode 100644 index 00000000..6bbae164 --- /dev/null +++ b/test/check-ostree.yaml @@ -0,0 +1,388 @@ +--- +- hosts: ostree_guest + become: no + vars: + total_counter: "0" + failed_counter: "0" + + tasks: + + # current target host's IP address + - debug: var=ansible_all_ipv4_addresses + - debug: var=ansible_facts['distribution_version'] + - debug: var=ansible_facts['distribution'] + - debug: var=ansible_facts['architecture'] + + # check BIOS or UEFI + - name: check bios or uefi + stat: + path: /sys/firmware/efi + + # check secure boot status if it's enabled + - name: check secure boot status + command: mokutil --sb-state + ignore_errors: yes + + - name: check partition size + command: df -h + ignore_errors: yes + become: yes + + - name: check disk partition table + command: fdisk -l + ignore_errors: yes + become: yes + + - name: check rpm-ostree status + command: rpm-ostree status + ignore_errors: yes + + - name: check installed kernel + command: uname -r + register: result_kernel + + # first installed or upgraded + - name: determin which stage the checking is running on + shell: rpm-ostree status --json | jq '.deployments | length' + register: result_stage + + - set_fact: + checking_stage: "{{ result_stage.stdout }}" + + # case: check ostree commit correctly updated + - name: get deployed ostree commit + shell: rpm-ostree status --json | jq -r '.deployments[0].checksum' + register: result_commit + + - name: make a json result + set_fact: + deploy_commit: "{{ result_commit.stdout }}" + + - name: check commit deployed and built + block: + - assert: + that: + - deploy_commit == ostree_commit + fail_msg: "deployed ostree commit is not commit built by osbuild-composer" + success_msg: "successful building and deployment" + always: + - set_fact: + total_counter: "{{ total_counter | int + 1 }}" + rescue: + - name: failed count + 1 + set_fact: + failed_counter: "{{ failed_counter | int + 1 }}" + + # case: check ostree ref + - name: check ostree ref + shell: rpm-ostree status --json | jq -r '.deployments[0].origin' + register: result_ref + + - name: check ostree ref deployed + block: + - 
assert: + that: + - result_ref.stdout == ostree_ref + fail_msg: "deployed ostree ref failed" + success_msg: "ostree ref successful building and deployment" + always: + - set_fact: + total_counter: "{{ total_counter | int + 1 }}" + rescue: + - name: failed count + 1 + set_fact: + failed_counter: "{{ failed_counter | int + 1 }}" + + - name: check mount point device name + command: findmnt + + # case: check wget installed after upgrade + - name: check installed package + shell: rpm -qa | sort + register: result_packages + + - name: check wget installed + block: + - assert: + that: + - "'wget' in result_packages.stdout" + fail_msg: "wget not installed, ostree upgrade might be failed" + success_msg: "wget installed in ostree upgrade" + always: + - set_fact: + total_counter: "{{ total_counter | int + 1 }}" + rescue: + - name: failed count + 1 + set_fact: + failed_counter: "{{ failed_counter | int + 1 }}" + when: checking_stage == "2" + + # case: check installed greenboot packages + - name: greenboot should be installed + block: + - name: greenboot should be installed + shell: rpm -qa | grep greenboot + register: result_greenboot_packages + + - assert: + that: + - "'greenboot-1' in result_greenboot_packages.stdout" + - "'greenboot-default-health-checks' in result_greenboot_packages.stdout" + fail_msg: "greenboot is not installed" + success_msg: "greenboot is installed" + always: + - set_fact: + total_counter: "{{ total_counter | int + 1 }}" + rescue: + - name: failed count + 1 + set_fact: + failed_counter: "{{ failed_counter | int + 1 }}" + + # case: check greenboot services + - name: greenboot services should be enabled + block: + - name: greenboot services should be enabled + command: systemctl is-enabled greenboot-healthcheck.service greenboot-rollback.service + register: result_greenboot_service + + - assert: + that: + - result_greenboot_service.stdout == 'enabled\nenabled' + fail_msg: "greenboot services are not enabled" + success_msg: "greenboot services are enabled" + always: + - set_fact: + total_counter: "{{ total_counter | int + 1 }}" + rescue: + - name: failed count + 1 + set_fact: + failed_counter: "{{ failed_counter | int + 1 }}" + + - name: greenboot-healthcheck service should be active + block: + - name: greenboot-healthcheck service should be active + command: systemctl is-active greenboot-healthcheck.service + register: result_greenboot_service + + - assert: + that: + - result_greenboot_service.stdout == 'active' + fail_msg: "greenboot services are not active" + success_msg: "greenboot services are active" + always: + - set_fact: + total_counter: "{{ total_counter | int + 1 }}" + rescue: + - name: failed count + 1 + set_fact: + failed_counter: "{{ failed_counter | int + 1 }}" + + # case: check greenboot and greenboot-rollback services log + - name: greenboot service should run without error + block: + - name: all greenboot and greenboot-healthcheck services should run without error + command: journalctl -b -0 -u greenboot -u greenboot-healthcheck + become: yes + register: result_greenboot_log + + - assert: + that: + - "'greenboot health-check passed' in result_greenboot_log.stdout" + fail_msg: "Some errors happened in service boot" + success_msg: "All greenboot services booted success" + + always: + - set_fact: + total_counter: "{{ total_counter | int + 1 }}" + rescue: + - name: failed count + 1 + set_fact: + failed_counter: "{{ failed_counter | int + 1 }}" + + # case: check grubenv variables + - name: grubenv variables should contain boot_success=1 + block: + - name: grubenv 
variables should contain boot_success=1 + command: grub2-editenv list + register: result_grubenv + become: yes + + - assert: + that: + - "'boot_success=1' in result_grubenv.stdout" + fail_msg: "Not found boot_success=1" + success_msg: "Found boot_success=1" + always: + - set_fact: + total_counter: "{{ total_counter | int + 1 }}" + rescue: + - name: failed count + 1 + set_fact: + failed_counter: "{{ failed_counter | int + 1 }}" + + # case: check rollback function if boot error found + - name: install sanely failing health check unit to test red boot status behavior + block: + - name: install sanely failing health check unit to test red boot status behavior + command: rpm-ostree install --cache-only https://kite-webhook-prod.s3.amazonaws.com/greenboot-failing-unit-1.0-1.el8.noarch.rpm --reboot + become: yes + ignore_errors: yes + ignore_unreachable: yes + + - block: + - name: delay 60 seconds after reboot to make system stable + pause: + seconds: 60 + delegate_to: 127.0.0.1 + + - name: wait for connection to become reachable/usable + wait_for_connection: + delay: 30 + register: result_rebooting + + - name: waits until instance is reachable + wait_for: + host: "{{ ansible_all_ipv4_addresses[0] }}" + port: 22 + search_regex: OpenSSH + delay: 10 + register: result_rollback + until: result_rollback is success + retries: 6 + delay: 10 + ignore_unreachable: yes + + - fail: + msg: "Failed here for unreachable to run rescue" + when: result_rollback.unreachable is defined + + rescue: + # manual reboot VM to workaround vm reboot failed issue + - name: check vm name + community.libvirt.virt: + command: list_vms + become: yes + register: all_vms + delegate_to: 127.0.0.1 + + - set_fact: + vm_name: "{{ item }}" + loop: "{{ all_vms.list_vms }}" + when: item is match ("ostree-.*") + + - debug: var=vm_name + + - name: stop vm "{{ vm_name }}" + community.libvirt.virt: + name: "{{ vm_name }}" + command: destroy + become: yes + delegate_to: 127.0.0.1 + + - name: start vm "{{ vm_name }}" + community.libvirt.virt: + name: "{{ vm_name }}" + command: start + become: yes + delegate_to: 127.0.0.1 + + - name: wait for connection to become reachable/usable + wait_for_connection: + delay: 30 + + - name: waits until instance is reachable + wait_for: + host: "{{ ansible_all_ipv4_addresses[0] }}" + port: 22 + search_regex: OpenSSH + delay: 10 + register: result_rollback + until: result_rollback is success + retries: 6 + delay: 10 + + - assert: + that: + - result_rollback is succeeded + fail_msg: "Rollback failed" + success_msg: "Rollback success" + always: + - set_fact: + total_counter: "{{ total_counter | int + 1 }}" + rescue: + - name: failed count + 1 + set_fact: + failed_counter: "{{ failed_counter | int + 1 }}" + + # case: check ostree commit after rollback + - name: check ostree commit after rollback + block: + - name: check ostree commit after rollback + shell: rpm-ostree status --json | jq -r '.deployments[0].checksum' + register: result_commit + + - assert: + that: + - deploy_commit == ostree_commit + fail_msg: "Not rollbackto last commit" + success_msg: "Rollback success" + always: + - set_fact: + total_counter: "{{ total_counter | int + 1 }}" + rescue: + - name: failed count + 1 + set_fact: + failed_counter: "{{ failed_counter | int + 1 }}" + when: result_rollback is succeeded + + # case: check greenboot* services log again + - name: fallback log should be found here + block: + - name: fallback log should be found here + command: journalctl -u greenboot-rollback + become: yes + register: result_greenboot_log + + 
- assert: + that: + - "'Greenboot will now attempt to rollback' in result_greenboot_log.stdout" + - "'Rollback successful' in result_greenboot_log.stdout" + fail_msg: "Fallback log not found" + success_msg: "Found fallback log" + + always: + - set_fact: + total_counter: "{{ total_counter | int + 1 }}" + rescue: + - name: failed count + 1 + set_fact: + failed_counter: "{{ failed_counter | int + 1 }}" + when: result_rollback is succeeded + + # case: check grubenv variables again + - name: grubenv variables should contain boot_success=1 + block: + - name: grubenv variables should contain boot_success=1 + command: grub2-editenv list + register: result_grubenv + become: yes + + - assert: + that: + - "'boot_success=1' in result_grubenv.stdout" + fail_msg: "Not found boot_success=1" + success_msg: "Found boot_success=1" + always: + - set_fact: + total_counter: "{{ total_counter | int + 1 }}" + rescue: + - name: failed count + 1 + set_fact: + failed_counter: "{{ failed_counter | int + 1 }}" + when: result_rollback is succeeded + + - assert: + that: + - failed_counter == "0" + fail_msg: "Run {{ total_counter }} tests, but {{ failed_counter }} of them failed" + success_msg: "Totally {{ total_counter }} test passed" diff --git a/test/files/fedora-39.json b/test/files/fedora-39.json new file mode 100644 index 00000000..6f7a33a8 --- /dev/null +++ b/test/files/fedora-39.json @@ -0,0 +1,26 @@ +{ + "x86_64": [ + { + "name": "fedora", + "baseurl": "https://dl.fedoraproject.org/pub/fedora/linux/development/39/Everything/x86_64/os/", + "gpgkey": "-----BEGIN PGP PUBLIC KEY BLOCK-----\n\nmQINBGLykg8BEADURjKtgQpQNoluifXia+U3FuqGCTQ1w7iTqx1UvNhLX6tb9Qjy\nl/vjl1iXxucrd2JBnrT/21BdtaABhu2hPy7bpcGEkG8MDinAMZBzcyzHcS/JiGHZ\nd/YmMWQUgbDlApbxFSGWiXMgT0Js5QdcywHI5oiCmV0lkZ+khZ4PkVWmk6uZgYWf\nJOG5wp5TDPnoYXlA4CLb6hu2691aDm9b99XYqEjhbeIzS9bFQrdrQzRMKyzLr8NW\ns8Pq2tgyzu8txlWdBXJyAMKldTPstqtygLL9UUdo7CIQQzWqeDbAnv+WdOmiI/hR\netbbwNV+thkLJz0WD90C2L3JEeUJX5Qa4oPvfNLDeCKmJFEFUTCEdm0AYoQDjLJQ\n3d3q9M09thXO/jYM0cSnJDclssLNsNWfjJAerLadLwNnYRuralw7f74QSLYdJAJU\nSFShBlctWKnlhQ7ehockqtgXtWckkqPZZjGiMXwHde9b9Yyi+VqtUQWxSWny+9g9\n6tcoa3AdnmpqSTHQxYajD0EGXJ0z0NXfqxkI0lo8UxzypEBy4sARZ4XhTU73Zwk0\nLGhEUHlfyxXgRs6RRvM2UIoo+gou2M9rn/RWkhuHJNSfgrM0BmIBCjhjwGiS33Qh\nysLDWJMdch8lsu1fTmLEFQrOB93oieOJQ0Ysi5gQY8TOT+oZvVi9pSMJuwARAQAB\ntDFGZWRvcmEgKDM5KSA8ZmVkb3JhLTM5LXByaW1hcnlAZmVkb3JhcHJvamVjdC5v\ncmc+iQJOBBMBCAA4FiEE6PI5lvIyGGQMtEy+dc9axBi450wFAmLykg8CGw8FCwkI\nBwIGFQoJCAsCBBYCAwECHgECF4AACgkQdc9axBi450yd4w//ZtghbZX5KFstOdBS\nrcbBfCK9zmRvzeejzGl6lPKfqwx7OOHYxFlRa9MYLl8QG7Aq6yRRWzzEHiSb0wJw\nWXz5tbkAmV/fpS4wnb3FDArD44u317UAnaU+UlhgK1g62lwI2dGpvTSvohMBMeBY\nB5aBd+sLi3UtiSRM2XhxvxaWwr/oFLjKDukgrPQzeV3F/XdxGhSz/GZUVFVprcrB\nh/dIo4k0Za7YVRhlVM0coOIcKbcjxAK9CCZ8+jtdIh3/BN5zJ0RFMgqSsrWYWeft\nBI3KWLbyMfRwEtp7xSi17WXbRfsSoqwIVgP+RCSaAdVuiYs/GCRsT3ydYcDvutuJ\nYZoE53yczemM/1HZZFI04zI7KBsKm9NFH0o4K2nBWuowBm59iFvWHFpX6em54cq4\n45NwY01FkSQUqntfqCWFSowwFHAZM4gblOikq2B5zHoIntCiJlPGuaJiVSw9ZpEc\n+IEQfmXJjKGSkMbU9tmNfLR9skVQJizMTtoUQ12DWC+14anxnnR2hxnhUDAabV6y\nJ5dGeb/ArmxQj3IMrajdNwjuk9GMeMSSS2EMY8ryOuYwRbFhBOLhGAnmM5OOSUxv\nA4ipWraXDW0bK/wXI7yHMkc6WYrdV3SIXEqJBTp7npimv3JC+exWEbTLcgvV70FP\nX55M9nDtzUSayJuEcfFP2c9KQCE=\n=J4qZ\n-----END PGP PUBLIC KEY BLOCK-----\n", + "check_gpg": true + }, + { + "name": "source", + "baseurl": "http://192.168.100.1/source/" + } + ], + "aarch64": [ + { + "name": "fedora", + "baseurl": "https://dl.fedoraproject.org/pub/fedora/linux/development/39/Everything/aarch64/os/", + "gpgkey": "-----BEGIN PGP PUBLIC KEY 
BLOCK-----\n\nmQINBGLykg8BEADURjKtgQpQNoluifXia+U3FuqGCTQ1w7iTqx1UvNhLX6tb9Qjy\nl/vjl1iXxucrd2JBnrT/21BdtaABhu2hPy7bpcGEkG8MDinAMZBzcyzHcS/JiGHZ\nd/YmMWQUgbDlApbxFSGWiXMgT0Js5QdcywHI5oiCmV0lkZ+khZ4PkVWmk6uZgYWf\nJOG5wp5TDPnoYXlA4CLb6hu2691aDm9b99XYqEjhbeIzS9bFQrdrQzRMKyzLr8NW\ns8Pq2tgyzu8txlWdBXJyAMKldTPstqtygLL9UUdo7CIQQzWqeDbAnv+WdOmiI/hR\netbbwNV+thkLJz0WD90C2L3JEeUJX5Qa4oPvfNLDeCKmJFEFUTCEdm0AYoQDjLJQ\n3d3q9M09thXO/jYM0cSnJDclssLNsNWfjJAerLadLwNnYRuralw7f74QSLYdJAJU\nSFShBlctWKnlhQ7ehockqtgXtWckkqPZZjGiMXwHde9b9Yyi+VqtUQWxSWny+9g9\n6tcoa3AdnmpqSTHQxYajD0EGXJ0z0NXfqxkI0lo8UxzypEBy4sARZ4XhTU73Zwk0\nLGhEUHlfyxXgRs6RRvM2UIoo+gou2M9rn/RWkhuHJNSfgrM0BmIBCjhjwGiS33Qh\nysLDWJMdch8lsu1fTmLEFQrOB93oieOJQ0Ysi5gQY8TOT+oZvVi9pSMJuwARAQAB\ntDFGZWRvcmEgKDM5KSA8ZmVkb3JhLTM5LXByaW1hcnlAZmVkb3JhcHJvamVjdC5v\ncmc+iQJOBBMBCAA4FiEE6PI5lvIyGGQMtEy+dc9axBi450wFAmLykg8CGw8FCwkI\nBwIGFQoJCAsCBBYCAwECHgECF4AACgkQdc9axBi450yd4w//ZtghbZX5KFstOdBS\nrcbBfCK9zmRvzeejzGl6lPKfqwx7OOHYxFlRa9MYLl8QG7Aq6yRRWzzEHiSb0wJw\nWXz5tbkAmV/fpS4wnb3FDArD44u317UAnaU+UlhgK1g62lwI2dGpvTSvohMBMeBY\nB5aBd+sLi3UtiSRM2XhxvxaWwr/oFLjKDukgrPQzeV3F/XdxGhSz/GZUVFVprcrB\nh/dIo4k0Za7YVRhlVM0coOIcKbcjxAK9CCZ8+jtdIh3/BN5zJ0RFMgqSsrWYWeft\nBI3KWLbyMfRwEtp7xSi17WXbRfsSoqwIVgP+RCSaAdVuiYs/GCRsT3ydYcDvutuJ\nYZoE53yczemM/1HZZFI04zI7KBsKm9NFH0o4K2nBWuowBm59iFvWHFpX6em54cq4\n45NwY01FkSQUqntfqCWFSowwFHAZM4gblOikq2B5zHoIntCiJlPGuaJiVSw9ZpEc\n+IEQfmXJjKGSkMbU9tmNfLR9skVQJizMTtoUQ12DWC+14anxnnR2hxnhUDAabV6y\nJ5dGeb/ArmxQj3IMrajdNwjuk9GMeMSSS2EMY8ryOuYwRbFhBOLhGAnmM5OOSUxv\nA4ipWraXDW0bK/wXI7yHMkc6WYrdV3SIXEqJBTp7npimv3JC+exWEbTLcgvV70FP\nX55M9nDtzUSayJuEcfFP2c9KQCE=\n=J4qZ\n-----END PGP PUBLIC KEY BLOCK-----\n", + "check_gpg": true + }, + { + "name": "source", + "baseurl": "http://192.168.100.1/source/" + } + ] +} diff --git a/test/greenboot-rs.sh b/test/greenboot-rs.sh new file mode 100755 index 00000000..06226e78 --- /dev/null +++ b/test/greenboot-rs.sh @@ -0,0 +1,533 @@ +#!/bin/bash +set -exuo pipefail + +# Get OS data. +source /etc/os-release + +# Dumps details about the instance running the CI job. +CPUS=$(nproc) +MEM=$(free -m | grep -oP '\d+' | head -n 1) +DISK=$(df --output=size -h / | sed '1d;s/[^0-9]//g') +HOSTNAME=$(uname -n) +USER=$(whoami) +ARCH=$(uname -m) +KERNEL=$(uname -r) + +echo -e "\033[0;36m" +cat << EOF +------------------------------------------------------------------------------ +CI MACHINE SPECS +------------------------------------------------------------------------------ + Hostname: ${HOSTNAME} + User: ${USER} + CPUs: ${CPUS} + RAM: ${MEM} MB + DISK: ${DISK} GB + ARCH: ${ARCH} + KERNEL: ${KERNEL} +------------------------------------------------------------------------------ +EOF +echo "CPU info" +lscpu +echo -e "\033[0m" + +# Colorful output. +function greenprint { + echo -e "\033[1;32m${1}\033[0m" +} + +# set locale to en_US.UTF-8 +sudo dnf install -y glibc-langpack-en +sudo localectl set-locale LANG=en_US.UTF-8 + +# Install required packages +greenprint "Install required packages" +sudo dnf install -y --nogpgcheck httpd composer-cli podman skopeo wget firewalld lorax xorriso curl jq expect qemu-img qemu-kvm libvirt-client libvirt-daemon-kvm libvirt-daemon virt-install rpmdevtools ansible-core + +# Avoid collection installation filed sometime +for _ in $(seq 0 30); do + ansible-galaxy collection install community.general community.libvirt + install_result=$? 
+    if [[ $install_result == 0 ]]; then
+        break
+    fi
+    sleep 10
+done
+
+# Customize repository
+sudo mkdir -p /etc/osbuild-composer/repositories
+
+# Set os-variant and boot location used by virt-install.
+case "${ID}-${VERSION_ID}" in
+    "fedora-"*)
+        IMAGE_TYPE=fedora-iot-commit
+        OSTREE_REF="fedora/${VERSION_ID}/${ARCH}/iot"
+        OS_VARIANT="fedora-unknown"
+        BOOT_LOCATION="https://dl.fedoraproject.org/pub/fedora/linux/development/39/Everything/x86_64/os/"
+        sudo cp files/fedora-39.json /etc/osbuild-composer/repositories/fedora-39.json
+        ;;
+    *)
+        echo "unsupported distro: ${ID}-${VERSION_ID}"
+        exit 1;;
+esac
+
+# Check ostree_key permissions
+KEY_PERMISSION_PRE=$(stat -L -c "%a %G %U" key/ostree_key | grep -oP '\d+' | head -n 1)
+echo -e "${KEY_PERMISSION_PRE}"
+if [[ "${KEY_PERMISSION_PRE}" != "600" ]]; then
+    greenprint "💡 File permissions too open...Changing to 600"
+    chmod 600 ./key/ostree_key
+fi
+
+# Start httpd server as prod ostree repo
+greenprint "Start httpd service"
+sudo systemctl enable --now httpd.service
+
+# Start osbuild-composer.socket
+greenprint "Start osbuild-composer.socket"
+sudo systemctl enable --now osbuild-composer.socket
+
+# Start firewalld
+greenprint "Start firewalld"
+sudo systemctl enable --now firewalld
+
+# Start libvirtd and test it.
+greenprint "🚀 Starting libvirt daemon"
+sudo systemctl start libvirtd
+sudo virsh list --all > /dev/null
+
+# Set a customized dnsmasq configuration for libvirt so we always get the
+# same address on bootup.
+greenprint "💡 Setup libvirt network"
+sudo tee /tmp/integration.xml > /dev/null << EOF
+<network>
+  <name>integration</name>
+  <uuid>1c8fe98c-b53a-4ca4-bbdb-deb0f26b3579</uuid>
+  <forward mode='nat'>
+    <nat>
+      <port start='1024' end='65535'/>
+    </nat>
+  </forward>
+  <bridge name='integration' zone='trusted' stp='on' delay='0'/>
+  <ip address='192.168.100.1' netmask='255.255.255.0'>
+    <dhcp>
+      <range start='192.168.100.2' end='192.168.100.254'/>
+      <host mac='34:49:22:B0:83:30' name='vm' ip='192.168.100.50'/>
+    </dhcp>
+  </ip>
+</network>
+EOF
+if ! sudo virsh net-info integration > /dev/null 2>&1; then
+    sudo virsh net-define /tmp/integration.xml
+fi
+if [[ $(sudo virsh net-info integration | grep 'Active' | awk '{print $2}') == 'no' ]]; then
+    sudo virsh net-start integration
+fi
+
+# Allow anyone in the adm group to talk to libvirt.
+greenprint "🚪 Allowing users in adm group to talk to libvirt"
+sudo tee /etc/polkit-1/rules.d/50-libvirt.rules > /dev/null << EOF
+polkit.addRule(function(action, subject) {
+    if (action.id == "org.libvirt.unix.manage" &&
+        subject.isInGroup("adm")) {
+            return polkit.Result.YES;
+    }
+});
+EOF
+
+# Basic weldr API status checking
+sudo composer-cli status show
+
+# Source checking
+sudo composer-cli sources list
+for SOURCE in $(sudo composer-cli sources list); do
+    sudo composer-cli sources info "$SOURCE"
+done
+
+# Set up variables.
+TEST_UUID=$(uuidgen)
+IMAGE_KEY="ostree-${TEST_UUID}"
+GUEST_ADDRESS=192.168.100.50
+SSH_USER="admin"
+OS_NAME="rhel-edge"
+IMAGE_TYPE=edge-commit
+PROD_REPO_URL=http://192.168.100.1/repo
+
+# Set up temporary files.
+TEMPDIR=$(mktemp -d)
+BLUEPRINT_FILE=${TEMPDIR}/blueprint.toml
+HTTPD_PATH="/var/www/html"
+KS_FILE=${HTTPD_PATH}/ks.cfg
+COMPOSE_START=${TEMPDIR}/compose-start-${IMAGE_KEY}.json
+COMPOSE_INFO=${TEMPDIR}/compose-info-${IMAGE_KEY}.json
+BOOT_ARGS="uefi"
+
+# SSH setup.
+SSH_OPTIONS=(-o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null -o ConnectTimeout=5)
+SSH_KEY=key/ostree_key
+
+# Get the compose log.
+get_compose_log () {
+    COMPOSE_ID=$1
+    LOG_FILE=osbuild-${ID}-${VERSION_ID}-${COMPOSE_ID}.log
+
+    # Download the logs.
+    sudo composer-cli compose log "$COMPOSE_ID" | tee "$LOG_FILE" > /dev/null
+}
+
+# Get the compose metadata.
+get_compose_metadata () {
+    COMPOSE_ID=$1
+    METADATA_FILE=osbuild-${ID}-${VERSION_ID}-${COMPOSE_ID}.json
+
+    # Download the metadata.
+    sudo composer-cli compose metadata "$COMPOSE_ID" > /dev/null
+
+    # Find the tarball and extract it.
+    TARBALL=$(basename "$(find . -maxdepth 1 -type f -name "*-metadata.tar")")
+    sudo tar -xf "$TARBALL" -C "${TEMPDIR}"
+    sudo rm -f "$TARBALL"
+
+    # Move the JSON file into place.
+    sudo cat "${TEMPDIR}"/"${COMPOSE_ID}".json | jq -M '.' | tee "$METADATA_FILE" > /dev/null
+}
+
+# Build ostree image.
+build_image() {
+    blueprint_file=$1
+    blueprint_name=$2
+
+    # Prepare the blueprint for the compose.
+    greenprint "📋 Preparing blueprint"
+    sudo composer-cli blueprints push "$blueprint_file"
+    sudo composer-cli blueprints depsolve "$blueprint_name"
+
+    # Get worker unit file so we can watch the journal.
+    WORKER_UNIT=$(sudo systemctl list-units | grep -o -E "osbuild.*worker.*\.service")
+    sudo journalctl -af -n 1 -u "${WORKER_UNIT}" &
+    WORKER_JOURNAL_PID=$!
+
+    # Start the compose.
+    greenprint "🚀 Starting compose"
+    if [[ $blueprint_name == upgrade ]]; then
+        # composer-cli in Fedora 32 has different start-ostree arguments
+        # see https://github.com/weldr/lorax/pull/1051
+        sudo composer-cli --json compose start-ostree --ref "$OSTREE_REF" "$blueprint_name" $IMAGE_TYPE | tee "$COMPOSE_START"
+    else
+        sudo composer-cli --json compose start "$blueprint_name" $IMAGE_TYPE | tee "$COMPOSE_START"
+    fi
+
+    COMPOSE_ID=$(jq -r '.[0].body.build_id' "$COMPOSE_START")
+
+    # Wait for the compose to finish.
+    greenprint "⏱ Waiting for compose to finish: ${COMPOSE_ID}"
+    while true; do
+        sudo composer-cli --json compose info "${COMPOSE_ID}" | tee "$COMPOSE_INFO" > /dev/null
+
+        COMPOSE_STATUS=$(jq -r '.[0].body.queue_status' "$COMPOSE_INFO")
+
+        # Is the compose finished?
+        if [[ $COMPOSE_STATUS != RUNNING ]] && [[ $COMPOSE_STATUS != WAITING ]]; then
+            break
+        fi
+
+        # Wait 5 seconds and try again.
+        sleep 5
+    done
+
+    # Capture the compose logs from osbuild.
+    greenprint "💬 Getting compose log and metadata"
+    get_compose_log "$COMPOSE_ID"
+    get_compose_metadata "$COMPOSE_ID"
+
+    # Did the compose finish with success?
+    if [[ $COMPOSE_STATUS != FINISHED ]]; then
+        echo "Something went wrong with the compose. 😢"
+        exit 1
+    fi
+
+    # Stop watching the worker journal.
+    sudo pkill -P ${WORKER_JOURNAL_PID}
+}
+
+# Wait for the ssh server to be up.
+wait_for_ssh_up () {
+    SSH_STATUS=$(sudo ssh "${SSH_OPTIONS[@]}" -i "${SSH_KEY}" "${SSH_USER}@${1}" '/bin/bash -c "echo -n READY"')
+    if [[ $SSH_STATUS == READY ]]; then
+        echo 1
+    else
+        echo 0
+    fi
+}
+
+# Clean up our mess.
+clean_up () {
+    greenprint "🧼 Cleaning up"
+    sudo virsh destroy "${IMAGE_KEY}"
+    sudo virsh undefine "${IMAGE_KEY}" --nvram
+    # Remove qcow2 file.
+    sudo virsh vol-delete --pool images "${IMAGE_KEY}.qcow2"
+    # Remove "remote" repo.
+    sudo rm -rf "${HTTPD_PATH}"/{httpboot,repo,compose.json,ks.cfg}
+    # Remove tmp dir.
+    sudo rm -rf "$TEMPDIR"
+    # Stop httpd
+    sudo systemctl disable httpd --now
+}
+
+# Test result checking
+check_result () {
+    greenprint "Checking for test result"
+    if [[ $RESULTS == 1 ]]; then
+        greenprint "💚 Success"
+    else
+        greenprint "❌ Failed"
+        clean_up
+        exit 1
+    fi
+}
+
+##################################################
+##
+## ostree image/commit installation
+##
+##################################################
+
+# Write a blueprint for ostree image.
+tee "$BLUEPRINT_FILE" > /dev/null << EOF +name = "ostree" +description = "A base ostree image" +version = "0.0.1" +modules = [] +groups = [] + +[[packages]] +name = "python3" +version = "*" + +[[packages]] +name = "sssd" +version = "*" +EOF + +# Build installation image. +build_image "$BLUEPRINT_FILE" ostree + +# Download the image and extract tar into web server root folder. +greenprint "๐Ÿ“ฅ Downloading and extracting the image" +sudo composer-cli compose image "${COMPOSE_ID}" > /dev/null +IMAGE_FILENAME="${COMPOSE_ID}-commit.tar" +sudo tar -xf "${IMAGE_FILENAME}" -C ${HTTPD_PATH} +sudo rm -f "$IMAGE_FILENAME" + +# Clean compose and blueprints. +greenprint "Clean up osbuild-composer" +sudo composer-cli compose delete "${COMPOSE_ID}" > /dev/null +sudo composer-cli blueprints delete ostree > /dev/null + +# Ensure SELinux is happy with our new images. +greenprint "๐Ÿ‘ฟ Running restorecon on image directory" +sudo restorecon -Rv /var/lib/libvirt/images/ + +# Create qcow2 file for virt install. +greenprint "Create qcow2 file for virt install" +LIBVIRT_IMAGE_PATH=/var/lib/libvirt/images/${IMAGE_KEY}.qcow2 +sudo qemu-img create -f qcow2 "${LIBVIRT_IMAGE_PATH}" 20G + +# Write kickstart file for ostree image installation. +greenprint "Generate kickstart file" +sudo tee "$KS_FILE" > /dev/null << STOPHERE +text +rootpw --lock --iscrypted locked +network --bootproto=dhcp --device=link --activate --onboot=on +user --name=${SSH_USER} --groups=wheel --iscrypted --password=\$6\$1LgwKw9aOoAi/Zy9\$Pn3ErY1E8/yEanJ98evqKEW.DZp24HTuqXPJl6GYCm8uuobAmwxLv7rGCvTRZhxtcYdmC0.XnYRSR9Sh6de3p0 +sshkey --username=${SSH_USER} "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABgQCzxo5dEcS+LDK/OFAfHo6740EyoDM8aYaCkBala0FnWfMMTOq7PQe04ahB0eFLS3IlQtK5bpgzxBdFGVqF6uT5z4hhaPjQec0G3+BD5Pxo6V+SxShKZo+ZNGU3HVrF9p2V7QH0YFQj5B8F6AicA3fYh2BVUFECTPuMpy5A52ufWu0r4xOFmbU7SIhRQRAQz2u4yjXqBsrpYptAvyzzoN4gjUhNnwOHSPsvFpWoBFkWmqn0ytgHg3Vv9DlHW+45P02QH1UFedXR2MqLnwRI30qqtaOkVS+9rE/dhnR+XPpHHG+hv2TgMDAuQ3IK7Ab5m/yCbN73cxFifH4LST0vVG3Jx45xn+GTeHHhfkAfBSCtya6191jixbqyovpRunCBKexI5cfRPtWOitM3m7Mq26r7LpobMM+oOLUm4p0KKNIthWcmK9tYwXWSuGGfUQ+Y8gt7E0G06ZGbCPHOrxJ8lYQqXsif04piONPA/c9Hq43O99KPNGShONCS9oPFdOLRT3U= ostree-image-test" +zerombr +clearpart --all --initlabel --disklabel=msdos +autopart --nohome --noswap --type=plain +ostreesetup --nogpg --osname=${OS_NAME} --remote=${OS_NAME} --url=${PROD_REPO_URL} --ref=${OSTREE_REF} +poweroff + +%post --log=/var/log/anaconda/post-install.log --erroronfail +# no sudo password for SSH user +echo -e '${SSH_USER}\tALL=(ALL)\tNOPASSWD: ALL' >> /etc/sudoers +%end +STOPHERE + +# Workaround bug https://bugzilla.redhat.com/show_bug.cgi?id=2213388 +if [[ "${VERSION_ID}" == "39" ]]; then + sudo systemctl restart libvirtd +fi + +# Install ostree image via anaconda. +greenprint "Install ostree image via anaconda" +sudo virt-install --name="${IMAGE_KEY}"\ + --initrd-inject="${KS_FILE}" \ + --extra-args="inst.ks=file:/ks.cfg console=ttyS0,115200" \ + --disk path="${LIBVIRT_IMAGE_PATH}",format=qcow2 \ + --ram 3072 \ + --vcpus 2 \ + --network network=integration,mac=34:49:22:B0:83:30 \ + --os-variant ${OS_VARIANT} \ + --boot ${BOOT_ARGS} \ + --location "${BOOT_LOCATION}" \ + --nographics \ + --noautoconsole \ + --wait=-1 \ + --noreboot + +# Start VM. +greenprint "Start VM" +sudo virsh start "${IMAGE_KEY}" + +# Check for ssh ready to go. +greenprint "๐Ÿ›ƒ Checking for SSH is ready to go" +for _ in $(seq 0 30); do + RESULTS="$(wait_for_ssh_up $GUEST_ADDRESS)" + if [[ $RESULTS == 1 ]]; then + echo "SSH is ready now! 
๐Ÿฅณ" + break + fi + sleep 10 +done + +# Reboot one more time to make /sysroot as RO by new ostree-libs-2022.6-3.el9.x86_64 +sudo ssh "${SSH_OPTIONS[@]}" -i "${SSH_KEY}" "${SSH_USER}@${GUEST_ADDRESS}" 'nohup sudo systemctl reboot &>/dev/null & exit' +# Sleep 10 seconds here to make sure vm restarted already +sleep 10 + +# Check for ssh ready to go. +greenprint "๐Ÿ›ƒ Checking for SSH is ready to go" +for _ in $(seq 0 30); do + RESULTS="$(wait_for_ssh_up $GUEST_ADDRESS)" + if [[ $RESULTS == 1 ]]; then + echo "SSH is ready now! ๐Ÿฅณ" + break + fi + sleep 10 +done + +# Get ostree commit value. +greenprint "Get ostree image commit value" +INSTALL_HASH=$(curl "${PROD_REPO_URL}/refs/heads/${OSTREE_REF}") + +# Add instance IP address into /etc/ansible/hosts +tee "${TEMPDIR}"/inventory > /dev/null << EOF +[ostree_guest] +${GUEST_ADDRESS} +[ostree_guest:vars] +ansible_python_interpreter=/usr/bin/python3 +ansible_user=${SSH_USER} +ansible_private_key_file=${SSH_KEY} +ansible_ssh_common_args="-o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null" +EOF + +# Test IoT/Edge OS +ansible-playbook -v -i "${TEMPDIR}/inventory" -e os_name="${OS_NAME}" -e ostree_commit="${INSTALL_HASH}" -e ostree_ref="${OS_NAME}:${OSTREE_REF}" check-ostree.yaml || RESULTS=0 + +# Check image installation result +check_result + +################################################## +## +## ostree image/commit upgrade +## +################################################## + +# Write a blueprint for ostree image. +tee "$BLUEPRINT_FILE" > /dev/null << EOF +name = "upgrade" +description = "An upgrade ostree image" +version = "0.0.2" +modules = [] +groups = [] + +[[packages]] +name = "python3" +version = "*" + +[[packages]] +name = "sssd" +version = "*" + +[[packages]] +name = "wget" +version = "*" +EOF + +# Build upgrade image. +build_image "$BLUEPRINT_FILE" upgrade + +# Download the image and extract tar into web server root folder. +greenprint "๐Ÿ“ฅ Downloading and extracting the image" +sudo composer-cli compose image "${COMPOSE_ID}" > /dev/null +IMAGE_FILENAME="${COMPOSE_ID}-commit.tar" +UPGRADE_PATH="$(pwd)/upgrade" +mkdir -p "$UPGRADE_PATH" +sudo tar -xf "$IMAGE_FILENAME" -C "$UPGRADE_PATH" +sudo rm -f "$IMAGE_FILENAME" + +# Clean compose and blueprints. +greenprint "Clean up osbuild-composer again" +sudo composer-cli compose delete "${COMPOSE_ID}" > /dev/null +sudo composer-cli blueprints delete upgrade > /dev/null + +# Introduce new ostree commit into repo. +greenprint "Introduce new ostree commit into repo" +sudo ostree pull-local --repo "${HTTPD_PATH}/repo" "${UPGRADE_PATH}/repo" "$OSTREE_REF" +# sudo ostree --repo="${HTTPD_PATH}/repo" static-delta generate "$OSTREE_REF" +sudo ostree summary --update --repo "${HTTPD_PATH}/repo" + +# Ensure SELinux is happy with all objects files. +greenprint "๐Ÿ‘ฟ Running restorecon on web server root folder" +sudo restorecon -Rv "${HTTPD_PATH}/repo" > /dev/null + +# Get ostree commit value. +greenprint "Get ostree image commit value" +UPGRADE_HASH=$(curl "${PROD_REPO_URL}/refs/heads/${OSTREE_REF}") + +# Remove upgrade repo +sudo rm -rf "$UPGRADE_PATH" + +# Upgrade image/commit. +greenprint "Upgrade ostree image/commit" +sudo ssh "${SSH_OPTIONS[@]}" -i "${SSH_KEY}" "${SSH_USER}@${GUEST_ADDRESS}" 'sudo rpm-ostree upgrade' +sudo ssh "${SSH_OPTIONS[@]}" -i "${SSH_KEY}" "${SSH_USER}@${GUEST_ADDRESS}" 'nohup sudo systemctl reboot &>/dev/null & exit' + +# Sleep 10 seconds here to make sure vm restarted already +sleep 10 + +# Check for ssh ready to go. 
+greenprint "๐Ÿ›ƒ Checking for SSH is ready to go" +for _ in $(seq 0 30); do + RESULTS="$(wait_for_ssh_up $GUEST_ADDRESS)" + if [[ $RESULTS == 1 ]]; then + echo "SSH is ready now! ๐Ÿฅณ" + break + fi + sleep 10 +done + +# Check ostree upgrade result +check_result + +# Add instance IP address into /etc/ansible/hosts +tee "${TEMPDIR}"/inventory > /dev/null << EOF +[ostree_guest] +${GUEST_ADDRESS} +[ostree_guest:vars] +ansible_python_interpreter=/usr/bin/python3 +ansible_user=${SSH_USER} +ansible_private_key_file=${SSH_KEY} +ansible_ssh_common_args="-o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null" +EOF + +# Test IoT/Edge OS +ansible-playbook -v -i "${TEMPDIR}/inventory" -e os_name="${OS_NAME}" -e ostree_commit="${UPGRADE_HASH}" -e ostree_ref="${OS_NAME}:${OSTREE_REF}" check-ostree.yaml || RESULTS=0 +check_result + +# Final success clean up +clean_up + +exit 0 diff --git a/test/key/ostree_key b/test/key/ostree_key new file mode 100644 index 00000000..716fd87c --- /dev/null +++ b/test/key/ostree_key @@ -0,0 +1,38 @@ +-----BEGIN OPENSSH PRIVATE KEY----- +b3BlbnNzaC1rZXktdjEAAAAABG5vbmUAAAAEbm9uZQAAAAAAAAABAAABlwAAAAdzc2gtcn +NhAAAAAwEAAQAAAYEAs8aOXRHEviwyvzhQHx6Ou+NBMqAzPGmGgpAWpWtBZ1nzDEzquz0H +tOGoQdHhS0tyJULSuW6YM8QXRRlaherk+c+IYWj40HnNBt/gQ+T8aOlfksUoSmaPmTRlNx +1axfadle0B9GBUI+QfBegInAN32IdgVVBRAkz7jKcuQOdrn1rtK+MThZm1O0iIUUEQEM9r +uMo16gbK6WKbQL8s86DeII1ITZ8Dh0j7LxaVqARZFpqp9MrYB4N1b/Q5R1vuOT9NkB9VBX +nV0djKi58ESN9KqrWjpFUvvaxP3YZ0flz6Rxxvob9k4DAwLkNyCuwG+Zv8gmze93MRYnx+ +C0k9L1RtyceOcZ/hk3hx4X5AHwUgrcmutfdY4sW6sqL6UbpwgSnsSOXH0T7VjorTN5uzKt +uq+y6aGzDPqDi1JuKdCijSLYVnJivbWMF1krhhn1EPmPILexNBtOmRmwjxzq8SfJWEKl7I +n9OKYjjTwP3PR6uNzvfSjzRkoTjQkvaDxXTi0U91AAAFiBiBlykYgZcpAAAAB3NzaC1yc2 +EAAAGBALPGjl0RxL4sMr84UB8ejrvjQTKgMzxphoKQFqVrQWdZ8wxM6rs9B7ThqEHR4UtL +ciVC0rlumDPEF0UZWoXq5PnPiGFo+NB5zQbf4EPk/GjpX5LFKEpmj5k0ZTcdWsX2nZXtAf +RgVCPkHwXoCJwDd9iHYFVQUQJM+4ynLkDna59a7SvjE4WZtTtIiFFBEBDPa7jKNeoGyuli +m0C/LPOg3iCNSE2fA4dI+y8WlagEWRaaqfTK2AeDdW/0OUdb7jk/TZAfVQV51dHYyoufBE +jfSqq1o6RVL72sT92GdH5c+kccb6G/ZOAwMC5DcgrsBvmb/IJs3vdzEWJ8fgtJPS9UbcnH +jnGf4ZN4ceF+QB8FIK3JrrX3WOLFurKi+lG6cIEp7Ejlx9E+1Y6K0zebsyrbqvsumhswz6 +g4tSbinQoo0i2FZyYr21jBdZK4YZ9RD5jyC3sTQbTpkZsI8c6vEnyVhCpeyJ/TimI408D9 +z0erjc730o80ZKE40JL2g8V04tFPdQAAAAMBAAEAAAGBAJIAmtQ5PwiXyqsD6AYuAgvTt7 +qO4q2YojZdIRc9MUPniH2f5i8klKKxdb3m30sQPebHC26vxAqeoatruNnz9/xuMLuzzgc6 +NGn13iQlz1zA0+7WEi/CdbMeG2mUfIk0Da2aa7D1nr/7X7qjRIK4SlffMjx3WyM8NDt59x +WdHQmxhdbTt6IUQFyiPpuG9K5CVqEgEIM8+wRqId6GpNJD/sJ/G452qx3vBpiqheaLiXLT +L15wctw/RlwjA3XR0npJzq6g066BMKYAnyT5wiCWisVFKxIudT0dphj4qmz74yC967U6ji +AB9hZ8j9OhBDA/pypXbb781Lo4iBqM6auoZqbieOE+v9v6uDozmfxtQO5y2kFP7mBMsGwG +L8oEfEPqWRTIXgvDVuBwoqdsmYzFP8SiyUDkOfcHcK924FzvyJ2LWlpNp9POXYdjTDm/oB +k1xs9UkhCImavqUnKiAplLnzMNuYbLmofoesI/2LnuYc2BOx9zub3pru6AdGi6N2EWzQAA +AMEAjbtZe+6sW5yepxKOEb0wOAmZhRGL7d50fuPuJYsljU+nQaI7NMAAZ+G3kAiaTd8npb +A5MKZ2oW++YXjJNAD+Bifnz8LojjmCqCuJL52+VNwpordW23XxRNQdoEvdN516qLyMI4i7 +i1AxNbU73SUrSCkSb1ngrhiHHQz986VciRU4X13ENbUSzPYInLoP9wTt+5CUtgiQnxe5PF +K125TVwnFaDMPUKHMhKFIkMJuAkCSKQT7n11wwO2uH9k48LxW6AAAAwQDelZf6e+Un0s1A +jLTG+r8VLG2kClXtECrRQjlzwMfc+lKOB00jBEdBLgIg3h2ECPOqh3OD9S0SU2Ja/+zb0r +wrkyzWdndhh0IOEJCqzdlJe9JBJEWwQTr9MH9s1ORyIA1XGp5GPMFIZhT393Zkichzfyoz +aACW+glGfsw27THJvI5PGJkPiuzKvwGixRcBpf72bk/30Q/qkekErdxtT3Kea41X9QOYjb +jwrWKHARpSmLP1dJrOmYh8HWzpAKghIX8AAADBAM7DunVgA++cHvG/8B5Nodb/S0D7MvOb +OtaHMfUdIIiczwOEvyoRsPyAMEGMtAMHy2YIGQYsK6CZEYP7x3sOmDOocmwjcMpjywN0b/ +g895R16d19MDzUU/SnfUsQgbEXV9KxBGa9mDiyoEiP/QduQU/YlJdQjQXvYjrTRzV6AHQo 
+PCE/JIQfRcvypKQU1XOdLhSIFDbvAcVgvULwe08robTn2ooR/on4+MHOE0q9RyA4lKS7CQ +77li4GQONWrqyhCwAAABFvc3RyZWUtaW1hZ2UtdGVzdA== +-----END OPENSSH PRIVATE KEY----- diff --git a/test/key/ostree_key.pub b/test/key/ostree_key.pub new file mode 100644 index 00000000..184fb50b --- /dev/null +++ b/test/key/ostree_key.pub @@ -0,0 +1 @@ +ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABgQCzxo5dEcS+LDK/OFAfHo6740EyoDM8aYaCkBala0FnWfMMTOq7PQe04ahB0eFLS3IlQtK5bpgzxBdFGVqF6uT5z4hhaPjQec0G3+BD5Pxo6V+SxShKZo+ZNGU3HVrF9p2V7QH0YFQj5B8F6AicA3fYh2BVUFECTPuMpy5A52ufWu0r4xOFmbU7SIhRQRAQz2u4yjXqBsrpYptAvyzzoN4gjUhNnwOHSPsvFpWoBFkWmqn0ytgHg3Vv9DlHW+45P02QH1UFedXR2MqLnwRI30qqtaOkVS+9rE/dhnR+XPpHHG+hv2TgMDAuQ3IK7Ab5m/yCbN73cxFifH4LST0vVG3Jx45xn+GTeHHhfkAfBSCtya6191jixbqyovpRunCBKexI5cfRPtWOitM3m7Mq26r7LpobMM+oOLUm4p0KKNIthWcmK9tYwXWSuGGfUQ+Y8gt7E0G06ZGbCPHOrxJ8lYQqXsif04piONPA/c9Hq43O99KPNGShONCS9oPFdOLRT3U= ostree-image-test