From 394dc73b2e3cfbf9ce685c01a8ebdc9e148e2b09 Mon Sep 17 00:00:00 2001 From: "Petr \"Stone\" Hracek" Date: Thu, 8 Aug 2024 11:04:38 +0200 Subject: [PATCH 01/15] Move version 20 -> 22 Signed-off-by: Petr "Stone" Hracek --- {20 => 22}/.exclude-c8s | 0 {20 => 22}/Dockerfile.c8s | 0 {20 => 22}/Dockerfile.c9s | 0 {20 => 22}/Dockerfile.fedora | 0 {20 => 22}/Dockerfile.rhel8 | 0 {20 => 22}/Dockerfile.rhel9 | 0 {20 => 22}/README.md | 0 {20 => 22}/root/opt/app-root/etc/generate_container_user | 0 {20 => 22}/root/opt/app-root/etc/npm_global_module_list | 0 {20 => 22}/root/opt/app-root/etc/scl_enable | 0 {20 => 22}/s2i/bin/assemble | 0 {20 => 22}/s2i/bin/run | 0 {20 => 22}/s2i/bin/save-artifacts | 0 {20 => 22}/s2i/bin/usage | 0 {20 => 22}/test | 0 15 files changed, 0 insertions(+), 0 deletions(-) rename {20 => 22}/.exclude-c8s (100%) rename {20 => 22}/Dockerfile.c8s (100%) rename {20 => 22}/Dockerfile.c9s (100%) rename {20 => 22}/Dockerfile.fedora (100%) rename {20 => 22}/Dockerfile.rhel8 (100%) rename {20 => 22}/Dockerfile.rhel9 (100%) rename {20 => 22}/README.md (100%) rename {20 => 22}/root/opt/app-root/etc/generate_container_user (100%) rename {20 => 22}/root/opt/app-root/etc/npm_global_module_list (100%) rename {20 => 22}/root/opt/app-root/etc/scl_enable (100%) rename {20 => 22}/s2i/bin/assemble (100%) rename {20 => 22}/s2i/bin/run (100%) rename {20 => 22}/s2i/bin/save-artifacts (100%) rename {20 => 22}/s2i/bin/usage (100%) rename {20 => 22}/test (100%) diff --git a/20/.exclude-c8s b/22/.exclude-c8s similarity index 100% rename from 20/.exclude-c8s rename to 22/.exclude-c8s diff --git a/20/Dockerfile.c8s b/22/Dockerfile.c8s similarity index 100% rename from 20/Dockerfile.c8s rename to 22/Dockerfile.c8s diff --git a/20/Dockerfile.c9s b/22/Dockerfile.c9s similarity index 100% rename from 20/Dockerfile.c9s rename to 22/Dockerfile.c9s diff --git a/20/Dockerfile.fedora b/22/Dockerfile.fedora similarity index 100% rename from 20/Dockerfile.fedora rename to 22/Dockerfile.fedora 
diff --git a/20/Dockerfile.rhel8 b/22/Dockerfile.rhel8 similarity index 100% rename from 20/Dockerfile.rhel8 rename to 22/Dockerfile.rhel8 diff --git a/20/Dockerfile.rhel9 b/22/Dockerfile.rhel9 similarity index 100% rename from 20/Dockerfile.rhel9 rename to 22/Dockerfile.rhel9 diff --git a/20/README.md b/22/README.md similarity index 100% rename from 20/README.md rename to 22/README.md diff --git a/20/root/opt/app-root/etc/generate_container_user b/22/root/opt/app-root/etc/generate_container_user similarity index 100% rename from 20/root/opt/app-root/etc/generate_container_user rename to 22/root/opt/app-root/etc/generate_container_user diff --git a/20/root/opt/app-root/etc/npm_global_module_list b/22/root/opt/app-root/etc/npm_global_module_list similarity index 100% rename from 20/root/opt/app-root/etc/npm_global_module_list rename to 22/root/opt/app-root/etc/npm_global_module_list diff --git a/20/root/opt/app-root/etc/scl_enable b/22/root/opt/app-root/etc/scl_enable similarity index 100% rename from 20/root/opt/app-root/etc/scl_enable rename to 22/root/opt/app-root/etc/scl_enable diff --git a/20/s2i/bin/assemble b/22/s2i/bin/assemble similarity index 100% rename from 20/s2i/bin/assemble rename to 22/s2i/bin/assemble diff --git a/20/s2i/bin/run b/22/s2i/bin/run similarity index 100% rename from 20/s2i/bin/run rename to 22/s2i/bin/run diff --git a/20/s2i/bin/save-artifacts b/22/s2i/bin/save-artifacts similarity index 100% rename from 20/s2i/bin/save-artifacts rename to 22/s2i/bin/save-artifacts diff --git a/20/s2i/bin/usage b/22/s2i/bin/usage similarity index 100% rename from 20/s2i/bin/usage rename to 22/s2i/bin/usage diff --git a/20/test b/22/test similarity index 100% rename from 20/test rename to 22/test From bfd7ffd93fec4cb6e3d4a34a34760cfe407763c8 Mon Sep 17 00:00:00 2001 From: "Petr \"Stone\" Hracek" Date: Thu, 8 Aug 2024 11:04:58 +0200 Subject: [PATCH 02/15] Move 20-minimal to 22-minimal Signed-off-by: Petr "Stone" Hracek --- {20-minimal => 
22-minimal}/.exclude-c8s | 0 {20-minimal => 22-minimal}/Dockerfile.c8s | 0 {20-minimal => 22-minimal}/Dockerfile.c9s | 0 {20-minimal => 22-minimal}/Dockerfile.fedora | 0 {20-minimal => 22-minimal}/Dockerfile.rhel8 | 0 {20-minimal => 22-minimal}/Dockerfile.rhel9 | 0 {20-minimal => 22-minimal}/README.md | 0 {20-minimal => 22-minimal}/root/usr/bin/fix-permissions | 0 {20-minimal => 22-minimal}/s2i/bin/assemble | 0 {20-minimal => 22-minimal}/s2i/bin/run | 0 {20-minimal => 22-minimal}/s2i/bin/save-artifacts | 0 {20-minimal => 22-minimal}/s2i/bin/usage | 0 {20-minimal => 22-minimal}/test/check_imagestreams.py | 0 {20-minimal => 22-minimal}/test/examples | 0 {20-minimal => 22-minimal}/test/imagestreams | 0 {20-minimal => 22-minimal}/test/run | 0 {20-minimal => 22-minimal}/test/run-openshift | 0 {20-minimal => 22-minimal}/test/run-openshift-remote-cluster | 0 {20-minimal => 22-minimal}/test/test-app | 0 {20-minimal => 22-minimal}/test/test-binary | 0 {20-minimal => 22-minimal}/test/test-express-webapp | 0 {20-minimal => 22-minimal}/test/test-hw | 0 {20-minimal => 22-minimal}/test/test-incremental | 0 {20-minimal => 22-minimal}/test/test-lib-nodejs.sh | 0 {20-minimal => 22-minimal}/test/test-lib-openshift.sh | 0 {20-minimal => 22-minimal}/test/test-lib-remote-openshift.sh | 0 {20-minimal => 22-minimal}/test/test-lib.sh | 0 {20-minimal => 22-minimal}/test/test-openshift.yaml | 0 28 files changed, 0 insertions(+), 0 deletions(-) rename {20-minimal => 22-minimal}/.exclude-c8s (100%) rename {20-minimal => 22-minimal}/Dockerfile.c8s (100%) rename {20-minimal => 22-minimal}/Dockerfile.c9s (100%) rename {20-minimal => 22-minimal}/Dockerfile.fedora (100%) rename {20-minimal => 22-minimal}/Dockerfile.rhel8 (100%) rename {20-minimal => 22-minimal}/Dockerfile.rhel9 (100%) rename {20-minimal => 22-minimal}/README.md (100%) rename {20-minimal => 22-minimal}/root/usr/bin/fix-permissions (100%) rename {20-minimal => 22-minimal}/s2i/bin/assemble (100%) rename {20-minimal => 
22-minimal}/s2i/bin/run (100%) rename {20-minimal => 22-minimal}/s2i/bin/save-artifacts (100%) rename {20-minimal => 22-minimal}/s2i/bin/usage (100%) rename {20-minimal => 22-minimal}/test/check_imagestreams.py (100%) rename {20-minimal => 22-minimal}/test/examples (100%) rename {20-minimal => 22-minimal}/test/imagestreams (100%) rename {20-minimal => 22-minimal}/test/run (100%) rename {20-minimal => 22-minimal}/test/run-openshift (100%) rename {20-minimal => 22-minimal}/test/run-openshift-remote-cluster (100%) rename {20-minimal => 22-minimal}/test/test-app (100%) rename {20-minimal => 22-minimal}/test/test-binary (100%) rename {20-minimal => 22-minimal}/test/test-express-webapp (100%) rename {20-minimal => 22-minimal}/test/test-hw (100%) rename {20-minimal => 22-minimal}/test/test-incremental (100%) rename {20-minimal => 22-minimal}/test/test-lib-nodejs.sh (100%) rename {20-minimal => 22-minimal}/test/test-lib-openshift.sh (100%) rename {20-minimal => 22-minimal}/test/test-lib-remote-openshift.sh (100%) rename {20-minimal => 22-minimal}/test/test-lib.sh (100%) rename {20-minimal => 22-minimal}/test/test-openshift.yaml (100%) diff --git a/20-minimal/.exclude-c8s b/22-minimal/.exclude-c8s similarity index 100% rename from 20-minimal/.exclude-c8s rename to 22-minimal/.exclude-c8s diff --git a/20-minimal/Dockerfile.c8s b/22-minimal/Dockerfile.c8s similarity index 100% rename from 20-minimal/Dockerfile.c8s rename to 22-minimal/Dockerfile.c8s diff --git a/20-minimal/Dockerfile.c9s b/22-minimal/Dockerfile.c9s similarity index 100% rename from 20-minimal/Dockerfile.c9s rename to 22-minimal/Dockerfile.c9s diff --git a/20-minimal/Dockerfile.fedora b/22-minimal/Dockerfile.fedora similarity index 100% rename from 20-minimal/Dockerfile.fedora rename to 22-minimal/Dockerfile.fedora diff --git a/20-minimal/Dockerfile.rhel8 b/22-minimal/Dockerfile.rhel8 similarity index 100% rename from 20-minimal/Dockerfile.rhel8 rename to 22-minimal/Dockerfile.rhel8 diff --git 
a/20-minimal/Dockerfile.rhel9 b/22-minimal/Dockerfile.rhel9 similarity index 100% rename from 20-minimal/Dockerfile.rhel9 rename to 22-minimal/Dockerfile.rhel9 diff --git a/20-minimal/README.md b/22-minimal/README.md similarity index 100% rename from 20-minimal/README.md rename to 22-minimal/README.md diff --git a/20-minimal/root/usr/bin/fix-permissions b/22-minimal/root/usr/bin/fix-permissions similarity index 100% rename from 20-minimal/root/usr/bin/fix-permissions rename to 22-minimal/root/usr/bin/fix-permissions diff --git a/20-minimal/s2i/bin/assemble b/22-minimal/s2i/bin/assemble similarity index 100% rename from 20-minimal/s2i/bin/assemble rename to 22-minimal/s2i/bin/assemble diff --git a/20-minimal/s2i/bin/run b/22-minimal/s2i/bin/run similarity index 100% rename from 20-minimal/s2i/bin/run rename to 22-minimal/s2i/bin/run diff --git a/20-minimal/s2i/bin/save-artifacts b/22-minimal/s2i/bin/save-artifacts similarity index 100% rename from 20-minimal/s2i/bin/save-artifacts rename to 22-minimal/s2i/bin/save-artifacts diff --git a/20-minimal/s2i/bin/usage b/22-minimal/s2i/bin/usage similarity index 100% rename from 20-minimal/s2i/bin/usage rename to 22-minimal/s2i/bin/usage diff --git a/20-minimal/test/check_imagestreams.py b/22-minimal/test/check_imagestreams.py similarity index 100% rename from 20-minimal/test/check_imagestreams.py rename to 22-minimal/test/check_imagestreams.py diff --git a/20-minimal/test/examples b/22-minimal/test/examples similarity index 100% rename from 20-minimal/test/examples rename to 22-minimal/test/examples diff --git a/20-minimal/test/imagestreams b/22-minimal/test/imagestreams similarity index 100% rename from 20-minimal/test/imagestreams rename to 22-minimal/test/imagestreams diff --git a/20-minimal/test/run b/22-minimal/test/run similarity index 100% rename from 20-minimal/test/run rename to 22-minimal/test/run diff --git a/20-minimal/test/run-openshift b/22-minimal/test/run-openshift similarity index 100% rename from 
20-minimal/test/run-openshift rename to 22-minimal/test/run-openshift diff --git a/20-minimal/test/run-openshift-remote-cluster b/22-minimal/test/run-openshift-remote-cluster similarity index 100% rename from 20-minimal/test/run-openshift-remote-cluster rename to 22-minimal/test/run-openshift-remote-cluster diff --git a/20-minimal/test/test-app b/22-minimal/test/test-app similarity index 100% rename from 20-minimal/test/test-app rename to 22-minimal/test/test-app diff --git a/20-minimal/test/test-binary b/22-minimal/test/test-binary similarity index 100% rename from 20-minimal/test/test-binary rename to 22-minimal/test/test-binary diff --git a/20-minimal/test/test-express-webapp b/22-minimal/test/test-express-webapp similarity index 100% rename from 20-minimal/test/test-express-webapp rename to 22-minimal/test/test-express-webapp diff --git a/20-minimal/test/test-hw b/22-minimal/test/test-hw similarity index 100% rename from 20-minimal/test/test-hw rename to 22-minimal/test/test-hw diff --git a/20-minimal/test/test-incremental b/22-minimal/test/test-incremental similarity index 100% rename from 20-minimal/test/test-incremental rename to 22-minimal/test/test-incremental diff --git a/20-minimal/test/test-lib-nodejs.sh b/22-minimal/test/test-lib-nodejs.sh similarity index 100% rename from 20-minimal/test/test-lib-nodejs.sh rename to 22-minimal/test/test-lib-nodejs.sh diff --git a/20-minimal/test/test-lib-openshift.sh b/22-minimal/test/test-lib-openshift.sh similarity index 100% rename from 20-minimal/test/test-lib-openshift.sh rename to 22-minimal/test/test-lib-openshift.sh diff --git a/20-minimal/test/test-lib-remote-openshift.sh b/22-minimal/test/test-lib-remote-openshift.sh similarity index 100% rename from 20-minimal/test/test-lib-remote-openshift.sh rename to 22-minimal/test/test-lib-remote-openshift.sh diff --git a/20-minimal/test/test-lib.sh b/22-minimal/test/test-lib.sh similarity index 100% rename from 20-minimal/test/test-lib.sh rename to 
22-minimal/test/test-lib.sh diff --git a/20-minimal/test/test-openshift.yaml b/22-minimal/test/test-openshift.yaml similarity index 100% rename from 20-minimal/test/test-openshift.yaml rename to 22-minimal/test/test-openshift.yaml From 5fcaae06b3f87cd517e2ca5a4d3b39a330e405c2 Mon Sep 17 00:00:00 2001 From: "Petr \"Stone\" Hracek" Date: Thu, 8 Aug 2024 11:05:25 +0200 Subject: [PATCH 03/15] Copy version 22 back to 20 Signed-off-by: Petr "Stone" Hracek --- 20/.exclude-c8s | 0 20/Dockerfile.c8s | 79 + 20/Dockerfile.c9s | 79 + 20/Dockerfile.fedora | 75 + 20/Dockerfile.rhel8 | 79 + 20/Dockerfile.rhel9 | 78 + 20/README.md | 253 +++ .../opt/app-root/etc/generate_container_user | 17 + .../opt/app-root/etc/npm_global_module_list | 5 + 20/root/opt/app-root/etc/scl_enable | 3 + 20/s2i/bin/assemble | 116 ++ 20/s2i/bin/run | 60 + 20/s2i/bin/save-artifacts | 5 + 20/s2i/bin/usage | 15 + 20/test/check_imagestreams.py | 105 ++ 20/test/examples/from-dockerfile/.gitignore | 2 + 20/test/examples/from-dockerfile/Dockerfile | 16 + .../examples/from-dockerfile/Dockerfile.s2i | 25 + 20/test/examples/from-dockerfile/README.md | 22 + 20/test/imagestreams/imagestreams.yaml | 77 + 20/test/imagestreams/nodejs-centos.json | 261 +++ 20/test/imagestreams/nodejs-rhel-aarch64.json | 261 +++ 20/test/imagestreams/nodejs-rhel.json | 261 +++ 20/test/run | 116 ++ 20/test/run-minimal | 132 ++ 20/test/run-openshift-pytest | 11 + 20/test/run-openshift-remote-cluster | 53 + 20/test/run-upstream | 67 + 20/test/show_all_imagestreams.py | 58 + 20/test/test-app/README.md | 4 + 20/test/test-app/iisnode.yml | 27 + 20/test/test-app/package.json | 32 + 20/test/test-app/server.js | 50 + 20/test/test-app/web.config | 17 + 20/test/test-binary/hw.js | 11 + 20/test/test-binary/package.json | 17 + 20/test/test-express-webapp/app.js | 41 + 20/test/test-express-webapp/bin/www | 90 + 20/test/test-express-webapp/package.json | 16 + .../public/stylesheets/style.css | 8 + 20/test/test-express-webapp/routes/index.js | 9 + 
20/test/test-express-webapp/routes/users.js | 9 + 20/test/test-express-webapp/views/error.jade | 6 + 20/test/test-express-webapp/views/index.jade | 5 + 20/test/test-express-webapp/views/layout.jade | 7 + 20/test/test-hw/hw.js | 11 + 20/test/test-hw/package.json | 14 + 20/test/test-incremental/README.md | 4 + 20/test/test-incremental/iisnode.yml | 27 + 20/test/test-incremental/package.json | 33 + 20/test/test-incremental/server.js | 50 + 20/test/test-incremental/web.config | 17 + 20/test/test-lib-nodejs.sh | 608 +++++++ 20/test/test-lib-openshift.sh | 1206 +++++++++++++ 20/test/test-lib-remote-openshift.sh | 136 ++ 20/test/test-lib.sh | 1509 +++++++++++++++++ 20/test/test-openshift.yaml | 77 + 20/test/test_imagestreams_quickstart.py | 83 + 20/test/test_latest_imagestreams.py | 27 + 20/test/test_nodejs_ex_standalone.py | 37 + 20/test/test_nodejs_ex_templates.py | 79 + 20/test/test_nodejs_s2i_standalone.py | 37 + 62 files changed, 6635 insertions(+) create mode 100644 20/.exclude-c8s create mode 100644 20/Dockerfile.c8s create mode 100644 20/Dockerfile.c9s create mode 100644 20/Dockerfile.fedora create mode 100644 20/Dockerfile.rhel8 create mode 100644 20/Dockerfile.rhel9 create mode 100644 20/README.md create mode 100644 20/root/opt/app-root/etc/generate_container_user create mode 100644 20/root/opt/app-root/etc/npm_global_module_list create mode 100644 20/root/opt/app-root/etc/scl_enable create mode 100755 20/s2i/bin/assemble create mode 100755 20/s2i/bin/run create mode 100755 20/s2i/bin/save-artifacts create mode 100755 20/s2i/bin/usage create mode 100755 20/test/check_imagestreams.py create mode 100644 20/test/examples/from-dockerfile/.gitignore create mode 100644 20/test/examples/from-dockerfile/Dockerfile create mode 100644 20/test/examples/from-dockerfile/Dockerfile.s2i create mode 100644 20/test/examples/from-dockerfile/README.md create mode 100644 20/test/imagestreams/imagestreams.yaml create mode 100644 20/test/imagestreams/nodejs-centos.json create mode 
100644 20/test/imagestreams/nodejs-rhel-aarch64.json create mode 100644 20/test/imagestreams/nodejs-rhel.json create mode 100755 20/test/run create mode 100755 20/test/run-minimal create mode 100755 20/test/run-openshift-pytest create mode 100755 20/test/run-openshift-remote-cluster create mode 100755 20/test/run-upstream create mode 100755 20/test/show_all_imagestreams.py create mode 100644 20/test/test-app/README.md create mode 100644 20/test/test-app/iisnode.yml create mode 100644 20/test/test-app/package.json create mode 100644 20/test/test-app/server.js create mode 100644 20/test/test-app/web.config create mode 100644 20/test/test-binary/hw.js create mode 100644 20/test/test-binary/package.json create mode 100644 20/test/test-express-webapp/app.js create mode 100755 20/test/test-express-webapp/bin/www create mode 100644 20/test/test-express-webapp/package.json create mode 100644 20/test/test-express-webapp/public/stylesheets/style.css create mode 100644 20/test/test-express-webapp/routes/index.js create mode 100644 20/test/test-express-webapp/routes/users.js create mode 100644 20/test/test-express-webapp/views/error.jade create mode 100644 20/test/test-express-webapp/views/index.jade create mode 100644 20/test/test-express-webapp/views/layout.jade create mode 100644 20/test/test-hw/hw.js create mode 100644 20/test/test-hw/package.json create mode 100644 20/test/test-incremental/README.md create mode 100644 20/test/test-incremental/iisnode.yml create mode 100644 20/test/test-incremental/package.json create mode 100644 20/test/test-incremental/server.js create mode 100644 20/test/test-incremental/web.config create mode 100644 20/test/test-lib-nodejs.sh create mode 100644 20/test/test-lib-openshift.sh create mode 100644 20/test/test-lib-remote-openshift.sh create mode 100644 20/test/test-lib.sh create mode 100644 20/test/test-openshift.yaml create mode 100644 20/test/test_imagestreams_quickstart.py create mode 100644 20/test/test_latest_imagestreams.py create 
mode 100644 20/test/test_nodejs_ex_standalone.py create mode 100644 20/test/test_nodejs_ex_templates.py create mode 100644 20/test/test_nodejs_s2i_standalone.py diff --git a/20/.exclude-c8s b/20/.exclude-c8s new file mode 100644 index 00000000..e69de29b diff --git a/20/Dockerfile.c8s b/20/Dockerfile.c8s new file mode 100644 index 00000000..7e9fdc2b --- /dev/null +++ b/20/Dockerfile.c8s @@ -0,0 +1,79 @@ +FROM quay.io/sclorg/s2i-core-c8s + +# This image provides a Node.JS environment you can use to run your Node.JS +# applications. + +EXPOSE 8080 + +# Add $HOME/node_modules/.bin to the $PATH, allowing user to make npm scripts +# available on the CLI without using npm's --global installation mode +# This image will be initialized with "npm run $NPM_RUN" +# See https://docs.npmjs.com/misc/scripts, and your repo's package.json +# file for possible values of NPM_RUN +# Description +# Environment: +# * $NPM_RUN - Select an alternate / custom runtime mode, defined in your package.json files' scripts section (default: npm run "start"). +# Expose ports: +# * 8080 - Unprivileged port used by nodejs application + +ENV NODEJS_VERSION=20 \ + NPM_RUN=start \ + NAME=nodejs \ + NPM_CONFIG_PREFIX=$HOME/.npm-global \ + PATH=$HOME/node_modules/.bin/:$HOME/.npm-global/bin/:$PATH \ + CNB_STACK_ID=com.redhat.stacks.c8s-nodejs-20 \ + CNB_USER_ID=1001 \ + CNB_GROUP_ID=0 + +ENV SUMMARY="Platform for building and running Node.js $NODEJS_VERSION applications" \ + DESCRIPTION="Node.js $NODEJS_VERSION available as container is a base platform for \ +building and running various Node.js $NODEJS_VERSION applications and frameworks. \ +Node.js is a platform built on Chrome's JavaScript runtime for easily building \ +fast, scalable network applications. Node.js uses an event-driven, non-blocking I/O model \ +that makes it lightweight and efficient, perfect for data-intensive real-time applications \ +that run across distributed devices." 
+ +LABEL summary="$SUMMARY" \ + description="$DESCRIPTION" \ + io.k8s.description="$DESCRIPTION" \ + io.k8s.display-name="Node.js $NODEJS_VERSION" \ + io.openshift.expose-services="8080:http" \ + io.openshift.tags="builder,$NAME,${NAME}${NODEJS_VERSION}" \ + io.openshift.s2i.scripts-url="image:///usr/libexec/s2i" \ + io.s2i.scripts-url="image:///usr/libexec/s2i" \ + io.buildpacks.stack.id="com.redhat.stacks.c8s-nodejs-20" \ + com.redhat.dev-mode="DEV_MODE:false" \ + com.redhat.deployments-dir="${APP_ROOT}/src" \ + com.redhat.dev-mode.port="DEBUG_PORT:5858" \ + com.redhat.component="${NAME}-${NODEJS_VERSION}-container" \ + name="sclorg/$NAME-$NODEJS_VERSION-c8s" \ + version="1" \ + com.redhat.license_terms="https://www.redhat.com/en/about/red-hat-end-user-license-agreements#UBI" \ + maintainer="SoftwareCollections.org " \ + help="For more information visit https://github.com/sclorg/s2i-nodejs-container" \ + usage="s2i build quay.io/sclorg/$NAME-$NODEJS_VERSION-c8s:latest " + +RUN yum -y module enable nodejs:$NODEJS_VERSION && \ + MODULE_DEPS="make gcc gcc-c++ libatomic_ops git openssl-devel" && \ + INSTALL_PKGS="$MODULE_DEPS nodejs npm nodejs-nodemon nss_wrapper-libs which" && \ + ln -s /usr/lib/node_modules/nodemon/bin/nodemon.js /usr/bin/nodemon && \ + ln -s /usr/libexec/platform-python /usr/bin/python3 && \ + yum install -y --setopt=tsflags=nodocs $INSTALL_PKGS && \ + rpm -V $INSTALL_PKGS && \ + node -v | grep -qe "^v$NODEJS_VERSION\." && echo "Found VERSION $NODEJS_VERSION" && \ + yum -y clean all --enablerepo='*' + +# Copy the S2I scripts from the specific language image to $STI_SCRIPTS_PATH +COPY ./s2i/bin/ $STI_SCRIPTS_PATH + +# Copy extra files to the image. 
+COPY ./root/ / + +# Drop the root user and make the content of /opt/app-root owned by user 1001 +RUN chown -R 1001:0 ${APP_ROOT} && chmod -R ug+rwx ${APP_ROOT} && \ + rpm-file-permissions + +USER 1001 + +# Set the default CMD to print the usage of the language image +CMD $STI_SCRIPTS_PATH/usage diff --git a/20/Dockerfile.c9s b/20/Dockerfile.c9s new file mode 100644 index 00000000..224acb30 --- /dev/null +++ b/20/Dockerfile.c9s @@ -0,0 +1,79 @@ +FROM quay.io/sclorg/s2i-core-c9s:c9s + +# This image provides a Node.JS environment you can use to run your Node.JS +# applications. + +EXPOSE 8080 + +# Add $HOME/node_modules/.bin to the $PATH, allowing user to make npm scripts +# available on the CLI without using npm's --global installation mode +# This image will be initialized with "npm run $NPM_RUN" +# See https://docs.npmjs.com/misc/scripts, and your repo's package.json +# file for possible values of NPM_RUN +# Description +# Environment: +# * $NPM_RUN - Select an alternate / custom runtime mode, defined in your package.json files' scripts section (default: npm run "start"). +# Expose ports: +# * 8080 - Unprivileged port used by nodejs application + +ENV NODEJS_VERSION=20 \ + NPM_RUN=start \ + NAME=nodejs \ + NPM_CONFIG_PREFIX=$HOME/.npm-global \ + PATH=$HOME/node_modules/.bin/:$HOME/.npm-global/bin/:$PATH \ + CNB_STACK_ID=com.redhat.stacks.c9s-nodejs-20 \ + CNB_USER_ID=1001 \ + CNB_GROUP_ID=0 + +ENV SUMMARY="Platform for building and running Node.js $NODEJS_VERSION applications" \ + DESCRIPTION="Node.js $NODEJS_VERSION available as container is a base platform for \ +building and running various Node.js $NODEJS_VERSION applications and frameworks. \ +Node.js is a platform built on Chrome's JavaScript runtime for easily building \ +fast, scalable network applications. Node.js uses an event-driven, non-blocking I/O model \ +that makes it lightweight and efficient, perfect for data-intensive real-time applications \ +that run across distributed devices." 
+ +LABEL summary="$SUMMARY" \ + description="$DESCRIPTION" \ + io.k8s.description="$DESCRIPTION" \ + io.k8s.display-name="Node.js $NODEJS_VERSION" \ + io.openshift.expose-services="8080:http" \ + io.openshift.tags="builder,$NAME,${NAME}${NODEJS_VERSION}" \ + io.openshift.s2i.scripts-url="image:///usr/libexec/s2i" \ + io.s2i.scripts-url="image:///usr/libexec/s2i" \ + io.buildpacks.stack.id="com.redhat.stacks.c9s-nodejs-20" \ + com.redhat.dev-mode="DEV_MODE:false" \ + com.redhat.deployments-dir="${APP_ROOT}/src" \ + com.redhat.dev-mode.port="DEBUG_PORT:5858" \ + com.redhat.component="${NAME}-${NODEJS_VERSION}-container" \ + name="sclorg/$NAME-$NODEJS_VERSION-c9s" \ + version="1" \ + com.redhat.license_terms="https://www.redhat.com/en/about/red-hat-end-user-license-agreements#UBI" \ + maintainer="SoftwareCollections.org " \ + help="For more information visit https://github.com/sclorg/s2i-nodejs-container" \ + usage="s2i build quay.io/sclorg/$NAME-$NODEJS_VERSION-c9s:latest " + +# Package libatomic_ops was removed +RUN yum -y module enable nodejs:$NODEJS_VERSION && \ + MODULE_DEPS="make gcc gcc-c++ git openssl-devel" && \ + INSTALL_PKGS="$MODULE_DEPS nodejs npm nodejs-nodemon nss_wrapper-libs which" && \ + ln -s /usr/lib/node_modules/nodemon/bin/nodemon.js /usr/bin/nodemon && \ + yum install -y --setopt=tsflags=nodocs $INSTALL_PKGS && \ + rpm -V $INSTALL_PKGS && \ + node -v | grep -qe "^v$NODEJS_VERSION\." && echo "Found VERSION $NODEJS_VERSION" && \ + yum -y clean all --enablerepo='*' + +# Copy the S2I scripts from the specific language image to $STI_SCRIPTS_PATH +COPY ./s2i/bin/ $STI_SCRIPTS_PATH + +# Copy extra files to the image. 
+COPY ./root/ / + +# Drop the root user and make the content of /opt/app-root owned by user 1001 +RUN chown -R 1001:0 ${APP_ROOT} && chmod -R ug+rwx ${APP_ROOT} && \ + rpm-file-permissions + +USER 1001 + +# Set the default CMD to print the usage of the language image +CMD $STI_SCRIPTS_PATH/usage diff --git a/20/Dockerfile.fedora b/20/Dockerfile.fedora new file mode 100644 index 00000000..b162fb10 --- /dev/null +++ b/20/Dockerfile.fedora @@ -0,0 +1,75 @@ +FROM quay.io/fedora/s2i-core:38 + +# This image provides a Node.JS environment you can use to run your Node.JS +# applications. + +EXPOSE 8080 + +# Add $HOME/node_modules/.bin to the $PATH, allowing user to make npm scripts +# available on the CLI without using npm's --global installation mode +# This image will be initialized with "npm run $NPM_RUN" +# See https://docs.npmjs.com/misc/scripts, and your repo's package.json +# file for possible values of NPM_RUN +# Description +# Environment: +# * $NPM_RUN - Select an alternate / custom runtime mode, defined in your package.json files' scripts section (default: npm run "start"). +# Expose ports: +# * 8080 - Unprivileged port used by nodejs application + +ENV NODEJS_VERSION=20 \ + NPM_RUN=start \ + NAME=nodejs \ + NPM_CONFIG_PREFIX=$HOME/.npm-global \ + PATH=$HOME/node_modules/.bin/:$HOME/.npm-global/bin/:$PATH + +ENV SUMMARY="Platform for building and running Node.js $NODEJS_VERSION applications" \ + DESCRIPTION="Node.js $NODEJS_VERSION available as container is a base platform for \ +building and running various Node.js $NODEJS_VERSION applications and frameworks. \ +Node.js is a platform built on Chrome's JavaScript runtime for easily building \ +fast, scalable network applications. Node.js uses an event-driven, non-blocking I/O model \ +that makes it lightweight and efficient, perfect for data-intensive real-time applications \ +that run across distributed devices." 
+ +LABEL summary="$SUMMARY" \ + description="$DESCRIPTION" \ + io.k8s.description="$DESCRIPTION" \ + io.k8s.display-name="Node.js $NODEJS_VERSION" \ + io.openshift.expose-services="8080:http" \ + io.openshift.tags="builder,$NAME,$NAME$NODEJS_VERSION" \ + io.openshift.s2i.scripts-url="image:///usr/libexec/s2i" \ + io.s2i.scripts-url="image:///usr/libexec/s2i" \ + com.redhat.dev-mode="DEV_MODE:false" \ + com.redhat.deployments-dir="${APP_ROOT}/src" \ + com.redhat.dev-mode.port="DEBUG_PORT:5858"\ + com.redhat.component="$NAME" \ + name="fedora/$NAME-$NODEJS_VERSION" \ + version="$NODEJS_VERSION" \ + maintainer="SoftwareCollections.org " \ + help="For more information visit https://github.com/sclorg/s2i-nodejs-container" \ + usage="oc new-app $FGC/$NAME~" + +RUN MODULE_DEPS="make gcc gcc-c++ libatomic_ops git openssl-devel" && \ + INSTALL_PKGS="$MODULE_DEPS nodejs$NODEJS_VERSION nodejs-nodemon nodejs$NODEJS_VERSION-npm nss_wrapper-libs which" && \ + yum install -y --setopt=tsflags=nodocs $INSTALL_PKGS && \ + rpm -V $INSTALL_PKGS && \ + ln -s /usr/bin/node-$NODEJS_VERSION /usr/bin/node && \ + ln -s /usr/bin/npm-$NODEJS_VERSION /usr/bin/npm && \ + ln -s /usr/bin/npx-$NODEJS_VERSION /usr/bin/npx && \ + ln -s /usr/lib/node_modules_18/nodemon /usr/lib/node_modules_$NODEJS_VERSION/nodemon && \ + node -v | grep -qe "^v$NODEJS_VERSION\." && echo "Found VERSION $NODEJS_VERSION" && \ + yum -y clean all --enablerepo='*' + +# Copy the S2I scripts from the specific language image to $STI_SCRIPTS_PATH +COPY ./s2i/bin/ $STI_SCRIPTS_PATH + +# Copy extra files to the image, including help file. 
+COPY ./root/ / + +# Drop the root user and make the content of /opt/app-root owned by user 1001 +RUN chown -R 1001:0 ${APP_ROOT} && chmod -R ug+rwx ${APP_ROOT} && \ + rpm-file-permissions + +USER 1001 + +# Set the default CMD to print the usage of the language image +CMD $STI_SCRIPTS_PATH/usage diff --git a/20/Dockerfile.rhel8 b/20/Dockerfile.rhel8 new file mode 100644 index 00000000..f13bf884 --- /dev/null +++ b/20/Dockerfile.rhel8 @@ -0,0 +1,79 @@ +FROM ubi8/s2i-core:1 + +# This image provides a Node.JS environment you can use to run your Node.JS +# applications. + +EXPOSE 8080 + +# Add $HOME/node_modules/.bin to the $PATH, allowing user to make npm scripts +# available on the CLI without using npm's --global installation mode +# This image will be initialized with "npm run $NPM_RUN" +# See https://docs.npmjs.com/misc/scripts, and your repo's package.json +# file for possible values of NPM_RUN +# Description +# Environment: +# * $NPM_RUN - Select an alternate / custom runtime mode, defined in your package.json files' scripts section (default: npm run "start"). +# Expose ports: +# * 8080 - Unprivileged port used by nodejs application + +ENV NODEJS_VERSION=20 \ + NPM_RUN=start \ + NAME=nodejs \ + NPM_CONFIG_PREFIX=$HOME/.npm-global \ + PATH=$HOME/node_modules/.bin/:$HOME/.npm-global/bin/:$PATH \ + CNB_STACK_ID=com.redhat.stacks.ubi8-nodejs-20 \ + CNB_USER_ID=1001 \ + CNB_GROUP_ID=0 + +ENV SUMMARY="Platform for building and running Node.js $NODEJS_VERSION applications" \ + DESCRIPTION="Node.js $NODEJS_VERSION available as container is a base platform for \ +building and running various Node.js $NODEJS_VERSION applications and frameworks. \ +Node.js is a platform built on Chrome's JavaScript runtime for easily building \ +fast, scalable network applications. Node.js uses an event-driven, non-blocking I/O model \ +that makes it lightweight and efficient, perfect for data-intensive real-time applications \ +that run across distributed devices." 
+ +LABEL summary="$SUMMARY" \ + description="$DESCRIPTION" \ + io.k8s.description="$DESCRIPTION" \ + io.k8s.display-name="Node.js $NODEJS_VERSION" \ + io.openshift.expose-services="8080:http" \ + io.openshift.tags="builder,$NAME,${NAME}${NODEJS_VERSION}" \ + io.openshift.s2i.scripts-url="image:///usr/libexec/s2i" \ + io.s2i.scripts-url="image:///usr/libexec/s2i" \ + io.buildpacks.stack.id="com.redhat.stacks.ubi8-nodejs-20" \ + com.redhat.dev-mode="DEV_MODE:false" \ + com.redhat.deployments-dir="${APP_ROOT}/src" \ + com.redhat.dev-mode.port="DEBUG_PORT:5858" \ + com.redhat.component="${NAME}-${NODEJS_VERSION}-container" \ + name="ubi8/$NAME-$NODEJS_VERSION" \ + version="1" \ + com.redhat.license_terms="https://www.redhat.com/en/about/red-hat-end-user-license-agreements#UBI" \ + maintainer="SoftwareCollections.org " \ + help="For more information visit https://github.com/sclorg/s2i-nodejs-container" \ + usage="s2i build ubi8/$NAME-$NODEJS_VERSION:latest " + +RUN yum -y module enable nodejs:$NODEJS_VERSION && \ + MODULE_DEPS="make gcc gcc-c++ libatomic_ops git openssl-devel" && \ + INSTALL_PKGS="$MODULE_DEPS nodejs npm nodejs-nodemon nss_wrapper-libs which" && \ + ln -s /usr/lib/node_modules/nodemon/bin/nodemon.js /usr/bin/nodemon && \ + ln -s /usr/libexec/platform-python /usr/bin/python3 && \ + yum install -y --setopt=tsflags=nodocs $INSTALL_PKGS && \ + rpm -V $INSTALL_PKGS && \ + node -v | grep -qe "^v$NODEJS_VERSION\." && echo "Found VERSION $NODEJS_VERSION" && \ + yum -y clean all --enablerepo='*' + +# Copy the S2I scripts from the specific language image to $STI_SCRIPTS_PATH +COPY ./s2i/bin/ $STI_SCRIPTS_PATH + +# Copy extra files to the image. 
+COPY ./root/ / + +# Drop the root user and make the content of /opt/app-root owned by user 1001 +RUN chown -R 1001:0 ${APP_ROOT} && chmod -R ug+rwx ${APP_ROOT} && \ + rpm-file-permissions + +USER 1001 + +# Set the default CMD to print the usage of the language image +CMD $STI_SCRIPTS_PATH/usage diff --git a/20/Dockerfile.rhel9 b/20/Dockerfile.rhel9 new file mode 100644 index 00000000..6bf05d11 --- /dev/null +++ b/20/Dockerfile.rhel9 @@ -0,0 +1,78 @@ +FROM ubi9/s2i-core:1 + +# This image provides a Node.JS environment you can use to run your Node.JS +# applications. + +EXPOSE 8080 + +# Add $HOME/node_modules/.bin to the $PATH, allowing user to make npm scripts +# available on the CLI without using npm's --global installation mode +# This image will be initialized with "npm run $NPM_RUN" +# See https://docs.npmjs.com/misc/scripts, and your repo's package.json +# file for possible values of NPM_RUN +# Description +# Environment: +# * $NPM_RUN - Select an alternate / custom runtime mode, defined in your package.json files' scripts section (default: npm run "start"). +# Expose ports: +# * 8080 - Unprivileged port used by nodejs application + +ENV NODEJS_VERSION=20 \ + NPM_RUN=start \ + NAME=nodejs \ + NPM_CONFIG_PREFIX=$HOME/.npm-global \ + PATH=$HOME/node_modules/.bin/:$HOME/.npm-global/bin/:$PATH \ + CNB_STACK_ID=com.redhat.stacks.ubi9-nodejs-20 \ + CNB_USER_ID=1001 \ + CNB_GROUP_ID=0 + +ENV SUMMARY="Platform for building and running Node.js $NODEJS_VERSION applications" \ + DESCRIPTION="Node.js $NODEJS_VERSION available as container is a base platform for \ +building and running various Node.js $NODEJS_VERSION applications and frameworks. \ +Node.js is a platform built on Chrome's JavaScript runtime for easily building \ +fast, scalable network applications. Node.js uses an event-driven, non-blocking I/O model \ +that makes it lightweight and efficient, perfect for data-intensive real-time applications \ +that run across distributed devices." 
+ +LABEL summary="$SUMMARY" \ + description="$DESCRIPTION" \ + io.k8s.description="$DESCRIPTION" \ + io.k8s.display-name="Node.js $NODEJS_VERSION" \ + io.openshift.expose-services="8080:http" \ + io.openshift.tags="builder,$NAME,${NAME}${NODEJS_VERSION}" \ + io.openshift.s2i.scripts-url="image:///usr/libexec/s2i" \ + io.s2i.scripts-url="image:///usr/libexec/s2i" \ + io.buildpacks.stack.id="com.redhat.stacks.ubi9-nodejs-20" \ + com.redhat.dev-mode="DEV_MODE:false" \ + com.redhat.deployments-dir="${APP_ROOT}/src" \ + com.redhat.dev-mode.port="DEBUG_PORT:5858" \ + com.redhat.component="${NAME}-${NODEJS_VERSION}-container" \ + name="ubi9/$NAME-$NODEJS_VERSION" \ + version="1" \ + com.redhat.license_terms="https://www.redhat.com/en/about/red-hat-end-user-license-agreements#UBI" \ + maintainer="SoftwareCollections.org " \ + help="For more information visit https://github.com/sclorg/s2i-nodejs-container" \ + usage="s2i build ubi9/$NAME-$NODEJS_VERSION:latest " + +RUN yum -y module enable nodejs:$NODEJS_VERSION && \ + MODULE_DEPS="make gcc gcc-c++ git openssl-devel" && \ + INSTALL_PKGS="$MODULE_DEPS nodejs npm nodejs-nodemon nss_wrapper-libs which" && \ + ln -s /usr/lib/node_modules/nodemon/bin/nodemon.js /usr/bin/nodemon && \ + yum install -y --setopt=tsflags=nodocs $INSTALL_PKGS && \ + rpm -V $INSTALL_PKGS && \ + node -v | grep -qe "^v$NODEJS_VERSION\." && echo "Found VERSION $NODEJS_VERSION" && \ + yum -y clean all --enablerepo='*' + +# Copy the S2I scripts from the specific language image to $STI_SCRIPTS_PATH +COPY ./s2i/bin/ $STI_SCRIPTS_PATH + +# Copy extra files to the image. 
+COPY ./root/ / + +# Drop the root user and make the content of /opt/app-root owned by user 1001 +RUN chown -R 1001:0 ${APP_ROOT} && chmod -R ug+rwx ${APP_ROOT} && \ + rpm-file-permissions + +USER 1001 + +# Set the default CMD to print the usage of the language image +CMD $STI_SCRIPTS_PATH/usage diff --git a/20/README.md b/20/README.md new file mode 100644 index 00000000..f5209b6e --- /dev/null +++ b/20/README.md @@ -0,0 +1,253 @@ +NodeJS 20 container image +========================= + +This container image includes Node.JS 20 as a [S2I](https://github.com/openshift/source-to-image) base image for your Node.JS 20 applications. +Users can choose between RHEL, CentOS and Fedora based images. +The RHEL images are available in the [Red Hat Container Catalog](https://access.redhat.com/containers/), +and the Fedora images are available in [Quay.io](https://quay.io/organization/fedora). +the CentOS Stream images are available in the [Quay.io](https://quay.io/organization/sclorg), +The resulting image can be run using [podman](https://github.com/containers/libpod). + +Note: while the examples in this README are calling `podman`, you can replace any such calls by `docker` with the same arguments + +Description +----------- + +Node.js 20 available as container is a base platform for +building and running various Node.js 20 applications and frameworks. +Node.js is a platform built on Chrome's JavaScript runtime for easily building +fast, scalable network applications. Node.js uses an event-driven, non-blocking I/O model +that makes it lightweight and efficient, perfect for data-intensive real-time applications +that run across distributed devices. + +Usage in OpenShift +------------------ +In this example, we will assume that you are using the `ubi8/nodejs-20` image, available via `nodejs:20` imagestream tag in Openshift. 
+ +To build a simple [nodejs-sample-app](https://github.com/sclorg/nodejs-ex.git) application in Openshift: + +``` +oc new-app nodejs:20~https://github.com/sclorg/nodejs-ex.git +``` + +To access the application: +``` +$ oc get pods +$ oc exec -- curl 127.0.0.1:8080 +``` + +Source-to-Image framework and scripts +------------------------------------- +This image supports the [Source-to-Image](https://docs.openshift.com/container-platform/4.14/openshift_images/create-images.html#images-create-s2i_create-images) +(S2I) strategy in OpenShift. The Source-to-Image is an OpenShift framework +which makes it easy to write images that take application source code as +an input, use a builder image like this Node.js container image, and produce +a new image that runs the assembled application as an output. + +To support the Source-to-Image framework, important scripts are included in the builder image: + +* The `/usr/libexec/s2i/assemble` script inside the image is run to produce a new image with the application artifacts. The script takes sources of a given application and places them into appropriate directories inside the image. It utilizes some common patterns in Node.js application development (see the **Environment variables** section below). +* The `/usr/libexec/s2i/run` script is set as the default command in the resulting container image (the new image with the application artifacts). It runs `npm run` for production, or `nodemon` if `DEV_MODE` is set to `true` (see the **Environment variables** section below). + +Building an application using a Dockerfile +------------------------------------------ +Compared to the Source-to-Image strategy, using a Dockerfile is a more +flexible way to build a Node.js container image with an application. +Use a Dockerfile when Source-to-Image is not sufficiently flexible for you or +when you build the image outside of the OpenShift environment. + +To use the Node.js image in a Dockerfile, follow these steps: + +#### 1. 
Pull a base builder image to build on + +``` +podman pull ubi8/nodejs-20 +``` + +An UBI image `ubi8/nodejs-20` is used in this example. This image is usable and freely redistributable under the terms of the UBI End User License Agreement (EULA). See more about UBI at [UBI FAQ](https://developers.redhat.com/articles/ubi-faq). + +#### 2. Pull an application code + +An example application available at https://github.com/sclorg/nodejs-ex.git is used here. Feel free to clone the repository for further experiments. + +``` +git clone https://github.com/sclorg/nodejs-ex.git app-src +``` + +#### 3. Prepare an application inside a container + +This step usually consists of at least these parts: + +* putting the application source into the container +* installing the dependencies +* setting the default command in the resulting image + +For all these three parts, users can either setup all manually and use commands `nodejs` and `npm` explicitly in the Dockerfile ([3.1.](#31-to-use-your-own-setup-create-a-dockerfile-with-this-content)), or users can use the Source-to-Image scripts inside the image ([3.2.](#32-to-use-the-source-to-image-scripts-and-build-an-image-using-a-dockerfile-create-a-dockerfile-with-this-content); see more about these scripts in the section "Source-to-Image framework and scripts" above), that already know how to set-up and run some common Node.js applications. + +##### 3.1. To use your own setup, create a Dockerfile with this content: +``` +FROM ubi8/nodejs-20 + +# Add application sources +ADD app-src . + +# Install the dependencies +RUN npm install + +# Run script uses standard ways to run the application +CMD npm run -d start +``` + +##### 3.2. 
To use the Source-to-Image scripts and build an image using a Dockerfile, create a Dockerfile with this content: +``` +FROM ubi8/nodejs-20 + +# Add application sources to a directory that the assemble script expects them +# and set permissions so that the container runs without root access +USER 0 +ADD app-src /tmp/src +RUN chown -R 1001:0 /tmp/src +USER 1001 + +# Install the dependencies +RUN /usr/libexec/s2i/assemble + +# Set the default command for the resulting image +CMD /usr/libexec/s2i/run +``` + +#### 4. Build a new image from a Dockerfile prepared in the previous step + +``` +podman build -t node-app . +``` + +#### 5. Run the resulting image with the final application + +``` +podman run -d node-app +``` + +Environment variables for Source-to-Image +--------------------- + +Application developers can use the following environment variables to configure the runtime behavior of this image in OpenShift: + +**`NODE_ENV`** + NodeJS runtime mode (default: "production") + +**`DEV_MODE`** + When set to "true", `nodemon` will be used to automatically reload the server while you work (default: "false"). Setting `DEV_MODE` to "true" will change the `NODE_ENV` default to "development" (if not explicitly set). + +**`NPM_BUILD`** + Select an alternate / custom build command, defined in your `package.json` file's [`scripts`](https://docs.npmjs.com/misc/scripts) section (default: npm run "build"). These user-defined run-scripts are unavailable while `DEV_MODE` is in use. + +**`NPM_RUN`** + Select an alternate / custom runtime mode, defined in your `package.json` file's [`scripts`](https://docs.npmjs.com/misc/scripts) section (default: npm run "start"). These user-defined run-scripts are unavailable while `DEV_MODE` is in use. 
+ +**`HTTP_PROXY`** + Use an npm proxy during assembly + +**`HTTPS_PROXY`** + Use an npm proxy during assembly + +**`NPM_MIRROR`** + Use a custom NPM registry mirror to download packages during the build process + +One way to define a set of environment variables is to include them as key value pairs in your repo's `.s2i/environment` file. + +Example: DATABASE_USER=sampleUser + +#### NOTE: Define your own "`DEV_MODE`": + +The following `package.json` example includes a `scripts.dev` entry. You can define your own custom [`NPM_RUN`](https://docs.npmjs.com/cli/run-script) scripts in your application's `package.json` file. + +#### Note: Setting logging output verbosity +To alter the level of logs output during an `npm install` the npm_config_loglevel environment variable can be set. See [npm-config](https://docs.npmjs.com/misc/config). + +Development Mode +---------------- +This image supports development mode. This mode can be switched on and off with the environment variable `DEV_MODE`. `DEV_MODE` can either be set to `true` or `false`. +Development mode supports two features: +* Hot Deploy +* Debugging + +The debug port can be specified with the environment variable `DEBUG_PORT`. `DEBUG_PORT` is only valid if `DEV_MODE=true`. + +A simple example command for running the container in development mode is: +``` +podman run --env DEV_MODE=true my-image-id +``` + +To run the container in development mode with a debug port of 5454, run: +``` +$ podman run --env DEV_MODE=true DEBUG_PORT=5454 my-image-id +``` + +To run the container in production mode, run: +``` +$ podman run --env DEV_MODE=false my-image-id +``` + +By default, `DEV_MODE` is set to `false`, and `DEBUG_PORT` is set to `5858`, however the `DEBUG_PORT` is only relevant if `DEV_MODE=true`. + +Hot deploy +---------- + +As part of development mode, this image supports hot deploy. 
If development mode is enabled, any source code that is changed in the running container will be immediately reflected in the running nodejs application.
+
+### Using Podman's exec
+
+To change your source code in a running container, use Podman's [exec](https://github.com/containers/libpod) command:
+```
+$ podman exec -it /bin/bash
+```
+
+After you [Podman exec](https://github.com/containers/libpod) into the running container, your current directory is set to `/opt/app-root/src`, where the source code for your application is located.
+
+### Using OpenShift's rsync
+
+If you have deployed the container to OpenShift, you can use [oc rsync](https://docs.openshift.org/latest/dev_guide/copy_files_to_container.html) to copy local files to a remote container running in an OpenShift pod.
+
+#### Warning:
+
+The default behaviour of the s2i-nodejs container image is to run the Node.js application using the command `npm start`. This runs the _start_ script in the _package.json_ file. In developer mode, the application is run using the command `nodemon`. The default behaviour of nodemon is to look for the _main_ attribute in the _package.json_ file, and execute that script. If the _main_ attribute doesn't appear in the _package.json_ file, it executes the _start_ script. So, in order to achieve some sort of uniform functionality between production and development modes, the user should remove the _main_ attribute.
+
+Below is an example _package.json_ file with the _main_ attribute and _start_ script marked appropriately:
+
+```json
+{
+ "name": "node-echo",
+ "version": "0.0.1",
+ "description": "node-echo",
+ "main": "example.js", <--- main attribute
+ "dependencies": {
+ },
+ "devDependencies": {
+ "nodemon": "*"
+ },
+ "engine": {
+ "node": "*",
+ "npm": "*"
+ },
+ "scripts": {
+ "dev": "nodemon --ignore node_modules/ server.js",
+ "start": "node server.js" <-- start script
+ },
+ "keywords": [
+ "Echo"
+ ],
+ "license": ""
+}
+```
+
+#### Note:
+`oc rsync` is only available in versions 3.1+ of OpenShift.
+
+
+See also
+--------
+Dockerfile and other sources are available on https://github.com/sclorg/s2i-nodejs-container.
+In that repository you can also find other versions of Node.js environment Dockerfiles.
+Dockerfile for CentOS Stream 9 is called `Dockerfile.c9s`,
+for RHEL8 it's `Dockerfile.rhel8`, for RHEL9 it's `Dockerfile.rhel9` and the Fedora Dockerfile is called Dockerfile.fedora.
diff --git a/20/root/opt/app-root/etc/generate_container_user b/20/root/opt/app-root/etc/generate_container_user new file mode 100644 index 00000000..b28a7a3d --- /dev/null +++ b/20/root/opt/app-root/etc/generate_container_user @@ -0,0 +1,17 @@ +# Set current user in nss_wrapper +USER_ID=$(id -u) +GROUP_ID=$(id -g) + +if [ x"$USER_ID" != x"0" -a x"$USER_ID" != x"1001" ]; then + + NSS_WRAPPER_PASSWD=/opt/app-root/etc/passwd + NSS_WRAPPER_GROUP=/etc/group + + cat /etc/passwd | sed -e 's/^default:/builder:/' > $NSS_WRAPPER_PASSWD + + echo "default:x:${USER_ID}:${GROUP_ID}:Default Application User:${HOME}:/sbin/nologin" >> $NSS_WRAPPER_PASSWD + + export NSS_WRAPPER_PASSWD + export NSS_WRAPPER_GROUP + export LD_PRELOAD=libnss_wrapper.so +fi diff --git a/20/root/opt/app-root/etc/npm_global_module_list b/20/root/opt/app-root/etc/npm_global_module_list new file mode 100644 index 00000000..005e1501 --- /dev/null +++ b/20/root/opt/app-root/etc/npm_global_module_list @@ -0,0 +1,5 @@ +async +mime +mkdirp +qs +minimatch diff --git a/20/root/opt/app-root/etc/scl_enable b/20/root/opt/app-root/etc/scl_enable new file mode 100644 index 00000000..4bfebea0 --- /dev/null +++ b/20/root/opt/app-root/etc/scl_enable @@ -0,0 +1,3 @@ +# This will make scl collection binaries work out of box. +unset BASH_ENV PROMPT_COMMAND ENV +source scl_source enable rh-nodejs${NODEJS_VERSION} diff --git a/20/s2i/bin/assemble b/20/s2i/bin/assemble new file mode 100755 index 00000000..f644a811 --- /dev/null +++ b/20/s2i/bin/assemble @@ -0,0 +1,116 @@ +#!/bin/bash + +# Prevent running assemble in builders different than official STI image. +# The official nodejs:8-onbuild already run npm install and use different +# application folder. 
+[ -d "/usr/src/app" ] && exit 0 + +set -e + +# FIXME: Linking of global modules is disabled for now as it causes npm failures +# under RHEL7 +# Global modules good to have +# npmgl=$(grep "^\s*[^#\s]" ../etc/npm_global_module_list | sort -u) +# Available global modules; only match top-level npm packages +#global_modules=$(npm ls -g 2> /dev/null | perl -ne 'print "$1\n" if /^\S+\s(\S+)\@[\d\.-]+/' | sort -u) +# List all modules in common +#module_list=$(/usr/bin/comm -12 <(echo "${global_modules}") | tr '\n' ' ') +# Link the modules +#npm link $module_list + +safeLogging () { + if [[ $1 =~ http[s]?://.*@.*$ ]]; then + echo $1 | sed 's/^.*@/redacted@/' + else + echo $1 + fi +} + +shopt -s dotglob +if [ -d /tmp/artifacts ] && [ "$(ls /tmp/artifacts/ 2>/dev/null)" ]; then + echo "---> Restoring previous build artifacts ..." + mv -T --verbose /tmp/artifacts/node_modules "${HOME}/node_modules" +fi + +echo "---> Installing application source ..." +mv /tmp/src/* ./ + +# Fix source directory permissions +fix-permissions ./ + +if [ ! -z $HTTP_PROXY ]; then + echo "---> Setting npm http proxy to" $(safeLogging $HTTP_PROXY) + npm config set proxy $HTTP_PROXY +fi + +if [ ! -z $http_proxy ]; then + echo "---> Setting npm http proxy to" $(safeLogging $http_proxy) + npm config set proxy $http_proxy +fi + +if [ ! -z $HTTPS_PROXY ]; then + echo "---> Setting npm https proxy to" $(safeLogging $HTTPS_PROXY) + npm config set https-proxy $HTTPS_PROXY +fi + +if [ ! -z $https_proxy ]; then + echo "---> Setting npm https proxy to" $(safeLogging $https_proxy) + npm config set https-proxy $https_proxy +fi + +# Change the npm registry mirror if provided +if [ -n "$NPM_MIRROR" ]; then + npm config set registry $NPM_MIRROR +fi + +# Set the DEV_MODE to false by default. +if [ -z "$DEV_MODE" ]; then + export DEV_MODE=false +fi + +# If NODE_ENV is not set by the user, then NODE_ENV is determined by whether +# the container is run in development mode. 
+if [ -z "$NODE_ENV" ]; then + if [ "$DEV_MODE" == true ]; then + export NODE_ENV=development + else + export NODE_ENV=production + fi +fi + +if [ "$NODE_ENV" != "production" ]; then + + echo "---> Building your Node application from source" + npm install + +else + + echo "---> Installing all dependencies" + NODE_ENV=development npm install + + #do not fail when there is no build script + echo "---> Building in production mode" + npm run ${NPM_BUILD:-build} --if-present + + echo "---> Pruning the development dependencies" + npm prune + + NPM_TMP=$(npm config get tmp) + if ! mountpoint $NPM_TMP; then + echo "---> Cleaning the $NPM_TMP/npm-*" + rm -rf $NPM_TMP/npm-* + fi + + # Clear the npm's cache and tmp directories only if they are not a docker volumes + NPM_CACHE=$(npm config get cache) + if ! mountpoint $NPM_CACHE; then + echo "---> Cleaning the npm cache $NPM_CACHE" + #As of npm@5 even the 'npm cache clean --force' does not fully remove the cache directory + # instead of $NPM_CACHE* use $NPM_CACHE/*. + # We do not want to delete .npmrc file. + rm -rf "${NPM_CACHE:?}/" + fi +fi + +# Fix source directory permissions +fix-permissions ./ diff --git a/20/s2i/bin/run b/20/s2i/bin/run new file mode 100755 index 00000000..ff566f55 --- /dev/null +++ b/20/s2i/bin/run @@ -0,0 +1,60 @@ +#!/bin/bash + +# S2I run script for the 'nodejs' image. +# The run script executes the server that runs your application. +# +# For more information see the documentation: +# https://github.com/openshift/source-to-image/blob/master/docs/builder_image.md +# + +set -e + +if [ -e "/opt/app-root/etc/generate_container_user" ]; then + source /opt/app-root/etc/generate_container_user +fi + +# Runs the nodejs application server. If the container is run in development mode, +# hot deploy and debugging are enabled. +run_node() { + echo -e "Environment: \n\tDEV_MODE=${DEV_MODE}\n\tNODE_ENV=${NODE_ENV}\n\tDEBUG_PORT=${DEBUG_PORT}" + if [ "$DEV_MODE" == true ]; then + echo "Launching via nodemon..." 
+ exec nodemon --inspect="$DEBUG_PORT" + else + echo "Launching via npm..." + exec npm run -d $NPM_RUN + fi +} + +#Set the debug port to 5858 by default. +if [ -z "$DEBUG_PORT" ]; then + export DEBUG_PORT=5858 +fi + +# Set the environment to development by default. +if [ -z "$DEV_MODE" ]; then + export DEV_MODE=false +fi + +# If NODE_ENV is not set by the user, then NODE_ENV is determined by whether +# the container is run in development mode. +if [ -z "$NODE_ENV" ]; then + if [ "$DEV_MODE" == true ]; then + export NODE_ENV=development + else + export NODE_ENV=production + fi +fi + +# If the official dockerhub node image is used, skip the SCL setup below +# and just run the nodejs server +if [ -d "/usr/src/app" ]; then + run_node +fi + +# Allow users to inspect/debug the builder image itself, by using: +# $ docker run -i -t openshift/centos-nodejs-builder --debug +# +[ "$1" == "--debug" ] && exec /bin/bash + +run_node diff --git a/20/s2i/bin/save-artifacts b/20/s2i/bin/save-artifacts new file mode 100755 index 00000000..16be05e7 --- /dev/null +++ b/20/s2i/bin/save-artifacts @@ -0,0 +1,5 @@ +#!/bin/bash + +if [ -d "${HOME}/node_modules" ] && [ "$(ls "${HOME}/node_modules" 2>/dev/null)" ]; then + tar -C "${HOME}" -cf - node_modules +fi diff --git a/20/s2i/bin/usage b/20/s2i/bin/usage new file mode 100755 index 00000000..b8c1dca8 --- /dev/null +++ b/20/s2i/bin/usage @@ -0,0 +1,15 @@ +#!/bin/sh + +DISTRO=`cat /etc/*-release | grep ^ID= | grep -Po '".*?"' | tr -d '"'` + +cat < nodejs-sample-app + +You can then run the resulting image via: +podman run -p 8080:8080 nodejs-sample-app +EOF diff --git a/20/test/check_imagestreams.py b/20/test/check_imagestreams.py new file mode 100755 index 00000000..6ddaef32 --- /dev/null +++ b/20/test/check_imagestreams.py @@ -0,0 +1,105 @@ +#!/bin/env python3 + +# MIT License +# +# Copyright (c) 2018-2019 Red Hat, Inc. 
+ +# Permission is hereby granted, free of charge, to any person obtaining a copy +# of this software and associated documentation files (the "Software"), to deal +# in the Software without restriction, including without limitation the rights +# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +# copies of the Software, and to permit persons to whom the Software is +# furnished to do so, subject to the following conditions: +# +# The above copyright notice and this permission notice shall be included in all +# copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +# SOFTWARE. 
+ +import sys +import json +import logging +import os + +from pathlib import Path +from typing import Dict, List, Any + +IMAGESTREAMS_DIR: str = "imagestreams" + + +class ImageStreamChecker(object): + version: str = "" + + def __init__(self, version: str): + self.version = version + self.results: Dict[Any, Any] = {} + + def load_json_file(self, filename: Path) -> Any: + with open(str(filename)) as f: + data = json.load(f) + isinstance(data, Dict) + return data + + def check_version(self, json_dict: Dict[Any, Any]) -> List[str]: + res = [] + for tags in json_dict["spec"]["tags"]: + print( + f"check_version: Compare tags['name']:'{tags['name']}' against version:'{self.version}'" + ) + # The name can be"" or "-elX" or "-ubiX" + if tags["name"] == self.version or tags["name"].startswith( + self.version + "-" + ): + res.append(tags) + return res + + def check_latest_tag(self, json_dict: Dict[Any, Any]) -> bool: + latest_tag_correct: bool = False + for tags in json_dict["spec"]["tags"]: + if tags["name"] != "latest": + continue + print( + f"check_latest_tag: Compare tags['name']:'{tags['name']}' against version:'{self.version}'" + ) + # The latest can link to either "" or "-elX" or "-ubiX" + if tags["from"]["name"] == self.version or tags["from"]["name"].startswith( + self.version + "-" + ): + latest_tag_correct = True + print(f"Latest tag found.") + return latest_tag_correct + + def check_imagestreams(self) -> int: + p = Path(".") + json_files = p.glob(f"{IMAGESTREAMS_DIR}/*.json") + if not json_files: + print(f"No json files present in {IMAGESTREAMS_DIR}.") + return 0 + for f in json_files: + print(f"Checking file {str(f)}.") + json_dict = self.load_json_file(f) + if not (self.check_version(json_dict) and self.check_latest_tag(json_dict)): + print( + f"The latest version is not present in {str(f)} or in latest tag." 
+ ) + self.results[f] = False + if self.results: + return 1 + print("Imagestreams contains the latest version.") + return 0 + + +if __name__ == "__main__": + if len(sys.argv) != 2: + logging.fatal("%s: %s", sys.argv[0], "VERSION as an argument was not provided") + sys.exit(1) + + print(f"Version to check is {sys.argv[1]}.") + isc = ImageStreamChecker(version=sys.argv[1]) + sys.exit(isc.check_imagestreams()) diff --git a/20/test/examples/from-dockerfile/.gitignore b/20/test/examples/from-dockerfile/.gitignore new file mode 100644 index 00000000..81e219d3 --- /dev/null +++ b/20/test/examples/from-dockerfile/.gitignore @@ -0,0 +1,2 @@ +app-src + diff --git a/20/test/examples/from-dockerfile/Dockerfile b/20/test/examples/from-dockerfile/Dockerfile new file mode 100644 index 00000000..b0be3313 --- /dev/null +++ b/20/test/examples/from-dockerfile/Dockerfile @@ -0,0 +1,16 @@ +FROM registry.access.redhat.com/ubi8/nodejs-16 + +# Add application sources +ADD app-src . + +# In case you run into permission errors during build (eg. by use of umask) +# running the fix-permission script will make sure all bits are as expected by the image +USER 0 +RUN fix-permissions ./ +USER 1001 + +# Install the dependencies +RUN npm install + +# Run script uses standard ways to run the application +CMD npm run -d start diff --git a/20/test/examples/from-dockerfile/Dockerfile.s2i b/20/test/examples/from-dockerfile/Dockerfile.s2i new file mode 100644 index 00000000..0cbc980d --- /dev/null +++ b/20/test/examples/from-dockerfile/Dockerfile.s2i @@ -0,0 +1,25 @@ +FROM registry.access.redhat.com/ubi8/nodejs-16 + +# This image supports the Source-to-Image +# (see more at https://docs.openshift.com/container-platform/3.11/creating_images/s2i.html). 
+# In order to support the Source-to-Image framework, there are some interesting +# scripts inside the builder image, that can be run in a Dockerfile directly as well: +# * The `/usr/libexec/s2i/assemble` script inside the image is run in order +# to produce a new image with the application artifacts. +# The script takes sources of a given application and places them into +# appropriate directories inside the image. +# * The `/usr/libexec/s2i/run` script executes the application and is set as +# a default command in the resulting container image. + +# Add application sources to a directory that the assemble script expects them +# and set permissions so that the container runs without root access +USER 0 +ADD app-src /tmp/src +RUN chown -R 1001:0 /tmp/src +USER 1001 + +# Let the assemble script to install the dependencies +RUN /usr/libexec/s2i/assemble + +# Run script uses standard ways to run the application +CMD /usr/libexec/s2i/run diff --git a/20/test/examples/from-dockerfile/README.md b/20/test/examples/from-dockerfile/README.md new file mode 100644 index 00000000..ddaa6672 --- /dev/null +++ b/20/test/examples/from-dockerfile/README.md @@ -0,0 +1,22 @@ +Dockerfile examples +=================== + +This directory contains example Dockerfiles that demonstrate how to use the image with a Dockerfile and `docker build`. + +For demonstration, we use an application code available at https://github.com/sclorg/nodejs-ex.git. + +Pull the source to the local machine first: +``` +git clone https://github.com/sclorg/nodejs-ex.git app-src +``` + +Then, build a new image from a Dockerfile in this directory: +``` +docker build -f Dockerfile -t node-app . 
+``` + +And run the resulting image with the final application: +``` +docker run -ti --rm node-app +``` + diff --git a/20/test/imagestreams/imagestreams.yaml b/20/test/imagestreams/imagestreams.yaml new file mode 100644 index 00000000..29f93e2d --- /dev/null +++ b/20/test/imagestreams/imagestreams.yaml @@ -0,0 +1,77 @@ +--- +- name: nodejs + pretty_name: Node.js + sample_repo: https://github.com/sclorg/nodejs-ex.git + category: builder + description: >- + Build and run Node.js APP_VERSION applications on DISTRO_NAME. For more information + about using this builder image, including OpenShift considerations, see + https://github.com/sclorg/s2i-nodejs-container/blob/master/APP_VERSION/README.md. + imagestream_files: + - filename: nodejs-centos.json + latest: "20-ubi9" + distros: + - name: UBI 8 + app_versions: ["18", "18-minimal", "20", "20-minimal"] + + - name: UBI 9 + app_versions: ["18", "18-minimal", "20", "20-minimal"] + custom_tags: + - name: "18-ubi8-minimal" + distro: UBI 8 + app_version: "18-minimal" + - name: "20-ubi8-minimal" + distro: UBI 8 + app_version: "20-minimal" + - name: "18-ubi9-minimal" + distro: UBI 9 + app_version: "18-minimal" + - name: "20-ubi9-minimal" + distro: UBI 9 + app_version: "20-minimal" + + - filename: nodejs-rhel.json + latest: "20-ubi9" + distros: + - name: UBI 8 + app_versions: ["18", "18-minimal", "20", "20-minimal"] + + - name: UBI 9 + app_versions: ["18", "18-minimal", "20", "20-minimal"] + # these are non standard tags, maintained for backwards compatibility + custom_tags: + - name: "18-ubi8-minimal" + distro: UBI 8 + app_version: "18-minimal" + - name: "20-ubi8-minimal" + distro: UBI 8 + app_version: "20-minimal" + - name: "18-ubi9-minimal" + distro: UBI 9 + app_version: "18-minimal" + - name: "20-ubi9-minimal" + distro: UBI 9 + app_version: "20-minimal" + + - filename: nodejs-rhel-aarch64.json + latest: "20-ubi9" + distros: + - name: UBI 8 + app_versions: ["18", "18-minimal", "20", "20-minimal"] + + - name: UBI 9 + 
app_versions: ["18", "18-minimal", "20", "20-minimal"] + custom_tags: + - name: "18-ubi8-minimal" + distro: UBI 8 + app_version: "18-minimal" + - name: "20-ubi8-minimal" + distro: UBI 8 + app_version: "20-minimal" + - name: "18-ubi9-minimal" + distro: UBI 9 + app_version: "18-minimal" + - name: "20-ubi9-minimal" + distro: UBI 9 + app_version: "20-minimal" +... diff --git a/20/test/imagestreams/nodejs-centos.json b/20/test/imagestreams/nodejs-centos.json new file mode 100644 index 00000000..3398baab --- /dev/null +++ b/20/test/imagestreams/nodejs-centos.json @@ -0,0 +1,261 @@ +{ + "kind": "ImageStream", + "apiVersion": "image.openshift.io/v1", + "metadata": { + "name": "nodejs", + "annotations": { + "openshift.io/display-name": "Node.js" + } + }, + "spec": { + "tags": [ + { + "name": "18-ubi8", + "annotations": { + "openshift.io/display-name": "Node.js 18 (UBI 8)", + "openshift.io/provider-display-name": "Red Hat, Inc.", + "description": "Build and run Node.js 18 applications on UBI 8. For more information about using this builder image, including OpenShift considerations, see https://github.com/sclorg/s2i-nodejs-container/blob/master/18/README.md.", + "iconClass": "icon-nodejs", + "tags": "builder,nodejs", + "version": "18", + "sampleRepo": "https://github.com/sclorg/nodejs-ex.git" + }, + "from": { + "kind": "DockerImage", + "name": "registry.access.redhat.com/ubi8/nodejs-18:latest" + }, + "referencePolicy": { + "type": "Local" + } + }, + { + "name": "18-minimal-ubi8", + "annotations": { + "openshift.io/display-name": "Node.js 18-minimal (UBI 8)", + "openshift.io/provider-display-name": "Red Hat, Inc.", + "description": "Build and run Node.js 18-minimal applications on UBI 8. 
For more information about using this builder image, including OpenShift considerations, see https://github.com/sclorg/s2i-nodejs-container/blob/master/18-minimal/README.md.", + "iconClass": "icon-nodejs", + "tags": "builder,nodejs", + "version": "18-minimal", + "sampleRepo": "https://github.com/sclorg/nodejs-ex.git" + }, + "from": { + "kind": "DockerImage", + "name": "registry.access.redhat.com/ubi8/nodejs-18-minimal:latest" + }, + "referencePolicy": { + "type": "Local" + } + }, + { + "name": "20-ubi8", + "annotations": { + "openshift.io/display-name": "Node.js 20 (UBI 8)", + "openshift.io/provider-display-name": "Red Hat, Inc.", + "description": "Build and run Node.js 20 applications on UBI 8. For more information about using this builder image, including OpenShift considerations, see https://github.com/sclorg/s2i-nodejs-container/blob/master/20/README.md.", + "iconClass": "icon-nodejs", + "tags": "builder,nodejs", + "version": "20", + "sampleRepo": "https://github.com/sclorg/nodejs-ex.git" + }, + "from": { + "kind": "DockerImage", + "name": "registry.access.redhat.com/ubi8/nodejs-20:latest" + }, + "referencePolicy": { + "type": "Local" + } + }, + { + "name": "20-minimal-ubi8", + "annotations": { + "openshift.io/display-name": "Node.js 20-minimal (UBI 8)", + "openshift.io/provider-display-name": "Red Hat, Inc.", + "description": "Build and run Node.js 20-minimal applications on UBI 8. 
For more information about using this builder image, including OpenShift considerations, see https://github.com/sclorg/s2i-nodejs-container/blob/master/20-minimal/README.md.", + "iconClass": "icon-nodejs", + "tags": "builder,nodejs", + "version": "20-minimal", + "sampleRepo": "https://github.com/sclorg/nodejs-ex.git" + }, + "from": { + "kind": "DockerImage", + "name": "registry.access.redhat.com/ubi8/nodejs-20-minimal:latest" + }, + "referencePolicy": { + "type": "Local" + } + }, + { + "name": "18-ubi9", + "annotations": { + "openshift.io/display-name": "Node.js 18 (UBI 9)", + "openshift.io/provider-display-name": "Red Hat, Inc.", + "description": "Build and run Node.js 18 applications on UBI 9. For more information about using this builder image, including OpenShift considerations, see https://github.com/sclorg/s2i-nodejs-container/blob/master/18/README.md.", + "iconClass": "icon-nodejs", + "tags": "builder,nodejs", + "version": "18", + "sampleRepo": "https://github.com/sclorg/nodejs-ex.git" + }, + "from": { + "kind": "DockerImage", + "name": "registry.access.redhat.com/ubi9/nodejs-18:latest" + }, + "referencePolicy": { + "type": "Local" + } + }, + { + "name": "18-minimal-ubi9", + "annotations": { + "openshift.io/display-name": "Node.js 18-minimal (UBI 9)", + "openshift.io/provider-display-name": "Red Hat, Inc.", + "description": "Build and run Node.js 18-minimal applications on UBI 9. 
For more information about using this builder image, including OpenShift considerations, see https://github.com/sclorg/s2i-nodejs-container/blob/master/18-minimal/README.md.", + "iconClass": "icon-nodejs", + "tags": "builder,nodejs", + "version": "18-minimal", + "sampleRepo": "https://github.com/sclorg/nodejs-ex.git" + }, + "from": { + "kind": "DockerImage", + "name": "registry.access.redhat.com/ubi9/nodejs-18-minimal:latest" + }, + "referencePolicy": { + "type": "Local" + } + }, + { + "name": "20-ubi9", + "annotations": { + "openshift.io/display-name": "Node.js 20 (UBI 9)", + "openshift.io/provider-display-name": "Red Hat, Inc.", + "description": "Build and run Node.js 20 applications on UBI 9. For more information about using this builder image, including OpenShift considerations, see https://github.com/sclorg/s2i-nodejs-container/blob/master/20/README.md.", + "iconClass": "icon-nodejs", + "tags": "builder,nodejs", + "version": "20", + "sampleRepo": "https://github.com/sclorg/nodejs-ex.git" + }, + "from": { + "kind": "DockerImage", + "name": "registry.access.redhat.com/ubi9/nodejs-20:latest" + }, + "referencePolicy": { + "type": "Local" + } + }, + { + "name": "20-minimal-ubi9", + "annotations": { + "openshift.io/display-name": "Node.js 20-minimal (UBI 9)", + "openshift.io/provider-display-name": "Red Hat, Inc.", + "description": "Build and run Node.js 20-minimal applications on UBI 9. 
For more information about using this builder image, including OpenShift considerations, see https://github.com/sclorg/s2i-nodejs-container/blob/master/20-minimal/README.md.", + "iconClass": "icon-nodejs", + "tags": "builder,nodejs", + "version": "20-minimal", + "sampleRepo": "https://github.com/sclorg/nodejs-ex.git" + }, + "from": { + "kind": "DockerImage", + "name": "registry.access.redhat.com/ubi9/nodejs-20-minimal:latest" + }, + "referencePolicy": { + "type": "Local" + } + }, + { + "name": "18-ubi8-minimal", + "annotations": { + "openshift.io/display-name": "Node.js 18-minimal (UBI 8)", + "openshift.io/provider-display-name": "Red Hat, Inc.", + "description": "Build and run Node.js 18-minimal applications on UBI 8. For more information about using this builder image, including OpenShift considerations, see https://github.com/sclorg/s2i-nodejs-container/blob/master/18-minimal/README.md.", + "iconClass": "icon-nodejs", + "tags": "builder,nodejs", + "version": "18-minimal", + "sampleRepo": "https://github.com/sclorg/nodejs-ex.git" + }, + "from": { + "kind": "DockerImage", + "name": "registry.access.redhat.com/ubi8/nodejs-18-minimal:latest" + }, + "referencePolicy": { + "type": "Local" + } + }, + { + "name": "20-ubi8-minimal", + "annotations": { + "openshift.io/display-name": "Node.js 20-minimal (UBI 8)", + "openshift.io/provider-display-name": "Red Hat, Inc.", + "description": "Build and run Node.js 20-minimal applications on UBI 8. 
For more information about using this builder image, including OpenShift considerations, see https://github.com/sclorg/s2i-nodejs-container/blob/master/20-minimal/README.md.", + "iconClass": "icon-nodejs", + "tags": "builder,nodejs", + "version": "20-minimal", + "sampleRepo": "https://github.com/sclorg/nodejs-ex.git" + }, + "from": { + "kind": "DockerImage", + "name": "registry.access.redhat.com/ubi8/nodejs-20-minimal:latest" + }, + "referencePolicy": { + "type": "Local" + } + }, + { + "name": "18-ubi9-minimal", + "annotations": { + "openshift.io/display-name": "Node.js 18-minimal (UBI 9)", + "openshift.io/provider-display-name": "Red Hat, Inc.", + "description": "Build and run Node.js 18-minimal applications on UBI 9. For more information about using this builder image, including OpenShift considerations, see https://github.com/sclorg/s2i-nodejs-container/blob/master/18-minimal/README.md.", + "iconClass": "icon-nodejs", + "tags": "builder,nodejs", + "version": "18-minimal", + "sampleRepo": "https://github.com/sclorg/nodejs-ex.git" + }, + "from": { + "kind": "DockerImage", + "name": "registry.access.redhat.com/ubi9/nodejs-18-minimal:latest" + }, + "referencePolicy": { + "type": "Local" + } + }, + { + "name": "20-ubi9-minimal", + "annotations": { + "openshift.io/display-name": "Node.js 20-minimal (UBI 9)", + "openshift.io/provider-display-name": "Red Hat, Inc.", + "description": "Build and run Node.js 20-minimal applications on UBI 9. 
For more information about using this builder image, including OpenShift considerations, see https://github.com/sclorg/s2i-nodejs-container/blob/master/20-minimal/README.md.", + "iconClass": "icon-nodejs", + "tags": "builder,nodejs", + "version": "20-minimal", + "sampleRepo": "https://github.com/sclorg/nodejs-ex.git" + }, + "from": { + "kind": "DockerImage", + "name": "registry.access.redhat.com/ubi9/nodejs-20-minimal:latest" + }, + "referencePolicy": { + "type": "Local" + } + }, + { + "name": "latest", + "annotations": { + "openshift.io/display-name": "Node.js 20 (Latest)", + "openshift.io/provider-display-name": "Red Hat, Inc.", + "description": "Build and run Node.js 20 applications on UBI 9. For more information about using this builder image, including OpenShift considerations, see https://github.com/sclorg/s2i-nodejs-container/blob/master/20/README.md.\n\nWARNING: By selecting this tag, your application will automatically update to use the latest version available on OpenShift, including major version updates.\n", + "iconClass": "icon-nodejs", + "tags": "builder,nodejs", + "version": "20", + "sampleRepo": "https://github.com/sclorg/nodejs-ex.git" + }, + "from": { + "kind": "ImageStreamTag", + "name": "20-ubi9" + }, + "referencePolicy": { + "type": "Local" + } + } + ] + } +} diff --git a/20/test/imagestreams/nodejs-rhel-aarch64.json b/20/test/imagestreams/nodejs-rhel-aarch64.json new file mode 100644 index 00000000..02b1b0d4 --- /dev/null +++ b/20/test/imagestreams/nodejs-rhel-aarch64.json @@ -0,0 +1,261 @@ +{ + "kind": "ImageStream", + "apiVersion": "image.openshift.io/v1", + "metadata": { + "name": "nodejs", + "annotations": { + "openshift.io/display-name": "Node.js" + } + }, + "spec": { + "tags": [ + { + "name": "18-ubi8", + "annotations": { + "openshift.io/display-name": "Node.js 18 (UBI 8)", + "openshift.io/provider-display-name": "Red Hat, Inc.", + "description": "Build and run Node.js 18 applications on UBI 8. 
For more information about using this builder image, including OpenShift considerations, see https://github.com/sclorg/s2i-nodejs-container/blob/master/18/README.md.", + "iconClass": "icon-nodejs", + "tags": "builder,nodejs", + "version": "18", + "sampleRepo": "https://github.com/sclorg/nodejs-ex.git" + }, + "from": { + "kind": "DockerImage", + "name": "registry.redhat.io/ubi8/nodejs-18:latest" + }, + "referencePolicy": { + "type": "Local" + } + }, + { + "name": "18-minimal-ubi8", + "annotations": { + "openshift.io/display-name": "Node.js 18-minimal (UBI 8)", + "openshift.io/provider-display-name": "Red Hat, Inc.", + "description": "Build and run Node.js 18-minimal applications on UBI 8. For more information about using this builder image, including OpenShift considerations, see https://github.com/sclorg/s2i-nodejs-container/blob/master/18-minimal/README.md.", + "iconClass": "icon-nodejs", + "tags": "builder,nodejs", + "version": "18-minimal", + "sampleRepo": "https://github.com/sclorg/nodejs-ex.git" + }, + "from": { + "kind": "DockerImage", + "name": "registry.redhat.io/ubi8/nodejs-18-minimal:latest" + }, + "referencePolicy": { + "type": "Local" + } + }, + { + "name": "20-ubi8", + "annotations": { + "openshift.io/display-name": "Node.js 20 (UBI 8)", + "openshift.io/provider-display-name": "Red Hat, Inc.", + "description": "Build and run Node.js 20 applications on UBI 8. 
For more information about using this builder image, including OpenShift considerations, see https://github.com/sclorg/s2i-nodejs-container/blob/master/20/README.md.", + "iconClass": "icon-nodejs", + "tags": "builder,nodejs", + "version": "20", + "sampleRepo": "https://github.com/sclorg/nodejs-ex.git" + }, + "from": { + "kind": "DockerImage", + "name": "registry.redhat.io/ubi8/nodejs-20:latest" + }, + "referencePolicy": { + "type": "Local" + } + }, + { + "name": "20-minimal-ubi8", + "annotations": { + "openshift.io/display-name": "Node.js 20-minimal (UBI 8)", + "openshift.io/provider-display-name": "Red Hat, Inc.", + "description": "Build and run Node.js 20-minimal applications on UBI 8. For more information about using this builder image, including OpenShift considerations, see https://github.com/sclorg/s2i-nodejs-container/blob/master/20-minimal/README.md.", + "iconClass": "icon-nodejs", + "tags": "builder,nodejs", + "version": "20-minimal", + "sampleRepo": "https://github.com/sclorg/nodejs-ex.git" + }, + "from": { + "kind": "DockerImage", + "name": "registry.redhat.io/ubi8/nodejs-20-minimal:latest" + }, + "referencePolicy": { + "type": "Local" + } + }, + { + "name": "18-ubi9", + "annotations": { + "openshift.io/display-name": "Node.js 18 (UBI 9)", + "openshift.io/provider-display-name": "Red Hat, Inc.", + "description": "Build and run Node.js 18 applications on UBI 9. 
For more information about using this builder image, including OpenShift considerations, see https://github.com/sclorg/s2i-nodejs-container/blob/master/18/README.md.", + "iconClass": "icon-nodejs", + "tags": "builder,nodejs", + "version": "18", + "sampleRepo": "https://github.com/sclorg/nodejs-ex.git" + }, + "from": { + "kind": "DockerImage", + "name": "registry.redhat.io/ubi9/nodejs-18:latest" + }, + "referencePolicy": { + "type": "Local" + } + }, + { + "name": "18-minimal-ubi9", + "annotations": { + "openshift.io/display-name": "Node.js 18-minimal (UBI 9)", + "openshift.io/provider-display-name": "Red Hat, Inc.", + "description": "Build and run Node.js 18-minimal applications on UBI 9. For more information about using this builder image, including OpenShift considerations, see https://github.com/sclorg/s2i-nodejs-container/blob/master/18-minimal/README.md.", + "iconClass": "icon-nodejs", + "tags": "builder,nodejs", + "version": "18-minimal", + "sampleRepo": "https://github.com/sclorg/nodejs-ex.git" + }, + "from": { + "kind": "DockerImage", + "name": "registry.redhat.io/ubi9/nodejs-18-minimal:latest" + }, + "referencePolicy": { + "type": "Local" + } + }, + { + "name": "20-ubi9", + "annotations": { + "openshift.io/display-name": "Node.js 20 (UBI 9)", + "openshift.io/provider-display-name": "Red Hat, Inc.", + "description": "Build and run Node.js 20 applications on UBI 9. 
For more information about using this builder image, including OpenShift considerations, see https://github.com/sclorg/s2i-nodejs-container/blob/master/20/README.md.", + "iconClass": "icon-nodejs", + "tags": "builder,nodejs", + "version": "20", + "sampleRepo": "https://github.com/sclorg/nodejs-ex.git" + }, + "from": { + "kind": "DockerImage", + "name": "registry.redhat.io/ubi9/nodejs-20:latest" + }, + "referencePolicy": { + "type": "Local" + } + }, + { + "name": "20-minimal-ubi9", + "annotations": { + "openshift.io/display-name": "Node.js 20-minimal (UBI 9)", + "openshift.io/provider-display-name": "Red Hat, Inc.", + "description": "Build and run Node.js 20-minimal applications on UBI 9. For more information about using this builder image, including OpenShift considerations, see https://github.com/sclorg/s2i-nodejs-container/blob/master/20-minimal/README.md.", + "iconClass": "icon-nodejs", + "tags": "builder,nodejs", + "version": "20-minimal", + "sampleRepo": "https://github.com/sclorg/nodejs-ex.git" + }, + "from": { + "kind": "DockerImage", + "name": "registry.redhat.io/ubi9/nodejs-20-minimal:latest" + }, + "referencePolicy": { + "type": "Local" + } + }, + { + "name": "18-ubi8-minimal", + "annotations": { + "openshift.io/display-name": "Node.js 18-minimal (UBI 8)", + "openshift.io/provider-display-name": "Red Hat, Inc.", + "description": "Build and run Node.js 18-minimal applications on UBI 8. 
For more information about using this builder image, including OpenShift considerations, see https://github.com/sclorg/s2i-nodejs-container/blob/master/18-minimal/README.md.", + "iconClass": "icon-nodejs", + "tags": "builder,nodejs", + "version": "18-minimal", + "sampleRepo": "https://github.com/sclorg/nodejs-ex.git" + }, + "from": { + "kind": "DockerImage", + "name": "registry.redhat.io/ubi8/nodejs-18-minimal:latest" + }, + "referencePolicy": { + "type": "Local" + } + }, + { + "name": "20-ubi8-minimal", + "annotations": { + "openshift.io/display-name": "Node.js 20-minimal (UBI 8)", + "openshift.io/provider-display-name": "Red Hat, Inc.", + "description": "Build and run Node.js 20-minimal applications on UBI 8. For more information about using this builder image, including OpenShift considerations, see https://github.com/sclorg/s2i-nodejs-container/blob/master/20-minimal/README.md.", + "iconClass": "icon-nodejs", + "tags": "builder,nodejs", + "version": "20-minimal", + "sampleRepo": "https://github.com/sclorg/nodejs-ex.git" + }, + "from": { + "kind": "DockerImage", + "name": "registry.redhat.io/ubi8/nodejs-20-minimal:latest" + }, + "referencePolicy": { + "type": "Local" + } + }, + { + "name": "18-ubi9-minimal", + "annotations": { + "openshift.io/display-name": "Node.js 18-minimal (UBI 9)", + "openshift.io/provider-display-name": "Red Hat, Inc.", + "description": "Build and run Node.js 18-minimal applications on UBI 9. 
For more information about using this builder image, including OpenShift considerations, see https://github.com/sclorg/s2i-nodejs-container/blob/master/18-minimal/README.md.", + "iconClass": "icon-nodejs", + "tags": "builder,nodejs", + "version": "18-minimal", + "sampleRepo": "https://github.com/sclorg/nodejs-ex.git" + }, + "from": { + "kind": "DockerImage", + "name": "registry.redhat.io/ubi9/nodejs-18-minimal:latest" + }, + "referencePolicy": { + "type": "Local" + } + }, + { + "name": "20-ubi9-minimal", + "annotations": { + "openshift.io/display-name": "Node.js 20-minimal (UBI 9)", + "openshift.io/provider-display-name": "Red Hat, Inc.", + "description": "Build and run Node.js 20-minimal applications on UBI 9. For more information about using this builder image, including OpenShift considerations, see https://github.com/sclorg/s2i-nodejs-container/blob/master/20-minimal/README.md.", + "iconClass": "icon-nodejs", + "tags": "builder,nodejs", + "version": "20-minimal", + "sampleRepo": "https://github.com/sclorg/nodejs-ex.git" + }, + "from": { + "kind": "DockerImage", + "name": "registry.redhat.io/ubi9/nodejs-20-minimal:latest" + }, + "referencePolicy": { + "type": "Local" + } + }, + { + "name": "latest", + "annotations": { + "openshift.io/display-name": "Node.js 20 (Latest)", + "openshift.io/provider-display-name": "Red Hat, Inc.", + "description": "Build and run Node.js 20 applications on UBI 9. 
For more information about using this builder image, including OpenShift considerations, see https://github.com/sclorg/s2i-nodejs-container/blob/master/20/README.md.\n\nWARNING: By selecting this tag, your application will automatically update to use the latest version available on OpenShift, including major version updates.\n", + "iconClass": "icon-nodejs", + "tags": "builder,nodejs", + "version": "20", + "sampleRepo": "https://github.com/sclorg/nodejs-ex.git" + }, + "from": { + "kind": "ImageStreamTag", + "name": "20-ubi9" + }, + "referencePolicy": { + "type": "Local" + } + } + ] + } +} diff --git a/20/test/imagestreams/nodejs-rhel.json b/20/test/imagestreams/nodejs-rhel.json new file mode 100644 index 00000000..02b1b0d4 --- /dev/null +++ b/20/test/imagestreams/nodejs-rhel.json @@ -0,0 +1,261 @@ +{ + "kind": "ImageStream", + "apiVersion": "image.openshift.io/v1", + "metadata": { + "name": "nodejs", + "annotations": { + "openshift.io/display-name": "Node.js" + } + }, + "spec": { + "tags": [ + { + "name": "18-ubi8", + "annotations": { + "openshift.io/display-name": "Node.js 18 (UBI 8)", + "openshift.io/provider-display-name": "Red Hat, Inc.", + "description": "Build and run Node.js 18 applications on UBI 8. For more information about using this builder image, including OpenShift considerations, see https://github.com/sclorg/s2i-nodejs-container/blob/master/18/README.md.", + "iconClass": "icon-nodejs", + "tags": "builder,nodejs", + "version": "18", + "sampleRepo": "https://github.com/sclorg/nodejs-ex.git" + }, + "from": { + "kind": "DockerImage", + "name": "registry.redhat.io/ubi8/nodejs-18:latest" + }, + "referencePolicy": { + "type": "Local" + } + }, + { + "name": "18-minimal-ubi8", + "annotations": { + "openshift.io/display-name": "Node.js 18-minimal (UBI 8)", + "openshift.io/provider-display-name": "Red Hat, Inc.", + "description": "Build and run Node.js 18-minimal applications on UBI 8. 
For more information about using this builder image, including OpenShift considerations, see https://github.com/sclorg/s2i-nodejs-container/blob/master/18-minimal/README.md.", + "iconClass": "icon-nodejs", + "tags": "builder,nodejs", + "version": "18-minimal", + "sampleRepo": "https://github.com/sclorg/nodejs-ex.git" + }, + "from": { + "kind": "DockerImage", + "name": "registry.redhat.io/ubi8/nodejs-18-minimal:latest" + }, + "referencePolicy": { + "type": "Local" + } + }, + { + "name": "20-ubi8", + "annotations": { + "openshift.io/display-name": "Node.js 20 (UBI 8)", + "openshift.io/provider-display-name": "Red Hat, Inc.", + "description": "Build and run Node.js 20 applications on UBI 8. For more information about using this builder image, including OpenShift considerations, see https://github.com/sclorg/s2i-nodejs-container/blob/master/20/README.md.", + "iconClass": "icon-nodejs", + "tags": "builder,nodejs", + "version": "20", + "sampleRepo": "https://github.com/sclorg/nodejs-ex.git" + }, + "from": { + "kind": "DockerImage", + "name": "registry.redhat.io/ubi8/nodejs-20:latest" + }, + "referencePolicy": { + "type": "Local" + } + }, + { + "name": "20-minimal-ubi8", + "annotations": { + "openshift.io/display-name": "Node.js 20-minimal (UBI 8)", + "openshift.io/provider-display-name": "Red Hat, Inc.", + "description": "Build and run Node.js 20-minimal applications on UBI 8. 
For more information about using this builder image, including OpenShift considerations, see https://github.com/sclorg/s2i-nodejs-container/blob/master/20-minimal/README.md.", + "iconClass": "icon-nodejs", + "tags": "builder,nodejs", + "version": "20-minimal", + "sampleRepo": "https://github.com/sclorg/nodejs-ex.git" + }, + "from": { + "kind": "DockerImage", + "name": "registry.redhat.io/ubi8/nodejs-20-minimal:latest" + }, + "referencePolicy": { + "type": "Local" + } + }, + { + "name": "18-ubi9", + "annotations": { + "openshift.io/display-name": "Node.js 18 (UBI 9)", + "openshift.io/provider-display-name": "Red Hat, Inc.", + "description": "Build and run Node.js 18 applications on UBI 9. For more information about using this builder image, including OpenShift considerations, see https://github.com/sclorg/s2i-nodejs-container/blob/master/18/README.md.", + "iconClass": "icon-nodejs", + "tags": "builder,nodejs", + "version": "18", + "sampleRepo": "https://github.com/sclorg/nodejs-ex.git" + }, + "from": { + "kind": "DockerImage", + "name": "registry.redhat.io/ubi9/nodejs-18:latest" + }, + "referencePolicy": { + "type": "Local" + } + }, + { + "name": "18-minimal-ubi9", + "annotations": { + "openshift.io/display-name": "Node.js 18-minimal (UBI 9)", + "openshift.io/provider-display-name": "Red Hat, Inc.", + "description": "Build and run Node.js 18-minimal applications on UBI 9. 
For more information about using this builder image, including OpenShift considerations, see https://github.com/sclorg/s2i-nodejs-container/blob/master/18-minimal/README.md.", + "iconClass": "icon-nodejs", + "tags": "builder,nodejs", + "version": "18-minimal", + "sampleRepo": "https://github.com/sclorg/nodejs-ex.git" + }, + "from": { + "kind": "DockerImage", + "name": "registry.redhat.io/ubi9/nodejs-18-minimal:latest" + }, + "referencePolicy": { + "type": "Local" + } + }, + { + "name": "20-ubi9", + "annotations": { + "openshift.io/display-name": "Node.js 20 (UBI 9)", + "openshift.io/provider-display-name": "Red Hat, Inc.", + "description": "Build and run Node.js 20 applications on UBI 9. For more information about using this builder image, including OpenShift considerations, see https://github.com/sclorg/s2i-nodejs-container/blob/master/20/README.md.", + "iconClass": "icon-nodejs", + "tags": "builder,nodejs", + "version": "20", + "sampleRepo": "https://github.com/sclorg/nodejs-ex.git" + }, + "from": { + "kind": "DockerImage", + "name": "registry.redhat.io/ubi9/nodejs-20:latest" + }, + "referencePolicy": { + "type": "Local" + } + }, + { + "name": "20-minimal-ubi9", + "annotations": { + "openshift.io/display-name": "Node.js 20-minimal (UBI 9)", + "openshift.io/provider-display-name": "Red Hat, Inc.", + "description": "Build and run Node.js 20-minimal applications on UBI 9. 
For more information about using this builder image, including OpenShift considerations, see https://github.com/sclorg/s2i-nodejs-container/blob/master/20-minimal/README.md.", + "iconClass": "icon-nodejs", + "tags": "builder,nodejs", + "version": "20-minimal", + "sampleRepo": "https://github.com/sclorg/nodejs-ex.git" + }, + "from": { + "kind": "DockerImage", + "name": "registry.redhat.io/ubi9/nodejs-20-minimal:latest" + }, + "referencePolicy": { + "type": "Local" + } + }, + { + "name": "18-ubi8-minimal", + "annotations": { + "openshift.io/display-name": "Node.js 18-minimal (UBI 8)", + "openshift.io/provider-display-name": "Red Hat, Inc.", + "description": "Build and run Node.js 18-minimal applications on UBI 8. For more information about using this builder image, including OpenShift considerations, see https://github.com/sclorg/s2i-nodejs-container/blob/master/18-minimal/README.md.", + "iconClass": "icon-nodejs", + "tags": "builder,nodejs", + "version": "18-minimal", + "sampleRepo": "https://github.com/sclorg/nodejs-ex.git" + }, + "from": { + "kind": "DockerImage", + "name": "registry.redhat.io/ubi8/nodejs-18-minimal:latest" + }, + "referencePolicy": { + "type": "Local" + } + }, + { + "name": "20-ubi8-minimal", + "annotations": { + "openshift.io/display-name": "Node.js 20-minimal (UBI 8)", + "openshift.io/provider-display-name": "Red Hat, Inc.", + "description": "Build and run Node.js 20-minimal applications on UBI 8. 
For more information about using this builder image, including OpenShift considerations, see https://github.com/sclorg/s2i-nodejs-container/blob/master/20-minimal/README.md.", + "iconClass": "icon-nodejs", + "tags": "builder,nodejs", + "version": "20-minimal", + "sampleRepo": "https://github.com/sclorg/nodejs-ex.git" + }, + "from": { + "kind": "DockerImage", + "name": "registry.redhat.io/ubi8/nodejs-20-minimal:latest" + }, + "referencePolicy": { + "type": "Local" + } + }, + { + "name": "18-ubi9-minimal", + "annotations": { + "openshift.io/display-name": "Node.js 18-minimal (UBI 9)", + "openshift.io/provider-display-name": "Red Hat, Inc.", + "description": "Build and run Node.js 18-minimal applications on UBI 9. For more information about using this builder image, including OpenShift considerations, see https://github.com/sclorg/s2i-nodejs-container/blob/master/18-minimal/README.md.", + "iconClass": "icon-nodejs", + "tags": "builder,nodejs", + "version": "18-minimal", + "sampleRepo": "https://github.com/sclorg/nodejs-ex.git" + }, + "from": { + "kind": "DockerImage", + "name": "registry.redhat.io/ubi9/nodejs-18-minimal:latest" + }, + "referencePolicy": { + "type": "Local" + } + }, + { + "name": "20-ubi9-minimal", + "annotations": { + "openshift.io/display-name": "Node.js 20-minimal (UBI 9)", + "openshift.io/provider-display-name": "Red Hat, Inc.", + "description": "Build and run Node.js 20-minimal applications on UBI 9. 
For more information about using this builder image, including OpenShift considerations, see https://github.com/sclorg/s2i-nodejs-container/blob/master/20-minimal/README.md.", + "iconClass": "icon-nodejs", + "tags": "builder,nodejs", + "version": "20-minimal", + "sampleRepo": "https://github.com/sclorg/nodejs-ex.git" + }, + "from": { + "kind": "DockerImage", + "name": "registry.redhat.io/ubi9/nodejs-20-minimal:latest" + }, + "referencePolicy": { + "type": "Local" + } + }, + { + "name": "latest", + "annotations": { + "openshift.io/display-name": "Node.js 20 (Latest)", + "openshift.io/provider-display-name": "Red Hat, Inc.", + "description": "Build and run Node.js 20 applications on UBI 9. For more information about using this builder image, including OpenShift considerations, see https://github.com/sclorg/s2i-nodejs-container/blob/master/20/README.md.\n\nWARNING: By selecting this tag, your application will automatically update to use the latest version available on OpenShift, including major version updates.\n", + "iconClass": "icon-nodejs", + "tags": "builder,nodejs", + "version": "20", + "sampleRepo": "https://github.com/sclorg/nodejs-ex.git" + }, + "from": { + "kind": "ImageStreamTag", + "name": "20-ubi9" + }, + "referencePolicy": { + "type": "Local" + } + } + ] + } +} diff --git a/20/test/run b/20/test/run new file mode 100755 index 00000000..832f1257 --- /dev/null +++ b/20/test/run @@ -0,0 +1,116 @@ +#!/bin/bash +# +# The 'run' performs a simple test that verifies that STI image. +# The main focus here is to exercise the STI scripts. +# +# IMAGE_NAME specifies a name of the candidate image used for testing. +# The image has to be available before this script is executed. 
+ +THISDIR=$(dirname ${BASH_SOURCE[0]}) + +TEST_LIST_APP="\ +test_run_app_application +test_s2i_usage +test_scl_usage +test_connection +test_docker_run_usage +test_npm_functionality +test_check_build_using_dockerfile +test_nodemon_removed +test_npm_cache_cleared +test_npm_tmp_cleared +kill_test_application +test_dev_mode_true_development +test_dev_mode_false_production +" + +TEST_LIST_BINARY="\ +test_run_binary_application +" + +TEST_LIST_NODE_ENV="\ +test_run_app_application +test_connection +test_nodemon_present +test_npm_cache_exists +kill_test_application +test_dev_mode_true_development +test_dev_mode_false_development +" + +TEST_LIST_DEV_MODE="\ +test_run_app_application +test_connection +test_nodemon_present +test_npm_cache_exists +kill_test_application +test_dev_mode_true_development +test_dev_mode_false_production +" + +TEST_LIST_HW="\ +test_safe_logging +test_run_hw_application +test_incremental_build +test_build_express_webapp +" + +source "${THISDIR}/test-lib.sh" +source "${THISDIR}/test-lib-nodejs.sh" + +test -n $IMAGE_NAME \ + -a -n $VERSION + +test_dir="$(readlink -f $(dirname ${BASH_SOURCE[0]}))" +image_dir="$(readlink -f ${test_dir}/..)" + +# Since we built the candidate image locally, we don't want S2I attempt to pull +# it from Docker hub +s2i_args="--pull-policy=never " + +# TODO: This should be part of the image metadata +test_port=8080 + +# Common git configuration +readonly -A gitconfig=( + [user.name]="builder" + [user.email]="build@localhost" + [commit.gpgsign]="false" +) + +[[ -n "$DEBUG" ]] && set -x + +ct_init +cid_file=$CID_FILE_DIR/$(mktemp -u -p . --suffix=.cid) + +# Build the application image twice to ensure the 'save-artifacts' and +# 'restore-artifacts' scripts are working properly +prepare app +check_prep_result $? app || exit +echo "Testing the production image build" +run_s2i_build +ct_check_testcase_result $? 
+ +TEST_SET=${TESTS:-$TEST_LIST_APP} ct_run_tests_from_testset "app" + +echo "Testing the development image build: s2i build -e \"NODE_ENV=development\")" +run_s2i_build "-e NODE_ENV=development" +ct_check_testcase_result $? + +TEST_SET=${TESTS:-$TEST_LIST_NODE_ENV} ct_run_tests_from_testset "node_env_development" + +echo "Testing the development image build: s2i build -e \"DEV_MODE=true\")" +run_s2i_build "-e DEV_MODE=true" +ct_check_testcase_result $? + +TEST_SET=${TESTS:-$TEST_LIST_DEV_MODE} ct_run_tests_from_testset "dev_mode" + +echo "Testing proxy safe logging..." +prepare hw +check_prep_result $? hw || exit +run_s2i_build_proxy http://user.password@0.0.0.0:8000 https://user.password@0.0.0.0:8000 > /tmp/build-log 2>&1 +ct_check_testcase_result $? + +TEST_SET=${TESTS:-$TEST_LIST_HW} ct_run_tests_from_testset "hw" + +TEST_SET=${TESTS:-$TEST_LIST_BINARY} ct_run_tests_from_testset "binary" diff --git a/20/test/run-minimal b/20/test/run-minimal new file mode 100755 index 00000000..1d808bcc --- /dev/null +++ b/20/test/run-minimal @@ -0,0 +1,132 @@ +#!/bin/bash +# +# The 'run' performs a simple test that verifies that STI image. +# The main focus here is to exercise the STI scripts. +# +# IMAGE_NAME specifies a name of the candidate image used for testing. +# The image has to be available before this script is executed. +# +# EXPRESS_REVISION specifies which express.js branch or tag should be tested; +# by default it uses the latest released version as reported by +# `npm show express version`. 
+ +THISDIR=$(dirname ${BASH_SOURCE[0]}) + +TEST_LIST_APP="\ +test_run_app_application +test_s2i_usage +test_scl_usage +test_connection +test_docker_run_usage +test_check_build_using_dockerfile +test_nodemon_removed +test_npm_cache_cleared +test_npm_tmp_cleared +kill_test_application +test_dev_mode_true_development +test_dev_mode_false_production +" + +TEST_LIST_NODE_ENV="\ +test_run_app_application +test_connection +test_nodemon_present +test_npm_cache_exists +kill_test_application +test_dev_mode_true_development +test_dev_mode_false_development +" + +TEST_LIST_DEV_MODE="\ +test_run_app_application +test_connection +test_nodemon_present +test_npm_cache_exists +kill_test_application +test_dev_mode_true_development +test_dev_mode_false_production +" + +TEST_LIST_HW="\ +test_safe_logging +test_run_hw_application +" +source "${THISDIR}/test-lib.sh" +source "${THISDIR}/test-lib-nodejs.sh" + + +test -n $IMAGE_NAME \ + -a -n $VERSION +readonly EXPRESS_REVISION="${EXPRESS_REVISION:-$(docker run --rm "${IMAGE_NAME}" -- npm show express version)}" + +test_dir="$(readlink -f $(dirname ${BASH_SOURCE[0]}))" +image_dir="$(readlink -f ${test_dir}/..)" + +# Since we built the candidate image locally, we don't want S2I attempt to pull +# it from Docker hub +s2i_args="--pull-policy=never " + +# TODO: This should be part of the image metadata +test_port=8080 + +# Common git configuration +readonly -A gitconfig=( + [user.name]="builder" + [user.email]="build@localhost" + [commit.gpgsign]="false" +) + + +if [ "$DEBUG" != "" ]; then + set -x +fi + +ct_init +cid_file=$CID_FILE_DIR/$(mktemp -u -p . --suffix=.cid) + +# Build the application image twice to ensure the 'save-artifacts' and +# 'restore-artifacts' scripts are working properly +prepare app +check_prep_result $? 
app || exit +echo "Testing the production image build" + +FULL_IMAGE=${FULL_IMAGE:-${IMAGE_NAME//-minimal:1/}} +# if FULL_IMAGE is not found, try to pull it, if that does not work, try to pull ubi9 one +if [ -z "$(docker images -q "$FULL_IMAGE")" ] ; then + echo "Image $FULL_IMAGE not found, trying to pull $FULL_IMAGE from registry" + docker pull "$FULL_IMAGE" +fi +if [ -z "$(docker images -q "$FULL_IMAGE")" ] ; then + echo "Image $FULL_IMAGE still not found, trying to use ubi9/nodejs-${VERSION//-minimal/}:latest and pull it" + FULL_IMAGE="ubi9/nodejs-${VERSION//-minimal/}:latest" + docker pull "$FULL_IMAGE" +fi +if [ -z "$(docker images -q "$FULL_IMAGE")" ] ; then + echo "Image $FULL_IMAGE could not be found nor pulled, giving up." + exit 1 +fi + +prepare_minimal_build testapp + +TEST_SET=${TESTS:-$TEST_LIST_APP} ct_run_tests_from_testset "app" + +echo "Testing proxy safe logging..." +prepare hw +check_prep_result $? hw || exit +prepare_minimal_build testhw + +TEST_SET=${TESTS:-$TEST_LIST_HW} ct_run_tests_from_testset "hw" + +# Start of testing regular s2i build for minimal image + +echo "Testing the development image build: s2i build -e \"NODE_ENV=development\")" +run_s2i_build "-e NODE_ENV=development" +check_result $? + +TEST_SET=${TESTS:-$TEST_LIST_NODE_ENV} ct_run_tests_from_testset "node_env_development" + +echo "Testing the development image build: s2i build -e \"DEV_MODE=true\")" +run_s2i_build "-e DEV_MODE=true" +check_result $? + +TEST_SET=${TESTS:-$TEST_LIST_DEV_MODE} ct_run_tests_from_testset "dev_mode" diff --git a/20/test/run-openshift-pytest b/20/test/run-openshift-pytest new file mode 100755 index 00000000..80885f16 --- /dev/null +++ b/20/test/run-openshift-pytest @@ -0,0 +1,11 @@ +#!/bin/bash +# +# IMAGE_NAME specifies a name of the candidate image used for testing. +# The image has to be available before this script is executed. +# VERSION specifies the major version of the MariaDB in format of X.Y +# OS specifies RHEL version (e.g. 
OS=rhel7) +# + +THISDIR=$(dirname ${BASH_SOURCE[0]}) + +cd "${THISDIR}" && python3.12 -m pytest -s -rA --showlocals -vv test_*.py diff --git a/20/test/run-openshift-remote-cluster b/20/test/run-openshift-remote-cluster new file mode 100755 index 00000000..136fec0f --- /dev/null +++ b/20/test/run-openshift-remote-cluster @@ -0,0 +1,53 @@ +#!/bin/bash +# +# Test the NodeJS image in OpenShift. +# +# IMAGE_NAME specifies a name of the candidate image used for testing. +# The image has to be available before this script is executed. +# + +THISDIR=$(dirname ${BASH_SOURCE[0]}) + +source "${THISDIR}/test-lib.sh" +source "${THISDIR}/test-lib-openshift.sh" +source "${THISDIR}/test-lib-nodejs.sh" +source "${THISDIR}/test-lib-remote-openshift.sh" + +TEST_LIST="\ +test_nodejs_s2i_container +test_nodejs_s2i_app_ex +test_nodejs_s2i_templates +test_nodejs_imagestream +test_latest_imagestreams +" + +# change the branch to a different value if a new change in the example +# app needs to be tested +BRANCH_TO_TEST=master + +trap ct_os_cleanup EXIT SIGINT + +ct_os_set_ocp4 || exit $OC_ERR + +ct_os_check_compulsory_vars || exit $OC_ERR + +ct_os_check_login || exit $OC_ERR + +ct_os_tag_image_for_cvp "nodejs" + +ct_pull_or_import_postgresql || exit $OC_ERR + +set -u + +test -n "${IMAGE_NAME-}" || false 'make sure $IMAGE_NAME is defined' +test -n "${VERSION-}" || false 'make sure $VERSION is defined' + +# For testing on OpenShift 4 we use internal registry +export CT_OCP4_TEST=true +export CT_SKIP_UPLOAD_IMAGE=true + +TEST_SUMMARY='' +TEST_SET=${TESTS:-$TEST_LIST} ct_run_tests_from_testset "openshift-remote-cluster" + + +# vim: set tabstop=2:shiftwidth=2:expandtab: diff --git a/20/test/run-upstream b/20/test/run-upstream new file mode 100755 index 00000000..5ff06591 --- /dev/null +++ b/20/test/run-upstream @@ -0,0 +1,67 @@ +#!/bin/bash +# +# IMAGE_NAME specifies a name of the candidate image used for testing. +# The image has to be available before this script is executed. 
+# +# For each client package test, XXX_REVISION specifies which +# branch or tag should be tested; by default it uses the latest +# released version as reported by `npm show XXX version` where +# XXX is the name of the npm package. +# +THISDIR="$(dirname ${BASH_SOURCE[0]})" +source "${THISDIR}/test-lib.sh" +source "${THISDIR}/test-lib-nodejs.sh" + +test -n "$IMAGE_NAME" -a -n "$VERSION" + +# define the client npm packages to be tested. For +# each entry XXX in the list a corresponding value for +# XXX_REVISION and XXX_REPO must be defined below +TEST_LIST_CLIENTS="\ +test_client_express +test_client_prom +test_client_opossum +test_client_kube +test_client_faas +test_client_cloudevents +test_client_fastify +test_client_pino +" + +readonly EXPRESS_REVISION="${EXPRESS_REVISION:-$(docker run --rm "${IMAGE_NAME}" -- npm show express version)}" +readonly EXPRESS_REPO="https://github.com/expressjs/express.git" +readonly PINO_REVISION=v"${PINO_REVISION:-$(docker run --rm "${IMAGE_NAME}" -- npm show pino version)}" +readonly PINO_REPO="https://github.com/pinojs/pino.git" +readonly PROMCLIENT_REVISION=v"${PROMCLIENT_REVISION:-$(docker run --rm "${IMAGE_NAME}" -- npm show prom-client version)}" +readonly PROMCLIENT_REPO="https://github.com/siimon/prom-client.git" +readonly OPOSSUM_REVISION="v${OPOSSUM_REVISION:-$(docker run --rm "${IMAGE_NAME}" -- npm show opossum version)}" +readonly OPOSSUM_REPO="https://github.com/nodeshift/opossum.git" +readonly KUBESERVICEBINDINGS_REVISION="v${KUBESERVICEBINDINGS_REVISION:-$(docker run --rm "${IMAGE_NAME}" -- npm show kube-service-bindings version)}" +readonly KUBESERVICEBINDINGS_REPO="https://github.com/nodeshift/kube-service-bindings.git" +readonly FAASJSRUNTIME_REVISION="v${FAASJSRUNTIME_REVISION:-$(docker run --rm "${IMAGE_NAME}" -- npm show faas-js-runtime version)}" +readonly FAASJSRUNTIME_REPO="https://github.com/nodeshift/faas-js-runtime.git" +readonly CLOUDEVENTS_REVISION="v${CLOUDEVENTS_REVISION:-$(docker run --rm 
"${IMAGE_NAME}" -- npm show cloudevents version)}" +readonly CLOUDEVENTS_REPO="https://github.com/cloudevents/sdk-javascript.git" +readonly FASTIFY_REVISION="v${FASTIFY_REVISION:-$(docker run --rm "${IMAGE_NAME}" -- npm show fastify version)}" +readonly FASTIFY_REPO="https://github.com/fastify/fastify.git" + +# Since we built the candidate image locally, we don't want S2I attempt to pull +# it from a registry +s2i_args="--pull-policy=never " + +# TODO: This should be part of the image metadata +test_port=8080 + +# Common git configuration +readonly -A gitconfig=( + [user.name]="builder" + [user.email]="build@localhost" + [commit.gpgsign]="false" +) + +[[ -n "$DEBUG" ]] && set -x + +ct_init +cid_file=$CID_FILE_DIR/$(mktemp -u -p . --suffix=.cid) + +TEST_SET=${TESTS:-$TEST_LIST_CLIENTS} ct_run_tests_from_testset "clients" diff --git a/20/test/show_all_imagestreams.py b/20/test/show_all_imagestreams.py new file mode 100755 index 00000000..d8022764 --- /dev/null +++ b/20/test/show_all_imagestreams.py @@ -0,0 +1,58 @@ +#!/bin/env python3 + +# MIT License +# +# Copyright (c) 2018-2019 Red Hat, Inc. + +# Permission is hereby granted, free of charge, to any person obtaining a copy +# of this software and associated documentation files (the "Software"), to deal +# in the Software without restriction, including without limitation the rights +# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +# copies of the Software, and to permit persons to whom the Software is +# furnished to do so, subject to the following conditions: +# +# The above copyright notice and this permission notice shall be included in all +# copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE +# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +# SOFTWARE. + +import json +import os +import sys + +from pathlib import Path +from typing import Dict, Any + +IMAGESTREAMS_DIR: str = "imagestreams" + + +class ShowAllImageStreams(object): + def load_json_file(self, filename: Path) -> Any: + with open(str(filename)) as f: + data = json.load(f) + isinstance(data, Dict) + return data + + def show_all_imagestreams(self) -> int: + p = Path(".") + json_files = p.glob(f"{IMAGESTREAMS_DIR}/*.json") + if not json_files: + print(f"No json files present in {IMAGESTREAMS_DIR}.") + return 0 + for f in json_files: + json_dict = self.load_json_file(f) + print(f"Tags in the image stream {f}:") + for tag in json_dict["spec"]["tags"]: + print(f"- {tag['name']} -> {tag['from']['name']}") + return 0 + + +if __name__ == "__main__": + isc = ShowAllImageStreams() + isc.show_all_imagestreams() diff --git a/20/test/test-app/README.md b/20/test/test-app/README.md new file mode 100644 index 00000000..edd29eb6 --- /dev/null +++ b/20/test/test-app/README.md @@ -0,0 +1,4 @@ +node-echo +========= + +node.js echo server, returns request data to response diff --git a/20/test/test-app/iisnode.yml b/20/test/test-app/iisnode.yml new file mode 100644 index 00000000..52b201b0 --- /dev/null +++ b/20/test/test-app/iisnode.yml @@ -0,0 +1,27 @@ +# For documentation see https://github.com/tjanczuk/iisnode/blob/master/src/samples/configuration/iisnode.yml + +# loggingEnabled: false +# debuggingEnabled: false +# devErrorsEnabled: false +node_env: production +# nodeProcessCountPerApplication: 1 +# maxConcurrentRequestsPerProcess: 1024 +# maxNamedPipeConnectionRetry: 24 +# namedPipeConnectionRetryDelay: 250 +# maxNamedPipeConnectionPoolSize: 512 +# maxNamedPipePooledConnectionAge: 30000 +# 
asyncCompletionThreadCount: 0 +# initialRequestBufferSize: 4096 +# maxRequestBufferSize: 65536 +watchedFiles: iisnode.yml;node_modules\*;*.js +# uncFileChangesPollingInterval: 5000 +# gracefulShutdownTimeout: 60000 +# logDirectoryNameSuffix: logs +# debuggerPortRange: 5058-6058 +# debuggerPathSegment: debug +# maxLogFileSizeInKB: 128 +# appendToExistingLog: false +# logFileFlushInterval: 5000 +# flushResponse: false +# enableXFF: false +# promoteServerVars: \ No newline at end of file diff --git a/20/test/test-app/package.json b/20/test/test-app/package.json new file mode 100644 index 00000000..7ec3c6bf --- /dev/null +++ b/20/test/test-app/package.json @@ -0,0 +1,32 @@ +{ + "name": "node-echo", + "version": "0.0.1", + "description": "node-echo", + "main": "server.js", + "dependencies": { + }, + "devDependencies": { + "nodemon": "*" + }, + "engine": { + "node": "*", + "npm": "*" + }, + "scripts": { + "dev": "nodemon --ignore node_modules/ server.js", + "start": "node server.js" + }, + "repository": { + "type": "git", + "url": "http://github.com/bettiolo/node-echo.git" + }, + "keywords": [ + "Echo" + ], + "author": "Marco Bettiolo ", + "license": "", + "bugs": { + "url": "http://github.com/bettiolo/node-echo/issues" + }, + "homepage": "http://apilb.com" +} diff --git a/20/test/test-app/server.js b/20/test/test-app/server.js new file mode 100644 index 00000000..f7c25b17 --- /dev/null +++ b/20/test/test-app/server.js @@ -0,0 +1,50 @@ +var util = require('util'); +var http = require('http'); +var url = require('url'); +var qs = require('querystring'); +var os = require('os') +var port = process.env.PORT || process.env.port || process.env.OPENSHIFT_NODEJS_PORT || 8080; +var ip = process.env.OPENSHIFT_NODEJS_IP || '0.0.0.0'; +var nodeEnv = process.env.NODE_ENV || 'unknown'; +var server = http.createServer(function (req, res) { + var url_parts = url.parse(req.url, true); + + var body = ''; + req.on('data', function (data) { + body += data; + }); + req.on('end', function () 
{ + var formattedBody = qs.parse(body); + + res.writeHead(200, {'Content-Type': 'text/plain'}); + + res.write('This is a node.js echo service\n'); + res.write('Host: ' + req.headers.host + '\n'); + res.write('\n'); + res.write('node.js Production Mode: ' + (nodeEnv == 'production' ? 'yes' : 'no') + '\n'); + res.write('\n'); + res.write('HTTP/' + req.httpVersion +'\n'); + res.write('Request headers:\n'); + res.write(util.inspect(req.headers, null) + '\n'); + res.write('Request query:\n'); + res.write(util.inspect(url_parts.query, null) + '\n'); + res.write('Request body:\n'); + res.write(util.inspect(formattedBody, null) + '\n'); + res.write('\n'); + res.write('Host: ' + os.hostname() + '\n'); + res.write('OS Type: ' + os.type() + '\n'); + res.write('OS Platform: ' + os.platform() + '\n'); + res.write('OS Arch: ' + os.arch() + '\n'); + res.write('OS Release: ' + os.release() + '\n'); + res.write('OS Uptime: ' + os.uptime() + '\n'); + res.write('OS Free memory: ' + os.freemem() / 1024 / 1024 + 'mb\n'); + res.write('OS Total memory: ' + os.totalmem() / 1024 / 1024 + 'mb\n'); + res.write('OS CPU count: ' + os.cpus().length + '\n'); + res.write('OS CPU model: ' + os.cpus()[0].model + '\n'); + res.write('OS CPU speed: ' + os.cpus()[0].speed + 'mhz\n'); + res.end('\n'); + + }); +}); +server.listen(port); +console.log('Server running on ' + ip + ':' + port); diff --git a/20/test/test-app/web.config b/20/test/test-app/web.config new file mode 100644 index 00000000..a6b44a34 --- /dev/null +++ b/20/test/test-app/web.config @@ -0,0 +1,17 @@ + + + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/20/test/test-binary/hw.js b/20/test/test-binary/hw.js new file mode 100644 index 00000000..1b7e0de7 --- /dev/null +++ b/20/test/test-binary/hw.js @@ -0,0 +1,11 @@ +var http = require('http'); +var ip = process.env.OPENSHIFT_NODEJS_IP || '0.0.0.0'; +var port = process.env.PORT || process.env.port || process.env.OPENSHIFT_NODEJS_PORT || 8080; + +var server = 
http.createServer(function(req, res) { + res.writeHead(200); + res.end('Hello World!'); +}); +server.listen(port); + +console.log("Server running on " + ip + ":" + port); diff --git a/20/test/test-binary/package.json b/20/test/test-binary/package.json new file mode 100644 index 00000000..15998a8d --- /dev/null +++ b/20/test/test-binary/package.json @@ -0,0 +1,17 @@ +{ + "name": "hello-world", + "version": "0.0.1", + "main": "hw.js", + "engine": { + "node": "*", + "npm": "*" + }, + "dependencies": { + "node-rdkafka": "*" + }, + "scripts": { + "start": "node hw.js", + "dev": "node hw.js" + }, + "license": "" +} diff --git a/20/test/test-express-webapp/app.js b/20/test/test-express-webapp/app.js new file mode 100644 index 00000000..662bcc92 --- /dev/null +++ b/20/test/test-express-webapp/app.js @@ -0,0 +1,41 @@ +var createError = require('http-errors'); +var express = require('express'); +var path = require('path'); +var cookieParser = require('cookie-parser'); +var logger = require('morgan'); + +var indexRouter = require('./routes/index'); +var usersRouter = require('./routes/users'); + +var app = express(); + +// view engine setup +app.set('views', path.join(__dirname, 'views')); +app.set('view engine', 'jade'); + +app.use(logger('dev')); +app.use(express.json()); +app.use(express.urlencoded({ extended: false })); +app.use(cookieParser()); +app.use(express.static(path.join(__dirname, 'public'))); + +app.use('/', indexRouter); +app.use('/users', usersRouter); + +// catch 404 and forward to error handler +app.use(function(req, res, next) { + next(createError(404)); +}); + +// error handler +app.use(function(err, req, res, next) { + // set locals, only providing error in development + res.locals.message = err.message; + res.locals.error = req.app.get('env') === 'development' ? 
err : {}; + + // render the error page + res.status(err.status || 500); + res.render('error'); +}); + +module.exports = app; diff --git a/20/test/test-express-webapp/bin/www b/20/test/test-express-webapp/bin/www new file mode 100755 index 00000000..e647f1e4 --- /dev/null +++ b/20/test/test-express-webapp/bin/www @@ -0,0 +1,90 @@ +#!/usr/bin/env node + +/** + * Module dependencies. + */ + +var app = require('../app'); +var debug = require('debug')('test-express-webapp:server'); +var http = require('http'); + +/** + * Get port from environment and store in Express. + */ + +var port = normalizePort(process.env.PORT || '8080'); +app.set('port', port); + +/** + * Create HTTP server. + */ + +var server = http.createServer(app); + +/** + * Listen on provided port, on all network interfaces. + */ + +server.listen(port); +server.on('error', onError); +server.on('listening', onListening); + +/** + * Normalize a port into a number, string, or false. + */ + +function normalizePort(val) { + var port = parseInt(val, 10); + + if (isNaN(port)) { + // named pipe + return val; + } + + if (port >= 0) { + // port number + return port; + } + + return false; +} + +/** + * Event listener for HTTP server "error" event. + */ + +function onError(error) { + if (error.syscall !== 'listen') { + throw error; + } + + var bind = typeof port === 'string' + ? 'Pipe ' + port + : 'Port ' + port; + + // handle specific listen errors with friendly messages + switch (error.code) { + case 'EACCES': + console.error(bind + ' requires elevated privileges'); + process.exit(1); + break; + case 'EADDRINUSE': + console.error(bind + ' is already in use'); + process.exit(1); + break; + default: + throw error; + } +} + +/** + * Event listener for HTTP server "listening" event. + */ + +function onListening() { + var addr = server.address(); + var bind = typeof addr === 'string' + ? 
'pipe ' + addr + : 'port ' + addr.port; + debug('Listening on ' + bind); +} diff --git a/20/test/test-express-webapp/package.json b/20/test/test-express-webapp/package.json new file mode 100644 index 00000000..a44ba495 --- /dev/null +++ b/20/test/test-express-webapp/package.json @@ -0,0 +1,16 @@ +{ + "name": "test-express-webapp", + "version": "0.0.1", + "private": true, + "scripts": { + "start": "node ./bin/www" + }, + "dependencies": { + "cookie-parser": "*", + "debug": "*", + "express": "*", + "http-errors": "*", + "jade": "*", + "morgan": "*" + } +} diff --git a/20/test/test-express-webapp/public/stylesheets/style.css b/20/test/test-express-webapp/public/stylesheets/style.css new file mode 100644 index 00000000..9453385b --- /dev/null +++ b/20/test/test-express-webapp/public/stylesheets/style.css @@ -0,0 +1,8 @@ +body { + padding: 50px; + font: 14px "Lucida Grande", Helvetica, Arial, sans-serif; +} + +a { + color: #00B7FF; +} diff --git a/20/test/test-express-webapp/routes/index.js b/20/test/test-express-webapp/routes/index.js new file mode 100644 index 00000000..f18fd928 --- /dev/null +++ b/20/test/test-express-webapp/routes/index.js @@ -0,0 +1,9 @@ +var express = require('express'); +var router = express.Router(); + +/* GET home page. */ +router.get('/', function(req, res, next) { + res.render('index', { title: 'Express Testing Application' }); +}); + +module.exports = router; diff --git a/20/test/test-express-webapp/routes/users.js b/20/test/test-express-webapp/routes/users.js new file mode 100644 index 00000000..623e4302 --- /dev/null +++ b/20/test/test-express-webapp/routes/users.js @@ -0,0 +1,9 @@ +var express = require('express'); +var router = express.Router(); + +/* GET users listing. 
*/ +router.get('/', function(req, res, next) { + res.send('respond with a resource'); +}); + +module.exports = router; diff --git a/20/test/test-express-webapp/views/error.jade b/20/test/test-express-webapp/views/error.jade new file mode 100644 index 00000000..51ec12c6 --- /dev/null +++ b/20/test/test-express-webapp/views/error.jade @@ -0,0 +1,6 @@ +extends layout + +block content + h1= message + h2= error.status + pre #{error.stack} diff --git a/20/test/test-express-webapp/views/index.jade b/20/test/test-express-webapp/views/index.jade new file mode 100644 index 00000000..3d63b9a0 --- /dev/null +++ b/20/test/test-express-webapp/views/index.jade @@ -0,0 +1,5 @@ +extends layout + +block content + h1= title + p Welcome to #{title} diff --git a/20/test/test-express-webapp/views/layout.jade b/20/test/test-express-webapp/views/layout.jade new file mode 100644 index 00000000..15af079b --- /dev/null +++ b/20/test/test-express-webapp/views/layout.jade @@ -0,0 +1,7 @@ +doctype html +html + head + title= title + link(rel='stylesheet', href='/stylesheets/style.css') + body + block content diff --git a/20/test/test-hw/hw.js b/20/test/test-hw/hw.js new file mode 100644 index 00000000..1b7e0de7 --- /dev/null +++ b/20/test/test-hw/hw.js @@ -0,0 +1,11 @@ +var http = require('http'); +var ip = process.env.OPENSHIFT_NODEJS_IP || '0.0.0.0'; +var port = process.env.PORT || process.env.port || process.env.OPENSHIFT_NODEJS_PORT || 8080; + +var server = http.createServer(function(req, res) { + res.writeHead(200); + res.end('Hello World!'); +}); +server.listen(port); + +console.log("Server running on " + ip + ":" + port); diff --git a/20/test/test-hw/package.json b/20/test/test-hw/package.json new file mode 100644 index 00000000..17b2475e --- /dev/null +++ b/20/test/test-hw/package.json @@ -0,0 +1,14 @@ +{ + "name": "hello-world", + "version": "0.0.1", + "main": "hw.js", + "engine": { + "node": "*", + "npm": "*" + }, + "scripts": { + "start": "node hw.js", + "dev": "node hw.js" + }, + 
"license": "" +} diff --git a/20/test/test-incremental/README.md b/20/test/test-incremental/README.md new file mode 100644 index 00000000..edd29eb6 --- /dev/null +++ b/20/test/test-incremental/README.md @@ -0,0 +1,4 @@ +node-echo +========= + +node.js echo server, returns request data to response diff --git a/20/test/test-incremental/iisnode.yml b/20/test/test-incremental/iisnode.yml new file mode 100644 index 00000000..52b201b0 --- /dev/null +++ b/20/test/test-incremental/iisnode.yml @@ -0,0 +1,27 @@ +# For documentation see https://github.com/tjanczuk/iisnode/blob/master/src/samples/configuration/iisnode.yml + +# loggingEnabled: false +# debuggingEnabled: false +# devErrorsEnabled: false +node_env: production +# nodeProcessCountPerApplication: 1 +# maxConcurrentRequestsPerProcess: 1024 +# maxNamedPipeConnectionRetry: 24 +# namedPipeConnectionRetryDelay: 250 +# maxNamedPipeConnectionPoolSize: 512 +# maxNamedPipePooledConnectionAge: 30000 +# asyncCompletionThreadCount: 0 +# initialRequestBufferSize: 4096 +# maxRequestBufferSize: 65536 +watchedFiles: iisnode.yml;node_modules\*;*.js +# uncFileChangesPollingInterval: 5000 +# gracefulShutdownTimeout: 60000 +# logDirectoryNameSuffix: logs +# debuggerPortRange: 5058-6058 +# debuggerPathSegment: debug +# maxLogFileSizeInKB: 128 +# appendToExistingLog: false +# logFileFlushInterval: 5000 +# flushResponse: false +# enableXFF: false +# promoteServerVars: \ No newline at end of file diff --git a/20/test/test-incremental/package.json b/20/test/test-incremental/package.json new file mode 100644 index 00000000..2384b8a4 --- /dev/null +++ b/20/test/test-incremental/package.json @@ -0,0 +1,33 @@ +{ + "name": "node-echo", + "version": "0.0.1", + "description": "node-echo", + "main": "server.js", + "dependencies": { + "yarn": "*" + }, + "devDependencies": { + "nodemon": "*" + }, + "engine": { + "node": "*", + "npm": "*" + }, + "scripts": { + "dev": "nodemon --ignore node_modules/ server.js", + "start": "node server.js" + }, + 
"repository": { + "type": "git", + "url": "http://github.com/bettiolo/node-echo.git" + }, + "keywords": [ + "Echo" + ], + "author": "Marco Bettiolo ", + "license": "", + "bugs": { + "url": "http://github.com/bettiolo/node-echo/issues" + }, + "homepage": "http://apilb.com" +} diff --git a/20/test/test-incremental/server.js b/20/test/test-incremental/server.js new file mode 100644 index 00000000..f7c25b17 --- /dev/null +++ b/20/test/test-incremental/server.js @@ -0,0 +1,50 @@ +var util = require('util'); +var http = require('http'); +var url = require('url'); +var qs = require('querystring'); +var os = require('os') +var port = process.env.PORT || process.env.port || process.env.OPENSHIFT_NODEJS_PORT || 8080; +var ip = process.env.OPENSHIFT_NODEJS_IP || '0.0.0.0'; +var nodeEnv = process.env.NODE_ENV || 'unknown'; +var server = http.createServer(function (req, res) { + var url_parts = url.parse(req.url, true); + + var body = ''; + req.on('data', function (data) { + body += data; + }); + req.on('end', function () { + var formattedBody = qs.parse(body); + + res.writeHead(200, {'Content-Type': 'text/plain'}); + + res.write('This is a node.js echo service\n'); + res.write('Host: ' + req.headers.host + '\n'); + res.write('\n'); + res.write('node.js Production Mode: ' + (nodeEnv == 'production' ? 
'yes' : 'no') + '\n'); + res.write('\n'); + res.write('HTTP/' + req.httpVersion +'\n'); + res.write('Request headers:\n'); + res.write(util.inspect(req.headers, null) + '\n'); + res.write('Request query:\n'); + res.write(util.inspect(url_parts.query, null) + '\n'); + res.write('Request body:\n'); + res.write(util.inspect(formattedBody, null) + '\n'); + res.write('\n'); + res.write('Host: ' + os.hostname() + '\n'); + res.write('OS Type: ' + os.type() + '\n'); + res.write('OS Platform: ' + os.platform() + '\n'); + res.write('OS Arch: ' + os.arch() + '\n'); + res.write('OS Release: ' + os.release() + '\n'); + res.write('OS Uptime: ' + os.uptime() + '\n'); + res.write('OS Free memory: ' + os.freemem() / 1024 / 1024 + 'mb\n'); + res.write('OS Total memory: ' + os.totalmem() / 1024 / 1024 + 'mb\n'); + res.write('OS CPU count: ' + os.cpus().length + '\n'); + res.write('OS CPU model: ' + os.cpus()[0].model + '\n'); + res.write('OS CPU speed: ' + os.cpus()[0].speed + 'mhz\n'); + res.end('\n'); + + }); +}); +server.listen(port); +console.log('Server running on ' + ip + ':' + port); diff --git a/20/test/test-incremental/web.config b/20/test/test-incremental/web.config new file mode 100644 index 00000000..a6b44a34 --- /dev/null +++ b/20/test/test-incremental/web.config @@ -0,0 +1,17 @@ + + + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/20/test/test-lib-nodejs.sh b/20/test/test-lib-nodejs.sh new file mode 100644 index 00000000..90d8a6bd --- /dev/null +++ b/20/test/test-lib-nodejs.sh @@ -0,0 +1,608 @@ +#!/bin/bash +# +# Functions for tests for the Node.js image. +# +# IMAGE_NAME specifies a name of the candidate image used for testing. +# The image has to be available before this script is executed. 
+# + +THISDIR=$(dirname ${BASH_SOURCE[0]}) + +source "${THISDIR}/test-lib.sh" +source "${THISDIR}/test-lib-openshift.sh" + +test_dir="$(readlink -f $(dirname ${BASH_SOURCE[0]}))" + +info() { + echo -e "\n\e[1m[INFO] $@...\e[0m\n" +} + +check_prep_result() { + if [ $1 -ne 0 ]; then + ct_update_test_result "[FAILED]" "$2" "preparation" + TESTSUITE_RESULT=1 + return $1 + fi +} + +image_exists() { + docker inspect $1 &>/dev/null +} + +container_exists() { + image_exists $(cat $cid_file) +} + +container_ip() { + docker inspect --format="{{ .NetworkSettings.IPAddress }}" $(cat $cid_file) +} + +container_logs() { + docker logs $(cat $cid_file) +} + +run_s2i_build() { + ct_s2i_build_as_df file://${test_dir}/test-app ${IMAGE_NAME} ${IMAGE_NAME}-testapp ${s2i_args} $(ct_build_s2i_npm_variables) $1 +} + +run_s2i_build_proxy() { + ct_s2i_build_as_df file://${test_dir}/test-hw ${IMAGE_NAME} ${IMAGE_NAME}-testhw ${s2i_args} $(ct_build_s2i_npm_variables) -e HTTP_PROXY=$1 -e http_proxy=$1 -e HTTPS_PROXY=$2 -e https_proxy=$2 +} + +run_s2i_build_client() { + ct_s2i_build_as_df_build_args \ + "file://${test_dir}/$1" "${IMAGE_NAME}" "${IMAGE_NAME}-$1" "--ulimit nofile=4096:4096" \ + ${s2i_args} \ + $(ct_build_s2i_npm_variables) -e NODE_ENV=development +} + +run_s2i_build_binary() { + ct_s2i_build_as_df file://${test_dir}/test-binary ${IMAGE_NAME} ${IMAGE_NAME}-testbinary ${s2i_args} $(ct_build_s2i_npm_variables) $1 +} + +run_s2i_multistage_build() { + ct_s2i_multistage_build file://${test_dir}/test-app ${FULL_IMAGE} ${IMAGE_NAME} ${IMAGE_NAME}-testapp $(ct_build_s2i_npm_variables) +} + +prepare_dummy_git_repo() { + git init + for key in "${!gitconfig[@]}"; do + git config --local "$key" "${gitconfig[$key]}" + done + git add --all + git commit -m "Sample commit" +} + +prepare_client_repo() { + git clone \ + --config advice.detachedHead="false" \ + --branch "$3" --depth 1 \ + "$2" "$1" + pushd "$1" >/dev/null || return + for key in "${!gitconfig[@]}"; do + git config --local "$key" 
"${gitconfig[$key]}" + done + popd >/dev/null || return +} + +prepare_minimal_build() { + suffix=$1 + # Build the app using the full assemble-capable image + case "$suffix" in + testapp) + run_s2i_multistage_build #>/tmp/build-log 2>&1 + ;; + testhw) + IMAGE_NAME=$FULL_IMAGE run_s2i_build_proxy http://user.password@0.0.0.0:8000 https://user.password@0.0.0.0:8000 >/tmp/build-log 2>&1 + # Get the application from the assembled image and into the minimal + tempdir=$(mktemp -d) + chown 1001:0 "$tempdir" + docker run -u 0 --rm -ti -v "$tempdir:$tempdir:Z" "$FULL_IMAGE-$suffix" bash -c "cp -ar /opt/app-root/src $tempdir" + pushd "$tempdir" >/dev/null || return + cat <Dockerfile +FROM $IMAGE_NAME +ADD src/* /opt/app-root/src +CMD /usr/libexec/s2i/run +EOF + # Check if CA autority is present on host and add it into Dockerfile + [ -f "$(full_ca_file_path)" ] && cat <>Dockerfile +USER 0 +RUN cd /etc/pki/ca-trust/source/anchors && update-ca-trust extract +USER 1001 +EOF + docker build -t "$IMAGE_NAME-$suffix" $(ct_build_s2i_npm_variables | grep -o -e '\(-v\)[[:space:]]\.*\S*') . + popd >/dev/null || return + ;; + *) + echo "Please specify a valid test application" + return 1 + ;; + esac + +} + +prepare() { + if ! image_exists ${IMAGE_NAME}; then + echo "ERROR: The image ${IMAGE_NAME} must exist before this script is executed." + return 1 + fi + + case "$1" in + # TODO: STI build require the application is a valid 'GIT' repository, we + # should remove this restriction in the future when a file:// is used. 
+ app|hw|express-webapp|binary) + pushd "${test_dir}/test-${1}" >/dev/null + prepare_dummy_git_repo + popd >/dev/null + ;; + *) + if [[ "$TEST_LIST_CLIENTS" == *"${test_case}"* ]]; + then + PREFIX=$1 + PREFIX=${PREFIX//-/} + REPO="${PREFIX^^}"_REPO + REVISION="${PREFIX^^}"_REVISION + prepare_client_repo "${test_dir}/$1" "${!REPO}" "${!REVISION}" + else + echo "Please specify a valid test application" + return 1 + fi + ;; + esac +} + +run_test_application() { + case "$1" in + app|hw|express-webapp|binary) + cid_file=$CID_FILE_DIR/$(mktemp -u -p . --suffix=.cid) + docker run -d --user=100001 $(ct_mount_ca_file) --rm --cidfile=${cid_file} $2 ${IMAGE_NAME}-test$1 + ;; + *) + echo "No such test application" + return 1 + ;; + esac +} + +run_client_test_suite() { + cid_file=$CID_FILE_DIR/$(mktemp -u -p . --suffix=.cid) + local cmd="npm test" + # Skip style check tests + [ "$1" == "prom-client" ] && cmd="sed -i.bak 's/&& npm run check-prettier //g' package.json && $cmd" + docker run --user=100001 $(ct_mount_ca_file) --rm --cidfile=${cid_file} ${IMAGE_NAME}-$1 bash -c "$cmd" +} + +kill_test_application() { + docker kill $(cat $cid_file) + rm $cid_file +} + +wait_for_cid() { + local max_attempts=20 + local sleep_time=1 + local attempt=1 + local result=1 + while [ $attempt -le $max_attempts ]; do + [ -f $cid_file ] && [ -s $cid_file ] && break + echo "Waiting for container start..." + attempt=$(( $attempt + 1 )) + sleep $sleep_time + done +} + +test_s2i_usage() { + echo "Testing 's2i usage'..." + ct_s2i_usage ${IMAGE_NAME} ${s2i_args} &>/dev/null + ct_check_testcase_result $? +} + +test_docker_run_usage() { + echo "Testing 'docker run' usage..." + docker run --rm ${IMAGE_NAME} &>/dev/null + ct_check_testcase_result $? +} + +test_connection() { + echo "Testing HTTP connection..." 
+ local max_attempts=10 + local sleep_time=1 + local attempt=1 + local result=1 + while [ $attempt -le $max_attempts ]; do + echo "Sending GET request to http://$(container_ip):${test_port}/" + response_code=$(curl -s -w %{http_code} -o /dev/null http://$(container_ip):${test_port}/) + status=$? + if [ $status -eq 0 ]; then + if [ $response_code -eq 200 ]; then + result=0 + fi + break + fi + attempt=$(( $attempt + 1 )) + sleep $sleep_time + done + return $result +} + +scl_usage() { + # Verify the 'usage' script is working properly when running the base image with 's2i usage ...' + local run_cmd="$1" + local expected="$2" + + echo "Testing the image SCL enable ..." + out=$(docker run --rm ${IMAGE_NAME} /bin/bash -c "${run_cmd}") + if ! echo "${out}" | grep -q "${expected}"; then + echo "ERROR[/bin/bash -c "${run_cmd}"] Expected '${expected}', got '${out}'" + return 1 + fi + out=$(docker exec $(cat ${cid_file}) /bin/bash -c "${run_cmd}" 2>&1) + if ! echo "${out}" | grep -q "${expected}"; then + echo "ERROR[exec /bin/bash -c "${run_cmd}"] Expected '${expected}', got '${out}'" + return 1 + fi + out=$(docker exec $(cat ${cid_file}) /bin/sh -ic "${run_cmd}" 2>&1) + if ! echo "${out}" | grep -q "${expected}"; then + echo "ERROR[exec /bin/sh -ic "${run_cmd}"] Expected '${expected}', got '${out}'" + return 1 + fi +} +function test_scl_usage() { + scl_usage "node --version" "v${VERSION//-minimal/}." + ct_check_testcase_result $? 
+} + +validate_default_value() { + local label=$1 + + IFS=':' read -a label_vals <<< $(docker inspect -f "{{index .Config.Labels \"$label\"}}" ${IMAGE_NAME}) + label_var=${label_vals[0]} + default_label_val=${label_vals[1]} + + actual_label_val=$(docker run --rm $IMAGE_NAME /bin/bash -c "echo $"$label_var) + + if [ "$actual_label_val" != "$default_label_val" ]; then + echo "ERROR default value for $label with environment variable $label_var; Expected $default_label_val, got $actual_label_val" + return 1 + fi +} + +# Gets the NODE_ENV environment variable from the container. +get_node_env_from_container() { + local dev_mode="$1" + local node_env="$2" + + IFS=':' read -a label_val <<< $(docker inspect -f '{{index .Config.Labels "com.redhat.dev-mode"}}' $IMAGE_NAME) + dev_mode_label_var="${label_val[0]}" + + echo $(docker run --rm --env $dev_mode_label_var=$dev_mode --env NODE_ENV=$node_env $IMAGE_NAME /bin/bash -c 'echo "$NODE_ENV"') +} + +# Ensures that a docker container run with '--env NODE_ENV=$current_val' produces a NODE_ENV value of $expected when +# DEV_MODE=dev_mode. +validate_node_env() { + local current_val="$1" + local dev_mode_val="$2" + local expected="$3" + + actual=$(get_node_env_from_container "$dev_mode_val" "$current_val") + if [ "$actual" != "$expected" ]; then + echo "ERROR default value for NODE_ENV when development mode is $dev_mode_val; should be $expected but is $actual" + return 1 + fi +} + +test_dev_mode() { + local app=$1 + local dev_mode=$2 + local node_env=$3 + + echo "Testing $app DEV_MODE=$dev_mode NODE_ENV=$node_env" + + run_test_application $app "-e DEV_MODE=$dev_mode" + wait_for_cid + + test_connection + ct_check_testcase_result $? + + logs=$(container_logs) + echo ${logs} | grep -q DEV_MODE=$dev_mode + ct_check_testcase_result $? + echo ${logs} | grep -q DEBUG_PORT=5858 + ct_check_testcase_result $? + echo ${logs} | grep -q NODE_ENV=$node_env + ct_check_testcase_result $? 
+ + kill_test_application +} + +test_incremental_build() { + npm_variables=$(ct_build_s2i_npm_variables) + build_log1=$(ct_s2i_build_as_df file://${test_dir}/test-incremental ${IMAGE_NAME} ${IMAGE_NAME}-testapp ${s2i_args} ${npm_variables}) + ct_check_testcase_result $? + build_log2=$(ct_s2i_build_as_df file://${test_dir}/test-incremental ${IMAGE_NAME} ${IMAGE_NAME}-testapp ${s2i_args} ${npm_variables} --incremental) + ct_check_testcase_result $? + if [ "$VERSION" == "6" ]; then + # Different npm output for version 6 + if echo "$build_log2" | grep -e "\-\- yarn@[0-9\.]*"; then + echo "ERROR Incremental build failed: yarn package is getting installed in incremental build" + ct_check_testcase_result 1 + fi + else + first=$(echo "$build_log1" | grep -o -e "added [0-9]* packages" | awk '{ print $2 }') + second=$(echo "$build_log2" | grep -o -e "added [0-9]* packages" | awk '{ print $2 }') + if [ "$first" == "$second" ]; then + echo "ERROR Incremental build failed: both builds installed $first packages" + ct_check_testcase_result 1 + fi + fi + +} + + +# test express webapp +run_s2i_build_express_webapp() { + local result + prepare express-webapp + check_prep_result $? express-webapp || return + ct_s2i_build_as_df file://${test_dir}/test-express-webapp ${IMAGE_NAME} ${IMAGE_NAME}-testexpress-webapp ${s2i_args} $(ct_build_s2i_npm_variables) + run_test_application express-webapp + wait_for_cid + ct_test_response http://$(container_ip):${test_port} 200 'Welcome to Express Testing Application' + result=$? + kill_test_application + return $result +} + +function test_build_express_webapp() { + echo "Running express webapp test" + run_s2i_build_express_webapp + ct_check_testcase_result $? +} + +function test_running_client_js { + echo "Running $1 test suite" + prepare "$1" + check_prep_result $? $1 || return + run_s2i_build_client "$1" + ct_check_testcase_result $? + run_client_test_suite "$1" + ct_check_testcase_result $? 
+} + +function test_client_express() { + echo "Running express client test" + test_running_client_js express +} + +function test_client_pino() { + echo "Running pino client test" + test_running_client_js pino +} + +function test_client_prom() { + echo "Running prom-client test" + test_running_client_js prom-client +} + +function test_client_opossum() { + echo "Running opossum client test" + test_running_client_js opossum +} + +function test_client_kube() { + echo "Running kube-service-bindings client test" + test_running_client_js kube-service-bindings +} + +function test_client_faas() { + echo "Running faas-js-runtime client test" + test_running_client_js faas-js-runtime +} + +function test_client_cloudevents() { + echo "Running CloudEvents client test" + test_running_client_js cloudevents +} +function test_client_fastify() { + if [[ "${VERSION}" == *"minimal"* ]]; then + VERSION=$(echo "${VERSION}" | cut -d "-" -f 1) + fi + if [[ "$VERSION" == "18" ]]; then + if [ "$OS" == "rhel8" ] || [ "$OS" == "rhel9" ]; then + echo "Fastify is not supported in $VERSION and rhel8 and rhel9" + return + fi + fi + echo "Running fastify client test" + test_running_client_js fastify +} + +function test_check_build_using_dockerfile() { + info "Check building using a Dockerfile" + ct_test_app_dockerfile ${THISDIR}/examples/from-dockerfile/Dockerfile 'https://github.com/sclorg/nodejs-ex.git' 'Node.js Crud Application' app-src "--ulimit nofile=4096:4096" + ct_check_testcase_result $? + ct_test_app_dockerfile ${THISDIR}/examples/from-dockerfile/Dockerfile.s2i 'https://github.com/sclorg/nodejs-ex.git' 'Node.js Crud Application' app-src "--ulimit nofile=4096:4096" + ct_check_testcase_result $? +} +function test_npm_functionality() { + echo "Testing npm availability" + ct_npm_works + ct_check_testcase_result $? 
+} + +function test_nodemon_removed() { + # Test that the development dependencies (nodemon) have been removed (npm prune) + devdep=$(docker run --rm ${IMAGE_NAME}-testapp /bin/bash -c "! test -d ./node_modules/nodemon") + ct_check_testcase_result "$?" +} + +function test_nodemon_present() { + # Test that the development dependencies (nodemon) have been removed (npm prune) + devdep=$(docker run --rm ${IMAGE_NAME}-testapp /bin/bash -c "test -d ./node_modules/nodemon") + ct_check_testcase_result "$?" +} + + +function test_npm_cache_cleared() { + # Test that the npm cache has been cleared + cache_loc=$(docker run --rm ${IMAGE_NAME}-testapp /bin/bash -c "npm config get cache") + devdep=$(docker run --rm ${IMAGE_NAME}-testapp /bin/bash -c "! test -d $cache_loc") + ct_check_testcase_result "$?" +} + +function test_npm_cache_exists() { + # Test that the npm cache has been cleared + devdep=$(docker run --rm ${IMAGE_NAME}-testapp /bin/bash -c "test -d \$(npm config get cache)") + ct_check_testcase_result "$?" +} + +function test_npm_tmp_cleared() { + # Test that the npm tmp has been cleared + devdep=$(docker run --rm ${IMAGE_NAME}-testapp /bin/bash -c "! ls \$(npm config get tmp)/npm-* 2>/dev/null") + ct_check_testcase_result "$?" 
+} + +function test_dev_mode_false_production() { + # DEV_MODE=false NODE_ENV=production + test_dev_mode app false production +} + +function test_dev_mode_true_development() { + # DEV_MODE=true NODE_ENV=development + test_dev_mode app true development +} + +function test_dev_mode_false_development() { + # DEV_MODE=false NODE_ENV=development + test_dev_mode app false development +} + +function test_run_app_application() { + # Verify that the HTTP connection can be established to test application container + run_test_application app + # Wait for the container to write it's CID file + wait_for_cid +} + +function test_run_hw_application() { + # Verify that the HTTP connection can be established to test application container + run_test_application hw + # Wait for the container to write it's CID file + wait_for_cid + ct_check_testcase_result $? + kill_test_application +} + +function test_run_binary_application() { + # Test is suppressed because of https://github.com/Blizzard/node-rdkafka/issues/910 + # The newest version of node-rdkafka works only with gcc-8 and higher + # On RHEL7 and CentOS7 is gcc-4.8 + prepare binary + check_prep_result $? binary || return + run_s2i_build_binary + ct_check_testcase_result $? 
+ # Verify that the HTTP connection can be established to test application container + run_test_application binary + # Wait for the container to write it's CID file + wait_for_cid + kill_test_application +} + +function test_safe_logging() { + if [[ $(grep redacted /tmp/build-log | wc -l) -eq 4 ]]; then + grep redacted /tmp/build-log + ct_check_testcase_result 0 + else + echo "Some proxy log-in credentials were left in log file" + grep Setting /tmp/build-log + ct_check_testcase_result 1 + fi +} + + +function ct_pull_or_import_postgresql() { + postgresql_image="quay.io/sclorg/postgresql-15-c9s" + image_short="postgresql" + image_tag="postgresql:15-c9s" + # Variable CVP is set by CVP pipeline + if [ "${CVP:-0}" -eq "0" ]; then + # In case of container or OpenShift 4 tests + # Pull image before going through tests + # Exit in case of failure, because postgresql container is mandatory + ct_pull_image "${postgresql_image}" "true" + else + # Import postgresql-15-c9s image before running tests on CVP + oc import-image "${image_short}:latest" --from="${postgresql_image}:latest" --insecure=true --confirm + # Tag postgresql image to "postgresql:10" which is expected by test suite + oc tag "${image_short}:latest" "${image_tag}" + fi +} + +# Check the imagestream +function test_nodejs_imagestream() { + local ret_val=0 + if [[ "${VERSION}" == *"minimal"* ]]; then + VERSION=$(echo "${VERSION}" | cut -d "-" -f 1) + fi + ct_os_test_image_stream_quickstart \ + "${THISDIR}/imagestreams/nodejs-${OS%[0-9]*}.json" \ + "https://raw.githubusercontent.com/sclorg/nodejs-ex/${BRANCH_TO_TEST}/openshift/templates/nodejs-postgresql-persistent.json" \ + "${IMAGE_NAME}" \ + 'nodejs' \ + "Node.js Crud Application" \ + 8080 http 200 \ + "-p SOURCE_REPOSITORY_REF=${BRANCH_TO_TEST} -p SOURCE_REPOSITORY_URL=https://github.com/sclorg/nodejs-ex.git -p NODEJS_VERSION=${VERSION} -p NAME=nodejs-testing + -p DATABASE_USER=testu \ + -p DATABASE_PASSWORD=testpwd \ + -p DATABASE_ADMIN_PASSWORD=testadminpwd" \ 
+ "quay.io/sclorg/postgresql-15-c9s|postgresql:15-c9s"|| ret_val=1 +} + +function test_nodejs_s2i_container() { + ct_os_test_s2i_app "${IMAGE_NAME}" \ + "https://github.com/sclorg/s2i-nodejs-container.git" \ + "test/test-app" \ + "This is a node.js echo service" +} + +function test_nodejs_s2i_app_ex() { + ct_os_test_s2i_app "${IMAGE_NAME}" \ + "https://github.com/sclorg/nodejs-ex.git" \ + "." \ + "Node.js Crud Application" +} + +function test_nodejs_s2i_templates() { + local ret_val=0 + if [[ "${VERSION}" == *"minimal"* ]]; then + VERSION=$(echo "${VERSION}" | cut -d "-" -f 1) + fi + # TODO + # MongoDB is not supported at all. + # Let's disable it or replace it with mariadb + ct_os_test_template_app "${IMAGE_NAME}" \ + "https://raw.githubusercontent.com/sclorg/nodejs-ex/${BRANCH_TO_TEST}/openshift/templates/nodejs-postgresql-persistent.json" \ + nodejs \ + "Node.js Crud Application" \ + 8080 http 200 \ + "-p SOURCE_REPOSITORY_REF=${BRANCH_TO_TEST} -p SOURCE_REPOSITORY_URL=https://github.com/sclorg/nodejs-ex.git -p NODEJS_VERSION=${VERSION} -p NAME=nodejs-testing + -p DATABASE_USER=testu \ + -p DATABASE_PASSWORD=testpwd \ + -p DATABASE_ADMIN_PASSWORD=testadminpwd" \ + "quay.io/sclorg/postgresql-15-c9s|postgresql:15-c9s" || ret_val=1 + + return $ret_val +} + +function test_latest_imagestreams() { + local result=1 + # Switch to root directory of a container + pushd "${test_dir}/.." >/dev/null || return 1 + ct_check_latest_imagestreams + result=$? 
+ popd >/dev/null || return 1 + return $result +} + +# vim: set tabstop=2:shiftwidth=2:expandtab: + diff --git a/20/test/test-lib-openshift.sh b/20/test/test-lib-openshift.sh new file mode 100644 index 00000000..d65d3a1d --- /dev/null +++ b/20/test/test-lib-openshift.sh @@ -0,0 +1,1206 @@ +# shellcheck disable=SC2148 +if [ -z "${sourced_test_lib_openshift:-}" ]; then + sourced_test_lib_openshift=1 +else + return 0 +fi + +# shellcheck shell=bash +# some functions are used from test-lib.sh, that is usually in the same dir +# shellcheck source=/dev/null +source "$(dirname "${BASH_SOURCE[0]}")"/test-lib.sh + +# Set of functions for testing docker images in OpenShift using 'oc' command + +# A variable containing the overall test result +# TESTSUITE_RESULT=0 +# And the following trap must be set, in the beginning of the test script: +# trap ct_os_cleanup EXIT SIGINT +TESTSUITE_RESULT=0 + +function ct_os_cleanup() { + local exit_code=$? + echo "${TEST_SUMMARY:-}" + if [ "$TESTSUITE_RESULT" -ne 0 ] || [ "$exit_code" -ne 0 ]; then + # shellcheck disable=SC2153 + echo "OpenShift tests for ${IMAGE_NAME} failed." + exit 1 + else + # shellcheck disable=SC2153 + echo "OpenShift tests for ${IMAGE_NAME} succeeded." + exit 0 + fi +} + +# ct_os_check_compulsory_vars +# --------------------------- +# Check the compulsory variables: +# * IMAGE_NAME specifies a name of the candidate image used for testing. +# * VERSION specifies the major version of the MariaDB in format of X.Y +# * OS specifies RHEL version (e.g. 
OS=rhel8) +function ct_os_check_compulsory_vars() { + # shellcheck disable=SC2016 + test -n "${IMAGE_NAME-}" || ( echo 'make sure $IMAGE_NAME is defined' >&2 ; exit 1) + # shellcheck disable=SC2016 + test -n "${VERSION-}" || ( echo 'make sure $VERSION is defined' >&2 ; exit 1) + # shellcheck disable=SC2016 + test -n "${OS-}" || ( echo 'make sure $OS is defined' >&2 ; exit 1) +} + +# ct_os_get_status +# -------------------- +# Returns status of all objects to make debugging easier. +function ct_os_get_status() { + oc get all + oc status + oc status --suggest +} + +# ct_os_print_logs +# -------------------- +# Returns status of all objects and logs from all pods. +function ct_os_print_logs() { + ct_os_get_status + while read -r pod_name; do + echo "INFO: printing logs for pod ${pod_name}" + oc logs "${pod_name}" + done < <(oc get pods --no-headers=true -o custom-columns=NAME:.metadata.name) +} + +# ct_os_enable_print_logs +# -------------------- +# Enables automatic printing of pod logs on ERR. +function ct_os_enable_print_logs() { + set -E + trap ct_os_print_logs ERR +} + +# ct_get_public_ip +# -------------------- +# Returns best guess for the IP that the node is accessible from other computers. +# This is a bit funny heuristic, simply goes through all IPv4 addresses that +# hostname -I returns and de-prioritizes IP addresses commonly used for local +# addressing. The rest of addresses are taken as public with higher probability. +function ct_get_public_ip() { + local hostnames + local public_ip='' + local found_ip + hostnames=$(hostname -I) + for guess_exp in '127\.0\.0\.1' '192\.168\.[0-9\.]*' '172\.[0-9\.]*' \ + '10\.[0-9\.]*' '[0-9\.]*' ; do + found_ip=$(echo "${hostnames}" | grep -oe "${guess_exp}") + if [ -n "${found_ip}" ] ; then + # shellcheck disable=SC2001 + hostnames=$(echo "${hostnames}" | sed -e "s/${found_ip}//") + public_ip="${found_ip}" + fi + done + if [ -z "${public_ip}" ] ; then + echo "ERROR: public IP could not be guessed." 
>&2 + return 1 + fi + echo "${public_ip}" +} + +# ct_os_run_in_pod POD_NAME CMD +# -------------------- +# Runs [cmd] in the pod specified by prefix [pod_prefix]. +# Arguments: pod_name - full name of the pod +# Arguments: cmd - command to be run in the pod +function ct_os_run_in_pod() { + local pod_name="$1" ; shift + + oc exec "$pod_name" -- "$@" +} + +# ct_os_get_service_ip SERVICE_NAME +# -------------------- +# Returns IP of the service specified by [service_name]. +# Arguments: service_name - name of the service +function ct_os_get_service_ip() { + local service_name="${1}" ; shift + local ocp_docker_address="172\.30\.[0-9\.]*" + if [ "${CVP:-0}" -eq "1" ]; then + # shellcheck disable=SC2034 + ocp_docker_address="172\.27\.[0-9\.]*" + fi + # shellcheck disable=SC2016 + oc get "svc/${service_name}" -o yaml | grep clusterIP | \ + cut -d':' -f2 | grep -oe "$ocp_docker_address" +} + + +# ct_os_get_all_pods_status +# -------------------- +# Returns status of all pods. +function ct_os_get_all_pods_status() { + oc get pods -o custom-columns=Ready:status.containerStatuses[0].ready,NAME:.metadata.name +} + +# ct_os_get_all_pods_name +# -------------------- +# Returns the full name of all pods. +function ct_os_get_all_pods_name() { + oc get pods --no-headers -o custom-columns=NAME:.metadata.name +} + +# ct_os_get_pod_status POD_PREFIX +# -------------------- +# Returns status of the pod specified by prefix [pod_prefix]. +# Note: Ignores -build and -deploy pods +# Arguments: pod_prefix - prefix or whole ID of the pod +function ct_os_get_pod_status() { + local pod_prefix="${1}" ; shift + ct_os_get_all_pods_status | grep -e "${pod_prefix}" | grep -Ev "(build|deploy)$" \ + | awk '{print $1}' | head -n 1 +} + +# ct_os_get_build_pod_status POD_PREFIX +# -------------------- +# Returns status of the build pod specified by prefix [pod_prefix]. 
+# Arguments: pod_prefix - prefix or whole ID of the pod +function ct_os_get_build_pod_status() { + local pod_prefix="${1}" ; shift + local query="custom-columns=NAME:.metadata.name,Ready:status.phase" + oc get pods -o "$query" | grep -e "${pod_prefix}" | grep -E "\-build\s" \ + | sort -u | awk '{print $2}' | tail -n 1 +} + +# ct_os_get_buildconfig_pod_name POD_PREFIX +# ---------------------------- +# Returns the name of the build config (bc) specified by prefix [pod_prefix]. +# Argument: pod_prefix - prefix +function ct_os_get_buildconfig_pod_name() { + local pod_prefix="${1}" ; shift + local query="custom-columns=NAME:.metadata.name" + oc get bc -o "$query" | grep -e "${pod_prefix}" | sort -u | tail -n 1 +} + +# ct_os_get_pod_name POD_PREFIX +# -------------------- +# Returns the full name of pods specified by prefix [pod_prefix]. +# Note: Ignores -build and -deploy pods +# Arguments: pod_prefix - prefix or whole ID of the pod +function ct_os_get_pod_name() { + local pod_prefix="${1}" ; shift + ct_os_get_all_pods_name | grep -e "^${pod_prefix}" | grep -Ev "(build|deploy)$" +} + +# ct_os_get_pod_ip POD_NAME +# -------------------- +# Returns the ip of the pod specified by [pod_name]. +# Arguments: pod_name - full name of the pod +function ct_os_get_pod_ip() { + local pod_name="${1}" + oc get pod "$pod_name" --no-headers -o custom-columns=IP:status.podIP +} + +# ct_os_get_sti_build_logs +# ----------------- +# Return logs from sti_build +# Arguments: pod_name +function ct_os_get_sti_build_logs() { + local pod_prefix="${1}" + oc status --suggest + pod_name=$(ct_os_get_buildconfig_pod_name "${pod_prefix}") + # Print logs but do not fail. Just for traces + if [ x"${pod_name}" != "x" ]; then + oc logs "bc/$pod_name" || return 0 + else + echo "Build config bc/$pod_name does not exist for some reason." + echo "Import probably failed." + fi +} + +# ct_os_check_pod_readiness POD_PREFIX STATUS +# -------------------- +# Checks whether the pod is ready.
+# Arguments: pod_prefix - prefix or whole ID of the pod +# Arguments: status - expected status (true, false) +function ct_os_check_pod_readiness() { + local pod_prefix="${1}" ; shift + local status="${1}" ; shift + test "$(ct_os_get_pod_status "${pod_prefix}")" == "${status}" +} + +# ct_os_wait_pod_ready POD_PREFIX TIMEOUT +# -------------------- +# Wait maximum [timeout] for the pod becoming ready. +# Arguments: pod_prefix - prefix or whole ID of the pod +# Arguments: timeout - how many seconds to wait +function ct_os_wait_pod_ready() { + local pod_prefix="${1}" ; shift + local timeout="${1}" ; shift + # If there is a build pod - wait for it to finish first + sleep 3 + if ct_os_get_all_pods_name | grep -E "${pod_prefix}.*-build"; then + SECONDS=0 + echo -n "Waiting for ${pod_prefix} build pod to finish ..." + while ! [ "$(ct_os_get_build_pod_status "${pod_prefix}")" == "Succeeded" ] ; do + echo -n "." + if [ "${SECONDS}" -gt "${timeout}0" ]; then + echo " FAIL" + ct_os_print_logs || : + ct_os_get_sti_build_logs "${pod_prefix}" || : + return 1 + fi + sleep 3 + done + echo " DONE" + fi + SECONDS=0 + echo -n "Waiting for ${pod_prefix} pod becoming ready ..." + while ! ct_os_check_pod_readiness "${pod_prefix}" "true" ; do + echo -n "." + if [ "${SECONDS}" -gt "${timeout}" ]; then + echo " FAIL"; + ct_os_print_logs || : + ct_os_get_sti_build_logs "${pod_prefix}" || : + return 1 + fi + sleep 3 + done + echo " DONE" +} + +# ct_os_wait_rc_ready POD_PREFIX TIMEOUT +# -------------------- +# Wait maximum [timeout] for the rc having desired number of replicas ready. +# Arguments: pod_prefix - prefix of the replication controller +# Arguments: timeout - how many seconds to wait +function ct_os_wait_rc_ready() { + local pod_prefix="${1}" ; shift + local timeout="${1}" ; shift + SECONDS=0 + echo -n "Waiting for ${pod_prefix} having desired numbers of replicas ..." + while !
test "$( (oc get --no-headers statefulsets; oc get --no-headers rc) 2>/dev/null \ + | grep "^${pod_prefix}" | awk '$2==$3 {print "ready"}')" == "ready" ; do + echo -n "." + if [ "${SECONDS}" -gt "${timeout}" ]; then + echo " FAIL"; + ct_os_print_logs || : + ct_os_get_sti_build_logs "${pod_prefix}" || : + return 1 + fi + sleep 3 + done + echo " DONE" +} + +# ct_os_deploy_pure_image IMAGE [ENV_PARAMS, ...] +# -------------------- +# Runs [image] in the openshift and optionally specifies env_params +# as environment variables to the image. +# Arguments: image - prefix or whole ID of the pod to run the cmd in +# Arguments: env_params - environment variables parameters for the images. +function ct_os_deploy_pure_image() { + local image="${1}" ; shift + # ignore error exit code, because oc new-app returns error when image exists + oc new-app "${image}" "$@" || : + # let openshift cluster to sync to avoid some race condition errors + sleep 3 +} + +# ct_os_deploy_s2i_image IMAGE APP [ENV_PARAMS, ... ] +# -------------------- +# Runs [image] and [app] in the openshift and optionally specifies env_params +# as environment variables to the image. +# Arguments: image - prefix or whole ID of the pod to run the cmd in +# Arguments: app - url or local path to git repo with the application sources. +# Arguments: env_params - environment variables parameters for the images. +function ct_os_deploy_s2i_image() { + local image="${1}" ; shift + local app="${1}" ; shift + # ignore error exit code, because oc new-app returns error when image exists + oc new-app "${image}~${app}" --strategy=source "$@" || : + + # let openshift cluster to sync to avoid some race condition errors + sleep 3 +} + +# ct_os_deploy_template_image TEMPLATE [ENV_PARAMS, ...] +# -------------------- +# Runs template in the openshift and optionally gives env_params to use +# specific values in the template. 
+# Arguments: template - prefix or whole ID of the pod to run the cmd in +# Arguments: env_params - environment variables parameters for the template. +# Example usage: ct_os_deploy_template_image mariadb-ephemeral-template.yaml \ +# DATABASE_SERVICE_NAME=mysql-80-c9s \ +# DATABASE_IMAGE=mysql-80-c9s \ +# MYSQL_USER=testu \ +# MYSQL_PASSWORD=testp \ +# MYSQL_DATABASE=testdb +function ct_os_deploy_template_image() { + local template="${1}" ; shift + oc process -f "${template}" "$@" | oc create -f - + # let openshift cluster to sync to avoid some race condition errors + sleep 3 +} + +# _ct_os_get_uniq_project_name +# -------------------- +# Returns a unique name of the OpenShift project. +function _ct_os_get_uniq_project_name() { + local r + while true ; do + r=${RANDOM} + mkdir /var/tmp/sclorg-test-${r} &>/dev/null && echo sclorg-test-${r} && break + done +} + +# ct_os_new_project [PROJECT] +# -------------------- +# Creates a new project in the OpenShift using 'oc' command. +# Arguments: project - project name, uses a new random name if omitted +# Expects 'oc' command that is properly logged in to the OpenShift cluster. +# Not using mktemp, because we cannot use uppercase characters. +# The OPENSHIFT_CLUSTER_PULLSECRET_PATH environment variable can be set +# to contain a path to a k8s secret definition which will be used +# to authenticate to image registries. +# shellcheck disable=SC2120 +function ct_os_new_project() { + if [ "${CVP:-0}" -eq "1" ]; then + echo "Testing in CVP environment. No need to create OpenShift project. This is done by CVP pipeline" + return + fi + if [ "${CT_SKIP_NEW_PROJECT:-false}" == 'true' ] ; then + echo "Creating project skipped."
+ return + fi + local project_name="${1:-$(_ct_os_get_uniq_project_name)}" ; shift || : + oc new-project "${project_name}" + # let openshift cluster to sync to avoid some race condition errors + sleep 3 + if test -n "${OPENSHIFT_CLUSTER_PULLSECRET_PATH:-}" -a -e "${OPENSHIFT_CLUSTER_PULLSECRET_PATH:-}"; then + oc create -f "$OPENSHIFT_CLUSTER_PULLSECRET_PATH" + # add registry pullsecret to the serviceaccount if provided + secret_name=$(grep '^\s*name:' "$OPENSHIFT_CLUSTER_PULLSECRET_PATH" | awk '{ print $2 }') + oc secrets link --for=pull default "$secret_name" + fi +} + +# ct_os_delete_project [PROJECT] +# -------------------- +# Deletes the specified project in the OpenShift +# Arguments: project - project name, uses the current project if omitted +# shellcheck disable=SC2120 +function ct_os_delete_project() { + if [ "${CT_SKIP_NEW_PROJECT:-false}" == 'true' ] || [ "${CVP:-0}" -eq "1" ]; then + echo "Deleting project skipped, cleaning objects only." + # when not having enough privileges (remote cluster), it might fail and + # it is not a big problem, so ignore failure in this case + ct_delete_all_objects || : + return + fi + local project_name="${1:-$(oc project -q)}" ; shift || : + if oc delete project "${project_name}" ; then + echo "Project ${project_name} was deleted properly" + else + echo "Project ${project_name} was not deleted properly. But it does not block CI." + fi + +} + +# ct_delete_all_objects +# ----------------- +# Deletes all objects within the project. +# Handy when we have one project and want to run more tests. +function ct_delete_all_objects() { + local objects="bc builds dc is isimage istag po rc routes svc" + if [ "${CVP:-0}" -eq "1" ]; then + echo "Testing in CVP environment. No need to delete isimage and istag in OpenShift project.
This is done by CVP pipeline" + objects="bc builds dc po pvc rc routes" + fi + for x in $objects; do + echo "oc gets info about $x" + oc get "$x" + echo "oc deletes $x with --all --force --grace-period=0" + oc delete "$x" --all --force --grace-period=0 + done + # for some objects it takes longer to be really deleted, so a dummy sleep + # to avoid some races when other test can see not-yet-deleted objects and can fail + sleep 10 +} + +# ct_os_docker_login_v4 +# -------------------- +# Logs into the docker daemon +# Uses global REGISTRY_ADDRESS environment variable for arbitrary registry address. +# Does not do anything if REGISTRY_ADDRESS is set. +function ct_os_docker_login_v4() { + OCP4_REGISTER=$(oc get route default-route -n openshift-image-registry --template='{{ .spec.host }}') + echo "OCP4 login address is $OCP4_REGISTER." + if [ -z "${OCP4_REGISTER}" ]; then + echo "!!!OpenShift 4 registry address not found. This is an error. Check OpenShift 4 cluster!!!" + return 1 + fi + + if docker login -u kubeadmin -p "$(oc whoami -t)" "${OCP4_REGISTER}"; then + echo "Login to $OCP4_REGISTER was successful." + return 0 + fi + return 1 +} + +# ct_os_upload_image IMAGE [IMAGESTREAM] +# -------------------- +# Uploads image from local registry to the OpenShift internal registry. +# Arguments: image - image name to upload +# Arguments: imagestream - name and tag to use for the internal registry. +# In the format of name:tag ($image_name:latest by default) +# Uses global REGISTRY_ADDRESS environment variable for arbitrary registry address. +function ct_os_upload_image() { + local input_name="${1}" ; shift + local image_name=${1} + local output_name + local source_name + + source_name="${input_name}" + # Variable OCP4_REGISTER is set in function ct_os_docker_login_v4 + if !
ct_os_docker_login_v4; then + return 1 + fi + output_name="$OCP4_REGISTER/$namespace/$image_name" + + docker tag "${source_name}" "${output_name}" + docker push "${output_name}" +} + +# ct_os_is_tag_exists IS_NAME TAG +# -------------------- +# Checks whether the specified tag exists for an image stream +# Arguments: is_name - name of the image stream +# Arguments: tag - name of the tag (usually version) +function ct_os_is_tag_exists() { + local is_name=$1 ; shift + local tag=$1 ; shift + oc get is "${is_name}" -n openshift -o=jsonpath='{.spec.tags[*].name}' | grep -qw "${tag}" +} + +# ct_os_template_exists T_NAME +# -------------------- +# Checks whether the specified template exists for an image stream +# Arguments: t_name - template name of the image stream +function ct_os_template_exists() { + local t_name=$1 ; shift + oc get templates -n openshift | grep -q "^${t_name}\s" +} + +# ct_os_cluster_running +# -------------------- +# Returns 0 if oc cluster is running +function ct_os_cluster_running() { + oc cluster status &>/dev/null +} + +# ct_os_logged_in +# --------------- +# Returns 0 if logged in to a cluster (remote or local) +function ct_os_logged_in() { + oc whoami >/dev/null +} + +# ct_os_test_s2i_app_func IMAGE APP CONTEXT_DIR CHECK_CMD [OC_ARGS] +# -------------------- +# Runs [image] and [app] in the openshift and optionally specifies env_params +# as environment variables to the image. Then check the container by arbitrary +# function given as argument (such an argument may include string, +# that will be replaced with actual IP). 
+# Arguments: image - prefix or whole ID of the pod to run the cmd in (compulsory) +# Arguments: app - url or local path to git repo with the application sources (compulsory) +# Arguments: context_dir - sub-directory inside the repository with the application sources (compulsory) +# Arguments: check_command - CMD line that checks whether the container works (compulsory; '' will be replaced with actual IP) +# Arguments: oc_args - all other arguments are used as additional parameters for the `oc new-app` +# command, typically environment variables (optional) +function ct_os_test_s2i_app_func() { + local image_name=${1} + local app=${2} + local context_dir=${3} + local check_command=${4} + local oc_args=${5:-} + local image_name_no_namespace=${image_name##*/} + local service_name="${image_name_no_namespace%%:*}-testing" + local namespace + + if [ $# -lt 4 ] || [ -z "${1}" ] || [ -z "${2}" ] || [ -z "${3}" ] || [ -z "${4}" ]; then + echo "ERROR: ct_os_test_s2i_app_func() requires at least 4 arguments that cannot be emtpy." >&2 + return 1 + fi + + # shellcheck disable=SC2119 + ct_os_new_project + + namespace=${CT_NAMESPACE:-"$(oc project -q)"} + local image_tagged="${image_name_no_namespace%:*}:${VERSION}" + + if [ "${CVP:-0}" -eq "0" ]; then + echo "Uploading image ${image_name} as ${image_tagged} into OpenShift internal registry." + ct_os_upload_image "${image_name}" "${image_tagged}" + else + echo "Testing image ${image_name} in CVP pipeline." 
+ fi + + local app_param="${app}" + if [ -d "${app}" ] ; then + # for local directory, we need to copy the content, otherwise too smart os command + # pulls the git remote repository instead + app_param=$(ct_obtain_input "${app}") + fi + + # shellcheck disable=SC2086 + ct_os_deploy_s2i_image "${image_tagged}" "${app_param}" \ + --context-dir="${context_dir}" \ + --name "${service_name}" \ + ${oc_args} + + if [ -d "${app}" ] ; then + # in order to avoid weird race seen sometimes, let's wait shortly + # before starting the build explicitly + sleep 5 + oc start-build "${service_name}" --from-dir="${app_param}" + fi + + ct_os_wait_pod_ready "${service_name}" 300 + + local ip + local check_command_exp + local image_id + + # get image ID from the deployment config + image_id=$(oc get "deploymentconfig.apps.openshift.io/${service_name}" -o custom-columns=IMAGE:.spec.template.spec.containers[*].image | tail -n 1) + + ip=$(ct_os_get_service_ip "${service_name}") + # shellcheck disable=SC2001 + check_command_exp=$(echo "$check_command" | sed -e "s//$ip/g" -e "s||${image_id}|g") + + echo " Checking APP using $check_command_exp ..." + local result=0 + eval "$check_command_exp" || result=1 + + ct_os_service_image_info "${service_name}" + + if [ $result -eq 0 ] ; then + echo " Check passed." + else + echo " Check failed." + fi + + # shellcheck disable=SC2119 + ct_os_delete_project + return $result +} + +# ct_os_test_s2i_app IMAGE APP CONTEXT_DIR EXPECTED_OUTPUT [PORT, PROTOCOL, RESPONSE_CODE, OC_ARGS, ... ] +# -------------------- +# Runs [image] and [app] in the openshift and optionally specifies env_params +# as environment variables to the image. Then check the http response. 
+# Arguments: image - prefix or whole ID of the pod to run the cmd in (compulsory)
+# Arguments: app - url or local path to git repo with the application sources (compulsory)
+# Arguments: context_dir - sub-directory inside the repository with the application sources (compulsory)
+# Arguments: expected_output - PCRE regular expression that must match the response body (compulsory)
+# Arguments: port - which port to use (optional; default: 8080)
+# Arguments: protocol - which protocol to use (optional; default: http)
+# Arguments: response_code - what http response code to expect (optional; default: 200)
+# Arguments: oc_args - all other arguments are used as additional parameters for the `oc new-app`
+#                      command, typically environment variables (optional)
+function ct_os_test_s2i_app() {
+  local image_name=${1}
+  local app=${2}
+  local context_dir=${3}
+  local expected_output=${4}
+  local port=${5:-8080}
+  local protocol=${6:-http}
+  local response_code=${7:-200}
+  local oc_args=${8:-}
+
+  if [ $# -lt 4 ] || [ -z "${1}" ] || [ -z "${2}" ] || [ -z "${3}" ] || [ -z "${4}" ]; then
+    echo "ERROR: ct_os_test_s2i_app() requires at least 4 arguments that cannot be empty." >&2
+    return 1
+  fi
+
+  ct_os_test_s2i_app_func "${image_name}" \
+                          "${app}" \
+                          "${context_dir}" \
+                          "ct_os_test_response_internal '${protocol}://<IP>:${port}' '${response_code}' '${expected_output}'" \
+                          "${oc_args}"
+}
+
+# ct_os_test_template_app_func IMAGE APP IMAGE_IN_TEMPLATE CHECK_CMD [OC_ARGS]
+# --------------------
+# Runs [image] and [app] in the openshift and optionally specifies env_params
+# as environment variables to the image. Then check the container by arbitrary
+# function given as argument (such an argument may include <IP> string,
+# that will be replaced with actual IP).
+# Arguments: image_name - prefix or whole ID of the pod to run the cmd in (compulsory)
+# Arguments: template - url or local path to a template to use (compulsory)
+# Arguments: name_in_template - image name used in the template
+# Arguments: check_command - CMD line that checks whether the container works (compulsory; '<IP>' will be replaced with actual IP)
+# Arguments: oc_args - all other arguments are used as additional parameters for the `oc new-app`
+#                      command, typically environment variables (optional)
+# Arguments: other_images - some templates need other image to be pushed into the OpenShift registry,
+#                      specify them in this parameter as "<image>|<tag>", where "<image>" is a full image name
+#                      (including registry if needed) and "<tag>" is a tag under which the image should be available
+#                      in the OpenShift registry.
+function ct_os_test_template_app_func() {
+  local image_name=${1}
+  local template=${2}
+  local name_in_template=${3}
+  local check_command=${4}
+  local oc_args=${5:-}
+  local other_images=${6:-}
+
+  if [ $# -lt 4 ] || [ -z "${1}" ] || [ -z "${2}" ] || [ -z "${3}" ] || [ -z "${4}" ]; then
+    echo "ERROR: ct_os_test_template_app_func() requires at least 4 arguments that cannot be empty." >&2
+    return 1
+  fi
+
+  local service_name="${name_in_template}-testing"
+  local image_tagged="${name_in_template}:${VERSION}"
+  local namespace
+
+  # shellcheck disable=SC2119
+  ct_os_new_project
+
+  namespace=${CT_NAMESPACE:-"$(oc project -q)"}
+  # Upload main image is already done by CVP pipeline. No need to do it twice.
+  if [ "${CVP:-0}" -eq "0" ]; then
+    # Create a specific imagestream tag for the image so that oc cannot use anything else
+    echo "Uploading image ${image_name} as ${image_tagged} into OpenShift internal registry."
+    ct_os_upload_image "${image_name}" "${image_tagged}"
+  else
+    echo "Import is already done by CVP pipeline."
+  fi
+  # Upload main image is already done by CVP pipeline. No need to do it twice.
+ if [ "${CVP:-0}" -eq "0" ]; then + # Other images are not uploaded by CVP pipeline. We need to do it. + # upload also other images, that template might need (list of pairs in the format | + local image_tag_a + local i_t + for i_t in ${other_images} ; do + echo "${i_t}" + IFS='|' read -ra image_tag_a <<< "${i_t}" + if [[ "$(docker images -q "$image_name" 2>/dev/null)" == "" ]]; then + echo "ERROR: Image $image_name is not pulled yet." + docker images + echo "Add to the beginning of scripts run-openshift-remote-cluster and run-openshift row" + echo "'ct_pull_image $image_name true'." + exit 1 + fi + + echo "Uploading image ${image_tag_a[0]} as ${image_tag_a[1]} into OpenShift internal registry." + ct_os_upload_image "${image_tag_a[0]}" "${image_tag_a[1]}" + done + fi + + # get the template file from remote or local location; if not found, it is + # considered an internal template name, like 'mysql', so use the name + # explicitly + local local_template + + local_template=$(ct_obtain_input "${template}" 2>/dev/null || echo "--template=${template}") + + echo "Creating a new-app with name ${name_in_template} in namespace ${namespace} with args ${oc_args}." + # shellcheck disable=SC2086 + oc new-app "${local_template}" \ + --name "${name_in_template}" \ + -p NAMESPACE="${namespace}" \ + ${oc_args} + + ct_os_wait_pod_ready "${service_name}" 300 + + local ip + local check_command_exp + local image_id + + # get image ID from the deployment config + image_id=$(oc get "deploymentconfig.apps.openshift.io/${service_name}" -o custom-columns=IMAGE:.spec.template.spec.containers[*].image | tail -n 1) + + ip=$(ct_os_get_service_ip "${service_name}") + # shellcheck disable=SC2001 + check_command_exp=$(echo "$check_command" | sed -e "s//$ip/g" -e "s||${image_id}|g") + + echo " Checking APP using $check_command_exp ..." + local result=0 + eval "$check_command_exp" || result=1 + + ct_os_service_image_info "${service_name}" + + if [ $result -eq 0 ] ; then + echo " Check passed." 
+ else + echo " Check failed." + fi + + # shellcheck disable=SC2119 + ct_os_delete_project + return $result +} + +# params: +# ct_os_test_template_app IMAGE APP IMAGE_IN_TEMPLATE EXPECTED_OUTPUT [PORT, PROTOCOL, RESPONSE_CODE, OC_ARGS, ... ] +# -------------------- +# Runs [image] and [app] in the openshift and optionally specifies env_params +# as environment variables to the image. Then check the http response. +# Arguments: image_name - prefix or whole ID of the pod to run the cmd in (compulsory) +# Arguments: template - url or local path to a template to use (compulsory) +# Arguments: name_in_template - image name used in the template +# Arguments: expected_output - PCRE regular expression that must match the response body (compulsory) +# Arguments: port - which port to use (optional; default: 8080) +# Arguments: protocol - which protocol to use (optional; default: http) +# Arguments: response_code - what http response code to expect (optional; default: 200) +# Arguments: oc_args - all other arguments are used as additional parameters for the `oc new-app` +# command, typically environment variables (optional) +# Arguments: other_images - some templates need other image to be pushed into the OpenShift registry, +# specify them in this parameter as "|", where "" is a full image name +# (including registry if needed) and "" is a tag under which the image should be available +# in the OpenShift registry. +function ct_os_test_template_app() { + local image_name=${1} + local template=${2} + local name_in_template=${3} + local expected_output=${4} + local port=${5:-8080} + local protocol=${6:-http} + local response_code=${7:-200} + local oc_args=${8:-} + local other_images=${9:-} + + if [ $# -lt 4 ] || [ -z "${1}" ] || [ -z "${2}" ] || [ -z "${3}" ] || [ -z "${4}" ]; then + echo "ERROR: ct_os_test_template_app() requires at least 4 arguments that cannot be emtpy." 
>&2 + return 1 + fi + + ct_os_test_template_app_func "${image_name}" \ + "${template}" \ + "${name_in_template}" \ + "ct_os_test_response_internal '${protocol}://:${port}' '${response_code}' '${expected_output}'" \ + "${oc_args}" \ + "${other_images}" +} + +# ct_os_test_image_update IMAGE_NAME OLD_IMAGE ISTAG CHECK_FUNCTION OC_ARGS +# -------------------- +# Runs an image update test with [image] uploaded to [is] imagestream +# and checks the services using an arbitrary function provided in [check_function]. +# Arguments: image_name - prefix or whole ID of the pod to run the cmd in (compulsory) +# Arguments: old_image - valid name of the image from the registry +# Arguments: istag - imagestream to upload the images into (compulsory) +# Arguments: check_function - command to be run to check functionality of created services (compulsory) +# Arguments: oc_args - arguments to use during oc new-app (compulsory) +ct_os_test_image_update() { + local image_name=$1; shift + local old_image=$1; shift + local istag=$1; shift + local check_function=$1; shift + local ip="" check_command_exp="" + local image_name_no_namespace=${image_name##*/} + local service_name="${image_name_no_namespace%%:*}-testing" + + echo "Running image update test for: $image_name" + # shellcheck disable=SC2119 + ct_os_new_project + + # Get current image from repository and create an imagestream + docker pull "$old_image:latest" 2>/dev/null + ct_os_upload_image "$old_image" "$istag" + + # Setup example application with curent image + oc new-app "$@" --name "$service_name" + ct_os_wait_pod_ready "$service_name" 60 + + # Check application output + ip=$(ct_os_get_service_ip "$service_name") + check_command_exp=${check_function///$ip} + ct_assert_cmd_success "$check_command_exp" + + # Tag built image into the imagestream and wait for rebuild + ct_os_upload_image "$image_name" "$istag" + ct_os_wait_pod_ready "${service_name}-2" 60 + + # Check application output + ip=$(ct_os_get_service_ip "$service_name") + 
check_command_exp=${check_function///$ip} + ct_assert_cmd_success "$check_command_exp" + + # shellcheck disable=SC2119 + ct_os_delete_project +} + +# ct_os_deploy_cmd_image IMAGE_NAME +# -------------------- +# Runs a special command pod, a pod that does nothing, but includes utilities for testing. +# A typical usage is a mysql pod that includes mysql commandline, that we need for testing. +# Running commands inside this command pod is done via ct_os_cmd_image_run function. +# The pod is not run again if already running. +# Arguments: image_name - image to be used as a command pod +function ct_os_deploy_cmd_image() { + local image_name=${1} + oc get pod command-app &>/dev/null && echo "command POD already running" && return 0 + echo "command POD not running yet, will start one called command-app ${image_name}" + oc create -f - <" + local sleep_time=3 + local attempt=1 + local result=1 + local status + local response_code + local response_file + local util_image_name='registry.access.redhat.com/ubi7/ubi' + + response_file=$(mktemp /tmp/ct_test_response_XXXXXX) + ct_os_deploy_cmd_image "${util_image_name}" + + while [ "${attempt}" -le "${max_attempts}" ]; do + ct_os_cmd_image_run "curl --connect-timeout 10 -s -w '%{http_code}' '${url}'" >"${response_file}" && status=0 || status=1 + if [ "${status}" -eq 0 ]; then + response_code=$(tail -c 3 "${response_file}") + if [ "${response_code}" -eq "${expected_code}" ]; then + result=0 + fi + grep -qP -e "${body_regexp}" "${response_file}" || result=1; + # Some services return 40x code until they are ready, so let's give them + # some chance and not end with failure right away + # Do not wait if we already have expected outcome though + if [ "${result}" -eq 0 ] || [ "${attempt}" -gt "${ignore_error_attempts}" ] || [ "${attempt}" -eq "${max_attempts}" ] ; then + break + fi + fi + attempt=$(( attempt + 1 )) + sleep "${sleep_time}" + done + rm -f "${response_file}" + return "${result}" +} + +# ct_os_get_image_from_pod +# 
------------------------ +# Print image identifier from an existing pod to stdout +# Argument: pod_prefix - prefix or full name of the pod to get image from +ct_os_get_image_from_pod() { + local pod_prefix=$1 ; shift + local pod_name + pod_name=$(ct_os_get_pod_name "$pod_prefix") + oc get "po/${pod_name}" -o yaml | sed -ne 's/^\s*image:\s*\(.*\)\s*$/\1/ p' | head -1 +} + +# ct_os_check_cmd_internal +# ---------------- +# Runs a specified command, checks exit code and compares the output with expected regexp. +# That all is done inside an image in the cluster, so the function is used +# typically in clusters that are not accessible outside. +# The check is repeated until timeout. +# Argument: util_image_name - name of the image in the cluster that is used for running the cmd +# Argument: service_name - kubernetes' service name to work with (IP address is taken from this one) +# Argument: check_command - command that is run within the util_image_name container +# Argument: expected_content_match - regexp that must be in the output (use .* to ignore check) +# Argument: timeout - number of seconds to wait till the check succeeds +function ct_os_check_cmd_internal() { + local util_image_name=$1 ; shift + local service_name=$1 ; shift + local check_command=$1 ; shift + local expected_content_match=${1:-.*} ; shift + local timeout=${1:-60} ; shift || : + + : " Service ${service_name} check ..." + + local output + local ret + local ip + local check_command_exp + + ip=$(ct_os_get_service_ip "${service_name}") + # shellcheck disable=SC2001 + check_command_exp=$(echo "$check_command" | sed -e "s//$ip/g") + + ct_os_deploy_cmd_image "${util_image_name}" + SECONDS=0 + + echo -n "Waiting for ${service_name} service becoming ready ..." + while true ; do + output=$(ct_os_cmd_image_run "$check_command_exp") + ret=$? + echo "${output}" | grep -qe "${expected_content_match}" || ret=1 + if [ ${ret} -eq 0 ] ; then + echo " PASS" + return 0 + fi + echo -n "." 
+ [ ${SECONDS} -gt "${timeout}" ] && break + sleep 3 + done + echo " FAIL" + return 1 +} + +# ct_os_test_image_stream_template IMAGE_STREAM_FILE TEMPLATE_FILE SERVICE NAME [TEMPLATE_PARAMS] +# ------------------------ +# Creates an image stream and deploys a specified template. Then checks that a pod runs. +# Argument: image_stream_file - local or remote file with the image stream definition +# Argument: template_file - local file name with a template +# Argument: service_name - how the pod will be named (prefix) +# Argument: template_params (optional) - parameters for the template, like image stream version +function ct_os_test_image_stream_template() { + local image_stream_file=${1} + local template_file=${2} + local service_name=${3} + local template_params=${4:-} + local local_image_stream_file + local local_template_file + + if [ $# -lt 3 ] || [ -z "${1}" ] || [ -z "${2}" ] || [ -z "${3}" ]; then + echo "ERROR: ct_os_test_image_stream() requires at least 3 arguments that cannot be empty." >&2 + return 1 + fi + + echo "Running image stream test for stream ${image_stream_file} and template ${template_file}" + # shellcheck disable=SC2119 + ct_os_new_project + + local_image_stream_file=$(ct_obtain_input "${image_stream_file}") + local_template_file=$(ct_obtain_input "${template_file}") + oc create -f "${local_image_stream_file}" + + # shellcheck disable=SC2086 + if ! ct_os_deploy_template_image "${local_template_file}" -p NAMESPACE="${CT_NAMESPACE:-$(oc project -q)}" ${template_params} ; then + echo "ERROR: ${template_file} could not be loaded" + return 1 + # Deliberately not runnig ct_os_delete_project here because user either + # might want to investigate or the cleanup is done with the cleanup trap. + # Most functions depend on the set -e anyway at this point. + fi + ct_os_wait_pod_ready "${service_name}" 120 + result=$? 
+
+  # shellcheck disable=SC2119
+  ct_os_delete_project
+  return $result
+}
+
+# ct_os_wait_stream_ready IMAGE_STREAM_FILE NAMESPACE [ TIMEOUT ]
+# ------------------------
+# Waits max timeout seconds till a [stream] is available in the [namespace].
+# Arguments: image_stream - stream name (usually <name>:<tag>)
+# Arguments: namespace - namespace name
+# Arguments: timeout - how many seconds to wait
+function ct_os_wait_stream_ready() {
+  local image_stream=${1}
+  local namespace=${2}
+  local timeout=${3:-60}
+  # It takes some time for the first time before the image is pulled in
+  SECONDS=0
+  echo -n "Waiting for ${namespace}/${image_stream} to become available ..."
+  while ! oc get -n "${namespace}" istag "${image_stream}" &>/dev/null; do
+    if [ "$SECONDS" -gt "${timeout}" ] ; then
+      echo "FAIL: ${namespace}/${image_stream} not available after ${timeout}s:"
+      echo "oc get -n ${namespace} istag ${image_stream}"
+      oc get -n "${namespace}" istag "${image_stream}"
+      return 1
+    fi
+    sleep 3
+    echo -n .
+  done
+  echo " DONE"
+}
+
+# ct_os_test_image_stream_s2i IMAGE_STREAM_FILE IMAGE_NAME APP CONTEXT_DIR EXPECTED_OUTPUT [PORT, PROTOCOL, RESPONSE_CODE, OC_ARGS, ... ]
+# --------------------
+# Check the imagestream with an s2i app check. First it imports the given image stream, then
+# it runs [image] and [app] in the openshift and optionally specifies env_params
+# as environment variables to the image. Then check the http response.
+# Argument: image_stream_file - local or remote file with the image stream definition +# Argument: image_name - container image we test (or name of the existing image stream in : format) +# Argument: app - url or local path to git repo with the application sources (compulsory) +# Argument: context_dir - sub-directory inside the repository with the application sources (compulsory) +# Argument: expected_output - PCRE regular expression that must match the response body (compulsory) +# Argument: port - which port to use (optional; default: 8080) +# Argument: protocol - which protocol to use (optional; default: http) +# Argument: response_code - what http response code to expect (optional; default: 200) +# Argument: oc_args - all other arguments are used as additional parameters for the `oc new-app` +# command, typically environment variables (optional) +function ct_os_test_image_stream_s2i() { + local image_stream_file=${1} + local image_name=${2} + local app=${3} + local context_dir=${4} + local expected_output=${5} + local port=${6:-8080} + local protocol=${7:-http} + local response_code=${8:-200} + local oc_args=${9:-} + local result + local local_image_stream_file + + echo "Running image stream test for stream ${image_stream_file} and application ${app} with context ${context_dir}" + + # shellcheck disable=SC2119 + ct_os_new_project + + local_image_stream_file=$(ct_obtain_input "${image_stream_file}") + oc create -f "${local_image_stream_file}" + + # ct_os_test_s2i_app creates a new project, but we already need + # it before for the image stream import, so tell it to skip this time + CT_SKIP_NEW_PROJECT=true \ + ct_os_test_s2i_app "${IMAGE_NAME}" "${app}" "${context_dir}" "${expected_output}" \ + "${port}" "${protocol}" "${response_code}" "${oc_args}" + result=$? 
+ + # shellcheck disable=SC2119 + CT_SKIP_NEW_PROJECT=false + ct_os_delete_project + + return $result +} + +# ct_os_test_image_stream_quickstart IMAGE_STREAM_FILE TEMPLATE IMAGE_NAME NAME_IN_TEMPLATE EXPECTED_OUTPUT [PORT, PROTOCOL, RESPONSE_CODE, OC_ARGS, OTHER_IMAGES ] +# -------------------- +# Check the imagestream with an s2i app check. First it imports the given image stream, then +# it runs [image] and [app] in the openshift and optionally specifies env_params +# as environment variables to the image. Then check the http response. +# Argument: image_stream_file - local or remote file with the image stream definition +# Argument: template_file - local file name with a template +# Argument: image_name - container image we test (or name of the existing image stream in : format) +# Argument: name_in_template - image name used in the template +# Argument: expected_output - PCRE regular expression that must match the response body (compulsory) +# Argument: port - which port to use (optional; default: 8080) +# Argument: protocol - which protocol to use (optional; default: http) +# Argument: response_code - what http response code to expect (optional; default: 200) +# Argument: oc_args - all other arguments are used as additional parameters for the `oc new-app` +# command, typically environment variables (optional) +# Argument: other_images - some templates need other image to be pushed into the OpenShift registry, +# specify them in this parameter as "|", where "" is a full image name +# (including registry if needed) and "" is a tag under which the image should be available +# in the OpenShift registry. 
+function ct_os_test_image_stream_quickstart() { + local image_stream_file=${1} + local template_file=${2} + local image_name=${3} + local name_in_template=${4} + local expected_output=${5} + local port=${6:-8080} + local protocol=${7:-http} + local response_code=${8:-200} + local oc_args=${9:-} + local other_images=${10:-} + local result + local local_image_stream_file + local local_template_file + + echo "Running image stream test for stream ${image_stream_file} and quickstart template ${template_file}" + echo "Image name is ${IMAGE_NAME}" + # shellcheck disable=SC2119 + ct_os_new_project + + local_image_stream_file=$(ct_obtain_input "${image_stream_file}") + local_template_file=$(ct_obtain_input "${template_file}") + # ct_os_test_template_app creates a new project, but we already need + # it before for the image stream import, so tell it to skip this time + namespace=${CT_NAMESPACE:-"$(oc project -q)"} + + # Add namespace into openshift arguments + if [[ $oc_args != *"NAMESPACE"* ]]; then + oc_args="${oc_args} -p NAMESPACE=${namespace}" + fi + oc create -f "${local_image_stream_file}" + + # In case we are testing on OpenShift 4 export variable for mirror image + # which means, that image is going to be mirrored from an internal registry into OpenShift 4 + if [ "${CT_EXTERNAL_REGISTRY:-false}" == 'true' ]; then + export CT_TAG_IMAGE=true + fi + # ct_os_test_template_app creates a new project, but we already need + # it before for the image stream import, so tell it to skip this time + + CT_SKIP_NEW_PROJECT=true \ + ct_os_test_template_app "${image_name}" \ + "${local_template_file}" \ + "${name_in_template}" \ + "${expected_output}" \ + "${port}" "${protocol}" "${response_code}" "${oc_args}" "${other_images}" + + result=$? + + # shellcheck disable=SC2119 + CT_SKIP_NEW_PROJECT=false + ct_os_delete_project + + return $result +} + +# ct_os_service_image_info SERVICE_NAME +# -------------------- +# Shows information about the image used by a specified service. 
+# Argument: service_name - Service name (uesd for deployment config) +function ct_os_service_image_info() { + local service_name=$1 + local image_id + local namespace + + # get image ID from the deployment config + image_id=$(oc get "deploymentconfig.apps.openshift.io/${service_name}" -o custom-columns=IMAGE:.spec.template.spec.containers[*].image | tail -n 1) + namespace=${CT_NAMESPACE:-"$(oc project -q)"} + + echo " Information about the image we work with:" + oc get deploymentconfig.apps.openshift.io/"${service_name}" -o yaml | grep lastTriggeredImage + # for s2i builds, the resulting image is actually in the current namespace, + # so if the specified namespace does not succeed, try the current namespace + oc get isimage -n "${namespace}" "${image_id##*/}" -o yaml || oc get isimage "${image_id##*/}" -o yaml +} +# vim: set tabstop=2:shiftwidth=2:expandtab: diff --git a/20/test/test-lib-remote-openshift.sh b/20/test/test-lib-remote-openshift.sh new file mode 100644 index 00000000..8b51cbec --- /dev/null +++ b/20/test/test-lib-remote-openshift.sh @@ -0,0 +1,136 @@ +# shellcheck disable=SC2148 +if [ -z "${sourced_test_lib_remote_openshift:-}" ]; then + sourced_test_lib_remote_openshift=1 +else + return 0 +fi + +# shellcheck shell=bash +# some functions are used from test-lib.sh, that is usually in the same dir +# shellcheck source=/dev/null +source "$(dirname "${BASH_SOURCE[0]}")"/test-lib.sh + +# this should be returned when something related to the openshift cluster +# goes wrong during the test pipeline +# shellcheck disable=SC2034 +readonly OC_ERR=11 + +# Set of functions for testing docker images in OpenShift using 'oc' command + +# A variable containing the overall test result +# TESTSUITE_RESULT=0 +# And the following trap must be set, in the beginning of the test script: +# trap ct_os_cleanup EXIT SIGINT + +# ct_os_set_path_oc_4 OC_VERSION +# -------------------- +# This is a trick that helps using correct version 4 of the `oc`: +# The input is version of 
the openshift in format 4.4 etc.
+# If the currently available version of oc is not of this version,
+# it first takes a look into /usr/local/oc-<version>/bin directory,
+
+# Arguments: oc_version - X.Y part of the version of OSE (e.g. 4.4)
+function ct_os_set_path_oc_4() {
+  echo "Setting OCP4 client"
+  local oc_version=$1
+  local installed_oc_path="/usr/local/oc-v${oc_version}/bin"
+  echo "PATH ${installed_oc_path}"
+  if [ -x "${installed_oc_path}/oc" ] ; then
+    oc_path="${installed_oc_path}"
+    echo "Binary oc found in ${installed_oc_path}" >&2
+  else
+    echo "OpenShift Client binary on path ${installed_oc_path} not found"
+    return 1
+  fi
+  export PATH="${oc_path}:${PATH}"
+}
+
+# ct_os_prepare_ocp4
+# ------------------
+# Prepares environment for testing images in OpenShift 4 environment
+#
+#
+function ct_os_set_ocp4() {
+  if [ "${CVP:-0}" -eq "1" ]; then
+    echo "Testing in CVP environment. No need to login to OpenShift cluster. This is already done by CVP pipeline."
+    return
+  fi
+  local login
+  OS_OC_CLIENT_VERSION=${OS_OC_CLIENT_VERSION:-4}
+  ct_os_set_path_oc_4 "${OS_OC_CLIENT_VERSION}"
+
+  login=$(cat "$KUBEPASSWORD")
+  oc login -u kubeadmin -p "$login"
+  oc version
+  if ! oc version | grep -q "Client Version: ${OS_OC_CLIENT_VERSION}." ; then
+    echo "ERROR: something went wrong, oc located at ${oc_path}, but oc of version ${OS_OC_CLIENT_VERSION} not found in PATH ($PATH)" >&2
+    return 1
+  else
+    echo "PATH set correctly, binary oc found in version ${OS_OC_CLIENT_VERSION}: $(command -v oc)"
+  fi
+  # Switch to default project as soon as we are logged to cluster
+  oc project default
+  echo "Login to OpenShift ${OS_OC_CLIENT_VERSION} is DONE"
+  # let openshift cluster to sync to avoid some race condition errors
+  sleep 3
+}
+
+function ct_os_tag_image_for_cvp() {
+  if [ "${CVP:-0}" -eq "0" ]; then
+    echo "The function is valid only for CVP pipeline."
+ return + fi + local tag_image_name="$1" + local tag="" + if [ "${OS}" == "rhel8" ]; then + tag="-el8" + elif [ "${OS}" == "rhel9" ]; then + tag="-el9" + else + echo "Only RHEL images are supported." + return + fi + oc tag "${tag_image_name}:${VERSION}" "${tag_image_name}:${VERSION}${tag}" +} + +function ct_os_upload_image_external_registry() { + local input_name="${1}" ; shift + local image_name=${input_name##*/} + local imagestream=${1:-$image_name:latest} + local output_name + + ct_os_login_external_registry + + output_name="${INTERNAL_DOCKER_REGISTRY}/rhscl-ci-testing/$imagestream" + + docker images + docker tag "${input_name}" "${output_name}" + docker push "${output_name}" +} + + +function ct_os_import_image_ocp4() { + local image_name="${1}"; shift + local imagestream=${1:-$image_name:latest} + + echo "Uploading image ${image_name} as ${imagestream} into OpenShift internal registry." + ct_os_upload_image "${image_name}" "${imagestream}" + +} + +# ct_os_check_login +# --------------- +# function checks if the login to openshift was successful +# if successful returns 0 +# if not, write error message, sets test result to 1 +# and exits with non-zero +# Uses: $TESTSUITE_RESULT - overall result of all tests +function ct_os_check_login() { + oc status || { + echo "-------------------------------------------" + echo "It looks like oc is not properly logged in." + # shellcheck disable=SC2034 + TESTSUITE_RESULT=1 + return 1 + } +} diff --git a/20/test/test-lib.sh b/20/test/test-lib.sh new file mode 100644 index 00000000..4de6f590 --- /dev/null +++ b/20/test/test-lib.sh @@ -0,0 +1,1509 @@ +# shellcheck shell=bash +# +# Test a container image. 
+# +# Always use sourced from a specific container testfile +# + +# Container CI tests +# abbreviated as "ct" + +# run ct_init before starting the actual testsuite + +# shellcheck disable=SC2148 +if [ -z "${sourced_test_lib:-}" ]; then + sourced_test_lib=1 +else + return 0 +fi + +LINE="==============================================" + +# may be redefined in the specific container testfile +EXPECTED_EXIT_CODE=0 + +# define UNSTABLE_TESTS if not already defined, as this variable +# is not mandatory for containers +UNSTABLE_TESTS="${UNSTABLE_TESTS:-""}" + + +# ct_init +# -------------------- +# This function needs to be called before any container test starts +# Sets: $APP_ID_FILE_DIR - path to directory used for storing +# IDs of application images used during tests. +# Sets: $CID_FILE_DIR - path to directory containing cid_files +# Sets: $TEST_SUMMARY - string, where test results are written +# Sets: $TESTSUITE_RESULT - overall result of run testuite +function ct_init() { + APP_ID_FILE_DIR="$(mktemp -d)" + CID_FILE_DIR="$(mktemp -d)" + TEST_SUMMARY="" + TESTSUITE_RESULT=0 + ct_enable_cleanup +} + +# ct_cleanup +# -------------------- +# Cleans up containers used during tests. Stops and removes all containers +# referenced by cid_files in CID_FILE_DIR. Dumps logs if a container exited +# unexpectedly. Removes the cid_files and CID_FILE_DIR as well. +# Uses: $CID_FILE_DIR - path to directory containing cid_files +# Uses: $EXPECTED_EXIT_CODE - expected container exit code +# Uses: $TESTSUITE_RESULT - overall result of all tests +function ct_cleanup() { + echo "$LINE" + echo "Cleaning of testing containers and images started." + echo "It may take a few seconds." 
+  echo "$LINE"
+  ct_clean_app_images
+  ct_clean_containers
+}
+
+# ct_build_image_and_parse_id
+# --------------------
+# Return 0 if build was successful, 1 otherwise
+# Uses: $1 - path to dockerfile
+# Uses: $2 - build params
+# Uses: $APP_IMAGE_ID - sets the app image id value to this variable
+# this should be replaced by the --iidfile parameter
+# when it becomes supported by all versions of podman and docker that we support
+ct_build_image_and_parse_id() {
+  local tmpdir
+  local log_file
+  local ret_val
+  local dockerfile
+  local command
+  local pid_build
+  local pid_sleep
+  local sleep_time
+  log_file="$(mktemp)"
+  sleep_time="10m"
+  [ -n "$1" ] && dockerfile="-f $1"
+  command="$(echo "docker build --no-cache $dockerfile $2" | tr -d "'")"
+  # running command in subshell, the subshell in background, storing pid to variable
+  (
+    $command > "$log_file" 2>&1
+  ) & pid_build=$!
+  # creating second subshell with trap function on ALRM signal
+  # the subshell sleeps for 10m, then kills the first subshell
+  (
+    trap 'exit 0' ALRM; sleep "$sleep_time" && kill $pid_build
+  ) & pid_sleep=$!
+  # waiting for build subshell to finish, either with success, or killed from sleep subshell
+  wait $pid_build
+  ret_val=$?
+  # send ALRM signal to the sleep subshell, so it exits even in case the 10mins
+  # not yet passed.
If the kill was successful (the wait subshell received ALRM signal) + # then the build was not finished yet, so the return value is set to 1 + kill -s ALRM $pid_sleep 2>/dev/null || ret_val=1 + + if [ $ret_val -eq 0 ]; then + APP_IMAGE_ID="$(tail -n 1 "$log_file")" + fi + + cat "$log_file" ; rm -r "$log_file" + return "$ret_val" +} + +# ct_container_running +# -------------------- +# Return 0 if given container is in running state +# Uses: $1 - container id to check +function ct_container_running() { + local running + running="$(docker inspect -f '{{.State.Running}}' "$1")" + [ "$running" = "true" ] || return 1 +} + +# ct_container_exists +# -------------------- +# Return 0 if given container exists +# Uses: $1 - container id to check +function ct_container_exists() { + local exists + exists="$(docker ps -q -a -f "id=$1")" + [ -n "$exists" ] || return 1 +} + +# ct_clean_app_images +# -------------------- +# Cleans up application images referenced by APP_ID_FILE_DIR +# Uses: $APP_ID_FILE_DIR - path to directory containing image ID files +function ct_clean_app_images() { + local image + if [[ ! -d "${APP_ID_FILE_DIR:-}" ]]; then + echo "The \$APP_ID_FILE_DIR=$APP_ID_FILE_DIR is not created. App cleaning is to be skipped." + return 0 + fi; + echo "Examining image ID files in \$APP_ID_FILE_DIR=$APP_ID_FILE_DIR" + for file in "${APP_ID_FILE_DIR:?}"/*; do + image="$(cat "$file")" + docker inspect "$image" > /dev/null 2>&1 || continue + containers="$(docker ps -q -a -f ancestor="$image")" + [[ -z "$containers" ]] || docker rm -f "$containers" 2>/dev/null + docker rmi -f "$image" + done + rm -fr "$APP_ID_FILE_DIR" +} + +# ct_clean_containers +# -------------------- +# Cleans up containers referenced by CID_FILE_DIR +# Uses: $CID_FILE_DIR - path to directory containing cid_files +function ct_clean_containers() { + if [[ -z ${CID_FILE_DIR:-} ]]; then + echo "The \$CID_FILE_DIR is not set. Container cleaning is to be skipped." 
+ return + fi; + + echo "Examining CID files in \$CID_FILE_DIR=$CID_FILE_DIR" + for cid_file in "$CID_FILE_DIR"/* ; do + [ -f "$cid_file" ] || continue + local container + container=$(cat "$cid_file") + + ct_container_exists "$container" || continue + + echo "Stopping and removing container $container..." + if ct_container_running "$container"; then + docker stop "$container" + fi + + exit_status=$(docker inspect -f '{{.State.ExitCode}}' "$container") + if [ "$exit_status" != "$EXPECTED_EXIT_CODE" ]; then + echo "Dumping logs for $container" + docker logs "$container" + fi + docker rm -v "$container" + rm -f "$cid_file" + done + + rm -rf "$CID_FILE_DIR" +} + +# ct_show_results +# --------------- +# Prints results of all test cases that are stored into TEST_SUMMARY variable. +# Uses: $IMAGE_NAME - name of the tested container image +# Uses: $TEST_SUMMARY - text info about test-cases +# Uses: $TESTSUITE_RESULT - overall result of all tests +function ct_show_results() { + echo "$LINE" + #shellcheck disable=SC2153 + echo "Tests were run for image ${IMAGE_NAME}" + echo "$LINE" + echo "Test cases results:" + echo + echo "${TEST_SUMMARY:-}" + + if [ -n "${TESTSUITE_RESULT:-}" ] ; then + if [ "$TESTSUITE_RESULT" -eq 0 ] ; then + # shellcheck disable=SC2153 + echo "Tests for ${IMAGE_NAME} succeeded." + else + # shellcheck disable=SC2153 + echo "Tests for ${IMAGE_NAME} failed." + fi + fi +} + +# ct_enable_cleanup +# -------------------- +# Enables automatic container cleanup after tests. +function ct_enable_cleanup() { + trap ct_trap_on_exit EXIT + trap ct_trap_on_sigint SIGINT +} + +# ct_trap_on_exit +# -------------------- +function ct_trap_on_exit() { + local exit_code=$? + [ "$exit_code" -eq 130 ] && return # we do not want to catch SIGINT here + # We should not really care about what the script returns + # as the tests are constructed the way they never exit the shell. 
+ # The check is added just to be sure that we catch some not expected behavior + # if any is added in the future. + echo "Tests finished with EXIT=$exit_code" + [ $exit_code -eq 0 ] && exit_code="${TESTSUITE_RESULT:-0}" + [ -n "${DEBUG:-}" ] || ct_show_resources + ct_cleanup + ct_show_results + exit "$exit_code" +} + +# ct_trap_on_sigint +# -------------------- +function ct_trap_on_sigint() { + echo "Tests were stopped by SIGINT signal" + ct_cleanup + ct_show_results + exit 130 +} + +# ct_pull_image +# ------------- +# Function pull an image before tests execution +# Argument: image_name - string containing the public name of the image to pull +# Argument: exit - in case "true" is defined and pull failed, then script has to exit with 1 and no tests are executed +# Argument: loops - how many times to pull image in case of failure +# Function returns either 0 in case of pull was successful +# Or the test suite exit with 1 in case of pull error +function ct_pull_image() { + local image_name="$1"; [[ $# -gt 0 ]] && shift + local exit_variable=${1:-"false"}; [[ $# -gt 0 ]] && shift + local loops=${1:-10} + local loop=0 + + # Let's try to pull image. + echo "-> Pulling image $image_name ..." + # Sometimes in Fedora case it fails with HTTP 50X + # Check if the image is available locally and try to pull it if it is not + if [[ "$(docker images -q "$image_name" 2>/dev/null)" != "" ]]; then + echo "The image $image_name is already pulled." + return 0 + fi + + # Try pulling the image to see if it is accessible + # WORKAROUND: Since Fedora registry sometimes fails randomly, let's try it more times + while ! docker pull "$image_name"; do + ((loop++)) || : + echo "Pulling image $image_name failed." + if [ "$loop" -gt "$loops" ]; then + echo "Pulling of image $image_name failed $loops times in a row. Giving up." + echo "!!! ERROR with pulling image $image_name !!!!" 
+ # shellcheck disable=SC2268 + if [[ x"$exit_variable" == x"false" ]]; then + return 1 + else + exit 1 + fi + fi + echo "Let's wait $((loop*5)) seconds and try again." + sleep "$((loop*5))" + done +} + + +# ct_check_envs_set env_filter check_envs loop_envs [env_format] +# -------------------- +# Compares values from one list of environment variable definitions against such list, +# checking if the values are present and have a specific format. +# Argument: env_filter - optional string passed to grep used for +# choosing which variables to filter out in env var lists. +# Argument: check_envs - list of env var definitions to check values against +# Argument: loop_envs - list of env var definitions to check values for +# Argument: env_format (optional) - format string for bash substring deletion used +# for checking whether the value is contained in check_envs. +# Defaults to: "*VALUE*", VALUE string gets replaced by actual value from loop_envs +function ct_check_envs_set { + local env_filter check_envs env_format + env_filter=$1; shift + check_envs=$1; shift + loop_envs=$1; shift + env_format=${1:-"*VALUE*"} + while read -r variable; do + [ -z "$variable" ] && continue + var_name=$(echo "$variable" | awk -F= '{ print $1 }') + stripped=$(echo "$variable" | awk -F= '{ print $2 }') + filtered_envs=$(echo "$check_envs" | grep "^$var_name=") + [ -z "$filtered_envs" ] && { echo "$var_name not found during \` docker exec\`"; return 1; } + old_IFS=$IFS + # For each such variable compare its content with the `docker exec` result, use `:` as delimiter + IFS=: + for value in $stripped; do + # If the falue checked does not go through env_filter we do not care about it + echo "$value" | grep -q "$env_filter" || continue + # shellcheck disable=SC2295 + if [ -n "${filtered_envs##${env_format//VALUE/$value}}" ]; then + echo " Value $value is missing from variable $var_name" + echo "$filtered_envs" + IFS=$old_IFS + return 1 + fi + done + IFS=$old_IFS + done <<< "$(echo "$loop_envs" 
| grep "$env_filter" | grep -v "^PWD=")" +} + +# ct_get_cid [name] +# -------------------- +# Prints container id from cid_file based on the name of the file. +# Argument: name - name of cid_file where the container id will be stored +# Uses: $CID_FILE_DIR - path to directory containing cid_files +function ct_get_cid() { + local name="$1" ; shift || return 1 + cat "$CID_FILE_DIR/$name" +} + +# ct_get_cip [id] +# -------------------- +# Prints container ip address based on the container id. +# Argument: id - container id +function ct_get_cip() { + local id="$1" ; shift + docker inspect --format='{{.NetworkSettings.IPAddress}}' "$(ct_get_cid "$id")" +} + +# ct_wait_for_cid [cid_file] +# -------------------- +# Holds the execution until the cid_file is created. Usually run after container +# creation. +# Argument: cid_file - name of the cid_file that should be created +function ct_wait_for_cid() { + local cid_file=$1 + local max_attempts=10 + local sleep_time=1 + local attempt=1 + local result=1 + while [ $attempt -le $max_attempts ]; do + [ -f "$cid_file" ] && [ -s "$cid_file" ] && return 0 + echo "Waiting for container start... $attempt" + attempt=$(( attempt + 1 )) + sleep $sleep_time + done + return 1 +} + +# ct_assert_container_creation_fails [container_args] +# -------------------- +# The invocation of docker run should fail based on invalid container_args +# passed to the function. Returns 0 when container fails to start properly. 
+# Argument: container_args - all arguments are passed directly to docker run +# Uses: $CID_FILE_DIR - path to directory containing cid_files +function ct_assert_container_creation_fails() { + local ret=0 + local max_attempts=10 + local attempt=1 + local cid_file=assert + local old_container_args="${CONTAINER_ARGS-}" + # we really work with CONTAINER_ARGS as with a string + # shellcheck disable=SC2124 + CONTAINER_ARGS="$@" + if ct_create_container "$cid_file" ; then + local cid + cid=$(ct_get_cid "$cid_file") + + while [ "$(docker inspect -f '{{.State.Running}}' "$cid")" == "true" ] ; do + sleep 2 + attempt=$(( attempt + 1 )) + if [ "$attempt" -gt "$max_attempts" ]; then + docker stop "$cid" + ret=1 + break + fi + done + exit_status=$(docker inspect -f '{{.State.ExitCode}}' "$cid") + if [ "$exit_status" == "0" ]; then + ret=1 + fi + docker rm -v "$cid" + rm "$CID_FILE_DIR/$cid_file" + fi + [ -n "$old_container_args" ] && CONTAINER_ARGS="$old_container_args" + return "$ret" +} + +# ct_create_container [name, command] +# -------------------- +# Creates a container using the IMAGE_NAME and CONTAINER_ARGS variables. Also +# stores the container id to a cid_file located in the CID_FILE_DIR, and waits +# for the creation of the file.
+# Argument: name - name of cid_file where the container id will be stored +# Argument: command - optional command to be executed in the container +# Uses: $CID_FILE_DIR - path to directory containing cid_files +# Uses: $CONTAINER_ARGS - optional arguments passed directly to docker run +# Uses: $IMAGE_NAME - name of the image being tested +function ct_create_container() { + local cid_file="$CID_FILE_DIR/$1" ; shift + # create container with a cidfile in a directory for cleanup + # shellcheck disable=SC2086,SC2153 + docker run --cidfile="$cid_file" -d ${CONTAINER_ARGS:-} "$IMAGE_NAME" "$@" + ct_wait_for_cid "$cid_file" || return 1 + : "Created container $(cat "$cid_file")" +} + +# ct_scl_usage_old [name, command, expected] +# -------------------- +# Tests three ways of running the SCL, by looking for an expected string +# in the output of the command +# Argument: name - name of cid_file where the container id will be stored +# Argument: command - executed inside the container +# Argument: expected - string that is expected to be in the command output +# Uses: $CID_FILE_DIR - path to directory containing cid_files +# Uses: $IMAGE_NAME - name of the image being tested +function ct_scl_usage_old() { + local name="$1" + local command="$2" + local expected="$3" + local out="" + : " Testing the image SCL enable" + out=$(docker run --rm "${IMAGE_NAME}" /bin/bash -c "${command}") + if ! echo "${out}" | grep -q "${expected}"; then + echo "ERROR[/bin/bash -c \"${command}\"] Expected '${expected}', got '${out}'" >&2 + return 1 + fi + out=$(docker exec "$(ct_get_cid "$name")" /bin/bash -c "${command}" 2>&1) + if ! echo "${out}" | grep -q "${expected}"; then + echo "ERROR[exec /bin/bash -c \"${command}\"] Expected '${expected}', got '${out}'" >&2 + return 1 + fi + out=$(docker exec "$(ct_get_cid "$name")" /bin/sh -ic "${command}" 2>&1) + if ! 
echo "${out}" | grep -q "${expected}"; then + echo "ERROR[exec /bin/sh -ic \"${command}\"] Expected '${expected}', got '${out}'" >&2 + return 1 + fi +} + +# ct_doc_content_old [strings] +# -------------------- +# Looks for occurrence of strings in the documentation files and checks +# the format of the files. Files examined: help.1 +# Argument: strings - strings expected to appear in the documentation +# Uses: $IMAGE_NAME - name of the image being tested +function ct_doc_content_old() { + local tmpdir + tmpdir=$(mktemp -d) + local f + : " Testing documentation in the container image" + # Extract the help files from the container + # shellcheck disable=SC2043 + for f in help.1 ; do + docker run --rm "${IMAGE_NAME}" /bin/bash -c "cat /${f}" >"${tmpdir}/$(basename "${f}")" + # Check whether the files contain some important information + for term in "$@" ; do + if ! grep -E -q -e "${term}" "${tmpdir}/$(basename "${f}")" ; then + echo "ERROR: File /${f} does not include '${term}'." >&2 + return 1 + fi + done + # Check whether the files use the correct format + for term in TH PP SH ; do + if ! grep -q "^\.${term}" "${tmpdir}/help.1" ; then + echo "ERROR: /help.1 is probably not in troff or groff format, since '${term}' is missing." >&2 + return 1 + fi + done + done + : " Success!" +} + +# full_ca_file_path +# Return string for full path to CA file +function full_ca_file_path() +{ + echo "/etc/pki/ca-trust/source/anchors/RH-IT-Root-CA.crt" +} +# ct_mount_ca_file +# ------------------ +# Check if /etc/pki/ca-trust/source/anchors/RH-IT-Root-CA.crt file exists +# return mount string for containers or empty string +function ct_mount_ca_file() +{ + # mount CA file only if NPM_REGISTRY variable is present.
+ local mount_parameter="" + if [ -n "$NPM_REGISTRY" ] && [ -f "$(full_ca_file_path)" ]; then + mount_parameter="-v $(full_ca_file_path):$(full_ca_file_path):Z" + fi + echo "$mount_parameter" +} + +# ct_build_s2i_npm_variables URL_TO_NPM_JS_SERVER +# ------------------------------------------ +# Function returns -e NPM_MIRROR and -v MOUNT_POINT_FOR_CAFILE +# or empty string +function ct_build_s2i_npm_variables() +{ + npm_variables="" + if [ -n "$NPM_REGISTRY" ] && [ -f "$(full_ca_file_path)" ]; then + npm_variables="-e NPM_MIRROR=$NPM_REGISTRY $(ct_mount_ca_file)" + fi + echo "$npm_variables" +} + +# ct_npm_works +# -------------------- +# Checks existance of the npm tool and runs it. +function ct_npm_works() { + local tmpdir + local cid_file + tmpdir=$(mktemp -d) + : " Testing npm in the container image" + cid_file="$(mktemp --dry-run --tmpdir="${CID_FILE_DIR}")" + if ! docker run --rm "${IMAGE_NAME}" /bin/bash -c "npm --version" >"${tmpdir}/version" ; then + echo "ERROR: 'npm --version' does not work inside the image ${IMAGE_NAME}." >&2 + return 1 + fi + + # shellcheck disable=SC2046 + docker run -d $(ct_mount_ca_file) --rm --cidfile="$cid_file" "${IMAGE_NAME}-testapp" + + # Wait for the container to write it's CID file + ct_wait_for_cid "$cid_file" || return 1 + + if ! docker exec "$(cat "$cid_file")" /bin/bash -c "npm --verbose install jquery && test -f node_modules/jquery/src/jquery.js" >"${tmpdir}/jquery" 2>&1 ; then + echo "ERROR: npm could not install jquery inside the image ${IMAGE_NAME}." >&2 + cat "${tmpdir}/jquery" + return 1 + fi + + if [ -n "$NPM_REGISTRY" ] && [ -f "$(full_ca_file_path)" ]; then + if ! grep -qo "$NPM_REGISTRY" "${tmpdir}/jquery"; then + echo "ERROR: Internal repository is NOT set. Even it is requested." + return 1 + fi + fi + + if [ -f "$cid_file" ]; then + docker stop "$(cat "$cid_file")" + fi + : " Success!" 
+} + +# ct_binary_found_from_df binary [path] +# -------------------- +# Checks if a binary can be found in PATH during Dockerfile build +# Argument: binary - name of the binary to test accessibility for +# Argument: path - optional path in which the binary should reside in +# /opt/rh by default +function ct_binary_found_from_df() { + local tmpdir + local id_file + local binary=$1; shift + local binary_path=${1:-"^/opt/rh"} + tmpdir=$(mktemp -d) + : " Testing $binary in build from Dockerfile" + + # Create Dockerfile that looks for the binary + cat <<EOF >"$tmpdir/Dockerfile" +FROM $IMAGE_NAME +RUN command -v $binary | grep "$binary_path" +EOF + # Build an image, looking for expected path in the output + ct_build_image_and_parse_id "$tmpdir/Dockerfile" "$tmpdir" + #shellcheck disable=SC2181 + if [ $? -ne 0 ]; then + echo " ERROR: Failed to find $binary in \$PATH!" >&2 + return 1 + fi + id_file="${APP_ID_FILE_DIR:?}"/"$RANDOM" + echo "$APP_IMAGE_ID" > "$id_file" +} + +# ct_check_exec_env_vars [env_filter] +# -------------------- +# Checks if all relevant environment variables from `docker run` +# can be found in `docker exec` as well. +# Argument: env_filter - optional string passed to grep used for +# choosing which variables to check in the test case.
+# Defaults to X_SCLS and variables containing /opt/app-root, /opt/rh +# Uses: $CID_FILE_DIR - path to directory containing cid_files +# Uses: $IMAGE_NAME - name of the image being tested +function ct_check_exec_env_vars() { + local tmpdir exec_envs cid old_IFS env_filter + local var_name stripped filtered_envs run_envs + env_filter=${1:-"^X_SCLS=\|/opt/rh\|/opt/app-root"} + tmpdir=$(mktemp -d) + CID_FILE_DIR=${CID_FILE_DIR:-$(mktemp -d)} + # Get environment variables from `docker run` + run_envs=$(docker run --rm "$IMAGE_NAME" /bin/bash -c "env") + # Get environment variables from `docker exec` + ct_create_container "test_exec_envs" bash -c "sleep 1000" >/dev/null + cid=$(ct_get_cid "test_exec_envs") + exec_envs=$(docker exec "$cid" env) + # Filter out variables we are not interested in + # Always check X_SCLS, ignore PWD + # Check variables from `docker run` that have alternative paths inside (/opt/rh, /opt/app-root) + ct_check_envs_set "$env_filter" "$exec_envs" "$run_envs" "*VALUE*" || return 1 + echo " All values present in \`docker exec\`" + return 0 +} + +# ct_check_scl_enable_vars [env_filter] +# -------------------- +# Checks if all relevant environment variables from `docker run` +# are set twice after a second call of `scl enable $SCLS`. +# Argument: env_filter - optional string passed to grep used for +# choosing which variables to check in the test case. 
+# Defaults to paths containing enabled SCLS in the image +# Uses: $IMAGE_NAME - name of the image being tested +function ct_check_scl_enable_vars() { + local tmpdir exec_envs cid old_IFS env_filter enabled_scls + local var_name stripped filtered_envs loop_envs + env_filter=$1 + tmpdir=$(mktemp -d) + enabled_scls=$(docker run --rm "$IMAGE_NAME" /bin/bash -c "echo \$X_SCLS") + if [ -z "$env_filter" ]; then + for scl in $enabled_scls; do + [ -z "$env_filter" ] && env_filter="/$scl" && continue + # env_filter not empty, append to the existing list + env_filter="$env_filter|/$scl" + done + fi + # Get environment variables from `docker run` + loop_envs=$(docker run --rm "$IMAGE_NAME" /bin/bash -c "env") + run_envs=$(docker run --rm "$IMAGE_NAME" /bin/bash -c "X_SCLS= scl enable $enabled_scls env") + # Check if the values are set twice in the second set of envs + ct_check_envs_set "$env_filter" "$run_envs" "$loop_envs" "*VALUE*VALUE*" || return 1 + echo " All scl_enable values present" + return 0 +} + +# ct_path_append PATH_VARNAME DIRECTORY +# ------------------------------------- +# Append DIRECTORY to VARIABLE of name PATH_VARNAME, the VARIABLE must consist +# of colon-separated list of directories. +ct_path_append () +{ + if eval "test -n \"\${$1-}\""; then + eval "$1=\$2:\$$1" + else + eval "$1=\$2" + fi +} + + +# ct_path_foreach PATH ACTION [ARGS ...] +# -------------------------------------- +# For each DIR in PATH execute ACTION (path is colon separated list of +# directories). The particular calls to ACTION will look like +# '$ ACTION directory [ARGS ...]' +ct_path_foreach () +{ + local dir dirlist action save_IFS + save_IFS=$IFS + IFS=: + dirlist=$1 + action=$2 + shift 2 + for dir in $dirlist; do "$action" "$dir" "$@" ; done + IFS=$save_IFS +} + + +# ct_gen_self_signed_cert_pem +# --------------------------- +# Generates a self-signed PEM certificate pair into specified directory. 
+# Argument: output_dir - output directory path +# Argument: base_name - base name of the certificate files +# Resulted files will be those: +# /-cert-selfsigned.pem -- public PEM cert +# /-key.pem -- PEM private key +ct_gen_self_signed_cert_pem() { + local output_dir=$1 ; shift + local base_name=$1 ; shift + mkdir -p "${output_dir}" + openssl req -newkey rsa:2048 -nodes -keyout "${output_dir}"/"${base_name}"-key.pem -subj '/C=GB/ST=Berkshire/L=Newbury/O=My Server Company' > "${base_name}"-req.pem + openssl req -new -x509 -nodes -key "${output_dir}"/"${base_name}"-key.pem -batch > "${output_dir}"/"${base_name}"-cert-selfsigned.pem +} + +# ct_obtain_input FILE|DIR|URL +# -------------------- +# Either copies a file or a directory to a tmp location for local copies, or +# downloads the file from remote location. +# Resulted file path is printed, so it can be later used by calling function. +# Arguments: input - local file, directory or remote URL +function ct_obtain_input() { + local input=$1 + local extension="${input##*.}" + + # Try to use same extension for the temporary file if possible + [[ "${extension}" =~ ^[a-z0-9]*$ ]] && extension=".${extension}" || extension="" + + local output + output=$(mktemp "/var/tmp/test-input-XXXXXX$extension") + if [ -f "${input}" ] ; then + cp -f "${input}" "${output}" + elif [ -d "${input}" ] ; then + rm -f "${output}" + cp -r -LH "${input}" "${output}" + elif echo "${input}" | grep -qe '^http\(s\)\?://' ; then + curl "${input}" > "${output}" + else + echo "ERROR: file type not known: ${input}" >&2 + return 1 + fi + echo "${output}" +} + +# ct_test_response +# ---------------- +# Perform GET request to the application container, checks output with +# a reg-exp and HTTP response code. 
+# Argument: url - request URL path +# Argument: expected_code - expected HTTP response code +# Argument: body_regexp - PCRE regular expression that must match the response body +# Argument: max_attempts - Optional number of attempts (default: 20), three seconds sleep between +# Argument: ignore_error_attempts - Optional number of attempts when we ignore error output (default: 10) +ct_test_response() { + local url="$1" + local expected_code="$2" + local body_regexp="$3" + local max_attempts=${4:-20} + local ignore_error_attempts=${5:-10} + + echo " Testing the HTTP(S) response for <${url}>" + local sleep_time=3 + local attempt=1 + local result=1 + local status + local response_code + local response_file + response_file=$(mktemp /tmp/ct_test_response_XXXXXX) + while [ "${attempt}" -le "${max_attempts}" ]; do + echo "Trying to connect ... ${attempt}" + curl --connect-timeout 10 -s -w '%{http_code}' "${url}" >"${response_file}" && status=0 || status=1 + if [ "${status}" -eq 0 ]; then + response_code=$(tail -c 3 "${response_file}") + if [ "${response_code}" -eq "${expected_code}" ]; then + result=0 + fi + grep -qP -e "${body_regexp}" "${response_file}" || result=1; + # Some services return 40x code until they are ready, so let's give them + # some chance and not end with failure right away + # Do not wait if we already have expected outcome though + if [ "${result}" -eq 0 ] || [ "${attempt}" -gt "${ignore_error_attempts}" ] || [ "${attempt}" -eq "${max_attempts}" ] ; then + break + fi + fi + attempt=$(( attempt + 1 )) + sleep "${sleep_time}" + done + rm -f "${response_file}" + return "${result}" +} + +# ct_registry_from_os OS +# ---------------- +# Transform operating system string [os] into registry url +# Argument: OS - string containing the os version +ct_registry_from_os() { + local registry="" + case $1 in + rhel*) + registry=registry.redhat.io + ;; + *) + registry=quay.io + ;; + esac + echo "$registry" +} + + # ct_get_public_image_name OS BASE_IMAGE_NAME VERSION 
+# ---------------- +# Transform the arguments into public image name +# Argument: OS - string containing the os version +# Argument: BASE_IMAGE_NAME - string containing the base name of the image as defined in the Makefile +# Argument: VERSION - string containing the version of the image as defined in the Makefile +ct_get_public_image_name() { + local os=$1; shift + local base_image_name=$1; shift + local version=$1; shift + + local public_image_name + local registry + + registry=$(ct_registry_from_os "$os") + if [ "$os" == "rhel8" ]; then + public_image_name=$registry/rhel8/$base_image_name-${version//./} + elif [ "$os" == "rhel9" ]; then + public_image_name=$registry/rhel9/$base_image_name-${version//./} + elif [ "$os" == "c9s" ]; then + public_image_name=$registry/sclorg/$base_image_name-${version//./}-c9s + elif [ "$os" == "c10s" ]; then + public_image_name=$registry/sclorg/$base_image_name-${version//./}-c10s + fi + + echo "$public_image_name" +} + +# ct_assert_cmd_success CMD +# ---------------- +# Evaluates [cmd] and fails if it does not succeed. +# Argument: CMD - Command to be run +function ct_assert_cmd_success() { + echo "Checking '$*' for success ..." + # shellcheck disable=SC2294 + if ! eval "$@" &>/dev/null; then + echo " FAIL" + return 1 + fi + echo " PASS" + return 0 +} + +# ct_assert_cmd_failure CMD +# ---------------- +# Evaluates [cmd] and fails if it succeeds. +# Argument: CMD - Command to be run +function ct_assert_cmd_failure() { + echo "Checking '$*' for failure ..." + # shellcheck disable=SC2294 + if eval "$@" &>/dev/null; then + echo " FAIL" + return 1 + fi + echo " PASS" + return 0 +} + + +# ct_random_string [LENGTH=10] +# ---------------------------- +# Generate pseudorandom alphanumeric string of LENGTH bytes, the +# default length is 10. The string is printed on stdout. 
+ct_random_string() +( + export LC_ALL=C + dd if=/dev/urandom count=1 bs=10k 2>/dev/null \ + | tr -dc 'a-z0-9' \ + | fold -w "${1-10}" \ + | head -n 1 +) + +# ct_s2i_usage IMG_NAME [S2I_ARGS] +# ---------------------------- +# Create a container and run the usage script inside +# Argument: IMG_NAME - name of the image to be used for the container run +# Argument: S2I_ARGS - Additional list of source-to-image arguments, currently unused. +ct_s2i_usage() +{ + local img_name=$1; shift + local s2i_args="$*"; + local usage_command="/usr/libexec/s2i/usage" + docker run --rm "$img_name" bash -c "$usage_command" +} + +# ct_s2i_build_as_df APP_PATH SRC_IMAGE DST_IMAGE [S2I_ARGS] +# ---------------------------- +# Create a new s2i app image from local sources in a similar way as source-to-image would have used. +# This function is wrapper for ct_s2i_build_as_df_build_args in case user do not want to add build args +# This function is used in all https://github.com/sclorg/*-container test cases and we do not +# want to break functionality +# Argument: APP_PATH - local path to the app sources to be used in the test +# Argument: SRC_IMAGE - image to be used as a base for the s2i build +# Argument: DST_IMAGE - image name to be used during the tagging of the s2i build result +# Argument: S2I_ARGS - Additional list of source-to-image arguments. +# Only used to check for pull-policy=never and environment variable definitions. +ct_s2i_build_as_df() +{ + local app_path=$1; shift + local src_image=$1; shift + local dst_image=$1; shift + local s2i_args="$*"; + + ct_s2i_build_as_df_build_args "$app_path" "$src_image" "$dst_image" "" "$s2i_args" +} + +# ct_s2i_build_as_df_build_args APP_PATH SRC_IMAGE DST_IMAGE BUILD_ARGS [S2I_ARGS] +# ---------------------------- +# Create a new s2i app image from local sources in a similar way as source-to-image would have used. 
+# Argument: APP_PATH - local path to the app sources to be used in the test +# Argument: SRC_IMAGE - image to be used as a base for the s2i build +# Argument: DST_IMAGE - image name to be used during the tagging of the s2i build result +# Argument: BUILD_ARGS - Build arguments to be used in the s2i build +# Argument: S2I_ARGS - Additional list of source-to-image arguments. +# Only used to check for pull-policy=never and environment variable definitions. +ct_s2i_build_as_df_build_args() +{ + local app_path=$1; shift + local src_image=$1; shift + local dst_image=$1; shift + local build_args=$1; shift + local s2i_args="$*"; + local local_app=upload/src/ + local local_scripts=upload/scripts/ + local user_id= + local df_name= + local tmpdir= + local incremental=false + local mount_options=() + local id_file + + # Run the entire thing inside a subshell so that we do not leak shell options outside of the function + ( + # FIXME: removed temporarily, need proper fixing + # Error out if any part of the build fails + # set -e + + # Use /tmp to not pollute cwd + tmpdir=$(mktemp -d) + df_name=$(mktemp -p "$tmpdir" Dockerfile.XXXX) + cd "$tmpdir" || return 1 + # Check if the image is available locally and try to pull it if it is not + docker images "$src_image" &>/dev/null || echo "$s2i_args" | grep -q "pull-policy=never" || docker pull "$src_image" + user=$(docker inspect -f "{{.Config.User}}" "$src_image") + # Default to root if no user is set by the image + user=${user:-0} + # run the user through the image in case it is non-numeric or does not exist + if ! user_id=$(ct_get_uid_from_image "$user" "$src_image"); then + echo "Terminating s2i build." 
+ return 1 + fi + + echo "$s2i_args" | grep -q "\--incremental" && incremental=true + if $incremental; then + inc_tmp=$(mktemp -d --tmpdir incremental.XXXX) + setfacl -m "u:$user_id:rwx" "$inc_tmp" + # Check if the image exists, build should fail (for testing use case) if it does not + docker images "$dst_image" &>/dev/null || (echo "Image $dst_image not found."; false) + # Run the original image with a mounted in volume and get the artifacts out of it + cmd="if [ -s /usr/libexec/s2i/save-artifacts ]; then /usr/libexec/s2i/save-artifacts > \"$inc_tmp/artifacts.tar\"; else touch \"$inc_tmp/artifacts.tar\"; fi" + docker run --rm -v "$inc_tmp:$inc_tmp:Z" "$dst_image" bash -c "$cmd" + # Move the created content into the $tmpdir for the build to pick it up + mv "$inc_tmp/artifacts.tar" "$tmpdir/" + fi + # Strip file:// from APP_PATH and copy its contents into current context + mkdir -p "$local_app" + cp -r "${app_path/file:\/\//}/." "$local_app" + [ -d "$local_app/.s2i/bin/" ] && mv "$local_app/.s2i/bin" "$local_scripts" + # Create a Dockerfile named df_name and fill it with proper content + #FIXME: Some commands could be combined into a single layer but not sure if worth the trouble for testing purposes + cat <<EOF >"$df_name" +FROM $src_image +LABEL "io.openshift.s2i.build.image"="$src_image" \\ + "io.openshift.s2i.build.source-location"="$app_path" +USER root +COPY $local_app /tmp/src +EOF + [ -d "$local_scripts" ] && echo "COPY $local_scripts /tmp/scripts" >> "$df_name" && + echo "RUN chown -R $user_id:0 /tmp/scripts" >>"$df_name" + echo "RUN chown -R $user_id:0 /tmp/src" >>"$df_name" + # Check for custom environment variables inside .s2i/ folder + if [ -e "$local_app/.s2i/environment" ]; then + # Remove any comments and add the contents as ENV commands to the Dockerfile + sed '/^\s*#.*$/d' "$local_app/.s2i/environment" | while read -r line; do + echo "ENV $line" >>"$df_name" + done + fi + # Filter out env var definitions from $s2i_args and create Dockerfile ENV commands
out of them + echo "$s2i_args" | grep -o -e '\(-e\|--env\)[[:space:]=]\S*=\S*' | sed -e 's/-e /ENV /' -e 's/--env[ =]/ENV /' >>"$df_name" + # Check if CA autority is present on host and add it into Dockerfile + [ -f "$(full_ca_file_path)" ] && echo "RUN cd /etc/pki/ca-trust/source/anchors && update-ca-trust extract" >>"$df_name" + + # Add in artifacts if doing an incremental build + if $incremental; then + { echo "RUN mkdir /tmp/artifacts" + echo "ADD artifacts.tar /tmp/artifacts" + echo "RUN chown -R $user_id:0 /tmp/artifacts" ; } >>"$df_name" + fi + + echo "USER $user_id" >>"$df_name" + # If exists, run the custom assemble script, else default to /usr/libexec/s2i/assemble + if [ -x "$local_scripts/assemble" ]; then + echo "RUN /tmp/scripts/assemble" >>"$df_name" + else + echo "RUN /usr/libexec/s2i/assemble" >>"$df_name" + fi + # If exists, set the custom run script as CMD, else default to /usr/libexec/s2i/run + if [ -x "$local_scripts/run" ]; then + echo "CMD /tmp/scripts/run" >>"$df_name" + else + echo "CMD /usr/libexec/s2i/run" >>"$df_name" + fi + + # Check if -v parameter is present in s2i_args and add it into docker build command + read -ra mount_options <<< "$(echo "$s2i_args" | grep -o -e '\(-v\)[[:space:]]\.*\S*' || true)" + + # Run the build and tag the result + ct_build_image_and_parse_id "$df_name" "${mount_options[*]+${mount_options[*]}} -t $dst_image . $build_args" + #shellcheck disable=SC2181 + if [ "$?" -ne 0 ]; then + echo " ERROR: Failed to to build $df_name" >&2 + return 1 + fi + id_file="${APP_ID_FILE_DIR:?}"/"$RANDOM" + echo "$APP_IMAGE_ID" > "$id_file" + ) +} + +# ct_s2i_multistage_build APP_PATH SRC_IMAGE DST_IMAGE SEC_IMAGE [S2I_ARGS] +# ---------------------------- +# Create a new s2i app image from local sources in a similar way as source-to-image would have used. 
+# Argument: APP_PATH - local path to the app sources to be used in the test +# Argument: SRC_IMAGE - image to be used as a base for the s2i build process +# Argument: SEC_IMAGE - image to be used as the base for the result of the build process +# Argument: DST_IMAGE - image name to be used during the tagging of the s2i build result +# Argument: S2I_ARGS - Additional list of source-to-image arguments. +# Only used to check for environment variable definitions. +ct_s2i_multistage_build() { + + local app_path=$1; shift + local src_image=$1; shift + local sec_image=$1; shift + local dst_image=$1; shift + local s2i_args=$*; + local local_app="app-src" + local user_id= + local mount_options=() + local id_file + + + # Run the entire thing inside a subshell so that we do not leak shell options outside of the function + ( + # FIXME: removed temporarily, need proper fixing + # Error out if any part of the build fails + # set -e + + user=$(docker inspect -f "{{.Config.User}}" "$src_image") + # Default to root if no user is set by the image + user=${user:-0} + # run the user through the image in case it is non-numeric or does not exist + if ! user_id=$(ct_get_uid_from_image "$user" "$src_image"); then + echo "Terminating s2i build." + return 1 + fi + + # Use /tmp to not pollute cwd + tmpdir=$(mktemp -d) + df_name=$(mktemp -p "$tmpdir" Dockerfile.XXXX) + cd "$tmpdir" || return 1 + + # If the path exists on the local host, copy it into the directory for the build + # Otherwise handle it as a link to a git repository + if [ -e "${app_path/file:\/\//}/." ] ; then + mkdir -p "$local_app" + # Strip file:// from APP_PATH and copy its contents into current context + cp -r "${app_path/file:\/\//}/." 
"$local_app" + + else + ct_clone_git_repository "$app_path" "$local_app" + fi + + cat <"$df_name" +# First stage builds the application +FROM $src_image as builder +# Add application sources to a directory that the assemble script expects them +# and set permissions so that the container runs without root access +USER 0 +ADD app-src /tmp/src +RUN chown -R 1001:0 /tmp/src +$(echo "$s2i_args" | grep -o -e '\(-e\|--env\)[[:space:]=]\S*=\S*' | sed -e 's/-e /ENV /' -e 's/--env[ =]/ENV /') +# Check if CA autority is present on host and add it into Dockerfile +$([ -f "$(full_ca_file_path)" ] && echo "RUN cd /etc/pki/ca-trust/source/anchors && update-ca-trust extract") +USER $user_id +# Install the dependencies +RUN /usr/libexec/s2i/assemble +# Second stage copies the application to the minimal image +FROM $sec_image +# Copy the application source and build artifacts from the builder image to this one +COPY --from=builder \$HOME \$HOME +# Set the default command for the resulting image +CMD /usr/libexec/s2i/run +EOF + + # Check if -v parameter is present in s2i_args and add it into docker build command + read -ra mount_options <<< "$(echo "$s2i_args" | grep -o -e '\(-v\)[[:space:]]\.*\S*' || true)" + + ct_build_image_and_parse_id "$df_name" "${mount_options[*]+${mount_options[*]}} -t $dst_image ." + #shellcheck disable=SC2181 + if [ "$?" -ne 0 ]; then + echo " ERROR: Failed to to build $df_name" >&2 + return 1 + fi + id_file="${APP_ID_FILE_DIR:?}"/"$RANDOM" + echo "$APP_IMAGE_ID" > "$id_file" + ) +} + +# ct_check_image_availability PUBLIC_IMAGE_NAME +# ---------------------------- +# Pull an image from the public repositories to see if the image is already available. +# Argument: PUBLIC_IMAGE_NAME - string containing the public name of the image to pull +ct_check_image_availability() { + local public_image_name=$1; + + # Try pulling the image to see if it is accessible + if ! 
ct_pull_image "$public_image_name" &>/dev/null; then + echo "$public_image_name could not be downloaded via 'docker'" + return 1 + fi +} + + +# ct_check_latest_imagestreams +# ----------------------------- +# Check if the latest version present in Makefile in the variable VERSIONS +# is present in all imagestreams. +# Also the latest tag in the imagestreams has to contain the latest version +ct_check_latest_imagestreams() { + local latest_version= + local test_lib_dir= + + # We only maintain imagestreams for RHEL and CentOS (Community) + if [[ "$OS" =~ ^fedora.* ]] ; then + echo "Imagestreams for Fedora are not maintained, skipping ct_check_latest_imagestreams" + return 0 + fi + + # Check only lines which starts with VERSIONS + latest_version=$(grep '^VERSIONS' Makefile | rev | cut -d ' ' -f 1 | rev ) + # Fall back to previous version if the latest is excluded for this OS + [ -f "$latest_version/.exclude-$OS" ] && latest_version=$(grep '^VERSIONS' Makefile | rev | cut -d ' ' -f 2 | rev ) + # Only test the imagestream once, when the version matches + # ignore the SC warning, $VERSION is always available + + test_lib_dir=$(dirname "$(readlink -f "$0")") + python3 "${test_lib_dir}/show_all_imagestreams.py" + # shellcheck disable=SC2153 + if [ "$latest_version" == "$VERSION" ]; then + python3 "${test_lib_dir}/check_imagestreams.py" "$latest_version" + else + echo "Image version $VERSION is not latest, skipping ct_check_latest_imagestreams" + fi +} + +# ct_show_resources +# ---------------- +# Prints the available resources +ct_show_resources() +{ + echo + echo "$LINE" + echo "Resources info:" + echo "Memory:" + free -h + echo "Storage:" + df -h || : + echo "CPU" + lscpu + + echo "$LINE" + echo "Image ${IMAGE_NAME} information:" + echo "$LINE" + echo "Uncompressed size of the image: $(ct_get_image_size_uncompresseed "${IMAGE_NAME}")" + echo "Compressed size of the image: $(ct_get_image_size_compresseed "${IMAGE_NAME}")" + echo +} + +# ct_clone_git_repository +# 
----------------------------- +# Argument: app_url - git URI pointing to a repository, supports "@" to indicate a different branch +# Argument: app_dir (optional) - name of the directory to clone the repository into +ct_clone_git_repository() +{ + local app_url=$1; shift + local app_dir=$1 + + # If app_url contains @, the string after @ is considered + # as a name of a branch to clone instead of the main/master branch + IFS='@' read -ra git_url_parts <<< "${app_url}" + + if [ -n "${git_url_parts[1]}" ]; then + git_clone_cmd="git clone --branch ${git_url_parts[1]} ${git_url_parts[0]} ${app_dir}" + else + git_clone_cmd="git clone ${app_url} ${app_dir}" + fi + + if ! $git_clone_cmd ; then + echo "ERROR: Git repository ${app_url} cannot be cloned into ${app_dir}." + return 1 + fi +} + +# ct_get_uid_from_image +# ----------------------------- +# Argument: user - user to get uid for inside the image +# Argument: src_image - image to use for user information +ct_get_uid_from_image() +{ + local user=$1; shift + local src_image=$1 + local user_id= + + # NOTE: The '-eq' test is used to check if $user is numeric as it will fail if $user is not an integer + if ! [ "$user" -eq "$user" ] 2>/dev/null && ! user_id=$(docker run --rm "$src_image" bash -c "id -u $user 2>/dev/null"); then + echo "ERROR: id of user $user not found inside image $src_image." 
+ return 1 + else + echo "${user_id:-$user}" + fi +} + +# ct_test_app_dockerfile +# ----------------------------- +# Argument: dockerfile - path to a Dockerfile that will be used for building an image +# (must work with an application directory called 'app-src') +# Argument: app_url - git or local URI with a testing application, supports "@" to indicate a different branch +# Argument: body_regexp - PCRE regular expression that must match the response body +# Argument: app_dir - name of the application directory that is used in the Dockerfile +# Argument: build_args - build args that will be used for building an image +ct_test_app_dockerfile() { + local dockerfile=$1 + local app_url=$2 + local expected_text=$3 + local app_dir=$4 # this is a directory that must match with the name in the Dockerfile + local build_args=${5:-""} + local port=8080 + local app_image_name=myapp + local ret + local cname=app_dockerfile + local id_file + + if [ -z "$app_dir" ] ; then + echo "ERROR: Option app_dir not set. Terminating the Dockerfile build." + return 1 + fi + + if ! [ -r "${dockerfile}" ] || ! [ -s "${dockerfile}" ] ; then + echo "ERROR: Dockerfile ${dockerfile} does not exist or is empty." + echo "Terminating the Dockerfile build." + return 1 + fi + + CID_FILE_DIR=${CID_FILE_DIR:-$(mktemp -d)} + local dockerfile_abs + dockerfile_abs=$(readlink -f "${dockerfile}") + tmpdir=$(mktemp -d) + pushd "$tmpdir" >/dev/null || return 1 + cp "${dockerfile_abs}" Dockerfile + + # Rewrite the source image to what we test + sed -i -e "s|^FROM.*$|FROM $IMAGE_NAME|" Dockerfile + # a bit more verbose, but should help debugging failures + echo "Using this Dockerfile:" + cat Dockerfile + + if [ -d "$app_url" ] ; then + echo "Copying local folder: $app_url -> $app_dir." + cp -Lr "$app_url" "$app_dir" + else + if ! ct_clone_git_repository "$app_url" "$app_dir" ; then + echo "Terminating the Dockerfile build." + return 1 + fi + fi + echo "Building '${app_image_name}' image using docker build" + if ! 
ct_build_image_and_parse_id "" "-t ${app_image_name} . $build_args"; then + echo "ERROR: The image cannot be built from ${dockerfile} and application ${app_url}." + echo "Terminating the Dockerfile build." + return 1 + fi + id_file="${APP_ID_FILE_DIR:?}"/"$RANDOM" + echo "$APP_IMAGE_ID" > "$id_file" + + if ! docker run -d --cidfile="${CID_FILE_DIR}/app_dockerfile" --rm "${app_image_name}" ; then + echo "ERROR: The image ${app_image_name} cannot be run for ${dockerfile} and application ${app_url}." + echo "Terminating the Dockerfile build." + return 1 + fi + echo "Waiting for ${app_image_name} to start" + ct_wait_for_cid "${CID_FILE_DIR}/app_dockerfile" + + ip="$(ct_get_cip "${cname}")" + if [ -z "$ip" ]; then + echo "ERROR: Cannot get container's IP address." + return 1 + fi + ct_test_response "http://$ip:${port}" 200 "${expected_text}" + ret=$? + + [[ $ret -eq 0 ]] || docker logs "$(ct_get_cid "${cname}")" + + # cleanup + docker kill "$(ct_get_cid "${cname}")" + sleep 2 + docker rmi "${app_image_name}" + popd >/dev/null || return 1 + rm -rf "${tmpdir}" + rm -f "${CID_FILE_DIR}/${cname}" + return $ret +} + +# ct_check_testcase_result +# ----------------------------- +# Check if testcase ended in success or error +# Argument: result - testcase result value +# Uses: $TESTCASE_RESULT - result of the testcase +# Uses: $IMAGE_NAME - name of the image being tested +ct_check_testcase_result() { + local result="$1" + if [[ "$result" != "0" ]]; then + echo "Test for image '${IMAGE_NAME}' FAILED (exit code: ${result})" + TESTCASE_RESULT=1 + fi + return "$result" +} + +# ct_update_test_result +# ----------------------------- +# adds result to the $TEST_SUMMARY variable +# Argument: test_msg +# Argument: app_name +# Argument: test_name +# Argument: time_diff (optional) +# Uses: $TEST_SUMMARY - variable for storing test results +ct_update_test_result() { + local test_msg="$1" + local app_name="$2" + local test_case="$3" + local time_diff="${4:-}" + printf -v TEST_SUMMARY "%s %s 
for '%s' %s (%s)\n" "${TEST_SUMMARY:-}" "${test_msg}" "${app_name}" "$test_case" "$time_diff" +} + +# ct_run_tests_from_testset +# ----------------------------- +# Runs all tests in $TEST_SET, prints result to +# the $TEST_SUMMARY variable +# Argument: app_name - application name to log +# Uses: $TEST_SET - set of test cases to run +# Uses: $TEST_SUMMARY - variable for storing test results +# Uses: $IMAGE_NAME - name of the image being tested +# Uses: $UNSTABLE_TESTS - set of tests, whose result can be ignored +# Uses: $IGNORE_UNSTABLE_TESTS - flag to ignore unstable tests +ct_run_tests_from_testset() { + local app_name="${1:-appnamenotset}" + local time_beg_pretty + local time_beg + local time_end + local time_diff + local test_msg + local is_unstable + + # Let's store in the log what change do we test + echo + git show -s + echo + + echo "Running tests for image ${IMAGE_NAME}" + + for test_case in $TEST_SET; do + TESTCASE_RESULT=0 + # shellcheck disable=SC2076 + if [[ " ${UNSTABLE_TESTS[*]} " =~ " ${app_name} " ]] || \ + [[ " ${UNSTABLE_TESTS[*]} " =~ " ${test_case} " ]]; then + is_unstable=1 + else + is_unstable=0 + fi + time_beg_pretty=$(ct_timestamp_pretty) + time_beg=$(ct_timestamp_s) + echo "-----------------------------------------------" + echo "Running test $test_case (starting at $time_beg_pretty) ... " + echo "-----------------------------------------------" + $test_case + ct_check_testcase_result $? + time_end=$(ct_timestamp_s) + if [ $TESTCASE_RESULT -eq 0 ]; then + test_msg="[PASSED]" + else + if [ -n "${IGNORE_UNSTABLE_TESTS:-""}" ] && [ $is_unstable -eq 1 ]; then + test_msg="[FAILED][UNSTABLE-IGNORED]" + else + test_msg="[FAILED]" + TESTSUITE_RESULT=1 + fi + fi + # As soon as test is finished + # switch the project from sclorg-test- to default. 
+ if [ "${CT_OCP4_TEST:-false}" == "true" ]; then + oc project default + fi + time_diff=$(ct_timestamp_diff "$time_beg" "$time_end") + ct_update_test_result "${test_msg}" "${app_name}" "$test_case" "$time_diff" + done +} + +# ct_timestamp_s +# -------------- +# Returns timestamp in seconds since unix era -- a large integer +function ct_timestamp_s() { + date '+%s' +} + +# ct_timestamp_pretty +# ----------------- +# Returns timestamp readable to a human, like 2022-05-18 10:52:44+02:00 +function ct_timestamp_pretty() { + date --rfc-3339=seconds +} + +# ct_timestamp_diff +# ----------------- +# Computes a time diff between two timestamps +# Argument: start_date - Beginning (in seconds since unix era -- a large integer) +# Argument: final_date - End (in seconds since unix era -- a large integer) +# Returns: Time difference in format HH:MM:SS +function ct_timestamp_diff() { + local start_date=$1 + local final_date=$2 + date -u -d "0 $final_date seconds - $start_date seconds" +"%H:%M:%S" +} + +# ct_get_certificate_timestamp +# ---------------------------- +# Looks into a running container into a specified file (certificate) and extracts +# a notBefore date. 
+# Argument: container - ID of a running container +# Argument: path - path to the certificate inside the running container +# Returns: timestamp (seconds since Unix era) for the certificate generation +function ct_get_certificate_timestamp() { + local container=$1 + local path=$2 + date '+%s' --date="$(docker exec "$container" bash -c "cat $path" | openssl x509 -startdate -noout | grep notBefore | sed -e 's/notBefore=//')" +} + +# ct_get_certificate_age_s +# ------------------------ +# Looks into a running container into a specified file and returns age of the certificate +# Argument: container - ID of a running container +# Argument: path - path inside the running container +# Returns: age of the certificate in seconds +function ct_get_certificate_age_s() { + local container=$1 + local path=$2 + local now + local cert_timestamp + now=$(date '+%s') + cert_timestamp=$(ct_get_certificate_timestamp "$container" "$path") + echo $(( now - cert_timestamp )) +} + +# ct_get_image_age_s +# ------------------ +# Returns age of a given image in seconds +# Argument: image_name - name of a given image +# Returns: age of the image in seconds +function ct_get_image_age_s() { + local image_name=$1 + local now + local image_created + local image_timestamp + now=$(date '+%s') + # docker inspect returns format