diff --git a/.devcontainer/Dockerfile b/.devcontainer/Dockerfile new file mode 100644 index 0000000..ca23960 --- /dev/null +++ b/.devcontainer/Dockerfile @@ -0,0 +1,52 @@ +# DevOps Workshop 2021 Image for Python3 and Docker +FROM python:3.9-slim + +ARG INSTALL_ZSH="true" +ARG UPGRADE_PACKAGES="false" +ARG ENABLE_NONROOT_DOCKER="true" +ARG USE_MOBY="true" +ARG DOCKER_VERSION="latest" + +# Enable new "BUILDKIT" mode for Docker CLI +ENV DOCKER_BUILDKIT=1 + +# Create a user for development +ARG USERNAME=vscode +ARG USER_UID=1000 +ARG USER_GID=$USER_UID + +COPY .devcontainer/library-scripts/*.sh /tmp/library-scripts/ +RUN apt-get update \ + && /bin/bash /tmp/library-scripts/common-debian.sh "${INSTALL_ZSH}" "${USERNAME}" "${USER_UID}" "${USER_GID}" "${UPGRADE_PACKAGES}" "true" "true" \ + # Use Docker script from script library to set things up + && /bin/bash /tmp/library-scripts/docker-in-docker-debian.sh "${ENABLE_NONROOT_DOCKER}" "${USERNAME}" "${USE_MOBY}" "${DOCKER_VERSION}" \ + # Clean up + && apt-get autoremove -y && apt-get clean -y && rm -rf /var/lib/apt/lists/* /tmp/library-scripts/ + +# [Optional] Uncomment this section to install additional OS packages. 
+# RUN apt-get update && export DEBIAN_FRONTEND=noninteractive \ +# && apt-get -y install --no-install-recommends + +# Set up the Kubernetes tools +RUN curl -LO "https://dl.k8s.io/release/$(curl -L -s https://dl.k8s.io/release/stable.txt)/bin/linux/$(dpkg --print-architecture)/kubectl" \ + && install -o root -g root -m 0755 kubectl /usr/local/bin/kubectl \ + && echo "alias kc='/usr/local/bin/kubectl'" >> /home/$USERNAME/.bash_aliases \ + && chown $USERNAME:$USERNAME /home/$USERNAME/.bash_aliases \ + && curl -s https://raw.githubusercontent.com/rancher/k3d/main/install.sh | bash \ + && curl https://raw.githubusercontent.com/helm/helm/main/scripts/get-helm-3 | bash + +# Update Python tools +RUN pip3 install -U pip wheel setuptools + +# Set up the development environment +WORKDIR /app +VOLUME [ "/var/lib/docker" ] + +# Become a regular user +USER $USERNAME + +# Setting the ENTRYPOINT to docker-init.sh will start up the Docker Engine +# inside the container "overrideCommand": false is set in devcontainer.json. +# The script will also execute CMD if you need to alter startup behaviors. 
+ENTRYPOINT [ "/usr/local/share/docker-init.sh" ] +CMD [ "sleep", "infinity" ] diff --git a/.devcontainer/devcontainer.json b/.devcontainer/devcontainer.json new file mode 100644 index 0000000..e2990d4 --- /dev/null +++ b/.devcontainer/devcontainer.json @@ -0,0 +1,30 @@ +{ + "name": "Python Docker", + "dockerComposeFile": "docker-compose.yml", + "service": "app", + "workspaceFolder": "/app", + "remoteUser": "vscode", + "overrideCommand": false, + "settings": {}, + "extensions": [ + "VisualStudioExptTeam.vscodeintellicode", + "ms-python.python", + "ms-python.vscode-pylance", + "ms-azuretools.vscode-docker", + "wholroyd.jinja", + "cstrap.flask-snippets", + "yzhang.markdown-all-in-one", + "DavidAnson.vscode-markdownlint", + "bierner.github-markdown-preview", + "donjayamanne.githistory", + "redhat.vscode-yaml", + "inercia.vscode-k3d" + ], + "postCreateCommand": "pip install -U pip wheel && pip install -r requirements.txt", + "features": { + "docker-in-docker": { + "version": "latest", + "moby": true + } + } +} diff --git a/.devcontainer/docker-compose.yml b/.devcontainer/docker-compose.yml new file mode 100644 index 0000000..8b12420 --- /dev/null +++ b/.devcontainer/docker-compose.yml @@ -0,0 +1,43 @@ +version: "3" + +services: + app: + build: + context: .. 
+ dockerfile: .devcontainer/Dockerfile + hostname: devops + init: true + privileged: true + # ports: + # - 5000:5000 + volumes: + - ..:/app + - ~/.gitconfig:/home/vscode/.gitconfig + - ~/.ssh/:/home/vscode/.ssh/ + - dind-var-lib-docker:/var/lib/docker + command: sleep infinity + environment: + FLASK_APP: service:app + PORT: 5000 + DATABASE_URI: "redis://redis:6379/0" + networks: + - dev + depends_on: + - redis + + redis: + image: redis:6-alpine + hostname: redis + ports: + - "6379:6379" + volumes: + - redis:/data + networks: + - dev + +volumes: + redis: + dind-var-lib-docker: + +networks: + dev: diff --git a/.devcontainer/library-scripts/common-debian.sh b/.devcontainer/library-scripts/common-debian.sh new file mode 100644 index 0000000..f453a6b --- /dev/null +++ b/.devcontainer/library-scripts/common-debian.sh @@ -0,0 +1,454 @@ +#!/usr/bin/env bash +#------------------------------------------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See https://go.microsoft.com/fwlink/?linkid=2090316 for license information. +#------------------------------------------------------------------------------------------------------------- +# +# Docs: https://github.com/microsoft/vscode-dev-containers/blob/main/script-library/docs/common.md +# Maintainer: The VS Code and Codespaces Teams +# +# Syntax: ./common-debian.sh [install zsh flag] [username] [user UID] [user GID] [upgrade packages flag] [install Oh My Zsh! flag] [Add non-free packages] + +set -e + +INSTALL_ZSH=${1:-"true"} +USERNAME=${2:-"automatic"} +USER_UID=${3:-"automatic"} +USER_GID=${4:-"automatic"} +UPGRADE_PACKAGES=${5:-"true"} +INSTALL_OH_MYS=${6:-"true"} +ADD_NON_FREE_PACKAGES=${7:-"false"} +SCRIPT_DIR="$(cd $(dirname "${BASH_SOURCE[0]}") && pwd)" +MARKER_FILE="/usr/local/etc/vscode-dev-containers/common" + +if [ "$(id -u)" -ne 0 ]; then + echo -e 'Script must be run as root. 
Use sudo, su, or add "USER root" to your Dockerfile before running this script.' + exit 1 +fi + +# Ensure that login shells get the correct path if the user updated the PATH using ENV. +rm -f /etc/profile.d/00-restore-env.sh +echo "export PATH=${PATH//$(sh -lc 'echo $PATH')/\$PATH}" > /etc/profile.d/00-restore-env.sh +chmod +x /etc/profile.d/00-restore-env.sh + +# If in automatic mode, determine if a user already exists, if not use vscode +if [ "${USERNAME}" = "auto" ] || [ "${USERNAME}" = "automatic" ]; then + USERNAME="" + POSSIBLE_USERS=("vscode" "node" "codespace" "$(awk -v val=1000 -F ":" '$3==val{print $1}' /etc/passwd)") + for CURRENT_USER in ${POSSIBLE_USERS[@]}; do + if id -u ${CURRENT_USER} > /dev/null 2>&1; then + USERNAME=${CURRENT_USER} + break + fi + done + if [ "${USERNAME}" = "" ]; then + USERNAME=vscode + fi +elif [ "${USERNAME}" = "none" ]; then + USERNAME=root + USER_UID=0 + USER_GID=0 +fi + +# Load markers to see which steps have already run +if [ -f "${MARKER_FILE}" ]; then + echo "Marker file found:" + cat "${MARKER_FILE}" + source "${MARKER_FILE}" +fi + +# Ensure apt is in non-interactive to avoid prompts +export DEBIAN_FRONTEND=noninteractive + +# Function to call apt-get if needed +apt_get_update_if_needed() +{ + if [ ! -d "/var/lib/apt/lists" ] || [ "$(ls /var/lib/apt/lists/ | wc -l)" = "0" ]; then + echo "Running apt-get update..." + apt-get update + else + echo "Skipping apt-get update." 
+ fi +} + +# Run install apt-utils to avoid debconf warning then verify presence of other common developer tools and dependencies +if [ "${PACKAGES_ALREADY_INSTALLED}" != "true" ]; then + + package_list="apt-utils \ + openssh-client \ + gnupg2 \ + dirmngr \ + iproute2 \ + procps \ + lsof \ + htop \ + net-tools \ + psmisc \ + curl \ + wget \ + rsync \ + ca-certificates \ + unzip \ + zip \ + nano \ + vim-tiny \ + less \ + jq \ + lsb-release \ + apt-transport-https \ + dialog \ + libc6 \ + libgcc1 \ + libkrb5-3 \ + libgssapi-krb5-2 \ + libicu[0-9][0-9] \ + liblttng-ust0 \ + libstdc++6 \ + zlib1g \ + locales \ + sudo \ + ncdu \ + man-db \ + strace \ + manpages \ + manpages-dev \ + init-system-helpers" + + # Needed for adding manpages-posix and manpages-posix-dev which are non-free packages in Debian + if [ "${ADD_NON_FREE_PACKAGES}" = "true" ]; then + # Bring in variables from /etc/os-release like VERSION_CODENAME + . /etc/os-release + sed -i -E "s/deb http:\/\/(deb|httpredir)\.debian\.org\/debian ${VERSION_CODENAME} main/deb http:\/\/\1\.debian\.org\/debian ${VERSION_CODENAME} main contrib non-free/" /etc/apt/sources.list + sed -i -E "s/deb-src http:\/\/(deb|httredir)\.debian\.org\/debian ${VERSION_CODENAME} main/deb http:\/\/\1\.debian\.org\/debian ${VERSION_CODENAME} main contrib non-free/" /etc/apt/sources.list + sed -i -E "s/deb http:\/\/(deb|httpredir)\.debian\.org\/debian ${VERSION_CODENAME}-updates main/deb http:\/\/\1\.debian\.org\/debian ${VERSION_CODENAME}-updates main contrib non-free/" /etc/apt/sources.list + sed -i -E "s/deb-src http:\/\/(deb|httpredir)\.debian\.org\/debian ${VERSION_CODENAME}-updates main/deb http:\/\/\1\.debian\.org\/debian ${VERSION_CODENAME}-updates main contrib non-free/" /etc/apt/sources.list + sed -i "s/deb http:\/\/security\.debian\.org\/debian-security ${VERSION_CODENAME}\/updates main/deb http:\/\/security\.debian\.org\/debian-security ${VERSION_CODENAME}\/updates main contrib non-free/" /etc/apt/sources.list + sed -i "s/deb-src 
http:\/\/security\.debian\.org\/debian-security ${VERSION_CODENAME}\/updates main/deb http:\/\/security\.debian\.org\/debian-security ${VERSION_CODENAME}\/updates main contrib non-free/" /etc/apt/sources.list + sed -i "s/deb http:\/\/deb\.debian\.org\/debian ${VERSION_CODENAME}-backports main/deb http:\/\/deb\.debian\.org\/debian ${VERSION_CODENAME}-backports main contrib non-free/" /etc/apt/sources.list + sed -i "s/deb-src http:\/\/deb\.debian\.org\/debian ${VERSION_CODENAME}-backports main/deb http:\/\/deb\.debian\.org\/debian ${VERSION_CODENAME}-backports main contrib non-free/" /etc/apt/sources.list + # Handle bullseye location for security https://www.debian.org/releases/bullseye/amd64/release-notes/ch-information.en.html + sed -i "s/deb http:\/\/security\.debian\.org\/debian-security ${VERSION_CODENAME}-security main/deb http:\/\/security\.debian\.org\/debian-security ${VERSION_CODENAME}-security main contrib non-free/" /etc/apt/sources.list + sed -i "s/deb-src http:\/\/security\.debian\.org\/debian-security ${VERSION_CODENAME}-security main/deb http:\/\/security\.debian\.org\/debian-security ${VERSION_CODENAME}-security main contrib non-free/" /etc/apt/sources.list + echo "Running apt-get update..." + apt-get update + package_list="${package_list} manpages-posix manpages-posix-dev" + else + apt_get_update_if_needed + fi + + # Install libssl1.1 if available + if [[ ! -z $(apt-cache --names-only search ^libssl1.1$) ]]; then + package_list="${package_list} libssl1.1" + fi + + # Install appropriate version of libssl1.0.x if available + libssl_package=$(dpkg-query -f '${db:Status-Abbrev}\t${binary:Package}\n' -W 'libssl1\.0\.?' 2>&1 || echo '') + if [ "$(echo "$libssl_package" | grep -o 'libssl1\.0\.[0-9]:' | uniq | sort | wc -l)" -eq 0 ]; then + if [[ ! -z $(apt-cache --names-only search ^libssl1.0.2$) ]]; then + # Debian 9 + package_list="${package_list} libssl1.0.2" + elif [[ ! 
-z $(apt-cache --names-only search ^libssl1.0.0$) ]]; then + # Ubuntu 18.04, 16.04, earlier + package_list="${package_list} libssl1.0.0" + fi + fi + + echo "Packages to verify are installed: ${package_list}" + apt-get -y install --no-install-recommends ${package_list} 2> >( grep -v 'debconf: delaying package configuration, since apt-utils is not installed' >&2 ) + + # Install git if not already installed (may be more recent than distro version) + if ! type git > /dev/null 2>&1; then + apt-get -y install --no-install-recommends git + fi + + PACKAGES_ALREADY_INSTALLED="true" +fi + +# Get to latest versions of all packages +if [ "${UPGRADE_PACKAGES}" = "true" ]; then + apt_get_update_if_needed + apt-get -y upgrade --no-install-recommends + apt-get autoremove -y +fi + +# Ensure at least the en_US.UTF-8 UTF-8 locale is available. +# Common need for both applications and things like the agnoster ZSH theme. +if [ "${LOCALE_ALREADY_SET}" != "true" ] && ! grep -o -E '^\s*en_US.UTF-8\s+UTF-8' /etc/locale.gen > /dev/null; then + echo "en_US.UTF-8 UTF-8" >> /etc/locale.gen + locale-gen + LOCALE_ALREADY_SET="true" +fi + +# Create or update a non-root user to match UID/GID. 
+group_name="${USERNAME}" +if id -u ${USERNAME} > /dev/null 2>&1; then + # User exists, update if needed + if [ "${USER_GID}" != "automatic" ] && [ "$USER_GID" != "$(id -g $USERNAME)" ]; then + group_name="$(id -gn $USERNAME)" + groupmod --gid $USER_GID ${group_name} + usermod --gid $USER_GID $USERNAME + fi + if [ "${USER_UID}" != "automatic" ] && [ "$USER_UID" != "$(id -u $USERNAME)" ]; then + usermod --uid $USER_UID $USERNAME + fi +else + # Create user + if [ "${USER_GID}" = "automatic" ]; then + groupadd $USERNAME + else + groupadd --gid $USER_GID $USERNAME + fi + if [ "${USER_UID}" = "automatic" ]; then + useradd -s /bin/bash --gid $USERNAME -m $USERNAME + else + useradd -s /bin/bash --uid $USER_UID --gid $USERNAME -m $USERNAME + fi +fi + +# Add add sudo support for non-root user +if [ "${USERNAME}" != "root" ] && [ "${EXISTING_NON_ROOT_USER}" != "${USERNAME}" ]; then + echo $USERNAME ALL=\(root\) NOPASSWD:ALL > /etc/sudoers.d/$USERNAME + chmod 0440 /etc/sudoers.d/$USERNAME + EXISTING_NON_ROOT_USER="${USERNAME}" +fi + +# ** Shell customization section ** +if [ "${USERNAME}" = "root" ]; then + user_rc_path="/root" +else + user_rc_path="/home/${USERNAME}" +fi + +# Restore user .bashrc defaults from skeleton file if it doesn't exist or is empty +if [ ! -f "${user_rc_path}/.bashrc" ] || [ ! -s "${user_rc_path}/.bashrc" ] ; then + cp /etc/skel/.bashrc "${user_rc_path}/.bashrc" +fi + +# Restore user .profile defaults from skeleton file if it doesn't exist or is empty +if [ ! -f "${user_rc_path}/.profile" ] || [ ! 
-s "${user_rc_path}/.profile" ] ; then + cp /etc/skel/.profile "${user_rc_path}/.profile" +fi + +# .bashrc/.zshrc snippet +rc_snippet="$(cat << 'EOF' + +if [ -z "${USER}" ]; then export USER=$(whoami); fi +if [[ "${PATH}" != *"$HOME/.local/bin"* ]]; then export PATH="${PATH}:$HOME/.local/bin"; fi + +# Display optional first run image specific notice if configured and terminal is interactive +if [ -t 1 ] && [[ "${TERM_PROGRAM}" = "vscode" || "${TERM_PROGRAM}" = "codespaces" ]] && [ ! -f "$HOME/.config/vscode-dev-containers/first-run-notice-already-displayed" ]; then + if [ -f "/usr/local/etc/vscode-dev-containers/first-run-notice.txt" ]; then + cat "/usr/local/etc/vscode-dev-containers/first-run-notice.txt" + elif [ -f "/workspaces/.codespaces/shared/first-run-notice.txt" ]; then + cat "/workspaces/.codespaces/shared/first-run-notice.txt" + fi + mkdir -p "$HOME/.config/vscode-dev-containers" + # Mark first run notice as displayed after 10s to avoid problems with fast terminal refreshes hiding it + ((sleep 10s; touch "$HOME/.config/vscode-dev-containers/first-run-notice-already-displayed") &) +fi + +# Set the default git editor if not already set +if [ -z "$(git config --get core.editor)" ] && [ -z "${GIT_EDITOR}" ]; then + if [ "${TERM_PROGRAM}" = "vscode" ]; then + if [[ -n $(command -v code-insiders) && -z $(command -v code) ]]; then + export GIT_EDITOR="code-insiders --wait" + else + export GIT_EDITOR="code --wait" + fi + fi +fi + +EOF +)" + +# code shim, it fallbacks to code-insiders if code is not available +cat << 'EOF' > /usr/local/bin/code +#!/bin/sh + +get_in_path_except_current() { + which -a "$1" | grep -A1 "$0" | grep -v "$0" +} + +code="$(get_in_path_except_current code)" + +if [ -n "$code" ]; then + exec "$code" "$@" +elif [ "$(command -v code-insiders)" ]; then + exec code-insiders "$@" +else + echo "code or code-insiders is not installed" >&2 + exit 127 +fi +EOF +chmod +x /usr/local/bin/code + +# systemctl shim - tells people to use 'service' if 
systemd is not running +cat << 'EOF' > /usr/local/bin/systemctl +#!/bin/sh +set -e +if [ -d "/run/systemd/system" ]; then + exec /bin/systemctl "$@" +else + echo '\n"systemd" is not running in this container due to its overhead.\nUse the "service" command to start services instead. e.g.: \n\nservice --status-all' +fi +EOF +chmod +x /usr/local/bin/systemctl + +# Codespaces bash and OMZ themes - partly inspired by https://github.com/ohmyzsh/ohmyzsh/blob/master/themes/robbyrussell.zsh-theme +codespaces_bash="$(cat \ +<<'EOF' + +# Codespaces bash prompt theme +__bash_prompt() { + local userpart='`export XIT=$? \ + && [ ! -z "${GITHUB_USER}" ] && echo -n "\[\033[0;32m\]@${GITHUB_USER} " || echo -n "\[\033[0;32m\]\u " \ + && [ "$XIT" -ne "0" ] && echo -n "\[\033[1;31m\]➜" || echo -n "\[\033[0m\]➜"`' + local gitbranch='`\ + if [ "$(git config --get codespaces-theme.hide-status 2>/dev/null)" != 1 ]; then \ + export BRANCH=$(git symbolic-ref --short HEAD 2>/dev/null || git rev-parse --short HEAD 2>/dev/null); \ + if [ "${BRANCH}" != "" ]; then \ + echo -n "\[\033[0;36m\](\[\033[1;31m\]${BRANCH}" \ + && if git ls-files --error-unmatch -m --directory --no-empty-directory -o --exclude-standard ":/*" > /dev/null 2>&1; then \ + echo -n " \[\033[1;33m\]✗"; \ + fi \ + && echo -n "\[\033[0;36m\]) "; \ + fi; \ + fi`' + local lightblue='\[\033[1;34m\]' + local removecolor='\[\033[0m\]' + PS1="${userpart} ${lightblue}\w ${gitbranch}${removecolor}\$ " + unset -f __bash_prompt +} +__bash_prompt + +EOF +)" + +codespaces_zsh="$(cat \ +<<'EOF' +# Codespaces zsh prompt theme +__zsh_prompt() { + local prompt_username + if [ ! 
-z "${GITHUB_USER}" ]; then + prompt_username="@${GITHUB_USER}" + else + prompt_username="%n" + fi + PROMPT="%{$fg[green]%}${prompt_username} %(?:%{$reset_color%}➜ :%{$fg_bold[red]%}➜ )" # User/exit code arrow + PROMPT+='%{$fg_bold[blue]%}%(5~|%-1~/…/%3~|%4~)%{$reset_color%} ' # cwd + PROMPT+='$([ "$(git config --get codespaces-theme.hide-status 2>/dev/null)" != 1 ] && git_prompt_info)' # Git status + PROMPT+='%{$fg[white]%}$ %{$reset_color%}' + unset -f __zsh_prompt +} +ZSH_THEME_GIT_PROMPT_PREFIX="%{$fg_bold[cyan]%}(%{$fg_bold[red]%}" +ZSH_THEME_GIT_PROMPT_SUFFIX="%{$reset_color%} " +ZSH_THEME_GIT_PROMPT_DIRTY=" %{$fg_bold[yellow]%}✗%{$fg_bold[cyan]%})" +ZSH_THEME_GIT_PROMPT_CLEAN="%{$fg_bold[cyan]%})" +__zsh_prompt + +EOF +)" + +# Add RC snippet and custom bash prompt +if [ "${RC_SNIPPET_ALREADY_ADDED}" != "true" ]; then + echo "${rc_snippet}" >> /etc/bash.bashrc + echo "${codespaces_bash}" >> "${user_rc_path}/.bashrc" + echo 'export PROMPT_DIRTRIM=4' >> "${user_rc_path}/.bashrc" + if [ "${USERNAME}" != "root" ]; then + echo "${codespaces_bash}" >> "/root/.bashrc" + echo 'export PROMPT_DIRTRIM=4' >> "/root/.bashrc" + fi + chown ${USERNAME}:${group_name} "${user_rc_path}/.bashrc" + RC_SNIPPET_ALREADY_ADDED="true" +fi + +# Optionally install and configure zsh and Oh My Zsh! +if [ "${INSTALL_ZSH}" = "true" ]; then + if ! type zsh > /dev/null 2>&1; then + apt_get_update_if_needed + apt-get install -y zsh + fi + if [ "${ZSH_ALREADY_INSTALLED}" != "true" ]; then + echo "${rc_snippet}" >> /etc/zsh/zshrc + ZSH_ALREADY_INSTALLED="true" + fi + + # Adapted, simplified inline Oh My Zsh! install steps that adds, defaults to a codespaces theme. + # See https://github.com/ohmyzsh/ohmyzsh/blob/master/tools/install.sh for official script. + oh_my_install_dir="${user_rc_path}/.oh-my-zsh" + if [ ! 
-d "${oh_my_install_dir}" ] && [ "${INSTALL_OH_MYS}" = "true" ]; then + template_path="${oh_my_install_dir}/templates/zshrc.zsh-template" + user_rc_file="${user_rc_path}/.zshrc" + umask g-w,o-w + mkdir -p ${oh_my_install_dir} + git clone --depth=1 \ + -c core.eol=lf \ + -c core.autocrlf=false \ + -c fsck.zeroPaddedFilemode=ignore \ + -c fetch.fsck.zeroPaddedFilemode=ignore \ + -c receive.fsck.zeroPaddedFilemode=ignore \ + "https://github.com/ohmyzsh/ohmyzsh" "${oh_my_install_dir}" 2>&1 + echo -e "$(cat "${template_path}")\nDISABLE_AUTO_UPDATE=true\nDISABLE_UPDATE_PROMPT=true" > ${user_rc_file} + sed -i -e 's/ZSH_THEME=.*/ZSH_THEME="codespaces"/g' ${user_rc_file} + + mkdir -p ${oh_my_install_dir}/custom/themes + echo "${codespaces_zsh}" > "${oh_my_install_dir}/custom/themes/codespaces.zsh-theme" + # Shrink git while still enabling updates + cd "${oh_my_install_dir}" + git repack -a -d -f --depth=1 --window=1 + # Copy to non-root user if one is specified + if [ "${USERNAME}" != "root" ]; then + cp -rf "${user_rc_file}" "${oh_my_install_dir}" /root + chown -R ${USERNAME}:${group_name} "${user_rc_path}" + fi + fi +fi + +# Persist image metadata info, script if meta.env found in same directory +meta_info_script="$(cat << 'EOF' +#!/bin/sh +. /usr/local/etc/vscode-dev-containers/meta.env + +# Minimal output +if [ "$1" = "version" ] || [ "$1" = "image-version" ]; then + echo "${VERSION}" + exit 0 +elif [ "$1" = "release" ]; then + echo "${GIT_REPOSITORY_RELEASE}" + exit 0 +elif [ "$1" = "content" ] || [ "$1" = "content-url" ] || [ "$1" = "contents" ] || [ "$1" = "contents-url" ]; then + echo "${CONTENTS_URL}" + exit 0 +fi + +#Full output +echo +echo "Development container image information" +echo +if [ ! -z "${VERSION}" ]; then echo "- Image version: ${VERSION}"; fi +if [ ! -z "${DEFINITION_ID}" ]; then echo "- Definition ID: ${DEFINITION_ID}"; fi +if [ ! -z "${VARIANT}" ]; then echo "- Variant: ${VARIANT}"; fi +if [ ! 
-z "${GIT_REPOSITORY}" ]; then echo "- Source code repository: ${GIT_REPOSITORY}"; fi +if [ ! -z "${GIT_REPOSITORY_RELEASE}" ]; then echo "- Source code release/branch: ${GIT_REPOSITORY_RELEASE}"; fi +if [ ! -z "${BUILD_TIMESTAMP}" ]; then echo "- Timestamp: ${BUILD_TIMESTAMP}"; fi +if [ ! -z "${CONTENTS_URL}" ]; then echo && echo "More info: ${CONTENTS_URL}"; fi +echo +EOF +)" +if [ -f "${SCRIPT_DIR}/meta.env" ]; then + mkdir -p /usr/local/etc/vscode-dev-containers/ + cp -f "${SCRIPT_DIR}/meta.env" /usr/local/etc/vscode-dev-containers/meta.env + echo "${meta_info_script}" > /usr/local/bin/devcontainer-info + chmod +x /usr/local/bin/devcontainer-info +fi + +# Write marker file +mkdir -p "$(dirname "${MARKER_FILE}")" +echo -e "\ + PACKAGES_ALREADY_INSTALLED=${PACKAGES_ALREADY_INSTALLED}\n\ + LOCALE_ALREADY_SET=${LOCALE_ALREADY_SET}\n\ + EXISTING_NON_ROOT_USER=${EXISTING_NON_ROOT_USER}\n\ + RC_SNIPPET_ALREADY_ADDED=${RC_SNIPPET_ALREADY_ADDED}\n\ + ZSH_ALREADY_INSTALLED=${ZSH_ALREADY_INSTALLED}" > "${MARKER_FILE}" + +echo "Done!" diff --git a/.devcontainer/library-scripts/docker-in-docker-debian.sh b/.devcontainer/library-scripts/docker-in-docker-debian.sh new file mode 100644 index 0000000..9a72253 --- /dev/null +++ b/.devcontainer/library-scripts/docker-in-docker-debian.sh @@ -0,0 +1,324 @@ +#!/usr/bin/env bash +#------------------------------------------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See https://go.microsoft.com/fwlink/?linkid=2090316 for license information. 
+#------------------------------------------------------------------------------------------------------------- +# +# Docs: https://github.com/microsoft/vscode-dev-containers/blob/main/script-library/docs/docker-in-docker.md +# Maintainer: The VS Code and Codespaces Teams +# +# Syntax: ./docker-in-docker-debian.sh [enable non-root docker access flag] [non-root user] [use moby] [Engine/CLI Version] + +ENABLE_NONROOT_DOCKER=${1:-"true"} +USERNAME=${2:-"automatic"} +USE_MOBY=${3:-"true"} +DOCKER_VERSION=${4:-"latest"} # The Docker/Moby Engine + CLI should match in version +MICROSOFT_GPG_KEYS_URI="https://packages.microsoft.com/keys/microsoft.asc" +DOCKER_DASH_COMPOSE_VERSION="1" + +set -e + +if [ "$(id -u)" -ne 0 ]; then + echo -e 'Script must be run as root. Use sudo, su, or add "USER root" to your Dockerfile before running this script.' + exit 1 +fi + +# Determine the appropriate non-root user +if [ "${USERNAME}" = "auto" ] || [ "${USERNAME}" = "automatic" ]; then + USERNAME="" + POSSIBLE_USERS=("vscode" "node" "codespace" "$(awk -v val=1000 -F ":" '$3==val{print $1}' /etc/passwd)") + for CURRENT_USER in ${POSSIBLE_USERS[@]}; do + if id -u ${CURRENT_USER} > /dev/null 2>&1; then + USERNAME=${CURRENT_USER} + break + fi + done + if [ "${USERNAME}" = "" ]; then + USERNAME=root + fi +elif [ "${USERNAME}" = "none" ] || ! id -u ${USERNAME} > /dev/null 2>&1; then + USERNAME=root +fi + +# Get central common setting +get_common_setting() { + if [ "${common_settings_file_loaded}" != "true" ]; then + curl -sfL "https://aka.ms/vscode-dev-containers/script-library/settings.env" 2>/dev/null -o /tmp/vsdc-settings.env || echo "Could not download settings file. Skipping." + common_settings_file_loaded=true + fi + if [ -f "/tmp/vsdc-settings.env" ]; then + local multi_line="" + if [ "$2" = "true" ]; then multi_line="-z"; fi + local result="$(grep ${multi_line} -oP "$1=\"?\K[^\"]+" /tmp/vsdc-settings.env | tr -d '\0')" + if [ ! 
-z "${result}" ]; then declare -g $1="${result}"; fi + fi + echo "$1=${!1}" +} + +# Function to run apt-get if needed +apt_get_update_if_needed() +{ + if [ ! -d "/var/lib/apt/lists" ] || [ "$(ls /var/lib/apt/lists/ | wc -l)" = "0" ]; then + echo "Running apt-get update..." + apt-get update + else + echo "Skipping apt-get update." + fi +} + +# Checks if packages are installed and installs them if not +check_packages() { + if ! dpkg -s "$@" > /dev/null 2>&1; then + apt_get_update_if_needed + apt-get -y install --no-install-recommends "$@" + fi +} + +# Figure out correct version of a three part version number is not passed +find_version_from_git_tags() { + local variable_name=$1 + local requested_version=${!variable_name} + if [ "${requested_version}" = "none" ]; then return; fi + local repository=$2 + local prefix=${3:-"tags/v"} + local separator=${4:-"."} + local last_part_optional=${5:-"false"} + if [ "$(echo "${requested_version}" | grep -o "." | wc -l)" != "2" ]; then + local escaped_separator=${separator//./\\.} + local last_part + if [ "${last_part_optional}" = "true" ]; then + last_part="(${escaped_separator}[0-9]+)?" + else + last_part="${escaped_separator}[0-9]+" + fi + local regex="${prefix}\\K[0-9]+${escaped_separator}[0-9]+${last_part}$" + local version_list="$(git ls-remote --tags ${repository} | grep -oP "${regex}" | tr -d ' ' | tr "${separator}" "." | sort -rV)" + if [ "${requested_version}" = "latest" ] || [ "${requested_version}" = "current" ] || [ "${requested_version}" = "lts" ]; then + declare -g ${variable_name}="$(echo "${version_list}" | head -n 1)" + else + set +e + declare -g ${variable_name}="$(echo "${version_list}" | grep -E -m 1 "^${requested_version//./\\.}([\\.\\s]|$)")" + set -e + fi + fi + if [ -z "${!variable_name}" ] || ! 
echo "${version_list}" | grep "^${!variable_name//./\\.}$" > /dev/null 2>&1; then + echo -e "Invalid ${variable_name} value: ${requested_version}\nValid values:\n${version_list}" >&2 + exit 1 + fi + echo "${variable_name}=${!variable_name}" +} + +# Ensure apt is in non-interactive to avoid prompts +export DEBIAN_FRONTEND=noninteractive + +# Install dependencies +check_packages apt-transport-https curl ca-certificates lxc pigz iptables gnupg2 dirmngr +if ! type git > /dev/null 2>&1; then + apt_get_update_if_needed + apt-get -y install git +fi + +# Swap to legacy iptables for compatibility +if type iptables-legacy > /dev/null 2>&1; then + update-alternatives --set iptables /usr/sbin/iptables-legacy + update-alternatives --set ip6tables /usr/sbin/ip6tables-legacy +fi + +# Source /etc/os-release to get OS info +. /etc/os-release +# Fetch host/container arch. +architecture="$(dpkg --print-architecture)" + +# Set up the necessary apt repos (either Microsoft's or Docker's) +if [ "${USE_MOBY}" = "true" ]; then + + # Name of open source engine/cli + engine_package_name="moby-engine" + cli_package_name="moby-cli" + + # Import key safely and import Microsoft apt repo + get_common_setting MICROSOFT_GPG_KEYS_URI + curl -sSL ${MICROSOFT_GPG_KEYS_URI} | gpg --dearmor > /usr/share/keyrings/microsoft-archive-keyring.gpg + echo "deb [arch=${architecture} signed-by=/usr/share/keyrings/microsoft-archive-keyring.gpg] https://packages.microsoft.com/repos/microsoft-${ID}-${VERSION_CODENAME}-prod ${VERSION_CODENAME} main" > /etc/apt/sources.list.d/microsoft.list +else + # Name of licensed engine/cli + engine_package_name="docker-ce" + cli_package_name="docker-ce-cli" + + # Import key safely and import Docker apt repo + curl -fsSL https://download.docker.com/linux/${ID}/gpg | gpg --dearmor > /usr/share/keyrings/docker-archive-keyring.gpg + echo "deb [arch=$(dpkg --print-architecture) signed-by=/usr/share/keyrings/docker-archive-keyring.gpg] https://download.docker.com/linux/${ID} 
${VERSION_CODENAME} stable" > /etc/apt/sources.list.d/docker.list +fi + +# Refresh apt lists +apt-get update + +# Soft version matching +if [ "${DOCKER_VERSION}" = "latest" ] || [ "${DOCKER_VERSION}" = "lts" ] || [ "${DOCKER_VERSION}" = "stable" ]; then + # Empty, meaning grab whatever "latest" is in apt repo + engine_version_suffix="" + cli_version_suffix="" +else + # Fetch a valid version from the apt-cache (eg: the Microsoft repo appends +azure, breakfix, etc...) + docker_version_dot_escaped="${DOCKER_VERSION//./\\.}" + docker_version_dot_plus_escaped="${docker_version_dot_escaped//+/\\+}" + # Regex needs to handle debian package version number format: https://www.systutorials.com/docs/linux/man/5-deb-version/ + docker_version_regex="^(.+:)?${docker_version_dot_plus_escaped}([\\.\\+ ~:-]|$)" + set +e # Don't exit if finding version fails - will handle gracefully + cli_version_suffix="=$(apt-cache madison ${cli_package_name} | awk -F"|" '{print $2}' | sed -e 's/^[ \t]*//' | grep -E -m 1 "${docker_version_regex}")" + engine_version_suffix="=$(apt-cache madison ${engine_package_name} | awk -F"|" '{print $2}' | sed -e 's/^[ \t]*//' | grep -E -m 1 "${docker_version_regex}")" + set -e + if [ -z "${engine_version_suffix}" ] || [ "${engine_version_suffix}" = "=" ] || [ -z "${cli_version_suffix}" ] || [ "${cli_version_suffix}" = "=" ] ; then + echo "(!) No full or partial Docker / Moby version match found for \"${DOCKER_VERSION}\" on OS ${ID} ${VERSION_CODENAME} (${architecture}). Available versions:" + apt-cache madison ${cli_package_name} | awk -F"|" '{print $2}' | grep -oP '^(.+:)?\K.+' + exit 1 + fi + echo "engine_version_suffix ${engine_version_suffix}" + echo "cli_version_suffix ${cli_version_suffix}" +fi + +# Install Docker / Moby CLI if not already installed +if type docker > /dev/null 2>&1 && type dockerd > /dev/null 2>&1; then + echo "Docker / Moby CLI and Engine already installed." 
+else + if [ "${USE_MOBY}" = "true" ]; then + apt-get -y install --no-install-recommends moby-cli${cli_version_suffix} moby-buildx moby-engine${engine_version_suffix} + apt-get -y install --no-install-recommends moby-compose || echo "(*) Package moby-compose (Docker Compose v2) not available for OS ${ID} ${VERSION_CODENAME} (${architecture}). Skipping." + else + apt-get -y install --no-install-recommends docker-ce-cli${cli_version_suffix} docker-ce${engine_version_suffix} + fi +fi + +echo "Finished installing docker / moby!" + +# Install Docker Compose if not already installed and is on a supported architecture +if type docker-compose > /dev/null 2>&1; then + echo "Docker Compose already installed." +else + target_compose_arch="${architecture}" + if [ "${target_compose_arch}" = "amd64" ]; then + target_compose_arch="x86_64" + fi + if [ "${target_compose_arch}" != "x86_64" ]; then + # Use pip to get a version that runns on this architecture + if ! dpkg -s python3-minimal python3-pip libffi-dev python3-venv > /dev/null 2>&1; then + apt_get_update_if_needed + apt-get -y install python3-minimal python3-pip libffi-dev python3-venv + fi + export PIPX_HOME=/usr/local/pipx + mkdir -p ${PIPX_HOME} + export PIPX_BIN_DIR=/usr/local/bin + export PYTHONUSERBASE=/tmp/pip-tmp + export PIP_CACHE_DIR=/tmp/pip-tmp/cache + pipx_bin=pipx + if ! type pipx > /dev/null 2>&1; then + pip3 install --disable-pip-version-check --no-warn-script-location --no-cache-dir --user pipx + pipx_bin=/tmp/pip-tmp/bin/pipx + fi + ${pipx_bin} install --system-site-packages --pip-args '--no-cache-dir --force-reinstall' docker-compose + rm -rf /tmp/pip-tmp + else + # Only supports docker-compose v1 + find_version_from_git_tags DOCKER_DASH_COMPOSE_VERSION "https://github.com/docker/compose" "tags/" + echo "(*) Installing docker-compose ${DOCKER_DASH_COMPOSE_VERSION}..." 
+ curl -fsSL "https://github.com/docker/compose/releases/download/${DOCKER_DASH_COMPOSE_VERSION}/docker-compose-Linux-x86_64" -o /usr/local/bin/docker-compose + chmod +x /usr/local/bin/docker-compose + fi +fi + +# If init file already exists, exit +if [ -f "/usr/local/share/docker-init.sh" ]; then + echo "/usr/local/share/docker-init.sh already exists, so exiting." + exit 0 +fi +echo "docker-init doesnt exist, adding..." + +# Add user to the docker group +if [ "${ENABLE_NONROOT_DOCKER}" = "true" ]; then + if ! getent group docker > /dev/null 2>&1; then + groupadd docker + fi + + usermod -aG docker ${USERNAME} +fi + +tee /usr/local/share/docker-init.sh > /dev/null \ +<< 'EOF' +#!/bin/sh +#------------------------------------------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See https://go.microsoft.com/fwlink/?linkid=2090316 for license information. +#------------------------------------------------------------------------------------------------------------- + +set -e + +dockerd_start="$(cat << 'INNEREOF' + # explicitly remove dockerd and containerd PID file to ensure that it can start properly if it was stopped uncleanly + # ie: docker kill + find /run /var/run -iname 'docker*.pid' -delete || : + find /run /var/run -iname 'container*.pid' -delete || : + + ## Dind wrapper script from docker team, adapted to a function + # Maintained: https://github.com/moby/moby/blob/master/hack/dind + + export container=docker + + if [ -d /sys/kernel/security ] && ! mountpoint -q /sys/kernel/security; then + mount -t securityfs none /sys/kernel/security || { + echo >&2 'Could not mount /sys/kernel/security.' + echo >&2 'AppArmor detection and --privileged mode might break.' + } + fi + + # Mount /tmp (conditionally) + if ! 
mountpoint -q /tmp; then + mount -t tmpfs none /tmp + fi + + # cgroup v2: enable nesting + if [ -f /sys/fs/cgroup/cgroup.controllers ]; then + # move the processes from the root group to the /init group, + # otherwise writing subtree_control fails with EBUSY. + # An error during moving non-existent process (i.e., "cat") is ignored. + mkdir -p /sys/fs/cgroup/init + xargs -rn1 < /sys/fs/cgroup/cgroup.procs > /sys/fs/cgroup/init/cgroup.procs || : + # enable controllers + sed -e 's/ / +/g' -e 's/^/+/' < /sys/fs/cgroup/cgroup.controllers \ + > /sys/fs/cgroup/cgroup.subtree_control + fi + ## Dind wrapper over. + + # Handle DNS + set +e + cat /etc/resolv.conf | grep -i 'internal.cloudapp.net' + if [ $? -eq 0 ] + then + echo "Setting dockerd Azure DNS." + CUSTOMDNS="--dns 168.63.129.16" + else + echo "Not setting dockerd DNS manually." + CUSTOMDNS="" + fi + set -e + + # Start docker/moby engine + ( dockerd $CUSTOMDNS > /tmp/dockerd.log 2>&1 ) & +INNEREOF +)" + +# Start using sudo if not invoked as root +if [ "$(id -u)" -ne 0 ]; then + sudo /bin/sh -c "${dockerd_start}" +else + eval "${dockerd_start}" +fi + +set +e + +# Execute whatever commands were passed in (if any). This allows us +# to set this script to ENTRYPOINT while still executing the default CMD. +exec "$@" +EOF + +chmod +x /usr/local/share/docker-init.sh +chown ${USERNAME}:root /usr/local/share/docker-init.sh diff --git a/Dockerfile b/Dockerfile index 0d36e56..c5b3ae3 100644 --- a/Dockerfile +++ b/Dockerfile @@ -1,16 +1,17 @@ -FROM python:3.8-slim +FROM python:3.9-slim # Create working folder and install dependencies WORKDIR /app COPY requirements.txt . 
-RUN pip install --no-cache-dir -r requirements.txt +RUN pip install -U pip wheel && \ + pip install --no-cache-dir -r requirements.txt # Copy the application contents COPY service/ ./service/ # Switch to a non-root user -RUN useradd appuser && chown -R appuser /app -USER appuser +RUN useradd --uid 1000 vagrant && chown -R vagrant /app +USER vagrant # Expose any ports the app is expecting in the environment ENV FLASK_APP=service:app @@ -18,4 +19,5 @@ ENV PORT 8080 EXPOSE $PORT ENV GUNICORN_BIND 0.0.0.0:$PORT -CMD ["gunicorn", "--log-level=info", "service:app"] +ENTRYPOINT ["gunicorn"] +CMD ["--log-level=info", "service:app"] diff --git a/Vagrantfile b/Vagrantfile index b22b405..ae96625 100644 --- a/Vagrantfile +++ b/Vagrantfile @@ -5,17 +5,20 @@ # Kubernetes Minikube Environment ###################################################################### Vagrant.configure(2) do |config| - # config.vm.box = "bento/ubuntu-20.04" - config.vm.box = "ubuntu/focal64" - config.vm.hostname = "kubernetes" + # config.vm.box = "ubuntu/focal64" # config.vm.hostname = "ubuntu" + # config.vm.box = "debian/buster64" + # config.vm.box = "debian/bullseye64" + config.vm.box = "bento/ubuntu-21.04" + config.vm.hostname = "kubernetes" # config.vm.network "forwarded_port", guest: 80, host: 8080 + config.vm.network "forwarded_port", guest: 8090, host: 8090, host_ip: "127.0.0.1" config.vm.network "forwarded_port", guest: 8080, host: 8080, host_ip: "127.0.0.1" # Create a private network, which allows host-only access to the machine # using a specific IP. 
- config.vm.network "private_network", ip: "192.168.33.10" + config.vm.network "private_network", ip: "192.168.56.10" # Mac users can comment this next line out but # Windows users need to change the permission of files and directories @@ -29,8 +32,8 @@ Vagrant.configure(2) do |config| vb.memory = "4096" vb.cpus = 2 # Fixes some DNS issues on some networks - vb.customize ["modifyvm", :id, "--natdnshostresolver1", "on"] - vb.customize ["modifyvm", :id, "--natdnsproxy1", "on"] + #vb.customize ["modifyvm", :id, "--natdnshostresolver1", "on"] + #vb.customize ["modifyvm", :id, "--natdnsproxy1", "on"] end ############################################################ @@ -38,7 +41,7 @@ Vagrant.configure(2) do |config| ############################################################ config.vm.provider :docker do |docker, override| override.vm.box = nil - docker.image = "rofrano/vagrant-provider:ubuntu" + docker.image = "rofrano/vagrant-provider:debian" docker.remains_running = true docker.has_ssh = true docker.privileged = true @@ -66,9 +69,9 @@ Vagrant.configure(2) do |config| config.vm.provision "file", source: "~/.vimrc", destination: "~/.vimrc" end - # Copy your IBM Clouid API Key if you have one - if File.exists?(File.expand_path("~/.bluemix/apiKey.json")) - config.vm.provision "file", source: "~/.bluemix/apiKey.json", destination: "~/.bluemix/apiKey.json" + # Copy your IBM Cloud API Key if you have one + if File.exists?(File.expand_path("~/.bluemix/apikey.json")) + config.vm.provision "file", source: "~/.bluemix/apikey.json", destination: "~/.bluemix/apikey.json" end ###################################################################### @@ -77,7 +80,7 @@ Vagrant.configure(2) do |config| config.vm.provision "shell", inline: <<-SHELL # Install Python 3 and dev tools apt-get update - apt-get install -y git vim tree wget jq build-essential python3-dev python3-pip python3-venv apt-transport-https + apt-get install -y git vim tree wget jq python3-dev python3-pip python3-venv 
apt-transport-https apt-get upgrade python3 # Create a Python3 Virtual Environment and Activate it in .profile @@ -85,12 +88,17 @@ Vagrant.configure(2) do |config| sudo -H -u vagrant sh -c 'echo ". ~/venv/bin/activate" >> ~/.profile' # Install app dependencies in virtual environment as vagrant user - sudo -H -u vagrant sh -c '. ~/venv/bin/activate && pip install -U pip && pip install wheel' - sudo -H -u vagrant sh -c '. ~/venv/bin/activate && pip install docker-compose' - sudo -H -u vagrant sh -c '. ~/venv/bin/activate && cd /vagrant && pip install -r requirements.txt' + sudo -H -u vagrant sh -c '. ~/venv/bin/activate && + cd /vagrant && + pip install -U pip wheel && + pip install docker-compose && + pip install -r requirements.txt' - # Check versions to prove that everything is installed - python3 --version + # Check versions to prove that everything is installed + python3 --version + + # Create .env file if it doesn't exist + sudo -H -u vagrant sh -c 'cd /vagrant && if [ ! -f .env ]; then cp dot-env-example .env; fi' SHELL ############################################################ @@ -98,92 +106,61 @@ Vagrant.configure(2) do |config| ############################################################ config.vm.provision "docker" do |d| d.pull_images "alpine" - d.pull_images "python:3.8-slim" + d.pull_images "python:3.9-slim" d.pull_images "redis:6-alpine" d.run "redis:6-alpine", args: "--restart=always -d --name redis -p 6379:6379 -v redis:/data" end - # ############################################################ - # # Install Kuberrnetes CLI - # ############################################################ - # config.vm.provision "shell", inline: <<-SHELL - # # Install kubectl - # curl -LO "https://dl.k8s.io/release/$(curl -L -s https://dl.k8s.io/release/stable.txt)/bin/linux/$(dpkg --print-architecture)/kubectl" - # install -o root -g root -m 0755 kubectl /usr/local/bin/kubectl - # rm kubectl - # echo "alias kc='/usr/local/bin/kubectl'" >> 
/home/vagrant/.bash_aliases - # chown vagrant:vagrant /home/vagrant/.bash_aliases - # SHELL + ############################################################ + # Install Kubernetes CLI and Helm + ############################################################ + config.vm.provision "shell", inline: <<-SHELL + # Install kubectl + curl -LO "https://dl.k8s.io/release/$(curl -L -s https://dl.k8s.io/release/stable.txt)/bin/linux/$(dpkg --print-architecture)/kubectl" + install -o root -g root -m 0755 kubectl /usr/local/bin/kubectl + rm kubectl + echo "alias kc='/usr/local/bin/kubectl'" >> /home/vagrant/.bash_aliases + chown vagrant:vagrant /home/vagrant/.bash_aliases + # Install helm + curl https://raw.githubusercontent.com/helm/helm/master/scripts/get-helm-3 | bash + SHELL - # ############################################################ - # # Create a Kubernetes Cluster wiith K3D - # ############################################################ - # config.vm.provision "shell", inline: <<-SHELL - # # Install K3d - # curl -s https://raw.githubusercontent.com/rancher/k3d/main/install.sh | bash - # sudo -H -u vagrant sh -c "k3d registry create registry.localhost --port 50000" - # sudo -H -u vagrant sh -c "k3d cluster create mycluster --registry-use k3d-registry.localhost:50000 --agents 1 --port '8080:80@loadbalancer'" - # SHELL - ############################################################ - # Create a Kubernetes Cluster with MicroK8s + # Create a Kubernetes Cluster with K3D ############################################################ config.vm.provision "shell", inline: <<-SHELL - # install MicroK8s version of Kubernetes - sudo snap install microk8s --classic - sudo microk8s.enable dns - sudo microk8s.enable dashboard - sudo microk8s.enable ingress - sudo microk8s.enable registry - sudo usermod -a -G microk8s vagrant - sudo -H -u vagrant sh -c 'echo "alias kubectl=/snap/bin/microk8s.kubectl" >> ~/.bashrc' - /snap/bin/microk8s.kubectl version --short - - # # Create aliases for 
microk8s=mk and kubecl=kc - # echo "alias mk='/snap/bin/microk8s'" >> /home/vagrant/.bash_aliases - # #echo "alias kc='/snap/bin/kubectl'" >> /home/vagrant/.bash_aliases - # chown vagrant:vagrant /home/vagrant/.bash_aliases - # # Set up Kubernetes context - # sudo -H -u vagrant sh -c 'mkdir ~/.kube && microk8s.kubectl config view --raw > ~/.kube/config' - # kubectl version --short - # microk8s.config > /home/vagrant/.kube/config - # chown vagrant:vagrant /home/vagrant/.kube/config - # chmod 600 /home/vagrant/.kube/config - + # Install K3d + curl -s https://raw.githubusercontent.com/rancher/k3d/main/install.sh | bash + # echo "127.0.0.1 k3d-registry.localhost" >> /etc/hosts + # sudo -H -u vagrant sh -c "k3d registry create registry.localhost --port 50000" + # sudo -H -u vagrant sh -c "k3d cluster create devops --registry-use k3d-registry.localhost:50000 --agents 1 --port '8080:80@loadbalancer'" SHELL - # ###################################################################### - # # Setup an IBM Cloud and Kubernetes environment - # ###################################################################### - # config.vm.provision "shell", inline: <<-SHELL - # echo "\n************************************" - # echo " Installing IBM Cloud CLI..." - # echo "************************************\n" - # # Install IBM Cloud CLI as Vagrant user - # sudo -H -u vagrant sh -c 'curl -sL https://ibm.biz/idt-installer | bash' - # sudo -H -u vagrant sh -c 'ibmcloud config --usage-stats-collect false' - # sudo -H -u vagrant sh -c "echo 'source <(kubectl completion bash)' >> ~/.bashrc" - # sudo -H -u vagrant sh -c "echo alias ic=/usr/local/bin/ibmcloud >> ~/.bash_aliases" - # # Install OpenShift Client (optional) - # # mkdir ./openshift-client - # # cd openshift-client - # # wget https://mirror.openshift.com/pub/openshift-v4/clients/ocp/latest/openshift-client-linux.tar.gz - # # tar xzf openshift-client-linux.tar.gz - # # cp kubectl /usr/local/bin - # # cp oc /usr/local/bin - # # cd .. 
- # # rmdir -fr ./openshift-client - # # - # # Install the IBM Cloud Native Toolkit - # # curl -sL shell.cloudnativetoolkit.dev | sh - && . ~/.bashrc - # echo "\n" - # echo "\n************************************" - # echo " For the Kubernetes Dashboard use:" - # echo " kubectl proxy --address='0.0.0.0'" - # echo "************************************\n" - # # Prove that plug-ins are installed as vagrant user - # sudo -H -u vagrant bash -c "bx plugin list" - # SHELL + ###################################################################### + # Setup an IBM Cloud and Kubernetes environment + ###################################################################### + config.vm.provision "shell", inline: <<-SHELL + echo "\n************************************" + echo " Installing IBM Cloud CLI..." + echo "************************************\n" + # Install IBM Cloud CLI as Vagrant user + sudo -H -u vagrant sh -c ' + curl -fsSL https://clis.cloud.ibm.com/install/linux | sh && \ + ibmcloud plugin install container-service && \ + ibmcloud plugin install container-registry && \ + echo "alias ic=ibmcloud" >> ~/.bashrc + ' + + # Show completion instructions + sudo -H -u vagrant sh -c "echo alias ic=/usr/local/bin/ibmcloud >> ~/.bash_aliases" + echo "\n************************************" + echo "If you have an IBM Cloud API key in ~/.bluemix/apiKey.json" + echo "You can login with the following command:" + echo "\n" + echo "ibmcloud login -a https://cloud.ibm.com --apikey @~/.bluemix/apikey.json -r us-south" + echo "ibmcloud ks cluster config --cluster " + echo "\n************************************" + SHELL end diff --git a/kube/deployment.yaml b/deploy/deployment.yaml similarity index 86% rename from kube/deployment.yaml rename to deploy/deployment.yaml index 3d92dcf..e0f1cc1 100644 --- a/kube/deployment.yaml +++ b/deploy/deployment.yaml @@ -17,9 +17,7 @@ spec: restartPolicy: Always containers: - name: hitcounter - image: localhost:32000/hitcounter:1.0 - # image: 
k3d-registry.localhost:50000/hitcounter:1.0 - # image: hitcounter:1.0 + image: cluster-registry:32000/hitcounter:1.0 imagePullPolicy: IfNotPresent ports: - containerPort: 8080 diff --git a/kube/ingress.yaml b/deploy/ingress.yaml similarity index 100% rename from kube/ingress.yaml rename to deploy/ingress.yaml diff --git a/deploy/nginx-ingress.yaml b/deploy/nginx-ingress.yaml new file mode 100644 index 0000000..a152e94 --- /dev/null +++ b/deploy/nginx-ingress.yaml @@ -0,0 +1,17 @@ +apiVersion: networking.k8s.io/v1 +kind: Ingress +metadata: + name: my-nginx + annotations: + nginx.ingress.kubernetes.io/rewrite-target: / +spec: + rules: + - http: + paths: + - path: / + pathType: Prefix + backend: + service: + name: my-nginx + port: + number: 80 diff --git a/kube/redis.yaml b/deploy/redis.yaml similarity index 100% rename from kube/redis.yaml rename to deploy/redis.yaml diff --git a/kube/secret.yaml b/deploy/secret.yaml similarity index 100% rename from kube/secret.yaml rename to deploy/secret.yaml diff --git a/kube/service.yaml b/deploy/service.yaml similarity index 100% rename from kube/service.yaml rename to deploy/service.yaml diff --git a/dot-env-example b/dot-env-example new file mode 100644 index 0000000..a7784ad --- /dev/null +++ b/dot-env-example @@ -0,0 +1,2 @@ +PORT=8080 +FLASK_APP=service:app \ No newline at end of file diff --git a/requirements.txt b/requirements.txt index fde8193..e53fad7 100644 --- a/requirements.txt +++ b/requirements.txt @@ -1,20 +1,14 @@ -# Lock down these dependencies -Werkzeug==1.0.1 - -# Developmeent -Flask==1.1.2 -Flask-API==1.1 +# Runtime dependencies +Flask==2.0.2 redis==3.5.3 -python-dotenv==0.15.0 - -# Runtime -gunicorn==20.0.4 -honcho==1.0.1 -httpie==1.0.3 +gunicorn==20.1.0 +honcho==1.1.0 +python-dotenv==0.19.2 -# Testing +# Testing dependencies nose==1.3.7 -pinocchio==0.4.2 -coverage==4.5.4 -codecov==2.0.15 -pylint>=2.4.1 +pinocchio==0.4.3 +coverage==6.1.2 +codecov==2.1.12 +httpie==2.6.0 +pylint==2.11.1 diff --git 
a/scripts/create-cluster.sh b/scripts/create-cluster.sh new file mode 100644 index 0000000..337c3dc --- /dev/null +++ b/scripts/create-cluster.sh @@ -0,0 +1,4 @@ +#!/bin/bash +echo "Creating Kubernetes cluster with a registry..." +k3d cluster create --registry-create cluster-registry:0.0.0.0:32000 --port '8080:80@loadbalancer' +echo "Complete." \ No newline at end of file diff --git a/scripts/delete-cluster.sh b/scripts/delete-cluster.sh new file mode 100644 index 0000000..49cc28b --- /dev/null +++ b/scripts/delete-cluster.sh @@ -0,0 +1,4 @@ +#!/bin/bash +echo "Deleting Kubernetes cluster..." +k3d cluster delete +echo "Complete." \ No newline at end of file diff --git a/service/__init__.py b/service/__init__.py index a40c633..65cdee8 100644 --- a/service/__init__.py +++ b/service/__init__.py @@ -29,7 +29,7 @@ app = Flask(__name__) # Import the routes After the Flask app is created -from service import routes, models +from service import routes, models, error_handlers # Set up logging for production app.logger.propagate = False @@ -43,4 +43,4 @@ app.logger.info(" H I T C O U N T E R S E R V I C E ".center(70, "*")) app.logger.info(70 * "*") -app.logger.info("Service inititalized!") +app.logger.info("Service initialized!") diff --git a/service/error_handlers.py b/service/error_handlers.py new file mode 100644 index 0000000..b6ae913 --- /dev/null +++ b/service/error_handlers.py @@ -0,0 +1,60 @@ +from flask import jsonify +from . 
import app, status + +###################################################################### +# Error Handlers +###################################################################### + +@app.errorhandler(status.HTTP_404_NOT_FOUND) +def not_found(error): + """ Handles resources not found with 404_NOT_FOUND """ + message = str(error) + app.logger.warning(message) + return ( + jsonify(status=status.HTTP_404_NOT_FOUND, error="Not Found", message=message), + status.HTTP_404_NOT_FOUND, + ) + + +@app.errorhandler(status.HTTP_405_METHOD_NOT_ALLOWED) +def method_not_supported(error): + """ Handles unsupported HTTP methods with 405_METHOD_NOT_SUPPORTED """ + message = str(error) + app.logger.warning(message) + return ( + jsonify( + status=status.HTTP_405_METHOD_NOT_ALLOWED, + error="Method not Allowed", + message=message, + ), + status.HTTP_405_METHOD_NOT_ALLOWED, + ) + + +@app.errorhandler(status.HTTP_500_INTERNAL_SERVER_ERROR) +def internal_server_error(error): + """ Handles unexpected server error with 500_SERVER_ERROR """ + message = str(error) + app.logger.error(message) + return ( + jsonify( + status=status.HTTP_500_INTERNAL_SERVER_ERROR, + error="Internal Server Error", + message=message, + ), + status.HTTP_500_INTERNAL_SERVER_ERROR, + ) + +@app.errorhandler(status.HTTP_503_SERVICE_UNAVAILABLE) +def service_unavailable(error): + """ Handles unexpected server error with 503_SERVICE_UNAVAILABLE """ + message = str(error) + app.logger.error(message) + return ( + jsonify( + status=status.HTTP_503_SERVICE_UNAVAILABLE, + error="Service is unavailable", + message=message, + ), + status.HTTP_503_SERVICE_UNAVAILABLE, + ) diff --git a/service/models.py b/service/models.py index 15fcb66..b30710f 100644 --- a/service/models.py +++ b/service/models.py @@ -81,19 +81,30 @@ def serialize(self): @classmethod def all(cls): """ Returns all of the counters """ - return [dict(name=key, counter=int(cls.redis.get(key))) for key in cls.redis.keys('*')] + try: + counters = [dict(name=key, 
counter=int(cls.redis.get(key))) for key in cls.redis.keys('*')] + except Exception as err: + raise DatabaseConnectionError(err) + return counters @classmethod def find(cls, name): """ Finds a counter with the name or returns None """ - count = cls.redis.get(name) - if count: - return Counter(name, count) - return None + counter = None + try: + count = cls.redis.get(name) + if count: + counter = Counter(name, count) + except Exception as err: + raise DatabaseConnectionError(err) + return counter @classmethod def remove_all(cls): - cls.redis.flushall() + try: + cls.redis.flushall() + except Exception as err: + raise DatabaseConnectionError(err) ###################################################################### # R E D I S D A T A B A S E C O N N E C T I O N M E T H O D S diff --git a/service/routes.py b/service/routes.py index c21b860..49eee3c 100644 --- a/service/routes.py +++ b/service/routes.py @@ -1,4 +1,4 @@ -# Copyright 2015 IBM Corp. All Rights Reserved. +# Copyright 2015, 2021 John J. Rofrano All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -16,82 +16,12 @@ """ import os from flask import jsonify, json, abort, request, url_for -from flask_api import status # HTTP Status Codes -from . import app +from . 
import app, status # HTTP Status Codes from service import DATABASE_URI from .models import Counter, DatabaseConnectionError DEBUG = os.getenv("DEBUG", "False") == "True" -PORT = os.getenv("PORT", "5000") - -###################################################################### -# E R R O R H A N D L E R S -###################################################################### -@app.errorhandler(status.HTTP_503_SERVICE_UNAVAILABLE) -def service_unavailable(error): - """ Handles unexpected server error with 503_SERVICE_UNAVAILABLE """ - message = str(error) - app.logger.error(message) - return ( - jsonify( - status=status.HTTP_503_SERVICE_UNAVAILABLE, - error="Service is unavailable", - message=message, - ), - status.HTTP_503_SERVICE_UNAVAILABLE, - ) - - -@app.errorhandler(status.HTTP_400_BAD_REQUEST) -def bad_request(error): - """ Handles bad reuests with 400_BAD_REQUEST """ - app.logger.warning(str(error)) - return ( - jsonify( - status=status.HTTP_400_BAD_REQUEST, error="Bad Request", message=str(error) - ), - status.HTTP_400_BAD_REQUEST, - ) - - -@app.errorhandler(status.HTTP_404_NOT_FOUND) -def not_found(error): - """ Handles resources not found with 404_NOT_FOUND """ - app.logger.warning(str(error)) - return ( - jsonify( - status=status.HTTP_404_NOT_FOUND, error="Not Found", message=str(error) - ), - status.HTTP_404_NOT_FOUND, - ) - - -@app.errorhandler(status.HTTP_405_METHOD_NOT_ALLOWED) -def method_not_supported(error): - """ Handles unsuppoted HTTP methods with 405_METHOD_NOT_SUPPORTED """ - app.logger.warning(str(error)) - return ( - jsonify( - status=status.HTTP_405_METHOD_NOT_ALLOWED, - error="Method not Allowed", - message=str(error), - ), - status.HTTP_405_METHOD_NOT_ALLOWED, - ) - - -@app.errorhandler(status.HTTP_500_INTERNAL_SERVER_ERROR) -def internal_server_error(error): - """ Handles unexpected server error with 500_SERVER_ERROR """ - app.logger.error(str(error)) - return ( - jsonify( - status=status.HTTP_500_INTERNAL_SERVER_ERROR, - 
error="Internal Server Error", - message=str(error), - ), - status.HTTP_500_INTERNAL_SERVER_ERROR, - ) +PORT = os.getenv("PORT", "8080") ############################################################ # Health Endpoint @@ -116,7 +46,11 @@ def index(): @app.route("/counters", methods=["GET"]) def list_counters(): app.logger.info("Request to list all counters...") - counters = Counter.all() + try: + counters = Counter.all() + except DatabaseConnectionError as err: + abort(status.HTTP_503_SERVICE_UNAVAILABLE, err) + return jsonify(counters) @@ -129,7 +63,7 @@ def read_counters(name): try: counter = Counter.find(name) - except Exception as err: + except DatabaseConnectionError as err: abort(status.HTTP_503_SERVICE_UNAVAILABLE, err) if not counter: @@ -145,13 +79,13 @@ def read_counters(name): @app.route("/counters/", methods=["POST"]) def create_counters(name): app.logger.info("Request to Create counter...") - counter = Counter.find(name) - if counter is not None: - return jsonify(code=409, error="Counter already exists"), 409 - try: + counter = Counter.find(name) + if counter is not None: + return jsonify(code=409, error="Counter already exists"), 409 + counter = Counter(name) - except Exception as err: + except DatabaseConnectionError as err: abort(status.HTTP_503_SERVICE_UNAVAILABLE, err) location_url = url_for('read_counters', name=name, _external=True) @@ -164,13 +98,13 @@ def create_counters(name): @app.route("/counters/", methods=["PUT"]) def update_counters(name): app.logger.info("Request to Update counter...") - counter = Counter.find(name) - if counter is None: - return jsonify(code=404, error="Counter {} does not exist".format(name)), 404 - try: + counter = Counter.find(name) + if counter is None: + return jsonify(code=404, error="Counter {} does not exist".format(name)), 404 + count = counter.increment() - except Exception as err: + except DatabaseConnectionError as err: abort(status.HTTP_503_SERVICE_UNAVAILABLE, err) return jsonify(name=name, counter=count) 
@@ -182,12 +116,11 @@ def update_counters(name): @app.route("/counters/", methods=["DELETE"]) def delete_counters(name): app.logger.info("Request to Delete counter...") - counter = Counter.find(name) - try: + counter = Counter.find(name) if counter: del counter.value - except Exception as err: + except DatabaseConnectionError as err: abort(status.HTTP_503_SERVICE_UNAVAILABLE, err) return "", status.HTTP_204_NO_CONTENT @@ -203,5 +136,5 @@ def init_db(): app.logger.info("Initializing the Redis database") Counter.connect(DATABASE_URI) app.logger.info("Connected!") - except Exception as err: + except DatabaseConnectionError as err: app.logger.error(str(err)) diff --git a/service/status.py b/service/status.py new file mode 100644 index 0000000..d3d655e --- /dev/null +++ b/service/status.py @@ -0,0 +1,62 @@ +# coding: utf8 +""" +Descriptive HTTP status codes, for code readability. +See RFC 2616 and RFC 6585. +RFC 2616: http://www.w3.org/Protocols/rfc2616/rfc2616-sec10.html +RFC 6585: http://tools.ietf.org/html/rfc6585 +""" + +# Informational - 1xx +HTTP_100_CONTINUE = 100 +HTTP_101_SWITCHING_PROTOCOLS = 101 + +# Successful - 2xx +HTTP_200_OK = 200 +HTTP_201_CREATED = 201 +HTTP_202_ACCEPTED = 202 +HTTP_203_NON_AUTHORITATIVE_INFORMATION = 203 +HTTP_204_NO_CONTENT = 204 +HTTP_205_RESET_CONTENT = 205 +HTTP_206_PARTIAL_CONTENT = 206 + +# Redirection - 3xx +HTTP_300_MULTIPLE_CHOICES = 300 +HTTP_301_MOVED_PERMANENTLY = 301 +HTTP_302_FOUND = 302 +HTTP_303_SEE_OTHER = 303 +HTTP_304_NOT_MODIFIED = 304 +HTTP_305_USE_PROXY = 305 +HTTP_306_RESERVED = 306 +HTTP_307_TEMPORARY_REDIRECT = 307 + +# Client Error - 4xx +HTTP_400_BAD_REQUEST = 400 +HTTP_401_UNAUTHORIZED = 401 +HTTP_402_PAYMENT_REQUIRED = 402 +HTTP_403_FORBIDDEN = 403 +HTTP_404_NOT_FOUND = 404 +HTTP_405_METHOD_NOT_ALLOWED = 405 +HTTP_406_NOT_ACCEPTABLE = 406 +HTTP_407_PROXY_AUTHENTICATION_REQUIRED = 407 +HTTP_408_REQUEST_TIMEOUT = 408 +HTTP_409_CONFLICT = 409 +HTTP_410_GONE = 410 +HTTP_411_LENGTH_REQUIRED = 411 
+HTTP_412_PRECONDITION_FAILED = 412 +HTTP_413_REQUEST_ENTITY_TOO_LARGE = 413 +HTTP_414_REQUEST_URI_TOO_LONG = 414 +HTTP_415_UNSUPPORTED_MEDIA_TYPE = 415 +HTTP_416_REQUESTED_RANGE_NOT_SATISFIABLE = 416 +HTTP_417_EXPECTATION_FAILED = 417 +HTTP_428_PRECONDITION_REQUIRED = 428 +HTTP_429_TOO_MANY_REQUESTS = 429 +HTTP_431_REQUEST_HEADER_FIELDS_TOO_LARGE = 431 + +# Server Error - 5xx +HTTP_500_INTERNAL_SERVER_ERROR = 500 +HTTP_501_NOT_IMPLEMENTED = 501 +HTTP_502_BAD_GATEWAY = 502 +HTTP_503_SERVICE_UNAVAILABLE = 503 +HTTP_504_GATEWAY_TIMEOUT = 504 +HTTP_505_HTTP_VERSION_NOT_SUPPORTED = 505 +HTTP_511_NETWORK_AUTHENTICATION_REQUIRED = 511 diff --git a/tests/test_model.py b/tests/test_models.py similarity index 94% rename from tests/test_model.py rename to tests/test_models.py index a3869e8..cd962ce 100644 --- a/tests/test_model.py +++ b/tests/test_models.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright 2016, 2020 John J. Rofrano. All Rights Reserved. +# Copyright 2016, 2021 John J. Rofrano. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
@@ -139,12 +139,6 @@ def test_no_connection(self, ping_mock): ping_mock.side_effect = ConnectionError() self.assertRaises(DatabaseConnectionError, self.counter.connect, DATABASE_URI) - @patch.dict(os.environ, {"DATABASE_URI": "redis://:@localhost:6379/0"}) - def test_environment_uri(self): - """ Get DATABASE_URI from environment """ - self.counter.connect() - self.assertTrue(Counter.test_connection) - @patch.dict(os.environ, {"DATABASE_URI": ""}) def test_missing_environment_creds(self): """ Missing environment credentials """ diff --git a/tests/test_service.py b/tests/test_routes.py similarity index 83% rename from tests/test_service.py rename to tests/test_routes.py index aaad9de..d4398bd 100644 --- a/tests/test_service.py +++ b/tests/test_routes.py @@ -24,9 +24,8 @@ import logging from unittest import TestCase from unittest.mock import patch -from flask_api import status # HTTP Status Codes from service import app, DATABASE_URI -from service.models import Counter +from service.models import Counter, DatabaseConnectionError DATABASE_URI = os.getenv("DATABASE_URI", "redis://:@localhost:6379/0") @@ -148,11 +147,11 @@ def test_method_not_allowed(self): # T E S T E R R O R H A N D L E R S ###################################################################### - @patch("service.routes.Counter.find") - def test_failed_get_request(self, value_mock): + @patch("service.routes.Counter.redis.get") + def test_failed_get_request(self, redis_mock): """ Error handlers for failed GET """ - value_mock.return_value = 0 - value_mock.side_effect = Exception() + redis_mock.return_value = 0 + redis_mock.side_effect = DatabaseConnectionError() resp = self.app.get("/counters/foo") self.assertEqual(resp.status_code, 503) @@ -160,7 +159,7 @@ def test_failed_get_request(self, value_mock): def test_failed_update_request(self, value_mock): """ Error handlers for failed UPDATE """ value_mock.return_value = 0 - value_mock.side_effect = Exception() + value_mock.side_effect = 
DatabaseConnectionError() self.test_create_counter() resp = self.app.put("/counters/foo") self.assertEqual(resp.status_code, 503) @@ -169,7 +168,23 @@ def test_failed_update_request(self, value_mock): def test_failed_post_request(self, value_mock): """ Error handlers for failed POST """ value_mock.return_value = 0 - value_mock.side_effect = Exception() + value_mock.side_effect = DatabaseConnectionError() resp = self.app.post("/counters/foo") self.assertEqual(resp.status_code, 503) + @patch("service.routes.Counter.redis.keys") + def test_failed_list_request(self, redis_mock): + """ Error handlers for failed LIST """ + redis_mock.return_value = 0 + redis_mock.side_effect = Exception() + resp = self.app.get("/counters") + self.assertEqual(resp.status_code, 503) + + def test_failed_delete_request(self): + """ Error handlers for failed DELETE """ + self.test_create_counter() + with patch("service.routes.Counter.redis.get") as redis_mock: + redis_mock.return_value = 0 + redis_mock.side_effect = DatabaseConnectionError() + resp = self.app.delete("/counters/foo") + self.assertEqual(resp.status_code, 503)