From d2f7f58ebafac06fcaa492108620dee77ee1f602 Mon Sep 17 00:00:00 2001 From: Roman Dodin Date: Wed, 27 Nov 2024 23:09:39 +0100 Subject: [PATCH] New macOS documentation (#2315) * added clab icon to the website * init macos-arm docs * add multiarch build * install clab via curl script to allow multi arch downloads * checking plugins installation * remove atuin install * use docker build push action * set dockerfile path * use devcontainer step * add default value for the clab_version input * not using moby for d-in-d due to #60 75.51 (!) Packages for moby not available in OS debian bookworm (arm64). To resolve, either: (1) set feature option '"moby": false' , or (2) choose a compatible OS version (eg: 'ubuntu-20.04'). * update skopeo * use packaged skopeo * install go via dockerfile and not devcontainer features as this was super slow * try tar without sudo as sudo failed in gh actions * install docker as not vscode user * bring back devcontainer feature for go dockerfile install is not faster * added go extension * added macos docs and changed the macros * fix links --- .devcontainer/Dockerfile | 15 +- .devcontainer/devcontainer.json | 5 +- .devcontainer/zsh/.zshrc | 3 + .devcontainer/zsh/install-zsh-plugins.sh | 5 +- .github/workflows/build-devcontainer.yml | 19 ++ docs/images/containerlab_dark_no_text.svg | 8 + .../containerlab_export_white_ink_js.svg | 2 +- .../containerlab_full_white_no_text.svg | 8 + docs/images/containerlab_white_no_text.svg | 8 + docs/index.md | 2 +- docs/install.md | 272 +----------------- docs/macos.md | 198 +++++++++++++ docs/manual/kinds/cisco_iol.md | 36 +-- docs/manual/kinds/fortinet_fortigate.md | 22 +- docs/manual/kinds/freebsd.md | 8 +- docs/manual/kinds/huawei_vrp.md | 28 +- docs/manual/kinds/openbsd.md | 8 +- docs/manual/kinds/srl.md | 10 + docs/manual/kinds/vr-aoscx.md | 6 +- docs/manual/kinds/vr-c8000v.md | 8 +- docs/manual/kinds/vr-cat9kv.md | 20 +- docs/manual/kinds/vr-csr.md | 8 +- docs/manual/kinds/vr-ftdv.md | 6 +- docs/manual/kinds/vr-n9kv.md | 28 +- docs/manual/kinds/vr-pan.md | 8 +- docs/manual/kinds/vr-ros.md | 8 +- docs/manual/kinds/vr-sros.md | 8 +- docs/manual/kinds/vr-veos.md | 8 +- docs/manual/kinds/vr-vjunosevolved.md | 8 +- docs/manual/kinds/vr-vjunosrouter.md | 10 +- docs/manual/kinds/vr-vjunosswitch.md | 8 +- docs/manual/kinds/vr-vmx.md | 8 +- docs/manual/kinds/vr-vqfx.md | 8 +- docs/manual/kinds/vr-vsrx.md | 8 +- docs/manual/kinds/vr-xrv9k.md | 8 +- docs/manual/topo-def-file.md | 2 +- docs/manual/vrnetlab.md | 2 +- docs/overrides/.icons/clab/icon.svg | 8 + docs/rn/0.46.md | 2 +- docs/stylesheets/extra.css | 189 +++++++++++- macros/main.py | 61 ++++ mkdocs.yml | 14 +- 42 files changed, 690 insertions(+), 411 deletions(-) create mode 100644 docs/images/containerlab_dark_no_text.svg create mode 100644 docs/images/containerlab_full_white_no_text.svg create mode 100644 docs/images/containerlab_white_no_text.svg create mode 100644 docs/macos.md create mode 100644 docs/overrides/.icons/clab/icon.svg create mode 100644 macros/main.py diff --git a/.devcontainer/Dockerfile b/.devcontainer/Dockerfile index b07979927..a75c539ce 100644 --- a/.devcontainer/Dockerfile +++ b/.devcontainer/Dockerfile @@ -8,7 +8,6 @@ RUN echo "deb [trusted=yes] https://netdevops.fury.site/apt/ /" | \ # Install necessary packages, including curl RUN apt-get update && apt-get install -y --no-install-recommends \ - containerlab${CLAB_VERSION:+=$CLAB_VERSION} \ direnv \ btop \ iputils-ping \ @@ -19,15 +18,19 @@ RUN apt-get update && apt-get install -y 
--no-install-recommends \ telnet \ curl +# Install Containerlab +RUN bash -c "$(curl -sL https://get.containerlab.dev)" -- -v ${CLAB_VERSION} + # Install GitHub CLI directly from the latest release -RUN bash -c 'VERSION=$(curl -s https://api.github.com/repos/cli/cli/releases/latest | \ +RUN bash -c 'ARCH=$(uname -m | sed "s/x86_64/amd64/" | sed "s/aarch64/arm64/") && \ + VERSION=$(curl -s https://api.github.com/repos/cli/cli/releases/latest | \ grep -oP "\"tag_name\": \"\K[^\"]+") && \ CLEAN_VERSION=${VERSION#v} && \ - DOWNLOAD_URL="https://github.com/cli/cli/releases/download/${VERSION}/gh_${CLEAN_VERSION}_linux_amd64.tar.gz" && \ + DOWNLOAD_URL="https://github.com/cli/cli/releases/download/${VERSION}/gh_${CLEAN_VERSION}_linux_${ARCH}.tar.gz" && \ curl -L "$DOWNLOAD_URL" | tar xz -C /tmp && \ - mv /tmp/gh_${CLEAN_VERSION}_linux_amd64/bin/gh /usr/local/bin/ && \ + mv /tmp/gh_${CLEAN_VERSION}_linux_${ARCH}/bin/gh /usr/local/bin/ && \ chmod +x /usr/local/bin/gh && \ - rm -rf /tmp/gh_${CLEAN_VERSION}_linux_amd64' + rm -rf /tmp/gh_${CLEAN_VERSION}_linux_${ARCH}' # Install gNMIc and gNOIc RUN bash -c "$(curl -sL https://get-gnmic.openconfig.net)" && \ @@ -61,7 +64,7 @@ COPY ./.devcontainer/zsh/install-zsh-plugins.sh /tmp/install-zsh-plugins.sh RUN bash -c "/tmp/install-zsh-plugins.sh" # Setup pyenv virtual environment for clab tests -COPY ./tests/requirements.txt /tmp/requirements.txt +COPY tests/requirements.txt /tmp/requirements.txt ENV PYENV_ROOT="/home/vscode/.pyenv" ENV PATH="$PYENV_ROOT/shims:$PYENV_ROOT/bin:$PATH" RUN pyenv virtualenv system clab-rf \ diff --git a/.devcontainer/devcontainer.json b/.devcontainer/devcontainer.json index 15fdceed2..b9b11f929 100644 --- a/.devcontainer/devcontainer.json +++ b/.devcontainer/devcontainer.json @@ -10,7 +10,8 @@ "features": { "ghcr.io/devcontainers/features/docker-in-docker:2": { "version": "26.1.4", - "dockerDashComposeVersion": "none" + "dockerDashComposeVersion": "none", + "moby": "false" }, // Add sshd to support gh cli codespace cp. "ghcr.io/devcontainers/features/sshd:1": { @@ -24,6 +25,8 @@ "customizations": { "vscode": { "extensions": [ + // go + "golang.go", "mhutchie.git-graph", "ms-azuretools.vscode-docker", // Python. 
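As a quick local sanity check of the devcontainer changes above, a build along these lines should work — a sketch, assuming the Dockerfile declares a `CLAB_VERSION` build argument and is built from the repository root (the `COPY tests/requirements.txt` instruction implies a repo-root build context); the image tag is illustrative:

```bash
# Build the devcontainer image from the repo root (tag illustrative)
docker build \
  -f .devcontainer/Dockerfile \
  --build-arg CLAB_VERSION=0.59.0 \
  -t clab-devcontainer:local \
  .
```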
diff --git a/.devcontainer/zsh/.zshrc b/.devcontainer/zsh/.zshrc index 0495a12ed..85a4d7dbb 100644 --- a/.devcontainer/zsh/.zshrc +++ b/.devcontainer/zsh/.zshrc @@ -129,3 +129,6 @@ eval "$(atuin init zsh)" export PYENV_ROOT="$HOME/.pyenv" [[ -d $PYENV_ROOT/bin ]] && export PATH="$PYENV_ROOT/bin:$PATH" eval "$(pyenv init -)" + +# go path +export PATH=$PATH:/usr/local/go/bin:~/go/bin \ No newline at end of file diff --git a/.devcontainer/zsh/install-zsh-plugins.sh b/.devcontainer/zsh/install-zsh-plugins.sh index a8632b231..5181193a5 100755 --- a/.devcontainer/zsh/install-zsh-plugins.sh +++ b/.devcontainer/zsh/install-zsh-plugins.sh @@ -1,6 +1,7 @@ +#!/usr/bin/env bash # atuin # bash <(curl --proto '=https' --tlsv1.2 -sSf https://setup.atuin.sh) -curl --proto '=https' --tlsv1.2 -LsSf https://github.com/atuinsh/atuin/releases/download/v18.3.0/atuin-installer.sh | sh +curl -LsSf https://github.com/atuinsh/atuin/releases/download/v18.3.0/atuin-installer.sh | sh # theme git clone --depth 1 https://github.com/romkatv/powerlevel10k.git ${ZSH_CUSTOM:-$HOME/.oh-my-zsh/custom}/themes/powerlevel10k @@ -16,7 +17,7 @@ git clone --depth 1 https://github.com/z-shell/F-Sy-H.git ${ZSH_CUSTOM:-$HOME/.o ### Shell completions ### # generate containerlab completions -containerlab completion zsh > "/home/vscode/.oh-my-zsh/custom/plugins/zsh-autocomplete/Completions/_containerlab" +/usr/bin/containerlab completion zsh > "/home/vscode/.oh-my-zsh/custom/plugins/zsh-autocomplete/Completions/_containerlab" # add clab alias to the completions sed -i 's/compdef _containerlab containerlab/compdef _containerlab containerlab clab/g' /home/vscode/.oh-my-zsh/custom/plugins/zsh-autocomplete/Completions/_containerlab # generate gnmic completions diff --git a/.github/workflows/build-devcontainer.yml b/.github/workflows/build-devcontainer.yml index 71eecf0a8..2b0f43bc6 100644 --- a/.github/workflows/build-devcontainer.yml +++ b/.github/workflows/build-devcontainer.yml @@ -5,11 +5,14 @@ on: inputs: CLAB_VERSION: description: "Containerlab version" + default: "0.59.0" required: false + type: string TAG: description: "Additional container image tag" required: false + type: string # trigger on published release event # to created the devspace container when the packages are published release: @@ -36,6 +39,21 @@ jobs: username: ${{ github.actor }} password: ${{ secrets.GITHUB_TOKEN }} + # https://github.com/devcontainers/ci/issues/268#issuecomment-1976014578 + - name: remove existing skopeo + run: | + sudo rm -rf $(which skopeo) + + - name: Update skopeo + run: | + REPO_URL="https://download.opensuse.org/repositories/devel:/kubic:/libcontainers:/unstable/xUbuntu_22.04" + sudo sh -c "echo 'deb ${REPO_URL}/ /' > /etc/apt/sources.list.d/skopeo.list" + curl -fsSL ${REPO_URL}/Release.key | gpg --dearmor | sudo tee /etc/apt/trusted.gpg.d/skopeo.gpg > /dev/null + + sudo apt update + sudo apt install skopeo + skopeo --version + - name: Extract Docker metadata id: meta uses: docker/metadata-action@v5 @@ -65,3 +83,4 @@ jobs: imageName: ghcr.io/${{ github.repository }}/clab-devcontainer imageTag: ${{ steps.extract-tags.outputs.tags }} push: always + platform: linux/amd64,linux/arm64 diff --git a/docs/images/containerlab_dark_no_text.svg b/docs/images/containerlab_dark_no_text.svg new file mode 100644 index 000000000..b4536bfc9 --- /dev/null +++ b/docs/images/containerlab_dark_no_text.svg @@ -0,0 +1,8 @@ + + + + + + + + \ No newline at end of file diff --git a/docs/images/containerlab_export_white_ink_js.svg 
b/docs/images/containerlab_export_white_ink_js.svg
index 4179f51f3..d361640bf 100644
--- a/docs/images/containerlab_export_white_ink_js.svg
+++ b/docs/images/containerlab_export_white_ink_js.svg
@@ -1,2 +1,2 @@
-
\ No newline at end of file
diff --git a/docs/images/containerlab_full_white_no_text.svg b/docs/images/containerlab_full_white_no_text.svg
new file mode 100644
index 000000000..48479fe6a
--- /dev/null
+++ b/docs/images/containerlab_full_white_no_text.svg
@@ -0,0 +1,8 @@
+
+
+
+
+
+
+
+
\ No newline at end of file
diff --git a/docs/images/containerlab_white_no_text.svg b/docs/images/containerlab_white_no_text.svg
new file mode 100644
index 000000000..76c6be156
--- /dev/null
+++ b/docs/images/containerlab_white_no_text.svg
@@ -0,0 +1,8 @@
+
+
+
+
+
+
+
+
\ No newline at end of file
diff --git a/docs/index.md b/docs/index.md
index 9177a8556..58441da12 100644
--- a/docs/index.md
+++ b/docs/index.md
@@ -73,7 +73,7 @@ This short clip briefly demonstrates containerlab features and explains its purpose

## Features

-* **IaaC approach**
+* **Lab as Code (IaC) approach**
    Declarative way of defining the labs by means of the topology definition [`clab` files](manual/topo-def-file.md).
* **Network Operating Systems centric**
    Focus on containerized Network Operating Systems. The sophisticated startup requirements of various NOS containers are abstracted with [kinds](manual/kinds/index.md) which allows the user to focus on the use cases, rather than infrastructure hurdles.
diff --git a/docs/install.md b/docs/install.md
index 931229e33..caee1c9a1 100644
--- a/docs/install.md
+++ b/docs/install.md
@@ -2,14 +2,17 @@ hide:
- navigation
---
-Containerlab is distributed as a Linux deb/rpm package and can be installed on any Debian- or RHEL-like distributive in a matter of a few seconds.
+
+# Installation
+
+Containerlab is distributed as a Linux deb/rpm/apk package for the amd64 and arm64 architectures and can be installed on any Debian- or RHEL-like distribution in a matter of seconds.

## Pre-requisites

The following requirements must be satisfied to let the containerlab tool run successfully:

* A user should have `sudo` privileges to run containerlab.
-* A Linux server/VM[^2] and [Docker](https://docs.docker.com/engine/install/) installed.
+* A Linux server/VM[^1] and [Docker](https://docs.docker.com/engine/install/) installed.
* Load container images (e.g. Nokia SR Linux, Arista cEOS) that are not downloadable from a container registry. Containerlab will try to pull images at runtime if they do not exist locally.

## Quick setup

@@ -18,11 +21,11 @@ The easiest way to get started with containerlab is to use the [quick setup script], which installs:

* docker (docker-ce), docker compose
* Containerlab (using the package repository)
-* [`gh` CLI tool](https://cli.github.com/)
+* [`gh`](https://cli.github.com/) CLI tool

-The script officially supports the following OSes:
+The script has been tested on the following OSes:

-* Ubuntu 20.04, 22.04, 23.10
+* Ubuntu 20.04, 22.04, 23.10, 24.04
* Debian 11, 12
* Red Hat Enterprise Linux 9
* CentOS Stream 9
@@ -243,7 +246,7 @@ mv /etc/containerlab/containerlab /usr/bin && chmod a+x /usr/bin/containerlab

## Windows Subsystem for Linux (WSL)

-Containerlab [runs](https://twitter.com/ntdvps/status/1380915270328401922) on WSL, but you need to [install docker-ce](https://docs.docker.com/engine/install/) inside the WSL2 linux system instead of using Docker Desktop[^3].
+Containerlab [runs](https://twitter.com/ntdvps/status/1380915270328401922) on WSL, but you need to [install docker-ce](https://docs.docker.com/engine/install/) inside the WSL2 linux system instead of using Docker Desktop[^2]. If you are running Ubuntu/Debian as your WSL2 machine, you can use the [quick setup this script](https://github.com/srl-labs/containerlab/blob/main/utils/quick-setup.sh) to install docker-ce. @@ -259,251 +262,13 @@ In Windows 11 with WSL2 it is now possible to [enable KVM support](https://serve ## Apple macOS -Running containerlab on macOS is possible both on ARM (M1/M2) and Intel chipsets with certain limitations and caveats rooted in different architectures and underlying OS. - -### ARM - -At the moment of this writing, there are not a lot[^6] of Network OSes built for arm64 architecture. This fact alone makes it not practical to run containerlab natively on ARM-based Macs. Nevertheless, it is technically possible to run containerlab on ARM-based Macs by launching a Linux VM with x86_64 architecture and running containerlab inside this VM. This approach comes with a hefty performance penalty, therefore it is suitable only for tiny labs. - -#### UTM - -The easiest way to start a Linux VM with x86_64 architecture on macOS is to use [UTM](https://mac.getutm.app/). UTM is a free[^7] and open-source graphical virtual machine manager that provides a simple and intuitive interface for creating, managing, and running virtual machines with qemu. - -When you have UTM installed, you can download a pre-built Debian 12 UTM image built by the Containerlab team using the following command[^8]: - -```bash -sudo docker run --rm -v $(pwd):/workspace ghcr.io/oras-project/oras:v1.1.0 pull \ - ghcr.io/srl-labs/containerlab/clab-utm-box:0.1.0 -``` - -By running this command you will download the `clab_debian12.utm` file which is a UTM image with `containerlab`, `docker-ce` and `gh` tools pre-installed[^9]. - -Open the downloaded image with UTM **File -> Open -> select .utm file** and start the VM. - -Once the VM is started, you can log in using `debian:debian` credentials. Run `ip -4 addr` in the terminal to find out which IP got assigned to this VM. -Now you can use this IP for your Mac terminal to connect to the VM via SSH[^10]. - -When logged in, you can upgrade the containerlab to the latest version with: - -```bash -sudo clab version upgrade -``` - -and start downloading the labs you want to run. - -#### Docker in Docker - -Another option to run containerlab on ARM-based Macs is to use Docker in Docker approach. With this approach, a docker-in-docker container is launched on the macOS inside the VM providing a docker environment. This setup also works on other operating systems where Docker is available. Below is a step-by-step guide on how to set it up. - -//// details | "Docker in docker guide" -We'll provide an example of a custom [devcontainer](https://code.visualstudio.com/docs/devcontainers/containers) that can be opened in [VSCode](https://code.visualstudio.com) with [Remote Development extension pack](https://marketplace.visualstudio.com/items?itemName=ms-vscode-remote.vscode-remote-extensionpack) installed. - -Create `.devcontainer` directory in the root of the Containerlab repository with the following content: - -```text -.devcontainer -|- devcontainer.json -|- Dockerfile -``` - -/// tab | Dockerfile - -```Dockerfile -# The devcontainer will be based on debian bullseye -# The base container already has entrypoint, vscode user account, etc. 
out of the box -FROM mcr.microsoft.com/vscode/devcontainers/base:bullseye - -# containelab version will be set in devcontainer.json -ARG _CLAB_VERSION - -# Set permissions for mounts in devcontainer.json -RUN mkdir -p /home/vscode/.vscode-server/bin -RUN chown -R vscode:vscode /home/vscode/.vscode-server - -# install some basic tools inside the container -# adjust this list based on your demands -RUN apt-get update && \ - apt-get upgrade -y && \ - apt-get install -y --no-install-recommends \ - sshpass \ - curl \ - iputils-ping \ - htop \ - yamllint \ - && rm -rf /var/lib/apt/lists/* \ - && rm -Rf /usr/share/doc && rm -Rf /usr/share/man \ - && apt-get clean - -# install preferred version of the containerlab -RUN bash -c "$(curl -sL https://get.containerlab.dev)" -- -v ${_CLAB_VERSION} -``` - -/// - -/// tab | devcontainer.json - -```json -// For format details, see https://aka.ms/devcontainer.json. For config options, see the -// README at: https://github.com/devcontainers/templates/tree/main/src/python -{ - "name": "clab-dev-container", - "build": { - "dockerfile": "Dockerfile", - "args": { - "_CLAB_VERSION": "0.47.2" - } - }, - "features": { - // Containerlab will run in a docker-in-docker container - // it is also possible to use docker-outside-docker feature - "ghcr.io/devcontainers/features/docker-in-docker:latest": { - "version": "latest" - } - // You can add other features from this list: https://github.com/orgs/devcontainers/packages?repo_name=features - // For example: - //"ghcr.io/devcontainers/features/go:latest": { - // "version": "1.21" - //} - - }, - // add any required extensions that must be pre-installed in the devcontainer - "customizations": { - "vscode": { - "extensions": [ - // various tools - "ms-azuretools.vscode-docker", - "tuxtina.json2yaml", - "vscode-icons-team.vscode-icons", - "mutantdino.resourcemonitor" - ] - } - }, - // This adds persistent mounts, so some configuration like docker credentials are saved for the vscode user and root (for sudo). - // Furthermore, your bash history and other configurations you made in your container users 'vscode' home are saved. - // .vscode-server is an anonymous volume. Gets destroyed on rebuild, which allows vscode to reinstall the extensions and dotfiles. - "mounts": [ - "source=clab-vscode-home-dir,target=/home/vscode,type=volume", - "source=clab-docker-root-config,target=/root/.docker,type=volume", - "target=/home/vscode/.vscode-server,type=volume" -] -} -``` - -/// -Once the devcontainer is defined as described above: - -* Open the devcontainer in VSCode -* Import the required images for your cLab inside the container (if you are using Docker-in-Docker option) -* Start your Containerlab -//// - -### Intel - -On Intel based Macs, containerlab can be run in a Linux VM started by Docker Desktop for Mac[^4]. To start using containerlab in this Linux VM we start a container with containerlab inside and mount the directory with our lab files into the container. - -```shell linenums="1" -CLAB_WORKDIR=~/clab - -docker run --rm -it --privileged \ - --network host \ - -v /var/run/docker.sock:/var/run/docker.sock \ - -v /run/netns:/run/netns \ - --pid="host" \ - -w $CLAB_WORKDIR \ - -v $CLAB_WORKDIR:$CLAB_WORKDIR \ - ghcr.io/srl-labs/clab bash -``` - -The first command in the snippet above sets the working directory which you intend to use on your macOS. The `~/clab` in the example above expands to `/Users//clab` and means that we intend to have our containerlab labs to be stored in this directory. - -/// note - -1. 
It is best to create a directory under the `~/some/path` unless you know what to do[^5] -2. vrnetlab based nodes will not be able to start, since Docker VM does not support virtualization. -3. Docker Desktop for Mac introduced cgroups v2 support in 4.3.0 version; to support the images that require cgroups v1 follow [these instructions](https://github.com/docker/for-mac/issues/6073). -4. Docker Desktop relies on a LinuxKit based HyperKit VM. Unfortunately, it is shipped with a minimalist kernel, and some modules such as VRF are disabled by default. Follow [these instructions](https://medium.com/@notsinge/making-your-own-linuxkit-with-docker-for-mac-5c1234170fb1) to rebuild it with more modules. -/// - -When the container is started, you will have a bash shell opened with the directory contents mounted from the macOS. There you can use `containerlab` commands right away. - -/// details | Step-by-step example -Let's imagine I want to run a lab with two SR Linux containers running directly on a macOS. - -First, I need to have Docker Desktop for Mac installed and running. - -Then I will create a directory under the `$HOME` path on my mac: - -``` -mkdir -p ~/clab -``` - -Then I will create a clab file defining my lab in the newly created directory: - -```bash -cat < ~/clab/2srl.clab.yml -name: 2srl - -topology: - nodes: - srl1: - kind: nokia_srlinux - image: ghcr.io/nokia/srlinux - srl2: - kind: nokia_srlinux - image: ghcr.io/nokia/srlinux +Running containerlab on macOS is possible both on ARM (M1/M2/M3/etc) and Intel chipsets. For a long time, we had many caveats around M-chipsets on Macs, but with the introduction of ARM64-native NOSes like Nokia SR Linux and Arista cEOS, powered by Rosetta emulation for x86_64-based NOSes, it is now possible to run containerlab on ARM-based Macs. 
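Not sure which flavor your Mac is? A quick architecture check from the terminal tells you whether you need to care about ARM64 images at all:

```bash
# Apple silicon (M1/M2/M3) reports arm64; Intel Macs report x86_64
uname -m
```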
- links: - - endpoints: ["srl1:e1-1", "srl2:e1-1"] -EOF -``` - -Now when the clab file is there, launch the container and don't forget to use path to the directory you created: - -```bash -CLAB_WORKDIR=~/clab - -docker run --rm -it --privileged \ - --network host \ - -v /var/run/docker.sock:/var/run/docker.sock \ - -v /run/netns:/run/netns \ - --pid="host" \ - -w $CLAB_WORKDIR \ - -v $CLAB_WORKDIR:$CLAB_WORKDIR \ - ghcr.io/srl-labs/clab bash -``` - -Immediately you will get into the directory inside the container with your lab file available: - -``` -root@docker-desktop:/Users/romandodin/clab# ls -2srl.clab.yml -``` - -Now you can launch the lab, as containerlab is already part of the image: - -``` -root@docker-desktop:/Users/romandodin/clab# clab dep -t 2srl.clab.yml -INFO[0000] Parsing & checking topology file: 2srl.clab.yml -INFO[0000] Creating lab directory: /Users/romandodin/clab/clab-2srl -INFO[0000] Creating root CA -INFO[0000] Creating docker network: Name='clab', IPv4Subnet='172.20.20.0/24', IPv6Subnet='3fff:172:20:20::/64', MTU='1500' -INFO[0000] Creating container: srl1 -INFO[0000] Creating container: srl2 -INFO[0001] Creating virtual wire: srl1:e1-1 <--> srl2:e1-1 -INFO[0001] Adding containerlab host entries to /etc/hosts file -+---+----------------+--------------+-----------------------+------+-------+---------+----------------+----------------------+ -| # | Name | Container ID | Image | Kind | Group | State | IPv4 Address | IPv6 Address | -+---+----------------+--------------+-----------------------+------+-------+---------+----------------+----------------------+ -| 1 | clab-2srl-srl1 | 574bf836fb40 | ghcr.io/nokia/srlinux | srl | | running | 172.20.20.2/24 | 3fff:172:20:20::2/64 | -| 2 | clab-2srl-srl2 | f88531a74ffb | ghcr.io/nokia/srlinux | srl | | running | 172.20.20.3/24 | 3fff:172:20:20::3/64 | -+---+----------------+--------------+-----------------------+------+-------+---------+----------------+----------------------+ -``` - -/// +Since we wanted to share our experience with running containerlab on macOS in details, we have created a separate - [Containerlab on macOS](macos.md) - guide. ## Upgrade -To upgrade `containerlab` to the latest available version issue the following command[^1]: +To upgrade `containerlab` to the latest available version issue the following command[^3]: ``` sudo -E containerlab version upgrade @@ -572,13 +337,6 @@ or more globally: sudo setsebool -P selinuxuser_execmod 1 ``` -[^1]: only available if installed from packages -[^2]: Most containerized NOS will require >1 vCPU. RAM size depends on the lab size. Architecture: AMD64. IPv6 should not be disabled in the kernel. -[^3]: No need to uninstall Docker Desktop, just make sure that it is not integrated with WSL2 machine that you intend to use with containerlab. Moreover, you can make it even work with Docker Desktop with a [few additional steps](https://twitter.com/networkop1/status/1380976461641834500/photo/1), but installing docker-ce into the WSL maybe more intuitive. -[^4]: kudos to Michael Kashin who [shared](https://github.com/srl-labs/containerlab/issues/577#issuecomment-895847387) this approach with us -[^5]: otherwise make sure to add a custom shared directory to the docker on mac. -[^6]: FRR is a good example of arm64-capable network OS. Nokia SR Linux is going to be available for arm64 in the 2024. -[^7]: There are two options to install UTM: via [downloadable dmg](https://github.com/utmapp/UTM/releases/latest/download/UTM.dmg) file (free) or App Store (paid). 
The App Store version is exactly the same, it is just a way to support the project.
-[^8]: This command requires docker to be installed on your macOS. You can use Docker Desktop, Rancher or [colima](https://github.com/abiosoft/colima) to run docker on your macOS.
-[^9]: If you want to install these tools on an existing Debian machine, you can run `wget -qO- containerlab.dev/setup-debian | bash -s -- all` command.
-[^10]: The UTM image has a pre-installed ssh key for the `debian` user. You can download the shared private key from [here](https://github.com/srl-labs/clabernetes/blob/main/launcher/assets/default_id_rsa).
+[^1]: Most containerized NOS will require >1 vCPU. RAM size depends on the lab size. Architecture: AMD64. IPv6 should not be disabled in the kernel.
+[^2]: No need to uninstall Docker Desktop, just make sure that it is not integrated with the WSL2 machine that you intend to use with containerlab. Moreover, you can even make it work with Docker Desktop with a [few additional steps](https://twitter.com/networkop1/status/1380976461641834500/photo/1), but installing docker-ce into the WSL may be more intuitive.
+[^3]: only available if containerlab was installed from packages
\ No newline at end of file
diff --git a/docs/macos.md b/docs/macos.md
new file mode 100644
index 000000000..ea8eb9ae8
--- /dev/null
+++ b/docs/macos.md
@@ -0,0 +1,198 @@
---
comments: true
hide:
- navigation
---

# Containerlab on macOS

/// details | Summary for the impatient
    type: subtle-note

1. Install [OrbStack](https://orbstack.dev)[^1] on your macOS
2. Create an **arm64** Linux VM using OrbStack or alternatives
3. Install containerlab in the VM using the usual [installation instructions](install.md)
4. Check which images can/should work on ARM64
5. Deploy your lab. You can see a demo of this workflow in [this YT video][yt-demo].

[yt-demo]: https://www.youtube.com/watch?v=_BTa-CiTpvI&t=1573s

If you run an Intel Mac, you can still use OrbStack to deploy a VM, but you will not need to worry about ARM64 images, as your processor runs x86_64 natively.
///

For quite some time, we have been saying that containerlab and macOS is a challenging mix. This statement has been echoed through multiple workshops/demos and was based on the following reasons:

1. **ARM64 Network OS images**: With the shift to the ARM64 architecture made by Apple (and Microsoft[^2]), we found ourselves in a situation where 99% of existing network OSes were not compiled for ARM64. This meant that containerlab users would have to rely on x86_64 emulation via Rosetta or QEMU, which imposes a significant performance penalty, often making the emulation unusable for practical purposes.
2. **Docker on macOS**: Since containerlab relies on Docker for container orchestration, it needs Docker to be natively installed on the host.
    On macOS, Docker is always provided as a Linux/arm64 VM that runs the docker daemon, with docker-cli running natively on macOS. You can imagine that dealing with a VM that runs a network topology poses some UX challenges, like getting access to the exposed ports or dealing with files on macOS that need to be accessible to the Docker VM.
3. **Linux on macOS?** It is not only Docker that containerlab is based on. We leverage some Linux kernel APIs (like netlink), either directly or via Docker, to set up links, namespaces, bind-mounts, etc.
    Naturally, Darwin (the macOS kernel) is not Linux, and while it is POSIX compliant, it is not a drop-in replacement for Linux.
This means that some of the Linux-specific features that containerlab relies on are simply not present on macOS.

Looking at the above challenges, one might think that containerlab on macOS is a lost cause. However, things have recently started to take a turn for the better, and we are happy to say that for certain labs Containerlab on macOS might even be the better (!) choice overall.

As a long-time macOS user, Roman recorded an in-depth video demonstrating how to run containerlab topologies on macOS using the tools of his choice. You can watch the video or jump to the text version of the guide below.

-{{youtube(url='https://www.youtube.com/embed/_BTa-CiTpvI')}}-

## Network OS Images

The first thing one needs to understand is that if you run macOS on an ARM chipset (M1+), then you should use ARM64 network OS images whenever possible. This will give you the best performance and compatibility.

With Rosetta emulation it is possible to run x86_64 images on ARM64, but this comes with a performance penalty that may even render some nodes unusable.

/// admonition | VM-based images
    type: warning
VM-based images built with [hellt/vrnetlab](manual/vrnetlab.md) require nested virtualization support, which is only available on M3 and newer chips with macOS 15 and above.

If you happen to satisfy these requirements, please let us know in the comments which images you were able to run on your M3+ Mac.
///

### Native ARM64 Network OS and application images

Finally :pray: there is some good news on this front, as vendors have started to release, or at least announce, ARM64 versions of their network OSes.
**Nokia** first [released](https://www.linkedin.com/posts/rdodin_oops-we-did-it-again-three-years-ago-activity-7234176896018632704-Ywk-/) a preview version of their freely distributed [SR Linux for ARM64](manual/kinds/srl.md#getting-sr-linux-image), and **Arista** announced the imminent ARM64 cEOS availability sometime in 2024.

You can also get the [**FRR**](https://quay.io/repository/frrouting/frr?tab=tags) container for the ARM64 architecture from their container registry.

Yes, SR Linux, cEOS and FRR do not cover all the network OSes out there, but it is a good start, and we hope that more vendors will follow the trend.

The good news is that almost all of the popular applications that we see being used in containerlab topologies are **already** built for ARM. Your streaming telemetry stack (gnmic, prometheus/influx, grafana) and regular client-emulating endpoints such as Alpine, or the collection of network-related tools in the network-multitool image, have supported the ARM architecture for a while. You can leverage the sheer ecosystem of multi-arch applications available in the public registries.

### Running under Rosetta

If the image you're looking for is not available for ARM64, you can still try running the AMD64 version of the image under Rosetta emulation. Rosetta is a macOS translation layer that allows you to run x86_64 code on the ARM64 architecture.

It has been known to work for the following images:

- [Arista cEOS x64](manual/kinds/ceos.md)
- [Cisco IOL](manual/kinds/cisco_iol.md)

## Docker on macOS

Ever since macOS switched to the ARM architecture for its processors, people in the containers camp have been busy making sure that Docker works well on macOS's new architecture.

### How Docker runs on Macs

But before we start talking about Docker on ARM Macs, let's remind ourselves how Docker works on macOS with Intel processors.
-{{ diagram(url='srl-labs/containerlab/diagrams/macos-arm.drawio', title='Docker on Intel Macs', page=3) }}-

At the heart of any product or project that enables the Docker engine on a Mac[^3] is a Linux VM that hosts the Docker daemon, aka "the engine". This VM is created and managed by the application that sets up Docker on your desktop OS.
The Linux VM is a mandatory piece because the whole container ecosystem is built around Linux kernel features and APIs. Therefore, running Docker on any host with an operating system other than Linux requires a Linux VM.

As shown above, on Intel Macs, macOS runs the Darwin kernel on top of the AMD64 (aka x86_64) architecture, and consequently, the Docker VM runs the same architecture. Since the Docker VM architecture matches the host architecture, virtualization is performant, as no processor emulation is needed.

Now let's see how things change when we switch to ARM Macs:

-{{ diagram(url='srl-labs/containerlab/diagrams/macos-arm.drawio', title='Docker on ARM Macs', page=2) }}-

The diagram looks 99% the same as for the Intel Macs, the only difference being the architecture that macOS runs on and, consequently, the architecture of the Docker VM.
Now we run the ARM64 architecture on the host, and the Docker VM is also ARM64.

/// details | Native vs Emulation

If Docker runs exactly the same on ARM Macs as it does on Intel Macs, then why is it suddenly a problem to run containerlab on ARM Macs?

Well, it all comes down to the requirement of having ARM64 network OS images that we discussed earlier. When your Docker VM runs Linux/ARM64, only ARM64-native software runs in it natively, and we, as a network community, do not have many ARM64-native network OSes. It is getting better, but we cannot yet claim 100% parity with the x86_64 world.

You should strive to run native images as much as possible, as this gives you the best performance and compatibility. But how do you tell if an image is ARM64-native or not?
A lot of applications that you might want to run in your containerlab topologies are already ARM64-native and often available as multi-arch images.

Using the `docker image inspect` command, you can read the `Architecture` field to see if the image is ARM64-native:

```bash
docker image inspect ghcr.io/nokia/srlinux:24.10.1 -f '{{.Architecture}}'
arm64
```

Running the same command for an image that is not ARM64-native will return `amd64`:

```bash
docker image inspect goatatwork/snmpwalk -f '{{.Architecture}}'
amd64
```

Still, it will be possible to run the `snmpwalk` container, thanks to Rosetta emulation.
///

### Software

There are many software solutions that deliver Docker on macOS, both for Intel and ARM Macs:

- :star: [OrbStack](https://orbstack.dev/) - great UX and performance. The choice of many and recommended by the Containerlab maintainer. Free for personal use.
- [Docker Desktop](https://www.docker.com/products/docker-desktop/) - the original and the most popular way to run Docker on macOS.
- [Rancher Desktop](https://rancherdesktop.io/) - another popular option.
- [Colima](https://github.com/abiosoft/colima) - a lightweight, CLI-based VM solution.

Most users, though, do not run Containerlab directly against the Docker engine provided by one of the above solutions.
Instead, it is often easier to spin up a Linux VM (powered by one of the above-mentioned software products) and install Containerlab natively inside that arm64 Linux VM.
You can see a demonstration of this workflow in this [YT video][yt-demo].

## Devcontainer

Another option to run containerlab on ARM or Intel Macs is the Docker-in-Docker approach enabled by our Devcontainer.

Containerlab's devcontainer was created to power [containerlab in codespaces](manual/codespaces.md), but it is a perfect fit for running containerlab on **any macOS**, as it uses the docker-in-docker method where an isolated instance of a docker daemon is created inside a container.

/// note
Starting with **Containerlab v0.60.0**, you can use the devcontainer with ARM64 macOS to run containerlab.
///

To start using the devcontainer, you have to create a `devcontainer.json` file in the project directory that holds your containerlab topology. If you're using Containerlab the right way, your labs are neatly stored in a git repo; the `devcontainer.json` file will then live in the root of the repo at the `.devcontainer/devcontainer.json` path.

Note that the labs we publish with Codespaces support already have the `devcontainer.json` file; in that case you don't even need to create it manually.

If you create the `devcontainer.json` file manually, its content is trivial: all you have to specify is the containerlab version you want to run. Here is an example of the `devcontainer.json` file:

```json title=".devcontainer/devcontainer.json"
{
    "image": "ghcr.io/srl-labs/containerlab/clab-devcontainer:0.60.0" //(1)!
}
```

1. devcontainer versions match containerlab versions

With the devcontainer file in place, when you open the repo in VS Code, you will be prompted to reopen the workspace in the devcontainer.

![img1](https://gitlab.com/rdodin/pics/-/wikis/uploads/ee918d1d5d85d83f45ced031c5fa999d/image.png)

Clicking on this button will open the workspace in the devcontainer; you will see the usual VS Code window, but now the workspace will have containerlab installed, with a separate docker instance running inside the spawned container. This means that your devcontainer works in isolation from the rest of your system.

Open a terminal in VS Code and deploy the lab with the familiar `sudo clab dep` command. That's it!

## Alternative options

### UTM

If OrbStack cannot be used in your environment for some reason, you can use [UTM](https://mac.getutm.app/) - a free[^4] and open-source graphical virtual machine manager that provides a simple interface for creating, managing, and running virtual machines with qemu.

When you have UTM installed, you can download a pre-built Debian 12 UTM image created by the Containerlab team using the following command[^5]:

```bash
sudo docker run --rm -v $(pwd):/workspace ghcr.io/oras-project/oras:v1.1.0 pull \
    ghcr.io/srl-labs/containerlab/clab-utm-box:0.1.0
```

Running this command downloads the `clab_debian12.utm` file, which is a UTM image with the `containerlab`, `docker-ce` and `gh` tools pre-installed[^6].

Open the downloaded image with UTM (**File -> Open -> select the .utm file**) and start the VM.

Once the VM is started, you can log in using the `debian:debian` credentials. Run `ip -4 addr` in the terminal to find out which IP got assigned to this VM.
Now you can use this IP from your Mac terminal to connect to the VM via SSH[^7].
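For example, if the VM reported `192.168.64.5` (an illustrative address), the connection from the macOS terminal could look like this, assuming you saved the shared private key from the footnote as `~/.ssh/utm_debian_key`:

```bash
chmod 600 ~/.ssh/utm_debian_key   # ssh refuses keys with loose permissions
ssh -i ~/.ssh/utm_debian_key debian@192.168.64.5
```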
When logged in, you can upgrade containerlab to the latest version with:

```bash
sudo clab version upgrade
```

and start downloading the labs you want to run.

[^1]: Or any other application that enables Docker on macOS. OrbStack is just a great choice that is used by many.
[^2]: With the Microsoft Surface laptops released on the ARM64 architecture.
[^3]: The same principles apply to Docker Desktop on Windows.
[^4]: There are two options to install UTM: via the [downloadable dmg](https://github.com/utmapp/UTM/releases/latest/download/UTM.dmg) file (free) or the App Store (paid). The App Store version is exactly the same; it is just a way to support the project.
[^5]: This command requires docker to be installed on your macOS. You can use Docker Desktop, Rancher or [colima](https://github.com/abiosoft/colima) to run docker on your macOS.
[^6]: If you want to install these tools on an existing Debian machine, you can run the `wget -qO- containerlab.dev/setup-debian | bash -s -- all` command.
[^7]: The UTM image has a pre-installed ssh key for the `debian` user. You can download the shared private key from [here](https://github.com/srl-labs/clabernetes/blob/main/launcher/assets/default_id_rsa).

diff --git a/docs/manual/kinds/cisco_iol.md b/docs/manual/kinds/cisco_iol.md
index 35bd124c5..3a6acaf3a 100644
--- a/docs/manual/kinds/cisco_iol.md
+++ b/docs/manual/kinds/cisco_iol.md
@@ -5,15 +5,15 @@ kind_code_name: cisco_iol
kind_display_name: Cisco IOL
kind_short_display_name: IOL
---
-# [[[ kind_display_name ]]]
+# -{{ kind_display_name }}-

-[[[ kind_display_name ]]] (IOS On Linux or [[[ kind_short_display_name ]]] for short) is a version of Cisco IOS/IOS-XE software which is packaged as binary, in other words it does not require a virtual machine, hence the name IOS *On Linux*.
+-{{ kind_display_name }}- (IOS On Linux, or -{{ kind_short_display_name }}- for short) is a version of the Cisco IOS/IOS-XE software that is packaged as a binary; in other words, it does not require a virtual machine, hence the name IOS *On Linux*.

-It is identified with `[[[ kind_code_name ]]]` kind in the [topology file](../topo-def-file.md) and built using [vrnetlab](../vrnetlab.md) project and essentially is the IOL binary packaged into a docker container.
+It is identified with the `-{{ kind_code_name }}-` kind in the [topology file](../topo-def-file.md), built using the [vrnetlab](../vrnetlab.md) project, and essentially is the IOL binary packaged into a docker container.

-## Getting and building [[[ kind_display_name ]]]
+## Getting and building -{{ kind_display_name }}-

-You can get [[[ kind_display_name ]]] from Cisco's CML refplat .iso. It is identified by the `iol` or `ioll2` prefix.
+You can get -{{ kind_display_name }}- from Cisco's CML refplat .iso. It is identified by the `iol` or `ioll2` prefix.

From the IOL binary you are required to build a container using the [vrnetlab](../vrnetlab.md) project.

@@ -24,20 +24,20 @@ IOL is distributed as two versions:

## Resource requirements

-[[[ kind_display_name ]]] is very light on resources compared to VM-based Cisco products. Each IOL node requires at minimum:
+-{{ kind_display_name }}- is very light on resources compared to VM-based Cisco products. Each IOL node requires at minimum:

- 1 vCPU per node; you are able to oversubscribe and run many IOL nodes per vCPU.
-- 768Mb of RAM. 
+- 768 MB of RAM.
- 1 MB of disk space for the NVRAM (where the configuration is saved).

Using [KSM](../vrnetlab.md#memory-optimization) you can achieve a higher density of IOL nodes per GB of RAM.
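For reference, a minimal two-node IOL topology could look like the sketch below; the image name and tag are illustrative and depend on how you tagged your vrnetlab build:

```yaml
name: iol-lab
topology:
  nodes:
    r1:
      kind: cisco_iol
      image: vrnetlab/cisco_iol:17.12.01 # illustrative tag
    r2:
      kind: cisco_iol
      image: vrnetlab/cisco_iol:17.12.01
  links:
    # first data-plane ports of both nodes, see interface naming below
    - endpoints: ["r1:Ethernet0/1", "r2:Ethernet0/1"]
```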
-## Managing [[[ kind_display_name ]]] nodes
+## Managing -{{ kind_display_name }}- nodes

-You can manage the [[[ kind_display_name ]]] with containerlab via the following interfaces:
+You can manage -{{ kind_display_name }}- with containerlab via the following interfaces:

/// tab | CLI
-to connect to the [[[ kind_short_display_name ]]] CLI
+to connect to the -{{ kind_short_display_name }}- CLI

```bash
ssh admin@<node-name>
```
///

/// tab | bash
-to connect to a `bash` shell of a running [[[ kind_short_display_name ]]] container:
+to connect to a `bash` shell of a running -{{ kind_short_display_name }}- container:

```bash
docker exec -it <container-name> bash
```
///

Default credentials: `admin:admin`

## Interface naming

-You can use [interfaces names](../topo-def-file.md#interface-naming) in the topology file like they appear in the [[[ kind_display_name ]]] CLI.
+You can use [interface names](../topo-def-file.md#interface-naming) in the topology file like they appear in the -{{ kind_display_name }}- CLI.

The interface naming convention is: `Ethernet0/X` (or `e0/X`), where `X` is the port number.

@@ -74,16 +74,16 @@ Keep in mind IOL defines interfaces in groups of 4. Every four interfaces (zero-

- `e1/0` - Fourth data-plane interface
- `e1/1` - Fifth data-plane interface

-The example ports above would be mapped to the following Linux interfaces inside the container running [[[ kind_display_name ]]]:
+The example ports above would be mapped to the following Linux interfaces inside the container running -{{ kind_display_name }}-:

- `eth0` - management interface connected to the containerlab management network. Mapped to `Ethernet0/0`.
- `eth1` - First data-plane interface. Mapped to the `Ethernet0/1` interface.
-- `eth2` - Second data-plane interface. Mapped to `Ethernet0/2` interface 
+- `eth2` - Second data-plane interface. Mapped to the `Ethernet0/2` interface.
- `eth3` - Third data-plane interface. Mapped to the `Ethernet0/3` interface.
- `eth4` - Fourth data-plane interface. Mapped to the `Ethernet1/0` interface.
- `eth5` - Fifth data-plane interface. Mapped to the `Ethernet1/1` interface, and so on...

-When containerlab launches [[[ kind_display_name ]]], the `Ethernet0/0` interface of the container gets assigned management IPv4 and IPv6 addresses from docker. The `Ethernet0/0` interface is in it's own management VRF so that configuration in the global context will not affect the management interface.
+When containerlab launches -{{ kind_display_name }}-, the `Ethernet0/0` interface of the container gets assigned management IPv4 and IPv6 addresses from docker. The `Ethernet0/0` interface is in its own management VRF so that configuration in the global context will not affect the management interface.

Interfaces can be defined in a non-contiguous manner in your topology file. See the example below.

@@ -104,9 +104,9 @@ topology:
```

/// warning
-When defining interfaces non-contigiously you may see more interfaces than you have defined in the [[[ kind_short_display_name ]]] CLI, this is because interfaces are provisioned in groups.
+When defining interfaces non-contiguously, you may see more interfaces than you have defined in the -{{ kind_short_display_name }}- CLI; this is because interfaces are provisioned in groups.

-At minimum you will see all numerically-lower indexed interfaces in the CLI compared to the interface you have defined, you may also see interfaces with a higher numerical index. 
+At minimum you will see all numerically-lower indexed interfaces in the CLI compared to the interface you have defined; you may also see interfaces with a higher numerical index.

**Links/interfaces that you did not define in your containerlab topology will *not* pass any traffic.**
///

Ethernet0/3                unassigned      YES unset  administratively down down

## Usage and sample topology

-IOL-L2 has a different startup configuration compared to the regular IOL. You can tell containerlab you are using the L2 image by supplying the `type` field in your topology. 
+IOL-L2 has a different startup configuration compared to the regular IOL. You can tell containerlab that you are using the L2 image by supplying the `type` field in your topology.

See the sample topology below

diff --git a/docs/manual/kinds/fortinet_fortigate.md b/docs/manual/kinds/fortinet_fortigate.md
index 1736f8f7e..1a6916c9d 100644
--- a/docs/manual/kinds/fortinet_fortigate.md
+++ b/docs/manual/kinds/fortinet_fortigate.md
@@ -4,26 +4,26 @@ search:
kind_code_name: fortinet_fortigate
kind_display_name: Fortinet Fortigate
---
-# [[[ kind_display_name ]]]
+# -{{ kind_display_name }}-

-[[[ kind_display_name ]]] virtualized security appliance is identified with the `[[[ kind_code_name ]]]` kind in the [topology file](../topo-def-file.md). It is built using the [hellt/vrnetlab](../vrnetlab.md) project and essentially is a Qemu VM packaged in a docker container format.
+The -{{ kind_display_name }}- virtualized security appliance is identified with the `-{{ kind_code_name }}-` kind in the [topology file](../topo-def-file.md). It is built using the [hellt/vrnetlab](../vrnetlab.md) project and essentially is a Qemu VM packaged in a docker container format.

-The integration of [[[ kind_display_name ]]] has been tested with v7.0.14 release. Note, that releases >= 7.2.0 would require a valid license and internet access to activate the [[[ kind_display_name ]]] VM.
+The integration of -{{ kind_display_name }}- has been tested with the v7.0.14 release. Note that releases >= 7.2.0 require a valid license and internet access to activate the -{{ kind_display_name }}- VM.

-## Getting [[[ kind_display_name ]]] disk image
+## Getting -{{ kind_display_name }}- disk image

-Users can obtain the qcow2 disk image for [[[ kind_display_name ]]] VM from the [official support site](https://support.fortinet.com/Download/VMImages.aspx); a free account required. Download the "New deployment" variant of the FGVM64 VM for the KVM platform.
+Users can obtain the qcow2 disk image for the -{{ kind_display_name }}- VM from the [official support site](https://support.fortinet.com/Download/VMImages.aspx); a free account is required. Download the "New deployment" variant of the FGVM64 VM for the KVM platform.

Extract the downloaded zip file and rename the `fortios.qcow2` file to `fortios-vX.Y.Z.qcow2`, where `X.Y.Z` is the version of the Fortigate VM. Put the renamed file in the `fortigate` directory of the cloned [hellt/vrnetlab](https://github.com/hellt/vrnetlab) project and run `make` to build the container image.

-## Managing [[[ kind_display_name ]]] nodes
+## Managing -{{ kind_display_name }}- nodes

/// note
-Containers with [[[ kind_display_name ]]] VM inside will take ~2min to fully boot.
+Containers with the -{{ kind_display_name }}- VM inside will take ~2min to fully boot.
You can monitor the progress with the `docker logs -f <container-name>` command.
///

-[[[ kind_display_name ]]] node launched with containerlab can be managed via the following interfaces:
+A -{{ kind_display_name }}- node launched with containerlab can be managed via the following interfaces:

/// tab | bash
to connect to a `bash` shell of a running fortigate container:

@@ -56,7 +56,7 @@ Default login credentials: `admin:admin`

## Interface naming

-You can use [interfaces names](../topo-def-file.md#interface-naming) in the topology file like they appear in [[[ kind_display_name ]]].
+You can use [interface names](../topo-def-file.md#interface-naming) in the topology file like they appear in -{{ kind_display_name }}-.

The interface naming convention is: `portX`, where `X` is the port number.

@@ -70,12 +70,12 @@ With that naming convention in mind:

Data port numbering starts at `2`, as `port1` is reserved for management connectivity. Attempting to use `port1` in a containerlab topology will result in an error.
///

-The example ports above would be mapped to the following Linux interfaces inside the container running the [[[ kind_display_name ]]] VM:
+The example ports above would be mapped to the following Linux interfaces inside the container running the -{{ kind_display_name }}- VM:

* `eth0` - management interface connected to the containerlab management network (rendered as `port1` in the CLI)
* `eth1` - first data interface, mapped to the first data port of the VM (rendered as `port2`)
* `eth2+` - second and subsequent data interfaces, mapped to the second and subsequent data ports of the VM (rendered as `port3` and so on)

-When containerlab launches [[[ kind_display_name ]]] node the `port1` interface of the VM gets assigned `10.0.0.15/24` address from the QEMU DHCP server. This interface is transparently stitched with container's `eth0` interface such that users can reach the management plane of the [[[ kind_display_name ]]] using containerlab's assigned IP.
+When containerlab launches a -{{ kind_display_name }}- node, the `port1` interface of the VM gets assigned the `10.0.0.15/24` address from the QEMU DHCP server. This interface is transparently stitched with the container's `eth0` interface such that users can reach the management plane of -{{ kind_display_name }}- using containerlab's assigned IP.

Data interfaces `port2+` need to be configured with IP addressing manually using the CLI or other available management interfaces.

diff --git a/docs/manual/kinds/freebsd.md b/docs/manual/kinds/freebsd.md
index aae9df337..63f997bfd 100644
--- a/docs/manual/kinds/freebsd.md
+++ b/docs/manual/kinds/freebsd.md
@@ -6,7 +6,7 @@ kind_display_name: FreeBSD
---
# FreeBSD

-[FreeBSD](https://freebsd.org/) is identified with `[[[kind_code_name]]]` kind in the [topology file](../topo-def-file.md). It is built using [vrnetlab](../vrnetlab.md) project and essentially is a Qemu VM packaged in a docker container format.
+[FreeBSD](https://freebsd.org/) is identified with the `-{{kind_code_name}}-` kind in the [topology file](../topo-def-file.md). It is built using the [vrnetlab](../vrnetlab.md) project and essentially is a Qemu VM packaged in a docker container format.

## Getting FreeBSD image

@@ -45,7 +45,7 @@ FreeBSD node launched with containerlab can be managed via the following interfaces:

## Interface naming

-You can use [interfaces names](../topo-def-file.md#interface-naming) in the topology file like they appear in [[[ kind_display_name ]]].
+You can use [interface names](../topo-def-file.md#interface-naming) in the topology file like they appear in -{{ kind_display_name }}-.
The interface naming convention is: `vtnetX`, where `X` denotes the port number.

With that naming convention in mind:

Data port numbering starts at `1`, as `vtnet0` is reserved for management connectivity. Attempting to use `vtnet0` in a containerlab topology will result in an error.
///

-The example ports above would be mapped to the following Linux interfaces inside the container running the [[[ kind_display_name ]]] VM:
+The example ports above would be mapped to the following Linux interfaces inside the container running the -{{ kind_display_name }}- VM:

* `eth0` - management interface connected to the containerlab management network (rendered as `vtnet0` in the CLI)
* `eth1` - first data interface, mapped to the first data port of the VM (rendered as `vtnet1`)
* `eth2+` - second and subsequent data interfaces, mapped to the second and subsequent data ports of the VM (rendered as `vtnet2` and so on)

-When containerlab launches [[[ kind_display_name ]]] node the `vtnet0` interface of the VM gets assigned `10.0.0.15/24` address from the QEMU DHCP server. This interface is transparently stitched with container's `eth0` interface such that users can reach the management plane of the [[[ kind_display_name ]]] using containerlab's assigned IP.
+When containerlab launches a -{{ kind_display_name }}- node, the `vtnet0` interface of the VM gets assigned the `10.0.0.15/24` address from the QEMU DHCP server. This interface is transparently stitched with the container's `eth0` interface such that users can reach the management plane of -{{ kind_display_name }}- using containerlab's assigned IP.

Data interfaces `vtnet1+` need to be configured with IP addressing manually using the CLI or other available management interfaces.

diff --git a/docs/manual/kinds/huawei_vrp.md b/docs/manual/kinds/huawei_vrp.md
index 71d89ccbf..3e3699984 100644
--- a/docs/manual/kinds/huawei_vrp.md
+++ b/docs/manual/kinds/huawei_vrp.md
@@ -4,25 +4,25 @@ search:
kind_code_name: huawei_vrp
kind_display_name: Huawei VRP
---
-# [[[ kind_display_name ]]]
+# -{{ kind_display_name }}-

-[[[ kind_display_name ]]] virtualized router is identified with `[[[ kind_code_name ]]]` kind in the [topology file](../topo-def-file.md). It is built using [vrnetlab](../vrnetlab.md) project and essentially is a Qemu VM packaged in a docker container format.
+The -{{ kind_display_name }}- virtualized router is identified with the `-{{ kind_code_name }}-` kind in the [topology file](../topo-def-file.md). It is built using the [vrnetlab](../vrnetlab.md) project and essentially is a Qemu VM packaged in a docker container format.

-[[[ kind_display_name ]]] currently supports Huawei N40e and CE12800 variants, the same kind value - `[[[ kind_code_name ]]]` - is used for both.
+-{{ kind_display_name }}- currently supports the Huawei N40e and CE12800 variants; the same kind value - `-{{ kind_code_name }}-` - is used for both.

-[[[ kind_display_name ]]] nodes launched with containerlab comes up pre-provisioned with SSH, NETCONF services enabled.
+-{{ kind_display_name }}- nodes launched with containerlab come up pre-provisioned with the SSH and NETCONF services enabled.

-## Managing [[[ kind_display_name ]]] nodes
+## Managing -{{ kind_display_name }}- nodes

/// note
-Containers with [[[ kind_display_name ]]] inside will take ~3min to fully boot without a startup config file. And ~5-7 minute if the startup config file is provided, since a node will undergo a reboot. 
+Containers with -{{ kind_display_name }}- inside will take ~3min to fully boot without a startup config file, and ~5-7 minutes if a startup config file is provided, since the node will undergo a reboot.
You can monitor the progress with `docker logs -f <container-name>`.
///

-[[[ kind_display_name ]]] node launched with containerlab can be managed via the following interfaces:
+A -{{ kind_display_name }}- node launched with containerlab can be managed via the following interfaces:

/// tab | CLI
-to connect to the [[[ kind_display_name ]]] CLI
+to connect to the -{{ kind_display_name }}- CLI

```bash
ssh admin@<node-name>
```
///

/// tab | bash
-to connect to a `bash` shell of a running [[[ kind_display_name ]]] container:
+to connect to a `bash` shell of a running -{{ kind_display_name }}- container:

```bash
docker exec -it <container-name> bash
```
@@ -53,13 +53,13 @@ Default user credentials: `admin:admin`

## Interface naming

-The example ports above would be mapped to the following Linux interfaces inside the container running the [[[ kind_display_name ]]] VM:
+The example ports above would be mapped to the following Linux interfaces inside the container running the -{{ kind_display_name }}- VM:

* `eth0` - management interface connected to the containerlab management network (rendered as `GigabitEthernet0/0/0` in the VRP config)
* `eth1` - first data interface, mapped to the first data port of the VM (rendered as `Ethernet1/0/0`)
* `eth2+` - second and subsequent data interfaces, mapped to the second and subsequent data ports of the VM (rendered as `Ethernet1/0/1` and so on)

-When containerlab launches [[[ kind_display_name ]]] node the management interface of the VM gets assigned `10.0.0.15/24` address from the QEMU DHCP server. This interface is transparently stitched with container's `eth0` interface such that users can reach the management plane of the [[[ kind_display_name ]]] using containerlab's assigned IP.
+When containerlab launches a -{{ kind_display_name }}- node, the management interface of the VM gets assigned the `10.0.0.15/24` address from the QEMU DHCP server. This interface is transparently stitched with the container's `eth0` interface such that users can reach the management plane of -{{ kind_display_name }}- using containerlab's assigned IP.

Data interfaces `Ethernet1/0/0+` need to be configured with IP addressing manually using the CLI or other available management interfaces.

@@ -67,17 +67,17 @@

### Node configuration

-[[[ kind_display_name ]]] nodes come up with a basic configuration where only `admin` user and management interfaces such as SSH and NETCONF provisioned.
+-{{ kind_display_name }}- nodes come up with a basic configuration where only the `admin` user and management services such as SSH and NETCONF are provisioned.

#### Startup configuration

-It is possible to make [[[ kind_display_name ]]] nodes boot up with a user-defined startup-config instead of a built-in one. With a [`startup-config`](../nodes.md#startup-config) property of the node/kind user sets the path to the config file that will be mounted to a container and used as a startup-config:
+It is possible to make -{{ kind_display_name }}- nodes boot up with a user-defined startup-config instead of a built-in one. 
With a [`startup-config`](../nodes.md#startup-config) property of the node/kind, the user sets the path to the config file that will be mounted to a container and used as a startup-config: ```yaml topology: nodes: node: - kind: [[[ kind_code_name ]]] + kind: -{{ kind_code_name }}- startup-config: myconfig.txt ``` diff --git a/docs/manual/kinds/openbsd.md b/docs/manual/kinds/openbsd.md index d71201579..8fae5779a 100644 --- a/docs/manual/kinds/openbsd.md +++ b/docs/manual/kinds/openbsd.md @@ -6,7 +6,7 @@ kind_display_name: OpenBSD --- # OpenBSD -[OpenBSD](https://www.openbsd.org/) is identified with `[[[ kind_code_name ]]]` kind in the [topology file](../topo-def-file.md). It is built using [vrnetlab](../vrnetlab.md) project and essentially is a Qemu VM packaged in a docker container format. +[OpenBSD](https://www.openbsd.org/) is identified with `-{{ kind_code_name }}-` kind in the [topology file](../topo-def-file.md). It is built using [vrnetlab](../vrnetlab.md) project and essentially is a Qemu VM packaged in a docker container format. ## Getting OpenBSD image @@ -45,7 +45,7 @@ OpenBSD node launched with containerlab can be managed via the following interfa ## Interface naming -You can use [interfaces names](../topo-def-file.md#interface-naming) in the topology file like they appear in [[[ kind_display_name ]]]. +You can use [interfaces names](../topo-def-file.md#interface-naming) in the topology file like they appear in -{{ kind_display_name }}-. The interface naming convention is: `vioX`, where `X` is the port number. @@ -59,13 +59,13 @@ With that naming convention in mind: Data port numbering starts at `1`, as `vio0` is reserved for management connectivity. Attempting to use `vio0` in a containerlab topology will result in an error. /// -The example ports above would be mapped to the following Linux interfaces inside the container running the [[[ kind_display_name ]]] VM: +The example ports above would be mapped to the following Linux interfaces inside the container running the -{{ kind_display_name }}- VM: * `eth0` - management interface connected to the containerlab management network (rendered as `vio0` in the CLI) * `eth1` - first data interface, mapped to the first data port of the VM (rendered as `vio1`) * `eth2+` - second and subsequent data interfaces, mapped to the second and subsequent data ports of the VM (rendered as `vio2` and so on) -When containerlab launches [[[ kind_display_name ]]] node the `vio0` interface of the VM gets assigned `10.0.0.15/24` address from the QEMU DHCP server. This interface is transparently stitched with container's `eth0` interface such that users can reach the management plane of the [[[ kind_display_name ]]] using containerlab's assigned IP. +When containerlab launches -{{ kind_display_name }}- node the `vio0` interface of the VM gets assigned `10.0.0.15/24` address from the QEMU DHCP server. This interface is transparently stitched with container's `eth0` interface such that users can reach the management plane of the -{{ kind_display_name }}- using containerlab's assigned IP. Data interfaces `vio1+` need to be configured with IP addressing manually using CLI or other available management interfaces.
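To make the `vioX` convention from the openbsd.md hunk above concrete, here is a minimal topology sketch; the node names, kind value, and image tag are illustrative assumptions, not part of this patch:

```yaml
# two OpenBSD VMs joined by their first data ports;
# vio0 is reserved for management and cannot appear in links
name: openbsd-pair
topology:
  nodes:
    o1:
      kind: openbsd                  # assumed kind code for this page
      image: vrnetlab/vr-openbsd:7.5 # hypothetical locally-built image
    o2:
      kind: openbsd
      image: vrnetlab/vr-openbsd:7.5
  links:
    - endpoints: ["o1:vio1", "o2:vio1"] # vio1 maps to eth1 in each container
```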
diff --git a/docs/manual/kinds/srl.md b/docs/manual/kinds/srl.md index bce00f817..3390f6c4c 100644 --- a/docs/manual/kinds/srl.md +++ b/docs/manual/kinds/srl.md @@ -17,6 +17,16 @@ docker pull ghcr.io/nokia/srlinux To pull a specific version, use tags that match the released version and are listed in the [srlinux-container-image](https://github.com/nokia/srlinux-container-image) repo. +//// admonition | ARM64-native SR Linux container image + type: tip +SR Linux Network OS is also available as an ARM64-native container image in preview mode. Preview mode means that some issues may be present, as the image is not yet fully qualified. + +Starting with SR Linux 24.10.1, the container image is built using the manifest list, so when you pull the image, the correct architecture is selected automatically. + +The ARM64 image unlocks running networking labs on [Apple macOS](../../macos.md) with M-series chips, as well as on ARM64-based cloud instances and on new Microsoft Surface laptops. + +//// + ## Managing SR Linux nodes There are many ways to manage SR Linux nodes, ranging from classic CLI management all the way up to the gNMI programming. diff --git a/docs/manual/kinds/vr-aoscx.md b/docs/manual/kinds/vr-aoscx.md index 62cab10a1..ffcceff0e 100644 --- a/docs/manual/kinds/vr-aoscx.md +++ b/docs/manual/kinds/vr-aoscx.md @@ -32,7 +32,7 @@ Aruba AOS-CX node launched with containerlab can be managed via the following in ## Interface naming -You can use [interfaces names](../topo-def-file.md#interface-naming) in the topology file like they appear in [[[ kind_display_name ]]]. +You can use [interfaces names](../topo-def-file.md#interface-naming) in the topology file like they appear in -{{ kind_display_name }}-. The interface naming convention is: `1/1/X`, where `X` is the port number. @@ -41,12 +41,12 @@ With that naming convention in mind: * `1/1/1` - first data port available * `1/1/2` - second data port, and so on... -The example ports above would be mapped to the following Linux interfaces inside the container running the [[[ kind_display_name ]]] VM: +The example ports above would be mapped to the following Linux interfaces inside the container running the -{{ kind_display_name }}- VM: * `eth1` - first data interface, mapped to the first data port of the VM (rendered as `1/1/1`) * `eth2+` - second and subsequent data interfaces, mapped to the second and subsequent data ports of the VM (rendered as `1/1/2` and so on) -When containerlab launches [[[ kind_display_name ]]] node the `1/1/1` interface of the VM gets assigned `10.0.0.15/24` address from the QEMU DHCP server. This interface is transparently stitched with container's `eth0` interface such that users can reach the management plane of the [[[ kind_display_name ]]] using containerlab's assigned IP. +When containerlab launches -{{ kind_display_name }}- node the `1/1/1` interface of the VM gets assigned `10.0.0.15/24` address from the QEMU DHCP server. This interface is transparently stitched with container's `eth0` interface such that users can reach the management plane of the -{{ kind_display_name }}- using containerlab's assigned IP. Data interfaces `1/1/2+` need to be configured with IP addressing manually using CLI or other available management interfaces.
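As a quick illustration of the manifest-list behaviour described in the srl.md hunk above, one could pull a release tag and check which architecture Docker resolved; the tag follows the 24.10.1 release mentioned there, and the sample output is an assumption for an Apple Silicon host:

```bash
# pull a specific SR Linux release; the manifest list resolves
# to the host architecture automatically
docker pull ghcr.io/nokia/srlinux:24.10.1

# confirm the architecture of the image that was selected
docker image inspect ghcr.io/nokia/srlinux:24.10.1 --format '{{ .Architecture }}'
# arm64
```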
diff --git a/docs/manual/kinds/vr-c8000v.md b/docs/manual/kinds/vr-c8000v.md index d86e9e9dd..be1cd3a74 100644 --- a/docs/manual/kinds/vr-c8000v.md +++ b/docs/manual/kinds/vr-c8000v.md @@ -6,7 +6,7 @@ kind_display_name: Cisco Catalyst 8000V --- # Cisco c8000v -The Cisco Catalyst 8000V is identified with `[[[ kind_code_name ]]]` kind in the [topology file](../topo-def-file.md). +The Cisco Catalyst 8000V is identified with `-{{ kind_code_name }}-` kind in the [topology file](../topo-def-file.md). Cisco c8000v is a successor of [Cisco CSR1000v](../kinds/vr-csr.md) and is a **different** product from [Cisco 8000](../kinds/c8000.md) platform emulator. @@ -46,7 +46,7 @@ Default credentials: `admin:admin` ## Interface naming -You can use [interfaces names](../topo-def-file.md#interface-naming) in the topology file like they appear in [[[ kind_display_name ]]]. +You can use [interfaces names](../topo-def-file.md#interface-naming) in the topology file like they appear in -{{ kind_display_name }}-. The interface naming convention is: `GigabitEthernetX` (or `GiX`), where `X` is the port number. @@ -60,13 +60,13 @@ With that naming convention in mind: Data port numbering starts at `2`, as `Gi1` is reserved for management connectivity. Attempting to use `Gi1` in a containerlab topology will result in an error. /// -The example ports above would be mapped to the following Linux interfaces inside the container running the [[[ kind_display_name ]]] VM: +The example ports above would be mapped to the following Linux interfaces inside the container running the -{{ kind_display_name }}- VM: * `eth0` - management interface connected to the containerlab management network (rendered as `GigabitEthernet1` in the CLI) * `eth1` - first data interface, mapped to the first data port of the VM (rendered as `GigabitEthernet2`) * `eth2+` - second and subsequent data interfaces, mapped to the second and subsequent data ports of the VM (rendered as `GigabitEthernet3` and so on) -When containerlab launches [[[ kind_display_name ]]] node the `GigabitEthernet1` interface of the VM gets assigned `10.0.0.15/24` address from the QEMU DHCP server. This interface is transparently stitched with container's `eth0` interface such that users can reach the management plane of the [[[ kind_display_name ]]] using containerlab's assigned IP. +When containerlab launches -{{ kind_display_name }}- node the `GigabitEthernet1` interface of the VM gets assigned `10.0.0.15/24` address from the QEMU DHCP server. This interface is transparently stitched with container's `eth0` interface such that users can reach the management plane of the -{{ kind_display_name }}- using containerlab's assigned IP. Data interfaces `GigabitEthernet2+` need to be configured with IP addressing manually using CLI or other available management interfaces and will appear `unset` in the CLI: diff --git a/docs/manual/kinds/vr-cat9kv.md b/docs/manual/kinds/vr-cat9kv.md index 242332584..148a29ccd 100644 --- a/docs/manual/kinds/vr-cat9kv.md +++ b/docs/manual/kinds/vr-cat9kv.md @@ -7,9 +7,9 @@ kind_short_display_name: Cat9kv --- # Cisco Catalyst 9000v -The [[[ kind_display_name ]]] (or [[[ kind_short_display_name ]]] for short) is a virtualised form of the Cisco Catalyst 9000 series switches. It is identified with `[[[ kind_code_name ]]]` kind in the [topology file](../topo-def-file.md) and built using [vrnetlab](../vrnetlab.md) project and essentially is a Qemu VM packaged in a docker container format. 
+The -{{ kind_display_name }}- (or -{{ kind_short_display_name }}- for short) is a virtualised form of the Cisco Catalyst 9000 series switches. It is identified with `-{{ kind_code_name }}-` kind in the [topology file](../topo-def-file.md) and built using [vrnetlab](../vrnetlab.md) project and essentially is a Qemu VM packaged in a docker container format. -The [[[ kind_display_name ]]] performs simulation of the dataplane ASICs that are present in the physical hardware. The two simulated ASICs are: +The -{{ kind_display_name }}- performs simulation of the dataplane ASICs that are present in the physical hardware. The two simulated ASICs are: - Cisco UADP (Unified Access Data-Plane). This is the default ASIC that's simulated. - Silicon One Q200 (referred to as Q200). @@ -20,7 +20,7 @@ The Q200 simulation has a limited featureset compared to the UADP simulation. ## Resource requirements -The [[[ kind_display_name ]]] is a resource-hungry VM. When launched with the default settings, it requires the following resources: +The -{{ kind_display_name }}- is a resource-hungry VM. When launched with the default settings, it requires the following resources: | | UADP | Q200 | | --------- | ----- | ----- | @@ -30,12 +30,12 @@ The [[[ kind_display_name ]]] is a resource-hungry VM. When launched with the de Users can adjust the CPU and memory resources by setting adding appropriate environment variables as explained in [Tuning Qemu Parameters section](../../manual/vrnetlab.md#tuning-qemu-parameters). -## Managing [[[ kind_display_name ]]] nodes +## Managing -{{ kind_display_name }}- nodes -You can manage the [[[ kind_display_name ]]] with containerlab via the following interfaces: +You can manage the -{{ kind_display_name }}- with containerlab via the following interfaces: /// tab | bash -to connect to a `bash` shell of a running [[[ kind_display_name ]]] container: +to connect to a `bash` shell of a running -{{ kind_display_name }}- container: ```bash docker exec -it <container-name> bash @@ -43,7 +43,7 @@ docker exec -it <container-name> bash /// /// tab | CLI -to connect to the [[[ kind_display_name ]]] CLI +to connect to the -{{ kind_display_name }}- CLI ```bash ssh admin@<container-name> @@ -64,7 +64,7 @@ Default credentials: `admin:admin` ## Interface naming -You can use [interfaces names](../topo-def-file.md#interface-naming) in the topology file like they appear in the [[[ kind_display_name ]]] CLI. +You can use [interfaces names](../topo-def-file.md#interface-naming) in the topology file like they appear in the -{{ kind_display_name }}- CLI. The interface naming convention is: `GigabitEthernet1/0/X` (or `Gi1/0/X`), where `X` is the port number. @@ -73,7 +73,7 @@ With that naming convention in mind: - `Gi1/0/1` - first data port available - `Gi1/0/2` - second data port, and so on... -The example ports above would be mapped to the following Linux interfaces inside the container running the [[[ kind_display_name ]]] VM: +The example ports above would be mapped to the following Linux interfaces inside the container running the -{{ kind_display_name }}- VM: - `eth0` - management interface connected to the containerlab management network. Mapped to `GigabitEthernet0/0`. - `eth1` - First data-plane interface. Mapped to `GigabitEthernet1/0/1` interface. @@ -107,7 +107,7 @@ topology: Regardless of how many links are defined in your containerlab topology, the Catalyst 9000v will always display 8 data-plane interfaces. Links/interfaces that you did not define in your containerlab topology will *not* pass any traffic.
/// -When containerlab launches [[[ kind_display_name ]]] node the `GigabitEthernet0/0` interface of the VM gets assigned `10.0.0.15/24` address from the QEMU DHCP server. This interface is transparently stitched with container's `eth0` interface such that users can reach the management plane of the [[[ kind_display_name ]]] using containerlab's assigned IP. +When containerlab launches -{{ kind_display_name }}- node the `GigabitEthernet0/0` interface of the VM gets assigned `10.0.0.15/24` address from the QEMU DHCP server. This interface is transparently stitched with container's `eth0` interface such that users can reach the management plane of the -{{ kind_display_name }}- using containerlab's assigned IP. Data interfaces `GigabitEthernet1/0/1+` need to be configured with IP addressing manually using CLI or other available management interfaces and will appear `unset` in the CLI: diff --git a/docs/manual/kinds/vr-csr.md b/docs/manual/kinds/vr-csr.md index 67163feaf..187005db7 100644 --- a/docs/manual/kinds/vr-csr.md +++ b/docs/manual/kinds/vr-csr.md @@ -6,7 +6,7 @@ kind_display_name: Cisco CSR1000v --- # Cisco CSR1000v -Cisco CSR1000v virtualized router is identified with `[[[ kind_code_name ]]]` kind in the [topology file](../topo-def-file.md). It is built using [vrnetlab](../vrnetlab.md) project and essentially is a Qemu VM packaged in a docker container format. +Cisco CSR1000v virtualized router is identified with `-{{ kind_code_name }}-` kind in the [topology file](../topo-def-file.md). It is built using [vrnetlab](../vrnetlab.md) project and essentially is a Qemu VM packaged in a docker container format. Cisco CSR1000v nodes launched with containerlab comes up pre-provisioned with SSH, SNMP, NETCONF and gNMI services enabled. @@ -39,7 +39,7 @@ Cisco CSR1000v node launched with containerlab can be managed via the following ## Interface naming -You can use [interfaces names](../topo-def-file.md#interface-naming) in the topology file like they appear in [[[ kind_display_name ]]]. +You can use [interfaces names](../topo-def-file.md#interface-naming) in the topology file like they appear in -{{ kind_display_name }}-. The interface naming convention is: `GigabitEthernetX` (or `GiX`), where `X` is the port number. @@ -53,13 +53,13 @@ With that naming convention in mind: Data port numbering starts at `2`, as `Gi1` is reserved for management connectivity. Attempting to use `Gi1` in a containerlab topology will result in an error. /// -The example ports above would be mapped to the following Linux interfaces inside the container running the [[[ kind_display_name ]]] VM: +The example ports above would be mapped to the following Linux interfaces inside the container running the -{{ kind_display_name }}- VM: * `eth0` - management interface connected to the containerlab management network (rendered as `GigabitEthernet1` in the CLI) * `eth1` - first data interface, mapped to the first data port of the VM (rendered as `GigabitEthernet2`) * `eth2+` - second and subsequent data interfaces, mapped to the second and subsequent data ports of the VM (rendered as `GigabitEthernet3` and so on) -When containerlab launches [[[ kind_display_name ]]] node the `GigabitEthernet1` interface of the VM gets assigned `10.0.0.15/24` address from the QEMU DHCP server. This interface is transparently stitched with container's `eth0` interface such that users can reach the management plane of the [[[ kind_display_name ]]] using containerlab's assigned IP. 
+When containerlab launches -{{ kind_display_name }}- node the `GigabitEthernet1` interface of the VM gets assigned `10.0.0.15/24` address from the QEMU DHCP server. This interface is transparently stitched with container's `eth0` interface such that users can reach the management plane of the -{{ kind_display_name }}- using containerlab's assigned IP. Data interfaces `GigabitEthernet2+` need to be configured with IP addressing manually using CLI or other available management interfaces. diff --git a/docs/manual/kinds/vr-ftdv.md b/docs/manual/kinds/vr-ftdv.md index 325e11865..a9da2bae4 100644 --- a/docs/manual/kinds/vr-ftdv.md +++ b/docs/manual/kinds/vr-ftdv.md @@ -41,7 +41,7 @@ Cisco FTDv node launched with containerlab can be managed via the following inte ## Interface naming -You can use [interfaces names](../topo-def-file.md#interface-naming) in the topology file like they appear in [[[ kind_display_name ]]]. +You can use [interfaces names](../topo-def-file.md#interface-naming) in the topology file like they appear in -{{ kind_display_name }}-. The interface naming convention is: `GigabitEthernet0/X` (or `GiX`), where `X` is the port number. @@ -54,13 +54,13 @@ With that naming convention in mind: Data port numbering starts at `0`. /// -The example ports above would be mapped to the following Linux interfaces inside the container running the [[[ kind_display_name ]]] VM: +The example ports above would be mapped to the following Linux interfaces inside the container running the -{{ kind_display_name }}- VM: * `eth0` - management interface connected to the containerlab management network (rendered as `Management0/0` in the CLI) * `eth1` - first data interface, mapped to the first data port of the VM (rendered as `GigabitEthernet0/0`) * `eth2+` - second and subsequent data interfaces, mapped to the second and subsequent data ports of the VM (rendered as `GigabitEthernet0/1` and so on) -When containerlab launches [[[ kind_display_name ]]] node the `Management0/0` interface of the VM gets assigned `10.0.0.15/24` address from the QEMU DHCP server. This interface is transparently stitched with container's `eth0` interface such that users can reach the management plane of the [[[ kind_display_name ]]] using containerlab's assigned IP. +When containerlab launches -{{ kind_display_name }}- node the `Management0/0` interface of the VM gets assigned `10.0.0.15/24` address from the QEMU DHCP server. This interface is transparently stitched with container's `eth0` interface such that users can reach the management plane of the -{{ kind_display_name }}- using containerlab's assigned IP. Data interfaces `GigabitEthernet2+` need to be configured with IP addressing manually using Web UI or other available management interfaces. diff --git a/docs/manual/kinds/vr-n9kv.md b/docs/manual/kinds/vr-n9kv.md index d6d95ad81..d2d337804 100644 --- a/docs/manual/kinds/vr-n9kv.md +++ b/docs/manual/kinds/vr-n9kv.md @@ -4,14 +4,14 @@ search: kind_code_name: cisco_n9kv kind_display_name: Cisco Nexus 9000v --- -# [[[ kind_display_name ]]] +# -{{ kind_display_name }}- -Cisco Nexus9000v virtualized router is identified with `[[[ kind_code_name ]]]` kind in the [topology file](../topo-def-file.md). It is built using [vrnetlab](../vrnetlab.md) project and essentially is a Qemu VM packaged in a docker container format. +Cisco Nexus9000v virtualized router is identified with `-{{ kind_code_name }}-` kind in the [topology file](../topo-def-file.md). 
It is built using [vrnetlab](../vrnetlab.md) project and essentially is a Qemu VM packaged in a docker container format. Cisco Nexus 9000v nodes launched with containerlab comes up pre-provisioned with SSH, SNMP, NETCONF, NXAPI and gRPC services enabled. /// details | N9kv Lite -If you have a Nexus 9000v Lightweight variant, you can use the same `[[[ kind_code_name ]]]` to launch it +If you have a Nexus 9000v Lightweight variant, you can use the same `-{{ kind_code_name }}-` to launch it. By default, Nexus 9kv image with require 10GB memory and 4 CPU. However `n9kv-lite` VM requires less resources, so you would want to tune the defaults down. Following is sample for setting up lower memory and CPU for the `n9kv-lite`: ```yaml topology: nodes: node: - kind: [[[ kind_code_name ]]] + kind: -{{ kind_code_name }}- env: QEMU_MEMORY: 6144 # N9kv-lite requires minimum 6GB memory QEMU_SMP: 2 # N9kv-lite requires minimum 2 CPUs @@ -30,17 +30,17 @@ topology: Please refer to ['tuning qemu parameters'](../vrnetlab.md#tuning-qemu-parameters) section for more details. /// -## Managing [[[ kind_display_name ]]] nodes +## Managing -{{ kind_display_name }}- nodes /// note -Containers with [[[ kind_display_name ]]] inside will take ~5min to fully boot. +Containers with -{{ kind_display_name }}- inside will take ~5min to fully boot. You can monitor the progress with `docker logs -f <container-name>`. /// -[[[ kind_display_name ]]] node launched with containerlab can be managed via the following interfaces: +-{{ kind_display_name }}- node launched with containerlab can be managed via the following interfaces: /// tab | bash -to connect to a `bash` shell of a running [[[ kind_display_name ]]] container: +to connect to a `bash` shell of a running -{{ kind_display_name }}- container: ```bash docker exec -it <container-name> bash @@ -49,7 +49,7 @@ docker exec -it <container-name> bash /// /// tab | CLI -to connect to the [[[ kind_display_name ]]] CLI +to connect to the -{{ kind_display_name }}- CLI ```bash ssh admin@<container-name> @@ -76,7 +76,7 @@ Default user credentials: `admin:admin` ## Interface naming -You can use [interfaces names](../topo-def-file.md#interface-naming) in the topology file like they appear in [[[ kind_display_name ]]]. +You can use [interfaces names](../topo-def-file.md#interface-naming) in the topology file like they appear in -{{ kind_display_name }}-. The interface naming convention is: `Ethernet1/X` (or `Et1/X`), where `X` is the port number. @@ -90,13 +90,13 @@ With that naming convention in mind: Data port numbering starts at `1`. /// -The example ports above would be mapped to the following Linux interfaces inside the container running the [[[ kind_display_name ]]] VM: +The example ports above would be mapped to the following Linux interfaces inside the container running the -{{ kind_display_name }}- VM: * `eth0` - management interface connected to the containerlab management network * `eth1` - first data interface, mapped to the first data port of the VM (rendered as `Ethernet1/1`) * `eth2+` - second and subsequent data interfaces, mapped to the second and subsequent data ports of the VM (rendered as `Ethernet1/2` and so on) -When containerlab launches [[[ kind_display_name ]]] node the management interface of the VM gets assigned `10.0.0.15/24` address from the QEMU DHCP server. This interface is transparently stitched with container's `eth0` interface such that users can reach the management plane of the [[[ kind_display_name ]]] using containerlab's assigned IP.
+When containerlab launches -{{ kind_display_name }}- node the management interface of the VM gets assigned `10.0.0.15/24` address from the QEMU DHCP server. This interface is transparently stitched with container's `eth0` interface such that users can reach the management plane of the -{{ kind_display_name }}- using containerlab's assigned IP. Data interfaces `Ethernet1/1+` need to be configured with IP addressing manually using CLI or other available management interfaces. @@ -104,7 +104,7 @@ Data interfaces `Ethernet1/1+` need to be configured with IP addressing manually ### Node configuration -[[[ kind_display_name ]]] nodes come up with a basic configuration where only `admin` user and management interfaces such as NETCONF, NXAPI and GRPC provisioned. +-{{ kind_display_name }}- nodes come up with a basic configuration where only the `admin` user and management interfaces such as NETCONF, NXAPI and gRPC are provisioned. #### Startup configuration @@ -114,7 +114,7 @@ It is possible to make n9kv nodes boot up with a user-defined startup-config ins topology: nodes: node: - kind: [[[ kind_code_name ]]] + kind: -{{ kind_code_name }}- startup-config: myconfig.txt ``` diff --git a/docs/manual/kinds/vr-pan.md b/docs/manual/kinds/vr-pan.md index feeee1102..e4454fd5c 100644 --- a/docs/manual/kinds/vr-pan.md +++ b/docs/manual/kinds/vr-pan.md @@ -6,7 +6,7 @@ kind_display_name: Cisco Nexus9000v --- # Palo Alto PA-VM -Palo Alto PA-VM virtualized firewall is identified with `[[[ kind_code_name ]]]` kind in the [topology file](../topo-def-file.md). It is built using [boxen](https://github.com/carlmontanari/boxen/) project and essentially is a Qemu VM packaged in a docker container format. +Palo Alto PA-VM virtualized firewall is identified with `-{{ kind_code_name }}-` kind in the [topology file](../topo-def-file.md). It is built using [boxen](https://github.com/carlmontanari/boxen/) project and essentially is a Qemu VM packaged in a docker container format. Palo Alto PA-VM nodes launched with containerlab come up pre-provisioned with SSH, and HTTPS services enabled. @@ -36,7 +36,7 @@ Palo Alto PA-VM node launched with containerlab can be managed via the following ## Interface naming -You can use [interfaces names](../topo-def-file.md#interface-naming) in the topology file like they appear in [[[ kind_display_name ]]]. +You can use [interfaces names](../topo-def-file.md#interface-naming) in the topology file like they appear in -{{ kind_display_name }}-. The interface naming convention is: `Ethernet1/X`, where `X` is the port number. @@ -50,7 +50,7 @@ With that naming convention in mind: Data port numbering starts at `1`. /// -The example ports above would be mapped to the following Linux interfaces inside the container running the [[[ kind_display_name ]]] VM: +The example ports above would be mapped to the following Linux interfaces inside the container running the -{{ kind_display_name }}- VM: * `eth0` - management interface connected to the containerlab management network * `eth1` - first data interface, mapped to the first data port of the VM (rendered as `Ethernet1/1`) * `eth2+` - second and subsequent data interfaces, mapped to the second and subsequent data ports of the VM (rendered as `Ethernet1/2` and so on) -When containerlab launches [[[ kind_display_name ]]] node the management interface of the VM gets assigned `10.0.0.15/24` address from the QEMU DHCP server.
This interface is transparently stitched with container's `eth0` interface such that users can reach the management plane of the [[[ kind_display_name ]]] using containerlab's assigned IP. +When containerlab launches -{{ kind_display_name }}- node the management interface of the VM gets assigned `10.0.0.15/24` address from the QEMU DHCP server. This interface is transparently stitched with container's `eth0` interface such that users can reach the management plane of the -{{ kind_display_name }}- using containerlab's assigned IP. Data interfaces `Ethernet1/1+` need to be configured with IP addressing manually using CLI or other available management interfaces. diff --git a/docs/manual/kinds/vr-ros.md b/docs/manual/kinds/vr-ros.md index f2a99ac82..644327621 100644 --- a/docs/manual/kinds/vr-ros.md +++ b/docs/manual/kinds/vr-ros.md @@ -6,7 +6,7 @@ kind_display_name: MikroTik RouterOS --- # MikroTik RouterOS Cloud-hosted router -[MikroTik RouterOS](https://mikrotik.com/download) cloud hosted router is identified with `[[[ kind_code_name ]]]` kind in the [topology file](../topo-def-file.md). It is built using [vrnetlab](../vrnetlab.md) project and essentially is a Qemu VM packaged in a docker container format. +[MikroTik RouterOS](https://mikrotik.com/download) cloud hosted router is identified with `-{{ kind_code_name }}-` kind in the [topology file](../topo-def-file.md). It is built using [vrnetlab](../vrnetlab.md) project and essentially is a Qemu VM packaged in a docker container format. ## Managing MikroTik RouterOS nodes @@ -35,7 +35,7 @@ MikroTik RouterOS node launched with containerlab can be managed via the followi ## Interface naming -You can use [interfaces names](../topo-def-file.md#interface-naming) in the topology file like they appear in [[[ kind_display_name ]]]. +You can use [interfaces names](../topo-def-file.md#interface-naming) in the topology file like they appear in -{{ kind_display_name }}-. The interface naming convention is: `etherX`, where `X` is the port number. @@ -49,13 +49,13 @@ With that naming convention in mind: Data port numbering starts at `2`, as `ether1` is reserved for management connectivity. Attempting to use `ether1` in a containerlab topology will result in an error. /// -The example ports above would be mapped to the following Linux interfaces inside the container running the [[[ kind_display_name ]]] VM: +The example ports above would be mapped to the following Linux interfaces inside the container running the -{{ kind_display_name }}- VM: * `eth0` - management interface connected to the containerlab management network (rendered as `ether1`) * `eth1` - first data interface, mapped to the first data port of the VM (rendered as `ether2`) * `eth2+` - second and subsequent data interfaces, mapped to the second and subsequent data ports of the VM (rendered as `ether3` and so on) -When containerlab launches [[[ kind_display_name ]]] node the management interface of the VM gets assigned `10.0.0.15/24` address from the QEMU DHCP server. This interface is transparently stitched with container's `eth0` interface such that users can reach the management plane of the [[[ kind_display_name ]]] using containerlab's assigned IP. +When containerlab launches -{{ kind_display_name }}- node the management interface of the VM gets assigned `10.0.0.15/24` address from the QEMU DHCP server. 
This interface is transparently stitched with container's `eth0` interface such that users can reach the management plane of the -{{ kind_display_name }}- using containerlab's assigned IP. Data interfaces `ether2+` need to be configured with IP addressing manually using CLI or other available management interfaces. diff --git a/docs/manual/kinds/vr-sros.md b/docs/manual/kinds/vr-sros.md index f16c5ebb3..15bdc5e5a 100644 --- a/docs/manual/kinds/vr-sros.md +++ b/docs/manual/kinds/vr-sros.md @@ -6,7 +6,7 @@ kind_display_name: Nokia SR OS --- # Nokia SR OS -[Nokia SR OS](https://www.nokia.com/networks/products/service-router-operating-system/) virtualized router is identified with `[[[ kind_code_name ]]]` kind in the [topology file](../topo-def-file.md). It is built using [vrnetlab](../vrnetlab.md) project and essentially is a Qemu VM packaged in a docker container format. +[Nokia SR OS](https://www.nokia.com/networks/products/service-router-operating-system/) virtualized router is identified with `-{{ kind_code_name }}-` kind in the [topology file](../topo-def-file.md). It is built using [vrnetlab](../vrnetlab.md) project and essentially is a Qemu VM packaged in a docker container format. Nokia SR OS nodes launched with containerlab come up pre-provisioned with SSH, SNMP, NETCONF and gNMI services enabled. @@ -69,7 +69,7 @@ Default user credentials: `admin:admin` ## Interface naming -You can use [interfaces names](../topo-def-file.md#interface-naming) in the topology file like they appear in [[[ kind_display_name ]]]. +You can use [interfaces names](../topo-def-file.md#interface-naming) in the topology file like they appear in -{{ kind_display_name }}-. The interface naming convention is: `1/1/X`, where `X` is the port number. @@ -86,13 +86,13 @@ With that naming convention in mind: * `1/1/1` - first data port available * `1/1/2` - second data port, and so on... -The example ports above would be mapped to the following Linux interfaces inside the container running the [[[ kind_display_name ]]] VM: +The example ports above would be mapped to the following Linux interfaces inside the container running the -{{ kind_display_name }}- VM: * `eth0` - management interface connected to the containerlab management network * `eth1` - first data interface, mapped to the first data port of the VM (rendered as `1/1/1`) * `eth2+` - second and subsequent data interfaces, mapped to the second and subsequent data ports of the VM (rendered as `1/1/2` and so on) -When containerlab launches [[[ kind_display_name ]]] node the primary BOF interface gets assigned `10.0.0.15/24` address from the QEMU DHCP server. This interface is transparently stitched with container's `eth0` interface such that users can reach the management plane of the [[[ kind_display_name ]]] using containerlab's assigned IP. +When containerlab launches -{{ kind_display_name }}- node the primary BOF interface gets assigned `10.0.0.15/24` address from the QEMU DHCP server. This interface is transparently stitched with container's `eth0` interface such that users can reach the management plane of the -{{ kind_display_name }}- using containerlab's assigned IP. Data interfaces `1/1/1+` need to be configured with IP addressing manually using CLI or other available management interfaces. 
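As a sketch of how the `1/1/X` names from the vr-sros.md hunk above pair with the container's Linux interfaces, a hypothetical `links` fragment could look like this (node names are placeholders):

```yaml
topology:
  links:
    # 1/1/1 maps to eth1 and 1/1/2 maps to eth2 inside each container
    - endpoints: ["sros1:1/1/1", "sros2:1/1/1"]
    - endpoints: ["sros1:1/1/2", "sros2:1/1/2"]
```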
diff --git a/docs/manual/kinds/vr-veos.md b/docs/manual/kinds/vr-veos.md index 0e24aa995..9130f0d4f 100644 --- a/docs/manual/kinds/vr-veos.md +++ b/docs/manual/kinds/vr-veos.md @@ -6,7 +6,7 @@ kind_display_name: Arista vEOS --- # Arista vEOS -[Arista vEOS](https://www.arista.com/en/cg-veos-router/veos-router-overview) virtualized router is identified with `[[[ kind_code_name ]]]` kind in the [topology file](../topo-def-file.md). It is built using [vrnetlab](../vrnetlab.md) project and essentially is a Qemu VM packaged in a docker container format. +[Arista vEOS](https://www.arista.com/en/cg-veos-router/veos-router-overview) virtualized router is identified with `-{{ kind_code_name }}-` kind in the [topology file](../topo-def-file.md). It is built using [vrnetlab](../vrnetlab.md) project and essentially is a Qemu VM packaged in a docker container format. Arista vEOS nodes launched with containerlab comes up pre-provisioned with SSH, SNMP, NETCONF and gNMI services enabled. @@ -47,7 +47,7 @@ Arista vEOS node launched with containerlab can be managed via the following int ## Interface naming -You can use [interfaces names](../topo-def-file.md#interface-naming) in the topology file like they appear in [[[ kind_display_name ]]]. +You can use [interfaces names](../topo-def-file.md#interface-naming) in the topology file like they appear in -{{ kind_display_name }}-. The interface naming convention is: `Ethernet1/X` (or `Et1/X`), where `X` is the port number. @@ -61,13 +61,13 @@ With that naming convention in mind: Data port numbering starts at `1`. /// -The example ports above would be mapped to the following Linux interfaces inside the container running the [[[ kind_display_name ]]] VM: +The example ports above would be mapped to the following Linux interfaces inside the container running the -{{ kind_display_name }}- VM: * `eth0` - management interface connected to the containerlab management network * `eth1` - first data interface, mapped to the first data port of the VM (rendered as `Ethernet1/1`) * `eth2+` - second and subsequent data interfaces, mapped to the second and subsequent data ports of the VM (rendered as `Ethernet1/2` and so on) -When containerlab launches [[[ kind_display_name ]]] node the management interface of the VM gets assigned `10.0.0.15/24` address from the QEMU DHCP server. This interface is transparently stitched with container's `eth0` interface such that users can reach the management plane of the [[[ kind_display_name ]]] using containerlab's assigned IP. +When containerlab launches -{{ kind_display_name }}- node the management interface of the VM gets assigned `10.0.0.15/24` address from the QEMU DHCP server. This interface is transparently stitched with container's `eth0` interface such that users can reach the management plane of the -{{ kind_display_name }}- using containerlab's assigned IP. Data interfaces `Ethernet1/1+` need to be configured with IP addressing manually using CLI or other available management interfaces. diff --git a/docs/manual/kinds/vr-vjunosevolved.md b/docs/manual/kinds/vr-vjunosevolved.md index b5433812c..32ee12cf8 100644 --- a/docs/manual/kinds/vr-vjunosevolved.md +++ b/docs/manual/kinds/vr-vjunosevolved.md @@ -6,7 +6,7 @@ kind_display_name: Juniper vJunosEvolved --- # Juniper vJunosEvolved -[Juniper vJunosEvolved](https://www.juniper.net/documentation/product/us/en/vjunosevolved/) is a virtualized PTX10001 router identified with `[[[ kind_code_name ]]]` kind in the [topology file](../topo-def-file.md). 
It is built using [vrnetlab](../vrnetlab.md) project and essentially is a Qemu VM packaged in a docker container format. +[Juniper vJunosEvolved](https://www.juniper.net/documentation/product/us/en/vjunosevolved/) is a virtualized PTX10001 router identified with `-{{ kind_code_name }}-` kind in the [topology file](../topo-def-file.md). It is built using [vrnetlab](../vrnetlab.md) project and essentially is a Qemu VM packaged in a docker container format. Juniper vJunosEvolved nodes launched with containerlab come up pre-provisioned with SSH, SNMP, NETCONF and gNMI services enabled. @@ -43,7 +43,7 @@ Juniper vJunosEvolved node launched with containerlab can be managed via the fol ## Interface naming -You can use [interfaces names](../topo-def-file.md#interface-naming) in the topology file like they appear in [[[ kind_display_name ]]]. +You can use [interfaces names](../topo-def-file.md#interface-naming) in the topology file like they appear in -{{ kind_display_name }}-. The interface naming convention is: `et-0/0/X` (or `ge-0/0/X`, `xe-0/0/X`, all are accepted), where X denotes the port number. @@ -57,7 +57,7 @@ With that naming convention in mind: Data port numbering starts at `0`. /// -The example ports above would be mapped to the following Linux interfaces inside the container running the [[[ kind_display_name ]]] VM: +The example ports above would be mapped to the following Linux interfaces inside the container running the -{{ kind_display_name }}- VM: Juniper vJunosEvolved container can have up to 17 interfaces and uses the following mapping rules: @@ -65,7 +65,7 @@ Juniper vJunosEvolved container can have up to 17 interfaces and uses the follow * `eth1` - first data interface, mapped to a first data port of vJunosEvolved VM, which is `et-0/0/0` **and not `et-0/0/1`**. * `eth2+` - second and subsequent data interface -When containerlab launches [[[ kind_display_name ]]] node the management interface of the VM gets assigned `10.0.0.15/24` address from the QEMU DHCP server. This interface is transparently stitched with container's `eth0` interface such that users can reach the management plane of the [[[ kind_display_name ]]] using containerlab's assigned IP. +When containerlab launches -{{ kind_display_name }}- node the management interface of the VM gets assigned `10.0.0.15/24` address from the QEMU DHCP server. This interface is transparently stitched with container's `eth0` interface such that users can reach the management plane of the -{{ kind_display_name }}- using containerlab's assigned IP. Data interfaces `et-0/0/0+` need to be configured with IP addressing manually using CLI or other available management interfaces. diff --git a/docs/manual/kinds/vr-vjunosrouter.md b/docs/manual/kinds/vr-vjunosrouter.md index b9245b69f..5877f3b77 100644 --- a/docs/manual/kinds/vr-vjunosrouter.md +++ b/docs/manual/kinds/vr-vjunosrouter.md @@ -6,7 +6,7 @@ kind_display_name: Juniper vJunos-router --- # Juniper vJunos-router -[Juniper vJunos-router](https://www.juniper.net/documentation/product/us/en/vjunos-router/) is a virtualized MX router, a single-VM version of the vMX that requires no feature licenses and is meant for lab/testing use. It is identified with `[[[ kind_code_name ]]]` kind in the [topology file](../topo-def-file.md). It is built using [vrnetlab](../vrnetlab.md) project and essentially is a Qemu VM packaged in a docker container format. 
+[Juniper vJunos-router](https://www.juniper.net/documentation/product/us/en/vjunos-router/) is a virtualized MX router, a single-VM version of the vMX that requires no feature licenses and is meant for lab/testing use. It is identified with `-{{ kind_code_name }}-` kind in the [topology file](../topo-def-file.md). It is built using [vrnetlab](../vrnetlab.md) project and essentially is a Qemu VM packaged in a docker container format. Juniper vJunos-router nodes launched with containerlab come up pre-provisioned with SSH, SNMP, NETCONF and gNMI services enabled. @@ -42,13 +42,13 @@ Juniper vJunos-router node launched with containerlab can be managed via the fol ```bash telnet <container-name> 5000 ``` - + !!!info Default user credentials: `admin:admin@123` ## Interface naming -You can use [interfaces names](../topo-def-file.md#interface-naming) in the topology file like they appear in [[[ kind_display_name ]]]. +You can use [interfaces names](../topo-def-file.md#interface-naming) in the topology file like they appear in -{{ kind_display_name }}-. The interface naming convention is: `et-0/0/X` (or `ge-0/0/X`, `xe-0/0/X`, all are accepted), where X denotes the port number. @@ -62,7 +62,7 @@ With that naming convention in mind: Data port numbering starts at `0`. /// -The example ports above would be mapped to the following Linux interfaces inside the container running the [[[ kind_display_name ]]] VM: +The example ports above would be mapped to the following Linux interfaces inside the container running the -{{ kind_display_name }}- VM: Juniper vJunosEvolved container can have up to 17 interfaces and uses the following mapping rules: @@ -70,7 +70,7 @@ Juniper vJunosEvolved container can have up to 17 interfaces and uses the follow * `eth1` - first data interface, mapped to a first data port of vJunosEvolved VM, which is `et-0/0/0` **and not `et-0/0/1`**. * `eth2+` - second and subsequent data interface -When containerlab launches [[[ kind_display_name ]]] node the management interface of the VM gets assigned `10.0.0.15/24` address from the QEMU DHCP server. This interface is transparently stitched with container's `eth0` interface such that users can reach the management plane of the [[[ kind_display_name ]]] using containerlab's assigned IP. +When containerlab launches -{{ kind_display_name }}- node the management interface of the VM gets assigned `10.0.0.15/24` address from the QEMU DHCP server. This interface is transparently stitched with container's `eth0` interface such that users can reach the management plane of the -{{ kind_display_name }}- using containerlab's assigned IP. Data interfaces `et-0/0/0+` need to be configured with IP addressing manually using CLI or other available management interfaces. diff --git a/docs/manual/kinds/vr-vjunosswitch.md b/docs/manual/kinds/vr-vjunosswitch.md index f1f736335..3b4d52d9d 100644 --- a/docs/manual/kinds/vr-vjunosswitch.md +++ b/docs/manual/kinds/vr-vjunosswitch.md @@ -6,7 +6,7 @@ kind_display_name: Juniper vJunos-switch --- # Juniper vJunos-switch -[Juniper vJunos-switch](https://www.juniper.net/documentation/product/us/en/vjunos-switch/) is a virtualized EX9214 switch identified with `[[[ kind_code_name ]]]` kind in the [topology file](../topo-def-file.md). It is built using [vrnetlab](../vrnetlab.md) project and essentially is a Qemu VM packaged in a docker container format.
+[Juniper vJunos-switch](https://www.juniper.net/documentation/product/us/en/vjunos-switch/) is a virtualized EX9214 switch identified with `-{{ kind_code_name }}-` kind in the [topology file](../topo-def-file.md). It is built using [vrnetlab](../vrnetlab.md) project and essentially is a Qemu VM packaged in a docker container format. Juniper vJunos-switch nodes launched with containerlab come up pre-provisioned with SSH, SNMP, NETCONF and gNMI services enabled. @@ -43,7 +43,7 @@ Juniper vJunos-switch node launched with containerlab can be managed via the fol ## Interface naming -You can use [interfaces names](../topo-def-file.md#interface-naming) in the topology file like they appear in [[[ kind_display_name ]]]. +You can use [interfaces names](../topo-def-file.md#interface-naming) in the topology file like they appear in -{{ kind_display_name }}-. The interface naming convention is: `et-0/0/X` (or `ge-0/0/X`, `xe-0/0/X`, all are accepted), where X denotes the port number. @@ -57,7 +57,7 @@ With that naming convention in mind: Data port numbering starts at `0`. /// -The example ports above would be mapped to the following Linux interfaces inside the container running the [[[ kind_display_name ]]] VM: +The example ports above would be mapped to the following Linux interfaces inside the container running the -{{ kind_display_name }}- VM: Juniper vJunosEvolved container can have up to 17 interfaces and uses the following mapping rules: @@ -65,7 +65,7 @@ Juniper vJunosEvolved container can have up to 17 interfaces and uses the follow * `eth1` - first data interface, mapped to a first data port of vJunosEvolved VM, which is `et-0/0/0` **and not `et-0/0/1`**. * `eth2+` - second and subsequent data interface -When containerlab launches [[[ kind_display_name ]]] node the management interface of the VM gets assigned `10.0.0.15/24` address from the QEMU DHCP server. This interface is transparently stitched with container's `eth0` interface such that users can reach the management plane of the [[[ kind_display_name ]]] using containerlab's assigned IP. +When containerlab launches -{{ kind_display_name }}- node the management interface of the VM gets assigned `10.0.0.15/24` address from the QEMU DHCP server. This interface is transparently stitched with container's `eth0` interface such that users can reach the management plane of the -{{ kind_display_name }}- using containerlab's assigned IP. Data interfaces `et-0/0/0+` need to be configured with IP addressing manually using CLI or other available management interfaces. diff --git a/docs/manual/kinds/vr-vmx.md b/docs/manual/kinds/vr-vmx.md index 93fb84ae7..fab64a47e 100644 --- a/docs/manual/kinds/vr-vmx.md +++ b/docs/manual/kinds/vr-vmx.md @@ -6,7 +6,7 @@ kind_display_name: Juniper vMX --- # Juniper vMX -[Juniper vMX](https://www.juniper.net/documentation/product/us/en/vmx/) virtualized router is identified with `[[[ kind_code_name ]]]` kind in the [topology file](../topo-def-file.md). It is built using [vrnetlab](../vrnetlab.md) project and essentially is a Qemu VM packaged in a docker container format. +[Juniper vMX](https://www.juniper.net/documentation/product/us/en/vmx/) virtualized router is identified with `-{{ kind_code_name }}-` kind in the [topology file](../topo-def-file.md). It is built using [vrnetlab](../vrnetlab.md) project and essentially is a Qemu VM packaged in a docker container format. Juniper vMX nodes launched with containerlab come up pre-provisioned with SSH, SNMP, NETCONF and gNMI services enabled. 
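The Junos-family kinds in the hunks above all accept `et-0/0/X`/`ge-0/0/X`/`xe-0/0/X` interface names with data port numbering starting at `0`; a hypothetical link using the first data port (which maps to `eth1`, not `eth2`) might read:

```yaml
topology:
  links:
    # ge-0/0/0 is the first data port and maps to eth1 in the container
    - endpoints: ["vjunos1:ge-0/0/0", "vjunos2:ge-0/0/0"]
```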
@@ -53,7 +53,7 @@ vMX nodes use the interface naming convention `ge-0/0/X` (or `et-0/0/X`, `xe-0/0 ## Interfaces mapping -You can use [interfaces names](../topo-def-file.md#interface-naming) in the topology file like they appear in [[[ kind_display_name ]]]. +You can use [interfaces names](../topo-def-file.md#interface-naming) in the topology file like they appear in -{{ kind_display_name }}-. The interface naming convention is: `et-0/0/X` (or `ge-0/0/X`, `xe-0/0/X`, all are accepted), where X denotes the port number. @@ -67,7 +67,7 @@ With that naming convention in mind: Data port numbering starts at `0`. /// -The example ports above would be mapped to the following Linux interfaces inside the container running the [[[ kind_display_name ]]] VM: +The example ports above would be mapped to the following Linux interfaces inside the container running the -{{ kind_display_name }}- VM: Juniper vJunosEvolved container can have up to 17 interfaces and uses the following mapping rules: @@ -75,7 +75,7 @@ Juniper vJunosEvolved container can have up to 17 interfaces and uses the follow * `eth1` - first data interface, mapped to a first data port of vJunosEvolved VM, which is `et-0/0/0` **and not `et-0/0/1`**. * `eth2+` - second and subsequent data interface -When containerlab launches [[[ kind_display_name ]]] node the management interface of the VM gets assigned `10.0.0.15/24` address from the QEMU DHCP server. This interface is transparently stitched with container's `eth0` interface such that users can reach the management plane of the [[[ kind_display_name ]]] using containerlab's assigned IP. +When containerlab launches -{{ kind_display_name }}- node the management interface of the VM gets assigned `10.0.0.15/24` address from the QEMU DHCP server. This interface is transparently stitched with container's `eth0` interface such that users can reach the management plane of the -{{ kind_display_name }}- using containerlab's assigned IP. Data interfaces `et-0/0/0+` need to be configured with IP addressing manually using CLI or other available management interfaces. diff --git a/docs/manual/kinds/vr-vqfx.md b/docs/manual/kinds/vr-vqfx.md index ea00a5892..1ea1a8f17 100644 --- a/docs/manual/kinds/vr-vqfx.md +++ b/docs/manual/kinds/vr-vqfx.md @@ -6,7 +6,7 @@ kind_display_name: Juniper vQFX --- # Juniper vQFX -[Juniper vQFX](https://www.juniper.net/us/en/dm/free-vqfx10000-software.html) virtualized router is identified with `[[[ kind_code_name ]]]` kind in the [topology file](../topo-def-file.md). It is built using [vrnetlab](../vrnetlab.md) project and essentially is a Qemu VM packaged in a docker container format. +[Juniper vQFX](https://www.juniper.net/us/en/dm/free-vqfx10000-software.html) virtualized router is identified with `-{{ kind_code_name }}-` kind in the [topology file](../topo-def-file.md). It is built using [vrnetlab](../vrnetlab.md) project and essentially is a Qemu VM packaged in a docker container format. !!!warning The public vQFX image that is downloadable from the Juniper portal mentions version 20.2, but in fact, it is a 19.4 system. Until this issue is fixed (and it seems no one cares), rename the downloaded qcow2 file to mention the 19.4 version before building the container image. @@ -37,7 +37,7 @@ Juniper vQFX node launched with containerlab can be managed via the following in ## Interface naming -You can use [interfaces names](../topo-def-file.md#interface-naming) in the topology file like they appear in [[[ kind_display_name ]]]. 
+You can use [interfaces names](../topo-def-file.md#interface-naming) in the topology file like they appear in -{{ kind_display_name }}-. The interface naming convention is: `et-0/0/X` (or `ge-0/0/X`, `xe-0/0/X`, all are accepted), where X denotes the port number. @@ -51,7 +51,7 @@ With that naming convention in mind: Data port numbering starts at `0`. /// -The example ports above would be mapped to the following Linux interfaces inside the container running the [[[ kind_display_name ]]] VM: +The example ports above would be mapped to the following Linux interfaces inside the container running the -{{ kind_display_name }}- VM: Juniper vJunosEvolved container can have up to 17 interfaces and uses the following mapping rules: @@ -59,7 +59,7 @@ Juniper vJunosEvolved container can have up to 17 interfaces and uses the follow * `eth1` - first data interface, mapped to a first data port of vJunosEvolved VM, which is `et-0/0/0` **and not `et-0/0/1`**. * `eth2+` - second and subsequent data interface -When containerlab launches [[[ kind_display_name ]]] node the management interface of the VM gets assigned `10.0.0.15/24` address from the QEMU DHCP server. This interface is transparently stitched with container's `eth0` interface such that users can reach the management plane of the [[[ kind_display_name ]]] using containerlab's assigned IP. +When containerlab launches -{{ kind_display_name }}- node the management interface of the VM gets assigned `10.0.0.15/24` address from the QEMU DHCP server. This interface is transparently stitched with container's `eth0` interface such that users can reach the management plane of the -{{ kind_display_name }}- using containerlab's assigned IP. Data interfaces `et-0/0/0+` need to be configured with IP addressing manually using CLI or other available management interfaces. diff --git a/docs/manual/kinds/vr-vsrx.md b/docs/manual/kinds/vr-vsrx.md index ca932fd3a..da4308c1b 100644 --- a/docs/manual/kinds/vr-vsrx.md +++ b/docs/manual/kinds/vr-vsrx.md @@ -6,7 +6,7 @@ kind_display_name: Juniper vSRX --- # Juniper vSRX -[Juniper vSRX](https://www.juniper.net/us/en/dm/download-next-gen-vsrx-firewall-trial.html) virtualized firewall is identified with `[[[ kind_code_name ]]]` kind in the [topology file](../topo-def-file.md). It is built using [vrnetlab](../vrnetlab.md) project and essentially is a Qemu VM packaged in a docker container format. +[Juniper vSRX](https://www.juniper.net/us/en/dm/download-next-gen-vsrx-firewall-trial.html) virtualized firewall is identified with `-{{ kind_code_name }}-` kind in the [topology file](../topo-def-file.md). It is built using [vrnetlab](../vrnetlab.md) project and essentially is a Qemu VM packaged in a docker container format. ## Managing Juniper vSRX nodes @@ -34,7 +34,7 @@ Juniper vSRX node launched with containerlab can be managed via the following in ## Interface naming -You can use [interfaces names](../topo-def-file.md#interface-naming) in the topology file like they appear in [[[ kind_display_name ]]]. +You can use [interfaces names](../topo-def-file.md#interface-naming) in the topology file like they appear in -{{ kind_display_name }}-. The interface naming convention is: `et-0/0/X` (or `ge-0/0/X`, `xe-0/0/X`, all are accepted), where X denotes the port number. @@ -48,7 +48,7 @@ With that naming convention in mind: Data port numbering starts at `0`. 
/// -The example ports above would be mapped to the following Linux interfaces inside the container running the [[[ kind_display_name ]]] VM: +The example ports above would be mapped to the following Linux interfaces inside the container running the -{{ kind_display_name }}- VM: Juniper vJunosEvolved container can have up to 17 interfaces and uses the following mapping rules: @@ -56,7 +56,7 @@ Juniper vJunosEvolved container can have up to 17 interfaces and uses the follow * `eth1` - first data interface, mapped to a first data port of vJunosEvolved VM, which is `et-0/0/0` **and not `et-0/0/1`**. * `eth2+` - second and subsequent data interface -When containerlab launches [[[ kind_display_name ]]] node the management interface of the VM gets assigned `10.0.0.15/24` address from the QEMU DHCP server. This interface is transparently stitched with container's `eth0` interface such that users can reach the management plane of the [[[ kind_display_name ]]] using containerlab's assigned IP. +When containerlab launches -{{ kind_display_name }}- node the management interface of the VM gets assigned `10.0.0.15/24` address from the QEMU DHCP server. This interface is transparently stitched with container's `eth0` interface such that users can reach the management plane of the -{{ kind_display_name }}- using containerlab's assigned IP. Data interfaces `et-0/0/0+` need to be configured with IP addressing manually using CLI or other available management interfaces. diff --git a/docs/manual/kinds/vr-xrv9k.md b/docs/manual/kinds/vr-xrv9k.md index 1456df9db..c62a3534b 100644 --- a/docs/manual/kinds/vr-xrv9k.md +++ b/docs/manual/kinds/vr-xrv9k.md @@ -6,7 +6,7 @@ kind_display_name: Cisco XRv9k --- # Cisco XRv9k -[Cisco XRv9k](https://www.cisco.com/c/en/us/products/collateral/routers/ios-xrv-9000-router/datasheet-c78-734034.html) virtualized router is identified with `[[[ kind_code_name ]]]` kind in the [topology file](../topo-def-file.md). It is built using [vrnetlab](../vrnetlab.md) project and essentially is a Qemu VM packaged in a docker container format. +[Cisco XRv9k](https://www.cisco.com/c/en/us/products/collateral/routers/ios-xrv-9000-router/datasheet-c78-734034.html) virtualized router is identified with `-{{ kind_code_name }}-` kind in the [topology file](../topo-def-file.md). It is built using [vrnetlab](../vrnetlab.md) project and essentially is a Qemu VM packaged in a docker container format. Cisco XRv9k nodes launched with containerlab come up pre-provisioned with SSH, SNMP, NETCONF and gNMI (if available) services enabled. @@ -60,7 +60,7 @@ Cisco XRv9k node launched with containerlab can be managed via the following int ## Interface naming -You can use [interfaces names](../topo-def-file.md#interface-naming) in the topology file like they appear in [[[ kind_display_name ]]]. +You can use [interfaces names](../topo-def-file.md#interface-naming) in the topology file like they appear in -{{ kind_display_name }}-. The interface naming convention is: @@ -81,13 +81,13 @@ With that naming convention in mind: 3. Cisco XRv9k can have up to 90 interfaces. /// -The example ports above would be mapped to the following Linux interfaces inside the container running the [[[ kind_display_name ]]] VM: +The example ports above would be mapped to the following Linux interfaces inside the container running the -{{ kind_display_name }}- VM: - `eth0` - management interface connected to the containerlab management network. 
- `eth1` - first data interface, mapped to the first data port of the VM (rendered as `Gi0/0/0/0`)
- `eth2+` - second and subsequent data interfaces, mapped to the second and subsequent data ports of the VM (rendered as `Gi0/0/0/1` and so on)

-When containerlab launches [[[ kind_display_name ]]] node the management interface of the VM gets assigned `10.0.0.15/24` address from the QEMU DHCP server. This interface is transparently stitched with container's `eth0` interface such that users can reach the management plane of the [[[ kind_display_name ]]] using containerlab's assigned IP.
+When containerlab launches a -{{ kind_display_name }}- node, the management interface of the VM gets assigned the `10.0.0.15/24` address from the QEMU DHCP server. This interface is transparently stitched with the container's `eth0` interface such that users can reach the management plane of the -{{ kind_display_name }}- using containerlab's assigned IP.

Data interfaces `Gi0/0/0/0+` need to be configured with IP addressing manually using CLI or other available management interfaces.

diff --git a/docs/manual/topo-def-file.md b/docs/manual/topo-def-file.md
index 0c7cc6673..b8b9afd24 100644
--- a/docs/manual/topo-def-file.md
+++ b/docs/manual/topo-def-file.md
@@ -47,7 +47,7 @@ Its user's responsibility to give labs unique names if they plan to run multiple

The name is a free-formed string, though it is better not to use dashes (`-`) as they are used to separate lab names from node names.

-When containerlab starts the containers, their names will be generated using the following pattern: `clab-{{lab-name}}-{{node-name}}`. The lab name here is used to make the container's names unique between two different labs, even if the nodes are named the same.
+When containerlab starts the containers, their names will be generated using the following pattern: `clab-${lab-name}-${node-name}`. The lab name here is used to make container names unique between two different labs, even if the nodes are named the same.
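To make this pattern concrete, consider the minimal sketch below; the lab name, node names, and image are invented for illustration:

```yaml
# topo.clab.yml - a hypothetical two-node lab
name: demo01

topology:
  nodes:
    r1:
      kind: linux
      image: alpine:3 # placeholder image
    r2:
      kind: linux
      image: alpine:3
  links:
    - endpoints: ["r1:eth1", "r2:eth1"]
```

Deploying this topology produces containers named `clab-demo01-r1` and `clab-demo01-r2`; a second lab that reuses the node names `r1`/`r2` under a different lab name would still get unique container names.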
### Prefix

diff --git a/docs/manual/vrnetlab.md b/docs/manual/vrnetlab.md
index 912db1aef..6d759e50f 100644
--- a/docs/manual/vrnetlab.md
+++ b/docs/manual/vrnetlab.md
@@ -35,7 +35,7 @@ Containerlab depends on [`hellt/vrnetlab`](https://github.com/hellt/vrnetlab) pr

| containerlab[^3] | vrnetlab[^4] | Notes |
| ---------------- | ------------------------------------------------------------------ | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------ |
-| `0.58.0` | [`0.20.1`](https://github.com/hellt/vrnetlab/releases/tag/v0.20.1) | [[[vr_rns["0.20.1"]]]] |
+| `0.58.0` | [`0.20.1`](https://github.com/hellt/vrnetlab/releases/tag/v0.20.1) | -{{vr_rns["0.20.1"]}}- |
| `0.56.0` | [`0.18.1`](https://github.com/hellt/vrnetlab/releases/tag/v0.18.1) | Added support for [Dell SONiC](kinds/dell_sonic.md), [SONiC VM](kinds/sonic-vm.md), [Cisco Catalyst 9000v](kinds/vr-cat9kv.md) |
| `0.55.0` | [`0.17.0`](https://github.com/hellt/vrnetlab/releases/tag/v0.17.0) | Added support for [Juniper vJunos-router](kinds/vr-vjunosrouter.md), [Generic VM](kinds/generic_vm.md), support for setting qemu parameters via env vars for the nodes |

diff --git a/docs/overrides/.icons/clab/icon.svg b/docs/overrides/.icons/clab/icon.svg
new file mode 100644
index 000000000..fbe044141
--- /dev/null
+++ b/docs/overrides/.icons/clab/icon.svg
@@ -0,0 +1,8 @@
+
+
+
+
+
+
+
+
\ No newline at end of file

diff --git a/docs/rn/0.46.md b/docs/rn/0.46.md
index 5e07ddf9e..ad36afca2 100644
--- a/docs/rn/0.46.md
+++ b/docs/rn/0.46.md
@@ -65,7 +65,7 @@ Our [Clabernetes](../manual/clabernetes/index.md) project will use the `vxlan-st

## Containerlab on Apple M1/M2

-Systems that requires specific x86 CPU flags or nested virtualization may be launched with a UTM/Qemu VM as documented in the newly added [Apple MacOS and ARM chapter](../install.md#arm).
+Systems that require specific x86 CPU flags or nested virtualization may be launched with a UTM/Qemu VM as documented in the newly added Apple MacOS and ARM chapter.

We prepared the UTM image with containerlab installed to make it easier to get started running CPU-picky systems on Apple M1/M2.
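To tie the vrnetlab compatibility matrix above to an actual topology: once an image has been built by running `make` in the corresponding `hellt/vrnetlab` platform directory, a node can reference it as sketched below (the kind name and the image tag are assumptions for the example):

```yaml
# hypothetical node backed by an image built with hellt/vrnetlab v0.20.1
topology:
  nodes:
    xr1:
      kind: cisco_xrv9k # kind name assumed
      image: vrnetlab/vr-xrv9k:7.11.1 # tag depends on the qcow2 used for the build
```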
diff --git a/docs/stylesheets/extra.css b/docs/stylesheets/extra.css index ac8ce5795..1b4c9b784 100644 --- a/docs/stylesheets/extra.css +++ b/docs/stylesheets/extra.css @@ -1,3 +1,17 @@ +:root { + /* Black logo outline for light theme */ + --logo-outline-color: #001135; +} + +[data-md-color-scheme="slate"] { + /* set logo outline color to white when dark theme is used */ + --logo-outline-color: #ffffff; +} + +#outline { + fill: var(--logo-outline-color); +} + .mdx-content__footer { margin-top: 20px; text-align: center; @@ -148,4 +162,177 @@ div.highlight.code-scroll-lg pre>code { div.highlight.code-scroll-sm pre>code { max-height: 50vh; -} \ No newline at end of file +} + +/* START code-example admonition styles */ +:root { + --md-admonition-icon--code-example: url('data:image/svg+xml;charset=utf-8,') +} + +.md-typeset .admonition.code-example, +.md-typeset details.code-example { + border-color: #9e9e9e; +} + +.md-typeset .code-example>.admonition-title, +.md-typeset .code-example>summary { + background-color: #9e9e9e1a; +} + +.md-typeset .code-example>.admonition-title::before, +.md-typeset .code-example>summary::before { + background-color: #9e9e9e; + -webkit-mask-image: var(--md-admonition-icon--code-example); + mask-image: var(--md-admonition-icon--code-example); +} + +.md-typeset .code-example>.admonition-title::after, +.md-typeset .code-example>summary::after { + background-color: #9e9e9e; +} + +/* END code-example admonition styles */ + +/* START subtle-note admonition styles */ +:root { + --md-admonition-icon--subtle-note: url('data:image/svg+xml;charset=utf-8,') +} + +.md-typeset .admonition.subtle-note, +.md-typeset details.subtle-note { + border-color: #9e9e9e; +} + +.md-typeset .subtle-note>.admonition-title, +.md-typeset .subtle-note>summary { + background-color: #9e9e9e1a; +} + +.md-typeset .subtle-note>.admonition-title::before, +.md-typeset .subtle-note>summary::before { + background-color: #9e9e9e; + -webkit-mask-image: var(--md-admonition-icon--subtle-note); + mask-image: var(--md-admonition-icon--subtle-note); +} + +.md-typeset .subtle-note>.admonition-title::after, +.md-typeset .subtle-note>summary::after { + background-color: #9e9e9e; +} + +/* END subtle-note admonition styles */ + +/* START subtle-info admonition styles */ +:root { + --md-admonition-icon--subtle-info: url('data:image/svg+xml;charset=utf-8,') +} + +.md-typeset .admonition.subtle-info, +.md-typeset details.subtle-info { + border-color: #9e9e9e; +} + +.md-typeset .subtle-info>.admonition-title, +.md-typeset .subtle-info>summary { + background-color: #9e9e9e1a; +} + +.md-typeset .subtle-info>.admonition-title::before, +.md-typeset .subtle-info>summary::before { + background-color: #9e9e9e; + -webkit-mask-image: var(--md-admonition-icon--subtle-info); + mask-image: var(--md-admonition-icon--subtle-info); +} + +.md-typeset .subtle-info>.admonition-title::after, +.md-typeset .subtle-info>summary::after { + background-color: #9e9e9e; +} + +/* END subtle-info admonition styles */ + +/* START subtle-question admonition styles */ +:root { + --md-admonition-icon--subtle-question: url('data:image/svg+xml;charset=utf-8,') +} + +.md-typeset .admonition.subtle-question, +.md-typeset details.subtle-question { + border-color: #9e9e9e; +} + +.md-typeset .subtle-question>.admonition-title, +.md-typeset .subtle-question>summary { + background-color: #9e9e9e1a; +} + +.md-typeset .subtle-question>.admonition-title::before, +.md-typeset .subtle-question>summary::before { + background-color: #9e9e9e; + -webkit-mask-image: 
var(--md-admonition-icon--subtle-question);
+  mask-image: var(--md-admonition-icon--subtle-question);
+}
+
+.md-typeset .subtle-question>.admonition-title::after,
+.md-typeset .subtle-question>summary::after {
+  background-color: #9e9e9e;
+}
+
+/* END subtle-question admonition styles */
+
+/* START thin scrollbar styles */
+/* First unset all rules that break Chrome styling */
+.md-typeset pre>code,
+.md-search__scrollwrap,
+.md-sidebar__scrollwrap {
+  scrollbar-color: unset;
+  scrollbar-width: unset;
+}
+
+.md-typeset pre>code:hover,
+.md-search__scrollwrap:hover,
+.md-sidebar__scrollwrap:focus-within,
+.md-sidebar__scrollwrap:hover {
+  scrollbar-color: unset;
+}
+
+.md-tooltip2__inner {
+  scrollbar-width: unset;
+}
+
+/* Wrap new scrollbar properties in @supports rule for browsers without `::-webkit-scrollbar-*` support */
+/* This way Chrome won't override `::-webkit-scrollbar-*` selectors */
+/* https://developer.chrome.com/docs/css-ui/scrollbar-styling */
+@supports not selector(::-webkit-scrollbar) {
+
+  .md-typeset pre>code,
+  .md-search__scrollwrap,
+  .md-sidebar__scrollwrap {
+    scrollbar-color: var(--md-default-fg-color--lighter) #0000;
+    scrollbar-width: thin;
+  }
+
+  .md-typeset pre>code:hover,
+  .md-search__scrollwrap:hover,
+  .md-sidebar__scrollwrap:focus-within,
+  .md-sidebar__scrollwrap:hover {
+    scrollbar-color: var(--md-accent-fg-color) #0000;
+  }
+
+  .md-tooltip2__inner {
+    scrollbar-width: thin;
+  }
+}
+
+/* Restore hover color for Chrome */
+@supports selector(::-webkit-scrollbar) {
+
+  .md-typeset pre>code:hover::-webkit-scrollbar-thumb,
+  .md-search__scrollwrap:hover::-webkit-scrollbar-thumb,
+  .md-sidebar__scrollwrap:focus-within::-webkit-scrollbar-thumb,
+  .md-sidebar__scrollwrap:hover::-webkit-scrollbar-thumb {
+    background-color: var(--md-accent-fg-color);
+  }
+}
+
+/* END thin scrollbar styles */
\ No newline at end of file
diff --git a/macros/main.py b/macros/main.py
new file mode 100644
index 000000000..a3965975b
--- /dev/null
+++ b/macros/main.py
@@ -0,0 +1,61 @@
+"""
+Mkdocs-macros module
+"""
+
+
+def define_env(env):
+    """
+    Macros used in the containerlab documentation
+    """
+
+    @env.macro
+    def diagram(url, page, title, zoom=2):
+        """
+        Diagram macro
+        """
+
+        # to allow shorthand syntax for drawio URLs, like:
+        # srl-labs/srlinux-getting-started/main/diagrams/topology.drawio
+        # we will append the missing prefix to it if it doesn't start with http already
+        if not url.startswith("http"):
+            url = "https://raw.githubusercontent.com/" + url
+
+        diagram_tmpl = f"""
+<figure>
+    <div class="mxgraph" style="max-width:100%;border:1px solid transparent;"
+         data-mxgraph='{{"page":{page},"zoom":{zoom},"highlight":"#0000ff","nav":true,"resize":true,"edit":"_blank","url":"{url}"}}'>
+    </div>
+    {f"<figcaption>{title}</figcaption>" if title else ""}
+</figure>
+"""
+
+        return diagram_tmpl
+
+    @env.macro
+    def video(url):
+        """
+        HTML5 video macro
+        """
+
+        video_tmpl = f"""
+<video width="100%" controls><source src="{url}" type="video/mp4"></video>
+"""
+
+        return video_tmpl
+
+    @env.macro
+    def youtube(url):
+        """
+        Youtube video macro
+        """
+
+        video_tmpl = f"""
+<iframe width="100%" height="480" src="{url}"
+    frameborder="0" allow="fullscreen; picture-in-picture" allowfullscreen></iframe>
+"""
+
+        return video_tmpl
diff --git a/mkdocs.yml b/mkdocs.yml
index 331710f7a..7a21447b7 100644
--- a/mkdocs.yml
+++ b/mkdocs.yml
@@ -252,7 +252,7 @@ theme:
    text: Google Sans
    code: Fira Mono
  icon:
-    logo: fontawesome/solid/flask
+    logo: clab/icon
    repo: fontawesome/brands/github
    edit: material/file-edit-outline
  favicon: images/flask.svg
@@ -270,10 +270,14 @@ plugins:
  #- typeset
  - glightbox
  - macros:
-      j2_block_start_string: "[[[%"
-      j2_block_end_string: "%]]]"
-      j2_variable_start_string: "[[["
-      j2_variable_end_string: "]]]"
+      j2_block_start_string: "-{{%"
+      j2_block_end_string: "%}}-"
+      j2_variable_start_string: "-{{"
+      j2_variable_end_string: "}}-"
+      # include_yaml:
+      #   - macros/vars.yml
+      include_dir: macros
+      module_name: macros/main

# Customization
extra:
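For context on how the re-delimited macros are consumed, a hypothetical kind page could set page-level variables in its front matter and reference them with the new `-{{ ... }}-` markers, mirroring the kind pages edited earlier in this patch (the values and the diagram URL are illustrative):

```markdown
---
kind_code_name: juniper_vsrx
kind_display_name: Juniper vSRX
---

# -{{ kind_display_name }}-

-{{ kind_display_name }}- is identified with the `-{{ kind_code_name }}-` kind in the topology file.

-{{ diagram(url='srl-labs/containerlab/main/docs/images/vsrx.drawio', page=0, title='Lab topology') }}-
```

The `-{{` / `}}-` delimiters replace the previous `[[[` / `]]]` markers, presumably so that plain Jinja-style `{{ }}` sequences appearing verbatim in the docs no longer trigger the macros plugin.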