From 8c47afc23d6959a82189d8c01e036ca854e30e05 Mon Sep 17 00:00:00 2001 From: Marc Vesin Date: Mon, 22 May 2023 14:12:49 +0200 Subject: [PATCH 01/41] remove unused obsolete conda yaml file --- envs/development/conda/fedbiomed.yaml | 11 ----------- 1 file changed, 11 deletions(-) delete mode 100644 envs/development/conda/fedbiomed.yaml diff --git a/envs/development/conda/fedbiomed.yaml b/envs/development/conda/fedbiomed.yaml deleted file mode 100644 index fe5b2cb6d..000000000 --- a/envs/development/conda/fedbiomed.yaml +++ /dev/null @@ -1,11 +0,0 @@ -# -# global conda environment (unused ???) -# -name: fedbiomed - -channels: - - conda-forge - -dependencies: - - python>=3.9 - - pip From 57cee7b99cc9438b7877a3879109f24764f0e38e Mon Sep 17 00:00:00 2001 From: Marc Vesin Date: Mon, 22 May 2023 18:33:22 +0200 Subject: [PATCH 02/41] WIP package version update - node environment builds (to be tested) --- envs/development/conda/fedbiomed-node.yaml | 63 +++++++++++----------- 1 file changed, 33 insertions(+), 30 deletions(-) diff --git a/envs/development/conda/fedbiomed-node.yaml b/envs/development/conda/fedbiomed-node.yaml index dbfe29b99..e0af1de3f 100644 --- a/envs/development/conda/fedbiomed-node.yaml +++ b/envs/development/conda/fedbiomed-node.yaml @@ -10,56 +10,59 @@ channels: dependencies: # minimal environment - - python >=3.9,<3.10 - - pip - - jupyter - - ipython + # python 3.11 recently released 2022-11 and not yet supported by some deps including: torchvision + - python >=3.10,<3.11 + - pip >= 23.0 + - jupyter ~=1.0.0 + - ipython ~=8.13.2 # tests - - pytest >6.2.2 - - tinydb >=4.4.0,<5.0.0 - - tabulate >=0.8.9,<0.9.0 + - pytest ~=7.2.0 + - tinydb ~=4.7.1 + - tabulate >=0.9.0,<0.10.0 # code - - GitPython >=3.1.14,<4.0.0 - - requests >=2.25.1,<3.0.0 - - paho-mqtt >=1.5.1,<2.0.0 - - validators >=0.18.2,<0.19.0 - - tqdm >=4.59.0,<5.0.0 - - git - - packaging >=23.0,<24.0 + - GitPython >=3.1.31,<4.0.0 + - requests ~=2.29.0 + - paho-mqtt ~=1.6.1 + - validators >=0.20.0,<0.21.0 + - tqdm ~=4.65.0 + - git ~=2.40.1 + - packaging ~=23.1 # these two have to be aligned - - cryptography ~=39.0 - - pyopenssl ~=23.0 + - cryptography ~=40.0.0 + - pyopenssl ~=23.1.1 # git notebook striper - - nbstripout - - joblib >=1.0.1 + - nbstripout >=0.6.1,<0.7.0 + - joblib >=1.2.0,<1.3.0 # sklearn # scipy >= 1.9 from conda-forge needs recent GLIBC thus causes issue 389 - # with many current systems + # with some older systems # another option is to install scipy from pip - - scipy >=1.8.0,<1.9.0 - - scikit-learn >=1.0.0,<1.1.0 + - scipy >=1.10.0,<1.11.0 + - scikit-learn >=1.2.0,<1.3.0 # other - - itk + - itk >=5.3.0,<5.4.0 # nn - pip: # nn - - torch >=1.8.0,<2.0.0 - - torchvision >=0.9.0,<0.15.0 - - opacus >=1.2.0,<1.3.0 + # torch 2.x recently released (2023-03) and not yet supported by some deps including: opacus, declearn + - torch ~=1.13.0 + - torchvision >=0.14.0,<0.15.0 + - opacus >=1.4.0,<1.5.0 - monai >=1.1.0,<1.2.0 # other - msgpack ~=1.0 - persist-queue >=0.5.1,<0.6.0 - pytorch-ignite >=0.4.4,<0.5.0 - - pandas >=1.2.3,<2.0.0 + # pandas 2.x recently released (2023-04): dont use yet + - pandas ~=1.5.0 - openpyxl >= 3.0.9,<3.1 - - JSON-log-formatter - - python-minifier ==2.5.0 + - JSON-log-formatter ~=0.5.2 + - python-minifier ~=2.5.0 # FLamby - git+https://github.com/owkin/FLamby@main # declearn - - declearn[torch] ~= 2.1.0 + - declearn[torch] ~=2.1.0 - gmpy2 >=2.1,< 2.2 #### Notebook-specific packages #### # This section contains packages that are needed only to run specific notebooks - - unet == 0.7.7 + - unet 
>=0.7.7,<0.8.0 From b13791aa1a6cb57bd8a76539c5a7dbfd22932028 Mon Sep 17 00:00:00 2001 From: Marc Vesin Date: Tue, 23 May 2023 09:07:04 +0200 Subject: [PATCH 03/41] - adapt fedbiomed-network.yaml for dev environment - adapt restful container build for dev/vpn environments - misc configure_conda log errors --- envs/development/conda/fedbiomed-network.yaml | 15 ++--- envs/development/docker/docker-compose.yml | 4 +- .../docker/restful/build_files/Dockerfile | 2 +- .../restful/build_files/requirements.txt | 8 +-- .../restful/run_mounts/app/fedbiomed/urls.py | 6 +- envs/vpn/conda/fedbiomed-node.yaml | 63 ++++++++++--------- envs/vpn/docker/docker-compose.yml | 4 +- .../vpn/docker/restful/build_files/Dockerfile | 4 +- .../restful/build_files/requirements.txt | 8 +-- .../restful/run_mounts/app/fedbiomed/urls.py | 6 +- scripts/configure_conda | 4 +- 11 files changed, 65 insertions(+), 59 deletions(-) diff --git a/envs/development/conda/fedbiomed-network.yaml b/envs/development/conda/fedbiomed-network.yaml index 79392374f..5d3c41622 100644 --- a/envs/development/conda/fedbiomed-network.yaml +++ b/envs/development/conda/fedbiomed-network.yaml @@ -10,13 +10,14 @@ channels: dependencies: # common - - python >=3.9,<3.10 - - pip + - python >=3.10,<3.11 + - pip >= 23.0 # http server - - django = 3.1.7 - - djangorestframework = 3.12.2 - - django-cleanup - - gunicorn + # django 4.2.0 not yet supported by: djangorestframework, django-cleanup + - django >=4.1.7,<4.2.0 + - djangorestframework >=3.14.0,<3.15.0 + - django-cleanup ~=7.0.0 + - gunicorn ~=20.1.0 # for utilities - pip: - - persist-queue + - persist-queue >=0.8.0,<0.9.0 diff --git a/envs/development/docker/docker-compose.yml b/envs/development/docker/docker-compose.yml index 3dbb404a2..8c76c6af5 100644 --- a/envs/development/docker/docker-compose.yml +++ b/envs/development/docker/docker-compose.yml @@ -18,8 +18,8 @@ services: environment: - PRODUCTION=1 - DJANGO_SUPERUSER_USERNAME=admin - - DJANGO_SUPERUSER_EMAIL=santiago-smith.silva-rincon@inria.fr - - DJANGO_SUPERUSER_PASSWORD=admin123 + - DJANGO_SUPERUSER_EMAIL=admin@nowhere.foo + - DJANGO_SUPERUSER_PASSWORD=admin ports: - "8844:8000" volumes: diff --git a/envs/development/docker/restful/build_files/Dockerfile b/envs/development/docker/restful/build_files/Dockerfile index 3fcb00a02..8a1b49293 100644 --- a/envs/development/docker/restful/build_files/Dockerfile +++ b/envs/development/docker/restful/build_files/Dockerfile @@ -1,4 +1,4 @@ -FROM python:3.9-alpine +FROM python:3.10-alpine ENV PYTHONUNBUFFERED 1 # http restful server, expected on port 8844 by fedbiomed diff --git a/envs/development/docker/restful/build_files/requirements.txt b/envs/development/docker/restful/build_files/requirements.txt index d4e277d37..491c2f168 100644 --- a/envs/development/docker/restful/build_files/requirements.txt +++ b/envs/development/docker/restful/build_files/requirements.txt @@ -1,4 +1,4 @@ -django==3.1.7 -djangorestframework==3.12.2 -django-cleanup -gunicorn \ No newline at end of file +django>=4.1.7,<4.2.0 +djangorestframework>=3.14.0,<3.15.0 +django-cleanup~=7.0.0 +gunicorn~=20.1.0 \ No newline at end of file diff --git a/envs/development/docker/restful/run_mounts/app/fedbiomed/urls.py b/envs/development/docker/restful/run_mounts/app/fedbiomed/urls.py index 1978ef1cf..9f19a4409 100644 --- a/envs/development/docker/restful/run_mounts/app/fedbiomed/urls.py +++ b/envs/development/docker/restful/run_mounts/app/fedbiomed/urls.py @@ -26,14 +26,14 @@ from django.conf import settings from django.conf.urls.static 
import static from django.views.static import serve -from django.conf.urls import url +from django.urls import re_path # Serve files in production if not settings.DEBUG: urlpatterns += [ - url(r'^media/(?P.*)$', serve, {'document_root': settings.MEDIA_ROOT}), - url(r'^static/(?P.*)$', serve, {'document_root': settings.STATIC_ROOT}), + re_path(r'^media/(?P.*)$', serve, {'document_root': settings.MEDIA_ROOT}), + re_path(r'^static/(?P.*)$', serve, {'document_root': settings.STATIC_ROOT}), ] else: urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT) diff --git a/envs/vpn/conda/fedbiomed-node.yaml b/envs/vpn/conda/fedbiomed-node.yaml index 0e0d46fa0..e0af1de3f 100644 --- a/envs/vpn/conda/fedbiomed-node.yaml +++ b/envs/vpn/conda/fedbiomed-node.yaml @@ -10,56 +10,59 @@ channels: dependencies: # minimal environment - - python >=3.9,<3.10 - - pip - - jupyter - - ipython + # python 3.11 recently released 2022-11 and not yet supported by some deps including: torchvision + - python >=3.10,<3.11 + - pip >= 23.0 + - jupyter ~=1.0.0 + - ipython ~=8.13.2 # tests - - pytest >6.2.2 - - tinydb >=4.4.0,<5.0.0 - - tabulate >=0.8.9,<0.9.0 + - pytest ~=7.2.0 + - tinydb ~=4.7.1 + - tabulate >=0.9.0,<0.10.0 # code - - GitPython >=3.1.14,<4.0.0 - - requests >=2.25.1,<3.0.0 - - paho-mqtt >=1.5.1,<2.0.0 - - validators >=0.18.2,<0.19.0 - - tqdm >=4.59.0,<5.0.0 - - git - - packaging >=23.0,<24.0 + - GitPython >=3.1.31,<4.0.0 + - requests ~=2.29.0 + - paho-mqtt ~=1.6.1 + - validators >=0.20.0,<0.21.0 + - tqdm ~=4.65.0 + - git ~=2.40.1 + - packaging ~=23.1 # these two have to be aligned - - cryptography ~=39.0 - - pyopenssl ~=23.0 + - cryptography ~=40.0.0 + - pyopenssl ~=23.1.1 # git notebook striper - - nbstripout - - joblib >=1.0.1 + - nbstripout >=0.6.1,<0.7.0 + - joblib >=1.2.0,<1.3.0 # sklearn # scipy >= 1.9 from conda-forge needs recent GLIBC thus causes issue 389 - # with many current systems + # with some older systems # another option is to install scipy from pip - - scipy >=1.8.0,<1.9.0 - - scikit-learn >=1.0.0,<1.1.0 + - scipy >=1.10.0,<1.11.0 + - scikit-learn >=1.2.0,<1.3.0 # other - - itk + - itk >=5.3.0,<5.4.0 # nn - pip: # nn - - torch >=1.8.0,<2.0.0 - - torchvision >=0.9.0,<0.15.0 - - opacus >=1.2.0,<1.3.0 + # torch 2.x recently released (2023-03) and not yet supported by some deps including: opacus, declearn + - torch ~=1.13.0 + - torchvision >=0.14.0,<0.15.0 + - opacus >=1.4.0,<1.5.0 - monai >=1.1.0,<1.2.0 # other - msgpack ~=1.0 - persist-queue >=0.5.1,<0.6.0 - pytorch-ignite >=0.4.4,<0.5.0 - - pandas >=1.2.3,<2.0.0 + # pandas 2.x recently released (2023-04): dont use yet + - pandas ~=1.5.0 - openpyxl >= 3.0.9,<3.1 - - JSON-log-formatter - - python-minifier ==2.5.0 + - JSON-log-formatter ~=0.5.2 + - python-minifier ~=2.5.0 # FLamby - git+https://github.com/owkin/FLamby@main # declearn - - declearn[torch] ~= 2.0.1 + - declearn[torch] ~=2.1.0 - gmpy2 >=2.1,< 2.2 #### Notebook-specific packages #### # This section contains packages that are needed only to run specific notebooks - - unet == 0.7.7 + - unet >=0.7.7,<0.8.0 diff --git a/envs/vpn/docker/docker-compose.yml b/envs/vpn/docker/docker-compose.yml index 5c2357063..c56a766c3 100644 --- a/envs/vpn/docker/docker-compose.yml +++ b/envs/vpn/docker/docker-compose.yml @@ -332,8 +332,8 @@ services: - CONTAINER_GROUP - PRODUCTION=1 - DJANGO_SUPERUSER_USERNAME=admin - - DJANGO_SUPERUSER_EMAIL=dummy@gitlab.inria.fr - - DJANGO_SUPERUSER_PASSWORD=admin123 + - DJANGO_SUPERUSER_EMAIL=admin@nowhere.foo + - DJANGO_SUPERUSER_PASSWORD=admin # 
no open ports ! this would backdoor the vpnization #ports: # - "8844:8000" diff --git a/envs/vpn/docker/restful/build_files/Dockerfile b/envs/vpn/docker/restful/build_files/Dockerfile index d42b26196..b872cada7 100644 --- a/envs/vpn/docker/restful/build_files/Dockerfile +++ b/envs/vpn/docker/restful/build_files/Dockerfile @@ -1,7 +1,7 @@ # temporary builder image for wireguard tools # - need coherent system version with mosquitto image # - may need update for properly compiling boringtun (eg: cargo build of package failing on `buster-slim`) -FROM python:3.9-alpine as builder +FROM python:3.10-alpine as builder RUN apk update && apk add git alpine-sdk linux-headers @@ -25,7 +25,7 @@ RUN git clone https://git.zx2c4.com/wireguard-tools && \ # # docker image for restful server # -FROM python:3.9-alpine +FROM python:3.10-alpine ENV PYTHONUNBUFFERED 1 ARG CONTAINER_GID diff --git a/envs/vpn/docker/restful/build_files/requirements.txt b/envs/vpn/docker/restful/build_files/requirements.txt index d4e277d37..491c2f168 100644 --- a/envs/vpn/docker/restful/build_files/requirements.txt +++ b/envs/vpn/docker/restful/build_files/requirements.txt @@ -1,4 +1,4 @@ -django==3.1.7 -djangorestframework==3.12.2 -django-cleanup -gunicorn \ No newline at end of file +django>=4.1.7,<4.2.0 +djangorestframework>=3.14.0,<3.15.0 +django-cleanup~=7.0.0 +gunicorn~=20.1.0 \ No newline at end of file diff --git a/envs/vpn/docker/restful/run_mounts/app/fedbiomed/urls.py b/envs/vpn/docker/restful/run_mounts/app/fedbiomed/urls.py index 1978ef1cf..9f19a4409 100644 --- a/envs/vpn/docker/restful/run_mounts/app/fedbiomed/urls.py +++ b/envs/vpn/docker/restful/run_mounts/app/fedbiomed/urls.py @@ -26,14 +26,14 @@ from django.conf import settings from django.conf.urls.static import static from django.views.static import serve -from django.conf.urls import url +from django.urls import re_path # Serve files in production if not settings.DEBUG: urlpatterns += [ - url(r'^media/(?P.*)$', serve, {'document_root': settings.MEDIA_ROOT}), - url(r'^static/(?P.*)$', serve, {'document_root': settings.STATIC_ROOT}), + re_path(r'^media/(?P.*)$', serve, {'document_root': settings.MEDIA_ROOT}), + re_path(r'^static/(?P.*)$', serve, {'document_root': settings.STATIC_ROOT}), ] else: urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT) diff --git a/scripts/configure_conda b/scripts/configure_conda index 6d15326bb..0cef7005f 100755 --- a/scripts/configure_conda +++ b/scripts/configure_conda @@ -73,11 +73,13 @@ function activate_environment() { # pip may result into inconsistencies base=$basedir/envs/development/conda/$1 + logfile=$1 # check if alternate yaml file is available (OS dependent files usually) if [ -f "${base}-${ALTERNATE_YAML}.yaml" ] then file="${base}-${ALTERNATE_YAML}.yaml" + logfile=${logfile}-${ALTERNATE_YAML} else file="${base}.yaml" fi @@ -88,7 +90,7 @@ function activate_environment() { then echo conda env update --file "$file" else - conda env update --file "$file" 2>/dev/null + conda env update --file "$file" 2>/tmp/${0##*/}_${logfile}.log fi } From 0f6d4213779d9001143fc984d835cff2063b6ec7 Mon Sep 17 00:00:00 2001 From: Marc Vesin Date: Tue, 23 May 2023 11:38:40 +0200 Subject: [PATCH 04/41] WIP package updates: - go to pandas 2.0 --- envs/development/conda/fedbiomed-node.yaml | 4 ++-- envs/vpn/conda/fedbiomed-node.yaml | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/envs/development/conda/fedbiomed-node.yaml b/envs/development/conda/fedbiomed-node.yaml index e0af1de3f..7311c6958 100644 
--- a/envs/development/conda/fedbiomed-node.yaml +++ b/envs/development/conda/fedbiomed-node.yaml @@ -53,8 +53,8 @@ dependencies: - msgpack ~=1.0 - persist-queue >=0.5.1,<0.6.0 - pytorch-ignite >=0.4.4,<0.5.0 - # pandas 2.x recently released (2023-04): dont use yet - - pandas ~=1.5.0 + # pandas 2.x recently released (2023-04) but few breaking changes + - pandas ~=2.0.1 - openpyxl >= 3.0.9,<3.1 - JSON-log-formatter ~=0.5.2 - python-minifier ~=2.5.0 diff --git a/envs/vpn/conda/fedbiomed-node.yaml b/envs/vpn/conda/fedbiomed-node.yaml index e0af1de3f..7311c6958 100644 --- a/envs/vpn/conda/fedbiomed-node.yaml +++ b/envs/vpn/conda/fedbiomed-node.yaml @@ -53,8 +53,8 @@ dependencies: - msgpack ~=1.0 - persist-queue >=0.5.1,<0.6.0 - pytorch-ignite >=0.4.4,<0.5.0 - # pandas 2.x recently released (2023-04): dont use yet - - pandas ~=1.5.0 + # pandas 2.x recently released (2023-04) but few breaking changes + - pandas ~=2.0.1 - openpyxl >= 3.0.9,<3.1 - JSON-log-formatter ~=0.5.2 - python-minifier ~=2.5.0 From 5aed95cedad2294c1b0d5757dc13b6d9411a0ad0 Mon Sep 17 00:00:00 2001 From: Marc Vesin Date: Tue, 23 May 2023 13:31:52 +0200 Subject: [PATCH 05/41] WIP adapt package versions - conda envs for gui researcher: conda builds, env to be tested --- envs/ci/conda/fedbiomed-ci.yaml | 6 +- envs/development/conda/fedbiomed-gui.yaml | 51 ++++++------- envs/development/conda/fedbiomed-node.yaml | 1 - .../conda/fedbiomed-researcher.yaml | 71 +++++++++---------- envs/vpn/conda/fedbiomed-gui.yaml | 53 +++++++------- envs/vpn/conda/fedbiomed-node.yaml | 1 - envs/vpn/conda/fedbiomed-researcher.yaml | 71 +++++++++---------- tests/test_common_cli.py | 1 - 8 files changed, 128 insertions(+), 127 deletions(-) diff --git a/envs/ci/conda/fedbiomed-ci.yaml b/envs/ci/conda/fedbiomed-ci.yaml index 9380a5966..11224d849 100644 --- a/envs/ci/conda/fedbiomed-ci.yaml +++ b/envs/ci/conda/fedbiomed-ci.yaml @@ -9,10 +9,10 @@ channels: dependencies: # minimal environment - - python >=3.9,<3.10 - - pip + - python >=3.10,<3.11 + - pip >= 23.0 # specific for CI - - ansifilter + - ansifilter ~=2.17.0 #- bats-core # # conda provide bats-core 1.3.0 which does not support option diff --git a/envs/development/conda/fedbiomed-gui.yaml b/envs/development/conda/fedbiomed-gui.yaml index 15d3d07c1..7b07e4717 100644 --- a/envs/development/conda/fedbiomed-gui.yaml +++ b/envs/development/conda/fedbiomed-gui.yaml @@ -10,38 +10,41 @@ channels: dependencies: # common - - python >=3.9,<3.10 - - nodejs == 16.13.1 - - yarn >=1.22.19,<=1.22.99 - - pip - - ipython - - flask >= 2.0.0,<2.0.2 - - paho-mqtt >=1.5.1,<2.0.0 + - python >=3.10,<3.11 + - nodejs ~=18.15.0 + - yarn >=3.5.1,<3.6 + - pip >= 23.0 + - ipython ~=8.13.2 + - flask >= 2.3.2,<2.4.0 + - paho-mqtt ~=1.6.1 # tests - - tinydb >=4.4.0,<5.0.0 - - tabulate >=0.8.9,<0.9.0 - - jsonschema >=4.2.0,< 4.2.1 - - requests >=2.25.1,<3.0.0 - - git - - packaging >=23.0,<24.0 + - tinydb ~=4.7.1 + - tabulate >=0.9.0,<0.10.0 + - jsonschema >=4.17.3,<4.18.0 + - requests ~=2.29.0 + - git ~=2.40.1 + - packaging ~=23.1 # sklearn # scipy >= 1.9 from conda-forge needs recent GLIBC thus causes issue 389 # with many current systems # another option is to install scipy from pip - - scipy >=1.8.0,<1.9.0 - - scikit-learn >=1.0.0,<1.1.0 - - itk + - scipy >=1.10.0,<1.11.0 + - scikit-learn >=1.2.0,<1.3.0 + # other + - itk >=5.3.0,<5.4.0 - pip: # nn - - torch >=1.8.0,<2.0.0 - - torchvision >=0.9.0,<0.15.0 + # torch 2.x recently released (2023-03) and not yet supported by some deps including: opacus, declearn + - torch ~=1.13.0 + - 
torchvision >=0.14.0,<0.15.0 - monai >=1.1.0,<1.2.0 # other - - gunicorn >=20.1, <20.9 - - pandas >=1.2.3,<2.0.0 - - cachelib == 0.7.0 - - python-minifier ==2.5.0 - - PyJWT == 2.4.0 - - Flask-JWT-Extended == 4.4.2 + - gunicorn ~=20.1.0 + # pandas 2.x recently released (2023-04) but few breaking changes + - pandas ~=2.0.1 + - cachelib >=0.10.2,<0.11.0 + - python-minifier ~=2.5.0 + - PyJWT >=2.7.0,<2.8.0 + - Flask-JWT-Extended >=4.4.4,<4.5.0 # FLamby - git+https://github.com/owkin/FLamby@main diff --git a/envs/development/conda/fedbiomed-node.yaml b/envs/development/conda/fedbiomed-node.yaml index 7311c6958..18386d108 100644 --- a/envs/development/conda/fedbiomed-node.yaml +++ b/envs/development/conda/fedbiomed-node.yaml @@ -41,7 +41,6 @@ dependencies: - scikit-learn >=1.2.0,<1.3.0 # other - itk >=5.3.0,<5.4.0 - # nn - pip: # nn # torch 2.x recently released (2023-03) and not yet supported by some deps including: opacus, declearn diff --git a/envs/development/conda/fedbiomed-researcher.yaml b/envs/development/conda/fedbiomed-researcher.yaml index 4c7838f83..b5bca4f6d 100644 --- a/envs/development/conda/fedbiomed-researcher.yaml +++ b/envs/development/conda/fedbiomed-researcher.yaml @@ -10,63 +10,62 @@ channels: dependencies: # minimal environment - - python >=3.9,<3.10 - - pip - - jupyter - - ipython + - python >=3.10,<3.11 + - pip >= 23.0 + - jupyter ~=1.0.0 + - ipython ~=8.13.2 # tests - - pytest >6.2.2 - - tinydb >=4.4.0,<5.0.0 - - tabulate >=0.8.9,<0.9.0 - - nose - - coverage + - pytest ~=7.2.0 + - tinydb ~=4.7.1 + - tabulate >=0.9.0,<0.10.0 # tools - - colorama - - pyyaml + - colorama >=0.4.6,<0.5 # code - - GitPython >=3.1.14,<4.0.0 - - requests >=2.25.1,<3.0.0 - - paho-mqtt >=1.5.1,<2.0.0 - - validators >=0.18.2,<0.19.0 - - tqdm >=4.59.0,<5.0.0 - - git - - packaging >=23.0,<24.0 + - GitPython >=3.1.31,<4.0.0 + - requests ~=2.29.0 + - paho-mqtt ~=1.6.1 + - validators >=0.20.0,<0.21.0 + - tqdm ~=4.65.0 + - git ~=2.40.1 + - packaging ~=23.1 # these two have to be aligned - - cryptography ~=39.0 - - pyopenssl ~=23.0 + - cryptography ~=40.0.0 + - pyopenssl ~=23.1.1 # git notebook striper - - nbstripout - - joblib >=1.0.1 + - nbstripout >=0.6.1,<0.7.0 + - joblib >=1.2.0,<1.3.0 # sklearn # scipy >= 1.9 from conda-forge needs recent GLIBC thus causes issue 389 # with many current systems # another option is to install scipy from pip - - scipy >=1.8.0,<1.9.0 - - scikit-learn >=1.0.0,<1.1.0 + - scipy >=1.10.0,<1.11.0 + - scikit-learn >=1.2.0,<1.3.0 # other - - itk + - itk >=5.3.0,<5.4.0 - pip: # nn - - torch >=1.8.0,<2.0.0 - - torchvision >=0.9.0,<0.15.0 - - opacus >=1.2.0,<1.3.0 + # torch 2.x recently released (2023-03) and not yet supported by some deps including: opacus, declearn + - torch ~=1.13.0 + - torchvision >=0.14.0,<0.15.0 + - opacus >=1.4.0,<1.5.0 - monai >=1.1.0,<1.2.0 # other - msgpack ~=1.0 - persist-queue >=0.5.1,<0.6.0 - - pandas >=1.2.3,<2.0.0 + # pandas 2.x recently released (2023-04) but few breaking changes + - pandas ~=2.0.1 - openpyxl >= 3.0.9,<3.1 - - tensorboard - - JSON-log-formatter - - python-minifier ==2.5.0 + - tensorboard ~=2.13.0 + - JSON-log-formatter ~=0.5.2 + - python-minifier ~=2.5.0 # for nbconvert - - jupyter_contrib_nbextensions - - pathvalidate + - jupyter-contrib-nbextensions >=0.7.0,<0.8.0 + - pathvalidate ~=3.0.0 # FLamby - git+https://github.com/owkin/FLamby@main # declearn - - declearn[torch] ~= 2.1.0 + - declearn[torch] ~=2.1.0 - gmpy2 >=2.1,< 2.2 #### Notebook-specific packages #### # This section contains packages that are needed only to run 
specific notebooks - - unet == 0.7.7 + - unet >=0.7.7,<0.8.0 diff --git a/envs/vpn/conda/fedbiomed-gui.yaml b/envs/vpn/conda/fedbiomed-gui.yaml index 9d574146b..7b07e4717 100644 --- a/envs/vpn/conda/fedbiomed-gui.yaml +++ b/envs/vpn/conda/fedbiomed-gui.yaml @@ -7,41 +7,44 @@ name: fedbiomed-gui channels: - conda-forge - + dependencies: # common - - python >=3.9,<3.10 - - nodejs == 16.13.1 - - yarn >=1.22.19,<=1.22.99 - - pip - - ipython - - flask >= 2.0.0,<2.0.2 - - paho-mqtt >=1.5.1,<2.0.0 + - python >=3.10,<3.11 + - nodejs ~=18.15.0 + - yarn >=3.5.1,<3.6 + - pip >= 23.0 + - ipython ~=8.13.2 + - flask >= 2.3.2,<2.4.0 + - paho-mqtt ~=1.6.1 # tests - - tinydb >=4.4.0,<5.0.0 - - tabulate >=0.8.9,<0.9.0 - - jsonschema >=4.2.0,< 4.2.1 - - requests >=2.25.1,<3.0.0 - - git - - packaging >=23.0,<24.0 + - tinydb ~=4.7.1 + - tabulate >=0.9.0,<0.10.0 + - jsonschema >=4.17.3,<4.18.0 + - requests ~=2.29.0 + - git ~=2.40.1 + - packaging ~=23.1 # sklearn # scipy >= 1.9 from conda-forge needs recent GLIBC thus causes issue 389 # with many current systems # another option is to install scipy from pip - - scipy >=1.8.0,<1.9.0 - - scikit-learn >=1.0.0,<1.1.0 - - itk + - scipy >=1.10.0,<1.11.0 + - scikit-learn >=1.2.0,<1.3.0 + # other + - itk >=5.3.0,<5.4.0 - pip: # nn - - torch >=1.8.0,<2.0.0 - - torchvision >=0.9.0,<0.15.0 + # torch 2.x recently released (2023-03) and not yet supported by some deps including: opacus, declearn + - torch ~=1.13.0 + - torchvision >=0.14.0,<0.15.0 - monai >=1.1.0,<1.2.0 # other - - gunicorn >=20.1, <20.9 - - pandas >=1.2.3,<2.0.0 - - cachelib == 0.7.0 - - python-minifier ==2.5.0 - - PyJWT == 2.4.0 - - Flask-JWT-Extended == 4.4.2 + - gunicorn ~=20.1.0 + # pandas 2.x recently released (2023-04) but few breaking changes + - pandas ~=2.0.1 + - cachelib >=0.10.2,<0.11.0 + - python-minifier ~=2.5.0 + - PyJWT >=2.7.0,<2.8.0 + - Flask-JWT-Extended >=4.4.4,<4.5.0 # FLamby - git+https://github.com/owkin/FLamby@main diff --git a/envs/vpn/conda/fedbiomed-node.yaml b/envs/vpn/conda/fedbiomed-node.yaml index 7311c6958..18386d108 100644 --- a/envs/vpn/conda/fedbiomed-node.yaml +++ b/envs/vpn/conda/fedbiomed-node.yaml @@ -41,7 +41,6 @@ dependencies: - scikit-learn >=1.2.0,<1.3.0 # other - itk >=5.3.0,<5.4.0 - # nn - pip: # nn # torch 2.x recently released (2023-03) and not yet supported by some deps including: opacus, declearn diff --git a/envs/vpn/conda/fedbiomed-researcher.yaml b/envs/vpn/conda/fedbiomed-researcher.yaml index 33952c81c..b5bca4f6d 100644 --- a/envs/vpn/conda/fedbiomed-researcher.yaml +++ b/envs/vpn/conda/fedbiomed-researcher.yaml @@ -10,63 +10,62 @@ channels: dependencies: # minimal environment - - python >=3.9,<3.10 - - pip - - jupyter - - ipython + - python >=3.10,<3.11 + - pip >= 23.0 + - jupyter ~=1.0.0 + - ipython ~=8.13.2 # tests - - pytest >6.2.2 - - tinydb >=4.4.0,<5.0.0 - - tabulate >=0.8.9,<0.9.0 - - nose - - coverage + - pytest ~=7.2.0 + - tinydb ~=4.7.1 + - tabulate >=0.9.0,<0.10.0 # tools - - colorama - - pyyaml + - colorama >=0.4.6,<0.5 # code - - GitPython >=3.1.14,<4.0.0 - - requests >=2.25.1,<3.0.0 - - paho-mqtt >=1.5.1,<2.0.0 - - validators >=0.18.2,<0.19.0 - - tqdm >=4.59.0,<5.0.0 - - git - - packaging >=23.0,<24.0 + - GitPython >=3.1.31,<4.0.0 + - requests ~=2.29.0 + - paho-mqtt ~=1.6.1 + - validators >=0.20.0,<0.21.0 + - tqdm ~=4.65.0 + - git ~=2.40.1 + - packaging ~=23.1 # these two have to be aligned - - cryptography ~=39.0 - - pyopenssl ~=23.0 + - cryptography ~=40.0.0 + - pyopenssl ~=23.1.1 # git notebook striper - - nbstripout - - joblib >=1.0.1 + - 
nbstripout >=0.6.1,<0.7.0 + - joblib >=1.2.0,<1.3.0 # sklearn # scipy >= 1.9 from conda-forge needs recent GLIBC thus causes issue 389 # with many current systems # another option is to install scipy from pip - - scipy >=1.8.0,<1.9.0 - - scikit-learn >=1.0.0,<1.1.0 + - scipy >=1.10.0,<1.11.0 + - scikit-learn >=1.2.0,<1.3.0 # other - - itk + - itk >=5.3.0,<5.4.0 - pip: # nn - - torch >=1.8.0,<2.0.0 - - torchvision >=0.9.0,<0.15.0 - - opacus >=1.2.0,<1.3.0 + # torch 2.x recently released (2023-03) and not yet supported by some deps including: opacus, declearn + - torch ~=1.13.0 + - torchvision >=0.14.0,<0.15.0 + - opacus >=1.4.0,<1.5.0 - monai >=1.1.0,<1.2.0 # other - msgpack ~=1.0 - persist-queue >=0.5.1,<0.6.0 - - pandas >=1.2.3,<2.0.0 + # pandas 2.x recently released (2023-04) but few breaking changes + - pandas ~=2.0.1 - openpyxl >= 3.0.9,<3.1 - - tensorboard - - JSON-log-formatter - - python-minifier ==2.5.0 + - tensorboard ~=2.13.0 + - JSON-log-formatter ~=0.5.2 + - python-minifier ~=2.5.0 # for nbconvert - - jupyter_contrib_nbextensions - - pathvalidate + - jupyter-contrib-nbextensions >=0.7.0,<0.8.0 + - pathvalidate ~=3.0.0 # FLamby - git+https://github.com/owkin/FLamby@main # declearn - - declearn[torch] ~= 2.0.1 + - declearn[torch] ~=2.1.0 - gmpy2 >=2.1,< 2.2 #### Notebook-specific packages #### # This section contains packages that are needed only to run specific notebooks - - unet == 0.7.7 + - unet >=0.7.7,<0.8.0 diff --git a/tests/test_common_cli.py b/tests/test_common_cli.py index 038604f51..9878d5e77 100644 --- a/tests/test_common_cli.py +++ b/tests/test_common_cli.py @@ -1,4 +1,3 @@ -import nose.tools.nontrivial import unittest import tempfile import shutil From ca42dd026f85ca36c5fe2f90c86102039caff9d8 Mon Sep 17 00:00:00 2001 From: Marc Vesin Date: Tue, 23 May 2023 18:03:01 +0200 Subject: [PATCH 06/41] WIP package upgrade conda files - install scipy from pip to avoid restrictions on glivc version --- envs/development/conda/fedbiomed-gui.yaml | 11 +++++------ envs/development/conda/fedbiomed-node.yaml | 11 +++++------ envs/development/conda/fedbiomed-researcher.yaml | 11 +++++------ envs/vpn/conda/fedbiomed-gui.yaml | 11 +++++------ envs/vpn/conda/fedbiomed-node.yaml | 11 +++++------ envs/vpn/conda/fedbiomed-researcher.yaml | 11 +++++------ 6 files changed, 30 insertions(+), 36 deletions(-) diff --git a/envs/development/conda/fedbiomed-gui.yaml b/envs/development/conda/fedbiomed-gui.yaml index 7b07e4717..87fb1bdca 100644 --- a/envs/development/conda/fedbiomed-gui.yaml +++ b/envs/development/conda/fedbiomed-gui.yaml @@ -24,15 +24,14 @@ dependencies: - requests ~=2.29.0 - git ~=2.40.1 - packaging ~=23.1 - # sklearn - # scipy >= 1.9 from conda-forge needs recent GLIBC thus causes issue 389 - # with many current systems - # another option is to install scipy from pip - - scipy >=1.10.0,<1.11.0 - - scikit-learn >=1.2.0,<1.3.0 # other - itk >=5.3.0,<5.4.0 - pip: + # sklearn + # + scipy >= 1.9 from conda-forge needs recent GLIBC thus causes issue 389 with many current systems + # + another option is to install scipy from pip which supports older GLIBC + - scipy >=1.10.0,<1.11.0 + - scikit-learn >=1.2.0,<1.3.0 # nn # torch 2.x recently released (2023-03) and not yet supported by some deps including: opacus, declearn - torch ~=1.13.0 diff --git a/envs/development/conda/fedbiomed-node.yaml b/envs/development/conda/fedbiomed-node.yaml index 18386d108..e644f08a6 100644 --- a/envs/development/conda/fedbiomed-node.yaml +++ b/envs/development/conda/fedbiomed-node.yaml @@ -33,15 +33,14 @@ 
dependencies: # git notebook striper - nbstripout >=0.6.1,<0.7.0 - joblib >=1.2.0,<1.3.0 - # sklearn - # scipy >= 1.9 from conda-forge needs recent GLIBC thus causes issue 389 - # with some older systems - # another option is to install scipy from pip - - scipy >=1.10.0,<1.11.0 - - scikit-learn >=1.2.0,<1.3.0 # other - itk >=5.3.0,<5.4.0 - pip: + # sklearn + # + scipy >= 1.9 from conda-forge needs recent GLIBC thus causes issue 389 with many current systems + # + another option is to install scipy from pip which supports older GLIBC + - scipy >=1.10.0,<1.11.0 + - scikit-learn >=1.2.0,<1.3.0 # nn # torch 2.x recently released (2023-03) and not yet supported by some deps including: opacus, declearn - torch ~=1.13.0 diff --git a/envs/development/conda/fedbiomed-researcher.yaml b/envs/development/conda/fedbiomed-researcher.yaml index b5bca4f6d..85cbeadd7 100644 --- a/envs/development/conda/fedbiomed-researcher.yaml +++ b/envs/development/conda/fedbiomed-researcher.yaml @@ -34,15 +34,14 @@ dependencies: # git notebook striper - nbstripout >=0.6.1,<0.7.0 - joblib >=1.2.0,<1.3.0 - # sklearn - # scipy >= 1.9 from conda-forge needs recent GLIBC thus causes issue 389 - # with many current systems - # another option is to install scipy from pip - - scipy >=1.10.0,<1.11.0 - - scikit-learn >=1.2.0,<1.3.0 # other - itk >=5.3.0,<5.4.0 - pip: + # sklearn + # + scipy >= 1.9 from conda-forge needs recent GLIBC thus causes issue 389 with many current systems + # + another option is to install scipy from pip which supports older GLIBC + - scipy >=1.10.0,<1.11.0 + - scikit-learn >=1.2.0,<1.3.0 # nn # torch 2.x recently released (2023-03) and not yet supported by some deps including: opacus, declearn - torch ~=1.13.0 diff --git a/envs/vpn/conda/fedbiomed-gui.yaml b/envs/vpn/conda/fedbiomed-gui.yaml index 7b07e4717..87fb1bdca 100644 --- a/envs/vpn/conda/fedbiomed-gui.yaml +++ b/envs/vpn/conda/fedbiomed-gui.yaml @@ -24,15 +24,14 @@ dependencies: - requests ~=2.29.0 - git ~=2.40.1 - packaging ~=23.1 - # sklearn - # scipy >= 1.9 from conda-forge needs recent GLIBC thus causes issue 389 - # with many current systems - # another option is to install scipy from pip - - scipy >=1.10.0,<1.11.0 - - scikit-learn >=1.2.0,<1.3.0 # other - itk >=5.3.0,<5.4.0 - pip: + # sklearn + # + scipy >= 1.9 from conda-forge needs recent GLIBC thus causes issue 389 with many current systems + # + another option is to install scipy from pip which supports older GLIBC + - scipy >=1.10.0,<1.11.0 + - scikit-learn >=1.2.0,<1.3.0 # nn # torch 2.x recently released (2023-03) and not yet supported by some deps including: opacus, declearn - torch ~=1.13.0 diff --git a/envs/vpn/conda/fedbiomed-node.yaml b/envs/vpn/conda/fedbiomed-node.yaml index 18386d108..e644f08a6 100644 --- a/envs/vpn/conda/fedbiomed-node.yaml +++ b/envs/vpn/conda/fedbiomed-node.yaml @@ -33,15 +33,14 @@ dependencies: # git notebook striper - nbstripout >=0.6.1,<0.7.0 - joblib >=1.2.0,<1.3.0 - # sklearn - # scipy >= 1.9 from conda-forge needs recent GLIBC thus causes issue 389 - # with some older systems - # another option is to install scipy from pip - - scipy >=1.10.0,<1.11.0 - - scikit-learn >=1.2.0,<1.3.0 # other - itk >=5.3.0,<5.4.0 - pip: + # sklearn + # + scipy >= 1.9 from conda-forge needs recent GLIBC thus causes issue 389 with many current systems + # + another option is to install scipy from pip which supports older GLIBC + - scipy >=1.10.0,<1.11.0 + - scikit-learn >=1.2.0,<1.3.0 # nn # torch 2.x recently released (2023-03) and not yet supported by some deps 
including: opacus, declearn - torch ~=1.13.0 diff --git a/envs/vpn/conda/fedbiomed-researcher.yaml b/envs/vpn/conda/fedbiomed-researcher.yaml index b5bca4f6d..85cbeadd7 100644 --- a/envs/vpn/conda/fedbiomed-researcher.yaml +++ b/envs/vpn/conda/fedbiomed-researcher.yaml @@ -34,15 +34,14 @@ dependencies: # git notebook striper - nbstripout >=0.6.1,<0.7.0 - joblib >=1.2.0,<1.3.0 - # sklearn - # scipy >= 1.9 from conda-forge needs recent GLIBC thus causes issue 389 - # with many current systems - # another option is to install scipy from pip - - scipy >=1.10.0,<1.11.0 - - scikit-learn >=1.2.0,<1.3.0 # other - itk >=5.3.0,<5.4.0 - pip: + # sklearn + # + scipy >= 1.9 from conda-forge needs recent GLIBC thus causes issue 389 with many current systems + # + another option is to install scipy from pip which supports older GLIBC + - scipy >=1.10.0,<1.11.0 + - scikit-learn >=1.2.0,<1.3.0 # nn # torch 2.x recently released (2023-03) and not yet supported by some deps including: opacus, declearn - torch ~=1.13.0 From 427bfb6da19fa46cb5db7f5c416a8cc55443a90d Mon Sep 17 00:00:00 2001 From: Marc Vesin Date: Tue, 23 May 2023 18:27:00 +0200 Subject: [PATCH 07/41] WIP package version upgrade - fix unit test (thx SC) --- tests/test_job.py | 17 ++++++++--------- 1 file changed, 8 insertions(+), 9 deletions(-) diff --git a/tests/test_job.py b/tests/test_job.py index 65afb7822..8f4fe83a8 100644 --- a/tests/test_job.py +++ b/tests/test_job.py @@ -26,7 +26,7 @@ from fedbiomed.researcher.job import Job from fedbiomed.researcher.requests import Requests from fedbiomed.researcher.responses import Responses - +import fedbiomed.researcher.job # needed for specific mocking class TestJob(ResearcherTestCase): @@ -189,18 +189,17 @@ def test_job_init_05_build_wrongly_saved_model(self, mock_logger_critical): mock_logger_critical.assert_called_once() @patch('fedbiomed.common.logger.logger.critical') - @patch('inspect.isclass') def test_job_06_init_isclass_raises_error(self, - mock_isclass, mock_logger_critical): """ Test initialization when inspect.isclass raises NameError""" - mock_isclass.side_effect = NameError - with self.assertRaises(NameError): - _ = Job(training_plan_class='FakeModel', - training_args=TrainingArgs({"batch_size": 12}, only_required=False), - data=self.fds) - mock_logger_critical.assert_called_once() + with patch.object(fedbiomed.researcher.job, 'inspect') as mock_inspect: + mock_inspect.isclass.side_effect = NameError + with self.assertRaises(NameError): + _ = Job(training_plan_class='FakeModel', + training_args=TrainingArgs({"batch_size": 12}, only_required=False), + data=self.fds) + mock_logger_critical.assert_called_once() @patch('fedbiomed.common.logger.logger.error') def test_job_07_initialization_raising_exception_save_and_save_code(self, From ae567e5e3f3fb393bfb53aae8e8ad755cb16064e Mon Sep 17 00:00:00 2001 From: Marc Vesin Date: Wed, 24 May 2023 12:09:15 +0200 Subject: [PATCH 08/41] upgrade yarn packages for GUI node --- .gitignore | 3 +++ gui/ui/package.json | 53 +++++++++++++++++++++++---------------------- 2 files changed, 30 insertions(+), 26 deletions(-) diff --git a/.gitignore b/.gitignore index 87b1b1e8e..939a16419 100644 --- a/.gitignore +++ b/.gitignore @@ -176,6 +176,9 @@ Thumbs.db #thumbnail cache on Windows node_modules .pnp .pnp.js +gui/ui/.yarn +gui/ui/.pnp.cjs +gui/ui/.pnp.loader.mjs # testing coverage diff --git a/gui/ui/package.json b/gui/ui/package.json index 3a0fd2c7f..d9fa2e0bf 100644 --- a/gui/ui/package.json +++ b/gui/ui/package.json @@ -4,32 +4,35 @@ "private": true, 
"dependencies": { "@elastic/datemath": "^5.0.3", - "@elastic/eui": "^66.0.0", - "@emotion/react": "^11.10.0", - "@testing-library/jest-dom": "^5.15.1", - "@testing-library/react": "^11.2.7", - "@testing-library/user-event": "^12.8.3", - "axios": "^0.24.0", - "bootstrap": "^5.1.3", - "http-proxy-middleware": "^2.0.1", + "@elastic/eui": "^81.0.0", + "@emotion/cache": "^11.11.0", + "@emotion/css": "^11.11.0", + "@emotion/react": "^11.11.0", + "@testing-library/jest-dom": "^5.16.5", + "@testing-library/react": "^14.0.0", + "@testing-library/user-event": "^14.4.3", + "axios": "^1.4.0", + "bootstrap": "^5.2.3", + "http-proxy-middleware": "^2.0.6", "install": "^0.13.0", "jwt-decode": "^3.1.2", "moment": "^2.29.4", - "npm": "^8.2.0", + "npm": "^9.6.7", "prop-types": "^15.8.1", - "react": "^18.1.0", - "react-dom": "^18.1.0", - "react-jwt": "^1.1.6", - "react-moment": "^1.1.2", - "react-redux": "^7.2.6", - "react-router-dom": "^6.0.2", - "react-scripts": "^5.0.0", - "react-select": "^5.4.0", + "react": "^18.2.0", + "react-app": "^1.1.2", + "react-dom": "^18.2.0", + "react-jwt": "^1.1.8", + "react-moment": "^1.1.3", + "react-redux": "^8.0.5", + "react-router-dom": "^6.11.2", + "react-scripts": "^5.0.1", + "react-select": "^5.7.3", "react-syntax-highlighter": "^15.5.0", - "redux": "^4.1.2", - "redux-thunk": "^2.4.1", + "redux": "^4.2.1", + "redux-thunk": "^2.4.2", "thunk": "^0.0.1", - "web-vitals": "^1.1.2" + "web-vitals": "^3.3.1" }, "scripts": { "start": "react-scripts start", @@ -37,12 +40,6 @@ "test": "react-scripts test", "eject": "react-scripts eject" }, - "eslintConfig": { - "extends": [ - "react-app", - "react-app/jest" - ] - }, "browserslist": { "production": [ ">0.2%", @@ -54,5 +51,9 @@ "last 1 firefox version", "last 1 safari version" ] + }, + "packageManager": "yarn@3.5.1", + "devDependencies": { + "yarn-upgrade-all": "^0.7.2" } } From 3e17434ad6d6dff4fbda1daf29c60a8cb6419fe2 Mon Sep 17 00:00:00 2001 From: Marc Vesin Date: Wed, 24 May 2023 13:59:01 +0200 Subject: [PATCH 09/41] misc additional `fedbiomed_environment clean` for gui --- scripts/fedbiomed_environment | 9 ++++++++- 1 file changed, 8 insertions(+), 1 deletion(-) diff --git a/scripts/fedbiomed_environment b/scripts/fedbiomed_environment index dd7a884a5..34d66a620 100755 --- a/scripts/fedbiomed_environment +++ b/scripts/fedbiomed_environment @@ -146,7 +146,14 @@ case $1 in # gui cleaning echo " * Node GUI cleaning" - gui_temp_paths='var/gui-build gui/ui/node_modules gui/ui/yarn.lock' + gui_temp_paths=' + var/gui-build + gui/ui/node_modules + gui/ui/yarn.lock + gui/ui/.yarn + gui/ui/.pnp.cjs + gui/ui/.pnp.loader.mjs + ' for p in $gui_temp_paths ; do if [ -e "${basedir}/$p" ] ; then echo "[INFO] Removing directory ${basedir}/$p" From 654907e7513e722abf30ef1c1e01f60ee5cc4c35 Mon Sep 17 00:00:00 2001 From: Marc Vesin Date: Wed, 24 May 2023 17:06:10 +0200 Subject: [PATCH 10/41] replace compose v1 (`docker-compose`) by v2 (`docker compose`) --- CHANGELOG.md | 2 +- README.md | 2 +- envs/vpn/README.md | 156 +++++++++--------- envs/vpn/docker/docker-compose.yml | 2 +- envs/vpn/docker/docker-compose_run_node.yml | 2 +- envs/vpn/docker/gui/build_files/bashrc_append | 6 +- .../vpn/docker/mqtt/build_files/bashrc_append | 6 +- .../vpn/docker/node/build_files/bashrc_append | 6 +- .../researcher/build_files/bashrc_append | 6 +- .../docker/restful/build_files/bashrc_append | 6 +- .../vpnserver/build_files/bashrc_append | 6 +- scripts/CI_build | 2 +- scripts/fedbiomed_environment | 2 +- scripts/fedbiomed_run | 6 +- scripts/fedbiomed_vpn | 70 
++++---- scripts/run_integration_test | 2 +- scripts/run_test_mnist | 2 +- 17 files changed, 143 insertions(+), 141 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 791063211..748b885f8 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -13,7 +13,7 @@ ## 2023-02-08 version 4.2 -- add support for docker-compose v.2 file syntax +- add support for `docker compose` v.2 file syntax - fix model weights computation occurring during aggregation, by sending dataset sample size from node to researcher - fix GUI regression failure, after merging MP-SPDZ certificate generation - such issue was freezing some web browsers - fix incoherent tag handling: make explicit the way datasets are tagged on nodes diff --git a/README.md b/README.md index 03458c5a3..f7627af1f 100644 --- a/README.md +++ b/README.md @@ -34,7 +34,7 @@ An installation guide is also provided for Windows10, which relies on WSL2: http To ensure fedbiomed will work fine, you need to install before : * docker -* docker-compose +* docker compose v2 (aka docker compose plugin) * conda ### clone repo diff --git a/envs/vpn/README.md b/envs/vpn/README.md index 866d1cdc9..95eb3cc31 100644 --- a/envs/vpn/README.md +++ b/envs/vpn/README.md @@ -18,18 +18,18 @@ Which machine to use ? Supported operating systems for using containers : - tested on **Fedora 35**, should work for recent RedHat based Linux - - tested on **Ubuntu 20.04**, should work for recent Debian based Linux + - tested on **Ubuntu 22.04**, should work for recent Debian based Linux - tested on recent **MacOS X** - tested on **Windows 10** 21H2 with WSL2 using a Ubuntu-20.04 distribution, should work with most Windows 10/11 and other recent Linux distributions Pre-requisites for using containers : * **`docker >= 20.10.0`** is needed to build mqtt, see [there](https://wiki.alpinelinux.org/wiki/Release_Notes_for_Alpine_3.14.0#faccessat2). With older docker version it fails with a `make: sh: Operation not permitted` -* **`docker-compose` >= 1.27.0** is needed for extended file format for [GPU support in docker](https://docs.docker.com/compose/gpu-support/) even if you're not using GPU in container. - - some distributions (eg Ubuntu 20.04) don't provide a package with a recent enough version. - - Type `docker-compose --version` to check installed version. - - You can use your usual package manager to install up-to-date version (eg: `sudo apt-get update && sudo apt-get install docker-compose` for apt, `sudo dnf clean metadata && sudo dnf update docker-compose` for dnf). - - If no suitable package exist for your system, you can use [`docker-compose` install page](https://docs.docker.com/compose/install/). +* **`docker compose` >= 2.0** is needed for extended file format for [GPU support in docker](https://docs.docker.com/compose/gpu-support/) even if you're not using GPU in container. + - some distributions (eg Fedora 32) don't provide a package with a recent enough version. + - Type `docker compose version` to check installed version (if it gives an error and `docker-compose --version` succeeds then you have a compose v1 installed) + - You can use your usual package manager to install up-to-date version (eg: `sudo apt-get update && sudo apt-get remove docker-compose && sudo apt-get install docker-compose-plugin` for apt, `sudo dnf clean metadata && sudo dnf remove docker-compose && sudo dnf update docker-compose-plugin` for dnf). + - If no suitable package exist for your system, you can use the [docker compose plugin install page](https://docs.docker.com/compose/install/linux/). 
Installation notes for Windows 10 with WSL2 Ubuntu-20.04: * build of containers `mqtt` `restful` may fail in `cargo install` step with error `spurious network error [...] Timeout was reached`. This is due to bad name resolution of `crates.io` package respository with default WSL2 DNS configuration. If this happens connect to wsl (`wsl` from Windows command line tool), get admin privileges (`sudo bash`) and create a [`/etc/wsl.conf`](https://docs.microsoft.com/fr-fr/windows/wsl/wsl-config) file containing: @@ -65,7 +65,7 @@ Usually build each image separately when initializing each container (see after) ## **TODO**: check if we can use different id than the account building the images # ## when running on a single machine : build all needed containers at one time with -#[user@laptop $] CONTAINER_UID=$(id -u) CONTAINER_GID=$(id -g) CONTAINER_USER=$(id -un | sed 's/[^[:alnum:]]/_/g') CONTAINER_GROUP=$(id -gn | sed 's/[^[:alnum:]]/_/g') docker-compose build base vpnserver mqtt restful basenode node gui researcher +#[user@laptop $] CONTAINER_UID=$(id -u) CONTAINER_GID=$(id -g) CONTAINER_USER=$(id -un | sed 's/[^[:alnum:]]/_/g') CONTAINER_GROUP=$(id -gn | sed 's/[^[:alnum:]]/_/g') docker compose build base vpnserver mqtt restful basenode node gui researcher ``` ### initializing vpnserver @@ -75,8 +75,8 @@ Run this only at first launch of container or after cleaning : * build container ```bash [user@network $] cd ./envs/vpn/docker -[user@network $] CONTAINER_UID=$(id -u) CONTAINER_GID=$(id -g) CONTAINER_USER=$(id -un | sed 's/[^[:alnum:]]/_/g') CONTAINER_GROUP=$(id -gn | sed 's/[^[:alnum:]]/_/g') docker-compose build base -[user@network $] CONTAINER_UID=$(id -u) CONTAINER_GID=$(id -g) CONTAINER_USER=$(id -un | sed 's/[^[:alnum:]]/_/g') CONTAINER_GROUP=$(id -gn | sed 's/[^[:alnum:]]/_/g') docker-compose build vpnserver +[user@network $] CONTAINER_UID=$(id -u) CONTAINER_GID=$(id -g) CONTAINER_USER=$(id -un | sed 's/[^[:alnum:]]/_/g') CONTAINER_GROUP=$(id -gn | sed 's/[^[:alnum:]]/_/g') docker compose build base +[user@network $] CONTAINER_UID=$(id -u) CONTAINER_GID=$(id -g) CONTAINER_USER=$(id -un | sed 's/[^[:alnum:]]/_/g') CONTAINER_GROUP=$(id -gn | sed 's/[^[:alnum:]]/_/g') docker compose build vpnserver ``` * set the VPN server public IP *VPN_SERVER_PUBLIC_ADDR* ```bash @@ -85,11 +85,11 @@ Run this only at first launch of container or after cleaning : ``` * launch container ```bash -[user@network $] docker-compose up -d vpnserver +[user@network $] docker compose up -d vpnserver ``` * connect and generate config for components ```bash -[user@network $] docker-compose exec vpnserver bash +[user@network $] docker compose exec vpnserver bash [root@vpnserver-container #] python ./vpn/bin/configure_peer.py genconf management mqtt [root@vpnserver-container #] python ./vpn/bin/configure_peer.py genconf management restful [root@vpnserver-container #] python ./vpn/bin/configure_peer.py genconf node NODETAG @@ -99,7 +99,7 @@ Run this only at first launch of container or after cleaning : Run this for all launches of the container : * launch container ```bash -[user@network $] docker-compose up -d vpnserver +[user@network $] docker compose up -d vpnserver ``` ### initializing mqtt @@ -109,7 +109,7 @@ Run this only at first launch of container or after cleaning : * build container ```bash [user@network $] cd ./envs/vpn/docker -[user@network $] CONTAINER_UID=$(id -u) CONTAINER_GID=$(id -g) CONTAINER_USER=$(id -un | sed 's/[^[:alnum:]]/_/g') CONTAINER_GROUP=$(id -gn | sed 's/[^[:alnum:]]/_/g') docker-compose 
build mqtt +[user@network $] CONTAINER_UID=$(id -u) CONTAINER_GID=$(id -g) CONTAINER_USER=$(id -un | sed 's/[^[:alnum:]]/_/g') CONTAINER_GROUP=$(id -gn | sed 's/[^[:alnum:]]/_/g') docker compose build mqtt ``` * generate VPN client for this container (see above in vpnserver) * configure the VPN client for this container @@ -119,30 +119,30 @@ Run this only at first launch of container or after cleaning : ``` * launch container ```bash -[user@network $] docker-compose up -d mqtt +[user@network $] docker compose up -d mqtt ``` * retrieve the *publickey* ```bash -[user@network $] docker-compose exec mqtt wg show wg0 public-key +[user@network $] docker compose exec mqtt wg show wg0 public-key ``` * connect to the VPN server to declare the container as a VPN client with cut-paste of *publickey* ```bash -[user@network $] docker-compose exec vpnserver python ./vpn/bin/configure_peer.py add management mqtt *publickey* +[user@network $] docker compose exec vpnserver python ./vpn/bin/configure_peer.py add management mqtt *publickey* ## other option : -#[user@network $] docker-compose exec vpnserver bash +#[user@network $] docker compose exec vpnserver bash #[root@vpnserver-container #] python ./vpn/bin/configure_peer.py add management mqtt *publickey* ``` * check the container correctly established a VPN with vpnserver: ```bash # 10.220.0.1 is vpnserver contacted inside the VPN # it should answer to the ping -[user@network $] docker-compose exec mqtt ping -c 3 -W 1 10.220.0.1 +[user@network $] docker compose exec mqtt ping -c 3 -W 1 10.220.0.1 ``` Run this for all launches of the container : * launch container ```bash -[user@network $] docker-compose up -d mqtt +[user@network $] docker compose up -d mqtt ``` ### initializing restful @@ -152,7 +152,7 @@ Run this only at first launch of container or after cleaning : * build container ```bash [user@network $] cd ./envs/vpn/docker -[user@network $] CONTAINER_UID=$(id -u) CONTAINER_GID=$(id -g) CONTAINER_USER=$(id -un | sed 's/[^[:alnum:]]/_/g') CONTAINER_GROUP=$(id -gn | sed 's/[^[:alnum:]]/_/g') docker-compose build restful +[user@network $] CONTAINER_UID=$(id -u) CONTAINER_GID=$(id -g) CONTAINER_USER=$(id -un | sed 's/[^[:alnum:]]/_/g') CONTAINER_GROUP=$(id -gn | sed 's/[^[:alnum:]]/_/g') docker compose build restful ``` * generate VPN client for this container (see above in vpnserver) * configure the VPN client for this container @@ -162,27 +162,27 @@ Run this only at first launch of container or after cleaning : ``` * launch container ```bash -[user@network $] docker-compose up -d restful +[user@network $] docker compose up -d restful ``` * retrieve the *publickey* ```bash -[user@network $] docker-compose exec restful wg show wg0 public-key +[user@network $] docker compose exec restful wg show wg0 public-key ``` * connect to the VPN server to declare the container as a VPN client with cut-paste of *publickey* ```bash -[user@network $] docker-compose exec vpnserver python ./vpn/bin/configure_peer.py add management restful *publickey* +[user@network $] docker compose exec vpnserver python ./vpn/bin/configure_peer.py add management restful *publickey* ``` * check the container correctly established a VPN with vpnserver: ```bash # 10.220.0.1 is vpnserver contacted inside the VPN # it should answer to the ping -[user@network $] docker-compose exec restful ping -c 3 -W 1 10.220.0.1 +[user@network $] docker compose exec restful ping -c 3 -W 1 10.220.0.1 ``` Run this for all launches of the container : * launch container ```bash -[user@network $] 
docker-compose up -d restful +[user@network $] docker compose up -d restful ``` ### initializing node @@ -197,13 +197,13 @@ Run this only at first launch of container or after cleaning : * build container ```bash -[user@node $] CONTAINER_UID=$(id -u) CONTAINER_GID=$(id -g) CONTAINER_USER=$(id -un | sed 's/[^[:alnum:]]/_/g') CONTAINER_GROUP=$(id -gn | sed 's/[^[:alnum:]]/_/g') docker-compose build basenode -[user@node $] CONTAINER_UID=$(id -u) CONTAINER_GID=$(id -g) CONTAINER_USER=$(id -un | sed 's/[^[:alnum:]]/_/g') CONTAINER_GROUP=$(id -gn | sed 's/[^[:alnum:]]/_/g') docker-compose build node +[user@node $] CONTAINER_UID=$(id -u) CONTAINER_GID=$(id -g) CONTAINER_USER=$(id -un | sed 's/[^[:alnum:]]/_/g') CONTAINER_GROUP=$(id -gn | sed 's/[^[:alnum:]]/_/g') docker compose build basenode +[user@node $] CONTAINER_UID=$(id -u) CONTAINER_GID=$(id -g) CONTAINER_USER=$(id -un | sed 's/[^[:alnum:]]/_/g') CONTAINER_GROUP=$(id -gn | sed 's/[^[:alnum:]]/_/g') docker compose build node ``` Alternative: build an (thiner) image without GPU support if you will never use it ```bash -[user@build $] CONTAINER_UID=$(id -u) CONTAINER_GID=$(id -g) CONTAINER_USER=$(id -un | sed 's/[^[:alnum:]]/_/g') CONTAINER_GROUP=$(id -gn | sed 's/[^[:alnum:]]/_/g') docker-compose build basenode-no-gpu -[user@build $] CONTAINER_UID=$(id -u) CONTAINER_GID=$(id -g) CONTAINER_USER=$(id -un | sed 's/[^[:alnum:]]/_/g') CONTAINER_GROUP=$(id -gn | sed 's/[^[:alnum:]]/_/g') docker-compose build node +[user@build $] CONTAINER_UID=$(id -u) CONTAINER_GID=$(id -g) CONTAINER_USER=$(id -un | sed 's/[^[:alnum:]]/_/g') CONTAINER_GROUP=$(id -gn | sed 's/[^[:alnum:]]/_/g') docker compose build basenode-no-gpu +[user@build $] CONTAINER_UID=$(id -u) CONTAINER_GID=$(id -g) CONTAINER_USER=$(id -un | sed 's/[^[:alnum:]]/_/g') CONTAINER_GROUP=$(id -gn | sed 's/[^[:alnum:]]/_/g') docker compose build node ``` Then follow the common instructions for nodes (below). @@ -228,13 +228,13 @@ On the build machine * in this example, we build the container with user `fedbiomed` (id `1234`) and group `fedbiomed` (id `1234`). Account name and id used on the node machine may differ (see below). 
* build container ```bash -[user@build $] CONTAINER_UID=1234 CONTAINER_GID=1234 CONTAINER_USER=fedbiomed CONTAINER_GROUP=fedbiomed docker-compose build basenode -[user@build $] CONTAINER_UID=1234 CONTAINER_GID=1234 CONTAINER_USER=fedbiomed CONTAINER_GROUP=fedbiomed docker-compose build node +[user@build $] CONTAINER_UID=1234 CONTAINER_GID=1234 CONTAINER_USER=fedbiomed CONTAINER_GROUP=fedbiomed docker compose build basenode +[user@build $] CONTAINER_UID=1234 CONTAINER_GID=1234 CONTAINER_USER=fedbiomed CONTAINER_GROUP=fedbiomed docker compose build node ``` Alternative: build an (thiner) image without GPU support if you will never use it ```bash -[user@build $] CONTAINER_UID=1234 CONTAINER_GID=1234 CONTAINER_USER=fedbiomed CONTAINER_GROUP=fedbiomed docker-compose build basenode-no-gpu -[user@build $] CONTAINER_UID=1234 CONTAINER_GID=1234 CONTAINER_USER=fedbiomed CONTAINER_GROUP=fedbiomed docker-compose build node +[user@build $] CONTAINER_UID=1234 CONTAINER_GID=1234 CONTAINER_USER=fedbiomed CONTAINER_GROUP=fedbiomed docker compose build basenode-no-gpu +[user@build $] CONTAINER_UID=1234 CONTAINER_GID=1234 CONTAINER_USER=fedbiomed CONTAINER_GROUP=fedbiomed docker compose build node ``` * save image for container ```bash @@ -244,7 +244,7 @@ On the build machine ```bash [user@build $] cd ./envs/vpn/docker # if needed, clean the configurations in ./node/run_mounts before -[user@build $] tar cvzf /tmp/vpn-node-files.tar.gz ./docker-compose_run_node.yml ./node/run_mounts +[user@build $] tar cvzf /tmp/vpn-node-files.tar.gz ./docker compose_run_node.yml ./node/run_mounts ``` On the node machine @@ -259,7 +259,7 @@ On the node machine [user@node $] mkdir -p ./envs/vpn/docker [user@node $] cd ./envs/vpn/docker [user@node $] tar xvzf /tmp/vpn-node-files.tar.gz -[user@node $] mv docker-compose_run_node.yml docker-compose.yml +[user@node $] mv docker compose_run_node.yml docker compose.yml ``` * if needed load data to be passed to container ```bash @@ -290,28 +290,28 @@ Run this only at first launch of container or after cleaning : * launch container ```bash [user@node $] NODE=node -[user@node $] CONTAINER_UID=$(id -u) CONTAINER_GID=$(id -g) CONTAINER_USER=$(id -un | sed 's/[^[:alnum:]]/_/g') CONTAINER_GROUP=$(id -gn | sed 's/[^[:alnum:]]/_/g') docker-compose up -d $NODE +[user@node $] CONTAINER_UID=$(id -u) CONTAINER_GID=$(id -g) CONTAINER_USER=$(id -un | sed 's/[^[:alnum:]]/_/g') CONTAINER_GROUP=$(id -gn | sed 's/[^[:alnum:]]/_/g') docker compose up -d $NODE ``` Alternative: launch container with Nvidia GPU support activated. Before launching, install [all the pre-requisites for GPU support](#gpu-support-in-container). ```bash [user@node $] NODE=node-gpu -[user@node $] CONTAINER_UID=$(id -u) CONTAINER_GID=$(id -g) CONTAINER_USER=$(id -un | sed 's/[^[:alnum:]]/_/g') CONTAINER_GROUP=$(id -gn | sed 's/[^[:alnum:]]/_/g') docker-compose up -d $NODE +[user@node $] CONTAINER_UID=$(id -u) CONTAINER_GID=$(id -g) CONTAINER_USER=$(id -un | sed 's/[^[:alnum:]]/_/g') CONTAINER_GROUP=$(id -gn | sed 's/[^[:alnum:]]/_/g') docker compose up -d $NODE ``` * note : `CONTAINER_{UID,GID,USER,GROUP}` are not necessary if using the same identity as in for the build, but they need to have a read/write access to the directories mounted from the host machine's filesystem. - * note : when using a different identity than at build time, `docker-compose up` may take up to a few dozen seconds to complete and node be ready for using. This is the time for re-assigning some installed resources in the container to the new account. 
+ * note : when using a different identity than at build time, `docker compose up` may take up to a few dozen seconds to complete and node be ready for using. This is the time for re-assigning some installed resources in the container to the new account. * retrieve the *publickey* ```bash -[user@node $] docker-compose exec $NODE wg show wg0 public-key +[user@node $] docker compose exec $NODE wg show wg0 public-key ``` * connect to the VPN server to declare the container as a VPN client with cut-paste of *publickey* ```bash -[user@network $] docker-compose exec vpnserver python ./vpn/bin/configure_peer.py add node NODETAG *publickey* +[user@network $] docker compose exec vpnserver python ./vpn/bin/configure_peer.py add node NODETAG *publickey* ``` * check the container correctly established a VPN with vpnserver: ```bash # 10.220.0.1 is vpnserver contacted inside the VPN # it should answer to the ping -[user@node $] docker-compose exec $NODE ping -c 3 -W 1 10.220.0.1 +[user@node $] docker compose exec $NODE ping -c 3 -W 1 10.220.0.1 ``` Run this for all launches of the container : @@ -319,12 +319,12 @@ Run this for all launches of the container : * launch container ```bash # `CONTAINER_{UID,GID,USER,GROUP}` are not needed if they are the same as used for build -[user@node $] CONTAINER_UID=$(id -u) CONTAINER_GID=$(id -g) CONTAINER_USER=$(id -un | sed 's/[^[:alnum:]]/_/g') CONTAINER_GROUP=$(id -gn | sed 's/[^[:alnum:]]/_/g') docker-compose up -d $NODE +[user@node $] CONTAINER_UID=$(id -u) CONTAINER_GID=$(id -g) CONTAINER_USER=$(id -un | sed 's/[^[:alnum:]]/_/g') CONTAINER_GROUP=$(id -gn | sed 's/[^[:alnum:]]/_/g') docker compose up -d $NODE ``` * TODO: better package/scripting needed Connect again to the node and launch manually, now that the VPN is established ```bash -[user@node $] docker-compose exec -u $(id -u) $NODE bash +[user@node $] docker compose exec -u $(id -u) $NODE bash # TODO : make more general by including it in the VPN configuration and user environment ? # TODO : create scripts in VPN environment # need proper parameters at first launch to create configuration file @@ -389,7 +389,7 @@ Run this only at first launch of container or after cleaning : * build container ```bash -[user@node $] CONTAINER_UID=$(id -u) CONTAINER_GID=$(id -g) CONTAINER_USER=$(id -un | sed 's/[^[:alnum:]]/_/g') CONTAINER_GROUP=$(id -gn | sed 's/[^[:alnum:]]/_/g') docker-compose build gui +[user@node $] CONTAINER_UID=$(id -u) CONTAINER_GID=$(id -g) CONTAINER_USER=$(id -un | sed 's/[^[:alnum:]]/_/g') CONTAINER_GROUP=$(id -gn | sed 's/[^[:alnum:]]/_/g') docker compose build gui ``` #### specific instructions: building gui image on a different machine @@ -411,7 +411,7 @@ On the build machine * in this example, we build the container with user `fedbiomed` (id `1234`) and group `fedbiomed` (id `1234`). Account name and id used on the node machine may differ (see below). 
* build container
```bash
-[user@build $] CONTAINER_UID=1234 CONTAINER_GID=1234 CONTAINER_USER=fedbiomed CONTAINER_GROUP=fedbiomed docker-compose build gui
+[user@build $] CONTAINER_UID=1234 CONTAINER_GID=1234 CONTAINER_USER=fedbiomed CONTAINER_GROUP=fedbiomed docker compose build gui
```
* save image for container
```bash
@@ -440,10 +440,10 @@ Run this for all launches of the container :

* launch container
```bash
-[user@node $] CONTAINER_UID=$(id -u) CONTAINER_GID=$(id -g) CONTAINER_USER=$(id -un | sed 's/[^[:alnum:]]/_/g') CONTAINER_GROUP=$(id -gn | sed 's/[^[:alnum:]]/_/g') docker-compose up -d gui
+[user@node $] CONTAINER_UID=$(id -u) CONTAINER_GID=$(id -g) CONTAINER_USER=$(id -un | sed 's/[^[:alnum:]]/_/g') CONTAINER_GROUP=$(id -gn | sed 's/[^[:alnum:]]/_/g') docker compose up -d gui
```
  * note : `CONTAINER_{UID,GID,USER,GROUP}` are not necessary if using the same identity as in for the build, but they need to have a read/write access to the directories mounted from the node machine's filesystem.
-  * note : when using a different identity than at build time, `docker-compose up` may take up to a few dozen seconds to complete and node be ready for using. This is the time for re-assigning some installed resources in the container to the new account.
+  * note : when using a different identity than at build time, `docker compose up` may take up to a few dozen seconds to complete before the container is ready for use. This is the time needed to re-assign some installed resources in the container to the new account.

#### using the gui

@@ -454,7 +454,7 @@ Use the node gui from outside the gui container :

By default, only connections from `localhost` are authorized. To enable connection to the GUI from any IP address
- specify the bind IP address at container launch time (eg: your node public IP address `NODE_IP`, or `0.0.0.0` to listen on all node addresses)
```bash
-[user@node $] GUI_SERVER_IP=0.0.0.0 docker-compose up -d gui
+[user@node $] GUI_SERVER_IP=0.0.0.0 docker compose up -d gui
```
- connect to `http://${NODE_IP}:8484`
- **warning** allowing connections from non-`localhost` exposes the gui to attacks from the network. Only use with proper third party security measures (web proxy, firewall, etc.) Currently, the provided gui container does not include a user authentication mechanism or encrypted communications for the user.
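- a possible alternative (a sketch added here, not part of the original text): keep the default `localhost` bind and reach the GUI through an SSH tunnel from the user's workstation; this assumes SSH access to the node machine, and `node.example.org` is a hypothetical hostname
```bash
# forward a local port to the gui bound on localhost:8484 of the node machine
[user@workstation $] ssh -N -L 8484:localhost:8484 user@node.example.org
# then browse http://localhost:8484 on the workstation
```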
@@ -467,8 +467,8 @@ Run this only at first launch of container or after cleaning : * build container ```bash -[user@researcher $] CONTAINER_UID=$(id -u) CONTAINER_GID=$(id -g) CONTAINER_USER=$(id -un | sed 's/[^[:alnum:]]/_/g') CONTAINER_GROUP=$(id -gn | sed 's/[^[:alnum:]]/_/g') docker-compose build base -[user@researcher $] CONTAINER_UID=$(id -u) CONTAINER_GID=$(id -g) CONTAINER_USER=$(id -un | sed 's/[^[:alnum:]]/_/g') CONTAINER_GROUP=$(id -gn | sed 's/[^[:alnum:]]/_/g') docker-compose build researcher +[user@researcher $] CONTAINER_UID=$(id -u) CONTAINER_GID=$(id -g) CONTAINER_USER=$(id -un | sed 's/[^[:alnum:]]/_/g') CONTAINER_GROUP=$(id -gn | sed 's/[^[:alnum:]]/_/g') docker compose build base +[user@researcher $] CONTAINER_UID=$(id -u) CONTAINER_GID=$(id -g) CONTAINER_USER=$(id -un | sed 's/[^[:alnum:]]/_/g') CONTAINER_GROUP=$(id -gn | sed 's/[^[:alnum:]]/_/g') docker compose build researcher ``` * generate VPN client for this container (see above in vpnserver) * configure the VPN client for this container @@ -482,21 +482,21 @@ Run this only at first launch of container or after cleaning : ``` * launch container ```bash -[user@researcher $] docker-compose up -d researcher +[user@researcher $] docker compose up -d researcher ``` * retrieve the *publickey* ```bash -[user@researcher $] docker-compose exec researcher wg show wg0 public-key +[user@researcher $] docker compose exec researcher wg show wg0 public-key ``` * connect to the VPN server to declare the container as a VPN client with cut-paste of *publickey* ```bash -[user@network $] docker-compose exec vpnserver python ./vpn/bin/configure_peer.py add researcher researcher1 *publickey* +[user@network $] docker compose exec vpnserver python ./vpn/bin/configure_peer.py add researcher researcher1 *publickey* ``` * check the container correctly established a VPN with vpnserver: ```bash # 10.220.0.1 is vpnserver contacted inside the VPN # it should answer to the ping -[user@researcher $] docker-compose exec researcher ping -c 3 -W 1 10.220.0.1 +[user@researcher $] docker compose exec researcher ping -c 3 -W 1 10.220.0.1 ``` Run this for all launches of the container : @@ -504,7 +504,7 @@ Run this for all launches of the container : * TODO: better package/scripting needed Connect again to the researcher and launch manually, now that the VPN is established ```bash -[user@researcher $] docker-compose exec -u $(id -u) researcher bash +[user@researcher $] docker compose exec -u $(id -u) researcher bash # TODO : make more general by including it in the VPN configuration and user environment ? # TODO : create scripts in VPN environment # need proper parameters at first launch to create configuration file @@ -545,7 +545,7 @@ tensorboard --logdir "$tensorboard_dir" To enable connection to the researcher and the tensorboard from any IP address using `RESEARCHER_HOST` - specify the bind IP address at container launch time (eg: your server public IP address `SERVER_IP`, or `0.0.0.0` to listen on all server addresses) ```bash -[user@researcher $] RESEARCHER_HOST=${SERVER_IP} docker-compose up -d researcher +[user@researcher $] RESEARCHER_HOST=${SERVER_IP} docker compose up -d researcher ``` - connect to `http://${SERVER_IP}:8888` and `http://${SERVER_IP}:6006` - **warning** allowing connections from non-`localhost` exposes the researcher to attacks from the network. Only use with proper third party security measures (web proxy, firewall, etc.) 
Currently, the provided researcher container does not include a user authentication mechanism or encrypted communications for the user. @@ -579,7 +579,7 @@ Before using a GPU for Fed-BioMed in a `node` docker container, you need to meet - not supported on MacOS (few Nvidia cards, docker virtualized) * **Nvidia drivers and CUDA >= 11.5.0** (the version used by Fed-BioMed container with GPU support) * **[Nvidia container toolkit](https://docs.nvidia.com/datacenter/cloud-native/container-toolkit/install-guide.html)** -* **`docker-compose` version >= 1.27.0** (already installed for container support) +* **`docker compose` version >= 2.0** (already installed for container support) Installation guidelines for requirements: @@ -596,12 +596,12 @@ sudo dnf install nvidia-docker2 FAQ for issues with GPU in containers : -* `docker-compose` file format error when launching any container : +* `docker compose` file format error when launching any container : ```bash ERROR: The Compose file './docker-compose.yml' is invalid because: Unsupported config option for services.node-gpu-other: 'runtime' ``` - - you need to update you `docker-compose` version + - you need to update you `docker compose` version * `runtime` error when launching `node-gpu` container : ```bash ERROR: for node-gpu Cannot create container for service node-gpu: @@ -623,20 +623,20 @@ You can connect to a container only if the corresponding container is already ru * connect on the VPN server / node / mqtt server / restful as root to configure the VPN ```bash -[user@network $] docker-compose exec vpnserver bash -[user@network $] docker-compose exec mqtt bash -[user@network $] docker-compose exec restful bash -[user@node $] docker-compose exec node bash -[user@researcher $] docker-compose exec researcher bash +[user@network $] docker compose exec vpnserver bash +[user@network $] docker compose exec mqtt bash +[user@network $] docker compose exec restful bash +[user@node $] docker compose exec node bash +[user@researcher $] docker compose exec researcher bash ``` * connect on the node as user to handle experiments ```bash -[user@node $] docker-compose exec -u $(id -u) node bash -[user@node $] docker-compose exec -u $(id -u) gui bash -[user@researcher $] docker-compose exec -u $(id -u) researcher bash +[user@node $] docker compose exec -u $(id -u) node bash +[user@node $] docker compose exec -u $(id -u) gui bash +[user@researcher $] docker compose exec -u $(id -u) researcher bash ``` -Note : can also use commands in the form, so you don't have to be in the docker-compose file directory +Note : can also use commands in the form, so you don't have to be in the docker compose file directory ```bash [user@node $] docker container exec -ti -u $(id -u) fedbiomed-vpn-node bash [user@node $] docker container exec -ti -u $(id -u) fedbiomed-vpn-gui bash @@ -651,7 +651,7 @@ Note : can also use commands in the form, so you don't have to be in the docker- [user@network $] cd ./envs/vpn/docker # level 1 : container instance -[user@network $] docker-compose rm -sf vpnserver +[user@network $] docker compose rm -sf vpnserver # level 2 : configuration [user@network #] rm -rf vpnserver/run_mounts/config/{config.env,config_peers,ip_assign,wireguard} @@ -667,7 +667,7 @@ Note : can also use commands in the form, so you don't have to be in the docker- [user@network $] cd ./envs/vpn/docker # level 1 : container instance -[user@network $] docker-compose rm -sf mqtt +[user@network $] docker compose rm -sf mqtt # level 2 : configuration [user@network $] rm -rf 
./mqtt/run_mounts/config/{config.env,wireguard} @@ -683,7 +683,7 @@ Note : can also use commands in the form, so you don't have to be in the docker- [user@network $] cd ./envs/vpn/docker # level 1 : container instance -[user@network $] docker-compose rm -sf restful +[user@network $] docker compose rm -sf restful # level 2 : configuration [user@network $] rm -rf ./restful/run_mounts/config/{config.env,wireguard} @@ -703,7 +703,7 @@ Note : can also use commands in the form, so you don't have to be in the docker- [user@node $] cd ./envs/vpn/docker # level 1 : container instance -[user@node $] docker-compose rm -sf node +[user@node $] docker compose rm -sf node # level 2 : configuration [user@node $] rm -rf ./node/run_mounts/config/{config.env,wireguard} @@ -722,7 +722,7 @@ Note : can also use commands in the form, so you don't have to be in the docker- [user@node $] cd ./envs/vpn/docker # level 1 : container instance -[user@node $] docker-compose rm -sf gui +[user@node $] docker compose rm -sf gui # level 2 : configuration [user@node $] rm -rf ./node/run_mounts/{data,etc,var}/* @@ -740,7 +740,7 @@ Same as node [user@researcher $] cd ./envs/vpn/docker # level 1 : container instance -[user@researcher $] docker-compose rm -sf researcher +[user@researcher $] docker compose rm -sf researcher # level 2 : configuration [user@researcher $] rm -rf ./researcher/run_mounts/config/{config.env,wireguard} @@ -771,8 +771,8 @@ Peers in VPN server can be listed or removed through `configure_peer.py`. Following code snippet will generate configurations for `mqtt` and `restful` component register their public keys in VPN server. ```bash -[user@network $] docker-compose up -d vpnserver -[user@network $] docker-compose exec vpnserver bash +[user@network $] docker compose up -d vpnserver +[user@network $] docker compose exec vpnserver bash [root@vpnserver-container #] python ./vpn/bin/configure_peer.py genconf management mqtt [root@vpnserver-container #] python ./vpn/bin/configure_peer.py genconf management restful [root@vpnserver-container #] python ./vpn/bin/configure_peer.py add management mqtt 1OIHVWcDq5+CaDKrQ3G3QAuVnr41ONVFBto1ylBroZg= @@ -833,6 +833,6 @@ Different values at build time and runtime is also supported by `vpnserver` `mqt Example : build a researcher container with a default user/group `fedbiomed` (id `1234`), run it with the same account as the account on the researcher machine. 
```bash -[user@researcher $] CONTAINER_UID=1234 CONTAINER_GID=1234 CONTAINER_USER=fedbiomed CONTAINER_GROUP=fedbiomed docker-compose build researcher -[user@researcher $] CONTAINER_UID=$(id -u) CONTAINER_GID=$(id -g) CONTAINER_USER=$(id -un | sed 's/[^[:alnum:]]/_/g') CONTAINER_GROUP=$(id -gn | sed 's/[^[:alnum:]]/_/g') docker-compose up -d researcher +[user@researcher $] CONTAINER_UID=1234 CONTAINER_GID=1234 CONTAINER_USER=fedbiomed CONTAINER_GROUP=fedbiomed docker compose build researcher +[user@researcher $] CONTAINER_UID=$(id -u) CONTAINER_GID=$(id -g) CONTAINER_USER=$(id -un | sed 's/[^[:alnum:]]/_/g') CONTAINER_GROUP=$(id -gn | sed 's/[^[:alnum:]]/_/g') docker compose up -d researcher ``` diff --git a/envs/vpn/docker/docker-compose.yml b/envs/vpn/docker/docker-compose.yml index c56a766c3..60437a8dc 100644 --- a/envs/vpn/docker/docker-compose.yml +++ b/envs/vpn/docker/docker-compose.yml @@ -84,7 +84,7 @@ services: # >=1.28 becomes common https://docs.docker.com/compose/profiles/ # # profiles: - # - debug # not started by `docker-compose up` except if `--profile debug` + # - debug # not started by `docker compose up` except if `--profile debug` # # intermediate step : base image for building vpn and researcher containers # we usually dont want to launch this service (used for building other services) diff --git a/envs/vpn/docker/docker-compose_run_node.yml b/envs/vpn/docker/docker-compose_run_node.yml index 5e9ceb1c5..973895a7c 100644 --- a/envs/vpn/docker/docker-compose_run_node.yml +++ b/envs/vpn/docker/docker-compose_run_node.yml @@ -6,7 +6,7 @@ version: "3.7" # Minimal dockerfile permits minimal file tree # because *all* files dependencies referenced in `docker-compose.yml` # (eg: `./*/build_files`, `./*/run_mounts/config/config.env`) -# need to exist for *all* containers everytime we run a docker-compose command +# need to exist for *all* containers everytime we run a `docker compose` command # thus we would have to copy file trees for vpnserver, researcher, etc. when only # running a node x-node: diff --git a/envs/vpn/docker/gui/build_files/bashrc_append b/envs/vpn/docker/gui/build_files/bashrc_append index 6e8db1e63..b57cf604c 100644 --- a/envs/vpn/docker/gui/build_files/bashrc_append +++ b/envs/vpn/docker/gui/build_files/bashrc_append @@ -3,9 +3,9 @@ # # needed to assign here `CONTAINER_{USER,GROUP,UID,GID}` values to handle the case where -# - container is launched with `docker-compose up` (no value passed as in -# `CONTAINER_USER=myuser ... docker-compose up`) -# - then doing a `docker-compose exec` : in this case `CONTAINER_{USER,GROUP,UID,GID}` +# - container is launched with `docker compose up` (no value passed as in +# `CONTAINER_USER=myuser ... docker compose up`) +# - then doing a `docker compose exec` : in this case `CONTAINER_{USER,GROUP,UID,GID}` # use the (empty) values from the environment (see docker-compose.yml) # that override values from the image (see Dockerfile) export CONTAINER_USER=${CONTAINER_USER:-${CONTAINER_BUILD_USER:-root}} diff --git a/envs/vpn/docker/mqtt/build_files/bashrc_append b/envs/vpn/docker/mqtt/build_files/bashrc_append index 290f306d9..dea9a60f9 100644 --- a/envs/vpn/docker/mqtt/build_files/bashrc_append +++ b/envs/vpn/docker/mqtt/build_files/bashrc_append @@ -4,9 +4,9 @@ [ -f /config/config.env ] && source /config/config.env # needed to assign here `CONTAINER_{USER,GROUP,UID,GID}` values to handle the case where -# - container is launched with `docker-compose up` (no value passed as in -# `CONTAINER_USER=myuser ... 
docker-compose up`) -# - then doing a `docker-compose exec` : in this case `CONTAINER_{USER,GROUP,UID,GID}` +# - container is launched with `docker compose up` (no value passed as in +# `CONTAINER_USER=myuser ... docker compose up`) +# - then doing a `docker compose exec` : in this case `CONTAINER_{USER,GROUP,UID,GID}` # use the (empty) values from the environment (see docker-compose.yml) # that override values from the image (see Dockerfile) export CONTAINER_USER=${CONTAINER_USER:-${CONTAINER_BUILD_USER:-root}} diff --git a/envs/vpn/docker/node/build_files/bashrc_append b/envs/vpn/docker/node/build_files/bashrc_append index 290f306d9..dea9a60f9 100644 --- a/envs/vpn/docker/node/build_files/bashrc_append +++ b/envs/vpn/docker/node/build_files/bashrc_append @@ -4,9 +4,9 @@ [ -f /config/config.env ] && source /config/config.env # needed to assign here `CONTAINER_{USER,GROUP,UID,GID}` values to handle the case where -# - container is launched with `docker-compose up` (no value passed as in -# `CONTAINER_USER=myuser ... docker-compose up`) -# - then doing a `docker-compose exec` : in this case `CONTAINER_{USER,GROUP,UID,GID}` +# - container is launched with `docker compose up` (no value passed as in +# `CONTAINER_USER=myuser ... docker compose up`) +# - then doing a `docker compose exec` : in this case `CONTAINER_{USER,GROUP,UID,GID}` # use the (empty) values from the environment (see docker-compose.yml) # that override values from the image (see Dockerfile) export CONTAINER_USER=${CONTAINER_USER:-${CONTAINER_BUILD_USER:-root}} diff --git a/envs/vpn/docker/researcher/build_files/bashrc_append b/envs/vpn/docker/researcher/build_files/bashrc_append index 290f306d9..dea9a60f9 100644 --- a/envs/vpn/docker/researcher/build_files/bashrc_append +++ b/envs/vpn/docker/researcher/build_files/bashrc_append @@ -4,9 +4,9 @@ [ -f /config/config.env ] && source /config/config.env # needed to assign here `CONTAINER_{USER,GROUP,UID,GID}` values to handle the case where -# - container is launched with `docker-compose up` (no value passed as in -# `CONTAINER_USER=myuser ... docker-compose up`) -# - then doing a `docker-compose exec` : in this case `CONTAINER_{USER,GROUP,UID,GID}` +# - container is launched with `docker compose up` (no value passed as in +# `CONTAINER_USER=myuser ... docker compose up`) +# - then doing a `docker compose exec` : in this case `CONTAINER_{USER,GROUP,UID,GID}` # use the (empty) values from the environment (see docker-compose.yml) # that override values from the image (see Dockerfile) export CONTAINER_USER=${CONTAINER_USER:-${CONTAINER_BUILD_USER:-root}} diff --git a/envs/vpn/docker/restful/build_files/bashrc_append b/envs/vpn/docker/restful/build_files/bashrc_append index 290f306d9..dea9a60f9 100644 --- a/envs/vpn/docker/restful/build_files/bashrc_append +++ b/envs/vpn/docker/restful/build_files/bashrc_append @@ -4,9 +4,9 @@ [ -f /config/config.env ] && source /config/config.env # needed to assign here `CONTAINER_{USER,GROUP,UID,GID}` values to handle the case where -# - container is launched with `docker-compose up` (no value passed as in -# `CONTAINER_USER=myuser ... docker-compose up`) -# - then doing a `docker-compose exec` : in this case `CONTAINER_{USER,GROUP,UID,GID}` +# - container is launched with `docker compose up` (no value passed as in +# `CONTAINER_USER=myuser ... 
docker compose up`) +# - then doing a `docker compose exec` : in this case `CONTAINER_{USER,GROUP,UID,GID}` # use the (empty) values from the environment (see docker-compose.yml) # that override values from the image (see Dockerfile) export CONTAINER_USER=${CONTAINER_USER:-${CONTAINER_BUILD_USER:-root}} diff --git a/envs/vpn/docker/vpnserver/build_files/bashrc_append b/envs/vpn/docker/vpnserver/build_files/bashrc_append index 290f306d9..dea9a60f9 100644 --- a/envs/vpn/docker/vpnserver/build_files/bashrc_append +++ b/envs/vpn/docker/vpnserver/build_files/bashrc_append @@ -4,9 +4,9 @@ [ -f /config/config.env ] && source /config/config.env # needed to assign here `CONTAINER_{USER,GROUP,UID,GID}` values to handle the case where -# - container is launched with `docker-compose up` (no value passed as in -# `CONTAINER_USER=myuser ... docker-compose up`) -# - then doing a `docker-compose exec` : in this case `CONTAINER_{USER,GROUP,UID,GID}` +# - container is launched with `docker compose up` (no value passed as in +# `CONTAINER_USER=myuser ... docker compose up`) +# - then doing a `docker compose exec` : in this case `CONTAINER_{USER,GROUP,UID,GID}` # use the (empty) values from the environment (see docker-compose.yml) # that override values from the image (see Dockerfile) export CONTAINER_USER=${CONTAINER_USER:-${CONTAINER_BUILD_USER:-root}} diff --git a/scripts/CI_build b/scripts/CI_build index 107627f42..a90d4e6c9 100755 --- a/scripts/CI_build +++ b/scripts/CI_build @@ -9,7 +9,7 @@ # - conda (installed and setup) # - docker (installed and started https://docs.docker.com/engine/install/fedora/) # - account used for build in docker /etc/group -# - docker-compose +# - docker compose v2 # Set variables for the build diff --git a/scripts/fedbiomed_environment b/scripts/fedbiomed_environment index 34d66a620..c43efe274 100755 --- a/scripts/fedbiomed_environment +++ b/scripts/fedbiomed_environment @@ -118,7 +118,7 @@ case $1 in activate_network echo "** Cleaning all caches / temporary files" # docker containers - (cd ${basedir}/envs/development/docker/ && docker-compose rm -sf && echo " * Docker cleaning" ) + (cd ${basedir}/envs/development/docker/ && docker compose rm -sf && echo " * Docker cleaning" ) # network cleaner echo " * Network cleaning" diff --git a/scripts/fedbiomed_run b/scripts/fedbiomed_run index 398f00a71..598c35cb2 100755 --- a/scripts/fedbiomed_run +++ b/scripts/fedbiomed_run @@ -234,7 +234,7 @@ case $1 in case $2 in stop) source ${basedir}/scripts/fedbiomed_environment network - docker-compose down + docker compose down ;; help|-h|--help) usage network @@ -244,11 +244,11 @@ case $1 in CONTAINER_UID=$(id -u) CONTAINER_GID=$(id -g) \ CONTAINER_USER=$(id -un | sed 's/[^[:alnum:]]/_/g') \ CONTAINER_GROUP=$(id -gn | sed 's/[^[:alnum:]]/_/g') \ - docker-compose build restful mqtt + docker compose build restful mqtt CONTAINER_UID=$(id -u) CONTAINER_GID=$(id -g) \ CONTAINER_USER=$(id -un | sed 's/[^[:alnum:]]/_/g') \ CONTAINER_GROUP=$(id -gn | sed 's/[^[:alnum:]]/_/g') \ - docker-compose up -d restful mqtt + docker compose up -d restful mqtt ;; esac ;; diff --git a/scripts/fedbiomed_vpn b/scripts/fedbiomed_vpn index 6da09042a..47b6a7928 100755 --- a/scripts/fedbiomed_vpn +++ b/scripts/fedbiomed_vpn @@ -92,13 +92,15 @@ stop stop and remove all containers check_prerequisite() { # # verify that every needed commands are installed - commands=( docker docker-compose ) + commands=( docker 'docker compose') ERROR=0 - for i in ${commands[@]} + for i in "${commands[@]}" do - status=$(which $i) - [[ 
-z "$status" ]] && { echo "** ERROR: command not found: $i"; ERROR=1; } + #status=$(which $i) + $i >/dev/null 2>&1 + status=$? + [[ "$status" -ne 0 ]] && { echo "** ERROR: command not found: $i"; ERROR=1; } done [[ $ERROR -eq 1 ]] && { echo "** Please install needed commands before running this script." ; exit 1; } } @@ -204,7 +206,7 @@ containers_remove() { cd "$basedir/envs/vpn/docker" for i in ${ONLY_CONTAINERS[@]} do - docker-compose rm -sf $i >/dev/null + docker compose rm -sf $i >/dev/null done } @@ -221,7 +223,7 @@ containers_clean() { for i in base basenode do - docker-compose rm -sf $i >/dev/null + docker compose rm -sf $i >/dev/null done # @@ -308,7 +310,7 @@ containers_status() { do [[ "$i" = "gui" ]] || [[ "$i" = "gui2" ]] && { continue ; } # gui containers do not use wireguard echo -n "- pinging VPN server from container $i -> " - ping=$(docker-compose exec ${i} ping -n -c 3 -W 1 10.220.0.1 2>/dev/null | cat -v) + ping=$(docker compose exec ${i} ping -n -c 3 -W 1 10.220.0.1 2>/dev/null | cat -v) status=$(echo $ping|grep seq=) if [ -z "$status" ]; then @@ -344,26 +346,26 @@ containers_build() { CONTAINER_UID=${CONTAINER_UID:-$(id -u)} CONTAINER_GID=${CONTAINER_GID:-$(id -g)} \ CONTAINER_USER=${CONTAINER_USER:-$(id -un | sed 's/[^[:alnum:]]/_/g')} \ CONTAINER_GROUP=${CONTAINER_GROUP:-$(id -gn | sed 's/[^[:alnum:]]/_/g')} \ - docker-compose build base + docker compose build base fi if [ $BUILD_NODEBASE -eq 1 ]; then echo "- building 'basenode' container" CONTAINER_UID=${CONTAINER_UID:-$(id -u)} CONTAINER_GID=${CONTAINER_GID:-$(id -g)} \ CONTAINER_USER=${CONTAINER_USER:-$(id -un | sed 's/[^[:alnum:]]/_/g')} \ CONTAINER_GROUP=${CONTAINER_GROUP:-$(id -gn | sed 's/[^[:alnum:]]/_/g')} \ - docker-compose build basenode + docker compose build basenode fi for i in ${ONLY_CONTAINERS[@]} do echo "- stopping '$i' container" - docker-compose rm -sf $i >/dev/null + docker compose rm -sf $i >/dev/null echo "- building '$i' container" MPSPDZ_URL=$MPSPDZ_URL MPSPDZ_COMMIT=$MPSPDZ_COMMIT \ CONTAINER_UID=${CONTAINER_UID:-$(id -u)} CONTAINER_GID=${CONTAINER_GID:-$(id -g)} \ CONTAINER_USER=${CONTAINER_USER:-$(id -un | sed 's/[^[:alnum:]]/_/g')} \ CONTAINER_GROUP=${CONTAINER_GROUP:-$(id -gn | sed 's/[^[:alnum:]]/_/g')} \ - docker-compose build $i + docker compose build $i done } @@ -381,7 +383,7 @@ single_container_configure() { IP=$(find_my_ip) /bin/rm -fr ./vpnserver/run_mounts/config/config_peers/${category}/${container} - docker-compose exec vpnserver bash -c -i "python ./vpn/bin/configure_peer.py genconf ${category} ${container}" + docker compose exec vpnserver bash -c -i "python ./vpn/bin/configure_peer.py genconf ${category} ${container}" sleep 1 /bin/rm -fr ./${container}/run_mounts/config/wireguard 2> /dev/null @@ -407,7 +409,7 @@ containers_configure() { CONTAINER_UID=${CONTAINER_UID:-$(id -u)} CONTAINER_GID=${CONTAINER_GID:-$(id -g)} \ CONTAINER_USER=${CONTAINER_USER:-$(id -un | sed 's/[^[:alnum:]]/_/g')} \ CONTAINER_GROUP=${CONTAINER_GROUP:-$(id -gn | sed 's/[^[:alnum:]]/_/g')} \ - docker-compose up -d vpnserver + docker compose up -d vpnserver for i in ${ONLY_CONTAINERS[@]} do @@ -440,14 +442,14 @@ containers_start() { # Use CONTAINER_UID variable again when starting container(s) because - # if image is not build `docker-compose up ` will automatically build it + # if image is not build `docker compose up ` will automatically build it # start vpnserver first MPSPDZ_URL=$MPSPDZ_URL MPSPDZ_COMMIT=$MPSPDZ_COMMIT \ CONTAINER_UID=${CONTAINER_UID:-$(id -u)} CONTAINER_GID=${CONTAINER_GID:-$(id 
-g)} \ CONTAINER_USER=${CONTAINER_USER:-$(id -un | sed 's/[^[:alnum:]]/_/g')} \ CONTAINER_GROUP=${CONTAINER_GROUP:-$(id -gn | sed 's/[^[:alnum:]]/_/g')} \ - docker-compose up -d vpnserver + docker compose up -d vpnserver # start other container(s) for i in ${ONLY_CONTAINERS[@]} @@ -465,37 +467,37 @@ containers_start() { esac echo "- starting $i container" - docker-compose rm -sf $CONTAINER >/dev/null + docker compose rm -sf $CONTAINER >/dev/null MPSPDZ_URL=$MPSPDZ_URL MPSPDZ_COMMIT=$MPSPDZ_COMMIT \ CONTAINER_UID=${CONTAINER_UID:-$(id -u)} CONTAINER_GID=${CONTAINER_GID:-$(id -g)} \ CONTAINER_USER=${CONTAINER_USER:-$(id -un | sed 's/[^[:alnum:]]/_/g')} \ CONTAINER_GROUP=${CONTAINER_GROUP:-$(id -gn | sed 's/[^[:alnum:]]/_/g')} \ - docker-compose up -d $CONTAINER + docker compose up -d $CONTAINER case $i in mqtt|restful) - pubkey=$(docker-compose exec "$i" wg show wg0 public-key | tr -d '\r') + pubkey=$(docker compose exec "$i" wg show wg0 public-key | tr -d '\r') # Remove key to avoid protocol error if keys are same - docker-compose exec vpnserver python ./vpn/bin/configure_peer.py remove management "$i" - docker-compose exec vpnserver python ./vpn/bin/configure_peer.py add management "$i" "$pubkey" + docker compose exec vpnserver python ./vpn/bin/configure_peer.py remove management "$i" + docker compose exec vpnserver python ./vpn/bin/configure_peer.py add management "$i" "$pubkey" ;; researcher) - pubkey=$(docker-compose exec researcher wg show wg0 public-key | tr -d '\r') + pubkey=$(docker compose exec researcher wg show wg0 public-key | tr -d '\r') # Remove key to avoid protocol error if keys are same - docker-compose exec vpnserver python ./vpn/bin/configure_peer.py remove researcher researcher1 - docker-compose exec vpnserver python ./vpn/bin/configure_peer.py add researcher researcher1 $pubkey + docker compose exec vpnserver python ./vpn/bin/configure_peer.py remove researcher researcher1 + docker compose exec vpnserver python ./vpn/bin/configure_peer.py add researcher researcher1 $pubkey ;; node) - pubkey=$(docker-compose exec node wg show wg0 public-key | tr -d '\r') + pubkey=$(docker compose exec node wg show wg0 public-key | tr -d '\r') # Remove key to avoid protocol error if keys are same - docker-compose exec vpnserver python ./vpn/bin/configure_peer.py remove node NODETAG - docker-compose exec vpnserver python ./vpn/bin/configure_peer.py add node NODETAG $pubkey + docker compose exec vpnserver python ./vpn/bin/configure_peer.py remove node NODETAG + docker compose exec vpnserver python ./vpn/bin/configure_peer.py add node NODETAG $pubkey ;; node2) - pubkey=$(docker-compose exec node2 wg show wg0 public-key | tr -d '\r') + pubkey=$(docker compose exec node2 wg show wg0 public-key | tr -d '\r') # Remove key to avoid protocol error if keys are same - docker-compose exec vpnserver python ./vpn/bin/configure_peer.py remove node NODE2TAG - docker-compose exec vpnserver python ./vpn/bin/configure_peer.py add node NODE2TAG $pubkey + docker compose exec vpnserver python ./vpn/bin/configure_peer.py remove node NODE2TAG + docker compose exec vpnserver python ./vpn/bin/configure_peer.py add node NODE2TAG $pubkey ;; *) ;; @@ -514,12 +516,12 @@ run() { cd "$basedir/envs/vpn/docker" # find @IP of mqtt and restful inside the VPN - status=$(docker-compose ps restful | wc -l) + status=$(docker compose ps restful | wc -l) [[ $? 
]] || { echo "** ERROR: restful container not running" exit 1 } - restful_IP=$(docker-compose exec restful ip route | tr -d '\r' | grep ^10.220 | awk '{print $NF}') + restful_IP=$(docker compose exec restful ip route | tr -d '\r' | grep ^10.220 | awk '{print $NF}') echo "- restful IP is: $restful_IP" [[ -z "$restful_IP" ]] && { echo "** ERROR: cannot find IP address of restful server" ; @@ -528,12 +530,12 @@ run() { } # - status=$(docker-compose ps mqtt | wc -l) + status=$(docker compose ps mqtt | wc -l) [[ $? ]] || { echo "** ERROR: mqtt container not running" exit 1 } - mqtt_IP=$(docker-compose exec mqtt ip route | tr -d '\r' | grep ^10.220 | awk '{print $NF}') + mqtt_IP=$(docker compose exec mqtt ip route | tr -d '\r' | grep ^10.220 | awk '{print $NF}') echo "- mqtt IP is: $mqtt_IP" [[ -z "$mqtt_IP" ]] && { echo "** ERROR: cannot find IP address of mqtt server" ; @@ -544,7 +546,7 @@ run() { CMD="export MQTT_BROKER=$mqtt_IP && export MQTT_BROKER_PORT=1883 && export UPLOADS_URL=http://${restful_IP}:8000/upload/ && export UPLOADS_IP=${restful_IP} && export FEDBIOMED_NO_RESET=1 && ./scripts/fedbiomed_run $container ${RUN_ARGS}" - docker-compose exec -u ${CONTAINER_UID:-$(id -u)} $container bash -c "$CMD" + docker compose exec -u ${CONTAINER_UID:-$(id -u)} $container bash -c "$CMD" diff --git a/scripts/run_integration_test b/scripts/run_integration_test index 23d7b9eec..938ac75fa 100755 --- a/scripts/run_integration_test +++ b/scripts/run_integration_test @@ -205,7 +205,7 @@ cleaning() { fi # kill the docker containers - ( cd $basedir/envs/development/docker ; docker-compose down ) + ( cd $basedir/envs/development/docker ; docker compose down ) # # clean all datasets from nodes diff --git a/scripts/run_test_mnist b/scripts/run_test_mnist index 04a457840..299a6892e 100755 --- a/scripts/run_test_mnist +++ b/scripts/run_test_mnist @@ -83,7 +83,7 @@ conda deactivate ##clean running processes and datasets echo "INFO: killing kpids=$kpids" kill -9 $kpids -( cd $basedir/envs/development/docker ; docker-compose down ) +( cd $basedir/envs/development/docker ; docker compose down ) source $basedir/scripts/fedbiomed_environment node $basedir/scripts/fedbiomed_run node --delete-mnist $basedir/scripts/fedbiomed_run node config config-n1.ini --delete-mnist From dd2c5e1792d9a37d63623918b1dd989f5ab67088 Mon Sep 17 00:00:00 2001 From: Marc Vesin Date: Thu, 25 May 2023 09:23:56 +0200 Subject: [PATCH 11/41] WIP containers upgrade packages - `base` updated --- envs/vpn/docker/base/build_files/Dockerfile | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/envs/vpn/docker/base/build_files/Dockerfile b/envs/vpn/docker/base/build_files/Dockerfile index ad7bcf76c..0fbf7856c 100644 --- a/envs/vpn/docker/base/build_files/Dockerfile +++ b/envs/vpn/docker/base/build_files/Dockerfile @@ -1,18 +1,18 @@ # temporary builder image for wireguard tools # - need coherent system version with base image -# - may need update for properly compiling boringtun (eg: cargo build package failing on `buster-slim`) +# - may need update for properly compiling boringtun +# - 2023-05: `bookworm` is not officially released, stick to `bullseye` for now FROM debian:bullseye-slim as builder RUN apt-get update && apt-get install -y git build-essential curl # install boringtun userspace implementation of wireguard # -# - match versions of debian & boringtun : 0.4.0 is now supported by bullseye with current system/cargo packages -# 0.5.2 not yet supported +# - match versions of debian & boringtun : up to date 0.5.2 is now 
ok for bullseye # - glitch: bullseye's apt-get cargo is too old vs boringtun's cargo packages dependencies, need to install # up to date rust/cargo RUN curl https://sh.rustup.rs -sSf | sh -s -- -y && . /root/.cargo/env && \ - cargo install --locked --bin boringtun --version ~0.4.0 boringtun + cargo install --locked --bin boringtun-cli --version ~0.5.2 boringtun-cli # install needed wireguard-tools ENV WITH_WGQUICK=yes @@ -23,10 +23,10 @@ RUN git clone https://git.zx2c4.com/wireguard-tools && \ # docker base image for VPN server, Fed-BioMed node and researcher # - need proper python version for Fed-BioMed -FROM python:3.9-slim-bullseye +FROM python:3.10-slim-bullseye RUN apt-get update && apt-get install -y iptables iproute2 iputils-ping bash vim net-tools procps build-essential kmod # get wireguard from builder image -COPY --from=builder /root/.cargo/bin/boringtun /usr/bin/ +COPY --from=builder /root/.cargo/bin/boringtun-cli /usr/bin/ COPY --from=builder /usr/bin/wg* /usr/bin/ From 2fde2579d4899a84a63eba72fb27fca9ed801b03 Mon Sep 17 00:00:00 2001 From: Marc Vesin Date: Thu, 25 May 2023 09:57:31 +0200 Subject: [PATCH 12/41] replace `egrep` by `grep -E` - obsolete in fedora >= 38 and probably others --- scripts/fedbiomed_run | 2 +- scripts/fedbiomed_vpn | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/scripts/fedbiomed_run b/scripts/fedbiomed_run index 598c35cb2..1702b7cdb 100755 --- a/scripts/fedbiomed_run +++ b/scripts/fedbiomed_run @@ -445,7 +445,7 @@ case $1 in # Windows WSL and others cannot handle using redirect file for securing notebook launch # https://jupyter-notebook.readthedocs.io/en/stable/config.html (section NotebookApp.use_redirect_file) NB_OPTS= - if [ -n "$(uname -r | egrep -i 'microsoft|wsl')" ] + if [ -n "$(uname -r | grep -E -i 'microsoft|wsl')" ] # case if windows WSL is used then NB_OPTS='--NotebookApp.use_redirect_file=false' diff --git a/scripts/fedbiomed_vpn b/scripts/fedbiomed_vpn index 47b6a7928..3e8b8affb 100755 --- a/scripts/fedbiomed_vpn +++ b/scripts/fedbiomed_vpn @@ -144,7 +144,7 @@ internal_find_my_ip() { esac # verify that the result is an IPv4 adress - _IP=$(echo $_IP | egrep '([0-9]{1,3}[\.]){3}[0-9]{1,3}') + _IP=$(echo $_IP | grep -E '([0-9]{1,3}[\.]){3}[0-9]{1,3}') echo $_IP } From 3809c230eaf782ef6df6f1fd8f94ba0853dff7db Mon Sep 17 00:00:00 2001 From: Marc Vesin Date: Thu, 25 May 2023 14:26:10 +0200 Subject: [PATCH 13/41] update system requirements (ubuntu fedora windows) + VPN dependencies install --- README.md | 2 +- envs/vpn/README.md | 28 ++++++++++------------------ 2 files changed, 11 insertions(+), 19 deletions(-) diff --git a/README.md b/README.md index f7627af1f..0d2c4de8b 100644 --- a/README.md +++ b/README.md @@ -19,7 +19,7 @@ According to our coding rules, the develop branch is usable, tests and tutorials ## Install and run in development environment -Fed-BioMed is developped under Linux Fedora, should be easily ported to other Linux distributions. +Fed-BioMed is developped under Linux Fedora & Ubuntu, should be easily ported to other Linux distributions. It runs also smoothly on macOSX (mostly tested on macOSX 12: Monterey). This README.md file provide a quick start/installation guide for Linux. diff --git a/envs/vpn/README.md b/envs/vpn/README.md index 95eb3cc31..8007b021b 100644 --- a/envs/vpn/README.md +++ b/envs/vpn/README.md @@ -17,10 +17,10 @@ Which machine to use ? 
## requirements Supported operating systems for using containers : - - tested on **Fedora 35**, should work for recent RedHat based Linux - - tested on **Ubuntu 22.04**, should work for recent Debian based Linux + - tested on **Fedora 38**, should work for recent RedHat based Linux + - should work for **Ubuntu 22.04 LTS** and recent Debian based Linux - tested on recent **MacOS X** - - tested on **Windows 10** 21H2 with WSL2 using a Ubuntu-20.04 distribution, should work with most Windows 10/11 and other recent Linux distributions + - should work on **Windows 11** with WSL2 using a Ubuntu-22.04 distribution Pre-requisites for using containers : @@ -570,30 +570,22 @@ You can access the host machine GPU accelerator from a node container to speed u Before using a GPU for Fed-BioMed in a `node` docker container, you need to meet the requirements for the host machine: -* a **Nvidia GPU** recent enough (**`Kepler` or newer** generation) to support `450.x` Nvidia drivers that are needed for CUDA 11.x +* a **Nvidia GPU** recent enough (**`Maxwell` or newer** generation) to support `>=525.60.13` Nvidia drivers that are needed for CUDA 12.1.1 (used in containers) Recommended GPU memory is **>= 4GB or more** depending on your training plans size. * a **supported operating system** - - tested on **Fedora 35**, should work for recent RedHat based Linux - - partly tested on **Ubuntu 20.04**, should work for recent Debian based Linux - - not tested on Windows with WSL2, but should work with Windows 10 version 21H2 or higher, that [support GPU in WSL2](https://docs.microsoft.com/en/windows/wsl/tutorials/gpu-compute) + - tested on **Fedora 38**, should work for recent RedHat based Linux + - should work for **Ubuntu 22.04 LTS** and other recent Debian based Linux + - not tested on Windows with WSL2, but should work with Windows 11, that [support GPU in WSL2](https://docs.microsoft.com/en/windows/wsl/tutorials/gpu-compute) - not supported on MacOS (few Nvidia cards, docker virtualized) -* **Nvidia drivers and CUDA >= 11.5.0** (the version used by Fed-BioMed container with GPU support) +* **Nvidia drivers >=525.60.13** (for CUDA 12.1.1 support, the version used in Fed-BioMed container with GPU support) * **[Nvidia container toolkit](https://docs.nvidia.com/datacenter/cloud-native/container-toolkit/install-guide.html)** * **`docker compose` version >= 2.0** (already installed for container support) Installation guidelines for requirements: -* Nvidia drivers and CUDA: Type `nvidia-smi` to check driver version installed. You can use your usual package manager (`apt`, `dnf`) or [nvidia CUDA toolkit](https://developer.nvidia.com/cuda-downloads) download. In both cases, commands depend on your machine configuration. -* Nvidia container toolkit: check list of supported systems and [specific instructions](https://docs.nvidia.com/datacenter/cloud-native/container-toolkit/install-guide.html). First enable the repository, then install the package (eg: `sudo apt-get update && sudo apt-get install nvidia-docker2` on Ubuntu/Debian, `sudo dnf install nvidia-docker2` on CentOS). - - if your system version is not supported, you can try to use an approaching version. For example, for Fedora 35 we installed and ran the CentOS 8 version with: -```bash -distribution=centos8 -curl -s -L https://nvidia.github.io/nvidia-docker/$distribution/nvidia-docker.repo | \ - sudo tee /etc/yum.repos.d/nvidia-docker.repo -sudo dnf install nvidia-docker2 -``` - +* Nvidia drivers: Type `nvidia-smi` to check driver version installed. 
You can use your usual package manager (`apt`, `dnf`). Exact command depends on your machine configuration. +* Nvidia container toolkit: check list of supported systems and [specific instructions](https://docs.nvidia.com/datacenter/cloud-native/container-toolkit/install-guide.html). First enable the repository, as root, then install the package (eg: `sudo apt-get update && sudo apt-get install nvidia-container-toolkit` on Ubuntu/Debian, `sudo dnf install nvidia-container-toolkit` on Fedora). Then configure and relaunch the docker daemon: `nvidia-ctk runtime configure --runtime=docker && systemctl restart docker`. FAQ for issues with GPU in containers : * `docker compose` file format error when launching any container : From b82e2f35ef1d6923fe074c3d9a601bbfbdfaf326 Mon Sep 17 00:00:00 2001 From: Marc Vesin Date: Thu, 25 May 2023 15:25:24 +0200 Subject: [PATCH 14/41] WIP update vpn mode docker containers - update `base` `basenode` containers - update boringtun to 0.5.2 - install wireguard-tools from packager --- envs/vpn/docker/base/build_files/Dockerfile | 6 +-- .../vpn/docker/basegpu/build_files/Dockerfile | 37 ++++--------------- .../gui/build_files/entrypoint_functions.bash | 6 +-- envs/vpn/docker/mqtt/build_files/Dockerfile | 11 ++---- .../build_files/entrypoint_functions.bash | 6 +-- .../build_files/entrypoint_functions.bash | 6 +-- .../build_files/entrypoint_functions.bash | 6 +-- .../vpn/docker/restful/build_files/Dockerfile | 10 ++--- .../build_files/entrypoint_functions.bash | 6 +-- .../build_files/entrypoint_functions.bash | 6 +-- 10 files changed, 34 insertions(+), 66 deletions(-) diff --git a/envs/vpn/docker/base/build_files/Dockerfile b/envs/vpn/docker/base/build_files/Dockerfile index 0fbf7856c..67a6c4bad 100644 --- a/envs/vpn/docker/base/build_files/Dockerfile +++ b/envs/vpn/docker/base/build_files/Dockerfile @@ -15,11 +15,7 @@ RUN curl https://sh.rustup.rs -sSf | sh -s -- -y && . 
/root/.cargo/env && \ cargo install --locked --bin boringtun-cli --version ~0.5.2 boringtun-cli # install needed wireguard-tools -ENV WITH_WGQUICK=yes -RUN git clone https://git.zx2c4.com/wireguard-tools && \ - make -C wireguard-tools/src && \ - make -C wireguard-tools/src install - +RUN apt-get install -y wireguard-tools # docker base image for VPN server, Fed-BioMed node and researcher # - need proper python version for Fed-BioMed diff --git a/envs/vpn/docker/basegpu/build_files/Dockerfile b/envs/vpn/docker/basegpu/build_files/Dockerfile index 304ef89bf..f2f5af2cc 100644 --- a/envs/vpn/docker/basegpu/build_files/Dockerfile +++ b/envs/vpn/docker/basegpu/build_files/Dockerfile @@ -1,49 +1,28 @@ # temporary builder image for wireguard tools # - need coherent system version with base image # - may need update for properly compiling boringtun -FROM nvidia/cuda:11.5.0-base-ubuntu20.04 as builder - -# install wget though nvidia cuda linux gpg key is obsolete -RUN apt-key del 7fa2af80 && apt-get update || true && apt install -y wget -# update nvidia cuda linux gpg repository key -# https://developer.nvidia.com/blog/updating-the-cuda-linux-gpg-repository-key/ -RUN wget https://developer.download.nvidia.com/compute/cuda/repos/ubuntu2004/x86_64/cuda-keyring_1.0-1_all.deb && \ - dpkg -i cuda-keyring_1.0-1_all.deb && \ - sed -i '/developer\.download\.nvidia\.com\/compute\/cuda\/repos/d' /etc/apt/sources.list && \ - sed -i '/developer\.download\.nvidia\.com\/compute\/cuda\/repos/d' /etc/apt/sources.list.d/* +# - 2023-05: `12.1.1-base-ubuntu22.04` is uptodate version +FROM nvidia/cuda:12.1.1-base-ubuntu22.04 as builder RUN apt-get update && apt-get install -y git build-essential cargo # install boringtun userspace implementation of wireguard # -# match versions of ubuntu & boringtun -# 0.5.2 not yet supported -RUN cargo install --locked --bin boringtun --version ~0.4.0 boringtun +# - match versions of debian & boringtun : up to date 0.5.2 is now ok for bullseye +RUN cargo install --locked --bin boringtun-cli --version ~0.5.2 boringtun-cli # install needed wireguard-tools -ENV WITH_WGQUICK=yes -RUN git clone https://git.zx2c4.com/wireguard-tools && \ - make -C wireguard-tools/src && \ - make -C wireguard-tools/src install +RUN apt-get install wireguard-tools # docker base image for VPN server, Fed-BioMed node and researcher # - need proper python version for Fed-BioMed -FROM nvidia/cuda:11.5.0-base-ubuntu20.04 - -# install wget though nvidia cuda linux gpg key is obsolete -RUN apt-key del 7fa2af80 && apt-get update || true && apt install -y wget -# update nvidia cuda linux gpg repository key -# https://developer.nvidia.com/blog/updating-the-cuda-linux-gpg-repository-key/ -RUN wget https://developer.download.nvidia.com/compute/cuda/repos/ubuntu2004/x86_64/cuda-keyring_1.0-1_all.deb && \ - dpkg -i cuda-keyring_1.0-1_all.deb && \ - sed -i '/developer\.download\.nvidia\.com\/compute\/cuda\/repos/d' /etc/apt/sources.list && \ - sed -i '/developer\.download\.nvidia\.com\/compute\/cuda\/repos/d' /etc/apt/sources.list.d/* +FROM nvidia/cuda:12.1.1-base-ubuntu22.04 RUN apt-get update && \ - apt-get install -y python3.9-full && \ + apt-get install -y python3.10-full && \ apt-get install -y iptables iproute2 iputils-ping bash vim net-tools procps build-essential kmod # get wireguard from builder image -COPY --from=builder /root/.cargo/bin/boringtun /usr/bin/ +COPY --from=builder /root/.cargo/bin/boringtun-cli /usr/bin/ COPY --from=builder /usr/bin/wg* /usr/bin/ diff --git 
a/envs/vpn/docker/gui/build_files/entrypoint_functions.bash b/envs/vpn/docker/gui/build_files/entrypoint_functions.bash index 161811904..f8f139314 100755 --- a/envs/vpn/docker/gui/build_files/entrypoint_functions.bash +++ b/envs/vpn/docker/gui/build_files/entrypoint_functions.bash @@ -173,7 +173,7 @@ start_wireguard(){ then # need to remove kernel module if loaded for using boringtun alternative [ -n "$(lsmod | grep wireguard)" ] && rmmod wireguard - WG_SUDO=1 boringtun wg0 && RUNNING_BORINGTUN=true + WG_SUDO=1 boringtun-cli wg0 && RUNNING_BORINGTUN=true fi # need wireguard to continue @@ -198,7 +198,7 @@ stop_wireguard() { echo "info: stopping wireguard" if "$RUNNING_BORINGTUN" then - pkill boringtun + pkill boringtun-cli if [ "$?" -ne 0 ] then echo "CRITICAL: could not stop wireguard boringtun" @@ -215,7 +215,7 @@ stop_wireguard() { fi fi - [ -z "$RUNNING_BORINGTUN" ] && ip link delete dev wg0 || pkill boringtun + [ -z "$RUNNING_BORINGTUN" ] && ip link delete dev wg0 || pkill boringtun-cli } diff --git a/envs/vpn/docker/mqtt/build_files/Dockerfile b/envs/vpn/docker/mqtt/build_files/Dockerfile index 4cb5e1b13..c3a8d64ab 100644 --- a/envs/vpn/docker/mqtt/build_files/Dockerfile +++ b/envs/vpn/docker/mqtt/build_files/Dockerfile @@ -12,13 +12,10 @@ RUN apk update && apk add git alpine-sdk linux-headers # - glitch: alpine's apk cargo is too old vs boringtun's cargo packages dependencies, need to install # up to date rust/cargo RUN curl https://sh.rustup.rs -sSf | sh -s -- -y && . /root/.cargo/env && \ - cargo install --locked --bin boringtun --version ~0.4.0 boringtun + cargo install --locked --bin boringtun-cli --version ~0.5.2 boringtun-cli + +RUN apt-get install -y wireguard-tools -# install needed wireguard-tools -ENV WITH_WGQUICK=yes -RUN git clone https://git.zx2c4.com/wireguard-tools && \ - make -C wireguard-tools/src && \ - make -C wireguard-tools/src install # # docker image for mosquitto server @@ -53,7 +50,7 @@ RUN [ "$CONTAINER_USER" != 'root' ] && useradd -m -d /home/$CONTAINER_USER \ RUN apk update && apk add iptables bash runuser vim net-tools procps alpine-sdk linux-headers kmod # get wireguard from builder image -COPY --from=builder /root/.cargo/bin/boringtun /usr/bin/ +COPY --from=builder /root/.cargo/bin/boringtun-cli /usr/bin/ COPY --from=builder /usr/bin/wg* /usr/bin/ COPY ./entrypoint*.bash / diff --git a/envs/vpn/docker/mqtt/build_files/entrypoint_functions.bash b/envs/vpn/docker/mqtt/build_files/entrypoint_functions.bash index 161811904..f8f139314 100755 --- a/envs/vpn/docker/mqtt/build_files/entrypoint_functions.bash +++ b/envs/vpn/docker/mqtt/build_files/entrypoint_functions.bash @@ -173,7 +173,7 @@ start_wireguard(){ then # need to remove kernel module if loaded for using boringtun alternative [ -n "$(lsmod | grep wireguard)" ] && rmmod wireguard - WG_SUDO=1 boringtun wg0 && RUNNING_BORINGTUN=true + WG_SUDO=1 boringtun-cli wg0 && RUNNING_BORINGTUN=true fi # need wireguard to continue @@ -198,7 +198,7 @@ stop_wireguard() { echo "info: stopping wireguard" if "$RUNNING_BORINGTUN" then - pkill boringtun + pkill boringtun-cli if [ "$?" 
-ne 0 ] then echo "CRITICAL: could not stop wireguard boringtun" @@ -215,7 +215,7 @@ stop_wireguard() { fi fi - [ -z "$RUNNING_BORINGTUN" ] && ip link delete dev wg0 || pkill boringtun + [ -z "$RUNNING_BORINGTUN" ] && ip link delete dev wg0 || pkill boringtun-cli } diff --git a/envs/vpn/docker/node/build_files/entrypoint_functions.bash b/envs/vpn/docker/node/build_files/entrypoint_functions.bash index 161811904..f8f139314 100755 --- a/envs/vpn/docker/node/build_files/entrypoint_functions.bash +++ b/envs/vpn/docker/node/build_files/entrypoint_functions.bash @@ -173,7 +173,7 @@ start_wireguard(){ then # need to remove kernel module if loaded for using boringtun alternative [ -n "$(lsmod | grep wireguard)" ] && rmmod wireguard - WG_SUDO=1 boringtun wg0 && RUNNING_BORINGTUN=true + WG_SUDO=1 boringtun-cli wg0 && RUNNING_BORINGTUN=true fi # need wireguard to continue @@ -198,7 +198,7 @@ stop_wireguard() { echo "info: stopping wireguard" if "$RUNNING_BORINGTUN" then - pkill boringtun + pkill boringtun-cli if [ "$?" -ne 0 ] then echo "CRITICAL: could not stop wireguard boringtun" @@ -215,7 +215,7 @@ stop_wireguard() { fi fi - [ -z "$RUNNING_BORINGTUN" ] && ip link delete dev wg0 || pkill boringtun + [ -z "$RUNNING_BORINGTUN" ] && ip link delete dev wg0 || pkill boringtun-cli } diff --git a/envs/vpn/docker/researcher/build_files/entrypoint_functions.bash b/envs/vpn/docker/researcher/build_files/entrypoint_functions.bash index 161811904..f8f139314 100755 --- a/envs/vpn/docker/researcher/build_files/entrypoint_functions.bash +++ b/envs/vpn/docker/researcher/build_files/entrypoint_functions.bash @@ -173,7 +173,7 @@ start_wireguard(){ then # need to remove kernel module if loaded for using boringtun alternative [ -n "$(lsmod | grep wireguard)" ] && rmmod wireguard - WG_SUDO=1 boringtun wg0 && RUNNING_BORINGTUN=true + WG_SUDO=1 boringtun-cli wg0 && RUNNING_BORINGTUN=true fi # need wireguard to continue @@ -198,7 +198,7 @@ stop_wireguard() { echo "info: stopping wireguard" if "$RUNNING_BORINGTUN" then - pkill boringtun + pkill boringtun-cli if [ "$?" -ne 0 ] then echo "CRITICAL: could not stop wireguard boringtun" @@ -215,7 +215,7 @@ stop_wireguard() { fi fi - [ -z "$RUNNING_BORINGTUN" ] && ip link delete dev wg0 || pkill boringtun + [ -z "$RUNNING_BORINGTUN" ] && ip link delete dev wg0 || pkill boringtun-cli } diff --git a/envs/vpn/docker/restful/build_files/Dockerfile b/envs/vpn/docker/restful/build_files/Dockerfile index b872cada7..93aa08140 100644 --- a/envs/vpn/docker/restful/build_files/Dockerfile +++ b/envs/vpn/docker/restful/build_files/Dockerfile @@ -12,14 +12,10 @@ RUN apk update && apk add git alpine-sdk linux-headers # - glitch: alpine's apk cargo is too old vs boringtun's cargo packages dependencies, need to install # up to date rust/cargo RUN curl https://sh.rustup.rs -sSf | sh -s -- -y && . 
/root/.cargo/env && \ - cargo install --locked --bin boringtun --version ~0.4.0 boringtun + cargo install --locked --bin boringtun-cli --version ~0.5.2 boringtun-cli # install needed wireguard-tools -ENV WITH_WGQUICK=yes -RUN git clone https://git.zx2c4.com/wireguard-tools && \ - make -C wireguard-tools/src && \ - make -C wireguard-tools/src install - +RUN apt-get install -y wireguard-tools # @@ -65,7 +61,7 @@ RUN apk add --no-cache --virtual .build-deps \ && pip install -r requirements.txt # get wireguard from builder image -COPY --from=builder /root/.cargo/bin/boringtun /usr/bin/ +COPY --from=builder /root/.cargo/bin/boringtun-cli /usr/bin/ COPY --from=builder /usr/bin/wg* /usr/bin/ # Creating working directory diff --git a/envs/vpn/docker/restful/build_files/entrypoint_functions.bash b/envs/vpn/docker/restful/build_files/entrypoint_functions.bash index 161811904..f8f139314 100755 --- a/envs/vpn/docker/restful/build_files/entrypoint_functions.bash +++ b/envs/vpn/docker/restful/build_files/entrypoint_functions.bash @@ -173,7 +173,7 @@ start_wireguard(){ then # need to remove kernel module if loaded for using boringtun alternative [ -n "$(lsmod | grep wireguard)" ] && rmmod wireguard - WG_SUDO=1 boringtun wg0 && RUNNING_BORINGTUN=true + WG_SUDO=1 boringtun-cli wg0 && RUNNING_BORINGTUN=true fi # need wireguard to continue @@ -198,7 +198,7 @@ stop_wireguard() { echo "info: stopping wireguard" if "$RUNNING_BORINGTUN" then - pkill boringtun + pkill boringtun-cli if [ "$?" -ne 0 ] then echo "CRITICAL: could not stop wireguard boringtun" @@ -215,7 +215,7 @@ stop_wireguard() { fi fi - [ -z "$RUNNING_BORINGTUN" ] && ip link delete dev wg0 || pkill boringtun + [ -z "$RUNNING_BORINGTUN" ] && ip link delete dev wg0 || pkill boringtun-cli } diff --git a/envs/vpn/docker/vpnserver/build_files/entrypoint_functions.bash b/envs/vpn/docker/vpnserver/build_files/entrypoint_functions.bash index 161811904..f8f139314 100755 --- a/envs/vpn/docker/vpnserver/build_files/entrypoint_functions.bash +++ b/envs/vpn/docker/vpnserver/build_files/entrypoint_functions.bash @@ -173,7 +173,7 @@ start_wireguard(){ then # need to remove kernel module if loaded for using boringtun alternative [ -n "$(lsmod | grep wireguard)" ] && rmmod wireguard - WG_SUDO=1 boringtun wg0 && RUNNING_BORINGTUN=true + WG_SUDO=1 boringtun-cli wg0 && RUNNING_BORINGTUN=true fi # need wireguard to continue @@ -198,7 +198,7 @@ stop_wireguard() { echo "info: stopping wireguard" if "$RUNNING_BORINGTUN" then - pkill boringtun + pkill boringtun-cli if [ "$?" 
-ne 0 ] then echo "CRITICAL: could not stop wireguard boringtun" @@ -215,7 +215,7 @@ stop_wireguard() { fi fi - [ -z "$RUNNING_BORINGTUN" ] && ip link delete dev wg0 || pkill boringtun + [ -z "$RUNNING_BORINGTUN" ] && ip link delete dev wg0 || pkill boringtun-cli } From f042d8a4f0ae4a222a1ce41e3dba854ce9c6f28e Mon Sep 17 00:00:00 2001 From: Marc Vesin Date: Thu, 25 May 2023 15:35:36 +0200 Subject: [PATCH 15/41] misc typo fix --- envs/vpn/docker/basegpu/build_files/Dockerfile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/envs/vpn/docker/basegpu/build_files/Dockerfile b/envs/vpn/docker/basegpu/build_files/Dockerfile index f2f5af2cc..3498a82c5 100644 --- a/envs/vpn/docker/basegpu/build_files/Dockerfile +++ b/envs/vpn/docker/basegpu/build_files/Dockerfile @@ -12,7 +12,7 @@ RUN apt-get update && apt-get install -y git build-essential cargo RUN cargo install --locked --bin boringtun-cli --version ~0.5.2 boringtun-cli # install needed wireguard-tools -RUN apt-get install wireguard-tools +RUN apt-get install -y wireguard-tools # docker base image for VPN server, Fed-BioMed node and researcher From cd788d4d200cca5e177ccfbb8ed7df62c03f758e Mon Sep 17 00:00:00 2001 From: Marc Vesin Date: Tue, 30 May 2023 13:30:12 +0200 Subject: [PATCH 16/41] WIP upgrade package version - vpn containers mqtt and restful build ok --- envs/vpn/docker/docker-compose.yml | 16 --------------- envs/vpn/docker/mqtt/build_files/Dockerfile | 20 ++++++++----------- .../vpn/docker/restful/build_files/Dockerfile | 19 +++++++----------- 3 files changed, 15 insertions(+), 40 deletions(-) diff --git a/envs/vpn/docker/docker-compose.yml b/envs/vpn/docker/docker-compose.yml index 60437a8dc..d4d50eb44 100644 --- a/envs/vpn/docker/docker-compose.yml +++ b/envs/vpn/docker/docker-compose.yml @@ -70,22 +70,6 @@ x-node2: services: # - # intermediate step : image for building wireguard for base image - # we usually dont want to build or launch this service (only used for building "base") - builder: - container_name: fedbiomed-vpn-builder - hostname: fedbiomed-vpn-builder - build: - context: ./base/build_files - target: builder - image: fedbiomed/vpn-builder - entrypoint: /bin/true - # profiles will handle not-launched-by-default services when - # >=1.28 becomes common https://docs.docker.com/compose/profiles/ - # - # profiles: - # - debug # not started by `docker compose up` except if `--profile debug` - # # intermediate step : base image for building vpn and researcher containers # we usually dont want to launch this service (used for building other services) base: diff --git a/envs/vpn/docker/mqtt/build_files/Dockerfile b/envs/vpn/docker/mqtt/build_files/Dockerfile index c3a8d64ab..714720a0f 100644 --- a/envs/vpn/docker/mqtt/build_files/Dockerfile +++ b/envs/vpn/docker/mqtt/build_files/Dockerfile @@ -1,26 +1,22 @@ # temporary builder image for wireguard tools # - need coherent system version with mosquitto image -# - may need update for properly compiling boringtun (eg: cargo build of failing on `buster-slim`) -FROM alpine:3.14 as builder +# - may need update for properly compiling boringtun +# - 2023-05: `3.18.0` is uptodate version +FROM alpine:3.18.0 as builder -RUN apk update && apk add git alpine-sdk linux-headers +RUN apk update && apk add git alpine-sdk linux-headers cargo # install boringtun userspace implementation of wireguard # -# - match versions of alpine & boringtun : 0.4.0 is now supported by bullseye with current system/cargo packages -# 0.5.2 not yet supported -# - glitch: alpine's apk cargo is too old 
vs boringtun's cargo packages dependencies, need to install -# up to date rust/cargo -RUN curl https://sh.rustup.rs -sSf | sh -s -- -y && . /root/.cargo/env && \ - cargo install --locked --bin boringtun-cli --version ~0.5.2 boringtun-cli - -RUN apt-get install -y wireguard-tools +# - match versions of debian & boringtun : up to date 0.5.2 is now ok for this system +RUN cargo install --locked --bin boringtun-cli --version ~0.5.2 boringtun-cli +RUN apk add wireguard-tools # # docker image for mosquitto server # -FROM eclipse-mosquitto +FROM eclipse-mosquitto:2.0.15 ARG CONTAINER_GID ARG CONTAINER_UID diff --git a/envs/vpn/docker/restful/build_files/Dockerfile b/envs/vpn/docker/restful/build_files/Dockerfile index 93aa08140..98aaaa91e 100644 --- a/envs/vpn/docker/restful/build_files/Dockerfile +++ b/envs/vpn/docker/restful/build_files/Dockerfile @@ -1,27 +1,22 @@ # temporary builder image for wireguard tools # - need coherent system version with mosquitto image -# - may need update for properly compiling boringtun (eg: cargo build of package failing on `buster-slim`) -FROM python:3.10-alpine as builder +# - may need update for properly compiling boringtun +FROM python:3.10-alpine3.18 as builder -RUN apk update && apk add git alpine-sdk linux-headers +RUN apk update && apk add git alpine-sdk linux-headers cargo # install boringtun userspace implementation of wireguard # -# - match versions of alpine & boringtun : 0.4.0 is now supported by bullseye with current system/cargo packages -# 0.5.2 not yet supported -# - glitch: alpine's apk cargo is too old vs boringtun's cargo packages dependencies, need to install -# up to date rust/cargo -RUN curl https://sh.rustup.rs -sSf | sh -s -- -y && . /root/.cargo/env && \ - cargo install --locked --bin boringtun-cli --version ~0.5.2 boringtun-cli +# - match versions of debian & boringtun : up to date 0.5.2 is now ok for this system +RUN cargo install --locked --bin boringtun-cli --version ~0.5.2 boringtun-cli # install needed wireguard-tools -RUN apt-get install -y wireguard-tools - +RUN apk add wireguard-tools # # docker image for restful server # -FROM python:3.10-alpine +FROM python:3.10-alpine3.18 ENV PYTHONUNBUFFERED 1 ARG CONTAINER_GID From e23d0a2308bfc3023f585812577e1d4274090e4c Mon Sep 17 00:00:00 2001 From: Marc Vesin Date: Wed, 31 May 2023 13:43:04 +0200 Subject: [PATCH 17/41] misc test --- README.md | 1 + 1 file changed, 1 insertion(+) diff --git a/README.md b/README.md index 5ea21595a..464df08c5 100644 --- a/README.md +++ b/README.md @@ -660,3 +660,4 @@ ${FEDBIOMED_DIR}/scripts/fedbiomed_mpc (node | researcher) *WORKDIR* exec --help ${FEDBIOMED_DIR}/scripts/fedbiomed_mpc (node | researcher) *WORKDIR* shamir-server-key --help ``` + From 239205dcc1dcea2c8ea4c23a4fdec9b4eb61b028 Mon Sep 17 00:00:00 2001 From: Marc Vesin Date: Mon, 5 Jun 2023 13:38:02 +0200 Subject: [PATCH 18/41] rewrite equivalent of gitlabpages `feature/448-package-commits` WIP branch for documenting new packages versions before github migration --- .../0-basic-software-installation.md | 21 ++++++------ .../deployment/deployment-vpn-node2.md | 16 ++++----- docs/user-guide/deployment/deployment-vpn.md | 28 +++++++-------- .../installation/windows-installation.md | 34 +++++++++++-------- 4 files changed, 53 insertions(+), 46 deletions(-) diff --git a/docs/tutorials/installation/0-basic-software-installation.md b/docs/tutorials/installation/0-basic-software-installation.md index 848000b88..96991daa4 100644 --- a/docs/tutorials/installation/0-basic-software-installation.md +++ 
b/docs/tutorials/installation/0-basic-software-installation.md @@ -19,7 +19,7 @@ This tutorial gives steps for installing Fed-BioMed components (network, node, r Fed-BioMed is developed and tested under up to date version of : -* **Linux Fedora**, should also work or be easily ported under most Linux distributions (Ubuntu, etc.) +* **Linux Ubuntu and Fedora**, should also work or be easily ported under most Linux distributions * **MacOS** Check specific guidelines for installation on [Windows 10](../../user-guide/installation/windows-installation.md). @@ -30,12 +30,12 @@ Check specific guidelines for installation on [Windows 10](../../user-guide/inst The following packages are required for Fed-BioMed : * [`docker`](https://docs.docker.com) - * [`docker-compose`](https://docs.docker.com/compose) + * [`docker compose` v2](https://docs.docker.com/compose): don't confuse it with the obsolete `docker-compose` v1 * [`conda`](https://conda.io) * `git` -### Install docker and docker-compose +### Install `docker` and `docker compose` #### Linux Fedora @@ -62,20 +62,21 @@ Check with the account used to run Fed-BioMed that docker is up and can be used $ docker run hello-world ``` -Install docker-compose and git : +Install `docker compose` and `git` : ``` -$ sudo dnf install -y docker-compose git +$ sudo dnf install -y docker-compose-plugin git ``` #### MacOS -Install docker and docker-compose choosing one of the available options for example : +Install `docker` and `docker compose` choosing one of the available options for example : * official full [Docker Desktop](https://docs.docker.com/desktop/mac/install/) installation process, please check product license * your favorite third party package manager for example : - * macports provides [docker](https://ports.macports.org/port/docker/) [docker-compose](https://ports.macports.org/port/docker-compose/) and [git](https://ports.macports.org/port/git/) ports - * homebrew provides [docker](https://formulae.brew.sh/formula/docker) [docker-compose](https://formulae.brew.sh/formula/docker-compose) and [git](https://formulae.brew.sh/formula/git) formulae - + * macports provides [docker](https://ports.macports.org/port/docker/) and [git](https://ports.macports.org/port/git/) ports + * homebrew provides [docker](https://formulae.brew.sh/formula/docker) and [git](https://formulae.brew.sh/formula/git) formulae + * don't use the `docker-compose` v1 from macports or homebrew ! + * for `docker compose` v2, adapt the [manual plugin install procedure](https://docs.docker.com/compose/install/linux/#install-the-plugin-manually) by picking the [proper binary for your hardware](https://github.com/docker/compose/releases) Check with the account used to run Fed-BioMed docker is up and can be used by the current account without error : @@ -85,7 +86,7 @@ $ docker run hello-world #### Other -Connect under an account with administrator privileges, install [`docker`](https://docs.docker.com/engine/install), ensure it is started and give docker privilege for the account used for running Fed-BioMed. Also install [`docker-compose`](https://docs.docker.com/compose/install/) and `git` +Connect under an account with administrator privileges, install [`docker`](https://docs.docker.com/engine/install), ensure it is started and give docker privilege for the account used for running Fed-BioMed. 
Also install [`docker compose` v2](https://docs.docker.com/compose/install/) and `git` Check with the account used to run Fed-BioMed docker is up and can be used by the current account without error : diff --git a/docs/user-guide/deployment/deployment-vpn-node2.md b/docs/user-guide/deployment/deployment-vpn-node2.md index 3cf43ab43..85a2c3137 100644 --- a/docs/user-guide/deployment/deployment-vpn-node2.md +++ b/docs/user-guide/deployment/deployment-vpn-node2.md @@ -34,13 +34,13 @@ For each node, choose a **unique** node tag (eg: *NODE2TAG* in this example) tha ```bash [user@server $] cd ${FEDBIOMED_DIR}/envs/vpn/docker - [user@server $] docker-compose exec vpnserver bash -ci 'python ./vpn/bin/configure_peer.py genconf node NODE2TAG' + [user@server $] docker compose exec vpnserver bash -ci 'python ./vpn/bin/configure_peer.py genconf node NODE2TAG' ``` The configuration file is now available on the server side in path `${FEDBIOMED_DIR}/envs/vpn/docker/vpnserver/run_mounts/config/config_peers/node/NODE2TAG/config.env` or with command : ```bash - [user@server $] docker-compose exec vpnserver cat /config/config_peers/node/NODE2TAG/config.env + [user@server $] docker compose exec vpnserver cat /config/config_peers/node/NODE2TAG/config.env ``` * copy the configuration file from the server side **to the node side** via a secure channel, to path `/tmp/config2.env` on the node. @@ -58,13 +58,13 @@ For each node, choose a **unique** node tag (eg: *NODE2TAG* in this example) tha * start `node2` container ```bash - [user@node $] docker-compose up -d node2 + [user@node $] docker compose up -d node2 ``` * retrieve the `node2`'s publickey ```bash - [user@node $] docker-compose exec node2 wg show wg0 public-key | tr -d '\r' >/tmp/publickey2-nodeside + [user@node $] docker compose exec node2 wg show wg0 public-key | tr -d '\r' >/tmp/publickey2-nodeside ``` * copy the public key from the node side **to the server side** via a secure channel (see above), to path `/tmp/publickey2-serverside` on the server. 
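    For example — as an illustration only, since the choice of secure channel is site-specific — `scp` can be used for this copy; the account `admin` and host `server.example.org` below are placeholders, not values defined by this deployment:

    ```bash
    # hypothetical example: substitute your own account and server hostname
    [user@node $] scp /tmp/publickey2-nodeside admin@server.example.org:/tmp/publickey2-serverside
    ```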
@@ -73,7 +73,7 @@ For each node, choose a **unique** node tag (eg: *NODE2TAG* in this example) tha ```bash [user@server $] cd ${FEDBIOMED_DIR}/envs/vpn/docker - [user@server $] docker-compose exec vpnserver bash -ci "python ./vpn/bin/configure_peer.py add node NODE2TAG $(cat /tmp/publickey2-serverside)" + [user@server $] docker compose exec vpnserver bash -ci "python ./vpn/bin/configure_peer.py add node NODE2TAG $(cat /tmp/publickey2-serverside)" ``` * check containers running on the node side @@ -101,7 +101,7 @@ For each node, choose a **unique** node tag (eg: *NODE2TAG* in this example) tha * do initial node configuration ```bash - [user@node $] docker-compose exec -u $(id -u) node2 bash -ci 'export FORCE_SECURE_AGGREGATION='${FORCE_SECURE_AGGREGATION}'&& export MPSPDZ_IP=$VPN_IP && export MPSPDZ_PORT=14002 && export MQTT_BROKER=10.220.0.2 && export MQTT_BROKER_PORT=1883 && export UPLOADS_URL="http://10.220.0.3:8000/upload/" && export PYTHONPATH=/fedbiomed && export FEDBIOMED_NO_RESET=1 && eval "$(conda shell.bash hook)" && conda activate fedbiomed-node && ENABLE_TRAINING_PLAN_APPROVAL=True ALLOW_DEFAULT_TRAINING_PLANS=True ./scripts/fedbiomed_run node configuration create' + [user@node $] docker compose exec -u $(id -u) node2 bash -ci 'export FORCE_SECURE_AGGREGATION='${FORCE_SECURE_AGGREGATION}'&& export MPSPDZ_IP=$VPN_IP && export MPSPDZ_PORT=14002 && export MQTT_BROKER=10.220.0.2 && export MQTT_BROKER_PORT=1883 && export UPLOADS_URL="http://10.220.0.3:8000/upload/" && export PYTHONPATH=/fedbiomed && export FEDBIOMED_NO_RESET=1 && eval "$(conda shell.bash hook)" && conda activate fedbiomed-node && ENABLE_TRAINING_PLAN_APPROVAL=True ALLOW_DEFAULT_TRAINING_PLANS=True ./scripts/fedbiomed_run node configuration create' ``` @@ -110,7 +110,7 @@ Optionally launch the node GUI : * start `gui2` container ```bash - [user@node $] docker-compose up -d gui2 + [user@node $] docker compose up -d gui2 ``` * check containers running on the node side @@ -144,7 +144,7 @@ Setup the node by sharing datasets and by launching the Fed-BioMed node: * connect to the container ```bash - [user@node $] docker-compose exec -u $(id -u) node2 bash -ci 'export MPSPDZ_IP=$VPN_IP && export MPSPDZ_PORT=14002 && export MQTT_BROKER=10.220.0.2 && export MQTT_BROKER_PORT=1883 && export UPLOADS_URL="http://10.220.0.3:8000/upload/" && export PYTHONPATH=/fedbiomed && export FEDBIOMED_NO_RESET=1 && eval "$(conda shell.bash hook)" && conda activate fedbiomed-node && bash' + [user@node $] docker compose exec -u $(id -u) node2 bash -ci 'export MPSPDZ_IP=$VPN_IP && export MPSPDZ_PORT=14002 && export MQTT_BROKER=10.220.0.2 && export MQTT_BROKER_PORT=1883 && export UPLOADS_URL="http://10.220.0.3:8000/upload/" && export PYTHONPATH=/fedbiomed && export FEDBIOMED_NO_RESET=1 && eval "$(conda shell.bash hook)" && conda activate fedbiomed-node && bash' ``` * start the Fed-BioMed node, for example in background: diff --git a/docs/user-guide/deployment/deployment-vpn.md b/docs/user-guide/deployment/deployment-vpn.md index 81b7a2ae2..5142dc290 100644 --- a/docs/user-guide/deployment/deployment-vpn.md +++ b/docs/user-guide/deployment/deployment-vpn.md @@ -18,7 +18,7 @@ This tutorial details a deployment scenario where: ## Requirements !!! info "Supported operating systems and software requirements" - Supported operating systems for containers/VPN deployment include **Fedora 35**, **Ubuntu 20.04**, recent **MacOS X**, **Windows 10** 21H2 with WSL2 using Ubuntu-20.04 distribution. Also requires **docker-compose >= 1.27.0**. 
+ Supported operating systems for containers/VPN deployment include **Fedora 38**, **Ubuntu 22.04 LTS**. Should also work for most recent Linux, **MacOS X**, **Windows 11** with WSL2 using Ubuntu-22.04 distribution. Also requires **docker compose >= 2.0**. Check here for [detailed requirements](https://github.com/fedbiomed/fedbiomed/blob/master/envs/vpn/README.md#requirements). @@ -79,7 +79,7 @@ It covers the initial server deployment, including build, configuration and laun For the rest of this tutorial `${FEDBIOMED_DIR}` represents the base directory of the clone. - `docker-compose` commands need to be launched from `${FEDBIOMED_DIR}/envs/vpn/docker directory`. + `docker compose` commands need to be launched from `${FEDBIOMED_DIR}/envs/vpn/docker directory`. * clean running containers, containers files, temporary files @@ -155,7 +155,7 @@ For each node, choose a **unique** node tag (eg: *NODETAG* in this example) that For the rest of this tutorial `${FEDBIOMED_DIR}` represents the base directory of the clone. - `docker-compose` commands need to be launched from `${FEDBIOMED_DIR}/envs/vpn/docker directory`. + `docker compose` commands need to be launched from `${FEDBIOMED_DIR}/envs/vpn/docker directory`. * clean running containers, containers files, temporary files (skip that step if node and server run on the same machine) @@ -180,13 +180,13 @@ For each node, choose a **unique** node tag (eg: *NODETAG* in this example) that ```bash [user@server $] cd ${FEDBIOMED_DIR}/envs/vpn/docker - [user@server $] docker-compose exec vpnserver bash -ci 'python ./vpn/bin/configure_peer.py genconf node NODETAG' + [user@server $] docker compose exec vpnserver bash -ci 'python ./vpn/bin/configure_peer.py genconf node NODETAG' ``` The configuration file is now available on the server side in path `${FEDBIOMED_DIR}/envs/vpn/docker/vpnserver/run_mounts/config/config_peers/node/NODETAG/config.env` or with command : ```bash - [user@server $] docker-compose exec vpnserver cat /config/config_peers/node/NODETAG/config.env + [user@server $] docker compose exec vpnserver cat /config/config_peers/node/NODETAG/config.env ``` * copy the configuration file from the server side **to the node side** via a secure channel, to path `/tmp/config.env` on the node. @@ -204,13 +204,13 @@ For each node, choose a **unique** node tag (eg: *NODETAG* in this example) that * start `node` container ```bash - [user@node $] docker-compose up -d node + [user@node $] docker compose up -d node ``` * retrieve the `node`'s publickey ```bash - [user@node $] docker-compose exec node wg show wg0 public-key | tr -d '\r' >/tmp/publickey-nodeside + [user@node $] docker compose exec node wg show wg0 public-key | tr -d '\r' >/tmp/publickey-nodeside ``` * copy the public key from the node side **to the server side** via a secure channel (see above), to path `/tmp/publickey-serverside` on the server. 
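    Optionally — this is not part of the official procedure, only a quick sanity check — confirm that the node container and its WireGuard interface are up before exchanging keys:

    ```bash
    # optional check: the node service is running and the wg0 interface exists
    [user@node $] docker compose ps node
    [user@node $] docker compose exec node wg show wg0
    ```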
@@ -219,7 +219,7 @@ For each node, choose a **unique** node tag (eg: *NODETAG* in this example) that ```bash [user@server $] cd ${FEDBIOMED_DIR}/envs/vpn/docker - [user@server $] docker-compose exec vpnserver bash -ci "python ./vpn/bin/configure_peer.py add node NODETAG $(cat /tmp/publickey-serverside)" + [user@server $] docker compose exec vpnserver bash -ci "python ./vpn/bin/configure_peer.py add node NODETAG $(cat /tmp/publickey-serverside)" ``` * check containers running on the node side @@ -247,7 +247,7 @@ For each node, choose a **unique** node tag (eg: *NODETAG* in this example) that * do initial node configuration ```bash - [user@node $] docker-compose exec -u $(id -u) node bash -ci 'export FORCE_SECURE_AGGREGATION='${FORCE_SECURE_AGGREGATION}'&& export MPSPDZ_IP=$VPN_IP && export MPSPDZ_PORT=14001 && export MQTT_BROKER=10.220.0.2 && export MQTT_BROKER_PORT=1883 && export UPLOADS_URL="http://10.220.0.3:8000/upload/" && export PYTHONPATH=/fedbiomed && export FEDBIOMED_NO_RESET=1 && eval "$(conda shell.bash hook)" && conda activate fedbiomed-node && ENABLE_TRAINING_PLAN_APPROVAL=True ALLOW_DEFAULT_TRAINING_PLANS=True ./scripts/fedbiomed_run node configuration create' + [user@node $] docker compose exec -u $(id -u) node bash -ci 'export FORCE_SECURE_AGGREGATION='${FORCE_SECURE_AGGREGATION}'&& export MPSPDZ_IP=$VPN_IP && export MPSPDZ_PORT=14001 && export MQTT_BROKER=10.220.0.2 && export MQTT_BROKER_PORT=1883 && export UPLOADS_URL="http://10.220.0.3:8000/upload/" && export PYTHONPATH=/fedbiomed && export FEDBIOMED_NO_RESET=1 && eval "$(conda shell.bash hook)" && conda activate fedbiomed-node && ENABLE_TRAINING_PLAN_APPROVAL=True ALLOW_DEFAULT_TRAINING_PLANS=True ./scripts/fedbiomed_run node configuration create' ``` @@ -281,7 +281,7 @@ Optionally launch the node GUI : * start `gui` container ```bash - [user@node $] docker-compose up -d gui + [user@node $] docker compose up -d gui ``` * check containers running on the node side @@ -336,7 +336,7 @@ Setup the node by sharing datasets and by launching the Fed-BioMed node: ```bash [user@node $] cd ${FEDBIOMED_DIR}/envs/vpn/docker - [user@node $] docker-compose exec -u $(id -u) node bash -ci 'export MPSPDZ_IP=$VPN_IP && export MPSPDZ_PORT=14001 && export MQTT_BROKER=10.220.0.2 && export MQTT_BROKER_PORT=1883 && export UPLOADS_URL="http://10.220.0.3:8000/upload/" && export PYTHONPATH=/fedbiomed && export FEDBIOMED_NO_RESET=1 && eval "$(conda shell.bash hook)" && conda activate fedbiomed-node && bash' + [user@node $] docker compose exec -u $(id -u) node bash -ci 'export MPSPDZ_IP=$VPN_IP && export MPSPDZ_PORT=14001 && export MQTT_BROKER=10.220.0.2 && export MQTT_BROKER_PORT=1883 && export UPLOADS_URL="http://10.220.0.3:8000/upload/" && export PYTHONPATH=/fedbiomed && export FEDBIOMED_NO_RESET=1 && eval "$(conda shell.bash hook)" && conda activate fedbiomed-node && bash' ``` * start the Fed-BioMed node, for example in background: @@ -398,7 +398,7 @@ Optionally use the researcher container's command line instead of the Jupyter no ```bash [user@server $] cd ${FEDBIOMED_DIR}/envs/vpn/docker - [user@server $] docker-compose exec -u $(id -u) researcher bash -ci 'export MPSPDZ_IP=$VPN_IP && export MPSPDZ_PORT=14000 && export MQTT_BROKER=10.220.0.2 && export MQTT_BROKER_PORT=1883 && export UPLOADS_URL="http://10.220.0.3:8000/upload/" && export PYTHONPATH=/fedbiomed && export FEDBIOMED_NO_RESET=1 && eval "$(conda shell.bash hook)" && conda activate fedbiomed-researcher && bash' + [user@server $] docker compose exec -u $(id -u) researcher bash -ci 
'export MPSPDZ_IP=$VPN_IP && export MPSPDZ_PORT=14000 && export MQTT_BROKER=10.220.0.2 && export MQTT_BROKER_PORT=1883 && export UPLOADS_URL="http://10.220.0.3:8000/upload/" && export PYTHONPATH=/fedbiomed && export FEDBIOMED_NO_RESET=1 && eval "$(conda shell.bash hook)" && conda activate fedbiomed-researcher && bash' ``` * launch a command, for example a training: @@ -421,7 +421,7 @@ Some possible management commands after initial deployment include: * check the VPN peers known from the VPN server ```bash - [user@server $] ( cd ${FEDBIOMED_DIR}/envs/vpn/docker ; docker-compose exec vpnserver bash -ci "python ./vpn/bin/configure_peer.py list" ) + [user@server $] ( cd ${FEDBIOMED_DIR}/envs/vpn/docker ; docker compose exec vpnserver bash -ci "python ./vpn/bin/configure_peer.py list" ) type id prefix peers ---------- ----------- ------------- ------------------------------------------------ management mqtt 10.220.0.2/32 ['1exampleofdummykey12345abcdef6789ghijklmnop='] @@ -470,7 +470,7 @@ Some possible management commands after initial deployment include: ```bash [user@node $] ${FEDBIOMED_DIR}/scripts/fedbiomed_vpn stop node gui - [user@node $] ( cd ${FEDBIOMED_DIR}/envs/vpn/docker ; docker-compose up -d node gui ) + [user@node $] ( cd ${FEDBIOMED_DIR}/envs/vpn/docker ; docker compose up -d node gui ) ``` VPN configurations and container files are kept unchanged when restarting containers. diff --git a/docs/user-guide/installation/windows-installation.md b/docs/user-guide/installation/windows-installation.md index 88b5d6efe..6a782f856 100644 --- a/docs/user-guide/installation/windows-installation.md +++ b/docs/user-guide/installation/windows-installation.md @@ -75,7 +75,7 @@ user@wsl-ubuntu$ ## Step 3: docker -Requirement : docker and docker-compose +Requirement : `docker` and `docker compose` Open an administrator session in WSL Ubuntu : ``` @@ -92,18 +92,23 @@ Alternative 2 : docker engine - install [docker engine](https://docs.docker.com/engine/install/ubuntu/) with as admin (root) account in WSL Ubuntu. Please note that `docker container run hello-world` will not work until we complete the steps below - install docker compose -``` -root@wsl-ubuntu# apt install -y docker-compose -``` + + ``` + root@wsl-ubuntu# apt install -y docker-compose-plugin + ``` + If not available from `apt` try a [manual installation](https://docs.docker.com/compose/install/linux/#install-the-plugin-manually) + - if you use an account named `USER` under Ubuntu, authorize it to use docker by typing under an admin (root) account in WSL Ubuntu : -``` -root@wsl-ubuntu# adduser USER docker -``` + + ``` + root@wsl-ubuntu# adduser USER docker + ``` - open a new WSL Ubuntu terminal so that it is authorized to use docker - at each Ubuntu restart, launch docker daemon -``` -root@wsl-ubuntu# nohup dockerd & -``` + + ``` + root@wsl-ubuntu# nohup dockerd & + ``` Check that you can use docker with your user account under Ubuntu : ``` @@ -118,9 +123,10 @@ Requirement : conda installed in Ubuntu and configured for your user account * install [Anaconda](https://docs.anaconda.com/anaconda/install/linux/) under Ubuntu, using your user account * during installation, answer *Yes* to question *“Do you wish the installer to initialize Anaconda3 by running conda init?”* * activate conda for your Ubuntu session -``` -user@wsl-ubuntu$ source ~/.bashrc -``` + + ``` + user@wsl-ubuntu$ source ~/.bashrc + ``` ## Step 5: Fed-BioMed @@ -188,7 +194,7 @@ Error --> fatal: could not set 'core.filemode' to 'false' 4. Save the file and shoutdown WSL -5. 
Ralaunch Ubuntu WSL +5. Relaunch Ubuntu WSL If the problem still persists, you may try restarting the machine and then execute git clone command. From 413a4f94c62fce2d9ac5a9c9cb578e05e0924163 Mon Sep 17 00:00:00 2001 From: Marc Vesin Date: Tue, 6 Jun 2023 07:55:19 +0200 Subject: [PATCH 19/41] misc update README for manually building containers node/researcher --- envs/vpn/README.md | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/envs/vpn/README.md b/envs/vpn/README.md index 640881e66..d238aea03 100644 --- a/envs/vpn/README.md +++ b/envs/vpn/README.md @@ -198,12 +198,12 @@ Run this only at first launch of container or after cleaning : * build container ```bash [user@node $] CONTAINER_UID=$(id -u) CONTAINER_GID=$(id -g) CONTAINER_USER=$(id -un | sed 's/[^[:alnum:]]/_/g') CONTAINER_GROUP=$(id -gn | sed 's/[^[:alnum:]]/_/g') docker compose build basenode -[user@node $] CONTAINER_UID=$(id -u) CONTAINER_GID=$(id -g) CONTAINER_USER=$(id -un | sed 's/[^[:alnum:]]/_/g') CONTAINER_GROUP=$(id -gn | sed 's/[^[:alnum:]]/_/g') docker compose build node +[user@node $] MPSPDZ_URL="$(grep -A 2 modules/MP-SPDZ ../../../.gitmodules | grep 'url =' | awk '{ print $3 }')" MPSPDZ_COMMIT="$(cd ../../../modules ; git ls-tree HEAD | grep MP-SPDZ | awk '{ print $3 }')" CONTAINER_UID=$(id -u) CONTAINER_GID=$(id -g) CONTAINER_USER=$(id -un | sed 's/[^[:alnum:]]/_/g') CONTAINER_GROUP=$(id -gn | sed 's/[^[:alnum:]]/_/g') docker compose build node ``` Alternative: build an (thiner) image without GPU support if you will never use it ```bash [user@build $] CONTAINER_UID=$(id -u) CONTAINER_GID=$(id -g) CONTAINER_USER=$(id -un | sed 's/[^[:alnum:]]/_/g') CONTAINER_GROUP=$(id -gn | sed 's/[^[:alnum:]]/_/g') docker compose build basenode-no-gpu -[user@build $] CONTAINER_UID=$(id -u) CONTAINER_GID=$(id -g) CONTAINER_USER=$(id -un | sed 's/[^[:alnum:]]/_/g') CONTAINER_GROUP=$(id -gn | sed 's/[^[:alnum:]]/_/g') docker compose build node +[user@build $] MPSPDZ_URL="$(grep -A 2 modules/MP-SPDZ ../../../.gitmodules | grep 'url =' | awk '{ print $3 }')" MPSPDZ_COMMIT="$(cd ../../../modules ; git ls-tree HEAD | grep MP-SPDZ | awk '{ print $3 }')" CONTAINER_UID=$(id -u) CONTAINER_GID=$(id -g) CONTAINER_USER=$(id -un | sed 's/[^[:alnum:]]/_/g') CONTAINER_GROUP=$(id -gn | sed 's/[^[:alnum:]]/_/g') docker compose build node ``` Then follow the common instructions for nodes (below). 
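A possible convenience — shown only as a sketch, the helper script below is not part of the repository — is to wrap the variable prelude shared by the build commands above into a small script run from `${FEDBIOMED_DIR}/envs/vpn/docker`:

```bash
#!/usr/bin/env bash
# hypothetical helper, not shipped with Fed-BioMed:
# export the build-time variables used by the commands above, then build the
# requested compose target(s), e.g. `./build_containers.bash basenode node`
set -euo pipefail

export MPSPDZ_URL="$(grep -A 2 modules/MP-SPDZ ../../../.gitmodules | grep 'url =' | awk '{ print $3 }')"
export MPSPDZ_COMMIT="$(cd ../../../modules ; git ls-tree HEAD | grep MP-SPDZ | awk '{ print $3 }')"
export CONTAINER_UID=$(id -u)
export CONTAINER_GID=$(id -g)
export CONTAINER_USER=$(id -un | sed 's/[^[:alnum:]]/_/g')
export CONTAINER_GROUP=$(id -gn | sed 's/[^[:alnum:]]/_/g')

docker compose build "$@"
```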
@@ -229,12 +229,12 @@ On the build machine * build container ```bash [user@build $] CONTAINER_UID=1234 CONTAINER_GID=1234 CONTAINER_USER=fedbiomed CONTAINER_GROUP=fedbiomed docker compose build basenode -[user@build $] CONTAINER_UID=1234 CONTAINER_GID=1234 CONTAINER_USER=fedbiomed CONTAINER_GROUP=fedbiomed docker compose build node +[user@build $] MPSPDZ_URL="$(grep -A 2 modules/MP-SPDZ ../../../.gitmodules | grep 'url =' | awk '{ print $3 }')" MPSPDZ_COMMIT="$(cd ../../../modules ; git ls-tree HEAD | grep MP-SPDZ | awk '{ print $3 }')" CONTAINER_UID=1234 CONTAINER_GID=1234 CONTAINER_USER=fedbiomed CONTAINER_GROUP=fedbiomed docker compose build node ``` Alternative: build an (thiner) image without GPU support if you will never use it ```bash [user@build $] CONTAINER_UID=1234 CONTAINER_GID=1234 CONTAINER_USER=fedbiomed CONTAINER_GROUP=fedbiomed docker compose build basenode-no-gpu -[user@build $] CONTAINER_UID=1234 CONTAINER_GID=1234 CONTAINER_USER=fedbiomed CONTAINER_GROUP=fedbiomed docker compose build node +[user@build $] MPSPDZ_URL="$(grep -A 2 modules/MP-SPDZ ../../../.gitmodules | grep 'url =' | awk '{ print $3 }')" MPSPDZ_COMMIT="$(cd ../../../modules ; git ls-tree HEAD | grep MP-SPDZ | awk '{ print $3 }')" CONTAINER_UID=1234 CONTAINER_GID=1234 CONTAINER_USER=fedbiomed CONTAINER_GROUP=fedbiomed docker compose build node ``` * save image for container ```bash @@ -468,7 +468,7 @@ Run this only at first launch of container or after cleaning : * build container ```bash [user@researcher $] CONTAINER_UID=$(id -u) CONTAINER_GID=$(id -g) CONTAINER_USER=$(id -un | sed 's/[^[:alnum:]]/_/g') CONTAINER_GROUP=$(id -gn | sed 's/[^[:alnum:]]/_/g') docker compose build base -[user@researcher $] CONTAINER_UID=$(id -u) CONTAINER_GID=$(id -g) CONTAINER_USER=$(id -un | sed 's/[^[:alnum:]]/_/g') CONTAINER_GROUP=$(id -gn | sed 's/[^[:alnum:]]/_/g') docker compose build researcher +[user@researcher $] MPSPDZ_URL="$(grep -A 2 modules/MP-SPDZ ../../../.gitmodules | grep 'url =' | awk '{ print $3 }')" MPSPDZ_COMMIT="$(cd ../../../modules ; git ls-tree HEAD | grep MP-SPDZ | awk '{ print $3 }')" CONTAINER_UID=$(id -u) CONTAINER_GID=$(id -g) CONTAINER_USER=$(id -un | sed 's/[^[:alnum:]]/_/g') CONTAINER_GROUP=$(id -gn | sed 's/[^[:alnum:]]/_/g') docker compose build researcher ``` * generate VPN client for this container (see above in vpnserver) * configure the VPN client for this container @@ -825,6 +825,6 @@ Different values at build time and runtime is also supported by `vpnserver` `mqt Example : build a researcher container with a default user/group `fedbiomed` (id `1234`), run it with the same account as the account on the researcher machine. 
```bash -[user@researcher $] CONTAINER_UID=1234 CONTAINER_GID=1234 CONTAINER_USER=fedbiomed CONTAINER_GROUP=fedbiomed docker compose build researcher +[user@researcher $] MPSPDZ_URL="$(grep -A 2 modules/MP-SPDZ ../../../.gitmodules | grep 'url =' | awk '{ print $3 }')" MPSPDZ_COMMIT="$(cd ../../../modules ; git ls-tree HEAD | grep MP-SPDZ | awk '{ print $3 }')" CONTAINER_UID=1234 CONTAINER_GID=1234 CONTAINER_USER=fedbiomed CONTAINER_GROUP=fedbiomed docker compose build researcher [user@researcher $] CONTAINER_UID=$(id -u) CONTAINER_GID=$(id -g) CONTAINER_USER=$(id -un | sed 's/[^[:alnum:]]/_/g') CONTAINER_GROUP=$(id -gn | sed 's/[^[:alnum:]]/_/g') docker compose up -d researcher ``` From 4ea0aae9582a00ade2dda3350a21d5fe553cdb9c Mon Sep 17 00:00:00 2001 From: Marc Vesin Date: Tue, 6 Jun 2023 16:40:46 +0200 Subject: [PATCH 20/41] WIP upgrade dependencies: step back boringtun from 0.5.2 to 0.4.0 --- envs/vpn/docker/base/build_files/Dockerfile | 7 ++++--- envs/vpn/docker/basegpu/build_files/Dockerfile | 7 ++++--- envs/vpn/docker/gui/build_files/entrypoint_functions.bash | 6 +++--- envs/vpn/docker/mqtt/build_files/Dockerfile | 7 ++++--- envs/vpn/docker/mqtt/build_files/entrypoint_functions.bash | 6 +++--- envs/vpn/docker/node/build_files/entrypoint_functions.bash | 6 +++--- .../researcher/build_files/entrypoint_functions.bash | 6 +++--- envs/vpn/docker/restful/build_files/Dockerfile | 7 ++++--- .../docker/restful/build_files/entrypoint_functions.bash | 6 +++--- .../docker/vpnserver/build_files/entrypoint_functions.bash | 6 +++--- 10 files changed, 34 insertions(+), 30 deletions(-) diff --git a/envs/vpn/docker/base/build_files/Dockerfile b/envs/vpn/docker/base/build_files/Dockerfile index 67a6c4bad..0e3e85ca4 100644 --- a/envs/vpn/docker/base/build_files/Dockerfile +++ b/envs/vpn/docker/base/build_files/Dockerfile @@ -8,11 +8,12 @@ RUN apt-get update && apt-get install -y git build-essential curl # install boringtun userspace implementation of wireguard # -# - match versions of debian & boringtun : up to date 0.5.2 is now ok for bullseye +# - match versions of debian & boringtun : up to date 0.5.2 compiles ok for bullseye, but could not have it working +# Continue with 0.4.0 # - glitch: bullseye's apt-get cargo is too old vs boringtun's cargo packages dependencies, need to install # up to date rust/cargo RUN curl https://sh.rustup.rs -sSf | sh -s -- -y && . 
/root/.cargo/env && \ - cargo install --locked --bin boringtun-cli --version ~0.5.2 boringtun-cli + cargo install --locked --bin boringtun --version ~0.4.0 boringtun # install needed wireguard-tools RUN apt-get install -y wireguard-tools @@ -24,5 +25,5 @@ FROM python:3.10-slim-bullseye RUN apt-get update && apt-get install -y iptables iproute2 iputils-ping bash vim net-tools procps build-essential kmod # get wireguard from builder image -COPY --from=builder /root/.cargo/bin/boringtun-cli /usr/bin/ +COPY --from=builder /root/.cargo/bin/boringtun /usr/bin/ COPY --from=builder /usr/bin/wg* /usr/bin/ diff --git a/envs/vpn/docker/basegpu/build_files/Dockerfile b/envs/vpn/docker/basegpu/build_files/Dockerfile index 3498a82c5..10b7441e6 100644 --- a/envs/vpn/docker/basegpu/build_files/Dockerfile +++ b/envs/vpn/docker/basegpu/build_files/Dockerfile @@ -8,8 +8,9 @@ RUN apt-get update && apt-get install -y git build-essential cargo # install boringtun userspace implementation of wireguard # -# - match versions of debian & boringtun : up to date 0.5.2 is now ok for bullseye -RUN cargo install --locked --bin boringtun-cli --version ~0.5.2 boringtun-cli +# - match versions of debian & boringtun : up to date 0.5.2 compiles ok for bullseye, but could not have it working +# Continue with 0.4.0 +RUN cargo install --locked --bin boringtun --version ~0.4.0 boringtun # install needed wireguard-tools RUN apt-get install -y wireguard-tools @@ -24,5 +25,5 @@ RUN apt-get update && \ apt-get install -y iptables iproute2 iputils-ping bash vim net-tools procps build-essential kmod # get wireguard from builder image -COPY --from=builder /root/.cargo/bin/boringtun-cli /usr/bin/ +COPY --from=builder /root/.cargo/bin/boringtun /usr/bin/ COPY --from=builder /usr/bin/wg* /usr/bin/ diff --git a/envs/vpn/docker/gui/build_files/entrypoint_functions.bash b/envs/vpn/docker/gui/build_files/entrypoint_functions.bash index f8f139314..161811904 100755 --- a/envs/vpn/docker/gui/build_files/entrypoint_functions.bash +++ b/envs/vpn/docker/gui/build_files/entrypoint_functions.bash @@ -173,7 +173,7 @@ start_wireguard(){ then # need to remove kernel module if loaded for using boringtun alternative [ -n "$(lsmod | grep wireguard)" ] && rmmod wireguard - WG_SUDO=1 boringtun-cli wg0 && RUNNING_BORINGTUN=true + WG_SUDO=1 boringtun wg0 && RUNNING_BORINGTUN=true fi # need wireguard to continue @@ -198,7 +198,7 @@ stop_wireguard() { echo "info: stopping wireguard" if "$RUNNING_BORINGTUN" then - pkill boringtun-cli + pkill boringtun if [ "$?" 
-ne 0 ] then echo "CRITICAL: could not stop wireguard boringtun" @@ -215,7 +215,7 @@ stop_wireguard() { fi fi - [ -z "$RUNNING_BORINGTUN" ] && ip link delete dev wg0 || pkill boringtun-cli + [ -z "$RUNNING_BORINGTUN" ] && ip link delete dev wg0 || pkill boringtun } diff --git a/envs/vpn/docker/mqtt/build_files/Dockerfile b/envs/vpn/docker/mqtt/build_files/Dockerfile index 714720a0f..1d716468d 100644 --- a/envs/vpn/docker/mqtt/build_files/Dockerfile +++ b/envs/vpn/docker/mqtt/build_files/Dockerfile @@ -8,8 +8,9 @@ RUN apk update && apk add git alpine-sdk linux-headers cargo # install boringtun userspace implementation of wireguard # -# - match versions of debian & boringtun : up to date 0.5.2 is now ok for this system -RUN cargo install --locked --bin boringtun-cli --version ~0.5.2 boringtun-cli +# - match versions of debian & boringtun : up to date 0.5.2 compiles ok for bullseye, but could not have it working +# Continue with 0.4.0 +RUN cargo install --locked --bin boringtun --version ~0.4.0 boringtun RUN apk add wireguard-tools @@ -46,7 +47,7 @@ RUN [ "$CONTAINER_USER" != 'root' ] && useradd -m -d /home/$CONTAINER_USER \ RUN apk update && apk add iptables bash runuser vim net-tools procps alpine-sdk linux-headers kmod # get wireguard from builder image -COPY --from=builder /root/.cargo/bin/boringtun-cli /usr/bin/ +COPY --from=builder /root/.cargo/bin/boringtun /usr/bin/ COPY --from=builder /usr/bin/wg* /usr/bin/ COPY ./entrypoint*.bash / diff --git a/envs/vpn/docker/mqtt/build_files/entrypoint_functions.bash b/envs/vpn/docker/mqtt/build_files/entrypoint_functions.bash index f8f139314..161811904 100755 --- a/envs/vpn/docker/mqtt/build_files/entrypoint_functions.bash +++ b/envs/vpn/docker/mqtt/build_files/entrypoint_functions.bash @@ -173,7 +173,7 @@ start_wireguard(){ then # need to remove kernel module if loaded for using boringtun alternative [ -n "$(lsmod | grep wireguard)" ] && rmmod wireguard - WG_SUDO=1 boringtun-cli wg0 && RUNNING_BORINGTUN=true + WG_SUDO=1 boringtun wg0 && RUNNING_BORINGTUN=true fi # need wireguard to continue @@ -198,7 +198,7 @@ stop_wireguard() { echo "info: stopping wireguard" if "$RUNNING_BORINGTUN" then - pkill boringtun-cli + pkill boringtun if [ "$?" -ne 0 ] then echo "CRITICAL: could not stop wireguard boringtun" @@ -215,7 +215,7 @@ stop_wireguard() { fi fi - [ -z "$RUNNING_BORINGTUN" ] && ip link delete dev wg0 || pkill boringtun-cli + [ -z "$RUNNING_BORINGTUN" ] && ip link delete dev wg0 || pkill boringtun } diff --git a/envs/vpn/docker/node/build_files/entrypoint_functions.bash b/envs/vpn/docker/node/build_files/entrypoint_functions.bash index f8f139314..161811904 100755 --- a/envs/vpn/docker/node/build_files/entrypoint_functions.bash +++ b/envs/vpn/docker/node/build_files/entrypoint_functions.bash @@ -173,7 +173,7 @@ start_wireguard(){ then # need to remove kernel module if loaded for using boringtun alternative [ -n "$(lsmod | grep wireguard)" ] && rmmod wireguard - WG_SUDO=1 boringtun-cli wg0 && RUNNING_BORINGTUN=true + WG_SUDO=1 boringtun wg0 && RUNNING_BORINGTUN=true fi # need wireguard to continue @@ -198,7 +198,7 @@ stop_wireguard() { echo "info: stopping wireguard" if "$RUNNING_BORINGTUN" then - pkill boringtun-cli + pkill boringtun if [ "$?" 
-ne 0 ] then echo "CRITICAL: could not stop wireguard boringtun" @@ -215,7 +215,7 @@ stop_wireguard() { fi fi - [ -z "$RUNNING_BORINGTUN" ] && ip link delete dev wg0 || pkill boringtun-cli + [ -z "$RUNNING_BORINGTUN" ] && ip link delete dev wg0 || pkill boringtun } diff --git a/envs/vpn/docker/researcher/build_files/entrypoint_functions.bash b/envs/vpn/docker/researcher/build_files/entrypoint_functions.bash index f8f139314..161811904 100755 --- a/envs/vpn/docker/researcher/build_files/entrypoint_functions.bash +++ b/envs/vpn/docker/researcher/build_files/entrypoint_functions.bash @@ -173,7 +173,7 @@ start_wireguard(){ then # need to remove kernel module if loaded for using boringtun alternative [ -n "$(lsmod | grep wireguard)" ] && rmmod wireguard - WG_SUDO=1 boringtun-cli wg0 && RUNNING_BORINGTUN=true + WG_SUDO=1 boringtun wg0 && RUNNING_BORINGTUN=true fi # need wireguard to continue @@ -198,7 +198,7 @@ stop_wireguard() { echo "info: stopping wireguard" if "$RUNNING_BORINGTUN" then - pkill boringtun-cli + pkill boringtun if [ "$?" -ne 0 ] then echo "CRITICAL: could not stop wireguard boringtun" @@ -215,7 +215,7 @@ stop_wireguard() { fi fi - [ -z "$RUNNING_BORINGTUN" ] && ip link delete dev wg0 || pkill boringtun-cli + [ -z "$RUNNING_BORINGTUN" ] && ip link delete dev wg0 || pkill boringtun } diff --git a/envs/vpn/docker/restful/build_files/Dockerfile b/envs/vpn/docker/restful/build_files/Dockerfile index 98aaaa91e..606d4f597 100644 --- a/envs/vpn/docker/restful/build_files/Dockerfile +++ b/envs/vpn/docker/restful/build_files/Dockerfile @@ -7,8 +7,9 @@ RUN apk update && apk add git alpine-sdk linux-headers cargo # install boringtun userspace implementation of wireguard # -# - match versions of debian & boringtun : up to date 0.5.2 is now ok for this system -RUN cargo install --locked --bin boringtun-cli --version ~0.5.2 boringtun-cli +# - match versions of debian & boringtun : up to date 0.5.2 compiles ok for bullseye, but could not have it working +# Continue with 0.4.0 +RUN cargo install --locked --bin boringtun --version ~0.4.0 boringtun # install needed wireguard-tools RUN apk add wireguard-tools @@ -56,7 +57,7 @@ RUN apk add --no-cache --virtual .build-deps \ && pip install -r requirements.txt # get wireguard from builder image -COPY --from=builder /root/.cargo/bin/boringtun-cli /usr/bin/ +COPY --from=builder /root/.cargo/bin/boringtun /usr/bin/ COPY --from=builder /usr/bin/wg* /usr/bin/ # Creating working directory diff --git a/envs/vpn/docker/restful/build_files/entrypoint_functions.bash b/envs/vpn/docker/restful/build_files/entrypoint_functions.bash index f8f139314..161811904 100755 --- a/envs/vpn/docker/restful/build_files/entrypoint_functions.bash +++ b/envs/vpn/docker/restful/build_files/entrypoint_functions.bash @@ -173,7 +173,7 @@ start_wireguard(){ then # need to remove kernel module if loaded for using boringtun alternative [ -n "$(lsmod | grep wireguard)" ] && rmmod wireguard - WG_SUDO=1 boringtun-cli wg0 && RUNNING_BORINGTUN=true + WG_SUDO=1 boringtun wg0 && RUNNING_BORINGTUN=true fi # need wireguard to continue @@ -198,7 +198,7 @@ stop_wireguard() { echo "info: stopping wireguard" if "$RUNNING_BORINGTUN" then - pkill boringtun-cli + pkill boringtun if [ "$?" 
-ne 0 ] then echo "CRITICAL: could not stop wireguard boringtun" @@ -215,7 +215,7 @@ stop_wireguard() { fi fi - [ -z "$RUNNING_BORINGTUN" ] && ip link delete dev wg0 || pkill boringtun-cli + [ -z "$RUNNING_BORINGTUN" ] && ip link delete dev wg0 || pkill boringtun } diff --git a/envs/vpn/docker/vpnserver/build_files/entrypoint_functions.bash b/envs/vpn/docker/vpnserver/build_files/entrypoint_functions.bash index f8f139314..161811904 100755 --- a/envs/vpn/docker/vpnserver/build_files/entrypoint_functions.bash +++ b/envs/vpn/docker/vpnserver/build_files/entrypoint_functions.bash @@ -173,7 +173,7 @@ start_wireguard(){ then # need to remove kernel module if loaded for using boringtun alternative [ -n "$(lsmod | grep wireguard)" ] && rmmod wireguard - WG_SUDO=1 boringtun-cli wg0 && RUNNING_BORINGTUN=true + WG_SUDO=1 boringtun wg0 && RUNNING_BORINGTUN=true fi # need wireguard to continue @@ -198,7 +198,7 @@ stop_wireguard() { echo "info: stopping wireguard" if "$RUNNING_BORINGTUN" then - pkill boringtun-cli + pkill boringtun if [ "$?" -ne 0 ] then echo "CRITICAL: could not stop wireguard boringtun" @@ -215,7 +215,7 @@ stop_wireguard() { fi fi - [ -z "$RUNNING_BORINGTUN" ] && ip link delete dev wg0 || pkill boringtun-cli + [ -z "$RUNNING_BORINGTUN" ] && ip link delete dev wg0 || pkill boringtun } From eff40bac96f01d60aed7e4ee1b7fdf413ac97b23 Mon Sep 17 00:00:00 2001 From: Marc Vesin Date: Tue, 13 Jun 2023 07:47:59 +0200 Subject: [PATCH 21/41] misc change gitlab => github --- docs/developer/ci.md | 2 +- docs/developer/definition-of-done.md | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/docs/developer/ci.md b/docs/developer/ci.md index a123c2402..ae9dd829b 100644 --- a/docs/developer/ci.md +++ b/docs/developer/ci.md @@ -39,7 +39,7 @@ CI pipeline currently contains : To view CI test output and logs: -- view the pull request in gitlab (select `Pull requests` in top bar, then select your pull request). +- view the pull request in github (select `Pull requests` in top bar, then select your pull request). - click on the `Checks` at the top bar of the pull request and select the `Check` that you want to display. - Click on the jobs to see its console output. diff --git a/docs/developer/definition-of-done.md b/docs/developer/definition-of-done.md index 7302445e9..829467886 100644 --- a/docs/developer/definition-of-done.md +++ b/docs/developer/definition-of-done.md @@ -13,7 +13,7 @@ The Definition of Done is a set of items that must be completed and quality meas ## Review of the code -The reviewer can question any aspect of the increment in coherence with [Usage and Tools](./usage_and_tools.md#merge-request), exchange with the developer (good practice : leave a gitlab trace of the exchanges), approve it or not. +The reviewer can question any aspect of the increment in coherence with [Usage and Tools](./usage_and_tools.md#merge-request), exchange with the developer (good practice : leave a github trace of the exchanges), approve it or not. - Be specific in the pull request about what to review (critical code or properties of the code). - Coding style: inspire from PEP-8. 
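To make the PEP-8 item above actionable during review — purely as an illustration, `pycodestyle` is not a declared Fed-BioMed dependency and the line length below is an arbitrary choice — a style checker can be run locally on the package:

```bash
# illustrative only: install the checker separately, then scan the package sources
pip install pycodestyle
pycodestyle --max-line-length=120 fedbiomed/
```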
From 32f83f1c0b32751f0d0605e0d98e573fb8515f12 Mon Sep 17 00:00:00 2001 From: Marc Vesin Date: Mon, 19 Jun 2023 16:06:08 +0200 Subject: [PATCH 22/41] WIP nosetests to pytest --- .github/actions/unit-tests/action.yml | 2 +- envs/development/conda/fedbiomed-node-macosx.yaml | 3 ++- envs/development/conda/fedbiomed-node.yaml | 1 + envs/development/conda/fedbiomed-researcher-macosx.yaml | 3 ++- envs/development/conda/fedbiomed-researcher.yaml | 1 + envs/vpn/conda/fedbiomed-node.yaml | 1 + envs/vpn/conda/fedbiomed-researcher.yaml | 1 + tests/test_insert_untested_python_files_here.py | 4 ++-- 8 files changed, 11 insertions(+), 5 deletions(-) diff --git a/.github/actions/unit-tests/action.yml b/.github/actions/unit-tests/action.yml index 6fae16d12..d625f0858 100644 --- a/.github/actions/unit-tests/action.yml +++ b/.github/actions/unit-tests/action.yml @@ -24,7 +24,7 @@ runs: - name: Run unit tests run: | - PYTHONPATH=${PYTHONPATH:-$PWD} conda run -n fedbiomed-researcher nosetests -w ${{ inputs.test-dir }} --cover-xml --cover-erase --with-coverage --cover-package=fedbiomed --with-xunit -v --process-restartworker + PYTHONPATH=${PYTHONPATH:-$PWD} conda run -n fedbiomed-researcher pytest --cov=fedbiomed --cov-report term --cov-report xml:coverage.xml ${{ inputs.test-dir }} shell: bash - name: Get coverage rate diff --git a/envs/development/conda/fedbiomed-node-macosx.yaml b/envs/development/conda/fedbiomed-node-macosx.yaml index bc37887a6..cb4d6d91c 100644 --- a/envs/development/conda/fedbiomed-node-macosx.yaml +++ b/envs/development/conda/fedbiomed-node-macosx.yaml @@ -16,7 +16,8 @@ dependencies: - jupyter - ipython # tests - - pytest >6.2.2 + - pytest ~=7.2.0 + - pytest-cov ~=4.1.0 - tinydb >=4.4.0,<5.0.0 - tabulate >=0.8.9,<0.9.0 # code diff --git a/envs/development/conda/fedbiomed-node.yaml b/envs/development/conda/fedbiomed-node.yaml index e644f08a6..50107ee28 100644 --- a/envs/development/conda/fedbiomed-node.yaml +++ b/envs/development/conda/fedbiomed-node.yaml @@ -17,6 +17,7 @@ dependencies: - ipython ~=8.13.2 # tests - pytest ~=7.2.0 + - pytest-cov ~=4.1.0 - tinydb ~=4.7.1 - tabulate >=0.9.0,<0.10.0 # code diff --git a/envs/development/conda/fedbiomed-researcher-macosx.yaml b/envs/development/conda/fedbiomed-researcher-macosx.yaml index f5580ac76..d4d5e6155 100644 --- a/envs/development/conda/fedbiomed-researcher-macosx.yaml +++ b/envs/development/conda/fedbiomed-researcher-macosx.yaml @@ -16,7 +16,8 @@ dependencies: - jupyter - ipython # tests - - pytest >6.2.2 + - pytest ~=7.2.0 + - pytest-cov ~=4.1.0 - tinydb >=4.4.0,<5.0.0 - tabulate >=0.8.9,<0.9.0 - nose diff --git a/envs/development/conda/fedbiomed-researcher.yaml b/envs/development/conda/fedbiomed-researcher.yaml index 85cbeadd7..1e91cd6c5 100644 --- a/envs/development/conda/fedbiomed-researcher.yaml +++ b/envs/development/conda/fedbiomed-researcher.yaml @@ -16,6 +16,7 @@ dependencies: - ipython ~=8.13.2 # tests - pytest ~=7.2.0 + - pytest-cov ~=4.1.0 - tinydb ~=4.7.1 - tabulate >=0.9.0,<0.10.0 # tools diff --git a/envs/vpn/conda/fedbiomed-node.yaml b/envs/vpn/conda/fedbiomed-node.yaml index e644f08a6..50107ee28 100644 --- a/envs/vpn/conda/fedbiomed-node.yaml +++ b/envs/vpn/conda/fedbiomed-node.yaml @@ -17,6 +17,7 @@ dependencies: - ipython ~=8.13.2 # tests - pytest ~=7.2.0 + - pytest-cov ~=4.1.0 - tinydb ~=4.7.1 - tabulate >=0.9.0,<0.10.0 # code diff --git a/envs/vpn/conda/fedbiomed-researcher.yaml b/envs/vpn/conda/fedbiomed-researcher.yaml index 85cbeadd7..1e91cd6c5 100644 --- a/envs/vpn/conda/fedbiomed-researcher.yaml +++ 
b/envs/vpn/conda/fedbiomed-researcher.yaml @@ -16,6 +16,7 @@ dependencies: - ipython ~=8.13.2 # tests - pytest ~=7.2.0 + - pytest-cov ~=4.1.0 - tinydb ~=4.7.1 - tabulate >=0.9.0,<0.10.0 # tools diff --git a/tests/test_insert_untested_python_files_here.py b/tests/test_insert_untested_python_files_here.py index d932404f4..633d8caaf 100644 --- a/tests/test_insert_untested_python_files_here.py +++ b/tests/test_insert_untested_python_files_here.py @@ -1,8 +1,8 @@ # -# nosetests and cobertura results do not show real test coverage +# pytest and cobertura results do not show real test coverage # but only coverage figures of imported files # -# by including here all .py files of fedbiomed, we force nosetests +# by including here all .py files of fedbiomed, we force pytest # to do the right test coverage calculations # # TODO: this file may be automatically crafted on ci plateform From 2c7f08a074cbd98b122e1c7d19c5258305002ca3 Mon Sep 17 00:00:00 2001 From: Marc Vesin Date: Mon, 19 Jun 2023 16:23:47 +0200 Subject: [PATCH 23/41] WIP pytest coverage - try clean conda cache --- .github/actions/unit-tests/action.yml | 1 + 1 file changed, 1 insertion(+) diff --git a/.github/actions/unit-tests/action.yml b/.github/actions/unit-tests/action.yml index d625f0858..f46f8ada7 100644 --- a/.github/actions/unit-tests/action.yml +++ b/.github/actions/unit-tests/action.yml @@ -18,6 +18,7 @@ runs: - name: Install researcher dependencies run: | source ~/.bashrc + conda clean -i -y ./scripts/configure_conda researcher ./scripts/fedbiomed_run network shell: bash From 24b5d3b1d80c832aa6bdaa97a45abd7785b4feb4 Mon Sep 17 00:00:00 2001 From: Marc Vesin Date: Mon, 19 Jun 2023 17:11:08 +0200 Subject: [PATCH 24/41] WIP nosetests to pytest - misc fix --- .github/actions/unit-tests/action.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/actions/unit-tests/action.yml b/.github/actions/unit-tests/action.yml index f46f8ada7..5e5e1b9b8 100644 --- a/.github/actions/unit-tests/action.yml +++ b/.github/actions/unit-tests/action.yml @@ -25,7 +25,7 @@ runs: - name: Run unit tests run: | - PYTHONPATH=${PYTHONPATH:-$PWD} conda run -n fedbiomed-researcher pytest --cov=fedbiomed --cov-report term --cov-report xml:coverage.xml ${{ inputs.test-dir }} + PYTHONPATH=${PYTHONPATH:-$PWD} conda run -n fedbiomed-researcher pytest --cov=fedbiomed --cov-report term --cov-report xml:${{ inputs.test-dir }}/coverage.xml ${{ inputs.test-dir }} shell: bash - name: Get coverage rate From 1b3320a8bae6cdedb58d1f149add36cb1ee6aaa5 Mon Sep 17 00:00:00 2001 From: Marc Vesin Date: Mon, 19 Jun 2023 17:35:59 +0200 Subject: [PATCH 25/41] WIP nosetests to pytest - add verbose output --- .github/actions/unit-tests/action.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/actions/unit-tests/action.yml b/.github/actions/unit-tests/action.yml index 5e5e1b9b8..fb97e784b 100644 --- a/.github/actions/unit-tests/action.yml +++ b/.github/actions/unit-tests/action.yml @@ -25,7 +25,7 @@ runs: - name: Run unit tests run: | - PYTHONPATH=${PYTHONPATH:-$PWD} conda run -n fedbiomed-researcher pytest --cov=fedbiomed --cov-report term --cov-report xml:${{ inputs.test-dir }}/coverage.xml ${{ inputs.test-dir }} + PYTHONPATH=${PYTHONPATH:-$PWD} conda run -n fedbiomed-researcher pytest -v --cov=fedbiomed --cov-report term --cov-report xml:${{ inputs.test-dir }}/coverage.xml ${{ inputs.test-dir }} shell: bash - name: Get coverage rate From 56fa9d0367e9db19b8b99502cb427af0f031a2e7 Mon Sep 17 00:00:00 2001 From: Marc Vesin Date: Mon, 19 
Jun 2023 18:05:19 +0200 Subject: [PATCH 26/41] nosetests to pytest - adapt readme --- tests/README.md | 15 +++++++-------- 1 file changed, 7 insertions(+), 8 deletions(-) diff --git a/tests/README.md b/tests/README.md index 5205a84bc..0d888522e 100644 --- a/tests/README.md +++ b/tests/README.md @@ -2,10 +2,9 @@ ### material -tests are run with [nosetests](https://nose.readthedocs.io/en/latest/testing.html), which uses unittests as test framework. -nosetests provide additional features: -- more assert capabilities -- coverage report, which provides output integrated on our Continous Integration platform +Tests are run with [pytest](https://pytest.org) using `unittests` as test framework (no specific extension). +We use pytest for these additional features: +- coverage report, which provides output integrated with `codecov` ### how to run the tests @@ -26,7 +25,7 @@ python -m unittest -v or ``` cd tests -nosetests -v +pytest -v ``` Because of the code structure (environ singleton), the tests **must** run @@ -43,14 +42,14 @@ or ``` cd tests -nosetests --tests=test_XXX.py +pytest ./test_XXX.py ``` * run a specific single test. You must specify all the path to this specific test (test\_file.py:TesctClass.specific\_test\_to_run). Eg: ``` cd tests -nosetests test_message.py:TestMessage.test_dummy_message +pytest test_requests.py::TestRequests::test_request_01_constructor ``` Remarks: **nose** could also be used to run the test (same test files as with @@ -96,7 +95,7 @@ If you want to check the test coverage, you should use: ``` cd tests -nosetests --cover-xml --cover-erase --with-coverage --cover-package=fedbiomed +pytest -v --cov=fedbiomed --cov-report term --cov-report xml:coverage.xml coverage html ``` From b1f59c3f2f5716582ab6dd44f50f0919a9c23389 Mon Sep 17 00:00:00 2001 From: Marc Vesin Date: Tue, 20 Jun 2023 13:49:17 +0200 Subject: [PATCH 27/41] conda files - macos porting: try use same as linux, now - all: remove (apparently) unused GitPython openpyxl JSON-log-formatter --- .../conda/fedbiomed-node-macosx.yaml | 60 ++++++++------- envs/development/conda/fedbiomed-node.yaml | 3 - .../conda/fedbiomed-researcher-macosx.yaml | 75 +++++++++---------- .../conda/fedbiomed-researcher.yaml | 3 - envs/vpn/conda/fedbiomed-node.yaml | 3 - envs/vpn/conda/fedbiomed-researcher.yaml | 3 - 6 files changed, 68 insertions(+), 79 deletions(-) diff --git a/envs/development/conda/fedbiomed-node-macosx.yaml b/envs/development/conda/fedbiomed-node-macosx.yaml index cb4d6d91c..fe1271bf3 100644 --- a/envs/development/conda/fedbiomed-node-macosx.yaml +++ b/envs/development/conda/fedbiomed-node-macosx.yaml @@ -1,7 +1,6 @@ # # environment for fedbiomed-node on macosx # -# it fixes a pb with tensorboard and torch for macosx # # name: fedbiomed-node @@ -11,49 +10,54 @@ channels: dependencies: # minimal environment - - python >=3.9,<3.10 - - pip - - jupyter - - ipython + # python 3.11 recently released 2022-11 and not yet supported by some deps including: torchvision + - python >=3.10,<3.11 + - pip >= 23.0 + - jupyter ~=1.0.0 + - ipython ~=8.13.2 # tests - pytest ~=7.2.0 - pytest-cov ~=4.1.0 - - tinydb >=4.4.0,<5.0.0 - - tabulate >=0.8.9,<0.9.0 + - tinydb ~=4.7.1 + - tabulate >=0.9.0,<0.10.0 # code - - GitPython >=3.1.14,<4.0.0 - - requests >=2.25.1,<3.0.0 - - paho-mqtt >=1.5.1,<2.0.0 - - validators >=0.18.2,<0.19.0 - - tqdm >=4.59.0,<5.0.0 - - git - - packaging >=23.0,<24.0 + - requests ~=2.29.0 + - paho-mqtt ~=1.6.1 + - validators >=0.20.0,<0.21.0 + - tqdm ~=4.65.0 + - git ~=2.40.1 + - packaging ~=23.1 # these two have 
to be aligned - - cryptography ~=39.0 - - pyopenssl ~=23.0 + - cryptography ~=40.0.0 + - pyopenssl ~=23.1.1 # git notebook striper - - nbstripout - - joblib >=1.0.1 - # nn + - nbstripout >=0.6.1,<0.7.0 + - joblib >=1.2.0,<1.3.0 - pip: + # sklearn + # + scipy >= 1.9 from conda-forge needs recent GLIBC thus causes issue 389 with many current systems + # + another option is to install scipy from pip which supports older GLIBC + - scipy >=1.10.0,<1.11.0 + - scikit-learn >=1.2.0,<1.3.0 # nn - - torch >=1.8.0,<2.0.0 - - torchvision >=0.9.0,<0.15.0 - - opacus >=1.2.0,<1.3.0 + # torch 2.x recently released (2023-03) and not yet supported by some deps including: opacus, declearn + - torch ~=1.13.0 + - torchvision >=0.14.0,<0.15.0 + - opacus >=1.4.0,<1.5.0 - monai >=1.1.0,<1.2.0 # other - msgpack ~=1.0 - persist-queue >=0.5.1,<0.6.0 - pytorch-ignite >=0.4.4,<0.5.0 - - pandas >=1.2.3,<2.0.0 - - openpyxl >= 3.0.9,<3.1 - - scikit-learn >=1.0.0,<1.1.0 - - python-minifier ==2.5.0 + # pandas 2.x recently released (2023-04) but few breaking changes + - pandas ~=2.0.1 + - python-minifier ~=2.5.0 # FLamby - git+https://github.com/owkin/FLamby@main # declearn - - declearn[torch] ~= 2.1.0 + - declearn[torch] ~=2.1.0 - gmpy2 >=2.1,< 2.2 #### Notebook-specific packages #### # This section contains packages that are needed only to run specific notebooks - - unet == 0.7.7 + - unet >=0.7.7,<0.8.0 + diff --git a/envs/development/conda/fedbiomed-node.yaml b/envs/development/conda/fedbiomed-node.yaml index 50107ee28..8743da84a 100644 --- a/envs/development/conda/fedbiomed-node.yaml +++ b/envs/development/conda/fedbiomed-node.yaml @@ -21,7 +21,6 @@ dependencies: - tinydb ~=4.7.1 - tabulate >=0.9.0,<0.10.0 # code - - GitPython >=3.1.31,<4.0.0 - requests ~=2.29.0 - paho-mqtt ~=1.6.1 - validators >=0.20.0,<0.21.0 @@ -54,8 +53,6 @@ dependencies: - pytorch-ignite >=0.4.4,<0.5.0 # pandas 2.x recently released (2023-04) but few breaking changes - pandas ~=2.0.1 - - openpyxl >= 3.0.9,<3.1 - - JSON-log-formatter ~=0.5.2 - python-minifier ~=2.5.0 # FLamby - git+https://github.com/owkin/FLamby@main diff --git a/envs/development/conda/fedbiomed-researcher-macosx.yaml b/envs/development/conda/fedbiomed-researcher-macosx.yaml index d4d5e6155..14805af31 100644 --- a/envs/development/conda/fedbiomed-researcher-macosx.yaml +++ b/envs/development/conda/fedbiomed-researcher-macosx.yaml @@ -1,7 +1,6 @@ # # environment for fedbiomed-researcher on macosx # -# it fixes a pb with tensorboard and torch for macosx # # name: fedbiomed-researcher @@ -11,58 +10,56 @@ channels: dependencies: # minimal environment - - python >=3.9,<3.10 - - pip - - jupyter - - ipython + - python >=3.10,<3.11 + - pip >= 23.0 + - jupyter ~=1.0.0 + - ipython ~=8.13.2 # tests - pytest ~=7.2.0 - pytest-cov ~=4.1.0 - - tinydb >=4.4.0,<5.0.0 - - tabulate >=0.8.9,<0.9.0 - - nose - - coverage + - tinydb ~=4.7.1 + - tabulate >=0.9.0,<0.10.0 # tools - - colorama - - pyyaml + - colorama >=0.4.6,<0.5 # code - - GitPython >=3.1.14,<4.0.0 - - requests >=2.25.1,<3.0.0 - - paho-mqtt >=1.5.1,<2.0.0 - - validators >=0.18.2,<0.19.0 - - tqdm >=4.59.0,<5.0.0 - - git - - packaging >=23.0,<24.0 + - requests ~=2.29.0 + - paho-mqtt ~=1.6.1 + - validators >=0.20.0,<0.21.0 + - tqdm ~=4.65.0 + - git ~=2.40.1 + - packaging ~=23.1 # these two have to be aligned - - cryptography ~=39.0 - - pyopenssl ~=23.0 + - cryptography ~=40.0.0 + - pyopenssl ~=23.1.1 # git notebook striper - - nbstripout - - joblib >=1.0.1 - # nn + - nbstripout >=0.6.1,<0.7.0 + - joblib >=1.2.0,<1.3.0 + # other + - itk 
>=5.3.0,<5.4.0 - pip: + # sklearn + # + scipy >= 1.9 from conda-forge needs recent GLIBC thus causes issue 389 with many current systems + # + another option is to install scipy from pip which supports older GLIBC + - scipy >=1.10.0,<1.11.0 + - scikit-learn >=1.2.0,<1.3.0 # nn - - torch >=1.8.0,<2.0.0 - - torchvision >=0.9.0,<0.15.0 - - opacus >=1.2.0,<1.3.0 + # torch 2.x recently released (2023-03) and not yet supported by some deps including: opacus, declearn + - torch ~=1.13.0 + - torchvision >=0.14.0,<0.15.0 + - opacus >=1.4.0,<1.5.0 - monai >=1.1.0,<1.2.0 - # other - - msgpack ~=1.0 - - persist-queue >=0.5.1,<0.6.0 - - pandas >=1.2.3,<2.0.0 - - openpyxl >= 3.0.9,<3.1 - - scikit-learn >=1.0.0,<1.1.0 - - itk - - python-minifier ==2.5.0 - - tensorboard + # pandas 2.x recently released (2023-04) but few breaking changes + - pandas ~=2.0.1 + - tensorboard ~=2.13.0 + - python-minifier ~=2.5.0 # for nbconvert - - jupyter_contrib_nbextensions - - pathvalidate + - jupyter-contrib-nbextensions >=0.7.0,<0.8.0 + - pathvalidate ~=3.0.0 # FLamby - git+https://github.com/owkin/FLamby@main # declearn - - declearn[torch] ~= 2.1.0 + - declearn[torch] ~=2.1.0 - gmpy2 >=2.1,< 2.2 #### Notebook-specific packages #### # This section contains packages that are needed only to run specific notebooks - - unet == 0.7.7 + - unet >=0.7.7,<0.8.0 diff --git a/envs/development/conda/fedbiomed-researcher.yaml b/envs/development/conda/fedbiomed-researcher.yaml index 1e91cd6c5..457c8070d 100644 --- a/envs/development/conda/fedbiomed-researcher.yaml +++ b/envs/development/conda/fedbiomed-researcher.yaml @@ -22,7 +22,6 @@ dependencies: # tools - colorama >=0.4.6,<0.5 # code - - GitPython >=3.1.31,<4.0.0 - requests ~=2.29.0 - paho-mqtt ~=1.6.1 - validators >=0.20.0,<0.21.0 @@ -54,9 +53,7 @@ dependencies: - persist-queue >=0.5.1,<0.6.0 # pandas 2.x recently released (2023-04) but few breaking changes - pandas ~=2.0.1 - - openpyxl >= 3.0.9,<3.1 - tensorboard ~=2.13.0 - - JSON-log-formatter ~=0.5.2 - python-minifier ~=2.5.0 # for nbconvert - jupyter-contrib-nbextensions >=0.7.0,<0.8.0 diff --git a/envs/vpn/conda/fedbiomed-node.yaml b/envs/vpn/conda/fedbiomed-node.yaml index 50107ee28..8743da84a 100644 --- a/envs/vpn/conda/fedbiomed-node.yaml +++ b/envs/vpn/conda/fedbiomed-node.yaml @@ -21,7 +21,6 @@ dependencies: - tinydb ~=4.7.1 - tabulate >=0.9.0,<0.10.0 # code - - GitPython >=3.1.31,<4.0.0 - requests ~=2.29.0 - paho-mqtt ~=1.6.1 - validators >=0.20.0,<0.21.0 @@ -54,8 +53,6 @@ dependencies: - pytorch-ignite >=0.4.4,<0.5.0 # pandas 2.x recently released (2023-04) but few breaking changes - pandas ~=2.0.1 - - openpyxl >= 3.0.9,<3.1 - - JSON-log-formatter ~=0.5.2 - python-minifier ~=2.5.0 # FLamby - git+https://github.com/owkin/FLamby@main diff --git a/envs/vpn/conda/fedbiomed-researcher.yaml b/envs/vpn/conda/fedbiomed-researcher.yaml index 1e91cd6c5..457c8070d 100644 --- a/envs/vpn/conda/fedbiomed-researcher.yaml +++ b/envs/vpn/conda/fedbiomed-researcher.yaml @@ -22,7 +22,6 @@ dependencies: # tools - colorama >=0.4.6,<0.5 # code - - GitPython >=3.1.31,<4.0.0 - requests ~=2.29.0 - paho-mqtt ~=1.6.1 - validators >=0.20.0,<0.21.0 @@ -54,9 +53,7 @@ dependencies: - persist-queue >=0.5.1,<0.6.0 # pandas 2.x recently released (2023-04) but few breaking changes - pandas ~=2.0.1 - - openpyxl >= 3.0.9,<3.1 - tensorboard ~=2.13.0 - - JSON-log-formatter ~=0.5.2 - python-minifier ~=2.5.0 # for nbconvert - jupyter-contrib-nbextensions >=0.7.0,<0.8.0 From 54819c8403963bcb94eeb59154861e0f51173d83 Mon Sep 17 00:00:00 2001 From: Marc Vesin 
Date: Tue, 20 Jun 2023 15:13:40 +0200 Subject: [PATCH 28/41] fix regression error in macos conda yaml --- envs/development/conda/fedbiomed-researcher-macosx.yaml | 3 +++ 1 file changed, 3 insertions(+) diff --git a/envs/development/conda/fedbiomed-researcher-macosx.yaml b/envs/development/conda/fedbiomed-researcher-macosx.yaml index 14805af31..092f82f26 100644 --- a/envs/development/conda/fedbiomed-researcher-macosx.yaml +++ b/envs/development/conda/fedbiomed-researcher-macosx.yaml @@ -48,6 +48,9 @@ dependencies: - torchvision >=0.14.0,<0.15.0 - opacus >=1.4.0,<1.5.0 - monai >=1.1.0,<1.2.0 + # other + - msgpack ~=1.0 + - persist-queue >=0.5.1,<0.6.0 # pandas 2.x recently released (2023-04) but few breaking changes - pandas ~=2.0.1 - tensorboard ~=2.13.0 From 35f6962eaa0b0ff83574dccf8511137c4c1ca696 Mon Sep 17 00:00:00 2001 From: Marc Vesin Date: Tue, 20 Jun 2023 16:48:24 +0200 Subject: [PATCH 29/41] misc macos version notice --- docs/user-guide/deployment/deployment-vpn.md | 2 +- envs/vpn/README.md | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/docs/user-guide/deployment/deployment-vpn.md b/docs/user-guide/deployment/deployment-vpn.md index 5142dc290..ba4becc5a 100644 --- a/docs/user-guide/deployment/deployment-vpn.md +++ b/docs/user-guide/deployment/deployment-vpn.md @@ -18,7 +18,7 @@ This tutorial details a deployment scenario where: ## Requirements !!! info "Supported operating systems and software requirements" - Supported operating systems for containers/VPN deployment include **Fedora 38**, **Ubuntu 22.04 LTS**. Should also work for most recent Linux, **MacOS X**, **Windows 11** with WSL2 using Ubuntu-22.04 distribution. Also requires **docker compose >= 2.0**. + Supported operating systems for containers/VPN deployment include **Fedora 38**, **Ubuntu 22.04 LTS**. Should also work for most recent Linux, **MacOS X 12.6.6 and 13**, **Windows 11** with WSL2 using Ubuntu-22.04 distribution. Also requires **docker compose >= 2.0**. Check here for [detailed requirements](https://github.com/fedbiomed/fedbiomed/blob/master/envs/vpn/README.md#requirements). diff --git a/envs/vpn/README.md b/envs/vpn/README.md index d238aea03..4e064ee9d 100644 --- a/envs/vpn/README.md +++ b/envs/vpn/README.md @@ -19,7 +19,7 @@ Which machine to use ? 
Supported operating systems for using containers : - tested on **Fedora 38**, should work for recent RedHat based Linux - should work for **Ubuntu 22.04 LTS** and recent Debian based Linux - - tested on recent **MacOS X** + - tested on recent **MacOS X**c (12.6.6 and 13) - should work on **Windows 11** with WSL2 using a Ubuntu-22.04 distribution Pre-requisites for using containers : From b47dec4bccbf5d333e1d45dcd508070aa870b545 Mon Sep 17 00:00:00 2001 From: Marc Vesin Date: Thu, 22 Jun 2023 09:53:36 +0200 Subject: [PATCH 30/41] robustify fedbiomed_vpn guess of IP --- scripts/fedbiomed_vpn | 14 ++++++++++---- 1 file changed, 10 insertions(+), 4 deletions(-) diff --git a/scripts/fedbiomed_vpn b/scripts/fedbiomed_vpn index 3e8b8affb..e9159e2d0 100755 --- a/scripts/fedbiomed_vpn +++ b/scripts/fedbiomed_vpn @@ -128,11 +128,17 @@ internal_find_my_ip() { _IP=$(ifconfig eth0 2> /dev/null| grep 'inet '| awk '{print $2}') ;; 5) - _IP=$(ifconfig en0 2> /dev/null| grep 'inet '| awk '{print $2}') + for i in $(seq -w 0 5); do + _IP=$(ifconfig en$i 2> /dev/null| grep 'inet '| awk '{print $2}') + [ -n "$_IP" ] && break + done ;; 6) # check also wireless - _IP=$(ifconfig wlan0 2> /dev/null| grep 'inet '| awk '{print $2}') + for i in $(seq -w 0 5); do + _IP=$(ifconfig wlan$i 2> /dev/null| grep 'inet '| awk '{print $2}') + [ -n "$_IP" ] && break + done ;; # # you may add more heuristics here @@ -163,8 +169,8 @@ find_my_ip() { case $IP in 127.*) - echo "** WARNING: 'vpnserver' will use the localhost IP address." - echo " VPN may not work properly" + echo "** WARNING: 'vpnserver' will use the localhost IP address." >&2 + echo " VPN may not work properly" >&2 ;; esac From a93f2f7e99a5692239652bf4d775dd5622ed16c8 Mon Sep 17 00:00:00 2001 From: Marc Vesin Date: Thu, 22 Jun 2023 10:21:10 +0200 Subject: [PATCH 31/41] misc reobustify fedbiomed_vpn guess IP --- scripts/fedbiomed_vpn | 13 ++++++++----- 1 file changed, 8 insertions(+), 5 deletions(-) diff --git a/scripts/fedbiomed_vpn b/scripts/fedbiomed_vpn index e9159e2d0..7eca06176 100755 --- a/scripts/fedbiomed_vpn +++ b/scripts/fedbiomed_vpn @@ -125,19 +125,22 @@ internal_find_my_ip() { ;; 4) # other cases using ifconfig command - _IP=$(ifconfig eth0 2> /dev/null| grep 'inet '| awk '{print $2}') + for i in $(seq -w 0 5); do + _IP=$(ifconfig eth$i 2> /dev/null| grep 'inet '| awk '{print $2}') + [ -n "$_IP" ] && break + done ;; 5) for i in $(seq -w 0 5); do - _IP=$(ifconfig en$i 2> /dev/null| grep 'inet '| awk '{print $2}') - [ -n "$_IP" ] && break + _IP=$(ifconfig en$i 2> /dev/null| grep 'inet '| awk '{print $2}') + [ -n "$_IP" ] && break done ;; 6) # check also wireless for i in $(seq -w 0 5); do - _IP=$(ifconfig wlan$i 2> /dev/null| grep 'inet '| awk '{print $2}') - [ -n "$_IP" ] && break + _IP=$(ifconfig wlan$i 2> /dev/null| grep 'inet '| awk '{print $2}') + [ -n "$_IP" ] && break done ;; # From 00d28ba9027e23822e95dfb76197ea377a901220 Mon Sep 17 00:00:00 2001 From: Marc Vesin Date: Thu, 22 Jun 2023 12:00:40 +0200 Subject: [PATCH 32/41] fix regression error in conda yaml envs: missing itk for macos --- envs/development/conda/fedbiomed-node-macosx.yaml | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/envs/development/conda/fedbiomed-node-macosx.yaml b/envs/development/conda/fedbiomed-node-macosx.yaml index fe1271bf3..cdb4e88c1 100644 --- a/envs/development/conda/fedbiomed-node-macosx.yaml +++ b/envs/development/conda/fedbiomed-node-macosx.yaml @@ -1,5 +1,5 @@ # -# environment for fedbiomed-node on macosx +# environment for fedbiomed-node on 
macos # # # @@ -33,6 +33,8 @@ dependencies: # git notebook striper - nbstripout >=0.6.1,<0.7.0 - joblib >=1.2.0,<1.3.0 + # other + - itk >=5.3.0,<5.4.0 - pip: # sklearn # + scipy >= 1.9 from conda-forge needs recent GLIBC thus causes issue 389 with many current systems @@ -60,4 +62,3 @@ dependencies: #### Notebook-specific packages #### # This section contains packages that are needed only to run specific notebooks - unet >=0.7.7,<0.8.0 - From fa0f37d18dec8910c9be4e068f67f5396e04e32a Mon Sep 17 00:00:00 2001 From: Marc Vesin Date: Fri, 23 Jun 2023 11:20:48 +0200 Subject: [PATCH 33/41] misc fix for release process while merging hotfix 805 --- RELEASE.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/RELEASE.md b/RELEASE.md index 792c89d09..12ba933f6 100644 --- a/RELEASE.md +++ b/RELEASE.md @@ -78,7 +78,7 @@ git pull -p - or checkout the `release/$RELEASE_TAG` branch and push it again to re-create on the remote ```bash git checkout release/$RELEASE_TAG - git push origin release/$RELEASE_TAG + git push ``` - in github create a pull request for `release/$RELEASE_TAG` to `develop` * one can auto-assign the PR, and doesn't need a review for this PR @@ -196,7 +196,7 @@ Release principle: follow the [gitflow](https://www.atlassian.com/git/tutorials/ ```bash git checkout hotfix/$HOTFIX_NAME - git push origin hotfix/$HOTFIX_NAME + git push ``` - in github create a pull request for `hotfix/$HOTFIX_NAME` to `develop` From 0d68438bfe6f6815149107d8331446ba2af74146 Mon Sep 17 00:00:00 2001 From: Marc Vesin Date: Fri, 23 Jun 2023 18:37:04 +0200 Subject: [PATCH 34/41] update docs for windows --- README.md | 2 +- .../0-basic-software-installation.md | 2 +- .../installation/windows-installation.md | 42 +++++++------------ envs/vpn/README.md | 4 +- 4 files changed, 20 insertions(+), 30 deletions(-) diff --git a/README.md b/README.md index 69a315b2c..36c3c4292 100644 --- a/README.md +++ b/README.md @@ -29,7 +29,7 @@ This README.md file provide a quick start/installation guide for Linux. Full installation instruction are also available at: https://fedbiomed.org/latest/tutorials/installation/0-basic-software-installation/ -An installation guide is also provided for Windows10, which relies on WSL2: https://fedbiomed.org/latest/user-guide/installation/windows-installation/ +An installation guide is also provided for Windows11, which relies on WSL2: https://fedbiomed.org/latest/user-guide/installation/windows-installation/ ### Prerequisites : diff --git a/docs/tutorials/installation/0-basic-software-installation.md b/docs/tutorials/installation/0-basic-software-installation.md index 3f866d0b6..e179040a4 100644 --- a/docs/tutorials/installation/0-basic-software-installation.md +++ b/docs/tutorials/installation/0-basic-software-installation.md @@ -22,7 +22,7 @@ Fed-BioMed is developed and tested under up to date version of : * **Linux Ubuntu and Fedora**, should also work or be easily ported under most Linux distributions * **MacOS** -Check specific guidelines for installation on [Windows 10](../../user-guide/installation/windows-installation.md). +Check specific guidelines for installation on [Windows 11](../../user-guide/installation/windows-installation.md). 
## Software packages diff --git a/docs/user-guide/installation/windows-installation.md b/docs/user-guide/installation/windows-installation.md index 6a782f856..80ae698c5 100644 --- a/docs/user-guide/installation/windows-installation.md +++ b/docs/user-guide/installation/windows-installation.md @@ -1,8 +1,8 @@ # Specific instructions for Windows installation -**Fed-BioMed requires Windows 10 or 11, WSL2 and docker. It can run on a physical machine or a virtual machine.** +**Fed-BioMed requires Windows 11, WSL2 and docker. It can run on a physical machine or a virtual machine.** -This documentation gives the steps for a typical Windows 10 installation, steps may vary depending on your system. +This documentation gives the steps for a typical Windows 11 installation, steps may vary depending on your system. ## Step 0: (optional) virtual machine @@ -21,7 +21,7 @@ Tips : ## Step 1: Windows -Requirement : Windows 10 version 2004 and higher (Build 19041 and higher) or Windows 11 is [needed for WSL2](https://docs.microsoft.com/en-us/windows/wsl/install) and Docker Desktop. +Requirement : **tested under Windows 11** 21H2, though Windows 10 version 2004 and higher (Build 19041 and higher) also [support WSL2](https://docs.microsoft.com/en-us/windows/wsl/install) and Docker Desktop. * **[update](https://support.microsoft.com/en-au/windows/update-windows-3c5ae7fc-9fb6-9af1-1984-b5e0412c556a) Windows** * **reboot** Windows @@ -30,7 +30,7 @@ Requirement: Windows Enterprise, Pro or Education edition (needed for Hyper-V fu Requirement : Hyper-V "Virtual Machine Platform" activation -* **[enable](https://docs.microsoft.com/en-us/virtualization/hyper-v-on-windows/quick-start/enable-hyper-v) Hyper-V** +* **[enable](https://techcommunity.microsoft.com/t5/educator-developer-blog/step-by-step-enabling-hyper-v-for-use-on-windows-11/ba-p/3745905) Hyper-V** * **reboot** Windows @@ -38,7 +38,7 @@ Requirement : Hyper-V "Virtual Machine Platform" activation [WSL](https://docs.microsoft.com/en-us/windows/wsl/install) (Windows Subsystem for Linux) is a tool that allows to run Linux within a Windows system. Version 2 of WSL is needed for docker. -We successfully tested Fed-BioMed with **Ubuntu-20.04** distribution. +We successfully tested Fed-BioMed with **Ubuntu-22.04** distribution. Requirement : WSL version 2 @@ -54,9 +54,9 @@ Requirement : a WSL distribution, eg Ubuntu * **install a distribution** in a Windows command tool ``` -cmd> wsl --install -d Ubuntu +cmd> wsl --install -d Ubuntu-22.04 ``` -* if required by install, [**download and install Linux kernel update**](https://docs.microsoft.com/en-us/windows/wsl/install-manual#step-4---download-the-linux-kernel-update-package) +* if required by install, update the WSL2 kernel: in *main menu > enter 'Windows Update' > select 'Advanced Options' > activate 'Receive updates for other Microsoft products'. Then in Windows Update, install the last version of WSL2. * **reboot** Windows Check that WSL uses version 2 and Ubuntu is installed in a Windows command tool : @@ -77,40 +77,30 @@ user@wsl-ubuntu$ Requirement : `docker` and `docker compose` -Open an administrator session in WSL Ubuntu : -``` -user@wsl-ubuntu$ sudo bash -root@wsl-ubuntu# -``` - -Alternative 1 : Docker Desktop +Install Docker Desktop on the Windows machine : - **install [Docker Desktop](https://hub.docker.com/editions/community/docker-ce-desktop-windows)** in Windows. Check the product license. 
- **reboot** Windows -Alternative 2 : docker engine +Setup docker in your WSL Ubuntu : - - install [docker engine](https://docs.docker.com/engine/install/ubuntu/) with as admin (root) account in WSL Ubuntu. Please note that `docker container run hello-world` will not work until we complete the steps below - - install docker compose + - open an administrator session in WSL Ubuntu : ``` - root@wsl-ubuntu# apt install -y docker-compose-plugin + user@wsl-ubuntu$ sudo bash + root@wsl-ubuntu# ``` - If not available from `apt` try a [manual installation](https://docs.docker.com/compose/install/linux/#install-the-plugin-manually) - + - install [docker engine](https://docs.docker.com/engine/install/ubuntu/) as admin (root) account in WSL Ubuntu. Please note that `docker container run hello-world` will not work until we complete the steps below - if you use an account named `USER` under Ubuntu, authorize it to use docker by typing under an admin (root) account in WSL Ubuntu : ``` root@wsl-ubuntu# adduser USER docker ``` - - open a new WSL Ubuntu terminal so that it is authorized to use docker - - at each Ubuntu restart, launch docker daemon - ``` - root@wsl-ubuntu# nohup dockerd & - ``` + - open a **new** WSL Ubuntu terminal so that it is authorized to use docker Check that you can use docker with your user account under Ubuntu : + ``` user@wsl-ubuntu$ docker container run hello-world ``` @@ -120,7 +110,7 @@ user@wsl-ubuntu$ docker container run hello-world Requirement : conda installed in Ubuntu and configured for your user account -* install [Anaconda](https://docs.anaconda.com/anaconda/install/linux/) under Ubuntu, using your user account +* install [Miniconda](https://docs.conda.io/projects/conda/en/latest/user-guide/install/linux.html) under Ubuntu, using your user account * during installation, answer *Yes* to question *“Do you wish the installer to initialize Anaconda3 by running conda init?”* * activate conda for your Ubuntu session diff --git a/envs/vpn/README.md b/envs/vpn/README.md index 4e064ee9d..a6994af1e 100644 --- a/envs/vpn/README.md +++ b/envs/vpn/README.md @@ -19,7 +19,7 @@ Which machine to use ? Supported operating systems for using containers : - tested on **Fedora 38**, should work for recent RedHat based Linux - should work for **Ubuntu 22.04 LTS** and recent Debian based Linux - - tested on recent **MacOS X**c (12.6.6 and 13) + - tested on recent **MacOS X** (12.6.6 and 13) - should work on **Windows 11** with WSL2 using a Ubuntu-22.04 distribution Pre-requisites for using containers : @@ -31,7 +31,7 @@ Pre-requisites for using containers : - You can use your usual package manager to install up-to-date version (eg: `sudo apt-get update && sudo apt-get remove docker-compose && sudo apt-get install docker-compose-plugin` for apt, `sudo dnf clean metadata && sudo dnf remove docker-compose && sudo dnf update docker-compose-plugin` for dnf). - If no suitable package exist for your system, you can use the [docker compose plugin install page](https://docs.docker.com/compose/install/linux/). -Installation notes for Windows 10 with WSL2 Ubuntu-20.04: +Installation notes for Windows 11 with WSL2 Ubuntu-22.04: * build of containers `mqtt` `restful` may fail in `cargo install` step with error `spurious network error [...] Timeout was reached`. This is due to bad name resolution of `crates.io` package respository with default WSL2 DNS configuration. 
If this happens connect to wsl (`wsl` from Windows command line tool), get admin privileges (`sudo bash`) and create a [`/etc/wsl.conf`](https://docs.microsoft.com/fr-fr/windows/wsl/wsl-config) file containing:
 ```bash
 [network]
From 4170323707112414ff7916c14ca1a46fb3d63dbe Mon Sep 17 00:00:00 2001
From: Marc Vesin
Date: Mon, 26 Jun 2023 13:12:45 +0200
Subject: [PATCH 35/41] windows doc: notes about performance issues

---
 docs/user-guide/installation/windows-installation.md | 9 ++++++++-
 1 file changed, 8 insertions(+), 1 deletion(-)

diff --git a/docs/user-guide/installation/windows-installation.md b/docs/user-guide/installation/windows-installation.md
index 80ae698c5..f9fefd99e 100644
--- a/docs/user-guide/installation/windows-installation.md
+++ b/docs/user-guide/installation/windows-installation.md
@@ -125,7 +125,14 @@

 When running ```network``` for the first time, a Windows defender pop up may appear (triggered by docker), choose *"authorize only on private network"*.

-You may experience some differences when using Fed-BioMed on Windows in comparison to other systems : this is because WSL does not have a graphical interface. Everything happens as if you were running a headless Linux machine.
+!!! info "Performance issue"
+    To ensure Fed-BioMed performance in WSL2, be sure to use the native WSL2 Linux filesystem (in `/home/login`),
+    not the Windows filesystem (in `/mnt/c/Users/login`), both for cloning the library and storing datasets.
+    We experienced 10-50 times slower execution with the native Windows filesystem.
+    This point is [documented by Microsoft](https://learn.microsoft.com/en-us/windows/wsl/compare-versions#exceptions-for-using-wsl-1-rather-than-wsl-2).
+
+    You may also need to [increase the memory available to WSL2](https://learn.microsoft.com/en-us/answers/questions/1296124/how-to-increase-memory-and-cpu-limits-for-wsl2-win).
+    By default, only 50% of the host's RAM is made available.

## Troubleshooting From 0e4df18e9623724b7a04eadd89f4568067888eb6 Mon Sep 17 00:00:00 2001 From: Marc Vesin Date: Tue, 27 Jun 2023 13:48:45 +0200 Subject: [PATCH 36/41] fix test environ path for multiple path to fedbiomed --- tests/test_environ_common.py | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/tests/test_environ_common.py b/tests/test_environ_common.py index a95c9c350..d79853a75 100644 --- a/tests/test_environ_common.py +++ b/tests/test_environ_common.py @@ -51,12 +51,12 @@ def test_environ_01_initialize_common_variables_01(self): values = self.environ._values self.assertTrue("ROOT_DIR" in values) - self.assertEqual(values["ROOT_DIR"], self.base_dir) - self.assertEqual(values["CONFIG_DIR"], os.path.join(self.base_dir, 'etc')) - self.assertEqual(values["VAR_DIR"], os.path.join(self.base_dir, 'var')) - self.assertEqual(values["TMP_DIR"], os.path.join(self.base_dir, 'var', 'tmp')) - self.assertEqual(values["CACHE_DIR"], os.path.join(self.base_dir, 'var', 'cache')) - self.assertEqual(values["PORT_INCREMENT_FILE"], os.path.join(self.base_dir, 'etc', 'port_increment')) + self.assertEqual(os.path.realpath(values["ROOT_DIR"]), os.path.realpath(self.base_dir)) + self.assertEqual(os.path.realpath(values["CONFIG_DIR"]), os.path.join(os.path.realpath(self.base_dir), 'etc')) + self.assertEqual(os.path.realpath(values["VAR_DIR"]), os.path.join(os.path.realpath(self.base_dir), 'var')) + self.assertEqual(os.path.realpath(values["TMP_DIR"]), os.path.join(os.path.realpath(self.base_dir), 'var', 'tmp')) + self.assertEqual(os.path.realpath(values["CACHE_DIR"]), os.path.join(os.path.realpath(self.base_dir), 'var', 'cache')) + self.assertEqual(os.path.realpath(values["PORT_INCREMENT_FILE"]), os.path.join(os.path.realpath(self.base_dir), 'etc', 'port_increment')) def test_environ_01_initialize_common_variables_02(self): """ Test initialize common variables with root dir""" From b540e83786aa470e2bcc3f268165a00f24de7c1f Mon Sep 17 00:00:00 2001 From: Marc Vesin Date: Tue, 27 Jun 2023 16:04:31 +0200 Subject: [PATCH 37/41] WIP handle obsolete `docker-compose` with clean failure --- scripts/choose_docker_compose | 20 ++++++++++++++++++++ scripts/fedbiomed_environment | 6 +++++- 2 files changed, 25 insertions(+), 1 deletion(-) create mode 100644 scripts/choose_docker_compose diff --git a/scripts/choose_docker_compose b/scripts/choose_docker_compose new file mode 100644 index 000000000..1a5b7387e --- /dev/null +++ b/scripts/choose_docker_compose @@ -0,0 +1,20 @@ + +# +# Temporary utility function for smoothing `docker-compose` to `docker compose` migration +# + +docker_compose() { + + if $(docker compose version >/dev/null 2>&1) ; then + DOCKER_COMPOSE='docker compose' + echo $DOCKER_COMPOSE + elif $(docker-compose version >/dev/null 2>&1) ; then + echo "[WARNING] docker-compose v1 is obsolete, please upgrade to docker-compose-plugin v2 !" >&2 + DOCKER_COMPOSE='docker-compose' + echo $DOCKER_COMPOSE + else + echo '[ERROR] `docker compose` not found. It is a requirement for Fed-BioMed.' >&2 + exit 1 + fi + +} \ No newline at end of file diff --git a/scripts/fedbiomed_environment b/scripts/fedbiomed_environment index c43efe274..76c5f9378 100755 --- a/scripts/fedbiomed_environment +++ b/scripts/fedbiomed_environment @@ -108,6 +108,8 @@ activate_gui() { [[ -n "$ZSH_NAME" ]] && myname=${(%):-%x} basedir=$(cd $(dirname $myname)/.. || exit ; pwd) +# temporary: `docker compose` migration +source $basedir/scripts/choose_docker_compose # initialize development environment for.... 
case $1 in @@ -118,7 +120,9 @@ case $1 in activate_network echo "** Cleaning all caches / temporary files" # docker containers - (cd ${basedir}/envs/development/docker/ && docker compose rm -sf && echo " * Docker cleaning" ) + DOCKER_COMPOSE=$(docker_compose) # dont call directly `docker_compose` or the calling process exists in case of error + # note: do as much cleaning as possible even if `docker compose` is not found + (cd ${basedir}/envs/development/docker/ && [ -n "$DOCKER_COMPOSE" ] && $DOCKER_COMPOSE rm -sf && echo " * Docker cleaning" ) # network cleaner echo " * Network cleaning" From 6ed80076c11ad039bd11113fa6f7df3063becaf2 Mon Sep 17 00:00:00 2001 From: Marc Vesin Date: Tue, 27 Jun 2023 18:54:10 +0200 Subject: [PATCH 38/41] migration to `docker compose` (v2, plugin) - clear error when not installed - temporary (not officially supported) backward compatibility + warning for `docker-compose` --- scripts/fedbiomed_run | 7 ++-- scripts/fedbiomed_vpn | 63 +++++++++++++++++++----------------- scripts/run_integration_test | 3 +- scripts/run_test_mnist | 3 +- 4 files changed, 42 insertions(+), 34 deletions(-) diff --git a/scripts/fedbiomed_run b/scripts/fedbiomed_run index a7a947558..e0503ae8f 100755 --- a/scripts/fedbiomed_run +++ b/scripts/fedbiomed_run @@ -239,14 +239,17 @@ case $1 in usage network ;; *) + source $basedir/scripts/choose_docker_compose + docker_compose >/dev/null + CONTAINER_UID=$(id -u) CONTAINER_GID=$(id -g) \ CONTAINER_USER=$(id -un | sed 's/[^[:alnum:]]/_/g') \ CONTAINER_GROUP=$(id -gn | sed 's/[^[:alnum:]]/_/g') \ - docker compose build restful mqtt + $DOCKER_COMPOSE build restful mqtt CONTAINER_UID=$(id -u) CONTAINER_GID=$(id -g) \ CONTAINER_USER=$(id -un | sed 's/[^[:alnum:]]/_/g') \ CONTAINER_GROUP=$(id -gn | sed 's/[^[:alnum:]]/_/g') \ - docker compose up -d restful mqtt + $DOCKER_COMPOSE up -d restful mqtt ;; esac ;; diff --git a/scripts/fedbiomed_vpn b/scripts/fedbiomed_vpn index 7eca06176..93e8d5068 100755 --- a/scripts/fedbiomed_vpn +++ b/scripts/fedbiomed_vpn @@ -92,7 +92,10 @@ stop stop and remove all containers check_prerequisite() { # # verify that every needed commands are installed - commands=( docker 'docker compose') + source $basedir/scripts/choose_docker_compose + docker_compose >/dev/null + + commands=( docker "$DOCKER_COMPOSE") ERROR=0 for i in "${commands[@]}" @@ -215,7 +218,7 @@ containers_remove() { cd "$basedir/envs/vpn/docker" for i in ${ONLY_CONTAINERS[@]} do - docker compose rm -sf $i >/dev/null + $DOCKER_COMPOSE rm -sf $i >/dev/null done } @@ -232,7 +235,7 @@ containers_clean() { for i in base basenode do - docker compose rm -sf $i >/dev/null + $DOCKER_COMPOSE rm -sf $i >/dev/null done # @@ -319,7 +322,7 @@ containers_status() { do [[ "$i" = "gui" ]] || [[ "$i" = "gui2" ]] && { continue ; } # gui containers do not use wireguard echo -n "- pinging VPN server from container $i -> " - ping=$(docker compose exec ${i} ping -n -c 3 -W 1 10.220.0.1 2>/dev/null | cat -v) + ping=$($DOCKER_COMPOSE exec ${i} ping -n -c 3 -W 1 10.220.0.1 2>/dev/null | cat -v) status=$(echo $ping|grep seq=) if [ -z "$status" ]; then @@ -355,26 +358,26 @@ containers_build() { CONTAINER_UID=${CONTAINER_UID:-$(id -u)} CONTAINER_GID=${CONTAINER_GID:-$(id -g)} \ CONTAINER_USER=${CONTAINER_USER:-$(id -un | sed 's/[^[:alnum:]]/_/g')} \ CONTAINER_GROUP=${CONTAINER_GROUP:-$(id -gn | sed 's/[^[:alnum:]]/_/g')} \ - docker compose build base + $DOCKER_COMPOSE build base fi if [ $BUILD_NODEBASE -eq 1 ]; then echo "- building 'basenode' container" 
CONTAINER_UID=${CONTAINER_UID:-$(id -u)} CONTAINER_GID=${CONTAINER_GID:-$(id -g)} \ CONTAINER_USER=${CONTAINER_USER:-$(id -un | sed 's/[^[:alnum:]]/_/g')} \ CONTAINER_GROUP=${CONTAINER_GROUP:-$(id -gn | sed 's/[^[:alnum:]]/_/g')} \ - docker compose build basenode + $DOCKER_COMPOSE build basenode fi for i in ${ONLY_CONTAINERS[@]} do echo "- stopping '$i' container" - docker compose rm -sf $i >/dev/null + $DOCKER_COMPOSE rm -sf $i >/dev/null echo "- building '$i' container" MPSPDZ_URL=$MPSPDZ_URL MPSPDZ_COMMIT=$MPSPDZ_COMMIT \ CONTAINER_UID=${CONTAINER_UID:-$(id -u)} CONTAINER_GID=${CONTAINER_GID:-$(id -g)} \ CONTAINER_USER=${CONTAINER_USER:-$(id -un | sed 's/[^[:alnum:]]/_/g')} \ CONTAINER_GROUP=${CONTAINER_GROUP:-$(id -gn | sed 's/[^[:alnum:]]/_/g')} \ - docker compose build $i + $DOCKER_COMPOSE build $i done } @@ -392,7 +395,7 @@ single_container_configure() { IP=$(find_my_ip) /bin/rm -fr ./vpnserver/run_mounts/config/config_peers/${category}/${container} - docker compose exec vpnserver bash -c -i "python ./vpn/bin/configure_peer.py genconf ${category} ${container}" + $DOCKER_COMPOSE exec vpnserver bash -c -i "python ./vpn/bin/configure_peer.py genconf ${category} ${container}" sleep 1 /bin/rm -fr ./${container}/run_mounts/config/wireguard 2> /dev/null @@ -418,7 +421,7 @@ containers_configure() { CONTAINER_UID=${CONTAINER_UID:-$(id -u)} CONTAINER_GID=${CONTAINER_GID:-$(id -g)} \ CONTAINER_USER=${CONTAINER_USER:-$(id -un | sed 's/[^[:alnum:]]/_/g')} \ CONTAINER_GROUP=${CONTAINER_GROUP:-$(id -gn | sed 's/[^[:alnum:]]/_/g')} \ - docker compose up -d vpnserver + $DOCKER_COMPOSE up -d vpnserver for i in ${ONLY_CONTAINERS[@]} do @@ -458,7 +461,7 @@ containers_start() { CONTAINER_UID=${CONTAINER_UID:-$(id -u)} CONTAINER_GID=${CONTAINER_GID:-$(id -g)} \ CONTAINER_USER=${CONTAINER_USER:-$(id -un | sed 's/[^[:alnum:]]/_/g')} \ CONTAINER_GROUP=${CONTAINER_GROUP:-$(id -gn | sed 's/[^[:alnum:]]/_/g')} \ - docker compose up -d vpnserver + $DOCKER_COMPOSE up -d vpnserver # start other container(s) for i in ${ONLY_CONTAINERS[@]} @@ -476,37 +479,37 @@ containers_start() { esac echo "- starting $i container" - docker compose rm -sf $CONTAINER >/dev/null + $DOCKER_COMPOSE rm -sf $CONTAINER >/dev/null MPSPDZ_URL=$MPSPDZ_URL MPSPDZ_COMMIT=$MPSPDZ_COMMIT \ CONTAINER_UID=${CONTAINER_UID:-$(id -u)} CONTAINER_GID=${CONTAINER_GID:-$(id -g)} \ CONTAINER_USER=${CONTAINER_USER:-$(id -un | sed 's/[^[:alnum:]]/_/g')} \ CONTAINER_GROUP=${CONTAINER_GROUP:-$(id -gn | sed 's/[^[:alnum:]]/_/g')} \ - docker compose up -d $CONTAINER + $DOCKER_COMPOSE up -d $CONTAINER case $i in mqtt|restful) - pubkey=$(docker compose exec "$i" wg show wg0 public-key | tr -d '\r') + pubkey=$($DOCKER_COMPOSE exec "$i" wg show wg0 public-key | tr -d '\r') # Remove key to avoid protocol error if keys are same - docker compose exec vpnserver python ./vpn/bin/configure_peer.py remove management "$i" - docker compose exec vpnserver python ./vpn/bin/configure_peer.py add management "$i" "$pubkey" + $DOCKER_COMPOSE exec vpnserver python ./vpn/bin/configure_peer.py remove management "$i" + $DOCKER_COMPOSE exec vpnserver python ./vpn/bin/configure_peer.py add management "$i" "$pubkey" ;; researcher) - pubkey=$(docker compose exec researcher wg show wg0 public-key | tr -d '\r') + pubkey=$($DOCKER_COMPOSE exec researcher wg show wg0 public-key | tr -d '\r') # Remove key to avoid protocol error if keys are same - docker compose exec vpnserver python ./vpn/bin/configure_peer.py remove researcher researcher1 - docker compose exec vpnserver python 
./vpn/bin/configure_peer.py add researcher researcher1 $pubkey + $DOCKER_COMPOSE exec vpnserver python ./vpn/bin/configure_peer.py remove researcher researcher1 + $DOCKER_COMPOSE exec vpnserver python ./vpn/bin/configure_peer.py add researcher researcher1 $pubkey ;; node) - pubkey=$(docker compose exec node wg show wg0 public-key | tr -d '\r') + pubkey=$($DOCKER_COMPOSE exec node wg show wg0 public-key | tr -d '\r') # Remove key to avoid protocol error if keys are same - docker compose exec vpnserver python ./vpn/bin/configure_peer.py remove node NODETAG - docker compose exec vpnserver python ./vpn/bin/configure_peer.py add node NODETAG $pubkey + $DOCKER_COMPOSE exec vpnserver python ./vpn/bin/configure_peer.py remove node NODETAG + $DOCKER_COMPOSE exec vpnserver python ./vpn/bin/configure_peer.py add node NODETAG $pubkey ;; node2) - pubkey=$(docker compose exec node2 wg show wg0 public-key | tr -d '\r') + pubkey=$($DOCKER_COMPOSE exec node2 wg show wg0 public-key | tr -d '\r') # Remove key to avoid protocol error if keys are same - docker compose exec vpnserver python ./vpn/bin/configure_peer.py remove node NODE2TAG - docker compose exec vpnserver python ./vpn/bin/configure_peer.py add node NODE2TAG $pubkey + $DOCKER_COMPOSE exec vpnserver python ./vpn/bin/configure_peer.py remove node NODE2TAG + $DOCKER_COMPOSE exec vpnserver python ./vpn/bin/configure_peer.py add node NODE2TAG $pubkey ;; *) ;; @@ -525,12 +528,12 @@ run() { cd "$basedir/envs/vpn/docker" # find @IP of mqtt and restful inside the VPN - status=$(docker compose ps restful | wc -l) + status=$($DOCKER_COMPOSE ps restful | wc -l) [[ $? ]] || { echo "** ERROR: restful container not running" exit 1 } - restful_IP=$(docker compose exec restful ip route | tr -d '\r' | grep ^10.220 | awk '{print $NF}') + restful_IP=$($DOCKER_COMPOSE exec restful ip route | tr -d '\r' | grep ^10.220 | awk '{print $NF}') echo "- restful IP is: $restful_IP" [[ -z "$restful_IP" ]] && { echo "** ERROR: cannot find IP address of restful server" ; @@ -539,12 +542,12 @@ run() { } # - status=$(docker compose ps mqtt | wc -l) + status=$($DOCKER_COMPOSE ps mqtt | wc -l) [[ $? 
]] || { echo "** ERROR: mqtt container not running" exit 1 } - mqtt_IP=$(docker compose exec mqtt ip route | tr -d '\r' | grep ^10.220 | awk '{print $NF}') + mqtt_IP=$($DOCKER_COMPOSE exec mqtt ip route | tr -d '\r' | grep ^10.220 | awk '{print $NF}') echo "- mqtt IP is: $mqtt_IP" [[ -z "$mqtt_IP" ]] && { echo "** ERROR: cannot find IP address of mqtt server" ; @@ -555,7 +558,7 @@ run() { CMD="export MQTT_BROKER=$mqtt_IP && export MQTT_BROKER_PORT=1883 && export UPLOADS_URL=http://${restful_IP}:8000/upload/ && export UPLOADS_IP=${restful_IP} && export FEDBIOMED_NO_RESET=1 && ./scripts/fedbiomed_run $container ${RUN_ARGS}" - docker compose exec -u ${CONTAINER_UID:-$(id -u)} $container bash -c "$CMD" + $DOCKER_COMPOSE exec -u ${CONTAINER_UID:-$(id -u)} $container bash -c "$CMD" diff --git a/scripts/run_integration_test b/scripts/run_integration_test index d3bb2e3f0..e3e1f0d85 100755 --- a/scripts/run_integration_test +++ b/scripts/run_integration_test @@ -205,7 +205,8 @@ cleaning() { fi # kill the docker containers - ( cd $basedir/envs/development/docker ; docker compose down ) + ( cd $basedir/envs/development/docker ; source $basedir/scripts/choose_docker_compose ; \ + docker_compose >/dev/null ; $DOCKER_COMPOSE down ) # # clean all datasets from nodes diff --git a/scripts/run_test_mnist b/scripts/run_test_mnist index 5bf2cddae..b52649402 100755 --- a/scripts/run_test_mnist +++ b/scripts/run_test_mnist @@ -79,7 +79,8 @@ conda deactivate ##clean running processes and datasets echo "INFO: killing kpids=$kpids" kill -9 $kpids -( cd $basedir/envs/development/docker ; docker compose down ) +( cd $basedir/envs/development/docker ; source $basedir/scripts/choose_docker_compose ; \ + docker_compose >/dev/null ; $DOCKER_COMPOSE down ) source $basedir/scripts/fedbiomed_environment node $basedir/scripts/fedbiomed_run node --delete-mnist $basedir/scripts/fedbiomed_run node config config-n1.ini --delete-mnist From 55edec58c367f2d0ec8016d35f3316b4cb518530 Mon Sep 17 00:00:00 2001 From: Marc Vesin Date: Wed, 28 Jun 2023 08:01:11 +0200 Subject: [PATCH 39/41] misc: simplify `docker compose` migration checks --- scripts/choose_docker_compose | 2 -- scripts/fedbiomed_environment | 2 +- scripts/fedbiomed_run | 2 +- scripts/fedbiomed_vpn | 2 +- scripts/run_integration_test | 2 +- scripts/run_test_mnist | 2 +- 6 files changed, 5 insertions(+), 7 deletions(-) diff --git a/scripts/choose_docker_compose b/scripts/choose_docker_compose index 1a5b7387e..757b956d7 100644 --- a/scripts/choose_docker_compose +++ b/scripts/choose_docker_compose @@ -7,11 +7,9 @@ docker_compose() { if $(docker compose version >/dev/null 2>&1) ; then DOCKER_COMPOSE='docker compose' - echo $DOCKER_COMPOSE elif $(docker-compose version >/dev/null 2>&1) ; then echo "[WARNING] docker-compose v1 is obsolete, please upgrade to docker-compose-plugin v2 !" >&2 DOCKER_COMPOSE='docker-compose' - echo $DOCKER_COMPOSE else echo '[ERROR] `docker compose` not found. It is a requirement for Fed-BioMed.' 
>&2 exit 1 diff --git a/scripts/fedbiomed_environment b/scripts/fedbiomed_environment index 76c5f9378..c4b680a6b 100755 --- a/scripts/fedbiomed_environment +++ b/scripts/fedbiomed_environment @@ -120,7 +120,7 @@ case $1 in activate_network echo "** Cleaning all caches / temporary files" # docker containers - DOCKER_COMPOSE=$(docker_compose) # dont call directly `docker_compose` or the calling process exists in case of error + DOCKER_COMPOSE=$(docker_compose ; echo $DOCKER_COMPOSE) # dont call directly `docker_compose` or the calling process exists in case of error # note: do as much cleaning as possible even if `docker compose` is not found (cd ${basedir}/envs/development/docker/ && [ -n "$DOCKER_COMPOSE" ] && $DOCKER_COMPOSE rm -sf && echo " * Docker cleaning" ) diff --git a/scripts/fedbiomed_run b/scripts/fedbiomed_run index e0503ae8f..f18df68e6 100755 --- a/scripts/fedbiomed_run +++ b/scripts/fedbiomed_run @@ -240,7 +240,7 @@ case $1 in ;; *) source $basedir/scripts/choose_docker_compose - docker_compose >/dev/null + docker_compose CONTAINER_UID=$(id -u) CONTAINER_GID=$(id -g) \ CONTAINER_USER=$(id -un | sed 's/[^[:alnum:]]/_/g') \ diff --git a/scripts/fedbiomed_vpn b/scripts/fedbiomed_vpn index 93e8d5068..9a9337dce 100755 --- a/scripts/fedbiomed_vpn +++ b/scripts/fedbiomed_vpn @@ -93,7 +93,7 @@ check_prerequisite() { # # verify that every needed commands are installed source $basedir/scripts/choose_docker_compose - docker_compose >/dev/null + docker_compose commands=( docker "$DOCKER_COMPOSE") diff --git a/scripts/run_integration_test b/scripts/run_integration_test index e3e1f0d85..56e198c6c 100755 --- a/scripts/run_integration_test +++ b/scripts/run_integration_test @@ -206,7 +206,7 @@ cleaning() { # kill the docker containers ( cd $basedir/envs/development/docker ; source $basedir/scripts/choose_docker_compose ; \ - docker_compose >/dev/null ; $DOCKER_COMPOSE down ) + docker_compose ; $DOCKER_COMPOSE down ) # # clean all datasets from nodes diff --git a/scripts/run_test_mnist b/scripts/run_test_mnist index b52649402..897e1ea58 100755 --- a/scripts/run_test_mnist +++ b/scripts/run_test_mnist @@ -80,7 +80,7 @@ conda deactivate echo "INFO: killing kpids=$kpids" kill -9 $kpids ( cd $basedir/envs/development/docker ; source $basedir/scripts/choose_docker_compose ; \ - docker_compose >/dev/null ; $DOCKER_COMPOSE down ) + docker_compose ; $DOCKER_COMPOSE down ) source $basedir/scripts/fedbiomed_environment node $basedir/scripts/fedbiomed_run node --delete-mnist $basedir/scripts/fedbiomed_run node config config-n1.ini --delete-mnist From 693923689b46e7c7f8f0e7aa67b5c08c88ab23a4 Mon Sep 17 00:00:00 2001 From: Marc Vesin Date: Wed, 28 Jun 2023 08:34:56 +0200 Subject: [PATCH 40/41] WIP misc envs modif/cleaning --- envs/development/conda/fedbiomed-node-macosx.yaml | 2 -- envs/development/conda/fedbiomed-node.yaml | 2 -- envs/development/conda/fedbiomed-researcher-macosx.yaml | 2 ++ envs/development/conda/fedbiomed-researcher.yaml | 2 ++ envs/vpn/conda/fedbiomed-node.yaml | 2 -- envs/vpn/conda/fedbiomed-researcher.yaml | 2 ++ 6 files changed, 6 insertions(+), 6 deletions(-) diff --git a/envs/development/conda/fedbiomed-node-macosx.yaml b/envs/development/conda/fedbiomed-node-macosx.yaml index cdb4e88c1..b4a4662aa 100644 --- a/envs/development/conda/fedbiomed-node-macosx.yaml +++ b/envs/development/conda/fedbiomed-node-macosx.yaml @@ -13,8 +13,6 @@ dependencies: # python 3.11 recently released 2022-11 and not yet supported by some deps including: torchvision - python >=3.10,<3.11 - pip >= 23.0 - - 
jupyter ~=1.0.0 - - ipython ~=8.13.2 # tests - pytest ~=7.2.0 - pytest-cov ~=4.1.0 diff --git a/envs/development/conda/fedbiomed-node.yaml b/envs/development/conda/fedbiomed-node.yaml index 8743da84a..203251b2a 100644 --- a/envs/development/conda/fedbiomed-node.yaml +++ b/envs/development/conda/fedbiomed-node.yaml @@ -13,8 +13,6 @@ dependencies: # python 3.11 recently released 2022-11 and not yet supported by some deps including: torchvision - python >=3.10,<3.11 - pip >= 23.0 - - jupyter ~=1.0.0 - - ipython ~=8.13.2 # tests - pytest ~=7.2.0 - pytest-cov ~=4.1.0 diff --git a/envs/development/conda/fedbiomed-researcher-macosx.yaml b/envs/development/conda/fedbiomed-researcher-macosx.yaml index 092f82f26..9936791fd 100644 --- a/envs/development/conda/fedbiomed-researcher-macosx.yaml +++ b/envs/development/conda/fedbiomed-researcher-macosx.yaml @@ -12,6 +12,8 @@ dependencies: # minimal environment - python >=3.10,<3.11 - pip >= 23.0 + # TODO: consider migrating for classical notebooks to notebook7 or jupyterlab + # https://jupyter-notebook.readthedocs.io/en/latest/migrate_to_notebook7.html - jupyter ~=1.0.0 - ipython ~=8.13.2 # tests diff --git a/envs/development/conda/fedbiomed-researcher.yaml b/envs/development/conda/fedbiomed-researcher.yaml index 457c8070d..03dacfc35 100644 --- a/envs/development/conda/fedbiomed-researcher.yaml +++ b/envs/development/conda/fedbiomed-researcher.yaml @@ -12,6 +12,8 @@ dependencies: # minimal environment - python >=3.10,<3.11 - pip >= 23.0 + # TODO: consider migrating for classical notebooks to notebook7 or jupyterlab + # https://jupyter-notebook.readthedocs.io/en/latest/migrate_to_notebook7.html - jupyter ~=1.0.0 - ipython ~=8.13.2 # tests diff --git a/envs/vpn/conda/fedbiomed-node.yaml b/envs/vpn/conda/fedbiomed-node.yaml index 8743da84a..203251b2a 100644 --- a/envs/vpn/conda/fedbiomed-node.yaml +++ b/envs/vpn/conda/fedbiomed-node.yaml @@ -13,8 +13,6 @@ dependencies: # python 3.11 recently released 2022-11 and not yet supported by some deps including: torchvision - python >=3.10,<3.11 - pip >= 23.0 - - jupyter ~=1.0.0 - - ipython ~=8.13.2 # tests - pytest ~=7.2.0 - pytest-cov ~=4.1.0 diff --git a/envs/vpn/conda/fedbiomed-researcher.yaml b/envs/vpn/conda/fedbiomed-researcher.yaml index 457c8070d..03dacfc35 100644 --- a/envs/vpn/conda/fedbiomed-researcher.yaml +++ b/envs/vpn/conda/fedbiomed-researcher.yaml @@ -12,6 +12,8 @@ dependencies: # minimal environment - python >=3.10,<3.11 - pip >= 23.0 + # TODO: consider migrating for classical notebooks to notebook7 or jupyterlab + # https://jupyter-notebook.readthedocs.io/en/latest/migrate_to_notebook7.html - jupyter ~=1.0.0 - ipython ~=8.13.2 # tests From fb0570e955c6c03def95effdde87c0203bbfc55d Mon Sep 17 00:00:00 2001 From: Marc Vesin Date: Wed, 28 Jun 2023 09:08:09 +0200 Subject: [PATCH 41/41] misc yaml file contents - remove nbstripout from node envs - remove tqdm from envs and replace with `pip install` in notebooks/tuto --- ...ical-image-segmentation-unet-library.ipynb | 1 + .../conda/fedbiomed-node-macosx.yaml | 4 +-- envs/development/conda/fedbiomed-node.yaml | 4 +-- .../conda/fedbiomed-researcher-macosx.yaml | 3 +- .../conda/fedbiomed-researcher.yaml | 3 +- envs/vpn/conda/fedbiomed-node.yaml | 4 +-- envs/vpn/conda/fedbiomed-researcher.yaml | 3 +- ...-image-segmentation-custom-unet-code.ipynb | 30 ++++++++++++------- ...ical-image-segmentation-unet-library.ipynb | 3 +- 9 files changed, 29 insertions(+), 26 deletions(-) diff --git a/docs/tutorials/medical/medical-image-segmentation-unet-library.ipynb 
b/docs/tutorials/medical/medical-image-segmentation-unet-library.ipynb index 9e24d62e7..c700058cb 100644 --- a/docs/tutorials/medical/medical-image-segmentation-unet-library.ipynb +++ b/docs/tutorials/medical/medical-image-segmentation-unet-library.ipynb @@ -38,6 +38,7 @@ "\n", "```shell\n", "mkdir -p ${FEDBIOMED_DIR}/notebooks/data\n", + "pip install tqdm\n", "download_and_split_ixi.py -f ${FEDBIOMED_DIR}\n", "```\n", "\n", diff --git a/envs/development/conda/fedbiomed-node-macosx.yaml b/envs/development/conda/fedbiomed-node-macosx.yaml index b4a4662aa..6c2322e10 100644 --- a/envs/development/conda/fedbiomed-node-macosx.yaml +++ b/envs/development/conda/fedbiomed-node-macosx.yaml @@ -22,14 +22,12 @@ dependencies: - requests ~=2.29.0 - paho-mqtt ~=1.6.1 - validators >=0.20.0,<0.21.0 - - tqdm ~=4.65.0 - git ~=2.40.1 - packaging ~=23.1 # these two have to be aligned - cryptography ~=40.0.0 - pyopenssl ~=23.1.1 - # git notebook striper - - nbstripout >=0.6.1,<0.7.0 + # - joblib >=1.2.0,<1.3.0 # other - itk >=5.3.0,<5.4.0 diff --git a/envs/development/conda/fedbiomed-node.yaml b/envs/development/conda/fedbiomed-node.yaml index 203251b2a..bf220af47 100644 --- a/envs/development/conda/fedbiomed-node.yaml +++ b/envs/development/conda/fedbiomed-node.yaml @@ -22,14 +22,12 @@ dependencies: - requests ~=2.29.0 - paho-mqtt ~=1.6.1 - validators >=0.20.0,<0.21.0 - - tqdm ~=4.65.0 - git ~=2.40.1 - packaging ~=23.1 # these two have to be aligned - cryptography ~=40.0.0 - pyopenssl ~=23.1.1 - # git notebook striper - - nbstripout >=0.6.1,<0.7.0 + # - joblib >=1.2.0,<1.3.0 # other - itk >=5.3.0,<5.4.0 diff --git a/envs/development/conda/fedbiomed-researcher-macosx.yaml b/envs/development/conda/fedbiomed-researcher-macosx.yaml index 9936791fd..ea28d8a19 100644 --- a/envs/development/conda/fedbiomed-researcher-macosx.yaml +++ b/envs/development/conda/fedbiomed-researcher-macosx.yaml @@ -27,13 +27,12 @@ dependencies: - requests ~=2.29.0 - paho-mqtt ~=1.6.1 - validators >=0.20.0,<0.21.0 - - tqdm ~=4.65.0 - git ~=2.40.1 - packaging ~=23.1 # these two have to be aligned - cryptography ~=40.0.0 - pyopenssl ~=23.1.1 - # git notebook striper + # git notebook striper - TODO: consider removing if not used anymore ... - nbstripout >=0.6.1,<0.7.0 - joblib >=1.2.0,<1.3.0 # other diff --git a/envs/development/conda/fedbiomed-researcher.yaml b/envs/development/conda/fedbiomed-researcher.yaml index 03dacfc35..5c1323b5a 100644 --- a/envs/development/conda/fedbiomed-researcher.yaml +++ b/envs/development/conda/fedbiomed-researcher.yaml @@ -27,13 +27,12 @@ dependencies: - requests ~=2.29.0 - paho-mqtt ~=1.6.1 - validators >=0.20.0,<0.21.0 - - tqdm ~=4.65.0 - git ~=2.40.1 - packaging ~=23.1 # these two have to be aligned - cryptography ~=40.0.0 - pyopenssl ~=23.1.1 - # git notebook striper + # git notebook striper - TODO: consider removing if not used anymore ... 
- nbstripout >=0.6.1,<0.7.0 - joblib >=1.2.0,<1.3.0 # other diff --git a/envs/vpn/conda/fedbiomed-node.yaml b/envs/vpn/conda/fedbiomed-node.yaml index 203251b2a..bf220af47 100644 --- a/envs/vpn/conda/fedbiomed-node.yaml +++ b/envs/vpn/conda/fedbiomed-node.yaml @@ -22,14 +22,12 @@ dependencies: - requests ~=2.29.0 - paho-mqtt ~=1.6.1 - validators >=0.20.0,<0.21.0 - - tqdm ~=4.65.0 - git ~=2.40.1 - packaging ~=23.1 # these two have to be aligned - cryptography ~=40.0.0 - pyopenssl ~=23.1.1 - # git notebook striper - - nbstripout >=0.6.1,<0.7.0 + # - joblib >=1.2.0,<1.3.0 # other - itk >=5.3.0,<5.4.0 diff --git a/envs/vpn/conda/fedbiomed-researcher.yaml b/envs/vpn/conda/fedbiomed-researcher.yaml index 03dacfc35..5c1323b5a 100644 --- a/envs/vpn/conda/fedbiomed-researcher.yaml +++ b/envs/vpn/conda/fedbiomed-researcher.yaml @@ -27,13 +27,12 @@ dependencies: - requests ~=2.29.0 - paho-mqtt ~=1.6.1 - validators >=0.20.0,<0.21.0 - - tqdm ~=4.65.0 - git ~=2.40.1 - packaging ~=23.1 # these two have to be aligned - cryptography ~=40.0.0 - pyopenssl ~=23.1.1 - # git notebook striper + # git notebook striper - TODO: consider removing if not used anymore ... - nbstripout >=0.6.1,<0.7.0 - joblib >=1.2.0,<1.3.0 # other diff --git a/notebooks/medical-image-segmentation/medical-image-segmentation-custom-unet-code.ipynb b/notebooks/medical-image-segmentation/medical-image-segmentation-custom-unet-code.ipynb index f5a612e51..96efcca90 100644 --- a/notebooks/medical-image-segmentation/medical-image-segmentation-custom-unet-code.ipynb +++ b/notebooks/medical-image-segmentation/medical-image-segmentation-custom-unet-code.ipynb @@ -27,10 +27,10 @@ "cell_type": "markdown", "id": "a5dd7d60-f737-4ddd-b179-85922e9d0371", "metadata": { - "tags": [], "pycharm": { "name": "#%% md\n" - } + }, + "tags": [] }, "source": [ "## Data preparation\n", @@ -69,6 +69,16 @@ "After succesfully running the command, follow the instructions printed to add the datasets and run the nodes. The tag used for this experiment is `ixi-train`." ] }, + { + "cell_type": "code", + "execution_count": null, + "id": "8072d722", + "metadata": {}, + "outputs": [], + "source": [ + "! 
pip install tqdm" + ] + }, { "cell_type": "code", "execution_count": null, @@ -118,10 +128,10 @@ "cell_type": "markdown", "id": "91fb49d3-00f7-4c75-b8ac-5588410fb351", "metadata": { - "tags": [], "pycharm": { "name": "#%% md\n" - } + }, + "tags": [] }, "source": [ "## Create a Training Plan\n", @@ -997,10 +1007,10 @@ "execution_count": null, "id": "a86aca90-54f6-4267-9154-73c46da7add6", "metadata": { - "tags": [], "pycharm": { "name": "#%%\n" - } + }, + "tags": [] }, "outputs": [], "source": [ @@ -1108,10 +1118,10 @@ "execution_count": 43, "id": "5339706f-b645-4719-a1d1-3cdf2debb80d", "metadata": { - "tags": [], "pycharm": { "name": "#%%\n" - } + }, + "tags": [] }, "outputs": [], "source": [ @@ -1344,9 +1354,9 @@ "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", - "version": "3.9.7" + "version": "3.10.11" } }, "nbformat": 4, "nbformat_minor": 5 -} \ No newline at end of file +} diff --git a/notebooks/medical-image-segmentation/medical-image-segmentation-unet-library.ipynb b/notebooks/medical-image-segmentation/medical-image-segmentation-unet-library.ipynb index 530f82ba3..9eba0daf8 100644 --- a/notebooks/medical-image-segmentation/medical-image-segmentation-unet-library.ipynb +++ b/notebooks/medical-image-segmentation/medical-image-segmentation-unet-library.ipynb @@ -38,6 +38,7 @@ "\n", "```shell\n", "mkdir -p ${FEDBIOMED_DIR}/notebooks/data\n", + "pip install tqdm\n", "download_and_split_ixi.py -f ${FEDBIOMED_DIR}\n", "```\n", "\n", @@ -955,7 +956,7 @@ "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", - "version": "3.9.16" + "version": "3.10.11" } }, "nbformat": 4,