From 0591cc24d9bf7690e798bd25f9b10cb57cd1a6e8 Mon Sep 17 00:00:00 2001
From: noopur
Date: Thu, 14 Nov 2024 13:55:18 +0000
Subject: [PATCH] Non TLS with single model and 3.10 python

Signed-off-by: noopur
---
 .github/workflows/task_runner_e2e.yml | 83 ++++++++++++++++++++++-----
 1 file changed, 69 insertions(+), 14 deletions(-)

diff --git a/.github/workflows/task_runner_e2e.yml b/.github/workflows/task_runner_e2e.yml
index 1d74a6640b..9603db81cf 100644
--- a/.github/workflows/task_runner_e2e.yml
+++ b/.github/workflows/task_runner_e2e.yml
@@ -31,16 +31,15 @@ env:
 
 jobs:
   test:
-    name: tr
+    name: tr_tls
     runs-on: ubuntu-22.04
     timeout-minutes: 120 # 2 hours
     strategy:
       matrix:
         # There are open issues for some of the models, so excluding them for now:
        # model_name: [ "torch_cnn_mnist", "keras_cnn_mnist", "torch_cnn_histology" ]
-        model_name: ["torch_cnn_mnist"]
-        python_version: ["3.8"]
-        tls: [True, False]
+        model_name: ["torch_cnn_mnist", "keras_cnn_mnist"]
+        python_version: ["3.8", "3.9", "3.10"]
       fail-fast: false # do not immediately fail if one of the combinations fail
 
     env:
@@ -68,25 +67,81 @@ jobs:
           python -m pip install --upgrade pip
           pip install .
           pip install -r test-requirements.txt
-          echo ${{ matrix.tls }}
 
-      - name: Run Task Runner E2E tests
-        if: matrix.tls == true
-        id: run_task_runner_tests_tls
+      - name: Run Task Runner E2E tests with TLS
+        id: run_tests
         run: |
           python -m pytest -s tests/end_to_end/test_suites/task_runner_tests.py -m ${{ env.MODEL_NAME }} --num_rounds $NUM_ROUNDS --num_collaborators $NUM_COLLABORATORS --model_name ${{ env.MODEL_NAME }}
           echo "Task runner end to end test run completed"
 
+      - name: Print test summary # Print the test summary only if the tests were run
+        id: print_test_summary
+        if: steps.run_tests.outcome == 'success' || steps.run_tests.outcome == 'failure'
+        run: |
+          export PYTHONPATH="$PYTHONPATH:."
+          python tests/end_to_end/utils/summary_helper.py
+          echo "Test summary printed"
+
+      - name: Tar files # Tar the test results only if the tests were run
+        id: tar_files
+        if: steps.run_tests.outcome == 'success' || steps.run_tests.outcome == 'failure'
+        run: tar -cvf result.tar results
+
+      - name: Upload Artifacts # Upload the test results only if the tar was created
+        id: upload_artifacts
+        uses: actions/upload-artifact@v4
+        if: steps.tar_files.outcome == 'success'
+        with:
+          name: task_runner_tls_${{ env.MODEL_NAME }}_python${{ env.PYTHON_VERSION }}_${{ github.run_id }}
+          path: result.tar
+
+  test_with_non_tls:
+    name: tr_non_tls
+    runs-on: ubuntu-22.04
+    timeout-minutes: 120 # 2 hours
+    strategy:
+      matrix:
+        # Testing non TLS scenario only for torch_cnn_mnist model and python 3.10
+        # If required, this can be extended to other models and python versions
+        model_name: ["torch_cnn_mnist"]
+        python_version: ["3.10"]
+      fail-fast: false # do not immediately fail if one of the combinations fail
+
+    env:
+      MODEL_NAME: ${{ matrix.model_name }}
+      PYTHON_VERSION: ${{ matrix.python_version }}
+
+    steps:
+      - name: Checkout OpenFL repository
+        id: checkout_openfl
+        uses: actions/checkout@v4.1.1
+        with:
+          fetch-depth: 2 # needed for detecting changes
+          submodules: "true"
+          token: ${{ secrets.GITHUB_TOKEN }}
+
+      - name: Set up Python
+        id: setup_python
+        uses: actions/setup-python@v3
+        with:
+          python-version: ${{ env.PYTHON_VERSION }}
+
+      - name: Install dependencies
+        id: install_dependencies
+        run: |
+          python -m pip install --upgrade pip
+          pip install .
+          pip install -r test-requirements.txt
+
       - name: Run Task Runner E2E tests without TLS
-        if: matrix.tls == false
-        id: run_task_runner_tests_non_tls
+        id: run_tests
         run: |
-          python -m pytest -s tests/end_to_end/test_suites/task_runner_tests.py -m ${{ env.MODEL_NAME }} --num_rounds $NUM_ROUNDS --num_collaborators $NUM_COLLABORATORS --model_name ${{ env.MODEL_NAME }} --disable_tls
+          python -m pytest -s tests/end_to_end/test_suites/task_runner_tests.py -m ${{ env.MODEL_NAME }} --num_rounds $NUM_ROUNDS --num_collaborators $NUM_COLLABORATORS --disable_tls
           echo "Task runner end to end test run completed"
 
       - name: Print test summary # Print the test summary only if the tests were run
         id: print_test_summary
-        if: steps.run_task_runner_tests.outcome == 'success' || steps.run_task_runner_tests.outcome == 'failure' || steps.run_task_runner_tests_non_tls.outcome == 'success' || steps.run_task_runner_tests_non_tls.outcome == 'failure'
+        if: steps.run_tests.outcome == 'success' || steps.run_tests.outcome == 'failure'
         run: |
           export PYTHONPATH="$PYTHONPATH:."
           python tests/end_to_end/utils/summary_helper.py
@@ -94,7 +149,7 @@ jobs:
           echo "Test summary printed"
 
       - name: Tar files # Tar the test results only if the tests were run
         id: tar_files
-        if: steps.run_task_runner_tests.outcome == 'success' || steps.run_task_runner_tests.outcome == 'failure' || steps.run_task_runner_tests_non_tls.outcome == 'success' || steps.run_task_runner_tests_non_tls.outcome == 'failure'
+        if: steps.run_tests.outcome == 'success' || steps.run_tests.outcome == 'failure'
         run: tar -cvf result.tar results
 
       - name: Upload Artifacts # Upload the test results only if the tar was created
@@ -102,5 +157,5 @@ jobs:
         id: upload_artifacts
         uses: actions/upload-artifact@v4
         if: steps.tar_files.outcome == 'success'
         with:
-          name: task_runner_tls_${{ matrix.tls }}_${{ env.MODEL_NAME }}_python${{ env.PYTHON_VERSION }}_${{ github.run_id }}
+          name: task_runner_non_tls_${{ env.MODEL_NAME }}_python${{ env.PYTHON_VERSION }}_${{ github.run_id }}
          path: result.tar
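For reference, a minimal local sketch of what the new tr_non_tls job executes, assuming the repository root as the working directory, dependencies installed as in the Install dependencies step, and hypothetical values for NUM_ROUNDS and NUM_COLLABORATORS (the workflow env block that defines them lies outside these hunks):

    # Hypothetical values; the workflow env block (not shown in this patch) sets the real ones.
    export NUM_ROUNDS=2
    export NUM_COLLABORATORS=2

    # Same invocation as the "Run Task Runner E2E tests without TLS" step, with the single
    # non-TLS matrix value torch_cnn_mnist substituted for ${{ env.MODEL_NAME }}.
    python -m pytest -s tests/end_to_end/test_suites/task_runner_tests.py \
        -m torch_cnn_mnist \
        --num_rounds $NUM_ROUNDS \
        --num_collaborators $NUM_COLLABORATORS \
        --disable_tls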