diff --git a/.github/workflows/test_with_docker.yml b/.github/workflows/test_with_docker.yml
new file mode 100644
index 0000000..0339e3c
--- /dev/null
+++ b/.github/workflows/test_with_docker.yml
@@ -0,0 +1,36 @@
+# This is a basic workflow to help you get started with Actions
+
+name: test-with-docker
+
+# Controls when the action will run. Triggers the workflow on push or pull request
+# events but only for the main branch
+on:
+  push:
+    branches: [ main ]
+  pull_request:
+    branches: [ main ]
+  schedule:
+    # * is a special character in YAML so you have to quote this string
+    - cron: '5 4 * * 0'
+
+# A workflow run is made up of one or more jobs that can run sequentially or in parallel
+jobs:
+  # This workflow contains a single job called "build"
+  build:
+    # The type of runner that the job will run on
+    runs-on: ubuntu-latest
+
+    # Steps represent a sequence of tasks that will be executed as part of the job
+    steps:
+      # Checks-out your repository under $GITHUB_WORKSPACE, so your job can access it
+      - name: Checkout
+        uses: actions/checkout@v2
+
+      - name: Make sure that the workflow works
+        run: echo Smoke test
+
+      - name: Run the tests using docker-compose
+        working-directory: .github/workflows
+        run: |
+          docker compose -f ../../docker-compose.tests.yml build
+          docker compose -f ../../docker-compose.tests.yml up --exit-code-from notebook-server
diff --git a/docker-compose.tests.yml b/docker-compose.tests.yml
new file mode 100644
index 0000000..6ed827e
--- /dev/null
+++ b/docker-compose.tests.yml
@@ -0,0 +1,51 @@
+version: "3"
+services:
+  dashboard:
+    image: em-pub-dash-dev/frontend
+    build:
+      context: frontend
+      dockerfile: docker/Dockerfile.dev
+    depends_on:
+      - db
+    ports:
+      # DASH in numbers
+      - "3274:6060"
+    volumes:
+      - ./frontend:/public
+      - ./plots:/public/plots
+    networks:
+      - emission
+  notebook-server:
+    image: em-pub-dash-dev/viz-scripts
+    build:
+      context: viz_scripts
+      dockerfile: docker/Dockerfile.test
+      args:
+        SERVER_IMAGE_TAG: ${SERVER_IMAGE_TAG}
+    depends_on:
+      - db
+    environment:
+      - DB_HOST=db
+      - WEB_SERVER_HOST=0.0.0.0
+      - CRON_MODE=
+      - STUDY_CONFIG=stage-program
+    ports:
+      # ipynb in numbers
+      - "47962:47962"
+    networks:
+      - emission
+    volumes:
+      - ./viz_scripts:/usr/src/app/saved-notebooks
+      - ./plots:/plots
+  db:
+    image: mongo:4.4.0
+    volumes:
+      - mongo-data:/data/db
+    networks:
+      - emission
+
+networks:
+  emission:
+
+volumes:
+  mongo-data:
diff --git a/viz_scripts/docker/Dockerfile.test b/viz_scripts/docker/Dockerfile.test
new file mode 100644
index 0000000..d9a84ea
--- /dev/null
+++ b/viz_scripts/docker/Dockerfile.test
@@ -0,0 +1,19 @@
+# python 3
+ARG SERVER_IMAGE_TAG
+FROM shankari/e-mission-server:master_${SERVER_IMAGE_TAG}
+
+VOLUME /plots
+
+ADD docker/environment36.dashboard.additions.yml /
+
+WORKDIR /usr/src/app
+
+RUN /bin/bash -c "source setup/activate.sh && conda env update --name emission --file setup/environment36.notebook.additions.yml"
+RUN /bin/bash -c "source setup/activate.sh && conda env update --name emission --file /environment36.dashboard.additions.yml"
+
+ADD docker/start_tests.sh /usr/src/app/.docker/start_tests.sh
+RUN chmod u+x /usr/src/app/.docker/start_tests.sh
+
+EXPOSE 8888
+
+CMD ["/bin/bash", "/usr/src/app/.docker/start_tests.sh"]
diff --git a/viz_scripts/docker/environment36.dashboard.additions.yml b/viz_scripts/docker/environment36.dashboard.additions.yml
index 59d26eb..49d927f 100644
--- a/viz_scripts/docker/environment36.dashboard.additions.yml
+++ b/viz_scripts/docker/environment36.dashboard.additions.yml
@@ -4,6 +4,8 @@ channels:
 - defaults
 dependencies:
 - seaborn=0.11.1
+- pytest
+- coverage
 - pip:
   - nbparameterise==0.6
   - devcron==0.4
diff --git a/viz_scripts/docker/start_tests.sh b/viz_scripts/docker/start_tests.sh
new file mode 100644
index 0000000..f979f23
--- /dev/null
+++ b/viz_scripts/docker/start_tests.sh
@@ -0,0 +1,13 @@
+#!/bin/bash
+set -e # Exit on error
+
+# activate the emission python environment
+pwd
+source setup/activate.sh || exit 1
+conda env list
+cd saved-notebooks/tests || exit 1
+
+echo "Starting unit tests..."
+PYTHONPATH=../.. coverage run -m pytest test_plots.py -v
+
+coverage report
diff --git a/viz_scripts/plots.py b/viz_scripts/plots.py
index 34e26bc..4c35465 100644
--- a/viz_scripts/plots.py
+++ b/viz_scripts/plots.py
@@ -9,7 +9,17 @@
 
 sns.set_style("whitegrid")
 sns.set()
-get_ipython().run_line_magic('matplotlib', 'inline')
+
+try:
+    # Import the function
+    from IPython import get_ipython
+    # Check if running in an IPython environment (like Jupyter Notebook)
+    if get_ipython() is not None:
+        get_ipython().run_line_magic('matplotlib', 'inline')
+except ImportError:
+    # Handle the case where IPython is not installed
+    # We are running in regular Python (likely pytest), not Jupyter/IPython
+    pass
 
 # Module for pretty-printing outputs (e.g. head) to help users
 # understand what is going on
diff --git a/viz_scripts/tests/test_plots.py b/viz_scripts/tests/test_plots.py
new file mode 100644
index 0000000..26d26e3
--- /dev/null
+++ b/viz_scripts/tests/test_plots.py
@@ -0,0 +1,58 @@
+import pytest
+import pandas as pd
+import numpy as np
+# Use importlib.import_module, since the 'saved-notebooks' directory name contains a hyphen
+import importlib
+plots = importlib.import_module('saved-notebooks.plots')
+
+# Test Data Fixtures
+@pytest.fixture
+def sample_labels():
+    return ['Car', 'Bus', 'Train', 'Walk']
+
+@pytest.fixture
+def sample_values():
+    return [100, 50, 3, 1]
+
+@pytest.fixture
+def sample_labels_no_small():
+    return ['Car', 'Bus']
+
+
+@pytest.fixture
+def sample_values_no_small():
+    return [100, 100]
+
+class TestCalculatePct:
+    def test_calculate_pct_basic(self, sample_labels, sample_values):
+        labels, values, pcts = plots.calculate_pct(sample_labels, sample_values)
+        assert len(labels) == len(sample_labels)
+        assert len(values) == len(sample_values)
+        assert sum(pcts) == pytest.approx(100.0, abs=0.1)
+
+    def test_calculate_pct_empty(self):
+        labels, values, pcts = plots.calculate_pct([], [])
+        assert len(labels) == 0
+        assert len(values) == 0
+        assert len(pcts) == 0
+
+    def test_calculate_pct_single(self):
+        labels, values, pcts = plots.calculate_pct(['Car'], [100])
+        assert pcts == [100.0]
+
+class TestMergeSmallEntries:
+    def test_merge_small_entries_basic(self, sample_labels, sample_values):
+        labels, values, pcts = plots.merge_small_entries(sample_labels, sample_values)
+        assert all(pct > 2.0 for pct in pcts)
+
+    def test_merge_small_entries_no_small(self, sample_labels_no_small, sample_values_no_small):
+        result_labels, result_values, result_pcts = plots.merge_small_entries(sample_labels_no_small, sample_values_no_small)
+        assert len(result_labels) == 2
+        assert 'other' not in result_labels
+        assert 'OTHER' not in result_labels
+
+    def test_merge_small_entries_some_small(self, sample_labels, sample_values):
+        result_labels, result_values, result_pcts = plots.merge_small_entries(sample_labels, sample_values)
+        print(result_labels)
+        assert len(result_labels) == 3
+        assert result_labels[0] in ['Car', 'Bus', 'other', 'OTHER']
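
Local run (a minimal sketch, not part of the patch): the CI job can be reproduced from the repository root with the same compose commands the workflow runs. The compose file interpolates ${SERVER_IMAGE_TAG} into the notebook-server build, so that variable must be provided, e.g. via the environment or a .env file; the <tag> value below is a placeholder, not something defined in this change.

    export SERVER_IMAGE_TAG=<tag>   # placeholder: tag of an existing shankari/e-mission-server:master_<tag> image
    docker compose -f docker-compose.tests.yml build
    docker compose -f docker-compose.tests.yml up --exit-code-from notebook-server

With --exit-code-from notebook-server, the compose run exits with the status of start_tests.sh, so a failing pytest run fails the workflow.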