diff --git a/.github/workflows/selfdrive_tests.yaml b/.github/workflows/selfdrive_tests.yaml index 2be83b16540e74..d3c8009bc23a37 100644 --- a/.github/workflows/selfdrive_tests.yaml +++ b/.github/workflows/selfdrive_tests.yaml @@ -134,24 +134,6 @@ jobs: timeout-minutes: 4 run: ${{ env.RUN }} "unset PYTHONWARNINGS && pre-commit run --all && chmod -R 777 /tmp/pre-commit" - valgrind: - name: valgrind - runs-on: ubuntu-latest - steps: - - uses: actions/checkout@v4 - with: - submodules: true - - uses: ./.github/workflows/setup-with-retry - - name: Build openpilot - run: ${{ env.RUN }} "scons -j$(nproc)" - - name: Run valgrind - timeout-minutes: 1 - run: | - ${{ env.RUN }} "python selfdrive/test/test_valgrind_replay.py" - - name: Print logs - if: always() - run: cat selfdrive/test/valgrind_logs.txt - unit_tests: name: unit tests runs-on: ${{ ((github.repository == 'commaai/openpilot') && @@ -200,7 +182,7 @@ jobs: uses: actions/cache@v4 with: path: .ci_cache/comma_download_cache - key: proc-replay-${{ hashFiles('.github/workflows/selfdrive_tests.yaml', 'selfdrive/test/process_replay/ref_commit') }} + key: proc-replay-${{ hashFiles('.github/workflows/selfdrive_tests.yaml', 'selfdrive/test/process_replay/ref_commit', 'selfdrive/test/process_replay/test_regen.py') }} - name: Build openpilot run: | ${{ env.RUN }} "scons -j$(nproc)" @@ -225,64 +207,18 @@ jobs: if: ${{ failure() && steps.print-diff.outcome == 'success' && github.repository == 'commaai/openpilot' && env.AZURE_TOKEN != '' }} run: | ${{ env.RUN }} "unset PYTHONWARNINGS && AZURE_TOKEN='$AZURE_TOKEN' python selfdrive/test/process_replay/test_processes.py -j$(nproc) --upload-only" - - name: "Upload coverage to Codecov" - uses: codecov/codecov-action@v4 - with: - name: ${{ github.job }} - env: - CODECOV_TOKEN: ${{ secrets.CODECOV_TOKEN }} - - regen: - name: regen - runs-on: 'ubuntu-latest' - steps: - - uses: actions/checkout@v4 - with: - submodules: true - - uses: ./.github/workflows/setup-with-retry - - name: Cache test routes - id: dependency-cache - uses: actions/cache@v4 - with: - path: .ci_cache/comma_download_cache - key: regen-${{ hashFiles('.github/workflows/selfdrive_tests.yaml', 'selfdrive/test/process_replay/test_regen.py') }} - - name: Build base Docker image - run: eval "$BUILD" - - name: Build openpilot - run: | - ${{ env.RUN }} "scons -j$(nproc)" - - name: Run regen - timeout-minutes: 30 - run: | - ${{ env.RUN }} "ONNXCPU=1 $PYTEST selfdrive/test/process_replay/test_regen.py && \ - chmod -R 777 /tmp/comma_download_cache" - - test_modeld: - name: model tests - runs-on: ubuntu-latest - steps: - - uses: actions/checkout@v4 - with: - submodules: true - - uses: ./.github/workflows/setup-with-retry - - name: Build base Docker image - run: eval "$BUILD" - - name: Build openpilot - run: | - ${{ env.RUN }} "scons -j$(nproc)" # PYTHONWARNINGS triggers a SyntaxError in onnxruntime - name: Run model replay with ONNX timeout-minutes: 4 run: | ${{ env.RUN }} "unset PYTHONWARNINGS && \ - ONNXCPU=1 NO_NAV=1 coverage run selfdrive/test/process_replay/model_replay.py && \ - coverage combine && \ - coverage xml" - - name: Run unit tests + ONNXCPU=1 NO_NAV=1 coverage run selfdrive/test/process_replay/model_replay.py && \ + coverage combine && coverage xml" + - name: Run regen timeout-minutes: 4 run: | - ${{ env.RUN }} "unset PYTHONWARNINGS && \ - $PYTEST selfdrive/modeld" + ${{ env.RUN }} "ONNXCPU=1 $PYTEST selfdrive/test/process_replay/test_regen.py && \ + chmod -R 777 /tmp/comma_download_cache" - name: "Upload coverage to Codecov" uses: 
codecov/codecov-action@v4 with: @@ -298,7 +234,7 @@ jobs: strategy: fail-fast: false matrix: - job: [0, 1, 2, 3, 4] + job: [0, 1] steps: - uses: actions/checkout@v4 with: @@ -313,12 +249,12 @@ jobs: - name: Build openpilot run: ${{ env.RUN }} "scons -j$(nproc)" - name: Test car models - timeout-minutes: 10 + timeout-minutes: 20 run: | ${{ env.RUN }} "$PYTEST selfdrive/car/tests/test_models.py && \ chmod -R 777 /tmp/comma_download_cache" env: - NUM_JOBS: 5 + NUM_JOBS: 2 JOB_ID: ${{ matrix.job }} - name: "Upload coverage to Codecov" uses: codecov/codecov-action@v4 diff --git a/common/tests/test_file_helpers.py b/common/tests/test_file_helpers.py index 1817f77cd2d472..a9977c2362d301 100644 --- a/common/tests/test_file_helpers.py +++ b/common/tests/test_file_helpers.py @@ -1,11 +1,10 @@ import os -import unittest from uuid import uuid4 from openpilot.common.file_helpers import atomic_write_in_dir -class TestFileHelpers(unittest.TestCase): +class TestFileHelpers: def run_atomic_write_func(self, atomic_write_func): path = f"/tmp/tmp{uuid4()}" with atomic_write_func(path) as f: @@ -13,12 +12,8 @@ def run_atomic_write_func(self, atomic_write_func): assert not os.path.exists(path) with open(path) as f: - self.assertEqual(f.read(), "test") + assert f.read() == "test" os.remove(path) def test_atomic_write_in_dir(self): self.run_atomic_write_func(atomic_write_in_dir) - - -if __name__ == "__main__": - unittest.main() diff --git a/common/tests/test_numpy_fast.py b/common/tests/test_numpy_fast.py index de7bb972e78190..aa53851db04981 100644 --- a/common/tests/test_numpy_fast.py +++ b/common/tests/test_numpy_fast.py @@ -1,10 +1,9 @@ import numpy as np -import unittest from openpilot.common.numpy_fast import interp -class InterpTest(unittest.TestCase): +class TestInterp: def test_correctness_controls(self): _A_CRUISE_MIN_BP = np.asarray([0., 5., 10., 20., 40.]) _A_CRUISE_MIN_V = np.asarray([-1.0, -.8, -.67, -.5, -.30]) @@ -20,7 +19,3 @@ def test_correctness_controls(self): expected = np.interp(v_ego, _A_CRUISE_MIN_BP, _A_CRUISE_MIN_V) actual = interp(v_ego, _A_CRUISE_MIN_BP, _A_CRUISE_MIN_V) np.testing.assert_equal(actual, expected) - - -if __name__ == "__main__": - unittest.main() diff --git a/common/tests/test_params.py b/common/tests/test_params.py index 490ee122beaf53..16cbc45295d527 100644 --- a/common/tests/test_params.py +++ b/common/tests/test_params.py @@ -1,13 +1,13 @@ +import pytest import os import threading import time import uuid -import unittest from openpilot.common.params import Params, ParamKeyType, UnknownKeyName -class TestParams(unittest.TestCase): - def setUp(self): +class TestParams: + def setup_method(self): self.params = Params() def test_params_put_and_get(self): @@ -49,16 +49,16 @@ def _delayed_writer(): assert self.params.get("CarParams", True) == b"test" def test_params_unknown_key_fails(self): - with self.assertRaises(UnknownKeyName): + with pytest.raises(UnknownKeyName): self.params.get("swag") - with self.assertRaises(UnknownKeyName): + with pytest.raises(UnknownKeyName): self.params.get_bool("swag") - with self.assertRaises(UnknownKeyName): + with pytest.raises(UnknownKeyName): self.params.put("swag", "abc") - with self.assertRaises(UnknownKeyName): + with pytest.raises(UnknownKeyName): self.params.put_bool("swag", True) def test_remove_not_there(self): @@ -68,19 +68,19 @@ def test_remove_not_there(self): def test_get_bool(self): self.params.remove("IsMetric") - self.assertFalse(self.params.get_bool("IsMetric")) + assert not self.params.get_bool("IsMetric") 
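# NOTE: the same conversion pattern repeats across every test file in this
# diff: classes drop the unittest.TestCase base, setUp/tearDown become
# pytest's setup_method/teardown_method hooks, and self.assertX(...) calls
# become bare asserts, which pytest rewrites to show both operands on failure.
# A minimal sketch of the pattern (the class and values here are illustrative,
# not taken from the diff):
import pytest

class TestExample:
  def setup_method(self):
    # runs before each test method, like unittest's setUp
    self.value = 42

  def test_answer(self):
    assert self.value == 42

  def test_raises(self):
    # pytest.raises replaces self.assertRaises
    with pytest.raises(ZeroDivisionError):
      1 / 0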
self.params.put_bool("IsMetric", True) - self.assertTrue(self.params.get_bool("IsMetric")) + assert self.params.get_bool("IsMetric") self.params.put_bool("IsMetric", False) - self.assertFalse(self.params.get_bool("IsMetric")) + assert not self.params.get_bool("IsMetric") self.params.put("IsMetric", "1") - self.assertTrue(self.params.get_bool("IsMetric")) + assert self.params.get_bool("IsMetric") self.params.put("IsMetric", "0") - self.assertFalse(self.params.get_bool("IsMetric")) + assert not self.params.get_bool("IsMetric") def test_put_non_blocking_with_get_block(self): q = Params() @@ -107,7 +107,3 @@ def test_params_all_keys(self): assert len(keys) > 20 assert len(keys) == len(set(keys)) assert b"CarParams" in keys - - -if __name__ == "__main__": - unittest.main() diff --git a/common/tests/test_simple_kalman.py b/common/tests/test_simple_kalman.py index f641cd19e690a4..f4a967e58ad418 100644 --- a/common/tests/test_simple_kalman.py +++ b/common/tests/test_simple_kalman.py @@ -1,10 +1,8 @@ -import unittest - from openpilot.common.simple_kalman import KF1D -class TestSimpleKalman(unittest.TestCase): - def setUp(self): +class TestSimpleKalman: + def setup_method(self): dt = 0.01 x0_0 = 0.0 x1_0 = 0.0 @@ -24,12 +22,8 @@ def setUp(self): def test_getter_setter(self): self.kf.set_x([[1.0], [1.0]]) - self.assertEqual(self.kf.x, [[1.0], [1.0]]) + assert self.kf.x == [[1.0], [1.0]] def update_returns_state(self): x = self.kf.update(100) - self.assertEqual(x, self.kf.x) - - -if __name__ == "__main__": - unittest.main() + assert x == self.kf.x diff --git a/common/transformations/tests/test_coordinates.py b/common/transformations/tests/test_coordinates.py index 7ae79403bdd1e2..41076d9b3fb7fa 100755 --- a/common/transformations/tests/test_coordinates.py +++ b/common/transformations/tests/test_coordinates.py @@ -1,7 +1,6 @@ #!/usr/bin/env python3 import numpy as np -import unittest import openpilot.common.transformations.coordinates as coord @@ -44,7 +43,7 @@ [ 78.56272609, 18.53100158, -43.25290759]]) -class TestNED(unittest.TestCase): +class TestNED: def test_small_distances(self): start_geodetic = np.array([33.8042184, -117.888593, 0.0]) local_coord = coord.LocalCoord.from_geodetic(start_geodetic) @@ -54,13 +53,13 @@ def test_small_distances(self): west_geodetic = start_geodetic + [0, -0.0005, 0] west_ned = local_coord.geodetic2ned(west_geodetic) - self.assertLess(np.abs(west_ned[0]), 1e-3) - self.assertLess(west_ned[1], 0) + assert np.abs(west_ned[0]) < 1e-3 + assert west_ned[1] < 0 southwest_geodetic = start_geodetic + [-0.0005, -0.002, 0] southwest_ned = local_coord.geodetic2ned(southwest_geodetic) - self.assertLess(southwest_ned[0], 0) - self.assertLess(southwest_ned[1], 0) + assert southwest_ned[0] < 0 + assert southwest_ned[1] < 0 def test_ecef_geodetic(self): # testing single @@ -105,5 +104,3 @@ def test_ned_batch(self): np.testing.assert_allclose(converter.ned2ecef(ned_offsets_batch), ecef_positions_offset_batch, rtol=1e-9, atol=1e-7) -if __name__ == "__main__": - unittest.main() diff --git a/common/transformations/tests/test_orientation.py b/common/transformations/tests/test_orientation.py index f77827d2f98eb6..695642774e2c3e 100755 --- a/common/transformations/tests/test_orientation.py +++ b/common/transformations/tests/test_orientation.py @@ -1,7 +1,6 @@ #!/usr/bin/env python3 import numpy as np -import unittest from openpilot.common.transformations.orientation import euler2quat, quat2euler, euler2rot, rot2euler, \ rot2quat, quat2rot, \ @@ -32,7 +31,7 @@ [ 2.50450101, 0.36304151, 
0.33136365]])

-class TestOrientation(unittest.TestCase):
+class TestOrientation:
   def test_quat_euler(self):
     for i, eul in enumerate(eulers):
       np.testing.assert_allclose(quats[i], euler2quat(eul), rtol=1e-7)
@@ -62,7 +61,3 @@ def test_euler_ned(self):
       np.testing.assert_allclose(ned_eulers[i], ned_euler_from_ecef(ecef_positions[i], eulers[i]), rtol=1e-7)
       #np.testing.assert_allclose(eulers[i], ecef_euler_from_ned(ecef_positions[i], ned_eulers[i]), rtol=1e-7)
     # np.testing.assert_allclose(ned_eulers, ned_euler_from_ecef(ecef_positions, eulers), rtol=1e-7)
-
-
-if __name__ == "__main__":
-  unittest.main()
diff --git a/docs/CARS.md b/docs/CARS.md
index de9320594aa4cb..4364f10b08da9f 100644
--- a/docs/CARS.md
+++ b/docs/CARS.md
@@ -4,7 +4,7 @@

 A supported vehicle is one that just works when you install a comma device. All supported cars provide a better experience than any stock system. Supported vehicles reference the US market unless otherwise specified.

-# 286 Supported Cars
+# 287 Supported Cars

 |Make|Model|Supported Package|ACC|No ACC accel below|No ALC below|Steering Torque|Resume from stop|Hardware Needed|Video|
 |---|---|---|:---:|:---:|:---:|:---:|:---:|:---:|:---:|
@@ -120,6 +120,7 @@
 |Hyundai|Tucson 2023-24[5](#footnotes)|All|openpilot available[1](#footnotes)|0 mph|0 mph|[![star](assets/icon-star-full.svg)](##)|[![star](assets/icon-star-full.svg)](##)|Parts: 1 Hyundai N connector, 1 RJ45 cable (7 ft), 1 comma 3X, 1 comma power v2, 1 harness box, 1 mount, 1 right angle OBD-C cable (1.5 ft), Buy Here||
 |Hyundai|Tucson Diesel 2019|Smart Cruise Control (SCC)|openpilot available[1](#footnotes)|0 mph|0 mph|[![star](assets/icon-star-full.svg)](##)|[![star](assets/icon-star-full.svg)](##)|Parts: 1 Hyundai L connector, 1 RJ45 cable (7 ft), 1 comma 3X, 1 comma power v2, 1 harness box, 1 mount, 1 right angle OBD-C cable (1.5 ft), Buy Here||
 |Hyundai|Tucson Hybrid 2022-24[5](#footnotes)|All|openpilot available[1](#footnotes)|0 mph|0 mph|[![star](assets/icon-star-full.svg)](##)|[![star](assets/icon-star-full.svg)](##)|Parts: 1 Hyundai N connector, 1 RJ45 cable (7 ft), 1 comma 3X, 1 comma power v2, 1 harness box, 1 mount, 1 right angle OBD-C cable (1.5 ft), Buy Here||
+|Hyundai|Tucson Plug-in Hybrid 2024[5](#footnotes)|All|openpilot available[1](#footnotes)|0 mph|0 mph|[![star](assets/icon-star-full.svg)](##)|[![star](assets/icon-star-full.svg)](##)|Parts: 1 Hyundai N connector, 1 RJ45 cable (7 ft), 1 comma 3X, 1 comma power v2, 1 harness box, 1 mount, 1 right angle OBD-C cable (1.5 ft), Buy Here||
 |Hyundai|Veloster 2019-20|Smart Cruise Control (SCC)|Stock|5 mph|0 mph|[![star](assets/icon-star-full.svg)](##)|[![star](assets/icon-star-empty.svg)](##)|Parts: 1 Hyundai E connector, 1 RJ45 cable (7 ft), 1 comma 3X, 1 comma power v2, 1 harness box, 1 mount, 1 right angle OBD-C cable (1.5 ft), Buy Here||
 |Jeep|Grand Cherokee 2016-18|Adaptive Cruise Control (ACC)|Stock|0 mph|9 mph|[![star](assets/icon-star-full.svg)](##)|[![star](assets/icon-star-full.svg)](##)|Parts: 1 FCA connector, 1 RJ45 cable (7 ft), 1 comma 3X, 1 comma power v2, 1 harness box, 1 mount, 1 right angle OBD-C cable (1.5 ft), Buy Here||
 |Jeep|Grand Cherokee 2019-21|Adaptive Cruise Control (ACC)|Stock|0 mph|39 mph|[![star](assets/icon-star-full.svg)](##)|[![star](assets/icon-star-full.svg)](##)|Parts: 1 FCA connector, 1 RJ45 cable (7 ft), 1 comma 3X, 1 comma power v2, 1 harness box, 1 mount, 1 right angle OBD-C cable (1.5 ft), Buy Here||
diff --git a/poetry.lock b/poetry.lock
index 978574a1e277e2..f1f8d9f51835f7 100644
--- a/poetry.lock
+++ b/poetry.lock
@@ -1,4 +1,4 @@
-# This file is automatically @generated by Poetry 1.7.0 and should not be changed by hand.
+# This file is automatically @generated by Poetry 1.8.3 and should not be changed by hand.

 [[package]]
 name = "aiohttp"
@@ -6641,6 +6641,24 @@ pluggy = ">=1.5,<2.0"

 [package.extras]
 dev = ["argcomplete", "attrs (>=19.2)", "hypothesis (>=3.56)", "mock", "pygments (>=2.7.2)", "requests", "setuptools", "xmlschema"]

+[[package]]
+name = "pytest-asyncio"
+version = "0.23.6"
+description = "Pytest support for asyncio"
+optional = false
+python-versions = ">=3.8"
+files = [
+    {file = "pytest-asyncio-0.23.6.tar.gz", hash = "sha256:ffe523a89c1c222598c76856e76852b787504ddb72dd5d9b6617ffa8aa2cde5f"},
+    {file = "pytest_asyncio-0.23.6-py3-none-any.whl", hash = "sha256:68516fdd1018ac57b846c9846b954f0393b26f094764a28c955eabb0536a4e8a"},
+]
+
+[package.dependencies]
+pytest = ">=7.0.0,<9"
+
+[package.extras]
+docs = ["sphinx (>=5.3)", "sphinx-rtd-theme (>=1.0)"]
+testing = ["coverage (>=6.2)", "hypothesis (>=5.7.1)"]
+
 [[package]]
 name = "pytest-cov"
 version = "5.0.0"
@@ -6674,6 +6692,23 @@ files = [
 colorama = "*"
 pytest = ">=7.0"

+[[package]]
+name = "pytest-mock"
+version = "3.14.0"
+description = "Thin-wrapper around the mock package for easier use with pytest"
+optional = false
+python-versions = ">=3.8"
+files = [
+    {file = "pytest-mock-3.14.0.tar.gz", hash = "sha256:2719255a1efeceadbc056d6bf3df3d1c5015530fb40cf347c0f9afac88410bd0"},
+    {file = "pytest_mock-3.14.0-py3-none-any.whl", hash = "sha256:0b72c38033392a5f4621342fe11e9219ac11ec9d375f8e2a0c164539e0d70f6f"},
+]
+
+[package.dependencies]
+pytest = ">=6.2.5"
+
+[package.extras]
+dev = ["pre-commit", "pytest-asyncio", "tox"]
+
 [[package]]
 name = "pytest-randomly"
 version = "3.15.0"
@@ -7987,4 +8022,4 @@ testing = ["big-O", "jaraco.functools", "jaraco.itertools", "more-itertools", "p
 [metadata]
 lock-version = "2.0"
 python-versions = "~3.11"
-content-hash = "5f0a1b6f26faa3effeaa5393b73d9188be385a72c1d3b9befb3f03df3b38c86d"
+content-hash = "9f69dc7862f33f61e94e960f0ead2cbcd306b4502163d1934381d476143344f4"
diff --git a/pyproject.toml b/pyproject.toml
index a25fcc1645b2a7..eb270176066575 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -1,3 +1,12 @@
+[project]
+name = "openpilot"
+requires-python = ">= 3.11"
+readme = "README.md"
+license = {text = "MIT License"}
+
+[project.urls]
+Homepage = "https://comma.ai"
+
 [tool.pytest.ini_options]
 minversion = "6.0"
 addopts = "--ignore=openpilot/ --ignore=cereal/ --ignore=opendbc/ --ignore=panda/ --ignore=rednose_repo/ --ignore=tinygrad_repo/ --ignore=teleoprtc_repo/ -Werror --strict-config --strict-markers --durations=10 -n auto --dist=loadgroup"
@@ -154,6 +163,8 @@ pytest-subtests = "*"
 pytest-xdist = "*"
 pytest-timeout = "*"
 pytest-randomly = "*"
+pytest-asyncio = "*"
+pytest-mock = "*"
 ruff = "*"
 sphinx = "*"
 sphinx-rtd-theme = "*"
@@ -197,6 +208,7 @@ lint.flake8-implicit-str-concat.allow-multiline=false
 "third_party".msg = "Use openpilot.third_party"
 "tools".msg = "Use openpilot.tools"
 "pytest.main".msg = "pytest.main requires special handling that is easy to mess up!"
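# NOTE: the pyproject.toml hunk above adds pytest-asyncio and pytest-mock as
# dev dependencies, and the Ruff banned-api entry added just below makes any
# new `import unittest` fail lint (rule TID251), steering everything toward
# pytest. pytest-mock's function-scoped `mocker` fixture replaces the
# unittest.mock.patch decorators in the athena tests that follow, and undoes
# every patch automatically at test teardown. A minimal sketch, patching a
# stdlib function purely for illustration:
import os

def test_cpu_count(mocker):
  m = mocker.patch("os.cpu_count", return_value=4)
  assert os.cpu_count() == 4
  m.assert_called_once()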
+"unittest".msg = "Use pytest" [tool.coverage.run] concurrency = ["multiprocessing", "thread"] diff --git a/selfdrive/athena/tests/test_athenad.py b/selfdrive/athena/tests/test_athenad.py index 4850ab9a3fc167..bdce3dccef6e3e 100755 --- a/selfdrive/athena/tests/test_athenad.py +++ b/selfdrive/athena/tests/test_athenad.py @@ -1,5 +1,6 @@ #!/usr/bin/env python3 -from functools import partial, wraps +import pytest +from functools import wraps import json import multiprocessing import os @@ -8,12 +9,9 @@ import time import threading import queue -import unittest from dataclasses import asdict, replace from datetime import datetime, timedelta -from parameterized import parameterized -from unittest import mock from websocket import ABNF from websocket._exceptions import WebSocketConnectionClosedException @@ -24,7 +22,7 @@ from openpilot.selfdrive.athena import athenad from openpilot.selfdrive.athena.athenad import MAX_RETRY_COUNT, dispatcher from openpilot.selfdrive.athena.tests.helpers import HTTPRequestHandler, MockWebsocket, MockApi, EchoSocket -from openpilot.selfdrive.test.helpers import with_http_server +from openpilot.selfdrive.test.helpers import http_server_context from openpilot.system.hardware.hw import Paths @@ -37,10 +35,6 @@ def seed_athena_server(host, port): except requests.exceptions.ConnectionError: time.sleep(0.1) - -with_mock_athena = partial(with_http_server, handler=HTTPRequestHandler, setup=seed_athena_server) - - def with_upload_handler(func): @wraps(func) def wrapper(*args, **kwargs): @@ -54,15 +48,23 @@ def wrapper(*args, **kwargs): thread.join() return wrapper +@pytest.fixture +def mock_create_connection(mocker): + return mocker.patch('openpilot.selfdrive.athena.athenad.create_connection') + +@pytest.fixture +def host(): + with http_server_context(handler=HTTPRequestHandler, setup=seed_athena_server) as (host, port): + yield f"http://{host}:{port}" -class TestAthenadMethods(unittest.TestCase): +class TestAthenadMethods: @classmethod - def setUpClass(cls): + def setup_class(cls): cls.SOCKET_PORT = 45454 athenad.Api = MockApi athenad.LOCAL_PORT_WHITELIST = {cls.SOCKET_PORT} - def setUp(self): + def setup_method(self): self.default_params = { "DongleId": "0000000000000000", "GithubSshKeys": b"ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQC307aE+nuHzTAgaJhzSf5v7ZZQW9gaperjhCmyPyl4PzY7T1mDGenTlVTN7yoVFZ9UfO9oMQqo0n1OwDIiqbIFxqnhrHU0cYfj88rI85m5BEKlNu5RdaVTj1tcbaPpQc5kZEolaI1nDDjzV0lwS7jo5VYDHseiJHlik3HH1SgtdtsuamGR2T80q1SyW+5rHoMOJG73IH2553NnWuikKiuikGHUYBd00K1ilVAK2xSiMWJp55tQfZ0ecr9QjEsJ+J/efL4HqGNXhffxvypCXvbUYAFSddOwXUPo5BTKevpxMtH+2YrkpSjocWA04VnTYFiPG6U4ItKmbLOTFZtPzoez private", # noqa: E501 @@ -109,8 +111,8 @@ def _create_file(file: str, parent: str = None, data: bytes = b'') -> str: def test_echo(self): assert dispatcher["echo"]("bob") == "bob" - def test_getMessage(self): - with self.assertRaises(TimeoutError) as _: + def test_get_message(self): + with pytest.raises(TimeoutError) as _: dispatcher["getMessage"]("controlsState") end_event = multiprocessing.Event() @@ -133,7 +135,7 @@ def send_deviceState(): end_event.set() p.join() - def test_listDataDirectory(self): + def test_list_data_directory(self): route = '2021-03-29--13-32-47' segments = [0, 1, 2, 3, 11] @@ -143,69 +145,66 @@ def test_listDataDirectory(self): self._create_file(file) resp = dispatcher["listDataDirectory"]() - self.assertTrue(resp, 'list empty!') - self.assertCountEqual(resp, files) + assert resp, 'list empty!' 
+ assert len(resp) == len(files) resp = dispatcher["listDataDirectory"](f'{route}--123') - self.assertCountEqual(resp, []) + assert len(resp) == 0 prefix = f'{route}' - expected = filter(lambda f: f.startswith(prefix), files) + expected = list(filter(lambda f: f.startswith(prefix), files)) resp = dispatcher["listDataDirectory"](prefix) - self.assertTrue(resp, 'list empty!') - self.assertCountEqual(resp, expected) + assert resp, 'list empty!' + assert len(resp) == len(expected) prefix = f'{route}--1' - expected = filter(lambda f: f.startswith(prefix), files) + expected = list(filter(lambda f: f.startswith(prefix), files)) resp = dispatcher["listDataDirectory"](prefix) - self.assertTrue(resp, 'list empty!') - self.assertCountEqual(resp, expected) + assert resp, 'list empty!' + assert len(resp) == len(expected) prefix = f'{route}--1/' - expected = filter(lambda f: f.startswith(prefix), files) + expected = list(filter(lambda f: f.startswith(prefix), files)) resp = dispatcher["listDataDirectory"](prefix) - self.assertTrue(resp, 'list empty!') - self.assertCountEqual(resp, expected) + assert resp, 'list empty!' + assert len(resp) == len(expected) prefix = f'{route}--1/q' - expected = filter(lambda f: f.startswith(prefix), files) + expected = list(filter(lambda f: f.startswith(prefix), files)) resp = dispatcher["listDataDirectory"](prefix) - self.assertTrue(resp, 'list empty!') - self.assertCountEqual(resp, expected) + assert resp, 'list empty!' + assert len(resp) == len(expected) def test_strip_bz2_extension(self): fn = self._create_file('qlog.bz2') if fn.endswith('.bz2'): - self.assertEqual(athenad.strip_bz2_extension(fn), fn[:-4]) + assert athenad.strip_bz2_extension(fn) == fn[:-4] - @parameterized.expand([(True,), (False,)]) - @with_mock_athena - def test_do_upload(self, compress, host): + @pytest.mark.parametrize("compress", [True, False]) + def test_do_upload(self, host, compress): # random bytes to ensure rather large object post-compression fn = self._create_file('qlog', data=os.urandom(10000 * 1024)) upload_fn = fn + ('.bz2' if compress else '') item = athenad.UploadItem(path=upload_fn, url="http://localhost:1238", headers={}, created_at=int(time.time()*1000), id='') - with self.assertRaises(requests.exceptions.ConnectionError): + with pytest.raises(requests.exceptions.ConnectionError): athenad._do_upload(item) item = athenad.UploadItem(path=upload_fn, url=f"{host}/qlog.bz2", headers={}, created_at=int(time.time()*1000), id='') resp = athenad._do_upload(item) - self.assertEqual(resp.status_code, 201) + assert resp.status_code == 201 - @with_mock_athena - def test_uploadFileToUrl(self, host): + def test_upload_file_to_url(self, host): fn = self._create_file('qlog.bz2') resp = dispatcher["uploadFileToUrl"]("qlog.bz2", f"{host}/qlog.bz2", {}) - self.assertEqual(resp['enqueued'], 1) - self.assertNotIn('failed', resp) - self.assertLessEqual({"path": fn, "url": f"{host}/qlog.bz2", "headers": {}}.items(), resp['items'][0].items()) - self.assertIsNotNone(resp['items'][0].get('id')) - self.assertEqual(athenad.upload_queue.qsize(), 1) - - @with_mock_athena - def test_uploadFileToUrl_duplicate(self, host): + assert resp['enqueued'] == 1 + assert 'failed' not in resp + assert {"path": fn, "url": f"{host}/qlog.bz2", "headers": {}}.items() <= resp['items'][0].items() + assert resp['items'][0].get('id') is not None + assert athenad.upload_queue.qsize() == 1 + + def test_upload_file_to_url_duplicate(self, host): self._create_file('qlog.bz2') url1 = f"{host}/qlog.bz2?sig=sig1" @@ -214,14 +213,12 @@ def 
test_uploadFileToUrl_duplicate(self, host): # Upload same file again, but with different signature url2 = f"{host}/qlog.bz2?sig=sig2" resp = dispatcher["uploadFileToUrl"]("qlog.bz2", url2, {}) - self.assertEqual(resp, {'enqueued': 0, 'items': []}) + assert resp == {'enqueued': 0, 'items': []} - @with_mock_athena - def test_uploadFileToUrl_does_not_exist(self, host): + def test_upload_file_to_url_does_not_exist(self, host): not_exists_resp = dispatcher["uploadFileToUrl"]("does_not_exist.bz2", "http://localhost:1238", {}) - self.assertEqual(not_exists_resp, {'enqueued': 0, 'items': [], 'failed': ['does_not_exist.bz2']}) + assert not_exists_resp == {'enqueued': 0, 'items': [], 'failed': ['does_not_exist.bz2']} - @with_mock_athena @with_upload_handler def test_upload_handler(self, host): fn = self._create_file('qlog.bz2') @@ -233,13 +230,12 @@ def test_upload_handler(self, host): # TODO: verify that upload actually succeeded # TODO: also check that end_event and metered network raises AbortTransferException - self.assertEqual(athenad.upload_queue.qsize(), 0) + assert athenad.upload_queue.qsize() == 0 - @parameterized.expand([(500, True), (412, False)]) - @with_mock_athena - @mock.patch('requests.put') + @pytest.mark.parametrize("status,retry", [(500,True), (412,False)]) @with_upload_handler - def test_upload_handler_retry(self, status, retry, mock_put, host): + def test_upload_handler_retry(self, mocker, host, status, retry): + mock_put = mocker.patch('requests.put') mock_put.return_value.status_code = status fn = self._create_file('qlog.bz2') item = athenad.UploadItem(path=fn, url=f"{host}/qlog.bz2", headers={}, created_at=int(time.time()*1000), id='', allow_cellular=True) @@ -248,10 +244,10 @@ def test_upload_handler_retry(self, status, retry, mock_put, host): self._wait_for_upload() time.sleep(0.1) - self.assertEqual(athenad.upload_queue.qsize(), 1 if retry else 0) + assert athenad.upload_queue.qsize() == (1 if retry else 0) if retry: - self.assertEqual(athenad.upload_queue.get().retry_count, 1) + assert athenad.upload_queue.get().retry_count == 1 @with_upload_handler def test_upload_handler_timeout(self): @@ -265,33 +261,33 @@ def test_upload_handler_timeout(self): time.sleep(0.1) # Check that upload with retry count exceeded is not put back - self.assertEqual(athenad.upload_queue.qsize(), 0) + assert athenad.upload_queue.qsize() == 0 athenad.upload_queue.put_nowait(item) self._wait_for_upload() time.sleep(0.1) # Check that upload item was put back in the queue with incremented retry count - self.assertEqual(athenad.upload_queue.qsize(), 1) - self.assertEqual(athenad.upload_queue.get().retry_count, 1) + assert athenad.upload_queue.qsize() == 1 + assert athenad.upload_queue.get().retry_count == 1 @with_upload_handler - def test_cancelUpload(self): + def test_cancel_upload(self): item = athenad.UploadItem(path="qlog.bz2", url="http://localhost:44444/qlog.bz2", headers={}, created_at=int(time.time()*1000), id='id', allow_cellular=True) athenad.upload_queue.put_nowait(item) dispatcher["cancelUpload"](item.id) - self.assertIn(item.id, athenad.cancelled_uploads) + assert item.id in athenad.cancelled_uploads self._wait_for_upload() time.sleep(0.1) - self.assertEqual(athenad.upload_queue.qsize(), 0) - self.assertEqual(len(athenad.cancelled_uploads), 0) + assert athenad.upload_queue.qsize() == 0 + assert len(athenad.cancelled_uploads) == 0 @with_upload_handler - def test_cancelExpiry(self): + def test_cancel_expiry(self): t_future = datetime.now() - timedelta(days=40) ts = 
int(t_future.strftime("%s")) * 1000 @@ -303,15 +299,14 @@ def test_cancelExpiry(self): self._wait_for_upload() time.sleep(0.1) - self.assertEqual(athenad.upload_queue.qsize(), 0) + assert athenad.upload_queue.qsize() == 0 - def test_listUploadQueueEmpty(self): + def test_list_upload_queue_empty(self): items = dispatcher["listUploadQueue"]() - self.assertEqual(len(items), 0) + assert len(items) == 0 - @with_http_server @with_upload_handler - def test_listUploadQueueCurrent(self, host: str): + def test_list_upload_queue_current(self, host: str): fn = self._create_file('qlog.bz2') item = athenad.UploadItem(path=fn, url=f"{host}/qlog.bz2", headers={}, created_at=int(time.time()*1000), id='', allow_cellular=True) @@ -319,22 +314,22 @@ def test_listUploadQueueCurrent(self, host: str): self._wait_for_upload() items = dispatcher["listUploadQueue"]() - self.assertEqual(len(items), 1) - self.assertTrue(items[0]['current']) + assert len(items) == 1 + assert items[0]['current'] - def test_listUploadQueue(self): + def test_list_upload_queue(self): item = athenad.UploadItem(path="qlog.bz2", url="http://localhost:44444/qlog.bz2", headers={}, created_at=int(time.time()*1000), id='id', allow_cellular=True) athenad.upload_queue.put_nowait(item) items = dispatcher["listUploadQueue"]() - self.assertEqual(len(items), 1) - self.assertDictEqual(items[0], asdict(item)) - self.assertFalse(items[0]['current']) + assert len(items) == 1 + assert items[0] == asdict(item) + assert not items[0]['current'] athenad.cancelled_uploads.add(item.id) items = dispatcher["listUploadQueue"]() - self.assertEqual(len(items), 0) + assert len(items) == 0 def test_upload_queue_persistence(self): item1 = athenad.UploadItem(path="_", url="_", headers={}, created_at=int(time.time()), id='id1') @@ -353,11 +348,10 @@ def test_upload_queue_persistence(self): athenad.upload_queue.queue.clear() athenad.UploadQueueCache.initialize(athenad.upload_queue) - self.assertEqual(athenad.upload_queue.qsize(), 1) - self.assertDictEqual(asdict(athenad.upload_queue.queue[-1]), asdict(item1)) + assert athenad.upload_queue.qsize() == 1 + assert asdict(athenad.upload_queue.queue[-1]) == asdict(item1) - @mock.patch('openpilot.selfdrive.athena.athenad.create_connection') - def test_startLocalProxy(self, mock_create_connection): + def test_start_local_proxy(self, mock_create_connection): end_event = threading.Event() ws_recv = queue.Queue() @@ -380,21 +374,21 @@ def test_startLocalProxy(self, mock_create_connection): ws_recv.put_nowait(WebSocketConnectionClosedException()) socket_thread.join() - def test_getSshAuthorizedKeys(self): + def test_get_ssh_authorized_keys(self): keys = dispatcher["getSshAuthorizedKeys"]() - self.assertEqual(keys, self.default_params["GithubSshKeys"].decode('utf-8')) + assert keys == self.default_params["GithubSshKeys"].decode('utf-8') - def test_getGithubUsername(self): + def test_get_github_username(self): keys = dispatcher["getGithubUsername"]() - self.assertEqual(keys, self.default_params["GithubUsername"].decode('utf-8')) + assert keys == self.default_params["GithubUsername"].decode('utf-8') - def test_getVersion(self): + def test_get_version(self): resp = dispatcher["getVersion"]() keys = ["version", "remote", "branch", "commit"] - self.assertEqual(list(resp.keys()), keys) + assert list(resp.keys()) == keys for k in keys: - self.assertIsInstance(resp[k], str, f"{k} is not a string") - self.assertTrue(len(resp[k]) > 0, f"{k} has no value") + assert isinstance(resp[k], str), f"{k} is not a string" + assert len(resp[k]) > 0, f"{k} 
has no value" def test_jsonrpc_handler(self): end_event = threading.Event() @@ -405,15 +399,15 @@ def test_jsonrpc_handler(self): # with params athenad.recv_queue.put_nowait(json.dumps({"method": "echo", "params": ["hello"], "jsonrpc": "2.0", "id": 0})) resp = athenad.send_queue.get(timeout=3) - self.assertDictEqual(json.loads(resp), {'result': 'hello', 'id': 0, 'jsonrpc': '2.0'}) + assert json.loads(resp) == {'result': 'hello', 'id': 0, 'jsonrpc': '2.0'} # without params athenad.recv_queue.put_nowait(json.dumps({"method": "getNetworkType", "jsonrpc": "2.0", "id": 0})) resp = athenad.send_queue.get(timeout=3) - self.assertDictEqual(json.loads(resp), {'result': 1, 'id': 0, 'jsonrpc': '2.0'}) + assert json.loads(resp) == {'result': 1, 'id': 0, 'jsonrpc': '2.0'} # log forwarding athenad.recv_queue.put_nowait(json.dumps({'result': {'success': 1}, 'id': 0, 'jsonrpc': '2.0'})) resp = athenad.log_recv_queue.get(timeout=3) - self.assertDictEqual(json.loads(resp), {'result': {'success': 1}, 'id': 0, 'jsonrpc': '2.0'}) + assert json.loads(resp) == {'result': {'success': 1}, 'id': 0, 'jsonrpc': '2.0'} finally: end_event.set() thread.join() @@ -427,8 +421,4 @@ def test_get_logs_to_send_sorted(self): # ensure the list is all logs except most recent sl = athenad.get_logs_to_send_sorted() - self.assertListEqual(sl, fl[:-1]) - - -if __name__ == '__main__': - unittest.main() + assert sl == fl[:-1] diff --git a/selfdrive/athena/tests/test_athenad_ping.py b/selfdrive/athena/tests/test_athenad_ping.py index f56fcac8b58453..44fa0b8481dd39 100755 --- a/selfdrive/athena/tests/test_athenad_ping.py +++ b/selfdrive/athena/tests/test_athenad_ping.py @@ -1,10 +1,9 @@ #!/usr/bin/env python3 +import pytest import subprocess import threading import time -import unittest from typing import cast -from unittest import mock from openpilot.common.params import Params from openpilot.common.timeout import Timeout @@ -22,7 +21,7 @@ def wifi_radio(on: bool) -> None: subprocess.run(["nmcli", "radio", "wifi", "on" if on else "off"], check=True) -class TestAthenadPing(unittest.TestCase): +class TestAthenadPing: params: Params dongle_id: str @@ -39,10 +38,10 @@ def _received_ping(self) -> bool: return self._get_ping_time() is not None @classmethod - def tearDownClass(cls) -> None: + def teardown_class(cls) -> None: wifi_radio(True) - def setUp(self) -> None: + def setup_method(self) -> None: self.params = Params() self.dongle_id = self.params.get("DongleId", encoding="utf-8") @@ -52,21 +51,23 @@ def setUp(self) -> None: self.exit_event = threading.Event() self.athenad = threading.Thread(target=athenad.main, args=(self.exit_event,)) - def tearDown(self) -> None: + def teardown_method(self) -> None: if self.athenad.is_alive(): self.exit_event.set() self.athenad.join() - @mock.patch('openpilot.selfdrive.athena.athenad.create_connection', new_callable=lambda: mock.MagicMock(wraps=athenad.create_connection)) - def assertTimeout(self, reconnect_time: float, mock_create_connection: mock.MagicMock) -> None: + def assertTimeout(self, reconnect_time: float, subtests, mocker) -> None: self.athenad.start() + mock_create_connection = mocker.patch('openpilot.selfdrive.athena.athenad.create_connection', + new_callable=lambda: mocker.MagicMock(wraps=athenad.create_connection)) + time.sleep(1) mock_create_connection.assert_called_once() mock_create_connection.reset_mock() # check normal behaviour, server pings on connection - with self.subTest("Wi-Fi: receives ping"), Timeout(70, "no ping received"): + with subtests.test("Wi-Fi: receives ping"), 
Timeout(70, "no ping received"):
       while not self._received_ping():
         time.sleep(0.1)
       print("ping received")
@@ -74,7 +75,7 @@ def assertTimeout(self, reconnect_time: float, mock_create_connection: mock.Magi
     mock_create_connection.assert_not_called()

     # websocket should attempt reconnect after short time
-    with self.subTest("LTE: attempt reconnect"):
+    with subtests.test("LTE: attempt reconnect"):
       wifi_radio(False)
       print("waiting for reconnect attempt")
       start_time = time.monotonic()
@@ -86,21 +87,17 @@ def assertTimeout(self, reconnect_time: float, mock_create_connection: mock.Magi
       self._clear_ping_time()

     # check ping received after reconnect
-    with self.subTest("LTE: receives ping"), Timeout(70, "no ping received"):
+    with subtests.test("LTE: receives ping"), Timeout(70, "no ping received"):
       while not self._received_ping():
         time.sleep(0.1)
       print("ping received")

-  @unittest.skipIf(not TICI, "only run on desk")
-  def test_offroad(self) -> None:
+  @pytest.mark.skipif(not TICI, reason="only run on desk")
+  def test_offroad(self, subtests, mocker) -> None:
     write_onroad_params(False, self.params)
-    self.assertTimeout(60 + TIMEOUT_TOLERANCE)  # based using TCP keepalive settings
+    self.assertTimeout(60 + TIMEOUT_TOLERANCE, subtests, mocker)  # based on TCP keepalive settings

-  @unittest.skipIf(not TICI, "only run on desk")
-  def test_onroad(self) -> None:
+  @pytest.mark.skipif(not TICI, reason="only run on desk")
+  def test_onroad(self, subtests, mocker) -> None:
     write_onroad_params(True, self.params)
-    self.assertTimeout(21 + TIMEOUT_TOLERANCE)
-
-
-if __name__ == "__main__":
-  unittest.main()
+    self.assertTimeout(21 + TIMEOUT_TOLERANCE, subtests, mocker)
diff --git a/selfdrive/athena/tests/test_registration.py b/selfdrive/athena/tests/test_registration.py
index e7ad63a370ee1e..a808dd566878ba 100755
--- a/selfdrive/athena/tests/test_registration.py
+++ b/selfdrive/athena/tests/test_registration.py
@@ -1,9 +1,7 @@
 #!/usr/bin/env python3
 import json
-import unittest
 from Crypto.PublicKey import RSA
 from pathlib import Path
-from unittest import mock

 from openpilot.common.params import Params
 from openpilot.selfdrive.athena.registration import register, UNREGISTERED_DONGLE_ID
@@ -11,9 +9,9 @@
 from openpilot.system.hardware.hw import Paths


-class TestRegistration(unittest.TestCase):
+class TestRegistration:

-  def setUp(self):
+  def setup_method(self):
     # clear params and setup key paths
     self.params = Params()
     self.params.clear_all()
@@ -32,50 +30,46 @@ def _generate_keys(self):
     with open(self.pub_key, "wb") as f:
       f.write(k.publickey().export_key())

-  def test_valid_cache(self):
+  def test_valid_cache(self, mocker):
     # if all params are written, return the cached dongle id
     self.params.put("IMEI", "imei")
     self.params.put("HardwareSerial", "serial")
     self._generate_keys()

-    with mock.patch("openpilot.selfdrive.athena.registration.api_get", autospec=True) as m:
-      dongle = "DONGLE_ID_123"
-      self.params.put("DongleId", dongle)
-      self.assertEqual(register(), dongle)
-      self.assertFalse(m.called)
+    m = mocker.patch("openpilot.selfdrive.athena.registration.api_get", autospec=True)
+    dongle = "DONGLE_ID_123"
+    self.params.put("DongleId", dongle)
+    assert register() == dongle
+    assert not m.called

-  def test_no_keys(self):
+  def test_no_keys(self, mocker):
     # missing pubkey
-    with mock.patch("openpilot.selfdrive.athena.registration.api_get", autospec=True) as m:
-      dongle = register()
-      self.assertEqual(m.call_count, 0)
-      self.assertEqual(dongle, UNREGISTERED_DONGLE_ID)
-      self.assertEqual(self.params.get("DongleId",
encoding='utf-8'), dongle) + m = mocker.patch("openpilot.selfdrive.athena.registration.api_get", autospec=True) + dongle = register() + assert m.call_count == 0 + assert dongle == UNREGISTERED_DONGLE_ID + assert self.params.get("DongleId", encoding='utf-8') == dongle - def test_missing_cache(self): + def test_missing_cache(self, mocker): # keys exist but no dongle id self._generate_keys() - with mock.patch("openpilot.selfdrive.athena.registration.api_get", autospec=True) as m: - dongle = "DONGLE_ID_123" - m.return_value = MockResponse(json.dumps({'dongle_id': dongle}), 200) - self.assertEqual(register(), dongle) - self.assertEqual(m.call_count, 1) + m = mocker.patch("openpilot.selfdrive.athena.registration.api_get", autospec=True) + dongle = "DONGLE_ID_123" + m.return_value = MockResponse(json.dumps({'dongle_id': dongle}), 200) + assert register() == dongle + assert m.call_count == 1 - # call again, shouldn't hit the API this time - self.assertEqual(register(), dongle) - self.assertEqual(m.call_count, 1) - self.assertEqual(self.params.get("DongleId", encoding='utf-8'), dongle) + # call again, shouldn't hit the API this time + assert register() == dongle + assert m.call_count == 1 + assert self.params.get("DongleId", encoding='utf-8') == dongle - def test_unregistered(self): + def test_unregistered(self, mocker): # keys exist, but unregistered self._generate_keys() - with mock.patch("openpilot.selfdrive.athena.registration.api_get", autospec=True) as m: - m.return_value = MockResponse(None, 402) - dongle = register() - self.assertEqual(m.call_count, 1) - self.assertEqual(dongle, UNREGISTERED_DONGLE_ID) - self.assertEqual(self.params.get("DongleId", encoding='utf-8'), dongle) - - -if __name__ == "__main__": - unittest.main() + m = mocker.patch("openpilot.selfdrive.athena.registration.api_get", autospec=True) + m.return_value = MockResponse(None, 402) + dongle = register() + assert m.call_count == 1 + assert dongle == UNREGISTERED_DONGLE_ID + assert self.params.get("DongleId", encoding='utf-8') == dongle diff --git a/selfdrive/boardd/tests/test_boardd_loopback.py b/selfdrive/boardd/tests/test_boardd_loopback.py index 3ab3a9c5b106ad..fa9eb957c21837 100755 --- a/selfdrive/boardd/tests/test_boardd_loopback.py +++ b/selfdrive/boardd/tests/test_boardd_loopback.py @@ -4,7 +4,6 @@ import random import time import pytest -import unittest from collections import defaultdict from pprint import pprint @@ -107,7 +106,3 @@ def test_loopback(self): pprint(sm['pandaStates']) # may drop messages due to RX buffer overflow for bus in sent_loopback.keys(): assert not len(sent_loopback[bus]), f"loop {i}: bus {bus} missing {len(sent_loopback[bus])} out of {sent_total[bus]} messages" - - -if __name__ == "__main__": - unittest.main() diff --git a/selfdrive/boardd/tests/test_pandad.py b/selfdrive/boardd/tests/test_pandad.py index 3434be3fe46f9b..65f3ad657c71aa 100755 --- a/selfdrive/boardd/tests/test_pandad.py +++ b/selfdrive/boardd/tests/test_pandad.py @@ -2,7 +2,6 @@ import os import pytest import time -import unittest import cereal.messaging as messaging from cereal import log @@ -16,16 +15,16 @@ @pytest.mark.tici -class TestPandad(unittest.TestCase): +class TestPandad: - def setUp(self): + def setup_method(self): # ensure panda is up if len(Panda.list()) == 0: self._run_test(60) self.spi = HARDWARE.get_device_type() != 'tici' - def tearDown(self): + def teardown_method(self): managed_processes['pandad'].stop() def _run_test(self, timeout=30) -> float: @@ -65,7 +64,7 @@ def _flash_bootstub_and_test(self, fn, 
expect_mismatch=False): assert Panda.wait_for_panda(None, 10) if expect_mismatch: - with self.assertRaises(PandaProtocolMismatch): + with pytest.raises(PandaProtocolMismatch): Panda() else: with Panda() as p: @@ -108,9 +107,10 @@ def test_best_case_startup_time(self): assert 0.1 < (sum(ts)/len(ts)) < (0.5 if self.spi else 5.0) print("startup times", ts, sum(ts) / len(ts)) + def test_protocol_version_check(self): if not self.spi: - raise unittest.SkipTest("SPI test") + pytest.skip("SPI test") # flash old fw fn = os.path.join(HERE, "bootstub.panda_h7_spiv0.bin") self._flash_bootstub_and_test(fn, expect_mismatch=True) @@ -127,7 +127,3 @@ def test_recover_from_bad_bootstub(self): self._assert_no_panda() self._run_test(60) - - -if __name__ == "__main__": - unittest.main() diff --git a/selfdrive/car/card.py b/selfdrive/car/card.py index 4dcd5ad0b4ebad..ed916a6fe1b896 100755 --- a/selfdrive/car/card.py +++ b/selfdrive/car/card.py @@ -14,13 +14,11 @@ from openpilot.selfdrive.boardd.boardd import can_list_to_can_capnp from openpilot.selfdrive.car.car_helpers import get_car, get_one_can from openpilot.selfdrive.car.interfaces import CarInterfaceBase -from openpilot.selfdrive.controls.lib.drive_helpers import VCruiseHelper from openpilot.selfdrive.controls.lib.events import Events REPLAY = "REPLAY" in os.environ EventName = car.CarEvent.EventName -ButtonType = car.CarState.ButtonEvent.Type class CarD: @@ -35,7 +33,6 @@ def __init__(self, CI=None): self.can_rcv_cum_timeout_counter = 0 # cumulative timeout count self.CC_prev = car.CarControl.new_message() - self.CS_prev = car.CarState.new_message() self.last_actuators = None @@ -79,8 +76,8 @@ def __init__(self, CI=None): self.params.put_nonblocking("CarParamsCache", cp_bytes) self.params.put_nonblocking("CarParamsPersistent", cp_bytes) + self.CS_prev = car.CarState.new_message() self.events = Events() - self.v_cruise_helper = VCruiseHelper(self.CP) def initialize(self): """Initialize CarInterface, once controls are ready""" @@ -121,11 +118,6 @@ def update_events(self, CS: car.CarState) -> car.CarState: self.events.add_from_msg(CS.events) - # Block resume if cruise never previously enabled - resume_pressed = any(be.type in (ButtonType.accelCruise, ButtonType.resumeCruise) for be in CS.buttonEvents) - if not self.CP.pcmCruise and not self.v_cruise_helper.v_cruise_initialized and resume_pressed: - self.events.add(EventName.resumeBlocked) - # Disable on rising edge of accelerator or brake. 
Also disable on brake when speed > 0 if (CS.gasPressed and not self.CS_prev.gasPressed and self.disengage_on_accelerator) or \ (CS.brakePressed and (not self.CS_prev.brakePressed or not CS.standstill)) or \ diff --git a/selfdrive/car/ford/tests/test_ford.py b/selfdrive/car/ford/tests/test_ford.py index 5d7b2c3332ba5a..72dd69980a224f 100755 --- a/selfdrive/car/ford/tests/test_ford.py +++ b/selfdrive/car/ford/tests/test_ford.py @@ -1,6 +1,5 @@ #!/usr/bin/env python3 import random -import unittest from collections.abc import Iterable import capnp @@ -43,31 +42,31 @@ } -class TestFordFW(unittest.TestCase): +class TestFordFW: def test_fw_query_config(self): for (ecu, addr, subaddr) in FW_QUERY_CONFIG.extra_ecus: - self.assertIn(ecu, ECU_ADDRESSES, "Unknown ECU") - self.assertEqual(addr, ECU_ADDRESSES[ecu], "ECU address mismatch") - self.assertIsNone(subaddr, "Unexpected ECU subaddress") + assert ecu in ECU_ADDRESSES, "Unknown ECU" + assert addr == ECU_ADDRESSES[ecu], "ECU address mismatch" + assert subaddr is None, "Unexpected ECU subaddress" @parameterized.expand(FW_VERSIONS.items()) def test_fw_versions(self, car_model: str, fw_versions: dict[tuple[capnp.lib.capnp._EnumModule, int, int | None], Iterable[bytes]]): for (ecu, addr, subaddr), fws in fw_versions.items(): - self.assertIn(ecu, ECU_PART_NUMBER, "Unexpected ECU") - self.assertEqual(addr, ECU_ADDRESSES[ecu], "ECU address mismatch") - self.assertIsNone(subaddr, "Unexpected ECU subaddress") + assert ecu in ECU_PART_NUMBER, "Unexpected ECU" + assert addr == ECU_ADDRESSES[ecu], "ECU address mismatch" + assert subaddr is None, "Unexpected ECU subaddress" for fw in fws: - self.assertEqual(len(fw), 24, "Expected ECU response to be 24 bytes") + assert len(fw) == 24, "Expected ECU response to be 24 bytes" match = FW_PATTERN.match(fw) - self.assertIsNotNone(match, f"Unable to parse FW: {fw!r}") + assert match is not None, f"Unable to parse FW: {fw!r}" if match: part_number = match.group("part_number") - self.assertIn(part_number, ECU_PART_NUMBER[ecu], f"Unexpected part number for {fw!r}") + assert part_number in ECU_PART_NUMBER[ecu], f"Unexpected part number for {fw!r}" codes = get_platform_codes([fw]) - self.assertEqual(1, len(codes), f"Unable to parse FW: {fw!r}") + assert 1 == len(codes), f"Unable to parse FW: {fw!r}" @settings(max_examples=100) @given(data=st.data()) @@ -85,7 +84,7 @@ def test_platform_codes_spot_check(self): b"PJ6T-14H102-ABJ\x00\x00\x00\x00\x00\x00\x00\x00\x00", b"LB5A-14C204-EAC\x00\x00\x00\x00\x00\x00\x00\x00\x00", ]) - self.assertEqual(results, {(b"X6A", b"J"), (b"Z6T", b"N"), (b"J6T", b"P"), (b"B5A", b"L")}) + assert results == {(b"X6A", b"J"), (b"Z6T", b"N"), (b"J6T", b"P"), (b"B5A", b"L")} def test_fuzzy_match(self): for platform, fw_by_addr in FW_VERSIONS.items(): @@ -100,7 +99,7 @@ def test_fuzzy_match(self): CP = car.CarParams.new_message(carFw=car_fw) matches = FW_QUERY_CONFIG.match_fw_to_car_fuzzy(build_fw_dict(CP.carFw), CP.carVin, FW_VERSIONS) - self.assertEqual(matches, {platform}) + assert matches == {platform} def test_match_fw_fuzzy(self): offline_fw = { @@ -132,18 +131,14 @@ def test_match_fw_fuzzy(self): (0x706, None): {b"LB5T-14F397-XX\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00"}, } candidates = FW_QUERY_CONFIG.match_fw_to_car_fuzzy(live_fw, '', {expected_fingerprint: offline_fw}) - self.assertEqual(candidates, {expected_fingerprint}) + assert candidates == {expected_fingerprint} # model year hint in between the range should match live_fw[(0x706, None)] = 
{b"MB5T-14F397-XX\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00"} candidates = FW_QUERY_CONFIG.match_fw_to_car_fuzzy(live_fw, '', {expected_fingerprint: offline_fw,}) - self.assertEqual(candidates, {expected_fingerprint}) + assert candidates == {expected_fingerprint} # unseen model year hint should not match live_fw[(0x760, None)] = {b"M1MC-2D053-XX\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00"} candidates = FW_QUERY_CONFIG.match_fw_to_car_fuzzy(live_fw, '', {expected_fingerprint: offline_fw}) - self.assertEqual(len(candidates), 0, "Should not match new model year hint") - - -if __name__ == "__main__": - unittest.main() + assert len(candidates) == 0, "Should not match new model year hint" diff --git a/selfdrive/car/gm/tests/test_gm.py b/selfdrive/car/gm/tests/test_gm.py index 01ec8533b830c4..389d0636c91f91 100755 --- a/selfdrive/car/gm/tests/test_gm.py +++ b/selfdrive/car/gm/tests/test_gm.py @@ -1,6 +1,5 @@ #!/usr/bin/env python3 from parameterized import parameterized -import unittest from openpilot.selfdrive.car.gm.fingerprints import FINGERPRINTS from openpilot.selfdrive.car.gm.values import CAMERA_ACC_CAR, GM_RX_OFFSET @@ -8,19 +7,15 @@ CAMERA_DIAGNOSTIC_ADDRESS = 0x24b -class TestGMFingerprint(unittest.TestCase): +class TestGMFingerprint: @parameterized.expand(FINGERPRINTS.items()) def test_can_fingerprints(self, car_model, fingerprints): - self.assertGreater(len(fingerprints), 0) + assert len(fingerprints) > 0 - self.assertTrue(all(len(finger) for finger in fingerprints)) + assert all(len(finger) for finger in fingerprints) # The camera can sometimes be communicating on startup if car_model in CAMERA_ACC_CAR: for finger in fingerprints: for required_addr in (CAMERA_DIAGNOSTIC_ADDRESS, CAMERA_DIAGNOSTIC_ADDRESS + GM_RX_OFFSET): - self.assertEqual(finger.get(required_addr), 8, required_addr) - - -if __name__ == "__main__": - unittest.main() + assert finger.get(required_addr) == 8, required_addr diff --git a/selfdrive/car/honda/tests/test_honda.py b/selfdrive/car/honda/tests/test_honda.py index 60d91b84a832f8..54d177d2ed2880 100755 --- a/selfdrive/car/honda/tests/test_honda.py +++ b/selfdrive/car/honda/tests/test_honda.py @@ -1,20 +1,15 @@ #!/usr/bin/env python3 import re -import unittest from openpilot.selfdrive.car.honda.fingerprints import FW_VERSIONS HONDA_FW_VERSION_RE = br"[A-Z0-9]{5}-[A-Z0-9]{3}(-|,)[A-Z0-9]{4}(\x00){2}$" -class TestHondaFingerprint(unittest.TestCase): +class TestHondaFingerprint: def test_fw_version_format(self): # Asserts all FW versions follow an expected format for fw_by_ecu in FW_VERSIONS.values(): for fws in fw_by_ecu.values(): for fw in fws: - self.assertTrue(re.match(HONDA_FW_VERSION_RE, fw) is not None, fw) - - -if __name__ == "__main__": - unittest.main() + assert re.match(HONDA_FW_VERSION_RE, fw) is not None, fw diff --git a/selfdrive/car/hyundai/fingerprints.py b/selfdrive/car/hyundai/fingerprints.py index 0c43b8e340bda4..b6318eb8fa0b32 100644 --- a/selfdrive/car/hyundai/fingerprints.py +++ b/selfdrive/car/hyundai/fingerprints.py @@ -1003,6 +1003,7 @@ CAR.HYUNDAI_TUCSON_4TH_GEN: { (Ecu.fwdCamera, 0x7c4, None): [ b'\xf1\x00NX4 FR_CMR AT CAN LHD 1.00 1.01 99211-N9100 14A', + b'\xf1\x00NX4 FR_CMR AT CAN LHD 1.00 1.00 99211-N9260 14Y', b'\xf1\x00NX4 FR_CMR AT EUR LHD 1.00 1.00 99211-N9220 14K', b'\xf1\x00NX4 FR_CMR AT EUR LHD 1.00 2.02 99211-N9000 14E', b'\xf1\x00NX4 FR_CMR AT USA LHD 1.00 1.00 99211-N9210 14G', diff --git a/selfdrive/car/hyundai/tests/test_hyundai.py b/selfdrive/car/hyundai/tests/test_hyundai.py index 0753b372e147c3..db2110b0de4c1b 100755 
--- a/selfdrive/car/hyundai/tests/test_hyundai.py +++ b/selfdrive/car/hyundai/tests/test_hyundai.py @@ -1,6 +1,7 @@ #!/usr/bin/env python3 from hypothesis import settings, given, strategies as st -import unittest + +import pytest from cereal import car from openpilot.selfdrive.car.fw_versions import build_fw_dict @@ -39,19 +40,19 @@ CANFD_EXPECTED_ECUS = {Ecu.fwdCamera, Ecu.fwdRadar} -class TestHyundaiFingerprint(unittest.TestCase): +class TestHyundaiFingerprint: def test_can_features(self): # Test no EV/HEV in any gear lists (should all use ELECT_GEAR) - self.assertEqual(set.union(*CAN_GEARS.values()) & (HYBRID_CAR | EV_CAR), set()) + assert set.union(*CAN_GEARS.values()) & (HYBRID_CAR | EV_CAR) == set() # Test CAN FD car not in CAN feature lists can_specific_feature_list = set.union(*CAN_GEARS.values(), *CHECKSUM.values(), LEGACY_SAFETY_MODE_CAR, UNSUPPORTED_LONGITUDINAL_CAR, CAMERA_SCC_CAR) for car_model in CANFD_CAR: - self.assertNotIn(car_model, can_specific_feature_list, "CAN FD car unexpectedly found in a CAN feature list") + assert car_model not in can_specific_feature_list, "CAN FD car unexpectedly found in a CAN feature list" def test_hybrid_ev_sets(self): - self.assertEqual(HYBRID_CAR & EV_CAR, set(), "Shared cars between hybrid and EV") - self.assertEqual(CANFD_CAR & HYBRID_CAR, set(), "Hard coding CAN FD cars as hybrid is no longer supported") + assert HYBRID_CAR & EV_CAR == set(), "Shared cars between hybrid and EV" + assert CANFD_CAR & HYBRID_CAR == set(), "Hard coding CAN FD cars as hybrid is no longer supported" def test_canfd_ecu_whitelist(self): # Asserts only expected Ecus can exist in database for CAN-FD cars @@ -59,34 +60,34 @@ def test_canfd_ecu_whitelist(self): ecus = {fw[0] for fw in FW_VERSIONS[car_model].keys()} ecus_not_in_whitelist = ecus - CANFD_EXPECTED_ECUS ecu_strings = ", ".join([f"Ecu.{ECU_NAME[ecu]}" for ecu in ecus_not_in_whitelist]) - self.assertEqual(len(ecus_not_in_whitelist), 0, - f"{car_model}: Car model has unexpected ECUs: {ecu_strings}") + assert len(ecus_not_in_whitelist) == 0, \ + f"{car_model}: Car model has unexpected ECUs: {ecu_strings}" - def test_blacklisted_parts(self): + def test_blacklisted_parts(self, subtests): # Asserts no ECUs known to be shared across platforms exist in the database. 
# Tucson having Santa Cruz camera and EPS for example for car_model, ecus in FW_VERSIONS.items(): - with self.subTest(car_model=car_model.value): + with subtests.test(car_model=car_model.value): if car_model == CAR.HYUNDAI_SANTA_CRUZ_1ST_GEN: - raise unittest.SkipTest("Skip checking Santa Cruz for its parts") + pytest.skip("Skip checking Santa Cruz for its parts") for code, _ in get_platform_codes(ecus[(Ecu.fwdCamera, 0x7c4, None)]): if b"-" not in code: continue part = code.split(b"-")[1] - self.assertFalse(part.startswith(b'CW'), "Car has bad part number") + assert not part.startswith(b'CW'), "Car has bad part number" - def test_correct_ecu_response_database(self): + def test_correct_ecu_response_database(self, subtests): """ Assert standard responses for certain ECUs, since they can respond to multiple queries with different data """ expected_fw_prefix = HYUNDAI_VERSION_REQUEST_LONG[1:] for car_model, ecus in FW_VERSIONS.items(): - with self.subTest(car_model=car_model.value): + with subtests.test(car_model=car_model.value): for ecu, fws in ecus.items(): - self.assertTrue(all(fw.startswith(expected_fw_prefix) for fw in fws), - f"FW from unexpected request in database: {(ecu, fws)}") + assert all(fw.startswith(expected_fw_prefix) for fw in fws), \ + f"FW from unexpected request in database: {(ecu, fws)}" @settings(max_examples=100) @given(data=st.data()) @@ -96,10 +97,10 @@ def test_platform_codes_fuzzy_fw(self, data): fws = data.draw(fw_strategy) get_platform_codes(fws) - def test_expected_platform_codes(self): + def test_expected_platform_codes(self, subtests): # Ensures we don't accidentally add multiple platform codes for a car unless it is intentional for car_model, ecus in FW_VERSIONS.items(): - with self.subTest(car_model=car_model.value): + with subtests.test(car_model=car_model.value): for ecu, fws in ecus.items(): if ecu[0] not in PLATFORM_CODE_ECUS: continue @@ -107,37 +108,37 @@ def test_expected_platform_codes(self): # Third and fourth character are usually EV/hybrid identifiers codes = {code.split(b"-")[0][:2] for code, _ in get_platform_codes(fws)} if car_model == CAR.HYUNDAI_PALISADE: - self.assertEqual(codes, {b"LX", b"ON"}, f"Car has unexpected platform codes: {car_model} {codes}") + assert codes == {b"LX", b"ON"}, f"Car has unexpected platform codes: {car_model} {codes}" elif car_model == CAR.HYUNDAI_KONA_EV and ecu[0] == Ecu.fwdCamera: - self.assertEqual(codes, {b"OE", b"OS"}, f"Car has unexpected platform codes: {car_model} {codes}") + assert codes == {b"OE", b"OS"}, f"Car has unexpected platform codes: {car_model} {codes}" else: - self.assertEqual(len(codes), 1, f"Car has multiple platform codes: {car_model} {codes}") + assert len(codes) == 1, f"Car has multiple platform codes: {car_model} {codes}" # Tests for platform codes, part numbers, and FW dates which Hyundai will use to fuzzy # fingerprint in the absence of full FW matches: - def test_platform_code_ecus_available(self): + def test_platform_code_ecus_available(self, subtests): # TODO: add queries for these non-CAN FD cars to get EPS no_eps_platforms = CANFD_CAR | {CAR.KIA_SORENTO, CAR.KIA_OPTIMA_G4, CAR.KIA_OPTIMA_G4_FL, CAR.KIA_OPTIMA_H, CAR.KIA_OPTIMA_H_G4_FL, CAR.HYUNDAI_SONATA_LF, CAR.HYUNDAI_TUCSON, CAR.GENESIS_G90, CAR.GENESIS_G80, CAR.HYUNDAI_ELANTRA} # Asserts ECU keys essential for fuzzy fingerprinting are available on all platforms for car_model, ecus in FW_VERSIONS.items(): - with self.subTest(car_model=car_model.value): + with subtests.test(car_model=car_model.value): for platform_code_ecu in 
PLATFORM_CODE_ECUS: if platform_code_ecu in (Ecu.fwdRadar, Ecu.eps) and car_model == CAR.HYUNDAI_GENESIS: continue if platform_code_ecu == Ecu.eps and car_model in no_eps_platforms: continue - self.assertIn(platform_code_ecu, [e[0] for e in ecus]) + assert platform_code_ecu in [e[0] for e in ecus] - def test_fw_format(self): + def test_fw_format(self, subtests): # Asserts: # - every supported ECU FW version returns one platform code # - every supported ECU FW version has a part number # - expected parsing of ECU FW dates for car_model, ecus in FW_VERSIONS.items(): - with self.subTest(car_model=car_model.value): + with subtests.test(car_model=car_model.value): for ecu, fws in ecus.items(): if ecu[0] not in PLATFORM_CODE_ECUS: continue @@ -145,40 +146,40 @@ def test_fw_format(self): codes = set() for fw in fws: result = get_platform_codes([fw]) - self.assertEqual(1, len(result), f"Unable to parse FW: {fw}") + assert 1 == len(result), f"Unable to parse FW: {fw}" codes |= result if ecu[0] not in DATE_FW_ECUS or car_model in NO_DATES_PLATFORMS: - self.assertTrue(all(date is None for _, date in codes)) + assert all(date is None for _, date in codes) else: - self.assertTrue(all(date is not None for _, date in codes)) + assert all(date is not None for _, date in codes) if car_model == CAR.HYUNDAI_GENESIS: - raise unittest.SkipTest("No part numbers for car model") + pytest.skip("No part numbers for car model") # Hyundai places the ECU part number in their FW versions, assert all parsable # Some examples of valid formats: b"56310-L0010", b"56310L0010", b"56310/M6300" - self.assertTrue(all(b"-" in code for code, _ in codes), - f"FW does not have part number: {fw}") + assert all(b"-" in code for code, _ in codes), \ + f"FW does not have part number: {fw}" def test_platform_codes_spot_check(self): # Asserts basic platform code parsing behavior for a few cases results = get_platform_codes([b"\xf1\x00DH LKAS 1.1 -150210"]) - self.assertEqual(results, {(b"DH", b"150210")}) + assert results == {(b"DH", b"150210")} # Some cameras and all radars do not have dates results = get_platform_codes([b"\xf1\x00AEhe SCC H-CUP 1.01 1.01 96400-G2000 "]) - self.assertEqual(results, {(b"AEhe-G2000", None)}) + assert results == {(b"AEhe-G2000", None)} results = get_platform_codes([b"\xf1\x00CV1_ RDR ----- 1.00 1.01 99110-CV000 "]) - self.assertEqual(results, {(b"CV1-CV000", None)}) + assert results == {(b"CV1-CV000", None)} results = get_platform_codes([ b"\xf1\x00DH LKAS 1.1 -150210", b"\xf1\x00AEhe SCC H-CUP 1.01 1.01 96400-G2000 ", b"\xf1\x00CV1_ RDR ----- 1.00 1.01 99110-CV000 ", ]) - self.assertEqual(results, {(b"DH", b"150210"), (b"AEhe-G2000", None), (b"CV1-CV000", None)}) + assert results == {(b"DH", b"150210"), (b"AEhe-G2000", None), (b"CV1-CV000", None)} results = get_platform_codes([ b"\xf1\x00LX2 MFC AT USA LHD 1.00 1.07 99211-S8100 220222", @@ -186,8 +187,8 @@ def test_platform_codes_spot_check(self): b"\xf1\x00ON MFC AT USA LHD 1.00 1.01 99211-S9100 190405", b"\xf1\x00ON MFC AT USA LHD 1.00 1.03 99211-S9100 190720", ]) - self.assertEqual(results, {(b"LX2-S8100", b"220222"), (b"LX2-S8100", b"211103"), - (b"ON-S9100", b"190405"), (b"ON-S9100", b"190720")}) + assert results == {(b"LX2-S8100", b"220222"), (b"LX2-S8100", b"211103"), + (b"ON-S9100", b"190405"), (b"ON-S9100", b"190720")} def test_fuzzy_excluded_platforms(self): # Asserts a list of platforms that will not fuzzy fingerprint with platform codes due to them being shared. 
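Throughout this diff, unittest's "with self.subTest(...)" becomes "with subtests.test(...)" and each converted test method gains a "subtests" argument. A minimal sketch of that pattern, assuming the pytest-subtests plugin (which provides the subtests fixture) is installed; FW_DB and its platform names are illustrative stand-ins, not the real FW_VERSIONS:

# sketch only: requires the pytest-subtests plugin (pip install pytest-subtests)
FW_DB = {
  "PLATFORM_A": [b"\xf1\x0056310-L0010"],  # hypothetical FW blobs
  "PLATFORM_B": [b"\xf1\x0056310L0010"],
}

class TestSubtestsPattern:
  def test_fw_prefix(self, subtests):
    for platform, fws in FW_DB.items():
      # each subtests.test() block passes or fails independently,
      # mirroring unittest's self.subTest() reporting
      with subtests.test(platform=platform):
        assert all(fw.startswith(b"\xf1\x00") for fw in fws)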
@@ -211,12 +212,8 @@ def test_fuzzy_excluded_platforms(self): CP = car.CarParams.new_message(carFw=car_fw) matches = FW_QUERY_CONFIG.match_fw_to_car_fuzzy(build_fw_dict(CP.carFw), CP.carVin, FW_VERSIONS) if len(matches) == 1: - self.assertEqual(list(matches)[0], platform) + assert list(matches)[0] == platform else: platforms_with_shared_codes.add(platform) - self.assertEqual(platforms_with_shared_codes, excluded_platforms) - - -if __name__ == "__main__": - unittest.main() + assert platforms_with_shared_codes == excluded_platforms diff --git a/selfdrive/car/hyundai/values.py b/selfdrive/car/hyundai/values.py index db905597046d40..1e752185e6049a 100644 --- a/selfdrive/car/hyundai/values.py +++ b/selfdrive/car/hyundai/values.py @@ -323,6 +323,7 @@ class CAR(Platforms): HyundaiCarDocs("Hyundai Tucson 2022", car_parts=CarParts.common([CarHarness.hyundai_n])), HyundaiCarDocs("Hyundai Tucson 2023-24", "All", car_parts=CarParts.common([CarHarness.hyundai_n])), HyundaiCarDocs("Hyundai Tucson Hybrid 2022-24", "All", car_parts=CarParts.common([CarHarness.hyundai_n])), + HyundaiCarDocs("Hyundai Tucson Plug-in Hybrid 2024", "All", car_parts=CarParts.common([CarHarness.hyundai_n])), ], CarSpecs(mass=1630, wheelbase=2.756, steerRatio=13.7, tireStiffnessFactor=0.385), ) diff --git a/selfdrive/car/subaru/tests/test_subaru.py b/selfdrive/car/subaru/tests/test_subaru.py index c8cdf66065c2c2..33040442b69778 100644 --- a/selfdrive/car/subaru/tests/test_subaru.py +++ b/selfdrive/car/subaru/tests/test_subaru.py @@ -1,5 +1,4 @@ from cereal import car -import unittest from openpilot.selfdrive.car.subaru.fingerprints import FW_VERSIONS Ecu = car.CarParams.Ecu @@ -7,14 +6,11 @@ ECU_NAME = {v: k for k, v in Ecu.schema.enumerants.items()} -class TestSubaruFingerprint(unittest.TestCase): +class TestSubaruFingerprint: def test_fw_version_format(self): for platform, fws_per_ecu in FW_VERSIONS.items(): for (ecu, _, _), fws in fws_per_ecu.items(): fw_size = len(fws[0]) for fw in fws: - self.assertEqual(len(fw), fw_size, f"{platform} {ecu}: {len(fw)} {fw_size}") + assert len(fw) == fw_size, f"{platform} {ecu}: {len(fw)} {fw_size}" - -if __name__ == "__main__": - unittest.main() diff --git a/selfdrive/car/tests/test_can_fingerprint.py b/selfdrive/car/tests/test_can_fingerprint.py index 8df70073394fb1..bb585d567f326a 100755 --- a/selfdrive/car/tests/test_can_fingerprint.py +++ b/selfdrive/car/tests/test_can_fingerprint.py @@ -1,13 +1,12 @@ #!/usr/bin/env python3 from parameterized import parameterized -import unittest from cereal import log, messaging from openpilot.selfdrive.car.car_helpers import FRAME_FINGERPRINT, can_fingerprint from openpilot.selfdrive.car.fingerprints import _FINGERPRINTS as FINGERPRINTS -class TestCanFingerprint(unittest.TestCase): +class TestCanFingerprint: @parameterized.expand(list(FINGERPRINTS.items())) def test_can_fingerprint(self, car_model, fingerprints): """Tests online fingerprinting function on offline fingerprints""" @@ -21,12 +20,12 @@ def test_can_fingerprint(self, car_model, fingerprints): empty_can = messaging.new_message('can', 0) car_fingerprint, finger = can_fingerprint(lambda: next(fingerprint_iter, empty_can)) # noqa: B023 - self.assertEqual(car_fingerprint, car_model) - self.assertEqual(finger[0], fingerprint) - self.assertEqual(finger[1], fingerprint) - self.assertEqual(finger[2], {}) + assert car_fingerprint == car_model + assert finger[0] == fingerprint + assert finger[1] == fingerprint + assert finger[2] == {} - def test_timing(self): + def test_timing(self, subtests): # just 
pick any CAN fingerprinting car car_model = "CHEVROLET_BOLT_EUV" fingerprint = FINGERPRINTS[car_model][0] @@ -50,7 +49,7 @@ def test_timing(self): cases.append((FRAME_FINGERPRINT * 2, None, can)) for expected_frames, car_model, can in cases: - with self.subTest(expected_frames=expected_frames, car_model=car_model): + with subtests.test(expected_frames=expected_frames, car_model=car_model): frames = 0 def test(): @@ -59,9 +58,5 @@ def test(): return can # noqa: B023 car_fingerprint, _ = can_fingerprint(test) - self.assertEqual(car_fingerprint, car_model) - self.assertEqual(frames, expected_frames + 2) # TODO: fix extra frames - - -if __name__ == "__main__": - unittest.main() + assert car_fingerprint == car_model + assert frames == expected_frames + 2  # TODO: fix extra frames diff --git a/selfdrive/car/tests/test_car_interfaces.py b/selfdrive/car/tests/test_car_interfaces.py index 72c1e27ab971b2..4bbecd99feb4b8 100755 --- a/selfdrive/car/tests/test_car_interfaces.py +++ b/selfdrive/car/tests/test_car_interfaces.py @@ -1,7 +1,6 @@ #!/usr/bin/env python3 import os import math -import unittest import hypothesis.strategies as st from hypothesis import Phase, given, settings import importlib @@ -45,7 +44,7 @@ def get_fuzzy_car_interface_args(draw: DrawType) -> dict: return params -class TestCarInterfaces(unittest.TestCase): +class TestCarInterfaces: # FIXME: Due to the lists used in carParams, Phase.target is very slow and will cause # many generated examples to overrun when max_examples > ~20, don't use it @parameterized.expand([(car,) for car in sorted(all_known_cars())]) @@ -63,28 +62,28 @@ def test_car_interfaces(self, car_name, data): assert car_params assert car_interface - self.assertGreater(car_params.mass, 1) - self.assertGreater(car_params.wheelbase, 0) + assert car_params.mass > 1 + assert car_params.wheelbase > 0 # centerToFront is center of gravity to front wheels, assert a reasonable range - self.assertTrue(car_params.wheelbase * 0.3 < car_params.centerToFront < car_params.wheelbase * 0.7) - self.assertGreater(car_params.maxLateralAccel, 0) + assert car_params.wheelbase * 0.3 < car_params.centerToFront < car_params.wheelbase * 0.7 + assert car_params.maxLateralAccel > 0 # Longitudinal sanity checks - self.assertEqual(len(car_params.longitudinalTuning.kpV), len(car_params.longitudinalTuning.kpBP)) - self.assertEqual(len(car_params.longitudinalTuning.kiV), len(car_params.longitudinalTuning.kiBP)) - self.assertEqual(len(car_params.longitudinalTuning.deadzoneV), len(car_params.longitudinalTuning.deadzoneBP)) + assert len(car_params.longitudinalTuning.kpV) == len(car_params.longitudinalTuning.kpBP) + assert len(car_params.longitudinalTuning.kiV) == len(car_params.longitudinalTuning.kiBP) + assert len(car_params.longitudinalTuning.deadzoneV) == len(car_params.longitudinalTuning.deadzoneBP) # Lateral sanity checks if car_params.steerControlType != car.CarParams.SteerControlType.angle: tune = car_params.lateralTuning if tune.which() == 'pid': - self.assertTrue(not math.isnan(tune.pid.kf) and tune.pid.kf > 0) - self.assertTrue(len(tune.pid.kpV) > 0 and len(tune.pid.kpV) == len(tune.pid.kpBP)) - self.assertTrue(len(tune.pid.kiV) > 0 and len(tune.pid.kiV) == len(tune.pid.kiBP)) + assert not math.isnan(tune.pid.kf) and tune.pid.kf > 0 + assert len(tune.pid.kpV) > 0 and len(tune.pid.kpV) == len(tune.pid.kpBP) + assert len(tune.pid.kiV) > 0 and len(tune.pid.kiV) == len(tune.pid.kiBP) elif tune.which() == 'torque': - self.assertTrue(not math.isnan(tune.torque.kf) and tune.torque.kf > 0) -
self.assertTrue(not math.isnan(tune.torque.friction) and tune.torque.friction > 0) + assert not math.isnan(tune.torque.kf) and tune.torque.kf > 0 + assert not math.isnan(tune.torque.friction) and tune.torque.friction > 0 cc_msg = FuzzyGenerator.get_random_msg(data.draw, car.CarControl, real_floats=True) # Run car interface @@ -93,7 +92,6 @@ def test_car_interfaces(self, car_name, data): for _ in range(10): car_interface.update(CC, []) car_interface.apply(CC, now_nanos) - car_interface.apply(CC, now_nanos) now_nanos += DT_CTRL * 1e9 # 10 ms CC = car.CarControl.new_message(**cc_msg) @@ -101,7 +99,6 @@ def test_car_interfaces(self, car_name, data): for _ in range(10): car_interface.update(CC, []) car_interface.apply(CC, now_nanos) - car_interface.apply(CC, now_nanos) now_nanos += DT_CTRL * 1e9 # 10ms # Test controller initialization @@ -130,33 +127,29 @@ def test_car_interfaces(self, car_name, data): if not car_params.radarUnavailable and radar_interface.rcp is not None: cans = [messaging.new_message('can', 1).to_bytes() for _ in range(5)] rr = radar_interface.update(cans) - self.assertTrue(rr is None or len(rr.errors) > 0) + assert rr is None or len(rr.errors) > 0 def test_interface_attrs(self): """Asserts basic behavior of interface attribute getter""" num_brands = len(get_interface_attr('CAR')) - self.assertGreaterEqual(num_brands, 13) + assert num_brands >= 13 # Should return value for all brands when not combining, even if attribute doesn't exist ret = get_interface_attr('FAKE_ATTR') - self.assertEqual(len(ret), num_brands) + assert len(ret) == num_brands # Make sure we can combine dicts ret = get_interface_attr('DBC', combine_brands=True) - self.assertGreaterEqual(len(ret), 160) + assert len(ret) >= 160 # We don't support combining non-dicts ret = get_interface_attr('CAR', combine_brands=True) - self.assertEqual(len(ret), 0) + assert len(ret) == 0 # If brand has None value, it shouldn't return when ignore_none=True is specified none_brands = {b for b, v in get_interface_attr('FINGERPRINTS').items() if v is None} - self.assertGreaterEqual(len(none_brands), 1) + assert len(none_brands) >= 1 ret = get_interface_attr('FINGERPRINTS', ignore_none=True) none_brands_in_ret = none_brands.intersection(ret) - self.assertEqual(len(none_brands_in_ret), 0, f'Brands with None values in ignore_none=True result: {none_brands_in_ret}') - - -if __name__ == "__main__": - unittest.main() + assert len(none_brands_in_ret) == 0, f'Brands with None values in ignore_none=True result: {none_brands_in_ret}' diff --git a/selfdrive/car/tests/test_docs.py b/selfdrive/car/tests/test_docs.py index 143b402d5f264c..0ed95e18f25985 100755 --- a/selfdrive/car/tests/test_docs.py +++ b/selfdrive/car/tests/test_docs.py @@ -1,8 +1,8 @@ #!/usr/bin/env python3 from collections import defaultdict import os +import pytest import re -import unittest from openpilot.common.basedir import BASEDIR from openpilot.selfdrive.car.car_helpers import interfaces @@ -14,9 +14,9 @@ from openpilot.selfdrive.debug.print_docs_diff import print_car_docs_diff -class TestCarDocs(unittest.TestCase): +class TestCarDocs: @classmethod - def setUpClass(cls): + def setup_class(cls): cls.all_cars = get_all_car_docs() def test_generator(self): @@ -24,8 +24,7 @@ def test_generator(self): with open(CARS_MD_OUT) as f: current_cars_md = f.read() - self.assertEqual(generated_cars_md, current_cars_md, - "Run selfdrive/car/docs.py to update the compatibility documentation") + assert generated_cars_md == current_cars_md, "Run selfdrive/car/docs.py to update the 
compatibility documentation" def test_docs_diff(self): dump_path = os.path.join(BASEDIR, "selfdrive", "car", "tests", "cars_dump") @@ -33,65 +32,61 @@ def test_docs_diff(self): print_car_docs_diff(dump_path) os.remove(dump_path) - def test_duplicate_years(self): + def test_duplicate_years(self, subtests): make_model_years = defaultdict(list) for car in self.all_cars: - with self.subTest(car_docs_name=car.name): + with subtests.test(car_docs_name=car.name): make_model = (car.make, car.model) for year in car.year_list: - self.assertNotIn(year, make_model_years[make_model], f"{car.name}: Duplicate model year") + assert year not in make_model_years[make_model], f"{car.name}: Duplicate model year" make_model_years[make_model].append(year) - def test_missing_car_docs(self): + def test_missing_car_docs(self, subtests): all_car_docs_platforms = [name for name, config in PLATFORMS.items()] for platform in sorted(interfaces.keys()): - with self.subTest(platform=platform): - self.assertTrue(platform in all_car_docs_platforms, f"Platform: {platform} doesn't have a CarDocs entry") + with subtests.test(platform=platform): + assert platform in all_car_docs_platforms, f"Platform: {platform} doesn't have a CarDocs entry" - def test_naming_conventions(self): + def test_naming_conventions(self, subtests): # Asserts market-standard car naming conventions by brand for car in self.all_cars: - with self.subTest(car=car): + with subtests.test(car=car.name): tokens = car.model.lower().split(" ") if car.car_name == "hyundai": - self.assertNotIn("phev", tokens, "Use `Plug-in Hybrid`") - self.assertNotIn("hev", tokens, "Use `Hybrid`") + assert "phev" not in tokens, "Use `Plug-in Hybrid`" + assert "hev" not in tokens, "Use `Hybrid`" if "plug-in hybrid" in car.model.lower(): - self.assertIn("Plug-in Hybrid", car.model, "Use correct capitalization") + assert "Plug-in Hybrid" in car.model, "Use correct capitalization" if car.make != "Kia": - self.assertNotIn("ev", tokens, "Use `Electric`") + assert "ev" not in tokens, "Use `Electric`" elif car.car_name == "toyota": if "rav4" in tokens: - self.assertIn("RAV4", car.model, "Use correct capitalization") + assert "RAV4" in car.model, "Use correct capitalization" - def test_torque_star(self): + def test_torque_star(self, subtests): # Asserts brand-specific assumptions around steering torque star for car in self.all_cars: - with self.subTest(car=car): + with subtests.test(car=car.name): # honda sanity check, it's the definition of a no torque star if car.car_fingerprint in (HONDA.HONDA_ACCORD, HONDA.HONDA_CIVIC, HONDA.HONDA_CRV, HONDA.HONDA_ODYSSEY, HONDA.HONDA_PILOT): - self.assertEqual(car.row[Column.STEERING_TORQUE], Star.EMPTY, f"{car.name} has full torque star") + assert car.row[Column.STEERING_TORQUE] == Star.EMPTY, f"{car.name} has full torque star" elif car.car_name in ("toyota", "hyundai"): - self.assertNotEqual(car.row[Column.STEERING_TORQUE], Star.EMPTY, f"{car.name} has no torque star") + assert car.row[Column.STEERING_TORQUE] != Star.EMPTY, f"{car.name} has no torque star" - def test_year_format(self): + def test_year_format(self, subtests): for car in self.all_cars: - with self.subTest(car=car): - self.assertIsNone(re.search(r"\d{4}-\d{4}", car.name), f"Format years correctly: {car.name}") + with subtests.test(car=car.name): + assert re.search(r"\d{4}-\d{4}", car.name) is None, f"Format years correctly: {car.name}" - def test_harnesses(self): + def test_harnesses(self, subtests): for car in self.all_cars: - with self.subTest(car=car): + with 
subtests.test(car=car.name): if car.name == "comma body": - raise unittest.SkipTest + pytest.skip() car_part_type = [p.part_type for p in car.car_parts.all_parts()] car_parts = list(car.car_parts.all_parts()) - self.assertTrue(len(car_parts) > 0, f"Need to specify car parts: {car.name}") - self.assertTrue(car_part_type.count(PartType.connector) == 1, f"Need to specify one harness connector: {car.name}") - self.assertTrue(car_part_type.count(PartType.mount) == 1, f"Need to specify one mount: {car.name}") - self.assertTrue(Cable.right_angle_obd_c_cable_1_5ft in car_parts, f"Need to specify a right angle OBD-C cable (1.5ft): {car.name}") - - -if __name__ == "__main__": - unittest.main() + assert len(car_parts) > 0, f"Need to specify car parts: {car.name}" + assert car_part_type.count(PartType.connector) == 1, f"Need to specify one harness connector: {car.name}" + assert car_part_type.count(PartType.mount) == 1, f"Need to specify one mount: {car.name}" + assert Cable.right_angle_obd_c_cable_1_5ft in car_parts, f"Need to specify a right angle OBD-C cable (1.5ft): {car.name}" diff --git a/selfdrive/car/tests/test_fw_fingerprint.py b/selfdrive/car/tests/test_fw_fingerprint.py index ed5edbef313633..230e6f10e1c27f 100755 --- a/selfdrive/car/tests/test_fw_fingerprint.py +++ b/selfdrive/car/tests/test_fw_fingerprint.py @@ -1,10 +1,9 @@ #!/usr/bin/env python3 +import pytest import random import time -import unittest from collections import defaultdict from parameterized import parameterized -from unittest import mock from cereal import car from openpilot.selfdrive.car.car_helpers import interfaces @@ -27,11 +26,11 @@ def send(self, msg): pass -class TestFwFingerprint(unittest.TestCase): +class TestFwFingerprint: def assertFingerprints(self, candidates, expected): candidates = list(candidates) - self.assertEqual(len(candidates), 1, f"got more than one candidate: {candidates}") - self.assertEqual(candidates[0], expected) + assert len(candidates) == 1, f"got more than one candidate: {candidates}" + assert candidates[0] == expected @parameterized.expand([(b, c, e[c], n) for b, e in VERSIONS.items() for c in e for n in (True, False)]) def test_exact_match(self, brand, car_model, ecus, test_non_essential): @@ -62,7 +61,7 @@ def test_custom_fuzzy_match(self, brand, car_model, ecus): # Assert brand-specific fuzzy fingerprinting function doesn't disagree with standard fuzzy function config = FW_QUERY_CONFIGS[brand] if config.match_fw_to_car_fuzzy is None: - raise unittest.SkipTest("Brand does not implement custom fuzzy fingerprinting function") + pytest.skip("Brand does not implement custom fuzzy fingerprinting function") CP = car.CarParams.new_message() for _ in range(5): @@ -77,14 +76,14 @@ def test_custom_fuzzy_match(self, brand, car_model, ecus): # If both have matches, they must agree if len(matches) == 1 and len(brand_matches) == 1: - self.assertEqual(matches, brand_matches) + assert matches == brand_matches @parameterized.expand([(b, c, e[c]) for b, e in VERSIONS.items() for c in e]) def test_fuzzy_match_ecu_count(self, brand, car_model, ecus): # Asserts that fuzzy matching does not count matching FW, but ECU address keys valid_ecus = [e for e in ecus if e[0] not in FUZZY_EXCLUDE_ECUS] if not len(valid_ecus): - raise unittest.SkipTest("Car model has no compatible ECUs for fuzzy matching") + pytest.skip("Car model has no compatible ECUs for fuzzy matching") fw = [] for ecu in valid_ecus: @@ -99,19 +98,19 @@ def test_fuzzy_match_ecu_count(self, brand, car_model, ecus): # Assert no match if there are not 
enough unique ECUs unique_ecus = {(f['address'], f['subAddress']) for f in fw} if len(unique_ecus) < 2: - self.assertEqual(len(matches), 0, car_model) + assert len(matches) == 0, car_model # There won't always be a match due to shared FW, but if there is it should be correct elif len(matches): self.assertFingerprints(matches, car_model) - def test_fw_version_lists(self): + def test_fw_version_lists(self, subtests): for car_model, ecus in FW_VERSIONS.items(): - with self.subTest(car_model=car_model.value): + with subtests.test(car_model=car_model.value): for ecu, ecu_fw in ecus.items(): - with self.subTest(ecu): + with subtests.test(ecu): duplicates = {fw for fw in ecu_fw if ecu_fw.count(fw) > 1} - self.assertFalse(len(duplicates), f'{car_model}: Duplicate FW versions: Ecu.{ECU_NAME[ecu[0]]}, {duplicates}') - self.assertGreater(len(ecu_fw), 0, f'{car_model}: No FW versions: Ecu.{ECU_NAME[ecu[0]]}') + assert not len(duplicates), f'{car_model}: Duplicate FW versions: Ecu.{ECU_NAME[ecu[0]]}, {duplicates}' + assert len(ecu_fw) > 0, f'{car_model}: No FW versions: Ecu.{ECU_NAME[ecu[0]]}' def test_all_addrs_map_to_one_ecu(self): for brand, cars in VERSIONS.items(): @@ -121,59 +120,59 @@ def test_all_addrs_map_to_one_ecu(self): addr_to_ecu[(addr, sub_addr)].add(ecu_type) ecus_for_addr = addr_to_ecu[(addr, sub_addr)] ecu_strings = ", ".join([f'Ecu.{ECU_NAME[ecu]}' for ecu in ecus_for_addr]) - self.assertLessEqual(len(ecus_for_addr), 1, f"{brand} has multiple ECUs that map to one address: {ecu_strings} -> ({hex(addr)}, {sub_addr})") + assert len(ecus_for_addr) <= 1, f"{brand} has multiple ECUs that map to one address: {ecu_strings} -> ({hex(addr)}, {sub_addr})" - def test_data_collection_ecus(self): + def test_data_collection_ecus(self, subtests): # Asserts no extra ECUs are in the fingerprinting database for brand, config in FW_QUERY_CONFIGS.items(): for car_model, ecus in VERSIONS[brand].items(): bad_ecus = set(ecus).intersection(config.extra_ecus) - with self.subTest(car_model=car_model.value): - self.assertFalse(len(bad_ecus), f'{car_model}: Fingerprints contain ECUs added for data collection: {bad_ecus}') + with subtests.test(car_model=car_model.value): + assert not len(bad_ecus), f'{car_model}: Fingerprints contain ECUs added for data collection: {bad_ecus}' - def test_blacklisted_ecus(self): + def test_blacklisted_ecus(self, subtests): blacklisted_addrs = (0x7c4, 0x7d0) # includes A/C ecu and an unknown ecu for car_model, ecus in FW_VERSIONS.items(): - with self.subTest(car_model=car_model.value): + with subtests.test(car_model=car_model.value): CP = interfaces[car_model][0].get_non_essential_params(car_model) if CP.carName == 'subaru': for ecu in ecus.keys(): - self.assertNotIn(ecu[1], blacklisted_addrs, f'{car_model}: Blacklisted ecu: (Ecu.{ECU_NAME[ecu[0]]}, {hex(ecu[1])})') + assert ecu[1] not in blacklisted_addrs, f'{car_model}: Blacklisted ecu: (Ecu.{ECU_NAME[ecu[0]]}, {hex(ecu[1])})' elif CP.carName == "chrysler": # Some HD trucks have a combined TCM and ECM if CP.carFingerprint.startswith("RAM HD"): for ecu in ecus.keys(): - self.assertNotEqual(ecu[0], Ecu.transmission, f"{car_model}: Blacklisted ecu: (Ecu.{ECU_NAME[ecu[0]]}, {hex(ecu[1])})") + assert ecu[0] != Ecu.transmission, f"{car_model}: Blacklisted ecu: (Ecu.{ECU_NAME[ecu[0]]}, {hex(ecu[1])})" - def test_non_essential_ecus(self): + def test_non_essential_ecus(self, subtests): for brand, config in FW_QUERY_CONFIGS.items(): - with self.subTest(brand): + with subtests.test(brand): # These ECUs are already not in ESSENTIAL_ECUS 
which the fingerprint functions give a pass if missing unnecessary_non_essential_ecus = set(config.non_essential_ecus) - set(ESSENTIAL_ECUS) - self.assertEqual(unnecessary_non_essential_ecus, set(), "Declaring non-essential ECUs non-essential is not required: " + - f"{', '.join([f'Ecu.{ECU_NAME[ecu]}' for ecu in unnecessary_non_essential_ecus])}") + assert unnecessary_non_essential_ecus == set(), "Declaring non-essential ECUs non-essential is not required: " + \ + f"{', '.join([f'Ecu.{ECU_NAME[ecu]}' for ecu in unnecessary_non_essential_ecus])}" - def test_missing_versions_and_configs(self): + def test_missing_versions_and_configs(self, subtests): brand_versions = set(VERSIONS.keys()) brand_configs = set(FW_QUERY_CONFIGS.keys()) if len(brand_configs - brand_versions): - with self.subTest(): - self.fail(f"Brands do not implement FW_VERSIONS: {brand_configs - brand_versions}") + with subtests.test(): + pytest.fail(f"Brands do not implement FW_VERSIONS: {brand_configs - brand_versions}") if len(brand_versions - brand_configs): - with self.subTest(): - self.fail(f"Brands do not implement FW_QUERY_CONFIG: {brand_versions - brand_configs}") + with subtests.test(): + pytest.fail(f"Brands do not implement FW_QUERY_CONFIG: {brand_versions - brand_configs}") # Ensure each brand has at least 1 ECU to query, and extra ECU retrieval for brand, config in FW_QUERY_CONFIGS.items(): - self.assertEqual(len(config.get_all_ecus({}, include_extra_ecus=False)), 0) - self.assertEqual(config.get_all_ecus({}), set(config.extra_ecus)) - self.assertGreater(len(config.get_all_ecus(VERSIONS[brand])), 0) + assert len(config.get_all_ecus({}, include_extra_ecus=False)) == 0 + assert config.get_all_ecus({}) == set(config.extra_ecus) + assert len(config.get_all_ecus(VERSIONS[brand])) > 0 - def test_fw_request_ecu_whitelist(self): + def test_fw_request_ecu_whitelist(self, subtests): for brand, config in FW_QUERY_CONFIGS.items(): - with self.subTest(brand=brand): + with subtests.test(brand=brand): whitelisted_ecus = {ecu for r in config.requests for ecu in r.whitelist_ecus} brand_ecus = {fw[0] for car_fw in VERSIONS[brand].values() for fw in car_fw} brand_ecus |= {ecu[0] for ecu in config.extra_ecus} @@ -182,30 +181,30 @@ def test_fw_request_ecu_whitelist(self): ecus_not_whitelisted = brand_ecus - whitelisted_ecus ecu_strings = ", ".join([f'Ecu.{ECU_NAME[ecu]}' for ecu in ecus_not_whitelisted]) - self.assertFalse(len(whitelisted_ecus) and len(ecus_not_whitelisted), - f'{brand.title()}: ECUs not in any FW query whitelists: {ecu_strings}') + assert not (len(whitelisted_ecus) and len(ecus_not_whitelisted)), \ + f'{brand.title()}: ECUs not in any FW query whitelists: {ecu_strings}' - def test_fw_requests(self): + def test_fw_requests(self, subtests): # Asserts equal length request and response lists for brand, config in FW_QUERY_CONFIGS.items(): - with self.subTest(brand=brand): + with subtests.test(brand=brand): for request_obj in config.requests: - self.assertEqual(len(request_obj.request), len(request_obj.response)) + assert len(request_obj.request) == len(request_obj.response) # No request on the OBD port (bus 1, multiplexed) should be run on an aux panda - self.assertFalse(request_obj.auxiliary and request_obj.bus == 1 and request_obj.obd_multiplexing, - f"{brand.title()}: OBD multiplexed request is marked auxiliary: {request_obj}") + assert not (request_obj.auxiliary and request_obj.bus == 1 and request_obj.obd_multiplexing), \ + f"{brand.title()}: OBD multiplexed request is marked auxiliary: {request_obj}" def 
test_brand_ecu_matches(self): empty_response = {brand: set() for brand in FW_QUERY_CONFIGS} - self.assertEqual(get_brand_ecu_matches(set()), empty_response) + assert get_brand_ecu_matches(set()) == empty_response # we ignore bus expected_response = empty_response | {'toyota': {(0x750, 0xf)}} - self.assertEqual(get_brand_ecu_matches({(0x758, 0xf, 99)}), expected_response) + assert get_brand_ecu_matches({(0x758, 0xf, 99)}) == expected_response -class TestFwFingerprintTiming(unittest.TestCase): +class TestFwFingerprintTiming: N: int = 5 TOL: float = 0.05 @@ -223,26 +222,26 @@ def fake_get_data(self, timeout): self.total_time += timeout return {} - def _benchmark_brand(self, brand, num_pandas): + def _benchmark_brand(self, brand, num_pandas, mocker): fake_socket = FakeSocket() self.total_time = 0 - with (mock.patch("openpilot.selfdrive.car.fw_versions.set_obd_multiplexing", self.fake_set_obd_multiplexing), - mock.patch("openpilot.selfdrive.car.isotp_parallel_query.IsoTpParallelQuery.get_data", self.fake_get_data)): - for _ in range(self.N): - # Treat each brand as the most likely (aka, the first) brand with OBD multiplexing initially on - self.current_obd_multiplexing = True + mocker.patch("openpilot.selfdrive.car.fw_versions.set_obd_multiplexing", self.fake_set_obd_multiplexing) + mocker.patch("openpilot.selfdrive.car.isotp_parallel_query.IsoTpParallelQuery.get_data", self.fake_get_data) + for _ in range(self.N): + # Treat each brand as the most likely (aka, the first) brand with OBD multiplexing initially on + self.current_obd_multiplexing = True - t = time.perf_counter() - get_fw_versions(fake_socket, fake_socket, brand, num_pandas=num_pandas) - self.total_time += time.perf_counter() - t + t = time.perf_counter() + get_fw_versions(fake_socket, fake_socket, brand, num_pandas=num_pandas) + self.total_time += time.perf_counter() - t return self.total_time / self.N def _assert_timing(self, avg_time, ref_time): - self.assertLess(avg_time, ref_time + self.TOL) - self.assertGreater(avg_time, ref_time - self.TOL, "Performance seems to have improved, update test refs.") + assert avg_time < ref_time + self.TOL + assert avg_time > ref_time - self.TOL, "Performance seems to have improved, update test refs." 
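The timing tests below replace nested "with mock.patch(...)" context managers with pytest-mock's mocker fixture, whose patches are undone automatically at test teardown, so the timed loops no longer need the extra indentation. A self-contained sketch of that pattern, assuming pytest-mock is installed; slow_can_query is a hypothetical stand-in, not an openpilot function:

# sketch only: requires the pytest-mock plugin (pip install pytest-mock)
import time

def slow_can_query():
  time.sleep(1.0)  # hypothetical stand-in for a slow ISO-TP query
  return {}

def test_query_time_with_mocker(mocker):
  # mocker.patch takes the same target string as mock.patch, but needs no
  # `with` block: the patch is reverted when the test finishes
  mocker.patch("time.sleep")
  t = time.perf_counter()
  assert slow_can_query() == {}
  assert time.perf_counter() - t < 0.1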
- def test_startup_timing(self): + def test_startup_timing(self, subtests, mocker): # Tests worse-case VIN query time and typical present ECU query time vin_ref_times = {'worst': 1.4, 'best': 0.7} # best assumes we go through all queries to get a match present_ecu_ref_time = 0.45 @@ -253,24 +252,24 @@ def fake_get_ecu_addrs(*_, timeout): fake_socket = FakeSocket() self.total_time = 0.0 - with (mock.patch("openpilot.selfdrive.car.fw_versions.set_obd_multiplexing", self.fake_set_obd_multiplexing), - mock.patch("openpilot.selfdrive.car.fw_versions.get_ecu_addrs", fake_get_ecu_addrs)): - for _ in range(self.N): - self.current_obd_multiplexing = True - get_present_ecus(fake_socket, fake_socket, num_pandas=2) + mocker.patch("openpilot.selfdrive.car.fw_versions.set_obd_multiplexing", self.fake_set_obd_multiplexing) + mocker.patch("openpilot.selfdrive.car.fw_versions.get_ecu_addrs", fake_get_ecu_addrs) + for _ in range(self.N): + self.current_obd_multiplexing = True + get_present_ecus(fake_socket, fake_socket, num_pandas=2) self._assert_timing(self.total_time / self.N, present_ecu_ref_time) print(f'get_present_ecus, query time={self.total_time / self.N} seconds') for name, args in (('worst', {}), ('best', {'retry': 1})): - with self.subTest(name=name): + with subtests.test(name=name): self.total_time = 0.0 - with (mock.patch("openpilot.selfdrive.car.isotp_parallel_query.IsoTpParallelQuery.get_data", self.fake_get_data)): - for _ in range(self.N): - get_vin(fake_socket, fake_socket, (0, 1), **args) + mocker.patch("openpilot.selfdrive.car.isotp_parallel_query.IsoTpParallelQuery.get_data", self.fake_get_data) + for _ in range(self.N): + get_vin(fake_socket, fake_socket, (0, 1), **args) self._assert_timing(self.total_time / self.N, vin_ref_times[name]) print(f'get_vin {name} case, query time={self.total_time / self.N} seconds') - def test_fw_query_timing(self): + def test_fw_query_timing(self, subtests, mocker): total_ref_time = {1: 7.2, 2: 7.8} brand_ref_times = { 1: { @@ -297,8 +296,8 @@ def test_fw_query_timing(self): total_times = {1: 0.0, 2: 0.0} for num_pandas in (1, 2): for brand, config in FW_QUERY_CONFIGS.items(): - with self.subTest(brand=brand, num_pandas=num_pandas): - avg_time = self._benchmark_brand(brand, num_pandas) + with subtests.test(brand=brand, num_pandas=num_pandas): + avg_time = self._benchmark_brand(brand, num_pandas, mocker) total_times[num_pandas] += avg_time avg_time = round(avg_time, 2) @@ -311,11 +310,7 @@ def test_fw_query_timing(self): print(f'{brand=}, {num_pandas=}, {len(config.requests)=}, avg FW query time={avg_time} seconds') for num_pandas in (1, 2): - with self.subTest(brand='all_brands', num_pandas=num_pandas): + with subtests.test(brand='all_brands', num_pandas=num_pandas): total_time = round(total_times[num_pandas], 2) self._assert_timing(total_time, total_ref_time[num_pandas]) print(f'all brands, total FW query time={total_time} seconds') - - -if __name__ == "__main__": - unittest.main() diff --git a/selfdrive/car/tests/test_lateral_limits.py b/selfdrive/car/tests/test_lateral_limits.py index e5cfd972bdcb7c..a478bc601a5a3c 100755 --- a/selfdrive/car/tests/test_lateral_limits.py +++ b/selfdrive/car/tests/test_lateral_limits.py @@ -2,8 +2,7 @@ from collections import defaultdict import importlib from parameterized import parameterized_class -import sys -import unittest +import pytest from openpilot.common.realtime import DT_CTRL from openpilot.selfdrive.car.car_helpers import interfaces @@ -25,23 +24,23 @@ @parameterized_class('car_model', [(c,) for c in 
sorted(CAR_MODELS)]) -class TestLateralLimits(unittest.TestCase): +class TestLateralLimits: car_model: str @classmethod - def setUpClass(cls): + def setup_class(cls): CarInterface, _, _ = interfaces[cls.car_model] CP = CarInterface.get_non_essential_params(cls.car_model) if CP.dashcamOnly: - raise unittest.SkipTest("Platform is behind dashcamOnly") + pytest.skip("Platform is behind dashcamOnly") # TODO: test all platforms if CP.lateralTuning.which() != 'torque': - raise unittest.SkipTest + pytest.skip() if CP.notCar: - raise unittest.SkipTest + pytest.skip() CarControllerParams = importlib.import_module(f'selfdrive.car.{CP.carName}.values').CarControllerParams cls.control_params = CarControllerParams(CP) @@ -66,26 +65,8 @@ def calculate_0_5s_jerk(control_params, torque_params): def test_jerk_limits(self): up_jerk, down_jerk = self.calculate_0_5s_jerk(self.control_params, self.torque_params) car_model_jerks[self.car_model] = {"up_jerk": up_jerk, "down_jerk": down_jerk} - self.assertLessEqual(up_jerk, MAX_LAT_JERK_UP + MAX_LAT_JERK_UP_TOLERANCE) - self.assertLessEqual(down_jerk, MAX_LAT_JERK_DOWN) + assert up_jerk <= MAX_LAT_JERK_UP + MAX_LAT_JERK_UP_TOLERANCE + assert down_jerk <= MAX_LAT_JERK_DOWN def test_max_lateral_accel(self): - self.assertLessEqual(self.torque_params["MAX_LAT_ACCEL_MEASURED"], MAX_LAT_ACCEL) - - -if __name__ == "__main__": - result = unittest.main(exit=False) - - print(f"\n\n---- Lateral limit report ({len(CAR_MODELS)} cars) ----\n") - - max_car_model_len = max([len(car_model) for car_model in car_model_jerks]) - for car_model, _jerks in sorted(car_model_jerks.items(), key=lambda i: i[1]['up_jerk'], reverse=True): - violation = _jerks["up_jerk"] > MAX_LAT_JERK_UP + MAX_LAT_JERK_UP_TOLERANCE or \ - _jerks["down_jerk"] > MAX_LAT_JERK_DOWN - violation_str = " - VIOLATION" if violation else "" - - print(f"{car_model:{max_car_model_len}} - up jerk: {round(_jerks['up_jerk'], 2):5} " + - f"m/s^3, down jerk: {round(_jerks['down_jerk'], 2):5} m/s^3{violation_str}") - - # exit with test result - sys.exit(not result.result.wasSuccessful()) + assert self.torque_params["MAX_LAT_ACCEL_MEASURED"] <= MAX_LAT_ACCEL diff --git a/selfdrive/car/tests/test_models.py b/selfdrive/car/tests/test_models.py index dc3d4256a20e1d..026693bdce7933 100755 --- a/selfdrive/car/tests/test_models.py +++ b/selfdrive/car/tests/test_models.py @@ -4,7 +4,7 @@ import importlib import pytest import random -import unittest +import unittest # noqa: TID251 from collections import defaultdict, Counter import hypothesis.strategies as st from hypothesis import Phase, given, settings diff --git a/selfdrive/car/tests/test_platform_configs.py b/selfdrive/car/tests/test_platform_configs.py index 523c331b9e3c83..217189255e2a86 100755 --- a/selfdrive/car/tests/test_platform_configs.py +++ b/selfdrive/car/tests/test_platform_configs.py @@ -1,25 +1,19 @@ #!/usr/bin/env python3 -import unittest - from openpilot.selfdrive.car.values import PLATFORMS -class TestPlatformConfigs(unittest.TestCase): - def test_configs(self): +class TestPlatformConfigs: + def test_configs(self, subtests): for name, platform in PLATFORMS.items(): - with self.subTest(platform=str(platform)): - self.assertTrue(platform.config._frozen) + with subtests.test(platform=str(platform)): + assert platform.config._frozen if platform != "MOCK": - self.assertIn("pt", platform.config.dbc_dict) - self.assertTrue(len(platform.config.platform_str) > 0) - - self.assertEqual(name, platform.config.platform_str) - - self.assertIsNotNone(platform.config.specs) + 
assert "pt" in platform.config.dbc_dict + assert len(platform.config.platform_str) > 0 + assert name == platform.config.platform_str -if __name__ == "__main__": - unittest.main() + assert platform.config.specs is not None diff --git a/selfdrive/car/toyota/carcontroller.py b/selfdrive/car/toyota/carcontroller.py index f2d8a4a7bc5637..354d10a90eba09 100644 --- a/selfdrive/car/toyota/carcontroller.py +++ b/selfdrive/car/toyota/carcontroller.py @@ -102,11 +102,6 @@ def update(self, CC, CS, now_nanos): # *** gas and brake *** pcm_accel_cmd = clip(actuators.accel, self.params.ACCEL_MIN, self.params.ACCEL_MAX) - # TODO: probably can delete this. CS.pcm_acc_status uses a different signal - # than CS.cruiseState.enabled. confirm they're not meaningfully different - if not CC.enabled and CS.pcm_acc_status: - pcm_cancel_cmd = 1 - # on entering standstill, send standstill request if CS.out.standstill and not self.last_standstill and (self.CP.carFingerprint not in NO_STOP_TIMER_CAR): self.standstill_req = True diff --git a/selfdrive/car/toyota/tests/test_toyota.py b/selfdrive/car/toyota/tests/test_toyota.py index e2a9b46eb4dd75..ef49e005512efc 100755 --- a/selfdrive/car/toyota/tests/test_toyota.py +++ b/selfdrive/car/toyota/tests/test_toyota.py @@ -1,6 +1,5 @@ #!/usr/bin/env python3 from hypothesis import given, settings, strategies as st -import unittest from cereal import car from openpilot.selfdrive.car.fw_versions import build_fw_dict @@ -17,59 +16,58 @@ def check_fw_version(fw_version: bytes) -> bool: return b'?' not in fw_version -class TestToyotaInterfaces(unittest.TestCase): +class TestToyotaInterfaces: def test_car_sets(self): - self.assertTrue(len(ANGLE_CONTROL_CAR - TSS2_CAR) == 0) - self.assertTrue(len(RADAR_ACC_CAR - TSS2_CAR) == 0) + assert len(ANGLE_CONTROL_CAR - TSS2_CAR) == 0 + assert len(RADAR_ACC_CAR - TSS2_CAR) == 0 def test_lta_platforms(self): # At this time, only RAV4 2023 is expected to use LTA/angle control - self.assertEqual(ANGLE_CONTROL_CAR, {CAR.TOYOTA_RAV4_TSS2_2023}) + assert ANGLE_CONTROL_CAR == {CAR.TOYOTA_RAV4_TSS2_2023} def test_tss2_dbc(self): # We make some assumptions about TSS2 platforms, # like looking up certain signals only in this DBC for car_model, dbc in DBC.items(): if car_model in TSS2_CAR: - self.assertEqual(dbc["pt"], "toyota_nodsu_pt_generated") + assert dbc["pt"] == "toyota_nodsu_pt_generated" - def test_essential_ecus(self): + def test_essential_ecus(self, subtests): # Asserts standard ECUs exist for each platform common_ecus = {Ecu.fwdRadar, Ecu.fwdCamera} for car_model, ecus in FW_VERSIONS.items(): - with self.subTest(car_model=car_model.value): + with subtests.test(car_model=car_model.value): present_ecus = {ecu[0] for ecu in ecus} missing_ecus = common_ecus - present_ecus - self.assertEqual(len(missing_ecus), 0) + assert len(missing_ecus) == 0 # Some exceptions for other common ECUs if car_model not in (CAR.TOYOTA_ALPHARD_TSS2,): - self.assertIn(Ecu.abs, present_ecus) + assert Ecu.abs in present_ecus if car_model not in (CAR.TOYOTA_MIRAI,): - self.assertIn(Ecu.engine, present_ecus) + assert Ecu.engine in present_ecus if car_model not in (CAR.TOYOTA_PRIUS_V, CAR.LEXUS_CTH): - self.assertIn(Ecu.eps, present_ecus) + assert Ecu.eps in present_ecus -class TestToyotaFingerprint(unittest.TestCase): - def test_non_essential_ecus(self): +class TestToyotaFingerprint: + def test_non_essential_ecus(self, subtests): # Ensures only the cars that have multiple engine ECUs are in the engine non-essential ECU list for car_model, ecus in FW_VERSIONS.items(): - with 
self.subTest(car_model=car_model.value): + with subtests.test(car_model=car_model.value): engine_ecus = {ecu for ecu in ecus if ecu[0] == Ecu.engine} - self.assertEqual(len(engine_ecus) > 1, - car_model in FW_QUERY_CONFIG.non_essential_ecus[Ecu.engine], - f"Car model unexpectedly {'not ' if len(engine_ecus) > 1 else ''}in non-essential list") + assert (len(engine_ecus) > 1) == (car_model in FW_QUERY_CONFIG.non_essential_ecus[Ecu.engine]), \ + f"Car model unexpectedly {'not ' if len(engine_ecus) > 1 else ''}in non-essential list" - def test_valid_fw_versions(self): + def test_valid_fw_versions(self, subtests): # Asserts all FW versions are valid for car_model, ecus in FW_VERSIONS.items(): - with self.subTest(car_model=car_model.value): + with subtests.test(car_model=car_model.value): for fws in ecus.values(): for fw in fws: - self.assertTrue(check_fw_version(fw), fw) + assert check_fw_version(fw), fw # Tests for part numbers, platform codes, and sub-versions which Toyota will use to fuzzy # fingerprint in the absence of full FW matches: @@ -80,25 +78,25 @@ def test_platform_codes_fuzzy_fw(self, data): fws = data.draw(fw_strategy) get_platform_codes(fws) - def test_platform_code_ecus_available(self): + def test_platform_code_ecus_available(self, subtests): # Asserts ECU keys essential for fuzzy fingerprinting are available on all platforms for car_model, ecus in FW_VERSIONS.items(): - with self.subTest(car_model=car_model.value): + with subtests.test(car_model=car_model.value): for platform_code_ecu in PLATFORM_CODE_ECUS: if platform_code_ecu == Ecu.eps and car_model in (CAR.TOYOTA_PRIUS_V, CAR.LEXUS_CTH,): continue if platform_code_ecu == Ecu.abs and car_model in (CAR.TOYOTA_ALPHARD_TSS2,): continue - self.assertIn(platform_code_ecu, [e[0] for e in ecus]) + assert platform_code_ecu in [e[0] for e in ecus] - def test_fw_format(self): + def test_fw_format(self, subtests): # Asserts: # - every supported ECU FW version returns one platform code # - every supported ECU FW version has a part number # - expected parsing of ECU sub-versions for car_model, ecus in FW_VERSIONS.items(): - with self.subTest(car_model=car_model.value): + with subtests.test(car_model=car_model.value): for ecu, fws in ecus.items(): if ecu[0] not in PLATFORM_CODE_ECUS: continue @@ -107,15 +105,14 @@ def test_fw_format(self): for fw in fws: result = get_platform_codes([fw]) # Check only one platform code and sub-version - self.assertEqual(1, len(result), f"Unable to parse FW: {fw}") - self.assertEqual(1, len(list(result.values())[0]), f"Unable to parse FW: {fw}") + assert 1 == len(result), f"Unable to parse FW: {fw}" + assert 1 == len(list(result.values())[0]), f"Unable to parse FW: {fw}" codes |= result # Toyota places the ECU part number in their FW versions, assert all parsable # Note that there is only one unique part number per ECU across the fleet, so this # is not important for identification, just a sanity check. 
- self.assertTrue(all(code.count(b"-") > 1 for code in codes), - f"FW does not have part number: {fw} {codes}") + assert all(code.count(b"-") > 1 for code in codes), f"FW does not have part number: {fw} {codes}" def test_platform_codes_spot_check(self): # Asserts basic platform code parsing behavior for a few cases @@ -125,20 +122,20 @@ def test_platform_codes_spot_check(self): b"F152607110\x00\x00\x00\x00\x00\x00", b"F152607180\x00\x00\x00\x00\x00\x00", ]) - self.assertEqual(results, {b"F1526-07-1": {b"10", b"40", b"71", b"80"}}) + assert results == {b"F1526-07-1": {b"10", b"40", b"71", b"80"}} results = get_platform_codes([ b"\x028646F4104100\x00\x00\x00\x008646G5301200\x00\x00\x00\x00", b"\x028646F4104100\x00\x00\x00\x008646G3304000\x00\x00\x00\x00", ]) - self.assertEqual(results, {b"8646F-41-04": {b"100"}}) + assert results == {b"8646F-41-04": {b"100"}} # Short version has no part number results = get_platform_codes([ b"\x0235870000\x00\x00\x00\x00\x00\x00\x00\x00A0202000\x00\x00\x00\x00\x00\x00\x00\x00", b"\x0235883000\x00\x00\x00\x00\x00\x00\x00\x00A0202000\x00\x00\x00\x00\x00\x00\x00\x00", ]) - self.assertEqual(results, {b"58-70": {b"000"}, b"58-83": {b"000"}}) + assert results == {b"58-70": {b"000"}, b"58-83": {b"000"}} results = get_platform_codes([ b"F152607110\x00\x00\x00\x00\x00\x00", @@ -146,7 +143,7 @@ def test_platform_codes_spot_check(self): b"\x028646F4104100\x00\x00\x00\x008646G5301200\x00\x00\x00\x00", b"\x0235879000\x00\x00\x00\x00\x00\x00\x00\x00A4701000\x00\x00\x00\x00\x00\x00\x00\x00", ]) - self.assertEqual(results, {b"F1526-07-1": {b"10", b"40"}, b"8646F-41-04": {b"100"}, b"58-79": {b"000"}}) + assert results == {b"F1526-07-1": {b"10", b"40"}, b"8646F-41-04": {b"100"}, b"58-79": {b"000"}} def test_fuzzy_excluded_platforms(self): # Asserts a list of platforms that will not fuzzy fingerprint with platform codes due to them being shared. 
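Later hunks in this diff (test_vehicle_model.py, test_cruise_speed.py, test_following_distance.py) translate assertAlmostEqual into pytest.approx. A quick sketch of the mapping, with illustrative values only:

import pytest

def test_approx_equivalents():
  # self.assertAlmostEqual(a, b) compares to 7 decimal places by default
  assert 1.00000001 == pytest.approx(1.0, abs=1e-7)
  # self.assertAlmostEqual(a, b, delta=d) is an absolute tolerance
  assert 10.004 == pytest.approx(10.0, abs=0.01)
  # without abs=, pytest.approx uses a relative tolerance of 1e-6
  assert 1000000.5 == pytest.approx(1000000.0)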
@@ -162,13 +159,9 @@ def test_fuzzy_excluded_platforms(self): CP = car.CarParams.new_message(carFw=car_fw) matches = FW_QUERY_CONFIG.match_fw_to_car_fuzzy(build_fw_dict(CP.carFw), CP.carVin, FW_VERSIONS) if len(matches) == 1: - self.assertEqual(list(matches)[0], platform) + assert list(matches)[0] == platform else: # If a platform has multiple matches, add it and its matches platforms_with_shared_codes |= {str(platform), *matches} - self.assertEqual(platforms_with_shared_codes, FUZZY_EXCLUDED_PLATFORMS, (len(platforms_with_shared_codes), len(FW_VERSIONS))) - - -if __name__ == "__main__": - unittest.main() + assert platforms_with_shared_codes == FUZZY_EXCLUDED_PLATFORMS, (len(platforms_with_shared_codes), len(FW_VERSIONS)) diff --git a/selfdrive/car/volkswagen/tests/test_volkswagen.py b/selfdrive/car/volkswagen/tests/test_volkswagen.py index 17331203bbe855..561d28b9fbcfe8 100755 --- a/selfdrive/car/volkswagen/tests/test_volkswagen.py +++ b/selfdrive/car/volkswagen/tests/test_volkswagen.py @@ -1,7 +1,6 @@ #!/usr/bin/env python3 import random import re -import unittest from cereal import car from openpilot.selfdrive.car.volkswagen.values import CAR, FW_QUERY_CONFIG, WMI @@ -14,35 +13,35 @@ SPARE_PART_FW_PATTERN = re.compile(b'\xf1\x87(?P[0-9][0-9A-Z]{2})(?P[0-9][0-9A-Z][0-9])(?P[0-9A-Z]{2}[0-9])([A-Z0-9]| )') -class TestVolkswagenPlatformConfigs(unittest.TestCase): - def test_spare_part_fw_pattern(self): +class TestVolkswagenPlatformConfigs: + def test_spare_part_fw_pattern(self, subtests): # Relied on for determining if a FW is likely VW for platform, ecus in FW_VERSIONS.items(): - with self.subTest(platform=platform): + with subtests.test(platform=platform.value): for fws in ecus.values(): for fw in fws: - self.assertNotEqual(SPARE_PART_FW_PATTERN.match(fw), None, f"Bad FW: {fw}") + assert SPARE_PART_FW_PATTERN.match(fw) is not None, f"Bad FW: {fw}" - def test_chassis_codes(self): + def test_chassis_codes(self, subtests): for platform in CAR: - with self.subTest(platform=platform): - self.assertTrue(len(platform.config.wmis) > 0, "WMIs not set") - self.assertTrue(len(platform.config.chassis_codes) > 0, "Chassis codes not set") - self.assertTrue(all(CHASSIS_CODE_PATTERN.match(cc) for cc in - platform.config.chassis_codes), "Bad chassis codes") + with subtests.test(platform=platform.value): + assert len(platform.config.wmis) > 0, "WMIs not set" + assert len(platform.config.chassis_codes) > 0, "Chassis codes not set" + assert all(CHASSIS_CODE_PATTERN.match(cc) for cc in \ + platform.config.chassis_codes), "Bad chassis codes" # No two platforms should share chassis codes for comp in CAR: if platform == comp: continue - self.assertEqual(set(), platform.config.chassis_codes & comp.config.chassis_codes, - f"Shared chassis codes: {comp}") + assert set() == platform.config.chassis_codes & comp.config.chassis_codes, \ + f"Shared chassis codes: {comp}" - def test_custom_fuzzy_fingerprinting(self): + def test_custom_fuzzy_fingerprinting(self, subtests): all_radar_fw = list({fw for ecus in FW_VERSIONS.values() for fw in ecus[Ecu.fwdRadar, 0x757, None]}) for platform in CAR: - with self.subTest(platform=platform): + with subtests.test(platform=platform.name): for wmi in WMI: for chassis_code in platform.config.chassis_codes | {"00"}: vin = ["0"] * 17 @@ -59,8 +58,4 @@ def test_custom_fuzzy_fingerprinting(self): matches = FW_QUERY_CONFIG.match_fw_to_car_fuzzy(live_fws, vin, FW_VERSIONS) expected_matches = {platform} if should_match else set() - self.assertEqual(expected_matches, matches, "Bad match") - - 
-if __name__ == "__main__": - unittest.main() + assert expected_matches == matches, "Bad match" diff --git a/selfdrive/controls/controlsd.py b/selfdrive/controls/controlsd.py index eba4142ca85a4c..92c93311218970 100755 --- a/selfdrive/controls/controlsd.py +++ b/selfdrive/controls/controlsd.py @@ -21,7 +21,7 @@ from openpilot.selfdrive.car.car_helpers import get_startup_event from openpilot.selfdrive.car.card import CarD from openpilot.selfdrive.controls.lib.alertmanager import AlertManager, set_offroad_alert -from openpilot.selfdrive.controls.lib.drive_helpers import clip_curvature +from openpilot.selfdrive.controls.lib.drive_helpers import VCruiseHelper, clip_curvature from openpilot.selfdrive.controls.lib.events import Events, ET from openpilot.selfdrive.controls.lib.latcontrol import LatControl, MIN_LATERAL_CONTROL_SPEED from openpilot.selfdrive.controls.lib.latcontrol_pid import LatControlPID @@ -143,6 +143,7 @@ def __init__(self, CI=None): self.desired_curvature = 0.0 self.experimental_mode = False self.personality = self.read_personality_param() + self.v_cruise_helper = VCruiseHelper(self.CP) self.recalibrating_seen = False self.can_log_mono_time = 0 @@ -168,7 +169,7 @@ def set_initial_state(self): controls_state = self.params.get("ReplayControlsState") if controls_state is not None: with log.ControlsState.from_bytes(controls_state) as controls_state: - self.card.v_cruise_helper.v_cruise_kph = controls_state.vCruise + self.v_cruise_helper.v_cruise_kph = controls_state.vCruise if any(ps.controlsAllowed for ps in self.sm['pandaStates']): self.state = State.enabled @@ -197,6 +198,11 @@ def update_events(self, CS): if self.CP.passive: return + # Block resume if cruise never previously enabled + resume_pressed = any(be.type in (ButtonType.accelCruise, ButtonType.resumeCruise) for be in CS.buttonEvents) + if not self.CP.pcmCruise and not self.v_cruise_helper.v_cruise_initialized and resume_pressed: + self.events.add(EventName.resumeBlocked) + if not self.CP.notCar: self.events.add_from_msg(self.sm['driverMonitoringState'].events) @@ -425,7 +431,7 @@ def data_sample(self): def state_transition(self, CS): """Compute conditional state transitions and execute actions on state transitions""" - self.card.v_cruise_helper.update_v_cruise(CS, self.enabled, self.is_metric) + self.v_cruise_helper.update_v_cruise(CS, self.enabled, self.is_metric) # decrement the soft disable timer at every step, as it's reset on # entrance in SOFT_DISABLING state @@ -500,7 +506,7 @@ def state_transition(self, CS): else: self.state = State.enabled self.current_alert_types.append(ET.ENABLE) - self.card.v_cruise_helper.initialize_v_cruise(CS, self.experimental_mode) + self.v_cruise_helper.initialize_v_cruise(CS, self.experimental_mode) # Check if openpilot is engaged and actuators are enabled self.enabled = self.state in ENABLED_STATES @@ -556,7 +562,7 @@ def state_control(self, CS): if not self.joystick_mode: # accel PID loop - pid_accel_limits = self.CI.get_pid_accel_limits(self.CP, CS.vEgo, self.card.v_cruise_helper.v_cruise_kph * CV.KPH_TO_MS) + pid_accel_limits = self.CI.get_pid_accel_limits(self.CP, CS.vEgo, self.v_cruise_helper.v_cruise_kph * CV.KPH_TO_MS) t_since_plan = (self.sm.frame - self.sm.recv_frame['longitudinalPlan']) * DT_CTRL actuators.accel = self.LoC.update(CC.longActive, CS, long_plan, pid_accel_limits, t_since_plan) @@ -659,7 +665,7 @@ def publish_logs(self, CS, start_time, CC, lac_log): CC.cruiseControl.resume = self.enabled and CS.cruiseState.standstill and speeds[-1] > 0.1 hudControl = 
CC.hudControl - hudControl.setSpeed = float(self.card.v_cruise_helper.v_cruise_cluster_kph * CV.KPH_TO_MS) + hudControl.setSpeed = float(self.v_cruise_helper.v_cruise_cluster_kph * CV.KPH_TO_MS) hudControl.speedVisible = self.enabled hudControl.lanesVisible = self.enabled hudControl.leadVisible = self.sm['longitudinalPlan'].hasLead @@ -743,8 +749,8 @@ def publish_logs(self, CS, start_time, CC, lac_log): controlsState.engageable = not self.events.contains(ET.NO_ENTRY) controlsState.longControlState = self.LoC.long_control_state controlsState.vPid = float(self.LoC.v_pid) - controlsState.vCruise = float(self.card.v_cruise_helper.v_cruise_kph) - controlsState.vCruiseCluster = float(self.card.v_cruise_helper.v_cruise_cluster_kph) + controlsState.vCruise = float(self.v_cruise_helper.v_cruise_kph) + controlsState.vCruiseCluster = float(self.v_cruise_helper.v_cruise_cluster_kph) controlsState.upAccelCmd = float(self.LoC.pid.p) controlsState.uiAccelCmd = float(self.LoC.pid.i) controlsState.ufAccelCmd = float(self.LoC.pid.f) diff --git a/selfdrive/controls/lib/events.py b/selfdrive/controls/lib/events.py index f1bd07c69bf290..99246a139789ef 100755 --- a/selfdrive/controls/lib/events.py +++ b/selfdrive/controls/lib/events.py @@ -487,7 +487,7 @@ def joystick_alert(CP: car.CarParams, CS: car.CarState, sm: messaging.SubMaster, "Press Resume to Exit Standstill", "", AlertStatus.userPrompt, AlertSize.small, - Priority.MID, VisualAlert.none, AudibleAlert.none, .2), + Priority.LOW, VisualAlert.none, AudibleAlert.none, .2), }, EventName.belowSteerSpeed: { diff --git a/selfdrive/controls/lib/tests/test_alertmanager.py b/selfdrive/controls/lib/tests/test_alertmanager.py index dbd42858a07977..c234cc49d60ea8 100755 --- a/selfdrive/controls/lib/tests/test_alertmanager.py +++ b/selfdrive/controls/lib/tests/test_alertmanager.py @@ -1,12 +1,11 @@ #!/usr/bin/env python3 import random -import unittest from openpilot.selfdrive.controls.lib.events import Alert, EVENTS from openpilot.selfdrive.controls.lib.alertmanager import AlertManager -class TestAlertManager(unittest.TestCase): +class TestAlertManager: def test_duration(self): """ @@ -38,8 +37,4 @@ def test_duration(self): shown = current_alert is not None should_show = frame <= show_duration - self.assertEqual(shown, should_show, msg=f"{frame=} {add_duration=} {duration=}") - - -if __name__ == "__main__": - unittest.main() + assert shown == should_show, f"{frame=} {add_duration=} {duration=}" diff --git a/selfdrive/controls/lib/tests/test_latcontrol.py b/selfdrive/controls/lib/tests/test_latcontrol.py index 838023af720720..b731bbd9500096 100755 --- a/selfdrive/controls/lib/tests/test_latcontrol.py +++ b/selfdrive/controls/lib/tests/test_latcontrol.py @@ -1,6 +1,4 @@ #!/usr/bin/env python3 -import unittest - from parameterized import parameterized from cereal import car, log @@ -15,7 +13,7 @@ from openpilot.common.mock.generators import generate_liveLocationKalman -class TestLatControl(unittest.TestCase): +class TestLatControl: @parameterized.expand([(HONDA.HONDA_CIVIC, LatControlPID), (TOYOTA.TOYOTA_RAV4, LatControlTorque), (NISSAN.NISSAN_LEAF, LatControlAngle)]) def test_saturation(self, car_name, controller): @@ -36,8 +34,4 @@ def test_saturation(self, car_name, controller): for _ in range(1000): _, _, lac_log = controller.update(True, CS, VM, params, False, 1, llk) - self.assertTrue(lac_log.saturated) - - -if __name__ == "__main__": - unittest.main() + assert lac_log.saturated diff --git a/selfdrive/controls/lib/tests/test_vehicle_model.py 
b/selfdrive/controls/lib/tests/test_vehicle_model.py index c3997afdf36bcb..2efcf2fbbd0fc4 100755 --- a/selfdrive/controls/lib/tests/test_vehicle_model.py +++ b/selfdrive/controls/lib/tests/test_vehicle_model.py @@ -1,6 +1,6 @@ #!/usr/bin/env python3 +import pytest import math -import unittest import numpy as np from control import StateSpace @@ -10,8 +10,8 @@ from openpilot.selfdrive.controls.lib.vehicle_model import VehicleModel, dyn_ss_sol, create_dyn_state_matrices -class TestVehicleModel(unittest.TestCase): - def setUp(self): +class TestVehicleModel: + def setup_method(self): CP = CarInterface.get_non_essential_params(CAR.HONDA_CIVIC) self.VM = VehicleModel(CP) @@ -23,7 +23,7 @@ def test_round_trip_yaw_rate(self): yr = self.VM.yaw_rate(sa, u, roll) new_sa = self.VM.get_steer_from_yaw_rate(yr, u, roll) - self.assertAlmostEqual(sa, new_sa) + assert sa == pytest.approx(new_sa) def test_dyn_ss_sol_against_yaw_rate(self): """Verify that the yaw_rate helper function matches the results @@ -38,7 +38,7 @@ def test_dyn_ss_sol_against_yaw_rate(self): # Compute yaw rate using direct computations yr2 = self.VM.yaw_rate(sa, u, roll) - self.assertAlmostEqual(float(yr1[0]), yr2) + assert float(yr1[0]) == pytest.approx(yr2) def test_syn_ss_sol_simulate(self): """Verifies that dyn_ss_sol matches a simulation""" @@ -63,8 +63,3 @@ def test_syn_ss_sol_simulate(self): x2 = dyn_ss_sol(sa, u, roll, self.VM) np.testing.assert_almost_equal(x1, x2, decimal=3) - - - -if __name__ == "__main__": - unittest.main() diff --git a/selfdrive/controls/tests/test_alerts.py b/selfdrive/controls/tests/test_alerts.py index 7b4fba0dcee5f5..e29a6322ab5a15 100755 --- a/selfdrive/controls/tests/test_alerts.py +++ b/selfdrive/controls/tests/test_alerts.py @@ -2,7 +2,6 @@ import copy import json import os -import unittest import random from PIL import Image, ImageDraw, ImageFont @@ -25,10 +24,10 @@ ALERTS.append(alert) -class TestAlerts(unittest.TestCase): +class TestAlerts: @classmethod - def setUpClass(cls): + def setup_class(cls): with open(OFFROAD_ALERTS_PATH) as f: cls.offroad_alerts = json.loads(f.read()) @@ -45,7 +44,7 @@ def test_events_defined(self): for name, e in events.items(): if not name.endswith("DEPRECATED"): fail_msg = "%s @%d not in EVENTS" % (name, e) - self.assertTrue(e in EVENTS.keys(), msg=fail_msg) + assert e in EVENTS.keys(), fail_msg # ensure alert text doesn't exceed allowed width def test_alert_text_length(self): @@ -80,7 +79,7 @@ def test_alert_text_length(self): left, _, right, _ = draw.textbbox((0, 0), txt, font) width = right - left msg = f"type: {alert.alert_type} msg: {txt}" - self.assertLessEqual(width, max_text_width, msg=msg) + assert width <= max_text_width, msg def test_alert_sanity_check(self): for event_types in EVENTS.values(): @@ -90,21 +89,21 @@ def test_alert_sanity_check(self): continue if a.alert_size == AlertSize.none: - self.assertEqual(len(a.alert_text_1), 0) - self.assertEqual(len(a.alert_text_2), 0) + assert len(a.alert_text_1) == 0 + assert len(a.alert_text_2) == 0 elif a.alert_size == AlertSize.small: - self.assertGreater(len(a.alert_text_1), 0) - self.assertEqual(len(a.alert_text_2), 0) + assert len(a.alert_text_1) > 0 + assert len(a.alert_text_2) == 0 elif a.alert_size == AlertSize.mid: - self.assertGreater(len(a.alert_text_1), 0) - self.assertGreater(len(a.alert_text_2), 0) + assert len(a.alert_text_1) > 0 + assert len(a.alert_text_2) > 0 else: - self.assertGreater(len(a.alert_text_1), 0) + assert len(a.alert_text_1) > 0 - self.assertGreaterEqual(a.duration, 0.) 
+ assert a.duration >= 0. if event_type not in (ET.WARNING, ET.PERMANENT, ET.PRE_ENABLE): - self.assertEqual(a.creation_delay, 0.) + assert a.creation_delay == 0. def test_offroad_alerts(self): params = Params() @@ -113,11 +112,11 @@ def test_offroad_alerts(self): alert = copy.copy(self.offroad_alerts[a]) set_offroad_alert(a, True) alert['extra'] = '' - self.assertTrue(json.dumps(alert) == params.get(a, encoding='utf8')) + assert json.dumps(alert) == params.get(a, encoding='utf8') # then delete it set_offroad_alert(a, False) - self.assertTrue(params.get(a) is None) + assert params.get(a) is None def test_offroad_alerts_extra_text(self): params = Params() @@ -128,8 +127,5 @@ def test_offroad_alerts_extra_text(self): set_offroad_alert(a, True, extra_text="a"*i) written_alert = json.loads(params.get(a, encoding='utf8')) - self.assertTrue("a"*i == written_alert['extra']) - self.assertTrue(alert["text"] == written_alert['text']) - -if __name__ == "__main__": - unittest.main() + assert "a"*i == written_alert['extra'] + assert alert["text"] == written_alert['text'] diff --git a/selfdrive/controls/tests/test_cruise_speed.py b/selfdrive/controls/tests/test_cruise_speed.py index c46d03ad1e9897..6c46285e81fb3a 100755 --- a/selfdrive/controls/tests/test_cruise_speed.py +++ b/selfdrive/controls/tests/test_cruise_speed.py @@ -1,7 +1,7 @@ #!/usr/bin/env python3 +import pytest import itertools import numpy as np -import unittest from parameterized import parameterized_class from cereal import log @@ -36,19 +36,19 @@ def run_cruise_simulation(cruise, e2e, personality, t_end=20.): [True, False], # e2e log.LongitudinalPersonality.schema.enumerants, # personality [5,35])) # speed -class TestCruiseSpeed(unittest.TestCase): +class TestCruiseSpeed: def test_cruise_speed(self): print(f'Testing {self.speed} m/s') cruise_speed = float(self.speed) simulation_steady_state = run_cruise_simulation(cruise_speed, self.e2e, self.personality) - self.assertAlmostEqual(simulation_steady_state, cruise_speed, delta=.01, msg=f'Did not reach {self.speed} m/s') + assert simulation_steady_state == pytest.approx(cruise_speed, abs=.01), f'Did not reach {self.speed} m/s' # TODO: test pcmCruise @parameterized_class(('pcm_cruise',), [(False,)]) -class TestVCruiseHelper(unittest.TestCase): - def setUp(self): +class TestVCruiseHelper: + def setup_method(self): self.CP = car.CarParams(pcmCruise=self.pcm_cruise) self.v_cruise_helper = VCruiseHelper(self.CP) self.reset_cruise_speed_state() @@ -75,7 +75,7 @@ def test_adjust_speed(self): CS.buttonEvents = [ButtonEvent(type=btn, pressed=pressed)] self.v_cruise_helper.update_v_cruise(CS, enabled=True, is_metric=False) - self.assertEqual(pressed, self.v_cruise_helper.v_cruise_kph == self.v_cruise_helper.v_cruise_kph_last) + assert pressed == (self.v_cruise_helper.v_cruise_kph == self.v_cruise_helper.v_cruise_kph_last) def test_rising_edge_enable(self): """ @@ -94,7 +94,7 @@ def test_rising_edge_enable(self): self.enable(V_CRUISE_INITIAL * CV.KPH_TO_MS, False) # Expected diff on enabling. 
Speed should not change on falling edge of pressed - self.assertEqual(not pressed, self.v_cruise_helper.v_cruise_kph == self.v_cruise_helper.v_cruise_kph_last) + assert (not pressed) == (self.v_cruise_helper.v_cruise_kph == self.v_cruise_helper.v_cruise_kph_last) def test_resume_in_standstill(self): """ @@ -111,7 +111,7 @@ # speed should only update if not at standstill and button falling edge should_equal = standstill or pressed - self.assertEqual(should_equal, self.v_cruise_helper.v_cruise_kph == self.v_cruise_helper.v_cruise_kph_last) + assert should_equal == (self.v_cruise_helper.v_cruise_kph == self.v_cruise_helper.v_cruise_kph_last) def test_set_gas_pressed(self): """ @@ -135,7 +135,7 @@ # TODO: fix skipping first run due to enabled on rising edge exception if v_ego == 0.0: continue - self.assertEqual(expected_v_cruise_kph, self.v_cruise_helper.v_cruise_kph) + assert expected_v_cruise_kph == self.v_cruise_helper.v_cruise_kph def test_initialize_v_cruise(self): """ @@ -145,12 +145,8 @@ for experimental_mode in (True, False): for v_ego in np.linspace(0, 100, 101): self.reset_cruise_speed_state() - self.assertFalse(self.v_cruise_helper.v_cruise_initialized) + assert not self.v_cruise_helper.v_cruise_initialized self.enable(float(v_ego), experimental_mode) - self.assertTrue(V_CRUISE_INITIAL <= self.v_cruise_helper.v_cruise_kph <= V_CRUISE_MAX) - self.assertTrue(self.v_cruise_helper.v_cruise_initialized) - - -if __name__ == "__main__": - unittest.main() + assert V_CRUISE_INITIAL <= self.v_cruise_helper.v_cruise_kph <= V_CRUISE_MAX + assert self.v_cruise_helper.v_cruise_initialized diff --git a/selfdrive/controls/tests/test_following_distance.py b/selfdrive/controls/tests/test_following_distance.py index f58e6383c42c27..5d60911805ee0a 100755 --- a/selfdrive/controls/tests/test_following_distance.py +++ b/selfdrive/controls/tests/test_following_distance.py @@ -1,5 +1,5 @@ #!/usr/bin/env python3 -import unittest +import pytest import itertools from parameterized import parameterized_class @@ -32,14 +32,10 @@ def run_following_distance_simulation(v_lead, t_end=100.0, e2e=False, personalit log.LongitudinalPersonality.standard, log.LongitudinalPersonality.aggressive], [0,10,35])) # speed -class TestFollowingDistance(unittest.TestCase): +class TestFollowingDistance: def test_following_distance(self): v_lead = float(self.speed) simulation_steady_state = run_following_distance_simulation(v_lead, e2e=self.e2e, personality=self.personality) correct_steady_state = desired_follow_distance(v_lead, v_lead, get_T_FOLLOW(self.personality)) err_ratio = 0.2 if self.e2e else 0.1 - self.assertAlmostEqual(simulation_steady_state, correct_steady_state, delta=(err_ratio * correct_steady_state + .5)) - - -if __name__ == "__main__": - unittest.main() + assert simulation_steady_state == pytest.approx(correct_steady_state, abs=err_ratio * correct_steady_state + .5) diff --git a/selfdrive/controls/tests/test_lateral_mpc.py b/selfdrive/controls/tests/test_lateral_mpc.py index 8c09f46b60c0e4..3aa0fd1bce4804 100644 --- a/selfdrive/controls/tests/test_lateral_mpc.py +++ b/selfdrive/controls/tests/test_lateral_mpc.py @@ -1,4 +1,4 @@ -import unittest +import pytest import numpy as np from openpilot.selfdrive.controls.lib.lateral_mpc_lib.lat_mpc import LateralMpc from openpilot.selfdrive.controls.lib.drive_helpers import CAR_ROTATION_RADIUS @@ -27,20 +27,20 @@ def run_mpc(lat_mpc=None, v_ref=30., x_init=0., y_init=0.,
psi_init=0., curvatur return lat_mpc.x_sol -class TestLateralMpc(unittest.TestCase): +class TestLateralMpc: def _assert_null(self, sol, curvature=1e-6): for i in range(len(sol)): - self.assertAlmostEqual(sol[0,i,1], 0., delta=curvature) - self.assertAlmostEqual(sol[0,i,2], 0., delta=curvature) - self.assertAlmostEqual(sol[0,i,3], 0., delta=curvature) + assert sol[0,i,1] == pytest.approx(0, abs=curvature) + assert sol[0,i,2] == pytest.approx(0, abs=curvature) + assert sol[0,i,3] == pytest.approx(0, abs=curvature) def _assert_simmetry(self, sol, curvature=1e-6): for i in range(len(sol)): - self.assertAlmostEqual(sol[0,i,1], -sol[1,i,1], delta=curvature) - self.assertAlmostEqual(sol[0,i,2], -sol[1,i,2], delta=curvature) - self.assertAlmostEqual(sol[0,i,3], -sol[1,i,3], delta=curvature) - self.assertAlmostEqual(sol[0,i,0], sol[1,i,0], delta=curvature) + assert sol[0,i,1] == pytest.approx(-sol[1,i,1], abs=curvature) + assert sol[0,i,2] == pytest.approx(-sol[1,i,2], abs=curvature) + assert sol[0,i,3] == pytest.approx(-sol[1,i,3], abs=curvature) + assert sol[0,i,0] == pytest.approx(sol[1,i,0], abs=curvature) def test_straight(self): sol = run_mpc() @@ -74,7 +74,7 @@ def test_no_overshoot(self): y_init = 1. sol = run_mpc(y_init=y_init) for y in list(sol[:,1]): - self.assertGreaterEqual(y_init, abs(y)) + assert y_init >= abs(y) def test_switch_convergence(self): lat_mpc = LateralMpc() @@ -83,7 +83,3 @@ def test_switch_convergence(self): sol = run_mpc(lat_mpc=lat_mpc, poly_shift=-3.0, v_ref=7.0) left_psi_deg = np.degrees(sol[:,2]) np.testing.assert_almost_equal(right_psi_deg, -left_psi_deg, decimal=3) - - -if __name__ == "__main__": - unittest.main() diff --git a/selfdrive/controls/tests/test_leads.py b/selfdrive/controls/tests/test_leads.py index a06387a0872430..f4e97725ff1069 100755 --- a/selfdrive/controls/tests/test_leads.py +++ b/selfdrive/controls/tests/test_leads.py @@ -1,13 +1,11 @@ #!/usr/bin/env python3 -import unittest - import cereal.messaging as messaging from openpilot.selfdrive.test.process_replay import replay_process_with_name from openpilot.selfdrive.car.toyota.values import CAR as TOYOTA -class TestLeads(unittest.TestCase): +class TestLeads: def test_radar_fault(self): # if there's no radar-related can traffic, radard should either not respond or respond with an error # this is tightly coupled with underlying car radar_interface implementation, but it's a good sanity check @@ -29,8 +27,4 @@ def single_iter_pkg(): states = [m for m in out if m.which() == "radarState"] failures = [not state.valid and len(state.radarState.radarErrors) for state in states] - self.assertTrue(len(states) == 0 or all(failures)) - - -if __name__ == "__main__": - unittest.main() + assert len(states) == 0 or all(failures) diff --git a/selfdrive/controls/tests/test_state_machine.py b/selfdrive/controls/tests/test_state_machine.py index d49111752d2021..b92724ce4337a9 100755 --- a/selfdrive/controls/tests/test_state_machine.py +++ b/selfdrive/controls/tests/test_state_machine.py @@ -1,5 +1,4 @@ #!/usr/bin/env python3 -import unittest from cereal import car, log from openpilot.common.realtime import DT_CTRL @@ -28,9 +27,9 @@ def make_event(event_types): return 0 -class TestStateMachine(unittest.TestCase): +class TestStateMachine: - def setUp(self): + def setup_method(self): CarInterface, CarController, CarState = interfaces[MOCK.MOCK] CP = CarInterface.get_non_essential_params(MOCK.MOCK) CI = CarInterface(CP, CarController, CarState) @@ -46,7 +45,7 @@ def test_immediate_disable(self): 
self.controlsd.events.add(make_event([et, ET.IMMEDIATE_DISABLE])) self.controlsd.state = state self.controlsd.state_transition(self.CS) - self.assertEqual(State.disabled, self.controlsd.state) + assert State.disabled == self.controlsd.state self.controlsd.events.clear() def test_user_disable(self): @@ -55,7 +54,7 @@ self.controlsd.events.add(make_event([et, ET.USER_DISABLE])) self.controlsd.state = state self.controlsd.state_transition(self.CS) - self.assertEqual(State.disabled, self.controlsd.state) + assert State.disabled == self.controlsd.state self.controlsd.events.clear() def test_soft_disable(self): @@ -66,7 +65,7 @@ self.controlsd.events.add(make_event([et, ET.SOFT_DISABLE])) self.controlsd.state = state self.controlsd.state_transition(self.CS) - self.assertEqual(self.controlsd.state, State.disabled if state == State.disabled else State.softDisabling) + assert self.controlsd.state == (State.disabled if state == State.disabled else State.softDisabling) self.controlsd.events.clear() def test_soft_disable_timer(self): @@ -74,17 +73,17 @@ self.controlsd.events.add(make_event([ET.SOFT_DISABLE])) self.controlsd.state_transition(self.CS) for _ in range(int(SOFT_DISABLE_TIME / DT_CTRL)): - self.assertEqual(self.controlsd.state, State.softDisabling) + assert self.controlsd.state == State.softDisabling self.controlsd.state_transition(self.CS) - self.assertEqual(self.controlsd.state, State.disabled) + assert self.controlsd.state == State.disabled def test_no_entry(self): # Make sure noEntry keeps us disabled for et in ENABLE_EVENT_TYPES: self.controlsd.events.add(make_event([ET.NO_ENTRY, et])) self.controlsd.state_transition(self.CS) - self.assertEqual(self.controlsd.state, State.disabled) + assert self.controlsd.state == State.disabled self.controlsd.events.clear() def test_no_entry_pre_enable(self): @@ -92,7 +91,7 @@ self.controlsd.state = State.preEnabled self.controlsd.events.add(make_event([ET.NO_ENTRY, ET.PRE_ENABLE])) self.controlsd.state_transition(self.CS) - self.assertEqual(self.controlsd.state, State.preEnabled) + assert self.controlsd.state == State.preEnabled def test_maintain_states(self): # Given current state's event type, we should maintain state @@ -101,9 +100,5 @@ self.controlsd.state = state self.controlsd.events.add(make_event([et])) self.controlsd.state_transition(self.CS) - self.assertEqual(self.controlsd.state, state) + assert self.controlsd.state == state self.controlsd.events.clear() - - -if __name__ == "__main__": - unittest.main() diff --git a/selfdrive/locationd/test/test_calibrationd.py b/selfdrive/locationd/test/test_calibrationd.py index e2db0943978e25..598d5d2d5f882e 100755 --- a/selfdrive/locationd/test/test_calibrationd.py +++ b/selfdrive/locationd/test/test_calibrationd.py @@ -1,6 +1,5 @@ #!/usr/bin/env python3 import random -import unittest import numpy as np @@ -31,7 +30,7 @@ def process_messages(c, cam_odo_calib, cycles, [0.0, 0.0, HEIGHT_INIT.item()], [cam_odo_height_std, cam_odo_height_std, cam_odo_height_std]) -class TestCalibrationd(unittest.TestCase): +class TestCalibrationd: def test_read_saved_params(self): msg = messaging.new_message('liveCalibration') @@ -43,13 +42,13 @@ def test_read_saved_params(self): np.testing.assert_allclose(msg.liveCalibration.rpyCalib, c.rpy) np.testing.assert_allclose(msg.liveCalibration.height, c.height) - self.assertEqual(msg.liveCalibration.validBlocks,
c.valid_blocks) + assert msg.liveCalibration.validBlocks == c.valid_blocks def test_calibration_basics(self): c = Calibrator(param_put=False) process_messages(c, [0.0, 0.0, 0.0], BLOCK_SIZE * INPUTS_WANTED) - self.assertEqual(c.valid_blocks, INPUTS_WANTED) + assert c.valid_blocks == INPUTS_WANTED np.testing.assert_allclose(c.rpy, np.zeros(3)) np.testing.assert_allclose(c.height, HEIGHT_INIT) c.reset() @@ -59,7 +58,7 @@ def test_calibration_low_speed_reject(self): c = Calibrator(param_put=False) process_messages(c, [0.0, 0.0, 0.0], BLOCK_SIZE * INPUTS_WANTED, cam_odo_speed=MIN_SPEED_FILTER - 1) process_messages(c, [0.0, 0.0, 0.0], BLOCK_SIZE * INPUTS_WANTED, carstate_speed=MIN_SPEED_FILTER - 1) - self.assertEqual(c.valid_blocks, 0) + assert c.valid_blocks == 0 np.testing.assert_allclose(c.rpy, np.zeros(3)) np.testing.assert_allclose(c.height, HEIGHT_INIT) @@ -67,7 +66,7 @@ def test_calibration_low_speed_reject(self): def test_calibration_yaw_rate_reject(self): c = Calibrator(param_put=False) process_messages(c, [0.0, 0.0, 0.0], BLOCK_SIZE * INPUTS_WANTED, cam_odo_yr=MAX_YAW_RATE_FILTER) - self.assertEqual(c.valid_blocks, 0) + assert c.valid_blocks == 0 np.testing.assert_allclose(c.rpy, np.zeros(3)) np.testing.assert_allclose(c.height, HEIGHT_INIT) @@ -75,43 +74,40 @@ def test_calibration_yaw_rate_reject(self): def test_calibration_speed_std_reject(self): c = Calibrator(param_put=False) process_messages(c, [0.0, 0.0, 0.0], BLOCK_SIZE * INPUTS_WANTED, cam_odo_speed_std=1e3) - self.assertEqual(c.valid_blocks, INPUTS_NEEDED) + assert c.valid_blocks == INPUTS_NEEDED np.testing.assert_allclose(c.rpy, np.zeros(3)) def test_calibration_speed_std_height_reject(self): c = Calibrator(param_put=False) process_messages(c, [0.0, 0.0, 0.0], BLOCK_SIZE * INPUTS_WANTED, cam_odo_height_std=1e3) - self.assertEqual(c.valid_blocks, INPUTS_NEEDED) + assert c.valid_blocks == INPUTS_NEEDED np.testing.assert_allclose(c.rpy, np.zeros(3)) def test_calibration_auto_reset(self): c = Calibrator(param_put=False) process_messages(c, [0.0, 0.0, 0.0], BLOCK_SIZE * INPUTS_NEEDED) - self.assertEqual(c.valid_blocks, INPUTS_NEEDED) + assert c.valid_blocks == INPUTS_NEEDED np.testing.assert_allclose(c.rpy, [0.0, 0.0, 0.0], atol=1e-3) process_messages(c, [0.0, MAX_ALLOWED_PITCH_SPREAD*0.9, MAX_ALLOWED_YAW_SPREAD*0.9], BLOCK_SIZE + 10) - self.assertEqual(c.valid_blocks, INPUTS_NEEDED + 1) - self.assertEqual(c.cal_status, log.LiveCalibrationData.Status.calibrated) + assert c.valid_blocks == INPUTS_NEEDED + 1 + assert c.cal_status == log.LiveCalibrationData.Status.calibrated c = Calibrator(param_put=False) process_messages(c, [0.0, 0.0, 0.0], BLOCK_SIZE * INPUTS_NEEDED) - self.assertEqual(c.valid_blocks, INPUTS_NEEDED) + assert c.valid_blocks == INPUTS_NEEDED np.testing.assert_allclose(c.rpy, [0.0, 0.0, 0.0]) process_messages(c, [0.0, MAX_ALLOWED_PITCH_SPREAD*1.1, 0.0], BLOCK_SIZE + 10) - self.assertEqual(c.valid_blocks, 1) - self.assertEqual(c.cal_status, log.LiveCalibrationData.Status.recalibrating) + assert c.valid_blocks == 1 + assert c.cal_status == log.LiveCalibrationData.Status.recalibrating np.testing.assert_allclose(c.rpy, [0.0, MAX_ALLOWED_PITCH_SPREAD*1.1, 0.0], atol=1e-2) c = Calibrator(param_put=False) process_messages(c, [0.0, 0.0, 0.0], BLOCK_SIZE * INPUTS_NEEDED) - self.assertEqual(c.valid_blocks, INPUTS_NEEDED) + assert c.valid_blocks == INPUTS_NEEDED np.testing.assert_allclose(c.rpy, [0.0, 0.0, 0.0]) process_messages(c, [0.0, 0.0, MAX_ALLOWED_YAW_SPREAD*1.1], BLOCK_SIZE + 10) - self.assertEqual(c.valid_blocks, 1) - 
self.assertEqual(c.cal_status, log.LiveCalibrationData.Status.recalibrating) + assert c.valid_blocks == 1 + assert c.cal_status == log.LiveCalibrationData.Status.recalibrating np.testing.assert_allclose(c.rpy, [0.0, 0.0, MAX_ALLOWED_YAW_SPREAD*1.1], atol=1e-2) - -if __name__ == "__main__": - unittest.main() diff --git a/selfdrive/locationd/test/test_locationd.py b/selfdrive/locationd/test/test_locationd.py index cd032dbaf058fb..bac824badaa9e2 100755 --- a/selfdrive/locationd/test/test_locationd.py +++ b/selfdrive/locationd/test/test_locationd.py @@ -1,7 +1,7 @@ #!/usr/bin/env python3 +import pytest import json import random -import unittest import time import capnp @@ -13,13 +13,11 @@ from openpilot.selfdrive.manager.process_config import managed_processes -class TestLocationdProc(unittest.TestCase): +class TestLocationdProc: LLD_MSGS = ['gpsLocationExternal', 'cameraOdometry', 'carState', 'liveCalibration', 'accelerometer', 'gyroscope', 'magnetometer'] - def setUp(self): - random.seed(123489234) - + def setup_method(self): self.pm = messaging.PubMaster(self.LLD_MSGS) self.params = Params() @@ -27,7 +25,7 @@ def setUp(self): managed_processes['locationd'].prepare() managed_processes['locationd'].start() - def tearDown(self): + def teardown_method(self): managed_processes['locationd'].stop() def get_msg(self, name, t): @@ -65,6 +63,7 @@ def get_msg(self, name, t): return msg def test_params_gps(self): + random.seed(123489234) self.params.remove('LastGPSPosition') self.x = -2710700 + (random.random() * 1e5) @@ -86,10 +85,6 @@ def test_params_gps(self): time.sleep(1) # wait for async params write lastGPS = json.loads(self.params.get('LastGPSPosition')) - self.assertAlmostEqual(lastGPS['latitude'], self.lat, places=3) - self.assertAlmostEqual(lastGPS['longitude'], self.lon, places=3) - self.assertAlmostEqual(lastGPS['altitude'], self.alt, places=3) - - -if __name__ == "__main__": - unittest.main() + assert lastGPS['latitude'] == pytest.approx(self.lat, abs=0.001) + assert lastGPS['longitude'] == pytest.approx(self.lon, abs=0.001) + assert lastGPS['altitude'] == pytest.approx(self.alt, abs=0.001) diff --git a/selfdrive/locationd/test/test_locationd_scenarios.py b/selfdrive/locationd/test/test_locationd_scenarios.py index 3fdd47275f2864..be95c6fffb78e4 100755 --- a/selfdrive/locationd/test/test_locationd_scenarios.py +++ b/selfdrive/locationd/test/test_locationd_scenarios.py @@ -1,6 +1,5 @@ #!/usr/bin/env python3 import pytest -import unittest import numpy as np from collections import defaultdict from enum import Enum @@ -99,7 +98,7 @@ def run_scenarios(scenario, logs): @pytest.mark.xdist_group("test_locationd_scenarios") @pytest.mark.shared_download_cache -class TestLocationdScenarios(unittest.TestCase): +class TestLocationdScenarios: """ Test locationd with different scenarios. 
In all these scenarios, we expect the following: - locationd kalman filter should never go unstable (we care mostly about yaw_rate, roll, gpsOK, inputsOK, sensorsOK) @@ -107,7 +106,7 @@ class TestLocationdScenarios(unittest.TestCase): """ @classmethod - def setUpClass(cls): + def setup_class(cls): cls.logs = migrate_all(LogReader(TEST_ROUTE)) def test_base(self): @@ -118,8 +117,8 @@ def test_base(self): - roll: unchanged """ orig_data, replayed_data = run_scenarios(Scenario.BASE, self.logs) - self.assertTrue(np.allclose(orig_data['yaw_rate'], replayed_data['yaw_rate'], atol=np.radians(0.2))) - self.assertTrue(np.allclose(orig_data['roll'], replayed_data['roll'], atol=np.radians(0.5))) + assert np.allclose(orig_data['yaw_rate'], replayed_data['yaw_rate'], atol=np.radians(0.2)) + assert np.allclose(orig_data['roll'], replayed_data['roll'], atol=np.radians(0.5)) def test_gps_off(self): """ @@ -130,9 +129,9 @@ def test_gps_off(self): - gpsOK: False """ orig_data, replayed_data = run_scenarios(Scenario.GPS_OFF, self.logs) - self.assertTrue(np.allclose(orig_data['yaw_rate'], replayed_data['yaw_rate'], atol=np.radians(0.2))) - self.assertTrue(np.allclose(orig_data['roll'], replayed_data['roll'], atol=np.radians(0.5))) - self.assertTrue(np.all(replayed_data['gps_flag'] == 0.0)) + assert np.allclose(orig_data['yaw_rate'], replayed_data['yaw_rate'], atol=np.radians(0.2)) + assert np.allclose(orig_data['roll'], replayed_data['roll'], atol=np.radians(0.5)) + assert np.all(replayed_data['gps_flag'] == 0.0) def test_gps_off_midway(self): """ @@ -143,9 +142,9 @@ def test_gps_off_midway(self): - gpsOK: True for the first half, False for the second half """ orig_data, replayed_data = run_scenarios(Scenario.GPS_OFF_MIDWAY, self.logs) - self.assertTrue(np.allclose(orig_data['yaw_rate'], replayed_data['yaw_rate'], atol=np.radians(0.2))) - self.assertTrue(np.allclose(orig_data['roll'], replayed_data['roll'], atol=np.radians(0.5))) - self.assertTrue(np.diff(replayed_data['gps_flag'])[512] == -1.0) + assert np.allclose(orig_data['yaw_rate'], replayed_data['yaw_rate'], atol=np.radians(0.2)) + assert np.allclose(orig_data['roll'], replayed_data['roll'], atol=np.radians(0.5)) + assert np.diff(replayed_data['gps_flag'])[512] == -1.0 def test_gps_on_midway(self): """ @@ -156,9 +155,9 @@ def test_gps_on_midway(self): - gpsOK: False for the first half, True for the second half """ orig_data, replayed_data = run_scenarios(Scenario.GPS_ON_MIDWAY, self.logs) - self.assertTrue(np.allclose(orig_data['yaw_rate'], replayed_data['yaw_rate'], atol=np.radians(0.2))) - self.assertTrue(np.allclose(orig_data['roll'], replayed_data['roll'], atol=np.radians(1.5))) - self.assertTrue(np.diff(replayed_data['gps_flag'])[505] == 1.0) + assert np.allclose(orig_data['yaw_rate'], replayed_data['yaw_rate'], atol=np.radians(0.2)) + assert np.allclose(orig_data['roll'], replayed_data['roll'], atol=np.radians(1.5)) + assert np.diff(replayed_data['gps_flag'])[505] == 1.0 def test_gps_tunnel(self): """ @@ -169,10 +168,10 @@ def test_gps_tunnel(self): - gpsOK: False for the middle section, True for the rest """ orig_data, replayed_data = run_scenarios(Scenario.GPS_TUNNEL, self.logs) - self.assertTrue(np.allclose(orig_data['yaw_rate'], replayed_data['yaw_rate'], atol=np.radians(0.2))) - self.assertTrue(np.allclose(orig_data['roll'], replayed_data['roll'], atol=np.radians(0.5))) - self.assertTrue(np.diff(replayed_data['gps_flag'])[213] == -1.0) - self.assertTrue(np.diff(replayed_data['gps_flag'])[805] == 1.0) + assert 
np.allclose(orig_data['yaw_rate'], replayed_data['yaw_rate'], atol=np.radians(0.2)) + assert np.allclose(orig_data['roll'], replayed_data['roll'], atol=np.radians(0.5)) + assert np.diff(replayed_data['gps_flag'])[213] == -1.0 + assert np.diff(replayed_data['gps_flag'])[805] == 1.0 def test_gyro_off(self): """ @@ -183,9 +182,9 @@ def test_gyro_off(self): - sensorsOK: False """ _, replayed_data = run_scenarios(Scenario.GYRO_OFF, self.logs) - self.assertTrue(np.allclose(replayed_data['yaw_rate'], 0.0)) - self.assertTrue(np.allclose(replayed_data['roll'], 0.0)) - self.assertTrue(np.all(replayed_data['sensors_flag'] == 0.0)) + assert np.allclose(replayed_data['yaw_rate'], 0.0) + assert np.allclose(replayed_data['roll'], 0.0) + assert np.all(replayed_data['sensors_flag'] == 0.0) def test_gyro_spikes(self): """ @@ -196,10 +195,10 @@ def test_gyro_spikes(self): - inputsOK: False for some time after the spike, True for the rest """ orig_data, replayed_data = run_scenarios(Scenario.GYRO_SPIKE_MIDWAY, self.logs) - self.assertTrue(np.allclose(orig_data['yaw_rate'], replayed_data['yaw_rate'], atol=np.radians(0.2))) - self.assertTrue(np.allclose(orig_data['roll'], replayed_data['roll'], atol=np.radians(0.5))) - self.assertTrue(np.diff(replayed_data['inputs_flag'])[500] == -1.0) - self.assertTrue(np.diff(replayed_data['inputs_flag'])[694] == 1.0) + assert np.allclose(orig_data['yaw_rate'], replayed_data['yaw_rate'], atol=np.radians(0.2)) + assert np.allclose(orig_data['roll'], replayed_data['roll'], atol=np.radians(0.5)) + assert np.diff(replayed_data['inputs_flag'])[500] == -1.0 + assert np.diff(replayed_data['inputs_flag'])[694] == 1.0 def test_accel_off(self): """ @@ -210,9 +209,9 @@ def test_accel_off(self): - sensorsOK: False """ _, replayed_data = run_scenarios(Scenario.ACCEL_OFF, self.logs) - self.assertTrue(np.allclose(replayed_data['yaw_rate'], 0.0)) - self.assertTrue(np.allclose(replayed_data['roll'], 0.0)) - self.assertTrue(np.all(replayed_data['sensors_flag'] == 0.0)) + assert np.allclose(replayed_data['yaw_rate'], 0.0) + assert np.allclose(replayed_data['roll'], 0.0) + assert np.all(replayed_data['sensors_flag'] == 0.0) def test_accel_spikes(self): """ @@ -221,9 +220,5 @@ def test_accel_spikes(self): Expected Result: Right now, the kalman filter is not robust to small spikes like it is to gyroscope spikes. 
""" orig_data, replayed_data = run_scenarios(Scenario.ACCEL_SPIKE_MIDWAY, self.logs) - self.assertTrue(np.allclose(orig_data['yaw_rate'], replayed_data['yaw_rate'], atol=np.radians(0.2))) - self.assertTrue(np.allclose(orig_data['roll'], replayed_data['roll'], atol=np.radians(0.5))) - - -if __name__ == "__main__": - unittest.main() + assert np.allclose(orig_data['yaw_rate'], replayed_data['yaw_rate'], atol=np.radians(0.2)) + assert np.allclose(orig_data['roll'], replayed_data['roll'], atol=np.radians(0.5)) diff --git a/selfdrive/manager/test/test_manager.py b/selfdrive/manager/test/test_manager.py index 1ae94b26a146f1..4cdc99c240b885 100755 --- a/selfdrive/manager/test/test_manager.py +++ b/selfdrive/manager/test/test_manager.py @@ -3,7 +3,6 @@ import pytest import signal import time -import unittest from parameterized import parameterized @@ -21,15 +20,15 @@ @pytest.mark.tici -class TestManager(unittest.TestCase): - def setUp(self): +class TestManager: + def setup_method(self): HARDWARE.set_power_save(False) # ensure clean CarParams params = Params() params.clear_all() - def tearDown(self): + def teardown_method(self): manager.manager_cleanup() def test_manager_prepare(self): @@ -38,7 +37,7 @@ def test_manager_prepare(self): def test_blacklisted_procs(self): # TODO: ensure there are blacklisted procs until we have a dedicated test - self.assertTrue(len(BLACKLIST_PROCS), "No blacklisted procs to test not_run") + assert len(BLACKLIST_PROCS), "No blacklisted procs to test not_run" @parameterized.expand([(i,) for i in range(10)]) def test_startup_time(self, index): @@ -48,8 +47,8 @@ def test_startup_time(self, index): t = time.monotonic() - start assert t < MAX_STARTUP_TIME, f"startup took {t}s, expected <{MAX_STARTUP_TIME}s" - @unittest.skip("this test is flaky the way it's currently written, should be moved to test_onroad") - def test_clean_exit(self): + @pytest.mark.skip("this test is flaky the way it's currently written, should be moved to test_onroad") + def test_clean_exit(self, subtests): """ Ensure all processes exit cleanly when stopped. """ @@ -62,21 +61,17 @@ def test_clean_exit(self): time.sleep(10) for p in procs: - with self.subTest(proc=p.name): + with subtests.test(proc=p.name): state = p.get_process_state_msg() - self.assertTrue(state.running, f"{p.name} not running") + assert state.running, f"{p.name} not running" exit_code = p.stop(retry=False) - self.assertNotIn(p.name, BLACKLIST_PROCS, f"{p.name} was started") + assert p.name not in BLACKLIST_PROCS, f"{p.name} was started" - self.assertTrue(exit_code is not None, f"{p.name} failed to exit") + assert exit_code is not None, f"{p.name} failed to exit" # TODO: interrupted blocking read exits with 1 in cereal. 
use a more unique return code exit_codes = [0, 1] if p.sigkill: exit_codes = [-signal.SIGKILL] - self.assertIn(exit_code, exit_codes, f"{p.name} died with {exit_code}") - - -if __name__ == "__main__": - unittest.main() + assert exit_code in exit_codes, f"{p.name} died with {exit_code}" diff --git a/selfdrive/modeld/models/dmonitoring_model.current b/selfdrive/modeld/models/dmonitoring_model.current index ed495df3a85e57..f935ab06b34e27 100644 --- a/selfdrive/modeld/models/dmonitoring_model.current +++ b/selfdrive/modeld/models/dmonitoring_model.current @@ -1,2 +1,2 @@ -60abed33-9f25-4e34-9937-aaf918d41dfc -50887963963a3022e85ac0c94b0801aef955a608 \ No newline at end of file +5ec97a39-0095-4cea-adfa-6d72b1966cc1 +26cac7a9757a27c783a365403040a1bd27ccdaea \ No newline at end of file diff --git a/selfdrive/modeld/models/dmonitoring_model.onnx b/selfdrive/modeld/models/dmonitoring_model.onnx index 3e94e7d36a5668..dd3a6aba2e756e 100644 --- a/selfdrive/modeld/models/dmonitoring_model.onnx +++ b/selfdrive/modeld/models/dmonitoring_model.onnx @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:9a667cbde6eca86c5b653f57853e3d33b9a875bceb557e193d1bef78c2df9c37 -size 16132779 +oid sha256:3dd3982940d823c4fbb0429b733a0b78b0688d7d67aa76ff7b754a3e2f3d8683 +size 16132780 diff --git a/selfdrive/modeld/models/dmonitoring_model_q.dlc b/selfdrive/modeld/models/dmonitoring_model_q.dlc index 041949ad9476df..fc285954d4aa61 100644 --- a/selfdrive/modeld/models/dmonitoring_model_q.dlc +++ b/selfdrive/modeld/models/dmonitoring_model_q.dlc @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:f5729c457dd47f6c35e2a818eae14113526fd0bfac6417a809cbaf9d38697d9b -size 4488413 +oid sha256:7c26f13816b143f5bb29ac2980f8557bd5687a75729e4d895313fb9a5a1f0f46 +size 4488449 diff --git a/selfdrive/modeld/tests/test_modeld.py b/selfdrive/modeld/tests/test_modeld.py index 67c6f7103804c4..a18ce8fa424ea9 100755 --- a/selfdrive/modeld/tests/test_modeld.py +++ b/selfdrive/modeld/tests/test_modeld.py @@ -1,5 +1,4 @@ #!/usr/bin/env python3 -import unittest import numpy as np import random @@ -16,9 +15,9 @@ IMG_BYTES = IMG.flatten().tobytes() -class TestModeld(unittest.TestCase): +class TestModeld: - def setUp(self): + def setup_method(self): self.vipc_server = VisionIpcServer("camerad") self.vipc_server.create_buffers(VisionStreamType.VISION_STREAM_ROAD, 40, False, CAM.width, CAM.height) self.vipc_server.create_buffers(VisionStreamType.VISION_STREAM_DRIVER, 40, False, CAM.width, CAM.height) @@ -32,7 +31,7 @@ def setUp(self): managed_processes['modeld'].start() self.pm.wait_for_readers_to_update("roadCameraState", 10) - def tearDown(self): + def teardown_method(self): managed_processes['modeld'].stop() del self.vipc_server @@ -65,15 +64,15 @@ def test_modeld(self): self._wait() mdl = self.sm['modelV2'] - self.assertEqual(mdl.frameId, n) - self.assertEqual(mdl.frameIdExtra, n) - self.assertEqual(mdl.timestampEof, cs.timestampEof) - self.assertEqual(mdl.frameAge, 0) - self.assertEqual(mdl.frameDropPerc, 0) + assert mdl.frameId == n + assert mdl.frameIdExtra == n + assert mdl.timestampEof == cs.timestampEof + assert mdl.frameAge == 0 + assert mdl.frameDropPerc == 0 odo = self.sm['cameraOdometry'] - self.assertEqual(odo.frameId, n) - self.assertEqual(odo.timestampEof, cs.timestampEof) + assert odo.frameId == n + assert odo.timestampEof == cs.timestampEof def test_dropped_frames(self): """ @@ -95,13 +94,9 @@ def test_dropped_frames(self): mdl = self.sm['modelV2'] odo = self.sm['cameraOdometry'] - 
self.assertEqual(mdl.frameId, frame_id) - self.assertEqual(mdl.frameIdExtra, frame_id) - self.assertEqual(odo.frameId, frame_id) + assert mdl.frameId == frame_id + assert mdl.frameIdExtra == frame_id + assert odo.frameId == frame_id if n != frame_id: - self.assertFalse(self.sm.updated['modelV2']) - self.assertFalse(self.sm.updated['cameraOdometry']) - - -if __name__ == "__main__": - unittest.main() + assert not self.sm.updated['modelV2'] + assert not self.sm.updated['cameraOdometry'] diff --git a/selfdrive/monitoring/dmonitoringd.py b/selfdrive/monitoring/dmonitoringd.py index c18bbf29b991f3..91a4932584ef4e 100755 --- a/selfdrive/monitoring/dmonitoringd.py +++ b/selfdrive/monitoring/dmonitoringd.py @@ -19,7 +19,6 @@ def dmonitoringd_thread(): driver_status = DriverStatus(rhd_saved=params.get_bool("IsRhdDetected"), always_on=params.get_bool("AlwaysOnDM")) - v_cruise_last = 0 driver_engaged = False # 20Hz <- dmonitoringmodeld @@ -30,12 +29,8 @@ def dmonitoringd_thread(): # Get interaction if sm.updated['carState']: - v_cruise = sm['carState'].cruiseState.speed - driver_engaged = len(sm['carState'].buttonEvents) > 0 or \ - v_cruise != v_cruise_last or \ - sm['carState'].steeringPressed or \ - sm['carState'].gasPressed - v_cruise_last = v_cruise + driver_engaged = sm['carState'].steeringPressed or \ + sm['carState'].gasPressed if sm.updated['modelV2']: driver_status.set_policy(sm['modelV2'], sm['carState'].vEgo) diff --git a/selfdrive/monitoring/driver_monitor.py b/selfdrive/monitoring/driver_monitor.py index 7db0bd6c9f7d53..9faa5f4d05d1b9 100644 --- a/selfdrive/monitoring/driver_monitor.py +++ b/selfdrive/monitoring/driver_monitor.py @@ -31,8 +31,8 @@ def __init__(self): self._SG_THRESHOLD = 0.9 self._BLINK_THRESHOLD = 0.865 - self._EE_THRESH11 = 0.241 - self._EE_THRESH12 = 4.7 + self._EE_THRESH11 = 0.25 + self._EE_THRESH12 = 7.5 self._EE_MAX_OFFSET1 = 0.06 self._EE_MIN_OFFSET1 = 0.025 self._EE_THRESH21 = 0.01 diff --git a/selfdrive/monitoring/test_monitoring.py b/selfdrive/monitoring/test_monitoring.py index 50b2746e2dd66f..9395960b659a28 100755 --- a/selfdrive/monitoring/test_monitoring.py +++ b/selfdrive/monitoring/test_monitoring.py @@ -1,5 +1,4 @@ #!/usr/bin/env python3 -import unittest import numpy as np from cereal import car, log @@ -53,7 +52,7 @@ def make_msg(face_detected, distracted=False, model_uncertain=False): always_false = [False] * int(TEST_TIMESPAN / DT_DMON) # TODO: this only tests DriverStatus -class TestMonitoring(unittest.TestCase): +class TestMonitoring: def _run_seq(self, msgs, interaction, engaged, standstill): DS = DriverStatus() events = [] @@ -69,7 +68,7 @@ def _run_seq(self, msgs, interaction, engaged, standstill): return events, DS def _assert_no_events(self, events): - self.assertTrue(all(not len(e) for e in events)) + assert all(not len(e) for e in events) # engaged, driver is attentive all the time def test_fully_aware_driver(self): @@ -79,27 +78,27 @@ def test_fully_aware_driver(self): # engaged, driver is distracted and does nothing def test_fully_distracted_driver(self): events, d_status = self._run_seq(always_distracted, always_false, always_true, always_false) - self.assertEqual(len(events[int((d_status.settings._DISTRACTED_TIME-d_status.settings._DISTRACTED_PRE_TIME_TILL_TERMINAL)/2/DT_DMON)]), 0) - self.assertEqual(events[int((d_status.settings._DISTRACTED_TIME-d_status.settings._DISTRACTED_PRE_TIME_TILL_TERMINAL + - ((d_status.settings._DISTRACTED_PRE_TIME_TILL_TERMINAL-d_status.settings._DISTRACTED_PROMPT_TIME_TILL_TERMINAL)/2))/DT_DMON)].names[0], - 
EventName.preDriverDistracted) - self.assertEqual(events[int((d_status.settings._DISTRACTED_TIME-d_status.settings._DISTRACTED_PROMPT_TIME_TILL_TERMINAL + - ((d_status.settings._DISTRACTED_PROMPT_TIME_TILL_TERMINAL)/2))/DT_DMON)].names[0], EventName.promptDriverDistracted) - self.assertEqual(events[int((d_status.settings._DISTRACTED_TIME + - ((TEST_TIMESPAN-10-d_status.settings._DISTRACTED_TIME)/2))/DT_DMON)].names[0], EventName.driverDistracted) - self.assertIs(type(d_status.awareness), float) + assert len(events[int((d_status.settings._DISTRACTED_TIME-d_status.settings._DISTRACTED_PRE_TIME_TILL_TERMINAL)/2/DT_DMON)]) == 0 + assert events[int((d_status.settings._DISTRACTED_TIME-d_status.settings._DISTRACTED_PRE_TIME_TILL_TERMINAL + \ + ((d_status.settings._DISTRACTED_PRE_TIME_TILL_TERMINAL-d_status.settings._DISTRACTED_PROMPT_TIME_TILL_TERMINAL)/2))/DT_DMON)].names[0] == \ + EventName.preDriverDistracted + assert events[int((d_status.settings._DISTRACTED_TIME-d_status.settings._DISTRACTED_PROMPT_TIME_TILL_TERMINAL + \ + ((d_status.settings._DISTRACTED_PROMPT_TIME_TILL_TERMINAL)/2))/DT_DMON)].names[0] == EventName.promptDriverDistracted + assert events[int((d_status.settings._DISTRACTED_TIME + \ + ((TEST_TIMESPAN-10-d_status.settings._DISTRACTED_TIME)/2))/DT_DMON)].names[0] == EventName.driverDistracted + assert isinstance(d_status.awareness, float) # engaged, no face detected the whole time, no action def test_fully_invisible_driver(self): events, d_status = self._run_seq(always_no_face, always_false, always_true, always_false) - self.assertTrue(len(events[int((d_status.settings._AWARENESS_TIME-d_status.settings._AWARENESS_PRE_TIME_TILL_TERMINAL)/2/DT_DMON)]) == 0) - self.assertEqual(events[int((d_status.settings._AWARENESS_TIME-d_status.settings._AWARENESS_PRE_TIME_TILL_TERMINAL + - ((d_status.settings._AWARENESS_PRE_TIME_TILL_TERMINAL-d_status.settings._AWARENESS_PROMPT_TIME_TILL_TERMINAL)/2))/DT_DMON)].names[0], - EventName.preDriverUnresponsive) - self.assertEqual(events[int((d_status.settings._AWARENESS_TIME-d_status.settings._AWARENESS_PROMPT_TIME_TILL_TERMINAL + - ((d_status.settings._AWARENESS_PROMPT_TIME_TILL_TERMINAL)/2))/DT_DMON)].names[0], EventName.promptDriverUnresponsive) - self.assertEqual(events[int((d_status.settings._AWARENESS_TIME + - ((TEST_TIMESPAN-10-d_status.settings._AWARENESS_TIME)/2))/DT_DMON)].names[0], EventName.driverUnresponsive) + assert len(events[int((d_status.settings._AWARENESS_TIME-d_status.settings._AWARENESS_PRE_TIME_TILL_TERMINAL)/2/DT_DMON)]) == 0 + assert events[int((d_status.settings._AWARENESS_TIME-d_status.settings._AWARENESS_PRE_TIME_TILL_TERMINAL + \ + ((d_status.settings._AWARENESS_PRE_TIME_TILL_TERMINAL-d_status.settings._AWARENESS_PROMPT_TIME_TILL_TERMINAL)/2))/DT_DMON)].names[0] == \ + EventName.preDriverUnresponsive + assert events[int((d_status.settings._AWARENESS_TIME-d_status.settings._AWARENESS_PROMPT_TIME_TILL_TERMINAL + \ + ((d_status.settings._AWARENESS_PROMPT_TIME_TILL_TERMINAL)/2))/DT_DMON)].names[0] == EventName.promptDriverUnresponsive + assert events[int((d_status.settings._AWARENESS_TIME + \ + ((TEST_TIMESPAN-10-d_status.settings._AWARENESS_TIME)/2))/DT_DMON)].names[0] == EventName.driverUnresponsive # engaged, down to orange, driver pays attention, back to normal; then down to orange, driver touches wheel # - should have short orange recovery time and no green afterwards; wheel touch only recovers when paying attention @@ -111,12 +110,12 @@ def test_normal_driver(self): interaction_vector = [car_interaction_NOT_DETECTED] * 
int(DISTRACTED_SECONDS_TO_ORANGE*3/DT_DMON) + \ [car_interaction_DETECTED] * (int(TEST_TIMESPAN/DT_DMON)-int(DISTRACTED_SECONDS_TO_ORANGE*3/DT_DMON)) events, _ = self._run_seq(ds_vector, interaction_vector, always_true, always_false) - self.assertEqual(len(events[int(DISTRACTED_SECONDS_TO_ORANGE*0.5/DT_DMON)]), 0) - self.assertEqual(events[int((DISTRACTED_SECONDS_TO_ORANGE-0.1)/DT_DMON)].names[0], EventName.promptDriverDistracted) - self.assertEqual(len(events[int(DISTRACTED_SECONDS_TO_ORANGE*1.5/DT_DMON)]), 0) - self.assertEqual(events[int((DISTRACTED_SECONDS_TO_ORANGE*3-0.1)/DT_DMON)].names[0], EventName.promptDriverDistracted) - self.assertEqual(events[int((DISTRACTED_SECONDS_TO_ORANGE*3+0.1)/DT_DMON)].names[0], EventName.promptDriverDistracted) - self.assertEqual(len(events[int((DISTRACTED_SECONDS_TO_ORANGE*3+2.5)/DT_DMON)]), 0) + assert len(events[int(DISTRACTED_SECONDS_TO_ORANGE*0.5/DT_DMON)]) == 0 + assert events[int((DISTRACTED_SECONDS_TO_ORANGE-0.1)/DT_DMON)].names[0] == EventName.promptDriverDistracted + assert len(events[int(DISTRACTED_SECONDS_TO_ORANGE*1.5/DT_DMON)]) == 0 + assert events[int((DISTRACTED_SECONDS_TO_ORANGE*3-0.1)/DT_DMON)].names[0] == EventName.promptDriverDistracted + assert events[int((DISTRACTED_SECONDS_TO_ORANGE*3+0.1)/DT_DMON)].names[0] == EventName.promptDriverDistracted + assert len(events[int((DISTRACTED_SECONDS_TO_ORANGE*3+2.5)/DT_DMON)]) == 0 # engaged, down to orange, driver dodges camera, then comes back still distracted, down to red, \ # driver dodges, and then touches wheel to no avail, disengages and reengages @@ -135,10 +134,10 @@ def test_biggest_comma_fan(self): op_vector[int((DISTRACTED_SECONDS_TO_RED+2*_invisible_time+2.5)/DT_DMON):int((DISTRACTED_SECONDS_TO_RED+2*_invisible_time+3)/DT_DMON)] \ = [False] * int(0.5/DT_DMON) events, _ = self._run_seq(ds_vector, interaction_vector, op_vector, always_false) - self.assertEqual(events[int((DISTRACTED_SECONDS_TO_ORANGE+0.5*_invisible_time)/DT_DMON)].names[0], EventName.promptDriverDistracted) - self.assertEqual(events[int((DISTRACTED_SECONDS_TO_RED+1.5*_invisible_time)/DT_DMON)].names[0], EventName.driverDistracted) - self.assertEqual(events[int((DISTRACTED_SECONDS_TO_RED+2*_invisible_time+1.5)/DT_DMON)].names[0], EventName.driverDistracted) - self.assertTrue(len(events[int((DISTRACTED_SECONDS_TO_RED+2*_invisible_time+3.5)/DT_DMON)]) == 0) + assert events[int((DISTRACTED_SECONDS_TO_ORANGE+0.5*_invisible_time)/DT_DMON)].names[0] == EventName.promptDriverDistracted + assert events[int((DISTRACTED_SECONDS_TO_RED+1.5*_invisible_time)/DT_DMON)].names[0] == EventName.driverDistracted + assert events[int((DISTRACTED_SECONDS_TO_RED+2*_invisible_time+1.5)/DT_DMON)].names[0] == EventName.driverDistracted + assert len(events[int((DISTRACTED_SECONDS_TO_RED+2*_invisible_time+3.5)/DT_DMON)]) == 0 # engaged, invisible driver, down to orange, driver touches wheel; then down to orange again, driver appears # - both actions should clear the alert, but momentary appearance should not @@ -150,15 +149,15 @@ def test_sometimes_transparent_commuter(self): [msg_ATTENTIVE] * int(_visible_time/DT_DMON) interaction_vector[int((INVISIBLE_SECONDS_TO_ORANGE)/DT_DMON):int((INVISIBLE_SECONDS_TO_ORANGE+1)/DT_DMON)] = [True] * int(1/DT_DMON) events, _ = self._run_seq(ds_vector, interaction_vector, 2*always_true, 2*always_false) - self.assertTrue(len(events[int(INVISIBLE_SECONDS_TO_ORANGE*0.5/DT_DMON)]) == 0) - self.assertEqual(events[int((INVISIBLE_SECONDS_TO_ORANGE-0.1)/DT_DMON)].names[0], EventName.promptDriverUnresponsive) - 
self.assertTrue(len(events[int((INVISIBLE_SECONDS_TO_ORANGE+0.1)/DT_DMON)]) == 0) + assert len(events[int(INVISIBLE_SECONDS_TO_ORANGE*0.5/DT_DMON)]) == 0 + assert events[int((INVISIBLE_SECONDS_TO_ORANGE-0.1)/DT_DMON)].names[0] == EventName.promptDriverUnresponsive + assert len(events[int((INVISIBLE_SECONDS_TO_ORANGE+0.1)/DT_DMON)]) == 0 if _visible_time == 0.5: - self.assertEqual(events[int((INVISIBLE_SECONDS_TO_ORANGE*2+1-0.1)/DT_DMON)].names[0], EventName.promptDriverUnresponsive) - self.assertEqual(events[int((INVISIBLE_SECONDS_TO_ORANGE*2+1+0.1+_visible_time)/DT_DMON)].names[0], EventName.preDriverUnresponsive) + assert events[int((INVISIBLE_SECONDS_TO_ORANGE*2+1-0.1)/DT_DMON)].names[0] == EventName.promptDriverUnresponsive + assert events[int((INVISIBLE_SECONDS_TO_ORANGE*2+1+0.1+_visible_time)/DT_DMON)].names[0] == EventName.preDriverUnresponsive elif _visible_time == 10: - self.assertEqual(events[int((INVISIBLE_SECONDS_TO_ORANGE*2+1-0.1)/DT_DMON)].names[0], EventName.promptDriverUnresponsive) - self.assertTrue(len(events[int((INVISIBLE_SECONDS_TO_ORANGE*2+1+0.1+_visible_time)/DT_DMON)]) == 0) + assert events[int((INVISIBLE_SECONDS_TO_ORANGE*2+1-0.1)/DT_DMON)].names[0] == EventName.promptDriverUnresponsive + assert len(events[int((INVISIBLE_SECONDS_TO_ORANGE*2+1+0.1+_visible_time)/DT_DMON)]) == 0 # engaged, invisible driver, down to red, driver appears and then touches wheel, then disengages/reengages # - only disengage will clear the alert @@ -171,18 +170,18 @@ def test_last_second_responder(self): interaction_vector[int((INVISIBLE_SECONDS_TO_RED+_visible_time)/DT_DMON):int((INVISIBLE_SECONDS_TO_RED+_visible_time+1)/DT_DMON)] = [True] * int(1/DT_DMON) op_vector[int((INVISIBLE_SECONDS_TO_RED+_visible_time+1)/DT_DMON):int((INVISIBLE_SECONDS_TO_RED+_visible_time+0.5)/DT_DMON)] = [False] * int(0.5/DT_DMON) events, _ = self._run_seq(ds_vector, interaction_vector, op_vector, always_false) - self.assertTrue(len(events[int(INVISIBLE_SECONDS_TO_ORANGE*0.5/DT_DMON)]) == 0) - self.assertEqual(events[int((INVISIBLE_SECONDS_TO_ORANGE-0.1)/DT_DMON)].names[0], EventName.promptDriverUnresponsive) - self.assertEqual(events[int((INVISIBLE_SECONDS_TO_RED-0.1)/DT_DMON)].names[0], EventName.driverUnresponsive) - self.assertEqual(events[int((INVISIBLE_SECONDS_TO_RED+0.5*_visible_time)/DT_DMON)].names[0], EventName.driverUnresponsive) - self.assertEqual(events[int((INVISIBLE_SECONDS_TO_RED+_visible_time+0.5)/DT_DMON)].names[0], EventName.driverUnresponsive) - self.assertTrue(len(events[int((INVISIBLE_SECONDS_TO_RED+_visible_time+1+0.1)/DT_DMON)]) == 0) + assert len(events[int(INVISIBLE_SECONDS_TO_ORANGE*0.5/DT_DMON)]) == 0 + assert events[int((INVISIBLE_SECONDS_TO_ORANGE-0.1)/DT_DMON)].names[0] == EventName.promptDriverUnresponsive + assert events[int((INVISIBLE_SECONDS_TO_RED-0.1)/DT_DMON)].names[0] == EventName.driverUnresponsive + assert events[int((INVISIBLE_SECONDS_TO_RED+0.5*_visible_time)/DT_DMON)].names[0] == EventName.driverUnresponsive + assert events[int((INVISIBLE_SECONDS_TO_RED+_visible_time+0.5)/DT_DMON)].names[0] == EventName.driverUnresponsive + assert len(events[int((INVISIBLE_SECONDS_TO_RED+_visible_time+1+0.1)/DT_DMON)]) == 0 # disengaged, always distracted driver # - dm should stay quiet when not engaged def test_pure_dashcam_user(self): events, _ = self._run_seq(always_distracted, always_false, always_false, always_false) - self.assertTrue(sum(len(event) for event in events) == 0) + assert sum(len(event) for event in events) == 0 # engaged, car stops at traffic light, down to orange, 
no action, then car starts moving # - should only reach green when stopped, but continues counting down on launch @@ -191,10 +190,10 @@ def test_long_traffic_light_victim(self): standstill_vector = always_true[:] standstill_vector[int(_redlight_time/DT_DMON):] = [False] * int((TEST_TIMESPAN-_redlight_time)/DT_DMON) events, d_status = self._run_seq(always_distracted, always_false, always_true, standstill_vector) - self.assertEqual(events[int((d_status.settings._DISTRACTED_TIME-d_status.settings._DISTRACTED_PRE_TIME_TILL_TERMINAL+1)/DT_DMON)].names[0], - EventName.preDriverDistracted) - self.assertEqual(events[int((_redlight_time-0.1)/DT_DMON)].names[0], EventName.preDriverDistracted) - self.assertEqual(events[int((_redlight_time+0.5)/DT_DMON)].names[0], EventName.promptDriverDistracted) + assert events[int((d_status.settings._DISTRACTED_TIME-d_status.settings._DISTRACTED_PRE_TIME_TILL_TERMINAL+1)/DT_DMON)].names[0] == \ + EventName.preDriverDistracted + assert events[int((_redlight_time-0.1)/DT_DMON)].names[0] == EventName.preDriverDistracted + assert events[int((_redlight_time+0.5)/DT_DMON)].names[0] == EventName.promptDriverDistracted # engaged, model is somehow uncertain and driver is distracted # - should fall back to wheel touch after uncertain alert @@ -202,13 +201,10 @@ def test_somehow_indecisive_model(self): ds_vector = [msg_DISTRACTED_BUT_SOMEHOW_UNCERTAIN] * int(TEST_TIMESPAN/DT_DMON) interaction_vector = always_false[:] events, d_status = self._run_seq(ds_vector, interaction_vector, always_true, always_false) - self.assertTrue(EventName.preDriverUnresponsive in - events[int((INVISIBLE_SECONDS_TO_ORANGE-1+DT_DMON*d_status.settings._HI_STD_FALLBACK_TIME-0.1)/DT_DMON)].names) - self.assertTrue(EventName.promptDriverUnresponsive in - events[int((INVISIBLE_SECONDS_TO_ORANGE-1+DT_DMON*d_status.settings._HI_STD_FALLBACK_TIME+0.1)/DT_DMON)].names) - self.assertTrue(EventName.driverUnresponsive in - events[int((INVISIBLE_SECONDS_TO_RED-1+DT_DMON*d_status.settings._HI_STD_FALLBACK_TIME+0.1)/DT_DMON)].names) + assert EventName.preDriverUnresponsive in \ + events[int((INVISIBLE_SECONDS_TO_ORANGE-1+DT_DMON*d_status.settings._HI_STD_FALLBACK_TIME-0.1)/DT_DMON)].names + assert EventName.promptDriverUnresponsive in \ + events[int((INVISIBLE_SECONDS_TO_ORANGE-1+DT_DMON*d_status.settings._HI_STD_FALLBACK_TIME+0.1)/DT_DMON)].names + assert EventName.driverUnresponsive in \ + events[int((INVISIBLE_SECONDS_TO_RED-1+DT_DMON*d_status.settings._HI_STD_FALLBACK_TIME+0.1)/DT_DMON)].names - -if __name__ == "__main__": - unittest.main() diff --git a/selfdrive/navd/tests/test_map_renderer.py b/selfdrive/navd/tests/test_map_renderer.py index 832e0d1eab454a..52b594a57e8f77 100755 --- a/selfdrive/navd/tests/test_map_renderer.py +++ b/selfdrive/navd/tests/test_map_renderer.py @@ -3,7 +3,6 @@ import numpy as np import os import pytest -import unittest import requests import threading import http.server @@ -66,11 +65,11 @@ def enable_internet(self): @pytest.mark.skip(reason="not used") -class TestMapRenderer(unittest.TestCase): +class TestMapRenderer: server: MapBoxInternetDisabledServer @classmethod - def setUpClass(cls): + def setup_class(cls): assert "MAPBOX_TOKEN" in os.environ cls.original_token = os.environ["MAPBOX_TOKEN"] cls.server = MapBoxInternetDisabledServer() @@ -78,10 +77,10 @@ def setUpClass(cls): time.sleep(0.5) # wait for server to startup @classmethod - def tearDownClass(cls) -> None: + def teardown_class(cls) -> None: cls.server.stop() - def setUp(self): + def setup_method(self): 
self.server.enable_internet() os.environ['MAPS_HOST'] = f'http://localhost:{self.server.port}' @@ -203,15 +202,12 @@ def test_render_time_distribution(self): def assert_stat(stat, nominal, tol=0.3): tol = (nominal / (1+tol)), (nominal * (1+tol)) - self.assertTrue(tol[0] < stat < tol[1], f"{stat} not in tolerance {tol}") + assert tol[0] < stat < tol[1], f"{stat} not in tolerance {tol}" assert_stat(_mean, 0.030) assert_stat(_median, 0.027) assert_stat(_stddev, 0.0078) - self.assertLess(_max, 0.065) - self.assertGreater(_min, 0.015) + assert _max < 0.065 + assert _min > 0.015 - -if __name__ == "__main__": - unittest.main() diff --git a/selfdrive/navd/tests/test_navd.py b/selfdrive/navd/tests/test_navd.py index 61be6cc387f75d..07f93036535a9d 100755 --- a/selfdrive/navd/tests/test_navd.py +++ b/selfdrive/navd/tests/test_navd.py @@ -1,7 +1,6 @@ #!/usr/bin/env python3 import json import random -import unittest import numpy as np from parameterized import parameterized @@ -11,12 +10,12 @@ from openpilot.selfdrive.manager.process_config import managed_processes -class TestNavd(unittest.TestCase): - def setUp(self): +class TestNavd: + def setup_method(self): self.params = Params() self.sm = messaging.SubMaster(['navRoute', 'navInstruction']) - def tearDown(self): + def teardown_method(self): managed_processes['navd'].stop() def _check_route(self, start, end, check_coords=True): @@ -57,7 +56,3 @@ def test_random(self, index): start = {"latitude": random.uniform(-90, 90), "longitude": random.uniform(-180, 180)} end = {"latitude": random.uniform(-90, 90), "longitude": random.uniform(-180, 180)} self._check_route(start, end, check_coords=False) - - -if __name__ == "__main__": - unittest.main() diff --git a/selfdrive/test/helpers.py b/selfdrive/test/helpers.py index ce8918aec32d31..148d142bb6ceea 100644 --- a/selfdrive/test/helpers.py +++ b/selfdrive/test/helpers.py @@ -3,6 +3,7 @@ import os import threading import time +import pytest from functools import wraps @@ -32,15 +33,15 @@ def phone_only(f): @wraps(f) def wrap(self, *args, **kwargs): if PC: - self.skipTest("This test is not meant to run on PC") - f(self, *args, **kwargs) + pytest.skip("This test is not meant to run on PC") + return f(self, *args, **kwargs) return wrap def release_only(f): @wraps(f) def wrap(self, *args, **kwargs): if "RELEASE" not in os.environ: - self.skipTest("This test is only for release branches") + pytest.skip("This test is only for release branches") f(self, *args, **kwargs) return wrap diff --git a/selfdrive/test/longitudinal_maneuvers/test_longitudinal.py b/selfdrive/test/longitudinal_maneuvers/test_longitudinal.py index 713b7801f8ad41..0ad6d6d4fd4420 100755 --- a/selfdrive/test/longitudinal_maneuvers/test_longitudinal.py +++ b/selfdrive/test/longitudinal_maneuvers/test_longitudinal.py @@ -1,6 +1,5 @@ #!/usr/bin/env python3 import itertools -import unittest from parameterized import parameterized_class from openpilot.selfdrive.controls.lib.longitudinal_mpc_lib.long_mpc import STOP_DISTANCE @@ -144,17 +143,13 @@ def create_maneuvers(kwargs): @parameterized_class(("e2e", "force_decel"), itertools.product([True, False], repeat=2)) -class LongitudinalControl(unittest.TestCase): +class TestLongitudinalControl: e2e: bool force_decel: bool - def test_maneuver(self): + def test_maneuver(self, subtests): for maneuver in create_maneuvers({"e2e": self.e2e, "force_decel": self.force_decel}): - with self.subTest(title=maneuver.title, e2e=maneuver.e2e, force_decel=maneuver.force_decel): + with subtests.test(title=maneuver.title, 
e2e=maneuver.e2e, force_decel=maneuver.force_decel): print(maneuver.title, f'in {"e2e" if maneuver.e2e else "acc"} mode') valid, _ = maneuver.evaluate() - self.assertTrue(valid) - - -if __name__ == "__main__": - unittest.main(failfast=True) + assert valid diff --git a/selfdrive/test/process_replay/model_replay_ref_commit b/selfdrive/test/process_replay/model_replay_ref_commit index 47571656e56777..87f1adfadad766 100644 --- a/selfdrive/test/process_replay/model_replay_ref_commit +++ b/selfdrive/test/process_replay/model_replay_ref_commit @@ -1 +1 @@ -69a3b169ebc478285dad5eea87658ed2cb8fee13 +73eb11111fb6407fa307c3f2bdd3331f2514c707 diff --git a/selfdrive/test/process_replay/ref_commit b/selfdrive/test/process_replay/ref_commit index a61fd8e23bccbd..3fc6acc3cb1cbb 100644 --- a/selfdrive/test/process_replay/ref_commit +++ b/selfdrive/test/process_replay/ref_commit @@ -1 +1 @@ -e5a385503e4307ae77c73736a602380b08e61334 +f1ecdf9048fb12e289baf4933cb3ef12e486252c diff --git a/selfdrive/test/process_replay/test_fuzzy.py b/selfdrive/test/process_replay/test_fuzzy.py index 6c81119fbf11c0..d295092b207d5d 100755 --- a/selfdrive/test/process_replay/test_fuzzy.py +++ b/selfdrive/test/process_replay/test_fuzzy.py @@ -3,7 +3,6 @@ from hypothesis import given, HealthCheck, Phase, settings import hypothesis.strategies as st from parameterized import parameterized -import unittest from cereal import log from openpilot.selfdrive.car.toyota.values import CAR as TOYOTA @@ -17,7 +16,7 @@ TEST_CASES = [(cfg.proc_name, copy.deepcopy(cfg)) for cfg in pr.CONFIGS if cfg.proc_name not in NOT_TESTED] -class TestFuzzProcesses(unittest.TestCase): +class TestFuzzProcesses: # TODO: make this faster and increase examples @parameterized.expand(TEST_CASES) @@ -28,6 +27,3 @@ def test_fuzz_process(self, proc_name, cfg, data): lr = [log.Event.new_message(**m).as_reader() for m in msgs] cfg.timeout = 5 pr.replay_process(cfg, lr, fingerprint=TOYOTA.TOYOTA_COROLLA_TSS2, disable_progress=True) - -if __name__ == "__main__": - unittest.main() diff --git a/selfdrive/test/process_replay/test_processes.py b/selfdrive/test/process_replay/test_processes.py index 5fa80f0e095f33..7a482c1bdc8566 100755 --- a/selfdrive/test/process_replay/test_processes.py +++ b/selfdrive/test/process_replay/test_processes.py @@ -111,6 +111,12 @@ def test_process(cfg, lr, segment, ref_log_path, new_log_path, ignore_fields=Non if segment not in ("regen6CA24BC3035|2023-10-30--23-14-28--0", "regen7D2D3F82D5B|2023-10-30--23-15-55--0"): return f"Route did not enable at all or for long enough: {new_log_path}", log_msgs + if cfg.proc_name != 'ubloxd' or segment != 'regen6CA24BC3035|2023-10-30--23-14-28--0': + seen_msgs = {m.which() for m in log_msgs} + expected_msgs = set(cfg.subs) + if seen_msgs != expected_msgs: + return f"Expected messages: {expected_msgs}, but got: {seen_msgs}", log_msgs + try: return compare_logs(ref_log_msgs, log_msgs, ignore_fields + cfg.ignore, ignore_msgs, cfg.tolerance), log_msgs except Exception as e: diff --git a/selfdrive/test/process_replay/test_regen.py b/selfdrive/test/process_replay/test_regen.py index d98963549727e7..c27d9e8f7b4c52 100755 --- a/selfdrive/test/process_replay/test_regen.py +++ b/selfdrive/test/process_replay/test_regen.py @@ -1,7 +1,5 @@ #!/usr/bin/env python3 -import unittest - from parameterized import parameterized from openpilot.selfdrive.test.process_replay.regen import regen_segment, DummyFrameReader @@ -30,7 +28,7 @@ def ci_setup_data_readers(route, sidx): return lr, frs -class TestRegen(unittest.TestCase): 
+class TestRegen: @parameterized.expand(TESTED_SEGMENTS) def test_engaged(self, case_name, segment): route, sidx = segment.rsplit("--", 1) @@ -38,8 +36,4 @@ def test_engaged(self, case_name, segment): output_logs = regen_segment(lr, frs, disable_tqdm=True) engaged = check_openpilot_enabled(output_logs) - self.assertTrue(engaged, f"openpilot not engaged in {case_name}") - - -if __name__=='__main__': - unittest.main() + assert engaged, f"openpilot not engaged in {case_name}" diff --git a/selfdrive/test/test_onroad.py b/selfdrive/test/test_onroad.py index cd0846c894aa35..9aca9afcd98c6b 100755 --- a/selfdrive/test/test_onroad.py +++ b/selfdrive/test/test_onroad.py @@ -10,7 +10,6 @@ import subprocess import time import numpy as np -import unittest from collections import Counter, defaultdict from functools import cached_property from pathlib import Path @@ -102,10 +101,10 @@ def cputime_total(ct): @pytest.mark.tici -class TestOnroad(unittest.TestCase): +class TestOnroad: @classmethod - def setUpClass(cls): + def setup_class(cls): if "DEBUG" in os.environ: segs = filter(lambda x: os.path.exists(os.path.join(x, "rlog")), Path(Paths.log_root()).iterdir()) segs = sorted(segs, key=lambda x: x.stat().st_mtime) @@ -181,7 +180,7 @@ def service_msgs(self): msgs[m.which()].append(m) return msgs - def test_service_frequencies(self): + def test_service_frequencies(self, subtests): for s, msgs in self.service_msgs.items(): if s in ('initData', 'sentinel'): continue @@ -190,18 +189,18 @@ def test_service_frequencies(self): if s in ('ubloxGnss', 'ubloxRaw', 'gnssMeasurements', 'gpsLocation', 'gpsLocationExternal', 'qcomGnss'): continue - with self.subTest(service=s): + with subtests.test(service=s): assert len(msgs) >= math.floor(SERVICE_LIST[s].frequency*55) def test_cloudlog_size(self): msgs = [m for m in self.lr if m.which() == 'logMessage'] total_size = sum(len(m.as_builder().to_bytes()) for m in msgs) - self.assertLess(total_size, 3.5e5) + assert total_size < 3.5e5 cnt = Counter(json.loads(m.logMessage)['filename'] for m in msgs) big_logs = [f for f, n in cnt.most_common(3) if n / sum(cnt.values()) > 30.] - self.assertEqual(len(big_logs), 0, f"Log spam: {big_logs}") + assert len(big_logs) == 0, f"Log spam: {big_logs}" def test_log_sizes(self): for f, sz in self.log_sizes.items(): @@ -230,15 +229,15 @@ def test_ui_timings(self): result += "------------------------------------------------\n" print(result) - self.assertLess(max(ts), 250.) - self.assertLess(np.mean(ts), 10.) + assert max(ts) < 250. + assert np.mean(ts) < 10. #self.assertLess(np.std(ts), 5.) # some slow frames are expected since camerad/modeld can preempt ui veryslow = [x for x in ts if x > 40.] 
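TestLongitudinalControl and TestOnroad above take a subtests argument in place of unittest's self.subTest: pytest injects that fixture from the pytest-subtests plugin, and each with subtests.test(...) block is reported as its own pass or fail without aborting the surrounding loop. A minimal sketch of the pattern, assuming pytest-subtests is installed; the service table here is invented:

    SERVICES = {"deviceState": 2.0, "controlsState": 100.0, "modelV2": 20.0}  # Hz, illustrative

    def test_service_frequencies(subtests):
      for name, freq in SERVICES.items():
        # each iteration is reported separately; one failing service
        # does not stop the checks for the remaining services
        with subtests.test(service=name):
          assert freq > 0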
assert len(veryslow) < 5, f"Too many slow frame draw times: {veryslow}" - def test_cpu_usage(self): + def test_cpu_usage(self, subtests): result = "\n" result += "------------------------------------------------\n" result += "------------------ CPU Usage -------------------\n" @@ -286,12 +285,12 @@ def test_cpu_usage(self): # Ensure there's no missing procs all_procs = {p.name for p in self.service_msgs['managerState'][0].managerState.processes if p.shouldBeRunning} for p in all_procs: - with self.subTest(proc=p): + with subtests.test(proc=p): assert any(p in pp for pp in PROCS.keys()), f"Expected CPU usage missing for {p}" # total CPU check procs_tot = sum([(max(x) if isinstance(x, tuple) else x) for x in PROCS.values()]) - with self.subTest(name="total CPU"): + with subtests.test(name="total CPU"): assert procs_tot < MAX_TOTAL_CPU, "Total CPU budget exceeded" result += "------------------------------------------------\n" result += f"Total allocated CPU usage is {procs_tot}%, budget is {MAX_TOTAL_CPU}%, {MAX_TOTAL_CPU-procs_tot:.1f}% left\n" @@ -299,7 +298,7 @@ def test_cpu_usage(self): print(result) - self.assertTrue(cpu_ok) + assert cpu_ok def test_memory_usage(self): mems = [m.deviceState.memoryUsagePercent for m in self.service_msgs['deviceState']] @@ -307,10 +306,10 @@ def test_memory_usage(self): # check for big leaks. note that memory usage is # expected to go up while the MSGQ buffers fill up - self.assertLessEqual(max(mems) - min(mems), 3.0) + assert max(mems) - min(mems) <= 3.0 def test_gpu_usage(self): - self.assertEqual(self.gpu_procs, {"weston", "ui", "camerad", "selfdrive.modeld.modeld"}) + assert self.gpu_procs == {"weston", "ui", "camerad", "selfdrive.modeld.modeld"} def test_camera_processing_time(self): result = "\n" @@ -319,14 +318,14 @@ def test_camera_processing_time(self): result += "------------------------------------------------\n" ts = [getattr(m, m.which()).processingTime for m in self.lr if 'CameraState' in m.which()] - self.assertLess(min(ts), 0.025, f"high execution time: {min(ts)}") + assert min(ts) < 0.025, f"high execution time: {min(ts)}" result += f"execution time: min {min(ts):.5f}s\n" result += f"execution time: max {max(ts):.5f}s\n" result += f"execution time: mean {np.mean(ts):.5f}s\n" result += "------------------------------------------------\n" print(result) - @unittest.skip("TODO: enable once timings are fixed") + @pytest.mark.skip("TODO: enable once timings are fixed") def test_camera_frame_timings(self): result = "\n" result += "------------------------------------------------\n" @@ -336,7 +335,7 @@ def test_camera_frame_timings(self): ts = [getattr(m, m.which()).timestampSof for m in self.lr if name in m.which()] d_ms = np.diff(ts) / 1e6 d50 = np.abs(d_ms-50) - self.assertLess(max(d50), 1.0, f"high sof delta vs 50ms: {max(d50)}") + assert max(d50) < 1.0, f"high sof delta vs 50ms: {max(d50)}" result += f"{name} sof delta vs 50ms: min {min(d50):.5f}s\n" result += f"{name} sof delta vs 50ms: max {max(d50):.5f}s\n" result += f"{name} sof delta vs 50ms: mean {d50.mean():.5f}s\n" @@ -352,8 +351,8 @@ def test_mpc_execution_timings(self): cfgs = [("longitudinalPlan", 0.05, 0.05),] for (s, instant_max, avg_max) in cfgs: ts = [getattr(m, s).solverExecutionTime for m in self.service_msgs[s]] - self.assertLess(max(ts), instant_max, f"high '{s}' execution time: {max(ts)}") - self.assertLess(np.mean(ts), avg_max, f"high avg '{s}' execution time: {np.mean(ts)}") + assert max(ts) < instant_max, f"high '{s}' execution time: {max(ts)}" + assert np.mean(ts) < 
avg_max, f"high avg '{s}' execution time: {np.mean(ts)}" result += f"'{s}' execution time: min {min(ts):.5f}s\n" result += f"'{s}' execution time: max {max(ts):.5f}s\n" result += f"'{s}' execution time: mean {np.mean(ts):.5f}s\n" @@ -372,8 +371,8 @@ def test_model_execution_timings(self): ] for (s, instant_max, avg_max) in cfgs: ts = [getattr(m, s).modelExecutionTime for m in self.service_msgs[s]] - self.assertLess(max(ts), instant_max, f"high '{s}' execution time: {max(ts)}") - self.assertLess(np.mean(ts), avg_max, f"high avg '{s}' execution time: {np.mean(ts)}") + assert max(ts) < instant_max, f"high '{s}' execution time: {max(ts)}" + assert np.mean(ts) < avg_max, f"high avg '{s}' execution time: {np.mean(ts)}" result += f"'{s}' execution time: min {min(ts):.5f}s\n" result += f"'{s}' execution time: max {max(ts):.5f}s\n" result += f"'{s}' execution time: mean {np.mean(ts):.5f}s\n" @@ -409,7 +408,7 @@ def test_timings(self): result += f"{''.ljust(40)} {np.max(np.absolute([np.max(ts)/dt, np.min(ts)/dt]))} {np.std(ts)/dt}\n" result += "="*67 print(result) - self.assertTrue(passed) + assert passed @release_only def test_startup(self): @@ -420,7 +419,7 @@ def test_startup(self): startup_alert = msg.controlsState.alertText1 break expected = EVENTS[car.CarEvent.EventName.startup][ET.PERMANENT].alert_text_1 - self.assertEqual(startup_alert, expected, "wrong startup alert") + assert startup_alert == expected, "wrong startup alert" def test_engagable(self): no_entries = Counter() @@ -432,7 +431,3 @@ def test_engagable(self): eng = [m.controlsState.engageable for m in self.service_msgs['controlsState']] assert all(eng), \ f"Not engageable for whole segment:\n- controlsState.engageable: {Counter(eng)}\n- No entry events: {no_entries}" - - -if __name__ == "__main__": - unittest.main() diff --git a/selfdrive/test/test_updated.py b/selfdrive/test/test_updated.py index dd79e03de4a5e8..5220cfb2889eae 100755 --- a/selfdrive/test/test_updated.py +++ b/selfdrive/test/test_updated.py @@ -4,7 +4,6 @@ import pytest import time import tempfile -import unittest import shutil import signal import subprocess @@ -15,9 +14,9 @@ @pytest.mark.tici -class TestUpdated(unittest.TestCase): +class TestUpdated: - def setUp(self): + def setup_method(self): self.updated_proc = None self.tmp_dir = tempfile.TemporaryDirectory() @@ -59,7 +58,7 @@ def setUp(self): self.params.clear_all() os.sync() - def tearDown(self): + def teardown_method(self): try: if self.updated_proc is not None: self.updated_proc.terminate() @@ -167,7 +166,7 @@ def _check_update_state(self, update_available): t = self._read_param("LastUpdateTime") last_update_time = datetime.datetime.fromisoformat(t) td = datetime.datetime.utcnow() - last_update_time - self.assertLess(td.total_seconds(), 10) + assert td.total_seconds() < 10 self.params.remove("LastUpdateTime") # wait a bit for the rest of the params to be written @@ -175,13 +174,13 @@ def _check_update_state(self, update_available): # check params update = self._read_param("UpdateAvailable") - self.assertEqual(update == "1", update_available, f"UpdateAvailable: {repr(update)}") - self.assertEqual(self._read_param("UpdateFailedCount"), "0") + assert update == "1" == update_available, f"UpdateAvailable: {repr(update)}" + assert self._read_param("UpdateFailedCount") == "0" # TODO: check that the finalized update actually matches remote # check the .overlay_init and .overlay_consistent flags - self.assertTrue(os.path.isfile(os.path.join(self.basedir, ".overlay_init"))) - 
self.assertEqual(os.path.isfile(os.path.join(self.finalized_dir, ".overlay_consistent")), update_available) + assert os.path.isfile(os.path.join(self.basedir, ".overlay_init")) + assert os.path.isfile(os.path.join(self.finalized_dir, ".overlay_consistent")) == update_available # *** test cases *** @@ -214,7 +213,7 @@ def test_update(self): self._check_update_state(True) # Let the updater run for 10 cycles, and write an update every cycle - @unittest.skip("need to make this faster") + @pytest.mark.skip("need to make this faster") def test_update_loop(self): self._start_updater() @@ -243,12 +242,12 @@ def test_overlay_reinit(self): # run another cycle, should have a new mtime self._wait_for_update(clear_param=True) second_mtime = os.path.getmtime(overlay_init_fn) - self.assertTrue(first_mtime != second_mtime) + assert first_mtime != second_mtime # run another cycle, mtime should be same as last cycle self._wait_for_update(clear_param=True) new_mtime = os.path.getmtime(overlay_init_fn) - self.assertTrue(second_mtime == new_mtime) + assert second_mtime == new_mtime # Make sure updated exits if another instance is running def test_multiple_instances(self): @@ -260,7 +259,7 @@ def test_multiple_instances(self): # start another instance second_updated = self._get_updated_proc() ret_code = second_updated.wait(timeout=5) - self.assertTrue(ret_code is not None) + assert ret_code is not None # *** test cases with NEOS updates *** @@ -277,10 +276,10 @@ def test_clear_neos_cache(self): self._start_updater() self._wait_for_update(clear_param=True) self._check_update_state(False) - self.assertFalse(os.path.isdir(self.neosupdate_dir)) + assert not os.path.isdir(self.neosupdate_dir) # Let the updater run with no update for a cycle, then write an update - @unittest.skip("TODO: only runs on device") + @pytest.mark.skip("TODO: only runs on device") def test_update_with_neos_update(self): # bump the NEOS version and commit it self._run([ @@ -295,8 +294,4 @@ def test_update_with_neos_update(self): self._check_update_state(True) # TODO: more comprehensive check - self.assertTrue(os.path.isdir(self.neosupdate_dir)) - - -if __name__ == "__main__": - unittest.main() + assert os.path.isdir(self.neosupdate_dir) diff --git a/selfdrive/test/test_valgrind_replay.py b/selfdrive/test/test_valgrind_replay.py deleted file mode 100755 index 75520df91be9f8..00000000000000 --- a/selfdrive/test/test_valgrind_replay.py +++ /dev/null @@ -1,117 +0,0 @@ -#!/usr/bin/env python3 -import os -import threading -import time -import unittest -import subprocess -import signal - -if "CI" in os.environ: - def tqdm(x): - return x -else: - from tqdm import tqdm # type: ignore - -import cereal.messaging as messaging -from collections import namedtuple -from openpilot.tools.lib.logreader import LogReader -from openpilot.tools.lib.openpilotci import get_url -from openpilot.common.basedir import BASEDIR - -ProcessConfig = namedtuple('ProcessConfig', ['proc_name', 'pub_sub', 'ignore', 'command', 'path', 'segment', 'wait_for_response']) - -CONFIGS = [ - ProcessConfig( - proc_name="ubloxd", - pub_sub={ - "ubloxRaw": ["ubloxGnss", "gpsLocationExternal"], - }, - ignore=[], - command="./ubloxd", - path="system/ubloxd", - segment="0375fdf7b1ce594d|2019-06-13--08-32-25--3", - wait_for_response=True - ), -] - - -class TestValgrind(unittest.TestCase): - def extract_leak_sizes(self, log): - if "All heap blocks were freed -- no leaks are possible" in log: - return (0,0,0) - - log = log.replace(",","") # fixes casting to int issue with large leaks - err_lost1 
= log.split("definitely lost: ")[1] - err_lost2 = log.split("indirectly lost: ")[1] - err_lost3 = log.split("possibly lost: ")[1] - definitely_lost = int(err_lost1.split(" ")[0]) - indirectly_lost = int(err_lost2.split(" ")[0]) - possibly_lost = int(err_lost3.split(" ")[0]) - return (definitely_lost, indirectly_lost, possibly_lost) - - def valgrindlauncher(self, arg, cwd): - os.chdir(os.path.join(BASEDIR, cwd)) - # Run valgrind on a process - command = "valgrind --leak-check=full " + arg - p = subprocess.Popen(command, stderr=subprocess.PIPE, shell=True, preexec_fn=os.setsid) - - while not self.replay_done: - time.sleep(0.1) - - # Kill valgrind and extract leak output - os.killpg(os.getpgid(p.pid), signal.SIGINT) - _, err = p.communicate() - error_msg = str(err, encoding='utf-8') - with open(os.path.join(BASEDIR, "selfdrive/test/valgrind_logs.txt"), "a") as f: - f.write(error_msg) - f.write(5 * "\n") - definitely_lost, indirectly_lost, possibly_lost = self.extract_leak_sizes(error_msg) - if max(definitely_lost, indirectly_lost, possibly_lost) > 0: - self.leak = True - print("LEAKS from", arg, "\nDefinitely lost:", definitely_lost, "\nIndirectly lost", indirectly_lost, "\nPossibly lost", possibly_lost) - else: - self.leak = False - - def replay_process(self, config, logreader): - pub_sockets = list(config.pub_sub.keys()) # We dump data from logs here - sub_sockets = [s for _, sub in config.pub_sub.items() for s in sub] # We get responses here - pm = messaging.PubMaster(pub_sockets) - sm = messaging.SubMaster(sub_sockets) - - print("Sorting logs") - all_msgs = sorted(logreader, key=lambda msg: msg.logMonoTime) - pub_msgs = [msg for msg in all_msgs if msg.which() in list(config.pub_sub.keys())] - - thread = threading.Thread(target=self.valgrindlauncher, args=(config.command, config.path)) - thread.daemon = True - thread.start() - - while not all(pm.all_readers_updated(s) for s in config.pub_sub.keys()): - time.sleep(0) - - for msg in tqdm(pub_msgs): - pm.send(msg.which(), msg.as_builder()) - if config.wait_for_response: - sm.update(100) - - self.replay_done = True - - def test_config(self): - open(os.path.join(BASEDIR, "selfdrive/test/valgrind_logs.txt"), "w").close() - - for cfg in CONFIGS: - self.leak = None - self.replay_done = False - - r, n = cfg.segment.rsplit("--", 1) - lr = LogReader(get_url(r, n)) - self.replay_process(cfg, lr) - - while self.leak is None: - time.sleep(0.1) # Wait for the valgrind to finish - - self.assertFalse(self.leak) - - -if __name__ == "__main__": - unittest.main() diff --git a/selfdrive/thermald/tests/test_fan_controller.py b/selfdrive/thermald/tests/test_fan_controller.py index 7081e1353ee087..c2b1a64509162b 100755 --- a/selfdrive/thermald/tests/test_fan_controller.py +++ b/selfdrive/thermald/tests/test_fan_controller.py @@ -1,17 +1,15 @@ #!/usr/bin/env python3 -import unittest -from unittest.mock import Mock, patch -from parameterized import parameterized +import pytest from openpilot.selfdrive.thermald.fan_controller import TiciFanController -ALL_CONTROLLERS = [(TiciFanController,)] +ALL_CONTROLLERS = [TiciFanController] -def patched_controller(controller_class): - with patch("os.system", new=Mock()): - return controller_class() +def patched_controller(mocker, controller_class): + mocker.patch("os.system", new=mocker.Mock()) + return controller_class() -class TestFanController(unittest.TestCase): +class TestFanController: def wind_up(self, controller, ignition=True): for _ in range(1000): controller.update(100, ignition) @@ -20,37 +18,34 @@ def 
wind_down(self, controller, ignition=False): for _ in range(1000): controller.update(10, ignition) - @parameterized.expand(ALL_CONTROLLERS) - def test_hot_onroad(self, controller_class): - controller = patched_controller(controller_class) + @pytest.mark.parametrize("controller_class", ALL_CONTROLLERS) + def test_hot_onroad(self, mocker, controller_class): + controller = patched_controller(mocker, controller_class) self.wind_up(controller) - self.assertGreaterEqual(controller.update(100, True), 70) + assert controller.update(100, True) >= 70 - @parameterized.expand(ALL_CONTROLLERS) - def test_offroad_limits(self, controller_class): - controller = patched_controller(controller_class) + @pytest.mark.parametrize("controller_class", ALL_CONTROLLERS) + def test_offroad_limits(self, mocker, controller_class): + controller = patched_controller(mocker, controller_class) self.wind_up(controller) - self.assertLessEqual(controller.update(100, False), 30) + assert controller.update(100, False) <= 30 - @parameterized.expand(ALL_CONTROLLERS) - def test_no_fan_wear(self, controller_class): - controller = patched_controller(controller_class) + @pytest.mark.parametrize("controller_class", ALL_CONTROLLERS) + def test_no_fan_wear(self, mocker, controller_class): + controller = patched_controller(mocker, controller_class) self.wind_down(controller) - self.assertEqual(controller.update(10, False), 0) + assert controller.update(10, False) == 0 - @parameterized.expand(ALL_CONTROLLERS) - def test_limited(self, controller_class): - controller = patched_controller(controller_class) + @pytest.mark.parametrize("controller_class", ALL_CONTROLLERS) + def test_limited(self, mocker, controller_class): + controller = patched_controller(mocker, controller_class) self.wind_up(controller, True) - self.assertEqual(controller.update(100, True), 100) + assert controller.update(100, True) == 100 - @parameterized.expand(ALL_CONTROLLERS) - def test_windup_speed(self, controller_class): - controller = patched_controller(controller_class) + @pytest.mark.parametrize("controller_class", ALL_CONTROLLERS) + def test_windup_speed(self, mocker, controller_class): + controller = patched_controller(mocker, controller_class) self.wind_down(controller, True) for _ in range(10): controller.update(90, True) - self.assertGreaterEqual(controller.update(90, True), 60) - -if __name__ == "__main__": - unittest.main() + assert controller.update(90, True) >= 60 diff --git a/selfdrive/thermald/tests/test_power_monitoring.py b/selfdrive/thermald/tests/test_power_monitoring.py index c3a890f068e658..f68191475b5b05 100755 --- a/selfdrive/thermald/tests/test_power_monitoring.py +++ b/selfdrive/thermald/tests/test_power_monitoring.py @@ -1,12 +1,10 @@ #!/usr/bin/env python3 -import unittest -from unittest.mock import patch +import pytest from openpilot.common.params import Params from openpilot.selfdrive.thermald.power_monitoring import PowerMonitoring, CAR_BATTERY_CAPACITY_uWh, \ CAR_CHARGING_RATE_W, VBATT_PAUSE_CHARGING, DELAY_SHUTDOWN_TIME_S - # Create fake time ssb = 0. 
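test_fan_controller.py above trades parameterized.expand for pytest.mark.parametrize and unittest.mock.patch for pytest-mock's mocker fixture, whose patches are undone automatically when each test finishes. A rough sketch of the combined pattern; TrivialFan is a stand-in controller, not openpilot code:

    import pytest

    class TrivialFan:
      def update(self, temp, ignition):
        return 100 if temp > 90 else 0

    @pytest.mark.parametrize("controller_class", [TrivialFan])
    def test_hot(mocker, controller_class):
      mocker.patch("os.system", new=mocker.Mock())  # keep any hardware calls inert
      controller = controller_class()
      assert controller.update(100, True) == 100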
def mock_time_monotonic(): @@ -18,163 +16,169 @@ def mock_time_monotonic(): GOOD_VOLTAGE = 12 * 1e3 VOLTAGE_BELOW_PAUSE_CHARGING = (VBATT_PAUSE_CHARGING - 1) * 1e3 -def pm_patch(name, value, constant=False): +def pm_patch(mocker, name, value, constant=False): if constant: - return patch(f"openpilot.selfdrive.thermald.power_monitoring.{name}", value) - return patch(f"openpilot.selfdrive.thermald.power_monitoring.{name}", return_value=value) + mocker.patch(f"openpilot.selfdrive.thermald.power_monitoring.{name}", value) + else: + mocker.patch(f"openpilot.selfdrive.thermald.power_monitoring.{name}", return_value=value) + + +@pytest.fixture(autouse=True) +def mock_time(mocker): + mocker.patch("time.monotonic", mock_time_monotonic) -@patch("time.monotonic", new=mock_time_monotonic) -class TestPowerMonitoring(unittest.TestCase): - def setUp(self): +class TestPowerMonitoring: + def setup_method(self): self.params = Params() # Test to see that it doesn't do anything when pandaState is None - def test_pandaState_present(self): + def test_panda_state_present(self): pm = PowerMonitoring() for _ in range(10): pm.calculate(None, None) - self.assertEqual(pm.get_power_used(), 0) - self.assertEqual(pm.get_car_battery_capacity(), (CAR_BATTERY_CAPACITY_uWh / 10)) + assert pm.get_power_used() == 0 + assert pm.get_car_battery_capacity() == (CAR_BATTERY_CAPACITY_uWh / 10) # Test to see that it doesn't integrate offroad when ignition is True def test_offroad_ignition(self): pm = PowerMonitoring() for _ in range(10): pm.calculate(GOOD_VOLTAGE, True) - self.assertEqual(pm.get_power_used(), 0) + assert pm.get_power_used() == 0 # Test to see that it integrates with discharging battery - def test_offroad_integration_discharging(self): + def test_offroad_integration_discharging(self, mocker): POWER_DRAW = 4 - with pm_patch("HARDWARE.get_current_power_draw", POWER_DRAW): - pm = PowerMonitoring() - for _ in range(TEST_DURATION_S + 1): - pm.calculate(GOOD_VOLTAGE, False) - expected_power_usage = ((TEST_DURATION_S/3600) * POWER_DRAW * 1e6) - self.assertLess(abs(pm.get_power_used() - expected_power_usage), 10) + pm_patch(mocker, "HARDWARE.get_current_power_draw", POWER_DRAW) + pm = PowerMonitoring() + for _ in range(TEST_DURATION_S + 1): + pm.calculate(GOOD_VOLTAGE, False) + expected_power_usage = ((TEST_DURATION_S/3600) * POWER_DRAW * 1e6) + assert abs(pm.get_power_used() - expected_power_usage) < 10 # Test to check positive integration of car_battery_capacity - def test_car_battery_integration_onroad(self): + def test_car_battery_integration_onroad(self, mocker): POWER_DRAW = 4 - with pm_patch("HARDWARE.get_current_power_draw", POWER_DRAW): - pm = PowerMonitoring() - pm.car_battery_capacity_uWh = 0 - for _ in range(TEST_DURATION_S + 1): - pm.calculate(GOOD_VOLTAGE, True) - expected_capacity = ((TEST_DURATION_S/3600) * CAR_CHARGING_RATE_W * 1e6) - self.assertLess(abs(pm.get_car_battery_capacity() - expected_capacity), 10) + pm_patch(mocker, "HARDWARE.get_current_power_draw", POWER_DRAW) + pm = PowerMonitoring() + pm.car_battery_capacity_uWh = 0 + for _ in range(TEST_DURATION_S + 1): + pm.calculate(GOOD_VOLTAGE, True) + expected_capacity = ((TEST_DURATION_S/3600) * CAR_CHARGING_RATE_W * 1e6) + assert abs(pm.get_car_battery_capacity() - expected_capacity) < 10 # Test to check positive integration upper limit - def test_car_battery_integration_upper_limit(self): + def test_car_battery_integration_upper_limit(self, mocker): POWER_DRAW = 4 - with pm_patch("HARDWARE.get_current_power_draw", POWER_DRAW): - pm = PowerMonitoring() 
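The autouse fixture above replaces the old class-level @patch("time.monotonic", ...) decorator: marking a fixture autouse applies it to every test in the module without being requested by name. A self-contained sketch of that fake-clock idea, again assuming pytest-mock is installed; the one-second step is arbitrary:

    import time
    import pytest

    _fake_now = 0.0

    def fake_monotonic():
      # every call advances the fake clock by one second
      global _fake_now
      _fake_now += 1.0
      return _fake_now

    @pytest.fixture(autouse=True)
    def mock_time(mocker):
      # patched for every test in the module, reverted automatically afterwards
      mocker.patch("time.monotonic", fake_monotonic)

    def test_clock_advances():
      assert time.monotonic() < time.monotonic()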
- pm.car_battery_capacity_uWh = CAR_BATTERY_CAPACITY_uWh - 1000 - for _ in range(TEST_DURATION_S + 1): - pm.calculate(GOOD_VOLTAGE, True) - estimated_capacity = CAR_BATTERY_CAPACITY_uWh + (CAR_CHARGING_RATE_W / 3600 * 1e6) - self.assertLess(abs(pm.get_car_battery_capacity() - estimated_capacity), 10) + pm_patch(mocker, "HARDWARE.get_current_power_draw", POWER_DRAW) + pm = PowerMonitoring() + pm.car_battery_capacity_uWh = CAR_BATTERY_CAPACITY_uWh - 1000 + for _ in range(TEST_DURATION_S + 1): + pm.calculate(GOOD_VOLTAGE, True) + estimated_capacity = CAR_BATTERY_CAPACITY_uWh + (CAR_CHARGING_RATE_W / 3600 * 1e6) + assert abs(pm.get_car_battery_capacity() - estimated_capacity) < 10 # Test to check negative integration of car_battery_capacity - def test_car_battery_integration_offroad(self): + def test_car_battery_integration_offroad(self, mocker): POWER_DRAW = 4 - with pm_patch("HARDWARE.get_current_power_draw", POWER_DRAW): - pm = PowerMonitoring() - pm.car_battery_capacity_uWh = CAR_BATTERY_CAPACITY_uWh - for _ in range(TEST_DURATION_S + 1): - pm.calculate(GOOD_VOLTAGE, False) - expected_capacity = CAR_BATTERY_CAPACITY_uWh - ((TEST_DURATION_S/3600) * POWER_DRAW * 1e6) - self.assertLess(abs(pm.get_car_battery_capacity() - expected_capacity), 10) + pm_patch(mocker, "HARDWARE.get_current_power_draw", POWER_DRAW) + pm = PowerMonitoring() + pm.car_battery_capacity_uWh = CAR_BATTERY_CAPACITY_uWh + for _ in range(TEST_DURATION_S + 1): + pm.calculate(GOOD_VOLTAGE, False) + expected_capacity = CAR_BATTERY_CAPACITY_uWh - ((TEST_DURATION_S/3600) * POWER_DRAW * 1e6) + assert abs(pm.get_car_battery_capacity() - expected_capacity) < 10 # Test to check negative integration lower limit - def test_car_battery_integration_lower_limit(self): + def test_car_battery_integration_lower_limit(self, mocker): POWER_DRAW = 4 - with pm_patch("HARDWARE.get_current_power_draw", POWER_DRAW): - pm = PowerMonitoring() - pm.car_battery_capacity_uWh = 1000 - for _ in range(TEST_DURATION_S + 1): - pm.calculate(GOOD_VOLTAGE, False) - estimated_capacity = 0 - ((1/3600) * POWER_DRAW * 1e6) - self.assertLess(abs(pm.get_car_battery_capacity() - estimated_capacity), 10) + pm_patch(mocker, "HARDWARE.get_current_power_draw", POWER_DRAW) + pm = PowerMonitoring() + pm.car_battery_capacity_uWh = 1000 + for _ in range(TEST_DURATION_S + 1): + pm.calculate(GOOD_VOLTAGE, False) + estimated_capacity = 0 - ((1/3600) * POWER_DRAW * 1e6) + assert abs(pm.get_car_battery_capacity() - estimated_capacity) < 10 # Test to check policy of stopping charging after MAX_TIME_OFFROAD_S - def test_max_time_offroad(self): + def test_max_time_offroad(self, mocker): MOCKED_MAX_OFFROAD_TIME = 3600 POWER_DRAW = 0 # To stop shutting down for other reasons - with pm_patch("MAX_TIME_OFFROAD_S", MOCKED_MAX_OFFROAD_TIME, constant=True), pm_patch("HARDWARE.get_current_power_draw", POWER_DRAW): - pm = PowerMonitoring() - pm.car_battery_capacity_uWh = CAR_BATTERY_CAPACITY_uWh - start_time = ssb - ignition = False - while ssb <= start_time + MOCKED_MAX_OFFROAD_TIME: - pm.calculate(GOOD_VOLTAGE, ignition) - if (ssb - start_time) % 1000 == 0 and ssb < start_time + MOCKED_MAX_OFFROAD_TIME: - self.assertFalse(pm.should_shutdown(ignition, True, start_time, False)) - self.assertTrue(pm.should_shutdown(ignition, True, start_time, False)) - - def test_car_voltage(self): + pm_patch(mocker, "MAX_TIME_OFFROAD_S", MOCKED_MAX_OFFROAD_TIME, constant=True) + pm_patch(mocker, "HARDWARE.get_current_power_draw", POWER_DRAW) + pm = PowerMonitoring() + pm.car_battery_capacity_uWh = 
CAR_BATTERY_CAPACITY_uWh + start_time = ssb + ignition = False + while ssb <= start_time + MOCKED_MAX_OFFROAD_TIME: + pm.calculate(GOOD_VOLTAGE, ignition) + if (ssb - start_time) % 1000 == 0 and ssb < start_time + MOCKED_MAX_OFFROAD_TIME: + assert not pm.should_shutdown(ignition, True, start_time, False) + assert pm.should_shutdown(ignition, True, start_time, False) + + def test_car_voltage(self, mocker): POWER_DRAW = 0 # To stop shutting down for other reasons TEST_TIME = 350 VOLTAGE_SHUTDOWN_MIN_OFFROAD_TIME_S = 50 - with pm_patch("VOLTAGE_SHUTDOWN_MIN_OFFROAD_TIME_S", VOLTAGE_SHUTDOWN_MIN_OFFROAD_TIME_S, constant=True), \ - pm_patch("HARDWARE.get_current_power_draw", POWER_DRAW): - pm = PowerMonitoring() - pm.car_battery_capacity_uWh = CAR_BATTERY_CAPACITY_uWh - ignition = False - start_time = ssb - for i in range(TEST_TIME): - pm.calculate(VOLTAGE_BELOW_PAUSE_CHARGING, ignition) - if i % 10 == 0: - self.assertEqual(pm.should_shutdown(ignition, True, start_time, True), - (pm.car_voltage_mV < VBATT_PAUSE_CHARGING * 1e3 and - (ssb - start_time) > VOLTAGE_SHUTDOWN_MIN_OFFROAD_TIME_S and - (ssb - start_time) > DELAY_SHUTDOWN_TIME_S)) - self.assertTrue(pm.should_shutdown(ignition, True, start_time, True)) + pm_patch(mocker, "VOLTAGE_SHUTDOWN_MIN_OFFROAD_TIME_S", VOLTAGE_SHUTDOWN_MIN_OFFROAD_TIME_S, constant=True) + pm_patch(mocker, "HARDWARE.get_current_power_draw", POWER_DRAW) + pm = PowerMonitoring() + pm.car_battery_capacity_uWh = CAR_BATTERY_CAPACITY_uWh + ignition = False + start_time = ssb + for i in range(TEST_TIME): + pm.calculate(VOLTAGE_BELOW_PAUSE_CHARGING, ignition) + if i % 10 == 0: + assert pm.should_shutdown(ignition, True, start_time, True) == \ + (pm.car_voltage_mV < VBATT_PAUSE_CHARGING * 1e3 and \ + (ssb - start_time) > VOLTAGE_SHUTDOWN_MIN_OFFROAD_TIME_S and \ + (ssb - start_time) > DELAY_SHUTDOWN_TIME_S) + assert pm.should_shutdown(ignition, True, start_time, True) # Test to check policy of not stopping charging when DisablePowerDown is set - def test_disable_power_down(self): + def test_disable_power_down(self, mocker): POWER_DRAW = 0 # To stop shutting down for other reasons TEST_TIME = 100 self.params.put_bool("DisablePowerDown", True) - with pm_patch("HARDWARE.get_current_power_draw", POWER_DRAW): - pm = PowerMonitoring() - pm.car_battery_capacity_uWh = CAR_BATTERY_CAPACITY_uWh - ignition = False - for i in range(TEST_TIME): - pm.calculate(VOLTAGE_BELOW_PAUSE_CHARGING, ignition) - if i % 10 == 0: - self.assertFalse(pm.should_shutdown(ignition, True, ssb, False)) - self.assertFalse(pm.should_shutdown(ignition, True, ssb, False)) + pm_patch(mocker, "HARDWARE.get_current_power_draw", POWER_DRAW) + pm = PowerMonitoring() + pm.car_battery_capacity_uWh = CAR_BATTERY_CAPACITY_uWh + ignition = False + for i in range(TEST_TIME): + pm.calculate(VOLTAGE_BELOW_PAUSE_CHARGING, ignition) + if i % 10 == 0: + assert not pm.should_shutdown(ignition, True, ssb, False) + assert not pm.should_shutdown(ignition, True, ssb, False) # Test to check policy of not stopping charging when ignition - def test_ignition(self): + def test_ignition(self, mocker): POWER_DRAW = 0 # To stop shutting down for other reasons TEST_TIME = 100 - with pm_patch("HARDWARE.get_current_power_draw", POWER_DRAW): - pm = PowerMonitoring() - pm.car_battery_capacity_uWh = CAR_BATTERY_CAPACITY_uWh - ignition = True - for i in range(TEST_TIME): - pm.calculate(VOLTAGE_BELOW_PAUSE_CHARGING, ignition) - if i % 10 == 0: - self.assertFalse(pm.should_shutdown(ignition, True, ssb, False)) - 
self.assertFalse(pm.should_shutdown(ignition, True, ssb, False)) + pm_patch(mocker, "HARDWARE.get_current_power_draw", POWER_DRAW) + pm = PowerMonitoring() + pm.car_battery_capacity_uWh = CAR_BATTERY_CAPACITY_uWh + ignition = True + for i in range(TEST_TIME): + pm.calculate(VOLTAGE_BELOW_PAUSE_CHARGING, ignition) + if i % 10 == 0: + assert not pm.should_shutdown(ignition, True, ssb, False) + assert not pm.should_shutdown(ignition, True, ssb, False) # Test to check policy of not stopping charging when harness is not connected - def test_harness_connection(self): + def test_harness_connection(self, mocker): POWER_DRAW = 0 # To stop shutting down for other reasons TEST_TIME = 100 - with pm_patch("HARDWARE.get_current_power_draw", POWER_DRAW): - pm = PowerMonitoring() - pm.car_battery_capacity_uWh = CAR_BATTERY_CAPACITY_uWh + pm_patch(mocker, "HARDWARE.get_current_power_draw", POWER_DRAW) + pm = PowerMonitoring() + pm.car_battery_capacity_uWh = CAR_BATTERY_CAPACITY_uWh - ignition = False - for i in range(TEST_TIME): - pm.calculate(VOLTAGE_BELOW_PAUSE_CHARGING, ignition) - if i % 10 == 0: - self.assertFalse(pm.should_shutdown(ignition, False, ssb, False)) - self.assertFalse(pm.should_shutdown(ignition, False, ssb, False)) + ignition = False + for i in range(TEST_TIME): + pm.calculate(VOLTAGE_BELOW_PAUSE_CHARGING, ignition) + if i % 10 == 0: + assert not pm.should_shutdown(ignition, False, ssb, False) + assert not pm.should_shutdown(ignition, False, ssb, False) def test_delay_shutdown_time(self): pm = PowerMonitoring() @@ -186,15 +190,11 @@ def test_delay_shutdown_time(self): pm.calculate(VOLTAGE_BELOW_PAUSE_CHARGING, ignition) while ssb < offroad_timestamp + DELAY_SHUTDOWN_TIME_S: - self.assertFalse(pm.should_shutdown(ignition, in_car, + assert not pm.should_shutdown(ignition, in_car, offroad_timestamp, - started_seen), - f"Should not shutdown before {DELAY_SHUTDOWN_TIME_S} seconds offroad time") - self.assertTrue(pm.should_shutdown(ignition, in_car, + started_seen), \ + f"Should not shutdown before {DELAY_SHUTDOWN_TIME_S} seconds offroad time" + assert pm.should_shutdown(ignition, in_car, offroad_timestamp, - started_seen), - f"Should shutdown after {DELAY_SHUTDOWN_TIME_S} seconds offroad time") - - -if __name__ == "__main__": - unittest.main() + started_seen), \ + f"Should shutdown after {DELAY_SHUTDOWN_TIME_S} seconds offroad time" diff --git a/selfdrive/ui/qt/offroad/driverview.cc b/selfdrive/ui/qt/offroad/driverview.cc index df9bb246510cfb..f14d0593327f70 100644 --- a/selfdrive/ui/qt/offroad/driverview.cc +++ b/selfdrive/ui/qt/offroad/driverview.cc @@ -7,7 +7,7 @@ const int FACE_IMG_SIZE = 130; -DriverViewWindow::DriverViewWindow(QWidget* parent) : CameraWidget("camerad", VISION_STREAM_DRIVER, true, parent) { +DriverViewWindow::DriverViewWindow(QWidget* parent) : CameraView("camerad", VISION_STREAM_DRIVER, true, parent) { face_img = loadPixmap("../assets/img_driver_face_static.png", {FACE_IMG_SIZE, FACE_IMG_SIZE}); QObject::connect(this, &CameraWidget::clicked, this, &DriverViewWindow::done); QObject::connect(device(), &Device::interactiveTimeout, this, [this]() { @@ -20,22 +20,20 @@ DriverViewWindow::DriverViewWindow(QWidget* parent) : CameraWidget("camerad", VI void DriverViewWindow::showEvent(QShowEvent* event) { params.putBool("IsDriverViewEnabled", true); device()->resetInteractiveTimeout(60); - CameraWidget::showEvent(event); + CameraView::showEvent(event); } void DriverViewWindow::hideEvent(QHideEvent* event) { params.putBool("IsDriverViewEnabled", false); - stopVipcThread(); - 
CameraWidget::hideEvent(event); + CameraView::hideEvent(event); } void DriverViewWindow::paintGL() { - CameraWidget::paintGL(); + CameraView::paintGL(); - std::lock_guard lk(frame_lock); QPainter p(this); // startup msg - if (frames.empty()) { + if (recent_frames.empty()) { p.setPen(Qt::white); p.setRenderHint(QPainter::TextAntialiasing); p.setFont(InterFont(100, QFont::Bold)); @@ -47,7 +45,6 @@ void DriverViewWindow::paintGL() { cereal::DriverStateV2::Reader driver_state = sm["driverStateV2"].getDriverStateV2(); bool is_rhd = driver_state.getWheelOnRightProb() > 0.5; auto driver_data = is_rhd ? driver_state.getRightDriverData() : driver_state.getLeftDriverData(); - bool face_detected = driver_data.getFaceProb() > 0.7; if (face_detected) { auto fxy_list = driver_data.getFacePosition(); diff --git a/selfdrive/ui/qt/offroad/driverview.h b/selfdrive/ui/qt/offroad/driverview.h index 155e4ede32073d..97dd2faa7856b3 100644 --- a/selfdrive/ui/qt/offroad/driverview.h +++ b/selfdrive/ui/qt/offroad/driverview.h @@ -2,7 +2,7 @@ #include "selfdrive/ui/qt/widgets/cameraview.h" -class DriverViewWindow : public CameraWidget { +class DriverViewWindow : public CameraView { Q_OBJECT public: diff --git a/selfdrive/ui/qt/onroad/annotated_camera.cc b/selfdrive/ui/qt/onroad/annotated_camera.cc index f7fb6b480f0f7a..637d7275811335 100644 --- a/selfdrive/ui/qt/onroad/annotated_camera.cc +++ b/selfdrive/ui/qt/onroad/annotated_camera.cc @@ -6,11 +6,11 @@ #include #include "common/swaglog.h" -#include "selfdrive/ui/qt/onroad/buttons.h" #include "selfdrive/ui/qt/util.h" // Window that shows camera view and variety of info drawn on top -AnnotatedCameraWidget::AnnotatedCameraWidget(VisionStreamType type, QWidget* parent) : fps_filter(UI_FREQ, 3, 1. / UI_FREQ), CameraWidget("camerad", type, true, parent) { +AnnotatedCameraWidget::AnnotatedCameraWidget(VisionStreamType type, QWidget *parent) + : fps_filter(UI_FREQ, 3, 1. / UI_FREQ), CameraWidget("camerad", type, true, parent) { pm = std::make_unique>({"uiDebug"}); main_layout = new QVBoxLayout(this); @@ -76,6 +76,8 @@ void AnnotatedCameraWidget::updateState(const UIState &s) { map_settings_btn->setVisible(!hideBottomIcons); main_layout->setAlignment(map_settings_btn, (rightHandDM ? Qt::AlignLeft : Qt::AlignRight) | Qt::AlignBottom); } + + update(); } void AnnotatedCameraWidget::drawHud(QPainter &p) { @@ -353,25 +355,8 @@ void AnnotatedCameraWidget::paintGL() { UIState *s = uiState(); SubMaster &sm = *(s->sm); const double start_draw_t = millis_since_boot(); - const cereal::ModelDataV2::Reader &model = sm["modelV2"].getModelV2(); - // draw camera frame { - std::lock_guard lk(frame_lock); - - if (frames.empty()) { - if (skip_frame_count > 0) { - skip_frame_count--; - qDebug() << "skipping frame, not ready"; - return; - } - } else { - // skip drawing up to this many frames if we're - // missing camera frames. this smooths out the - // transitions from the narrow and wide cameras - skip_frame_count = 5; - } - // Wide or narrow cam dependent on speed bool has_wide_cam = available_streams.count(VISION_STREAM_WIDE_ROAD); if (has_wide_cam) { @@ -387,14 +372,16 @@ void AnnotatedCameraWidget::paintGL() { } CameraWidget::setStreamType(wide_cam_requested ? VISION_STREAM_WIDE_ROAD : VISION_STREAM_ROAD); - s->scene.wide_cam = CameraWidget::getStreamType() == VISION_STREAM_WIDE_ROAD; + s->scene.wide_cam = CameraWidget::streamType() == VISION_STREAM_WIDE_ROAD; if (s->scene.calibration_valid) { auto calib = s->scene.wide_cam ? 
s->scene.view_from_wide_calib : s->scene.view_from_calib; CameraWidget::updateCalibration(calib); } else { CameraWidget::updateCalibration(DEFAULT_CALIBRATION); } - CameraWidget::setFrameId(model.getFrameId()); + + // Draw the frame based on the UI plan's frame ID + CameraWidget::setFrameId(sm["uiPlan"].getUiPlan().getFrameId()); CameraWidget::paintGL(); } @@ -403,6 +390,7 @@ void AnnotatedCameraWidget::paintGL() { painter.setPen(Qt::NoPen); if (s->scene.world_objects_visible) { + const cereal::ModelDataV2::Reader &model = sm["modelV2"].getModelV2(); update_model(s, model, sm["uiPlan"].getUiPlan()); drawLaneLines(painter, s); diff --git a/selfdrive/ui/qt/onroad/annotated_camera.h b/selfdrive/ui/qt/onroad/annotated_camera.h index 0be4adfffa68dd..b25846001e2c66 100644 --- a/selfdrive/ui/qt/onroad/annotated_camera.h +++ b/selfdrive/ui/qt/onroad/annotated_camera.h @@ -37,7 +37,6 @@ class AnnotatedCameraWidget : public CameraWidget { int status = STATUS_DISENGAGED; std::unique_ptr pm; - int skip_frame_count = 0; bool wide_cam_requested = false; protected: diff --git a/selfdrive/ui/qt/onroad/onroad_home.cc b/selfdrive/ui/qt/onroad/onroad_home.cc index 1c12a2c3a2e971..66eb1812e63a94 100644 --- a/selfdrive/ui/qt/onroad/onroad_home.cc +++ b/selfdrive/ui/qt/onroad/onroad_home.cc @@ -1,6 +1,7 @@ #include "selfdrive/ui/qt/onroad/onroad_home.h" #include +#include #ifdef ENABLE_MAPS #include "selfdrive/ui/qt/maps/map_helpers.h" diff --git a/selfdrive/ui/qt/widgets/cameraview.cc b/selfdrive/ui/qt/widgets/cameraview.cc index e4f13962683aa5..a7d4432de09671 100644 --- a/selfdrive/ui/qt/widgets/cameraview.cc +++ b/selfdrive/ui/qt/widgets/cameraview.cc @@ -97,37 +97,22 @@ mat4 get_fit_view_transform(float widget_aspect_ratio, float frame_aspect_ratio) } // namespace -CameraWidget::CameraWidget(std::string stream_name, VisionStreamType type, bool zoom, QWidget* parent) : - stream_name(stream_name), active_stream_type(type), requested_stream_type(type), zoomed_view(zoom), QOpenGLWidget(parent) { - setAttribute(Qt::WA_OpaquePaintEvent); - qRegisterMetaType>("availableStreams"); - QObject::connect(this, &CameraWidget::vipcThreadConnected, this, &CameraWidget::vipcConnected, Qt::BlockingQueuedConnection); - QObject::connect(this, &CameraWidget::vipcThreadFrameReceived, this, &CameraWidget::vipcFrameReceived, Qt::QueuedConnection); - QObject::connect(this, &CameraWidget::vipcAvailableStreamsUpdated, this, &CameraWidget::availableStreamsUpdated, Qt::QueuedConnection); +CameraWidget::CameraWidget(std::string stream_name, VisionStreamType type, bool zoom, QWidget *parent) + : stream_name(stream_name), stream_type(type), zoomed_view(zoom), QOpenGLWidget(parent) { } CameraWidget::~CameraWidget() { makeCurrent(); - stopVipcThread(); if (isValid()) { glDeleteVertexArrays(1, &frame_vao); glDeleteBuffers(1, &frame_vbo); glDeleteBuffers(1, &frame_ibo); glDeleteBuffers(2, textures); } + clearEGLImages(); doneCurrent(); } -// Qt uses device-independent pixels, depending on platform this may be -// different to what OpenGL uses -int CameraWidget::glWidth() { - return width() * devicePixelRatio(); -} - -int CameraWidget::glHeight() { - return height() * devicePixelRatio(); -} - void CameraWidget::initializeGL() { initializeOpenGLFunctions(); @@ -141,7 +126,7 @@ void CameraWidget::initializeGL() { GLint frame_pos_loc = program->attributeLocation("aPosition"); GLint frame_texcoord_loc = program->attributeLocation("aTexCoord"); - auto [x1, x2, y1, y2] = requested_stream_type == VISION_STREAM_DRIVER ? 
std::tuple(0.f, 1.f, 1.f, 0.f) : std::tuple(1.f, 0.f, 1.f, 0.f); + auto [x1, x2, y1, y2] = stream_type == VISION_STREAM_DRIVER ? std::tuple(0.f, 1.f, 1.f, 0.f) : std::tuple(1.f, 0.f, 1.f, 0.f); const uint8_t frame_indicies[] = {0, 1, 2, 0, 2, 3}; const float frame_coords[4][4] = { {-1.0, -1.0, x2, y1}, // bl @@ -178,45 +163,11 @@ void CameraWidget::initializeGL() { #endif } -void CameraWidget::showEvent(QShowEvent *event) { - if (!vipc_thread) { - clearFrames(); - vipc_thread = new QThread(); - connect(vipc_thread, &QThread::started, [=]() { vipcThread(); }); - connect(vipc_thread, &QThread::finished, vipc_thread, &QObject::deleteLater); - vipc_thread->start(); - } -} - -void CameraWidget::stopVipcThread() { - makeCurrent(); - if (vipc_thread) { - vipc_thread->requestInterruption(); - vipc_thread->quit(); - vipc_thread->wait(); - vipc_thread = nullptr; - } - -#ifdef QCOM2 - EGLDisplay egl_display = eglGetCurrentDisplay(); - assert(egl_display != EGL_NO_DISPLAY); - for (auto &pair : egl_images) { - eglDestroyImageKHR(egl_display, pair.second); - assert(eglGetError() == EGL_SUCCESS); - } - egl_images.clear(); -#endif -} - -void CameraWidget::availableStreamsUpdated(std::set streams) { - available_streams = streams; -} - void CameraWidget::updateFrameMat() { int w = glWidth(), h = glHeight(); if (zoomed_view) { - if (active_stream_type == VISION_STREAM_DRIVER) { + if (streamType() == VISION_STREAM_DRIVER) { if (stream_width > 0 && stream_height > 0) { frame_mat = get_driver_view_transform(w, h, stream_width, stream_height); } @@ -225,7 +176,7 @@ void CameraWidget::updateFrameMat() { // to ensure this ends up in the middle of the screen // for narrow come and a little lower for wide cam. // TODO: use proper perspective transform? - if (active_stream_type == VISION_STREAM_WIDE_ROAD) { + if (streamType() == VISION_STREAM_WIDE_ROAD) { intrinsic_matrix = ECAM_INTRINSIC_MATRIX; zoom = 2.0; } else { @@ -247,13 +198,12 @@ void CameraWidget::updateFrameMat() { float zx = zoom * 2 * intrinsic_matrix.v[2] / w; float zy = zoom * 2 * intrinsic_matrix.v[5] / h; - const mat4 frame_transform = {{ + frame_mat = {{ zx, 0.0, 0.0, -x_offset / w * 2, 0.0, zy, 0.0, y_offset / h * 2, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 1.0, }}; - frame_mat = frame_transform; } } else if (stream_width > 0 && stream_height > 0) { // fit frame to widget size @@ -271,25 +221,15 @@ void CameraWidget::paintGL() { glClearColor(bg.redF(), bg.greenF(), bg.blueF(), bg.alphaF()); glClear(GL_STENCIL_BUFFER_BIT | GL_COLOR_BUFFER_BIT); - std::lock_guard lk(frame_lock); - if (frames.empty()) return; - - int frame_idx = frames.size() - 1; - - // Always draw latest frame until sync logic is more stable - // for (frame_idx = 0; frame_idx < frames.size() - 1; frame_idx++) { - // if (frames[frame_idx].first == draw_frame_id) break; - // } + VisionBuf *frame = receiveFrame(draw_frame_id); + if (!frame) return; // Log duplicate/dropped frames - if (frames[frame_idx].first == prev_frame_id) { - qDebug() << "Drawing same frame twice" << frames[frame_idx].first; - } else if (frames[frame_idx].first != prev_frame_id + 1) { - qDebug() << "Skipped frame" << frames[frame_idx].first; + uint32_t frame_id = frame->get_frame_id(); + if (prev_frame_id != INVALID_FRAME_ID && frame_id != prev_frame_id + 1) { + qDebug() << (frame_id == prev_frame_id ? 
"Drawing same frame twice" : "Skip frame") << frame_id; } - prev_frame_id = frames[frame_idx].first; - VisionBuf *frame = frames[frame_idx].second; - assert(frame != nullptr); + prev_frame_id = frame_id; updateFrameMat(); @@ -329,20 +269,14 @@ void CameraWidget::paintGL() { glPixelStorei(GL_UNPACK_ROW_LENGTH, 0); } -void CameraWidget::vipcConnected(VisionIpcClient *vipc_client) { - makeCurrent(); +void CameraWidget::vipcConnected() { stream_width = vipc_client->buffers[0].width; stream_height = vipc_client->buffers[0].height; stream_stride = vipc_client->buffers[0].stride; #ifdef QCOM2 + clearEGLImages(); EGLDisplay egl_display = eglGetCurrentDisplay(); - assert(egl_display != EGL_NO_DISPLAY); - for (auto &pair : egl_images) { - eglDestroyImageKHR(egl_display, pair.second); - } - egl_images.clear(); - for (int i = 0; i < vipc_client->num_buffers; i++) { // import buffers into OpenGL int fd = dup(vipc_client->buffers[i].fd); // eglDestroyImageKHR will close, so duplicate EGLint img_attrs[] = { @@ -379,59 +313,78 @@ void CameraWidget::vipcConnected(VisionIpcClient *vipc_client) { #endif } -void CameraWidget::vipcFrameReceived() { - update(); -} +VisionBuf *CameraWidget::receiveFrame(uint64_t request_frame_id) { + if (!ensureConnection()) { + return nullptr; + } -void CameraWidget::vipcThread() { - VisionStreamType cur_stream = requested_stream_type; - std::unique_ptr vipc_client; - VisionIpcBufExtra meta_main = {0}; - - while (!QThread::currentThread()->isInterruptionRequested()) { - if (!vipc_client || cur_stream != requested_stream_type) { - clearFrames(); - qDebug().nospace() << "connecting to stream " << requested_stream_type << ", was connected to " << cur_stream; - cur_stream = requested_stream_type; - vipc_client.reset(new VisionIpcClient(stream_name, cur_stream, false)); + // Receive frames and store them in recent_frames + while (auto buf = vipc_client->recv(nullptr, 0)) { + recent_frames.emplace_back(buf); + if (recent_frames.size() > FRAME_BUFFER_SIZE) { + recent_frames.pop_front(); } - active_stream_type = cur_stream; - - if (!vipc_client->connected) { - clearFrames(); - auto streams = VisionIpcClient::getAvailableStreams(stream_name, false); - if (streams.empty()) { - QThread::msleep(100); - continue; - } - emit vipcAvailableStreamsUpdated(streams); + } + if (!vipc_client->connected || recent_frames.empty()) { + return nullptr; + } - if (!vipc_client->connect(false)) { - QThread::msleep(100); - continue; - } - emit vipcThreadConnected(vipc_client.get()); - } + // Find the requested frame + auto it = std::find_if(recent_frames.rbegin(), recent_frames.rend(), + [request_frame_id](VisionBuf *buf) { return buf->get_frame_id() == request_frame_id; }); + return it != recent_frames.rend() ? 
*it : recent_frames.back(); +} - if (VisionBuf *buf = vipc_client->recv(&meta_main, 1000)) { - { - std::lock_guard lk(frame_lock); - frames.push_back(std::make_pair(meta_main.frame_id, buf)); - while (frames.size() > FRAME_BUFFER_SIZE) { - frames.pop_front(); - } - } - emit vipcThreadFrameReceived(); - } else { - if (!isVisible()) { - vipc_client->connected = false; - } +bool CameraWidget::ensureConnection() { + // Reconnect if the client is not initialized or the stream type has changed + if (!vipc_client || vipc_client->type != stream_type) { + qDebug() << "connecting to stream" << stream_type; + vipc_client.reset(new VisionIpcClient(stream_name, stream_type, false)); + } + + // Re-establish connection if not connected + if (!vipc_client->connected) { + clearFrames(); + available_streams = VisionIpcClient::getAvailableStreams(stream_name, false); + if (available_streams.empty() || !vipc_client->connect(false)) { + return false; } + emit vipcAvailableStreamsUpdated(); + vipcConnected(); } + return true; } void CameraWidget::clearFrames() { - std::lock_guard lk(frame_lock); - frames.clear(); - available_streams.clear(); + recent_frames.clear(); + draw_frame_id = INVALID_FRAME_ID; + prev_frame_id = INVALID_FRAME_ID; +} + +void CameraWidget::clearEGLImages() { +#ifdef QCOM2 + EGLDisplay egl_display = eglGetCurrentDisplay(); + assert(egl_display != EGL_NO_DISPLAY); + for (auto &pair : egl_images) { + eglDestroyImageKHR(egl_display, pair.second); + } + egl_images.clear(); +#endif +} + +// Cameraview + +CameraView::CameraView(const std::string &name, VisionStreamType stream_type, bool zoom, QWidget *parent) + : CameraWidget(name, stream_type, zoom, parent) { + timer = new QTimer(this); + timer->setInterval(1000.0 / UI_FREQ); + timer->callOnTimeout(this, [this]() { update(); }); +} + +void CameraView::showEvent(QShowEvent *event) { + timer->start(); +} + +void CameraView::hideEvent(QHideEvent *event) { + timer->stop(); } diff --git a/selfdrive/ui/qt/widgets/cameraview.h b/selfdrive/ui/qt/widgets/cameraview.h index c97038cf436e6d..fb31d094bbe645 100644 --- a/selfdrive/ui/qt/widgets/cameraview.h +++ b/selfdrive/ui/qt/widgets/cameraview.h @@ -3,7 +3,6 @@ #include #include #include -#include #include #include #include @@ -11,7 +10,7 @@ #include #include #include -#include +#include #ifdef QCOM2 #define EGL_EGLEXT_PROTOTYPES @@ -27,40 +26,40 @@ #include "selfdrive/ui/ui.h" const int FRAME_BUFFER_SIZE = 5; +const uint32_t INVALID_FRAME_ID = -1; static_assert(FRAME_BUFFER_SIZE <= YUV_BUFFER_COUNT); class CameraWidget : public QOpenGLWidget, protected QOpenGLFunctions { Q_OBJECT public: - using QOpenGLWidget::QOpenGLWidget; explicit CameraWidget(std::string stream_name, VisionStreamType stream_type, bool zoom, QWidget* parent = nullptr); ~CameraWidget(); void setBackgroundColor(const QColor &color) { bg = color; } void setFrameId(int frame_id) { draw_frame_id = frame_id; } - void setStreamType(VisionStreamType type) { requested_stream_type = type; } - VisionStreamType getStreamType() { return active_stream_type; } - void stopVipcThread(); + void setStreamType(VisionStreamType type) { stream_type = type; } + inline VisionStreamType streamType() const { return stream_type; } + inline const std::set &availableStreams() const { return available_streams; } + VisionBuf *receiveFrame(uint64_t request_frame_id = INVALID_FRAME_ID); signals: + void vipcAvailableStreamsUpdated(); void clicked(); - void vipcThreadConnected(VisionIpcClient *); - void vipcThreadFrameReceived(); - void 
vipcAvailableStreamsUpdated(std::set); protected: + bool ensureConnection(); void paintGL() override; void initializeGL() override; + void vipcConnected(); + void clearFrames(); + void clearEGLImages(); + void resizeGL(int w, int h) override { updateFrameMat(); } - void showEvent(QShowEvent *event) override; void mouseReleaseEvent(QMouseEvent *event) override { emit clicked(); } virtual void updateFrameMat(); void updateCalibration(const mat3 &calib); - void vipcThread(); - void clearFrames(); - - int glWidth(); - int glHeight(); + int glWidth() const { return width() * devicePixelRatio(); } + int glHeight() const { return height() * devicePixelRatio(); } bool zoomed_view; GLuint frame_vao, frame_vbo, frame_ibo; @@ -77,10 +76,7 @@ class CameraWidget : public QOpenGLWidget, protected QOpenGLFunctions { int stream_width = 0; int stream_height = 0; int stream_stride = 0; - std::atomic active_stream_type; - std::atomic requested_stream_type; - std::set available_streams; - QThread *vipc_thread = nullptr; + VisionStreamType stream_type; // Calibration float x_offset = 0; @@ -89,15 +85,21 @@ class CameraWidget : public QOpenGLWidget, protected QOpenGLFunctions { mat3 calibration = DEFAULT_CALIBRATION; mat3 intrinsic_matrix = FCAM_INTRINSIC_MATRIX; - std::recursive_mutex frame_lock; - std::deque> frames; - uint32_t draw_frame_id = 0; - uint32_t prev_frame_id = 0; - -protected slots: - void vipcConnected(VisionIpcClient *vipc_client); - void vipcFrameReceived(); - void availableStreamsUpdated(std::set streams); + std::set available_streams; + std::unique_ptr vipc_client; + std::deque recent_frames; + uint32_t draw_frame_id = INVALID_FRAME_ID; + uint32_t prev_frame_id = INVALID_FRAME_ID; }; -Q_DECLARE_METATYPE(std::set); +// update frames based on timer +class CameraView : public CameraWidget { + Q_OBJECT +public: + CameraView(const std::string &name, VisionStreamType stream_type, bool zoom, QWidget *parent = nullptr); + void showEvent(QShowEvent *event) override; + void hideEvent(QHideEvent *event) override; + +private: + QTimer *timer; +}; diff --git a/selfdrive/ui/tests/test_soundd.py b/selfdrive/ui/tests/test_soundd.py index 94ce26eb4797d1..d15a6c1831b704 100755 --- a/selfdrive/ui/tests/test_soundd.py +++ b/selfdrive/ui/tests/test_soundd.py @@ -1,5 +1,4 @@ #!/usr/bin/env python3 -import unittest from cereal import car from cereal import messaging @@ -11,7 +10,7 @@ AudibleAlert = car.CarControl.HUDControl.AudibleAlert -class TestSoundd(unittest.TestCase): +class TestSoundd: def test_check_controls_timeout_alert(self): sm = SubMaster(['controlsState']) pm = PubMaster(['controlsState']) @@ -26,16 +25,13 @@ def test_check_controls_timeout_alert(self): sm.update(0) - self.assertFalse(check_controls_timeout_alert(sm)) + assert not check_controls_timeout_alert(sm) for _ in range(CONTROLS_TIMEOUT * 110): sm.update(0) time.sleep(0.01) - self.assertTrue(check_controls_timeout_alert(sm)) + assert check_controls_timeout_alert(sm) # TODO: add test with micd for checking that soundd actually outputs sounds - -if __name__ == "__main__": - unittest.main() diff --git a/selfdrive/ui/tests/test_translations.py b/selfdrive/ui/tests/test_translations.py index 8e50695e7083a5..57de069d0b3aad 100755 --- a/selfdrive/ui/tests/test_translations.py +++ b/selfdrive/ui/tests/test_translations.py @@ -1,8 +1,8 @@ #!/usr/bin/env python3 +import pytest import json import os import re -import unittest import shutil import tempfile import xml.etree.ElementTree as ET @@ -21,7 +21,7 @@ @parameterized_class(("name", "file"), 
translation_files.items()) -class TestTranslations(unittest.TestCase): +class TestTranslations: name: str file: str @@ -32,8 +32,8 @@ def _read_translation_file(path, file): return f.read() def test_missing_translation_files(self): - self.assertTrue(os.path.exists(os.path.join(TRANSLATIONS_DIR, f"{self.file}.ts")), - f"{self.name} has no XML translation file, run selfdrive/ui/update_translations.py") + assert os.path.exists(os.path.join(TRANSLATIONS_DIR, f"{self.file}.ts")), \ + f"{self.name} has no XML translation file, run selfdrive/ui/update_translations.py" def test_translations_updated(self): with tempfile.TemporaryDirectory() as tmpdir: @@ -42,19 +42,19 @@ def test_translations_updated(self): cur_translations = self._read_translation_file(TRANSLATIONS_DIR, self.file) new_translations = self._read_translation_file(tmpdir, self.file) - self.assertEqual(cur_translations, new_translations, - f"{self.file} ({self.name}) XML translation file out of date. Run selfdrive/ui/update_translations.py to update the translation files") + assert cur_translations == new_translations, \ + f"{self.file} ({self.name}) XML translation file out of date. Run selfdrive/ui/update_translations.py to update the translation files" - @unittest.skip("Only test unfinished translations before going to release") + @pytest.mark.skip("Only test unfinished translations before going to release") def test_unfinished_translations(self): cur_translations = self._read_translation_file(TRANSLATIONS_DIR, self.file) - self.assertTrue(UNFINISHED_TRANSLATION_TAG not in cur_translations, - f"{self.file} ({self.name}) translation file has unfinished translations. Finish translations or mark them as completed in Qt Linguist") + assert UNFINISHED_TRANSLATION_TAG not in cur_translations, \ + f"{self.file} ({self.name}) translation file has unfinished translations. Finish translations or mark them as completed in Qt Linguist" def test_vanished_translations(self): cur_translations = self._read_translation_file(TRANSLATIONS_DIR, self.file) - self.assertTrue("<translation type=\"vanished\">" not in cur_translations, - f"{self.file} ({self.name}) translation file has obsolete translations. Run selfdrive/ui/update_translations.py --vanish to remove them") + assert "<translation type=\"vanished\">" not in cur_translations, \ + f"{self.file} ({self.name}) translation file has obsolete translations. Run selfdrive/ui/update_translations.py --vanish to remove them" def test_finished_translations(self): """ @@ -81,27 +81,27 @@ def test_finished_translations(self): numerusform = [t.text for t in translation.findall("numerusform")] for nf in numerusform: - self.assertIsNotNone(nf, f"Ensure all plural translation forms are completed: {source_text}") - self.assertIn("%n", nf, "Ensure numerus argument (%n) exists in translation.") - self.assertIsNone(FORMAT_ARG.search(nf), f"Plural translations must use %n, not %1, %2, etc.: {numerusform}") + assert nf is not None, f"Ensure all plural translation forms are completed: {source_text}" + assert "%n" in nf, "Ensure numerus argument (%n) exists in translation."
+ assert FORMAT_ARG.search(nf) is None, f"Plural translations must use %n, not %1, %2, etc.: {numerusform}" else: - self.assertIsNotNone(translation.text, f"Ensure translation is completed: {source_text}") + assert translation.text is not None, f"Ensure translation is completed: {source_text}" source_args = FORMAT_ARG.findall(source_text) translation_args = FORMAT_ARG.findall(translation.text) - self.assertEqual(sorted(source_args), sorted(translation_args), - f"Ensure format arguments are consistent: `{source_text}` vs. `{translation.text}`") + assert sorted(source_args) == sorted(translation_args), \ + f"Ensure format arguments are consistent: `{source_text}` vs. `{translation.text}`" def test_no_locations(self): for line in self._read_translation_file(TRANSLATIONS_DIR, self.file).splitlines(): - self.assertFalse(line.strip().startswith(LOCATION_TAG), - f"Line contains location tag: {line.strip()}, remove all line numbers.") + assert not line.strip().startswith(LOCATION_TAG), \ + f"Line contains location tag: {line.strip()}, remove all line numbers." def test_entities_error(self): cur_translations = self._read_translation_file(TRANSLATIONS_DIR, self.file) matches = re.findall(r'@(\w+);', cur_translations) - self.assertEqual(len(matches), 0, f"The string(s) {matches} were found with '@' instead of '&'") + assert len(matches) == 0, f"The string(s) {matches} were found with '@' instead of '&'" def test_bad_language(self): IGNORED_WORDS = {'pédale'} @@ -128,7 +128,3 @@ def test_bad_language(self): words = set(translation_text.translate(str.maketrans('', '', string.punctuation + '%n')).lower().split()) bad_words_found = words & (banned_words - IGNORED_WORDS) assert not bad_words_found, f"Bad language found in {self.name}: '{translation_text}'. Bad word(s): {', '.join(bad_words_found)}" - - -if __name__ == "__main__": - unittest.main() diff --git a/selfdrive/ui/tests/test_ui/run.py b/selfdrive/ui/tests/test_ui/run.py index c834107780c9c7..871333563d4379 100644 --- a/selfdrive/ui/tests/test_ui/run.py +++ b/selfdrive/ui/tests/test_ui/run.py @@ -8,9 +8,7 @@ import os import pywinctl import time -import unittest -from parameterized import parameterized from cereal import messaging, car, log from cereal.visionipc import VisionIpcServer, VisionStreamType @@ -122,16 +120,11 @@ def setup_onroad_sidebar(click, pm: PubMaster): SCREENSHOTS_DIR = TEST_OUTPUT_DIR / "screenshots" -class TestUI(unittest.TestCase): - @classmethod - def setUpClass(cls): +class TestUI: + def __init__(self): os.environ["SCALE"] = "1" sys.modules["mouseinfo"] = False - @classmethod - def tearDownClass(cls): - del sys.modules["mouseinfo"] - def setup(self): self.sm = SubMaster(["uiDebug"]) self.pm = PubMaster(["deviceState", "pandaStates", "controlsState", 'roadCameraState', 'wideRoadCameraState', 'liveLocationKalman']) @@ -147,8 +140,8 @@ def setup(self): def screenshot(self): import pyautogui im = pyautogui.screenshot(region=(self.ui.left, self.ui.top, self.ui.width, self.ui.height)) - self.assertEqual(im.width, 2160) - self.assertEqual(im.height, 1080) + assert im.width == 2160 + assert im.height == 1080 img = np.array(im) im.close() return img @@ -158,7 +151,6 @@ def click(self, x, y, *args, **kwargs): pyautogui.click(self.ui.left + x, self.ui.top + y, *args, **kwargs) time.sleep(UI_DELAY) # give enough time for the UI to react - @parameterized.expand(CASES.items()) @with_processes(["ui"]) def test_ui(self, name, setup_case): self.setup() @@ -188,7 +180,10 @@ def create_screenshots(): shutil.rmtree(TEST_OUTPUT_DIR) 
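With unittest and parameterized gone from run.py, the screenshot entry point just after this hunk drives the cases itself: it instantiates TestUI once and calls test_ui per case instead of relying on unittest.main. A minimal sketch of that driver pattern, with placeholder class and case names (the real CASES dict lives in run.py):

    # Hedged sketch: driving a pytest-style test class manually from a script.
    # The class, case names, and setup callables are illustrative placeholders.
    class TestExample:
        def test_case(self, name, setup_case):
            setup_case()          # prepare the scenario, e.g. publish messages
            print(f"ran {name}")  # stand-in for the real screenshot/assert step

    CASES = {"onroad": lambda: None, "settings": lambda: None}  # illustrative

    t = TestExample()
    for name, setup_case in CASES.items():
        t.test_case(name, setup_case)
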
SCREENSHOTS_DIR.mkdir(parents=True) - unittest.main(exit=False) + + t = TestUI() + for name, setup in CASES.items(): + t.test_ui(name, setup) if __name__ == "__main__": print("creating test screenshots") diff --git a/selfdrive/ui/translations/main_zh-CHS.ts index 306678363a860f..32119ee10f4d78 100644 --- a/selfdrive/ui/translations/main_zh-CHS.ts +++ b/selfdrive/ui/translations/main_zh-CHS.ts @@ -299,11 +299,11 @@ Pair Device - + 配对设备 PAIR - + 配对 @@ -487,23 +487,23 @@ OnroadAlerts openpilot Unavailable - + 无法使用 openpilot Waiting for controls to start - + 等待控制服务启动 TAKE CONTROL IMMEDIATELY - + 立即接管 Controls Unresponsive - + 控制服务无响应 Reboot Device - + 重启设备 @@ -628,7 +628,7 @@ now - + 现在 @@ -1152,11 +1152,11 @@ This may take up to a minute. Standard is recommended. In aggressive mode, openpilot will follow lead cars closer and be more aggressive with the gas and brake. In relaxed mode openpilot will stay further away from lead cars. On supported cars, you can cycle through these personalities with your steering wheel distance button. - + 推荐使用标准模式。在积极模式下,openpilot 会更靠近前方车辆,并在油门和刹车方面更加激进。在放松模式下,openpilot 会与前方车辆保持更远距离。在支持的车型上,你可以使用方向盘上的距离按钮来循环切换这些驾驶风格。 The driving visualization will transition to the road-facing wide-angle camera at low speeds to better show some turns. The Experimental mode logo will also be shown in the top right corner. - + 在低速时,驾驶可视化将转换为道路朝向的广角摄像头,以更好地展示某些转弯。测试模式标志也将显示在右上角。 Always-On Driver Monitoring diff --git a/selfdrive/ui/translations/main_zh-CHT.ts index 8d0df30f9478a4..9d1c16db9f08ad 100644 --- a/selfdrive/ui/translations/main_zh-CHT.ts +++ b/selfdrive/ui/translations/main_zh-CHT.ts @@ -247,11 +247,11 @@ openpilot requires the device to be mounted within 4° left or right and within 5° up or 9° down. openpilot is continuously calibrating, resetting is rarely required. - openpilot 需要將設備固定在左右偏差 4° 以內,朝上偏差 5° 以內或朝下偏差 9° 以內。鏡頭在後台會持續自動校準,很少有需要重設的情況。 + openpilot 需要將裝置固定在左右偏差 4° 以內,朝上偏差 5° 以內或朝下偏差 9° 以內。鏡頭在後台會持續自動校準,很少有需要重置的情況。 Your device is pointed %1° %2 and %3° %4. - 你的設備目前朝%2 %1° 以及朝%4 %3° 。 + 你的裝置目前朝%2 %1° 以及朝%4 %3° 。 down @@ -295,15 +295,15 @@ Pair your device with comma connect (connect.comma.ai) and claim your comma prime offer. - 將您的設備與 comma connect (connect.comma.ai) 配對並領取您的 comma 高級會員優惠。 + 將您的裝置與 comma connect (connect.comma.ai) 配對並領取您的 comma 高級會員優惠。 Pair Device - + 配對裝置 PAIR - + 配對 @@ -437,11 +437,11 @@ An update to your device's operating system is downloading in the background. You will be prompted to update when it's ready to install. - 一個給您設備的操作系統的更新正在後台下載中。當更新準備好安裝時,您將收到提示進行更新。 + 一個有關操作系統的更新正在後台下載中。當更新準備好安裝時,您將收到提示進行更新。 Device failed to register. It will not connect to or upload to comma.ai servers, and receives no support from comma.ai. If this is an official device, visit https://comma.ai/support. - 設備註冊失敗。它將無法連接或上傳至 comma.ai 伺服器,並且無法獲得 comma.ai 的支援。如果這是一個官方設備,請訪問 https://comma.ai/support 。 + 裝置註冊失敗。它將無法連接或上傳至 comma.ai 伺服器,並且無法獲得 comma.ai 的支援。如果這是一個官方裝置,請訪問 https://comma.ai/support 。 NVMe drive not mounted. @@ -449,7 +449,7 @@ Unsupported NVMe drive detected. Device may draw significantly more power and overheat due to the unsupported NVMe. - 檢測到不支援的 NVMe 固態硬碟。您的設備因為使用了不支援的 NVMe 固態硬碟可能會消耗更多電力並更易過熱。 + 檢測到不支援的 NVMe 固態硬碟。您的裝置因為使用了不支援的 NVMe 固態硬碟可能會消耗更多電力並更易過熱。 openpilot was unable to identify your car. Your car is either unsupported or its ECUs are not recognized. Please submit a pull request to add the firmware versions to the proper vehicle. Need help? Join discord.comma.ai. 
@@ -461,11 +461,11 @@ openpilot detected a change in the device's mounting position. Ensure the device is fully seated in the mount and the mount is firmly secured to the windshield. - openpilot 偵測到設備的安裝位置發生變化。請確保設備完全安裝在支架上,並確保支架牢固地固定在擋風玻璃上。 + openpilot 偵測到裝置的安裝位置發生變化。請確保裝置完全安裝在支架上,並確保支架牢固地固定在擋風玻璃上。 Device temperature too high. System cooling down before starting. Current internal component temperature: %1 - 設備溫度過高。系統正在冷卻中,等冷卻完畢後才會啟動。目前內部組件溫度:%1 + 裝置溫度過高。系統正在冷卻中,等冷卻完畢後才會啟動。目前內部組件溫度:%1 @@ -487,30 +487,30 @@ OnroadAlerts openpilot Unavailable - + 無法使用 openpilot Waiting for controls to start - + 等待操控服務開始 TAKE CONTROL IMMEDIATELY - + 立即接管 Controls Unresponsive - + 操控服務沒有反應 Reboot Device - + 請重新啟動裝置 PairingPopup Pair your device to your comma account - 將設備與您的 comma 帳號配對 + 將裝置與您的 comma 帳號配對 Go to https://connect.comma.ai on your phone @@ -628,7 +628,7 @@ now - + 現在 @@ -639,7 +639,7 @@ Are you sure you want to reset your device? - 您確定要重設你的設備嗎? + 您確定要重設你的裝置嗎? System Reset @@ -659,12 +659,12 @@ Unable to mount data partition. Partition may be corrupted. Press confirm to erase and reset your device. - 無法掛載資料分割區。分割區可能已經毀損。請確認是否要刪除並重新設定。 + 無法掛載資料分割區。分割區可能已經毀損。請確認是否要刪除並重置。 Resetting device... This may take up to a minute. - 設備重設中… + 重置中… 這可能需要一分鐘的時間。 @@ -680,7 +680,7 @@ This may take up to a minute. Device - 設備 + 裝置 Network @@ -755,7 +755,7 @@ This may take up to a minute. Ensure the entered URL is valid, and the device’s internet connection is good. - 請確定您輸入的是有效的安裝網址,並且確定設備的網路連線狀態良好。 + 請確定您輸入的是有效的安裝網址,並且確定裝置的網路連線狀態良好。 Reboot device @@ -771,7 +771,7 @@ This may take up to a minute. Something went wrong. Reboot the device. - 發生了一些錯誤。請重新啟動您的設備。 + 發生了一些錯誤。請重新啟動您的裝置。 Select a language @@ -798,11 +798,11 @@ This may take up to a minute. Pair your device with comma connect (connect.comma.ai) and claim your comma prime offer. - 將您的設備與 comma connect (connect.comma.ai) 配對並領取您的 comma 高級會員優惠。 + 將您的裝置與 comma connect (connect.comma.ai) 配對並領取您的 comma 高級會員優惠。 Pair device - 配對設備 + 配對裝置 @@ -1152,11 +1152,11 @@ This may take up to a minute. Standard is recommended. In aggressive mode, openpilot will follow lead cars closer and be more aggressive with the gas and brake. In relaxed mode openpilot will stay further away from lead cars. On supported cars, you can cycle through these personalities with your steering wheel distance button. - + 建議使用標準模式。在積極模式下,openpilot 會更接近前車並更積極地使用油門和剎車。在輕鬆模式下,openpilot 會與前車保持較遠距離。對於支援的汽車,您可以使用方向盤上的距離按鈕來切換這些駕駛風格。 The driving visualization will transition to the road-facing wide-angle camera at low speeds to better show some turns. The Experimental mode logo will also be shown in the top right corner. - + 在低速時,駕駛可視化將切換至道路朝向的廣角攝影機,以更好地顯示某些彎道。在右上角還會顯示「實驗模式」的標誌。 Always-On Driver Monitoring @@ -1175,7 +1175,7 @@ This may take up to a minute. An operating system update is required. Connect your device to Wi-Fi for the fastest update experience. The download size is approximately 1GB. 
- 設備的操作系統需要更新。請將您的設備連接到 Wi-Fi 以獲得最快的更新體驗。下載大小約為 1GB。 + 需要進行作業系統更新。建議將您的裝置連接上 Wi-Fi 獲得更快的更新下載。下載大小約為 1GB。 Connect to Wi-Fi diff --git a/selfdrive/ui/watch3.cc b/selfdrive/ui/watch3.cc index ec35c29b6b4c55..cb12f82ea8e9ba 100644 --- a/selfdrive/ui/watch3.cc +++ b/selfdrive/ui/watch3.cc @@ -19,15 +19,15 @@ int main(int argc, char *argv[]) { { QHBoxLayout *hlayout = new QHBoxLayout(); layout->addLayout(hlayout); - hlayout->addWidget(new CameraWidget("navd", VISION_STREAM_MAP, false)); - hlayout->addWidget(new CameraWidget("camerad", VISION_STREAM_ROAD, false)); + hlayout->addWidget(new CameraView("navd", VISION_STREAM_MAP, false)); + hlayout->addWidget(new CameraView("camerad", VISION_STREAM_ROAD, false)); } { QHBoxLayout *hlayout = new QHBoxLayout(); layout->addLayout(hlayout); - hlayout->addWidget(new CameraWidget("camerad", VISION_STREAM_DRIVER, false)); - hlayout->addWidget(new CameraWidget("camerad", VISION_STREAM_WIDE_ROAD, false)); + hlayout->addWidget(new CameraView("camerad", VISION_STREAM_DRIVER, false)); + hlayout->addWidget(new CameraView("camerad", VISION_STREAM_WIDE_ROAD, false)); } return a.exec(); diff --git a/selfdrive/updated/tests/test_base.py b/selfdrive/updated/tests/test_base.py index b59f03fe776a31..615d0de99c91b4 100644 --- a/selfdrive/updated/tests/test_base.py +++ b/selfdrive/updated/tests/test_base.py @@ -6,9 +6,6 @@ import subprocess import tempfile import time -import unittest -from unittest import mock - import pytest from openpilot.common.params import Params @@ -52,13 +49,13 @@ def get_version(path: str) -> str: @pytest.mark.slow # TODO: can we test overlayfs in GHA? -class BaseUpdateTest(unittest.TestCase): +class TestBaseUpdate: @classmethod - def setUpClass(cls): + def setup_class(cls): if "Base" in cls.__name__: - raise unittest.SkipTest + pytest.skip() - def setUp(self): + def setup_method(self): self.tmpdir = tempfile.mkdtemp() run(["sudo", "mount", "-t", "tmpfs", "tmpfs", self.tmpdir]) # overlayfs doesn't work inside of docker unless this is a tmpfs @@ -76,8 +73,6 @@ def setUp(self): self.remote_dir = self.mock_update_path / "remote" self.remote_dir.mkdir() - mock.patch("openpilot.common.basedir.BASEDIR", self.basedir).start() - os.environ["UPDATER_STAGING_ROOT"] = str(self.staging_root) os.environ["UPDATER_LOCK_FILE"] = str(self.mock_update_path / "safe_staging_overlay.lock") @@ -86,6 +81,10 @@ def setUp(self): "master": ("0.1.3", "1.2", "0.1.3 release notes"), } + @pytest.fixture(autouse=True) + def mock_basedir(self, mocker): + mocker.patch("openpilot.common.basedir.BASEDIR", self.basedir) + def set_target_branch(self, branch): self.params.put("UpdaterTargetBranch", branch) @@ -102,8 +101,7 @@ def setup_remote_release(self, release): def additional_context(self): raise NotImplementedError("") - def tearDown(self): - mock.patch.stopall() + def teardown_method(self): try: run(["sudo", "umount", "-l", str(self.staging_root / "merged")]) run(["sudo", "umount", "-l", self.tmpdir]) @@ -125,17 +123,17 @@ def wait_for_condition(self, condition, timeout=12): time.sleep(1) def _test_finalized_update(self, branch, version, agnos_version, release_notes): - self.assertEqual(get_version(str(self.staging_root / "finalized")), version) - self.assertEqual(get_consistent_flag(str(self.staging_root / "finalized")), True) - self.assertTrue(os.access(str(self.staging_root / "finalized" / "launch_env.sh"), os.X_OK)) + assert get_version(str(self.staging_root / "finalized")) == version + assert get_consistent_flag(str(self.staging_root / "finalized")) + assert 
os.access(str(self.staging_root / "finalized" / "launch_env.sh"), os.X_OK) with open(self.staging_root / "finalized" / "test_symlink") as f: - self.assertIn(version, f.read()) + assert version in f.read() -class ParamsBaseUpdateTest(BaseUpdateTest): +class ParamsBaseUpdateTest(TestBaseUpdate): def _test_finalized_update(self, branch, version, agnos_version, release_notes): - self.assertTrue(self.params.get("UpdaterNewDescription", encoding="utf-8").startswith(f"{version} / {branch}")) - self.assertEqual(self.params.get("UpdaterNewReleaseNotes", encoding="utf-8"), f"<p>{release_notes}</p>\n") + assert self.params.get("UpdaterNewDescription", encoding="utf-8").startswith(f"{version} / {branch}") + assert self.params.get("UpdaterNewReleaseNotes", encoding="utf-8") == f"<p>{release_notes}</p>\n" super()._test_finalized_update(branch, version, agnos_version, release_notes) def send_check_for_updates_signal(self, updated: ManagerProcess): @@ -145,9 +143,9 @@ def send_download_signal(self, updated: ManagerProcess): updated.signal(signal.SIGHUP.value) def _test_params(self, branch, fetch_available, update_available): - self.assertEqual(self.params.get("UpdaterTargetBranch", encoding="utf-8"), branch) - self.assertEqual(self.params.get_bool("UpdaterFetchAvailable"), fetch_available) - self.assertEqual(self.params.get_bool("UpdateAvailable"), update_available) + assert self.params.get("UpdaterTargetBranch", encoding="utf-8") == branch + assert self.params.get_bool("UpdaterFetchAvailable") == fetch_available + assert self.params.get_bool("UpdateAvailable") == update_available def wait_for_idle(self): self.wait_for_condition(lambda: self.params.get("UpdaterState", encoding="utf-8") == "idle") @@ -229,17 +227,16 @@ def test_switch_branches(self): self._test_params("master", False, True) self._test_finalized_update("master", *self.MOCK_RELEASES["master"]) - def test_agnos_update(self): + def test_agnos_update(self, mocker): # Start on release3, push an update with an agnos change self.setup_remote_release("release3") self.setup_basedir_release("release3") - with self.additional_context(), \ - mock.patch("openpilot.system.hardware.AGNOS", "True"), \ - mock.patch("openpilot.system.hardware.tici.hardware.Tici.get_os_version", "1.2"), \ - mock.patch("openpilot.system.hardware.tici.agnos.get_target_slot_number"), \ - mock.patch("openpilot.system.hardware.tici.agnos.flash_agnos_update"), \ - processes_context(["updated"]) as [updated]: + with self.additional_context(), processes_context(["updated"]) as [updated]: + mocker.patch("openpilot.system.hardware.AGNOS", "True") + mocker.patch("openpilot.system.hardware.tici.hardware.Tici.get_os_version", "1.2") + mocker.patch("openpilot.system.hardware.tici.agnos.get_target_slot_number") + mocker.patch("openpilot.system.hardware.tici.agnos.flash_agnos_update") self._test_params("release3", False, False) self.wait_for_idle() diff --git a/system/camerad/test/test_exposure.py index 50467f9db40222..36e8522b1dcdfa 100755 --- a/system/camerad/test/test_exposure.py +++ b/system/camerad/test/test_exposure.py @@ -1,6 +1,5 @@ #!/usr/bin/env python3 import time -import unittest import numpy as np from openpilot.selfdrive.test.helpers import with_processes, phone_only @@ -9,9 +8,9 @@ TEST_TIME = 45 REPEAT = 5 -class TestCamerad(unittest.TestCase): +class TestCamerad: @classmethod - def setUpClass(cls): + def setup_class(cls): pass def _numpy_rgb2gray(self, im): @@ -49,7 +48,4 @@ def test_camera_operation(self): passed += int(res) time.sleep(2) - self.assertGreaterEqual(passed, REPEAT) - -if __name__ == "__main__": - unittest.main() + assert passed >= REPEAT diff --git a/system/hardware/tici/tests/test_agnos_updater.py index 86bc78881e2ea6..462cf6cb5ca5e8 100755 --- a/system/hardware/tici/tests/test_agnos_updater.py +++ b/system/hardware/tici/tests/test_agnos_updater.py @@ -1,14 +1,13 @@ #!/usr/bin/env python3 import json import os -import unittest import requests TEST_DIR = os.path.join(os.path.dirname(os.path.abspath(__file__))) MANIFEST = os.path.join(TEST_DIR, "../agnos.json") -class TestAgnosUpdater(unittest.TestCase): +class TestAgnosUpdater: def test_manifest(self): with open(MANIFEST) as f: @@ -17,10 +16,6 @@ def test_manifest(self): for img in m: r = 
requests.head(img['url'], timeout=10) r.raise_for_status() - self.assertEqual(r.headers['Content-Type'], "application/x-xz") + assert r.headers['Content-Type'] == "application/x-xz" if not img['sparse']: assert img['hash'] == img['hash_raw'] - - -if __name__ == "__main__": - unittest.main() diff --git a/system/hardware/tici/tests/test_amplifier.py b/system/hardware/tici/tests/test_amplifier.py index cd3b0f90fee601..dfba84b942c596 100755 --- a/system/hardware/tici/tests/test_amplifier.py +++ b/system/hardware/tici/tests/test_amplifier.py @@ -1,7 +1,7 @@ #!/usr/bin/env python3 +import pytest import time import random -import unittest import subprocess from panda import Panda @@ -10,14 +10,14 @@ from openpilot.system.hardware.tici.amplifier import Amplifier -class TestAmplifier(unittest.TestCase): +class TestAmplifier: @classmethod - def setUpClass(cls): + def setup_class(cls): if not TICI: - raise unittest.SkipTest + pytest.skip() - def setUp(self): + def setup_method(self): # clear dmesg subprocess.check_call("sudo dmesg -C", shell=True) @@ -25,7 +25,7 @@ def setUp(self): Panda.wait_for_panda(None, 30) self.panda = Panda() - def tearDown(self): + def teardown_method(self): HARDWARE.reset_internal_panda() def _check_for_i2c_errors(self, expected): @@ -68,8 +68,4 @@ def test_init_while_siren_play(self): if self._check_for_i2c_errors(True): break else: - self.fail("didn't hit any i2c errors") - - -if __name__ == "__main__": - unittest.main() + pytest.fail("didn't hit any i2c errors") diff --git a/system/hardware/tici/tests/test_hardware.py b/system/hardware/tici/tests/test_hardware.py index 6c41c383a049b6..49d4ac769951ec 100755 --- a/system/hardware/tici/tests/test_hardware.py +++ b/system/hardware/tici/tests/test_hardware.py @@ -1,7 +1,6 @@ #!/usr/bin/env python3 import pytest import time -import unittest import numpy as np from openpilot.system.hardware.tici.hardware import Tici @@ -10,7 +9,7 @@ @pytest.mark.tici -class TestHardware(unittest.TestCase): +class TestHardware: def test_power_save_time(self): ts = [] @@ -22,7 +21,3 @@ def test_power_save_time(self): assert 0.1 < np.mean(ts) < 0.25 assert max(ts) < 0.3 - - -if __name__ == "__main__": - unittest.main() diff --git a/system/hardware/tici/tests/test_power_draw.py b/system/hardware/tici/tests/test_power_draw.py index ba7e0a6d9d0f7e..104329da42357e 100755 --- a/system/hardware/tici/tests/test_power_draw.py +++ b/system/hardware/tici/tests/test_power_draw.py @@ -1,7 +1,6 @@ #!/usr/bin/env python3 from collections import defaultdict, deque import pytest -import unittest import time import numpy as np from dataclasses import dataclass @@ -40,15 +39,15 @@ def name(self): @pytest.mark.tici -class TestPowerDraw(unittest.TestCase): +class TestPowerDraw: - def setUp(self): + def setup_method(self): write_car_param() # wait a bit for power save to disable time.sleep(5) - def tearDown(self): + def teardown_method(self): manager_cleanup() def get_expected_messages(self, proc): @@ -97,7 +96,7 @@ def get_power_with_warmup_for_target(self, proc, prev): return now, msg_counts, time.monotonic() - start_time - SAMPLE_TIME @mock_messages(['liveLocationKalman']) - def test_camera_procs(self): + def test_camera_procs(self, subtests): baseline = get_power() prev = baseline @@ -122,12 +121,8 @@ def test_camera_procs(self): expected = proc.power msgs_received = sum(msg_counts[msg] for msg in proc.msgs) tab.append([proc.name, round(expected, 2), round(cur, 2), self.get_expected_messages(proc), msgs_received, round(warmup_time[proc.name], 2)]) - with 
self.subTest(proc=proc.name): - self.assertTrue(self.valid_msg_count(proc, msg_counts), f"expected {self.get_expected_messages(proc)} msgs, got {msgs_received} msgs") - self.assertTrue(self.valid_power_draw(proc, cur), f"expected {expected:.2f}W, got {cur:.2f}W") + with subtests.test(proc=proc.name): + assert self.valid_msg_count(proc, msg_counts), f"expected {self.get_expected_messages(proc)} msgs, got {msgs_received} msgs" + assert self.valid_power_draw(proc, cur), f"expected {expected:.2f}W, got {cur:.2f}W" print(tabulate(tab)) print(f"Baseline {baseline:.2f}W\n") - - -if __name__ == "__main__": - unittest.main() diff --git a/system/loggerd/tests/loggerd_tests_common.py b/system/loggerd/tests/loggerd_tests_common.py index 877c872b6b1079..e8a6d031c4086f 100644 --- a/system/loggerd/tests/loggerd_tests_common.py +++ b/system/loggerd/tests/loggerd_tests_common.py @@ -1,6 +1,5 @@ import os import random -import unittest from pathlib import Path @@ -54,7 +53,7 @@ def get(self, *args, **kwargs): def get_token(self): return "fake-token" -class UploaderTestCase(unittest.TestCase): +class UploaderTestCase: f_type = "UNKNOWN" root: Path @@ -66,7 +65,7 @@ class UploaderTestCase(unittest.TestCase): def set_ignore(self): uploader.Api = MockApiIgnore - def setUp(self): + def setup_method(self): uploader.Api = MockApi uploader.fake_upload = True uploader.force_wifi = True diff --git a/system/loggerd/tests/test_deleter.py b/system/loggerd/tests/test_deleter.py index 37d25507e0d2b5..3ba6ad403108e9 100755 --- a/system/loggerd/tests/test_deleter.py +++ b/system/loggerd/tests/test_deleter.py @@ -1,7 +1,6 @@ #!/usr/bin/env python3 import time import threading -import unittest from collections import namedtuple from pathlib import Path from collections.abc import Sequence @@ -17,9 +16,9 @@ class TestDeleter(UploaderTestCase): def fake_statvfs(self, d): return self.fake_stats - def setUp(self): + def setup_method(self): self.f_type = "fcamera.hevc" - super().setUp() + super().setup_method() self.fake_stats = Stats(f_bavail=0, f_blocks=10, f_frsize=4096) deleter.os.statvfs = self.fake_statvfs @@ -64,7 +63,7 @@ def assertDeleteOrder(self, f_paths: Sequence[Path], timeout: int = 5) -> None: finally: self.join_thread() - self.assertEqual(deleted_order, f_paths, "Files not deleted in expected order") + assert deleted_order == f_paths, "Files not deleted in expected order" def test_delete_order(self): self.assertDeleteOrder([ @@ -105,7 +104,7 @@ def test_no_delete_when_available_space(self): time.sleep(0.01) self.join_thread() - self.assertTrue(f_path.exists(), "File deleted with available space") + assert f_path.exists(), "File deleted with available space" def test_no_delete_with_lock_file(self): f_path = self.make_file_with_data(self.seg_dir, self.f_type, lock=True) @@ -116,8 +115,4 @@ def test_no_delete_with_lock_file(self): time.sleep(0.01) self.join_thread() - self.assertTrue(f_path.exists(), "File deleted when locked") - - -if __name__ == "__main__": - unittest.main() + assert f_path.exists(), "File deleted when locked" diff --git a/system/loggerd/tests/test_encoder.py b/system/loggerd/tests/test_encoder.py index bd076dc5f3177a..f6eb9b90119ebd 100755 --- a/system/loggerd/tests/test_encoder.py +++ b/system/loggerd/tests/test_encoder.py @@ -6,7 +6,6 @@ import shutil import subprocess import time -import unittest from pathlib import Path from parameterized import parameterized @@ -33,14 +32,14 @@ @pytest.mark.tici # TODO: all of loggerd should work on PC -class TestEncoder(unittest.TestCase): +class TestEncoder: 
- def setUp(self): + def setup_method(self): self._clear_logs() os.environ["LOGGERD_TEST"] = "1" os.environ["LOGGERD_SEGMENT_LENGTH"] = str(SEGMENT_LENGTH) - def tearDown(self): + def teardown_method(self): self._clear_logs() def _clear_logs(self): @@ -85,7 +84,7 @@ def check_seg(i): file_path = f"{route_prefix_path}--{i}/{camera}" # check file exists - self.assertTrue(os.path.exists(file_path), f"segment #{i}: '{file_path}' missing") + assert os.path.exists(file_path), f"segment #{i}: '{file_path}' missing" # TODO: this ffprobe call is really slow # check frame count @@ -98,13 +97,13 @@ def check_seg(i): frame_count = int(probe.split('\n')[0].strip()) counts.append(frame_count) - self.assertEqual(frame_count, expected_frames, - f"segment #{i}: {camera} failed frame count check: expected {expected_frames}, got {frame_count}") + assert frame_count == expected_frames, \ + f"segment #{i}: {camera} failed frame count check: expected {expected_frames}, got {frame_count}" # sanity check file size file_size = os.path.getsize(file_path) - self.assertTrue(math.isclose(file_size, size, rel_tol=FILE_SIZE_TOLERANCE), - f"{file_path} size {file_size} isn't close to target size {size}") + assert math.isclose(file_size, size, rel_tol=FILE_SIZE_TOLERANCE), \ + f"{file_path} size {file_size} isn't close to target size {size}" # Check encodeIdx if encode_idx_name is not None: @@ -118,24 +117,24 @@ def check_seg(i): frame_idxs = [m.frameId for m in encode_msgs] # Check frame count - self.assertEqual(frame_count, len(segment_idxs)) - self.assertEqual(frame_count, len(encode_idxs)) + assert frame_count == len(segment_idxs) + assert frame_count == len(encode_idxs) # Check for duplicates or skips - self.assertEqual(0, segment_idxs[0]) - self.assertEqual(len(set(segment_idxs)), len(segment_idxs)) + assert 0 == segment_idxs[0] + assert len(set(segment_idxs)) == len(segment_idxs) - self.assertTrue(all(valid)) + assert all(valid) - self.assertEqual(expected_frames * i, encode_idxs[0]) + assert expected_frames * i == encode_idxs[0] first_frames.append(frame_idxs[0]) - self.assertEqual(len(set(encode_idxs)), len(encode_idxs)) + assert len(set(encode_idxs)) == len(encode_idxs) - self.assertEqual(1, len(set(first_frames))) + assert 1 == len(set(first_frames)) if TICI: expected_frames = fps * SEGMENT_LENGTH - self.assertEqual(min(counts), expected_frames) + assert min(counts) == expected_frames shutil.rmtree(f"{route_prefix_path}--{i}") try: @@ -150,7 +149,3 @@ def check_seg(i): managed_processes['encoderd'].stop() managed_processes['camerad'].stop() managed_processes['sensord'].stop() - - -if __name__ == "__main__": - unittest.main() diff --git a/system/loggerd/tests/test_uploader.py b/system/loggerd/tests/test_uploader.py index 73917a30cf128e..c0a9770e5348ae 100755 --- a/system/loggerd/tests/test_uploader.py +++ b/system/loggerd/tests/test_uploader.py @@ -2,7 +2,6 @@ import os import time import threading -import unittest import logging import json from pathlib import Path @@ -38,8 +37,8 @@ def emit(self, record): class TestUploader(UploaderTestCase): - def setUp(self): - super().setUp() + def setup_method(self): + super().setup_method() log_handler.reset() def start_thread(self): @@ -80,13 +79,13 @@ def test_upload(self): exp_order = self.gen_order([self.seg_num], []) - self.assertTrue(len(log_handler.upload_ignored) == 0, "Some files were ignored") - self.assertFalse(len(log_handler.upload_order) < len(exp_order), "Some files failed to upload") - self.assertFalse(len(log_handler.upload_order) > len(exp_order), "Some 
files were uploaded twice") + assert len(log_handler.upload_ignored) == 0, "Some files were ignored" + assert not len(log_handler.upload_order) < len(exp_order), "Some files failed to upload" + assert not len(log_handler.upload_order) > len(exp_order), "Some files were uploaded twice" for f_path in exp_order: - self.assertEqual(os.getxattr((Path(Paths.log_root()) / f_path).with_suffix(""), UPLOAD_ATTR_NAME), UPLOAD_ATTR_VALUE, "All files not uploaded") + assert os.getxattr((Path(Paths.log_root()) / f_path).with_suffix(""), UPLOAD_ATTR_NAME) == UPLOAD_ATTR_VALUE, "All files not uploaded" - self.assertTrue(log_handler.upload_order == exp_order, "Files uploaded in wrong order") + assert log_handler.upload_order == exp_order, "Files uploaded in wrong order" def test_upload_with_wrong_xattr(self): self.gen_files(lock=False, xattr=b'0') @@ -98,13 +97,13 @@ def test_upload_with_wrong_xattr(self): exp_order = self.gen_order([self.seg_num], []) - self.assertTrue(len(log_handler.upload_ignored) == 0, "Some files were ignored") - self.assertFalse(len(log_handler.upload_order) < len(exp_order), "Some files failed to upload") - self.assertFalse(len(log_handler.upload_order) > len(exp_order), "Some files were uploaded twice") + assert len(log_handler.upload_ignored) == 0, "Some files were ignored" + assert not len(log_handler.upload_order) < len(exp_order), "Some files failed to upload" + assert not len(log_handler.upload_order) > len(exp_order), "Some files were uploaded twice" for f_path in exp_order: - self.assertEqual(os.getxattr((Path(Paths.log_root()) / f_path).with_suffix(""), UPLOAD_ATTR_NAME), UPLOAD_ATTR_VALUE, "All files not uploaded") + assert os.getxattr((Path(Paths.log_root()) / f_path).with_suffix(""), UPLOAD_ATTR_NAME) == UPLOAD_ATTR_VALUE, "All files not uploaded" - self.assertTrue(log_handler.upload_order == exp_order, "Files uploaded in wrong order") + assert log_handler.upload_order == exp_order, "Files uploaded in wrong order" def test_upload_ignored(self): self.set_ignore() @@ -117,13 +116,13 @@ def test_upload_ignored(self): exp_order = self.gen_order([self.seg_num], []) - self.assertTrue(len(log_handler.upload_order) == 0, "Some files were not ignored") - self.assertFalse(len(log_handler.upload_ignored) < len(exp_order), "Some files failed to ignore") - self.assertFalse(len(log_handler.upload_ignored) > len(exp_order), "Some files were ignored twice") + assert len(log_handler.upload_order) == 0, "Some files were not ignored" + assert not len(log_handler.upload_ignored) < len(exp_order), "Some files failed to ignore" + assert not len(log_handler.upload_ignored) > len(exp_order), "Some files were ignored twice" for f_path in exp_order: - self.assertEqual(os.getxattr((Path(Paths.log_root()) / f_path).with_suffix(""), UPLOAD_ATTR_NAME), UPLOAD_ATTR_VALUE, "All files not ignored") + assert os.getxattr((Path(Paths.log_root()) / f_path).with_suffix(""), UPLOAD_ATTR_NAME) == UPLOAD_ATTR_VALUE, "All files not ignored" - self.assertTrue(log_handler.upload_ignored == exp_order, "Files ignored in wrong order") + assert log_handler.upload_ignored == exp_order, "Files ignored in wrong order" def test_upload_files_in_create_order(self): seg1_nums = [0, 1, 2, 10, 20] @@ -142,13 +141,13 @@ def test_upload_files_in_create_order(self): time.sleep(5) self.join_thread() - self.assertTrue(len(log_handler.upload_ignored) == 0, "Some files were ignored") - self.assertFalse(len(log_handler.upload_order) < len(exp_order), "Some files failed to upload") - self.assertFalse(len(log_handler.upload_order) > 
len(exp_order), "Some files were uploaded twice") + assert len(log_handler.upload_ignored) == 0, "Some files were ignored" + assert not len(log_handler.upload_order) < len(exp_order), "Some files failed to upload" + assert not len(log_handler.upload_order) > len(exp_order), "Some files were uploaded twice" for f_path in exp_order: - self.assertEqual(os.getxattr((Path(Paths.log_root()) / f_path).with_suffix(""), UPLOAD_ATTR_NAME), UPLOAD_ATTR_VALUE, "All files not uploaded") + assert os.getxattr((Path(Paths.log_root()) / f_path).with_suffix(""), UPLOAD_ATTR_NAME) == UPLOAD_ATTR_VALUE, "All files not uploaded" - self.assertTrue(log_handler.upload_order == exp_order, "Files uploaded in wrong order") + assert log_handler.upload_order == exp_order, "Files uploaded in wrong order" def test_no_upload_with_lock_file(self): self.start_thread() @@ -163,7 +162,7 @@ def test_no_upload_with_lock_file(self): for f_path in f_paths: fn = f_path.with_suffix(f_path.suffix.replace(".bz2", "")) uploaded = UPLOAD_ATTR_NAME in os.listxattr(fn) and os.getxattr(fn, UPLOAD_ATTR_NAME) == UPLOAD_ATTR_VALUE - self.assertFalse(uploaded, "File upload when locked") + assert not uploaded, "File upload when locked" def test_no_upload_with_xattr(self): self.gen_files(lock=False, xattr=UPLOAD_ATTR_VALUE) @@ -173,7 +172,7 @@ def test_no_upload_with_xattr(self): time.sleep(5) self.join_thread() - self.assertEqual(len(log_handler.upload_order), 0, "File uploaded again") + assert len(log_handler.upload_order) == 0, "File uploaded again" def test_clear_locks_on_startup(self): f_paths = self.gen_files(lock=True, boot=False) @@ -183,8 +182,4 @@ def test_clear_locks_on_startup(self): for f_path in f_paths: lock_path = f_path.with_suffix(f_path.suffix + ".lock") - self.assertFalse(lock_path.is_file(), "File lock not cleared on startup") - - -if __name__ == "__main__": - unittest.main() + assert not lock_path.is_file(), "File lock not cleared on startup" diff --git a/system/qcomgpsd/tests/test_qcomgpsd.py b/system/qcomgpsd/tests/test_qcomgpsd.py index 6c93f7dd9382fb..d47ea5d634d1e0 100755 --- a/system/qcomgpsd/tests/test_qcomgpsd.py +++ b/system/qcomgpsd/tests/test_qcomgpsd.py @@ -4,7 +4,6 @@ import json import time import datetime -import unittest import subprocess import cereal.messaging as messaging @@ -15,24 +14,24 @@ @pytest.mark.tici -class TestRawgpsd(unittest.TestCase): +class TestRawgpsd: @classmethod - def setUpClass(cls): + def setup_class(cls): os.system("sudo systemctl start systemd-resolved") os.system("sudo systemctl restart ModemManager lte") wait_for_modem() @classmethod - def tearDownClass(cls): + def teardown_class(cls): managed_processes['qcomgpsd'].stop() os.system("sudo systemctl restart systemd-resolved") os.system("sudo systemctl restart ModemManager lte") - def setUp(self): + def setup_method(self): at_cmd("AT+QGPSDEL=0") self.sm = messaging.SubMaster(['qcomGnss', 'gpsLocation', 'gnssMeasurements']) - def tearDown(self): + def teardown_method(self): managed_processes['qcomgpsd'].stop() os.system("sudo systemctl restart systemd-resolved") @@ -57,18 +56,18 @@ def test_wait_for_modem(self): os.system("sudo systemctl restart ModemManager") assert self._wait_for_output(30) - def test_startup_time(self): + def test_startup_time(self, subtests): for internet in (True, False): if not internet: os.system("sudo systemctl stop systemd-resolved") - with self.subTest(internet=internet): + with subtests.test(internet=internet): managed_processes['qcomgpsd'].start() assert self._wait_for_output(7) 
managed_processes['qcomgpsd'].stop() - def test_turns_off_gnss(self): + def test_turns_off_gnss(self, subtests): for s in (0.1, 1, 5): - with self.subTest(runtime=s): + with subtests.test(runtime=s): managed_processes['qcomgpsd'].start() time.sleep(s) managed_processes['qcomgpsd'].stop() @@ -87,7 +86,7 @@ def check_assistance(self, should_be_loaded): if should_be_loaded: assert valid_duration == "10080" # should be max time injected_time = datetime.datetime.strptime(injected_time_str.replace("\"", ""), "%Y/%m/%d,%H:%M:%S") - self.assertLess(abs((datetime.datetime.utcnow() - injected_time).total_seconds()), 60*60*12) + assert abs((datetime.datetime.utcnow() - injected_time).total_seconds()) < 60*60*12 else: valid_duration, injected_time_str = out.split(",", 1) injected_time_str = injected_time_str.replace('\"', '').replace('\'', '') @@ -119,6 +118,3 @@ def test_late_assistance_loading(self): time.sleep(15) managed_processes['qcomgpsd'].stop() self.check_assistance(True) - -if __name__ == "__main__": - unittest.main(failfast=True) diff --git a/system/sensord/tests/test_sensord.py index 3075c8a343bd86..1b3b78da88aa97 100755 --- a/system/sensord/tests/test_sensord.py +++ b/system/sensord/tests/test_sensord.py @@ -2,7 +2,6 @@ import os import pytest import time -import unittest import numpy as np from collections import namedtuple, defaultdict @@ -99,9 +98,9 @@ def read_sensor_events(duration_sec): return {k: v for k, v in events.items() if len(v) > 0} @pytest.mark.tici -class TestSensord(unittest.TestCase): +class TestSensord: @classmethod - def setUpClass(cls): + def setup_class(cls): # enable LSM self test os.environ["LSM_SELF_TEST"] = "1" @@ -119,10 +118,10 @@ def setUpClass(cls): managed_processes["sensord"].stop() @classmethod - def tearDownClass(cls): + def teardown_class(cls): managed_processes["sensord"].stop() - def tearDown(self): + def teardown_method(self): managed_processes["sensord"].stop() def test_sensors_present(self): @@ -133,9 +132,9 @@ def test_sensors_present(self): m = getattr(measurement, measurement.which()) seen.add((str(m.source), m.which())) - self.assertIn(seen, SENSOR_CONFIGURATIONS) + assert seen in SENSOR_CONFIGURATIONS - def test_lsm6ds3_timing(self): + def test_lsm6ds3_timing(self, subtests): # verify measurements are sampled and published at 104Hz sensor_t = { @@ -152,7 +151,7 @@ sensor_t[m.sensor].append(m.timestamp) for s, vals in sensor_t.items(): - with self.subTest(sensor=s): + with subtests.test(sensor=s): assert len(vals) > 0 tdiffs = np.diff(vals) / 1e6 # millis @@ -166,9 +165,9 @@ stddev = np.std(tdiffs) assert stddev < 2.0, f"Standard-dev too big {stddev}" - def test_sensor_frequency(self): + def test_sensor_frequency(self, subtests): for s, msgs in self.events.items(): - with self.subTest(sensor=s): + with subtests.test(sensor=s): freq = len(msgs) / self.sample_secs ef = SERVICE_LIST[s].frequency assert ef*0.85 <= freq <= ef*1.15 @@ -246,6 +245,3 @@ def test_sensor_verify_no_interrupts_after_stop(self): state_two = get_irq_count(self.sensord_irq) assert state_one == state_two, "Interrupts received after sensord stop!" 
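The subtests argument threaded through these tici tests is the fixture from the pytest-subtests plugin (assumed installed in this environment); each subtests.test(...) block gets its own pass/fail report without aborting the rest of the test, matching unittest's self.subTest. A minimal sketch with made-up sensor rates:

    # Hedged sketch of the pytest-subtests fixture; sensors and rates are illustrative.
    def test_sensor_rates(subtests):
        rates = {"accelerometer": 104.0, "gyroscope": 104.0}
        for sensor, rate in rates.items():
            with subtests.test(sensor=sensor):
                # a failure here is reported per sensor, and the loop keeps going
                assert rate > 100.0
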
- -if __name__ == "__main__": - unittest.main() diff --git a/system/tests/test_logmessaged.py b/system/tests/test_logmessaged.py index d27dae46adb09b..6d59bdcb086ec7 100755 --- a/system/tests/test_logmessaged.py +++ b/system/tests/test_logmessaged.py @@ -2,7 +2,6 @@ import glob import os import time -import unittest import cereal.messaging as messaging from openpilot.selfdrive.manager.process_config import managed_processes @@ -10,8 +9,8 @@ from openpilot.common.swaglog import cloudlog, ipchandler -class TestLogmessaged(unittest.TestCase): - def setUp(self): +class TestLogmessaged: + def setup_method(self): # clear the IPC buffer in case some other tests used cloudlog and filled it ipchandler.close() ipchandler.connect() @@ -25,7 +24,7 @@ def setUp(self): messaging.drain_sock(self.sock) messaging.drain_sock(self.error_sock) - def tearDown(self): + def teardown_method(self): del self.sock del self.error_sock managed_processes['logmessaged'].stop(block=True) @@ -55,6 +54,3 @@ def test_big_log(self): logsize = sum([os.path.getsize(f) for f in self._get_log_files()]) assert (n*len(msg)) < logsize < (n*(len(msg)+1024)) - -if __name__ == "__main__": - unittest.main() diff --git a/system/ubloxd/tests/test_pigeond.py b/system/ubloxd/tests/test_pigeond.py index 742e20bb903d20..a24414466ad236 100755 --- a/system/ubloxd/tests/test_pigeond.py +++ b/system/ubloxd/tests/test_pigeond.py @@ -1,7 +1,6 @@ #!/usr/bin/env python3 import pytest import time -import unittest import cereal.messaging as messaging from cereal.services import SERVICE_LIST @@ -13,9 +12,9 @@ # TODO: test TTFF when we have good A-GNSS @pytest.mark.tici -class TestPigeond(unittest.TestCase): +class TestPigeond: - def tearDown(self): + def teardown_method(self): managed_processes['pigeond'].stop() @with_processes(['pigeond']) @@ -54,7 +53,3 @@ def test_turns_off_ublox(self): assert gpio_read(GPIO.UBLOX_RST_N) == 0 assert gpio_read(GPIO.GNSS_PWR_EN) == 0 - - -if __name__ == "__main__": - unittest.main() diff --git a/system/updated/casync/tests/test_casync.py b/system/updated/casync/tests/test_casync.py index 34427d5625cfa9..80c5d2705c341d 100755 --- a/system/updated/casync/tests/test_casync.py +++ b/system/updated/casync/tests/test_casync.py @@ -1,7 +1,7 @@ #!/usr/bin/env python3 +import pytest import os import pathlib -import unittest import tempfile import subprocess @@ -14,9 +14,9 @@ LOOPBACK = os.environ.get('LOOPBACK', None) -class TestCasync(unittest.TestCase): +class TestCasync: @classmethod - def setUpClass(cls): + def setup_class(cls): cls.tmpdir = tempfile.TemporaryDirectory() # Build example contents @@ -43,7 +43,7 @@ def setUpClass(cls): # Ensure we have chunk reuse assert len(hashes) > len(set(hashes)) - def setUp(self): + def setup_method(self): # Clear target_lo if LOOPBACK is not None: self.target_lo = LOOPBACK @@ -53,7 +53,7 @@ def setUp(self): self.target_fn = os.path.join(self.tmpdir.name, next(tempfile._get_candidate_names())) self.seed_fn = os.path.join(self.tmpdir.name, next(tempfile._get_candidate_names())) - def tearDown(self): + def teardown_method(self): for fn in [self.target_fn, self.seed_fn]: try: os.unlink(fn) @@ -67,9 +67,9 @@ def test_simple_extract(self): stats = casync.extract(target, sources, self.target_fn) with open(self.target_fn, 'rb') as target_f: - self.assertEqual(target_f.read(), self.contents) + assert target_f.read() == self.contents - self.assertEqual(stats['remote'], len(self.contents)) + assert stats['remote'] == len(self.contents) def test_seed(self): target = 
casync.parse_caibx(self.manifest_fn) @@ -83,10 +83,10 @@ def test_seed(self): stats = casync.extract(target, sources, self.target_fn) with open(self.target_fn, 'rb') as target_f: - self.assertEqual(target_f.read(), self.contents) + assert target_f.read() == self.contents - self.assertGreater(stats['seed'], 0) - self.assertLess(stats['remote'], len(self.contents)) + assert stats['seed'] > 0 + assert stats['remote'] < len(self.contents) def test_already_done(self): """Test that an already flashed target doesn't download any chunks""" @@ -101,9 +101,9 @@ def test_already_done(self): stats = casync.extract(target, sources, self.target_fn) with open(self.target_fn, 'rb') as f: - self.assertEqual(f.read(), self.contents) + assert f.read() == self.contents - self.assertEqual(stats['target'], len(self.contents)) + assert stats['target'] == len(self.contents) def test_chunk_reuse(self): """Test that chunks that are reused are only downloaded once""" @@ -119,11 +119,11 @@ def test_chunk_reuse(self): stats = casync.extract(target, sources, self.target_fn) with open(self.target_fn, 'rb') as f: - self.assertEqual(f.read(), self.contents) + assert f.read() == self.contents - self.assertLess(stats['remote'], len(self.contents)) + assert stats['remote'] < len(self.contents) - @unittest.skipUnless(LOOPBACK, "requires loopback device") + @pytest.mark.skipif(not LOOPBACK, reason="requires loopback device") def test_lo_simple_extract(self): target = casync.parse_caibx(self.manifest_fn) sources = [('remote', casync.RemoteChunkReader(self.store_fn), casync.build_chunk_dict(target))] @@ -131,11 +131,11 @@ def test_lo_simple_extract(self): stats = casync.extract(target, sources, self.target_lo) with open(self.target_lo, 'rb') as target_f: - self.assertEqual(target_f.read(len(self.contents)), self.contents) + assert target_f.read(len(self.contents)) == self.contents - self.assertEqual(stats['remote'], len(self.contents)) + assert stats['remote'] == len(self.contents) - @unittest.skipUnless(LOOPBACK, "requires loopback device") + @pytest.mark.skipif(not LOOPBACK, reason="requires loopback device") def test_lo_chunk_reuse(self): """Test that chunks that are reused are only downloaded once""" target = casync.parse_caibx(self.manifest_fn) @@ -146,12 +146,12 @@ def test_lo_chunk_reuse(self): stats = casync.extract(target, sources, self.target_lo) with open(self.target_lo, 'rb') as f: - self.assertEqual(f.read(len(self.contents)), self.contents) + assert f.read(len(self.contents)) == self.contents - self.assertLess(stats['remote'], len(self.contents)) + assert stats['remote'] < len(self.contents) -class TestCasyncDirectory(unittest.TestCase): +class TestCasyncDirectory: """Tests extracting a directory stored as a casync tar archive""" NUM_FILES = 16 @@ -174,7 +174,7 @@ def setup_cache(cls, directory, files=None): os.symlink(f"file_{i}.txt", os.path.join(directory, f"link_{i}.txt")) @classmethod - def setUpClass(cls): + def setup_class(cls): cls.tmpdir = tempfile.TemporaryDirectory() # Create casync files @@ -190,16 +190,16 @@ def setUpClass(cls): subprocess.check_output(["casync", "make", "--compression=xz", "--store", cls.store_fn, cls.manifest_fn, cls.orig_fn]) @classmethod - def tearDownClass(cls): + def teardown_class(cls): cls.tmpdir.cleanup() cls.directory_to_extract.cleanup() - def setUp(self): + def setup_method(self): self.cache_dir = tempfile.TemporaryDirectory() self.working_dir = tempfile.TemporaryDirectory() self.out_dir = tempfile.TemporaryDirectory() - def tearDown(self): + def teardown_method(self): 
self.cache_dir.cleanup() self.working_dir.cleanup() self.out_dir.cleanup() @@ -216,32 +216,32 @@ def run_test(self): stats = casync.extract_directory(target, sources, pathlib.Path(self.out_dir.name), tmp_filename) with open(os.path.join(self.out_dir.name, "file_0.txt"), "rb") as f: - self.assertEqual(f.read(), self.contents) + assert f.read() == self.contents with open(os.path.join(self.out_dir.name, "link_0.txt"), "rb") as f: - self.assertEqual(f.read(), self.contents) - self.assertEqual(os.readlink(os.path.join(self.out_dir.name, "link_0.txt")), "file_0.txt") + assert f.read() == self.contents + assert os.readlink(os.path.join(self.out_dir.name, "link_0.txt")) == "file_0.txt" return stats def test_no_cache(self): self.setup_cache(self.cache_dir.name, []) stats = self.run_test() - self.assertGreater(stats['remote'], 0) - self.assertEqual(stats['cache'], 0) + assert stats['remote'] > 0 + assert stats['cache'] == 0 def test_full_cache(self): self.setup_cache(self.cache_dir.name, range(self.NUM_FILES)) stats = self.run_test() - self.assertEqual(stats['remote'], 0) - self.assertGreater(stats['cache'], 0) + assert stats['remote'] == 0 + assert stats['cache'] > 0 def test_one_file_cache(self): self.setup_cache(self.cache_dir.name, range(1)) stats = self.run_test() - self.assertGreater(stats['remote'], 0) - self.assertGreater(stats['cache'], 0) - self.assertLess(stats['cache'], stats['remote']) + assert stats['remote'] > 0 + assert stats['cache'] > 0 + assert stats['cache'] < stats['remote'] def test_one_file_incorrect_cache(self): self.setup_cache(self.cache_dir.name, range(self.NUM_FILES)) @@ -249,19 +249,15 @@ def test_one_file_incorrect_cache(self): f.write(b"1234") stats = self.run_test() - self.assertGreater(stats['remote'], 0) - self.assertGreater(stats['cache'], 0) - self.assertGreater(stats['cache'], stats['remote']) + assert stats['remote'] > 0 + assert stats['cache'] > 0 + assert stats['cache'] > stats['remote'] def test_one_file_missing_cache(self): self.setup_cache(self.cache_dir.name, range(self.NUM_FILES)) os.unlink(os.path.join(self.cache_dir.name, "file_12.txt")) stats = self.run_test() - self.assertGreater(stats['remote'], 0) - self.assertGreater(stats['cache'], 0) - self.assertGreater(stats['cache'], stats['remote']) - - -if __name__ == "__main__": - unittest.main() + assert stats['remote'] > 0 + assert stats['cache'] > 0 + assert stats['cache'] > stats['remote'] diff --git a/system/webrtc/tests/test_stream_session.py b/system/webrtc/tests/test_stream_session.py index 2173c3806b1c90..d8defab13f071d 100755 --- a/system/webrtc/tests/test_stream_session.py +++ b/system/webrtc/tests/test_stream_session.py @@ -1,7 +1,5 @@ #!/usr/bin/env python3 import asyncio -import unittest -from unittest.mock import Mock, MagicMock, patch import json # for aiortc and its dependencies import warnings @@ -20,15 +18,15 @@ from openpilot.common.realtime import DT_DMON -class TestStreamSession(unittest.TestCase): - def setUp(self): +class TestStreamSession: + def setup_method(self): self.loop = asyncio.new_event_loop() - def tearDown(self): + def teardown_method(self): self.loop.stop() self.loop.close() - def test_outgoing_proxy(self): + def test_outgoing_proxy(self, mocker): test_msg = log.Event.new_message() test_msg.logMonoTime = 123 test_msg.valid = True @@ -36,27 +34,27 @@ def test_outgoing_proxy(self): expected_dict = {"type": "customReservedRawData0", "logMonoTime": 123, "valid": True, "data": "test"} expected_json = json.dumps(expected_dict).encode() - channel = Mock(spec=RTCDataChannel) + 
channel = mocker.Mock(spec=RTCDataChannel) mocked_submaster = messaging.SubMaster(["customReservedRawData0"]) def mocked_update(t): mocked_submaster.update_msgs(0, [test_msg]) - with patch.object(messaging.SubMaster, "update", side_effect=mocked_update): - proxy = CerealOutgoingMessageProxy(mocked_submaster) - proxy.add_channel(channel) + mocker.patch.object(messaging.SubMaster, "update", side_effect=mocked_update) + proxy = CerealOutgoingMessageProxy(mocked_submaster) + proxy.add_channel(channel) - proxy.update() + proxy.update() - channel.send.assert_called_once_with(expected_json) + channel.send.assert_called_once_with(expected_json) - def test_incoming_proxy(self): + def test_incoming_proxy(self, mocker): tested_msgs = [ {"type": "customReservedRawData0", "data": "test"}, # primitive {"type": "can", "data": [{"address": 0, "busTime": 0, "dat": "", "src": 0}]}, # list {"type": "testJoystick", "data": {"axes": [0, 0], "buttons": [False]}}, # dict ] - mocked_pubmaster = MagicMock(spec=messaging.PubMaster) + mocked_pubmaster = mocker.MagicMock(spec=messaging.PubMaster) proxy = CerealIncomingMessageProxy(mocked_pubmaster) @@ -65,44 +63,40 @@ def test_incoming_proxy(self): mocked_pubmaster.send.assert_called_once() mt, md = mocked_pubmaster.send.call_args.args - self.assertEqual(mt, msg["type"]) - self.assertIsInstance(md, capnp._DynamicStructBuilder) - self.assertTrue(hasattr(md, msg["type"])) + assert mt == msg["type"] + assert isinstance(md, capnp._DynamicStructBuilder) + assert hasattr(md, msg["type"]) mocked_pubmaster.reset_mock() - def test_livestream_track(self): + def test_livestream_track(self, mocker): fake_msg = messaging.new_message("livestreamDriverEncodeData") config = {"receive.return_value": fake_msg.to_bytes()} - with patch("cereal.messaging.SubSocket", spec=True, **config): - track = LiveStreamVideoStreamTrack("driver") + mocker.patch("cereal.messaging.SubSocket", spec=True, **config) + track = LiveStreamVideoStreamTrack("driver") - self.assertTrue(track.id.startswith("driver")) - self.assertEqual(track.codec_preference(), "H264") + assert track.id.startswith("driver") + assert track.codec_preference() == "H264" - for i in range(5): - packet = self.loop.run_until_complete(track.recv()) - self.assertEqual(packet.time_base, VIDEO_TIME_BASE) - self.assertEqual(packet.pts, int(i * DT_DMON * VIDEO_CLOCK_RATE)) - self.assertEqual(packet.size, 0) + for i in range(5): + packet = self.loop.run_until_complete(track.recv()) + assert packet.time_base == VIDEO_TIME_BASE + assert packet.pts == int(i * DT_DMON * VIDEO_CLOCK_RATE) + assert packet.size == 0 - def test_input_audio_track(self): + def test_input_audio_track(self, mocker): packet_time, rate = 0.02, 16000 sample_count = int(packet_time * rate) - mocked_stream = MagicMock(spec=pyaudio.Stream) + mocked_stream = mocker.MagicMock(spec=pyaudio.Stream) mocked_stream.read.return_value = b"\x00" * 2 * sample_count config = {"open.side_effect": lambda *args, **kwargs: mocked_stream} - with patch("pyaudio.PyAudio", spec=True, **config): - track = AudioInputStreamTrack(audio_format=pyaudio.paInt16, packet_time=packet_time, rate=rate) - - for i in range(5): - frame = self.loop.run_until_complete(track.recv()) - self.assertEqual(frame.rate, rate) - self.assertEqual(frame.samples, sample_count) - self.assertEqual(frame.pts, i * sample_count) - - -if __name__ == "__main__": - unittest.main() + mocker.patch("pyaudio.PyAudio", spec=True, **config) + track = AudioInputStreamTrack(audio_format=pyaudio.paInt16, packet_time=packet_time, rate=rate) + 
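The mocker argument used across these conversions comes from the pytest-mock plugin (assumed available); patches created through mocker.patch are undone automatically at teardown, which is why the nested with patch(...) blocks and the mock.patch.stopall() bookkeeping disappear. A minimal sketch against a standard-library target chosen purely for illustration:

    # Hedged sketch of the pytest-mock mocker fixture; the patch target is
    # os.path.exists only for demonstration.
    import os

    def test_mocker_auto_teardown(mocker):
        mocked = mocker.patch("os.path.exists", return_value=True)
        assert os.path.exists("/definitely/not/there")
        mocked.assert_called_once_with("/definitely/not/there")
        # no explicit cleanup: the patch is reverted when the test returns
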
+ for i in range(5): + frame = self.loop.run_until_complete(track.recv()) + assert frame.rate == rate + assert frame.samples == sample_count + assert frame.pts == i * sample_count diff --git a/system/webrtc/tests/test_webrtcd.py b/system/webrtc/tests/test_webrtcd.py index e5742dba070eb3..684c7cf35985fd 100755 --- a/system/webrtc/tests/test_webrtcd.py +++ b/system/webrtc/tests/test_webrtcd.py @@ -1,8 +1,7 @@ #!/usr/bin/env python +import pytest import asyncio import json -import unittest -from unittest.mock import MagicMock, AsyncMock # for aiortc and its dependencies import warnings warnings.filterwarnings("ignore", category=DeprecationWarning) @@ -20,19 +19,20 @@ (["testJoystick"], []), ([], []), ]) -class TestWebrtcdProc(unittest.IsolatedAsyncioTestCase): +@pytest.mark.asyncio +class TestWebrtcdProc(): async def assertCompletesWithTimeout(self, awaitable, timeout=1): try: async with asyncio.timeout(timeout): await awaitable except TimeoutError: - self.fail("Timeout while waiting for awaitable to complete") + pytest.fail("Timeout while waiting for awaitable to complete") - async def test_webrtcd(self): - mock_request = MagicMock() + async def test_webrtcd(self, mocker): + mock_request = mocker.MagicMock() async def connect(offer): body = {'sdp': offer.sdp, 'cameras': offer.video, 'bridge_services_in': self.in_services, 'bridge_services_out': self.out_services} - mock_request.json.side_effect = AsyncMock(return_value=body) + mock_request.json.side_effect = mocker.AsyncMock(return_value=body) response = await get_stream(mock_request) response_json = json.loads(response.text) return aiortc.RTCSessionDescription(**response_json) @@ -48,9 +48,9 @@ async def connect(offer): await self.assertCompletesWithTimeout(stream.start()) await self.assertCompletesWithTimeout(stream.wait_for_connection()) - self.assertTrue(stream.has_incoming_video_track("road")) - self.assertTrue(stream.has_incoming_audio_track()) - self.assertEqual(stream.has_messaging_channel(), len(self.in_services) > 0 or len(self.out_services) > 0) + assert stream.has_incoming_video_track("road") + assert stream.has_incoming_audio_track() + assert stream.has_messaging_channel() == (len(self.in_services) > 0 or len(self.out_services) > 0) video_track, audio_track = stream.get_incoming_video_track("road"), stream.get_incoming_audio_track() await self.assertCompletesWithTimeout(video_track.recv()) @@ -59,10 +59,6 @@ async def connect(offer): await self.assertCompletesWithTimeout(stream.stop()) # cleanup, very implementation specific, test may break if it changes - self.assertTrue(mock_request.app["streams"].__setitem__.called, "Implementation changed, please update this test") + assert mock_request.app["streams"].__setitem__.called, "Implementation changed, please update this test" _, session = mock_request.app["streams"].__setitem__.call_args.args await self.assertCompletesWithTimeout(session.post_run_cleanup()) - - -if __name__ == "__main__": - unittest.main() diff --git a/tools/cabana/videowidget.cc b/tools/cabana/videowidget.cc index cd412f72713e9e..8ce812282f9758 100644 --- a/tools/cabana/videowidget.cc +++ b/tools/cabana/videowidget.cc @@ -139,7 +139,7 @@ QWidget *VideoWidget::createCameraWidget() { QStackedLayout *stacked = new QStackedLayout(); stacked->setStackingMode(QStackedLayout::StackAll); - stacked->addWidget(cam_widget = new CameraWidget("camerad", VISION_STREAM_ROAD, false)); + stacked->addWidget(cam_widget = new CameraView("camerad", VISION_STREAM_ROAD, false)); cam_widget->setMinimumHeight(MIN_VIDEO_HEIGHT); 
cam_widget->setSizePolicy(QSizePolicy::Preferred, QSizePolicy::MinimumExpanding); stacked->addWidget(alert_label = new InfoLabel(this)); @@ -161,12 +161,13 @@ QWidget *VideoWidget::createCameraWidget() { return w; } -void VideoWidget::vipcAvailableStreamsUpdated(std::set<VisionStreamType> streams) { +void VideoWidget::vipcAvailableStreamsUpdated() { static const QString stream_names[] = { [VISION_STREAM_ROAD] = "Road camera", [VISION_STREAM_WIDE_ROAD] = "Wide road camera", [VISION_STREAM_DRIVER] = "Driver camera"}; + const auto &streams = cam_widget->availableStreams(); for (int i = 0; i < streams.size(); ++i) { if (camera_tab->count() <= i) { camera_tab->addTab(QString()); diff --git a/tools/cabana/videowidget.h index b2039e09a46324..ae095fd559d0c8 100644 --- a/tools/cabana/videowidget.h +++ b/tools/cabana/videowidget.h @@ -73,9 +73,9 @@ class VideoWidget : public QFrame { QWidget *createCameraWidget(); QHBoxLayout *createPlaybackController(); void loopPlaybackClicked(); - void vipcAvailableStreamsUpdated(std::set<VisionStreamType> streams); + void vipcAvailableStreamsUpdated(); - CameraWidget *cam_widget; + CameraView *cam_widget; double maximum_time = 0; QToolButton *time_btn = nullptr; ToolButton *seek_backward_btn = nullptr; diff --git a/tools/car_porting/test_car_model.py index 5f8294fd3c53c9..cf0be1a80a6448 100755 --- a/tools/car_porting/test_car_model.py +++ b/tools/car_porting/test_car_model.py @@ -1,7 +1,7 @@ #!/usr/bin/env python3 import argparse import sys -import unittest +import unittest # noqa: TID251 from openpilot.selfdrive.car.tests.routes import CarTestRoute from openpilot.selfdrive.car.tests.test_models import TestCarModel diff --git a/tools/install_ubuntu_dependencies.sh index 75d19141b68d49..70a7ff2ea785b8 100755 --- a/tools/install_ubuntu_dependencies.sh +++ b/tools/install_ubuntu_dependencies.sh @@ -77,8 +77,7 @@ function install_ubuntu_common_requirements() { libqt5x11extras5-dev \ libqt5opengl5-dev \ libreadline-dev \ - libdw1 \ - valgrind + libdw1 } # Install Ubuntu 24.04 LTS packages diff --git a/tools/lib/tests/test_caching.py index 5d3dfeba426c35..32f55df5d1d295 100755 --- a/tools/lib/tests/test_caching.py +++ b/tools/lib/tests/test_caching.py @@ -1,13 +1,11 @@ #!/usr/bin/env python3 -from functools import partial import http.server import os import shutil import socket -import unittest +import pytest -from parameterized import parameterized -from openpilot.selfdrive.test.helpers import with_http_server +from openpilot.selfdrive.test.helpers import http_server_context from openpilot.system.hardware.hw import Paths from openpilot.tools.lib.url_file import URLFile @@ -31,22 +29,23 @@ def do_HEAD(self): self.end_headers() -with_caching_server = partial(with_http_server, handler=CachingTestRequestHandler) +@pytest.fixture +def host(): + with http_server_context(handler=CachingTestRequestHandler) as (host, port): + yield f"http://{host}:{port}" +class TestFileDownload: -class TestFileDownload(unittest.TestCase): - - @with_caching_server def test_pipeline_defaults(self, host): # TODO: parameterize the defaults so we don't rely on hard-coded values in xx - self.assertEqual(URLFile.pool_manager().pools._maxsize, 10) # PoolManager num_pools param + assert URLFile.pool_manager().pools._maxsize == 10  # PoolManager num_pools param pool_manager_defaults = { "maxsize": 100, "socket_options": [(socket.SOL_SOCKET, socket.SO_KEEPALIVE, 1),], } for k, v in 
for k, v in pool_manager_defaults.items(): - self.assertEqual(URLFile.pool_manager().connection_pool_kw.get(k), v) + assert URLFile.pool_manager().connection_pool_kw.get(k) == v retry_defaults = { "total": 5, @@ -54,7 +53,7 @@ def test_pipeline_defaults(self, host): "status_forcelist": [409, 429, 503, 504], } for k, v in retry_defaults.items(): - self.assertEqual(getattr(URLFile.pool_manager().connection_pool_kw["retries"], k), v) + assert getattr(URLFile.pool_manager().connection_pool_kw["retries"], k) == v # ensure caching off by default and cache dir doesn't get created os.environ.pop("FILEREADER_CACHE", None) @@ -62,7 +61,7 @@ def test_pipeline_defaults(self, host): shutil.rmtree(Paths.download_cache_root()) URLFile(f"{host}/test.txt").get_length() URLFile(f"{host}/test.txt").read() - self.assertEqual(os.path.exists(Paths.download_cache_root()), False) + assert not os.path.exists(Paths.download_cache_root()) def compare_loads(self, url, start=0, length=None): """Compares range between cached and non cached version""" @@ -72,21 +71,21 @@ def compare_loads(self, url, start=0, length=None): file_cached.seek(start) file_downloaded.seek(start) - self.assertEqual(file_cached.get_length(), file_downloaded.get_length()) - self.assertLessEqual(length + start if length is not None else 0, file_downloaded.get_length()) + assert file_cached.get_length() == file_downloaded.get_length() + assert (length + start if length is not None else 0) <= file_downloaded.get_length() response_cached = file_cached.read(ll=length) response_downloaded = file_downloaded.read(ll=length) - self.assertEqual(response_cached, response_downloaded) + assert response_cached == response_downloaded # Now test with cache in place file_cached = URLFile(url, cache=True) file_cached.seek(start) response_cached = file_cached.read(ll=length) - self.assertEqual(file_cached.get_length(), file_downloaded.get_length()) - self.assertEqual(response_cached, response_downloaded) + assert file_cached.get_length() == file_downloaded.get_length() + assert response_cached == response_downloaded def test_small_file(self): # Make sure we don't force cache @@ -117,22 +116,16 @@ def test_large_file(self): self.compare_loads(large_file_url, length - 100, 100) self.compare_loads(large_file_url) - @parameterized.expand([(True, ), (False, )]) - @with_caching_server - def test_recover_from_missing_file(self, cache_enabled, host): + @pytest.mark.parametrize("cache_enabled", [True, False]) + def test_recover_from_missing_file(self, host, cache_enabled): os.environ["FILEREADER_CACHE"] = "1" if cache_enabled else "0" file_url = f"{host}/test.png" CachingTestRequestHandler.FILE_EXISTS = False length = URLFile(file_url).get_length() - self.assertEqual(length, -1) + assert length == -1 CachingTestRequestHandler.FILE_EXISTS = True length = URLFile(file_url).get_length() - self.assertEqual(length, 4) - - - -if __name__ == "__main__": - unittest.main() + assert length == 4 diff --git a/tools/lib/tests/test_comma_car_segments.py b/tools/lib/tests/test_comma_car_segments.py index 91bab943435f8f..b9a4def75fb5bf 100644 --- a/tools/lib/tests/test_comma_car_segments.py +++ b/tools/lib/tests/test_comma_car_segments.py @@ -1,5 +1,4 @@ import pytest -import unittest import requests from openpilot.selfdrive.car.fingerprints import MIGRATION from openpilot.tools.lib.comma_car_segments import get_comma_car_segments_database, get_url @@ -8,7 +7,7 @@ @pytest.mark.skip(reason="huggingface is flaky, run this test manually to check for issues") -class TestCommaCarSegments(unittest.TestCase):
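The recover-from-missing-file conversion just above also shows the parameterized.expand to pytest.mark.parametrize move; the signature reorders host and cache_enabled freely because pytest binds fixtures and parameters by name rather than by position. A small sketch with an illustrative fixture:

```python
import pytest

@pytest.fixture
def host():
    return "http://127.0.0.1:8000"  # illustrative stand-in for the server fixture

@pytest.mark.parametrize("cache_enabled", [True, False])
def test_bound_by_name(host, cache_enabled):
    # 'host' resolves to the fixture, 'cache_enabled' to the parametrized
    # values; swapping them in the signature would change nothing
    assert host.startswith("http://")
    assert isinstance(cache_enabled, bool)
```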
+class TestCommaCarSegments: def test_database(self): database = get_comma_car_segments_database() @@ -28,12 +27,8 @@ def test_download_segment(self): url = get_url(sr.route_name, sr.slice) resp = requests.get(url) - self.assertEqual(resp.status_code, 200) + assert resp.status_code == 200 lr = LogReader(url) CP = lr.first("carParams") - self.assertEqual(MIGRATION.get(CP.carFingerprint, CP.carFingerprint), fp) - - -if __name__ == "__main__": - unittest.main() + assert MIGRATION.get(CP.carFingerprint, CP.carFingerprint) == fp diff --git a/tools/lib/tests/test_logreader.py b/tools/lib/tests/test_logreader.py index fc72202b260cec..58d22a07efe67f 100755 --- a/tools/lib/tests/test_logreader.py +++ b/tools/lib/tests/test_logreader.py @@ -5,12 +5,10 @@ import shutil import tempfile import os -import unittest import pytest import requests from parameterized import parameterized -from unittest import mock from cereal import log as capnp_log from openpilot.tools.lib.logreader import LogIterable, LogReader, comma_api_source, parse_indirect, ReadMode, InternalUnavailableException @@ -28,24 +26,22 @@ def noop(segment: LogIterable): @contextlib.contextmanager -def setup_source_scenario(is_internal=False): - with ( - mock.patch("openpilot.tools.lib.logreader.internal_source") as internal_source_mock, - mock.patch("openpilot.tools.lib.logreader.openpilotci_source") as openpilotci_source_mock, - mock.patch("openpilot.tools.lib.logreader.comma_api_source") as comma_api_source_mock, - ): - if is_internal: - internal_source_mock.return_value = [QLOG_FILE] - else: - internal_source_mock.side_effect = InternalUnavailableException +def setup_source_scenario(mocker, is_internal=False): + internal_source_mock = mocker.patch("openpilot.tools.lib.logreader.internal_source") + openpilotci_source_mock = mocker.patch("openpilot.tools.lib.logreader.openpilotci_source") + comma_api_source_mock = mocker.patch("openpilot.tools.lib.logreader.comma_api_source") + if is_internal: + internal_source_mock.return_value = [QLOG_FILE] + else: + internal_source_mock.side_effect = InternalUnavailableException - openpilotci_source_mock.return_value = [None] - comma_api_source_mock.return_value = [QLOG_FILE] + openpilotci_source_mock.return_value = [None] + comma_api_source_mock.return_value = [QLOG_FILE] - yield + yield -class TestLogReader(unittest.TestCase): +class TestLogReader: @parameterized.expand([ (f"{TEST_ROUTE}", ALL_SEGS), (f"{TEST_ROUTE.replace('/', '|')}", ALL_SEGS), @@ -74,7 +70,7 @@ class TestLogReader(unittest.TestCase): def test_indirect_parsing(self, identifier, expected): parsed, _, _ = parse_indirect(identifier) sr = SegmentRange(parsed) - self.assertListEqual(list(sr.seg_idxs), expected, identifier) + assert list(sr.seg_idxs) == expected, identifier @parameterized.expand([ (f"{TEST_ROUTE}", f"{TEST_ROUTE}"), @@ -86,11 +82,11 @@ def test_indirect_parsing(self, identifier, expected): ]) def test_canonical_name(self, identifier, expected): sr = SegmentRange(identifier) - self.assertEqual(str(sr), expected) + assert str(sr) == expected - @parameterized.expand([(True,), (False,)]) - @mock.patch("openpilot.tools.lib.logreader.file_exists") - def test_direct_parsing(self, cache_enabled, file_exists_mock): + @pytest.mark.parametrize("cache_enabled", [True, False]) + def test_direct_parsing(self, mocker, cache_enabled): + file_exists_mock = mocker.patch("openpilot.tools.lib.logreader.file_exists") os.environ["FILEREADER_CACHE"] = "1" if cache_enabled else "0" qlog = tempfile.NamedTemporaryFile(mode='wb', delete=False) @@ 
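Note on setup_source_scenario in test_logreader.py: the helper now takes the mocker fixture instead of stacking mock.patch context managers. Since pytest-mock unwinds every mocker.patch at test teardown, the helper body reduces to a bare yield. A sketch of the pattern against an invented module attribute:

```python
import contextlib
import sys

CONSTANT = "real"  # invented stand-in for the patched log source

@contextlib.contextmanager
def scenario(mocker, value):
    # mocker.patch registers its own teardown with pytest-mock, so this
    # context manager needs no explicit cleanup on exit
    mocker.patch.object(sys.modules[__name__], "CONSTANT", value)
    yield

def test_scenario(mocker):
    with scenario(mocker, "fake"):
        assert sys.modules[__name__].CONSTANT == "fake"
```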
-100,13 +96,13 @@ def test_direct_parsing(self, cache_enabled, file_exists_mock): for f in [QLOG_FILE, qlog.name]: l = len(list(LogReader(f))) - self.assertGreater(l, 100) + assert l > 100 - with self.assertRaises(URLFileException) if not cache_enabled else self.assertRaises(AssertionError): + with pytest.raises(URLFileException) if not cache_enabled else pytest.raises(AssertionError): l = len(list(LogReader(QLOG_FILE.replace("/3/", "/200/")))) # file_exists should not be called for direct files - self.assertEqual(file_exists_mock.call_count, 0) + assert file_exists_mock.call_count == 0 @parameterized.expand([ (f"{TEST_ROUTE}///",), @@ -121,110 +117,110 @@ def test_direct_parsing(self, cache_enabled, file_exists_mock): (f"{TEST_ROUTE}--3a",), ]) def test_bad_ranges(self, segment_range): - with self.assertRaises(AssertionError): + with pytest.raises(AssertionError): _ = SegmentRange(segment_range).seg_idxs - @parameterized.expand([ + @pytest.mark.parametrize("segment_range, api_call", [ (f"{TEST_ROUTE}/0", False), (f"{TEST_ROUTE}/:2", False), (f"{TEST_ROUTE}/0:", True), (f"{TEST_ROUTE}/-1", True), (f"{TEST_ROUTE}", True), ]) - def test_slicing_api_call(self, segment_range, api_call): - with mock.patch("openpilot.tools.lib.route.get_max_seg_number_cached") as max_seg_mock: - max_seg_mock.return_value = NUM_SEGS - _ = SegmentRange(segment_range).seg_idxs - self.assertEqual(api_call, max_seg_mock.called) + def test_slicing_api_call(self, mocker, segment_range, api_call): + max_seg_mock = mocker.patch("openpilot.tools.lib.route.get_max_seg_number_cached") + max_seg_mock.return_value = NUM_SEGS + _ = SegmentRange(segment_range).seg_idxs + assert api_call == max_seg_mock.called @pytest.mark.slow def test_modes(self): qlog_len = len(list(LogReader(f"{TEST_ROUTE}/0", ReadMode.QLOG))) rlog_len = len(list(LogReader(f"{TEST_ROUTE}/0", ReadMode.RLOG))) - self.assertLess(qlog_len * 6, rlog_len) + assert qlog_len * 6 < rlog_len @pytest.mark.slow def test_modes_from_name(self): qlog_len = len(list(LogReader(f"{TEST_ROUTE}/0/q"))) rlog_len = len(list(LogReader(f"{TEST_ROUTE}/0/r"))) - self.assertLess(qlog_len * 6, rlog_len) + assert qlog_len * 6 < rlog_len @pytest.mark.slow def test_list(self): qlog_len = len(list(LogReader(f"{TEST_ROUTE}/0/q"))) qlog_len_2 = len(list(LogReader([f"{TEST_ROUTE}/0/q", f"{TEST_ROUTE}/0/q"]))) - self.assertEqual(qlog_len * 2, qlog_len_2) + assert qlog_len * 2 == qlog_len_2 @pytest.mark.slow - @mock.patch("openpilot.tools.lib.logreader._LogFileReader") - def test_multiple_iterations(self, init_mock): + def test_multiple_iterations(self, mocker): + init_mock = mocker.patch("openpilot.tools.lib.logreader._LogFileReader") lr = LogReader(f"{TEST_ROUTE}/0/q") qlog_len1 = len(list(lr)) qlog_len2 = len(list(lr)) # ensure we don't create multiple instances of _LogFileReader, which means downloading the files twice - self.assertEqual(init_mock.call_count, 1) + assert init_mock.call_count == 1 - self.assertEqual(qlog_len1, qlog_len2) + assert qlog_len1 == qlog_len2 @pytest.mark.slow def test_helpers(self): lr = LogReader(f"{TEST_ROUTE}/0/q") - self.assertEqual(lr.first("carParams").carFingerprint, "SUBARU OUTBACK 6TH GEN") - self.assertTrue(0 < len(list(lr.filter("carParams"))) < len(list(lr))) + assert lr.first("carParams").carFingerprint == "SUBARU OUTBACK 6TH GEN" + assert 0 < len(list(lr.filter("carParams"))) < len(list(lr)) @parameterized.expand([(True,), (False,)]) @pytest.mark.slow def test_run_across_segments(self, cache_enabled): os.environ["FILEREADER_CACHE"] = "1" if 
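test_direct_parsing above keeps its either-or expectation by choosing between two pytest.raises context managers with a conditional expression; this works because pytest.raises(...) returns the context-manager object before it is entered. A standalone sketch:

```python
import pytest

@pytest.mark.parametrize("strict", [True, False])
def test_conditional_raises(strict):
    # pick the expected exception per parameter, then enter the chosen manager
    expected = pytest.raises(ValueError) if strict else pytest.raises(KeyError)
    with expected:
        if strict:
            raise ValueError("strict mode")
        raise KeyError("lenient mode")
```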
cache_enabled else "0" lr = LogReader(f"{TEST_ROUTE}/0:4") - self.assertEqual(len(lr.run_across_segments(4, noop)), len(list(lr))) + assert len(lr.run_across_segments(4, noop)) == len(list(lr)) @pytest.mark.slow - def test_auto_mode(self): + def test_auto_mode(self, subtests, mocker): lr = LogReader(f"{TEST_ROUTE}/0/q") qlog_len = len(list(lr)) - with mock.patch("openpilot.tools.lib.route.Route.log_paths") as log_paths_mock: - log_paths_mock.return_value = [None] * NUM_SEGS - # Should fall back to qlogs since rlogs are not available - - with self.subTest("interactive_yes"): - with mock.patch("sys.stdin", new=io.StringIO("y\n")): - lr = LogReader(f"{TEST_ROUTE}/0", default_mode=ReadMode.AUTO_INTERACTIVE, default_source=comma_api_source) - log_len = len(list(lr)) - self.assertEqual(qlog_len, log_len) - - with self.subTest("interactive_no"): - with mock.patch("sys.stdin", new=io.StringIO("n\n")): - with self.assertRaises(AssertionError): - lr = LogReader(f"{TEST_ROUTE}/0", default_mode=ReadMode.AUTO_INTERACTIVE, default_source=comma_api_source) - - with self.subTest("non_interactive"): - lr = LogReader(f"{TEST_ROUTE}/0", default_mode=ReadMode.AUTO, default_source=comma_api_source) - log_len = len(list(lr)) - self.assertEqual(qlog_len, log_len) + log_paths_mock = mocker.patch("openpilot.tools.lib.route.Route.log_paths") + log_paths_mock.return_value = [None] * NUM_SEGS + # Should fall back to qlogs since rlogs are not available - @parameterized.expand([(True,), (False,)]) + with subtests.test("interactive_yes"): + mocker.patch("sys.stdin", new=io.StringIO("y\n")) + lr = LogReader(f"{TEST_ROUTE}/0", default_mode=ReadMode.AUTO_INTERACTIVE, default_source=comma_api_source) + log_len = len(list(lr)) + assert qlog_len == log_len + + with subtests.test("interactive_no"): + mocker.patch("sys.stdin", new=io.StringIO("n\n")) + with pytest.raises(AssertionError): + lr = LogReader(f"{TEST_ROUTE}/0", default_mode=ReadMode.AUTO_INTERACTIVE, default_source=comma_api_source) + + with subtests.test("non_interactive"): + lr = LogReader(f"{TEST_ROUTE}/0", default_mode=ReadMode.AUTO, default_source=comma_api_source) + log_len = len(list(lr)) + assert qlog_len == log_len + + @pytest.mark.parametrize("is_internal", [True, False]) @pytest.mark.slow - def test_auto_source_scenarios(self, is_internal): + def test_auto_source_scenarios(self, mocker, is_internal): lr = LogReader(QLOG_FILE) qlog_len = len(list(lr)) - with setup_source_scenario(is_internal=is_internal): + with setup_source_scenario(mocker, is_internal=is_internal): lr = LogReader(f"{TEST_ROUTE}/0/q") log_len = len(list(lr)) - self.assertEqual(qlog_len, log_len) + assert qlog_len == log_len @pytest.mark.slow def test_sort_by_time(self): msgs = list(LogReader(f"{TEST_ROUTE}/0/q")) - self.assertNotEqual(msgs, sorted(msgs, key=lambda m: m.logMonoTime)) + assert msgs != sorted(msgs, key=lambda m: m.logMonoTime) msgs = list(LogReader(f"{TEST_ROUTE}/0/q", sort_by_time=True)) - self.assertEqual(msgs, sorted(msgs, key=lambda m: m.logMonoTime)) + assert msgs == sorted(msgs, key=lambda m: m.logMonoTime) def test_only_union_types(self): with tempfile.NamedTemporaryFile() as qlog: @@ -234,7 +230,7 @@ def test_only_union_types(self): f.write(b"".join(capnp_log.Event.new_message().to_bytes() for _ in range(num_msgs))) msgs = list(LogReader(qlog.name)) - self.assertEqual(len(msgs), num_msgs) + assert len(msgs) == num_msgs [m.which() for m in msgs] # append non-union Event message @@ -246,15 +242,11 @@ def test_only_union_types(self): # ensure new message is added, but 
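test_auto_mode above swaps self.subTest for the subtests fixture from the pytest-subtests plugin; each sub-case is reported on its own and a failure does not stop the remaining iterations, matching unittest's semantics. A minimal sketch:

```python
def test_modes(subtests):
    cases = {"interactive_yes": True, "interactive_no": False}
    for name, expected in cases.items():
        # each sub-case gets its own pass/fail report, and later iterations
        # still run if an earlier one fails, like unittest's subTest
        with subtests.test(case=name):
            assert name.endswith("yes") == expected
```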
is not a union type msgs = list(LogReader(qlog.name)) - self.assertEqual(len(msgs), num_msgs + 1) - with self.assertRaises(capnp.KjException): + assert len(msgs) == num_msgs + 1 + with pytest.raises(capnp.KjException): [m.which() for m in msgs] # should not be added when only_union_types=True msgs = list(LogReader(qlog.name, only_union_types=True)) - self.assertEqual(len(msgs), num_msgs) + assert len(msgs) == num_msgs [m.which() for m in msgs] - - -if __name__ == "__main__": - unittest.main() diff --git a/tools/lib/tests/test_readers.py b/tools/lib/tests/test_readers.py index 1f24ae5c8e6f92..f92554872fa9fe 100755 --- a/tools/lib/tests/test_readers.py +++ b/tools/lib/tests/test_readers.py @@ -1,5 +1,5 @@ #!/usr/bin/env python -import unittest +import pytest import requests import tempfile @@ -9,16 +9,16 @@ from openpilot.tools.lib.logreader import LogReader -class TestReaders(unittest.TestCase): - @unittest.skip("skip for bandwidth reasons") +class TestReaders: + @pytest.mark.skip("skip for bandwidth reasons") def test_logreader(self): def _check_data(lr): hist = defaultdict(int) for l in lr: hist[l.which()] += 1 - self.assertEqual(hist['carControl'], 6000) - self.assertEqual(hist['logMessage'], 6857) + assert hist['carControl'] == 6000 + assert hist['logMessage'] == 6857 with tempfile.NamedTemporaryFile(suffix=".bz2") as fp: r = requests.get("https://github.com/commaai/comma2k19/blob/master/Example_1/b0c9d2329ad1606b%7C2018-08-02--08-34-47/40/raw_log.bz2?raw=true", timeout=10) @@ -31,15 +31,15 @@ def _check_data(lr): lr_url = LogReader("https://github.com/commaai/comma2k19/blob/master/Example_1/b0c9d2329ad1606b%7C2018-08-02--08-34-47/40/raw_log.bz2?raw=true") _check_data(lr_url) - @unittest.skip("skip for bandwidth reasons") + @pytest.mark.skip("skip for bandwidth reasons") def test_framereader(self): def _check_data(f): - self.assertEqual(f.frame_count, 1200) - self.assertEqual(f.w, 1164) - self.assertEqual(f.h, 874) + assert f.frame_count == 1200 + assert f.w == 1164 + assert f.h == 874 frame_first_30 = f.get(0, 30) - self.assertEqual(len(frame_first_30), 30) + assert len(frame_first_30) == 30 print(frame_first_30[15]) @@ -62,6 +62,3 @@ def _check_data(f): fr_url = FrameReader("https://github.com/commaai/comma2k19/blob/master/Example_1/b0c9d2329ad1606b%7C2018-08-02--08-34-47/40/video.hevc?raw=true") _check_data(fr_url) - -if __name__ == "__main__": - unittest.main() diff --git a/tools/lib/tests/test_route_library.py b/tools/lib/tests/test_route_library.py index 7977f17be21897..8f75fa19c0d0f0 100755 --- a/tools/lib/tests/test_route_library.py +++ b/tools/lib/tests/test_route_library.py @@ -1,10 +1,9 @@ #!/usr/bin/env python -import unittest from collections import namedtuple from openpilot.tools.lib.route import SegmentName -class TestRouteLibrary(unittest.TestCase): +class TestRouteLibrary: def test_segment_name_formats(self): Case = namedtuple('Case', ['input', 'expected_route', 'expected_segment_num', 'expected_data_dir']) @@ -21,12 +20,9 @@ def _validate(case): s = SegmentName(route_or_segment_name, allow_route_name=True) - self.assertEqual(str(s.route_name), case.expected_route) - self.assertEqual(s.segment_num, case.expected_segment_num) - self.assertEqual(s.data_dir, case.expected_data_dir) + assert str(s.route_name) == case.expected_route + assert s.segment_num == case.expected_segment_num + assert s.data_dir == case.expected_data_dir for case in cases: _validate(case) - -if __name__ == "__main__": - unittest.main() diff --git a/tools/mac_setup.sh b/tools/mac_setup.sh index 
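test_readers.py keeps its bandwidth-heavy tests disabled, trading @unittest.skip for @pytest.mark.skip, which needs no TestCase base class. A tiny sketch:

```python
import pytest

@pytest.mark.skip(reason="skip for bandwidth reasons")
def test_expensive_download():
    raise AssertionError("never runs; pytest reports it as skipped")

def test_cheap():
    assert True
```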
9ec2097ea48a34..d26ec3cfe2ab88 100755 --- a/tools/mac_setup.sh +++ b/tools/mac_setup.sh @@ -63,7 +63,6 @@ brew "pyenv" brew "pyenv-virtualenv" brew "qt@5" brew "zeromq" -brew "gcc@13" cask "gcc-arm-embedded" brew "portaudio" EOS diff --git a/tools/plotjuggler/test_plotjuggler.py b/tools/plotjuggler/test_plotjuggler.py index 17287fb8032873..8b811f484722af 100755 --- a/tools/plotjuggler/test_plotjuggler.py +++ b/tools/plotjuggler/test_plotjuggler.py @@ -4,7 +4,6 @@ import signal import subprocess import time -import unittest from openpilot.common.basedir import BASEDIR from openpilot.common.timeout import Timeout @@ -12,7 +11,7 @@ PJ_DIR = os.path.join(BASEDIR, "tools/plotjuggler") -class TestPlotJuggler(unittest.TestCase): +class TestPlotJuggler: def test_demo(self): install() @@ -28,13 +27,13 @@ def test_demo(self): # ensure plotjuggler didn't crash after exiting the plugin time.sleep(15) - self.assertEqual(p.poll(), None) + assert p.poll() is None os.killpg(os.getpgid(p.pid), signal.SIGTERM) - self.assertNotIn("Raw file read failed", output) + assert "Raw file read failed" not in output # TODO: also test that layouts successfully load - def test_layouts(self): + def test_layouts(self, subtests): bad_strings = ( # if a previously loaded file is defined, # PJ will throw a warning when loading the layout @@ -43,12 +42,8 @@ def test_layouts(self): ) for fn in glob.glob(os.path.join(PJ_DIR, "layouts/*")): name = os.path.basename(fn) - with self.subTest(layout=name): + with subtests.test(layout=name): with open(fn) as f: layout = f.read() violations = [s for s in bad_strings if s in layout] assert len(violations) == 0, f"These should be stripped out of the layout: {str(violations)}" - - -if __name__ == "__main__": - unittest.main() diff --git a/tools/sim/bridge/common.py b/tools/sim/bridge/common.py index ddd2d741310a58..275a9a7640e5fa 100644 --- a/tools/sim/bridge/common.py +++ b/tools/sim/bridge/common.py @@ -21,7 +21,7 @@ class QueueMessageType(Enum): START_STATUS = 0 CONTROL_COMMAND = 1 - TERMINATION_STATUS = 2 + TERMINATION_INFO = 2 CLOSE_STATUS = 3 def control_cmd_gen(cmd: str): diff --git a/tools/sim/bridge/metadrive/metadrive_world.py b/tools/sim/bridge/metadrive/metadrive_world.py index 34333ad7e9f790..e5b94ad6f8b971 100644 --- a/tools/sim/bridge/metadrive/metadrive_world.py +++ b/tools/sim/bridge/metadrive/metadrive_world.py @@ -70,7 +70,7 @@ def read_state(self): while self.simulation_state_recv.poll(0): md_state: metadrive_simulation_state = self.simulation_state_recv.recv() if md_state.done: - self.status_q.put(QueueMessage(QueueMessageType.TERMINATION_STATUS, md_state.done_info)) + self.status_q.put(QueueMessage(QueueMessageType.TERMINATION_INFO, md_state.done_info)) self.exit_event.set() def read_sensors(self, state: SimulatorState): @@ -91,7 +91,7 @@ def tick(self): def reset(self): self.should_reset = True - def close(self, reason): + def close(self, reason: str): self.status_q.put(QueueMessage(QueueMessageType.CLOSE_STATUS, reason)) self.exit_event.set() self.metadrive_process.join() diff --git a/tools/sim/lib/keyboard_ctrl.py b/tools/sim/lib/keyboard_ctrl.py index f39d6012135f10..0a17f0ee851086 100644 --- a/tools/sim/lib/keyboard_ctrl.py +++ b/tools/sim/lib/keyboard_ctrl.py @@ -61,35 +61,32 @@ def keyboard_poll_thread(q: 'Queue[QueueMessage]'): while True: c = getch() if c == '1': - message = control_cmd_gen("cruise_up") + q.put(control_cmd_gen("cruise_up")) elif c == '2': - message = control_cmd_gen("cruise_down") + q.put(control_cmd_gen("cruise_down")) elif c == '3': - 
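The keyboard_ctrl.py rewrite here enqueues each command directly in its branch instead of funneling every branch through a shared message variable; that removes the hazard of putting a stale or unset message when control flow skips an assignment. A condensed sketch of the shape (message types simplified):

```python
from enum import Enum
from queue import Queue

class QueueMessageType(Enum):
    CONTROL_COMMAND = 1

def control_cmd_gen(cmd: str):
    # illustrative stand-in for the tools/sim QueueMessage constructor
    return (QueueMessageType.CONTROL_COMMAND, cmd)

def poll_once(q: 'Queue[tuple]', c: str) -> bool:
    # q.put lives inside each branch, so no branch can fall through
    # to a trailing q.put(message) with a leftover value
    if c == 'w':
        q.put(control_cmd_gen("throttle_1.0"))
    elif c == 'q':
        q.put(control_cmd_gen("quit"))
        return False  # mirrors the 'break' in the real loop
    else:
        print("unknown key")
    return True
```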
message = control_cmd_gen("cruise_cancel") + q.put(control_cmd_gen("cruise_cancel")) elif c == 'w': - message = control_cmd_gen(f"throttle_{1.0}") + q.put(control_cmd_gen(f"throttle_{1.0}")) elif c == 'a': - message = control_cmd_gen(f"steer_{-0.15}") + q.put(control_cmd_gen(f"steer_{-0.15}")) elif c == 's': - message = control_cmd_gen(f"brake_{1.0}") + q.put(control_cmd_gen(f"brake_{1.0}")) elif c == 'd': - message = control_cmd_gen(f"steer_{0.15}") + q.put(control_cmd_gen(f"steer_{0.15}")) elif c == 'z': - message = control_cmd_gen("blinker_left") + q.put(control_cmd_gen("blinker_left")) elif c == 'x': - message = control_cmd_gen("blinker_right") + q.put(control_cmd_gen("blinker_right")) elif c == 'i': - message = control_cmd_gen("ignition") + q.put(control_cmd_gen("ignition")) elif c == 'r': - message = control_cmd_gen("reset") + q.put(control_cmd_gen("reset")) elif c == 'q': - message = control_cmd_gen("quit") - q.put(message) + q.put(control_cmd_gen("quit")) break else: print_keyboard_help() - continue - q.put(message) def test(q: 'Queue[str]') -> NoReturn: while True: @@ -98,7 +95,7 @@ def test(q: 'Queue[str]') -> NoReturn: if __name__ == '__main__': from multiprocessing import Process, Queue - q: Queue[QueueMessage] = Queue() + q: 'Queue[QueueMessage]' = Queue() p = Process(target=test, args=(q,)) p.daemon = True p.start() diff --git a/tools/sim/lib/manual_ctrl.py b/tools/sim/lib/manual_ctrl.py index dbd65a1ed4a090..972f7023bb9323 100755 --- a/tools/sim/lib/manual_ctrl.py +++ b/tools/sim/lib/manual_ctrl.py @@ -155,34 +155,33 @@ def wheel_poll_thread(q: 'Queue[str]') -> NoReturn: fvalue = value / 32767.0 axis_states[axis] = fvalue normalized = (1 - fvalue) * 50 - message = control_cmd_gen(f"throttle_{normalized:f}") + q.put(control_cmd_gen(f"throttle_{normalized:f}")) elif axis == "rz": # brake fvalue = value / 32767.0 axis_states[axis] = fvalue normalized = (1 - fvalue) * 50 - message = control_cmd_gen(f"brake_{normalized:f}") + q.put(control_cmd_gen(f"brake_{normalized:f}")) elif axis == "x": # steer angle fvalue = value / 32767.0 axis_states[axis] = fvalue normalized = fvalue - message = control_cmd_gen(f"steer_{normalized:f}") + q.put(control_cmd_gen(f"steer_{normalized:f}")) elif mtype & 0x01: # buttons if value == 1: # press down if number in [0, 19]: # X - message = control_cmd_gen("cruise_down") + q.put(control_cmd_gen("cruise_down")) elif number in [3, 18]: # triangle - message = control_cmd_gen("cruise_up") + q.put(control_cmd_gen("cruise_up")) elif number in [1, 6]: # square - message = control_cmd_gen("cruise_cancel") + q.put(control_cmd_gen("cruise_cancel")) elif number in [10, 21]: # R3 - message = control_cmd_gen("reverse_switch") - q.put(message) + q.put(control_cmd_gen("reverse_switch")) if __name__ == '__main__': from multiprocessing import Process, Queue diff --git a/tools/sim/tests/test_metadrive_bridge.py b/tools/sim/tests/test_metadrive_bridge.py index e0df7a7e58fe91..ed0172dc901d26 100755 --- a/tools/sim/tests/test_metadrive_bridge.py +++ b/tools/sim/tests/test_metadrive_bridge.py @@ -10,11 +10,9 @@ @pytest.mark.slow class TestMetaDriveBridge(TestSimBridgeBase): - TRACK_SIZE = 60 - @pytest.fixture(autouse=True) def setup_class(self, track_size): self.track_size = track_size def create_bridge(self): - return MetaDriveBridge(False, False, self.track_size, True) \ No newline at end of file + return MetaDriveBridge(False, False, self.track_size, True) diff --git a/tools/webcam/Dockerfile b/tools/webcam/Dockerfile deleted file mode 100644 index 
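One subtle change at the end of keyboard_ctrl.py above: the Queue[QueueMessage] annotation is now quoted. multiprocessing.Queue is a factory function bound to the default context, not a generic class, so subscripting it at runtime raises TypeError, while a string annotation is read only by type checkers. A quick demonstration:

```python
from multiprocessing import Queue

# Unquoted, the next line would fail at import time with
# "TypeError: 'method' object is not subscriptable":
# q: Queue[str] = Queue()
q: 'Queue[str]' = Queue()  # the string form is never evaluated at runtime

q.put("cruise_up")
assert q.get() == "cruise_up"
```

Adding `from __future__ import annotations` would defer evaluation the same way without the quotes.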
78a25e836d0e3b..00000000000000 --- a/tools/webcam/Dockerfile +++ /dev/null @@ -1,41 +0,0 @@ -FROM ghcr.io/commaai/openpilot-base:latest - -ENV PYTHONUNBUFFERED 1 -ENV PYTHONPATH /tmp/openpilot:${PYTHONPATH} - -# Install opencv -ENV OPENCV_VERSION '4.2.0' -ENV DEBIAN_FRONTEND=noninteractive -RUN apt-get update && apt-get install -y --no-install-recommends \ - libvtk6-dev \ - libdc1394-22-dev \ - libavcodec-dev \ - libavformat-dev \ - libswscale-dev \ - libtheora-dev \ - libvorbis-dev \ - libxvidcore-dev \ - libx264-dev \ - yasm \ - libopencore-amrnb-dev \ - libopencore-amrwb-dev \ - libv4l-dev \ - libxine2-dev \ - libtbb-dev \ - && rm -rf /var/lib/apt/lists/* && \ - - mkdir /tmp/opencv_build && \ - cd /tmp/opencv_build && \ - - curl -L -O https://github.com/opencv/opencv/archive/${OPENCV_VERSION}.tar.gz && \ - tar -xvf ${OPENCV_VERSION}.tar.gz && \ - mv opencv-${OPENCV_VERSION} OpenCV && \ - cd OpenCV && mkdir build && cd build && \ - cmake -DWITH_OPENGL=ON -DFORCE_VTK=ON -DWITH_TBB=ON -DWITH_GDAL=ON \ - -DWITH_XINE=ON -DENABLE_PRECOMPILED_HEADERS=OFF -DBUILD_TESTS=OFF \ - -DBUILD_PERF_TESTS=OFF -DBUILD_EXAMPLES=OFF -DBUILD_opencv_apps=OFF .. && \ - make -j8 && \ - make install && \ - ldconfig && \ - - cd / && rm -rf /tmp/* diff --git a/tools/webcam/README.md b/tools/webcam/README.md index 709e8514c7d578..76733b173d6007 100644 --- a/tools/webcam/README.md +++ b/tools/webcam/README.md @@ -14,10 +14,7 @@ cd ~ git clone https://github.com/commaai/openpilot.git ``` - Follow [this readme](https://github.com/commaai/openpilot/tree/master/tools) to install the requirements -- Add line "export PYTHONPATH=$HOME/openpilot" to your ~/.bashrc -- Install tensorflow 2.2 and nvidia drivers: nvidia-xxx/cuda10.0/cudnn7.6.5 - Install [OpenCL Driver](https://registrationcenter-download.intel.com/akdlm/irc_nas/vcp/15532/l_opencl_p_18.1.0.015.tgz) -- Install [OpenCV4](https://www.pyimagesearch.com/2018/08/15/how-to-install-opencv-4-on-ubuntu/) (ignore the Python part) ## Build openpilot for webcam ```