diff --git a/.github/workflows/selfdrive_tests.yaml b/.github/workflows/selfdrive_tests.yaml
index 2be83b16540e74..d3c8009bc23a37 100644
--- a/.github/workflows/selfdrive_tests.yaml
+++ b/.github/workflows/selfdrive_tests.yaml
@@ -134,24 +134,6 @@ jobs:
timeout-minutes: 4
run: ${{ env.RUN }} "unset PYTHONWARNINGS && pre-commit run --all && chmod -R 777 /tmp/pre-commit"
- valgrind:
- name: valgrind
- runs-on: ubuntu-latest
- steps:
- - uses: actions/checkout@v4
- with:
- submodules: true
- - uses: ./.github/workflows/setup-with-retry
- - name: Build openpilot
- run: ${{ env.RUN }} "scons -j$(nproc)"
- - name: Run valgrind
- timeout-minutes: 1
- run: |
- ${{ env.RUN }} "python selfdrive/test/test_valgrind_replay.py"
- - name: Print logs
- if: always()
- run: cat selfdrive/test/valgrind_logs.txt
-
unit_tests:
name: unit tests
runs-on: ${{ ((github.repository == 'commaai/openpilot') &&
@@ -200,7 +182,7 @@ jobs:
uses: actions/cache@v4
with:
path: .ci_cache/comma_download_cache
- key: proc-replay-${{ hashFiles('.github/workflows/selfdrive_tests.yaml', 'selfdrive/test/process_replay/ref_commit') }}
+ key: proc-replay-${{ hashFiles('.github/workflows/selfdrive_tests.yaml', 'selfdrive/test/process_replay/ref_commit', 'selfdrive/test/process_replay/test_regen.py') }}
- name: Build openpilot
run: |
${{ env.RUN }} "scons -j$(nproc)"
@@ -225,64 +207,18 @@ jobs:
if: ${{ failure() && steps.print-diff.outcome == 'success' && github.repository == 'commaai/openpilot' && env.AZURE_TOKEN != '' }}
run: |
${{ env.RUN }} "unset PYTHONWARNINGS && AZURE_TOKEN='$AZURE_TOKEN' python selfdrive/test/process_replay/test_processes.py -j$(nproc) --upload-only"
- - name: "Upload coverage to Codecov"
- uses: codecov/codecov-action@v4
- with:
- name: ${{ github.job }}
- env:
- CODECOV_TOKEN: ${{ secrets.CODECOV_TOKEN }}
-
- regen:
- name: regen
- runs-on: 'ubuntu-latest'
- steps:
- - uses: actions/checkout@v4
- with:
- submodules: true
- - uses: ./.github/workflows/setup-with-retry
- - name: Cache test routes
- id: dependency-cache
- uses: actions/cache@v4
- with:
- path: .ci_cache/comma_download_cache
- key: regen-${{ hashFiles('.github/workflows/selfdrive_tests.yaml', 'selfdrive/test/process_replay/test_regen.py') }}
- - name: Build base Docker image
- run: eval "$BUILD"
- - name: Build openpilot
- run: |
- ${{ env.RUN }} "scons -j$(nproc)"
- - name: Run regen
- timeout-minutes: 30
- run: |
- ${{ env.RUN }} "ONNXCPU=1 $PYTEST selfdrive/test/process_replay/test_regen.py && \
- chmod -R 777 /tmp/comma_download_cache"
-
- test_modeld:
- name: model tests
- runs-on: ubuntu-latest
- steps:
- - uses: actions/checkout@v4
- with:
- submodules: true
- - uses: ./.github/workflows/setup-with-retry
- - name: Build base Docker image
- run: eval "$BUILD"
- - name: Build openpilot
- run: |
- ${{ env.RUN }} "scons -j$(nproc)"
# PYTHONWARNINGS triggers a SyntaxError in onnxruntime
- name: Run model replay with ONNX
timeout-minutes: 4
run: |
${{ env.RUN }} "unset PYTHONWARNINGS && \
- ONNXCPU=1 NO_NAV=1 coverage run selfdrive/test/process_replay/model_replay.py && \
- coverage combine && \
- coverage xml"
- - name: Run unit tests
+ ONNXCPU=1 NO_NAV=1 coverage run selfdrive/test/process_replay/model_replay.py && \
+ coverage combine && coverage xml"
+ - name: Run regen
timeout-minutes: 4
run: |
- ${{ env.RUN }} "unset PYTHONWARNINGS && \
- $PYTEST selfdrive/modeld"
+ ${{ env.RUN }} "ONNXCPU=1 $PYTEST selfdrive/test/process_replay/test_regen.py && \
+ chmod -R 777 /tmp/comma_download_cache"
- name: "Upload coverage to Codecov"
uses: codecov/codecov-action@v4
with:
@@ -298,7 +234,7 @@ jobs:
strategy:
fail-fast: false
matrix:
- job: [0, 1, 2, 3, 4]
+ job: [0, 1]
steps:
- uses: actions/checkout@v4
with:
@@ -313,12 +249,12 @@ jobs:
- name: Build openpilot
run: ${{ env.RUN }} "scons -j$(nproc)"
- name: Test car models
- timeout-minutes: 10
+ timeout-minutes: 20
run: |
${{ env.RUN }} "$PYTEST selfdrive/car/tests/test_models.py && \
chmod -R 777 /tmp/comma_download_cache"
env:
- NUM_JOBS: 5
+ NUM_JOBS: 2
JOB_ID: ${{ matrix.job }}
- name: "Upload coverage to Codecov"
uses: codecov/codecov-action@v4
diff --git a/common/tests/test_file_helpers.py b/common/tests/test_file_helpers.py
index 1817f77cd2d472..a9977c2362d301 100644
--- a/common/tests/test_file_helpers.py
+++ b/common/tests/test_file_helpers.py
@@ -1,11 +1,10 @@
import os
-import unittest
from uuid import uuid4
from openpilot.common.file_helpers import atomic_write_in_dir
-class TestFileHelpers(unittest.TestCase):
+class TestFileHelpers:
def run_atomic_write_func(self, atomic_write_func):
path = f"/tmp/tmp{uuid4()}"
with atomic_write_func(path) as f:
@@ -13,12 +12,8 @@ def run_atomic_write_func(self, atomic_write_func):
assert not os.path.exists(path)
with open(path) as f:
- self.assertEqual(f.read(), "test")
+ assert f.read() == "test"
os.remove(path)
def test_atomic_write_in_dir(self):
self.run_atomic_write_func(atomic_write_in_dir)
-
-
-if __name__ == "__main__":
- unittest.main()
diff --git a/common/tests/test_numpy_fast.py b/common/tests/test_numpy_fast.py
index de7bb972e78190..aa53851db04981 100644
--- a/common/tests/test_numpy_fast.py
+++ b/common/tests/test_numpy_fast.py
@@ -1,10 +1,9 @@
import numpy as np
-import unittest
from openpilot.common.numpy_fast import interp
-class InterpTest(unittest.TestCase):
+class TestInterp:
def test_correctness_controls(self):
_A_CRUISE_MIN_BP = np.asarray([0., 5., 10., 20., 40.])
_A_CRUISE_MIN_V = np.asarray([-1.0, -.8, -.67, -.5, -.30])
@@ -20,7 +19,3 @@ def test_correctness_controls(self):
expected = np.interp(v_ego, _A_CRUISE_MIN_BP, _A_CRUISE_MIN_V)
actual = interp(v_ego, _A_CRUISE_MIN_BP, _A_CRUISE_MIN_V)
np.testing.assert_equal(actual, expected)
-
-
-if __name__ == "__main__":
- unittest.main()
diff --git a/common/tests/test_params.py b/common/tests/test_params.py
index 490ee122beaf53..16cbc45295d527 100644
--- a/common/tests/test_params.py
+++ b/common/tests/test_params.py
@@ -1,13 +1,13 @@
+import pytest
import os
import threading
import time
import uuid
-import unittest
from openpilot.common.params import Params, ParamKeyType, UnknownKeyName
-class TestParams(unittest.TestCase):
- def setUp(self):
+class TestParams:
+ def setup_method(self):
self.params = Params()
def test_params_put_and_get(self):
@@ -49,16 +49,16 @@ def _delayed_writer():
assert self.params.get("CarParams", True) == b"test"
def test_params_unknown_key_fails(self):
- with self.assertRaises(UnknownKeyName):
+ with pytest.raises(UnknownKeyName):
self.params.get("swag")
- with self.assertRaises(UnknownKeyName):
+ with pytest.raises(UnknownKeyName):
self.params.get_bool("swag")
- with self.assertRaises(UnknownKeyName):
+ with pytest.raises(UnknownKeyName):
self.params.put("swag", "abc")
- with self.assertRaises(UnknownKeyName):
+ with pytest.raises(UnknownKeyName):
self.params.put_bool("swag", True)
def test_remove_not_there(self):
@@ -68,19 +68,19 @@ def test_remove_not_there(self):
def test_get_bool(self):
self.params.remove("IsMetric")
- self.assertFalse(self.params.get_bool("IsMetric"))
+ assert not self.params.get_bool("IsMetric")
self.params.put_bool("IsMetric", True)
- self.assertTrue(self.params.get_bool("IsMetric"))
+ assert self.params.get_bool("IsMetric")
self.params.put_bool("IsMetric", False)
- self.assertFalse(self.params.get_bool("IsMetric"))
+ assert not self.params.get_bool("IsMetric")
self.params.put("IsMetric", "1")
- self.assertTrue(self.params.get_bool("IsMetric"))
+ assert self.params.get_bool("IsMetric")
self.params.put("IsMetric", "0")
- self.assertFalse(self.params.get_bool("IsMetric"))
+ assert not self.params.get_bool("IsMetric")
def test_put_non_blocking_with_get_block(self):
q = Params()
@@ -107,7 +107,3 @@ def test_params_all_keys(self):
assert len(keys) > 20
assert len(keys) == len(set(keys))
assert b"CarParams" in keys
-
-
-if __name__ == "__main__":
- unittest.main()
diff --git a/common/tests/test_simple_kalman.py b/common/tests/test_simple_kalman.py
index f641cd19e690a4..f4a967e58ad418 100644
--- a/common/tests/test_simple_kalman.py
+++ b/common/tests/test_simple_kalman.py
@@ -1,10 +1,8 @@
-import unittest
-
from openpilot.common.simple_kalman import KF1D
-class TestSimpleKalman(unittest.TestCase):
- def setUp(self):
+class TestSimpleKalman:
+ def setup_method(self):
dt = 0.01
x0_0 = 0.0
x1_0 = 0.0
@@ -24,12 +22,8 @@ def setUp(self):
def test_getter_setter(self):
self.kf.set_x([[1.0], [1.0]])
- self.assertEqual(self.kf.x, [[1.0], [1.0]])
+ assert self.kf.x == [[1.0], [1.0]]
def update_returns_state(self):
x = self.kf.update(100)
- self.assertEqual(x, self.kf.x)
-
-
-if __name__ == "__main__":
- unittest.main()
+ assert x == self.kf.x
diff --git a/common/transformations/tests/test_coordinates.py b/common/transformations/tests/test_coordinates.py
index 7ae79403bdd1e2..41076d9b3fb7fa 100755
--- a/common/transformations/tests/test_coordinates.py
+++ b/common/transformations/tests/test_coordinates.py
@@ -1,7 +1,6 @@
#!/usr/bin/env python3
import numpy as np
-import unittest
import openpilot.common.transformations.coordinates as coord
@@ -44,7 +43,7 @@
[ 78.56272609, 18.53100158, -43.25290759]])
-class TestNED(unittest.TestCase):
+class TestNED:
def test_small_distances(self):
start_geodetic = np.array([33.8042184, -117.888593, 0.0])
local_coord = coord.LocalCoord.from_geodetic(start_geodetic)
@@ -54,13 +53,13 @@ def test_small_distances(self):
west_geodetic = start_geodetic + [0, -0.0005, 0]
west_ned = local_coord.geodetic2ned(west_geodetic)
- self.assertLess(np.abs(west_ned[0]), 1e-3)
- self.assertLess(west_ned[1], 0)
+ assert np.abs(west_ned[0]) < 1e-3
+ assert west_ned[1] < 0
southwest_geodetic = start_geodetic + [-0.0005, -0.002, 0]
southwest_ned = local_coord.geodetic2ned(southwest_geodetic)
- self.assertLess(southwest_ned[0], 0)
- self.assertLess(southwest_ned[1], 0)
+ assert southwest_ned[0] < 0
+ assert southwest_ned[1] < 0
def test_ecef_geodetic(self):
# testing single
@@ -105,5 +104,3 @@ def test_ned_batch(self):
np.testing.assert_allclose(converter.ned2ecef(ned_offsets_batch),
ecef_positions_offset_batch,
rtol=1e-9, atol=1e-7)
-if __name__ == "__main__":
- unittest.main()
diff --git a/common/transformations/tests/test_orientation.py b/common/transformations/tests/test_orientation.py
index f77827d2f98eb6..695642774e2c3e 100755
--- a/common/transformations/tests/test_orientation.py
+++ b/common/transformations/tests/test_orientation.py
@@ -1,7 +1,6 @@
#!/usr/bin/env python3
import numpy as np
-import unittest
from openpilot.common.transformations.orientation import euler2quat, quat2euler, euler2rot, rot2euler, \
rot2quat, quat2rot, \
@@ -32,7 +31,7 @@
[ 2.50450101, 0.36304151, 0.33136365]])
-class TestOrientation(unittest.TestCase):
+class TestOrientation:
def test_quat_euler(self):
for i, eul in enumerate(eulers):
np.testing.assert_allclose(quats[i], euler2quat(eul), rtol=1e-7)
@@ -62,7 +61,3 @@ def test_euler_ned(self):
np.testing.assert_allclose(ned_eulers[i], ned_euler_from_ecef(ecef_positions[i], eulers[i]), rtol=1e-7)
#np.testing.assert_allclose(eulers[i], ecef_euler_from_ned(ecef_positions[i], ned_eulers[i]), rtol=1e-7)
# np.testing.assert_allclose(ned_eulers, ned_euler_from_ecef(ecef_positions, eulers), rtol=1e-7)
-
-
-if __name__ == "__main__":
- unittest.main()
diff --git a/docs/CARS.md b/docs/CARS.md
index de9320594aa4cb..4364f10b08da9f 100644
--- a/docs/CARS.md
+++ b/docs/CARS.md
@@ -4,7 +4,7 @@
A supported vehicle is one that just works when you install a comma device. All supported cars provide a better experience than any stock system. Supported vehicles reference the US market unless otherwise specified.
-# 286 Supported Cars
+# 287 Supported Cars
|Make|Model|Supported Package|ACC|No ACC accel below|No ALC below|Steering Torque|Resume from stop|
Hardware Needed
|Video|
|---|---|---|:---:|:---:|:---:|:---:|:---:|:---:|:---:|
@@ -120,6 +120,7 @@ A supported vehicle is one that just works when you install a comma device. All
|Hyundai|Tucson 2023-24[5](#footnotes)|All|openpilot available[1](#footnotes)|0 mph|0 mph|[![star](assets/icon-star-full.svg)](##)|[![star](assets/icon-star-full.svg)](##)|Parts
- 1 Hyundai N connector
- 1 RJ45 cable (7 ft)
- 1 comma 3X
- 1 comma power v2
- 1 harness box
- 1 mount
- 1 right angle OBD-C cable (1.5 ft)
Buy Here ||
|Hyundai|Tucson Diesel 2019|Smart Cruise Control (SCC)|openpilot available[1](#footnotes)|0 mph|0 mph|[![star](assets/icon-star-full.svg)](##)|[![star](assets/icon-star-full.svg)](##)|Parts
- 1 Hyundai L connector
- 1 RJ45 cable (7 ft)
- 1 comma 3X
- 1 comma power v2
- 1 harness box
- 1 mount
- 1 right angle OBD-C cable (1.5 ft)
Buy Here ||
|Hyundai|Tucson Hybrid 2022-24[5](#footnotes)|All|openpilot available[1](#footnotes)|0 mph|0 mph|[![star](assets/icon-star-full.svg)](##)|[![star](assets/icon-star-full.svg)](##)|Parts
- 1 Hyundai N connector
- 1 RJ45 cable (7 ft)
- 1 comma 3X
- 1 comma power v2
- 1 harness box
- 1 mount
- 1 right angle OBD-C cable (1.5 ft)
Buy Here ||
+|Hyundai|Tucson Plug-in Hybrid 2024[5](#footnotes)|All|openpilot available[1](#footnotes)|0 mph|0 mph|[![star](assets/icon-star-full.svg)](##)|[![star](assets/icon-star-full.svg)](##)|Parts
- 1 Hyundai N connector
- 1 RJ45 cable (7 ft)
- 1 comma 3X
- 1 comma power v2
- 1 harness box
- 1 mount
- 1 right angle OBD-C cable (1.5 ft)
Buy Here ||
|Hyundai|Veloster 2019-20|Smart Cruise Control (SCC)|Stock|5 mph|0 mph|[![star](assets/icon-star-full.svg)](##)|[![star](assets/icon-star-empty.svg)](##)|Parts
- 1 Hyundai E connector
- 1 RJ45 cable (7 ft)
- 1 comma 3X
- 1 comma power v2
- 1 harness box
- 1 mount
- 1 right angle OBD-C cable (1.5 ft)
Buy Here ||
|Jeep|Grand Cherokee 2016-18|Adaptive Cruise Control (ACC)|Stock|0 mph|9 mph|[![star](assets/icon-star-full.svg)](##)|[![star](assets/icon-star-full.svg)](##)|Parts
- 1 FCA connector
- 1 RJ45 cable (7 ft)
- 1 comma 3X
- 1 comma power v2
- 1 harness box
- 1 mount
- 1 right angle OBD-C cable (1.5 ft)
Buy Here |
|
|Jeep|Grand Cherokee 2019-21|Adaptive Cruise Control (ACC)|Stock|0 mph|39 mph|[![star](assets/icon-star-full.svg)](##)|[![star](assets/icon-star-full.svg)](##)|Parts
- 1 FCA connector
- 1 RJ45 cable (7 ft)
- 1 comma 3X
- 1 comma power v2
- 1 harness box
- 1 mount
- 1 right angle OBD-C cable (1.5 ft)
Buy Here |
|
diff --git a/poetry.lock b/poetry.lock
index 978574a1e277e2..f1f8d9f51835f7 100644
--- a/poetry.lock
+++ b/poetry.lock
@@ -1,4 +1,4 @@
-# This file is automatically @generated by Poetry 1.7.0 and should not be changed by hand.
+# This file is automatically @generated by Poetry 1.8.3 and should not be changed by hand.
[[package]]
name = "aiohttp"
@@ -6641,6 +6641,24 @@ pluggy = ">=1.5,<2.0"
[package.extras]
dev = ["argcomplete", "attrs (>=19.2)", "hypothesis (>=3.56)", "mock", "pygments (>=2.7.2)", "requests", "setuptools", "xmlschema"]
+[[package]]
+name = "pytest-asyncio"
+version = "0.23.6"
+description = "Pytest support for asyncio"
+optional = false
+python-versions = ">=3.8"
+files = [
+ {file = "pytest-asyncio-0.23.6.tar.gz", hash = "sha256:ffe523a89c1c222598c76856e76852b787504ddb72dd5d9b6617ffa8aa2cde5f"},
+ {file = "pytest_asyncio-0.23.6-py3-none-any.whl", hash = "sha256:68516fdd1018ac57b846c9846b954f0393b26f094764a28c955eabb0536a4e8a"},
+]
+
+[package.dependencies]
+pytest = ">=7.0.0,<9"
+
+[package.extras]
+docs = ["sphinx (>=5.3)", "sphinx-rtd-theme (>=1.0)"]
+testing = ["coverage (>=6.2)", "hypothesis (>=5.7.1)"]
+
[[package]]
name = "pytest-cov"
version = "5.0.0"
@@ -6674,6 +6692,23 @@ files = [
colorama = "*"
pytest = ">=7.0"
+[[package]]
+name = "pytest-mock"
+version = "3.14.0"
+description = "Thin-wrapper around the mock package for easier use with pytest"
+optional = false
+python-versions = ">=3.8"
+files = [
+ {file = "pytest-mock-3.14.0.tar.gz", hash = "sha256:2719255a1efeceadbc056d6bf3df3d1c5015530fb40cf347c0f9afac88410bd0"},
+ {file = "pytest_mock-3.14.0-py3-none-any.whl", hash = "sha256:0b72c38033392a5f4621342fe11e9219ac11ec9d375f8e2a0c164539e0d70f6f"},
+]
+
+[package.dependencies]
+pytest = ">=6.2.5"
+
+[package.extras]
+dev = ["pre-commit", "pytest-asyncio", "tox"]
+
[[package]]
name = "pytest-randomly"
version = "3.15.0"
@@ -7987,4 +8022,4 @@ testing = ["big-O", "jaraco.functools", "jaraco.itertools", "more-itertools", "p
[metadata]
lock-version = "2.0"
python-versions = "~3.11"
-content-hash = "5f0a1b6f26faa3effeaa5393b73d9188be385a72c1d3b9befb3f03df3b38c86d"
+content-hash = "9f69dc7862f33f61e94e960f0ead2cbcd306b4502163d1934381d476143344f4"
diff --git a/pyproject.toml b/pyproject.toml
index a25fcc1645b2a7..eb270176066575 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -1,3 +1,12 @@
+[project]
+name = "openpilot"
+requires-python = ">= 3.11"
+readme = "README.md"
+license = {text = "MIT License"}
+
+[project.urls]
+Homepage = "https://comma.ai"
+
[tool.pytest.ini_options]
minversion = "6.0"
addopts = "--ignore=openpilot/ --ignore=cereal/ --ignore=opendbc/ --ignore=panda/ --ignore=rednose_repo/ --ignore=tinygrad_repo/ --ignore=teleoprtc_repo/ -Werror --strict-config --strict-markers --durations=10 -n auto --dist=loadgroup"
@@ -154,6 +163,8 @@ pytest-subtests = "*"
pytest-xdist = "*"
pytest-timeout = "*"
pytest-randomly = "*"
+pytest-asyncio = "*"
+pytest-mock = "*"
ruff = "*"
sphinx = "*"
sphinx-rtd-theme = "*"
@@ -197,6 +208,7 @@ lint.flake8-implicit-str-concat.allow-multiline=false
"third_party".msg = "Use openpilot.third_party"
"tools".msg = "Use openpilot.tools"
"pytest.main".msg = "pytest.main requires special handling that is easy to mess up!"
+"unittest".msg = "Use pytest"
[tool.coverage.run]
concurrency = ["multiprocessing", "thread"]
diff --git a/selfdrive/athena/tests/test_athenad.py b/selfdrive/athena/tests/test_athenad.py
index 4850ab9a3fc167..bdce3dccef6e3e 100755
--- a/selfdrive/athena/tests/test_athenad.py
+++ b/selfdrive/athena/tests/test_athenad.py
@@ -1,5 +1,6 @@
#!/usr/bin/env python3
-from functools import partial, wraps
+import pytest
+from functools import wraps
import json
import multiprocessing
import os
@@ -8,12 +9,9 @@
import time
import threading
import queue
-import unittest
from dataclasses import asdict, replace
from datetime import datetime, timedelta
-from parameterized import parameterized
-from unittest import mock
from websocket import ABNF
from websocket._exceptions import WebSocketConnectionClosedException
@@ -24,7 +22,7 @@
from openpilot.selfdrive.athena import athenad
from openpilot.selfdrive.athena.athenad import MAX_RETRY_COUNT, dispatcher
from openpilot.selfdrive.athena.tests.helpers import HTTPRequestHandler, MockWebsocket, MockApi, EchoSocket
-from openpilot.selfdrive.test.helpers import with_http_server
+from openpilot.selfdrive.test.helpers import http_server_context
from openpilot.system.hardware.hw import Paths
@@ -37,10 +35,6 @@ def seed_athena_server(host, port):
except requests.exceptions.ConnectionError:
time.sleep(0.1)
-
-with_mock_athena = partial(with_http_server, handler=HTTPRequestHandler, setup=seed_athena_server)
-
-
def with_upload_handler(func):
@wraps(func)
def wrapper(*args, **kwargs):
@@ -54,15 +48,23 @@ def wrapper(*args, **kwargs):
thread.join()
return wrapper
+@pytest.fixture
+def mock_create_connection(mocker):
+ return mocker.patch('openpilot.selfdrive.athena.athenad.create_connection')
+
+@pytest.fixture
+def host():
+ with http_server_context(handler=HTTPRequestHandler, setup=seed_athena_server) as (host, port):
+ yield f"http://{host}:{port}"
-class TestAthenadMethods(unittest.TestCase):
+class TestAthenadMethods:
@classmethod
- def setUpClass(cls):
+ def setup_class(cls):
cls.SOCKET_PORT = 45454
athenad.Api = MockApi
athenad.LOCAL_PORT_WHITELIST = {cls.SOCKET_PORT}
- def setUp(self):
+ def setup_method(self):
self.default_params = {
"DongleId": "0000000000000000",
"GithubSshKeys": b"ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQC307aE+nuHzTAgaJhzSf5v7ZZQW9gaperjhCmyPyl4PzY7T1mDGenTlVTN7yoVFZ9UfO9oMQqo0n1OwDIiqbIFxqnhrHU0cYfj88rI85m5BEKlNu5RdaVTj1tcbaPpQc5kZEolaI1nDDjzV0lwS7jo5VYDHseiJHlik3HH1SgtdtsuamGR2T80q1SyW+5rHoMOJG73IH2553NnWuikKiuikGHUYBd00K1ilVAK2xSiMWJp55tQfZ0ecr9QjEsJ+J/efL4HqGNXhffxvypCXvbUYAFSddOwXUPo5BTKevpxMtH+2YrkpSjocWA04VnTYFiPG6U4ItKmbLOTFZtPzoez private", # noqa: E501
@@ -109,8 +111,8 @@ def _create_file(file: str, parent: str = None, data: bytes = b'') -> str:
def test_echo(self):
assert dispatcher["echo"]("bob") == "bob"
- def test_getMessage(self):
- with self.assertRaises(TimeoutError) as _:
+ def test_get_message(self):
+ with pytest.raises(TimeoutError):
dispatcher["getMessage"]("controlsState")
end_event = multiprocessing.Event()
@@ -133,7 +135,7 @@ def send_deviceState():
end_event.set()
p.join()
- def test_listDataDirectory(self):
+ def test_list_data_directory(self):
route = '2021-03-29--13-32-47'
segments = [0, 1, 2, 3, 11]
@@ -143,69 +145,66 @@ def test_listDataDirectory(self):
self._create_file(file)
resp = dispatcher["listDataDirectory"]()
- self.assertTrue(resp, 'list empty!')
- self.assertCountEqual(resp, files)
+ assert resp, 'list empty!'
+ assert sorted(resp) == sorted(files)
resp = dispatcher["listDataDirectory"](f'{route}--123')
- self.assertCountEqual(resp, [])
+ assert len(resp) == 0
prefix = f'{route}'
- expected = filter(lambda f: f.startswith(prefix), files)
+ expected = list(filter(lambda f: f.startswith(prefix), files))
resp = dispatcher["listDataDirectory"](prefix)
- self.assertTrue(resp, 'list empty!')
- self.assertCountEqual(resp, expected)
+ assert resp, 'list empty!'
+ assert sorted(resp) == sorted(expected)
prefix = f'{route}--1'
- expected = filter(lambda f: f.startswith(prefix), files)
+ expected = list(filter(lambda f: f.startswith(prefix), files))
resp = dispatcher["listDataDirectory"](prefix)
- self.assertTrue(resp, 'list empty!')
- self.assertCountEqual(resp, expected)
+ assert resp, 'list empty!'
+ assert sorted(resp) == sorted(expected)
prefix = f'{route}--1/'
- expected = filter(lambda f: f.startswith(prefix), files)
+ expected = list(filter(lambda f: f.startswith(prefix), files))
resp = dispatcher["listDataDirectory"](prefix)
- self.assertTrue(resp, 'list empty!')
- self.assertCountEqual(resp, expected)
+ assert resp, 'list empty!'
+ assert sorted(resp) == sorted(expected)
prefix = f'{route}--1/q'
- expected = filter(lambda f: f.startswith(prefix), files)
+ expected = list(filter(lambda f: f.startswith(prefix), files))
resp = dispatcher["listDataDirectory"](prefix)
- self.assertTrue(resp, 'list empty!')
- self.assertCountEqual(resp, expected)
+ assert resp, 'list empty!'
+ assert sorted(resp) == sorted(expected)
def test_strip_bz2_extension(self):
fn = self._create_file('qlog.bz2')
if fn.endswith('.bz2'):
- self.assertEqual(athenad.strip_bz2_extension(fn), fn[:-4])
+ assert athenad.strip_bz2_extension(fn) == fn[:-4]
- @parameterized.expand([(True,), (False,)])
- @with_mock_athena
- def test_do_upload(self, compress, host):
+ @pytest.mark.parametrize("compress", [True, False])
+ def test_do_upload(self, host, compress):
# random bytes to ensure rather large object post-compression
fn = self._create_file('qlog', data=os.urandom(10000 * 1024))
upload_fn = fn + ('.bz2' if compress else '')
item = athenad.UploadItem(path=upload_fn, url="http://localhost:1238", headers={}, created_at=int(time.time()*1000), id='')
- with self.assertRaises(requests.exceptions.ConnectionError):
+ with pytest.raises(requests.exceptions.ConnectionError):
athenad._do_upload(item)
item = athenad.UploadItem(path=upload_fn, url=f"{host}/qlog.bz2", headers={}, created_at=int(time.time()*1000), id='')
resp = athenad._do_upload(item)
- self.assertEqual(resp.status_code, 201)
+ assert resp.status_code == 201
- @with_mock_athena
- def test_uploadFileToUrl(self, host):
+ def test_upload_file_to_url(self, host):
fn = self._create_file('qlog.bz2')
resp = dispatcher["uploadFileToUrl"]("qlog.bz2", f"{host}/qlog.bz2", {})
- self.assertEqual(resp['enqueued'], 1)
- self.assertNotIn('failed', resp)
- self.assertLessEqual({"path": fn, "url": f"{host}/qlog.bz2", "headers": {}}.items(), resp['items'][0].items())
- self.assertIsNotNone(resp['items'][0].get('id'))
- self.assertEqual(athenad.upload_queue.qsize(), 1)
-
- @with_mock_athena
- def test_uploadFileToUrl_duplicate(self, host):
+ assert resp['enqueued'] == 1
+ assert 'failed' not in resp
+ assert {"path": fn, "url": f"{host}/qlog.bz2", "headers": {}}.items() <= resp['items'][0].items()
+ assert resp['items'][0].get('id') is not None
+ assert athenad.upload_queue.qsize() == 1
+
+ def test_upload_file_to_url_duplicate(self, host):
self._create_file('qlog.bz2')
url1 = f"{host}/qlog.bz2?sig=sig1"
@@ -214,14 +213,12 @@ def test_uploadFileToUrl_duplicate(self, host):
# Upload same file again, but with different signature
url2 = f"{host}/qlog.bz2?sig=sig2"
resp = dispatcher["uploadFileToUrl"]("qlog.bz2", url2, {})
- self.assertEqual(resp, {'enqueued': 0, 'items': []})
+ assert resp == {'enqueued': 0, 'items': []}
- @with_mock_athena
- def test_uploadFileToUrl_does_not_exist(self, host):
+ def test_upload_file_to_url_does_not_exist(self, host):
not_exists_resp = dispatcher["uploadFileToUrl"]("does_not_exist.bz2", "http://localhost:1238", {})
- self.assertEqual(not_exists_resp, {'enqueued': 0, 'items': [], 'failed': ['does_not_exist.bz2']})
+ assert not_exists_resp == {'enqueued': 0, 'items': [], 'failed': ['does_not_exist.bz2']}
- @with_mock_athena
@with_upload_handler
def test_upload_handler(self, host):
fn = self._create_file('qlog.bz2')
@@ -233,13 +230,12 @@ def test_upload_handler(self, host):
# TODO: verify that upload actually succeeded
# TODO: also check that end_event and metered network raises AbortTransferException
- self.assertEqual(athenad.upload_queue.qsize(), 0)
+ assert athenad.upload_queue.qsize() == 0
- @parameterized.expand([(500, True), (412, False)])
- @with_mock_athena
- @mock.patch('requests.put')
+ @pytest.mark.parametrize("status,retry", [(500, True), (412, False)])
@with_upload_handler
- def test_upload_handler_retry(self, status, retry, mock_put, host):
+ def test_upload_handler_retry(self, mocker, host, status, retry):
+ mock_put = mocker.patch('requests.put')
mock_put.return_value.status_code = status
fn = self._create_file('qlog.bz2')
item = athenad.UploadItem(path=fn, url=f"{host}/qlog.bz2", headers={}, created_at=int(time.time()*1000), id='', allow_cellular=True)
@@ -248,10 +244,10 @@ def test_upload_handler_retry(self, status, retry, mock_put, host):
self._wait_for_upload()
time.sleep(0.1)
- self.assertEqual(athenad.upload_queue.qsize(), 1 if retry else 0)
+ assert athenad.upload_queue.qsize() == (1 if retry else 0)
if retry:
- self.assertEqual(athenad.upload_queue.get().retry_count, 1)
+ assert athenad.upload_queue.get().retry_count == 1
@with_upload_handler
def test_upload_handler_timeout(self):
@@ -265,33 +261,33 @@ def test_upload_handler_timeout(self):
time.sleep(0.1)
# Check that upload with retry count exceeded is not put back
- self.assertEqual(athenad.upload_queue.qsize(), 0)
+ assert athenad.upload_queue.qsize() == 0
athenad.upload_queue.put_nowait(item)
self._wait_for_upload()
time.sleep(0.1)
# Check that upload item was put back in the queue with incremented retry count
- self.assertEqual(athenad.upload_queue.qsize(), 1)
- self.assertEqual(athenad.upload_queue.get().retry_count, 1)
+ assert athenad.upload_queue.qsize() == 1
+ assert athenad.upload_queue.get().retry_count == 1
@with_upload_handler
- def test_cancelUpload(self):
+ def test_cancel_upload(self):
item = athenad.UploadItem(path="qlog.bz2", url="http://localhost:44444/qlog.bz2", headers={},
created_at=int(time.time()*1000), id='id', allow_cellular=True)
athenad.upload_queue.put_nowait(item)
dispatcher["cancelUpload"](item.id)
- self.assertIn(item.id, athenad.cancelled_uploads)
+ assert item.id in athenad.cancelled_uploads
self._wait_for_upload()
time.sleep(0.1)
- self.assertEqual(athenad.upload_queue.qsize(), 0)
- self.assertEqual(len(athenad.cancelled_uploads), 0)
+ assert athenad.upload_queue.qsize() == 0
+ assert len(athenad.cancelled_uploads) == 0
@with_upload_handler
- def test_cancelExpiry(self):
+ def test_cancel_expiry(self):
t_future = datetime.now() - timedelta(days=40)
ts = int(t_future.strftime("%s")) * 1000
@@ -303,15 +299,14 @@ def test_cancelExpiry(self):
self._wait_for_upload()
time.sleep(0.1)
- self.assertEqual(athenad.upload_queue.qsize(), 0)
+ assert athenad.upload_queue.qsize() == 0
- def test_listUploadQueueEmpty(self):
+ def test_list_upload_queue_empty(self):
items = dispatcher["listUploadQueue"]()
- self.assertEqual(len(items), 0)
+ assert len(items) == 0
- @with_http_server
@with_upload_handler
- def test_listUploadQueueCurrent(self, host: str):
+ def test_list_upload_queue_current(self, host: str):
fn = self._create_file('qlog.bz2')
item = athenad.UploadItem(path=fn, url=f"{host}/qlog.bz2", headers={}, created_at=int(time.time()*1000), id='', allow_cellular=True)
@@ -319,22 +314,22 @@ def test_listUploadQueueCurrent(self, host: str):
self._wait_for_upload()
items = dispatcher["listUploadQueue"]()
- self.assertEqual(len(items), 1)
- self.assertTrue(items[0]['current'])
+ assert len(items) == 1
+ assert items[0]['current']
- def test_listUploadQueue(self):
+ def test_list_upload_queue(self):
item = athenad.UploadItem(path="qlog.bz2", url="http://localhost:44444/qlog.bz2", headers={},
created_at=int(time.time()*1000), id='id', allow_cellular=True)
athenad.upload_queue.put_nowait(item)
items = dispatcher["listUploadQueue"]()
- self.assertEqual(len(items), 1)
- self.assertDictEqual(items[0], asdict(item))
- self.assertFalse(items[0]['current'])
+ assert len(items) == 1
+ assert items[0] == asdict(item)
+ assert not items[0]['current']
athenad.cancelled_uploads.add(item.id)
items = dispatcher["listUploadQueue"]()
- self.assertEqual(len(items), 0)
+ assert len(items) == 0
def test_upload_queue_persistence(self):
item1 = athenad.UploadItem(path="_", url="_", headers={}, created_at=int(time.time()), id='id1')
@@ -353,11 +348,10 @@ def test_upload_queue_persistence(self):
athenad.upload_queue.queue.clear()
athenad.UploadQueueCache.initialize(athenad.upload_queue)
- self.assertEqual(athenad.upload_queue.qsize(), 1)
- self.assertDictEqual(asdict(athenad.upload_queue.queue[-1]), asdict(item1))
+ assert athenad.upload_queue.qsize() == 1
+ assert asdict(athenad.upload_queue.queue[-1]) == asdict(item1)
- @mock.patch('openpilot.selfdrive.athena.athenad.create_connection')
- def test_startLocalProxy(self, mock_create_connection):
+ def test_start_local_proxy(self, mock_create_connection):
end_event = threading.Event()
ws_recv = queue.Queue()
@@ -380,21 +374,21 @@ def test_startLocalProxy(self, mock_create_connection):
ws_recv.put_nowait(WebSocketConnectionClosedException())
socket_thread.join()
- def test_getSshAuthorizedKeys(self):
+ def test_get_ssh_authorized_keys(self):
keys = dispatcher["getSshAuthorizedKeys"]()
- self.assertEqual(keys, self.default_params["GithubSshKeys"].decode('utf-8'))
+ assert keys == self.default_params["GithubSshKeys"].decode('utf-8')
- def test_getGithubUsername(self):
+ def test_get_github_username(self):
keys = dispatcher["getGithubUsername"]()
- self.assertEqual(keys, self.default_params["GithubUsername"].decode('utf-8'))
+ assert keys == self.default_params["GithubUsername"].decode('utf-8')
- def test_getVersion(self):
+ def test_get_version(self):
resp = dispatcher["getVersion"]()
keys = ["version", "remote", "branch", "commit"]
- self.assertEqual(list(resp.keys()), keys)
+ assert list(resp.keys()) == keys
for k in keys:
- self.assertIsInstance(resp[k], str, f"{k} is not a string")
- self.assertTrue(len(resp[k]) > 0, f"{k} has no value")
+ assert isinstance(resp[k], str), f"{k} is not a string"
+ assert len(resp[k]) > 0, f"{k} has no value"
def test_jsonrpc_handler(self):
end_event = threading.Event()
@@ -405,15 +399,15 @@ def test_jsonrpc_handler(self):
# with params
athenad.recv_queue.put_nowait(json.dumps({"method": "echo", "params": ["hello"], "jsonrpc": "2.0", "id": 0}))
resp = athenad.send_queue.get(timeout=3)
- self.assertDictEqual(json.loads(resp), {'result': 'hello', 'id': 0, 'jsonrpc': '2.0'})
+ assert json.loads(resp) == {'result': 'hello', 'id': 0, 'jsonrpc': '2.0'}
# without params
athenad.recv_queue.put_nowait(json.dumps({"method": "getNetworkType", "jsonrpc": "2.0", "id": 0}))
resp = athenad.send_queue.get(timeout=3)
- self.assertDictEqual(json.loads(resp), {'result': 1, 'id': 0, 'jsonrpc': '2.0'})
+ assert json.loads(resp) == {'result': 1, 'id': 0, 'jsonrpc': '2.0'}
# log forwarding
athenad.recv_queue.put_nowait(json.dumps({'result': {'success': 1}, 'id': 0, 'jsonrpc': '2.0'}))
resp = athenad.log_recv_queue.get(timeout=3)
- self.assertDictEqual(json.loads(resp), {'result': {'success': 1}, 'id': 0, 'jsonrpc': '2.0'})
+ assert json.loads(resp) == {'result': {'success': 1}, 'id': 0, 'jsonrpc': '2.0'}
finally:
end_event.set()
thread.join()
@@ -427,8 +421,4 @@ def test_get_logs_to_send_sorted(self):
# ensure the list is all logs except most recent
sl = athenad.get_logs_to_send_sorted()
- self.assertListEqual(sl, fl[:-1])
-
-
-if __name__ == '__main__':
- unittest.main()
+ assert sl == fl[:-1]
diff --git a/selfdrive/athena/tests/test_athenad_ping.py b/selfdrive/athena/tests/test_athenad_ping.py
index f56fcac8b58453..44fa0b8481dd39 100755
--- a/selfdrive/athena/tests/test_athenad_ping.py
+++ b/selfdrive/athena/tests/test_athenad_ping.py
@@ -1,10 +1,9 @@
#!/usr/bin/env python3
+import pytest
import subprocess
import threading
import time
-import unittest
from typing import cast
-from unittest import mock
from openpilot.common.params import Params
from openpilot.common.timeout import Timeout
@@ -22,7 +21,7 @@ def wifi_radio(on: bool) -> None:
subprocess.run(["nmcli", "radio", "wifi", "on" if on else "off"], check=True)
-class TestAthenadPing(unittest.TestCase):
+class TestAthenadPing:
params: Params
dongle_id: str
@@ -39,10 +38,10 @@ def _received_ping(self) -> bool:
return self._get_ping_time() is not None
@classmethod
- def tearDownClass(cls) -> None:
+ def teardown_class(cls) -> None:
wifi_radio(True)
- def setUp(self) -> None:
+ def setup_method(self) -> None:
self.params = Params()
self.dongle_id = self.params.get("DongleId", encoding="utf-8")
@@ -52,21 +51,23 @@ def setUp(self) -> None:
self.exit_event = threading.Event()
self.athenad = threading.Thread(target=athenad.main, args=(self.exit_event,))
- def tearDown(self) -> None:
+ def teardown_method(self) -> None:
if self.athenad.is_alive():
self.exit_event.set()
self.athenad.join()
- @mock.patch('openpilot.selfdrive.athena.athenad.create_connection', new_callable=lambda: mock.MagicMock(wraps=athenad.create_connection))
- def assertTimeout(self, reconnect_time: float, mock_create_connection: mock.MagicMock) -> None:
+ def assertTimeout(self, reconnect_time: float, subtests, mocker) -> None:
- self.athenad.start()
+ mock_create_connection = mocker.patch('openpilot.selfdrive.athena.athenad.create_connection',
+ new_callable=lambda: mocker.MagicMock(wraps=athenad.create_connection))
+ self.athenad.start()
+
time.sleep(1)
mock_create_connection.assert_called_once()
mock_create_connection.reset_mock()
# check normal behaviour, server pings on connection
- with self.subTest("Wi-Fi: receives ping"), Timeout(70, "no ping received"):
+ with subtests.test("Wi-Fi: receives ping"), Timeout(70, "no ping received"):
while not self._received_ping():
time.sleep(0.1)
print("ping received")
@@ -74,7 +75,7 @@ def assertTimeout(self, reconnect_time: float, mock_create_connection: mock.Magi
mock_create_connection.assert_not_called()
# websocket should attempt reconnect after short time
- with self.subTest("LTE: attempt reconnect"):
+ with subtests.test("LTE: attempt reconnect"):
wifi_radio(False)
print("waiting for reconnect attempt")
start_time = time.monotonic()
@@ -86,21 +87,17 @@ def assertTimeout(self, reconnect_time: float, mock_create_connection: mock.Magi
self._clear_ping_time()
# check ping received after reconnect
- with self.subTest("LTE: receives ping"), Timeout(70, "no ping received"):
+ with subtests.test("LTE: receives ping"), Timeout(70, "no ping received"):
while not self._received_ping():
time.sleep(0.1)
print("ping received")
- @unittest.skipIf(not TICI, "only run on desk")
- def test_offroad(self) -> None:
+ @pytest.mark.skipif(not TICI, reason="only run on desk")
+ def test_offroad(self, subtests, mocker) -> None:
write_onroad_params(False, self.params)
- self.assertTimeout(60 + TIMEOUT_TOLERANCE) # based using TCP keepalive settings
+ self.assertTimeout(60 + TIMEOUT_TOLERANCE, subtests, mocker) # based on TCP keepalive settings
- @unittest.skipIf(not TICI, "only run on desk")
- def test_onroad(self) -> None:
+ @pytest.mark.skipif(not TICI, reason="only run on desk")
+ def test_onroad(self, subtests, mocker) -> None:
write_onroad_params(True, self.params)
- self.assertTimeout(21 + TIMEOUT_TOLERANCE)
-
-
-if __name__ == "__main__":
- unittest.main()
+ self.assertTimeout(21 + TIMEOUT_TOLERANCE, subtests, mocker)
diff --git a/selfdrive/athena/tests/test_registration.py b/selfdrive/athena/tests/test_registration.py
index e7ad63a370ee1e..a808dd566878ba 100755
--- a/selfdrive/athena/tests/test_registration.py
+++ b/selfdrive/athena/tests/test_registration.py
@@ -1,9 +1,7 @@
#!/usr/bin/env python3
import json
-import unittest
from Crypto.PublicKey import RSA
from pathlib import Path
-from unittest import mock
from openpilot.common.params import Params
from openpilot.selfdrive.athena.registration import register, UNREGISTERED_DONGLE_ID
@@ -11,9 +9,9 @@
from openpilot.system.hardware.hw import Paths
-class TestRegistration(unittest.TestCase):
+class TestRegistration:
- def setUp(self):
+ def setup_method(self):
# clear params and setup key paths
self.params = Params()
self.params.clear_all()
@@ -32,50 +30,46 @@ def _generate_keys(self):
with open(self.pub_key, "wb") as f:
f.write(k.publickey().export_key())
- def test_valid_cache(self):
+ def test_valid_cache(self, mocker):
# if all params are written, return the cached dongle id
self.params.put("IMEI", "imei")
self.params.put("HardwareSerial", "serial")
self._generate_keys()
- with mock.patch("openpilot.selfdrive.athena.registration.api_get", autospec=True) as m:
- dongle = "DONGLE_ID_123"
- self.params.put("DongleId", dongle)
- self.assertEqual(register(), dongle)
- self.assertFalse(m.called)
+ m = mocker.patch("openpilot.selfdrive.athena.registration.api_get", autospec=True)
+ dongle = "DONGLE_ID_123"
+ self.params.put("DongleId", dongle)
+ assert register() == dongle
+ assert not m.called
- def test_no_keys(self):
+ def test_no_keys(self, mocker):
# missing pubkey
- with mock.patch("openpilot.selfdrive.athena.registration.api_get", autospec=True) as m:
- dongle = register()
- self.assertEqual(m.call_count, 0)
- self.assertEqual(dongle, UNREGISTERED_DONGLE_ID)
- self.assertEqual(self.params.get("DongleId", encoding='utf-8'), dongle)
+ m = mocker.patch("openpilot.selfdrive.athena.registration.api_get", autospec=True)
+ dongle = register()
+ assert m.call_count == 0
+ assert dongle == UNREGISTERED_DONGLE_ID
+ assert self.params.get("DongleId", encoding='utf-8') == dongle
- def test_missing_cache(self):
+ def test_missing_cache(self, mocker):
# keys exist but no dongle id
self._generate_keys()
- with mock.patch("openpilot.selfdrive.athena.registration.api_get", autospec=True) as m:
- dongle = "DONGLE_ID_123"
- m.return_value = MockResponse(json.dumps({'dongle_id': dongle}), 200)
- self.assertEqual(register(), dongle)
- self.assertEqual(m.call_count, 1)
+ m = mocker.patch("openpilot.selfdrive.athena.registration.api_get", autospec=True)
+ dongle = "DONGLE_ID_123"
+ m.return_value = MockResponse(json.dumps({'dongle_id': dongle}), 200)
+ assert register() == dongle
+ assert m.call_count == 1
- # call again, shouldn't hit the API this time
- self.assertEqual(register(), dongle)
- self.assertEqual(m.call_count, 1)
- self.assertEqual(self.params.get("DongleId", encoding='utf-8'), dongle)
+ # call again, shouldn't hit the API this time
+ assert register() == dongle
+ assert m.call_count == 1
+ assert self.params.get("DongleId", encoding='utf-8') == dongle
- def test_unregistered(self):
+ def test_unregistered(self, mocker):
# keys exist, but unregistered
self._generate_keys()
- with mock.patch("openpilot.selfdrive.athena.registration.api_get", autospec=True) as m:
- m.return_value = MockResponse(None, 402)
- dongle = register()
- self.assertEqual(m.call_count, 1)
- self.assertEqual(dongle, UNREGISTERED_DONGLE_ID)
- self.assertEqual(self.params.get("DongleId", encoding='utf-8'), dongle)
-
-
-if __name__ == "__main__":
- unittest.main()
+ m = mocker.patch("openpilot.selfdrive.athena.registration.api_get", autospec=True)
+ m.return_value = MockResponse(None, 402)
+ dongle = register()
+ assert m.call_count == 1
+ assert dongle == UNREGISTERED_DONGLE_ID
+ assert self.params.get("DongleId", encoding='utf-8') == dongle
diff --git a/selfdrive/boardd/tests/test_boardd_loopback.py b/selfdrive/boardd/tests/test_boardd_loopback.py
index 3ab3a9c5b106ad..fa9eb957c21837 100755
--- a/selfdrive/boardd/tests/test_boardd_loopback.py
+++ b/selfdrive/boardd/tests/test_boardd_loopback.py
@@ -4,7 +4,6 @@
import random
import time
import pytest
-import unittest
from collections import defaultdict
from pprint import pprint
@@ -107,7 +106,3 @@ def test_loopback(self):
pprint(sm['pandaStates']) # may drop messages due to RX buffer overflow
for bus in sent_loopback.keys():
assert not len(sent_loopback[bus]), f"loop {i}: bus {bus} missing {len(sent_loopback[bus])} out of {sent_total[bus]} messages"
-
-
-if __name__ == "__main__":
- unittest.main()
diff --git a/selfdrive/boardd/tests/test_pandad.py b/selfdrive/boardd/tests/test_pandad.py
index 3434be3fe46f9b..65f3ad657c71aa 100755
--- a/selfdrive/boardd/tests/test_pandad.py
+++ b/selfdrive/boardd/tests/test_pandad.py
@@ -2,7 +2,6 @@
import os
import pytest
import time
-import unittest
import cereal.messaging as messaging
from cereal import log
@@ -16,16 +15,16 @@
@pytest.mark.tici
-class TestPandad(unittest.TestCase):
+class TestPandad:
- def setUp(self):
+ def setup_method(self):
# ensure panda is up
if len(Panda.list()) == 0:
self._run_test(60)
self.spi = HARDWARE.get_device_type() != 'tici'
- def tearDown(self):
+ def teardown_method(self):
managed_processes['pandad'].stop()
def _run_test(self, timeout=30) -> float:
@@ -65,7 +64,7 @@ def _flash_bootstub_and_test(self, fn, expect_mismatch=False):
assert Panda.wait_for_panda(None, 10)
if expect_mismatch:
- with self.assertRaises(PandaProtocolMismatch):
+ with pytest.raises(PandaProtocolMismatch):
Panda()
else:
with Panda() as p:
@@ -108,9 +107,10 @@ def test_best_case_startup_time(self):
assert 0.1 < (sum(ts)/len(ts)) < (0.5 if self.spi else 5.0)
print("startup times", ts, sum(ts) / len(ts))
+
def test_protocol_version_check(self):
if not self.spi:
- raise unittest.SkipTest("SPI test")
+ pytest.skip("SPI test")
# flash old fw
fn = os.path.join(HERE, "bootstub.panda_h7_spiv0.bin")
self._flash_bootstub_and_test(fn, expect_mismatch=True)
@@ -127,7 +127,3 @@ def test_recover_from_bad_bootstub(self):
self._assert_no_panda()
self._run_test(60)
-
-
-if __name__ == "__main__":
- unittest.main()
diff --git a/selfdrive/car/card.py b/selfdrive/car/card.py
index 4dcd5ad0b4ebad..ed916a6fe1b896 100755
--- a/selfdrive/car/card.py
+++ b/selfdrive/car/card.py
@@ -14,13 +14,11 @@
from openpilot.selfdrive.boardd.boardd import can_list_to_can_capnp
from openpilot.selfdrive.car.car_helpers import get_car, get_one_can
from openpilot.selfdrive.car.interfaces import CarInterfaceBase
-from openpilot.selfdrive.controls.lib.drive_helpers import VCruiseHelper
from openpilot.selfdrive.controls.lib.events import Events
REPLAY = "REPLAY" in os.environ
EventName = car.CarEvent.EventName
-ButtonType = car.CarState.ButtonEvent.Type
class CarD:
@@ -35,7 +33,6 @@ def __init__(self, CI=None):
self.can_rcv_cum_timeout_counter = 0 # cumulative timeout count
self.CC_prev = car.CarControl.new_message()
- self.CS_prev = car.CarState.new_message()
self.last_actuators = None
@@ -79,8 +76,8 @@ def __init__(self, CI=None):
self.params.put_nonblocking("CarParamsCache", cp_bytes)
self.params.put_nonblocking("CarParamsPersistent", cp_bytes)
+ self.CS_prev = car.CarState.new_message()
self.events = Events()
- self.v_cruise_helper = VCruiseHelper(self.CP)
def initialize(self):
"""Initialize CarInterface, once controls are ready"""
@@ -121,11 +118,6 @@ def update_events(self, CS: car.CarState) -> car.CarState:
self.events.add_from_msg(CS.events)
- # Block resume if cruise never previously enabled
- resume_pressed = any(be.type in (ButtonType.accelCruise, ButtonType.resumeCruise) for be in CS.buttonEvents)
- if not self.CP.pcmCruise and not self.v_cruise_helper.v_cruise_initialized and resume_pressed:
- self.events.add(EventName.resumeBlocked)
-
# Disable on rising edge of accelerator or brake. Also disable on brake when speed > 0
if (CS.gasPressed and not self.CS_prev.gasPressed and self.disengage_on_accelerator) or \
(CS.brakePressed and (not self.CS_prev.brakePressed or not CS.standstill)) or \
diff --git a/selfdrive/car/ford/tests/test_ford.py b/selfdrive/car/ford/tests/test_ford.py
index 5d7b2c3332ba5a..72dd69980a224f 100755
--- a/selfdrive/car/ford/tests/test_ford.py
+++ b/selfdrive/car/ford/tests/test_ford.py
@@ -1,6 +1,5 @@
#!/usr/bin/env python3
import random
-import unittest
from collections.abc import Iterable
import capnp
@@ -43,31 +42,31 @@
}
-class TestFordFW(unittest.TestCase):
+class TestFordFW:
def test_fw_query_config(self):
for (ecu, addr, subaddr) in FW_QUERY_CONFIG.extra_ecus:
- self.assertIn(ecu, ECU_ADDRESSES, "Unknown ECU")
- self.assertEqual(addr, ECU_ADDRESSES[ecu], "ECU address mismatch")
- self.assertIsNone(subaddr, "Unexpected ECU subaddress")
+ assert ecu in ECU_ADDRESSES, "Unknown ECU"
+ assert addr == ECU_ADDRESSES[ecu], "ECU address mismatch"
+ assert subaddr is None, "Unexpected ECU subaddress"
@parameterized.expand(FW_VERSIONS.items())
def test_fw_versions(self, car_model: str, fw_versions: dict[tuple[capnp.lib.capnp._EnumModule, int, int | None], Iterable[bytes]]):
for (ecu, addr, subaddr), fws in fw_versions.items():
- self.assertIn(ecu, ECU_PART_NUMBER, "Unexpected ECU")
- self.assertEqual(addr, ECU_ADDRESSES[ecu], "ECU address mismatch")
- self.assertIsNone(subaddr, "Unexpected ECU subaddress")
+ assert ecu in ECU_PART_NUMBER, "Unexpected ECU"
+ assert addr == ECU_ADDRESSES[ecu], "ECU address mismatch"
+ assert subaddr is None, "Unexpected ECU subaddress"
for fw in fws:
- self.assertEqual(len(fw), 24, "Expected ECU response to be 24 bytes")
+ assert len(fw) == 24, "Expected ECU response to be 24 bytes"
match = FW_PATTERN.match(fw)
- self.assertIsNotNone(match, f"Unable to parse FW: {fw!r}")
+ assert match is not None, f"Unable to parse FW: {fw!r}"
if match:
part_number = match.group("part_number")
- self.assertIn(part_number, ECU_PART_NUMBER[ecu], f"Unexpected part number for {fw!r}")
+ assert part_number in ECU_PART_NUMBER[ecu], f"Unexpected part number for {fw!r}"
codes = get_platform_codes([fw])
- self.assertEqual(1, len(codes), f"Unable to parse FW: {fw!r}")
+ assert 1 == len(codes), f"Unable to parse FW: {fw!r}"
@settings(max_examples=100)
@given(data=st.data())
@@ -85,7 +84,7 @@ def test_platform_codes_spot_check(self):
b"PJ6T-14H102-ABJ\x00\x00\x00\x00\x00\x00\x00\x00\x00",
b"LB5A-14C204-EAC\x00\x00\x00\x00\x00\x00\x00\x00\x00",
])
- self.assertEqual(results, {(b"X6A", b"J"), (b"Z6T", b"N"), (b"J6T", b"P"), (b"B5A", b"L")})
+ assert results == {(b"X6A", b"J"), (b"Z6T", b"N"), (b"J6T", b"P"), (b"B5A", b"L")}
def test_fuzzy_match(self):
for platform, fw_by_addr in FW_VERSIONS.items():
@@ -100,7 +99,7 @@ def test_fuzzy_match(self):
CP = car.CarParams.new_message(carFw=car_fw)
matches = FW_QUERY_CONFIG.match_fw_to_car_fuzzy(build_fw_dict(CP.carFw), CP.carVin, FW_VERSIONS)
- self.assertEqual(matches, {platform})
+ assert matches == {platform}
def test_match_fw_fuzzy(self):
offline_fw = {
@@ -132,18 +131,14 @@ def test_match_fw_fuzzy(self):
(0x706, None): {b"LB5T-14F397-XX\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00"},
}
candidates = FW_QUERY_CONFIG.match_fw_to_car_fuzzy(live_fw, '', {expected_fingerprint: offline_fw})
- self.assertEqual(candidates, {expected_fingerprint})
+ assert candidates == {expected_fingerprint}
# model year hint in between the range should match
live_fw[(0x706, None)] = {b"MB5T-14F397-XX\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00"}
candidates = FW_QUERY_CONFIG.match_fw_to_car_fuzzy(live_fw, '', {expected_fingerprint: offline_fw,})
- self.assertEqual(candidates, {expected_fingerprint})
+ assert candidates == {expected_fingerprint}
# unseen model year hint should not match
live_fw[(0x760, None)] = {b"M1MC-2D053-XX\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00"}
candidates = FW_QUERY_CONFIG.match_fw_to_car_fuzzy(live_fw, '', {expected_fingerprint: offline_fw})
- self.assertEqual(len(candidates), 0, "Should not match new model year hint")
-
-
-if __name__ == "__main__":
- unittest.main()
+ assert len(candidates) == 0, "Should not match new model year hint"
diff --git a/selfdrive/car/gm/tests/test_gm.py b/selfdrive/car/gm/tests/test_gm.py
index 01ec8533b830c4..389d0636c91f91 100755
--- a/selfdrive/car/gm/tests/test_gm.py
+++ b/selfdrive/car/gm/tests/test_gm.py
@@ -1,6 +1,5 @@
#!/usr/bin/env python3
from parameterized import parameterized
-import unittest
from openpilot.selfdrive.car.gm.fingerprints import FINGERPRINTS
from openpilot.selfdrive.car.gm.values import CAMERA_ACC_CAR, GM_RX_OFFSET
@@ -8,19 +7,15 @@
CAMERA_DIAGNOSTIC_ADDRESS = 0x24b
-class TestGMFingerprint(unittest.TestCase):
+class TestGMFingerprint:
@parameterized.expand(FINGERPRINTS.items())
def test_can_fingerprints(self, car_model, fingerprints):
- self.assertGreater(len(fingerprints), 0)
+ assert len(fingerprints) > 0
- self.assertTrue(all(len(finger) for finger in fingerprints))
+ assert all(len(finger) for finger in fingerprints)
# The camera can sometimes be communicating on startup
if car_model in CAMERA_ACC_CAR:
for finger in fingerprints:
for required_addr in (CAMERA_DIAGNOSTIC_ADDRESS, CAMERA_DIAGNOSTIC_ADDRESS + GM_RX_OFFSET):
- self.assertEqual(finger.get(required_addr), 8, required_addr)
-
-
-if __name__ == "__main__":
- unittest.main()
+ assert finger.get(required_addr) == 8, required_addr
diff --git a/selfdrive/car/honda/tests/test_honda.py b/selfdrive/car/honda/tests/test_honda.py
index 60d91b84a832f8..54d177d2ed2880 100755
--- a/selfdrive/car/honda/tests/test_honda.py
+++ b/selfdrive/car/honda/tests/test_honda.py
@@ -1,20 +1,15 @@
#!/usr/bin/env python3
import re
-import unittest
from openpilot.selfdrive.car.honda.fingerprints import FW_VERSIONS
HONDA_FW_VERSION_RE = br"[A-Z0-9]{5}-[A-Z0-9]{3}(-|,)[A-Z0-9]{4}(\x00){2}$"
-class TestHondaFingerprint(unittest.TestCase):
+class TestHondaFingerprint:
def test_fw_version_format(self):
# Asserts all FW versions follow an expected format
for fw_by_ecu in FW_VERSIONS.values():
for fws in fw_by_ecu.values():
for fw in fws:
- self.assertTrue(re.match(HONDA_FW_VERSION_RE, fw) is not None, fw)
-
-
-if __name__ == "__main__":
- unittest.main()
+ assert re.match(HONDA_FW_VERSION_RE, fw) is not None, fw
diff --git a/selfdrive/car/hyundai/fingerprints.py b/selfdrive/car/hyundai/fingerprints.py
index 0c43b8e340bda4..b6318eb8fa0b32 100644
--- a/selfdrive/car/hyundai/fingerprints.py
+++ b/selfdrive/car/hyundai/fingerprints.py
@@ -1003,6 +1003,7 @@
CAR.HYUNDAI_TUCSON_4TH_GEN: {
(Ecu.fwdCamera, 0x7c4, None): [
b'\xf1\x00NX4 FR_CMR AT CAN LHD 1.00 1.01 99211-N9100 14A',
+ b'\xf1\x00NX4 FR_CMR AT CAN LHD 1.00 1.00 99211-N9260 14Y',
b'\xf1\x00NX4 FR_CMR AT EUR LHD 1.00 1.00 99211-N9220 14K',
b'\xf1\x00NX4 FR_CMR AT EUR LHD 1.00 2.02 99211-N9000 14E',
b'\xf1\x00NX4 FR_CMR AT USA LHD 1.00 1.00 99211-N9210 14G',
diff --git a/selfdrive/car/hyundai/tests/test_hyundai.py b/selfdrive/car/hyundai/tests/test_hyundai.py
index 0753b372e147c3..db2110b0de4c1b 100755
--- a/selfdrive/car/hyundai/tests/test_hyundai.py
+++ b/selfdrive/car/hyundai/tests/test_hyundai.py
@@ -1,6 +1,7 @@
#!/usr/bin/env python3
from hypothesis import settings, given, strategies as st
-import unittest
+
+import pytest
from cereal import car
from openpilot.selfdrive.car.fw_versions import build_fw_dict
@@ -39,19 +40,19 @@
CANFD_EXPECTED_ECUS = {Ecu.fwdCamera, Ecu.fwdRadar}
-class TestHyundaiFingerprint(unittest.TestCase):
+class TestHyundaiFingerprint:
def test_can_features(self):
# Test no EV/HEV in any gear lists (should all use ELECT_GEAR)
- self.assertEqual(set.union(*CAN_GEARS.values()) & (HYBRID_CAR | EV_CAR), set())
+ assert set.union(*CAN_GEARS.values()) & (HYBRID_CAR | EV_CAR) == set()
# Test CAN FD car not in CAN feature lists
can_specific_feature_list = set.union(*CAN_GEARS.values(), *CHECKSUM.values(), LEGACY_SAFETY_MODE_CAR, UNSUPPORTED_LONGITUDINAL_CAR, CAMERA_SCC_CAR)
for car_model in CANFD_CAR:
- self.assertNotIn(car_model, can_specific_feature_list, "CAN FD car unexpectedly found in a CAN feature list")
+ assert car_model not in can_specific_feature_list, "CAN FD car unexpectedly found in a CAN feature list"
def test_hybrid_ev_sets(self):
- self.assertEqual(HYBRID_CAR & EV_CAR, set(), "Shared cars between hybrid and EV")
- self.assertEqual(CANFD_CAR & HYBRID_CAR, set(), "Hard coding CAN FD cars as hybrid is no longer supported")
+ assert HYBRID_CAR & EV_CAR == set(), "Shared cars between hybrid and EV"
+ assert CANFD_CAR & HYBRID_CAR == set(), "Hard coding CAN FD cars as hybrid is no longer supported"
def test_canfd_ecu_whitelist(self):
# Asserts only expected Ecus can exist in database for CAN-FD cars
@@ -59,34 +60,34 @@ def test_canfd_ecu_whitelist(self):
ecus = {fw[0] for fw in FW_VERSIONS[car_model].keys()}
ecus_not_in_whitelist = ecus - CANFD_EXPECTED_ECUS
ecu_strings = ", ".join([f"Ecu.{ECU_NAME[ecu]}" for ecu in ecus_not_in_whitelist])
- self.assertEqual(len(ecus_not_in_whitelist), 0,
- f"{car_model}: Car model has unexpected ECUs: {ecu_strings}")
+ assert len(ecus_not_in_whitelist) == 0, \
+ f"{car_model}: Car model has unexpected ECUs: {ecu_strings}"
- def test_blacklisted_parts(self):
+ def test_blacklisted_parts(self, subtests):
# Asserts no ECUs known to be shared across platforms exist in the database.
# Tucson having Santa Cruz camera and EPS for example
for car_model, ecus in FW_VERSIONS.items():
- with self.subTest(car_model=car_model.value):
+ with subtests.test(car_model=car_model.value):
if car_model == CAR.HYUNDAI_SANTA_CRUZ_1ST_GEN:
- raise unittest.SkipTest("Skip checking Santa Cruz for its parts")
+ pytest.skip("Skip checking Santa Cruz for its parts")
for code, _ in get_platform_codes(ecus[(Ecu.fwdCamera, 0x7c4, None)]):
if b"-" not in code:
continue
part = code.split(b"-")[1]
- self.assertFalse(part.startswith(b'CW'), "Car has bad part number")
+ assert not part.startswith(b'CW'), "Car has bad part number"
- def test_correct_ecu_response_database(self):
+ def test_correct_ecu_response_database(self, subtests):
"""
Assert standard responses for certain ECUs, since they can
respond to multiple queries with different data
"""
expected_fw_prefix = HYUNDAI_VERSION_REQUEST_LONG[1:]
for car_model, ecus in FW_VERSIONS.items():
- with self.subTest(car_model=car_model.value):
+ with subtests.test(car_model=car_model.value):
for ecu, fws in ecus.items():
- self.assertTrue(all(fw.startswith(expected_fw_prefix) for fw in fws),
- f"FW from unexpected request in database: {(ecu, fws)}")
+ assert all(fw.startswith(expected_fw_prefix) for fw in fws), \
+ f"FW from unexpected request in database: {(ecu, fws)}"
@settings(max_examples=100)
@given(data=st.data())
@@ -96,10 +97,10 @@ def test_platform_codes_fuzzy_fw(self, data):
fws = data.draw(fw_strategy)
get_platform_codes(fws)
- def test_expected_platform_codes(self):
+ def test_expected_platform_codes(self, subtests):
# Ensures we don't accidentally add multiple platform codes for a car unless it is intentional
for car_model, ecus in FW_VERSIONS.items():
- with self.subTest(car_model=car_model.value):
+ with subtests.test(car_model=car_model.value):
for ecu, fws in ecus.items():
if ecu[0] not in PLATFORM_CODE_ECUS:
continue
@@ -107,37 +108,37 @@ def test_expected_platform_codes(self):
# Third and fourth character are usually EV/hybrid identifiers
codes = {code.split(b"-")[0][:2] for code, _ in get_platform_codes(fws)}
if car_model == CAR.HYUNDAI_PALISADE:
- self.assertEqual(codes, {b"LX", b"ON"}, f"Car has unexpected platform codes: {car_model} {codes}")
+ assert codes == {b"LX", b"ON"}, f"Car has unexpected platform codes: {car_model} {codes}"
elif car_model == CAR.HYUNDAI_KONA_EV and ecu[0] == Ecu.fwdCamera:
- self.assertEqual(codes, {b"OE", b"OS"}, f"Car has unexpected platform codes: {car_model} {codes}")
+ assert codes == {b"OE", b"OS"}, f"Car has unexpected platform codes: {car_model} {codes}"
else:
- self.assertEqual(len(codes), 1, f"Car has multiple platform codes: {car_model} {codes}")
+ assert len(codes) == 1, f"Car has multiple platform codes: {car_model} {codes}"
# Tests for platform codes, part numbers, and FW dates which Hyundai will use to fuzzy
# fingerprint in the absence of full FW matches:
- def test_platform_code_ecus_available(self):
+ def test_platform_code_ecus_available(self, subtests):
# TODO: add queries for these non-CAN FD cars to get EPS
no_eps_platforms = CANFD_CAR | {CAR.KIA_SORENTO, CAR.KIA_OPTIMA_G4, CAR.KIA_OPTIMA_G4_FL, CAR.KIA_OPTIMA_H,
CAR.KIA_OPTIMA_H_G4_FL, CAR.HYUNDAI_SONATA_LF, CAR.HYUNDAI_TUCSON, CAR.GENESIS_G90, CAR.GENESIS_G80, CAR.HYUNDAI_ELANTRA}
# Asserts ECU keys essential for fuzzy fingerprinting are available on all platforms
for car_model, ecus in FW_VERSIONS.items():
- with self.subTest(car_model=car_model.value):
+ with subtests.test(car_model=car_model.value):
for platform_code_ecu in PLATFORM_CODE_ECUS:
if platform_code_ecu in (Ecu.fwdRadar, Ecu.eps) and car_model == CAR.HYUNDAI_GENESIS:
continue
if platform_code_ecu == Ecu.eps and car_model in no_eps_platforms:
continue
- self.assertIn(platform_code_ecu, [e[0] for e in ecus])
+ assert platform_code_ecu in [e[0] for e in ecus]
- def test_fw_format(self):
+ def test_fw_format(self, subtests):
# Asserts:
# - every supported ECU FW version returns one platform code
# - every supported ECU FW version has a part number
# - expected parsing of ECU FW dates
for car_model, ecus in FW_VERSIONS.items():
- with self.subTest(car_model=car_model.value):
+ with subtests.test(car_model=car_model.value):
for ecu, fws in ecus.items():
if ecu[0] not in PLATFORM_CODE_ECUS:
continue
@@ -145,40 +146,40 @@ def test_fw_format(self):
codes = set()
for fw in fws:
result = get_platform_codes([fw])
- self.assertEqual(1, len(result), f"Unable to parse FW: {fw}")
+ assert 1 == len(result), f"Unable to parse FW: {fw}"
codes |= result
if ecu[0] not in DATE_FW_ECUS or car_model in NO_DATES_PLATFORMS:
- self.assertTrue(all(date is None for _, date in codes))
+ assert all(date is None for _, date in codes)
else:
- self.assertTrue(all(date is not None for _, date in codes))
+ assert all(date is not None for _, date in codes)
if car_model == CAR.HYUNDAI_GENESIS:
- raise unittest.SkipTest("No part numbers for car model")
+ pytest.skip("No part numbers for car model")
# Hyundai places the ECU part number in their FW versions, assert all parsable
# Some examples of valid formats: b"56310-L0010", b"56310L0010", b"56310/M6300"
- self.assertTrue(all(b"-" in code for code, _ in codes),
- f"FW does not have part number: {fw}")
+ assert all(b"-" in code for code, _ in codes), \
+ f"FW does not have part number: {fw}"
def test_platform_codes_spot_check(self):
# Asserts basic platform code parsing behavior for a few cases
results = get_platform_codes([b"\xf1\x00DH LKAS 1.1 -150210"])
- self.assertEqual(results, {(b"DH", b"150210")})
+ assert results == {(b"DH", b"150210")}
# Some cameras and all radars do not have dates
results = get_platform_codes([b"\xf1\x00AEhe SCC H-CUP 1.01 1.01 96400-G2000 "])
- self.assertEqual(results, {(b"AEhe-G2000", None)})
+ assert results == {(b"AEhe-G2000", None)}
results = get_platform_codes([b"\xf1\x00CV1_ RDR ----- 1.00 1.01 99110-CV000 "])
- self.assertEqual(results, {(b"CV1-CV000", None)})
+ assert results == {(b"CV1-CV000", None)}
results = get_platform_codes([
b"\xf1\x00DH LKAS 1.1 -150210",
b"\xf1\x00AEhe SCC H-CUP 1.01 1.01 96400-G2000 ",
b"\xf1\x00CV1_ RDR ----- 1.00 1.01 99110-CV000 ",
])
- self.assertEqual(results, {(b"DH", b"150210"), (b"AEhe-G2000", None), (b"CV1-CV000", None)})
+ assert results == {(b"DH", b"150210"), (b"AEhe-G2000", None), (b"CV1-CV000", None)}
results = get_platform_codes([
b"\xf1\x00LX2 MFC AT USA LHD 1.00 1.07 99211-S8100 220222",
@@ -186,8 +187,8 @@ def test_platform_codes_spot_check(self):
b"\xf1\x00ON MFC AT USA LHD 1.00 1.01 99211-S9100 190405",
b"\xf1\x00ON MFC AT USA LHD 1.00 1.03 99211-S9100 190720",
])
- self.assertEqual(results, {(b"LX2-S8100", b"220222"), (b"LX2-S8100", b"211103"),
- (b"ON-S9100", b"190405"), (b"ON-S9100", b"190720")})
+ assert results == {(b"LX2-S8100", b"220222"), (b"LX2-S8100", b"211103"),
+ (b"ON-S9100", b"190405"), (b"ON-S9100", b"190720")}
def test_fuzzy_excluded_platforms(self):
# Asserts a list of platforms that will not fuzzy fingerprint with platform codes due to them being shared.
@@ -211,12 +212,8 @@ def test_fuzzy_excluded_platforms(self):
CP = car.CarParams.new_message(carFw=car_fw)
matches = FW_QUERY_CONFIG.match_fw_to_car_fuzzy(build_fw_dict(CP.carFw), CP.carVin, FW_VERSIONS)
if len(matches) == 1:
- self.assertEqual(list(matches)[0], platform)
+ assert list(matches)[0] == platform
else:
platforms_with_shared_codes.add(platform)
- self.assertEqual(platforms_with_shared_codes, excluded_platforms)
-
-
-if __name__ == "__main__":
- unittest.main()
+ assert platforms_with_shared_codes == excluded_platforms
diff --git a/selfdrive/car/hyundai/values.py b/selfdrive/car/hyundai/values.py
index db905597046d40..1e752185e6049a 100644
--- a/selfdrive/car/hyundai/values.py
+++ b/selfdrive/car/hyundai/values.py
@@ -323,6 +323,7 @@ class CAR(Platforms):
HyundaiCarDocs("Hyundai Tucson 2022", car_parts=CarParts.common([CarHarness.hyundai_n])),
HyundaiCarDocs("Hyundai Tucson 2023-24", "All", car_parts=CarParts.common([CarHarness.hyundai_n])),
HyundaiCarDocs("Hyundai Tucson Hybrid 2022-24", "All", car_parts=CarParts.common([CarHarness.hyundai_n])),
+ HyundaiCarDocs("Hyundai Tucson Plug-in Hybrid 2024", "All", car_parts=CarParts.common([CarHarness.hyundai_n])),
],
CarSpecs(mass=1630, wheelbase=2.756, steerRatio=13.7, tireStiffnessFactor=0.385),
)
diff --git a/selfdrive/car/subaru/tests/test_subaru.py b/selfdrive/car/subaru/tests/test_subaru.py
index c8cdf66065c2c2..33040442b69778 100644
--- a/selfdrive/car/subaru/tests/test_subaru.py
+++ b/selfdrive/car/subaru/tests/test_subaru.py
@@ -1,5 +1,4 @@
from cereal import car
-import unittest
from openpilot.selfdrive.car.subaru.fingerprints import FW_VERSIONS
Ecu = car.CarParams.Ecu
@@ -7,14 +6,11 @@
ECU_NAME = {v: k for k, v in Ecu.schema.enumerants.items()}
-class TestSubaruFingerprint(unittest.TestCase):
+class TestSubaruFingerprint:
def test_fw_version_format(self):
for platform, fws_per_ecu in FW_VERSIONS.items():
for (ecu, _, _), fws in fws_per_ecu.items():
fw_size = len(fws[0])
for fw in fws:
- self.assertEqual(len(fw), fw_size, f"{platform} {ecu}: {len(fw)} {fw_size}")
+ assert len(fw) == fw_size, f"{platform} {ecu}: {len(fw)} {fw_size}"
-
-if __name__ == "__main__":
- unittest.main()
diff --git a/selfdrive/car/tests/test_can_fingerprint.py b/selfdrive/car/tests/test_can_fingerprint.py
index 8df70073394fb1..bb585d567f326a 100755
--- a/selfdrive/car/tests/test_can_fingerprint.py
+++ b/selfdrive/car/tests/test_can_fingerprint.py
@@ -1,13 +1,12 @@
#!/usr/bin/env python3
from parameterized import parameterized
-import unittest
from cereal import log, messaging
from openpilot.selfdrive.car.car_helpers import FRAME_FINGERPRINT, can_fingerprint
from openpilot.selfdrive.car.fingerprints import _FINGERPRINTS as FINGERPRINTS
-class TestCanFingerprint(unittest.TestCase):
+class TestCanFingerprint:
@parameterized.expand(list(FINGERPRINTS.items()))
def test_can_fingerprint(self, car_model, fingerprints):
"""Tests online fingerprinting function on offline fingerprints"""
@@ -21,12 +20,12 @@ def test_can_fingerprint(self, car_model, fingerprints):
empty_can = messaging.new_message('can', 0)
car_fingerprint, finger = can_fingerprint(lambda: next(fingerprint_iter, empty_can)) # noqa: B023
- self.assertEqual(car_fingerprint, car_model)
- self.assertEqual(finger[0], fingerprint)
- self.assertEqual(finger[1], fingerprint)
- self.assertEqual(finger[2], {})
+ assert car_fingerprint == car_model
+ assert finger[0] == fingerprint
+ assert finger[1] == fingerprint
+ assert finger[2] == {}
- def test_timing(self):
+ def test_timing(self, subtests):
# just pick any CAN fingerprinting car
car_model = "CHEVROLET_BOLT_EUV"
fingerprint = FINGERPRINTS[car_model][0]
@@ -50,7 +49,7 @@ def test_timing(self):
cases.append((FRAME_FINGERPRINT * 2, None, can))
for expected_frames, car_model, can in cases:
- with self.subTest(expected_frames=expected_frames, car_model=car_model):
+ with subtests.test(expected_frames=expected_frames, car_model=car_model):
frames = 0
def test():
@@ -59,9 +58,5 @@ def test():
return can # noqa: B023
car_fingerprint, _ = can_fingerprint(test)
- self.assertEqual(car_fingerprint, car_model)
- self.assertEqual(frames, expected_frames + 2) # TODO: fix extra frames
-
-
-if __name__ == "__main__":
- unittest.main()
+ assert car_fingerprint == car_model
+ assert frames == expected_frames + 2# TODO: fix extra frames
diff --git a/selfdrive/car/tests/test_car_interfaces.py b/selfdrive/car/tests/test_car_interfaces.py
index 72c1e27ab971b2..4bbecd99feb4b8 100755
--- a/selfdrive/car/tests/test_car_interfaces.py
+++ b/selfdrive/car/tests/test_car_interfaces.py
@@ -1,7 +1,6 @@
#!/usr/bin/env python3
import os
import math
-import unittest
import hypothesis.strategies as st
from hypothesis import Phase, given, settings
import importlib
@@ -45,7 +44,7 @@ def get_fuzzy_car_interface_args(draw: DrawType) -> dict:
return params
-class TestCarInterfaces(unittest.TestCase):
+class TestCarInterfaces:
# FIXME: Due to the lists used in carParams, Phase.target is very slow and will cause
# many generated examples to overrun when max_examples > ~20, don't use it
@parameterized.expand([(car,) for car in sorted(all_known_cars())])
@@ -63,28 +62,28 @@ def test_car_interfaces(self, car_name, data):
assert car_params
assert car_interface
- self.assertGreater(car_params.mass, 1)
- self.assertGreater(car_params.wheelbase, 0)
+ assert car_params.mass > 1
+ assert car_params.wheelbase > 0
# centerToFront is center of gravity to front wheels, assert a reasonable range
- self.assertTrue(car_params.wheelbase * 0.3 < car_params.centerToFront < car_params.wheelbase * 0.7)
- self.assertGreater(car_params.maxLateralAccel, 0)
+ assert car_params.wheelbase * 0.3 < car_params.centerToFront < car_params.wheelbase * 0.7
+ assert car_params.maxLateralAccel > 0
# Longitudinal sanity checks
- self.assertEqual(len(car_params.longitudinalTuning.kpV), len(car_params.longitudinalTuning.kpBP))
- self.assertEqual(len(car_params.longitudinalTuning.kiV), len(car_params.longitudinalTuning.kiBP))
- self.assertEqual(len(car_params.longitudinalTuning.deadzoneV), len(car_params.longitudinalTuning.deadzoneBP))
+ assert len(car_params.longitudinalTuning.kpV) == len(car_params.longitudinalTuning.kpBP)
+ assert len(car_params.longitudinalTuning.kiV) == len(car_params.longitudinalTuning.kiBP)
+ assert len(car_params.longitudinalTuning.deadzoneV) == len(car_params.longitudinalTuning.deadzoneBP)
# Lateral sanity checks
if car_params.steerControlType != car.CarParams.SteerControlType.angle:
tune = car_params.lateralTuning
if tune.which() == 'pid':
- self.assertTrue(not math.isnan(tune.pid.kf) and tune.pid.kf > 0)
- self.assertTrue(len(tune.pid.kpV) > 0 and len(tune.pid.kpV) == len(tune.pid.kpBP))
- self.assertTrue(len(tune.pid.kiV) > 0 and len(tune.pid.kiV) == len(tune.pid.kiBP))
+ assert not math.isnan(tune.pid.kf) and tune.pid.kf > 0
+ assert len(tune.pid.kpV) > 0 and len(tune.pid.kpV) == len(tune.pid.kpBP)
+ assert len(tune.pid.kiV) > 0 and len(tune.pid.kiV) == len(tune.pid.kiBP)
elif tune.which() == 'torque':
- self.assertTrue(not math.isnan(tune.torque.kf) and tune.torque.kf > 0)
- self.assertTrue(not math.isnan(tune.torque.friction) and tune.torque.friction > 0)
+ assert not math.isnan(tune.torque.kf) and tune.torque.kf > 0
+ assert not math.isnan(tune.torque.friction) and tune.torque.friction > 0
cc_msg = FuzzyGenerator.get_random_msg(data.draw, car.CarControl, real_floats=True)
# Run car interface
@@ -93,7 +92,6 @@ def test_car_interfaces(self, car_name, data):
for _ in range(10):
car_interface.update(CC, [])
car_interface.apply(CC, now_nanos)
- car_interface.apply(CC, now_nanos)
now_nanos += DT_CTRL * 1e9 # 10 ms
CC = car.CarControl.new_message(**cc_msg)
@@ -101,7 +99,6 @@ def test_car_interfaces(self, car_name, data):
for _ in range(10):
car_interface.update(CC, [])
car_interface.apply(CC, now_nanos)
- car_interface.apply(CC, now_nanos)
now_nanos += DT_CTRL * 1e9 # 10ms
# Test controller initialization
@@ -130,33 +127,29 @@ def test_car_interfaces(self, car_name, data):
if not car_params.radarUnavailable and radar_interface.rcp is not None:
cans = [messaging.new_message('can', 1).to_bytes() for _ in range(5)]
rr = radar_interface.update(cans)
- self.assertTrue(rr is None or len(rr.errors) > 0)
+ assert rr is None or len(rr.errors) > 0
def test_interface_attrs(self):
"""Asserts basic behavior of interface attribute getter"""
num_brands = len(get_interface_attr('CAR'))
- self.assertGreaterEqual(num_brands, 13)
+ assert num_brands >= 13
# Should return value for all brands when not combining, even if attribute doesn't exist
ret = get_interface_attr('FAKE_ATTR')
- self.assertEqual(len(ret), num_brands)
+ assert len(ret) == num_brands
# Make sure we can combine dicts
ret = get_interface_attr('DBC', combine_brands=True)
- self.assertGreaterEqual(len(ret), 160)
+ assert len(ret) >= 160
# We don't support combining non-dicts
ret = get_interface_attr('CAR', combine_brands=True)
- self.assertEqual(len(ret), 0)
+ assert len(ret) == 0
# If brand has None value, it shouldn't return when ignore_none=True is specified
none_brands = {b for b, v in get_interface_attr('FINGERPRINTS').items() if v is None}
- self.assertGreaterEqual(len(none_brands), 1)
+ assert len(none_brands) >= 1
ret = get_interface_attr('FINGERPRINTS', ignore_none=True)
none_brands_in_ret = none_brands.intersection(ret)
- self.assertEqual(len(none_brands_in_ret), 0, f'Brands with None values in ignore_none=True result: {none_brands_in_ret}')
-
-
-if __name__ == "__main__":
- unittest.main()
+ assert len(none_brands_in_ret) == 0, f'Brands with None values in ignore_none=True result: {none_brands_in_ret}'
diff --git a/selfdrive/car/tests/test_docs.py b/selfdrive/car/tests/test_docs.py
index 143b402d5f264c..0ed95e18f25985 100755
--- a/selfdrive/car/tests/test_docs.py
+++ b/selfdrive/car/tests/test_docs.py
@@ -1,8 +1,8 @@
#!/usr/bin/env python3
from collections import defaultdict
import os
+import pytest
import re
-import unittest
from openpilot.common.basedir import BASEDIR
from openpilot.selfdrive.car.car_helpers import interfaces
@@ -14,9 +14,9 @@
from openpilot.selfdrive.debug.print_docs_diff import print_car_docs_diff
-class TestCarDocs(unittest.TestCase):
+class TestCarDocs:
@classmethod
- def setUpClass(cls):
+ def setup_class(cls):
cls.all_cars = get_all_car_docs()
def test_generator(self):
@@ -24,8 +24,7 @@ def test_generator(self):
with open(CARS_MD_OUT) as f:
current_cars_md = f.read()
- self.assertEqual(generated_cars_md, current_cars_md,
- "Run selfdrive/car/docs.py to update the compatibility documentation")
+ assert generated_cars_md == current_cars_md, "Run selfdrive/car/docs.py to update the compatibility documentation"
def test_docs_diff(self):
dump_path = os.path.join(BASEDIR, "selfdrive", "car", "tests", "cars_dump")
@@ -33,65 +32,61 @@ def test_docs_diff(self):
print_car_docs_diff(dump_path)
os.remove(dump_path)
- def test_duplicate_years(self):
+ def test_duplicate_years(self, subtests):
make_model_years = defaultdict(list)
for car in self.all_cars:
- with self.subTest(car_docs_name=car.name):
+ with subtests.test(car_docs_name=car.name):
make_model = (car.make, car.model)
for year in car.year_list:
- self.assertNotIn(year, make_model_years[make_model], f"{car.name}: Duplicate model year")
+ assert year not in make_model_years[make_model], f"{car.name}: Duplicate model year"
make_model_years[make_model].append(year)
- def test_missing_car_docs(self):
+ def test_missing_car_docs(self, subtests):
all_car_docs_platforms = [name for name, config in PLATFORMS.items()]
for platform in sorted(interfaces.keys()):
- with self.subTest(platform=platform):
- self.assertTrue(platform in all_car_docs_platforms, f"Platform: {platform} doesn't have a CarDocs entry")
+ with subtests.test(platform=platform):
+ assert platform in all_car_docs_platforms, f"Platform: {platform} doesn't have a CarDocs entry"
- def test_naming_conventions(self):
+ def test_naming_conventions(self, subtests):
# Asserts market-standard car naming conventions by brand
for car in self.all_cars:
- with self.subTest(car=car):
+ with subtests.test(car=car.name):
tokens = car.model.lower().split(" ")
if car.car_name == "hyundai":
- self.assertNotIn("phev", tokens, "Use `Plug-in Hybrid`")
- self.assertNotIn("hev", tokens, "Use `Hybrid`")
+ assert "phev" not in tokens, "Use `Plug-in Hybrid`"
+ assert "hev" not in tokens, "Use `Hybrid`"
if "plug-in hybrid" in car.model.lower():
- self.assertIn("Plug-in Hybrid", car.model, "Use correct capitalization")
+ assert "Plug-in Hybrid" in car.model, "Use correct capitalization"
if car.make != "Kia":
- self.assertNotIn("ev", tokens, "Use `Electric`")
+ assert "ev" not in tokens, "Use `Electric`"
elif car.car_name == "toyota":
if "rav4" in tokens:
- self.assertIn("RAV4", car.model, "Use correct capitalization")
+ assert "RAV4" in car.model, "Use correct capitalization"
- def test_torque_star(self):
+ def test_torque_star(self, subtests):
# Asserts brand-specific assumptions around steering torque star
for car in self.all_cars:
- with self.subTest(car=car):
+ with subtests.test(car=car.name):
# honda sanity check, it's the definition of a no torque star
if car.car_fingerprint in (HONDA.HONDA_ACCORD, HONDA.HONDA_CIVIC, HONDA.HONDA_CRV, HONDA.HONDA_ODYSSEY, HONDA.HONDA_PILOT):
- self.assertEqual(car.row[Column.STEERING_TORQUE], Star.EMPTY, f"{car.name} has full torque star")
+ assert car.row[Column.STEERING_TORQUE] == Star.EMPTY, f"{car.name} has full torque star"
elif car.car_name in ("toyota", "hyundai"):
- self.assertNotEqual(car.row[Column.STEERING_TORQUE], Star.EMPTY, f"{car.name} has no torque star")
+ assert car.row[Column.STEERING_TORQUE] != Star.EMPTY, f"{car.name} has no torque star"
- def test_year_format(self):
+ def test_year_format(self, subtests):
for car in self.all_cars:
- with self.subTest(car=car):
- self.assertIsNone(re.search(r"\d{4}-\d{4}", car.name), f"Format years correctly: {car.name}")
+ with subtests.test(car=car.name):
+ assert re.search(r"\d{4}-\d{4}", car.name) is None, f"Format years correctly: {car.name}"
- def test_harnesses(self):
+ def test_harnesses(self, subtests):
for car in self.all_cars:
- with self.subTest(car=car):
+ with subtests.test(car=car.name):
if car.name == "comma body":
- raise unittest.SkipTest
+ pytest.skip()
car_part_type = [p.part_type for p in car.car_parts.all_parts()]
car_parts = list(car.car_parts.all_parts())
- self.assertTrue(len(car_parts) > 0, f"Need to specify car parts: {car.name}")
- self.assertTrue(car_part_type.count(PartType.connector) == 1, f"Need to specify one harness connector: {car.name}")
- self.assertTrue(car_part_type.count(PartType.mount) == 1, f"Need to specify one mount: {car.name}")
- self.assertTrue(Cable.right_angle_obd_c_cable_1_5ft in car_parts, f"Need to specify a right angle OBD-C cable (1.5ft): {car.name}")
-
-
-if __name__ == "__main__":
- unittest.main()
+ assert len(car_parts) > 0, f"Need to specify car parts: {car.name}"
+ assert car_part_type.count(PartType.connector) == 1, f"Need to specify one harness connector: {car.name}"
+ assert car_part_type.count(PartType.mount) == 1, f"Need to specify one mount: {car.name}"
+ assert Cable.right_angle_obd_c_cable_1_5ft in car_parts, f"Need to specify a right angle OBD-C cable (1.5ft): {car.name}"
diff --git a/selfdrive/car/tests/test_fw_fingerprint.py b/selfdrive/car/tests/test_fw_fingerprint.py
index ed5edbef313633..230e6f10e1c27f 100755
--- a/selfdrive/car/tests/test_fw_fingerprint.py
+++ b/selfdrive/car/tests/test_fw_fingerprint.py
@@ -1,10 +1,9 @@
#!/usr/bin/env python3
+import pytest
import random
import time
-import unittest
from collections import defaultdict
from parameterized import parameterized
-from unittest import mock
from cereal import car
from openpilot.selfdrive.car.car_helpers import interfaces
@@ -27,11 +26,11 @@ def send(self, msg):
pass
-class TestFwFingerprint(unittest.TestCase):
+class TestFwFingerprint:
def assertFingerprints(self, candidates, expected):
candidates = list(candidates)
- self.assertEqual(len(candidates), 1, f"got more than one candidate: {candidates}")
- self.assertEqual(candidates[0], expected)
+ assert len(candidates) == 1, f"got more than one candidate: {candidates}"
+ assert candidates[0] == expected
@parameterized.expand([(b, c, e[c], n) for b, e in VERSIONS.items() for c in e for n in (True, False)])
def test_exact_match(self, brand, car_model, ecus, test_non_essential):
@@ -62,7 +61,7 @@ def test_custom_fuzzy_match(self, brand, car_model, ecus):
# Assert brand-specific fuzzy fingerprinting function doesn't disagree with standard fuzzy function
config = FW_QUERY_CONFIGS[brand]
if config.match_fw_to_car_fuzzy is None:
- raise unittest.SkipTest("Brand does not implement custom fuzzy fingerprinting function")
+ pytest.skip("Brand does not implement custom fuzzy fingerprinting function")
CP = car.CarParams.new_message()
for _ in range(5):
@@ -77,14 +76,14 @@ def test_custom_fuzzy_match(self, brand, car_model, ecus):
# If both have matches, they must agree
if len(matches) == 1 and len(brand_matches) == 1:
- self.assertEqual(matches, brand_matches)
+ assert matches == brand_matches
@parameterized.expand([(b, c, e[c]) for b, e in VERSIONS.items() for c in e])
def test_fuzzy_match_ecu_count(self, brand, car_model, ecus):
# Asserts that fuzzy matching does not count matching FW, but ECU address keys
valid_ecus = [e for e in ecus if e[0] not in FUZZY_EXCLUDE_ECUS]
if not len(valid_ecus):
- raise unittest.SkipTest("Car model has no compatible ECUs for fuzzy matching")
+ pytest.skip("Car model has no compatible ECUs for fuzzy matching")
fw = []
for ecu in valid_ecus:
@@ -99,19 +98,19 @@ def test_fuzzy_match_ecu_count(self, brand, car_model, ecus):
# Assert no match if there are not enough unique ECUs
unique_ecus = {(f['address'], f['subAddress']) for f in fw}
if len(unique_ecus) < 2:
- self.assertEqual(len(matches), 0, car_model)
+ assert len(matches) == 0, car_model
# There won't always be a match due to shared FW, but if there is it should be correct
elif len(matches):
self.assertFingerprints(matches, car_model)
- def test_fw_version_lists(self):
+ def test_fw_version_lists(self, subtests):
for car_model, ecus in FW_VERSIONS.items():
- with self.subTest(car_model=car_model.value):
+ with subtests.test(car_model=car_model.value):
for ecu, ecu_fw in ecus.items():
- with self.subTest(ecu):
+ with subtests.test(ecu):
duplicates = {fw for fw in ecu_fw if ecu_fw.count(fw) > 1}
- self.assertFalse(len(duplicates), f'{car_model}: Duplicate FW versions: Ecu.{ECU_NAME[ecu[0]]}, {duplicates}')
- self.assertGreater(len(ecu_fw), 0, f'{car_model}: No FW versions: Ecu.{ECU_NAME[ecu[0]]}')
+ assert not len(duplicates), f'{car_model}: Duplicate FW versions: Ecu.{ECU_NAME[ecu[0]]}, {duplicates}'
+ assert len(ecu_fw) > 0, f'{car_model}: No FW versions: Ecu.{ECU_NAME[ecu[0]]}'
def test_all_addrs_map_to_one_ecu(self):
for brand, cars in VERSIONS.items():
@@ -121,59 +120,59 @@ def test_all_addrs_map_to_one_ecu(self):
addr_to_ecu[(addr, sub_addr)].add(ecu_type)
ecus_for_addr = addr_to_ecu[(addr, sub_addr)]
ecu_strings = ", ".join([f'Ecu.{ECU_NAME[ecu]}' for ecu in ecus_for_addr])
- self.assertLessEqual(len(ecus_for_addr), 1, f"{brand} has multiple ECUs that map to one address: {ecu_strings} -> ({hex(addr)}, {sub_addr})")
+ assert len(ecus_for_addr) <= 1, f"{brand} has multiple ECUs that map to one address: {ecu_strings} -> ({hex(addr)}, {sub_addr})"
- def test_data_collection_ecus(self):
+ def test_data_collection_ecus(self, subtests):
# Asserts no extra ECUs are in the fingerprinting database
for brand, config in FW_QUERY_CONFIGS.items():
for car_model, ecus in VERSIONS[brand].items():
bad_ecus = set(ecus).intersection(config.extra_ecus)
- with self.subTest(car_model=car_model.value):
- self.assertFalse(len(bad_ecus), f'{car_model}: Fingerprints contain ECUs added for data collection: {bad_ecus}')
+ with subtests.test(car_model=car_model.value):
+ assert not len(bad_ecus), f'{car_model}: Fingerprints contain ECUs added for data collection: {bad_ecus}'
- def test_blacklisted_ecus(self):
+ def test_blacklisted_ecus(self, subtests):
blacklisted_addrs = (0x7c4, 0x7d0) # includes A/C ecu and an unknown ecu
for car_model, ecus in FW_VERSIONS.items():
- with self.subTest(car_model=car_model.value):
+ with subtests.test(car_model=car_model.value):
CP = interfaces[car_model][0].get_non_essential_params(car_model)
if CP.carName == 'subaru':
for ecu in ecus.keys():
- self.assertNotIn(ecu[1], blacklisted_addrs, f'{car_model}: Blacklisted ecu: (Ecu.{ECU_NAME[ecu[0]]}, {hex(ecu[1])})')
+ assert ecu[1] not in blacklisted_addrs, f'{car_model}: Blacklisted ecu: (Ecu.{ECU_NAME[ecu[0]]}, {hex(ecu[1])})'
elif CP.carName == "chrysler":
# Some HD trucks have a combined TCM and ECM
if CP.carFingerprint.startswith("RAM HD"):
for ecu in ecus.keys():
- self.assertNotEqual(ecu[0], Ecu.transmission, f"{car_model}: Blacklisted ecu: (Ecu.{ECU_NAME[ecu[0]]}, {hex(ecu[1])})")
+ assert ecu[0] != Ecu.transmission, f"{car_model}: Blacklisted ecu: (Ecu.{ECU_NAME[ecu[0]]}, {hex(ecu[1])})"
- def test_non_essential_ecus(self):
+ def test_non_essential_ecus(self, subtests):
for brand, config in FW_QUERY_CONFIGS.items():
- with self.subTest(brand):
+ with subtests.test(brand):
# These ECUs are already not in ESSENTIAL_ECUS which the fingerprint functions give a pass if missing
unnecessary_non_essential_ecus = set(config.non_essential_ecus) - set(ESSENTIAL_ECUS)
- self.assertEqual(unnecessary_non_essential_ecus, set(), "Declaring non-essential ECUs non-essential is not required: " +
- f"{', '.join([f'Ecu.{ECU_NAME[ecu]}' for ecu in unnecessary_non_essential_ecus])}")
+ assert unnecessary_non_essential_ecus == set(), "Declaring non-essential ECUs non-essential is not required: " + \
+ f"{', '.join([f'Ecu.{ECU_NAME[ecu]}' for ecu in unnecessary_non_essential_ecus])}"
- def test_missing_versions_and_configs(self):
+ def test_missing_versions_and_configs(self, subtests):
brand_versions = set(VERSIONS.keys())
brand_configs = set(FW_QUERY_CONFIGS.keys())
if len(brand_configs - brand_versions):
- with self.subTest():
- self.fail(f"Brands do not implement FW_VERSIONS: {brand_configs - brand_versions}")
+ with subtests.test():
+ pytest.fail(f"Brands do not implement FW_VERSIONS: {brand_configs - brand_versions}")
if len(brand_versions - brand_configs):
- with self.subTest():
- self.fail(f"Brands do not implement FW_QUERY_CONFIG: {brand_versions - brand_configs}")
+ with subtests.test():
+ pytest.fail(f"Brands do not implement FW_QUERY_CONFIG: {brand_versions - brand_configs}")
# Ensure each brand has at least 1 ECU to query, and extra ECU retrieval
for brand, config in FW_QUERY_CONFIGS.items():
- self.assertEqual(len(config.get_all_ecus({}, include_extra_ecus=False)), 0)
- self.assertEqual(config.get_all_ecus({}), set(config.extra_ecus))
- self.assertGreater(len(config.get_all_ecus(VERSIONS[brand])), 0)
+ assert len(config.get_all_ecus({}, include_extra_ecus=False)) == 0
+ assert config.get_all_ecus({}) == set(config.extra_ecus)
+ assert len(config.get_all_ecus(VERSIONS[brand])) > 0
- def test_fw_request_ecu_whitelist(self):
+ def test_fw_request_ecu_whitelist(self, subtests):
for brand, config in FW_QUERY_CONFIGS.items():
- with self.subTest(brand=brand):
+ with subtests.test(brand=brand):
whitelisted_ecus = {ecu for r in config.requests for ecu in r.whitelist_ecus}
brand_ecus = {fw[0] for car_fw in VERSIONS[brand].values() for fw in car_fw}
brand_ecus |= {ecu[0] for ecu in config.extra_ecus}
@@ -182,30 +181,30 @@ def test_fw_request_ecu_whitelist(self):
ecus_not_whitelisted = brand_ecus - whitelisted_ecus
ecu_strings = ", ".join([f'Ecu.{ECU_NAME[ecu]}' for ecu in ecus_not_whitelisted])
- self.assertFalse(len(whitelisted_ecus) and len(ecus_not_whitelisted),
- f'{brand.title()}: ECUs not in any FW query whitelists: {ecu_strings}')
+ assert not (len(whitelisted_ecus) and len(ecus_not_whitelisted)), \
+ f'{brand.title()}: ECUs not in any FW query whitelists: {ecu_strings}'
- def test_fw_requests(self):
+ def test_fw_requests(self, subtests):
# Asserts equal length request and response lists
for brand, config in FW_QUERY_CONFIGS.items():
- with self.subTest(brand=brand):
+ with subtests.test(brand=brand):
for request_obj in config.requests:
- self.assertEqual(len(request_obj.request), len(request_obj.response))
+ assert len(request_obj.request) == len(request_obj.response)
# No request on the OBD port (bus 1, multiplexed) should be run on an aux panda
- self.assertFalse(request_obj.auxiliary and request_obj.bus == 1 and request_obj.obd_multiplexing,
- f"{brand.title()}: OBD multiplexed request is marked auxiliary: {request_obj}")
+ assert not (request_obj.auxiliary and request_obj.bus == 1 and request_obj.obd_multiplexing), \
+ f"{brand.title()}: OBD multiplexed request is marked auxiliary: {request_obj}"
def test_brand_ecu_matches(self):
empty_response = {brand: set() for brand in FW_QUERY_CONFIGS}
- self.assertEqual(get_brand_ecu_matches(set()), empty_response)
+ assert get_brand_ecu_matches(set()) == empty_response
# we ignore bus
expected_response = empty_response | {'toyota': {(0x750, 0xf)}}
- self.assertEqual(get_brand_ecu_matches({(0x758, 0xf, 99)}), expected_response)
+ assert get_brand_ecu_matches({(0x758, 0xf, 99)}) == expected_response
-class TestFwFingerprintTiming(unittest.TestCase):
+class TestFwFingerprintTiming:
N: int = 5
TOL: float = 0.05
@@ -223,26 +222,26 @@ def fake_get_data(self, timeout):
self.total_time += timeout
return {}
- def _benchmark_brand(self, brand, num_pandas):
+ def _benchmark_brand(self, brand, num_pandas, mocker):
fake_socket = FakeSocket()
self.total_time = 0
- with (mock.patch("openpilot.selfdrive.car.fw_versions.set_obd_multiplexing", self.fake_set_obd_multiplexing),
- mock.patch("openpilot.selfdrive.car.isotp_parallel_query.IsoTpParallelQuery.get_data", self.fake_get_data)):
- for _ in range(self.N):
- # Treat each brand as the most likely (aka, the first) brand with OBD multiplexing initially on
- self.current_obd_multiplexing = True
+ mocker.patch("openpilot.selfdrive.car.fw_versions.set_obd_multiplexing", self.fake_set_obd_multiplexing)
+ mocker.patch("openpilot.selfdrive.car.isotp_parallel_query.IsoTpParallelQuery.get_data", self.fake_get_data)
+ for _ in range(self.N):
+ # Treat each brand as the most likely (aka, the first) brand with OBD multiplexing initially on
+ self.current_obd_multiplexing = True
- t = time.perf_counter()
- get_fw_versions(fake_socket, fake_socket, brand, num_pandas=num_pandas)
- self.total_time += time.perf_counter() - t
+ t = time.perf_counter()
+ get_fw_versions(fake_socket, fake_socket, brand, num_pandas=num_pandas)
+ self.total_time += time.perf_counter() - t
return self.total_time / self.N
def _assert_timing(self, avg_time, ref_time):
- self.assertLess(avg_time, ref_time + self.TOL)
- self.assertGreater(avg_time, ref_time - self.TOL, "Performance seems to have improved, update test refs.")
+ assert avg_time < ref_time + self.TOL
+ assert avg_time > ref_time - self.TOL, "Performance seems to have improved, update test refs."
- def test_startup_timing(self):
+ def test_startup_timing(self, subtests, mocker):
# Tests worse-case VIN query time and typical present ECU query time
vin_ref_times = {'worst': 1.4, 'best': 0.7} # best assumes we go through all queries to get a match
present_ecu_ref_time = 0.45
@@ -253,24 +252,24 @@ def fake_get_ecu_addrs(*_, timeout):
fake_socket = FakeSocket()
self.total_time = 0.0
- with (mock.patch("openpilot.selfdrive.car.fw_versions.set_obd_multiplexing", self.fake_set_obd_multiplexing),
- mock.patch("openpilot.selfdrive.car.fw_versions.get_ecu_addrs", fake_get_ecu_addrs)):
- for _ in range(self.N):
- self.current_obd_multiplexing = True
- get_present_ecus(fake_socket, fake_socket, num_pandas=2)
+ mocker.patch("openpilot.selfdrive.car.fw_versions.set_obd_multiplexing", self.fake_set_obd_multiplexing)
+ mocker.patch("openpilot.selfdrive.car.fw_versions.get_ecu_addrs", fake_get_ecu_addrs)
+ for _ in range(self.N):
+ self.current_obd_multiplexing = True
+ get_present_ecus(fake_socket, fake_socket, num_pandas=2)
self._assert_timing(self.total_time / self.N, present_ecu_ref_time)
print(f'get_present_ecus, query time={self.total_time / self.N} seconds')
for name, args in (('worst', {}), ('best', {'retry': 1})):
- with self.subTest(name=name):
+ with subtests.test(name=name):
self.total_time = 0.0
- with (mock.patch("openpilot.selfdrive.car.isotp_parallel_query.IsoTpParallelQuery.get_data", self.fake_get_data)):
- for _ in range(self.N):
- get_vin(fake_socket, fake_socket, (0, 1), **args)
+ mocker.patch("openpilot.selfdrive.car.isotp_parallel_query.IsoTpParallelQuery.get_data", self.fake_get_data)
+ for _ in range(self.N):
+ get_vin(fake_socket, fake_socket, (0, 1), **args)
self._assert_timing(self.total_time / self.N, vin_ref_times[name])
print(f'get_vin {name} case, query time={self.total_time / self.N} seconds')
- def test_fw_query_timing(self):
+ def test_fw_query_timing(self, subtests, mocker):
total_ref_time = {1: 7.2, 2: 7.8}
brand_ref_times = {
1: {
@@ -297,8 +296,8 @@ def test_fw_query_timing(self):
total_times = {1: 0.0, 2: 0.0}
for num_pandas in (1, 2):
for brand, config in FW_QUERY_CONFIGS.items():
- with self.subTest(brand=brand, num_pandas=num_pandas):
- avg_time = self._benchmark_brand(brand, num_pandas)
+ with subtests.test(brand=brand, num_pandas=num_pandas):
+ avg_time = self._benchmark_brand(brand, num_pandas, mocker)
total_times[num_pandas] += avg_time
avg_time = round(avg_time, 2)
@@ -311,11 +310,7 @@ def test_fw_query_timing(self):
print(f'{brand=}, {num_pandas=}, {len(config.requests)=}, avg FW query time={avg_time} seconds')
for num_pandas in (1, 2):
- with self.subTest(brand='all_brands', num_pandas=num_pandas):
+ with subtests.test(brand='all_brands', num_pandas=num_pandas):
total_time = round(total_times[num_pandas], 2)
self._assert_timing(total_time, total_ref_time[num_pandas])
print(f'all brands, total FW query time={total_time} seconds')
-
-
-if __name__ == "__main__":
- unittest.main()
diff --git a/selfdrive/car/tests/test_lateral_limits.py b/selfdrive/car/tests/test_lateral_limits.py
index e5cfd972bdcb7c..a478bc601a5a3c 100755
--- a/selfdrive/car/tests/test_lateral_limits.py
+++ b/selfdrive/car/tests/test_lateral_limits.py
@@ -2,8 +2,7 @@
from collections import defaultdict
import importlib
from parameterized import parameterized_class
-import sys
-import unittest
+import pytest
from openpilot.common.realtime import DT_CTRL
from openpilot.selfdrive.car.car_helpers import interfaces
@@ -25,23 +24,23 @@
@parameterized_class('car_model', [(c,) for c in sorted(CAR_MODELS)])
-class TestLateralLimits(unittest.TestCase):
+class TestLateralLimits:
car_model: str
@classmethod
- def setUpClass(cls):
+ def setup_class(cls):
CarInterface, _, _ = interfaces[cls.car_model]
CP = CarInterface.get_non_essential_params(cls.car_model)
if CP.dashcamOnly:
- raise unittest.SkipTest("Platform is behind dashcamOnly")
+ pytest.skip("Platform is behind dashcamOnly")
# TODO: test all platforms
if CP.lateralTuning.which() != 'torque':
- raise unittest.SkipTest
+ pytest.skip()
if CP.notCar:
- raise unittest.SkipTest
+ pytest.skip()
CarControllerParams = importlib.import_module(f'selfdrive.car.{CP.carName}.values').CarControllerParams
cls.control_params = CarControllerParams(CP)
@@ -66,26 +65,8 @@ def calculate_0_5s_jerk(control_params, torque_params):
def test_jerk_limits(self):
up_jerk, down_jerk = self.calculate_0_5s_jerk(self.control_params, self.torque_params)
car_model_jerks[self.car_model] = {"up_jerk": up_jerk, "down_jerk": down_jerk}
- self.assertLessEqual(up_jerk, MAX_LAT_JERK_UP + MAX_LAT_JERK_UP_TOLERANCE)
- self.assertLessEqual(down_jerk, MAX_LAT_JERK_DOWN)
+ assert up_jerk <= MAX_LAT_JERK_UP + MAX_LAT_JERK_UP_TOLERANCE
+ assert down_jerk <= MAX_LAT_JERK_DOWN
def test_max_lateral_accel(self):
- self.assertLessEqual(self.torque_params["MAX_LAT_ACCEL_MEASURED"], MAX_LAT_ACCEL)
-
-
-if __name__ == "__main__":
- result = unittest.main(exit=False)
-
- print(f"\n\n---- Lateral limit report ({len(CAR_MODELS)} cars) ----\n")
-
- max_car_model_len = max([len(car_model) for car_model in car_model_jerks])
- for car_model, _jerks in sorted(car_model_jerks.items(), key=lambda i: i[1]['up_jerk'], reverse=True):
- violation = _jerks["up_jerk"] > MAX_LAT_JERK_UP + MAX_LAT_JERK_UP_TOLERANCE or \
- _jerks["down_jerk"] > MAX_LAT_JERK_DOWN
- violation_str = " - VIOLATION" if violation else ""
-
- print(f"{car_model:{max_car_model_len}} - up jerk: {round(_jerks['up_jerk'], 2):5} " +
- f"m/s^3, down jerk: {round(_jerks['down_jerk'], 2):5} m/s^3{violation_str}")
-
- # exit with test result
- sys.exit(not result.result.wasSuccessful())
+ assert self.torque_params["MAX_LAT_ACCEL_MEASURED"] <= MAX_LAT_ACCEL
diff --git a/selfdrive/car/tests/test_models.py b/selfdrive/car/tests/test_models.py
index dc3d4256a20e1d..026693bdce7933 100755
--- a/selfdrive/car/tests/test_models.py
+++ b/selfdrive/car/tests/test_models.py
@@ -4,7 +4,7 @@
import importlib
import pytest
import random
-import unittest
+import unittest # noqa: TID251
from collections import defaultdict, Counter
import hypothesis.strategies as st
from hypothesis import Phase, given, settings
diff --git a/selfdrive/car/tests/test_platform_configs.py b/selfdrive/car/tests/test_platform_configs.py
index 523c331b9e3c83..217189255e2a86 100755
--- a/selfdrive/car/tests/test_platform_configs.py
+++ b/selfdrive/car/tests/test_platform_configs.py
@@ -1,25 +1,19 @@
#!/usr/bin/env python3
-import unittest
-
from openpilot.selfdrive.car.values import PLATFORMS
-class TestPlatformConfigs(unittest.TestCase):
- def test_configs(self):
+class TestPlatformConfigs:
+ def test_configs(self, subtests):
for name, platform in PLATFORMS.items():
- with self.subTest(platform=str(platform)):
- self.assertTrue(platform.config._frozen)
+ with subtests.test(platform=str(platform)):
+ assert platform.config._frozen
if platform != "MOCK":
- self.assertIn("pt", platform.config.dbc_dict)
- self.assertTrue(len(platform.config.platform_str) > 0)
-
- self.assertEqual(name, platform.config.platform_str)
-
- self.assertIsNotNone(platform.config.specs)
+ assert "pt" in platform.config.dbc_dict
+ assert len(platform.config.platform_str) > 0
+ assert name == platform.config.platform_str
-if __name__ == "__main__":
- unittest.main()
+ assert platform.config.specs is not None
diff --git a/selfdrive/car/toyota/carcontroller.py b/selfdrive/car/toyota/carcontroller.py
index f2d8a4a7bc5637..354d10a90eba09 100644
--- a/selfdrive/car/toyota/carcontroller.py
+++ b/selfdrive/car/toyota/carcontroller.py
@@ -102,11 +102,6 @@ def update(self, CC, CS, now_nanos):
# *** gas and brake ***
pcm_accel_cmd = clip(actuators.accel, self.params.ACCEL_MIN, self.params.ACCEL_MAX)
- # TODO: probably can delete this. CS.pcm_acc_status uses a different signal
- # than CS.cruiseState.enabled. confirm they're not meaningfully different
- if not CC.enabled and CS.pcm_acc_status:
- pcm_cancel_cmd = 1
-
# on entering standstill, send standstill request
if CS.out.standstill and not self.last_standstill and (self.CP.carFingerprint not in NO_STOP_TIMER_CAR):
self.standstill_req = True
diff --git a/selfdrive/car/toyota/tests/test_toyota.py b/selfdrive/car/toyota/tests/test_toyota.py
index e2a9b46eb4dd75..ef49e005512efc 100755
--- a/selfdrive/car/toyota/tests/test_toyota.py
+++ b/selfdrive/car/toyota/tests/test_toyota.py
@@ -1,6 +1,5 @@
#!/usr/bin/env python3
from hypothesis import given, settings, strategies as st
-import unittest
from cereal import car
from openpilot.selfdrive.car.fw_versions import build_fw_dict
@@ -17,59 +16,58 @@ def check_fw_version(fw_version: bytes) -> bool:
return b'?' not in fw_version
-class TestToyotaInterfaces(unittest.TestCase):
+class TestToyotaInterfaces:
def test_car_sets(self):
- self.assertTrue(len(ANGLE_CONTROL_CAR - TSS2_CAR) == 0)
- self.assertTrue(len(RADAR_ACC_CAR - TSS2_CAR) == 0)
+ assert len(ANGLE_CONTROL_CAR - TSS2_CAR) == 0
+ assert len(RADAR_ACC_CAR - TSS2_CAR) == 0
def test_lta_platforms(self):
# At this time, only RAV4 2023 is expected to use LTA/angle control
- self.assertEqual(ANGLE_CONTROL_CAR, {CAR.TOYOTA_RAV4_TSS2_2023})
+ assert ANGLE_CONTROL_CAR == {CAR.TOYOTA_RAV4_TSS2_2023}
def test_tss2_dbc(self):
# We make some assumptions about TSS2 platforms,
# like looking up certain signals only in this DBC
for car_model, dbc in DBC.items():
if car_model in TSS2_CAR:
- self.assertEqual(dbc["pt"], "toyota_nodsu_pt_generated")
+ assert dbc["pt"] == "toyota_nodsu_pt_generated"
- def test_essential_ecus(self):
+ def test_essential_ecus(self, subtests):
# Asserts standard ECUs exist for each platform
common_ecus = {Ecu.fwdRadar, Ecu.fwdCamera}
for car_model, ecus in FW_VERSIONS.items():
- with self.subTest(car_model=car_model.value):
+ with subtests.test(car_model=car_model.value):
present_ecus = {ecu[0] for ecu in ecus}
missing_ecus = common_ecus - present_ecus
- self.assertEqual(len(missing_ecus), 0)
+ assert len(missing_ecus) == 0
# Some exceptions for other common ECUs
if car_model not in (CAR.TOYOTA_ALPHARD_TSS2,):
- self.assertIn(Ecu.abs, present_ecus)
+ assert Ecu.abs in present_ecus
if car_model not in (CAR.TOYOTA_MIRAI,):
- self.assertIn(Ecu.engine, present_ecus)
+ assert Ecu.engine in present_ecus
if car_model not in (CAR.TOYOTA_PRIUS_V, CAR.LEXUS_CTH):
- self.assertIn(Ecu.eps, present_ecus)
+ assert Ecu.eps in present_ecus
-class TestToyotaFingerprint(unittest.TestCase):
- def test_non_essential_ecus(self):
+class TestToyotaFingerprint:
+ def test_non_essential_ecus(self, subtests):
# Ensures only the cars that have multiple engine ECUs are in the engine non-essential ECU list
for car_model, ecus in FW_VERSIONS.items():
- with self.subTest(car_model=car_model.value):
+ with subtests.test(car_model=car_model.value):
engine_ecus = {ecu for ecu in ecus if ecu[0] == Ecu.engine}
- self.assertEqual(len(engine_ecus) > 1,
- car_model in FW_QUERY_CONFIG.non_essential_ecus[Ecu.engine],
- f"Car model unexpectedly {'not ' if len(engine_ecus) > 1 else ''}in non-essential list")
+ assert (len(engine_ecus) > 1) == (car_model in FW_QUERY_CONFIG.non_essential_ecus[Ecu.engine]), \
+ f"Car model unexpectedly {'not ' if len(engine_ecus) > 1 else ''}in non-essential list"
- def test_valid_fw_versions(self):
+ def test_valid_fw_versions(self, subtests):
# Asserts all FW versions are valid
for car_model, ecus in FW_VERSIONS.items():
- with self.subTest(car_model=car_model.value):
+ with subtests.test(car_model=car_model.value):
for fws in ecus.values():
for fw in fws:
- self.assertTrue(check_fw_version(fw), fw)
+ assert check_fw_version(fw), fw
# Tests for part numbers, platform codes, and sub-versions which Toyota will use to fuzzy
# fingerprint in the absence of full FW matches:
@@ -80,25 +78,25 @@ def test_platform_codes_fuzzy_fw(self, data):
fws = data.draw(fw_strategy)
get_platform_codes(fws)
- def test_platform_code_ecus_available(self):
+ def test_platform_code_ecus_available(self, subtests):
# Asserts ECU keys essential for fuzzy fingerprinting are available on all platforms
for car_model, ecus in FW_VERSIONS.items():
- with self.subTest(car_model=car_model.value):
+ with subtests.test(car_model=car_model.value):
for platform_code_ecu in PLATFORM_CODE_ECUS:
if platform_code_ecu == Ecu.eps and car_model in (CAR.TOYOTA_PRIUS_V, CAR.LEXUS_CTH,):
continue
if platform_code_ecu == Ecu.abs and car_model in (CAR.TOYOTA_ALPHARD_TSS2,):
continue
- self.assertIn(platform_code_ecu, [e[0] for e in ecus])
+ assert platform_code_ecu in [e[0] for e in ecus]
- def test_fw_format(self):
+ def test_fw_format(self, subtests):
# Asserts:
# - every supported ECU FW version returns one platform code
# - every supported ECU FW version has a part number
# - expected parsing of ECU sub-versions
for car_model, ecus in FW_VERSIONS.items():
- with self.subTest(car_model=car_model.value):
+ with subtests.test(car_model=car_model.value):
for ecu, fws in ecus.items():
if ecu[0] not in PLATFORM_CODE_ECUS:
continue
@@ -107,15 +105,14 @@ def test_fw_format(self):
for fw in fws:
result = get_platform_codes([fw])
# Check only one platform code and sub-version
- self.assertEqual(1, len(result), f"Unable to parse FW: {fw}")
- self.assertEqual(1, len(list(result.values())[0]), f"Unable to parse FW: {fw}")
+ assert 1 == len(result), f"Unable to parse FW: {fw}"
+ assert 1 == len(list(result.values())[0]), f"Unable to parse FW: {fw}"
codes |= result
# Toyota places the ECU part number in their FW versions, assert all parsable
# Note that there is only one unique part number per ECU across the fleet, so this
# is not important for identification, just a sanity check.
- self.assertTrue(all(code.count(b"-") > 1 for code in codes),
- f"FW does not have part number: {fw} {codes}")
+ assert all(code.count(b"-") > 1 for code in codes), f"FW does not have part number: {fw} {codes}"
def test_platform_codes_spot_check(self):
# Asserts basic platform code parsing behavior for a few cases
@@ -125,20 +122,20 @@ def test_platform_codes_spot_check(self):
b"F152607110\x00\x00\x00\x00\x00\x00",
b"F152607180\x00\x00\x00\x00\x00\x00",
])
- self.assertEqual(results, {b"F1526-07-1": {b"10", b"40", b"71", b"80"}})
+ assert results == {b"F1526-07-1": {b"10", b"40", b"71", b"80"}}
results = get_platform_codes([
b"\x028646F4104100\x00\x00\x00\x008646G5301200\x00\x00\x00\x00",
b"\x028646F4104100\x00\x00\x00\x008646G3304000\x00\x00\x00\x00",
])
- self.assertEqual(results, {b"8646F-41-04": {b"100"}})
+ assert results == {b"8646F-41-04": {b"100"}}
# Short version has no part number
results = get_platform_codes([
b"\x0235870000\x00\x00\x00\x00\x00\x00\x00\x00A0202000\x00\x00\x00\x00\x00\x00\x00\x00",
b"\x0235883000\x00\x00\x00\x00\x00\x00\x00\x00A0202000\x00\x00\x00\x00\x00\x00\x00\x00",
])
- self.assertEqual(results, {b"58-70": {b"000"}, b"58-83": {b"000"}})
+ assert results == {b"58-70": {b"000"}, b"58-83": {b"000"}}
results = get_platform_codes([
b"F152607110\x00\x00\x00\x00\x00\x00",
@@ -146,7 +143,7 @@ def test_platform_codes_spot_check(self):
b"\x028646F4104100\x00\x00\x00\x008646G5301200\x00\x00\x00\x00",
b"\x0235879000\x00\x00\x00\x00\x00\x00\x00\x00A4701000\x00\x00\x00\x00\x00\x00\x00\x00",
])
- self.assertEqual(results, {b"F1526-07-1": {b"10", b"40"}, b"8646F-41-04": {b"100"}, b"58-79": {b"000"}})
+ assert results == {b"F1526-07-1": {b"10", b"40"}, b"8646F-41-04": {b"100"}, b"58-79": {b"000"}}
def test_fuzzy_excluded_platforms(self):
# Asserts a list of platforms that will not fuzzy fingerprint with platform codes due to them being shared.
@@ -162,13 +159,9 @@ def test_fuzzy_excluded_platforms(self):
CP = car.CarParams.new_message(carFw=car_fw)
matches = FW_QUERY_CONFIG.match_fw_to_car_fuzzy(build_fw_dict(CP.carFw), CP.carVin, FW_VERSIONS)
if len(matches) == 1:
- self.assertEqual(list(matches)[0], platform)
+ assert list(matches)[0] == platform
else:
# If a platform has multiple matches, add it and its matches
platforms_with_shared_codes |= {str(platform), *matches}
- self.assertEqual(platforms_with_shared_codes, FUZZY_EXCLUDED_PLATFORMS, (len(platforms_with_shared_codes), len(FW_VERSIONS)))
-
-
-if __name__ == "__main__":
- unittest.main()
+ assert platforms_with_shared_codes == FUZZY_EXCLUDED_PLATFORMS, (len(platforms_with_shared_codes), len(FW_VERSIONS))
diff --git a/selfdrive/car/volkswagen/tests/test_volkswagen.py b/selfdrive/car/volkswagen/tests/test_volkswagen.py
index 17331203bbe855..561d28b9fbcfe8 100755
--- a/selfdrive/car/volkswagen/tests/test_volkswagen.py
+++ b/selfdrive/car/volkswagen/tests/test_volkswagen.py
@@ -1,7 +1,6 @@
#!/usr/bin/env python3
import random
import re
-import unittest
from cereal import car
from openpilot.selfdrive.car.volkswagen.values import CAR, FW_QUERY_CONFIG, WMI
@@ -14,35 +13,35 @@
SPARE_PART_FW_PATTERN = re.compile(b'\xf1\x87(?P[0-9][0-9A-Z]{2})(?P[0-9][0-9A-Z][0-9])(?P[0-9A-Z]{2}[0-9])([A-Z0-9]| )')
-class TestVolkswagenPlatformConfigs(unittest.TestCase):
- def test_spare_part_fw_pattern(self):
+class TestVolkswagenPlatformConfigs:
+ def test_spare_part_fw_pattern(self, subtests):
# Relied on for determining if a FW is likely VW
for platform, ecus in FW_VERSIONS.items():
- with self.subTest(platform=platform):
+ with subtests.test(platform=platform.value):
for fws in ecus.values():
for fw in fws:
- self.assertNotEqual(SPARE_PART_FW_PATTERN.match(fw), None, f"Bad FW: {fw}")
+ assert SPARE_PART_FW_PATTERN.match(fw) is not None, f"Bad FW: {fw}"
- def test_chassis_codes(self):
+ def test_chassis_codes(self, subtests):
for platform in CAR:
- with self.subTest(platform=platform):
- self.assertTrue(len(platform.config.wmis) > 0, "WMIs not set")
- self.assertTrue(len(platform.config.chassis_codes) > 0, "Chassis codes not set")
- self.assertTrue(all(CHASSIS_CODE_PATTERN.match(cc) for cc in
- platform.config.chassis_codes), "Bad chassis codes")
+ with subtests.test(platform=platform.value):
+ assert len(platform.config.wmis) > 0, "WMIs not set"
+ assert len(platform.config.chassis_codes) > 0, "Chassis codes not set"
+ assert all(CHASSIS_CODE_PATTERN.match(cc) for cc in \
+ platform.config.chassis_codes), "Bad chassis codes"
# No two platforms should share chassis codes
for comp in CAR:
if platform == comp:
continue
- self.assertEqual(set(), platform.config.chassis_codes & comp.config.chassis_codes,
- f"Shared chassis codes: {comp}")
+ assert set() == platform.config.chassis_codes & comp.config.chassis_codes, \
+ f"Shared chassis codes: {comp}"
- def test_custom_fuzzy_fingerprinting(self):
+ def test_custom_fuzzy_fingerprinting(self, subtests):
all_radar_fw = list({fw for ecus in FW_VERSIONS.values() for fw in ecus[Ecu.fwdRadar, 0x757, None]})
for platform in CAR:
- with self.subTest(platform=platform):
+ with subtests.test(platform=platform.name):
for wmi in WMI:
for chassis_code in platform.config.chassis_codes | {"00"}:
vin = ["0"] * 17
@@ -59,8 +58,4 @@ def test_custom_fuzzy_fingerprinting(self):
matches = FW_QUERY_CONFIG.match_fw_to_car_fuzzy(live_fws, vin, FW_VERSIONS)
expected_matches = {platform} if should_match else set()
- self.assertEqual(expected_matches, matches, "Bad match")
-
-
-if __name__ == "__main__":
- unittest.main()
+ assert expected_matches == matches, "Bad match"
diff --git a/selfdrive/controls/controlsd.py b/selfdrive/controls/controlsd.py
index eba4142ca85a4c..92c93311218970 100755
--- a/selfdrive/controls/controlsd.py
+++ b/selfdrive/controls/controlsd.py
@@ -21,7 +21,7 @@
from openpilot.selfdrive.car.car_helpers import get_startup_event
from openpilot.selfdrive.car.card import CarD
from openpilot.selfdrive.controls.lib.alertmanager import AlertManager, set_offroad_alert
-from openpilot.selfdrive.controls.lib.drive_helpers import clip_curvature
+from openpilot.selfdrive.controls.lib.drive_helpers import VCruiseHelper, clip_curvature
from openpilot.selfdrive.controls.lib.events import Events, ET
from openpilot.selfdrive.controls.lib.latcontrol import LatControl, MIN_LATERAL_CONTROL_SPEED
from openpilot.selfdrive.controls.lib.latcontrol_pid import LatControlPID
@@ -143,6 +143,7 @@ def __init__(self, CI=None):
self.desired_curvature = 0.0
self.experimental_mode = False
self.personality = self.read_personality_param()
+ self.v_cruise_helper = VCruiseHelper(self.CP)
self.recalibrating_seen = False
self.can_log_mono_time = 0
@@ -168,7 +169,7 @@ def set_initial_state(self):
controls_state = self.params.get("ReplayControlsState")
if controls_state is not None:
with log.ControlsState.from_bytes(controls_state) as controls_state:
- self.card.v_cruise_helper.v_cruise_kph = controls_state.vCruise
+ self.v_cruise_helper.v_cruise_kph = controls_state.vCruise
if any(ps.controlsAllowed for ps in self.sm['pandaStates']):
self.state = State.enabled
@@ -197,6 +198,11 @@ def update_events(self, CS):
if self.CP.passive:
return
+ # Block resume if cruise never previously enabled
+ resume_pressed = any(be.type in (ButtonType.accelCruise, ButtonType.resumeCruise) for be in CS.buttonEvents)
+ if not self.CP.pcmCruise and not self.v_cruise_helper.v_cruise_initialized and resume_pressed:
+ self.events.add(EventName.resumeBlocked)
+
if not self.CP.notCar:
self.events.add_from_msg(self.sm['driverMonitoringState'].events)
@@ -425,7 +431,7 @@ def data_sample(self):
def state_transition(self, CS):
"""Compute conditional state transitions and execute actions on state transitions"""
- self.card.v_cruise_helper.update_v_cruise(CS, self.enabled, self.is_metric)
+ self.v_cruise_helper.update_v_cruise(CS, self.enabled, self.is_metric)
# decrement the soft disable timer at every step, as it's reset on
# entrance in SOFT_DISABLING state
@@ -500,7 +506,7 @@ def state_transition(self, CS):
else:
self.state = State.enabled
self.current_alert_types.append(ET.ENABLE)
- self.card.v_cruise_helper.initialize_v_cruise(CS, self.experimental_mode)
+ self.v_cruise_helper.initialize_v_cruise(CS, self.experimental_mode)
# Check if openpilot is engaged and actuators are enabled
self.enabled = self.state in ENABLED_STATES
@@ -556,7 +562,7 @@ def state_control(self, CS):
if not self.joystick_mode:
# accel PID loop
- pid_accel_limits = self.CI.get_pid_accel_limits(self.CP, CS.vEgo, self.card.v_cruise_helper.v_cruise_kph * CV.KPH_TO_MS)
+ pid_accel_limits = self.CI.get_pid_accel_limits(self.CP, CS.vEgo, self.v_cruise_helper.v_cruise_kph * CV.KPH_TO_MS)
t_since_plan = (self.sm.frame - self.sm.recv_frame['longitudinalPlan']) * DT_CTRL
actuators.accel = self.LoC.update(CC.longActive, CS, long_plan, pid_accel_limits, t_since_plan)
@@ -659,7 +665,7 @@ def publish_logs(self, CS, start_time, CC, lac_log):
CC.cruiseControl.resume = self.enabled and CS.cruiseState.standstill and speeds[-1] > 0.1
hudControl = CC.hudControl
- hudControl.setSpeed = float(self.card.v_cruise_helper.v_cruise_cluster_kph * CV.KPH_TO_MS)
+ hudControl.setSpeed = float(self.v_cruise_helper.v_cruise_cluster_kph * CV.KPH_TO_MS)
hudControl.speedVisible = self.enabled
hudControl.lanesVisible = self.enabled
hudControl.leadVisible = self.sm['longitudinalPlan'].hasLead
@@ -743,8 +749,8 @@ def publish_logs(self, CS, start_time, CC, lac_log):
controlsState.engageable = not self.events.contains(ET.NO_ENTRY)
controlsState.longControlState = self.LoC.long_control_state
controlsState.vPid = float(self.LoC.v_pid)
- controlsState.vCruise = float(self.card.v_cruise_helper.v_cruise_kph)
- controlsState.vCruiseCluster = float(self.card.v_cruise_helper.v_cruise_cluster_kph)
+ controlsState.vCruise = float(self.v_cruise_helper.v_cruise_kph)
+ controlsState.vCruiseCluster = float(self.v_cruise_helper.v_cruise_cluster_kph)
controlsState.upAccelCmd = float(self.LoC.pid.p)
controlsState.uiAccelCmd = float(self.LoC.pid.i)
controlsState.ufAccelCmd = float(self.LoC.pid.f)
diff --git a/selfdrive/controls/lib/events.py b/selfdrive/controls/lib/events.py
index f1bd07c69bf290..99246a139789ef 100755
--- a/selfdrive/controls/lib/events.py
+++ b/selfdrive/controls/lib/events.py
@@ -487,7 +487,7 @@ def joystick_alert(CP: car.CarParams, CS: car.CarState, sm: messaging.SubMaster,
"Press Resume to Exit Standstill",
"",
AlertStatus.userPrompt, AlertSize.small,
- Priority.MID, VisualAlert.none, AudibleAlert.none, .2),
+ Priority.LOW, VisualAlert.none, AudibleAlert.none, .2),
},
EventName.belowSteerSpeed: {
diff --git a/selfdrive/controls/lib/tests/test_alertmanager.py b/selfdrive/controls/lib/tests/test_alertmanager.py
index dbd42858a07977..c234cc49d60ea8 100755
--- a/selfdrive/controls/lib/tests/test_alertmanager.py
+++ b/selfdrive/controls/lib/tests/test_alertmanager.py
@@ -1,12 +1,11 @@
#!/usr/bin/env python3
import random
-import unittest
from openpilot.selfdrive.controls.lib.events import Alert, EVENTS
from openpilot.selfdrive.controls.lib.alertmanager import AlertManager
-class TestAlertManager(unittest.TestCase):
+class TestAlertManager:
def test_duration(self):
"""
@@ -38,8 +37,4 @@ def test_duration(self):
shown = current_alert is not None
should_show = frame <= show_duration
- self.assertEqual(shown, should_show, msg=f"{frame=} {add_duration=} {duration=}")
-
-
-if __name__ == "__main__":
- unittest.main()
+ assert shown == should_show, f"{frame=} {add_duration=} {duration=}"
diff --git a/selfdrive/controls/lib/tests/test_latcontrol.py b/selfdrive/controls/lib/tests/test_latcontrol.py
index 838023af720720..b731bbd9500096 100755
--- a/selfdrive/controls/lib/tests/test_latcontrol.py
+++ b/selfdrive/controls/lib/tests/test_latcontrol.py
@@ -1,6 +1,4 @@
#!/usr/bin/env python3
-import unittest
-
from parameterized import parameterized
from cereal import car, log
@@ -15,7 +13,7 @@
from openpilot.common.mock.generators import generate_liveLocationKalman
-class TestLatControl(unittest.TestCase):
+class TestLatControl:
@parameterized.expand([(HONDA.HONDA_CIVIC, LatControlPID), (TOYOTA.TOYOTA_RAV4, LatControlTorque), (NISSAN.NISSAN_LEAF, LatControlAngle)])
def test_saturation(self, car_name, controller):
@@ -36,8 +34,4 @@ def test_saturation(self, car_name, controller):
for _ in range(1000):
_, _, lac_log = controller.update(True, CS, VM, params, False, 1, llk)
- self.assertTrue(lac_log.saturated)
-
-
-if __name__ == "__main__":
- unittest.main()
+ assert lac_log.saturated
diff --git a/selfdrive/controls/lib/tests/test_vehicle_model.py b/selfdrive/controls/lib/tests/test_vehicle_model.py
index c3997afdf36bcb..2efcf2fbbd0fc4 100755
--- a/selfdrive/controls/lib/tests/test_vehicle_model.py
+++ b/selfdrive/controls/lib/tests/test_vehicle_model.py
@@ -1,6 +1,6 @@
#!/usr/bin/env python3
+import pytest
import math
-import unittest
import numpy as np
from control import StateSpace
@@ -10,8 +10,8 @@
from openpilot.selfdrive.controls.lib.vehicle_model import VehicleModel, dyn_ss_sol, create_dyn_state_matrices
-class TestVehicleModel(unittest.TestCase):
- def setUp(self):
+class TestVehicleModel:
+ def setup_method(self):
CP = CarInterface.get_non_essential_params(CAR.HONDA_CIVIC)
self.VM = VehicleModel(CP)
@@ -23,7 +23,7 @@ def test_round_trip_yaw_rate(self):
yr = self.VM.yaw_rate(sa, u, roll)
new_sa = self.VM.get_steer_from_yaw_rate(yr, u, roll)
- self.assertAlmostEqual(sa, new_sa)
+ assert sa == pytest.approx(new_sa)
def test_dyn_ss_sol_against_yaw_rate(self):
"""Verify that the yaw_rate helper function matches the results
@@ -38,7 +38,7 @@ def test_dyn_ss_sol_against_yaw_rate(self):
# Compute yaw rate using direct computations
yr2 = self.VM.yaw_rate(sa, u, roll)
- self.assertAlmostEqual(float(yr1[0]), yr2)
+ assert float(yr1[0]) == pytest.approx(yr2)
def test_syn_ss_sol_simulate(self):
"""Verifies that dyn_ss_sol matches a simulation"""
@@ -63,8 +63,3 @@ def test_syn_ss_sol_simulate(self):
x2 = dyn_ss_sol(sa, u, roll, self.VM)
np.testing.assert_almost_equal(x1, x2, decimal=3)
-
-
-
-if __name__ == "__main__":
- unittest.main()
diff --git a/selfdrive/controls/tests/test_alerts.py b/selfdrive/controls/tests/test_alerts.py
index 7b4fba0dcee5f5..e29a6322ab5a15 100755
--- a/selfdrive/controls/tests/test_alerts.py
+++ b/selfdrive/controls/tests/test_alerts.py
@@ -2,7 +2,6 @@
import copy
import json
import os
-import unittest
import random
from PIL import Image, ImageDraw, ImageFont
@@ -25,10 +24,10 @@
ALERTS.append(alert)
-class TestAlerts(unittest.TestCase):
+class TestAlerts:
@classmethod
- def setUpClass(cls):
+ def setup_class(cls):
with open(OFFROAD_ALERTS_PATH) as f:
cls.offroad_alerts = json.loads(f.read())
@@ -45,7 +44,7 @@ def test_events_defined(self):
for name, e in events.items():
if not name.endswith("DEPRECATED"):
fail_msg = "%s @%d not in EVENTS" % (name, e)
- self.assertTrue(e in EVENTS.keys(), msg=fail_msg)
+ assert e in EVENTS.keys(), fail_msg
# ensure alert text doesn't exceed allowed width
def test_alert_text_length(self):
@@ -80,7 +79,7 @@ def test_alert_text_length(self):
left, _, right, _ = draw.textbbox((0, 0), txt, font)
width = right - left
msg = f"type: {alert.alert_type} msg: {txt}"
- self.assertLessEqual(width, max_text_width, msg=msg)
+ assert width <= max_text_width, msg
def test_alert_sanity_check(self):
for event_types in EVENTS.values():
@@ -90,21 +89,21 @@ def test_alert_sanity_check(self):
continue
if a.alert_size == AlertSize.none:
- self.assertEqual(len(a.alert_text_1), 0)
- self.assertEqual(len(a.alert_text_2), 0)
+ assert len(a.alert_text_1) == 0
+ assert len(a.alert_text_2) == 0
elif a.alert_size == AlertSize.small:
- self.assertGreater(len(a.alert_text_1), 0)
- self.assertEqual(len(a.alert_text_2), 0)
+ assert len(a.alert_text_1) > 0
+ assert len(a.alert_text_2) == 0
elif a.alert_size == AlertSize.mid:
- self.assertGreater(len(a.alert_text_1), 0)
- self.assertGreater(len(a.alert_text_2), 0)
+ assert len(a.alert_text_1) > 0
+ assert len(a.alert_text_2) > 0
else:
- self.assertGreater(len(a.alert_text_1), 0)
+ assert len(a.alert_text_1) > 0
- self.assertGreaterEqual(a.duration, 0.)
+ assert a.duration >= 0.
if event_type not in (ET.WARNING, ET.PERMANENT, ET.PRE_ENABLE):
- self.assertEqual(a.creation_delay, 0.)
+ assert a.creation_delay == 0.
def test_offroad_alerts(self):
params = Params()
@@ -113,11 +112,11 @@ def test_offroad_alerts(self):
alert = copy.copy(self.offroad_alerts[a])
set_offroad_alert(a, True)
alert['extra'] = ''
- self.assertTrue(json.dumps(alert) == params.get(a, encoding='utf8'))
+ assert json.dumps(alert) == params.get(a, encoding='utf8')
# then delete it
set_offroad_alert(a, False)
- self.assertTrue(params.get(a) is None)
+ assert params.get(a) is None
def test_offroad_alerts_extra_text(self):
params = Params()
@@ -128,8 +127,5 @@ def test_offroad_alerts_extra_text(self):
set_offroad_alert(a, True, extra_text="a"*i)
written_alert = json.loads(params.get(a, encoding='utf8'))
- self.assertTrue("a"*i == written_alert['extra'])
- self.assertTrue(alert["text"] == written_alert['text'])
-
-if __name__ == "__main__":
- unittest.main()
+ assert "a"*i == written_alert['extra']
+ assert alert["text"] == written_alert['text']
diff --git a/selfdrive/controls/tests/test_cruise_speed.py b/selfdrive/controls/tests/test_cruise_speed.py
index c46d03ad1e9897..6c46285e81fb3a 100755
--- a/selfdrive/controls/tests/test_cruise_speed.py
+++ b/selfdrive/controls/tests/test_cruise_speed.py
@@ -1,7 +1,7 @@
#!/usr/bin/env python3
+import pytest
import itertools
import numpy as np
-import unittest
from parameterized import parameterized_class
from cereal import log
@@ -36,19 +36,19 @@ def run_cruise_simulation(cruise, e2e, personality, t_end=20.):
[True, False], # e2e
log.LongitudinalPersonality.schema.enumerants, # personality
[5,35])) # speed
-class TestCruiseSpeed(unittest.TestCase):
+class TestCruiseSpeed:
def test_cruise_speed(self):
print(f'Testing {self.speed} m/s')
cruise_speed = float(self.speed)
simulation_steady_state = run_cruise_simulation(cruise_speed, self.e2e, self.personality)
- self.assertAlmostEqual(simulation_steady_state, cruise_speed, delta=.01, msg=f'Did not reach {self.speed} m/s')
+ assert simulation_steady_state == pytest.approx(cruise_speed, abs=.01), f'Did not reach {self.speed} m/s'
# TODO: test pcmCruise
@parameterized_class(('pcm_cruise',), [(False,)])
-class TestVCruiseHelper(unittest.TestCase):
- def setUp(self):
+class TestVCruiseHelper:
+ def setup_method(self):
self.CP = car.CarParams(pcmCruise=self.pcm_cruise)
self.v_cruise_helper = VCruiseHelper(self.CP)
self.reset_cruise_speed_state()
@@ -75,7 +75,7 @@ def test_adjust_speed(self):
CS.buttonEvents = [ButtonEvent(type=btn, pressed=pressed)]
self.v_cruise_helper.update_v_cruise(CS, enabled=True, is_metric=False)
- self.assertEqual(pressed, self.v_cruise_helper.v_cruise_kph == self.v_cruise_helper.v_cruise_kph_last)
+ assert pressed == (self.v_cruise_helper.v_cruise_kph == self.v_cruise_helper.v_cruise_kph_last)
def test_rising_edge_enable(self):
"""
@@ -94,7 +94,7 @@ def test_rising_edge_enable(self):
self.enable(V_CRUISE_INITIAL * CV.KPH_TO_MS, False)
# Expected diff on enabling. Speed should not change on falling edge of pressed
- self.assertEqual(not pressed, self.v_cruise_helper.v_cruise_kph == self.v_cruise_helper.v_cruise_kph_last)
+ assert not pressed == self.v_cruise_helper.v_cruise_kph == self.v_cruise_helper.v_cruise_kph_last
def test_resume_in_standstill(self):
"""
@@ -111,7 +111,7 @@ def test_resume_in_standstill(self):
# speed should only update if not at standstill and button falling edge
should_equal = standstill or pressed
- self.assertEqual(should_equal, self.v_cruise_helper.v_cruise_kph == self.v_cruise_helper.v_cruise_kph_last)
+ assert should_equal == (self.v_cruise_helper.v_cruise_kph == self.v_cruise_helper.v_cruise_kph_last)
def test_set_gas_pressed(self):
"""
@@ -135,7 +135,7 @@ def test_set_gas_pressed(self):
# TODO: fix skipping first run due to enabled on rising edge exception
if v_ego == 0.0:
continue
- self.assertEqual(expected_v_cruise_kph, self.v_cruise_helper.v_cruise_kph)
+ assert expected_v_cruise_kph == self.v_cruise_helper.v_cruise_kph
def test_initialize_v_cruise(self):
"""
@@ -145,12 +145,8 @@ def test_initialize_v_cruise(self):
for experimental_mode in (True, False):
for v_ego in np.linspace(0, 100, 101):
self.reset_cruise_speed_state()
- self.assertFalse(self.v_cruise_helper.v_cruise_initialized)
+ assert not self.v_cruise_helper.v_cruise_initialized
self.enable(float(v_ego), experimental_mode)
- self.assertTrue(V_CRUISE_INITIAL <= self.v_cruise_helper.v_cruise_kph <= V_CRUISE_MAX)
- self.assertTrue(self.v_cruise_helper.v_cruise_initialized)
-
-
-if __name__ == "__main__":
- unittest.main()
+ assert V_CRUISE_INITIAL <= self.v_cruise_helper.v_cruise_kph <= V_CRUISE_MAX
+ assert self.v_cruise_helper.v_cruise_initialized
diff --git a/selfdrive/controls/tests/test_following_distance.py b/selfdrive/controls/tests/test_following_distance.py
index f58e6383c42c27..5d60911805ee0a 100755
--- a/selfdrive/controls/tests/test_following_distance.py
+++ b/selfdrive/controls/tests/test_following_distance.py
@@ -1,5 +1,5 @@
#!/usr/bin/env python3
-import unittest
+import pytest
import itertools
from parameterized import parameterized_class
@@ -32,14 +32,10 @@ def run_following_distance_simulation(v_lead, t_end=100.0, e2e=False, personalit
log.LongitudinalPersonality.standard,
log.LongitudinalPersonality.aggressive],
[0,10,35])) # speed
-class TestFollowingDistance(unittest.TestCase):
+class TestFollowingDistance():
def test_following_distance(self):
v_lead = float(self.speed)
simulation_steady_state = run_following_distance_simulation(v_lead, e2e=self.e2e, personality=self.personality)
correct_steady_state = desired_follow_distance(v_lead, v_lead, get_T_FOLLOW(self.personality))
err_ratio = 0.2 if self.e2e else 0.1
- self.assertAlmostEqual(simulation_steady_state, correct_steady_state, delta=(err_ratio * correct_steady_state + .5))
-
-
-if __name__ == "__main__":
- unittest.main()
+ assert simulation_steady_state == pytest.approx(correct_steady_state, abs=err_ratio * correct_steady_state + .5)
diff --git a/selfdrive/controls/tests/test_lateral_mpc.py b/selfdrive/controls/tests/test_lateral_mpc.py
index 8c09f46b60c0e4..3aa0fd1bce4804 100644
--- a/selfdrive/controls/tests/test_lateral_mpc.py
+++ b/selfdrive/controls/tests/test_lateral_mpc.py
@@ -1,4 +1,4 @@
-import unittest
+import pytest
import numpy as np
from openpilot.selfdrive.controls.lib.lateral_mpc_lib.lat_mpc import LateralMpc
from openpilot.selfdrive.controls.lib.drive_helpers import CAR_ROTATION_RADIUS
@@ -27,20 +27,20 @@ def run_mpc(lat_mpc=None, v_ref=30., x_init=0., y_init=0., psi_init=0., curvatur
return lat_mpc.x_sol
-class TestLateralMpc(unittest.TestCase):
+class TestLateralMpc:
def _assert_null(self, sol, curvature=1e-6):
for i in range(len(sol)):
- self.assertAlmostEqual(sol[0,i,1], 0., delta=curvature)
- self.assertAlmostEqual(sol[0,i,2], 0., delta=curvature)
- self.assertAlmostEqual(sol[0,i,3], 0., delta=curvature)
+ assert sol[0,i,1] == pytest.approx(0, abs=curvature)
+ assert sol[0,i,2] == pytest.approx(0, abs=curvature)
+ assert sol[0,i,3] == pytest.approx(0, abs=curvature)
def _assert_simmetry(self, sol, curvature=1e-6):
for i in range(len(sol)):
- self.assertAlmostEqual(sol[0,i,1], -sol[1,i,1], delta=curvature)
- self.assertAlmostEqual(sol[0,i,2], -sol[1,i,2], delta=curvature)
- self.assertAlmostEqual(sol[0,i,3], -sol[1,i,3], delta=curvature)
- self.assertAlmostEqual(sol[0,i,0], sol[1,i,0], delta=curvature)
+ assert sol[0,i,1] == pytest.approx(-sol[1,i,1], abs=curvature)
+ assert sol[0,i,2] == pytest.approx(-sol[1,i,2], abs=curvature)
+ assert sol[0,i,3] == pytest.approx(-sol[1,i,3], abs=curvature)
+ assert sol[0,i,0] == pytest.approx(sol[1,i,0], abs=curvature)
def test_straight(self):
sol = run_mpc()
@@ -74,7 +74,7 @@ def test_no_overshoot(self):
y_init = 1.
sol = run_mpc(y_init=y_init)
for y in list(sol[:,1]):
- self.assertGreaterEqual(y_init, abs(y))
+ assert y_init >= abs(y)
def test_switch_convergence(self):
lat_mpc = LateralMpc()
@@ -83,7 +83,3 @@ def test_switch_convergence(self):
sol = run_mpc(lat_mpc=lat_mpc, poly_shift=-3.0, v_ref=7.0)
left_psi_deg = np.degrees(sol[:,2])
np.testing.assert_almost_equal(right_psi_deg, -left_psi_deg, decimal=3)
-
-
-if __name__ == "__main__":
- unittest.main()
diff --git a/selfdrive/controls/tests/test_leads.py b/selfdrive/controls/tests/test_leads.py
index a06387a0872430..f4e97725ff1069 100755
--- a/selfdrive/controls/tests/test_leads.py
+++ b/selfdrive/controls/tests/test_leads.py
@@ -1,13 +1,11 @@
#!/usr/bin/env python3
-import unittest
-
import cereal.messaging as messaging
from openpilot.selfdrive.test.process_replay import replay_process_with_name
from openpilot.selfdrive.car.toyota.values import CAR as TOYOTA
-class TestLeads(unittest.TestCase):
+class TestLeads:
def test_radar_fault(self):
# if there's no radar-related can traffic, radard should either not respond or respond with an error
# this is tightly coupled with underlying car radar_interface implementation, but it's a good sanity check
@@ -29,8 +27,4 @@ def single_iter_pkg():
states = [m for m in out if m.which() == "radarState"]
failures = [not state.valid and len(state.radarState.radarErrors) for state in states]
- self.assertTrue(len(states) == 0 or all(failures))
-
-
-if __name__ == "__main__":
- unittest.main()
+ assert len(states) == 0 or all(failures)
diff --git a/selfdrive/controls/tests/test_state_machine.py b/selfdrive/controls/tests/test_state_machine.py
index d49111752d2021..b92724ce4337a9 100755
--- a/selfdrive/controls/tests/test_state_machine.py
+++ b/selfdrive/controls/tests/test_state_machine.py
@@ -1,5 +1,4 @@
#!/usr/bin/env python3
-import unittest
from cereal import car, log
from openpilot.common.realtime import DT_CTRL
@@ -28,9 +27,9 @@ def make_event(event_types):
return 0
-class TestStateMachine(unittest.TestCase):
+class TestStateMachine:
- def setUp(self):
+ def setup_method(self):
CarInterface, CarController, CarState = interfaces[MOCK.MOCK]
CP = CarInterface.get_non_essential_params(MOCK.MOCK)
CI = CarInterface(CP, CarController, CarState)
@@ -46,7 +45,7 @@ def test_immediate_disable(self):
self.controlsd.events.add(make_event([et, ET.IMMEDIATE_DISABLE]))
self.controlsd.state = state
self.controlsd.state_transition(self.CS)
- self.assertEqual(State.disabled, self.controlsd.state)
+ assert State.disabled == self.controlsd.state
self.controlsd.events.clear()
def test_user_disable(self):
@@ -55,7 +54,7 @@ def test_user_disable(self):
self.controlsd.events.add(make_event([et, ET.USER_DISABLE]))
self.controlsd.state = state
self.controlsd.state_transition(self.CS)
- self.assertEqual(State.disabled, self.controlsd.state)
+ assert State.disabled == self.controlsd.state
self.controlsd.events.clear()
def test_soft_disable(self):
@@ -66,7 +65,7 @@ def test_soft_disable(self):
self.controlsd.events.add(make_event([et, ET.SOFT_DISABLE]))
self.controlsd.state = state
self.controlsd.state_transition(self.CS)
- self.assertEqual(self.controlsd.state, State.disabled if state == State.disabled else State.softDisabling)
+ assert self.controlsd.state == State.disabled if state == State.disabled else State.softDisabling
self.controlsd.events.clear()
def test_soft_disable_timer(self):
@@ -74,17 +73,17 @@ def test_soft_disable_timer(self):
self.controlsd.events.add(make_event([ET.SOFT_DISABLE]))
self.controlsd.state_transition(self.CS)
for _ in range(int(SOFT_DISABLE_TIME / DT_CTRL)):
- self.assertEqual(self.controlsd.state, State.softDisabling)
+ assert self.controlsd.state == State.softDisabling
self.controlsd.state_transition(self.CS)
- self.assertEqual(self.controlsd.state, State.disabled)
+ assert self.controlsd.state == State.disabled
def test_no_entry(self):
# Make sure noEntry keeps us disabled
for et in ENABLE_EVENT_TYPES:
self.controlsd.events.add(make_event([ET.NO_ENTRY, et]))
self.controlsd.state_transition(self.CS)
- self.assertEqual(self.controlsd.state, State.disabled)
+ assert self.controlsd.state == State.disabled
self.controlsd.events.clear()
def test_no_entry_pre_enable(self):
@@ -92,7 +91,7 @@ def test_no_entry_pre_enable(self):
self.controlsd.state = State.preEnabled
self.controlsd.events.add(make_event([ET.NO_ENTRY, ET.PRE_ENABLE]))
self.controlsd.state_transition(self.CS)
- self.assertEqual(self.controlsd.state, State.preEnabled)
+ assert self.controlsd.state == State.preEnabled
def test_maintain_states(self):
# Given current state's event type, we should maintain state
@@ -101,9 +100,5 @@ def test_maintain_states(self):
self.controlsd.state = state
self.controlsd.events.add(make_event([et]))
self.controlsd.state_transition(self.CS)
- self.assertEqual(self.controlsd.state, state)
+ assert self.controlsd.state == state
self.controlsd.events.clear()
-
-
-if __name__ == "__main__":
- unittest.main()
diff --git a/selfdrive/locationd/test/test_calibrationd.py b/selfdrive/locationd/test/test_calibrationd.py
index e2db0943978e25..598d5d2d5f882e 100755
--- a/selfdrive/locationd/test/test_calibrationd.py
+++ b/selfdrive/locationd/test/test_calibrationd.py
@@ -1,6 +1,5 @@
#!/usr/bin/env python3
import random
-import unittest
import numpy as np
@@ -31,7 +30,7 @@ def process_messages(c, cam_odo_calib, cycles,
[0.0, 0.0, HEIGHT_INIT.item()],
[cam_odo_height_std, cam_odo_height_std, cam_odo_height_std])
-class TestCalibrationd(unittest.TestCase):
+class TestCalibrationd:
def test_read_saved_params(self):
msg = messaging.new_message('liveCalibration')
@@ -43,13 +42,13 @@ def test_read_saved_params(self):
np.testing.assert_allclose(msg.liveCalibration.rpyCalib, c.rpy)
np.testing.assert_allclose(msg.liveCalibration.height, c.height)
- self.assertEqual(msg.liveCalibration.validBlocks, c.valid_blocks)
+ assert msg.liveCalibration.validBlocks == c.valid_blocks
def test_calibration_basics(self):
c = Calibrator(param_put=False)
process_messages(c, [0.0, 0.0, 0.0], BLOCK_SIZE * INPUTS_WANTED)
- self.assertEqual(c.valid_blocks, INPUTS_WANTED)
+ assert c.valid_blocks == INPUTS_WANTED
np.testing.assert_allclose(c.rpy, np.zeros(3))
np.testing.assert_allclose(c.height, HEIGHT_INIT)
c.reset()
@@ -59,7 +58,7 @@ def test_calibration_low_speed_reject(self):
c = Calibrator(param_put=False)
process_messages(c, [0.0, 0.0, 0.0], BLOCK_SIZE * INPUTS_WANTED, cam_odo_speed=MIN_SPEED_FILTER - 1)
process_messages(c, [0.0, 0.0, 0.0], BLOCK_SIZE * INPUTS_WANTED, carstate_speed=MIN_SPEED_FILTER - 1)
- self.assertEqual(c.valid_blocks, 0)
+ assert c.valid_blocks == 0
np.testing.assert_allclose(c.rpy, np.zeros(3))
np.testing.assert_allclose(c.height, HEIGHT_INIT)
@@ -67,7 +66,7 @@ def test_calibration_low_speed_reject(self):
def test_calibration_yaw_rate_reject(self):
c = Calibrator(param_put=False)
process_messages(c, [0.0, 0.0, 0.0], BLOCK_SIZE * INPUTS_WANTED, cam_odo_yr=MAX_YAW_RATE_FILTER)
- self.assertEqual(c.valid_blocks, 0)
+ assert c.valid_blocks == 0
np.testing.assert_allclose(c.rpy, np.zeros(3))
np.testing.assert_allclose(c.height, HEIGHT_INIT)
@@ -75,43 +74,40 @@ def test_calibration_yaw_rate_reject(self):
def test_calibration_speed_std_reject(self):
c = Calibrator(param_put=False)
process_messages(c, [0.0, 0.0, 0.0], BLOCK_SIZE * INPUTS_WANTED, cam_odo_speed_std=1e3)
- self.assertEqual(c.valid_blocks, INPUTS_NEEDED)
+ assert c.valid_blocks == INPUTS_NEEDED
np.testing.assert_allclose(c.rpy, np.zeros(3))
def test_calibration_speed_std_height_reject(self):
c = Calibrator(param_put=False)
process_messages(c, [0.0, 0.0, 0.0], BLOCK_SIZE * INPUTS_WANTED, cam_odo_height_std=1e3)
- self.assertEqual(c.valid_blocks, INPUTS_NEEDED)
+ assert c.valid_blocks == INPUTS_NEEDED
np.testing.assert_allclose(c.rpy, np.zeros(3))
def test_calibration_auto_reset(self):
c = Calibrator(param_put=False)
process_messages(c, [0.0, 0.0, 0.0], BLOCK_SIZE * INPUTS_NEEDED)
- self.assertEqual(c.valid_blocks, INPUTS_NEEDED)
+ assert c.valid_blocks == INPUTS_NEEDED
np.testing.assert_allclose(c.rpy, [0.0, 0.0, 0.0], atol=1e-3)
process_messages(c, [0.0, MAX_ALLOWED_PITCH_SPREAD*0.9, MAX_ALLOWED_YAW_SPREAD*0.9], BLOCK_SIZE + 10)
- self.assertEqual(c.valid_blocks, INPUTS_NEEDED + 1)
- self.assertEqual(c.cal_status, log.LiveCalibrationData.Status.calibrated)
+ assert c.valid_blocks == INPUTS_NEEDED + 1
+ assert c.cal_status == log.LiveCalibrationData.Status.calibrated
c = Calibrator(param_put=False)
process_messages(c, [0.0, 0.0, 0.0], BLOCK_SIZE * INPUTS_NEEDED)
- self.assertEqual(c.valid_blocks, INPUTS_NEEDED)
+ assert c.valid_blocks == INPUTS_NEEDED
np.testing.assert_allclose(c.rpy, [0.0, 0.0, 0.0])
process_messages(c, [0.0, MAX_ALLOWED_PITCH_SPREAD*1.1, 0.0], BLOCK_SIZE + 10)
- self.assertEqual(c.valid_blocks, 1)
- self.assertEqual(c.cal_status, log.LiveCalibrationData.Status.recalibrating)
+ assert c.valid_blocks == 1
+ assert c.cal_status == log.LiveCalibrationData.Status.recalibrating
np.testing.assert_allclose(c.rpy, [0.0, MAX_ALLOWED_PITCH_SPREAD*1.1, 0.0], atol=1e-2)
c = Calibrator(param_put=False)
process_messages(c, [0.0, 0.0, 0.0], BLOCK_SIZE * INPUTS_NEEDED)
- self.assertEqual(c.valid_blocks, INPUTS_NEEDED)
+ assert c.valid_blocks == INPUTS_NEEDED
np.testing.assert_allclose(c.rpy, [0.0, 0.0, 0.0])
process_messages(c, [0.0, 0.0, MAX_ALLOWED_YAW_SPREAD*1.1], BLOCK_SIZE + 10)
- self.assertEqual(c.valid_blocks, 1)
- self.assertEqual(c.cal_status, log.LiveCalibrationData.Status.recalibrating)
+ assert c.valid_blocks == 1
+ assert c.cal_status == log.LiveCalibrationData.Status.recalibrating
np.testing.assert_allclose(c.rpy, [0.0, 0.0, MAX_ALLOWED_YAW_SPREAD*1.1], atol=1e-2)
-
-if __name__ == "__main__":
- unittest.main()
diff --git a/selfdrive/locationd/test/test_locationd.py b/selfdrive/locationd/test/test_locationd.py
index cd032dbaf058fb..bac824badaa9e2 100755
--- a/selfdrive/locationd/test/test_locationd.py
+++ b/selfdrive/locationd/test/test_locationd.py
@@ -1,7 +1,7 @@
#!/usr/bin/env python3
+import pytest
import json
import random
-import unittest
import time
import capnp
@@ -13,13 +13,11 @@
from openpilot.selfdrive.manager.process_config import managed_processes
-class TestLocationdProc(unittest.TestCase):
+class TestLocationdProc:
LLD_MSGS = ['gpsLocationExternal', 'cameraOdometry', 'carState', 'liveCalibration',
'accelerometer', 'gyroscope', 'magnetometer']
- def setUp(self):
- random.seed(123489234)
-
+ def setup_method(self):
self.pm = messaging.PubMaster(self.LLD_MSGS)
self.params = Params()
@@ -27,7 +25,7 @@ def setUp(self):
managed_processes['locationd'].prepare()
managed_processes['locationd'].start()
- def tearDown(self):
+ def teardown_method(self):
managed_processes['locationd'].stop()
def get_msg(self, name, t):
@@ -65,6 +63,7 @@ def get_msg(self, name, t):
return msg
def test_params_gps(self):
+ random.seed(123489234)
self.params.remove('LastGPSPosition')
self.x = -2710700 + (random.random() * 1e5)
@@ -86,10 +85,6 @@ def test_params_gps(self):
time.sleep(1) # wait for async params write
lastGPS = json.loads(self.params.get('LastGPSPosition'))
- self.assertAlmostEqual(lastGPS['latitude'], self.lat, places=3)
- self.assertAlmostEqual(lastGPS['longitude'], self.lon, places=3)
- self.assertAlmostEqual(lastGPS['altitude'], self.alt, places=3)
-
-
-if __name__ == "__main__":
- unittest.main()
+ assert lastGPS['latitude'] == pytest.approx(self.lat, abs=0.001)
+ assert lastGPS['longitude'] == pytest.approx(self.lon, abs=0.001)
+ assert lastGPS['altitude'] == pytest.approx(self.alt, abs=0.001)
diff --git a/selfdrive/locationd/test/test_locationd_scenarios.py b/selfdrive/locationd/test/test_locationd_scenarios.py
index 3fdd47275f2864..be95c6fffb78e4 100755
--- a/selfdrive/locationd/test/test_locationd_scenarios.py
+++ b/selfdrive/locationd/test/test_locationd_scenarios.py
@@ -1,6 +1,5 @@
#!/usr/bin/env python3
import pytest
-import unittest
import numpy as np
from collections import defaultdict
from enum import Enum
@@ -99,7 +98,7 @@ def run_scenarios(scenario, logs):
@pytest.mark.xdist_group("test_locationd_scenarios")
@pytest.mark.shared_download_cache
-class TestLocationdScenarios(unittest.TestCase):
+class TestLocationdScenarios:
"""
Test locationd with different scenarios. In all these scenarios, we expect the following:
- locationd kalman filter should never go unstable (we care mostly about yaw_rate, roll, gpsOK, inputsOK, sensorsOK)
@@ -107,7 +106,7 @@ class TestLocationdScenarios(unittest.TestCase):
"""
@classmethod
- def setUpClass(cls):
+ def setup_class(cls):
cls.logs = migrate_all(LogReader(TEST_ROUTE))
def test_base(self):
@@ -118,8 +117,8 @@ def test_base(self):
- roll: unchanged
"""
orig_data, replayed_data = run_scenarios(Scenario.BASE, self.logs)
- self.assertTrue(np.allclose(orig_data['yaw_rate'], replayed_data['yaw_rate'], atol=np.radians(0.2)))
- self.assertTrue(np.allclose(orig_data['roll'], replayed_data['roll'], atol=np.radians(0.5)))
+ assert np.allclose(orig_data['yaw_rate'], replayed_data['yaw_rate'], atol=np.radians(0.2))
+ assert np.allclose(orig_data['roll'], replayed_data['roll'], atol=np.radians(0.5))
def test_gps_off(self):
"""
@@ -130,9 +129,9 @@ def test_gps_off(self):
- gpsOK: False
"""
orig_data, replayed_data = run_scenarios(Scenario.GPS_OFF, self.logs)
- self.assertTrue(np.allclose(orig_data['yaw_rate'], replayed_data['yaw_rate'], atol=np.radians(0.2)))
- self.assertTrue(np.allclose(orig_data['roll'], replayed_data['roll'], atol=np.radians(0.5)))
- self.assertTrue(np.all(replayed_data['gps_flag'] == 0.0))
+ assert np.allclose(orig_data['yaw_rate'], replayed_data['yaw_rate'], atol=np.radians(0.2))
+ assert np.allclose(orig_data['roll'], replayed_data['roll'], atol=np.radians(0.5))
+ assert np.all(replayed_data['gps_flag'] == 0.0)
def test_gps_off_midway(self):
"""
@@ -143,9 +142,9 @@ def test_gps_off_midway(self):
- gpsOK: True for the first half, False for the second half
"""
orig_data, replayed_data = run_scenarios(Scenario.GPS_OFF_MIDWAY, self.logs)
- self.assertTrue(np.allclose(orig_data['yaw_rate'], replayed_data['yaw_rate'], atol=np.radians(0.2)))
- self.assertTrue(np.allclose(orig_data['roll'], replayed_data['roll'], atol=np.radians(0.5)))
- self.assertTrue(np.diff(replayed_data['gps_flag'])[512] == -1.0)
+ assert np.allclose(orig_data['yaw_rate'], replayed_data['yaw_rate'], atol=np.radians(0.2))
+ assert np.allclose(orig_data['roll'], replayed_data['roll'], atol=np.radians(0.5))
+ assert np.diff(replayed_data['gps_flag'])[512] == -1.0
def test_gps_on_midway(self):
"""
@@ -156,9 +155,9 @@ def test_gps_on_midway(self):
- gpsOK: False for the first half, True for the second half
"""
orig_data, replayed_data = run_scenarios(Scenario.GPS_ON_MIDWAY, self.logs)
- self.assertTrue(np.allclose(orig_data['yaw_rate'], replayed_data['yaw_rate'], atol=np.radians(0.2)))
- self.assertTrue(np.allclose(orig_data['roll'], replayed_data['roll'], atol=np.radians(1.5)))
- self.assertTrue(np.diff(replayed_data['gps_flag'])[505] == 1.0)
+ assert np.allclose(orig_data['yaw_rate'], replayed_data['yaw_rate'], atol=np.radians(0.2))
+ assert np.allclose(orig_data['roll'], replayed_data['roll'], atol=np.radians(1.5))
+ assert np.diff(replayed_data['gps_flag'])[505] == 1.0
def test_gps_tunnel(self):
"""
@@ -169,10 +168,10 @@ def test_gps_tunnel(self):
- gpsOK: False for the middle section, True for the rest
"""
orig_data, replayed_data = run_scenarios(Scenario.GPS_TUNNEL, self.logs)
- self.assertTrue(np.allclose(orig_data['yaw_rate'], replayed_data['yaw_rate'], atol=np.radians(0.2)))
- self.assertTrue(np.allclose(orig_data['roll'], replayed_data['roll'], atol=np.radians(0.5)))
- self.assertTrue(np.diff(replayed_data['gps_flag'])[213] == -1.0)
- self.assertTrue(np.diff(replayed_data['gps_flag'])[805] == 1.0)
+ assert np.allclose(orig_data['yaw_rate'], replayed_data['yaw_rate'], atol=np.radians(0.2))
+ assert np.allclose(orig_data['roll'], replayed_data['roll'], atol=np.radians(0.5))
+ assert np.diff(replayed_data['gps_flag'])[213] == -1.0
+ assert np.diff(replayed_data['gps_flag'])[805] == 1.0
def test_gyro_off(self):
"""
@@ -183,9 +182,9 @@ def test_gyro_off(self):
- sensorsOK: False
"""
_, replayed_data = run_scenarios(Scenario.GYRO_OFF, self.logs)
- self.assertTrue(np.allclose(replayed_data['yaw_rate'], 0.0))
- self.assertTrue(np.allclose(replayed_data['roll'], 0.0))
- self.assertTrue(np.all(replayed_data['sensors_flag'] == 0.0))
+ assert np.allclose(replayed_data['yaw_rate'], 0.0)
+ assert np.allclose(replayed_data['roll'], 0.0)
+ assert np.all(replayed_data['sensors_flag'] == 0.0)
def test_gyro_spikes(self):
"""
@@ -196,10 +195,10 @@ def test_gyro_spikes(self):
- inputsOK: False for some time after the spike, True for the rest
"""
orig_data, replayed_data = run_scenarios(Scenario.GYRO_SPIKE_MIDWAY, self.logs)
- self.assertTrue(np.allclose(orig_data['yaw_rate'], replayed_data['yaw_rate'], atol=np.radians(0.2)))
- self.assertTrue(np.allclose(orig_data['roll'], replayed_data['roll'], atol=np.radians(0.5)))
- self.assertTrue(np.diff(replayed_data['inputs_flag'])[500] == -1.0)
- self.assertTrue(np.diff(replayed_data['inputs_flag'])[694] == 1.0)
+ assert np.allclose(orig_data['yaw_rate'], replayed_data['yaw_rate'], atol=np.radians(0.2))
+ assert np.allclose(orig_data['roll'], replayed_data['roll'], atol=np.radians(0.5))
+ assert np.diff(replayed_data['inputs_flag'])[500] == -1.0
+ assert np.diff(replayed_data['inputs_flag'])[694] == 1.0
def test_accel_off(self):
"""
@@ -210,9 +209,9 @@ def test_accel_off(self):
- sensorsOK: False
"""
_, replayed_data = run_scenarios(Scenario.ACCEL_OFF, self.logs)
- self.assertTrue(np.allclose(replayed_data['yaw_rate'], 0.0))
- self.assertTrue(np.allclose(replayed_data['roll'], 0.0))
- self.assertTrue(np.all(replayed_data['sensors_flag'] == 0.0))
+ assert np.allclose(replayed_data['yaw_rate'], 0.0)
+ assert np.allclose(replayed_data['roll'], 0.0)
+ assert np.all(replayed_data['sensors_flag'] == 0.0)
def test_accel_spikes(self):
"""
@@ -221,9 +220,5 @@ def test_accel_spikes(self):
Expected Result: Right now, the kalman filter is not robust to small spikes like it is to gyroscope spikes.
"""
orig_data, replayed_data = run_scenarios(Scenario.ACCEL_SPIKE_MIDWAY, self.logs)
- self.assertTrue(np.allclose(orig_data['yaw_rate'], replayed_data['yaw_rate'], atol=np.radians(0.2)))
- self.assertTrue(np.allclose(orig_data['roll'], replayed_data['roll'], atol=np.radians(0.5)))
-
-
-if __name__ == "__main__":
- unittest.main()
+ assert np.allclose(orig_data['yaw_rate'], replayed_data['yaw_rate'], atol=np.radians(0.2))
+ assert np.allclose(orig_data['roll'], replayed_data['roll'], atol=np.radians(0.5))
diff --git a/selfdrive/manager/test/test_manager.py b/selfdrive/manager/test/test_manager.py
index 1ae94b26a146f1..4cdc99c240b885 100755
--- a/selfdrive/manager/test/test_manager.py
+++ b/selfdrive/manager/test/test_manager.py
@@ -3,7 +3,6 @@
import pytest
import signal
import time
-import unittest
from parameterized import parameterized
@@ -21,15 +20,15 @@
@pytest.mark.tici
-class TestManager(unittest.TestCase):
- def setUp(self):
+class TestManager:
+ def setup_method(self):
HARDWARE.set_power_save(False)
# ensure clean CarParams
params = Params()
params.clear_all()
- def tearDown(self):
+ def teardown_method(self):
manager.manager_cleanup()
def test_manager_prepare(self):
@@ -38,7 +37,7 @@ def test_manager_prepare(self):
def test_blacklisted_procs(self):
# TODO: ensure there are blacklisted procs until we have a dedicated test
- self.assertTrue(len(BLACKLIST_PROCS), "No blacklisted procs to test not_run")
+ assert len(BLACKLIST_PROCS), "No blacklisted procs to test not_run"
@parameterized.expand([(i,) for i in range(10)])
def test_startup_time(self, index):
@@ -48,8 +47,8 @@ def test_startup_time(self, index):
t = time.monotonic() - start
assert t < MAX_STARTUP_TIME, f"startup took {t}s, expected <{MAX_STARTUP_TIME}s"
- @unittest.skip("this test is flaky the way it's currently written, should be moved to test_onroad")
- def test_clean_exit(self):
+ @pytest.mark.skip("this test is flaky the way it's currently written, should be moved to test_onroad")
+ def test_clean_exit(self, subtests):
"""
Ensure all processes exit cleanly when stopped.
"""
@@ -62,21 +61,17 @@ def test_clean_exit(self):
time.sleep(10)
for p in procs:
- with self.subTest(proc=p.name):
+ with subtests.test(proc=p.name):
state = p.get_process_state_msg()
- self.assertTrue(state.running, f"{p.name} not running")
+ assert state.running, f"{p.name} not running"
exit_code = p.stop(retry=False)
- self.assertNotIn(p.name, BLACKLIST_PROCS, f"{p.name} was started")
+ assert p.name not in BLACKLIST_PROCS, f"{p.name} was started"
- self.assertTrue(exit_code is not None, f"{p.name} failed to exit")
+ assert exit_code is not None, f"{p.name} failed to exit"
# TODO: interrupted blocking read exits with 1 in cereal. use a more unique return code
exit_codes = [0, 1]
if p.sigkill:
exit_codes = [-signal.SIGKILL]
- self.assertIn(exit_code, exit_codes, f"{p.name} died with {exit_code}")
-
-
-if __name__ == "__main__":
- unittest.main()
+ assert exit_code in exit_codes, f"{p.name} died with {exit_code}"
diff --git a/selfdrive/modeld/models/dmonitoring_model.current b/selfdrive/modeld/models/dmonitoring_model.current
index ed495df3a85e57..f935ab06b34e27 100644
--- a/selfdrive/modeld/models/dmonitoring_model.current
+++ b/selfdrive/modeld/models/dmonitoring_model.current
@@ -1,2 +1,2 @@
-60abed33-9f25-4e34-9937-aaf918d41dfc
-50887963963a3022e85ac0c94b0801aef955a608
\ No newline at end of file
+5ec97a39-0095-4cea-adfa-6d72b1966cc1
+26cac7a9757a27c783a365403040a1bd27ccdaea
\ No newline at end of file
diff --git a/selfdrive/modeld/models/dmonitoring_model.onnx b/selfdrive/modeld/models/dmonitoring_model.onnx
index 3e94e7d36a5668..dd3a6aba2e756e 100644
--- a/selfdrive/modeld/models/dmonitoring_model.onnx
+++ b/selfdrive/modeld/models/dmonitoring_model.onnx
@@ -1,3 +1,3 @@
version https://git-lfs.github.com/spec/v1
-oid sha256:9a667cbde6eca86c5b653f57853e3d33b9a875bceb557e193d1bef78c2df9c37
-size 16132779
+oid sha256:3dd3982940d823c4fbb0429b733a0b78b0688d7d67aa76ff7b754a3e2f3d8683
+size 16132780
diff --git a/selfdrive/modeld/models/dmonitoring_model_q.dlc b/selfdrive/modeld/models/dmonitoring_model_q.dlc
index 041949ad9476df..fc285954d4aa61 100644
--- a/selfdrive/modeld/models/dmonitoring_model_q.dlc
+++ b/selfdrive/modeld/models/dmonitoring_model_q.dlc
@@ -1,3 +1,3 @@
version https://git-lfs.github.com/spec/v1
-oid sha256:f5729c457dd47f6c35e2a818eae14113526fd0bfac6417a809cbaf9d38697d9b
-size 4488413
+oid sha256:7c26f13816b143f5bb29ac2980f8557bd5687a75729e4d895313fb9a5a1f0f46
+size 4488449
diff --git a/selfdrive/modeld/tests/test_modeld.py b/selfdrive/modeld/tests/test_modeld.py
index 67c6f7103804c4..a18ce8fa424ea9 100755
--- a/selfdrive/modeld/tests/test_modeld.py
+++ b/selfdrive/modeld/tests/test_modeld.py
@@ -1,5 +1,4 @@
#!/usr/bin/env python3
-import unittest
import numpy as np
import random
@@ -16,9 +15,9 @@
IMG_BYTES = IMG.flatten().tobytes()
-class TestModeld(unittest.TestCase):
+class TestModeld:
- def setUp(self):
+ def setup_method(self):
self.vipc_server = VisionIpcServer("camerad")
self.vipc_server.create_buffers(VisionStreamType.VISION_STREAM_ROAD, 40, False, CAM.width, CAM.height)
self.vipc_server.create_buffers(VisionStreamType.VISION_STREAM_DRIVER, 40, False, CAM.width, CAM.height)
@@ -32,7 +31,7 @@ def setUp(self):
managed_processes['modeld'].start()
self.pm.wait_for_readers_to_update("roadCameraState", 10)
- def tearDown(self):
+ def teardown_method(self):
managed_processes['modeld'].stop()
del self.vipc_server
@@ -65,15 +64,15 @@ def test_modeld(self):
self._wait()
mdl = self.sm['modelV2']
- self.assertEqual(mdl.frameId, n)
- self.assertEqual(mdl.frameIdExtra, n)
- self.assertEqual(mdl.timestampEof, cs.timestampEof)
- self.assertEqual(mdl.frameAge, 0)
- self.assertEqual(mdl.frameDropPerc, 0)
+ assert mdl.frameId == n
+ assert mdl.frameIdExtra == n
+ assert mdl.timestampEof == cs.timestampEof
+ assert mdl.frameAge == 0
+ assert mdl.frameDropPerc == 0
odo = self.sm['cameraOdometry']
- self.assertEqual(odo.frameId, n)
- self.assertEqual(odo.timestampEof, cs.timestampEof)
+ assert odo.frameId == n
+ assert odo.timestampEof == cs.timestampEof
def test_dropped_frames(self):
"""
@@ -95,13 +94,9 @@ def test_dropped_frames(self):
mdl = self.sm['modelV2']
odo = self.sm['cameraOdometry']
- self.assertEqual(mdl.frameId, frame_id)
- self.assertEqual(mdl.frameIdExtra, frame_id)
- self.assertEqual(odo.frameId, frame_id)
+ assert mdl.frameId == frame_id
+ assert mdl.frameIdExtra == frame_id
+ assert odo.frameId == frame_id
if n != frame_id:
- self.assertFalse(self.sm.updated['modelV2'])
- self.assertFalse(self.sm.updated['cameraOdometry'])
-
-
-if __name__ == "__main__":
- unittest.main()
+ assert not self.sm.updated['modelV2']
+ assert not self.sm.updated['cameraOdometry']
diff --git a/selfdrive/monitoring/dmonitoringd.py b/selfdrive/monitoring/dmonitoringd.py
index c18bbf29b991f3..91a4932584ef4e 100755
--- a/selfdrive/monitoring/dmonitoringd.py
+++ b/selfdrive/monitoring/dmonitoringd.py
@@ -19,7 +19,6 @@ def dmonitoringd_thread():
driver_status = DriverStatus(rhd_saved=params.get_bool("IsRhdDetected"), always_on=params.get_bool("AlwaysOnDM"))
- v_cruise_last = 0
driver_engaged = False
# 20Hz <- dmonitoringmodeld
@@ -30,12 +29,8 @@ def dmonitoringd_thread():
# Get interaction
if sm.updated['carState']:
- v_cruise = sm['carState'].cruiseState.speed
- driver_engaged = len(sm['carState'].buttonEvents) > 0 or \
- v_cruise != v_cruise_last or \
- sm['carState'].steeringPressed or \
- sm['carState'].gasPressed
- v_cruise_last = v_cruise
+ driver_engaged = sm['carState'].steeringPressed or \
+ sm['carState'].gasPressed
if sm.updated['modelV2']:
driver_status.set_policy(sm['modelV2'], sm['carState'].vEgo)
diff --git a/selfdrive/monitoring/driver_monitor.py b/selfdrive/monitoring/driver_monitor.py
index 7db0bd6c9f7d53..9faa5f4d05d1b9 100644
--- a/selfdrive/monitoring/driver_monitor.py
+++ b/selfdrive/monitoring/driver_monitor.py
@@ -31,8 +31,8 @@ def __init__(self):
self._SG_THRESHOLD = 0.9
self._BLINK_THRESHOLD = 0.865
- self._EE_THRESH11 = 0.241
- self._EE_THRESH12 = 4.7
+ self._EE_THRESH11 = 0.25
+ self._EE_THRESH12 = 7.5
self._EE_MAX_OFFSET1 = 0.06
self._EE_MIN_OFFSET1 = 0.025
self._EE_THRESH21 = 0.01
diff --git a/selfdrive/monitoring/test_monitoring.py b/selfdrive/monitoring/test_monitoring.py
index 50b2746e2dd66f..9395960b659a28 100755
--- a/selfdrive/monitoring/test_monitoring.py
+++ b/selfdrive/monitoring/test_monitoring.py
@@ -1,5 +1,4 @@
#!/usr/bin/env python3
-import unittest
import numpy as np
from cereal import car, log
@@ -53,7 +52,7 @@ def make_msg(face_detected, distracted=False, model_uncertain=False):
always_false = [False] * int(TEST_TIMESPAN / DT_DMON)
# TODO: this only tests DriverStatus
-class TestMonitoring(unittest.TestCase):
+class TestMonitoring:
def _run_seq(self, msgs, interaction, engaged, standstill):
DS = DriverStatus()
events = []
@@ -69,7 +68,7 @@ def _run_seq(self, msgs, interaction, engaged, standstill):
return events, DS
def _assert_no_events(self, events):
- self.assertTrue(all(not len(e) for e in events))
+ assert all(not len(e) for e in events)
# engaged, driver is attentive all the time
def test_fully_aware_driver(self):
@@ -79,27 +78,27 @@ def test_fully_aware_driver(self):
# engaged, driver is distracted and does nothing
def test_fully_distracted_driver(self):
events, d_status = self._run_seq(always_distracted, always_false, always_true, always_false)
- self.assertEqual(len(events[int((d_status.settings._DISTRACTED_TIME-d_status.settings._DISTRACTED_PRE_TIME_TILL_TERMINAL)/2/DT_DMON)]), 0)
- self.assertEqual(events[int((d_status.settings._DISTRACTED_TIME-d_status.settings._DISTRACTED_PRE_TIME_TILL_TERMINAL +
- ((d_status.settings._DISTRACTED_PRE_TIME_TILL_TERMINAL-d_status.settings._DISTRACTED_PROMPT_TIME_TILL_TERMINAL)/2))/DT_DMON)].names[0],
- EventName.preDriverDistracted)
- self.assertEqual(events[int((d_status.settings._DISTRACTED_TIME-d_status.settings._DISTRACTED_PROMPT_TIME_TILL_TERMINAL +
- ((d_status.settings._DISTRACTED_PROMPT_TIME_TILL_TERMINAL)/2))/DT_DMON)].names[0], EventName.promptDriverDistracted)
- self.assertEqual(events[int((d_status.settings._DISTRACTED_TIME +
- ((TEST_TIMESPAN-10-d_status.settings._DISTRACTED_TIME)/2))/DT_DMON)].names[0], EventName.driverDistracted)
- self.assertIs(type(d_status.awareness), float)
+ assert len(events[int((d_status.settings._DISTRACTED_TIME-d_status.settings._DISTRACTED_PRE_TIME_TILL_TERMINAL)/2/DT_DMON)]) == 0
+ assert events[int((d_status.settings._DISTRACTED_TIME-d_status.settings._DISTRACTED_PRE_TIME_TILL_TERMINAL + \
+ ((d_status.settings._DISTRACTED_PRE_TIME_TILL_TERMINAL-d_status.settings._DISTRACTED_PROMPT_TIME_TILL_TERMINAL)/2))/DT_DMON)].names[0] == \
+ EventName.preDriverDistracted
+ assert events[int((d_status.settings._DISTRACTED_TIME-d_status.settings._DISTRACTED_PROMPT_TIME_TILL_TERMINAL + \
+ ((d_status.settings._DISTRACTED_PROMPT_TIME_TILL_TERMINAL)/2))/DT_DMON)].names[0] == EventName.promptDriverDistracted
+ assert events[int((d_status.settings._DISTRACTED_TIME + \
+ ((TEST_TIMESPAN-10-d_status.settings._DISTRACTED_TIME)/2))/DT_DMON)].names[0] == EventName.driverDistracted
+ assert isinstance(d_status.awareness, float)
# engaged, no face detected the whole time, no action
def test_fully_invisible_driver(self):
events, d_status = self._run_seq(always_no_face, always_false, always_true, always_false)
- self.assertTrue(len(events[int((d_status.settings._AWARENESS_TIME-d_status.settings._AWARENESS_PRE_TIME_TILL_TERMINAL)/2/DT_DMON)]) == 0)
- self.assertEqual(events[int((d_status.settings._AWARENESS_TIME-d_status.settings._AWARENESS_PRE_TIME_TILL_TERMINAL +
- ((d_status.settings._AWARENESS_PRE_TIME_TILL_TERMINAL-d_status.settings._AWARENESS_PROMPT_TIME_TILL_TERMINAL)/2))/DT_DMON)].names[0],
- EventName.preDriverUnresponsive)
- self.assertEqual(events[int((d_status.settings._AWARENESS_TIME-d_status.settings._AWARENESS_PROMPT_TIME_TILL_TERMINAL +
- ((d_status.settings._AWARENESS_PROMPT_TIME_TILL_TERMINAL)/2))/DT_DMON)].names[0], EventName.promptDriverUnresponsive)
- self.assertEqual(events[int((d_status.settings._AWARENESS_TIME +
- ((TEST_TIMESPAN-10-d_status.settings._AWARENESS_TIME)/2))/DT_DMON)].names[0], EventName.driverUnresponsive)
+ assert len(events[int((d_status.settings._AWARENESS_TIME-d_status.settings._AWARENESS_PRE_TIME_TILL_TERMINAL)/2/DT_DMON)]) == 0
+ assert events[int((d_status.settings._AWARENESS_TIME-d_status.settings._AWARENESS_PRE_TIME_TILL_TERMINAL + \
+ ((d_status.settings._AWARENESS_PRE_TIME_TILL_TERMINAL-d_status.settings._AWARENESS_PROMPT_TIME_TILL_TERMINAL)/2))/DT_DMON)].names[0] == \
+ EventName.preDriverUnresponsive
+ assert events[int((d_status.settings._AWARENESS_TIME-d_status.settings._AWARENESS_PROMPT_TIME_TILL_TERMINAL + \
+ ((d_status.settings._AWARENESS_PROMPT_TIME_TILL_TERMINAL)/2))/DT_DMON)].names[0] == EventName.promptDriverUnresponsive
+ assert events[int((d_status.settings._AWARENESS_TIME + \
+ ((TEST_TIMESPAN-10-d_status.settings._AWARENESS_TIME)/2))/DT_DMON)].names[0] == EventName.driverUnresponsive
# engaged, down to orange, driver pays attention, back to normal; then down to orange, driver touches wheel
# - should have short orange recovery time and no green afterwards; wheel touch only recovers when paying attention
@@ -111,12 +110,12 @@ def test_normal_driver(self):
interaction_vector = [car_interaction_NOT_DETECTED] * int(DISTRACTED_SECONDS_TO_ORANGE*3/DT_DMON) + \
[car_interaction_DETECTED] * (int(TEST_TIMESPAN/DT_DMON)-int(DISTRACTED_SECONDS_TO_ORANGE*3/DT_DMON))
events, _ = self._run_seq(ds_vector, interaction_vector, always_true, always_false)
- self.assertEqual(len(events[int(DISTRACTED_SECONDS_TO_ORANGE*0.5/DT_DMON)]), 0)
- self.assertEqual(events[int((DISTRACTED_SECONDS_TO_ORANGE-0.1)/DT_DMON)].names[0], EventName.promptDriverDistracted)
- self.assertEqual(len(events[int(DISTRACTED_SECONDS_TO_ORANGE*1.5/DT_DMON)]), 0)
- self.assertEqual(events[int((DISTRACTED_SECONDS_TO_ORANGE*3-0.1)/DT_DMON)].names[0], EventName.promptDriverDistracted)
- self.assertEqual(events[int((DISTRACTED_SECONDS_TO_ORANGE*3+0.1)/DT_DMON)].names[0], EventName.promptDriverDistracted)
- self.assertEqual(len(events[int((DISTRACTED_SECONDS_TO_ORANGE*3+2.5)/DT_DMON)]), 0)
+ assert len(events[int(DISTRACTED_SECONDS_TO_ORANGE*0.5/DT_DMON)]) == 0
+ assert events[int((DISTRACTED_SECONDS_TO_ORANGE-0.1)/DT_DMON)].names[0] == EventName.promptDriverDistracted
+ assert len(events[int(DISTRACTED_SECONDS_TO_ORANGE*1.5/DT_DMON)]) == 0
+ assert events[int((DISTRACTED_SECONDS_TO_ORANGE*3-0.1)/DT_DMON)].names[0] == EventName.promptDriverDistracted
+ assert events[int((DISTRACTED_SECONDS_TO_ORANGE*3+0.1)/DT_DMON)].names[0] == EventName.promptDriverDistracted
+ assert len(events[int((DISTRACTED_SECONDS_TO_ORANGE*3+2.5)/DT_DMON)]) == 0
# engaged, down to orange, driver dodges camera, then comes back still distracted, down to red, \
# driver dodges, and then touches wheel to no avail, disengages and reengages
@@ -135,10 +134,10 @@ def test_biggest_comma_fan(self):
op_vector[int((DISTRACTED_SECONDS_TO_RED+2*_invisible_time+2.5)/DT_DMON):int((DISTRACTED_SECONDS_TO_RED+2*_invisible_time+3)/DT_DMON)] \
= [False] * int(0.5/DT_DMON)
events, _ = self._run_seq(ds_vector, interaction_vector, op_vector, always_false)
- self.assertEqual(events[int((DISTRACTED_SECONDS_TO_ORANGE+0.5*_invisible_time)/DT_DMON)].names[0], EventName.promptDriverDistracted)
- self.assertEqual(events[int((DISTRACTED_SECONDS_TO_RED+1.5*_invisible_time)/DT_DMON)].names[0], EventName.driverDistracted)
- self.assertEqual(events[int((DISTRACTED_SECONDS_TO_RED+2*_invisible_time+1.5)/DT_DMON)].names[0], EventName.driverDistracted)
- self.assertTrue(len(events[int((DISTRACTED_SECONDS_TO_RED+2*_invisible_time+3.5)/DT_DMON)]) == 0)
+ assert events[int((DISTRACTED_SECONDS_TO_ORANGE+0.5*_invisible_time)/DT_DMON)].names[0] == EventName.promptDriverDistracted
+ assert events[int((DISTRACTED_SECONDS_TO_RED+1.5*_invisible_time)/DT_DMON)].names[0] == EventName.driverDistracted
+ assert events[int((DISTRACTED_SECONDS_TO_RED+2*_invisible_time+1.5)/DT_DMON)].names[0] == EventName.driverDistracted
+ assert len(events[int((DISTRACTED_SECONDS_TO_RED+2*_invisible_time+3.5)/DT_DMON)]) == 0
# engaged, invisible driver, down to orange, driver touches wheel; then down to orange again, driver appears
# - both actions should clear the alert, but momentary appearance should not
@@ -150,15 +149,15 @@ def test_sometimes_transparent_commuter(self):
[msg_ATTENTIVE] * int(_visible_time/DT_DMON)
interaction_vector[int((INVISIBLE_SECONDS_TO_ORANGE)/DT_DMON):int((INVISIBLE_SECONDS_TO_ORANGE+1)/DT_DMON)] = [True] * int(1/DT_DMON)
events, _ = self._run_seq(ds_vector, interaction_vector, 2*always_true, 2*always_false)
- self.assertTrue(len(events[int(INVISIBLE_SECONDS_TO_ORANGE*0.5/DT_DMON)]) == 0)
- self.assertEqual(events[int((INVISIBLE_SECONDS_TO_ORANGE-0.1)/DT_DMON)].names[0], EventName.promptDriverUnresponsive)
- self.assertTrue(len(events[int((INVISIBLE_SECONDS_TO_ORANGE+0.1)/DT_DMON)]) == 0)
+ assert len(events[int(INVISIBLE_SECONDS_TO_ORANGE*0.5/DT_DMON)]) == 0
+ assert events[int((INVISIBLE_SECONDS_TO_ORANGE-0.1)/DT_DMON)].names[0] == EventName.promptDriverUnresponsive
+ assert len(events[int((INVISIBLE_SECONDS_TO_ORANGE+0.1)/DT_DMON)]) == 0
if _visible_time == 0.5:
- self.assertEqual(events[int((INVISIBLE_SECONDS_TO_ORANGE*2+1-0.1)/DT_DMON)].names[0], EventName.promptDriverUnresponsive)
- self.assertEqual(events[int((INVISIBLE_SECONDS_TO_ORANGE*2+1+0.1+_visible_time)/DT_DMON)].names[0], EventName.preDriverUnresponsive)
+ assert events[int((INVISIBLE_SECONDS_TO_ORANGE*2+1-0.1)/DT_DMON)].names[0] == EventName.promptDriverUnresponsive
+ assert events[int((INVISIBLE_SECONDS_TO_ORANGE*2+1+0.1+_visible_time)/DT_DMON)].names[0] == EventName.preDriverUnresponsive
elif _visible_time == 10:
- self.assertEqual(events[int((INVISIBLE_SECONDS_TO_ORANGE*2+1-0.1)/DT_DMON)].names[0], EventName.promptDriverUnresponsive)
- self.assertTrue(len(events[int((INVISIBLE_SECONDS_TO_ORANGE*2+1+0.1+_visible_time)/DT_DMON)]) == 0)
+ assert events[int((INVISIBLE_SECONDS_TO_ORANGE*2+1-0.1)/DT_DMON)].names[0] == EventName.promptDriverUnresponsive
+ assert len(events[int((INVISIBLE_SECONDS_TO_ORANGE*2+1+0.1+_visible_time)/DT_DMON)]) == 0
# engaged, invisible driver, down to red, driver appears and then touches wheel, then disengages/reengages
# - only disengage will clear the alert
@@ -171,18 +170,18 @@ def test_last_second_responder(self):
interaction_vector[int((INVISIBLE_SECONDS_TO_RED+_visible_time)/DT_DMON):int((INVISIBLE_SECONDS_TO_RED+_visible_time+1)/DT_DMON)] = [True] * int(1/DT_DMON)
op_vector[int((INVISIBLE_SECONDS_TO_RED+_visible_time+1)/DT_DMON):int((INVISIBLE_SECONDS_TO_RED+_visible_time+0.5)/DT_DMON)] = [False] * int(0.5/DT_DMON)
events, _ = self._run_seq(ds_vector, interaction_vector, op_vector, always_false)
- self.assertTrue(len(events[int(INVISIBLE_SECONDS_TO_ORANGE*0.5/DT_DMON)]) == 0)
- self.assertEqual(events[int((INVISIBLE_SECONDS_TO_ORANGE-0.1)/DT_DMON)].names[0], EventName.promptDriverUnresponsive)
- self.assertEqual(events[int((INVISIBLE_SECONDS_TO_RED-0.1)/DT_DMON)].names[0], EventName.driverUnresponsive)
- self.assertEqual(events[int((INVISIBLE_SECONDS_TO_RED+0.5*_visible_time)/DT_DMON)].names[0], EventName.driverUnresponsive)
- self.assertEqual(events[int((INVISIBLE_SECONDS_TO_RED+_visible_time+0.5)/DT_DMON)].names[0], EventName.driverUnresponsive)
- self.assertTrue(len(events[int((INVISIBLE_SECONDS_TO_RED+_visible_time+1+0.1)/DT_DMON)]) == 0)
+ assert len(events[int(INVISIBLE_SECONDS_TO_ORANGE*0.5/DT_DMON)]) == 0
+ assert events[int((INVISIBLE_SECONDS_TO_ORANGE-0.1)/DT_DMON)].names[0] == EventName.promptDriverUnresponsive
+ assert events[int((INVISIBLE_SECONDS_TO_RED-0.1)/DT_DMON)].names[0] == EventName.driverUnresponsive
+ assert events[int((INVISIBLE_SECONDS_TO_RED+0.5*_visible_time)/DT_DMON)].names[0] == EventName.driverUnresponsive
+ assert events[int((INVISIBLE_SECONDS_TO_RED+_visible_time+0.5)/DT_DMON)].names[0] == EventName.driverUnresponsive
+ assert len(events[int((INVISIBLE_SECONDS_TO_RED+_visible_time+1+0.1)/DT_DMON)]) == 0
# disengaged, always distracted driver
# - dm should stay quiet when not engaged
def test_pure_dashcam_user(self):
events, _ = self._run_seq(always_distracted, always_false, always_false, always_false)
- self.assertTrue(sum(len(event) for event in events) == 0)
+ assert sum(len(event) for event in events) == 0
# engaged, car stops at traffic light, down to orange, no action, then car starts moving
# - should only reach green when stopped, but continues counting down on launch
@@ -191,10 +190,10 @@ def test_long_traffic_light_victim(self):
standstill_vector = always_true[:]
standstill_vector[int(_redlight_time/DT_DMON):] = [False] * int((TEST_TIMESPAN-_redlight_time)/DT_DMON)
events, d_status = self._run_seq(always_distracted, always_false, always_true, standstill_vector)
- self.assertEqual(events[int((d_status.settings._DISTRACTED_TIME-d_status.settings._DISTRACTED_PRE_TIME_TILL_TERMINAL+1)/DT_DMON)].names[0],
- EventName.preDriverDistracted)
- self.assertEqual(events[int((_redlight_time-0.1)/DT_DMON)].names[0], EventName.preDriverDistracted)
- self.assertEqual(events[int((_redlight_time+0.5)/DT_DMON)].names[0], EventName.promptDriverDistracted)
+ assert events[int((d_status.settings._DISTRACTED_TIME-d_status.settings._DISTRACTED_PRE_TIME_TILL_TERMINAL+1)/DT_DMON)].names[0] == \
+ EventName.preDriverDistracted
+ assert events[int((_redlight_time-0.1)/DT_DMON)].names[0] == EventName.preDriverDistracted
+ assert events[int((_redlight_time+0.5)/DT_DMON)].names[0] == EventName.promptDriverDistracted
# engaged, model is somehow uncertain and driver is distracted
# - should fall back to wheel touch after uncertain alert
@@ -202,13 +201,10 @@ def test_somehow_indecisive_model(self):
ds_vector = [msg_DISTRACTED_BUT_SOMEHOW_UNCERTAIN] * int(TEST_TIMESPAN/DT_DMON)
interaction_vector = always_false[:]
events, d_status = self._run_seq(ds_vector, interaction_vector, always_true, always_false)
- self.assertTrue(EventName.preDriverUnresponsive in
- events[int((INVISIBLE_SECONDS_TO_ORANGE-1+DT_DMON*d_status.settings._HI_STD_FALLBACK_TIME-0.1)/DT_DMON)].names)
- self.assertTrue(EventName.promptDriverUnresponsive in
- events[int((INVISIBLE_SECONDS_TO_ORANGE-1+DT_DMON*d_status.settings._HI_STD_FALLBACK_TIME+0.1)/DT_DMON)].names)
- self.assertTrue(EventName.driverUnresponsive in
- events[int((INVISIBLE_SECONDS_TO_RED-1+DT_DMON*d_status.settings._HI_STD_FALLBACK_TIME+0.1)/DT_DMON)].names)
+ assert EventName.preDriverUnresponsive in \
+ events[int((INVISIBLE_SECONDS_TO_ORANGE-1+DT_DMON*d_status.settings._HI_STD_FALLBACK_TIME-0.1)/DT_DMON)].names
+ assert EventName.promptDriverUnresponsive in \
+ events[int((INVISIBLE_SECONDS_TO_ORANGE-1+DT_DMON*d_status.settings._HI_STD_FALLBACK_TIME+0.1)/DT_DMON)].names
+ assert EventName.driverUnresponsive in \
+ events[int((INVISIBLE_SECONDS_TO_RED-1+DT_DMON*d_status.settings._HI_STD_FALLBACK_TIME+0.1)/DT_DMON)].names
-
-if __name__ == "__main__":
- unittest.main()
diff --git a/selfdrive/navd/tests/test_map_renderer.py b/selfdrive/navd/tests/test_map_renderer.py
index 832e0d1eab454a..52b594a57e8f77 100755
--- a/selfdrive/navd/tests/test_map_renderer.py
+++ b/selfdrive/navd/tests/test_map_renderer.py
@@ -3,7 +3,6 @@
import numpy as np
import os
import pytest
-import unittest
import requests
import threading
import http.server
@@ -66,11 +65,11 @@ def enable_internet(self):
@pytest.mark.skip(reason="not used")
-class TestMapRenderer(unittest.TestCase):
+class TestMapRenderer:
server: MapBoxInternetDisabledServer
@classmethod
- def setUpClass(cls):
+ def setup_class(cls):
assert "MAPBOX_TOKEN" in os.environ
cls.original_token = os.environ["MAPBOX_TOKEN"]
cls.server = MapBoxInternetDisabledServer()
@@ -78,10 +77,10 @@ def setUpClass(cls):
time.sleep(0.5) # wait for server to startup
@classmethod
- def tearDownClass(cls) -> None:
+ def teardown_class(cls) -> None:
cls.server.stop()
- def setUp(self):
+ def setup_method(self):
self.server.enable_internet()
os.environ['MAPS_HOST'] = f'http://localhost:{self.server.port}'
@@ -203,15 +202,12 @@ def test_render_time_distribution(self):
def assert_stat(stat, nominal, tol=0.3):
tol = (nominal / (1+tol)), (nominal * (1+tol))
- self.assertTrue(tol[0] < stat < tol[1], f"{stat} not in tolerance {tol}")
+ assert tol[0] < stat < tol[1], f"{stat} not in tolerance {tol}"
assert_stat(_mean, 0.030)
assert_stat(_median, 0.027)
assert_stat(_stddev, 0.0078)
- self.assertLess(_max, 0.065)
- self.assertGreater(_min, 0.015)
+ assert _max < 0.065
+ assert _min > 0.015
-
-if __name__ == "__main__":
- unittest.main()
diff --git a/selfdrive/navd/tests/test_navd.py b/selfdrive/navd/tests/test_navd.py
index 61be6cc387f75d..07f93036535a9d 100755
--- a/selfdrive/navd/tests/test_navd.py
+++ b/selfdrive/navd/tests/test_navd.py
@@ -1,7 +1,6 @@
#!/usr/bin/env python3
import json
import random
-import unittest
import numpy as np
from parameterized import parameterized
@@ -11,12 +10,12 @@
from openpilot.selfdrive.manager.process_config import managed_processes
-class TestNavd(unittest.TestCase):
- def setUp(self):
+class TestNavd:
+ def setup_method(self):
self.params = Params()
self.sm = messaging.SubMaster(['navRoute', 'navInstruction'])
- def tearDown(self):
+ def teardown_method(self):
managed_processes['navd'].stop()
def _check_route(self, start, end, check_coords=True):
@@ -57,7 +56,3 @@ def test_random(self, index):
start = {"latitude": random.uniform(-90, 90), "longitude": random.uniform(-180, 180)}
end = {"latitude": random.uniform(-90, 90), "longitude": random.uniform(-180, 180)}
self._check_route(start, end, check_coords=False)
-
-
-if __name__ == "__main__":
- unittest.main()
diff --git a/selfdrive/test/helpers.py b/selfdrive/test/helpers.py
index ce8918aec32d31..148d142bb6ceea 100644
--- a/selfdrive/test/helpers.py
+++ b/selfdrive/test/helpers.py
@@ -3,6 +3,7 @@
import os
import threading
import time
+import pytest
from functools import wraps
@@ -32,15 +33,15 @@ def phone_only(f):
@wraps(f)
def wrap(self, *args, **kwargs):
if PC:
- self.skipTest("This test is not meant to run on PC")
- f(self, *args, **kwargs)
+ pytest.skip("This test is not meant to run on PC")
+ return f(self, *args, **kwargs)
return wrap
def release_only(f):
@wraps(f)
def wrap(self, *args, **kwargs):
if "RELEASE" not in os.environ:
- self.skipTest("This test is only for release branches")
+ pytest.skip("This test is only for release branches")
f(self, *args, **kwargs)
return wrap
diff --git a/selfdrive/test/longitudinal_maneuvers/test_longitudinal.py b/selfdrive/test/longitudinal_maneuvers/test_longitudinal.py
index 713b7801f8ad41..0ad6d6d4fd4420 100755
--- a/selfdrive/test/longitudinal_maneuvers/test_longitudinal.py
+++ b/selfdrive/test/longitudinal_maneuvers/test_longitudinal.py
@@ -1,6 +1,5 @@
#!/usr/bin/env python3
import itertools
-import unittest
from parameterized import parameterized_class
from openpilot.selfdrive.controls.lib.longitudinal_mpc_lib.long_mpc import STOP_DISTANCE
@@ -144,17 +143,13 @@ def create_maneuvers(kwargs):
@parameterized_class(("e2e", "force_decel"), itertools.product([True, False], repeat=2))
-class LongitudinalControl(unittest.TestCase):
+class TestLongitudinalControl:
e2e: bool
force_decel: bool
- def test_maneuver(self):
+ def test_maneuver(self, subtests):
for maneuver in create_maneuvers({"e2e": self.e2e, "force_decel": self.force_decel}):
- with self.subTest(title=maneuver.title, e2e=maneuver.e2e, force_decel=maneuver.force_decel):
+ with subtests.test(title=maneuver.title, e2e=maneuver.e2e, force_decel=maneuver.force_decel):
print(maneuver.title, f'in {"e2e" if maneuver.e2e else "acc"} mode')
valid, _ = maneuver.evaluate()
- self.assertTrue(valid)
-
-
-if __name__ == "__main__":
- unittest.main(failfast=True)
+ assert valid
diff --git a/selfdrive/test/process_replay/model_replay_ref_commit b/selfdrive/test/process_replay/model_replay_ref_commit
index 47571656e56777..87f1adfadad766 100644
--- a/selfdrive/test/process_replay/model_replay_ref_commit
+++ b/selfdrive/test/process_replay/model_replay_ref_commit
@@ -1 +1 @@
-69a3b169ebc478285dad5eea87658ed2cb8fee13
+73eb11111fb6407fa307c3f2bdd3331f2514c707
diff --git a/selfdrive/test/process_replay/ref_commit b/selfdrive/test/process_replay/ref_commit
index a61fd8e23bccbd..3fc6acc3cb1cbb 100644
--- a/selfdrive/test/process_replay/ref_commit
+++ b/selfdrive/test/process_replay/ref_commit
@@ -1 +1 @@
-e5a385503e4307ae77c73736a602380b08e61334
+f1ecdf9048fb12e289baf4933cb3ef12e486252c
diff --git a/selfdrive/test/process_replay/test_fuzzy.py b/selfdrive/test/process_replay/test_fuzzy.py
index 6c81119fbf11c0..d295092b207d5d 100755
--- a/selfdrive/test/process_replay/test_fuzzy.py
+++ b/selfdrive/test/process_replay/test_fuzzy.py
@@ -3,7 +3,6 @@
from hypothesis import given, HealthCheck, Phase, settings
import hypothesis.strategies as st
from parameterized import parameterized
-import unittest
from cereal import log
from openpilot.selfdrive.car.toyota.values import CAR as TOYOTA
@@ -17,7 +16,7 @@
TEST_CASES = [(cfg.proc_name, copy.deepcopy(cfg)) for cfg in pr.CONFIGS if cfg.proc_name not in NOT_TESTED]
-class TestFuzzProcesses(unittest.TestCase):
+class TestFuzzProcesses:
# TODO: make this faster and increase examples
@parameterized.expand(TEST_CASES)
@@ -28,6 +27,3 @@ def test_fuzz_process(self, proc_name, cfg, data):
lr = [log.Event.new_message(**m).as_reader() for m in msgs]
cfg.timeout = 5
pr.replay_process(cfg, lr, fingerprint=TOYOTA.TOYOTA_COROLLA_TSS2, disable_progress=True)
-
-if __name__ == "__main__":
- unittest.main()
diff --git a/selfdrive/test/process_replay/test_processes.py b/selfdrive/test/process_replay/test_processes.py
index 5fa80f0e095f33..7a482c1bdc8566 100755
--- a/selfdrive/test/process_replay/test_processes.py
+++ b/selfdrive/test/process_replay/test_processes.py
@@ -111,6 +111,12 @@ def test_process(cfg, lr, segment, ref_log_path, new_log_path, ignore_fields=Non
if segment not in ("regen6CA24BC3035|2023-10-30--23-14-28--0", "regen7D2D3F82D5B|2023-10-30--23-15-55--0"):
return f"Route did not enable at all or for long enough: {new_log_path}", log_msgs
+ if cfg.proc_name != 'ubloxd' or segment != 'regen6CA24BC3035|2023-10-30--23-14-28--0':
+ seen_msgs = {m.which() for m in log_msgs}
+ expected_msgs = set(cfg.subs)
+ if seen_msgs != expected_msgs:
+ return f"Expected messages: {expected_msgs}, but got: {seen_msgs}", log_msgs
+
try:
return compare_logs(ref_log_msgs, log_msgs, ignore_fields + cfg.ignore, ignore_msgs, cfg.tolerance), log_msgs
except Exception as e:
diff --git a/selfdrive/test/process_replay/test_regen.py b/selfdrive/test/process_replay/test_regen.py
index d98963549727e7..c27d9e8f7b4c52 100755
--- a/selfdrive/test/process_replay/test_regen.py
+++ b/selfdrive/test/process_replay/test_regen.py
@@ -1,7 +1,5 @@
#!/usr/bin/env python3
-import unittest
-
from parameterized import parameterized
from openpilot.selfdrive.test.process_replay.regen import regen_segment, DummyFrameReader
@@ -30,7 +28,7 @@ def ci_setup_data_readers(route, sidx):
return lr, frs
-class TestRegen(unittest.TestCase):
+class TestRegen:
@parameterized.expand(TESTED_SEGMENTS)
def test_engaged(self, case_name, segment):
route, sidx = segment.rsplit("--", 1)
@@ -38,8 +36,4 @@ def test_engaged(self, case_name, segment):
output_logs = regen_segment(lr, frs, disable_tqdm=True)
engaged = check_openpilot_enabled(output_logs)
- self.assertTrue(engaged, f"openpilot not engaged in {case_name}")
-
-
-if __name__=='__main__':
- unittest.main()
+ assert engaged, f"openpilot not engaged in {case_name}"
diff --git a/selfdrive/test/test_onroad.py b/selfdrive/test/test_onroad.py
index cd0846c894aa35..9aca9afcd98c6b 100755
--- a/selfdrive/test/test_onroad.py
+++ b/selfdrive/test/test_onroad.py
@@ -10,7 +10,6 @@
import subprocess
import time
import numpy as np
-import unittest
from collections import Counter, defaultdict
from functools import cached_property
from pathlib import Path
@@ -102,10 +101,10 @@ def cputime_total(ct):
@pytest.mark.tici
-class TestOnroad(unittest.TestCase):
+class TestOnroad:
@classmethod
- def setUpClass(cls):
+ def setup_class(cls):
if "DEBUG" in os.environ:
segs = filter(lambda x: os.path.exists(os.path.join(x, "rlog")), Path(Paths.log_root()).iterdir())
segs = sorted(segs, key=lambda x: x.stat().st_mtime)
@@ -181,7 +180,7 @@ def service_msgs(self):
msgs[m.which()].append(m)
return msgs
- def test_service_frequencies(self):
+ def test_service_frequencies(self, subtests):
for s, msgs in self.service_msgs.items():
if s in ('initData', 'sentinel'):
continue
@@ -190,18 +189,18 @@ def test_service_frequencies(self):
if s in ('ubloxGnss', 'ubloxRaw', 'gnssMeasurements', 'gpsLocation', 'gpsLocationExternal', 'qcomGnss'):
continue
- with self.subTest(service=s):
+ with subtests.test(service=s):
assert len(msgs) >= math.floor(SERVICE_LIST[s].frequency*55)
def test_cloudlog_size(self):
msgs = [m for m in self.lr if m.which() == 'logMessage']
total_size = sum(len(m.as_builder().to_bytes()) for m in msgs)
- self.assertLess(total_size, 3.5e5)
+ assert total_size < 3.5e5
cnt = Counter(json.loads(m.logMessage)['filename'] for m in msgs)
big_logs = [f for f, n in cnt.most_common(3) if n / sum(cnt.values()) > 30.]
- self.assertEqual(len(big_logs), 0, f"Log spam: {big_logs}")
+ assert len(big_logs) == 0, f"Log spam: {big_logs}"
def test_log_sizes(self):
for f, sz in self.log_sizes.items():
@@ -230,15 +229,15 @@ def test_ui_timings(self):
result += "------------------------------------------------\n"
print(result)
- self.assertLess(max(ts), 250.)
- self.assertLess(np.mean(ts), 10.)
+ assert max(ts) < 250.
+ assert np.mean(ts) < 10.
#self.assertLess(np.std(ts), 5.)
# some slow frames are expected since camerad/modeld can preempt ui
veryslow = [x for x in ts if x > 40.]
assert len(veryslow) < 5, f"Too many slow frame draw times: {veryslow}"
- def test_cpu_usage(self):
+ def test_cpu_usage(self, subtests):
result = "\n"
result += "------------------------------------------------\n"
result += "------------------ CPU Usage -------------------\n"
@@ -286,12 +285,12 @@ def test_cpu_usage(self):
# Ensure there's no missing procs
all_procs = {p.name for p in self.service_msgs['managerState'][0].managerState.processes if p.shouldBeRunning}
for p in all_procs:
- with self.subTest(proc=p):
+ with subtests.test(proc=p):
assert any(p in pp for pp in PROCS.keys()), f"Expected CPU usage missing for {p}"
# total CPU check
procs_tot = sum([(max(x) if isinstance(x, tuple) else x) for x in PROCS.values()])
- with self.subTest(name="total CPU"):
+ with subtests.test(name="total CPU"):
assert procs_tot < MAX_TOTAL_CPU, "Total CPU budget exceeded"
result += "------------------------------------------------\n"
result += f"Total allocated CPU usage is {procs_tot}%, budget is {MAX_TOTAL_CPU}%, {MAX_TOTAL_CPU-procs_tot:.1f}% left\n"
@@ -299,7 +298,7 @@ def test_cpu_usage(self):
print(result)
- self.assertTrue(cpu_ok)
+ assert cpu_ok
def test_memory_usage(self):
mems = [m.deviceState.memoryUsagePercent for m in self.service_msgs['deviceState']]
@@ -307,10 +306,10 @@ def test_memory_usage(self):
# check for big leaks. note that memory usage is
# expected to go up while the MSGQ buffers fill up
- self.assertLessEqual(max(mems) - min(mems), 3.0)
+ assert max(mems) - min(mems) <= 3.0
def test_gpu_usage(self):
- self.assertEqual(self.gpu_procs, {"weston", "ui", "camerad", "selfdrive.modeld.modeld"})
+ assert self.gpu_procs == {"weston", "ui", "camerad", "selfdrive.modeld.modeld"}
def test_camera_processing_time(self):
result = "\n"
@@ -319,14 +318,14 @@ def test_camera_processing_time(self):
result += "------------------------------------------------\n"
ts = [getattr(m, m.which()).processingTime for m in self.lr if 'CameraState' in m.which()]
- self.assertLess(min(ts), 0.025, f"high execution time: {min(ts)}")
+ assert min(ts) < 0.025, f"high execution time: {min(ts)}"
result += f"execution time: min {min(ts):.5f}s\n"
result += f"execution time: max {max(ts):.5f}s\n"
result += f"execution time: mean {np.mean(ts):.5f}s\n"
result += "------------------------------------------------\n"
print(result)
- @unittest.skip("TODO: enable once timings are fixed")
+ @pytest.mark.skip("TODO: enable once timings are fixed")
def test_camera_frame_timings(self):
result = "\n"
result += "------------------------------------------------\n"
@@ -336,7 +335,7 @@ def test_camera_frame_timings(self):
ts = [getattr(m, m.which()).timestampSof for m in self.lr if name in m.which()]
d_ms = np.diff(ts) / 1e6
d50 = np.abs(d_ms-50)
- self.assertLess(max(d50), 1.0, f"high sof delta vs 50ms: {max(d50)}")
+ assert max(d50) < 1.0, f"high sof delta vs 50ms: {max(d50)}"
result += f"{name} sof delta vs 50ms: min {min(d50):.5f}s\n"
result += f"{name} sof delta vs 50ms: max {max(d50):.5f}s\n"
result += f"{name} sof delta vs 50ms: mean {d50.mean():.5f}s\n"
@@ -352,8 +351,8 @@ def test_mpc_execution_timings(self):
cfgs = [("longitudinalPlan", 0.05, 0.05),]
for (s, instant_max, avg_max) in cfgs:
ts = [getattr(m, s).solverExecutionTime for m in self.service_msgs[s]]
- self.assertLess(max(ts), instant_max, f"high '{s}' execution time: {max(ts)}")
- self.assertLess(np.mean(ts), avg_max, f"high avg '{s}' execution time: {np.mean(ts)}")
+ assert max(ts) < instant_max, f"high '{s}' execution time: {max(ts)}"
+ assert np.mean(ts) < avg_max, f"high avg '{s}' execution time: {np.mean(ts)}"
result += f"'{s}' execution time: min {min(ts):.5f}s\n"
result += f"'{s}' execution time: max {max(ts):.5f}s\n"
result += f"'{s}' execution time: mean {np.mean(ts):.5f}s\n"
@@ -372,8 +371,8 @@ def test_model_execution_timings(self):
]
for (s, instant_max, avg_max) in cfgs:
ts = [getattr(m, s).modelExecutionTime for m in self.service_msgs[s]]
- self.assertLess(max(ts), instant_max, f"high '{s}' execution time: {max(ts)}")
- self.assertLess(np.mean(ts), avg_max, f"high avg '{s}' execution time: {np.mean(ts)}")
+ assert max(ts) < instant_max, f"high '{s}' execution time: {max(ts)}"
+ assert np.mean(ts) < avg_max, f"high avg '{s}' execution time: {np.mean(ts)}"
result += f"'{s}' execution time: min {min(ts):.5f}s\n"
result += f"'{s}' execution time: max {max(ts):.5f}s\n"
result += f"'{s}' execution time: mean {np.mean(ts):.5f}s\n"
@@ -409,7 +408,7 @@ def test_timings(self):
result += f"{''.ljust(40)} {np.max(np.absolute([np.max(ts)/dt, np.min(ts)/dt]))} {np.std(ts)/dt}\n"
result += "="*67
print(result)
- self.assertTrue(passed)
+ assert passed
@release_only
def test_startup(self):
@@ -420,7 +419,7 @@ def test_startup(self):
startup_alert = msg.controlsState.alertText1
break
expected = EVENTS[car.CarEvent.EventName.startup][ET.PERMANENT].alert_text_1
- self.assertEqual(startup_alert, expected, "wrong startup alert")
+ assert startup_alert == expected, "wrong startup alert"
def test_engagable(self):
no_entries = Counter()
@@ -432,7 +431,3 @@ def test_engagable(self):
eng = [m.controlsState.engageable for m in self.service_msgs['controlsState']]
assert all(eng), \
f"Not engageable for whole segment:\n- controlsState.engageable: {Counter(eng)}\n- No entry events: {no_entries}"
-
-
-if __name__ == "__main__":
- unittest.main()
diff --git a/selfdrive/test/test_updated.py b/selfdrive/test/test_updated.py
index dd79e03de4a5e8..5220cfb2889eae 100755
--- a/selfdrive/test/test_updated.py
+++ b/selfdrive/test/test_updated.py
@@ -4,7 +4,6 @@
import pytest
import time
import tempfile
-import unittest
import shutil
import signal
import subprocess
@@ -15,9 +14,9 @@
@pytest.mark.tici
-class TestUpdated(unittest.TestCase):
+class TestUpdated:
- def setUp(self):
+ def setup_method(self):
self.updated_proc = None
self.tmp_dir = tempfile.TemporaryDirectory()
@@ -59,7 +58,7 @@ def setUp(self):
self.params.clear_all()
os.sync()
- def tearDown(self):
+ def teardown_method(self):
try:
if self.updated_proc is not None:
self.updated_proc.terminate()
@@ -167,7 +166,7 @@ def _check_update_state(self, update_available):
t = self._read_param("LastUpdateTime")
last_update_time = datetime.datetime.fromisoformat(t)
td = datetime.datetime.utcnow() - last_update_time
- self.assertLess(td.total_seconds(), 10)
+ assert td.total_seconds() < 10
self.params.remove("LastUpdateTime")
# wait a bit for the rest of the params to be written
@@ -175,13 +174,13 @@ def _check_update_state(self, update_available):
# check params
update = self._read_param("UpdateAvailable")
- self.assertEqual(update == "1", update_available, f"UpdateAvailable: {repr(update)}")
- self.assertEqual(self._read_param("UpdateFailedCount"), "0")
+      # Parenthesize: a bare `a == b == c` is a *chained* comparison in Python
+      # (`a == b and b == c`), which can never hold for `"1" == <bool>` and
+      # would make this assertion fail unconditionally. We want equality of
+      # the comparison result with the expected flag, as assertEqual did.
+      assert (update == "1") == update_available, f"UpdateAvailable: {repr(update)}"
+ assert self._read_param("UpdateFailedCount") == "0"
# TODO: check that the finalized update actually matches remote
# check the .overlay_init and .overlay_consistent flags
- self.assertTrue(os.path.isfile(os.path.join(self.basedir, ".overlay_init")))
- self.assertEqual(os.path.isfile(os.path.join(self.finalized_dir, ".overlay_consistent")), update_available)
+ assert os.path.isfile(os.path.join(self.basedir, ".overlay_init"))
+ assert os.path.isfile(os.path.join(self.finalized_dir, ".overlay_consistent")) == update_available
# *** test cases ***
@@ -214,7 +213,7 @@ def test_update(self):
self._check_update_state(True)
# Let the updater run for 10 cycles, and write an update every cycle
- @unittest.skip("need to make this faster")
+ @pytest.mark.skip("need to make this faster")
def test_update_loop(self):
self._start_updater()
@@ -243,12 +242,12 @@ def test_overlay_reinit(self):
# run another cycle, should have a new mtime
self._wait_for_update(clear_param=True)
second_mtime = os.path.getmtime(overlay_init_fn)
- self.assertTrue(first_mtime != second_mtime)
+ assert first_mtime != second_mtime
# run another cycle, mtime should be same as last cycle
self._wait_for_update(clear_param=True)
new_mtime = os.path.getmtime(overlay_init_fn)
- self.assertTrue(second_mtime == new_mtime)
+ assert second_mtime == new_mtime
# Make sure updated exits if another instance is running
def test_multiple_instances(self):
@@ -260,7 +259,7 @@ def test_multiple_instances(self):
# start another instance
second_updated = self._get_updated_proc()
ret_code = second_updated.wait(timeout=5)
- self.assertTrue(ret_code is not None)
+ assert ret_code is not None
# *** test cases with NEOS updates ***
@@ -277,10 +276,10 @@ def test_clear_neos_cache(self):
self._start_updater()
self._wait_for_update(clear_param=True)
self._check_update_state(False)
- self.assertFalse(os.path.isdir(self.neosupdate_dir))
+ assert not os.path.isdir(self.neosupdate_dir)
# Let the updater run with no update for a cycle, then write an update
- @unittest.skip("TODO: only runs on device")
+ @pytest.mark.skip("TODO: only runs on device")
def test_update_with_neos_update(self):
# bump the NEOS version and commit it
self._run([
@@ -295,8 +294,4 @@ def test_update_with_neos_update(self):
self._check_update_state(True)
# TODO: more comprehensive check
- self.assertTrue(os.path.isdir(self.neosupdate_dir))
-
-
-if __name__ == "__main__":
- unittest.main()
+ assert os.path.isdir(self.neosupdate_dir)
diff --git a/selfdrive/test/test_valgrind_replay.py b/selfdrive/test/test_valgrind_replay.py
deleted file mode 100755
index 75520df91be9f8..00000000000000
--- a/selfdrive/test/test_valgrind_replay.py
+++ /dev/null
@@ -1,117 +0,0 @@
-#!/usr/bin/env python3
-import os
-import threading
-import time
-import unittest
-import subprocess
-import signal
-
-if "CI" in os.environ:
- def tqdm(x):
- return x
-else:
- from tqdm import tqdm # type: ignore
-
-import cereal.messaging as messaging
-from collections import namedtuple
-from openpilot.tools.lib.logreader import LogReader
-from openpilot.tools.lib.openpilotci import get_url
-from openpilot.common.basedir import BASEDIR
-
-ProcessConfig = namedtuple('ProcessConfig', ['proc_name', 'pub_sub', 'ignore', 'command', 'path', 'segment', 'wait_for_response'])
-
-CONFIGS = [
- ProcessConfig(
- proc_name="ubloxd",
- pub_sub={
- "ubloxRaw": ["ubloxGnss", "gpsLocationExternal"],
- },
- ignore=[],
- command="./ubloxd",
- path="system/ubloxd",
- segment="0375fdf7b1ce594d|2019-06-13--08-32-25--3",
- wait_for_response=True
- ),
-]
-
-
-class TestValgrind(unittest.TestCase):
- def extract_leak_sizes(self, log):
- if "All heap blocks were freed -- no leaks are possible" in log:
- return (0,0,0)
-
- log = log.replace(",","") # fixes casting to int issue with large leaks
- err_lost1 = log.split("definitely lost: ")[1]
- err_lost2 = log.split("indirectly lost: ")[1]
- err_lost3 = log.split("possibly lost: ")[1]
- definitely_lost = int(err_lost1.split(" ")[0])
- indirectly_lost = int(err_lost2.split(" ")[0])
- possibly_lost = int(err_lost3.split(" ")[0])
- return (definitely_lost, indirectly_lost, possibly_lost)
-
- def valgrindlauncher(self, arg, cwd):
- os.chdir(os.path.join(BASEDIR, cwd))
- # Run valgrind on a process
- command = "valgrind --leak-check=full " + arg
- p = subprocess.Popen(command, stderr=subprocess.PIPE, shell=True, preexec_fn=os.setsid)
-
- while not self.replay_done:
- time.sleep(0.1)
-
- # Kill valgrind and extract leak output
- os.killpg(os.getpgid(p.pid), signal.SIGINT)
- _, err = p.communicate()
- error_msg = str(err, encoding='utf-8')
- with open(os.path.join(BASEDIR, "selfdrive/test/valgrind_logs.txt"), "a") as f:
- f.write(error_msg)
- f.write(5 * "\n")
- definitely_lost, indirectly_lost, possibly_lost = self.extract_leak_sizes(error_msg)
- if max(definitely_lost, indirectly_lost, possibly_lost) > 0:
- self.leak = True
- print("LEAKS from", arg, "\nDefinitely lost:", definitely_lost, "\nIndirectly lost", indirectly_lost, "\nPossibly lost", possibly_lost)
- else:
- self.leak = False
-
- def replay_process(self, config, logreader):
- pub_sockets = list(config.pub_sub.keys()) # We dump data from logs here
- sub_sockets = [s for _, sub in config.pub_sub.items() for s in sub] # We get responses here
- pm = messaging.PubMaster(pub_sockets)
- sm = messaging.SubMaster(sub_sockets)
-
- print("Sorting logs")
- all_msgs = sorted(logreader, key=lambda msg: msg.logMonoTime)
- pub_msgs = [msg for msg in all_msgs if msg.which() in list(config.pub_sub.keys())]
-
- thread = threading.Thread(target=self.valgrindlauncher, args=(config.command, config.path))
- thread.daemon = True
- thread.start()
-
- while not all(pm.all_readers_updated(s) for s in config.pub_sub.keys()):
- time.sleep(0)
-
- for msg in tqdm(pub_msgs):
- pm.send(msg.which(), msg.as_builder())
- if config.wait_for_response:
- sm.update(100)
-
- self.replay_done = True
-
- def test_config(self):
- open(os.path.join(BASEDIR, "selfdrive/test/valgrind_logs.txt"), "w").close()
-
- for cfg in CONFIGS:
- self.leak = None
- self.replay_done = False
-
- r, n = cfg.segment.rsplit("--", 1)
- lr = LogReader(get_url(r, n))
- self.replay_process(cfg, lr)
-
- while self.leak is None:
- time.sleep(0.1) # Wait for the valgrind to finish
-
- self.assertFalse(self.leak)
-
-
-if __name__ == "__main__":
- unittest.main()
diff --git a/selfdrive/thermald/tests/test_fan_controller.py b/selfdrive/thermald/tests/test_fan_controller.py
index 7081e1353ee087..c2b1a64509162b 100755
--- a/selfdrive/thermald/tests/test_fan_controller.py
+++ b/selfdrive/thermald/tests/test_fan_controller.py
@@ -1,17 +1,15 @@
#!/usr/bin/env python3
-import unittest
-from unittest.mock import Mock, patch
-from parameterized import parameterized
+import pytest
from openpilot.selfdrive.thermald.fan_controller import TiciFanController
-ALL_CONTROLLERS = [(TiciFanController,)]
+ALL_CONTROLLERS = [TiciFanController]
-def patched_controller(controller_class):
- with patch("os.system", new=Mock()):
- return controller_class()
+def patched_controller(mocker, controller_class):
+ mocker.patch("os.system", new=mocker.Mock())
+ return controller_class()
-class TestFanController(unittest.TestCase):
+class TestFanController:
def wind_up(self, controller, ignition=True):
for _ in range(1000):
controller.update(100, ignition)
@@ -20,37 +18,34 @@ def wind_down(self, controller, ignition=False):
for _ in range(1000):
controller.update(10, ignition)
- @parameterized.expand(ALL_CONTROLLERS)
- def test_hot_onroad(self, controller_class):
- controller = patched_controller(controller_class)
+ @pytest.mark.parametrize("controller_class", ALL_CONTROLLERS)
+ def test_hot_onroad(self, mocker, controller_class):
+ controller = patched_controller(mocker, controller_class)
self.wind_up(controller)
- self.assertGreaterEqual(controller.update(100, True), 70)
+ assert controller.update(100, True) >= 70
- @parameterized.expand(ALL_CONTROLLERS)
- def test_offroad_limits(self, controller_class):
- controller = patched_controller(controller_class)
+ @pytest.mark.parametrize("controller_class", ALL_CONTROLLERS)
+ def test_offroad_limits(self, mocker, controller_class):
+ controller = patched_controller(mocker, controller_class)
self.wind_up(controller)
- self.assertLessEqual(controller.update(100, False), 30)
+ assert controller.update(100, False) <= 30
- @parameterized.expand(ALL_CONTROLLERS)
- def test_no_fan_wear(self, controller_class):
- controller = patched_controller(controller_class)
+ @pytest.mark.parametrize("controller_class", ALL_CONTROLLERS)
+ def test_no_fan_wear(self, mocker, controller_class):
+ controller = patched_controller(mocker, controller_class)
self.wind_down(controller)
- self.assertEqual(controller.update(10, False), 0)
+ assert controller.update(10, False) == 0
- @parameterized.expand(ALL_CONTROLLERS)
- def test_limited(self, controller_class):
- controller = patched_controller(controller_class)
+ @pytest.mark.parametrize("controller_class", ALL_CONTROLLERS)
+ def test_limited(self, mocker, controller_class):
+ controller = patched_controller(mocker, controller_class)
self.wind_up(controller, True)
- self.assertEqual(controller.update(100, True), 100)
+ assert controller.update(100, True) == 100
- @parameterized.expand(ALL_CONTROLLERS)
- def test_windup_speed(self, controller_class):
- controller = patched_controller(controller_class)
+ @pytest.mark.parametrize("controller_class", ALL_CONTROLLERS)
+ def test_windup_speed(self, mocker, controller_class):
+ controller = patched_controller(mocker, controller_class)
self.wind_down(controller, True)
for _ in range(10):
controller.update(90, True)
- self.assertGreaterEqual(controller.update(90, True), 60)
-
-if __name__ == "__main__":
- unittest.main()
+ assert controller.update(90, True) >= 60
diff --git a/selfdrive/thermald/tests/test_power_monitoring.py b/selfdrive/thermald/tests/test_power_monitoring.py
index c3a890f068e658..f68191475b5b05 100755
--- a/selfdrive/thermald/tests/test_power_monitoring.py
+++ b/selfdrive/thermald/tests/test_power_monitoring.py
@@ -1,12 +1,10 @@
#!/usr/bin/env python3
-import unittest
-from unittest.mock import patch
+import pytest
from openpilot.common.params import Params
from openpilot.selfdrive.thermald.power_monitoring import PowerMonitoring, CAR_BATTERY_CAPACITY_uWh, \
CAR_CHARGING_RATE_W, VBATT_PAUSE_CHARGING, DELAY_SHUTDOWN_TIME_S
-
# Create fake time
ssb = 0.
def mock_time_monotonic():
@@ -18,163 +16,169 @@ def mock_time_monotonic():
GOOD_VOLTAGE = 12 * 1e3
VOLTAGE_BELOW_PAUSE_CHARGING = (VBATT_PAUSE_CHARGING - 1) * 1e3
-def pm_patch(name, value, constant=False):
+def pm_patch(mocker, name, value, constant=False):
if constant:
- return patch(f"openpilot.selfdrive.thermald.power_monitoring.{name}", value)
- return patch(f"openpilot.selfdrive.thermald.power_monitoring.{name}", return_value=value)
+ mocker.patch(f"openpilot.selfdrive.thermald.power_monitoring.{name}", value)
+ else:
+ mocker.patch(f"openpilot.selfdrive.thermald.power_monitoring.{name}", return_value=value)
+
+
+@pytest.fixture(autouse=True)
+def mock_time(mocker):
+ mocker.patch("time.monotonic", mock_time_monotonic)
-@patch("time.monotonic", new=mock_time_monotonic)
-class TestPowerMonitoring(unittest.TestCase):
- def setUp(self):
+class TestPowerMonitoring:
+ def setup_method(self):
self.params = Params()
# Test to see that it doesn't do anything when pandaState is None
- def test_pandaState_present(self):
+ def test_panda_state_present(self):
pm = PowerMonitoring()
for _ in range(10):
pm.calculate(None, None)
- self.assertEqual(pm.get_power_used(), 0)
- self.assertEqual(pm.get_car_battery_capacity(), (CAR_BATTERY_CAPACITY_uWh / 10))
+ assert pm.get_power_used() == 0
+ assert pm.get_car_battery_capacity() == (CAR_BATTERY_CAPACITY_uWh / 10)
# Test to see that it doesn't integrate offroad when ignition is True
def test_offroad_ignition(self):
pm = PowerMonitoring()
for _ in range(10):
pm.calculate(GOOD_VOLTAGE, True)
- self.assertEqual(pm.get_power_used(), 0)
+ assert pm.get_power_used() == 0
# Test to see that it integrates with discharging battery
- def test_offroad_integration_discharging(self):
+ def test_offroad_integration_discharging(self, mocker):
POWER_DRAW = 4
- with pm_patch("HARDWARE.get_current_power_draw", POWER_DRAW):
- pm = PowerMonitoring()
- for _ in range(TEST_DURATION_S + 1):
- pm.calculate(GOOD_VOLTAGE, False)
- expected_power_usage = ((TEST_DURATION_S/3600) * POWER_DRAW * 1e6)
- self.assertLess(abs(pm.get_power_used() - expected_power_usage), 10)
+ pm_patch(mocker, "HARDWARE.get_current_power_draw", POWER_DRAW)
+ pm = PowerMonitoring()
+ for _ in range(TEST_DURATION_S + 1):
+ pm.calculate(GOOD_VOLTAGE, False)
+ expected_power_usage = ((TEST_DURATION_S/3600) * POWER_DRAW * 1e6)
+ assert abs(pm.get_power_used() - expected_power_usage) < 10
# Test to check positive integration of car_battery_capacity
- def test_car_battery_integration_onroad(self):
+ def test_car_battery_integration_onroad(self, mocker):
POWER_DRAW = 4
- with pm_patch("HARDWARE.get_current_power_draw", POWER_DRAW):
- pm = PowerMonitoring()
- pm.car_battery_capacity_uWh = 0
- for _ in range(TEST_DURATION_S + 1):
- pm.calculate(GOOD_VOLTAGE, True)
- expected_capacity = ((TEST_DURATION_S/3600) * CAR_CHARGING_RATE_W * 1e6)
- self.assertLess(abs(pm.get_car_battery_capacity() - expected_capacity), 10)
+ pm_patch(mocker, "HARDWARE.get_current_power_draw", POWER_DRAW)
+ pm = PowerMonitoring()
+ pm.car_battery_capacity_uWh = 0
+ for _ in range(TEST_DURATION_S + 1):
+ pm.calculate(GOOD_VOLTAGE, True)
+ expected_capacity = ((TEST_DURATION_S/3600) * CAR_CHARGING_RATE_W * 1e6)
+ assert abs(pm.get_car_battery_capacity() - expected_capacity) < 10
# Test to check positive integration upper limit
- def test_car_battery_integration_upper_limit(self):
+ def test_car_battery_integration_upper_limit(self, mocker):
POWER_DRAW = 4
- with pm_patch("HARDWARE.get_current_power_draw", POWER_DRAW):
- pm = PowerMonitoring()
- pm.car_battery_capacity_uWh = CAR_BATTERY_CAPACITY_uWh - 1000
- for _ in range(TEST_DURATION_S + 1):
- pm.calculate(GOOD_VOLTAGE, True)
- estimated_capacity = CAR_BATTERY_CAPACITY_uWh + (CAR_CHARGING_RATE_W / 3600 * 1e6)
- self.assertLess(abs(pm.get_car_battery_capacity() - estimated_capacity), 10)
+ pm_patch(mocker, "HARDWARE.get_current_power_draw", POWER_DRAW)
+ pm = PowerMonitoring()
+ pm.car_battery_capacity_uWh = CAR_BATTERY_CAPACITY_uWh - 1000
+ for _ in range(TEST_DURATION_S + 1):
+ pm.calculate(GOOD_VOLTAGE, True)
+ estimated_capacity = CAR_BATTERY_CAPACITY_uWh + (CAR_CHARGING_RATE_W / 3600 * 1e6)
+ assert abs(pm.get_car_battery_capacity() - estimated_capacity) < 10
# Test to check negative integration of car_battery_capacity
- def test_car_battery_integration_offroad(self):
+ def test_car_battery_integration_offroad(self, mocker):
POWER_DRAW = 4
- with pm_patch("HARDWARE.get_current_power_draw", POWER_DRAW):
- pm = PowerMonitoring()
- pm.car_battery_capacity_uWh = CAR_BATTERY_CAPACITY_uWh
- for _ in range(TEST_DURATION_S + 1):
- pm.calculate(GOOD_VOLTAGE, False)
- expected_capacity = CAR_BATTERY_CAPACITY_uWh - ((TEST_DURATION_S/3600) * POWER_DRAW * 1e6)
- self.assertLess(abs(pm.get_car_battery_capacity() - expected_capacity), 10)
+ pm_patch(mocker, "HARDWARE.get_current_power_draw", POWER_DRAW)
+ pm = PowerMonitoring()
+ pm.car_battery_capacity_uWh = CAR_BATTERY_CAPACITY_uWh
+ for _ in range(TEST_DURATION_S + 1):
+ pm.calculate(GOOD_VOLTAGE, False)
+ expected_capacity = CAR_BATTERY_CAPACITY_uWh - ((TEST_DURATION_S/3600) * POWER_DRAW * 1e6)
+ assert abs(pm.get_car_battery_capacity() - expected_capacity) < 10
# Test to check negative integration lower limit
- def test_car_battery_integration_lower_limit(self):
+ def test_car_battery_integration_lower_limit(self, mocker):
POWER_DRAW = 4
- with pm_patch("HARDWARE.get_current_power_draw", POWER_DRAW):
- pm = PowerMonitoring()
- pm.car_battery_capacity_uWh = 1000
- for _ in range(TEST_DURATION_S + 1):
- pm.calculate(GOOD_VOLTAGE, False)
- estimated_capacity = 0 - ((1/3600) * POWER_DRAW * 1e6)
- self.assertLess(abs(pm.get_car_battery_capacity() - estimated_capacity), 10)
+ pm_patch(mocker, "HARDWARE.get_current_power_draw", POWER_DRAW)
+ pm = PowerMonitoring()
+ pm.car_battery_capacity_uWh = 1000
+ for _ in range(TEST_DURATION_S + 1):
+ pm.calculate(GOOD_VOLTAGE, False)
+ estimated_capacity = 0 - ((1/3600) * POWER_DRAW * 1e6)
+ assert abs(pm.get_car_battery_capacity() - estimated_capacity) < 10
# Test to check policy of stopping charging after MAX_TIME_OFFROAD_S
- def test_max_time_offroad(self):
+ def test_max_time_offroad(self, mocker):
MOCKED_MAX_OFFROAD_TIME = 3600
POWER_DRAW = 0 # To stop shutting down for other reasons
- with pm_patch("MAX_TIME_OFFROAD_S", MOCKED_MAX_OFFROAD_TIME, constant=True), pm_patch("HARDWARE.get_current_power_draw", POWER_DRAW):
- pm = PowerMonitoring()
- pm.car_battery_capacity_uWh = CAR_BATTERY_CAPACITY_uWh
- start_time = ssb
- ignition = False
- while ssb <= start_time + MOCKED_MAX_OFFROAD_TIME:
- pm.calculate(GOOD_VOLTAGE, ignition)
- if (ssb - start_time) % 1000 == 0 and ssb < start_time + MOCKED_MAX_OFFROAD_TIME:
- self.assertFalse(pm.should_shutdown(ignition, True, start_time, False))
- self.assertTrue(pm.should_shutdown(ignition, True, start_time, False))
-
- def test_car_voltage(self):
+ pm_patch(mocker, "MAX_TIME_OFFROAD_S", MOCKED_MAX_OFFROAD_TIME, constant=True)
+ pm_patch(mocker, "HARDWARE.get_current_power_draw", POWER_DRAW)
+ pm = PowerMonitoring()
+ pm.car_battery_capacity_uWh = CAR_BATTERY_CAPACITY_uWh
+ start_time = ssb
+ ignition = False
+ while ssb <= start_time + MOCKED_MAX_OFFROAD_TIME:
+ pm.calculate(GOOD_VOLTAGE, ignition)
+ if (ssb - start_time) % 1000 == 0 and ssb < start_time + MOCKED_MAX_OFFROAD_TIME:
+ assert not pm.should_shutdown(ignition, True, start_time, False)
+ assert pm.should_shutdown(ignition, True, start_time, False)
+
+ def test_car_voltage(self, mocker):
POWER_DRAW = 0 # To stop shutting down for other reasons
TEST_TIME = 350
VOLTAGE_SHUTDOWN_MIN_OFFROAD_TIME_S = 50
- with pm_patch("VOLTAGE_SHUTDOWN_MIN_OFFROAD_TIME_S", VOLTAGE_SHUTDOWN_MIN_OFFROAD_TIME_S, constant=True), \
- pm_patch("HARDWARE.get_current_power_draw", POWER_DRAW):
- pm = PowerMonitoring()
- pm.car_battery_capacity_uWh = CAR_BATTERY_CAPACITY_uWh
- ignition = False
- start_time = ssb
- for i in range(TEST_TIME):
- pm.calculate(VOLTAGE_BELOW_PAUSE_CHARGING, ignition)
- if i % 10 == 0:
- self.assertEqual(pm.should_shutdown(ignition, True, start_time, True),
- (pm.car_voltage_mV < VBATT_PAUSE_CHARGING * 1e3 and
- (ssb - start_time) > VOLTAGE_SHUTDOWN_MIN_OFFROAD_TIME_S and
- (ssb - start_time) > DELAY_SHUTDOWN_TIME_S))
- self.assertTrue(pm.should_shutdown(ignition, True, start_time, True))
+ pm_patch(mocker, "VOLTAGE_SHUTDOWN_MIN_OFFROAD_TIME_S", VOLTAGE_SHUTDOWN_MIN_OFFROAD_TIME_S, constant=True)
+ pm_patch(mocker, "HARDWARE.get_current_power_draw", POWER_DRAW)
+ pm = PowerMonitoring()
+ pm.car_battery_capacity_uWh = CAR_BATTERY_CAPACITY_uWh
+ ignition = False
+ start_time = ssb
+ for i in range(TEST_TIME):
+ pm.calculate(VOLTAGE_BELOW_PAUSE_CHARGING, ignition)
+ if i % 10 == 0:
+ assert pm.should_shutdown(ignition, True, start_time, True) == \
+ (pm.car_voltage_mV < VBATT_PAUSE_CHARGING * 1e3 and \
+ (ssb - start_time) > VOLTAGE_SHUTDOWN_MIN_OFFROAD_TIME_S and \
+ (ssb - start_time) > DELAY_SHUTDOWN_TIME_S)
+ assert pm.should_shutdown(ignition, True, start_time, True)
# Test to check policy of not stopping charging when DisablePowerDown is set
- def test_disable_power_down(self):
+ def test_disable_power_down(self, mocker):
POWER_DRAW = 0 # To stop shutting down for other reasons
TEST_TIME = 100
self.params.put_bool("DisablePowerDown", True)
- with pm_patch("HARDWARE.get_current_power_draw", POWER_DRAW):
- pm = PowerMonitoring()
- pm.car_battery_capacity_uWh = CAR_BATTERY_CAPACITY_uWh
- ignition = False
- for i in range(TEST_TIME):
- pm.calculate(VOLTAGE_BELOW_PAUSE_CHARGING, ignition)
- if i % 10 == 0:
- self.assertFalse(pm.should_shutdown(ignition, True, ssb, False))
- self.assertFalse(pm.should_shutdown(ignition, True, ssb, False))
+ pm_patch(mocker, "HARDWARE.get_current_power_draw", POWER_DRAW)
+ pm = PowerMonitoring()
+ pm.car_battery_capacity_uWh = CAR_BATTERY_CAPACITY_uWh
+ ignition = False
+ for i in range(TEST_TIME):
+ pm.calculate(VOLTAGE_BELOW_PAUSE_CHARGING, ignition)
+ if i % 10 == 0:
+ assert not pm.should_shutdown(ignition, True, ssb, False)
+ assert not pm.should_shutdown(ignition, True, ssb, False)
# Test to check policy of not stopping charging when ignition
- def test_ignition(self):
+ def test_ignition(self, mocker):
POWER_DRAW = 0 # To stop shutting down for other reasons
TEST_TIME = 100
- with pm_patch("HARDWARE.get_current_power_draw", POWER_DRAW):
- pm = PowerMonitoring()
- pm.car_battery_capacity_uWh = CAR_BATTERY_CAPACITY_uWh
- ignition = True
- for i in range(TEST_TIME):
- pm.calculate(VOLTAGE_BELOW_PAUSE_CHARGING, ignition)
- if i % 10 == 0:
- self.assertFalse(pm.should_shutdown(ignition, True, ssb, False))
- self.assertFalse(pm.should_shutdown(ignition, True, ssb, False))
+ pm_patch(mocker, "HARDWARE.get_current_power_draw", POWER_DRAW)
+ pm = PowerMonitoring()
+ pm.car_battery_capacity_uWh = CAR_BATTERY_CAPACITY_uWh
+ ignition = True
+ for i in range(TEST_TIME):
+ pm.calculate(VOLTAGE_BELOW_PAUSE_CHARGING, ignition)
+ if i % 10 == 0:
+ assert not pm.should_shutdown(ignition, True, ssb, False)
+ assert not pm.should_shutdown(ignition, True, ssb, False)
# Test to check policy of not stopping charging when harness is not connected
- def test_harness_connection(self):
+ def test_harness_connection(self, mocker):
POWER_DRAW = 0 # To stop shutting down for other reasons
TEST_TIME = 100
- with pm_patch("HARDWARE.get_current_power_draw", POWER_DRAW):
- pm = PowerMonitoring()
- pm.car_battery_capacity_uWh = CAR_BATTERY_CAPACITY_uWh
+ pm_patch(mocker, "HARDWARE.get_current_power_draw", POWER_DRAW)
+ pm = PowerMonitoring()
+ pm.car_battery_capacity_uWh = CAR_BATTERY_CAPACITY_uWh
- ignition = False
- for i in range(TEST_TIME):
- pm.calculate(VOLTAGE_BELOW_PAUSE_CHARGING, ignition)
- if i % 10 == 0:
- self.assertFalse(pm.should_shutdown(ignition, False, ssb, False))
- self.assertFalse(pm.should_shutdown(ignition, False, ssb, False))
+ ignition = False
+ for i in range(TEST_TIME):
+ pm.calculate(VOLTAGE_BELOW_PAUSE_CHARGING, ignition)
+ if i % 10 == 0:
+ assert not pm.should_shutdown(ignition, False, ssb, False)
+ assert not pm.should_shutdown(ignition, False, ssb, False)
def test_delay_shutdown_time(self):
pm = PowerMonitoring()
@@ -186,15 +190,11 @@ def test_delay_shutdown_time(self):
pm.calculate(VOLTAGE_BELOW_PAUSE_CHARGING, ignition)
while ssb < offroad_timestamp + DELAY_SHUTDOWN_TIME_S:
- self.assertFalse(pm.should_shutdown(ignition, in_car,
+ assert not pm.should_shutdown(ignition, in_car,
offroad_timestamp,
- started_seen),
- f"Should not shutdown before {DELAY_SHUTDOWN_TIME_S} seconds offroad time")
- self.assertTrue(pm.should_shutdown(ignition, in_car,
+ started_seen), \
+ f"Should not shutdown before {DELAY_SHUTDOWN_TIME_S} seconds offroad time"
+ assert pm.should_shutdown(ignition, in_car,
offroad_timestamp,
- started_seen),
- f"Should shutdown after {DELAY_SHUTDOWN_TIME_S} seconds offroad time")
-
-
-if __name__ == "__main__":
- unittest.main()
+ started_seen), \
+ f"Should shutdown after {DELAY_SHUTDOWN_TIME_S} seconds offroad time"
diff --git a/selfdrive/ui/qt/offroad/driverview.cc b/selfdrive/ui/qt/offroad/driverview.cc
index df9bb246510cfb..f14d0593327f70 100644
--- a/selfdrive/ui/qt/offroad/driverview.cc
+++ b/selfdrive/ui/qt/offroad/driverview.cc
@@ -7,7 +7,7 @@
const int FACE_IMG_SIZE = 130;
-DriverViewWindow::DriverViewWindow(QWidget* parent) : CameraWidget("camerad", VISION_STREAM_DRIVER, true, parent) {
+DriverViewWindow::DriverViewWindow(QWidget* parent) : CameraView("camerad", VISION_STREAM_DRIVER, true, parent) {
face_img = loadPixmap("../assets/img_driver_face_static.png", {FACE_IMG_SIZE, FACE_IMG_SIZE});
QObject::connect(this, &CameraWidget::clicked, this, &DriverViewWindow::done);
QObject::connect(device(), &Device::interactiveTimeout, this, [this]() {
@@ -20,22 +20,20 @@ DriverViewWindow::DriverViewWindow(QWidget* parent) : CameraWidget("camerad", VI
void DriverViewWindow::showEvent(QShowEvent* event) {
params.putBool("IsDriverViewEnabled", true);
device()->resetInteractiveTimeout(60);
- CameraWidget::showEvent(event);
+ CameraView::showEvent(event);
}
void DriverViewWindow::hideEvent(QHideEvent* event) {
params.putBool("IsDriverViewEnabled", false);
- stopVipcThread();
- CameraWidget::hideEvent(event);
+ CameraView::hideEvent(event);
}
void DriverViewWindow::paintGL() {
- CameraWidget::paintGL();
+ CameraView::paintGL();
- std::lock_guard lk(frame_lock);
QPainter p(this);
// startup msg
- if (frames.empty()) {
+ if (recent_frames.empty()) {
p.setPen(Qt::white);
p.setRenderHint(QPainter::TextAntialiasing);
p.setFont(InterFont(100, QFont::Bold));
@@ -47,7 +45,6 @@ void DriverViewWindow::paintGL() {
cereal::DriverStateV2::Reader driver_state = sm["driverStateV2"].getDriverStateV2();
bool is_rhd = driver_state.getWheelOnRightProb() > 0.5;
auto driver_data = is_rhd ? driver_state.getRightDriverData() : driver_state.getLeftDriverData();
-
bool face_detected = driver_data.getFaceProb() > 0.7;
if (face_detected) {
auto fxy_list = driver_data.getFacePosition();
diff --git a/selfdrive/ui/qt/offroad/driverview.h b/selfdrive/ui/qt/offroad/driverview.h
index 155e4ede32073d..97dd2faa7856b3 100644
--- a/selfdrive/ui/qt/offroad/driverview.h
+++ b/selfdrive/ui/qt/offroad/driverview.h
@@ -2,7 +2,7 @@
#include "selfdrive/ui/qt/widgets/cameraview.h"
-class DriverViewWindow : public CameraWidget {
+class DriverViewWindow : public CameraView {
Q_OBJECT
public:
diff --git a/selfdrive/ui/qt/onroad/annotated_camera.cc b/selfdrive/ui/qt/onroad/annotated_camera.cc
index f7fb6b480f0f7a..637d7275811335 100644
--- a/selfdrive/ui/qt/onroad/annotated_camera.cc
+++ b/selfdrive/ui/qt/onroad/annotated_camera.cc
@@ -6,11 +6,11 @@
#include
#include "common/swaglog.h"
-#include "selfdrive/ui/qt/onroad/buttons.h"
#include "selfdrive/ui/qt/util.h"
// Window that shows camera view and variety of info drawn on top
-AnnotatedCameraWidget::AnnotatedCameraWidget(VisionStreamType type, QWidget* parent) : fps_filter(UI_FREQ, 3, 1. / UI_FREQ), CameraWidget("camerad", type, true, parent) {
+AnnotatedCameraWidget::AnnotatedCameraWidget(VisionStreamType type, QWidget *parent)
+ : fps_filter(UI_FREQ, 3, 1. / UI_FREQ), CameraWidget("camerad", type, true, parent) {
pm = std::make_unique>({"uiDebug"});
main_layout = new QVBoxLayout(this);
@@ -76,6 +76,8 @@ void AnnotatedCameraWidget::updateState(const UIState &s) {
map_settings_btn->setVisible(!hideBottomIcons);
main_layout->setAlignment(map_settings_btn, (rightHandDM ? Qt::AlignLeft : Qt::AlignRight) | Qt::AlignBottom);
}
+
+ update();
}
void AnnotatedCameraWidget::drawHud(QPainter &p) {
@@ -353,25 +355,8 @@ void AnnotatedCameraWidget::paintGL() {
UIState *s = uiState();
SubMaster &sm = *(s->sm);
const double start_draw_t = millis_since_boot();
- const cereal::ModelDataV2::Reader &model = sm["modelV2"].getModelV2();
- // draw camera frame
{
- std::lock_guard lk(frame_lock);
-
- if (frames.empty()) {
- if (skip_frame_count > 0) {
- skip_frame_count--;
- qDebug() << "skipping frame, not ready";
- return;
- }
- } else {
- // skip drawing up to this many frames if we're
- // missing camera frames. this smooths out the
- // transitions from the narrow and wide cameras
- skip_frame_count = 5;
- }
-
// Wide or narrow cam dependent on speed
bool has_wide_cam = available_streams.count(VISION_STREAM_WIDE_ROAD);
if (has_wide_cam) {
@@ -387,14 +372,16 @@ void AnnotatedCameraWidget::paintGL() {
}
CameraWidget::setStreamType(wide_cam_requested ? VISION_STREAM_WIDE_ROAD : VISION_STREAM_ROAD);
- s->scene.wide_cam = CameraWidget::getStreamType() == VISION_STREAM_WIDE_ROAD;
+ s->scene.wide_cam = CameraWidget::streamType() == VISION_STREAM_WIDE_ROAD;
if (s->scene.calibration_valid) {
auto calib = s->scene.wide_cam ? s->scene.view_from_wide_calib : s->scene.view_from_calib;
CameraWidget::updateCalibration(calib);
} else {
CameraWidget::updateCalibration(DEFAULT_CALIBRATION);
}
- CameraWidget::setFrameId(model.getFrameId());
+
+ // Draw the frame based on the UI plan's frame ID
+ CameraWidget::setFrameId(sm["uiPlan"].getUiPlan().getFrameId());
CameraWidget::paintGL();
}
@@ -403,6 +390,7 @@ void AnnotatedCameraWidget::paintGL() {
painter.setPen(Qt::NoPen);
if (s->scene.world_objects_visible) {
+ const cereal::ModelDataV2::Reader &model = sm["modelV2"].getModelV2();
update_model(s, model, sm["uiPlan"].getUiPlan());
drawLaneLines(painter, s);
diff --git a/selfdrive/ui/qt/onroad/annotated_camera.h b/selfdrive/ui/qt/onroad/annotated_camera.h
index 0be4adfffa68dd..b25846001e2c66 100644
--- a/selfdrive/ui/qt/onroad/annotated_camera.h
+++ b/selfdrive/ui/qt/onroad/annotated_camera.h
@@ -37,7 +37,6 @@ class AnnotatedCameraWidget : public CameraWidget {
int status = STATUS_DISENGAGED;
std::unique_ptr pm;
- int skip_frame_count = 0;
bool wide_cam_requested = false;
protected:
diff --git a/selfdrive/ui/qt/onroad/onroad_home.cc b/selfdrive/ui/qt/onroad/onroad_home.cc
index 1c12a2c3a2e971..66eb1812e63a94 100644
--- a/selfdrive/ui/qt/onroad/onroad_home.cc
+++ b/selfdrive/ui/qt/onroad/onroad_home.cc
@@ -1,6 +1,7 @@
#include "selfdrive/ui/qt/onroad/onroad_home.h"
#include
+#include
#ifdef ENABLE_MAPS
#include "selfdrive/ui/qt/maps/map_helpers.h"
diff --git a/selfdrive/ui/qt/widgets/cameraview.cc b/selfdrive/ui/qt/widgets/cameraview.cc
index e4f13962683aa5..a7d4432de09671 100644
--- a/selfdrive/ui/qt/widgets/cameraview.cc
+++ b/selfdrive/ui/qt/widgets/cameraview.cc
@@ -97,37 +97,22 @@ mat4 get_fit_view_transform(float widget_aspect_ratio, float frame_aspect_ratio)
} // namespace
-CameraWidget::CameraWidget(std::string stream_name, VisionStreamType type, bool zoom, QWidget* parent) :
- stream_name(stream_name), active_stream_type(type), requested_stream_type(type), zoomed_view(zoom), QOpenGLWidget(parent) {
- setAttribute(Qt::WA_OpaquePaintEvent);
- qRegisterMetaType>("availableStreams");
- QObject::connect(this, &CameraWidget::vipcThreadConnected, this, &CameraWidget::vipcConnected, Qt::BlockingQueuedConnection);
- QObject::connect(this, &CameraWidget::vipcThreadFrameReceived, this, &CameraWidget::vipcFrameReceived, Qt::QueuedConnection);
- QObject::connect(this, &CameraWidget::vipcAvailableStreamsUpdated, this, &CameraWidget::availableStreamsUpdated, Qt::QueuedConnection);
+CameraWidget::CameraWidget(std::string stream_name, VisionStreamType type, bool zoom, QWidget *parent)
+ : stream_name(stream_name), stream_type(type), zoomed_view(zoom), QOpenGLWidget(parent) {
}
CameraWidget::~CameraWidget() {
makeCurrent();
- stopVipcThread();
if (isValid()) {
glDeleteVertexArrays(1, &frame_vao);
glDeleteBuffers(1, &frame_vbo);
glDeleteBuffers(1, &frame_ibo);
glDeleteBuffers(2, textures);
}
+ clearEGLImages();
doneCurrent();
}
-// Qt uses device-independent pixels, depending on platform this may be
-// different to what OpenGL uses
-int CameraWidget::glWidth() {
- return width() * devicePixelRatio();
-}
-
-int CameraWidget::glHeight() {
- return height() * devicePixelRatio();
-}
-
void CameraWidget::initializeGL() {
initializeOpenGLFunctions();
@@ -141,7 +126,7 @@ void CameraWidget::initializeGL() {
GLint frame_pos_loc = program->attributeLocation("aPosition");
GLint frame_texcoord_loc = program->attributeLocation("aTexCoord");
- auto [x1, x2, y1, y2] = requested_stream_type == VISION_STREAM_DRIVER ? std::tuple(0.f, 1.f, 1.f, 0.f) : std::tuple(1.f, 0.f, 1.f, 0.f);
+ auto [x1, x2, y1, y2] = stream_type == VISION_STREAM_DRIVER ? std::tuple(0.f, 1.f, 1.f, 0.f) : std::tuple(1.f, 0.f, 1.f, 0.f);
const uint8_t frame_indicies[] = {0, 1, 2, 0, 2, 3};
const float frame_coords[4][4] = {
{-1.0, -1.0, x2, y1}, // bl
@@ -178,45 +163,11 @@ void CameraWidget::initializeGL() {
#endif
}
-void CameraWidget::showEvent(QShowEvent *event) {
- if (!vipc_thread) {
- clearFrames();
- vipc_thread = new QThread();
- connect(vipc_thread, &QThread::started, [=]() { vipcThread(); });
- connect(vipc_thread, &QThread::finished, vipc_thread, &QObject::deleteLater);
- vipc_thread->start();
- }
-}
-
-void CameraWidget::stopVipcThread() {
- makeCurrent();
- if (vipc_thread) {
- vipc_thread->requestInterruption();
- vipc_thread->quit();
- vipc_thread->wait();
- vipc_thread = nullptr;
- }
-
-#ifdef QCOM2
- EGLDisplay egl_display = eglGetCurrentDisplay();
- assert(egl_display != EGL_NO_DISPLAY);
- for (auto &pair : egl_images) {
- eglDestroyImageKHR(egl_display, pair.second);
- assert(eglGetError() == EGL_SUCCESS);
- }
- egl_images.clear();
-#endif
-}
-
-void CameraWidget::availableStreamsUpdated(std::set streams) {
- available_streams = streams;
-}
-
void CameraWidget::updateFrameMat() {
int w = glWidth(), h = glHeight();
if (zoomed_view) {
- if (active_stream_type == VISION_STREAM_DRIVER) {
+ if (streamType() == VISION_STREAM_DRIVER) {
if (stream_width > 0 && stream_height > 0) {
frame_mat = get_driver_view_transform(w, h, stream_width, stream_height);
}
@@ -225,7 +176,7 @@ void CameraWidget::updateFrameMat() {
// to ensure this ends up in the middle of the screen
// for narrow come and a little lower for wide cam.
// TODO: use proper perspective transform?
- if (active_stream_type == VISION_STREAM_WIDE_ROAD) {
+ if (streamType() == VISION_STREAM_WIDE_ROAD) {
intrinsic_matrix = ECAM_INTRINSIC_MATRIX;
zoom = 2.0;
} else {
@@ -247,13 +198,12 @@ void CameraWidget::updateFrameMat() {
float zx = zoom * 2 * intrinsic_matrix.v[2] / w;
float zy = zoom * 2 * intrinsic_matrix.v[5] / h;
- const mat4 frame_transform = {{
+ frame_mat = {{
zx, 0.0, 0.0, -x_offset / w * 2,
0.0, zy, 0.0, y_offset / h * 2,
0.0, 0.0, 1.0, 0.0,
0.0, 0.0, 0.0, 1.0,
}};
- frame_mat = frame_transform;
}
} else if (stream_width > 0 && stream_height > 0) {
// fit frame to widget size
@@ -271,25 +221,15 @@ void CameraWidget::paintGL() {
glClearColor(bg.redF(), bg.greenF(), bg.blueF(), bg.alphaF());
glClear(GL_STENCIL_BUFFER_BIT | GL_COLOR_BUFFER_BIT);
- std::lock_guard lk(frame_lock);
- if (frames.empty()) return;
-
- int frame_idx = frames.size() - 1;
-
- // Always draw latest frame until sync logic is more stable
- // for (frame_idx = 0; frame_idx < frames.size() - 1; frame_idx++) {
- // if (frames[frame_idx].first == draw_frame_id) break;
- // }
+ VisionBuf *frame = receiveFrame(draw_frame_id);
+ if (!frame) return;
// Log duplicate/dropped frames
- if (frames[frame_idx].first == prev_frame_id) {
- qDebug() << "Drawing same frame twice" << frames[frame_idx].first;
- } else if (frames[frame_idx].first != prev_frame_id + 1) {
- qDebug() << "Skipped frame" << frames[frame_idx].first;
+ uint32_t frame_id = frame->get_frame_id();
+ if (prev_frame_id != INVALID_FRAME_ID && frame_id != prev_frame_id + 1) {
+ qDebug() << (frame_id == prev_frame_id ? "Drawing same frame twice" : "Skip frame") << frame_id;
}
- prev_frame_id = frames[frame_idx].first;
- VisionBuf *frame = frames[frame_idx].second;
- assert(frame != nullptr);
+ prev_frame_id = frame_id;
updateFrameMat();
@@ -329,20 +269,14 @@ void CameraWidget::paintGL() {
glPixelStorei(GL_UNPACK_ROW_LENGTH, 0);
}
-void CameraWidget::vipcConnected(VisionIpcClient *vipc_client) {
- makeCurrent();
+void CameraWidget::vipcConnected() {
stream_width = vipc_client->buffers[0].width;
stream_height = vipc_client->buffers[0].height;
stream_stride = vipc_client->buffers[0].stride;
#ifdef QCOM2
+ clearEGLImages();
EGLDisplay egl_display = eglGetCurrentDisplay();
- assert(egl_display != EGL_NO_DISPLAY);
- for (auto &pair : egl_images) {
- eglDestroyImageKHR(egl_display, pair.second);
- }
- egl_images.clear();
-
for (int i = 0; i < vipc_client->num_buffers; i++) { // import buffers into OpenGL
int fd = dup(vipc_client->buffers[i].fd); // eglDestroyImageKHR will close, so duplicate
EGLint img_attrs[] = {
@@ -379,59 +313,78 @@ void CameraWidget::vipcConnected(VisionIpcClient *vipc_client) {
#endif
}
-void CameraWidget::vipcFrameReceived() {
- update();
-}
+VisionBuf *CameraWidget::receiveFrame(uint64_t request_frame_id) {
+ if (!ensureConnection()) {
+ return nullptr;
+ }
-void CameraWidget::vipcThread() {
- VisionStreamType cur_stream = requested_stream_type;
- std::unique_ptr vipc_client;
- VisionIpcBufExtra meta_main = {0};
-
- while (!QThread::currentThread()->isInterruptionRequested()) {
- if (!vipc_client || cur_stream != requested_stream_type) {
- clearFrames();
- qDebug().nospace() << "connecting to stream " << requested_stream_type << ", was connected to " << cur_stream;
- cur_stream = requested_stream_type;
- vipc_client.reset(new VisionIpcClient(stream_name, cur_stream, false));
+ // Receive frames and store them in recent_frames
+ while (auto buf = vipc_client->recv(nullptr, 0)) {
+ recent_frames.emplace_back(buf);
+ if (recent_frames.size() > FRAME_BUFFER_SIZE) {
+ recent_frames.pop_front();
}
- active_stream_type = cur_stream;
-
- if (!vipc_client->connected) {
- clearFrames();
- auto streams = VisionIpcClient::getAvailableStreams(stream_name, false);
- if (streams.empty()) {
- QThread::msleep(100);
- continue;
- }
- emit vipcAvailableStreamsUpdated(streams);
+ }
+ if (!vipc_client->connected || recent_frames.empty()) {
+ return nullptr;
+ }
- if (!vipc_client->connect(false)) {
- QThread::msleep(100);
- continue;
- }
- emit vipcThreadConnected(vipc_client.get());
- }
+ // Find the requested frame
+ auto it = std::find_if(recent_frames.rbegin(), recent_frames.rend(),
+ [request_frame_id](VisionBuf *buf) { return buf->get_frame_id() == request_frame_id; });
+ return it != recent_frames.rend() ? *it : recent_frames.back();
+}
- if (VisionBuf *buf = vipc_client->recv(&meta_main, 1000)) {
- {
- std::lock_guard lk(frame_lock);
- frames.push_back(std::make_pair(meta_main.frame_id, buf));
- while (frames.size() > FRAME_BUFFER_SIZE) {
- frames.pop_front();
- }
- }
- emit vipcThreadFrameReceived();
- } else {
- if (!isVisible()) {
- vipc_client->connected = false;
- }
+bool CameraWidget::ensureConnection() {
+ // Reconnect if the client is not initialized or the stream type has changed
+ if (!vipc_client || vipc_client->type != stream_type) {
+ qDebug() << "connecting to stream" << stream_type;
+ vipc_client.reset(new VisionIpcClient(stream_name, stream_type, false));
+ }
+
+ // Re-establish connection if not connected
+ if (!vipc_client->connected) {
+ clearFrames();
+ available_streams = VisionIpcClient::getAvailableStreams(stream_name, false);
+ if (available_streams.empty() || !vipc_client->connect(false)) {
+ return false;
}
+ emit vipcAvailableStreamsUpdated();
+ vipcConnected();
}
+ return true;
}
void CameraWidget::clearFrames() {
- std::lock_guard lk(frame_lock);
- frames.clear();
- available_streams.clear();
+ recent_frames.clear();
+ draw_frame_id = INVALID_FRAME_ID;
+ prev_frame_id = INVALID_FRAME_ID;
+}
+
+void CameraWidget::clearEGLImages() {
+#ifdef QCOM2
+ EGLDisplay egl_display = eglGetCurrentDisplay();
+ assert(egl_display != EGL_NO_DISPLAY);
+ for (auto &pair : egl_images) {
+ eglDestroyImageKHR(egl_display, pair.second);
+ }
+ egl_images.clear();
+#endif
+}
+
+// Cameraview
+
+CameraView::CameraView(const std::string &name, VisionStreamType stream_type, bool zoom, QWidget *parent)
+ : CameraWidget(name, stream_type, zoom, parent) {
+ timer = new QTimer(this);
+ timer->setInterval(1000.0 / UI_FREQ);
+ timer->callOnTimeout(this, [this]() { update(); });
+}
+
+void CameraView::showEvent(QShowEvent *event) {
+ timer->start();
+}
+
+void CameraView::hideEvent(QHideEvent *event) {
+ timer->stop();
}
diff --git a/selfdrive/ui/qt/widgets/cameraview.h b/selfdrive/ui/qt/widgets/cameraview.h
index c97038cf436e6d..fb31d094bbe645 100644
--- a/selfdrive/ui/qt/widgets/cameraview.h
+++ b/selfdrive/ui/qt/widgets/cameraview.h
@@ -3,7 +3,6 @@
#include
#include