diff --git a/docs/source/physical_robot_core_setup/index.rst b/docs/source/physical_robot_core_setup/index.rst
index 533a0b640..012c40f91 100644
--- a/docs/source/physical_robot_core_setup/index.rst
+++ b/docs/source/physical_robot_core_setup/index.rst
@@ -39,6 +39,8 @@ On the RPi adjust the config in `/boot/config.txt` or on newer systems `/boot/fi
 ------------------
 Setting up the RPi
 ------------------
+**Note**: For students in the CI Group, the RPi is already set up. All RPis are flashed with the same image, so the following steps are not necessary. Additionally, there should be an IP address on the head, allowing you to SSH into it. However, be aware that there will always be ongoing development changes in the revolve2-modular-robot_physical and revolve2-robohat packages, so make sure to pip install the latest version in your virtual environment.
+
 This step is the same for all types of hardware.
 
 #. Flash the SD card with Raspberry Pi OS (previously Raspbian). Some Important notes:
diff --git a/examples/5_physical_modular_robots/5a_physical_robot_remote/main.py b/examples/5_physical_modular_robots/5a_physical_robot_remote/main.py
index b7ae25644..b63898ef8 100644
--- a/examples/5_physical_modular_robots/5a_physical_robot_remote/main.py
+++ b/examples/5_physical_modular_robots/5a_physical_robot_remote/main.py
@@ -1,9 +1,12 @@
 """An example on how to remote control a physical modular robot."""
+from pyrr import Vector3
+
 from revolve2.experimentation.rng import make_rng_time_seed
 from revolve2.modular_robot import ModularRobot
 from revolve2.modular_robot.body import RightAngles
 from revolve2.modular_robot.body.base import ActiveHinge
+from revolve2.modular_robot.body.sensors import CameraSensor
 from revolve2.modular_robot.body.v2 import ActiveHingeV2, BodyV2, BrickV2
 from revolve2.modular_robot.brain.cpg import BrainCpgNetworkNeighborRandom
 from revolve2.modular_robot_physical import Config, UUIDKey
@@ -40,6 +43,10 @@ def make_body() -> (
         body.core_v2.right_face.bottom,
         body.core_v2.right_face.bottom.attachment,
     )
+    """Add a camera sensor to the core."""
+    body.core.add_sensor(
+        CameraSensor(position=Vector3([0, 0, 0]), camera_size=(480, 640))
+    )
     return body, active_hinges
@@ -100,6 +107,7 @@ def main() -> None:
     Create a Remote for the physical modular robot.
     Make sure to target the correct hardware type and fill in the correct IP and credentials.
     The debug flag is turned on. If the remote complains it cannot keep up, turning off debugging might improve performance.
+    If you want to display the camera view, set display_camera_view to True.
     """
     print("Initializing robot..")
     run_remote(
@@ -107,6 +115,7 @@ def main() -> None:
         hostname="localhost",  # "Set the robot IP here.
         debug=True,
         on_prepared=on_prepared,
+        display_camera_view=False,
     )
     """
     Note that theoretically if you want the robot to be self controlled and not dependant on a external remote, you can run this script on the robot locally.
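Taken together, the example's camera additions boil down to two things: attaching a CameraSensor to the core and passing the new display_camera_view flag to run_remote. Below is a minimal sketch, not part of the diff, that pulls the sensor side together; it assumes BodyV2 can be constructed with no arguments, as in the revolve2 examples, and leaves the Config and remote setup exactly as in main.py above.

```python
# Sketch only: attach the new camera sensor to a body, mirroring make_body() above.
# Assumes a default-constructible BodyV2 as used in the revolve2 examples.
from pyrr import Vector3

from revolve2.modular_robot.body.sensors import CameraSensor
from revolve2.modular_robot.body.v2 import BodyV2

body = BodyV2()
body.core.add_sensor(
    CameraSensor(position=Vector3([0, 0, 0]), camera_size=(480, 640))
)

# The remote later retrieves the sensor via this attribute path
# (see _display_camera_view in _remote.py further down).
print(body.core.sensors.camera_sensor)
```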
diff --git a/modular_robot_physical/pyproject.toml b/modular_robot_physical/pyproject.toml
index 6adc977a7..da3c1b8d5 100644
--- a/modular_robot_physical/pyproject.toml
+++ b/modular_robot_physical/pyproject.toml
@@ -32,8 +32,9 @@ pyrr = "^0.10.3"
 typed-argparse = "^0.3.1"
 pycapnp = { version = "^2.0.0b2" }
 pigpio = { version = "^1.78", optional = true }
-revolve2-robohat = { version = "0.5.0", optional = true }
+revolve2-robohat = { version = "0.6.2", optional = true }
 rpi-lgpio = { version = "0.5", optional = true }
+opencv-python = "^4.10.0.84"
 # cpnp-stub-generator is disabled because it depends on pycapnp <2.0.0.
 # It is rarely used and by developers only, so we remove it for now and developers can install it manually.
 # If you manually install it make sure you also install the correct pycpanp version afterwards as it will be overridden.
diff --git a/modular_robot_physical/revolve2/modular_robot_physical/physical_interfaces/v2/_v2_physical_interface.py b/modular_robot_physical/revolve2/modular_robot_physical/physical_interfaces/v2/_v2_physical_interface.py
index 5f73f5923..7d34ddec7 100644
--- a/modular_robot_physical/revolve2/modular_robot_physical/physical_interfaces/v2/_v2_physical_interface.py
+++ b/modular_robot_physical/revolve2/modular_robot_physical/physical_interfaces/v2/_v2_physical_interface.py
@@ -1,13 +1,13 @@
 import math
 from typing import Sequence
 
-import cv2
 import numpy as np
 from numpy.typing import NDArray
 from pyrr import Vector3
 from robohatlib.hal.assemblyboard.PwmPlug import PwmPlug
 from robohatlib.hal.assemblyboard.servo.ServoData import ServoData
 from robohatlib.hal.assemblyboard.ServoAssemblyConfig import ServoAssemblyConfig
+from robohatlib.hal.Camera import Camera
 from robohatlib.Robohat import Robohat
 
 from .._physical_interface import PhysicalInterface
@@ -84,6 +84,7 @@ def __init__(self, debug: bool, dry: bool) -> None:
         self._robohat.init(servoboard_1_datas_list, servoboard_2_datas_list)
         self._robohat.do_buzzer_beep()
         self._robohat.set_servo_direct_mode(_mode=True)
+        self.cam: Camera = self._robohat.get_camera()
 
     def set_servo_targets(self, pins: list[int], targets: list[float]) -> None:
         """
@@ -181,17 +182,7 @@ def get_camera_view(self) -> NDArray[np.uint8]:
         """
         Get the current view from the camera.
 
-        :returns: A dummy image until robohatlib has camera support.
-        """
-        image = np.zeros((3, 100, 100), dtype=int)
-        cv2.putText(
-            image,
-            "Dummy Image",
-            (10, 10),
-            fontFace=cv2.FONT_HERSHEY_SIMPLEX,
-            fontScale=1,
-            color=(255, 0, 0),
-            thickness=1,
-            lineType=2,
-        )
+        :returns: An image captured from robohatlib.
+ """ + image = self.cam.get_capture_array().astype(np.uint8) return image diff --git a/modular_robot_physical/revolve2/modular_robot_physical/remote/_remote.py b/modular_robot_physical/revolve2/modular_robot_physical/remote/_remote.py index 6bbf0feca..085b16233 100644 --- a/modular_robot_physical/revolve2/modular_robot_physical/remote/_remote.py +++ b/modular_robot_physical/revolve2/modular_robot_physical/remote/_remote.py @@ -3,6 +3,7 @@ from typing import Callable import capnp +import cv2 import numpy as np from numpy.typing import NDArray from pyrr import Vector3 @@ -50,6 +51,7 @@ async def _run_remote_impl( port: int, debug: bool, manual_mode: bool, + display_camera_view: bool, ) -> None: active_hinge_sensor_to_pin = { UUIDKey(key.value.sensors.active_hinge_sensor): pin @@ -207,6 +209,7 @@ async def _run_remote_impl( pin_controls = _active_hinge_targets_to_pin_controls( config, control_interface._set_active_hinges ) + match hardware_type: case HardwareType.v1: await service.control( @@ -242,6 +245,13 @@ async def _run_remote_impl( camera_sensor_states=camera_sensor_states, ) + # Display camera image + if display_camera_view: + _display_camera_view( + config.modular_robot.body.core.sensors.camera_sensor, + sensor_readings, + ) + if battery_print_timer > 5.0: print( f"Battery level is at {sensor_readings.battery * 100.0}%." @@ -265,11 +275,17 @@ def _capnp_to_camera_view( :param camera_size: The camera size to reconstruct the image. :return: The NDArray imag. """ - np_image = np.zeros(shape=(3, *camera_size), dtype=np.uint8) - np_image[0] = np.array(image.r).reshape(camera_size).astype(np.uint8) - np_image[1] = np.array(image.g).reshape(camera_size).astype(np.uint8) - np_image[2] = np.array(image.b).reshape(camera_size).astype(np.uint8) - return np_image + r_channel = np.array(image.r, dtype=np.uint8).reshape( + (camera_size[0], camera_size[1]) + ) + g_channel = np.array(image.g, dtype=np.uint8).reshape( + (camera_size[0], camera_size[1]) + ) + b_channel = np.array(image.b, dtype=np.uint8).reshape( + (camera_size[0], camera_size[1]) + ) + rgb_image = cv2.merge((r_channel, g_channel, b_channel)).astype(np.uint8) + return rgb_image def _get_imu_sensor_state( @@ -318,6 +334,26 @@ def _get_camera_sensor_state( } +def _display_camera_view( + camera_sensor: CameraSensor | None, + sensor_readings: robot_daemon_protocol_capnp.SensorReadings, +) -> None: + """ + Display a camera view from the camera readings. + + :param camera_sensor: The sensor in question. + :param sensor_readings: The sensor readings. + """ + if camera_sensor is None: + print("No camera sensor found.") + else: + rgb_image = _capnp_to_camera_view( + sensor_readings.cameraView, camera_sensor.camera_size + ) + cv2.imshow("Captured Image", rgb_image) + cv2.waitKey(1) + + def run_remote( config: Config, hostname: str, @@ -325,6 +361,7 @@ def run_remote( port: int = STANDARD_PORT, debug: bool = False, manual_mode: bool = False, + display_camera_view: bool = False, ) -> None: """ Control a robot remotely, running the controller on your local machine. @@ -335,6 +372,7 @@ def run_remote( :param port: Port the robot daemon uses. :param debug: Enable debug messages. :param manual_mode: Enable manual controls for the robot, ignoring the brain. + :param display_camera_view: Display the camera view of the robot. 
""" asyncio.run( capnp.run( @@ -345,6 +383,7 @@ def run_remote( port=port, debug=debug, manual_mode=manual_mode, + display_camera_view=display_camera_view, ) ) ) diff --git a/modular_robot_physical/revolve2/modular_robot_physical/robot_daemon/_robo_server_impl.py b/modular_robot_physical/revolve2/modular_robot_physical/robot_daemon/_robo_server_impl.py index 39e8ab6b8..30824cb2c 100644 --- a/modular_robot_physical/revolve2/modular_robot_physical/robot_daemon/_robo_server_impl.py +++ b/modular_robot_physical/revolve2/modular_robot_physical/robot_daemon/_robo_server_impl.py @@ -293,8 +293,9 @@ def _camera_view_to_capnp(image: NDArray[np.uint8]) -> capnpImage: :param image: The NDArray image. :return: The capnp Image object. """ + # Convert each channel to a list of Int32 for Cap'n Proto return robot_daemon_protocol_capnp.Image( - r=image[0].flatten().tolist(), - g=image[1].flatten().tolist(), - b=image[2].flatten().tolist(), + r=image[:, :, 0].astype(np.int32).flatten().tolist(), + g=image[:, :, 1].astype(np.int32).flatten().tolist(), + b=image[:, :, 2].astype(np.int32).flatten().tolist(), )