Add camera support for v2 physical interfaces #580

Merged · 14 commits · Oct 8, 2024
2 changes: 2 additions & 0 deletions docs/source/physical_robot_core_setup/index.rst
@@ -39,6 +39,8 @@ On the RPi adjust the config in `/boot/config.txt` or on newer systems `/boot/firmware/config.txt`
------------------
Setting up the RPi
------------------
**Note**: For students in the CI Group, the RPi is already set up. All RPis are flashed with the same image, so the following steps are not necessary. Additionally, there should be an IP address on the head, allowing you to SSH into it. However, the revolve2-modular-robot_physical and revolve2-robohat packages are under continuous development, so make sure to pip install their latest versions into your virtual environment.

This step is the same for all types of hardware.

#. Flash the SD card with Raspberry Pi OS (previously Raspbian). Some important notes:
@@ -107,6 +107,7 @@ def main() -> None:
hostname="localhost", # "Set the robot IP here.
debug=True,
on_prepared=on_prepared,
camera_mode=False,
)
"""
Note that, theoretically, if you want the robot to be self-controlled and not dependent on an external remote, you can run this script locally on the robot.
3 changes: 2 additions & 1 deletion modular_robot_physical/pyproject.toml
@@ -32,8 +32,9 @@ pyrr = "^0.10.3"
typed-argparse = "^0.3.1"
pycapnp = { version = "^2.0.0b2" }
pigpio = { version = "^1.78", optional = true }
revolve2-robohat = { version = "0.5.0", optional = true }
revolve2-robohat = { version = "0.6.1", optional = true }
rpi-lgpio = { version = "0.5", optional = true }
opencv-python = "^4.10.0.84"
# capnp-stub-generator is disabled because it depends on pycapnp <2.0.0.
# It is rarely used and by developers only, so we remove it for now and developers can install it manually.
# If you manually install it, make sure you also install the correct pycapnp version afterwards, as it will be overridden.
Expand Down
@@ -1,13 +1,13 @@
import math
from typing import Sequence

import cv2
import numpy as np
from numpy.typing import NDArray
from pyrr import Vector3
from robohatlib.hal.assemblyboard.PwmPlug import PwmPlug
from robohatlib.hal.assemblyboard.servo.ServoData import ServoData
from robohatlib.hal.assemblyboard.ServoAssemblyConfig import ServoAssemblyConfig
from robohatlib.hal.Camera import Camera
from robohatlib.Robohat import Robohat

from .._physical_interface import PhysicalInterface
@@ -84,6 +84,7 @@ def __init__(self, debug: bool, dry: bool) -> None:
self._robohat.init(servoboard_1_datas_list, servoboard_2_datas_list)
self._robohat.do_buzzer_beep()
self._robohat.set_servo_direct_mode(_mode=True)
self.cam: Camera = self._robohat.get_camera()

def set_servo_targets(self, pins: list[int], targets: list[float]) -> None:
"""
@@ -181,17 +182,7 @@ def get_camera_view(self) -> NDArray[np.uint8]:
"""
Get the current view from the camera.

:returns: A dummy image until robohatlib has camera support.
"""
image = np.zeros((3, 100, 100), dtype=int)
cv2.putText(
image,
"Dummy Image",
(10, 10),
fontFace=cv2.FONT_HERSHEY_SIMPLEX,
fontScale=1,
color=(255, 0, 0),
thickness=1,
lineType=2,
)
:returns: An image captured from robohatlib.
"""
image = self.cam.get_capture_array()
return image
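Nothing in this interface states the layout that `get_capture_array` returns; the Cap'n Proto conversion later in this PR indexes frames as `image[:, :, channel]`, which implies an (height, width, 3) uint8 array. A minimal defensive sketch under that assumption; the helper name is illustrative and not part of the PR:

import numpy as np
from numpy.typing import NDArray


def _checked_camera_view(image: NDArray[np.uint8]) -> NDArray[np.uint8]:
    # Illustrative only: verify a captured frame matches the (height, width, 3)
    # layout assumed by the downstream Cap'n Proto conversion in this PR.
    if image.ndim != 3 or image.shape[-1] != 3:
        raise ValueError(f"Expected an (H, W, 3) frame, got shape {image.shape}.")
    return np.ascontiguousarray(image, dtype=np.uint8)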
@@ -3,10 +3,10 @@
from typing import Callable

import capnp
import cv2
import numpy as np
from numpy.typing import NDArray
from pyrr import Vector3

from revolve2.modular_robot.body.base import ActiveHinge
from revolve2.modular_robot.body.sensors import CameraSensor, IMUSensor
from revolve2.modular_robot.sensor_state import ModularRobotSensorState
@@ -23,7 +23,6 @@
from ._modular_robot_sensor_state_impl_v1 import ModularRobotSensorStateImplV1
from ._modular_robot_sensor_state_impl_v2 import ModularRobotSensorStateImplV2


def _active_hinge_targets_to_pin_controls(
config: Config, active_hinges_and_targets: list[tuple[UUIDKey[ActiveHinge], float]]
) -> list[robot_daemon_protocol_capnp.PinControl]:
@@ -50,6 +49,7 @@ async def _run_remote_impl(
port: int,
debug: bool,
manual_mode: bool,
camera_mode: bool,
) -> None:
active_hinge_sensor_to_pin = {
UUIDKey(key.value.sensors.active_hinge_sensor): pin
@@ -207,6 +207,7 @@ async def _run_remote_impl(
pin_controls = _active_hinge_targets_to_pin_controls(
config, control_interface._set_active_hinges
)

match hardware_type:
case HardwareType.v1:
await service.control(
@@ -242,6 +243,11 @@
camera_sensor_states=camera_sensor_states,
)

# Display camera image
if camera_mode:
capnp_image = sensor_readings.cameraView
_display_camera_view(capnp_image)

if battery_print_timer > 5.0:
print(
f"Battery level is at {sensor_readings.battery * 100.0}%."
@@ -318,13 +324,31 @@ def _get_camera_sensor_state(
}


def _display_camera_view(
    camera_view: robot_daemon_protocol_capnp.Image,
) -> None:
    """
    Convert a Cap'n Proto image back into an ndarray and display it with OpenCV.

    :param camera_view: The Cap'n Proto image received from the robot.
    """
    # The capture resolution is assumed to be 640x480.
    height = 480
    width = 640
    r_channel = np.array(camera_view.r, dtype=np.uint8).reshape((height, width))
    g_channel = np.array(camera_view.g, dtype=np.uint8).reshape((height, width))
    b_channel = np.array(camera_view.b, dtype=np.uint8).reshape((height, width))
    # OpenCV displays images in BGR channel order, so merge accordingly.
    bgr_image = cv2.merge((b_channel, g_channel, r_channel))

    cv2.imshow("Captured Image", bgr_image)
    cv2.waitKey(1)
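The display path above can be exercised without a robot by feeding it a synthetic frame. A sketch, assuming it lives in this same module (so `robot_daemon_protocol_capnp` and `_display_camera_view` are in scope) and using the 640x480 resolution assumed above; the function name is illustrative, not part of the PR:

def _demo_display_camera_view() -> None:
    # Illustrative only: build a solid red 640x480 frame and push it through
    # the same display path used for real robot frames.
    height, width = 480, 640
    red = [255] * (height * width)
    zeros = [0] * (height * width)
    fake_view = robot_daemon_protocol_capnp.Image(r=red, g=zeros, b=zeros)
    _display_camera_view(fake_view)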


def run_remote(
config: Config,
hostname: str,
on_prepared: Callable[[], None] = lambda: None,
port: int = STANDARD_PORT,
debug: bool = False,
manual_mode: bool = False,
camera_mode: bool = False,
) -> None:
"""
Control a robot remotely, running the controller on your local machine.
Expand All @@ -335,6 +359,7 @@ def run_remote(
:param port: Port the robot daemon uses.
:param debug: Enable debug messages.
:param manual_mode: Enable manual controls for the robot, ignoring the brain.
:param camera_mode: Display the camera view of the robot.
"""
asyncio.run(
capnp.run(
Expand All @@ -345,6 +370,7 @@ def run_remote(
port=port,
debug=debug,
manual_mode=manual_mode,
camera_mode=camera_mode,
)
)
)
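With `camera_mode` plumbed through to `run_remote`, showing the robot's camera feed on the operator machine only requires flipping the flag. A usage sketch; the import paths, the IP address, and the wrapper function are assumptions, and `config` stands for the `Config` object already built in the existing physical-robot examples:

from revolve2.modular_robot_physical import Config  # import paths are assumptions
from revolve2.modular_robot_physical.remote import run_remote


def view_robot_camera(config: Config) -> None:
    # Run the usual remote control loop, additionally opening an OpenCV
    # window that shows the robot's camera stream.
    run_remote(
        config=config,  # the Config built as in the existing examples
        hostname="10.15.3.100",  # placeholder robot IP
        debug=True,
        camera_mode=True,
    )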
@@ -293,8 +293,9 @@ def _camera_view_to_capnp(image: NDArray[np.uint8]) -> capnpImage:
:param image: The NDArray image.
:return: The capnp Image object.
"""
# Convert each channel to a list of Int32 for Cap'n Proto
return robot_daemon_protocol_capnp.Image(
r=image[0].flatten().tolist(),
g=image[1].flatten().tolist(),
b=image[2].flatten().tolist(),
r=image[:, :, 0].astype(np.int32).flatten().tolist(),
g=image[:, :, 1].astype(np.int32).flatten().tolist(),
b=image[:, :, 2].astype(np.int32).flatten().tolist(),
)
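Together with `_display_camera_view` on the remote side, this fixes the on-wire format: three flattened Int32 channel lists that the remote reshapes back into a 480x640 image. A round-trip sketch under that fixed-resolution assumption; the function name is illustrative, not part of the PR:

import numpy as np


def _roundtrip_check() -> None:
    # Illustrative only: encode a random 480x640 RGB frame the way the daemon
    # does and decode it the way the remote does, then verify they agree.
    height, width = 480, 640
    frame = np.random.randint(0, 256, size=(height, width, 3), dtype=np.uint8)

    # Daemon side: split and flatten each channel (as in _camera_view_to_capnp).
    r = frame[:, :, 0].astype(np.int32).flatten().tolist()
    g = frame[:, :, 1].astype(np.int32).flatten().tolist()
    b = frame[:, :, 2].astype(np.int32).flatten().tolist()

    # Remote side: reshape each channel back (as in _display_camera_view).
    decoded = np.dstack(
        [np.array(c, dtype=np.uint8).reshape((height, width)) for c in (r, g, b)]
    )
    assert np.array_equal(decoded, frame)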