diff --git a/hefestus/hefestus_perception/CMakeLists.txt b/hefestus/hefestus_perception/CMakeLists.txt
index 5a14b9c..0e898ed 100644
--- a/hefestus/hefestus_perception/CMakeLists.txt
+++ b/hefestus/hefestus_perception/CMakeLists.txt
@@ -10,6 +10,7 @@ project(hefestus_perception)
find_package(catkin REQUIRED COMPONENTS
rospy
std_msgs
+ message_generation
)
## System dependencies are found with CMake's conventions
@@ -46,11 +47,10 @@ find_package(catkin REQUIRED COMPONENTS
## * add every package in MSG_DEP_SET to generate_messages(DEPENDENCIES ...)
## Generate messages in the 'msg' folder
-# add_message_files(
-# FILES
-# Message1.msg
-# Message2.msg
-# )
+add_message_files(
+ FILES
+ MiddleLine.msg
+)
## Generate services in the 'srv' folder
# add_service_files(
@@ -102,6 +102,7 @@ find_package(catkin REQUIRED COMPONENTS
## CATKIN_DEPENDS: catkin_packages dependent projects also need
## DEPENDS: system dependencies of this project that dependent projects also need
catkin_package(
+ CATKIN_DEPENDS message_runtime
# INCLUDE_DIRS include
# LIBRARIES hefestus_perception
# CATKIN_DEPENDS rospy stdmsgs
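Note: these hunks enable message building (message_generation in find_package, add_message_files, and the message_runtime export), but no hunk in this diff uncomments the generate_messages(DEPENDENCIES ...) call further down in CMakeLists.txt. If that call is still commented out, catkin will not actually generate MiddleLine, and since the message embeds a Header, std_msgs has to be listed in its DEPENDENCIES.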
diff --git a/hefestus/hefestus_perception/msg/MiddleLine.msg b/hefestus/hefestus_perception/msg/MiddleLine.msg
new file mode 100644
index 0000000..9975dea
--- /dev/null
+++ b/hefestus/hefestus_perception/msg/MiddleLine.msg
@@ -0,0 +1,7 @@
+Header header
+int32 offset_px_bottom
+int32 offset_px_top
+float32 offset_pc_bottom
+float32 offset_pc_top
+float32 slope_bottom
+float32 slope_top
\ No newline at end of file
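With the message defined, the # msg_to_send = placeholder in lane_detection.py further down could be completed along these lines. This is only a sketch: the publish_middle_line helper, the topic name, and the mapping of the 1/2 suffixes onto the bottom/top fields are assumptions rather than part of this diff; the field names come straight from MiddleLine.msg.

import rospy
from hefestus_perception.msg import MiddleLine

def publish_middle_line(pub, offset_px1, offset_px2, offset_pc1, offset_pc2, slope1, slope2):
    # Hypothetical helper: fill a MiddleLine message from the values computed in callback().
    msg = MiddleLine()
    msg.header.stamp = rospy.Time.now()
    msg.offset_px_bottom = offset_px1  # assumed: the *1 values belong to the lower region
    msg.offset_px_top = offset_px2     # assumed: the *2 values belong to the upper region
    msg.offset_pc_bottom = offset_pc1
    msg.offset_pc_top = offset_pc2
    msg.slope_bottom = slope1
    msg.slope_top = slope2
    pub.publish(msg)

The pub argument would be a rospy.Publisher('MiddleLine', MiddleLine, queue_size=10) created once in laneDetection_node(), the topic name again being an assumption.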
diff --git a/hefestus/hefestus_perception/package.xml b/hefestus/hefestus_perception/package.xml
index 343ffd4..e9a2395 100644
--- a/hefestus/hefestus_perception/package.xml
+++ b/hefestus/hefestus_perception/package.xml
@@ -37,13 +37,13 @@
-
+  <build_depend>message_generation</build_depend>
-
+  <exec_depend>message_runtime</exec_depend>
diff --git a/hefestus/hefestus_perception/src/lane_detection.py b/hefestus/hefestus_perception/src/lane_detection.py
index 4b3f8a8..7f25899 100755
--- a/hefestus/hefestus_perception/src/lane_detection.py
+++ b/hefestus/hefestus_perception/src/lane_detection.py
@@ -7,6 +7,7 @@
import rospy
from std_msgs.msg import String
from sensor_msgs.msg import Image
+from hefestus_perception.msg import MiddleLine
def laneDetection_node():
rospy.init_node('laneDetection', anonymous=False)
@@ -20,7 +21,14 @@ def laneDetection_node():
def callback(data):
bridge = CvBridge()
cv_image = bridge.imgmsg_to_cv2(data,"bgr8")
- testimage=frame_processor(cv_image)
+ middle_line_pos1, middle_line_pos2 = frame_processor(cv_image)
+ offset_px1 = int(middle_line_pos1[0][0]-cv_image.shape[1]/2)
+ offset_px2 = int(middle_line_pos2[0][0]-cv_image.shape[1]/2)
+    offset_pc1 = abs(float(offset_px1)/cv_image.shape[1])
+    offset_pc2 = abs(float(offset_px2)/cv_image.shape[1])
+    slope1 = float(middle_line_pos1[0][1]-middle_line_pos1[1][1])/(middle_line_pos1[0][0]-middle_line_pos1[1][0])
+    slope2 = float(middle_line_pos2[0][1]-middle_line_pos2[1][1])/(middle_line_pos2[0][0]-middle_line_pos2[1][0])
+ # msg_to_send =
def frame_processor(image):
"""
@@ -46,13 +54,15 @@ def frame_processor(image):
# since we are getting too many edges from our image, we apply
# a mask polygon to only focus on the road
# Will explain Region selection in detail in further steps
- region = region_selection(edges)
+ region1, region2 = region_selection(edges)
# Applying hough transform to get straight lines from our image
# and find the lane lines
- hough = hough_transform(region)
+ hough1 = hough_transform(region1)
+ hough2 = hough_transform(region2)
#lastly we draw the lines on our resulting frame and return it as output
- result = draw_lane_lines(image, lane_lines(image, hough))
- return result
+ all_lines1 = lane_lines(image, hough1)
+ all_lines2 = lane_lines(image, hough2)
+ return all_lines1[2], all_lines2[2] #only the middle one
def region_selection(image):
"""
@@ -62,7 +72,8 @@ def region_selection(image):
identified edges in the frame
"""
# create an array of the same size as of the input image
- mask = np.zeros_like(image)
+ mask_low = np.zeros_like(image)
+ mask_high = np.zeros_like(image)
# if you pass an image with more then one channel
if len(image.shape) > 2:
channel_count = image.shape[2]
@@ -75,15 +86,20 @@ def region_selection(image):
# we have created this polygon in accordance to how the camera was placed
rows, cols = image.shape[:2]
bottom_left = [cols * 0.1, rows * 0.95]
- top_left = [cols * 0.4, rows * 0.6]
+ middle_left = [cols * 0.25, rows * 0.80]
+ top_left = [cols * 0.4, rows * 0.6]
+ middle_right = [cols * 0.75, rows * 0.80]
bottom_right = [cols * 0.9, rows * 0.95]
top_right = [cols * 0.6, rows * 0.6]
- vertices = np.array([[bottom_left, top_left, top_right, bottom_right]], dtype=np.int32)
+ vertices_low = np.array([[bottom_left, middle_left, middle_right, bottom_right]], dtype=np.int32)
+ vertices_high = np.array([[middle_left, top_left, top_right, middle_right]], dtype=np.int32)
# filling the polygon with white color and generating the final mask
- cv2.fillPoly(mask, vertices, ignore_mask_color)
+ cv2.fillPoly(mask_low, vertices_low, ignore_mask_color)
+ cv2.fillPoly(mask_high, vertices_high, ignore_mask_color)
# performing Bitwise AND on the input image and mask to get only the edges on the road
- masked_image = cv2.bitwise_and(image, mask)
- return masked_image
+ masked_image1 = cv2.bitwise_and(image, mask_low)
+ masked_image2 = cv2.bitwise_and(image, mask_high)
+ return masked_image1, masked_image2
def hough_transform(image):
"""
@@ -151,7 +167,8 @@ def lane_lines(image, lines):
y2 = y1 * 0.6
left_line = pixel_points(y1, y2, left_lane)
right_line = pixel_points(y1, y2, right_lane)
- return left_line, right_line
+ middle_line = ((int((left_line[0][0]+right_line[0][0])/2),int(y1)),(int((left_line[1][0]+right_line[1][0])/2),int(y2)))
+ return left_line, right_line, middle_line
def pixel_points(y1, y2, line):
"""
@@ -170,22 +187,6 @@ def pixel_points(y1, y2, line):
y2 = int(y2)
return ((x1, y1), (x2, y2))
-
-def draw_lane_lines(image, lines, color=[255, 0, 0], thickness=12):
- """
- Draw lines onto the input image.
- Parameters:
- image: The input test image (video frame in our case).
- lines: The output lines from Hough Transform.
- color (Default = red): Line color.
- thickness (Default = 12): Line thickness.
- """
- line_image = np.zeros_like(image)
- for line in lines:
- if line is not None:
- cv2.line(line_image, *line, color, thickness)
- return cv2.addWeighted(image, 1.0, line_image, 1.0, 0.0)
-
if __name__ == "__main__":
try:
laneDetection_node()
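One caveat that applies to the middle-line code in both files: it assumes both lane lines were detected and that the middle line is not vertical. average_slope_intercept can return None for a side, in which case pixel_points presumably returns None as well (the removed draw_lane_lines guarded against exactly that), so the new middle_line expressions would raise a TypeError, and the slope1/slope2 divisions in callback() raise ZeroDivisionError for a vertical line. A minimal guard sketch, with hypothetical helper names not taken from the diff:

def safe_middle_line(left_line, right_line, y1, y2):
    # Give up on the middle line when either side was not detected.
    if left_line is None or right_line is None:
        return None
    x1 = (left_line[0][0] + right_line[0][0]) // 2
    x2 = (left_line[1][0] + right_line[1][0]) // 2
    return ((int(x1), int(y1)), (int(x2), int(y2)))

def safe_slope(p1, p2):
    # Avoid ZeroDivisionError for vertical lines and force float division so the
    # result is not truncated under Python 2.
    dx = p1[0] - p2[0]
    if dx == 0:
        return float('inf')
    return float(p1[1] - p2[1]) / dx

callback() would then skip publishing (and the visual node skip drawing) whenever a None middle line comes back.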
diff --git a/hefestus/hefestus_perception/src/lane_detection_visual.py b/hefestus/hefestus_perception/src/lane_detection_visual.py
index 1db83c7..f69410f 100755
--- a/hefestus/hefestus_perception/src/lane_detection_visual.py
+++ b/hefestus/hefestus_perception/src/lane_detection_visual.py
@@ -20,10 +20,14 @@ def laneDetection_node():
def callback(data):
bridge = CvBridge()
cv_image = bridge.imgmsg_to_cv2(data,"bgr8")
- testimage=frame_processor(cv_image)
- msgimg=bridge.cv2_to_imgmsg(testimage,"bgr8")
+ testimage1, testimage2 =frame_processor(cv_image)
+ msgimg1=bridge.cv2_to_imgmsg(testimage1,"bgr8")
pubState = rospy.Publisher('LaneImage',Image,queue_size=10)
- pubState.publish(msgimg)
+ pubState.publish(msgimg1)
+ msgimg2=bridge.cv2_to_imgmsg(testimage2,"bgr8")
+ pubState = rospy.Publisher('LaneImage2',Image,queue_size=10)
+ pubState.publish(msgimg2)
+
def frame_processor(image):
"""
@@ -49,13 +53,15 @@ def frame_processor(image):
# since we are getting too many edges from our image, we apply
# a mask polygon to only focus on the road
# Will explain Region selection in detail in further steps
- region = region_selection(edges)
+ region1, region2 = region_selection(edges)
# Applying hough transform to get straight lines from our image
# and find the lane lines
- hough = hough_transform(region)
+ hough1 = hough_transform(region1)
+ hough2 = hough_transform(region2)
#lastly we draw the lines on our resulting frame and return it as output
- result = draw_lane_lines(image, lane_lines(image, hough))
- return result
+ result1 = draw_lane_lines(image, lane_lines1(image, hough1))
+ result2 = draw_lane_lines(image, lane_lines2(image, hough2))
+ return result1, result2
def region_selection(image):
"""
@@ -65,7 +71,8 @@ def region_selection(image):
identified edges in the frame
"""
# create an array of the same size as of the input image
- mask = np.zeros_like(image)
+ mask_low = np.zeros_like(image)
+ mask_high = np.zeros_like(image)
# if you pass an image with more then one channel
if len(image.shape) > 2:
channel_count = image.shape[2]
@@ -78,15 +85,20 @@ def region_selection(image):
# we have created this polygon in accordance to how the camera was placed
rows, cols = image.shape[:2]
bottom_left = [cols * 0.1, rows * 0.95]
+ middle_left = [cols * 0.25, rows * 0.80]
top_left = [cols * 0.4, rows * 0.6]
bottom_right = [cols * 0.9, rows * 0.95]
+ middle_right = [cols * 0.75, rows * 0.80]
top_right = [cols * 0.6, rows * 0.6]
- vertices = np.array([[bottom_left, top_left, top_right, bottom_right]], dtype=np.int32)
+ vertices_low = np.array([[bottom_left, middle_left, middle_right, bottom_right]], dtype=np.int32)
+ vertices_high = np.array([[middle_left, top_left, top_right, middle_right]], dtype=np.int32)
# filling the polygon with white color and generating the final mask
- cv2.fillPoly(mask, vertices, ignore_mask_color)
+ cv2.fillPoly(mask_low, vertices_low, ignore_mask_color)
+ cv2.fillPoly(mask_high, vertices_high, ignore_mask_color)
# performing Bitwise AND on the input image and mask to get only the edges on the road
- masked_image = cv2.bitwise_and(image, mask)
- return masked_image
+ masked_image_1 = cv2.bitwise_and(image, mask_low)
+ masked_image_2 = cv2.bitwise_and(image, mask_high)
+ return masked_image_1, masked_image_2
def hough_transform(image):
"""
@@ -142,7 +154,7 @@ def average_slope_intercept(lines):
right_lane = np.dot(right_weights, right_lines) / np.sum(right_weights) if len(right_weights) > 0 else None
return left_lane, right_lane
-def lane_lines(image, lines):
+def lane_lines1(image, lines):
"""
Create full lenght lines from pixel points.
Parameters:
@@ -151,10 +163,27 @@ def lane_lines(image, lines):
"""
left_lane, right_lane = average_slope_intercept(lines)
y1 = image.shape[0]
- y2 = y1 * 0.6
+ y2 = y1 * 0.8
+ left_line = pixel_points(y1, y2, left_lane)
+ right_line = pixel_points(y1, y2, right_lane)
+ middle_line = ((int((left_line[0][0]+right_line[0][0])/2),int(y1)),(int((left_line[1][0]+right_line[1][0])/2),int(y2)))
+ return left_line, right_line, middle_line
+
+def lane_lines2(image, lines):
+ """
+    Create full length lines from pixel points.
+ Parameters:
+ image: The input test image.
+ lines: The output lines from Hough Transform.
+ """
+ left_lane, right_lane = average_slope_intercept(lines)
+ yt = image.shape[0]
+ y1 = yt * 0.8
+ y2 = yt * 0.6
left_line = pixel_points(y1, y2, left_lane)
right_line = pixel_points(y1, y2, right_lane)
- return left_line, right_line
+ middle_line = ((int((left_line[0][0]+right_line[0][0])/2),int(y1)),(int((left_line[1][0]+right_line[1][0])/2),int(y2)))
+ return left_line, right_line, middle_line
def pixel_points(y1, y2, line):
"""