Coding-python-深度相机相关

显示RGB和深度图

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
import pyrealsense2 as rs
import numpy as np
import cv2

if __name__ == "__main__":
    # Configure depth and color streams (640x480 @ 30 FPS).
    pipeline = rs.pipeline()
    config = rs.config()
    config.enable_stream(rs.stream.depth, 640, 480, rs.format.z16, 30)
    config.enable_stream(rs.stream.color, 640, 480, rs.format.bgr8, 30)

    # Start streaming.
    pipeline.start(config)

    # Create the display window once, not on every frame.
    cv2.namedWindow('RealSense', cv2.WINDOW_AUTOSIZE)
    try:
        while True:
            # Wait for a coherent pair of frames: depth and color.
            frames = pipeline.wait_for_frames()
            depth_frame = frames.get_depth_frame()
            color_frame = frames.get_color_frame()
            if not depth_frame or not color_frame:
                continue

            # Convert images to numpy arrays.
            depth_image = np.asanyarray(depth_frame.get_data())
            color_image = np.asanyarray(color_frame.get_data())

            # Apply colormap on depth image (image must be converted to
            # 8-bit per pixel first).
            depth_colormap = cv2.applyColorMap(
                cv2.convertScaleAbs(depth_image, alpha=0.03),
                cv2.COLORMAP_JET)

            # Stack both images horizontally and show them.
            images = np.hstack((color_image, depth_colormap))
            cv2.imshow('RealSense', images)

            # Press Esc or 'q' to close the image window.  Mask to the
            # low byte so platform-specific modifier bits in waitKey's
            # return value don't break either comparison.
            key = cv2.waitKey(1) & 0xFF
            if key in (ord('q'), 27):
                break
    finally:
        # Release the window and stop streaming even on error.
        cv2.destroyAllWindows()
        pipeline.stop()

获取指定点的深度信息

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
import pyrealsense2 as rs
import numpy as np
import cv2


def get_depth_at_point(depth_frame, point):
    """Return the depth (in meters) at pixel ``point`` = (x, y)."""
    return depth_frame.get_distance(point[0], point[1])


if __name__ == "__main__":
    # Configure depth and color streams (640x480 @ 30 FPS).
    pipeline = rs.pipeline()
    config = rs.config()
    config.enable_stream(rs.stream.depth, 640, 480, rs.format.z16, 30)
    config.enable_stream(rs.stream.color, 640, 480, rs.format.bgr8, 30)

    # Start streaming.
    pipeline.start(config)

    # Pixel whose depth is reported; defaults to the image center.
    point = (320, 240)

    # Create the display window once, not on every frame.
    cv2.namedWindow('RealSense', cv2.WINDOW_AUTOSIZE)
    try:
        while True:
            # Wait for a coherent pair of frames: depth and color.
            frames = pipeline.wait_for_frames()
            depth_frame = frames.get_depth_frame()
            color_frame = frames.get_color_frame()
            if not depth_frame or not color_frame:
                continue

            # Convert images to numpy arrays.
            depth_image = np.asanyarray(depth_frame.get_data())
            color_image = np.asanyarray(color_frame.get_data())

            # Report the depth at the chosen pixel.
            depth_value = get_depth_at_point(depth_frame, point)
            print(f"Depth at point {point}: {depth_value:.2f} meters")

            # Mark the chosen pixel with a filled green circle.
            cv2.circle(color_image, point, 5, (0, 255, 0), -1)

            # Apply colormap on depth image.
            depth_colormap = cv2.applyColorMap(
                cv2.convertScaleAbs(depth_image, alpha=0.03),
                cv2.COLORMAP_JET)

            # Stack both images horizontally and show them.
            images = np.hstack((color_image, depth_colormap))
            cv2.imshow('RealSense', images)

            # Press Esc or 'q' to close the image window.  Mask to the
            # low byte so modifier bits don't break either comparison.
            key = cv2.waitKey(1) & 0xFF
            if key in (ord('q'), 27):
                break

            # NOTE: to choose the point interactively, register a
            # cv2.setMouseCallback() handler instead of the fixed tuple.
    finally:
        # Release the window and stop streaming even on error.
        cv2.destroyAllWindows()
        pipeline.stop()

鼠标点击图像,获取像素坐标及深度信息

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
import pyrealsense2 as rs
import numpy as np
import cv2


def get_depth_at_point(depth_frame, point):
    """Return the depth (in meters) at pixel ``point`` = (x, y)."""
    return depth_frame.get_distance(point[0], point[1])


def main():
    """Show the color stream and print the depth under mouse clicks."""
    # Configure and start depth + color streams (640x480 @ 30 FPS).
    pipeline = rs.pipeline()
    config = rs.config()
    config.enable_stream(rs.stream.depth, 640, 480, rs.format.z16, 30)
    config.enable_stream(rs.stream.color, 640, 480, rs.format.bgr8, 30)
    pipeline.start(config)

    # Holder for the most recent depth frame so the mouse callback —
    # registered ONCE below, instead of being redefined and re-registered
    # on every loop iteration — always reads the latest frame.
    latest = {'depth_frame': None}

    def on_mouse(event, x, y, flags, param):
        # Left click: print the depth at the clicked pixel.
        if event == cv2.EVENT_LBUTTONDOWN and latest['depth_frame'] is not None:
            depth_value = get_depth_at_point(latest['depth_frame'], (x, y))
            print(f"Depth at ({x}, {y}): {depth_value:.2f} meters")

    # Create the window and register the callback once.
    cv2.namedWindow('Color Image')
    cv2.setMouseCallback('Color Image', on_mouse)

    try:
        while True:
            # Wait for a coherent pair of frames.
            frames = pipeline.wait_for_frames()
            depth_frame = frames.get_depth_frame()
            color_frame = frames.get_color_frame()
            if not depth_frame or not color_frame:
                continue
            latest['depth_frame'] = depth_frame

            # Convert and display the color image.
            color_image = np.asanyarray(color_frame.get_data())
            cv2.imshow('Color Image', color_image)

            # Press Esc or 'q' to quit (mask to the low byte so modifier
            # bits don't break either comparison).
            key = cv2.waitKey(1) & 0xFF
            if key in (ord('q'), 27):
                break
    finally:
        # Release the window and stop streaming even on error.
        cv2.destroyAllWindows()
        pipeline.stop()


if __name__ == "__main__":
    main()

计算手眼标定矩阵,鼠标点击图像,将像素坐标转换为机械臂坐标,进一步发送坐标给机械臂

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
import pyrealsense2 as rs
import numpy as np
import cv2
import serial
import time


class RobotArmController:
    """Minimal serial link to a G-code-driven robot arm."""

    def __init__(self, port, baudrate=9600):
        self.ser = serial.Serial(port, baudrate)
        # Give the controller time to reset after the port opens.
        time.sleep(2)

    def send_command(self, command):
        """Send one command line over serial.

        A trailing newline terminates the line so the controller can
        tell where the command ends (the original sent no terminator).
        """
        self.ser.write(f"{command}\n".encode())
        print(f"发送: {command}")

    def move_to(self, x, y, z):
        """Issue a rapid move (G0) to (x, y, z)."""
        command = f"G0 {x} {y} {z}"
        self.send_command(command)

    def close(self):
        """Close the serial port."""
        self.ser.close()


def get_depth_at_point(depth_frame, point):
    """Return the depth (in meters) at pixel ``point`` = (x, y)."""
    return depth_frame.get_distance(point[0], point[1])


def main():
    """Collect clicked camera points, drive the arm there, and fit a
    least-squares pixel->arm calibration matrix when the user quits."""
    # Configure and start depth + color streams (640x480 @ 30 FPS).
    pipeline = rs.pipeline()
    config = rs.config()
    config.enable_stream(rs.stream.depth, 640, 480, rs.format.z16, 30)
    config.enable_stream(rs.stream.color, 640, 480, rs.format.bgr8, 30)
    pipeline.start(config)

    # Robot arm on the serial port.
    controller = RobotArmController(port='COM5')

    # Corresponding point pairs for the hand-eye calibration.
    camera_points = []
    arm_points = []

    # Latest depth frame, shared with the mouse callback, which is
    # registered ONCE below instead of on every loop iteration.
    latest = {'depth_frame': None}

    def on_mouse(event, x, y, flags, param):
        # Left click: record the camera point and drive the arm there.
        if event == cv2.EVENT_LBUTTONDOWN and latest['depth_frame'] is not None:
            depth_value = get_depth_at_point(latest['depth_frame'], (x, y))
            print(f"Depth at ({x}, {y}): {depth_value:.2f} meters")
            camera_points.append([x, y, depth_value])

            # Move the arm to the clicked coordinates (z from depth).
            controller.move_to(x, y, depth_value)
            # NOTE(review): recording the pixel coordinates as the arm
            # position assumes the two frames coincide — for a meaningful
            # calibration, replace this with the arm's actual reported
            # pose after the move.
            arm_points.append([x, y, depth_value])

    cv2.namedWindow('Color Image')
    cv2.setMouseCallback('Color Image', on_mouse)

    try:
        while True:
            frames = pipeline.wait_for_frames()
            depth_frame = frames.get_depth_frame()
            color_frame = frames.get_color_frame()
            if not depth_frame or not color_frame:
                continue
            latest['depth_frame'] = depth_frame

            # Display the color image.
            color_image = np.asanyarray(color_frame.get_data())
            cv2.imshow('Color Image', color_image)

            # Press Esc or 'q' to quit (mask to the low byte).
            key = cv2.waitKey(1) & 0xFF
            if key in (ord('q'), 27):
                break

        # Fit the hand-eye transform X by least squares:
        # [p_cam 1] @ X = p_arm for every recorded pair.
        if camera_points and arm_points:
            cam = np.array(camera_points, dtype=float)
            arm = np.array(arm_points, dtype=float)
            A = np.hstack((cam, np.ones((cam.shape[0], 1))))
            X, _, _, _ = np.linalg.lstsq(A, arm, rcond=None)
            print("Hand-eye calibration matrix:")
            print(X)
    finally:
        # Release resources even on error.
        cv2.destroyAllWindows()
        controller.close()
        pipeline.stop()


if __name__ == "__main__":
    main()