Problem with a coordinate transformation
This is running on Ubuntu 18. I am using the plain camera that ships with Gazebo, not a depth camera, so the depth value is something I set by hand. In the simulation the object is defined at (-0.6, 0.1, 0.3165, 0, 0, 0), but the script prints ('anode pose is: ', [-0.50000007943038327, -0.17674752279857978, 1.059866627149683]) (see the sanity-check sketch below).
On top of that, the end effector of the arm ends up pointing upward; I want it to point downward, toward the object (there is a small orientation sketch after the code).
The camera is placed in the URDF with <origin xyz="-0.9 -0.0 1.0" rpy="0 ${M_PI/2} 0"/>.
Where did I go wrong?
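To make the numbers easier to check, here is a small standalone sketch (pure numpy, no ROS needed) of what I expect my pixel-to-world conversion to do, using the intrinsics from my code and the camera pose from my URDF. The frame handling is my own assumption: I treat the point as if it were expressed directly in camera_link as placed by the URDF and I do not model a separate optical frame, so this may well be where my mistake is.

```python
# coding=utf-8
# Standalone sanity check: back-project the image centre pixel with my
# hand-set depth of 0.4 m and move it into the world frame using the camera
# pose from the URDF (xyz="-0.9 -0.0 1.0", rpy="0 pi/2 0").
# Assumption: the point is expressed in camera_link as placed by the URDF;
# no separate optical frame is modelled here.
import numpy as np

fx = fy = 420.93942150909646
cx, cy = 320, 240


def backproject(u, v, depth):
    # Pixel (u, v) plus depth -> homogeneous camera-frame point (same formula as in my code)
    return np.array([(u - cx) * depth / fx, (v - cy) * depth / fy, depth, 1.0])


def rpy_matrix(roll, pitch, yaw):
    # URDF rpy = fixed-axis rotations about x, then y, then z
    cr, sr = np.cos(roll), np.sin(roll)
    cp, sp = np.cos(pitch), np.sin(pitch)
    cyw, syw = np.cos(yaw), np.sin(yaw)
    Rx = np.array([[1, 0, 0], [0, cr, -sr], [0, sr, cr]])
    Ry = np.array([[cp, 0, sp], [0, 1, 0], [-sp, 0, cp]])
    Rz = np.array([[cyw, -syw, 0], [syw, cyw, 0], [0, 0, 1]])
    return Rz.dot(Ry).dot(Rx)


# Camera pose taken from the URDF
T_world_cam = np.eye(4)
T_world_cam[:3, :3] = rpy_matrix(0.0, np.pi / 2, 0.0)
T_world_cam[:3, 3] = [-0.9, 0.0, 1.0]

p_cam = backproject(320, 240, 0.4)      # image centre at my fixed 0.4 m depth
p_world = T_world_cam.dot(p_cam)        # camera frame -> world frame
print('camera-frame point:', p_cam[:3])
print('world-frame point :', p_world[:3])
```

With these numbers the sketch gives a world point of roughly (-0.5, 0.0, 1.0), which is close to what my node prints, while the object is actually at (-0.6, 0.1, 0.3165). I plan to compare the hard-coded pose above with what `rosrun tf tf_echo world camera_link` reports at runtime.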
Here is the code:

```python
#coding=utf-8
import cv2
import rospy
import tf
from cv_bridge import CvBridge
from sensor_msgs.msg import Image
import numpy as np
import sys
import moveit_commander
from geometry_msgs.msg import PoseStamped
from std_srvs.srv import Empty
from yolov8_ros_msgs.msg import BoundingBox, BoundingBoxes
import random


class graspDemo:
    def __init__(self):
        # Planning reference frame for the arm
        self.reference_frame = 'world'
        # Initialize the MoveIt commander
        moveit_commander.roscpp_initializer.roscpp_initialize(sys.argv)
        self.robot = moveit_commander.robot.RobotCommander()
        # Initialize the manipulator move group
        self.arm = moveit_commander.move_group.MoveGroupCommander("manipulator_e5")
        self.end_effector_link = self.arm.get_end_effector_link()
        # Goal tolerances
        self.arm.set_goal_position_tolerance(0.001)
        self.arm.set_goal_orientation_tolerance(0.05)
        self.arm.allow_replanning(True)
        self.arm.set_pose_reference_frame(self.reference_frame)

    def move(self, pose, orientation):
        target_pose = PoseStamped()
        # frame_id in the header: the frame this pose is expressed in
        target_pose.header.frame_id = self.reference_frame
        target_pose.header.stamp = rospy.Time.now()
        target_pose.pose.position.x = pose[0]
        target_pose.pose.position.y = pose[1]
        target_pose.pose.position.z = pose[2]
        target_pose.pose.orientation.x = orientation[0]
        target_pose.pose.orientation.y = orientation[1]
        target_pose.pose.orientation.z = orientation[2]
        target_pose.pose.orientation.w = orientation[3]
        # Use the current state as the start state
        self.arm.set_start_state_to_current_state()
        # Set the pose target
        self.arm.set_pose_target(target_pose, self.end_effector_link)
        self.arm.go(wait=True)

    def go_named(self, place='home'):
        # Move to a named target, i.e. a predefined pose such as 'home'
        self.arm.set_named_target(place)
        self.arm.go(wait=True)

    @property
    def poseNow(self):
        # Return the current end-effector pose
        return self.arm.get_current_pose(self.end_effector_link)

    def move_by_posNow(self, dx, dy, dz):
        # Relative move based on the current pose
        self.arm.set_start_state_to_current_state()
        pos = self.poseNow.pose
        pos.position.x -= dx
        pos.position.y -= dy
        pos.position.z -= dz
        self.arm.set_pose_target(pos, self.end_effector_link)
        self.arm.go(wait=True)

    def move_by_joints(self, joints):
        # Move the arm to the given joint angles
        self.arm.set_joint_value_target(joints)
        state = self.arm.go(wait=True)
        return state

    def move_cartesian(self, x, y, z):
        # Cartesian (straight-line) planning to the position (x, y, z)
        waypoints = []
        pos = self.poseNow.pose
        pos.position.x = x
        pos.position.y = y
        pos.position.z = z
        waypoints.append(pos)
        fraction = 0.0
        maxtries = 100
        attempts = 0
        self.arm.set_pose_reference_frame(self.reference_frame)
        self.arm.set_start_state_to_current_state()
        while fraction < 1.0 and attempts < maxtries:
            (plan, fraction) = self.arm.compute_cartesian_path(
                waypoints,  # waypoint poses
                0.1,        # eef_step
                0.0,        # jump_threshold
                True)       # avoid_collisions
            attempts += 1
        if fraction == 1.0:
            self.arm.execute(plan)
            return True
        return False

    def move_cartesian_byNow(self, x, y, z):
        # Cartesian planning with the target set relative to the current pose
        waypoints = []
        pos = self.poseNow.pose
        # waypoints.append(pos)
        pos.position.x -= x
        pos.position.y -= y
        pos.position.z -= z
        # pos.orientation.x = preState['down'][0]
        # pos.orientation.y = preState['down'][1]
        # pos.orientation.z = preState['down'][2]
        # pos.orientation.w = preState['down'][3]
        waypoints.append(pos)
        fraction = 0.0
        maxtries = 100
        attempts = 0
        self.arm.set_pose_reference_frame(self.reference_frame)
        self.arm.set_start_state_to_current_state()
        while fraction < 1.0 and attempts < maxtries:
            (plan, fraction) = self.arm.compute_cartesian_path(
                waypoints,  # waypoint poses
                0.1,        # eef_step
                0.0,        # jump_threshold
                True)       # avoid_collisions
            attempts += 1
        if fraction == 1.0:
            self.arm.execute(plan)
            return True
        return False

    def shutDowm(self):
        # Shut down MoveIt
        moveit_commander.roscpp_initializer.roscpp_shutdown()
        rospy.loginfo('grasp complete!!!')


def get_rotation_matrix(q):
    # TF quaternions are (x, y, z, w); convert one to a rotation matrix
    x = q[0]
    y = q[1]
    z = q[2]
    w = q[3]
    rot = [[1-2*y*y-2*z*z, 2*x*y+2*w*z,   2*x*z-2*w*y],
           [2*x*y-2*w*z,   1-2*x*x-2*z*z, 2*y*z+2*w*x],
           [2*x*z+2*w*y,   2*y*z-2*w*x,   1-2*x*x-2*y*y]]
    return rot


def get_CameraFrame_Pos(u, v, depthvalue):
    # Convert a pixel (u, v) plus its depth value to camera-frame coordinates;
    # fx, fy, cx, cy are the camera intrinsics
    fx = 420.93942150909646
    fy = 420.93942150909646
    cx = 320
    cy = 240
    z = float(depthvalue)
    cam_x = float((u - cx) * z) / fx
    cam_y = float((v - cy) * z) / fy
    cam_z = z
    return [cam_x, cam_y, cam_z, 1]


def get_RT_matrix(base_frame, reference_frame):
    # Get the rotation and translation from base_frame to reference_frame via TF
    listener = tf.TransformListener()
    i = 3  # try three times, then give up
    while i != 0:
        try:
            listener.waitForTransform(base_frame, reference_frame, rospy.Time.now(), rospy.Duration(3.0))
            camera2World = listener.lookupTransform(base_frame, reference_frame, rospy.Time(0))
            break
        except:
            pass
        i = i - 1
    T = camera2World[0]
    R = get_rotation_matrix(camera2World[1])
    # Expand the rotation into a 4x4 homogeneous transform
    R[0].append(0)
    R[1].append(0)
    R[2].append(0)
    R.append([0.0, 0.0, 0.0, 1.0])
    R = np.mat(R)
    return [R, T]


def coordinate_transform(cameraFrame_pos, R, T):
    # Camera frame to world frame: rotate first, then translate;
    # np.mat(...).T turns the point into a column vector
    worldFrame_pos = R.I * np.mat(cameraFrame_pos).T
    worldFrame_pos[0, 0] = worldFrame_pos[0, 0] + T[0]
    worldFrame_pos[1, 0] = worldFrame_pos[1, 0] + T[1]
    worldFrame_pos[2, 0] = worldFrame_pos[2, 0] + T[2]
    worldFrame_pos = [worldFrame_pos[0, 0], worldFrame_pos[1, 0], worldFrame_pos[2, 0]]
    return worldFrame_pos


findObject = False
image = np.array(0)
imageOK = False


def BoundingBoxCallBack(data):
    # Callback for the YOLO detections
    global findObject, u, v, graspObject
    if not findObject:
        # If no grasp target has been set yet, ask the user for one
        if graspObject == '':
            graspObject = raw_input('object detected, please input the object you want to grasp:(anode, cathode)\n')
        objectGrasp = []
        for dat in data.bounding_boxes:
            # Go through all detections and keep the box centres whose class matches the target
            if graspObject == dat.Class:
                objectGrasp.append([dat.Class, (dat.xmin + dat.xmax) / 2, (dat.ymin + dat.ymax) / 2])
        if objectGrasp != []:
            # If the target was found, pick one of the matching detections at random
            rospy.loginfo('{} found, begin grasp!!!'.format(graspObject))
            _, u, v = random.choice(objectGrasp)
            findObject = True
        else:
            rospy.loginfo('The object you want to grasp is absent!!!')


def imageCallback(data):
    # Depth image callback
    global image, imageOK
    imageOK = False
    image = CvBridge().imgmsg_to_cv2(data, data.encoding)
    imageOK = True


# (x, y, z, w) quaternion for the end effector pointing down
endDown = [0.0, 0.0, 0.0, 0.0]


def grasp_test(grasp_demo):
    pass  # placeholder, not used


if __name__ == "__main__":
    # Topic names for the image and the detection boxes
    image_topic = '/camera/image_raw'
    BoundingBox_topic = '/yolov8/BoundingBoxes'
    # Initialize the ROS node
    rospy.init_node('grasp')
    # Instantiate the grasp controller
    grasp_demo = graspDemo()
    # Move the arm to the home position
    grasp_demo.go_named('home')
    # Initialize the pixel coordinates and the grasp target
    u = 320
    v = 240
    graspObject = ''
    # Subscribe to the detection-box and image topics (BoundingBoxes is the message type)
    rospy.Subscriber(BoundingBox_topic, BoundingBoxes, BoundingBoxCallBack, queue_size=1)
    rospy.Subscriber(image_topic, Image, imageCallback, queue_size=1)
    while not rospy.is_shutdown():
        # If the object to grasp has been detected and the image has been received
        if findObject:
            # Rotation and translation from the camera frame to the world frame
            [R, T] = get_RT_matrix('world', 'camera_link')
            print("[R, T]", [R, T])
            u, v = int(u), int(v)  # keep the detected coordinates in range
            print("u,v", u, v)
            # Pixel coordinates to camera coordinates (hand-set depth of 0.4)
            cameraFrame_pos = get_CameraFrame_Pos(u, v, 0.4)
            print(cameraFrame_pos)
            # Camera coordinates to world coordinates
            worldFrame_pos = coordinate_transform(cameraFrame_pos, R, T)
            print(graspObject + ' pose is: ', worldFrame_pos)
            # Move above the object to grasp
            grasp_demo.move([worldFrame_pos[0], worldFrame_pos[1], worldFrame_pos[2]], endDown)
            rospy.sleep(0.5)  # wait for grasp
            # Move up so the simulated suction does not grab other objects by mistake
            grasp_demo.move_cartesian_byNow(0, 0, -0.1)
            # Move to the place position
            # grasp_demo.move([worldFrame_pos[0]+0.4, worldFrame_pos[1]-0.2, worldFrame_pos[2]-1.0], endDown)
            rospy.sleep(0.5)
            # Go back to the detection position
            grasp_demo.go_named('home')
            # Reset the flags
            findObject = False
            graspObject = ''
```
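For the orientation problem: the endDown quaternion in the code above is still all zeros, which is only a placeholder and not a valid unit quaternion. What I have been experimenting with, purely as a sketch, is building the quaternion with tf.transformations and passing it as the orientation argument of grasp_demo.move(); the assumption that a 180-degree roll about x makes the tool face downward is mine and may itself be wrong:

```python
# coding=utf-8
# Sketch only: build a "tool pointing down" orientation for grasp_demo.move().
# Assumption (unverified): the end-effector z-axis is the approach axis, so a
# 180-degree roll about x should flip it to point along -z, i.e. downward.
import math
from tf.transformations import quaternion_from_euler

endDown = list(quaternion_from_euler(math.pi, 0.0, 0.0))  # (x, y, z, w) order
print(endDown)  # roughly [1.0, 0.0, 0.0, 0.0]
```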