From e0da8959d1c70a908bcc4a7d7cb56ffbecc0f33e Mon Sep 17 00:00:00 2001 From: Sheng Zhong Date: Wed, 18 Oct 2023 18:20:53 -0400 Subject: [PATCH 01/33] Implement Jacobian pseudo-inverse IK --- src/pytorch_kinematics/__init__.py | 1 + src/pytorch_kinematics/ik.py | 181 +++++++++++++++++++++++++++++ tests/test_inverse_kinematics.py | 98 ++++++++++++++++ 3 files changed, 280 insertions(+) create mode 100644 src/pytorch_kinematics/ik.py create mode 100644 tests/test_inverse_kinematics.py diff --git a/src/pytorch_kinematics/__init__.py b/src/pytorch_kinematics/__init__.py index 4581e03..2df06d6 100644 --- a/src/pytorch_kinematics/__init__.py +++ b/src/pytorch_kinematics/__init__.py @@ -3,3 +3,4 @@ from pytorch_kinematics.mjcf import * from pytorch_kinematics.transforms import * from pytorch_kinematics.chain import * +from pytorch_kinematics.ik import * \ No newline at end of file diff --git a/src/pytorch_kinematics/ik.py b/src/pytorch_kinematics/ik.py new file mode 100644 index 0000000..9b65429 --- /dev/null +++ b/src/pytorch_kinematics/ik.py @@ -0,0 +1,181 @@ +from pytorch_kinematics.chain import SerialChain +from pytorch_kinematics.transforms import Transform3d +from pytorch_kinematics.transforms import rotation_conversions +from typing import NamedTuple, Union, List, Optional, Callable +import torch +from matplotlib import pyplot as plt, cm as cm + + +class IKSolution(NamedTuple): + # N is the total number of attempts + err_pos: torch.Tensor + err_rot: torch.Tensor + # N boolean values indicating whether the solution converged (a solution could be found) + converged_pos: torch.Tensor + converged_rot: torch.Tensor + # whether both position and rotation converged + converged: torch.Tensor + # N x DOF tensor of joint angles; if converged[i] is False, then solutions[i] is undefined + solutions: torch.Tensor + + +# helper config sampling method +def gaussian_around_config(config: torch.Tensor, std: float) -> Callable[[int], torch.Tensor]: + def config_sampling_method(num_configs): + return torch.randn(num_configs, config.shape[0], dtype=config.dtype, device=config.device) * std + config + + return config_sampling_method + + +class InverseKinematics: + """Jacobian follower based inverse kinematics solver""" + + def __init__(self, serial_chain: SerialChain, + pos_tolerance: float = 1e-3, rot_tolerance: float = 1e-2, + initial_configs: Optional[torch.Tensor] = None, num_retries: Optional[int] = None, + joint_limits: Optional[torch.Tensor] = None, + config_sampling_method: Union[str, Callable[[int], torch.Tensor]] = "uniform", + max_iterations: int = 100, lr: float = 0.5, + debug=True, + ): + """ + :param serial_chain: + :param pos_tolerance: + :param rot_tolerance: + :param initial_configs: + :param num_retries: + :param joint_limits: (DOF, 2) tensor of joint limits (min, max) for each joint in radians + :param config_sampling_method: + """ + self.chain = serial_chain + self.dtype = serial_chain.dtype + self.device = serial_chain.device + joint_names = self.chain.get_joint_parameter_names(exclude_fixed=True) + self.dof = len(joint_names) + self.debug = debug + + self.max_iterations = max_iterations + self.lr = lr + + self.pos_tolerance = pos_tolerance + self.rot_tolerance = rot_tolerance + self.initial_config = initial_configs + if initial_configs is None and num_retries is None: + raise ValueError("either initial_configs or num_retries must be specified") + + # sample initial configs instead + self.config_sampling_method = config_sampling_method + self.joint_limits = joint_limits + if initial_configs 
is None: + self.initial_config = self.sample_configs(num_retries) + else: + if initial_configs.shape[1] != self.dof: + raise ValueError("initial_configs must have shape (N, %d)" % self.dof) + + def sample_configs(self, num_configs: int) -> torch.Tensor: + if self.config_sampling_method == "uniform": + # bound by joint_limits + if self.joint_limits is None: + raise ValueError("joint_limits must be specified if config_sampling_method is uniform") + return torch.rand(num_configs, self.dof, device=self.device) * ( + self.joint_limits[:, 1] - self.joint_limits[:, 0]) + self.joint_limits[:, 0] + elif self.config_sampling_method == "gaussian": + return torch.randn(num_configs, self.dof, device=self.device) + elif callable(self.config_sampling_method): + return self.config_sampling_method(num_configs) + else: + raise ValueError("invalid config_sampling_method %s" % self.config_sampling_method) + + def solve(self, target_poses: Transform3d) -> IKSolution: + """ + :param target_poses: (N, 4, 4) tensor + :return: (N, DOF) tensor of joint angles + """ + raise NotImplementedError() + + +class PseudoInverseIK(InverseKinematics): + def solve(self, target_poses: Transform3d) -> IKSolution: + target = target_poses.get_matrix() + + target_pos = target[:, :3, 3] + # jacobian gives angular rotation about x,y,z axis of the base frame + # convert target rot to desired rotation about x,y,z + target_rot_rpy = rotation_conversions.matrix_to_euler_angles(target[:, :3, :3], "XYZ") + + q = self.initial_config + # for logging, let's keep track of the joint angles at each iteration + if self.debug: + qs = [q] + pos_errors = [] + rot_errors = [] + for i in range(self.max_iterations): + # TODO early termination when below tolerance + # compute forward kinematics + fk = self.chain.forward_kinematics(q) + # N x 6 x DOF + J = self.chain.jacobian(q) + + # compute pose difference + m = fk.get_matrix() + pos_diff = target_pos - m[:, :3, 3] + pos_diff = pos_diff.view(-1, 3, 1) + rot_diff = target_rot_rpy - rotation_conversions.matrix_to_euler_angles(m[:, :3, :3], "XYZ") + rot_diff = rot_diff.view(-1, 3, 1) + # pose_diff = target - fk.get_matrix() + J_pos = J[:, :3, :] + J_pos_pinv = torch.pinverse(J_pos) + J_rot = J[:, 3:, :] + J_rot_pinv = torch.pinverse(J_rot) + # compute joint angle difference + dq_pos = J_pos_pinv @ pos_diff + dq_rot = J_rot_pinv @ rot_diff + + dq = dq_pos.squeeze(2) + dq_rot.squeeze(2) + q = q + self.lr * dq + if self.debug: + qs.append(q) + pos_errors.append(pos_diff.reshape(-1, 3).norm(dim=1)) + rot_errors.append(rot_diff.norm(dim=(1, 2))) + + if self.debug: + # qs = torch.stack(qs, dim=0) + # fig, ax = plt.subplots(ncols=self.dof, figsize=(10, 5)) + # for dof in range(self.dof): + # # don't show everyone, skip some + # for b in range(0, qs.shape[1], 5): + # c = (b + 1) / qs.shape[1] + # ax[dof].plot(qs[:, b, dof], c=cm.GnBu(c)) + + # errors + fig, ax = plt.subplots(ncols=2, figsize=(10, 5)) + pos_e = torch.stack(pos_errors, dim=0).cpu() + rot_e = torch.stack(rot_errors, dim=0).cpu() + ax[0].set_ylim(0, 1.) 
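            # pos_e and rot_e are stacked to shape (iterations, attempts); each curve plotted below traces one attempt's error over the iterations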
+ ax[1].set_ylim(0, rot_e.max().item() * 1.1) + ax[0].set_xlim(0, self.max_iterations - 1) + ax[1].set_xlim(0, self.max_iterations - 1) + # draw at most 50 lines + draw_max = min(50, pos_e.shape[1]) + for b in range(draw_max): + c = (b + 1) / draw_max + ax[0].plot(pos_e[:, b], c=cm.GnBu(c)) + ax[1].plot(rot_e[:, b], c=cm.GnBu(c)) + # label these axis + ax[0].set_ylabel("position error") + ax[1].set_xlabel("iteration") + ax[1].set_ylabel("rotation error") + plt.show() + + # check convergence + # fk = self.chain.forward_kinematics(q) + # pose_diff = target - fk.get_matrix() + err_pos = pos_diff.squeeze(2).norm(dim=1) + err_rot = rot_diff.squeeze(2).norm(dim=1) + + converged_pos = err_pos < self.pos_tolerance + converged_rot = err_rot < self.rot_tolerance + + return IKSolution(converged_pos=converged_pos, converged_rot=converged_rot, + err_pos=err_pos, err_rot=err_rot, + converged=converged_pos & converged_rot, solutions=q) diff --git a/tests/test_inverse_kinematics.py b/tests/test_inverse_kinematics.py new file mode 100644 index 0000000..cdb0baa --- /dev/null +++ b/tests/test_inverse_kinematics.py @@ -0,0 +1,98 @@ +import math +import os +from timeit import default_timer as timer + +import torch + +import pytorch_kinematics as pk + +import pybullet as p +import pybullet_data + +TEST_DIR = os.path.dirname(__file__) + +visualize = True + + +def _make_robot_translucent(robot_id, alpha=0.4): + def make_transparent(link): + link_id = link[1] + rgba = list(link[7]) + rgba[3] = alpha + p.changeVisualShape(robot_id, link_id, rgbaColor=rgba) + + visual_data = p.getVisualShapeData(robot_id) + for link in visual_data: + make_transparent(link) + + +def test_jacobian_follower(): + device = "cuda" if torch.cuda.is_available() else "cpu" + # device = "cpu" + urdf = "kuka_iiwa/model.urdf" + search_path = pybullet_data.getDataPath() + full_urdf = os.path.join(search_path, urdf) + chain = pk.build_serial_chain_from_urdf(open(full_urdf).read(), "lbr_iiwa_link_7") + chain = chain.to(device=device) + + joints_high = torch.tensor([170, 120, 170, 120, 170, 120, 175], device=device) + joint_limits = torch.stack((-joints_high, joints_high), dim=-1) * math.pi / 180.0 + ik = pk.PseudoInverseIK(chain, max_iterations=50, num_retries=10000, joint_limits=joint_limits) + + # robot frame + pos = torch.tensor([0.0, 0.0, 0.0], device=device) + rot = torch.tensor([0.0, 0.0, 0.0], device=device) + rob_tf = pk.Transform3d(pos=pos, rot=rot, device=device) + + # world frame goal + goal_pos = torch.tensor([0.0, 0.5, 0.5], device=device) + goal_rot = torch.tensor([0.0, 0.0, 0.0], device=device) + goal_tf = pk.Transform3d(pos=goal_pos, rot=goal_rot, device=device) + + # transform to robot frame + goal_in_rob_frame_tf = rob_tf.inverse().compose(goal_tf) + + # do IK + timer_start = timer() + sol = ik.solve(goal_in_rob_frame_tf) + timer_end = timer() + print("IK took %f seconds" % (timer_end - timer_start)) + print("IK converged number: %d / %d" % (sol.converged.sum(), sol.converged.shape[0])) + + # visualize everything + if visualize: + p.connect(p.GUI) + p.setRealTimeSimulation(False) + p.configureDebugVisualizer(p.COV_ENABLE_GUI, 0) + p.setAdditionalSearchPath(search_path) + p.loadURDF("plane.urdf", [0, 0, 0], useFixedBase=True) + m = rob_tf.get_matrix() + pos = m[0, :3, 3] + rot = m[0, :3, :3] + quat = pk.matrix_to_quaternion(rot) + armId = p.loadURDF(urdf, basePosition=pos.cpu().numpy(), baseOrientation=pk.wxyz_to_xyzw(quat).cpu().numpy(), + useFixedBase=True) + _make_robot_translucent(armId, alpha=0.6) + # 
p.resetBasePositionAndOrientation(armId, [0, 0, 0], [0, 0, 0, 1]) + # draw goal + # place a translucent sphere at the goal + visId = p.createVisualShape(p.GEOM_SPHERE, radius=0.05, rgbaColor=[0., 1., 0., 0.5]) + goalId = p.createMultiBody(baseMass=0, baseVisualShapeIndex=visId, basePosition=goal_pos.cpu().numpy()) + + yaw = 90 + pitch = -55 + dist = 1. + target = goal_pos.cpu().numpy() + p.resetDebugVisualizerCamera(dist, yaw, pitch, target) + + for i, q in enumerate(sol.solutions): + input("Press enter to continue") + for dof in range(q.shape[0]): + p.resetJointState(armId, dof, q[dof]) + + while True: + p.stepSimulation() + + +if __name__ == "__main__": + test_jacobian_follower() From 2a8a6537e8af0336faa1fad88fec4fb3f14bb095 Mon Sep 17 00:00:00 2001 From: Sheng Zhong Date: Wed, 18 Oct 2023 18:50:38 -0400 Subject: [PATCH 02/33] Return end effector FK from jacobian calculation --- src/pytorch_kinematics/ik.py | 54 +++++++++++++++++------------- src/pytorch_kinematics/jacobian.py | 4 ++- tests/test_inverse_kinematics.py | 2 +- 3 files changed, 35 insertions(+), 25 deletions(-) diff --git a/src/pytorch_kinematics/ik.py b/src/pytorch_kinematics/ik.py index 9b65429..0085828 100644 --- a/src/pytorch_kinematics/ik.py +++ b/src/pytorch_kinematics/ik.py @@ -36,7 +36,7 @@ def __init__(self, serial_chain: SerialChain, joint_limits: Optional[torch.Tensor] = None, config_sampling_method: Union[str, Callable[[int], torch.Tensor]] = "uniform", max_iterations: int = 100, lr: float = 0.5, - debug=True, + debug=False, ): """ :param serial_chain: @@ -109,29 +109,37 @@ def solve(self, target_poses: Transform3d) -> IKSolution: qs = [q] pos_errors = [] rot_errors = [] + + q.requires_grad = True + optimizer = torch.optim.Adam([q], lr=self.lr) for i in range(self.max_iterations): - # TODO early termination when below tolerance - # compute forward kinematics - fk = self.chain.forward_kinematics(q) - # N x 6 x DOF - J = self.chain.jacobian(q) - - # compute pose difference - m = fk.get_matrix() - pos_diff = target_pos - m[:, :3, 3] - pos_diff = pos_diff.view(-1, 3, 1) - rot_diff = target_rot_rpy - rotation_conversions.matrix_to_euler_angles(m[:, :3, :3], "XYZ") - rot_diff = rot_diff.view(-1, 3, 1) - # pose_diff = target - fk.get_matrix() - J_pos = J[:, :3, :] - J_pos_pinv = torch.pinverse(J_pos) - J_rot = J[:, 3:, :] - J_rot_pinv = torch.pinverse(J_rot) - # compute joint angle difference - dq_pos = J_pos_pinv @ pos_diff - dq_rot = J_rot_pinv @ rot_diff - - dq = dq_pos.squeeze(2) + dq_rot.squeeze(2) + with torch.no_grad(): + # TODO early termination when below tolerance + # compute forward kinematics + # fk = self.chain.forward_kinematics(q) + # N x 6 x DOF + J, m = self.chain.jacobian(q, ret_eef_pose=True) + + # compute pose difference + # m = fk.get_matrix() + pos_diff = target_pos - m[:, :3, 3] + pos_diff = pos_diff.view(-1, 3, 1) + rot_diff = target_rot_rpy - rotation_conversions.matrix_to_euler_angles(m[:, :3, :3], "XYZ") + rot_diff = rot_diff.view(-1, 3, 1) + # pose_diff = target - fk.get_matrix() + J_pos = J[:, :3, :] + J_pos_pinv = torch.pinverse(J_pos) + J_rot = J[:, 3:, :] + J_rot_pinv = torch.pinverse(J_rot) + # compute joint angle difference + dq_pos = J_pos_pinv @ pos_diff + dq_rot = J_rot_pinv @ rot_diff + + dq = dq_pos.squeeze(2) + dq_rot.squeeze(2) + + # q.grad = -dq + # optimizer.step() + # optimizer.zero_grad() q = q + self.lr * dq if self.debug: qs.append(q) diff --git a/src/pytorch_kinematics/jacobian.py b/src/pytorch_kinematics/jacobian.py index fe03c43..a52a32e 100644 --- 
a/src/pytorch_kinematics/jacobian.py +++ b/src/pytorch_kinematics/jacobian.py @@ -3,7 +3,7 @@ from pytorch_kinematics import transforms -def calc_jacobian(serial_chain, th, tool=None): +def calc_jacobian(serial_chain, th, tool=None, ret_eef_pose=False): """ Return robot Jacobian J in base frame (N,6,DOF) where dot{x} = J dot{q} The first 3 rows relate the translational velocities and the @@ -57,4 +57,6 @@ def calc_jacobian(serial_chain, th, tool=None): j_tr[:, :3, :3] = rotation j_tr[:, 3:, 3:] = rotation j_w = j_tr @ j_eef + if ret_eef_pose: + return j_w, pose return j_w diff --git a/tests/test_inverse_kinematics.py b/tests/test_inverse_kinematics.py index cdb0baa..93c1f46 100644 --- a/tests/test_inverse_kinematics.py +++ b/tests/test_inverse_kinematics.py @@ -37,7 +37,7 @@ def test_jacobian_follower(): joints_high = torch.tensor([170, 120, 170, 120, 170, 120, 175], device=device) joint_limits = torch.stack((-joints_high, joints_high), dim=-1) * math.pi / 180.0 - ik = pk.PseudoInverseIK(chain, max_iterations=50, num_retries=10000, joint_limits=joint_limits) + ik = pk.PseudoInverseIK(chain, max_iterations=100, num_retries=10000, joint_limits=joint_limits) # robot frame pos = torch.tensor([0.0, 0.0, 0.0], device=device) From 3e5717108ce041f36c595a35a6699942fcefe3c0 Mon Sep 17 00:00:00 2001 From: Sheng Zhong Date: Mon, 23 Oct 2023 14:37:35 -0400 Subject: [PATCH 03/33] Damp jacobian calculation and use solve instead for massive speedup --- src/pytorch_kinematics/ik.py | 51 ++++++++++++++------------------ tests/test_inverse_kinematics.py | 3 +- 2 files changed, 25 insertions(+), 29 deletions(-) diff --git a/src/pytorch_kinematics/ik.py b/src/pytorch_kinematics/ik.py index 0085828..bc8680c 100644 --- a/src/pytorch_kinematics/ik.py +++ b/src/pytorch_kinematics/ik.py @@ -1,8 +1,10 @@ from pytorch_kinematics.chain import SerialChain from pytorch_kinematics.transforms import Transform3d from pytorch_kinematics.transforms import rotation_conversions -from typing import NamedTuple, Union, List, Optional, Callable +from typing import NamedTuple, Union, Optional, Callable +import typing import torch +import inspect from matplotlib import pyplot as plt, cm as cm @@ -37,6 +39,7 @@ def __init__(self, serial_chain: SerialChain, config_sampling_method: Union[str, Callable[[int], torch.Tensor]] = "uniform", max_iterations: int = 100, lr: float = 0.5, debug=False, + optimizer_method: Union[str, typing.Type[torch.optim.Optimizer]] = "sgd" ): """ :param serial_chain: @@ -56,6 +59,8 @@ def __init__(self, serial_chain: SerialChain, self.max_iterations = max_iterations self.lr = lr + self.regularlization = 1e-9 + self.optimizer_method = optimizer_method self.pos_tolerance = pos_tolerance self.rot_tolerance = rot_tolerance @@ -110,8 +115,10 @@ def solve(self, target_poses: Transform3d) -> IKSolution: pos_errors = [] rot_errors = [] - q.requires_grad = True - optimizer = torch.optim.Adam([q], lr=self.lr) + optimizer = None + if inspect.isclass(self.optimizer_method) and issubclass(self.optimizer_method, torch.optim.Optimizer): + q.requires_grad = True + optimizer = torch.optim.Adam([q], lr=self.lr) for i in range(self.max_iterations): with torch.no_grad(): # TODO early termination when below tolerance @@ -120,41 +127,29 @@ def solve(self, target_poses: Transform3d) -> IKSolution: # N x 6 x DOF J, m = self.chain.jacobian(q, ret_eef_pose=True) - # compute pose difference - # m = fk.get_matrix() pos_diff = target_pos - m[:, :3, 3] pos_diff = pos_diff.view(-1, 3, 1) rot_diff = target_rot_rpy - 
rotation_conversions.matrix_to_euler_angles(m[:, :3, :3], "XYZ") rot_diff = rot_diff.view(-1, 3, 1) - # pose_diff = target - fk.get_matrix() - J_pos = J[:, :3, :] - J_pos_pinv = torch.pinverse(J_pos) - J_rot = J[:, 3:, :] - J_rot_pinv = torch.pinverse(J_rot) - # compute joint angle difference - dq_pos = J_pos_pinv @ pos_diff - dq_rot = J_rot_pinv @ rot_diff - - dq = dq_pos.squeeze(2) + dq_rot.squeeze(2) - - # q.grad = -dq - # optimizer.step() - # optimizer.zero_grad() - q = q + self.lr * dq + + dx = torch.cat((pos_diff, rot_diff), dim=1) + tmpA = J @ J.transpose(1, 2) + self.regularlization * torch.eye(6, device=self.device, dtype=self.dtype) + A = torch.linalg.solve(tmpA, dx) + dq = J.transpose(1, 2) @ A + dq = dq.squeeze(2) + + if optimizer is not None: + q.grad = -dq + optimizer.step() + optimizer.zero_grad() + else: + q = q + self.lr * dq if self.debug: qs.append(q) pos_errors.append(pos_diff.reshape(-1, 3).norm(dim=1)) rot_errors.append(rot_diff.norm(dim=(1, 2))) if self.debug: - # qs = torch.stack(qs, dim=0) - # fig, ax = plt.subplots(ncols=self.dof, figsize=(10, 5)) - # for dof in range(self.dof): - # # don't show everyone, skip some - # for b in range(0, qs.shape[1], 5): - # c = (b + 1) / qs.shape[1] - # ax[dof].plot(qs[:, b, dof], c=cm.GnBu(c)) - # errors fig, ax = plt.subplots(ncols=2, figsize=(10, 5)) pos_e = torch.stack(pos_errors, dim=0).cpu() diff --git a/tests/test_inverse_kinematics.py b/tests/test_inverse_kinematics.py index 93c1f46..94d7bf0 100644 --- a/tests/test_inverse_kinematics.py +++ b/tests/test_inverse_kinematics.py @@ -37,7 +37,8 @@ def test_jacobian_follower(): joints_high = torch.tensor([170, 120, 170, 120, 170, 120, 175], device=device) joint_limits = torch.stack((-joints_high, joints_high), dim=-1) * math.pi / 180.0 - ik = pk.PseudoInverseIK(chain, max_iterations=100, num_retries=10000, joint_limits=joint_limits) + ik = pk.PseudoInverseIK(chain, max_iterations=30, num_retries=10000, joint_limits=joint_limits, + lr=0.5) # robot frame pos = torch.tensor([0.0, 0.0, 0.0], device=device) From 2d7f098d824cf651a23c0da350fda13c079c0724 Mon Sep 17 00:00:00 2001 From: Sheng Zhong Date: Mon, 23 Oct 2023 15:20:56 -0400 Subject: [PATCH 04/33] Handle multiple goals in parallel --- src/pytorch_kinematics/ik.py | 40 +++++++++++++++++-------- tests/test_inverse_kinematics.py | 50 +++++++++++++++++++++----------- 2 files changed, 61 insertions(+), 29 deletions(-) diff --git a/src/pytorch_kinematics/ik.py b/src/pytorch_kinematics/ik.py index bc8680c..55abc60 100644 --- a/src/pytorch_kinematics/ik.py +++ b/src/pytorch_kinematics/ik.py @@ -9,14 +9,19 @@ class IKSolution(NamedTuple): + # M is the total number of problems # N is the total number of attempts + # M x N tensor of position and rotation errors err_pos: torch.Tensor err_rot: torch.Tensor - # N boolean values indicating whether the solution converged (a solution could be found) + # M x N boolean values indicating whether the solution converged (a solution could be found) converged_pos: torch.Tensor converged_rot: torch.Tensor - # whether both position and rotation converged converged: torch.Tensor + # M whether any position and rotation converged for that problem + converged_pos_any: torch.tensor + converged_rot_any: torch.tensor + converged_any: torch.tensor # N x DOF tensor of joint angles; if converged[i] is False, then solutions[i] is undefined solutions: torch.Tensor @@ -93,8 +98,9 @@ def sample_configs(self, num_configs: int) -> torch.Tensor: def solve(self, target_poses: Transform3d) -> IKSolution: """ - :param 
target_poses: (N, 4, 4) tensor - :return: (N, DOF) tensor of joint angles + Solve IK for the given target poses in robot frame + :param target_poses: (N, 4, 4) tensor, goal pose in robot frame + :return: IKSolution solutions """ raise NotImplementedError() @@ -103,15 +109,17 @@ class PseudoInverseIK(InverseKinematics): def solve(self, target_poses: Transform3d) -> IKSolution: target = target_poses.get_matrix() + M = target.shape[0] + target_pos = target[:, :3, 3] # jacobian gives angular rotation about x,y,z axis of the base frame # convert target rot to desired rotation about x,y,z target_rot_rpy = rotation_conversions.matrix_to_euler_angles(target[:, :3, :3], "XYZ") - q = self.initial_config + # manually flatten it + q = self.initial_config.repeat(M, 1) # for logging, let's keep track of the joint angles at each iteration if self.debug: - qs = [q] pos_errors = [] rot_errors = [] @@ -126,10 +134,13 @@ def solve(self, target_poses: Transform3d) -> IKSolution: # fk = self.chain.forward_kinematics(q) # N x 6 x DOF J, m = self.chain.jacobian(q, ret_eef_pose=True) + # unflatten to broadcast with goal + m = m.view(M, -1, 4, 4) - pos_diff = target_pos - m[:, :3, 3] + pos_diff = target_pos.unsqueeze(1) - m[:, :, :3, 3] pos_diff = pos_diff.view(-1, 3, 1) - rot_diff = target_rot_rpy - rotation_conversions.matrix_to_euler_angles(m[:, :3, :3], "XYZ") + rot_diff = target_rot_rpy.unsqueeze(1) - rotation_conversions.matrix_to_euler_angles(m[:, :, :3, :3], + "XYZ") rot_diff = rot_diff.view(-1, 3, 1) dx = torch.cat((pos_diff, rot_diff), dim=1) @@ -145,7 +156,6 @@ def solve(self, target_poses: Transform3d) -> IKSolution: else: q = q + self.lr * dq if self.debug: - qs.append(q) pos_errors.append(pos_diff.reshape(-1, 3).norm(dim=1)) rot_errors.append(rot_diff.norm(dim=(1, 2))) @@ -173,12 +183,18 @@ def solve(self, target_poses: Transform3d) -> IKSolution: # check convergence # fk = self.chain.forward_kinematics(q) # pose_diff = target - fk.get_matrix() - err_pos = pos_diff.squeeze(2).norm(dim=1) - err_rot = rot_diff.squeeze(2).norm(dim=1) + err_pos = pos_diff.reshape(M, -1, 3).norm(dim=-1) + err_rot = rot_diff.reshape(M, -1, 3).norm(dim=-1) + # problem (M) x retry (N) converged_pos = err_pos < self.pos_tolerance converged_rot = err_rot < self.rot_tolerance + converged_pos_any = converged_pos.any(dim=1) + converged_rot_any = converged_rot.any(dim=1) + return IKSolution(converged_pos=converged_pos, converged_rot=converged_rot, + converged_pos_any=converged_pos_any, converged_rot_any=converged_rot_any, err_pos=err_pos, err_rot=err_rot, - converged=converged_pos & converged_rot, solutions=q) + converged=converged_pos & converged_rot, converged_any=converged_pos_any & converged_rot_any, + solutions=q.reshape(M, -1, self.dof)) diff --git a/tests/test_inverse_kinematics.py b/tests/test_inverse_kinematics.py index 94d7bf0..e7a26c7 100644 --- a/tests/test_inverse_kinematics.py +++ b/tests/test_inverse_kinematics.py @@ -5,6 +5,7 @@ import torch import pytorch_kinematics as pk +import pytorch_seed import pybullet as p import pybullet_data @@ -27,6 +28,7 @@ def make_transparent(link): def test_jacobian_follower(): + pytorch_seed.seed(0) device = "cuda" if torch.cuda.is_available() else "cpu" # device = "cpu" urdf = "kuka_iiwa/model.urdf" @@ -37,7 +39,7 @@ def test_jacobian_follower(): joints_high = torch.tensor([170, 120, 170, 120, 170, 120, 175], device=device) joint_limits = torch.stack((-joints_high, joints_high), dim=-1) * math.pi / 180.0 - ik = pk.PseudoInverseIK(chain, max_iterations=30, num_retries=10000, 
joint_limits=joint_limits, + ik = pk.PseudoInverseIK(chain, max_iterations=30, num_retries=1000, joint_limits=joint_limits, lr=0.5) # robot frame @@ -46,8 +48,11 @@ def test_jacobian_follower(): rob_tf = pk.Transform3d(pos=pos, rot=rot, device=device) # world frame goal - goal_pos = torch.tensor([0.0, 0.5, 0.5], device=device) - goal_rot = torch.tensor([0.0, 0.0, 0.0], device=device) + M = 10 + # generate random goal positions + goal_pos = torch.rand(M, 3, device=device) * 0.5 + # also generate random goal rotations + goal_rot = torch.rand(M, 3, device=device) * 2 * math.pi goal_tf = pk.Transform3d(pos=goal_pos, rot=goal_rot, device=device) # transform to robot frame @@ -58,7 +63,7 @@ def test_jacobian_follower(): sol = ik.solve(goal_in_rob_frame_tf) timer_end = timer() print("IK took %f seconds" % (timer_end - timer_start)) - print("IK converged number: %d / %d" % (sol.converged.sum(), sol.converged.shape[0])) + print("IK converged number: %d / %d" % (sol.converged.sum(), sol.converged.numel())) # visualize everything if visualize: @@ -66,6 +71,13 @@ def test_jacobian_follower(): p.setRealTimeSimulation(False) p.configureDebugVisualizer(p.COV_ENABLE_GUI, 0) p.setAdditionalSearchPath(search_path) + + yaw = 90 + pitch = -55 + dist = 1. + target = goal_pos.cpu().numpy() + p.resetDebugVisualizerCamera(dist, yaw, pitch, target) + p.loadURDF("plane.urdf", [0, 0, 0], useFixedBase=True) m = rob_tf.get_matrix() pos = m[0, :3, 3] @@ -77,19 +89,23 @@ def test_jacobian_follower(): # p.resetBasePositionAndOrientation(armId, [0, 0, 0], [0, 0, 0, 1]) # draw goal # place a translucent sphere at the goal - visId = p.createVisualShape(p.GEOM_SPHERE, radius=0.05, rgbaColor=[0., 1., 0., 0.5]) - goalId = p.createMultiBody(baseMass=0, baseVisualShapeIndex=visId, basePosition=goal_pos.cpu().numpy()) - - yaw = 90 - pitch = -55 - dist = 1. 
- target = goal_pos.cpu().numpy() - p.resetDebugVisualizerCamera(dist, yaw, pitch, target) - - for i, q in enumerate(sol.solutions): - input("Press enter to continue") - for dof in range(q.shape[0]): - p.resetJointState(armId, dof, q[dof]) + show_max_num_retries_per_goal = 10 + for goal_num in range(M): + visId = p.createVisualShape(p.GEOM_SPHERE, radius=0.05, rgbaColor=[0., 1., 0., 0.5]) + goalId = p.createMultiBody(baseMass=0, baseVisualShapeIndex=visId, + basePosition=goal_pos[goal_num].cpu().numpy()) + # print how many retries converged for this one + print("Goal %d converged %d / %d" % ( + goal_num, sol.converged[goal_num].sum(), sol.converged[goal_num].numel())) + + for i, q in enumerate(sol.solutions[goal_num]): + if i > show_max_num_retries_per_goal: + break + input("Press enter to continue") + for dof in range(q.shape[0]): + p.resetJointState(armId, dof, q[dof]) + + p.removeBody(goalId) while True: p.stepSimulation() From 36ef5627c9ea04bf698be0724776f01abd9c0f37 Mon Sep 17 00:00:00 2001 From: Sheng Zhong Date: Mon, 23 Oct 2023 15:41:39 -0400 Subject: [PATCH 05/33] Replace sphere with cone --- src/pytorch_kinematics/ik.py | 13 ++- tests/meshes/cone.mtl | 2 + tests/meshes/cone.obj | 169 +++++++++++++++++++++++++++++++ tests/test_inverse_kinematics.py | 22 ++-- 4 files changed, 196 insertions(+), 10 deletions(-) create mode 100644 tests/meshes/cone.mtl create mode 100644 tests/meshes/cone.obj diff --git a/src/pytorch_kinematics/ik.py b/src/pytorch_kinematics/ik.py index 55abc60..353fb86 100644 --- a/src/pytorch_kinematics/ik.py +++ b/src/pytorch_kinematics/ik.py @@ -81,6 +81,8 @@ def __init__(self, serial_chain: SerialChain, else: if initial_configs.shape[1] != self.dof: raise ValueError("initial_configs must have shape (N, %d)" % self.dof) + # could give a batch of initial configs + self.num_retries = self.initial_config.shape[-2] def sample_configs(self, num_configs: int) -> torch.Tensor: if self.config_sampling_method == "uniform": @@ -116,8 +118,15 @@ def solve(self, target_poses: Transform3d) -> IKSolution: # convert target rot to desired rotation about x,y,z target_rot_rpy = rotation_conversions.matrix_to_euler_angles(target[:, :3, :3], "XYZ") - # manually flatten it - q = self.initial_config.repeat(M, 1) + q = self.initial_config + if q.numel() == M * self.dof * self.num_retries: + q = q.reshape(-1, self.dof) + elif q.numel() == self.dof * self.num_retries: + # repeat and manually flatten it + q = self.initial_config.repeat(M, 1) + else: + raise ValueError( + f"initial_config must have shape ({M}, {self.num_retries}, {self.dof}) or ({self.num_retries}, {self.dof})") # for logging, let's keep track of the joint angles at each iteration if self.debug: pos_errors = [] diff --git a/tests/meshes/cone.mtl b/tests/meshes/cone.mtl new file mode 100644 index 0000000..cd41027 --- /dev/null +++ b/tests/meshes/cone.mtl @@ -0,0 +1,2 @@ +# Blender 3.6.4 MTL File: 'ycb.blend' +# www.blender.org diff --git a/tests/meshes/cone.obj b/tests/meshes/cone.obj new file mode 100644 index 0000000..408a048 --- /dev/null +++ b/tests/meshes/cone.obj @@ -0,0 +1,169 @@ +# Blender 3.6.4 +# www.blender.org +mtllib cone.mtl +o Cone +v 0.097681 0.000000 -0.097681 +v 0.095804 -0.019057 -0.097681 +v 0.090246 -0.037381 -0.097681 +v 0.081219 -0.054269 -0.097681 +v 0.069071 -0.069071 -0.097681 +v 0.054269 -0.081219 -0.097681 +v 0.037381 -0.090246 -0.097681 +v 0.019057 -0.095804 -0.097681 +v 0.000000 -0.097681 -0.097681 +v -0.019057 -0.095804 -0.097681 +v -0.037381 -0.090246 -0.097681 +v -0.054269 -0.081219 
-0.097681 +v -0.069071 -0.069071 -0.097681 +v -0.081219 -0.054269 -0.097681 +v -0.090246 -0.037381 -0.097681 +v -0.095804 -0.019057 -0.097681 +v -0.097681 0.000000 -0.097681 +v -0.095804 0.019057 -0.097681 +v -0.090246 0.037381 -0.097681 +v -0.081219 0.054269 -0.097681 +v -0.069071 0.069071 -0.097681 +v -0.054269 0.081219 -0.097681 +v -0.037381 0.090246 -0.097681 +v -0.019057 0.095804 -0.097681 +v 0.000000 0.097681 -0.097681 +v 0.019057 0.095804 -0.097681 +v 0.037381 0.090246 -0.097681 +v 0.054269 0.081219 -0.097681 +v 0.069071 0.069071 -0.097681 +v 0.081219 0.054269 -0.097681 +v 0.090246 0.037381 -0.097681 +v 0.095804 0.019057 -0.097681 +v 0.000000 0.000000 0.097681 +vn 0.8910 -0.0878 0.4455 +vn 0.8567 -0.2599 0.4455 +vn 0.7896 -0.4220 0.4455 +vn 0.6921 -0.5680 0.4455 +vn 0.5680 -0.6921 0.4455 +vn 0.4220 -0.7896 0.4455 +vn 0.2599 -0.8567 0.4455 +vn 0.0878 -0.8910 0.4455 +vn -0.0878 -0.8910 0.4455 +vn -0.2599 -0.8567 0.4455 +vn -0.4220 -0.7896 0.4455 +vn -0.5680 -0.6921 0.4455 +vn -0.6921 -0.5680 0.4455 +vn -0.7896 -0.4220 0.4455 +vn -0.8567 -0.2599 0.4455 +vn -0.8910 -0.0878 0.4455 +vn -0.8910 0.0878 0.4455 +vn -0.8567 0.2599 0.4455 +vn -0.7896 0.4220 0.4455 +vn -0.6921 0.5680 0.4455 +vn -0.5680 0.6921 0.4455 +vn -0.4220 0.7896 0.4455 +vn -0.2599 0.8567 0.4455 +vn -0.0878 0.8910 0.4455 +vn 0.0878 0.8910 0.4455 +vn 0.2599 0.8567 0.4455 +vn 0.4220 0.7896 0.4455 +vn 0.5680 0.6921 0.4455 +vn 0.6921 0.5680 0.4455 +vn 0.7896 0.4220 0.4455 +vn -0.0000 -0.0000 -1.0000 +vn 0.8567 0.2599 0.4455 +vn 0.8910 0.0878 0.4455 +vt 0.250000 0.490000 +vt 0.250000 0.250000 +vt 0.296822 0.485388 +vt 0.341844 0.471731 +vt 0.383337 0.449553 +vt 0.419706 0.419706 +vt 0.449553 0.383337 +vt 0.471731 0.341844 +vt 0.485388 0.296822 +vt 0.490000 0.250000 +vt 0.485388 0.203178 +vt 0.471731 0.158156 +vt 0.449553 0.116663 +vt 0.419706 0.080294 +vt 0.383337 0.050447 +vt 0.341844 0.028269 +vt 0.296822 0.014612 +vt 0.250000 0.010000 +vt 0.203178 0.014612 +vt 0.158156 0.028269 +vt 0.116663 0.050447 +vt 0.080294 0.080294 +vt 0.050447 0.116663 +vt 0.028269 0.158156 +vt 0.014612 0.203178 +vt 0.010000 0.250000 +vt 0.014612 0.296822 +vt 0.028269 0.341844 +vt 0.050447 0.383337 +vt 0.080294 0.419706 +vt 0.116663 0.449553 +vt 0.158156 0.471731 +vt 0.750000 0.490000 +vt 0.796822 0.485388 +vt 0.841844 0.471731 +vt 0.883337 0.449553 +vt 0.919706 0.419706 +vt 0.949553 0.383337 +vt 0.971731 0.341844 +vt 0.985388 0.296822 +vt 0.990000 0.250000 +vt 0.985388 0.203178 +vt 0.971731 0.158156 +vt 0.949553 0.116663 +vt 0.919706 0.080294 +vt 0.883337 0.050447 +vt 0.841844 0.028269 +vt 0.796822 0.014612 +vt 0.750000 0.010000 +vt 0.703178 0.014612 +vt 0.658156 0.028269 +vt 0.616663 0.050447 +vt 0.580294 0.080294 +vt 0.550447 0.116663 +vt 0.528269 0.158156 +vt 0.514612 0.203178 +vt 0.510000 0.250000 +vt 0.514612 0.296822 +vt 0.528269 0.341844 +vt 0.550447 0.383337 +vt 0.580294 0.419706 +vt 0.616663 0.449553 +vt 0.658156 0.471731 +vt 0.703178 0.485388 +vt 0.203178 0.485388 +s 0 +f 1/1/1 33/2/1 2/3/1 +f 2/3/2 33/2/2 3/4/2 +f 3/4/3 33/2/3 4/5/3 +f 4/5/4 33/2/4 5/6/4 +f 5/6/5 33/2/5 6/7/5 +f 6/7/6 33/2/6 7/8/6 +f 7/8/7 33/2/7 8/9/7 +f 8/9/8 33/2/8 9/10/8 +f 9/10/9 33/2/9 10/11/9 +f 10/11/10 33/2/10 11/12/10 +f 11/12/11 33/2/11 12/13/11 +f 12/13/12 33/2/12 13/14/12 +f 13/14/13 33/2/13 14/15/13 +f 14/15/14 33/2/14 15/16/14 +f 15/16/15 33/2/15 16/17/15 +f 16/17/16 33/2/16 17/18/16 +f 17/18/17 33/2/17 18/19/17 +f 18/19/18 33/2/18 19/20/18 +f 19/20/19 33/2/19 20/21/19 +f 20/21/20 33/2/20 21/22/20 +f 21/22/21 33/2/21 22/23/21 +f 22/23/22 33/2/22 23/24/22 +f 
23/24/23 33/2/23 24/25/23 +f 24/25/24 33/2/24 25/26/24 +f 25/26/25 33/2/25 26/27/25 +f 26/27/26 33/2/26 27/28/26 +f 27/28/27 33/2/27 28/29/27 +f 28/29/28 33/2/28 29/30/28 +f 29/30/29 33/2/29 30/31/29 +f 30/31/30 33/2/30 31/32/30 +f 1/33/31 2/34/31 3/35/31 4/36/31 5/37/31 6/38/31 7/39/31 8/40/31 9/41/31 10/42/31 11/43/31 12/44/31 13/45/31 14/46/31 15/47/31 16/48/31 17/49/31 18/50/31 19/51/31 20/52/31 21/53/31 22/54/31 23/55/31 24/56/31 25/57/31 26/58/31 27/59/31 28/60/31 29/61/31 30/62/31 31/63/31 32/64/31 +f 31/32/32 33/2/32 32/65/32 +f 32/65/33 33/2/33 1/1/33 diff --git a/tests/test_inverse_kinematics.py b/tests/test_inverse_kinematics.py index e7a26c7..826a87f 100644 --- a/tests/test_inverse_kinematics.py +++ b/tests/test_inverse_kinematics.py @@ -28,7 +28,7 @@ def make_transparent(link): def test_jacobian_follower(): - pytorch_seed.seed(0) + pytorch_seed.seed(2) device = "cuda" if torch.cuda.is_available() else "cpu" # device = "cpu" urdf = "kuka_iiwa/model.urdf" @@ -39,7 +39,7 @@ def test_jacobian_follower(): joints_high = torch.tensor([170, 120, 170, 120, 170, 120, 175], device=device) joint_limits = torch.stack((-joints_high, joints_high), dim=-1) * math.pi / 180.0 - ik = pk.PseudoInverseIK(chain, max_iterations=30, num_retries=1000, joint_limits=joint_limits, + ik = pk.PseudoInverseIK(chain, max_iterations=30, num_retries=100, joint_limits=joint_limits, lr=0.5) # robot frame @@ -48,7 +48,7 @@ def test_jacobian_follower(): rob_tf = pk.Transform3d(pos=pos, rot=rot, device=device) # world frame goal - M = 10 + M = 100 # generate random goal positions goal_pos = torch.rand(M, 3, device=device) * 0.5 # also generate random goal rotations @@ -75,7 +75,7 @@ def test_jacobian_follower(): yaw = 90 pitch = -55 dist = 1. - target = goal_pos.cpu().numpy() + target = goal_pos[0].cpu().numpy() p.resetDebugVisualizerCamera(dist, yaw, pitch, target) p.loadURDF("plane.urdf", [0, 0, 0], useFixedBase=True) @@ -91,19 +91,25 @@ def test_jacobian_follower(): # place a translucent sphere at the goal show_max_num_retries_per_goal = 10 for goal_num in range(M): - visId = p.createVisualShape(p.GEOM_SPHERE, radius=0.05, rgbaColor=[0., 1., 0., 0.5]) + # draw cone to indicate pose instead of sphere + visId = p.createVisualShape(p.GEOM_MESH, fileName="meshes/cone.obj", meshScale=1.0, + rgbaColor=[0., 1., 0., 0.5]) + # visId = p.createVisualShape(p.GEOM_SPHERE, radius=0.05, rgbaColor=[0., 1., 0., 0.5]) + r = goal_rot[goal_num] + xyzw = pk.wxyz_to_xyzw(pk.matrix_to_quaternion(pk.euler_angles_to_matrix(r, "XYZ"))) goalId = p.createMultiBody(baseMass=0, baseVisualShapeIndex=visId, - basePosition=goal_pos[goal_num].cpu().numpy()) + basePosition=goal_pos[goal_num].cpu().numpy(), + baseOrientation=xyzw.cpu().numpy()) # print how many retries converged for this one print("Goal %d converged %d / %d" % ( - goal_num, sol.converged[goal_num].sum(), sol.converged[goal_num].numel())) + goal_num, sol.converged[goal_num].sum(), sol.converged[goal_num].numel())) for i, q in enumerate(sol.solutions[goal_num]): if i > show_max_num_retries_per_goal: break - input("Press enter to continue") for dof in range(q.shape[0]): p.resetJointState(armId, dof, q[dof]) + input("Press enter to continue") p.removeBody(goalId) From 76e92ec1889a82a6bb3399ef42c636fd2a7e81e2 Mon Sep 17 00:00:00 2001 From: Sheng Zhong Date: Mon, 23 Oct 2023 16:00:54 -0400 Subject: [PATCH 06/33] Parameterize damping parameter (lambda * identity usually) --- src/pytorch_kinematics/ik.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git 
a/src/pytorch_kinematics/ik.py b/src/pytorch_kinematics/ik.py index 353fb86..b87913b 100644 --- a/src/pytorch_kinematics/ik.py +++ b/src/pytorch_kinematics/ik.py @@ -43,6 +43,7 @@ def __init__(self, serial_chain: SerialChain, joint_limits: Optional[torch.Tensor] = None, config_sampling_method: Union[str, Callable[[int], torch.Tensor]] = "uniform", max_iterations: int = 100, lr: float = 0.5, + regularlization: float = 1e-9, debug=False, optimizer_method: Union[str, typing.Type[torch.optim.Optimizer]] = "sgd" ): @@ -64,7 +65,7 @@ def __init__(self, serial_chain: SerialChain, self.max_iterations = max_iterations self.lr = lr - self.regularlization = 1e-9 + self.regularlization = regularlization self.optimizer_method = optimizer_method self.pos_tolerance = pos_tolerance From 9d23df77a2cc92b59dcccc689226d42d9efc5dc3 Mon Sep 17 00:00:00 2001 From: Sheng Zhong Date: Mon, 23 Oct 2023 17:43:53 -0400 Subject: [PATCH 07/33] Add early stopping on any configuration convergence --- src/pytorch_kinematics/ik.py | 115 +++++++++++++++++++++---------- tests/test_inverse_kinematics.py | 16 +++-- 2 files changed, 88 insertions(+), 43 deletions(-) diff --git a/src/pytorch_kinematics/ik.py b/src/pytorch_kinematics/ik.py index b87913b..ade50b7 100644 --- a/src/pytorch_kinematics/ik.py +++ b/src/pytorch_kinematics/ik.py @@ -8,22 +8,61 @@ from matplotlib import pyplot as plt, cm as cm -class IKSolution(NamedTuple): - # M is the total number of problems - # N is the total number of attempts - # M x N tensor of position and rotation errors - err_pos: torch.Tensor - err_rot: torch.Tensor - # M x N boolean values indicating whether the solution converged (a solution could be found) - converged_pos: torch.Tensor - converged_rot: torch.Tensor - converged: torch.Tensor - # M whether any position and rotation converged for that problem - converged_pos_any: torch.tensor - converged_rot_any: torch.tensor - converged_any: torch.tensor - # N x DOF tensor of joint angles; if converged[i] is False, then solutions[i] is undefined - solutions: torch.Tensor +class IKSolution: + def __init__(self, dof, num_problems, num_retries, device="cpu"): + self.device = device + self.num_problems = num_problems + self.num_retries = num_retries + self.dof = dof + M = num_problems + # N x DOF tensor of joint angles; if converged[i] is False, then solutions[i] is undefined + self.solutions = torch.zeros((M, self.num_retries, self.dof), device=self.device) + self.remaining = torch.ones(M, dtype=torch.bool, device=self.device) + + # M is the total number of problems + # N is the total number of attempts + # M x N tensor of position and rotation errors + self.err_pos = torch.zeros((M, self.num_retries), device=self.device) + self.err_rot = torch.zeros_like(self.err_pos) + # M x N boolean values indicating whether the solution converged (a solution could be found) + self.converged_pos = torch.zeros((M, self.num_retries), dtype=torch.bool, device=self.device) + self.converged_rot = torch.zeros_like(self.converged_pos) + self.converged = torch.zeros_like(self.converged_pos) + + # M whether any position and rotation converged for that problem + self.converged_pos_any = torch.zeros_like(self.remaining) + self.converged_rot_any = torch.zeros_like(self.remaining) + self.converged_any = torch.zeros_like(self.remaining) + + def update(self, q: torch.tensor, pos_diff: torch.tensor, rot_diff: torch.tensor, pos_tolerance, rot_tolerance, + use_remaining=False): + err_pos = pos_diff.reshape(-1, self.num_retries, 3).norm(dim=-1) + err_rot = rot_diff.reshape(-1, 
self.num_retries, 3).norm(dim=-1) + converged_pos = err_pos < pos_tolerance + converged_rot = err_rot < rot_tolerance + converged = converged_pos & converged_rot + converged_any = converged.any(dim=1) + + # stop considering problems where any converged + qq = q.reshape(-1, self.num_retries, self.dof) + + if use_remaining: + this_converged = self.remaining + converged_any[:] = True + else: + _r = self.remaining.clone() + self.remaining[_r] = _r[_r] & ~converged_any + this_converged = _r ^ self.remaining + + self.solutions[this_converged] = qq[converged_any] + self.err_pos[this_converged] = err_pos[converged_any] + self.err_rot[this_converged] = err_rot[converged_any] + self.converged_pos[this_converged] = converged_pos[converged_any] + self.converged_rot[this_converged] = converged_rot[converged_any] + self.converged[this_converged] = converged[converged_any] + self.converged_any[this_converged] = converged_any[converged_any] + + return converged_any # helper config sampling method @@ -45,6 +84,7 @@ def __init__(self, serial_chain: SerialChain, max_iterations: int = 100, lr: float = 0.5, regularlization: float = 1e-9, debug=False, + early_stopping_any_converged=False, optimizer_method: Union[str, typing.Type[torch.optim.Optimizer]] = "sgd" ): """ @@ -62,6 +102,7 @@ def __init__(self, serial_chain: SerialChain, joint_names = self.chain.get_joint_parameter_names(exclude_fixed=True) self.dof = len(joint_names) self.debug = debug + self.early_stopping_any_converged = early_stopping_any_converged self.max_iterations = max_iterations self.lr = lr @@ -119,6 +160,8 @@ def solve(self, target_poses: Transform3d) -> IKSolution: # convert target rot to desired rotation about x,y,z target_rot_rpy = rotation_conversions.matrix_to_euler_angles(target[:, :3, :3], "XYZ") + sol = IKSolution(self.dof, M, self.num_retries, device=self.device) + q = self.initial_config if q.numel() == M * self.dof * self.num_retries: q = q.reshape(-1, self.dof) @@ -145,7 +188,7 @@ def solve(self, target_poses: Transform3d) -> IKSolution: # N x 6 x DOF J, m = self.chain.jacobian(q, ret_eef_pose=True) # unflatten to broadcast with goal - m = m.view(M, -1, 4, 4) + m = m.view(-1, self.num_retries, 4, 4) pos_diff = target_pos.unsqueeze(1) - m[:, :, :3, 3] pos_diff = pos_diff.view(-1, 3, 1) @@ -165,9 +208,21 @@ def solve(self, target_poses: Transform3d) -> IKSolution: optimizer.zero_grad() else: q = q + self.lr * dq - if self.debug: - pos_errors.append(pos_diff.reshape(-1, 3).norm(dim=1)) - rot_errors.append(rot_diff.norm(dim=(1, 2))) + + with torch.no_grad(): + if self.early_stopping_any_converged: + converged_any = sol.update(q, pos_diff, rot_diff, self.pos_tolerance, self.rot_tolerance, + use_remaining=False) + # stop considering problems where any converged + qq = q.reshape(-1, self.num_retries, self.dof) + q = qq[~converged_any] + q = q.reshape(-1, self.dof) + target_pos = target_pos[~converged_any] + target_rot_rpy = target_rot_rpy[~converged_any] + + if self.debug: + pos_errors.append(pos_diff.reshape(-1, 3).norm(dim=1)) + rot_errors.append(rot_diff.norm(dim=(1, 2))) if self.debug: # errors @@ -190,21 +245,5 @@ def solve(self, target_poses: Transform3d) -> IKSolution: ax[1].set_ylabel("rotation error") plt.show() - # check convergence - # fk = self.chain.forward_kinematics(q) - # pose_diff = target - fk.get_matrix() - err_pos = pos_diff.reshape(M, -1, 3).norm(dim=-1) - err_rot = rot_diff.reshape(M, -1, 3).norm(dim=-1) - - # problem (M) x retry (N) - converged_pos = err_pos < self.pos_tolerance - converged_rot = err_rot < 
self.rot_tolerance - - converged_pos_any = converged_pos.any(dim=1) - converged_rot_any = converged_rot.any(dim=1) - - return IKSolution(converged_pos=converged_pos, converged_rot=converged_rot, - converged_pos_any=converged_pos_any, converged_rot_any=converged_rot_any, - err_pos=err_pos, err_rot=err_rot, - converged=converged_pos & converged_rot, converged_any=converged_pos_any & converged_rot_any, - solutions=q.reshape(M, -1, self.dof)) + sol.update(q, pos_diff, rot_diff, self.pos_tolerance, self.rot_tolerance, use_remaining=True) + return sol diff --git a/tests/test_inverse_kinematics.py b/tests/test_inverse_kinematics.py index 826a87f..93b379e 100644 --- a/tests/test_inverse_kinematics.py +++ b/tests/test_inverse_kinematics.py @@ -39,7 +39,7 @@ def test_jacobian_follower(): joints_high = torch.tensor([170, 120, 170, 120, 170, 120, 175], device=device) joint_limits = torch.stack((-joints_high, joints_high), dim=-1) * math.pi / 180.0 - ik = pk.PseudoInverseIK(chain, max_iterations=30, num_retries=100, joint_limits=joint_limits, + ik = pk.PseudoInverseIK(chain, max_iterations=30, num_retries=1000, joint_limits=joint_limits, lr=0.5) # robot frame @@ -48,7 +48,7 @@ def test_jacobian_follower(): rob_tf = pk.Transform3d(pos=pos, rot=rot, device=device) # world frame goal - M = 100 + M = 10 # generate random goal positions goal_pos = torch.rand(M, 3, device=device) * 0.5 # also generate random goal rotations @@ -100,11 +100,17 @@ def test_jacobian_follower(): goalId = p.createMultiBody(baseMass=0, baseVisualShapeIndex=visId, basePosition=goal_pos[goal_num].cpu().numpy(), baseOrientation=xyzw.cpu().numpy()) + + solutions = sol.solutions[goal_num] + # sort based on if they converged + converged = sol.converged[goal_num] + idx = torch.argsort(converged.to(int), descending=True) + solutions = solutions[idx] + # print how many retries converged for this one - print("Goal %d converged %d / %d" % ( - goal_num, sol.converged[goal_num].sum(), sol.converged[goal_num].numel())) + print("Goal %d converged %d / %d" % (goal_num, converged.sum(), converged.numel())) - for i, q in enumerate(sol.solutions[goal_num]): + for i, q in enumerate(solutions): if i > show_max_num_retries_per_goal: break for dof in range(q.shape[0]): From 90fef3c6e44f7149a54086fdc76e9a442bba4d35 Mon Sep 17 00:00:00 2001 From: Sheng Zhong Date: Wed, 1 Nov 2023 18:37:49 -0400 Subject: [PATCH 08/33] Implement backtracking line search --- src/pytorch_kinematics/ik.py | 84 +++++++++++++++++++++++++++----- tests/test_inverse_kinematics.py | 1 + 2 files changed, 74 insertions(+), 11 deletions(-) diff --git a/src/pytorch_kinematics/ik.py b/src/pytorch_kinematics/ik.py index ade50b7..db93f96 100644 --- a/src/pytorch_kinematics/ik.py +++ b/src/pytorch_kinematics/ik.py @@ -73,6 +73,45 @@ def config_sampling_method(num_configs): return config_sampling_method +class LineSearch: + def do_line_search(self, chain, q, dq, target_pos, target_rot_rpy, initial_dx): + raise NotImplementedError() + + +class BacktrackingLineSearch(LineSearch): + def __init__(self, max_lr=2.0, decrease_factor=0.5, max_iterations=5, sufficient_decrease=0.01): + self.initial_lr = max_lr + self.decrease_factor = decrease_factor + self.max_iterations = max_iterations + self.sufficient_decrease = sufficient_decrease + + def do_line_search(self, chain, q, dq, target_pos, target_rot_rpy, initial_dx): + N = target_pos.shape[0] + NM = q.shape[0] + M = NM // N + lr = torch.ones(NM, device=q.device) * self.initial_lr + err = initial_dx.squeeze().norm(dim=-1) + for i in 
range(self.max_iterations): + # try stepping with this learning rate + q_new = q + lr.unsqueeze(1) * dq + # evaluate the error + m = chain.forward_kinematics(q_new).get_matrix() + m = m.view(-1, M, 4, 4) + dx, pos_diff, rot_diff = delta_pose(m, target_pos, target_rot_rpy) + err_new = dx.squeeze().norm(dim=-1) + # check if it's better + improvement = err - err_new + improved = improvement > self.sufficient_decrease + # if it's better, we're done for those + # TODO mask out the ones that are better and stop considering them + # if it's not better, reduce the learning rate + lr[~improved] *= self.decrease_factor + + improvement = improvement.reshape(-1, M) + improvement = improvement.mean(dim=1) + return lr, improvement + + class InverseKinematics: """Jacobian follower based inverse kinematics solver""" @@ -81,7 +120,8 @@ def __init__(self, serial_chain: SerialChain, initial_configs: Optional[torch.Tensor] = None, num_retries: Optional[int] = None, joint_limits: Optional[torch.Tensor] = None, config_sampling_method: Union[str, Callable[[int], torch.Tensor]] = "uniform", - max_iterations: int = 100, lr: float = 0.5, + max_iterations: int = 50, + lr: float = 0.5, line_search: Optional[LineSearch] = None, regularlization: float = 1e-9, debug=False, early_stopping_any_converged=False, @@ -108,6 +148,7 @@ def __init__(self, serial_chain: SerialChain, self.lr = lr self.regularlization = regularlization self.optimizer_method = optimizer_method + self.line_search = line_search self.pos_tolerance = pos_tolerance self.rot_tolerance = rot_tolerance @@ -149,6 +190,25 @@ def solve(self, target_poses: Transform3d) -> IKSolution: raise NotImplementedError() +def delta_pose(m: torch.tensor, target_pos, target_rot_rpy): + """ + Determine the error in position and rotation between the given poses and the target poses + + :param m: (N x M x 4 x 4) tensor of homogenous transforms + :param target_pos: + :param target_rot_rpy: + :return: (N*M, 6, 1) tensor of delta pose (dx, dy, dz, droll, dpitch, dyaw) + """ + pos_diff = target_pos.unsqueeze(1) - m[:, :, :3, 3] + pos_diff = pos_diff.view(-1, 3, 1) + rot_diff = target_rot_rpy.unsqueeze(1) - rotation_conversions.matrix_to_euler_angles(m[:, :, :3, :3], + "XYZ") + rot_diff = rot_diff.view(-1, 3, 1) + + dx = torch.cat((pos_diff, rot_diff), dim=1) + return dx, pos_diff, rot_diff + + class PseudoInverseIK(InverseKinematics): def solve(self, target_poses: Transform3d) -> IKSolution: target = target_poses.get_matrix() @@ -182,34 +242,36 @@ def solve(self, target_poses: Transform3d) -> IKSolution: optimizer = torch.optim.Adam([q], lr=self.lr) for i in range(self.max_iterations): with torch.no_grad(): - # TODO early termination when below tolerance # compute forward kinematics # fk = self.chain.forward_kinematics(q) # N x 6 x DOF J, m = self.chain.jacobian(q, ret_eef_pose=True) # unflatten to broadcast with goal m = m.view(-1, self.num_retries, 4, 4) + dx, pos_diff, rot_diff = delta_pose(m, target_pos, target_rot_rpy) - pos_diff = target_pos.unsqueeze(1) - m[:, :, :3, 3] - pos_diff = pos_diff.view(-1, 3, 1) - rot_diff = target_rot_rpy.unsqueeze(1) - rotation_conversions.matrix_to_euler_angles(m[:, :, :3, :3], - "XYZ") - rot_diff = rot_diff.view(-1, 3, 1) - - dx = torch.cat((pos_diff, rot_diff), dim=1) tmpA = J @ J.transpose(1, 2) + self.regularlization * torch.eye(6, device=self.device, dtype=self.dtype) A = torch.linalg.solve(tmpA, dx) dq = J.transpose(1, 2) @ A dq = dq.squeeze(2) + improvement = None if optimizer is not None: q.grad = -dq optimizer.step() 
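                # q.grad was set to -dq above, so step() just moved q along the damped least-squares direction; clear the hand-set gradient before the next iteration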
optimizer.zero_grad() else: - q = q + self.lr * dq + with torch.no_grad(): + if self.line_search is not None: + lr, improvement = self.line_search.do_line_search(self.chain, q, dq, target_pos, target_rot_rpy, + dx) + lr = lr.unsqueeze(1) + else: + lr = self.lr + q = q + lr * dq with torch.no_grad(): + # TODO use improved to determine early termination due to lack of improvement if self.early_stopping_any_converged: converged_any = sol.update(q, pos_diff, rot_diff, self.pos_tolerance, self.rot_tolerance, use_remaining=False) @@ -222,7 +284,7 @@ def solve(self, target_poses: Transform3d) -> IKSolution: if self.debug: pos_errors.append(pos_diff.reshape(-1, 3).norm(dim=1)) - rot_errors.append(rot_diff.norm(dim=(1, 2))) + rot_errors.append(rot_diff.reshape(-1, 3).norm(dim=1)) if self.debug: # errors diff --git a/tests/test_inverse_kinematics.py b/tests/test_inverse_kinematics.py index 93b379e..864faf9 100644 --- a/tests/test_inverse_kinematics.py +++ b/tests/test_inverse_kinematics.py @@ -40,6 +40,7 @@ def test_jacobian_follower(): joints_high = torch.tensor([170, 120, 170, 120, 170, 120, 175], device=device) joint_limits = torch.stack((-joints_high, joints_high), dim=-1) * math.pi / 180.0 ik = pk.PseudoInverseIK(chain, max_iterations=30, num_retries=1000, joint_limits=joint_limits, + early_stopping_any_converged=True, lr=0.5) # robot frame From b7148c7bfdc456316e99fe5d8a82b9fc53df5321 Mon Sep 17 00:00:00 2001 From: Sheng Zhong Date: Wed, 1 Nov 2023 19:53:45 -0400 Subject: [PATCH 09/33] Implement no-improvement based early termination --- src/pytorch_kinematics/ik.py | 123 +++++++++++++++++++++++-------- tests/test_inverse_kinematics.py | 2 + 2 files changed, 96 insertions(+), 29 deletions(-) diff --git a/src/pytorch_kinematics/ik.py b/src/pytorch_kinematics/ik.py index db93f96..767f7a2 100644 --- a/src/pytorch_kinematics/ik.py +++ b/src/pytorch_kinematics/ik.py @@ -9,11 +9,14 @@ class IKSolution: - def __init__(self, dof, num_problems, num_retries, device="cpu"): + def __init__(self, dof, num_problems, num_retries, pos_tolerance, rot_tolerance, device="cpu"): self.device = device self.num_problems = num_problems self.num_retries = num_retries self.dof = dof + self.pos_tolerance = pos_tolerance + self.rot_tolerance = rot_tolerance + M = num_problems # N x DOF tensor of joint angles; if converged[i] is False, then solutions[i] is undefined self.solutions = torch.zeros((M, self.num_retries, self.dof), device=self.device) @@ -34,33 +37,43 @@ def __init__(self, dof, num_problems, num_retries, device="cpu"): self.converged_rot_any = torch.zeros_like(self.remaining) self.converged_any = torch.zeros_like(self.remaining) - def update(self, q: torch.tensor, pos_diff: torch.tensor, rot_diff: torch.tensor, pos_tolerance, rot_tolerance, - use_remaining=False): - err_pos = pos_diff.reshape(-1, self.num_retries, 3).norm(dim=-1) - err_rot = rot_diff.reshape(-1, self.num_retries, 3).norm(dim=-1) - converged_pos = err_pos < pos_tolerance - converged_rot = err_rot < rot_tolerance + def update_remaining_with_keep_mask(self, keep: torch.tensor): + _r = self.remaining.clone() + self.remaining[_r] = _r[_r] & keep + this_masked = _r ^ self.remaining + return this_masked + + def update(self, q: torch.tensor, err: torch.tensor, + use_remaining=False, keep_mask=None): + err = err.reshape(-1, self.num_retries, 6) + err_pos = err[..., :3].norm(dim=-1) + err_rot = err[..., 3:].norm(dim=-1) + converged_pos = err_pos < self.pos_tolerance + converged_rot = err_rot < self.rot_tolerance converged = converged_pos & converged_rot 
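        # a problem is considered solved as soon as any one of its retries meets both the position and rotation tolerances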
converged_any = converged.any(dim=1) + if keep_mask is None: + keep_mask = ~converged_any + # stop considering problems where any converged qq = q.reshape(-1, self.num_retries, self.dof) if use_remaining: this_converged = self.remaining - converged_any[:] = True + keep_mask[:] = False else: - _r = self.remaining.clone() - self.remaining[_r] = _r[_r] & ~converged_any - this_converged = _r ^ self.remaining - - self.solutions[this_converged] = qq[converged_any] - self.err_pos[this_converged] = err_pos[converged_any] - self.err_rot[this_converged] = err_rot[converged_any] - self.converged_pos[this_converged] = converged_pos[converged_any] - self.converged_rot[this_converged] = converged_rot[converged_any] - self.converged[this_converged] = converged[converged_any] - self.converged_any[this_converged] = converged_any[converged_any] + # those that have converged are no longer remaining + this_converged = self.update_remaining_with_keep_mask(keep_mask) + + done = ~keep_mask + self.solutions[this_converged] = qq[done] + self.err_pos[this_converged] = err_pos[done] + self.err_rot[this_converged] = err_rot[done] + self.converged_pos[this_converged] = converged_pos[done] + self.converged_rot[this_converged] = converged_rot[done] + self.converged[this_converged] = converged[done] + self.converged_any[this_converged] = converged_any[done] return converged_any @@ -125,6 +138,7 @@ def __init__(self, serial_chain: SerialChain, regularlization: float = 1e-9, debug=False, early_stopping_any_converged=False, + early_stopping_no_improvement=True, optimizer_method: Union[str, typing.Type[torch.optim.Optimizer]] = "sgd" ): """ @@ -143,6 +157,8 @@ def __init__(self, serial_chain: SerialChain, self.dof = len(joint_names) self.debug = debug self.early_stopping_any_converged = early_stopping_any_converged + self.early_stopping_no_improvement = early_stopping_no_improvement + self.past_improvement = None self.max_iterations = max_iterations self.lr = lr @@ -150,6 +166,9 @@ def __init__(self, serial_chain: SerialChain, self.optimizer_method = optimizer_method self.line_search = line_search + self.err = None + self.err_prev = None + self.pos_tolerance = pos_tolerance self.rot_tolerance = rot_tolerance self.initial_config = initial_configs @@ -209,6 +228,10 @@ def delta_pose(m: torch.tensor, target_pos, target_rot_rpy): return dx, pos_diff, rot_diff +def apply_mask(mask, *args): + return [a[mask] for a in args] + + class PseudoInverseIK(InverseKinematics): def solve(self, target_poses: Transform3d) -> IKSolution: target = target_poses.get_matrix() @@ -220,7 +243,7 @@ def solve(self, target_poses: Transform3d) -> IKSolution: # convert target rot to desired rotation about x,y,z target_rot_rpy = rotation_conversions.matrix_to_euler_angles(target[:, :3, :3], "XYZ") - sol = IKSolution(self.dof, M, self.num_retries, device=self.device) + sol = IKSolution(self.dof, M, self.num_retries, self.pos_tolerance, self.rot_tolerance, device=self.device) q = self.initial_config if q.numel() == M * self.dof * self.num_retries: @@ -242,6 +265,9 @@ def solve(self, target_poses: Transform3d) -> IKSolution: optimizer = torch.optim.Adam([q], lr=self.lr) for i in range(self.max_iterations): with torch.no_grad(): + # early termination if we're out of problems to solve + if q.numel() == 0: + break # compute forward kinematics # fk = self.chain.forward_kinematics(q) # N x 6 x DOF @@ -271,16 +297,54 @@ def solve(self, target_poses: Transform3d) -> IKSolution: q = q + lr * dq with torch.no_grad(): - # TODO use improved to determine early 
termination due to lack of improvement + self.err_prev = self.err + self.err_all = dx.squeeze() + self.err = self.err_all.norm(dim=-1) + + def apply_mask_to_all(mask): + nonlocal q, target_pos, target_rot_rpy, improvement + q, target_pos, target_rot_rpy = apply_mask(mask, + q.reshape(-1, + self.num_retries, + self.dof), + target_pos, + target_rot_rpy, + ) + q = q.reshape(-1, self.dof) + if improvement is not None: + improvement = improvement[mask] + if self.err_prev is not None: + self.err_prev = self.err_prev.reshape(-1, self.num_retries) + self.err_prev = self.err_prev[mask] + self.err_prev = self.err_prev.reshape(-1) + if self.past_improvement is not None: + self.past_improvement = self.past_improvement[mask] + self.err = self.err.reshape(-1, self.num_retries) + self.err = self.err[mask] + self.err = self.err.reshape(-1) + self.err_all = self.err_all.reshape(-1, self.num_retries, 6) + self.err_all = self.err_all[mask] + self.err_all = self.err_all.reshape(-1, 6) + + if improvement is None and self.err_prev is not None: + improvement = self.err_prev - self.err + improvement = improvement.reshape(-1, self.num_retries) + improvement = improvement.mean(dim=1) + if self.early_stopping_any_converged: - converged_any = sol.update(q, pos_diff, rot_diff, self.pos_tolerance, self.rot_tolerance, - use_remaining=False) # stop considering problems where any converged - qq = q.reshape(-1, self.num_retries, self.dof) - q = qq[~converged_any] - q = q.reshape(-1, self.dof) - target_pos = target_pos[~converged_any] - target_rot_rpy = target_rot_rpy[~converged_any] + converged_any = sol.update(q, self.err_all, use_remaining=False) + apply_mask_to_all(~converged_any) + + if self.early_stopping_no_improvement: + if self.past_improvement is not None: + # average improvement of this and before + avg_improvement = (improvement + self.past_improvement) / 2 + enough_improvement = avg_improvement > 0.0 + # stop working on those that we can't improve + sol.update(q, self.err_all, use_remaining=False, keep_mask=enough_improvement) + apply_mask_to_all(enough_improvement) + self.past_improvement = improvement if self.debug: pos_errors.append(pos_diff.reshape(-1, 3).norm(dim=1)) @@ -307,5 +371,6 @@ def solve(self, target_poses: Transform3d) -> IKSolution: ax[1].set_ylabel("rotation error") plt.show() - sol.update(q, pos_diff, rot_diff, self.pos_tolerance, self.rot_tolerance, use_remaining=True) + if i == self.max_iterations - 1: + sol.update(q, self.err_all, use_remaining=True) return sol diff --git a/tests/test_inverse_kinematics.py b/tests/test_inverse_kinematics.py index 864faf9..a2e4b65 100644 --- a/tests/test_inverse_kinematics.py +++ b/tests/test_inverse_kinematics.py @@ -41,6 +41,8 @@ def test_jacobian_follower(): joint_limits = torch.stack((-joints_high, joints_high), dim=-1) * math.pi / 180.0 ik = pk.PseudoInverseIK(chain, max_iterations=30, num_retries=1000, joint_limits=joint_limits, early_stopping_any_converged=True, + early_stopping_no_improvement=True, + line_search=pk.BacktrackingLineSearch(), lr=0.5) # robot frame From d9125262c6970234efaaf12333be6fd03a77c073 Mon Sep 17 00:00:00 2001 From: Sheng Zhong Date: Fri, 3 Nov 2023 11:59:00 -0400 Subject: [PATCH 10/33] Move masking function out of loop --- src/pytorch_kinematics/ik.py | 54 +++++++++++++++++------------------- 1 file changed, 26 insertions(+), 28 deletions(-) diff --git a/src/pytorch_kinematics/ik.py b/src/pytorch_kinematics/ik.py index 767f7a2..2330453 100644 --- a/src/pytorch_kinematics/ik.py +++ b/src/pytorch_kinematics/ik.py @@ -43,8 +43,7 
@@ def update_remaining_with_keep_mask(self, keep: torch.tensor): this_masked = _r ^ self.remaining return this_masked - def update(self, q: torch.tensor, err: torch.tensor, - use_remaining=False, keep_mask=None): + def update(self, q: torch.tensor, err: torch.tensor, use_remaining=False, keep_mask=None): err = err.reshape(-1, self.num_retries, 6) err_pos = err[..., :3].norm(dim=-1) err_rot = err[..., 3:].norm(dim=-1) @@ -234,6 +233,31 @@ def apply_mask(mask, *args): class PseudoInverseIK(InverseKinematics): def solve(self, target_poses: Transform3d) -> IKSolution: + def apply_mask_to_all(mask): + nonlocal q, target_pos, target_rot_rpy, improvement + + q = q.reshape(-1, self.num_retries, self.dof) + q = q[mask] + q = q.reshape(-1, self.dof) + + target_pos = target_pos[mask] + target_rot_rpy = target_rot_rpy[mask] + + if improvement is not None: + improvement = improvement[mask] + if self.err_prev is not None: + self.err_prev = self.err_prev.reshape(-1, self.num_retries) + self.err_prev = self.err_prev[mask] + self.err_prev = self.err_prev.reshape(-1) + if self.past_improvement is not None: + self.past_improvement = self.past_improvement[mask] + self.err = self.err.reshape(-1, self.num_retries) + self.err = self.err[mask] + self.err = self.err.reshape(-1) + self.err_all = self.err_all.reshape(-1, self.num_retries, 6) + self.err_all = self.err_all[mask] + self.err_all = self.err_all.reshape(-1, 6) + target = target_poses.get_matrix() M = target.shape[0] @@ -269,7 +293,6 @@ def solve(self, target_poses: Transform3d) -> IKSolution: if q.numel() == 0: break # compute forward kinematics - # fk = self.chain.forward_kinematics(q) # N x 6 x DOF J, m = self.chain.jacobian(q, ret_eef_pose=True) # unflatten to broadcast with goal @@ -301,31 +324,6 @@ def solve(self, target_poses: Transform3d) -> IKSolution: self.err_all = dx.squeeze() self.err = self.err_all.norm(dim=-1) - def apply_mask_to_all(mask): - nonlocal q, target_pos, target_rot_rpy, improvement - q, target_pos, target_rot_rpy = apply_mask(mask, - q.reshape(-1, - self.num_retries, - self.dof), - target_pos, - target_rot_rpy, - ) - q = q.reshape(-1, self.dof) - if improvement is not None: - improvement = improvement[mask] - if self.err_prev is not None: - self.err_prev = self.err_prev.reshape(-1, self.num_retries) - self.err_prev = self.err_prev[mask] - self.err_prev = self.err_prev.reshape(-1) - if self.past_improvement is not None: - self.past_improvement = self.past_improvement[mask] - self.err = self.err.reshape(-1, self.num_retries) - self.err = self.err[mask] - self.err = self.err.reshape(-1) - self.err_all = self.err_all.reshape(-1, self.num_retries, 6) - self.err_all = self.err_all[mask] - self.err_all = self.err_all.reshape(-1, 6) - if improvement is None and self.err_prev is not None: improvement = self.err_prev - self.err improvement = improvement.reshape(-1, self.num_retries) From f5d47500f0b2e6c2a8e9cd61ad7edfcdd9e0cfa2 Mon Sep 17 00:00:00 2001 From: Sheng Zhong Date: Mon, 6 Nov 2023 17:47:47 -0500 Subject: [PATCH 11/33] Remove masking that wasn't speeding up the whole process --- src/pytorch_kinematics/ik.py | 59 +++++++------------------------- tests/test_inverse_kinematics.py | 2 +- 2 files changed, 14 insertions(+), 47 deletions(-) diff --git a/src/pytorch_kinematics/ik.py b/src/pytorch_kinematics/ik.py index 2330453..bc99b4e 100644 --- a/src/pytorch_kinematics/ik.py +++ b/src/pytorch_kinematics/ik.py @@ -38,10 +38,8 @@ def __init__(self, dof, num_problems, num_retries, pos_tolerance, rot_tolerance, self.converged_any = 
torch.zeros_like(self.remaining) def update_remaining_with_keep_mask(self, keep: torch.tensor): - _r = self.remaining.clone() - self.remaining[_r] = _r[_r] & keep - this_masked = _r ^ self.remaining - return this_masked + self.remaining = self.remaining & keep + return self.remaining def update(self, q: torch.tensor, err: torch.tensor, use_remaining=False, keep_mask=None): err = err.reshape(-1, self.num_retries, 6) @@ -58,21 +56,17 @@ def update(self, q: torch.tensor, err: torch.tensor, use_remaining=False, keep_m # stop considering problems where any converged qq = q.reshape(-1, self.num_retries, self.dof) - if use_remaining: - this_converged = self.remaining - keep_mask[:] = False - else: + if not use_remaining: # those that have converged are no longer remaining - this_converged = self.update_remaining_with_keep_mask(keep_mask) + self.update_remaining_with_keep_mask(keep_mask) - done = ~keep_mask - self.solutions[this_converged] = qq[done] - self.err_pos[this_converged] = err_pos[done] - self.err_rot[this_converged] = err_rot[done] - self.converged_pos[this_converged] = converged_pos[done] - self.converged_rot[this_converged] = converged_rot[done] - self.converged[this_converged] = converged[done] - self.converged_any[this_converged] = converged_any[done] + self.solutions = qq + self.err_pos = err_pos + self.err_rot = err_rot + self.converged_pos = converged_pos + self.converged_rot = converged_rot + self.converged = converged + self.converged_any = converged_any return converged_any @@ -233,31 +227,6 @@ def apply_mask(mask, *args): class PseudoInverseIK(InverseKinematics): def solve(self, target_poses: Transform3d) -> IKSolution: - def apply_mask_to_all(mask): - nonlocal q, target_pos, target_rot_rpy, improvement - - q = q.reshape(-1, self.num_retries, self.dof) - q = q[mask] - q = q.reshape(-1, self.dof) - - target_pos = target_pos[mask] - target_rot_rpy = target_rot_rpy[mask] - - if improvement is not None: - improvement = improvement[mask] - if self.err_prev is not None: - self.err_prev = self.err_prev.reshape(-1, self.num_retries) - self.err_prev = self.err_prev[mask] - self.err_prev = self.err_prev.reshape(-1) - if self.past_improvement is not None: - self.past_improvement = self.past_improvement[mask] - self.err = self.err.reshape(-1, self.num_retries) - self.err = self.err[mask] - self.err = self.err.reshape(-1) - self.err_all = self.err_all.reshape(-1, self.num_retries, 6) - self.err_all = self.err_all[mask] - self.err_all = self.err_all.reshape(-1, 6) - target = target_poses.get_matrix() M = target.shape[0] @@ -290,7 +259,7 @@ def apply_mask_to_all(mask): for i in range(self.max_iterations): with torch.no_grad(): # early termination if we're out of problems to solve - if q.numel() == 0: + if not sol.remaining.any(): break # compute forward kinematics # N x 6 x DOF @@ -331,8 +300,7 @@ def apply_mask_to_all(mask): if self.early_stopping_any_converged: # stop considering problems where any converged - converged_any = sol.update(q, self.err_all, use_remaining=False) - apply_mask_to_all(~converged_any) + sol.update(q, self.err_all, use_remaining=False) if self.early_stopping_no_improvement: if self.past_improvement is not None: @@ -341,7 +309,6 @@ def apply_mask_to_all(mask): enough_improvement = avg_improvement > 0.0 # stop working on those that we can't improve sol.update(q, self.err_all, use_remaining=False, keep_mask=enough_improvement) - apply_mask_to_all(enough_improvement) self.past_improvement = improvement if self.debug: diff --git a/tests/test_inverse_kinematics.py 
b/tests/test_inverse_kinematics.py index a2e4b65..0687671 100644 --- a/tests/test_inverse_kinematics.py +++ b/tests/test_inverse_kinematics.py @@ -42,7 +42,7 @@ def test_jacobian_follower(): ik = pk.PseudoInverseIK(chain, max_iterations=30, num_retries=1000, joint_limits=joint_limits, early_stopping_any_converged=True, early_stopping_no_improvement=True, - line_search=pk.BacktrackingLineSearch(), + # line_search=pk.BacktrackingLineSearch(), lr=0.5) # robot frame From 7e956639a5c8ae7512c05070899602c52e52d1e6 Mon Sep 17 00:00:00 2001 From: Sheng Zhong Date: Mon, 6 Nov 2023 17:57:48 -0500 Subject: [PATCH 12/33] Early termination for line search --- src/pytorch_kinematics/ik.py | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/src/pytorch_kinematics/ik.py b/src/pytorch_kinematics/ik.py index bc99b4e..abc5522 100644 --- a/src/pytorch_kinematics/ik.py +++ b/src/pytorch_kinematics/ik.py @@ -85,7 +85,7 @@ def do_line_search(self, chain, q, dq, target_pos, target_rot_rpy, initial_dx): class BacktrackingLineSearch(LineSearch): - def __init__(self, max_lr=2.0, decrease_factor=0.5, max_iterations=5, sufficient_decrease=0.01): + def __init__(self, max_lr=1.0, decrease_factor=0.5, max_iterations=5, sufficient_decrease=0.01): self.initial_lr = max_lr self.decrease_factor = decrease_factor self.max_iterations = max_iterations @@ -97,7 +97,10 @@ def do_line_search(self, chain, q, dq, target_pos, target_rot_rpy, initial_dx): M = NM // N lr = torch.ones(NM, device=q.device) * self.initial_lr err = initial_dx.squeeze().norm(dim=-1) + remaining = torch.ones(NM, dtype=torch.bool, device=q.device) for i in range(self.max_iterations): + if not remaining.any(): + break # try stepping with this learning rate q_new = q + lr.unsqueeze(1) * dq # evaluate the error @@ -109,9 +112,9 @@ def do_line_search(self, chain, q, dq, target_pos, target_rot_rpy, initial_dx): improvement = err - err_new improved = improvement > self.sufficient_decrease # if it's better, we're done for those - # TODO mask out the ones that are better and stop considering them # if it's not better, reduce the learning rate lr[~improved] *= self.decrease_factor + remaining = remaining & ~improved improvement = improvement.reshape(-1, M) improvement = improvement.mean(dim=1) From 60c11848adb553792a19b3c9895c1b0feaac7c65 Mon Sep 17 00:00:00 2001 From: Sheng Zhong Date: Mon, 6 Nov 2023 18:11:52 -0500 Subject: [PATCH 13/33] Track iterations in IK results --- src/pytorch_kinematics/ik.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/src/pytorch_kinematics/ik.py b/src/pytorch_kinematics/ik.py index abc5522..c9f6581 100644 --- a/src/pytorch_kinematics/ik.py +++ b/src/pytorch_kinematics/ik.py @@ -10,6 +10,7 @@ class IKSolution: def __init__(self, dof, num_problems, num_retries, pos_tolerance, rot_tolerance, device="cpu"): + self.iterations = 0 self.device = device self.num_problems = num_problems self.num_retries = num_retries @@ -264,6 +265,7 @@ def solve(self, target_poses: Transform3d) -> IKSolution: # early termination if we're out of problems to solve if not sol.remaining.any(): break + sol.iterations += 1 # compute forward kinematics # N x 6 x DOF J, m = self.chain.jacobian(q, ret_eef_pose=True) From 61ed39cc390ba311ee28318acac82cb618927f9d Mon Sep 17 00:00:00 2001 From: Sheng Zhong Date: Mon, 6 Nov 2023 18:20:51 -0500 Subject: [PATCH 14/33] Ignore non-remaining problems in line search --- src/pytorch_kinematics/ik.py | 13 +++++++++---- tests/test_inverse_kinematics.py | 20 +++++++++++--------- 2 files changed, 20 
insertions(+), 13 deletions(-) diff --git a/src/pytorch_kinematics/ik.py b/src/pytorch_kinematics/ik.py index c9f6581..d95f0fe 100644 --- a/src/pytorch_kinematics/ik.py +++ b/src/pytorch_kinematics/ik.py @@ -81,7 +81,7 @@ def config_sampling_method(num_configs): class LineSearch: - def do_line_search(self, chain, q, dq, target_pos, target_rot_rpy, initial_dx): + def do_line_search(self, chain, q, dq, target_pos, target_rot_rpy, initial_dx, problem_remaining=None): raise NotImplementedError() @@ -92,13 +92,18 @@ def __init__(self, max_lr=1.0, decrease_factor=0.5, max_iterations=5, sufficient self.max_iterations = max_iterations self.sufficient_decrease = sufficient_decrease - def do_line_search(self, chain, q, dq, target_pos, target_rot_rpy, initial_dx): + def do_line_search(self, chain, q, dq, target_pos, target_rot_rpy, initial_dx, problem_remaining=None): N = target_pos.shape[0] NM = q.shape[0] M = NM // N lr = torch.ones(NM, device=q.device) * self.initial_lr err = initial_dx.squeeze().norm(dim=-1) - remaining = torch.ones(NM, dtype=torch.bool, device=q.device) + if problem_remaining is None: + problem_remaining = torch.ones(N, dtype=torch.bool, device=q.device) + remaining = torch.ones((N, M), dtype=torch.bool, device=q.device) + # don't care about the ones that are no longer remaining + remaining[~problem_remaining] = False + remaining = remaining.reshape(-1) for i in range(self.max_iterations): if not remaining.any(): break @@ -287,7 +292,7 @@ def solve(self, target_poses: Transform3d) -> IKSolution: with torch.no_grad(): if self.line_search is not None: lr, improvement = self.line_search.do_line_search(self.chain, q, dq, target_pos, target_rot_rpy, - dx) + dx, problem_remaining=sol.remaining) lr = lr.unsqueeze(1) else: lr = self.lr diff --git a/tests/test_inverse_kinematics.py b/tests/test_inverse_kinematics.py index 0687671..50e059c 100644 --- a/tests/test_inverse_kinematics.py +++ b/tests/test_inverse_kinematics.py @@ -37,21 +37,13 @@ def test_jacobian_follower(): chain = pk.build_serial_chain_from_urdf(open(full_urdf).read(), "lbr_iiwa_link_7") chain = chain.to(device=device) - joints_high = torch.tensor([170, 120, 170, 120, 170, 120, 175], device=device) - joint_limits = torch.stack((-joints_high, joints_high), dim=-1) * math.pi / 180.0 - ik = pk.PseudoInverseIK(chain, max_iterations=30, num_retries=1000, joint_limits=joint_limits, - early_stopping_any_converged=True, - early_stopping_no_improvement=True, - # line_search=pk.BacktrackingLineSearch(), - lr=0.5) - # robot frame pos = torch.tensor([0.0, 0.0, 0.0], device=device) rot = torch.tensor([0.0, 0.0, 0.0], device=device) rob_tf = pk.Transform3d(pos=pos, rot=rot, device=device) # world frame goal - M = 10 + M = 100 # generate random goal positions goal_pos = torch.rand(M, 3, device=device) * 0.5 # also generate random goal rotations @@ -61,12 +53,22 @@ def test_jacobian_follower(): # transform to robot frame goal_in_rob_frame_tf = rob_tf.inverse().compose(goal_tf) + joints_high = torch.tensor([170, 120, 170, 120, 170, 120, 175], device=device) + joint_limits = torch.stack((-joints_high, joints_high), dim=-1) * math.pi / 180.0 + ik = pk.PseudoInverseIK(chain, max_iterations=30, num_retries=100, joint_limits=joint_limits, + early_stopping_any_converged=True, + early_stopping_no_improvement=True, + # line_search=pk.BacktrackingLineSearch(max_lr=0.2), + lr=0.2) + # do IK timer_start = timer() sol = ik.solve(goal_in_rob_frame_tf) timer_end = timer() print("IK took %f seconds" % (timer_end - timer_start)) print("IK converged 
number: %d / %d" % (sol.converged.sum(), sol.converged.numel())) + print("IK took %d iterations" % sol.iterations) + print("IK solved %d / %d goals" % (sol.converged_any.sum(), M)) # visualize everything if visualize: From 6ea8a795c73778ee36b034db2d9911305a7193ed Mon Sep 17 00:00:00 2001 From: Sheng Zhong Date: Tue, 21 Nov 2023 13:09:17 -0500 Subject: [PATCH 15/33] Add function to clean transform3d history and composition --- src/pytorch_kinematics/transforms/transform3d.py | 5 +++++ tests/test_inverse_kinematics.py | 4 ++-- 2 files changed, 7 insertions(+), 2 deletions(-) diff --git a/src/pytorch_kinematics/transforms/transform3d.py b/src/pytorch_kinematics/transforms/transform3d.py index 1651f0c..4a4d02c 100644 --- a/src/pytorch_kinematics/transforms/transform3d.py +++ b/src/pytorch_kinematics/transforms/transform3d.py @@ -228,6 +228,11 @@ def __repr__(self): rot = matrix_to_quaternion(m[:, :3, :3]) return "Transform3d(rot={}, pos={})".format(rot, pos).replace('\n ', '') + def clean(self): + self._matrix = self.get_matrix() + self._transforms = [] + return self + def compose(self, *others): """ Return a new Transform3d with the tranforms to compose stored as diff --git a/tests/test_inverse_kinematics.py b/tests/test_inverse_kinematics.py index 50e059c..3b9a95f 100644 --- a/tests/test_inverse_kinematics.py +++ b/tests/test_inverse_kinematics.py @@ -43,7 +43,7 @@ def test_jacobian_follower(): rob_tf = pk.Transform3d(pos=pos, rot=rot, device=device) # world frame goal - M = 100 + M = 10000 # generate random goal positions goal_pos = torch.rand(M, 3, device=device) * 0.5 # also generate random goal rotations @@ -55,7 +55,7 @@ def test_jacobian_follower(): joints_high = torch.tensor([170, 120, 170, 120, 170, 120, 175], device=device) joint_limits = torch.stack((-joints_high, joints_high), dim=-1) * math.pi / 180.0 - ik = pk.PseudoInverseIK(chain, max_iterations=30, num_retries=100, joint_limits=joint_limits, + ik = pk.PseudoInverseIK(chain, max_iterations=30, num_retries=10, joint_limits=joint_limits, early_stopping_any_converged=True, early_stopping_no_improvement=True, # line_search=pk.BacktrackingLineSearch(max_lr=0.2), From 5a19f940bf0c9c320e531b31943779fccb861425 Mon Sep 17 00:00:00 2001 From: Sheng Zhong Date: Mon, 27 Nov 2023 16:16:05 -0500 Subject: [PATCH 16/33] Fix parameter passing to jacobian calculation --- src/pytorch_kinematics/chain.py | 4 ++-- src/pytorch_kinematics/ik.py | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/src/pytorch_kinematics/chain.py b/src/pytorch_kinematics/chain.py index d837567..b69499d 100644 --- a/src/pytorch_kinematics/chain.py +++ b/src/pytorch_kinematics/chain.py @@ -458,10 +458,10 @@ def _generate_serial_chain_recurse(root_frame, end_frame_name): return [child] + frames return None - def jacobian(self, th, locations=None): + def jacobian(self, th, locations=None, **kwargs): if locations is not None: locations = tf.Transform3d(pos=locations) - return jacobian.calc_jacobian(self, th, tool=locations) + return jacobian.calc_jacobian(self, th, tool=locations, **kwargs) def forward_kinematics(self, th, end_only: bool = True): """ Like the base class, except `th` only needs to contain the joints in the SerialChain, not all joints. 
""" diff --git a/src/pytorch_kinematics/ik.py b/src/pytorch_kinematics/ik.py index d95f0fe..92a19c7 100644 --- a/src/pytorch_kinematics/ik.py +++ b/src/pytorch_kinematics/ik.py @@ -136,7 +136,7 @@ def __init__(self, serial_chain: SerialChain, joint_limits: Optional[torch.Tensor] = None, config_sampling_method: Union[str, Callable[[int], torch.Tensor]] = "uniform", max_iterations: int = 50, - lr: float = 0.5, line_search: Optional[LineSearch] = None, + lr: float = 0.2, line_search: Optional[LineSearch] = None, regularlization: float = 1e-9, debug=False, early_stopping_any_converged=False, From 4ffddff50bee6a0187d0224ef4222bc02b294329 Mon Sep 17 00:00:00 2001 From: Sheng Zhong Date: Mon, 27 Nov 2023 17:17:44 -0500 Subject: [PATCH 17/33] Improve no improvement early stopping criteria --- src/pytorch_kinematics/ik.py | 93 ++++++++++++++++++++------------ tests/test_inverse_kinematics.py | 5 +- 2 files changed, 62 insertions(+), 36 deletions(-) diff --git a/src/pytorch_kinematics/ik.py b/src/pytorch_kinematics/ik.py index 92a19c7..583257e 100644 --- a/src/pytorch_kinematics/ik.py +++ b/src/pytorch_kinematics/ik.py @@ -42,7 +42,7 @@ def update_remaining_with_keep_mask(self, keep: torch.tensor): self.remaining = self.remaining & keep return self.remaining - def update(self, q: torch.tensor, err: torch.tensor, use_remaining=False, keep_mask=None): + def update(self, q: torch.tensor, err: torch.tensor, use_keep_mask=True, keep_mask=None): err = err.reshape(-1, self.num_retries, 6) err_pos = err[..., :3].norm(dim=-1) err_rot = err[..., 3:].norm(dim=-1) @@ -57,7 +57,7 @@ def update(self, q: torch.tensor, err: torch.tensor, use_remaining=False, keep_m # stop considering problems where any converged qq = q.reshape(-1, self.num_retries, self.dof) - if not use_remaining: + if use_keep_mask: # those that have converged are no longer remaining self.update_remaining_with_keep_mask(keep_mask) @@ -132,7 +132,7 @@ class InverseKinematics: def __init__(self, serial_chain: SerialChain, pos_tolerance: float = 1e-3, rot_tolerance: float = 1e-2, - initial_configs: Optional[torch.Tensor] = None, num_retries: Optional[int] = None, + retry_configs: Optional[torch.Tensor] = None, num_retries: Optional[int] = None, joint_limits: Optional[torch.Tensor] = None, config_sampling_method: Union[str, Callable[[int], torch.Tensor]] = "uniform", max_iterations: int = 50, @@ -140,17 +140,32 @@ def __init__(self, serial_chain: SerialChain, regularlization: float = 1e-9, debug=False, early_stopping_any_converged=False, - early_stopping_no_improvement=True, + early_stopping_no_improvement="any", early_stopping_no_improvement_patience=2, optimizer_method: Union[str, typing.Type[torch.optim.Optimizer]] = "sgd" ): """ :param serial_chain: - :param pos_tolerance: - :param rot_tolerance: - :param initial_configs: - :param num_retries: + :param pos_tolerance: position tolerance in meters + :param rot_tolerance: rotation tolerance in radians + :param retry_configs: (M, DOF) tensor of initial configs to try for each problem; leave as None to sample + :param num_retries: number, M, of random initial configs to try for that problem :param joint_limits: (DOF, 2) tensor of joint limits (min, max) for each joint in radians - :param config_sampling_method: + :param config_sampling_method: either "uniform" or "gaussian" or a function that takes in the number of configs + :param max_iterations: maximum number of iterations to run + :param lr: learning rate + :param line_search: LineSearch object to use for line search + :param regularlization: 
regularization term to add to the Jacobian + :param debug: whether to print debug information + :param early_stopping_any_converged: whether to stop when any of the retries for a problem converged + :param early_stopping_no_improvement: {None, "all", "any", ratio} whether to stop when no improvement is made + (consecutive iterations no improvement in minimum error - number of consecutive iterations is the patience). + None means no early stopping from this, "all" means stop when all retries for that problem makes no improvement, + "any" means stop when any of the retries for that problem makes no improvement, and ratio means stop when + the ratio (between 0 and 1) of the number of retries that is making improvement falls below the ratio. + So "all" is equivalent to ratio=0.999, and "any" is equivalent to ratio=0.001 + :param early_stopping_no_improvement_patience: number of consecutive iterations with no improvement before + considering it no improvement + :param optimizer_method: either a string or a torch.optim.Optimizer class """ self.chain = serial_chain self.dtype = serial_chain.dtype @@ -160,7 +175,7 @@ def __init__(self, serial_chain: SerialChain, self.debug = debug self.early_stopping_any_converged = early_stopping_any_converged self.early_stopping_no_improvement = early_stopping_no_improvement - self.past_improvement = None + self.early_stopping_no_improvement_patience = early_stopping_no_improvement_patience self.max_iterations = max_iterations self.lr = lr @@ -169,21 +184,23 @@ def __init__(self, serial_chain: SerialChain, self.line_search = line_search self.err = None - self.err_prev = None + self.err_all = None + self.err_min = None + self.no_improve_counter = None self.pos_tolerance = pos_tolerance self.rot_tolerance = rot_tolerance - self.initial_config = initial_configs - if initial_configs is None and num_retries is None: + self.initial_config = retry_configs + if retry_configs is None and num_retries is None: raise ValueError("either initial_configs or num_retries must be specified") # sample initial configs instead self.config_sampling_method = config_sampling_method self.joint_limits = joint_limits - if initial_configs is None: + if retry_configs is None: self.initial_config = self.sample_configs(num_retries) else: - if initial_configs.shape[1] != self.dof: + if retry_configs.shape[1] != self.dof: raise ValueError("initial_configs must have shape (N, %d)" % self.dof) # could give a batch of initial configs self.num_retries = self.initial_config.shape[-2] @@ -299,27 +316,35 @@ def solve(self, target_poses: Transform3d) -> IKSolution: q = q + lr * dq with torch.no_grad(): - self.err_prev = self.err self.err_all = dx.squeeze() self.err = self.err_all.norm(dim=-1) + sol.update(q, self.err_all, use_keep_mask=self.early_stopping_any_converged) - if improvement is None and self.err_prev is not None: - improvement = self.err_prev - self.err - improvement = improvement.reshape(-1, self.num_retries) - improvement = improvement.mean(dim=1) - - if self.early_stopping_any_converged: - # stop considering problems where any converged - sol.update(q, self.err_all, use_remaining=False) - - if self.early_stopping_no_improvement: - if self.past_improvement is not None: - # average improvement of this and before - avg_improvement = (improvement + self.past_improvement) / 2 - enough_improvement = avg_improvement > 0.0 - # stop working on those that we can't improve - sol.update(q, self.err_all, use_remaining=False, keep_mask=enough_improvement) - self.past_improvement = improvement + if 
self.early_stopping_no_improvement is not None: + if self.no_improve_counter is None: + self.no_improve_counter = torch.zeros_like(self.err) + else: + if self.err_min is None: + self.err_min = self.err.clone() + else: + improved = self.err < self.err_min + self.err_min[improved] = self.err[improved] + + self.no_improve_counter[improved] = 0 + self.no_improve_counter[~improved] += 1 + + # those that haven't improved + could_improve = self.no_improve_counter <= self.early_stopping_no_improvement_patience + # consider problems, and only throw out those whose all retries cannot be improved + could_improve = could_improve.reshape(-1, self.num_retries) + if self.early_stopping_no_improvement == "all": + could_improve = could_improve.all(dim=1) + elif self.early_stopping_no_improvement == "any": + could_improve = could_improve.any(dim=1) + elif isinstance(self.early_stopping_no_improvement, float): + ratio_improved = could_improve.sum(dim=1) / self.num_retries + could_improve = ratio_improved > self.early_stopping_no_improvement + sol.update_remaining_with_keep_mask(could_improve) if self.debug: pos_errors.append(pos_diff.reshape(-1, 3).norm(dim=1)) @@ -347,5 +372,5 @@ def solve(self, target_poses: Transform3d) -> IKSolution: plt.show() if i == self.max_iterations - 1: - sol.update(q, self.err_all, use_remaining=True) + sol.update(q, self.err_all, use_keep_mask=False) return sol diff --git a/tests/test_inverse_kinematics.py b/tests/test_inverse_kinematics.py index 3b9a95f..cf1c8bd 100644 --- a/tests/test_inverse_kinematics.py +++ b/tests/test_inverse_kinematics.py @@ -43,7 +43,7 @@ def test_jacobian_follower(): rob_tf = pk.Transform3d(pos=pos, rot=rot, device=device) # world frame goal - M = 10000 + M = 1000 # generate random goal positions goal_pos = torch.rand(M, 3, device=device) * 0.5 # also generate random goal rotations @@ -57,8 +57,9 @@ def test_jacobian_follower(): joint_limits = torch.stack((-joints_high, joints_high), dim=-1) * math.pi / 180.0 ik = pk.PseudoInverseIK(chain, max_iterations=30, num_retries=10, joint_limits=joint_limits, early_stopping_any_converged=True, - early_stopping_no_improvement=True, + early_stopping_no_improvement="all", # line_search=pk.BacktrackingLineSearch(max_lr=0.2), + debug=False, lr=0.2) # do IK From 3e1f6145176212bdbbc6c9efe05a0592dcbef961 Mon Sep 17 00:00:00 2001 From: Sheng Zhong Date: Mon, 27 Nov 2023 17:20:34 -0500 Subject: [PATCH 18/33] Remove unnecessary method --- src/pytorch_kinematics/transforms/transform3d.py | 5 ----- 1 file changed, 5 deletions(-) diff --git a/src/pytorch_kinematics/transforms/transform3d.py b/src/pytorch_kinematics/transforms/transform3d.py index 4a4d02c..1651f0c 100644 --- a/src/pytorch_kinematics/transforms/transform3d.py +++ b/src/pytorch_kinematics/transforms/transform3d.py @@ -228,11 +228,6 @@ def __repr__(self): rot = matrix_to_quaternion(m[:, :3, :3]) return "Transform3d(rot={}, pos={})".format(rot, pos).replace('\n ', '') - def clean(self): - self._matrix = self.get_matrix() - self._transforms = [] - return self - def compose(self, *others): """ Return a new Transform3d with the tranforms to compose stored as From 08b47b69cf6c88a1f7803a257026c2d7b16493d5 Mon Sep 17 00:00:00 2001 From: Sheng Zhong Date: Tue, 30 Jan 2024 15:15:45 -0500 Subject: [PATCH 19/33] Ignore nan values when plotting --- .gitignore | 1 + src/pytorch_kinematics/ik.py | 5 ++++- 2 files changed, 5 insertions(+), 1 deletion(-) diff --git a/.gitignore b/.gitignore index 31df576..6fe76d3 100644 --- a/.gitignore +++ b/.gitignore @@ -1,4 +1,5 @@ .idea 
+*.mp4 *.so *.pkl *.egg-info diff --git a/src/pytorch_kinematics/ik.py b/src/pytorch_kinematics/ik.py index 583257e..0c200d7 100644 --- a/src/pytorch_kinematics/ik.py +++ b/src/pytorch_kinematics/ik.py @@ -356,7 +356,10 @@ def solve(self, target_poses: Transform3d) -> IKSolution: pos_e = torch.stack(pos_errors, dim=0).cpu() rot_e = torch.stack(rot_errors, dim=0).cpu() ax[0].set_ylim(0, 1.) - ax[1].set_ylim(0, rot_e.max().item() * 1.1) + # ignore nan + ignore = torch.isnan(rot_e) + axis_max = rot_e[~ignore].max().item() + ax[1].set_ylim(0, axis_max * 1.1) ax[0].set_xlim(0, self.max_iterations - 1) ax[1].set_xlim(0, self.max_iterations - 1) # draw at most 50 lines From a21ac2a90c840c2de0556d6f99f11654d30fb280 Mon Sep 17 00:00:00 2001 From: Sheng Zhong Date: Fri, 2 Feb 2024 10:34:27 -0500 Subject: [PATCH 20/33] Sample goal poses from feasible goals instead of random ones --- tests/test_inverse_kinematics.py | 34 +++++++++++++++++++------------- 1 file changed, 20 insertions(+), 14 deletions(-) diff --git a/tests/test_inverse_kinematics.py b/tests/test_inverse_kinematics.py index cf1c8bd..d292c58 100644 --- a/tests/test_inverse_kinematics.py +++ b/tests/test_inverse_kinematics.py @@ -44,18 +44,22 @@ def test_jacobian_follower(): # world frame goal M = 1000 - # generate random goal positions - goal_pos = torch.rand(M, 3, device=device) * 0.5 - # also generate random goal rotations - goal_rot = torch.rand(M, 3, device=device) * 2 * math.pi - goal_tf = pk.Transform3d(pos=goal_pos, rot=goal_rot, device=device) - - # transform to robot frame - goal_in_rob_frame_tf = rob_tf.inverse().compose(goal_tf) - - joints_high = torch.tensor([170, 120, 170, 120, 170, 120, 175], device=device) - joint_limits = torch.stack((-joints_high, joints_high), dim=-1) * math.pi / 180.0 - ik = pk.PseudoInverseIK(chain, max_iterations=30, num_retries=10, joint_limits=joint_limits, + # generate random goal joint angles (so these are all achievable) + # use the joint limits to generate random joint angles + lim = torch.tensor(chain.get_joint_limits(), device=device) + goal_q = torch.rand(M, 7, device=device) * (lim[1] - lim[0]) + lim[0] + + # get ee pose (in robot frame) + goal_in_rob_frame_tf = chain.forward_kinematics(goal_q) + + # transform to world frame for visualization + goal_tf = rob_tf.compose(goal_in_rob_frame_tf) + goal = goal_tf.get_matrix() + goal_pos = goal[..., :3, 3] + goal_rot = pk.matrix_to_euler_angles(goal[..., :3, :3], "XYZ") + + ik = pk.PseudoInverseIK(chain, max_iterations=30, num_retries=10, + joint_limits=lim.T, early_stopping_any_converged=True, early_stopping_no_improvement="all", # line_search=pk.BacktrackingLineSearch(max_lr=0.2), @@ -89,8 +93,10 @@ def test_jacobian_follower(): pos = m[0, :3, 3] rot = m[0, :3, :3] quat = pk.matrix_to_quaternion(rot) - armId = p.loadURDF(urdf, basePosition=pos.cpu().numpy(), baseOrientation=pk.wxyz_to_xyzw(quat).cpu().numpy(), - useFixedBase=True) + pos = pos.cpu().numpy() + rot = pk.wxyz_to_xyzw(quat).cpu().numpy() + armId = p.loadURDF(urdf, basePosition=pos, baseOrientation=rot, useFixedBase=True) + _make_robot_translucent(armId, alpha=0.6) # p.resetBasePositionAndOrientation(armId, [0, 0, 0], [0, 0, 0, 1]) # draw goal From 3a5697a151c37b1e81ff90f1ae97b75b9798bea3 Mon Sep 17 00:00:00 2001 From: Sheng Zhong Date: Mon, 12 Feb 2024 15:06:32 -0500 Subject: [PATCH 21/33] Add install dependency --- pyproject.toml | 1 + 1 file changed, 1 insertion(+) diff --git a/pyproject.toml b/pyproject.toml index 4f913be..02c0228 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ 
-46,6 +46,7 @@ dependencies = [
     'numpy',
     'pyyaml',
     'torch',
+    'matplotlib',
 ]
 
 [project.optional-dependencies]

From f7276928c3a5ca59f99d4661418e89e6c3abc917 Mon Sep 17 00:00:00 2001
From: Sheng Zhong
Date: Mon, 12 Feb 2024 15:08:14 -0500
Subject: [PATCH 22/33] Remove unused imports and global defines

---
 tests/test_inverse_kinematics.py | 3 ---
 1 file changed, 3 deletions(-)

diff --git a/tests/test_inverse_kinematics.py b/tests/test_inverse_kinematics.py
index d292c58..8b55de2 100644
--- a/tests/test_inverse_kinematics.py
+++ b/tests/test_inverse_kinematics.py
@@ -1,4 +1,3 @@
-import math
 import os
 from timeit import default_timer as timer
 
@@ -10,8 +9,6 @@
 import pybullet as p
 import pybullet_data
 
-TEST_DIR = os.path.dirname(__file__)
-
 visualize = True

From 90913bea1c51292860ec08c5c9afc5ca5b5f9368 Mon Sep 17 00:00:00 2001
From: Sheng Zhong
Date: Mon, 12 Feb 2024 17:15:28 -0500
Subject: [PATCH 23/33] Document damped least squares

---
 src/pytorch_kinematics/ik.py | 6 +++++-
 1 file changed, 5 insertions(+), 1 deletion(-)

diff --git a/src/pytorch_kinematics/ik.py b/src/pytorch_kinematics/ik.py
index 0c200d7..8deed6c 100644
--- a/src/pytorch_kinematics/ik.py
+++ b/src/pytorch_kinematics/ik.py
@@ -148,7 +148,7 @@ def __init__(self, serial_chain: SerialChain,
         :param pos_tolerance: position tolerance in meters
         :param rot_tolerance: rotation tolerance in radians
         :param retry_configs: (M, DOF) tensor of initial configs to try for each problem; leave as None to sample
-        :param num_retries: number, M, of random initial configs to try for that problem
+        :param num_retries: number, M, of random initial configs to try for that problem; implemented with batching
         :param joint_limits: (DOF, 2) tensor of joint limits (min, max) for each joint in radians
         :param config_sampling_method: either "uniform" or "gaussian" or a function that takes in the number of configs
@@ -295,8 +295,12 @@ def solve(self, target_poses: Transform3d) -> IKSolution:
                 m = m.view(-1, self.num_retries, 4, 4)
                 dx, pos_diff, rot_diff = delta_pose(m, target_pos, target_rot_rpy)
 
+                # damped least squares method
+                # JJ^T + lambda^2*I (lambda^2 is regularization)
                 tmpA = J @ J.transpose(1, 2) + self.regularlization * torch.eye(6, device=self.device, dtype=self.dtype)
+                # (JJ^T + lambda^2I) A = dx
                 A = torch.linalg.solve(tmpA, dx)
+                # dq = J^T (JJ^T + lambda^2I)^-1 dx
                 dq = J.transpose(1, 2) @ A
                 dq = dq.squeeze(2)
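
[Editor's note, not part of the series] The comments added in PATCH 23 spell out the damped
least squares (Levenberg-Marquardt style) update dq = J^T (J J^T + lambda^2 I)^-1 dx. Below is a
minimal, self-contained sketch of that single step; the function name dls_step and the random
J/dx are illustrative only, but the tensor shapes mirror the ones used in ik.py (B batched
problems, 6D pose error, DOF joints):

    import torch

    def dls_step(J, dx, lambda_sq=1e-9):
        # J: (B, 6, DOF) Jacobians, dx: (B, 6, 1) pose errors, lambda_sq: damping/regularization
        B, k, dof = J.shape
        eye = torch.eye(k, dtype=J.dtype, device=J.device)
        # solve (J J^T + lambda^2 I) A = dx instead of forming an explicit inverse
        A = torch.linalg.solve(J @ J.transpose(1, 2) + lambda_sq * eye, dx)
        # dq = J^T (J J^T + lambda^2 I)^-1 dx
        return J.transpose(1, 2) @ A

    J = torch.randn(4, 6, 7)
    dx = torch.randn(4, 6, 1)
    dq = dls_step(J, dx)
    print(dq.shape)  # (4, 7, 1); ik.py squeezes the last dim before q = q + lr * dq

The damping keeps the solve well-conditioned near singular configurations at the cost of a
slightly shortened step along weak directions, which is why the regularization default is tiny.
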
From 67b111ebde30b22755da46e202fa60771187913e Mon Sep 17 00:00:00 2001
From: Sheng Zhong
Date: Mon, 12 Feb 2024 18:06:19 -0500
Subject: [PATCH 24/33] Add SVD based IK solver

---
 src/pytorch_kinematics/ik.py | 46 +++++++++++++++++++++++++++++-----
 1 file changed, 40 insertions(+), 6 deletions(-)

diff --git a/src/pytorch_kinematics/ik.py b/src/pytorch_kinematics/ik.py
index 8deed6c..6a5f119 100644
--- a/src/pytorch_kinematics/ik.py
+++ b/src/pytorch_kinematics/ik.py
@@ -252,6 +252,19 @@ class PseudoInverseIK(InverseKinematics):
+    def compute_dq(self, J, dx):
+        # lambda^2*I (lambda^2 is regularization)
+        reg = self.regularlization * torch.eye(6, device=self.device, dtype=self.dtype)
+
+        # JJ^T + lambda^2*I (lambda^2 is regularization)
+        tmpA = J @ J.transpose(1, 2) + reg
+        # (JJ^T + lambda^2I) A = dx
+        # A = (JJ^T + lambda^2I)^-1 dx
+        A = torch.linalg.solve(tmpA, dx)
+        # dq = J^T (JJ^T + lambda^2I)^-1 dx
+        dq = J.transpose(1, 2) @ A
+        return dq
+
     def solve(self, target_poses: Transform3d) -> IKSolution:
         target = target_poses.get_matrix()
@@ -296,12 +309,8 @@ def solve(self, target_poses: Transform3d) -> IKSolution:
                 dx, pos_diff, rot_diff = delta_pose(m, target_pos, target_rot_rpy)
 
                 # damped least squares method
-                # JJ^T + lambda^2*I (lambda^2 is regularization)
-                tmpA = J @ J.transpose(1, 2) + self.regularlization * torch.eye(6, device=self.device, dtype=self.dtype)
-                # (JJ^T + lambda^2I) A = dx
-                A = torch.linalg.solve(tmpA, dx)
-                # dq = J^T (JJ^T + lambda^2I)^-1 dx
-                dq = J.transpose(1, 2) @ A
+                # lambda^2*I (lambda^2 is regularization)
+                dq = self.compute_dq(J, dx)
                 dq = dq.squeeze(2)
 
                 improvement = None
@@ -381,3 +390,28 @@ def solve(self, target_poses: Transform3d) -> IKSolution:
         if i == self.max_iterations - 1:
             sol.update(q, self.err_all, use_keep_mask=False)
         return sol
+
+
+class PseudoInverseIKWithSVD(PseudoInverseIK):
+    # generally slower, but allows for selective damping if needed
+    def compute_dq(self, J, dx):
+        # reg = self.regularlization * torch.eye(6, device=self.device, dtype=self.dtype)
+        U, D, Vh = torch.linalg.svd(J)
+        m = D.shape[1]
+
+        # tmpA = U @ (D @ D.transpose(1, 2) + reg) @ U.transpose(1, 2)
+        # singular_val = torch.diagonal(D)
+
+        denom = D ** 2 + self.regularlization
+        prod = D / denom
+        # J^T (JJ^T + lambda^2I)^-1 = V @ (D @ D^T + lambda^2I)^-1 @ U^T = sum_i (d_i / (d_i^2 + lambda^2) v_i @ u_i^T)
+        # should be equivalent to damped least squares
+        inverted = torch.diag_embed(prod)
+
+        # drop columns from V
+        Vh = Vh[:, :m, :]
+        total = Vh.transpose(1, 2) @ inverted @ U.transpose(1, 2)
+
+        # dq = J^T (JJ^T + lambda^2I)^-1 dx
+        dq = total @ dx
+        return dq

From d5a9788ab0ff3955c1b64cfc4b95cb073165993c Mon Sep 17 00:00:00 2001
From: Sheng Zhong
Date: Mon, 12 Feb 2024 18:41:27 -0500
Subject: [PATCH 25/33] Compare orientations in quaternion space to avoid discontinuities

Greatly improve convergence rate and total success chance
---
 src/pytorch_kinematics/ik.py | 30 +++++++++++++++++++-----------
 1 file changed, 19 insertions(+), 11 deletions(-)

diff --git a/src/pytorch_kinematics/ik.py b/src/pytorch_kinematics/ik.py
index 6a5f119..a46b1be 100644
--- a/src/pytorch_kinematics/ik.py
+++ b/src/pytorch_kinematics/ik.py
@@ -81,7 +81,7 @@ class LineSearch:
-    def do_line_search(self, chain, q, dq, target_pos, target_rot_rpy, initial_dx, problem_remaining=None):
+    def do_line_search(self, chain, q, dq, target_pos, target_wxyz, initial_dx, problem_remaining=None):
         raise NotImplementedError()
@@ -92,7 +92,7 @@ def __init__(self, max_lr=1.0, decrease_factor=0.5, max_iterations=5, sufficient
         self.max_iterations = max_iterations
         self.sufficient_decrease = sufficient_decrease
 
-    def do_line_search(self, chain, q, dq, target_pos, target_rot_rpy, initial_dx, problem_remaining=None):
+    def do_line_search(self, chain, q, dq, target_pos, target_wxyz, initial_dx, problem_remaining=None):
         N = target_pos.shape[0]
         NM = q.shape[0]
         M = NM // N
@@ -112,7 +112,7 @@ def do_line_search(self, chain, q, dq, target_pos, target_rot_rpy, initial_dx, p
             # evaluate the error
             m = chain.forward_kinematics(q_new).get_matrix()
             m = m.view(-1, M, 4, 4)
-            dx, pos_diff, rot_diff = delta_pose(m, target_pos, target_rot_rpy)
+            dx, pos_diff, rot_diff = delta_pose(m, target_pos, target_wxyz)
             err_new = dx.squeeze().norm(dim=-1)
             # check if it's better
             improvement = err - err_new
@@ -228,20 +228,28 @@ def solve(self, target_poses: Transform3d) -> IKSolution:
         raise NotImplementedError()
 
 
-def delta_pose(m: torch.tensor, target_pos, target_rot_rpy):
+def delta_pose(m: torch.tensor, target_pos, target_wxyz):
     """
     Determine the error
in position and rotation between the given poses and the target poses :param m: (N x M x 4 x 4) tensor of homogenous transforms :param target_pos: - :param target_rot_rpy: + :param target_wxyz: target orientation represented in unit quaternion :return: (N*M, 6, 1) tensor of delta pose (dx, dy, dz, droll, dpitch, dyaw) """ pos_diff = target_pos.unsqueeze(1) - m[:, :, :3, 3] pos_diff = pos_diff.view(-1, 3, 1) - rot_diff = target_rot_rpy.unsqueeze(1) - rotation_conversions.matrix_to_euler_angles(m[:, :, :3, :3], - "XYZ") - rot_diff = rot_diff.view(-1, 3, 1) + cur_wxyz = rotation_conversions.matrix_to_quaternion(m[:, :, :3, :3]) + + # quaternion that rotates from the current orientation to the desired orientation + # inverse for unit quaternion is the conjugate + diff_wxyz = rotation_conversions.quaternion_multiply(target_wxyz.unsqueeze(1), + rotation_conversions.quaternion_invert(cur_wxyz)) + # angular velocity vector needed to correct the orientation + # if time is considered, should divide by \delta t, but doing it iteratively we can choose delta t to be 1 + diff_axis_angle = rotation_conversions.quaternion_to_axis_angle(diff_wxyz) + + rot_diff = diff_axis_angle.view(-1, 3, 1) dx = torch.cat((pos_diff, rot_diff), dim=1) return dx, pos_diff, rot_diff @@ -273,7 +281,7 @@ def solve(self, target_poses: Transform3d) -> IKSolution: target_pos = target[:, :3, 3] # jacobian gives angular rotation about x,y,z axis of the base frame # convert target rot to desired rotation about x,y,z - target_rot_rpy = rotation_conversions.matrix_to_euler_angles(target[:, :3, :3], "XYZ") + target_wxyz = rotation_conversions.matrix_to_quaternion(target[:, :3, :3]) sol = IKSolution(self.dof, M, self.num_retries, self.pos_tolerance, self.rot_tolerance, device=self.device) @@ -306,7 +314,7 @@ def solve(self, target_poses: Transform3d) -> IKSolution: J, m = self.chain.jacobian(q, ret_eef_pose=True) # unflatten to broadcast with goal m = m.view(-1, self.num_retries, 4, 4) - dx, pos_diff, rot_diff = delta_pose(m, target_pos, target_rot_rpy) + dx, pos_diff, rot_diff = delta_pose(m, target_pos, target_wxyz) # damped least squares method # lambda^2*I (lambda^2 is regularization) @@ -321,7 +329,7 @@ def solve(self, target_poses: Transform3d) -> IKSolution: else: with torch.no_grad(): if self.line_search is not None: - lr, improvement = self.line_search.do_line_search(self.chain, q, dq, target_pos, target_rot_rpy, + lr, improvement = self.line_search.do_line_search(self.chain, q, dq, target_pos, target_wxyz, dx, problem_remaining=sol.remaining) lr = lr.unsqueeze(1) else: From aad3cd7a241af3ef7b3e8db72e556986c8d6e3a2 Mon Sep 17 00:00:00 2001 From: Sheng Zhong Date: Wed, 14 Feb 2024 14:28:11 -0500 Subject: [PATCH 26/33] Add install dependency --- pyproject.toml | 1 + 1 file changed, 1 insertion(+) diff --git a/pyproject.toml b/pyproject.toml index 02c0228..09b65c9 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -47,6 +47,7 @@ dependencies = [ 'pyyaml', 'torch', 'matplotlib', + 'pytorch_seed', ] [project.optional-dependencies] From 014a0d73e01535859695fbf6e3e2f291565f7a98 Mon Sep 17 00:00:00 2001 From: Sheng Zhong Date: Wed, 14 Feb 2024 15:07:46 -0500 Subject: [PATCH 27/33] Add test dependency --- pyproject.toml | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/pyproject.toml b/pyproject.toml index 09b65c9..acef97e 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -51,7 +51,10 @@ dependencies = [ ] [project.optional-dependencies] -test = ["pytest"] +test = [ + "pytest", + "pybullet", +] [project.urls] 
"Homepage" = "https://github.com/UM-ARM-Lab/pytorch_kinematics" From f4db26aa5662ac0f3e56ccac8c347ccb042fd816 Mon Sep 17 00:00:00 2001 From: Sheng Zhong Date: Wed, 14 Feb 2024 15:20:12 -0500 Subject: [PATCH 28/33] Add test dependency --- pyproject.toml | 1 + 1 file changed, 1 insertion(+) diff --git a/pyproject.toml b/pyproject.toml index acef97e..0a7f2ec 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -54,6 +54,7 @@ dependencies = [ test = [ "pytest", "pybullet", + "jax", ] [project.urls] From 2e02fc9b6e4273deeb9573b2d210d46b77f9dfa4 Mon Sep 17 00:00:00 2001 From: Johnson Zhong Date: Wed, 14 Feb 2024 16:23:06 -0500 Subject: [PATCH 29/33] Ignore mujoco-menagerie when running tests --- .github/workflows/python-package.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/python-package.yml b/.github/workflows/python-package.yml index 32d4dbc..ae1a3bf 100644 --- a/.github/workflows/python-package.yml +++ b/.github/workflows/python-package.yml @@ -42,4 +42,4 @@ jobs: flake8 . --count --exit-zero --max-complexity=10 --max-line-length=127 --statistics - name: Test with pytest run: | - pytest \ No newline at end of file + pytest --ignore=mujoco_menagerie From 487774f7ec66dc9d995a8299e9d4f8304a447ec9 Mon Sep 17 00:00:00 2001 From: Sheng Zhong Date: Wed, 14 Feb 2024 16:22:51 -0500 Subject: [PATCH 30/33] Remove unused test dependency --- pyproject.toml | 1 - 1 file changed, 1 deletion(-) diff --git a/pyproject.toml b/pyproject.toml index 0a7f2ec..acef97e 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -54,7 +54,6 @@ dependencies = [ test = [ "pytest", "pybullet", - "jax", ] [project.urls] From 0f67d5612e3308ea1a06ea71d222081bbf2f3d0a Mon Sep 17 00:00:00 2001 From: Johnson Zhong Date: Wed, 14 Feb 2024 16:46:40 -0500 Subject: [PATCH 31/33] Fix pytest ignore --- .github/workflows/python-package.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/python-package.yml b/.github/workflows/python-package.yml index ae1a3bf..02b5c21 100644 --- a/.github/workflows/python-package.yml +++ b/.github/workflows/python-package.yml @@ -42,4 +42,4 @@ jobs: flake8 . 
--count --exit-zero --max-complexity=10 --max-line-length=127 --statistics
     - name: Test with pytest
       run: |
-        pytest --ignore=mujoco_menagerie
+        pytest --ignore=tests/mujoco_menagerie

From 3ec3f1670d720189f1f8530801c68253deae82cd Mon Sep 17 00:00:00 2001
From: Sheng Zhong
Date: Fri, 16 Feb 2024 13:02:09 -0500
Subject: [PATCH 32/33] Default IK test to not visualize to avoid blocking in pytest

---
 tests/test_inverse_kinematics.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/tests/test_inverse_kinematics.py b/tests/test_inverse_kinematics.py
index 8b55de2..b5ed922 100644
--- a/tests/test_inverse_kinematics.py
+++ b/tests/test_inverse_kinematics.py
@@ -9,7 +9,7 @@
 import pybullet as p
 import pybullet_data
 
-visualize = True
+visualize = False
 
 
 def _make_robot_translucent(robot_id, alpha=0.4):

From c85e2942ddcc61f0c428340fc103f22e786af2c5 Mon Sep 17 00:00:00 2001
From: Sheng Zhong
Date: Fri, 16 Feb 2024 13:34:01 -0500
Subject: [PATCH 33/33] Enforce optionality of mujoco

---
 src/pytorch_kinematics/__init__.py | 8 ++++++--
 1 file changed, 6 insertions(+), 2 deletions(-)

diff --git a/src/pytorch_kinematics/__init__.py b/src/pytorch_kinematics/__init__.py
index 2df06d6..0262a86 100644
--- a/src/pytorch_kinematics/__init__.py
+++ b/src/pytorch_kinematics/__init__.py
@@ -1,6 +1,10 @@
 from pytorch_kinematics.sdf import *
 from pytorch_kinematics.urdf import *
-from pytorch_kinematics.mjcf import *
+
+try:
+    from pytorch_kinematics.mjcf import *
+except ImportError:
+    pass
 from pytorch_kinematics.transforms import *
 from pytorch_kinematics.chain import *
-from pytorch_kinematics.ik import *
\ No newline at end of file
+from pytorch_kinematics.ik import *
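
[Editor's note, not part of the series] PATCH 25 is the key accuracy change: the orientation
error fed to the Jacobian is computed in quaternion space instead of as a difference of XYZ
Euler angles. The sketch below shows why that matters near the +/-pi wrap-around, where an
Euler-angle difference reports a huge error even though the two poses are almost identical.
It only assumes the rotation_conversions helpers that delta_pose already calls; the z-axis
rotation matrices are built by hand so the snippet stands alone:

    import math
    import torch
    from pytorch_kinematics.transforms import rotation_conversions

    def rot_z(angle):
        # 3x3 rotation about z, written out to keep the sketch self-contained
        c, s = math.cos(angle), math.sin(angle)
        return torch.tensor([[c, -s, 0.], [s, c, 0.], [0., 0., 1.]])

    cur = rot_z(math.pi - 0.01).unsqueeze(0)      # current end effector orientation
    target = rot_z(-math.pi + 0.01).unsqueeze(0)  # target, really only 0.02 rad away

    # naive Euler-angle difference jumps across the discontinuity (~2*pi)
    rpy_err = (rotation_conversions.matrix_to_euler_angles(target, "XYZ")
               - rotation_conversions.matrix_to_euler_angles(cur, "XYZ"))

    # quaternion route used by delta_pose: rotation taking current onto target, as axis-angle
    cur_q = rotation_conversions.matrix_to_quaternion(cur)
    target_q = rotation_conversions.matrix_to_quaternion(target)
    diff_q = rotation_conversions.quaternion_multiply(
        target_q, rotation_conversions.quaternion_invert(cur_q))
    rot_err = rotation_conversions.quaternion_to_axis_angle(diff_q)

    print(rpy_err.norm())  # ~6.26 rad despite the poses being nearly identical
    print(rot_err.norm())  # ~0.02 rad, the true correction the solver should apply

Feeding the small, continuous axis-angle correction into the damped least squares step is what
the commit message credits for the improved convergence rate.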
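
[Editor's note, not part of the series] For reference, this is roughly how the finished solver
is exercised in tests/test_inverse_kinematics.py after these patches. The URDF path below is an
assumption (the test resolves kuka_iiwa/model.urdf from pybullet_data's search path); every call
used here appears in the diffs above:

    import torch
    import pytorch_kinematics as pk

    device = "cuda" if torch.cuda.is_available() else "cpu"
    urdf = "kuka_iiwa/model.urdf"  # hypothetical local path to the same asset the test uses
    chain = pk.build_serial_chain_from_urdf(open(urdf).read(), "lbr_iiwa_link_7")
    chain = chain.to(device=device)

    # sample reachable goals by running FK on random joint configurations, as the test does
    lim = torch.tensor(chain.get_joint_limits(), device=device)
    goal_q = torch.rand(16, 7, device=device) * (lim[1] - lim[0]) + lim[0]
    goal_tf = chain.forward_kinematics(goal_q)

    ik = pk.PseudoInverseIK(chain, max_iterations=30, num_retries=10,
                            joint_limits=lim.T,
                            early_stopping_any_converged=True,
                            early_stopping_no_improvement="all",
                            lr=0.2)
    sol = ik.solve(goal_tf)
    # sol.solutions is (num_problems, num_retries, DOF); convergence flags are per retry
    print("converged retries:", sol.converged.sum().item(), "/", sol.converged.numel())
    print("goals with a solution:", sol.converged_any.sum().item(), "/", 16)
    print("iterations used:", sol.iterations)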