Skip to content

Commit

Permalink
Add IK animation and visualize IK for parallel goals and configs
Browse the repository at this point in the history
  • Loading branch information
LemonPi committed Aug 21, 2024
1 parent 403e1a6 commit 4e5005e
Show file tree
Hide file tree
Showing 2 changed files with 62 additions and 38 deletions.
2 changes: 2 additions & 0 deletions README.md
Original file line number Diff line number Diff line change
Expand Up @@ -174,6 +174,8 @@ Compared to other IK libraries, these are the typical advantages:
- batched in both goal specification and retries from different starting configurations
- goal orientation in addition to goal position

![IK](https://i.imgur.com/QgaUME9.gif)

See `tests/test_inverse_kinematics.py` for usage, but generally what you need is below:
```python
full_urdf = os.path.join(search_path, urdf)
Expand Down
98 changes: 60 additions & 38 deletions tests/test_inverse_kinematics.py
Original file line number Diff line number Diff line change
@@ -1,6 +1,7 @@
import os
from timeit import default_timer as timer

import numpy as np
import torch

import pytorch_kinematics as pk
Expand Down Expand Up @@ -55,7 +56,8 @@ def test_jacobian_follower():
goal_pos = goal[..., :3, 3]
goal_rot = pk.matrix_to_euler_angles(goal[..., :3, :3], "XYZ")

ik = pk.PseudoInverseIK(chain, max_iterations=30, num_retries=10,
num_retries = 10
ik = pk.PseudoInverseIK(chain, max_iterations=30, num_retries=num_retries,
joint_limits=lim.T,
early_stopping_any_converged=True,
early_stopping_no_improvement="all",
Expand Down Expand Up @@ -85,53 +87,73 @@ def test_jacobian_follower():
p.setAdditionalSearchPath(search_path)

yaw = 90
pitch = -55
dist = 1.
target = goal_pos[0].cpu().numpy()
pitch = -65
# dist = 1.
dist = 2.4
target = np.array([2., 1.5, 0])
p.resetDebugVisualizerCamera(dist, yaw, pitch, target)

p.loadURDF("plane.urdf", [0, 0, 0], useFixedBase=True)
plane_id = p.loadURDF("plane.urdf", [0, 0, 0], useFixedBase=True)
p.changeVisualShape(plane_id, -1, rgbaColor=[0.3, 0.3, 0.3, 1])

# make 1 per retry with positional offsets
robots = []
num_robots = 16
# 4x4 grid position offset
offset = 1.0
m = rob_tf.get_matrix()
pos = m[0, :3, 3]
rot = m[0, :3, :3]
quat = pk.matrix_to_quaternion(rot)
pos = pos.cpu().numpy()
rot = pk.wxyz_to_xyzw(quat).cpu().numpy()
armId = p.loadURDF(urdf, basePosition=pos, baseOrientation=rot, useFixedBase=True)

_make_robot_translucent(armId, alpha=0.6)
# p.resetBasePositionAndOrientation(armId, [0, 0, 0], [0, 0, 0, 1])
# draw goal
# place a translucent sphere at the goal
for i in range(num_robots):
this_offset = np.array([i % 4 * offset, i // 4 * offset, 0])
armId = p.loadURDF(urdf, basePosition=pos + this_offset, baseOrientation=rot, useFixedBase=True)
# _make_robot_translucent(armId, alpha=0.6)
robots.append({"id": armId, "offset": this_offset, "pos": pos})

show_max_num_retries_per_goal = 10
for goal_num in range(M):
# draw cone to indicate pose instead of sphere
visId = p.createVisualShape(p.GEOM_MESH, fileName="meshes/cone.obj", meshScale=1.0,
rgbaColor=[0., 1., 0., 0.5])
# visId = p.createVisualShape(p.GEOM_SPHERE, radius=0.05, rgbaColor=[0., 1., 0., 0.5])
r = goal_rot[goal_num]
xyzw = pk.wxyz_to_xyzw(pk.matrix_to_quaternion(pk.euler_angles_to_matrix(r, "XYZ")))
goalId = p.createMultiBody(baseMass=0, baseVisualShapeIndex=visId,
basePosition=goal_pos[goal_num].cpu().numpy(),
baseOrientation=xyzw.cpu().numpy())

solutions = sol.solutions[goal_num]
# sort based on if they converged
converged = sol.converged[goal_num]
idx = torch.argsort(converged.to(int), descending=True)
solutions = solutions[idx]

# print how many retries converged for this one
print("Goal %d converged %d / %d" % (goal_num, converged.sum(), converged.numel()))

for i, q in enumerate(solutions):
if i > show_max_num_retries_per_goal:
break
for dof in range(q.shape[0]):
p.resetJointState(armId, dof, q[dof])
input("Press enter to continue")

p.removeBody(goalId)

goals = []
# draw cone to indicate pose instead of sphere
visId = p.createVisualShape(p.GEOM_MESH, fileName="meshes/cone.obj", meshScale=1.0,
rgbaColor=[0., 1., 0., 0.5])
for i in range(num_robots):
goals.append(p.createMultiBody(baseMass=0, baseVisualShapeIndex=visId))

try:
import window_recorder
with window_recorder.WindowRecorder(save_dir="."):
# batch over goals with num_robots
for j in range(0, M, num_robots):
this_selection = slice(j, j + num_robots)
r = goal_rot[this_selection]
xyzw = pk.wxyz_to_xyzw(pk.matrix_to_quaternion(pk.euler_angles_to_matrix(r, "XYZ")))

solutions = sol.solutions[this_selection, :, :]
converged = sol.converged[this_selection, :]

# print how many retries converged for this one
print("Goal %d to %d converged %d / %d" % (j, j + num_robots, converged.sum(), converged.numel()))

# outer loop over retries, inner loop over goals (for each robot shown in parallel)
for ii in range(num_retries):
if ii > show_max_num_retries_per_goal:
break
for jj in range(num_robots):
p.resetBasePositionAndOrientation(goals[jj],
goal_pos[j + jj].cpu().numpy() + robots[jj]["offset"],
xyzw[jj].cpu().numpy())
armId = robots[jj]["id"]
q = solutions[jj, ii, :]
for dof in range(q.shape[0]):
p.resetJointState(armId, dof, q[dof])

input("Press enter to continue")
except ImportError:
print("pip install window_recorder")

while True:
p.stepSimulation()
Expand Down

0 comments on commit 4e5005e

Please sign in to comment.