You signed in with another tab or window. Reload to refresh your session.You signed out in another tab or window. Reload to refresh your session.You switched accounts on another tab or window. Reload to refresh your session.Dismiss alert
I have implemented a custom cost function in Theseus. The issue I am facing is that when I vary the weight term associated with the cost function, the error printed when we set verbose=True does go up. However, the resulting solution at the end does not change. Please find attached the code of the mentioned cost function:
class ObstacleCost(CostFunction):
    """Inverse-distance obstacle cost: error = 1 / ||link_pos - tree_node_pos||.

    The error grows as the optimized link position approaches the fixed
    obstacle (tree node) position, pushing the solution away from it.

    Args:
        cost_weight: weight the optimizer applies to this cost.
        link_pos: optimization variable (Point3) — the link position.
        tree_node_pos: auxiliary (fixed) variable (Point3) — the obstacle position.
        max_distance: distance threshold. NOTE(review): currently unused by
            error()/jacobians(); kept for interface compatibility — either
            use it to gate the cost or remove it from callers.
        name: optional cost-function name.

    Raises:
        ValueError: if link_pos and tree_node_pos differ in type or dof.
    """

    # Floor on the distance to avoid division by zero / exploding values
    # when the link coincides with the obstacle.
    _EPS = 1e-9

    def __init__(
        self,
        cost_weight: CostWeight,
        link_pos: Point3,
        tree_node_pos: Point3,
        max_distance: float,
        name: Optional[str] = None,
    ):
        super().__init__(cost_weight, name=name)
        # Both endpoints must be the same variable type with matching dof.
        if not isinstance(link_pos, tree_node_pos.__class__):
            raise ValueError(
                "link_pos for the ObstacleCost inconsistent with the given tree_node_pos."
            )
        if link_pos.dof() != tree_node_pos.dof():
            raise ValueError(
                "link_pos and tree_node_pos in the ObstacleCost must have identical dof."
            )
        self.link_pos = link_pos
        self.tree_node_pos = tree_node_pos
        self.max_distance = max_distance
        # Register with Theseus: link_pos is optimized, tree_node_pos is fixed.
        self.register_optim_vars(["link_pos"])
        self.register_aux_vars(["tree_node_pos"])

    def dim(self) -> int:
        # BUG FIX: error() returns ONE scalar per batch element (the inverse
        # distance, shape (B, 1)), and jacobians() returns a (B, 1, 3) tensor.
        # Theseus requires dim() to equal the error dimension; the original
        # returned link_pos.dof() (= 3), which made the weighted error and
        # jacobian shapes inconsistent — a likely reason changing the cost
        # weight did not affect the solver's solution.
        return 1

    def _copy_impl(self, new_name: Optional[str] = None) -> "ObstacleCost":
        # Deep-copy all variables; the weight is copied by the base machinery's
        # convention of passing a copied CostWeight to the new instance.
        return ObstacleCost(  # type: ignore
            self.weight.copy(),
            self.link_pos.copy(),
            self.tree_node_pos.copy(),
            self.max_distance,
            name=new_name,
        )

    def error(self) -> torch.Tensor:
        # L2 distance between link and obstacle, floored at _EPS so a
        # coincident pair cannot produce inf/nan (BUG FIX: the original
        # clamped only in jacobians(), not here).
        distances = (self.link_pos - self.tree_node_pos).norm(dim=-1, keepdim=True)
        distances = distances.clamp(min=self._EPS)
        return 1.0 / distances

    def jacobians(self) -> Tuple[List[torch.Tensor], torch.Tensor]:
        diff = self.link_pos - self.tree_node_pos
        distances = diff.norm(dim=-1, keepdim=True)
        # Same clamp as error(), so error and jacobian stay consistent.
        distances = distances.clamp(min=self._EPS)
        # d(1/d)/dx_i = -diff_i / d^3 for each of the 3 coordinates.
        grad_component_x = -diff[:, 0:1] / (distances ** 3)
        grad_component_y = -diff[:, 1:2] / (distances ** 3)
        grad_component_z = -diff[:, 2:3] / (distances ** 3)
        # Shape (B, 3) -> (B, 1, 3): one error row, three optim-var columns.
        jac = torch.cat(
            (grad_component_x, grad_component_y, grad_component_z), dim=1
        ).to(self.link_pos.device)
        jac = jac.unsqueeze(1)
        return [jac], self.error()
reacted with thumbs up emoji reacted with thumbs down emoji reacted with laugh emoji reacted with hooray emoji reacted with confused emoji reacted with heart emoji reacted with rocket emoji reacted with eyes emoji
-
I have implemented a custom cost function in Theseus. The issue I am facing is that when I vary the weight term associated with the cost function, the error printed when we set
verbose=True
does seem to go up. However the resulting solution at the end does not seem to change . PFA code of the mentioned cost function :Beta Was this translation helpful? Give feedback.
All reactions