
Commit d647ea5

Fixes docstring in reward functions and code block in documentation (isaac-sim#661)

# Description

Fixes docstring in reward functions and code block in documentation.

## Type of change

- Bug fix

## Checklist

- [x] I have run the [`pre-commit` checks](https://pre-commit.com/) with `./isaaclab.sh --format`
- [x] I have made corresponding changes to the documentation
- [x] My changes generate no new warnings
- [ ] I have added tests that prove my fix is effective or that my feature works
- [ ] I have updated the changelog and the corresponding version in the extension's `config/extension.toml` file
- [x] I have added my name to the `CONTRIBUTORS.md` or my name already exists there
1 parent b6d43ba commit d647ea5

File tree

- docs/source/setup/sample.rst
- source/extensions/omni.isaac.lab/omni/isaac/lab/envs/mdp/rewards.py

2 files changed: +13 -13 lines changed


docs/source/setup/sample.rst

Lines changed: 1 addition & 1 deletion

@@ -165,7 +165,7 @@ format.
 
 .. code:: bash
 
-   ./isaaclab.sh -p source/standalone//workflows/robomimic/play.py --task Isaac-Lift-Cube-Franka-IK-Rel-v0 --checkpoint /PATH/TO/model.pth
+   ./isaaclab.sh -p source/standalone/workflows/robomimic/play.py --task Isaac-Lift-Cube-Franka-IK-Rel-v0 --checkpoint /PATH/TO/model.pth
 
 Reinforcement Learning
 ~~~~~~~~~~~~~~~~~~~~~~

source/extensions/omni.isaac.lab/omni/isaac/lab/envs/mdp/rewards.py

Lines changed: 12 additions & 12 deletions

@@ -74,21 +74,21 @@ def __call__(self, env: ManagerBasedRLEnv, term_keys: str | list[str] = ".*") ->
 
 
 def lin_vel_z_l2(env: ManagerBasedRLEnv, asset_cfg: SceneEntityCfg = SceneEntityCfg("robot")) -> torch.Tensor:
-    """Penalize z-axis base linear velocity using L2-kernel."""
+    """Penalize z-axis base linear velocity using L2 squared kernel."""
     # extract the used quantities (to enable type-hinting)
     asset: RigidObject = env.scene[asset_cfg.name]
     return torch.square(asset.data.root_lin_vel_b[:, 2])
 
 
 def ang_vel_xy_l2(env: ManagerBasedRLEnv, asset_cfg: SceneEntityCfg = SceneEntityCfg("robot")) -> torch.Tensor:
-    """Penalize xy-axis base angular velocity using L2-kernel."""
+    """Penalize xy-axis base angular velocity using L2 squared kernel."""
     # extract the used quantities (to enable type-hinting)
     asset: RigidObject = env.scene[asset_cfg.name]
     return torch.sum(torch.square(asset.data.root_ang_vel_b[:, :2]), dim=1)
 
 
 def flat_orientation_l2(env: ManagerBasedRLEnv, asset_cfg: SceneEntityCfg = SceneEntityCfg("robot")) -> torch.Tensor:
-    """Penalize non-flat base orientation using L2-kernel.
+    """Penalize non-flat base orientation using L2 squared kernel.
 
     This is computed by penalizing the xy-components of the projected gravity vector.
     """
@@ -100,7 +100,7 @@ def flat_orientation_l2(env: ManagerBasedRLEnv, asset_cfg: SceneEntityCfg = Scen
 def base_height_l2(
     env: ManagerBasedRLEnv, target_height: float, asset_cfg: SceneEntityCfg = SceneEntityCfg("robot")
 ) -> torch.Tensor:
-    """Penalize asset height from its target using L2-kernel.
+    """Penalize asset height from its target using L2 squared kernel.
 
     Note:
         Currently, it assumes a flat terrain, i.e. the target height is in the world frame.
@@ -123,9 +123,9 @@ def body_lin_acc_l2(env: ManagerBasedRLEnv, asset_cfg: SceneEntityCfg = SceneEnt
 
 
 def joint_torques_l2(env: ManagerBasedRLEnv, asset_cfg: SceneEntityCfg = SceneEntityCfg("robot")) -> torch.Tensor:
-    """Penalize joint torques applied on the articulation using L2-kernel.
+    """Penalize joint torques applied on the articulation using L2 squared kernel.
 
-    NOTE: Only the joints configured in :attr:`asset_cfg.joint_ids` will have their joint torques contribute to the L2 norm.
+    NOTE: Only the joints configured in :attr:`asset_cfg.joint_ids` will have their joint torques contribute to the term.
     """
     # extract the used quantities (to enable type-hinting)
     asset: Articulation = env.scene[asset_cfg.name]
@@ -140,19 +140,19 @@ def joint_vel_l1(env: ManagerBasedRLEnv, asset_cfg: SceneEntityCfg) -> torch.Ten
 
 
 def joint_vel_l2(env: ManagerBasedRLEnv, asset_cfg: SceneEntityCfg = SceneEntityCfg("robot")) -> torch.Tensor:
-    """Penalize joint velocities on the articulation using L1-kernel.
+    """Penalize joint velocities on the articulation using L2 squared kernel.
 
-    NOTE: Only the joints configured in :attr:`asset_cfg.joint_ids` will have their joint velocities contribute to the L1 norm.
+    NOTE: Only the joints configured in :attr:`asset_cfg.joint_ids` will have their joint velocities contribute to the term.
     """
     # extract the used quantities (to enable type-hinting)
     asset: Articulation = env.scene[asset_cfg.name]
     return torch.sum(torch.square(asset.data.joint_vel[:, asset_cfg.joint_ids]), dim=1)
 
 
 def joint_acc_l2(env: ManagerBasedRLEnv, asset_cfg: SceneEntityCfg = SceneEntityCfg("robot")) -> torch.Tensor:
-    """Penalize joint accelerations on the articulation using L2-kernel.
+    """Penalize joint accelerations on the articulation using L2 squared kernel.
 
-    NOTE: Only the joints configured in :attr:`asset_cfg.joint_ids` will have their joint accelerations contribute to the L2 norm.
+    NOTE: Only the joints configured in :attr:`asset_cfg.joint_ids` will have their joint accelerations contribute to the term.
     """
     # extract the used quantities (to enable type-hinting)
     asset: Articulation = env.scene[asset_cfg.name]
@@ -232,12 +232,12 @@ def applied_torque_limits(env: ManagerBasedRLEnv, asset_cfg: SceneEntityCfg = Sc
 
 
 def action_rate_l2(env: ManagerBasedRLEnv) -> torch.Tensor:
-    """Penalize the rate of change of the actions using L2-kernel."""
+    """Penalize the rate of change of the actions using L2 squared kernel."""
     return torch.sum(torch.square(env.action_manager.action - env.action_manager.prev_action), dim=1)
 
 
 def action_l2(env: ManagerBasedRLEnv) -> torch.Tensor:
-    """Penalize the actions using L2-kernel."""
+    """Penalize the actions using L2 squared kernel."""
    return torch.sum(torch.square(env.action_manager.action), dim=1)
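
A note on the terminology being corrected: every `*_l2` term above sums squared values and never takes a square root, so it returns the squared L2 norm rather than the L2 norm itself, which is what "L2 squared kernel" now conveys. The standalone sketch below illustrates the difference with plain PyTorch. The joint-velocity numbers are made up for illustration, and the assumption that an L1 kernel (as in `joint_vel_l1`) sums absolute values is inferred from the function name rather than shown in this diff.

```python
import torch

# Hypothetical batch of joint velocities, shape (num_envs, num_joints).
joint_vel = torch.tensor([[0.5, -1.0, 2.0],
                          [0.0, 0.3, 0.4]])

# What joint_vel_l2 returns: the sum of squares, i.e. the *squared* L2 norm.
l2_squared = torch.sum(torch.square(joint_vel), dim=1)       # [5.25, 0.25]

# The L2 norm proper includes a square root, which these reward terms never take.
l2_norm = torch.linalg.vector_norm(joint_vel, ord=2, dim=1)  # [~2.29, 0.50]

# An L1 kernel would sum absolute values instead, growing linearly with the deviation.
l1 = torch.sum(torch.abs(joint_vel), dim=1)                  # [3.50, 0.70]
```

For context on how these functions are consumed: they are not called directly, but referenced from reward term configurations that the reward manager evaluates each step. The snippet below is only a rough sketch of that wiring; the import paths (`omni.isaac.lab.managers`, `omni.isaac.lab.envs.mdp`, `omni.isaac.lab.utils`) are assumed from the package layout visible in this commit, and the weights are illustrative rather than values from the repository.

```python
from omni.isaac.lab.envs import mdp
from omni.isaac.lab.managers import RewardTermCfg as RewTerm
from omni.isaac.lab.managers import SceneEntityCfg
from omni.isaac.lab.utils import configclass


@configclass
class RewardsCfg:
    # Penalize vertical base velocity with the L2 squared kernel (weight is illustrative).
    lin_vel_z = RewTerm(func=mdp.lin_vel_z_l2, weight=-2.0)
    # Penalize joint accelerations; only the joints matched by joint_names contribute.
    joint_acc = RewTerm(
        func=mdp.joint_acc_l2,
        weight=-2.5e-7,
        params={"asset_cfg": SceneEntityCfg("robot", joint_names=[".*"])},
    )
```

Either way, the changes in this commit touch only docstrings; the computed rewards are unchanged.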