__credits__ = ["Kallinteris-Andreas"]

import numpy as np

from gymnasium import utils
from gymnasium.envs.mujoco import MujocoEnv
from gymnasium.spaces import Box


DEFAULT_CAMERA_CONFIG = {
    "trackbodyid": 2,
    "distance": 4.0,
    "lookat": np.array((0.0, 0.0, 1.15)),
    "elevation": -20.0,
}


class Walker2dEnv(MujocoEnv, utils.EzPickle):
    r"""
    ## Description
    This environment builds on the [hopper](https://gymnasium.farama.org/environments/mujoco/hopper/) environment by adding another set of legs that allow the robot to walk forward instead of hop.
    Like other MuJoCo environments, this environment aims to increase the number of independent state and control variables compared to classical control environments.
    The walker is a two-dimensional bipedal robot consisting of seven main body parts - a single torso at the top (with the two legs splitting after the torso), two thighs in the middle below the torso, two legs below the thighs, and two feet attached to the legs on which the entire body rests.
    The goal is to walk in the forward (right) direction by applying torque to the six hinges connecting the seven body parts.

    ## Action Space
    ```{figure} action_space_figures/walker2d.png
    :name: walker2d
    ```

    The action space is a `Box(-1, 1, (6,), float32)`. An action represents the torques applied at the hinge joints.

    | Num | Action                                 | Control Min | Control Max | Name (in corresponding XML file) | Joint | Type (Unit)  |
    |-----|----------------------------------------|-------------|-------------|----------------------------------|-------|--------------|
    | 0   | Torque applied on the thigh rotor      | -1          | 1           | thigh_joint                      | hinge | torque (N m) |
    | 1   | Torque applied on the leg rotor        | -1          | 1           | leg_joint                        | hinge | torque (N m) |
    | 2   | Torque applied on the foot rotor       | -1          | 1           | foot_joint                       | hinge | torque (N m) |
    | 3   | Torque applied on the left thigh rotor | -1          | 1           | thigh_left_joint                 | hinge | torque (N m) |
    | 4   | Torque applied on the left leg rotor   | -1          | 1           | leg_left_joint                   | hinge | torque (N m) |
    | 5   | Torque applied on the left foot rotor  | -1          | 1           | foot_left_joint                  | hinge | torque (N m) |
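
    For example, a random torque command can be applied as follows (a minimal sketch; the seed is illustrative):

    ```python
    import gymnasium as gym

    env = gym.make("Walker2d-v5")
    env.reset(seed=0)
    action = env.action_space.sample()  # 6 torques, each in [-1, 1]
    obs, reward, terminated, truncated, info = env.step(action)
    ```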

    ## Observation Space
    The observation space consists of the following parts (in order):

    - *qpos (8 elements by default):* Position values of the robot's body parts.
    - *qvel (9 elements):* The velocities of these individual body parts (their derivatives).

    By default, the observation does not include the robot's x-coordinate (`rootx`).
    This can be included by passing `exclude_current_positions_from_observation=False` during construction.
    In this case, the observation space will be a `Box(-Inf, Inf, (18,), float64)`, where the first observation element is the x-coordinate of the robot.
    Regardless of whether `exclude_current_positions_from_observation` is set to `True` or `False`, the x-coordinate is returned in `info` with the key `"x_position"`.

    By default, however, the observation space is a `Box(-Inf, Inf, (17,), float64)` where the elements are as follows:

    | Num | Observation                                        | Min  | Max | Name (in corresponding XML file) | Joint | Type (Unit)              |
    | --- | -------------------------------------------------- | ---- | --- | -------------------------------- | ----- | ------------------------ |
    | 0   | z-coordinate of the torso (height of Walker2d)     | -Inf | Inf | rootz                            | slide | position (m)             |
    | 1   | angle of the torso                                 | -Inf | Inf | rooty                            | hinge | angle (rad)              |
    | 2   | angle of the thigh joint                           | -Inf | Inf | thigh_joint                      | hinge | angle (rad)              |
    | 3   | angle of the leg joint                             | -Inf | Inf | leg_joint                        | hinge | angle (rad)              |
    | 4   | angle of the foot joint                            | -Inf | Inf | foot_joint                       | hinge | angle (rad)              |
    | 5   | angle of the left thigh joint                      | -Inf | Inf | thigh_left_joint                 | hinge | angle (rad)              |
    | 6   | angle of the left leg joint                        | -Inf | Inf | leg_left_joint                   | hinge | angle (rad)              |
    | 7   | angle of the left foot joint                       | -Inf | Inf | foot_left_joint                  | hinge | angle (rad)              |
    | 8   | velocity of the x-coordinate of the torso          | -Inf | Inf | rootx                            | slide | velocity (m/s)           |
    | 9   | velocity of the z-coordinate (height) of the torso | -Inf | Inf | rootz                            | slide | velocity (m/s)           |
    | 10  | angular velocity of the angle of the torso         | -Inf | Inf | rooty                            | hinge | angular velocity (rad/s) |
    | 11  | angular velocity of the thigh hinge                | -Inf | Inf | thigh_joint                      | hinge | angular velocity (rad/s) |
    | 12  | angular velocity of the leg hinge                  | -Inf | Inf | leg_joint                        | hinge | angular velocity (rad/s) |
    | 13  | angular velocity of the foot hinge                 | -Inf | Inf | foot_joint                       | hinge | angular velocity (rad/s) |
    | 14  | angular velocity of the left thigh hinge           | -Inf | Inf | thigh_left_joint                 | hinge | angular velocity (rad/s) |
    | 15  | angular velocity of the left leg hinge             | -Inf | Inf | leg_left_joint                   | hinge | angular velocity (rad/s) |
    | 16  | angular velocity of the left foot hinge            | -Inf | Inf | foot_left_joint                  | hinge | angular velocity (rad/s) |
    | excluded | x-coordinate of the torso                     | -Inf | Inf | rootx                            | slide | position (m)             |
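
    For example, the two observation shapes described above can be checked directly:

    ```python
    import gymnasium as gym

    env = gym.make("Walker2d-v5")
    print(env.observation_space.shape)  # (17,) - x-coordinate excluded by default

    env = gym.make("Walker2d-v5", exclude_current_positions_from_observation=False)
    print(env.observation_space.shape)  # (18,) - obs[0] is the x-coordinate
    ```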

    ## Rewards
    The total reward is: ***reward*** *=* *healthy_reward + forward_reward - ctrl_cost*.

    - *healthy_reward*:
    Every timestep that the Walker2d is alive, it receives a fixed reward of value `healthy_reward` (default is $1$),
    - *forward_reward*:
    A reward for moving forward,
    this reward is positive if the Walker2d moves forward (in the positive $x$ direction / in the right direction).
    $w_{forward} \times \frac{dx}{dt}$, where
    $dx$ is the displacement of the Walker2d ($x_{after-action} - x_{before-action}$),
    $dt$ is the time between actions, which depends on the `frame_skip` parameter (default is $4$)
    and the `frametime`, which is $0.002$ - so the default is $dt = 4 \times 0.002 = 0.008$,
    $w_{forward}$ is the `forward_reward_weight` (default is $1$).
    - *ctrl_cost*:
    A negative reward to penalize the Walker2d for taking actions that are too large.
    $w_{control} \times \|action\|_2^2$,
    where $w_{control}$ is the `ctrl_cost_weight` (default is $10^{-3}$).

    `info` contains the individual reward terms.
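
    For instance, the per-step reward can be reconstructed from the terms reported in `info` (a quick sanity check; the seed is illustrative):

    ```python
    import gymnasium as gym
    import numpy as np

    env = gym.make("Walker2d-v5")
    env.reset(seed=0)
    _, reward, _, _, info = env.step(env.action_space.sample())

    # reward = healthy_reward + forward_reward - ctrl_cost
    # (info["reward_ctrl"] already carries the negative sign)
    total = info["reward_survive"] + info["reward_forward"] + info["reward_ctrl"]
    assert np.isclose(reward, total)
    ```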

    ## Starting State
    The initial position state is $[0, 1.25, 0, 0, 0, 0, 0, 0, 0] + \mathcal{U}_{[-reset\_noise\_scale \times I_{9}, reset\_noise\_scale \times I_{9}]}$.
    The initial velocity state is $\mathcal{U}_{[-reset\_noise\_scale \times I_{9}, reset\_noise\_scale \times I_{9}]}$,

    where $\mathcal{U}$ is the multivariate uniform continuous distribution.

    Note that the z-coordinate is non-zero so that the Walker2d can stand up immediately.
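
    A minimal illustration of the reset distribution (the seed and printed values are illustrative):

    ```python
    import gymnasium as gym

    env = gym.make("Walker2d-v5", reset_noise_scale=5e-3)
    obs, info = env.reset(seed=0)
    # With the default observation (x excluded), obs[0] is the torso height,
    # which starts near 1.25 plus uniform noise in [-5e-3, 5e-3].
    print(obs[0], info["x_position"], info["z_distance_from_origin"])
    ```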

    ## Episode End
    ### Termination
    If `terminate_when_unhealthy` is `True` (which is the default), the environment terminates when the Walker2d is unhealthy.
    The Walker2d is unhealthy if any of the following happens:

    1. Any of the state space values is no longer finite.
    2. The z-coordinate of the torso (the height) is **not** in the closed interval given by the `healthy_z_range` argument (default is $[0.8, 2]$).
    3. The absolute value of the angle (`observation[1]` if `exclude_current_positions_from_observation=True`, else `observation[2]`) is ***not*** in the closed interval specified by the `healthy_angle_range` argument (default is $[-1, 1]$).

    ### Truncation
    The default duration of an episode is 1000 timesteps.
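
    A minimal episode loop illustrating the termination/truncation split (a sketch only, using random actions):

    ```python
    import gymnasium as gym

    env = gym.make("Walker2d-v5")
    env.reset(seed=0)
    terminated = truncated = False
    steps = 0
    while not (terminated or truncated):
        _, _, terminated, truncated, _ = env.step(env.action_space.sample())
        steps += 1
    # Under random torques the walker typically falls (terminated=True)
    # well before the 1000-step truncation limit is reached.
    print(steps, terminated, truncated)
    ```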

    ## Arguments
    Walker2d provides a range of parameters to modify the observation space, reward function, initial state, and termination condition.
    These parameters can be applied during `gymnasium.make` in the following way:

    ```python
    import gymnasium as gym
    env = gym.make('Walker2d-v5', ctrl_cost_weight=1e-3, ...)
    ```

    | Parameter                                    | Type      | Default             | Description                                                                                                                                                                                          |
    | -------------------------------------------- | --------- | ------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
    | `xml_file`                                   | **str**   | `"walker2d_v5.xml"` | Path to a MuJoCo model                                                                                                                                                                               |
    | `forward_reward_weight`                      | **float** | `1`                 | Weight for the _forward_reward_ term (see `Rewards` section)                                                                                                                                         |
    | `ctrl_cost_weight`                           | **float** | `1e-3`              | Weight for the _ctrl_cost_ term (see `Rewards` section)                                                                                                                                              |
    | `healthy_reward`                             | **float** | `1`                 | Weight for the _healthy_reward_ term (see `Rewards` section)                                                                                                                                         |
    | `terminate_when_unhealthy`                   | **bool**  | `True`              | If `True`, issue a `terminated` signal when the walker is unhealthy (see `Episode End` section)                                                                                                      |
    | `healthy_z_range`                            | **tuple** | `(0.8, 2)`          | The z-coordinate of the torso of the walker must be in this range to be considered healthy (see `Episode End` section)                                                                              |
    | `healthy_angle_range`                        | **tuple** | `(-1, 1)`           | The angle must be in this range to be considered healthy (see `Episode End` section)                                                                                                                |
    | `reset_noise_scale`                          | **float** | `5e-3`              | Scale of random perturbations of the initial position and velocity (see `Starting State` section)                                                                                                   |
    | `exclude_current_positions_from_observation` | **bool**  | `True`              | Whether or not to omit the x-coordinate from observations. Excluding the position can serve as an inductive bias to induce position-agnostic behavior in policies (see `Observation Space` section) |
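
    For example (the parameter values here are illustrative only):

    ```python
    import gymnasium as gym

    env = gym.make(
        "Walker2d-v5",
        healthy_z_range=(0.9, 1.8),
        terminate_when_unhealthy=True,
        exclude_current_positions_from_observation=False,
    )
    ```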

    ## Version History
    * v5:
        - Minimum `mujoco` version is now 2.3.3.
        - Added support for fully custom/third party `mujoco` models using the `xml_file` argument (previously only a few changes could be made to the existing models).
        - Added `default_camera_config` argument, a dictionary for setting the `mj_camera` properties, mainly useful for custom environments.
        - Added `env.observation_structure`, a dictionary specifying the composition of the observation space (e.g. `qpos`, `qvel`), useful for building tooling and wrappers for the MuJoCo environments.
        - Return a non-empty `info` with `reset()`; previously an empty dictionary was returned, and the new keys contain the same state information as `step()`.
        - Added `frame_skip` argument, used to configure the `dt` (duration of `step()`); the default varies by environment, check the environment documentation pages.
        - In v2, v3 and v4 the models have different friction values for the two feet (left foot friction == 1.9 and right foot friction == 0.9). The `Walker2d-v5` model is updated to have the same friction for both feet (set to 1.9). This causes the Walker2d's right foot to slide less on the surface and therefore require more force to move (related [GitHub issue](https://github.com/Farama-Foundation/Gymnasium/issues/477)).
        - Fixed bug: `healthy_reward` was given on every step (even if the Walker2d was unhealthy); now it is only given if the Walker2d is healthy. The `info` key "reward_survive" is updated with this change (related [GitHub issue](https://github.com/Farama-Foundation/Gymnasium/issues/526)).
        - Restored the `xml_file` argument (was removed in `v4`).
        - Added individual reward terms in `info` (`info["reward_forward"]`, `info["reward_ctrl"]`, `info["reward_survive"]`).
        - Added `info["z_distance_from_origin"]`, which is equal to the vertical distance of the "torso" body from its initial position.
    * v4: All MuJoCo environments now use the MuJoCo bindings in mujoco >= 2.1.3.
    * v3: Support for `gymnasium.make` kwargs such as `xml_file`, `ctrl_cost_weight`, `reset_noise_scale`, etc. RGB rendering comes from the tracking camera (so the agent does not run away from the screen). Moved to the [gymnasium-robotics repo](https://github.com/Farama-Foundation/gymnasium-robotics).
    * v2: All continuous control environments now use mujoco-py >= 1.50. Moved to the [gymnasium-robotics repo](https://github.com/Farama-Foundation/gymnasium-robotics).
    * v1: max_time_steps raised to 1000 for robot based tasks. Added reward_threshold to environments.
    * v0: Initial version release.
    """

    metadata = {
        "render_modes": [
            "human",
            "rgb_array",
            "depth_array",
            "rgbd_tuple",
        ],
    }

    def __init__(
        self,
        xml_file: str = "walker2d_v5.xml",
        frame_skip: int = 4,
        default_camera_config: dict[str, float | int] = DEFAULT_CAMERA_CONFIG,
        forward_reward_weight: float = 1.0,
        ctrl_cost_weight: float = 1e-3,
        healthy_reward: float = 1.0,
        terminate_when_unhealthy: bool = True,
        healthy_z_range: tuple[float, float] = (0.8, 2.0),
        healthy_angle_range: tuple[float, float] = (-1.0, 1.0),
        reset_noise_scale: float = 5e-3,
        exclude_current_positions_from_observation: bool = True,
        **kwargs,
    ):
        utils.EzPickle.__init__(
            self,
            xml_file,
            frame_skip,
            default_camera_config,
            forward_reward_weight,
            ctrl_cost_weight,
            healthy_reward,
            terminate_when_unhealthy,
            healthy_z_range,
            healthy_angle_range,
            reset_noise_scale,
            exclude_current_positions_from_observation,
            **kwargs,
        )

        self._forward_reward_weight = forward_reward_weight
        self._ctrl_cost_weight = ctrl_cost_weight

        self._healthy_reward = healthy_reward
        self._terminate_when_unhealthy = terminate_when_unhealthy

        self._healthy_z_range = healthy_z_range
        self._healthy_angle_range = healthy_angle_range

        self._reset_noise_scale = reset_noise_scale

        self._exclude_current_positions_from_observation = (
            exclude_current_positions_from_observation
        )

        MujocoEnv.__init__(
            self,
            xml_file,
            frame_skip,
            observation_space=None,  # defined below once the model sizes are known
            default_camera_config=default_camera_config,
            **kwargs,
        )

        self.metadata = {
            "render_modes": [
                "human",
                "rgb_array",
                "depth_array",
                "rgbd_tuple",
            ],
            "render_fps": int(np.round(1.0 / self.dt)),
        }

        # Observation is qpos + qvel, minus the root x-position when excluded.
        obs_size = (
            self.data.qpos.size
            + self.data.qvel.size
            - exclude_current_positions_from_observation
        )
        self.observation_space = Box(
            low=-np.inf, high=np.inf, shape=(obs_size,), dtype=np.float64
        )

        self.observation_structure = {
            "skipped_qpos": 1 * exclude_current_positions_from_observation,
            "qpos": self.data.qpos.size
            - 1 * exclude_current_positions_from_observation,
            "qvel": self.data.qvel.size,
        }

    @property
    def healthy_reward(self):
        # Alive bonus, only awarded while the walker is healthy.
        return self.is_healthy * self._healthy_reward

    def control_cost(self, action):
        # Quadratic penalty on the commanded torques.
        control_cost = self._ctrl_cost_weight * np.sum(np.square(action))
        return control_cost

    @property
    def is_healthy(self):
        # Torso height (qpos[1]) and torso pitch angle (qpos[2]).
        z, angle = self.data.qpos[1:3]

        min_z, max_z = self._healthy_z_range
        min_angle, max_angle = self._healthy_angle_range

        healthy_z = min_z < z < max_z
        healthy_angle = min_angle < angle < max_angle
        is_healthy = healthy_z and healthy_angle

        return is_healthy

    def _get_obs(self):
        position = self.data.qpos.flatten()
        # Clip velocities to [-10, 10] to keep observations bounded.
        velocity = np.clip(self.data.qvel.flatten(), -10, 10)

        if self._exclude_current_positions_from_observation:
            position = position[1:]

        observation = np.concatenate((position, velocity)).ravel()
        return observation

    def step(self, action):
        x_position_before = self.data.qpos[0]
        self.do_simulation(action, self.frame_skip)
        x_position_after = self.data.qpos[0]
        x_velocity = (x_position_after - x_position_before) / self.dt

        observation = self._get_obs()
        reward, reward_info = self._get_rew(x_velocity, action)
        terminated = (not self.is_healthy) and self._terminate_when_unhealthy
        info = {
            "x_position": x_position_after,
            "z_distance_from_origin": self.data.qpos[1] - self.init_qpos[1],
            "x_velocity": x_velocity,
            **reward_info,
        }

        if self.render_mode == "human":
            self.render()
        # truncation=False as the time limit is handled by the `TimeLimit` wrapper added during `make`
        return observation, reward, terminated, False, info

    def _get_rew(self, x_velocity: float, action):
        forward_reward = self._forward_reward_weight * x_velocity
        healthy_reward = self.healthy_reward
        rewards = forward_reward + healthy_reward

        ctrl_cost = self.control_cost(action)
        costs = ctrl_cost
        reward = rewards - costs

        reward_info = {
            "reward_forward": forward_reward,
            "reward_ctrl": -ctrl_cost,
            "reward_survive": healthy_reward,
        }

        return reward, reward_info

    def reset_model(self):
        noise_low = -self._reset_noise_scale
        noise_high = self._reset_noise_scale

        # Perturb the default pose and zero velocities with uniform noise.
        qpos = self.init_qpos + self.np_random.uniform(
            low=noise_low, high=noise_high, size=self.model.nq
        )
        qvel = self.init_qvel + self.np_random.uniform(
            low=noise_low, high=noise_high, size=self.model.nv
        )

        self.set_state(qpos, qvel)

        observation = self._get_obs()
        return observation

    def _get_reset_info(self):
        return {
            "x_position": self.data.qpos[0],
            "z_distance_from_origin": self.data.qpos[1] - self.init_qpos[1],
        }