Gymnasium/gym/envs/mujoco/swimmer_v3.py
Commit 61a39f41bc by Rodrigo de Lazcano, 2022-06-30 10:59:59 -04:00
Initialize observation spaces and pytest (#2929)

* remove step initialization for mujoco obs space
* pre-commit
* pytest obs space mujoco


__credits__ = ["Rushiv Arora"]

import numpy as np

from gym import utils
from gym.envs.mujoco import mujoco_env
from gym.spaces import Box

DEFAULT_CAMERA_CONFIG = {}


class SwimmerEnv(mujoco_env.MujocoEnv, utils.EzPickle):
    metadata = {
        "render_modes": [
            "human",
            "rgb_array",
            "depth_array",
            "single_rgb_array",
            "single_depth_array",
        ],
        "render_fps": 25,
    }

    def __init__(
        self,
        xml_file="swimmer.xml",
        forward_reward_weight=1.0,
        ctrl_cost_weight=1e-4,
        reset_noise_scale=0.1,
        exclude_current_positions_from_observation=True,
        **kwargs
    ):
        # EzPickle records the constructor arguments so the env can be
        # pickled and rebuilt; at this point locals() holds exactly self
        # plus the arguments above.
        utils.EzPickle.__init__(**locals())

        self._forward_reward_weight = forward_reward_weight
        self._ctrl_cost_weight = ctrl_cost_weight
        self._reset_noise_scale = reset_noise_scale
        self._exclude_current_positions_from_observation = (
            exclude_current_positions_from_observation
        )

        # With the default swimmer.xml there are 5 position and 5 velocity
        # coordinates; excluding the front tip's x/y position leaves an
        # 8-dim observation, otherwise all 10 values are observed.
        if exclude_current_positions_from_observation:
            observation_space = Box(
                low=-np.inf, high=np.inf, shape=(8,), dtype=np.float64
            )
        else:
            observation_space = Box(
                low=-np.inf, high=np.inf, shape=(10,), dtype=np.float64
            )

        mujoco_env.MujocoEnv.__init__(
            self,
            xml_file,
            4,  # frame_skip: each env step advances the simulation 4 frames
            mujoco_bindings="mujoco_py",
            observation_space=observation_space,
            **kwargs
        )

    def control_cost(self, action):
        # Quadratic penalty on actuation, scaled by ctrl_cost_weight.
        control_cost = self._ctrl_cost_weight * np.sum(np.square(action))
        return control_cost

    def step(self, action):
        xy_position_before = self.sim.data.qpos[0:2].copy()
        self.do_simulation(action, self.frame_skip)
        xy_position_after = self.sim.data.qpos[0:2].copy()

        self.renderer.render_step()

        # Average velocity over the frame-skipped step; the reward is the
        # forward (x) velocity minus the quadratic control cost.
        xy_velocity = (xy_position_after - xy_position_before) / self.dt
        x_velocity, y_velocity = xy_velocity

        forward_reward = self._forward_reward_weight * x_velocity
        ctrl_cost = self.control_cost(action)

        observation = self._get_obs()
        reward = forward_reward - ctrl_cost
        done = False  # the swimmer task never terminates on its own
        info = {
            "reward_fwd": forward_reward,
            "reward_ctrl": -ctrl_cost,
            "x_position": xy_position_after[0],
            "y_position": xy_position_after[1],
            "distance_from_origin": np.linalg.norm(xy_position_after, ord=2),
            "x_velocity": x_velocity,
            "y_velocity": y_velocity,
            "forward_reward": forward_reward,
        }

        return observation, reward, done, info

    def _get_obs(self):
        position = self.sim.data.qpos.flat.copy()
        velocity = self.sim.data.qvel.flat.copy()

        if self._exclude_current_positions_from_observation:
            # Drop the global x/y coordinates so the agent cannot simply
            # observe its own absolute position.
            position = position[2:]

        observation = np.concatenate([position, velocity]).ravel()
        return observation

    def reset_model(self):
        noise_low = -self._reset_noise_scale
        noise_high = self._reset_noise_scale

        # Start from the model's initial state plus uniform noise in
        # [-reset_noise_scale, reset_noise_scale] on every coordinate.
        qpos = self.init_qpos + self.np_random.uniform(
            low=noise_low, high=noise_high, size=self.model.nq
        )
        qvel = self.init_qvel + self.np_random.uniform(
            low=noise_low, high=noise_high, size=self.model.nv
        )

        self.set_state(qpos, qvel)

        observation = self._get_obs()
        return observation

    def viewer_setup(self):
        # Apply camera overrides: ndarray values are copied element-wise into
        # the existing camera attribute, scalars are assigned directly.
        for key, value in DEFAULT_CAMERA_CONFIG.items():
            if isinstance(value, np.ndarray):
                getattr(self.viewer.cam, key)[:] = value
            else:
                setattr(self.viewer.cam, key, value)
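

# Minimal usage sketch, an assumption rather than part of the original file:
# it requires the mujoco-py bindings this class is built on and follows the
# 4-tuple (obs, reward, done, info) step API used above. Going through the
# registered id, gym.make("Swimmer-v3"), is the usual entry point instead of
# constructing the class directly.
if __name__ == "__main__":
    env = SwimmerEnv()
    observation = env.reset()
    for _ in range(100):
        action = env.action_space.sample()  # random torques for the 2 rotors
        observation, reward, done, info = env.step(action)
    env.close()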