mirror of
https://github.com/Farama-Foundation/Gymnasium.git
synced 2025-08-31 10:09:53 +00:00
* docs+credits * docs: refactor box2d + comment version history * fix mujoco line lengths * fix more env line lengths * black * typos * put docstrings in base environments rather than highest version * fix richer reacher * black * correct black version * continuous mountain car docstring to markdown * remove unneeded images * black Co-authored-by: Andrea PIERRÉ <andrea_pierre@brown.edu>
207 lines
6.9 KiB
Python
207 lines
6.9 KiB
Python
"""
|
|
@author: Olivier Sigaud
|
|
|
|
A merge between two sources:
|
|
|
|
* Adaptation of the MountainCar Environment from the "FAReinforcement" library
|
|
of Jose Antonio Martin H. (version 1.0), adapted by 'Tom Schaul, tom@idsia.ch'
|
|
and then modified by Arnaud de Broissia
|
|
|
|
* the gym MountainCar environment
|
|
itself from
|
|
http://incompleteideas.net/sutton/MountainCar/MountainCar1.cp
|
|
permalink: https://perma.cc/6Z2N-PFWC
|
|
"""
|
|
|
|
import math
|
|
from typing import Optional
|
|
|
|
import numpy as np
|
|
|
|
import gym
|
|
from gym import spaces
|
|
from gym.utils import seeding
|
|
|
|
|
|
class Continuous_MountainCarEnv(gym.Env):
    """
    The agent (a car) is started at the bottom of a valley. For any given state
    the agent may choose to accelerate to the left, right or cease any
    acceleration. The code is originally based on [this code](http://incompleteideas.net/MountainCar/MountainCar1.cp)
    and the environment appeared first in Andrew Moore's PhD Thesis (1990):

    ```
    @TECHREPORT{Moore90efficientmemory-based,
        author = {Andrew William Moore},
        title = {Efficient Memory-based Learning for Robot Control},
        institution = {},
        year = {1990}
    }
    ```

    ## Observation Space

    The observation space is a 2-dim vector, where the 1st element represents the "car position" and the 2nd element represents the "car velocity".

    ## Action

    The actual driving force is calculated by multiplying the power coef by power (0.0015)

    ## Reward

    Reward of 100 is awarded if the agent reached the flag (position = 0.45)
    on top of the mountain. Reward is decrease based on amount of energy consumed each step.

    ## Starting State

    The position of the car is assigned a uniform random value in [-0.6 , -0.4]. The starting velocity of the car is always assigned to 0.

    ## Episode Termination

    The car position is more than 0.45. Episode length is greater than 200

    ## Arguments

    ```
    gym.make('MountainCarContinuous-v0')
    ```

    ## Version History

    * v0: Initial versions release (1.0.0)
    """

    metadata = {"render.modes": ["human", "rgb_array"], "video.frames_per_second": 30}

    def __init__(self, goal_velocity=0):
        """Create the environment.

        Args:
            goal_velocity: minimum velocity the car must have at the goal
                position for the episode to terminate (default 0).
        """
        self.min_action = -1.0
        self.max_action = 1.0
        self.min_position = -1.2
        self.max_position = 0.6
        self.max_speed = 0.07
        self.goal_position = (
            0.45  # was 0.5 in gym, 0.45 in Arnaud de Broissia's version
        )
        self.goal_velocity = goal_velocity
        self.power = 0.0015  # scales the (clipped) action into a driving force

        # Bounds of the [position, velocity] observation vector.
        self.low_state = np.array(
            [self.min_position, -self.max_speed], dtype=np.float32
        )
        self.high_state = np.array(
            [self.max_position, self.max_speed], dtype=np.float32
        )

        self.viewer = None  # created lazily on the first render() call

        self.action_space = spaces.Box(
            low=self.min_action, high=self.max_action, shape=(1,), dtype=np.float32
        )
        self.observation_space = spaces.Box(
            low=self.low_state, high=self.high_state, dtype=np.float32
        )

    def step(self, action):
        """Advance the simulation by one timestep.

        Args:
            action: 1-element sequence; ``action[0]`` is clipped to
                [min_action, max_action] and scaled by ``self.power``.

        Returns:
            Tuple ``(observation, reward, done, info)`` where observation is
            the float32 ``[position, velocity]`` array and info is empty.
        """
        position = self.state[0]
        velocity = self.state[1]
        force = min(max(action[0], self.min_action), self.max_action)

        # Euler integration of the dynamics; the cosine term is the gravity
        # component along the track (track height is sin(3 * x), see _height).
        velocity += force * self.power - 0.0025 * math.cos(3 * position)
        if velocity > self.max_speed:
            velocity = self.max_speed
        if velocity < -self.max_speed:
            velocity = -self.max_speed
        position += velocity
        if position > self.max_position:
            position = self.max_position
        if position < self.min_position:
            position = self.min_position
        if position == self.min_position and velocity < 0:
            # Inelastic collision with the left wall: drop leftward velocity.
            velocity = 0

        # Convert a possible numpy bool to a Python bool.
        done = bool(position >= self.goal_position and velocity >= self.goal_velocity)

        reward = 0
        if done:
            reward = 100.0
        # NOTE(review): the energy penalty uses the raw action, not the
        # clipped ``force`` — kept as-is for backward compatibility.
        reward -= math.pow(action[0], 2) * 0.1

        self.state = np.array([position, velocity], dtype=np.float32)
        return self.state, reward, done, {}

    def reset(self, *, seed: Optional[int] = None, options: Optional[dict] = None):
        """Reset to a random start position in [-0.6, -0.4] with zero velocity.

        Returns the initial float32 observation.
        """
        super().reset(seed=seed)
        # Store the state as float32 so the internal state dtype matches both
        # the declared observation_space dtype and what step() writes back
        # (previously reset left it as float64 until the first step).
        self.state = np.array(
            [self.np_random.uniform(low=-0.6, high=-0.4), 0], dtype=np.float32
        )
        return np.array(self.state, dtype=np.float32)

    def _height(self, xs):
        # Track profile: a sine valley rescaled into screen-friendly units.
        return np.sin(3 * xs) * 0.45 + 0.55

    def render(self, mode="human"):
        """Render the scene with pyglet; returns an RGB array in "rgb_array" mode."""
        screen_width = 600
        screen_height = 400

        world_width = self.max_position - self.min_position
        scale = screen_width / world_width
        carwidth = 40
        carheight = 20

        if self.viewer is None:
            # One-time construction of the static scene (track, car, flag).
            from gym.utils import pyglet_rendering

            self.viewer = pyglet_rendering.Viewer(screen_width, screen_height)
            xs = np.linspace(self.min_position, self.max_position, 100)
            ys = self._height(xs)
            xys = list(zip((xs - self.min_position) * scale, ys * scale))

            self.track = pyglet_rendering.make_polyline(xys)
            self.track.set_linewidth(4)
            self.viewer.add_geom(self.track)

            clearance = 10  # lifts the car body above the wheels/track

            l, r, t, b = -carwidth / 2, carwidth / 2, carheight, 0
            car = pyglet_rendering.FilledPolygon([(l, b), (l, t), (r, t), (r, b)])
            car.add_attr(pyglet_rendering.Transform(translation=(0, clearance)))
            self.cartrans = pyglet_rendering.Transform()
            car.add_attr(self.cartrans)
            self.viewer.add_geom(car)
            frontwheel = pyglet_rendering.make_circle(carheight / 2.5)
            frontwheel.set_color(0.5, 0.5, 0.5)
            frontwheel.add_attr(
                pyglet_rendering.Transform(translation=(carwidth / 4, clearance))
            )
            frontwheel.add_attr(self.cartrans)
            self.viewer.add_geom(frontwheel)
            backwheel = pyglet_rendering.make_circle(carheight / 2.5)
            backwheel.add_attr(
                pyglet_rendering.Transform(translation=(-carwidth / 4, clearance))
            )
            backwheel.add_attr(self.cartrans)
            backwheel.set_color(0.5, 0.5, 0.5)
            self.viewer.add_geom(backwheel)
            flagx = (self.goal_position - self.min_position) * scale
            flagy1 = self._height(self.goal_position) * scale
            flagy2 = flagy1 + 50
            flagpole = pyglet_rendering.Line((flagx, flagy1), (flagx, flagy2))
            self.viewer.add_geom(flagpole)
            flag = pyglet_rendering.FilledPolygon(
                [(flagx, flagy2), (flagx, flagy2 - 10), (flagx + 25, flagy2 - 5)]
            )
            flag.set_color(0.8, 0.8, 0)
            self.viewer.add_geom(flag)

        # Per-frame update: move/rotate the car to match the current state.
        pos = self.state[0]
        self.cartrans.set_translation(
            (pos - self.min_position) * scale, self._height(pos) * scale
        )
        self.cartrans.set_rotation(math.cos(3 * pos))

        return self.viewer.render(return_rgb_array=mode == "rgb_array")

    def close(self):
        """Release the rendering window, if one was created."""
        if self.viewer:
            self.viewer.close()
            self.viewer = None