from abc import abstractmethod

import gym
from gym import error
from gym.utils import closer

env_closer = closer.Closer()


class Env(object):
    """The main OpenAI Gym class. It encapsulates an environment with
    arbitrary behind-the-scenes dynamics. An environment can be
    partially or fully observed.

    The main API methods that users of this class need to know are:

        step
        reset
        render
        close
        seed

    And set the following attributes:

        action_space: The Space object corresponding to valid actions
        observation_space: The Space object corresponding to valid observations
        reward_range: A tuple corresponding to the min and max possible rewards

    Note: a default reward range set to [-inf, +inf] already exists. Set it if
    you want a narrower range.

    The methods are accessed publicly as "step", "reset", etc...
    """

    # Set this in SOME subclasses
    metadata = {"render.modes": []}
    reward_range = (-float("inf"), float("inf"))
    spec = None

    # Set these in ALL subclasses
    action_space = None
    observation_space = None

    @abstractmethod
    def step(self, action):
        """Run one timestep of the environment's dynamics. When the end of
        an episode is reached, you are responsible for calling `reset()`
        to reset this environment's state.

        Accepts an action and returns a tuple (observation, reward, done, info).

        Args:
            action (object): an action provided by the agent

        Returns:
            observation (object): agent's observation of the current environment
            reward (float): amount of reward returned after the previous action
            done (bool): whether the episode has ended, in which case further step() calls will return undefined results
            info (dict): contains auxiliary diagnostic information (helpful for debugging, and sometimes learning)
        """
        raise NotImplementedError

    @abstractmethod
    def reset(self):
        """Resets the environment to an initial state and returns an initial
        observation.

        Note that this function should not reset the environment's random
        number generator(s); random variables in the environment's state should
        be sampled independently between multiple calls to `reset()`. In other
        words, each call of `reset()` should yield an environment suitable for
        a new episode, independent of previous episodes.

        Returns:
            observation (object): the initial observation.
        """
        raise NotImplementedError

    @abstractmethod
    def render(self, mode="human"):
        """Renders the environment.

        The set of supported modes varies per environment. (And some
        environments do not support rendering at all.) By convention,
        if mode is:

        - human: render to the current display or terminal and
          return nothing. Usually for human consumption.
        - rgb_array: Return a numpy.ndarray with shape (x, y, 3),
          representing RGB values for an x-by-y pixel image, suitable
          for turning into a video.
        - ansi: Return a string (str) or StringIO.StringIO containing a
          terminal-style text representation. The text can include newlines
          and ANSI escape sequences (e.g. for colors).

        Note:
            Make sure that your class's metadata 'render.modes' key includes
            the list of supported modes. It's recommended to call super()
            in implementations to use the functionality of this method.

        Args:
            mode (str): the mode to render with

        Example:

            class MyEnv(Env):
                metadata = {'render.modes': ['human', 'rgb_array']}

                def render(self, mode='human'):
                    if mode == 'rgb_array':
                        return np.array(...)  # return RGB frame suitable for video
                    elif mode == 'human':
                        ...  # pop up a window and render
                    else:
                        super(MyEnv, self).render(mode=mode)  # just raise an exception
        """
        raise NotImplementedError

    def close(self):
        """Override close in your subclass to perform any necessary cleanup.

        Environments will automatically close() themselves when
        garbage collected or when the program exits.
        """
        pass

    def seed(self, seed=None):
        """Sets the seed for this env's random number generator(s).

        Note:
            Some environments use multiple pseudorandom number generators.
            We want to capture all such seeds used in order to ensure that
            there aren't accidental correlations between multiple generators.

        Returns:
            list<bigint>: Returns the list of seeds used in this env's random
              number generators. The first value in the list should be the
              "main" seed, or the value which a reproducer should pass to
              'seed'. Often, the main seed equals the provided 'seed', but
              this won't be true if seed=None, for example.
        """
        return

    @property
    def unwrapped(self):
        """Completely unwrap this env.

        Returns:
            gym.Env: The base non-wrapped gym.Env instance
        """
        return self

    def __str__(self):
        if self.spec is None:
            return "<{} instance>".format(type(self).__name__)
        else:
            return "<{}<{}>>".format(type(self).__name__, self.spec.id)

    def __enter__(self):
        """Support with-statement for the environment."""
        return self

    def __exit__(self, *args):
        """Support with-statement for the environment."""
        self.close()
        # propagate exception
        return False
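

# Illustrative sketch: a minimal random-policy rollout exercising the
# seed/reset/step/close API documented above. The helper name is ours, and we
# assume ``env`` is a concrete Env whose action_space provides sample().
def _example_rollout(env, seed=0):
    """Run one episode with random actions against any concrete Env."""
    env.seed(seed)
    observation = env.reset()
    done = False
    while not done:
        action = env.action_space.sample()
        observation, reward, done, info = env.step(action)
    env.close()
    return observation, reward, info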


class GoalEnv(Env):
    """A goal-based environment. It functions just like any regular OpenAI Gym
    environment, but it imposes a required structure on the observation_space.
    More concretely, the observation space is required to contain at least
    three elements, namely `observation`, `desired_goal`, and `achieved_goal`.
    Here, `desired_goal` specifies the goal that the agent should attempt to
    achieve, while `achieved_goal` is the goal that the agent has actually
    achieved so far. `observation` contains the actual observations of the
    environment as per usual.
    """

    def reset(self):
        # Enforce that each GoalEnv uses a Goal-compatible observation space.
        if not isinstance(self.observation_space, gym.spaces.Dict):
            raise error.Error(
                "GoalEnv requires an observation space of type gym.spaces.Dict"
            )
        for key in ["observation", "achieved_goal", "desired_goal"]:
            if key not in self.observation_space.spaces:
                raise error.Error(
                    'GoalEnv requires the "{}" key to be part of the observation dictionary.'.format(
                        key
                    )
                )

    @abstractmethod
    def compute_reward(self, achieved_goal, desired_goal, info):
        """Compute the step reward. This externalizes the reward function and makes
        it dependent on a desired goal and the one that was achieved. If you wish to include
        additional rewards that are independent of the goal, you can include the necessary values
        to derive it in 'info' and compute it accordingly.

        Args:
            achieved_goal (object): the goal that was achieved during execution
            desired_goal (object): the desired goal that we asked the agent to attempt to achieve
            info (dict): an info dictionary with additional information

        Returns:
            float: The reward that corresponds to the provided achieved goal w.r.t. the desired
            goal. Note that the following should always hold true:

                ob, reward, done, info = env.step(action)
                assert reward == env.compute_reward(ob['achieved_goal'], ob['desired_goal'], info)
        """
        raise NotImplementedError
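

# Illustrative sketch: checking the compute_reward invariant stated in the
# docstring above against a live goal environment. ``_check_goal_reward`` is
# a hypothetical helper; ``env`` is assumed to be a concrete GoalEnv instance.
def _check_goal_reward(env, action):
    """Step once and assert the returned reward matches compute_reward."""
    ob, reward, done, info = env.step(action)
    assert reward == env.compute_reward(ob["achieved_goal"], ob["desired_goal"], info)
    return reward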


class Wrapper(Env):
    """Wraps the environment to allow a modular transformation.

    This class is the base class for all wrappers. The subclass could override
    some methods to change the behavior of the original environment without
    touching the original code.

    .. note::

        Don't forget to call ``super().__init__(env)`` if the subclass overrides :meth:`__init__`.
    """

    def __init__(self, env):
        self.env = env
        self.action_space = self.env.action_space
        self.observation_space = self.env.observation_space
        self.reward_range = self.env.reward_range
        self.metadata = self.env.metadata

    def __getattr__(self, name):
        if name.startswith("_"):
            raise AttributeError(
                "attempted to get missing private attribute '{}'".format(name)
            )
        return getattr(self.env, name)

    @property
    def spec(self):
        return self.env.spec

    @classmethod
    def class_name(cls):
        return cls.__name__

    def step(self, action):
        return self.env.step(action)

    def reset(self, **kwargs):
        return self.env.reset(**kwargs)

    def render(self, mode="human", **kwargs):
        return self.env.render(mode, **kwargs)

    def close(self):
        return self.env.close()

    def seed(self, seed=None):
        return self.env.seed(seed)

    def compute_reward(self, achieved_goal, desired_goal, info):
        return self.env.compute_reward(achieved_goal, desired_goal, info)

    def __str__(self):
        return "<{}{}>".format(type(self).__name__, self.env)

    def __repr__(self):
        return str(self)

    @property
    def unwrapped(self):
        return self.env.unwrapped
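

# Illustrative sketch: a minimal Wrapper subclass. As the class docstring
# requires, it calls ``super().__init__(env)`` from its own ``__init__``.
# The class name and the step counter are hypothetical, for demonstration.
class _StepCountWrapper(Wrapper):
    def __init__(self, env):
        super(_StepCountWrapper, self).__init__(env)
        self.num_steps = 0

    def reset(self, **kwargs):
        self.num_steps = 0
        return self.env.reset(**kwargs)

    def step(self, action):
        self.num_steps += 1
        return self.env.step(action)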


class ObservationWrapper(Wrapper):
    def reset(self, **kwargs):
        observation = self.env.reset(**kwargs)
        return self.observation(observation)

    def step(self, action):
        observation, reward, done, info = self.env.step(action)
        return self.observation(observation), reward, done, info

    @abstractmethod
    def observation(self, observation):
        raise NotImplementedError
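

# Illustrative sketch: an ObservationWrapper that rescales observations.
# Assumes observations support scalar multiplication (e.g. numpy arrays);
# the class name and default scale are hypothetical.
class _ScaledObservation(ObservationWrapper):
    def __init__(self, env, scale=0.01):
        super(_ScaledObservation, self).__init__(env)
        self.scale = scale

    def observation(self, observation):
        # Applied to the output of both reset() and step() by the base class.
        return observation * self.scale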


class RewardWrapper(Wrapper):
    def reset(self, **kwargs):
        return self.env.reset(**kwargs)

    def step(self, action):
        observation, reward, done, info = self.env.step(action)
        return observation, self.reward(reward), done, info

    @abstractmethod
    def reward(self, reward):
        raise NotImplementedError
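

# Illustrative sketch: a RewardWrapper that clips rewards to a fixed interval,
# a common shaping trick. The class name and default bounds are hypothetical.
class _ClippedReward(RewardWrapper):
    def __init__(self, env, min_reward=-1.0, max_reward=1.0):
        super(_ClippedReward, self).__init__(env)
        self.min_reward = min_reward
        self.max_reward = max_reward

    def reward(self, reward):
        # Applied to every reward returned by step() in the base class.
        return min(max(reward, self.min_reward), self.max_reward)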


class ActionWrapper(Wrapper):
    def reset(self, **kwargs):
        return self.env.reset(**kwargs)

    def step(self, action):
        return self.env.step(self.action(action))

    @abstractmethod
    def action(self, action):
        raise NotImplementedError

    @abstractmethod
    def reverse_action(self, action):
        raise NotImplementedError
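

# Illustrative sketch: an ActionWrapper that rescales agent actions before
# they reach the wrapped env; reverse_action inverts the transform. The class
# name and scale factor are hypothetical, and actions are assumed numeric.
class _ScaledAction(ActionWrapper):
    def __init__(self, env, scale=2.0):
        super(_ScaledAction, self).__init__(env)
        self.scale = scale

    def action(self, action):
        # Applied to every action passed to step() by the base class.
        return action * self.scale

    def reverse_action(self, action):
        return action / self.scale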