import logging
logger = logging.getLogger(__name__)

import numpy as np

from gym import error
from gym.utils import closer

# Global registry: every Env instance is registered here at construction so
# it can be close()d automatically at garbage collection / interpreter exit.
env_closer = closer.Closer()

# Env-related abstractions

class Env(object):
    """The main OpenAI Gym class. It encapsulates an environment with
    arbitrary behind-the-scenes dynamics. An environment can be
    partially or fully observed.

    The main API methods that users of this class need to know are:

        step
        reset
        render
        close
        seed

    When implementing an environment, override the following methods
    in your subclass:

        _step
        _reset
        _render
        _close
        _seed

    And set the following attributes:

        action_space: The Space object corresponding to valid actions
        observation_space: The Space object corresponding to valid observations
        reward_range: A tuple corresponding to the min and max possible rewards

    Note: a default reward range set to [-inf,+inf] already exists. Set it if
    you want a narrower range.

    The methods are accessed publicly as "step", "reset", etc.. The
    non-underscored versions are wrapper methods to which we may add
    functionality over time.
    """
    def __new__(cls, *args, **kwargs):
        # We use __new__ since we want the env author to be able to
        # override __init__ without remembering to call super.
        env = super(Env, cls).__new__(cls)
        env._env_closer_id = env_closer.register(env)
        env._closed = False

        # Will be automatically set when creating an environment via 'make'
        env.spec = None
        return env

    # Set this in SOME subclasses
    metadata = {'render.modes': []}
    reward_range = (-np.inf, np.inf)

    # Override in SOME subclasses
    def _close(self):
        pass

    # Set these in ALL subclasses
    action_space = None
    observation_space = None

    # Override in ALL subclasses
    def _step(self, action):
        raise NotImplementedError

    def _reset(self):
        raise NotImplementedError

    def _render(self, mode='human', close=False):
        raise NotImplementedError

    def _seed(self, seed=None):
        return []

    # Do not override
    _owns_render = True

    @property
    def monitor(self):
        raise error.Error("env.monitor has been deprecated as of 12/23/2016. Remove your call to `env.monitor.start(directory)` and instead wrap your env with `env = gym.wrappers.Monitor(env, directory)` to record data.")

    def step(self, action):
        """Run one timestep of the environment's dynamics. When end of
        episode is reached, you are responsible for calling `reset()`
        to reset this environment's state.

        Accepts an action and returns a tuple (observation, reward, done, info).

        Args:
            action (object): an action provided by the agent

        Returns:
            observation (object): agent's observation of the current environment
            reward (float) : amount of reward returned after previous action
            done (boolean): whether the episode has ended, in which case further step() calls will return undefined results
            info (dict): contains auxiliary diagnostic information (helpful for debugging, and sometimes learning)
        """
        return self._step(action)

    def reset(self):
        """Resets the state of the environment and returns an initial
        observation.

        Returns:
            observation (object): the initial observation of the space.
        """
        return self._reset()

    def render(self, mode='human', close=False):
        """Renders the environment.

        The set of supported modes varies per environment. (And some
        environments do not support rendering at all.) By convention,
        if mode is:

        - human: render to the current display or terminal and
          return nothing. Usually for human consumption.
        - rgb_array: Return an numpy.ndarray with shape (x, y, 3),
          representing RGB values for an x-by-y pixel image, suitable
          for turning into a video.
        - ansi: Return a string (str) or StringIO.StringIO containing a
          terminal-style text representation. The text can include newlines
          and ANSI escape sequences (e.g. for colors).

        Note:
            Make sure that your class's metadata 'render.modes' key includes
              the list of supported modes. It's recommended to call super()
              in implementations to use the functionality of this method.

        Args:
            mode (str): the mode to render with
            close (bool): close all open renderings

        Example:

        class MyEnv(Env):
            metadata = {'render.modes': ['human', 'rgb_array']}

            def render(self, mode='human'):
                if mode == 'rgb_array':
                    return np.array(...) # return RGB frame suitable for video
                elif mode == 'human':
                    ... # pop up a window and render
                else:
                    super(MyEnv, self).render(mode=mode) # just raise an exception
        """
        # close=True is a no-op at this level; subclasses tear down their
        # viewers inside _render / _close.
        if close:
            return
        modes = self.metadata.get('render.modes', [])
        if len(modes) == 0:
            raise error.UnsupportedMode('{} does not support rendering (requested mode: {})'.format(self, mode))
        elif mode not in modes:
            raise error.UnsupportedMode('Unsupported rendering mode: {}. (Supported modes for {}: {})'.format(mode, self, modes))
        return self._render(mode=mode, close=close)

    def close(self):
        """Override _close in your subclass to perform any necessary cleanup.

        Environments will automatically close() themselves when
        garbage collected or when the program exits.
        """
        # Idempotent: repeated close() calls are safe.
        if self._closed:
            return
        if self._owns_render:
            self.render(close=True)

        self._close()
        env_closer.unregister(self._env_closer_id)
        # If an error occurs before this line, it's possible to
        # end up with double close.
        self._closed = True

    def seed(self, seed=None):
        """Sets the seed for this env's random number generator(s).

        Note:
            Some environments use multiple pseudorandom number generators.
            We want to capture all such seeds used in order to ensure that
            there aren't accidental correlations between multiple generators.

        Returns:
            list<bigint>: Returns the list of seeds used in this env's random
              number generators. The first value in the list should be the
              "main" seed, or the value which a reproducer should pass to
              'seed'. Often, the main seed equals the provided 'seed', but
              this won't be true if seed=None, for example.
        """
        return self._seed(seed)

    @property
    def unwrapped(self):
        """Completely unwrap this env.

        Returns:
            gym.Env: The base non-wrapped gym.Env instance
        """
        return self

    def __del__(self):
        self.close()

    def __str__(self):
        return '<{} instance>'.format(type(self).__name__)

    def configure(self):
        raise NotImplementedError("Env.configure has been removed. If you need it, please go back to gym commit 6f27709.")

# Space-related abstractions

class Space(object):
    """Defines the observation and action spaces, so you can write generic
    code that applies to any Env. For example, you can choose a random
    action.
    """

    def sample(self):
        """
        Uniformly randomly sample a random element of this space
        """
        raise NotImplementedError

    def contains(self, x):
        """
        Return boolean specifying if x is a valid
        member of this space
        """
        raise NotImplementedError

    def to_jsonable(self, sample_n):
        """Convert a batch of samples from this space to a JSONable data type."""
        # By default, assume identity is JSONable
        return sample_n

    def from_jsonable(self, sample_n):
        """Convert a JSONable data type to a batch of samples from this space."""
        # By default, assume identity is JSONable
        return sample_n

class Wrapper(Env):
    """Wraps an inner Env and forwards the Env API to it, letting subclasses
    override individual aspects (observations, rewards, actions) while
    inheriting everything else.
    """

    # Clear metadata so by default we don't override any keys.
    metadata = {}

    # The innermost env owns rendering; wrappers must not double-close it.
    _owns_render = False

    # Make sure self.env is always defined, even if things break
    # early.
    env = None

    def __init__(self, env):
        self.env = env
        # Merge with the base metadata
        metadata = self.metadata
        self.metadata = self.env.metadata.copy()
        self.metadata.update(metadata)

        self.action_space = self.env.action_space
        self.observation_space = self.env.observation_space
        self.reward_range = self.env.reward_range
        self.spec = self.env.spec
        self._ensure_no_double_wrap()

    @classmethod
    def class_name(cls):
        return cls.__name__

    def _ensure_no_double_wrap(self):
        # Walk the wrapper chain and refuse to stack two wrappers of the
        # same class (e.g. Monitor around Monitor).
        env = self.env
        while True:
            if isinstance(env, Wrapper):
                if env.class_name() == self.class_name():
                    raise error.DoubleWrapperError("Attempted to double wrap with Wrapper: {}".format(self.__class__.__name__))
                env = env.env
            else:
                break

    def _step(self, action):
        return self.env.step(action)

    def _reset(self):
        return self.env.reset()

    def _render(self, mode='human', close=False):
        return self.env.render(mode, close)

    def _close(self):
        return self.env.close()

    def _seed(self, seed=None):
        return self.env.seed(seed)

    def __str__(self):
        return '<{}{}>'.format(type(self).__name__, self.env)

    def __repr__(self):
        return str(self)

    @property
    def unwrapped(self):
        return self.env.unwrapped

class ObservationWrapper(Wrapper):
    def _reset(self):
        observation = self.env.reset()
        # Route through the public hook (consistent with _step) so
        # subclasses overriding observation() see reset observations too.
        return self.observation(observation)

    def _step(self, action):
        observation, reward, done, info = self.env.step(action)
        return self.observation(observation), reward, done, info

    def observation(self, observation):
        return self._observation(observation)

    def _observation(self, observation):
        raise NotImplementedError

class RewardWrapper(Wrapper):
    def _step(self, action):
        observation, reward, done, info = self.env.step(action)
        return observation, self.reward(reward), done, info

    def reward(self, reward):
        return self._reward(reward)

    def _reward(self, reward):
        raise NotImplementedError

class ActionWrapper(Wrapper):
    def _step(self, action):
        action = self.action(action)
        return self.env.step(action)

    def action(self, action):
        return self._action(action)

    def _action(self, action):
        raise NotImplementedError

    def reverse_action(self, action):
        return self._reverse_action(action)

    def _reverse_action(self, action):
        raise NotImplementedError