"""An async vector environment."""
|
|
import multiprocessing as mp
|
|
import sys
|
|
import time
|
|
from copy import deepcopy
|
|
from enum import Enum
|
|
from typing import List, Optional, Sequence, Tuple, Union
|
|
|
|
import numpy as np
|
|
|
|
import gym
|
|
from gym import logger
|
|
from gym.core import ObsType
|
|
from gym.error import (
|
|
AlreadyPendingCallError,
|
|
ClosedEnvironmentError,
|
|
CustomSpaceError,
|
|
NoAsyncCallError,
|
|
)
|
|
from gym.vector.utils import (
|
|
CloudpickleWrapper,
|
|
clear_mpi_env_vars,
|
|
concatenate,
|
|
create_empty_array,
|
|
create_shared_memory,
|
|
iterate,
|
|
read_from_shared_memory,
|
|
write_to_shared_memory,
|
|
)
|
|
from gym.vector.vector_env import VectorEnv
|
|
|
|
__all__ = ["AsyncVectorEnv"]
|
|
|
|
|
|
class AsyncState(Enum):
|
|
DEFAULT = "default"
|
|
WAITING_RESET = "reset"
|
|
WAITING_STEP = "step"
|
|
WAITING_CALL = "call"
|
|
|
|
|
|
class AsyncVectorEnv(VectorEnv):
|
|
"""Vectorized environment that runs multiple environments in parallel.
|
|
|
|
It uses ``multiprocessing`` processes, and pipes for communication.
|
|
|
|
Example::
|
|
|
|
>>> import gym
|
|
>>> env = gym.vector.AsyncVectorEnv([
|
|
... lambda: gym.make("Pendulum-v0", g=9.81),
|
|
... lambda: gym.make("Pendulum-v0", g=1.62)
|
|
... ])
|
|
>>> env.reset()
|
|
array([[-0.8286432 , 0.5597771 , 0.90249056],
|
|
[-0.85009176, 0.5266346 , 0.60007906]], dtype=float32)
|
|
"""
|
|
|
|
    def __init__(
        self,
        env_fns: Sequence[callable],
        observation_space: Optional[gym.Space] = None,
        action_space: Optional[gym.Space] = None,
        shared_memory: bool = True,
        copy: bool = True,
        context: Optional[str] = None,
        daemon: bool = True,
        worker: Optional[callable] = None,
    ):
        """Vectorized environment that runs multiple environments in parallel.

        Args:
            env_fns: Functions that create the environments.
            observation_space: Observation space of a single environment. If ``None``,
                then the observation space of the first environment is taken.
            action_space: Action space of a single environment. If ``None``,
                then the action space of the first environment is taken.
            shared_memory: If ``True``, then the observations from the worker processes are communicated back through
                shared variables. This can improve the efficiency if the observations are large (e.g. images).
            copy: If ``True``, then the :meth:`~AsyncVectorEnv.reset` and :meth:`~AsyncVectorEnv.step` methods
                return a copy of the observations.
            context: Context for `multiprocessing`_. If ``None``, then the default context is used.
            daemon: If ``True``, then subprocesses have the ``daemon`` flag turned on; that is, they will quit if
                the head process quits. However, ``daemon=True`` prevents subprocesses from spawning children,
                so for some environments you may want to have it set to ``False``.
            worker: If set, then use that worker in a subprocess instead of a default one.
                Can be useful to override some inner vector env logic, for instance, how resets on done are handled.

        Warnings: worker is an advanced mode option. It provides a high degree of flexibility and a high chance
            to shoot yourself in the foot; thus, if you are writing your own worker, it is recommended to start
            from the code of the ``_worker`` (or ``_worker_shared_memory``) function, and add changes.

        Raises:
            RuntimeError: If the observation space of some sub-environment does not match ``observation_space``
                (or, by default, the observation space of the first sub-environment).
            ValueError: If ``observation_space`` is a custom space (i.e. not a default space in Gym,
                such as ``gym.spaces.Box``, ``gym.spaces.Discrete``, or ``gym.spaces.Dict``) and ``shared_memory`` is ``True``.
        """
        ctx = mp.get_context(context)
        self.env_fns = env_fns
        self.shared_memory = shared_memory
        self.copy = copy
        dummy_env = env_fns[0]()
        self.metadata = dummy_env.metadata

        if (observation_space is None) or (action_space is None):
            observation_space = observation_space or dummy_env.observation_space
            action_space = action_space or dummy_env.action_space
        dummy_env.close()
        del dummy_env
        super().__init__(
            num_envs=len(env_fns),
            observation_space=observation_space,
            action_space=action_space,
        )

        if self.shared_memory:
            try:
                _obs_buffer = create_shared_memory(
                    self.single_observation_space, n=self.num_envs, ctx=ctx
                )
                self.observations = read_from_shared_memory(
                    self.single_observation_space, _obs_buffer, n=self.num_envs
                )
            except CustomSpaceError:
                raise ValueError(
                    "Using `shared_memory=True` in `AsyncVectorEnv` "
                    "is incompatible with non-standard Gym observation spaces "
                    "(i.e. custom spaces inheriting from `gym.Space`), and is "
                    "only compatible with default Gym spaces (e.g. `Box`, "
                    "`Tuple`, `Dict`) for batching. Set `shared_memory=False` "
                    "if you use custom observation spaces."
                )
        else:
            _obs_buffer = None
            self.observations = create_empty_array(
                self.single_observation_space, n=self.num_envs, fn=np.zeros
            )

        self.parent_pipes, self.processes = [], []
        self.error_queue = ctx.Queue()
        target = _worker_shared_memory if self.shared_memory else _worker
        target = worker or target
        with clear_mpi_env_vars():
            for idx, env_fn in enumerate(self.env_fns):
                parent_pipe, child_pipe = ctx.Pipe()
                process = ctx.Process(
                    target=target,
                    name=f"Worker<{type(self).__name__}>-{idx}",
                    args=(
                        idx,
                        CloudpickleWrapper(env_fn),
                        child_pipe,
                        parent_pipe,
                        _obs_buffer,
                        self.error_queue,
                    ),
                )

                self.parent_pipes.append(parent_pipe)
                self.processes.append(process)

                process.daemon = daemon
                process.start()
                child_pipe.close()

        self._state = AsyncState.DEFAULT
        self._check_spaces()
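
    # A construction sketch, assuming the "Pendulum-v0" id used in the class
    # docstring is available. `shared_memory=False` sidesteps the CustomSpaceError
    # path for non-standard observation spaces, and `context="spawn"` selects an
    # explicit multiprocessing start method:
    #
    #   >>> env_fns = [lambda: gym.make("Pendulum-v0") for _ in range(4)]
    #   >>> envs = AsyncVectorEnv(env_fns, shared_memory=False, context="spawn")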

    def seed(self, seed=None):
        """Seeds the vector environments.

        Args:
            seed: The seeds to use with the environments

        Raises:
            AlreadyPendingCallError: Calling `seed` while waiting for a pending call to complete
        """
        super().seed(seed=seed)
        self._assert_is_running()
        if seed is None:
            seed = [None for _ in range(self.num_envs)]
        if isinstance(seed, int):
            seed = [seed + i for i in range(self.num_envs)]
        assert len(seed) == self.num_envs

        if self._state != AsyncState.DEFAULT:
            raise AlreadyPendingCallError(
                f"Calling `seed` while waiting for a pending call to `{self._state.value}` to complete.",
                self._state.value,
            )

        for pipe, single_seed in zip(self.parent_pipes, seed):
            pipe.send(("seed", single_seed))
        _, successes = zip(*[pipe.recv() for pipe in self.parent_pipes])
        self._raise_if_errors(successes)
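
    # A seeding sketch (assuming `envs` from the sketch above): an int is expanded
    # to `seed + i` for sub-environment i, while a list is used as-is, one entry
    # per sub-environment:
    #
    #   >>> envs.seed(123)              # sub-env i receives 123 + i
    #   >>> envs.seed([7, 11, 13, 17])  # explicit per-env seeds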

    def reset_async(
        self,
        seed: Optional[Union[int, List[int]]] = None,
        return_info: bool = False,
        options: Optional[dict] = None,
    ):
        """Send calls to the :obj:`reset` methods of the sub-environments.

        To get the results of these calls, you may invoke :meth:`reset_wait`.

        Args:
            seed: List of seeds for each environment
            return_info: Whether to return information
            options: The reset options

        Raises:
            ClosedEnvironmentError: If the environment was closed (if :meth:`close` was previously called).
            AlreadyPendingCallError: If the environment is already waiting for a pending call to another
                method (e.g. :meth:`step_async`). This can be caused by two consecutive
                calls to :meth:`reset_async`, with no call to :meth:`reset_wait` in between.
        """
        self._assert_is_running()

        if seed is None:
            seed = [None for _ in range(self.num_envs)]
        if isinstance(seed, int):
            seed = [seed + i for i in range(self.num_envs)]
        assert len(seed) == self.num_envs

        if self._state != AsyncState.DEFAULT:
            raise AlreadyPendingCallError(
                f"Calling `reset_async` while waiting for a pending call to `{self._state.value}` to complete",
                self._state.value,
            )

        for pipe, single_seed in zip(self.parent_pipes, seed):
            single_kwargs = {}
            if single_seed is not None:
                single_kwargs["seed"] = single_seed
            if return_info:
                single_kwargs["return_info"] = return_info
            if options is not None:
                single_kwargs["options"] = options

            pipe.send(("reset", single_kwargs))
        self._state = AsyncState.WAITING_RESET

    def reset_wait(
        self,
        timeout: Optional[Union[int, float]] = None,
        seed: Optional[int] = None,
        return_info: bool = False,
        options: Optional[dict] = None,
    ) -> Union[ObsType, Tuple[ObsType, List[dict]]]:
        """Waits for the calls triggered by :meth:`reset_async` to finish and returns the results.

        Args:
            timeout: Number of seconds before the call to `reset_wait` times out. If `None`, the call to `reset_wait` never times out.
            seed: ignored
            return_info: Whether to return information
            options: ignored

        Returns:
            The batched observations, or a tuple of batched observations and info dictionaries if ``return_info`` is ``True``

        Raises:
            ClosedEnvironmentError: If the environment was closed (if :meth:`close` was previously called).
            NoAsyncCallError: If :meth:`reset_wait` was called without any prior call to :meth:`reset_async`.
            TimeoutError: If :meth:`reset_wait` timed out.
        """
        self._assert_is_running()
        if self._state != AsyncState.WAITING_RESET:
            raise NoAsyncCallError(
                "Calling `reset_wait` without any prior call to `reset_async`.",
                AsyncState.WAITING_RESET.value,
            )

        if not self._poll(timeout):
            self._state = AsyncState.DEFAULT
            raise mp.TimeoutError(
                f"The call to `reset_wait` has timed out after {timeout} second(s)."
            )

        results, successes = zip(*[pipe.recv() for pipe in self.parent_pipes])
        self._raise_if_errors(successes)
        self._state = AsyncState.DEFAULT

        if return_info:
            infos = {}
            results, info_data = zip(*results)
            for i, info in enumerate(info_data):
                infos = self._add_info(infos, info, i)

            if not self.shared_memory:
                self.observations = concatenate(
                    self.single_observation_space, results, self.observations
                )

            return (
                deepcopy(self.observations) if self.copy else self.observations
            ), infos
        else:
            if not self.shared_memory:
                self.observations = concatenate(
                    self.single_observation_space, results, self.observations
                )

            return deepcopy(self.observations) if self.copy else self.observations
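
    # A reset sketch (assuming `envs` from the sketches above); the split
    # async/wait form mirrors a plain `reset` call:
    #
    #   >>> envs.reset_async(seed=[0, 1, 2, 3], return_info=True)
    #   >>> obs, infos = envs.reset_wait(timeout=10, return_info=True)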

    def step_async(self, actions: np.ndarray):
        """Send the calls to :obj:`step` to each sub-environment.

        Args:
            actions: Batch of actions, an element of :attr:`~VectorEnv.action_space`

        Raises:
            ClosedEnvironmentError: If the environment was closed (if :meth:`close` was previously called).
            AlreadyPendingCallError: If the environment is already waiting for a pending call to another
                method (e.g. :meth:`reset_async`). This can be caused by two consecutive
                calls to :meth:`step_async`, with no call to :meth:`step_wait` in
                between.
        """
        self._assert_is_running()
        if self._state != AsyncState.DEFAULT:
            raise AlreadyPendingCallError(
                f"Calling `step_async` while waiting for a pending call to `{self._state.value}` to complete.",
                self._state.value,
            )

        actions = iterate(self.action_space, actions)
        for pipe, action in zip(self.parent_pipes, actions):
            pipe.send(("step", action))
        self._state = AsyncState.WAITING_STEP

    def step_wait(
        self, timeout: Optional[Union[int, float]] = None
    ) -> Tuple[np.ndarray, np.ndarray, np.ndarray, List[dict]]:
        """Wait for the calls to :obj:`step` in each sub-environment to finish.

        Args:
            timeout: Number of seconds before the call to :meth:`step_wait` times out. If ``None``, the call to :meth:`step_wait` never times out.

        Returns:
            The batched environment step results: observations, rewards, dones and infos

        Raises:
            ClosedEnvironmentError: If the environment was closed (if :meth:`close` was previously called).
            NoAsyncCallError: If :meth:`step_wait` was called without any prior call to :meth:`step_async`.
            TimeoutError: If :meth:`step_wait` timed out.
        """
        self._assert_is_running()
        if self._state != AsyncState.WAITING_STEP:
            raise NoAsyncCallError(
                "Calling `step_wait` without any prior call to `step_async`.",
                AsyncState.WAITING_STEP.value,
            )

        if not self._poll(timeout):
            self._state = AsyncState.DEFAULT
            raise mp.TimeoutError(
                f"The call to `step_wait` has timed out after {timeout} second(s)."
            )

        observations_list, rewards, dones, infos = [], [], [], {}
        successes = []
        for i, pipe in enumerate(self.parent_pipes):
            result, success = pipe.recv()
            successes.append(success)
            if not success:
                # A failed worker sends `(None, False)`; skip unpacking so that the
                # original error is re-raised by `_raise_if_errors` below.
                continue
            obs, rew, done, info = result

            observations_list.append(obs)
            rewards.append(rew)
            dones.append(done)
            infos = self._add_info(infos, info, i)

        self._raise_if_errors(successes)
        self._state = AsyncState.DEFAULT

        if not self.shared_memory:
            self.observations = concatenate(
                self.single_observation_space,
                observations_list,
                self.observations,
            )

        return (
            deepcopy(self.observations) if self.copy else self.observations,
            np.array(rewards),
            np.array(dones, dtype=np.bool_),
            infos,
        )
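
    # A step sketch (assuming `envs` from the sketches above); `actions` is an
    # element of the batched `envs.action_space`:
    #
    #   >>> envs.step_async(envs.action_space.sample())
    #   >>> obs, rewards, dones, infos = envs.step_wait(timeout=10)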

    def call_async(self, name: str, *args, **kwargs):
        """Calls the method with name asynchronously and applies args and kwargs to the method.

        Args:
            name: Name of the method or property to call.
            *args: Arguments to apply to the method call.
            **kwargs: Keyword arguments to apply to the method call.

        Raises:
            ClosedEnvironmentError: If the environment was closed (if :meth:`close` was previously called).
            AlreadyPendingCallError: Calling `call_async` while waiting for a pending call to complete
        """
        self._assert_is_running()
        if self._state != AsyncState.DEFAULT:
            raise AlreadyPendingCallError(
                "Calling `call_async` while waiting "
                f"for a pending call to `{self._state.value}` to complete.",
                self._state.value,
            )

        for pipe in self.parent_pipes:
            pipe.send(("_call", (name, args, kwargs)))
        self._state = AsyncState.WAITING_CALL

    def call_wait(self, timeout: Optional[Union[int, float]] = None) -> list:
        """Calls all parent pipes and waits for the results.

        Args:
            timeout: Number of seconds before the call to `call_wait` times out.
                If `None` (default), the call to `call_wait` never times out.

        Returns:
            List of the results of the individual calls to the method or property for each environment.

        Raises:
            NoAsyncCallError: Calling `call_wait` without any prior call to `call_async`.
            TimeoutError: The call to `call_wait` has timed out after timeout second(s).
        """
        self._assert_is_running()
        if self._state != AsyncState.WAITING_CALL:
            raise NoAsyncCallError(
                "Calling `call_wait` without any prior call to `call_async`.",
                AsyncState.WAITING_CALL.value,
            )

        if not self._poll(timeout):
            self._state = AsyncState.DEFAULT
            raise mp.TimeoutError(
                f"The call to `call_wait` has timed out after {timeout} second(s)."
            )

        results, successes = zip(*[pipe.recv() for pipe in self.parent_pipes])
        self._raise_if_errors(successes)
        self._state = AsyncState.DEFAULT

        return results
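
    # A call sketch (assuming `envs` from the sketches above): `call_async` plus
    # `call_wait` invokes a method, or reads a property, on every sub-environment:
    #
    #   >>> envs.call_async("spec")
    #   >>> specs = envs.call_wait(timeout=10)  # one result per sub-environment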

    def set_attr(self, name: str, values: Union[list, tuple, object]):
        """Sets an attribute of the sub-environments.

        Args:
            name: Name of the property to be set in each individual environment.
            values: Values to set the property to. If ``values`` is a list or
                tuple, then it corresponds to the values for each individual
                environment, otherwise a single value is set for all environments.

        Raises:
            ValueError: Values must be a list or tuple with length equal to the number of environments.
            AlreadyPendingCallError: Calling `set_attr` while waiting for a pending call to complete.
        """
        self._assert_is_running()
        if not isinstance(values, (list, tuple)):
            values = [values for _ in range(self.num_envs)]
        if len(values) != self.num_envs:
            raise ValueError(
                "Values must be a list or tuple with length equal to the "
                f"number of environments. Got `{len(values)}` values for "
                f"{self.num_envs} environments."
            )

        if self._state != AsyncState.DEFAULT:
            raise AlreadyPendingCallError(
                "Calling `set_attr` while waiting "
                f"for a pending call to `{self._state.value}` to complete.",
                self._state.value,
            )

        for pipe, value in zip(self.parent_pipes, values):
            pipe.send(("_setattr", (name, value)))
        _, successes = zip(*[pipe.recv() for pipe in self.parent_pipes])
        self._raise_if_errors(successes)
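
    # A set_attr sketch (assuming `envs` wraps Pendulum instances, which expose a
    # writable `g` attribute), setting one value per sub-environment:
    #
    #   >>> envs.set_attr("g", [9.81, 1.62, 3.72, 8.87])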

    def close_extras(
        self, timeout: Optional[Union[int, float]] = None, terminate: bool = False
    ):
        """Close the environments & clean up the extra resources (processes and pipes).

        Args:
            timeout: Number of seconds before the call to :meth:`close` times out. If ``None``,
                the call to :meth:`close` never times out. If the call to :meth:`close`
                times out, then all processes are terminated.
            terminate: If ``True``, then the :meth:`close` operation is forced and all processes are terminated.

        Raises:
            TimeoutError: If :meth:`close` timed out.
        """
        timeout = 0 if terminate else timeout
        try:
            if self._state != AsyncState.DEFAULT:
                logger.warn(
                    f"Calling `close` while waiting for a pending call to `{self._state.value}` to complete."
                )
                function = getattr(self, f"{self._state.value}_wait")
                function(timeout)
        except mp.TimeoutError:
            terminate = True

        if terminate:
            for process in self.processes:
                if process.is_alive():
                    process.terminate()
        else:
            for pipe in self.parent_pipes:
                if (pipe is not None) and (not pipe.closed):
                    pipe.send(("close", None))
            for pipe in self.parent_pipes:
                if (pipe is not None) and (not pipe.closed):
                    pipe.recv()

        for pipe in self.parent_pipes:
            if pipe is not None:
                pipe.close()
        for process in self.processes:
            process.join()

    def _poll(self, timeout=None):
        self._assert_is_running()
        if timeout is None:
            return True
        end_time = time.perf_counter() + timeout
        delta = None
        for pipe in self.parent_pipes:
            delta = max(end_time - time.perf_counter(), 0)
            if pipe is None:
                return False
            if pipe.closed or (not pipe.poll(delta)):
                return False
        return True

    def _check_spaces(self):
        self._assert_is_running()
        spaces = (self.single_observation_space, self.single_action_space)
        for pipe in self.parent_pipes:
            pipe.send(("_check_spaces", spaces))
        results, successes = zip(*[pipe.recv() for pipe in self.parent_pipes])
        self._raise_if_errors(successes)
        same_observation_spaces, same_action_spaces = zip(*results)
        if not all(same_observation_spaces):
            raise RuntimeError(
                "Some environments have an observation space different from "
                f"`{self.single_observation_space}`. In order to batch observations, "
                "the observation spaces from all environments must be equal."
            )
        if not all(same_action_spaces):
            raise RuntimeError(
                "Some environments have an action space different from "
                f"`{self.single_action_space}`. In order to batch actions, the "
                "action spaces from all environments must be equal."
            )

    def _assert_is_running(self):
        if self.closed:
            raise ClosedEnvironmentError(
                f"Trying to operate on `{type(self).__name__}`, after a call to `close()`."
            )

    def _raise_if_errors(self, successes):
        if all(successes):
            return

        num_errors = self.num_envs - sum(successes)
        assert num_errors > 0
        for _ in range(num_errors):
            index, exctype, value = self.error_queue.get()
            logger.error(
                f"Received the following error from Worker-{index}: {exctype.__name__}: {value}"
            )
            logger.error(f"Shutting down Worker-{index}.")
            self.parent_pipes[index].close()
            self.parent_pipes[index] = None

        logger.error("Raising the last exception back to the main process.")
        raise exctype(value)

    def __del__(self):
        """On deleting the object, checks that the vector environment is closed."""
        if not getattr(self, "closed", True) and hasattr(self, "_state"):
            self.close(terminate=True)


def _worker(index, env_fn, pipe, parent_pipe, shared_memory, error_queue):
    assert shared_memory is None
    env = env_fn()
    parent_pipe.close()
    try:
        while True:
            command, data = pipe.recv()
            if command == "reset":
                if "return_info" in data and data["return_info"] is True:
                    observation, info = env.reset(**data)
                    pipe.send(((observation, info), True))
                else:
                    observation = env.reset(**data)
                    pipe.send((observation, True))

            elif command == "step":
                observation, reward, done, info = env.step(data)
                if done:
                    # Autoreset on episode end: keep the terminal observation in `info`.
                    info["terminal_observation"] = observation
                    observation = env.reset()
                pipe.send(((observation, reward, done, info), True))
            elif command == "seed":
                env.seed(data)
                pipe.send((None, True))
            elif command == "close":
                pipe.send((None, True))
                break
            elif command == "_call":
                name, args, kwargs = data
                if name in ["reset", "step", "seed", "close"]:
                    raise ValueError(
                        f"Trying to call function `{name}` with "
                        f"`_call`. Use `{name}` directly instead."
                    )
                function = getattr(env, name)
                if callable(function):
                    pipe.send((function(*args, **kwargs), True))
                else:
                    pipe.send((function, True))
            elif command == "_setattr":
                name, value = data
                setattr(env, name, value)
                pipe.send((None, True))
            elif command == "_check_spaces":
                pipe.send(
                    (
                        (data[0] == env.observation_space, data[1] == env.action_space),
                        True,
                    )
                )
            else:
                raise RuntimeError(
                    f"Received unknown command `{command}`. Must "
                    "be one of {`reset`, `step`, `seed`, `close`, `_call`, "
                    "`_setattr`, `_check_spaces`}."
                )
    except (KeyboardInterrupt, Exception):
        error_queue.put((index,) + sys.exc_info()[:2])
        pipe.send((None, False))
    finally:
        env.close()


def _worker_shared_memory(index, env_fn, pipe, parent_pipe, shared_memory, error_queue):
    assert shared_memory is not None
    env = env_fn()
    observation_space = env.observation_space
    parent_pipe.close()
    try:
        while True:
            command, data = pipe.recv()
            if command == "reset":
                if "return_info" in data and data["return_info"] is True:
                    observation, info = env.reset(**data)
                    write_to_shared_memory(
                        observation_space, index, observation, shared_memory
                    )
                    pipe.send(((None, info), True))
                else:
                    observation = env.reset(**data)
                    write_to_shared_memory(
                        observation_space, index, observation, shared_memory
                    )
                    pipe.send((None, True))
            elif command == "step":
                observation, reward, done, info = env.step(data)
                if done:
                    # Autoreset on episode end: keep the terminal observation in `info`.
                    info["terminal_observation"] = observation
                    observation = env.reset()
                write_to_shared_memory(
                    observation_space, index, observation, shared_memory
                )
                pipe.send(((None, reward, done, info), True))
            elif command == "seed":
                env.seed(data)
                pipe.send((None, True))
            elif command == "close":
                pipe.send((None, True))
                break
            elif command == "_call":
                name, args, kwargs = data
                if name in ["reset", "step", "seed", "close"]:
                    raise ValueError(
                        f"Trying to call function `{name}` with "
                        f"`_call`. Use `{name}` directly instead."
                    )
                function = getattr(env, name)
                if callable(function):
                    pipe.send((function(*args, **kwargs), True))
                else:
                    pipe.send((function, True))
            elif command == "_setattr":
                name, value = data
                setattr(env, name, value)
                pipe.send((None, True))
            elif command == "_check_spaces":
                pipe.send(
                    ((data[0] == observation_space, data[1] == env.action_space), True)
                )
            else:
                raise RuntimeError(
                    f"Received unknown command `{command}`. Must "
                    "be one of {`reset`, `step`, `seed`, `close`, `_call`, "
                    "`_setattr`, `_check_spaces`}."
                )
    except (KeyboardInterrupt, Exception):
        error_queue.put((index,) + sys.exc_info()[:2])
        pipe.send((None, False))
    finally:
        env.close()
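

# A sketch of plugging in a custom worker. `my_worker` is hypothetical; it should
# keep the `(index, env_fn, pipe, parent_pipe, shared_memory, error_queue)`
# signature and the command protocol of `_worker` / `_worker_shared_memory`:
#
#   >>> envs = AsyncVectorEnv(env_fns, shared_memory=False, worker=my_worker)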