Mirror of https://github.com/Farama-Foundation/Gymnasium.git (synced 2025-08-18 04:49:12 +00:00)
Seeding update (#2422)
* Ditch most of the seeding.py and replace np_random with the numpy default_rng. Let's see if tests pass
* Updated a bunch of RNG calls from the RandomState API to the Generator API
* black; didn't expect that, did ya?
* Undo a typo
* blaaack
* More typo fixes
* Fixed setting/getting state in multidiscrete spaces
* Fix typo, fix a test to work with the new sampling
* Correctly (?) pass the randomly generated seed if np_random is called with None as seed
* Convert the Discrete sample to a python int (as opposed to np.int64)
* Remove some redundant imports
* First version of the compatibility layer for old-style RNG. Mainly to trigger tests.
* Removed redundant f-strings
* Style fixes, removing unused imports
* Try to make tests pass by removing atari from the dockerfile
* Try to make tests pass by removing atari from the setup
* Try to make tests pass by removing atari from the setup
* Try to make tests pass by removing atari from the setup
* First attempt at deprecating `env.seed` and supporting `env.reset(seed=seed)` instead. Tests should hopefully pass but throw up a million warnings.
* black; didn't expect that, did ya?
* Rename the reset parameter in VecEnvs back to `seed`
* Updated tests to use the new seeding method
* Removed a bunch of old `seed` calls. Fixed a bug in AsyncVectorEnv
* Stop Discrete envs from doing part of the setup (and using the randomness) in init (as opposed to reset)
* Add an explicit seed to the wrappers' reset
* Remove an accidental return
* Re-add some legacy functions with a warning.
* Use deprecation warnings instead of regular warnings for the newly deprecated methods/functions
Committed via GitHub
parent b84b69c872 · commit c364506710
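In practice, the API shift these commits describe looks roughly like the sketch below. This is a minimal illustration, not code from the PR; the variable names are invented here:

    import numpy as np

    # Old style: gym.utils.seeding.np_random returned a np.random.RandomState,
    # so environments drew samples through the legacy RandomState API.
    legacy_rng = np.random.RandomState(42)
    legacy_sample = legacy_rng.randint(0, 10)  # RandomState method

    # New style: np_random is a np.random.Generator created via default_rng,
    # so the same draw goes through the Generator API instead.
    rng = np.random.default_rng(42)
    sample = int(rng.integers(0, 10))  # Generator method; the int() cast mirrors
                                       # the "Discrete sample to a python int" commit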
gym/envs/robotics/robot_env.py
@@ -1,5 +1,7 @@
 import os
 import copy
+from typing import Optional
+
 import numpy as np
 
 import gym
@@ -37,7 +39,6 @@ class RobotEnv(gym.GoalEnv):
             "video.frames_per_second": int(np.round(1.0 / self.dt)),
         }
 
-        self.seed()
         self._env_setup(initial_qpos=initial_qpos)
         self.initial_state = copy.deepcopy(self.sim.get_state())
 
@@ -65,10 +66,6 @@ class RobotEnv(gym.GoalEnv):
     # Env methods
     # ----------------------------
 
-    def seed(self, seed=None):
-        self.np_random, seed = seeding.np_random(seed)
-        return [seed]
-
     def step(self, action):
         if np.array(action).shape != self.action_space.shape:
             raise ValueError("Action dimension mismatch")
@@ -86,13 +83,13 @@ class RobotEnv(gym.GoalEnv):
         reward = self.compute_reward(obs["achieved_goal"], self.goal, info)
         return obs, reward, done, info
 
-    def reset(self):
+    def reset(self, seed: Optional[int] = None):
         # Attempt to reset the simulator. Since we randomize initial conditions, it
         # is possible to get into a state with numerical issues (e.g. due to penetration or
         # Gimbel lock) or we may not achieve an initial condition (e.g. an object is within the hand).
         # In this case, we just keep randomizing until we eventually achieve a valid initial
         # configuration.
-        super().reset()
+        super().reset(seed=seed)
         did_reset_sim = False
         while not did_reset_sim:
             did_reset_sim = self._reset_sim()
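For downstream users, the seeding entry point moves from env.seed() into reset, as the hunk above shows. A rough sketch of the caller-side migration (the environment id is illustrative, not taken from this diff):

    import gym

    env = gym.make("FetchReach-v1")  # illustrative robotics env id

    # Deprecated pattern, kept only as a warning-emitting legacy path:
    # env.seed(42)
    # obs = env.reset()

    # Pattern introduced by this commit:
    obs = env.reset(seed=42)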