mirror of https://github.com/Farama-Foundation/Gymnasium.git
* Ditch most of seeding.py and replace np_random with the numpy default_rng. Let's see if tests pass
* Updated a bunch of RNG calls from the RandomState API to the Generator API
* black; didn't expect that, did ya?
* Undo a typo
* blaaack
* More typo fixes
* Fixed setting/getting state in multidiscrete spaces
* Fix typo, fix a test to work with the new sampling
* Correctly (?) pass the randomly generated seed if np_random is called with None as seed
* Convert the Discrete sample to a Python int (as opposed to np.int64)
* Remove some redundant imports
* First version of the compatibility layer for old-style RNG. Mainly to trigger tests.
* Removed redundant f-strings
* Style fixes, removing unused imports
* Try to make tests pass by removing atari from the dockerfile
* Try to make tests pass by removing atari from the setup
* Try to make tests pass by removing atari from the setup
* Try to make tests pass by removing atari from the setup
* First attempt at deprecating `env.seed` and supporting `env.reset(seed=seed)` instead. Tests should hopefully pass but throw up a million warnings.
* black; didn't expect that, did ya?
* Rename the reset parameter in VecEnvs back to `seed`
* Updated tests to use the new seeding method
* Removed a bunch of old `seed` calls. Fixed a bug in AsyncVectorEnv
* Stop Discrete envs from doing part of the setup (and using the randomness) in init (as opposed to reset)
* Add explicit seed to wrappers' reset
* Remove an accidental return
* Re-add some legacy functions with a warning.
* Use deprecation instead of regular warnings for the newly deprecated methods/functions
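A minimal before/after sketch of the seeding change these messages describe (CartPole-v1 is an arbitrary example, and the deprecated calls appear only as comments):

import gym
import numpy as np

env = gym.make("CartPole-v1")

# Old, now-deprecated pattern: seed through a separate env.seed() call.
#   env.seed(0)
#   env.reset()

# New pattern: pass the seed directly to reset().
env.reset(seed=0)

# Internally, np_random is now a numpy Generator (default_rng) rather than
# a legacy RandomState, so RandomState calls like randint map to the
# Generator API:
rng = np.random.default_rng(0)
rng.integers(0, 2)  # Generator equivalent of RandomState.randint(0, 2)

# Per the commit messages, Discrete sampling now returns a plain Python int
# rather than np.int64.
action = env.action_space.sample()
assert isinstance(action, int)

The test below exercises the new reset(seed=...) API through the TransformReward wrapper.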
58 lines | 1.5 KiB | Python
import pytest

import numpy as np

import gym
from gym.wrappers import TransformReward

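# For context: TransformReward (from gym.wrappers) is essentially a
# RewardWrapper that applies a user-supplied callable to every reward.
# A rough sketch of the idea, not the exact library source:
#
#     class TransformReward(gym.RewardWrapper):
#         def __init__(self, env, f):
#             super().__init__(env)
#             self.f = f
#
#         def reward(self, reward):
#             return self.f(reward)
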
@pytest.mark.parametrize("env_id", ["CartPole-v1", "Pendulum-v1"])
def test_transform_reward(env_id):
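    """Check that TransformReward applies the given function to the reward.

    Covers three use cases: scaling, clipping, and taking the sign of the reward.
    """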
    # use case #1: scale
    scales = [0.1, 200]
    for scale in scales:
        env = gym.make(env_id)
        wrapped_env = TransformReward(gym.make(env_id), lambda r: scale * r)
        action = env.action_space.sample()

        env.reset(seed=0)
        wrapped_env.reset(seed=0)

        _, reward, _, _ = env.step(action)
        _, wrapped_reward, _, _ = wrapped_env.step(action)

        assert wrapped_reward == scale * reward
        del env, wrapped_env

    # use case #2: clip
    min_r = -0.0005
    max_r = 0.0002
    env = gym.make(env_id)
    wrapped_env = TransformReward(gym.make(env_id), lambda r: np.clip(r, min_r, max_r))
    action = env.action_space.sample()

    env.reset(seed=0)
    wrapped_env.reset(seed=0)

    _, reward, _, _ = env.step(action)
    _, wrapped_reward, _, _ = wrapped_env.step(action)

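    # Clipping to [min_r, max_r] should shrink the reward's magnitude and pin
    # it to a bound: CartPole's +1 reward clips down to max_r, while Pendulum's
    # negative reward clips up to min_r.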
    assert abs(wrapped_reward) < abs(reward)
    assert wrapped_reward == min_r or wrapped_reward == max_r
    del env, wrapped_env

    # use case #3: sign
    env = gym.make(env_id)
    wrapped_env = TransformReward(gym.make(env_id), lambda r: np.sign(r))

    env.reset(seed=0)
    wrapped_env.reset(seed=0)

    for _ in range(1000):
        action = env.action_space.sample()
        _, wrapped_reward, done, _ = wrapped_env.step(action)
        assert wrapped_reward in [-1.0, 0.0, 1.0]
        if done:
            break
    del env, wrapped_env
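To run just this test from a checkout of the repository (assuming pytest and the project's test dependencies are installed):

pytest -k test_transform_reward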