Remove unittest envs (#2553)

Author: Costa Huang
Date: 2022-01-10 23:42:26 -05:00
Committed by: GitHub
Parent: 8a96440084
Commit: cfed339701
7 changed files with 23 additions and 370 deletions

View File

@@ -263,28 +263,3 @@ register(
entry_point="gym.envs.mujoco:HumanoidStandupEnv",
max_episode_steps=1000,
)
-# Unit test
-# ---------
-register(
-id="CubeCrash-v0",
-entry_point="gym.envs.unittest:CubeCrash",
-reward_threshold=0.9,
-)
-register(
-id="CubeCrashSparse-v0",
-entry_point="gym.envs.unittest:CubeCrashSparse",
-reward_threshold=0.9,
-)
-register(
-id="CubeCrashScreenBecomesBlack-v0",
-entry_point="gym.envs.unittest:CubeCrashScreenBecomesBlack",
-reward_threshold=0.9,
-)
-register(
-id="MemorizeDigits-v0",
-entry_point="gym.envs.unittest:MemorizeDigits",
-reward_threshold=20,
-)
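Downstream code that still resolves these IDs can vendor the removed class and re-register it locally. A minimal sketch, assuming the deleted cube_crash.py has been copied into a hypothetical local package called my_envs (not something gym provides after this commit):

# Sketch: restore CubeCrash-v0 from a vendored copy of the removed file.
# "my_envs" is a hypothetical local package, used here purely for illustration.
import gym
from gym.envs.registration import register

register(
    id="CubeCrash-v0",
    entry_point="my_envs.cube_crash:CubeCrash",  # vendored copy of the deleted module
    reward_threshold=0.9,
)

env = gym.make("CubeCrash-v0")
obs = env.reset()
print(obs.shape)  # (40, 32, 3) uint8 frame, as before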

View File

@@ -1,4 +0,0 @@
from gym.envs.unittest.cube_crash import CubeCrash
from gym.envs.unittest.cube_crash import CubeCrashSparse
from gym.envs.unittest.cube_crash import CubeCrashScreenBecomesBlack
from gym.envs.unittest.memorize_digits import MemorizeDigits
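With the gym.envs.unittest package deleted, the import paths above simply stop resolving after this commit. For illustration:

# After this change, the old import path fails at import time.
try:
    from gym.envs.unittest import CubeCrash  # noqa: F401
except ImportError as err:  # ModuleNotFoundError once the package directory is gone
    print(err)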

View File

@@ -1,172 +0,0 @@
from typing import Optional
import numpy as np
import gym
from gym import spaces
from gym.utils import seeding
# Unit test environment for CNNs and CNN+RNN algorithms.
# Looks like this (RGB observations):
#
# ---------------------------
# |                         |
# |                         |
# |                         |
# |          **             |
# |          **             |
# |                         |
# |                         |
# |                         |
# |                         |
# |                         |
# ========     ==============
#
# Goal is to go through the hole at the bottom. Agent controls square using Left-Nop-Right actions.
# It falls down automatically, episode length is a bit less than FIELD_H
#
# CubeCrash-v0 # shaped reward
# CubeCrashSparse-v0 # reward 0 or 1 at the end
# CubeCrashScreenBecomesBlack-v0 # for RNNs
#
# To see how it works, run:
#
# python examples/agents/keyboard_agent.py CubeCrash-v0
FIELD_W = 32
FIELD_H = 40
HOLE_WIDTH = 8
color_black = np.array((0, 0, 0)).astype("float32")
color_white = np.array((255, 255, 255)).astype("float32")
color_green = np.array((0, 255, 0)).astype("float32")
class CubeCrash(gym.Env):
metadata = {
"render.modes": ["human", "rgb_array"],
"video.frames_per_second": 60,
"video.res_w": FIELD_W,
"video.res_h": FIELD_H,
}
use_shaped_reward = True
use_black_screen = False
use_random_colors = False # Makes env too hard
def __init__(self):
self.viewer = None
self.observation_space = spaces.Box(
0, 255, (FIELD_H, FIELD_W, 3), dtype=np.uint8
)
self.action_space = spaces.Discrete(3)
self.reset()
def random_color(self):
return np.array(
[
self.np_random.integers(low=0, high=255),
self.np_random.integers(low=0, high=255),
self.np_random.integers(low=0, high=255),
]
).astype("uint8")
def reset(self, seed: Optional[int] = None):
super().reset(seed=seed)
self.cube_x = self.np_random.integers(low=3, high=FIELD_W - 3)
self.cube_y = self.np_random.integers(low=3, high=FIELD_H // 6)
self.hole_x = self.np_random.integers(low=HOLE_WIDTH, high=FIELD_W - HOLE_WIDTH)
self.bg_color = self.random_color() if self.use_random_colors else color_black
self.potential = None
self.step_n = 0
while 1:
self.wall_color = (
self.random_color() if self.use_random_colors else color_white
)
self.cube_color = (
self.random_color() if self.use_random_colors else color_green
)
if (
np.linalg.norm(self.wall_color - self.bg_color) < 50
or np.linalg.norm(self.cube_color - self.bg_color) < 50
):
continue
break
return self.step(0)[0]
def step(self, action):
if action == 0:
pass
elif action == 1:
self.cube_x -= 1
elif action == 2:
self.cube_x += 1
else:
assert 0, "Action %i is out of range" % action
self.cube_y += 1
self.step_n += 1
obs = np.zeros((FIELD_H, FIELD_W, 3), dtype=np.uint8)
obs[:, :, :] = self.bg_color
obs[FIELD_H - 5 : FIELD_H, :, :] = self.wall_color
obs[
FIELD_H - 5 : FIELD_H,
self.hole_x - HOLE_WIDTH // 2 : self.hole_x + HOLE_WIDTH // 2 + 1,
:,
] = self.bg_color
obs[
self.cube_y - 1 : self.cube_y + 2, self.cube_x - 1 : self.cube_x + 2, :
] = self.cube_color
if self.use_black_screen and self.step_n > 4:
obs[:] = np.zeros((3,), dtype=np.uint8)
done = False
reward = 0
dist = np.abs(self.cube_x - self.hole_x)
if self.potential is not None and self.use_shaped_reward:
reward = (self.potential - dist) * 0.01
self.potential = dist
if self.cube_x - 1 < 0 or self.cube_x + 1 >= FIELD_W:
done = True
reward = -1
elif self.cube_y + 1 >= FIELD_H - 5:
if dist >= HOLE_WIDTH // 2:
done = True
reward = -1
elif self.cube_y == FIELD_H:
done = True
reward = +1
self.last_obs = obs
return obs, reward, done, {}
def render(self, mode="human"):
if mode == "rgb_array":
return self.last_obs
elif mode == "human":
from gym.utils import pyglet_rendering
if self.viewer is None:
self.viewer = pyglet_rendering.SimpleImageViewer()
self.viewer.imshow(self.last_obs)
return self.viewer.isopen
else:
assert 0, f"Render mode '{mode}' is not supported"
def close(self):
if self.viewer is not None:
self.viewer.close()
self.viewer = None
class CubeCrashSparse(CubeCrash):
use_shaped_reward = False
class CubeCrashScreenBecomesBlack(CubeCrash):
use_shaped_reward = False
use_black_screen = True
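CubeCrash existed mainly as a cheap image-observation environment for smoke-testing CNN agents. A replacement does not need its game logic; the sketch below (purely illustrative, not part of this commit, with a hypothetical class name) keeps only the spaces -- Box(0, 255, (40, 32, 3), uint8) observations and Discrete(3) actions -- and emits random frames under the same pre-0.26 reset/step API used above:

# Sketch: a stand-in image env for CNN smoke tests after CubeCrash is gone.
from typing import Optional

import numpy as np
import gym
from gym import spaces


class RandomFrames(gym.Env):
    """Hypothetical stand-in: same spaces as CubeCrash, random pixels."""

    def __init__(self, episode_len=40):
        self.observation_space = spaces.Box(0, 255, (40, 32, 3), dtype=np.uint8)
        self.action_space = spaces.Discrete(3)
        self.episode_len = episode_len
        self.step_n = 0

    def _frame(self):
        # Random uint8 image drawn from the env's own np_random generator.
        return self.np_random.integers(0, 256, size=(40, 32, 3), dtype=np.uint8)

    def reset(self, seed: Optional[int] = None):
        super().reset(seed=seed)
        self.step_n = 0
        return self._frame()

    def step(self, action):
        self.step_n += 1
        done = self.step_n >= self.episode_len
        # Pre-0.26 gym step API (obs, reward, done, info), matching the removed env.
        return self._frame(), 0.0, done, {}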

View File

@@ -1,144 +0,0 @@
from typing import Optional
import numpy as np
import gym
from gym import spaces
from gym.utils import seeding
# Unit test environment for CNNs.
# Looks like this (RGB observations):
#
# ---------------------------
# |                         |
# |       ******            |
# |       ******            |
# |       **  **            |
# |       **  **            |
# |           **            |
# |           **            |
# |         ****            |
# |         ****            |
# |       ****              |
# |       ****              |
# |     **********          |
# |     **********          |
# |                         |
# ---------------------------
#
# Agent should hit action 2 to gain reward. Catches off-by-one errors in your agent.
#
# To see how it works, run:
#
# python examples/agents/keyboard_agent.py MemorizeDigits-v0
FIELD_W = 32
FIELD_H = 24
bogus_mnist = [
[" **** ", "* *", "* *", "* *", "* *", " **** "],
[" ** ", " * * ", " * ", " * ", " * ", " *** "],
[" **** ", "* *", " *", " *** ", "** ", "******"],
[" **** ", "* *", " ** ", " *", "* *", " **** "],
[" * * ", " * * ", " * * ", " **** ", " * ", " * "],
[" **** ", " * ", " **** ", " * ", " * ", " **** "],
[" *** ", " * ", " **** ", " * * ", " * * ", " **** "],
[" **** ", " * ", " * ", " * ", " * ", " * "],
[" **** ", "* *", " **** ", "* *", "* *", " **** "],
[" **** ", "* *", "* *", " *****", " *", " **** "],
]
color_black = np.array((0, 0, 0)).astype("float32")
color_white = np.array((255, 255, 255)).astype("float32")
class MemorizeDigits(gym.Env):
metadata = {
"render.modes": ["human", "rgb_array"],
"video.frames_per_second": 60,
"video.res_w": FIELD_W,
"video.res_h": FIELD_H,
}
use_random_colors = False
def __init__(self):
self.viewer = None
self.observation_space = spaces.Box(
0, 255, (FIELD_H, FIELD_W, 3), dtype=np.uint8
)
self.action_space = spaces.Discrete(10)
self.bogus_mnist = np.zeros((10, 6, 6), dtype=np.uint8)
for digit in range(10):
for y in range(6):
self.bogus_mnist[digit, y, :] = [
ord(char) for char in bogus_mnist[digit][y]
]
self.reset()
def random_color(self):
return np.array(
[
self.np_random.integers(low=0, high=255),
self.np_random.integers(low=0, high=255),
self.np_random.integers(low=0, high=255),
]
).astype("uint8")
def reset(self, seed: Optional[int] = None):
super().reset(seed=seed)
self.digit_x = self.np_random.integers(low=FIELD_W // 5, high=FIELD_W // 5 * 4)
self.digit_y = self.np_random.integers(low=FIELD_H // 5, high=FIELD_H // 5 * 4)
self.color_bg = self.random_color() if self.use_random_colors else color_black
self.step_n = 0
while 1:
self.color_digit = (
self.random_color() if self.use_random_colors else color_white
)
if np.linalg.norm(self.color_digit - self.color_bg) < 50:
continue
break
self.digit = -1
return self.step(0)[0]
def step(self, action):
reward = -1
done = False
self.step_n += 1
if self.digit == -1:
pass
else:
if self.digit == action:
reward = +1
done = self.step_n > 20 and 0 == self.np_random.integers(low=0, high=5)
self.digit = self.np_random.integers(low=0, high=10)
obs = np.zeros((FIELD_H, FIELD_W, 3), dtype=np.uint8)
obs[:, :, :] = self.color_bg
digit_img = np.zeros((6, 6, 3), dtype=np.uint8)
digit_img[:] = self.color_bg
xxx = self.bogus_mnist[self.digit] == 42
digit_img[xxx] = self.color_digit
obs[
self.digit_y - 3 : self.digit_y + 3, self.digit_x - 3 : self.digit_x + 3
] = digit_img
self.last_obs = obs
return obs, reward, done, {}
def render(self, mode="human"):
if mode == "rgb_array":
return self.last_obs
elif mode == "human":
from gym.utils import pyglet_rendering
if self.viewer is None:
self.viewer = pyglet_rendering.SimpleImageViewer()
self.viewer.imshow(self.last_obs)
return self.viewer.isopen
else:
assert 0, f"Render mode '{mode}' is not supported"
def close(self):
if self.viewer is not None:
self.viewer.close()
self.viewer = None
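The glyph trick in the removed code is easy to miss: each digit is stored as six 6-character strings, converted to ord() codes, and masked against 42, which is ord("*"). A small illustrative decode of the zero glyph (re-expanded here for clarity):

import numpy as np

# ord(" ") == 32 and ord("*") == 42, so comparing against 42 selects digit pixels.
zero = [" **** ", "*    *", "*    *", "*    *", "*    *", " **** "]
glyph = np.array([[ord(char) for char in row] for row in zero], dtype=np.uint8)
mask = glyph == ord("*")  # boolean (6, 6) pixel mask, True where the digit is drawn

print(mask.astype(int))
# [[0 1 1 1 1 0]
#  [1 0 0 0 0 1]
#  [1 0 0 0 0 1]
#  [1 0 0 0 0 1]
#  [1 0 0 0 0 1]
#  [0 1 1 1 1 0]]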

View File

@@ -16,7 +16,7 @@ from gym.vector.async_vector_env import AsyncVectorEnv
@pytest.mark.parametrize("shared_memory", [True, False])
def test_create_async_vector_env(shared_memory):
env_fns = [make_env("CubeCrash-v0", i) for i in range(8)]
env_fns = [make_env("CartPole-v1", i) for i in range(8)]
try:
env = AsyncVectorEnv(env_fns, shared_memory=shared_memory)
finally:
@@ -27,7 +27,7 @@ def test_create_async_vector_env(shared_memory):
@pytest.mark.parametrize("shared_memory", [True, False])
def test_reset_async_vector_env(shared_memory):
env_fns = [make_env("CubeCrash-v0", i) for i in range(8)]
env_fns = [make_env("CartPole-v1", i) for i in range(8)]
try:
env = AsyncVectorEnv(env_fns, shared_memory=shared_memory)
observations = env.reset()
@@ -44,7 +44,7 @@ def test_reset_async_vector_env(shared_memory):
@pytest.mark.parametrize("shared_memory", [True, False])
@pytest.mark.parametrize("use_single_action_space", [True, False])
def test_step_async_vector_env(shared_memory, use_single_action_space):
env_fns = [make_env("CubeCrash-v0", i) for i in range(8)]
env_fns = [make_env("CartPole-v1", i) for i in range(8)]
try:
env = AsyncVectorEnv(env_fns, shared_memory=shared_memory)
observations = env.reset()
@@ -79,24 +79,22 @@ def test_step_async_vector_env(shared_memory, use_single_action_space):
@pytest.mark.parametrize("shared_memory", [True, False])
def test_copy_async_vector_env(shared_memory):
env_fns = [make_env("CubeCrash-v0", i) for i in range(8)]
env_fns = [make_env("CartPole-v1", i) for i in range(8)]
try:
env = AsyncVectorEnv(env_fns, shared_memory=shared_memory, copy=True)
observations = env.reset()
observations[0] = 128
assert not np.all(env.observations[0] == 128)
observations[0] = 0
finally:
env.close()
@pytest.mark.parametrize("shared_memory", [True, False])
def test_no_copy_async_vector_env(shared_memory):
env_fns = [make_env("CubeCrash-v0", i) for i in range(8)]
env_fns = [make_env("CartPole-v1", i) for i in range(8)]
try:
env = AsyncVectorEnv(env_fns, shared_memory=shared_memory, copy=False)
observations = env.reset()
observations[0] = 128
assert np.all(env.observations[0] == 128)
observations[0] = 0
finally:
env.close()
@@ -129,7 +127,7 @@ def test_step_timeout_async_vector_env(shared_memory):
@pytest.mark.filterwarnings("ignore::UserWarning")
@pytest.mark.parametrize("shared_memory", [True, False])
def test_reset_out_of_order_async_vector_env(shared_memory):
env_fns = [make_env("CubeCrash-v0", i) for i in range(4)]
env_fns = [make_env("CartPole-v1", i) for i in range(4)]
with pytest.raises(NoAsyncCallError):
try:
env = AsyncVectorEnv(env_fns, shared_memory=shared_memory)
@@ -157,7 +155,7 @@ def test_reset_out_of_order_async_vector_env(shared_memory):
@pytest.mark.filterwarnings("ignore::UserWarning")
@pytest.mark.parametrize("shared_memory", [True, False])
def test_step_out_of_order_async_vector_env(shared_memory):
env_fns = [make_env("CubeCrash-v0", i) for i in range(4)]
env_fns = [make_env("CartPole-v1", i) for i in range(4)]
with pytest.raises(NoAsyncCallError):
try:
env = AsyncVectorEnv(env_fns, shared_memory=shared_memory)
@@ -185,7 +183,7 @@ def test_step_out_of_order_async_vector_env(shared_memory):
@pytest.mark.parametrize("shared_memory", [True, False])
def test_already_closed_async_vector_env(shared_memory):
env_fns = [make_env("CubeCrash-v0", i) for i in range(4)]
env_fns = [make_env("CartPole-v1", i) for i in range(4)]
with pytest.raises(ClosedEnvironmentError):
env = AsyncVectorEnv(env_fns, shared_memory=shared_memory)
env.close()
@@ -194,10 +192,10 @@ def test_already_closed_async_vector_env(shared_memory):
@pytest.mark.parametrize("shared_memory", [True, False])
def test_check_spaces_async_vector_env(shared_memory):
-# CubeCrash-v0 - observation_space: Box(40, 32, 3), action_space: Discrete(3)
-env_fns = [make_env("CubeCrash-v0", i) for i in range(8)]
-# MemorizeDigits-v0 - observation_space: Box(24, 32, 3), action_space: Discrete(10)
-env_fns[1] = make_env("MemorizeDigits-v0", 1)
+# CartPole-v1 - observation_space: Box(4,), action_space: Discrete(2)
+env_fns = [make_env("CartPole-v1", i) for i in range(8)]
+# FrozenLake-v1 - observation_space: Discrete(16), action_space: Discrete(4)
+env_fns[1] = make_env("FrozenLake-v1", 1)
with pytest.raises(RuntimeError):
env = AsyncVectorEnv(env_fns, shared_memory=shared_memory)
env.close(terminate=True)
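Every CubeCrash-v0 batch in this file becomes a classic-control batch. Outside the test suite (which uses its own make_env utility), the same pattern looks roughly like the sketch below; make_cartpole and the per-env seeding are illustrative assumptions, written against the pre-0.26 seed()/4-tuple step API current at this commit:

from functools import partial

import gym
from gym.vector import AsyncVectorEnv


def make_cartpole(seed):
    # Hypothetical helper standing in for the test suite's make_env utility.
    env = gym.make("CartPole-v1")
    env.seed(seed)               # pre-0.26 seeding API
    env.action_space.seed(seed)
    return env


if __name__ == "__main__":
    # partial() keeps the factories picklable for AsyncVectorEnv's worker processes.
    env_fns = [partial(make_cartpole, i) for i in range(8)]
    env = AsyncVectorEnv(env_fns, shared_memory=True)
    try:
        observations = env.reset()             # batched observations, shape (8, 4)
        actions = env.action_space.sample()    # one action per sub-environment
        observations, rewards, dones, infos = env.step(actions)
        assert observations.shape == (8,) + env.single_observation_space.shape
    finally:
        env.close()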

View File

@@ -8,7 +8,7 @@ from gym.vector.sync_vector_env import SyncVectorEnv
def test_create_sync_vector_env():
env_fns = [make_env("CubeCrash-v0", i) for i in range(8)]
env_fns = [make_env("FrozenLake-v1", i) for i in range(8)]
try:
env = SyncVectorEnv(env_fns)
finally:
@@ -18,7 +18,7 @@ def test_create_sync_vector_env():
def test_reset_sync_vector_env():
env_fns = [make_env("CubeCrash-v0", i) for i in range(8)]
env_fns = [make_env("CartPole-v1", i) for i in range(8)]
try:
env = SyncVectorEnv(env_fns)
observations = env.reset()
@@ -34,7 +34,7 @@ def test_reset_sync_vector_env():
@pytest.mark.parametrize("use_single_action_space", [True, False])
def test_step_sync_vector_env(use_single_action_space):
env_fns = [make_env("CubeCrash-v0", i) for i in range(8)]
env_fns = [make_env("FrozenLake-v1", i) for i in range(8)]
try:
env = SyncVectorEnv(env_fns)
observations = env.reset()
@@ -50,7 +50,7 @@ def test_step_sync_vector_env(use_single_action_space):
finally:
env.close()
-assert isinstance(env.observation_space, Box)
+assert isinstance(env.observation_space, MultiDiscrete)
assert isinstance(observations, np.ndarray)
assert observations.dtype == env.observation_space.dtype
assert observations.shape == (8,) + env.single_observation_space.shape
@@ -68,10 +68,10 @@ def test_step_sync_vector_env(use_single_action_space):
def test_check_spaces_sync_vector_env():
-# CubeCrash-v0 - observation_space: Box(40, 32, 3), action_space: Discrete(3)
-env_fns = [make_env("CubeCrash-v0", i) for i in range(8)]
-# MemorizeDigits-v0 - observation_space: Box(24, 32, 3), action_space: Discrete(10)
-env_fns[1] = make_env("MemorizeDigits-v0", 1)
+# CartPole-v1 - observation_space: Box(4,), action_space: Discrete(2)
+env_fns = [make_env("CartPole-v1", i) for i in range(8)]
+# FrozenLake-v1 - observation_space: Discrete(16), action_space: Discrete(4)
+env_fns[1] = make_env("FrozenLake-v1", 1)
with pytest.raises(RuntimeError):
env = SyncVectorEnv(env_fns)
env.close()
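The Box-to-MultiDiscrete assertion change above follows from how vector envs batch spaces: eight copies of FrozenLake-v1's Discrete(16) observation space become MultiDiscrete([16] * 8) on the vectorized env. An illustrative sketch, not taken from the test suite:

import gym
from gym.spaces import Discrete, MultiDiscrete
from gym.vector import SyncVectorEnv

env = SyncVectorEnv([lambda: gym.make("FrozenLake-v1") for _ in range(8)])
try:
    assert isinstance(env.single_observation_space, Discrete)   # Discrete(16)
    assert isinstance(env.observation_space, MultiDiscrete)     # MultiDiscrete([16] * 8)
    observations = env.reset()   # array of 8 integer grid states, shape (8,)
finally:
    env.close()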

View File

@@ -11,7 +11,7 @@ from gym.vector.vector_env import VectorEnv
@pytest.mark.parametrize("shared_memory", [True, False])
def test_vector_env_equal(shared_memory):
env_fns = [make_env("CubeCrash-v0", i) for i in range(4)]
env_fns = [make_env("CartPole-v1", i) for i in range(4)]
num_steps = 100
try:
async_env = AsyncVectorEnv(env_fns, shared_memory=shared_memory)
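The truncated hunk above comes from test_vector_env_equal, which drives an AsyncVectorEnv and a SyncVectorEnv built from the same factories in lockstep and asserts identical results. A simplified, illustrative version of that idea, using gym.vector.make and the pre-0.26 seed()/4-tuple step API; it assumes a fork-based multiprocessing start method (the Linux default), so the generated env factories need not be picklable:

import numpy as np
import gym

async_env = gym.vector.make("CartPole-v1", num_envs=4, asynchronous=True)
sync_env = gym.vector.make("CartPole-v1", num_envs=4, asynchronous=False)
try:
    # Seed each sub-environment identically in both implementations.
    async_env.seed(list(range(4)))
    sync_env.seed(list(range(4)))
    assert np.allclose(async_env.reset(), sync_env.reset())

    for _ in range(20):
        actions = async_env.action_space.sample()   # same batch of actions for both
        async_obs, async_rewards, async_dones, _ = async_env.step(actions)
        sync_obs, sync_rewards, sync_dones, _ = sync_env.step(actions)
        assert np.allclose(async_obs, sync_obs)
        assert np.allclose(async_rewards, sync_rewards)
        assert (async_dones == sync_dones).all()
finally:
    async_env.close()
    sync_env.close()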