Mirror of https://github.com/Farama-Foundation/Gymnasium.git, synced 2025-08-27 16:57:10 +00:00
PEP-8 Fixes in algorithmic environment (#1382)
Remove trailing whitespace. Wrap lines to the 80-character limit (not all of them, but quite a few). Remove unused imports. Other miscellaneous PEP-8 fixes.
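Most of the string rewrites below rely on Python's implicit concatenation of adjacent string literals, which joins them at compile time and makes the old `+` operators unnecessary. A minimal sketch of the pattern (hypothetical message text, not taken from the commit):

    import warnings

    # Adjacent string literals are fused into one string at compile time,
    # so each source line stays under 80 columns without any "+" operators.
    warnings.warn(
        "this is a single logical message "
        "split across several source lines "
        "to respect the PEP-8 line-length limit")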
gym/envs/algorithmic/algorithmic_env.py

@@ -59,8 +59,8 @@ class AlgorithmicEnv(Env):
         self.last = 10
         # Cumulative reward earned this episode
         self.episode_total_reward = None
-        # Running tally of reward shortfalls. e.g. if there were 10 points to earn and
-        # we got 8, we'd append -2
+        # Running tally of reward shortfalls. e.g. if there were 10 points to
+        # earn and we got 8, we'd append -2
         AlgorithmicEnv.reward_shortfalls = []
         if chars:
             self.charmap = [chr(ord('A')+i) for i in range(base)]
@@ -78,7 +78,8 @@ class AlgorithmicEnv(Env):
         self.action_space = Tuple(
             [Discrete(len(self.MOVEMENTS)), Discrete(2), Discrete(self.base)]
         )
-        # Can see just what is on the input tape (one of n characters, or nothing)
+        # Can see just what is on the input tape (one of n characters, or
+        # nothing)
         self.observation_space = Discrete(self.base + 1)
         self.seed()
         self.reset()
@@ -170,10 +171,11 @@ class AlgorithmicEnv(Env):
         try:
             correct = pred == self.target[self.write_head_position]
         except IndexError:
-            logger.warn("It looks like you're calling step() even though this "+
-                "environment has already returned done=True. You should always call "+
-                "reset() once you receive done=True. Any further steps are undefined "+
-                "behaviour.")
+            logger.warn(
+                "It looks like you're calling step() even though this "
+                "environment has already returned done=True. You should "
+                "always call reset() once you receive done=True. Any "
+                "further steps are undefined behaviour.")
             correct = False
         if correct:
             reward = 1.0
@@ -209,12 +211,11 @@ class AlgorithmicEnv(Env):
         AlgorithmicEnv.reward_shortfalls.append(self.episode_total_reward - len(self.target))
         AlgorithmicEnv.reward_shortfalls = AlgorithmicEnv.reward_shortfalls[-self.last:]
         if len(AlgorithmicEnv.reward_shortfalls) == self.last and \
-          min(AlgorithmicEnv.reward_shortfalls) >= self.MIN_REWARD_SHORTFALL_FOR_PROMOTION and \
-          AlgorithmicEnv.min_length < 30:
+                min(AlgorithmicEnv.reward_shortfalls) >= self.MIN_REWARD_SHORTFALL_FOR_PROMOTION and \
+                AlgorithmicEnv.min_length < 30:
             AlgorithmicEnv.min_length += 1
             AlgorithmicEnv.reward_shortfalls = []
-

     def reset(self):
         self._check_levelup()
         self.last_action = None
@@ -264,7 +265,7 @@ class TapeAlgorithmicEnv(AlgorithmicEnv):

     def render_observation(self):
         x = self.read_head_position
-        x_str = "Observation Tape : " 
+        x_str = "Observation Tape : "
         for i in range(-2, self.input_width + 2):
             if i == x:
                 x_str += colorize(self._get_str_obs(np.array([i])), 'green', highlight=True)
@@ -278,6 +279,7 @@ class GridAlgorithmicEnv(AlgorithmicEnv):
     """An algorithmic env with a 2-d input grid."""
     MOVEMENTS = ['left', 'right', 'up', 'down']
     READ_HEAD_START = (0, 0)
+
     def __init__(self, rows, *args, **kwargs):
         self.rows = rows
         AlgorithmicEnv.__init__(self, *args, **kwargs)
@@ -316,7 +318,7 @@ class GridAlgorithmicEnv(AlgorithmicEnv):

     def render_observation(self):
         x = self.read_head_position
-        label = "Observation Grid : " 
+        label = "Observation Grid : "
         x_str = ""
         for j in range(-1, self.rows+1):
             if j != -1:
gym/envs/algorithmic/copy_.py

@@ -4,10 +4,10 @@ the output tape. http://arxiv.org/abs/1511.07275
 """
 from gym.envs.algorithmic import algorithmic_env

+
 class CopyEnv(algorithmic_env.TapeAlgorithmicEnv):
     def __init__(self, base=5, chars=True):
         super(CopyEnv, self).__init__(base=base, chars=chars)

     def target_from_input_data(self, input_data):
         return input_data
-
gym/envs/algorithmic/duplicated_input.py

@@ -5,6 +5,7 @@ http://arxiv.org/abs/1511.07275
 from __future__ import division
 from gym.envs.algorithmic import algorithmic_env

+
 class DuplicatedInputEnv(algorithmic_env.TapeAlgorithmicEnv):
     def __init__(self, duplication=2, base=5):
         self.duplication = duplication
gym/envs/algorithmic/repeat_copy.py

@@ -4,12 +4,13 @@ the output tape. http://arxiv.org/abs/1511.07275
 """
 from gym.envs.algorithmic import algorithmic_env

+
 class RepeatCopyEnv(algorithmic_env.TapeAlgorithmicEnv):
     MIN_REWARD_SHORTFALL_FOR_PROMOTION = -.1

     def __init__(self, base=5):
         super(RepeatCopyEnv, self).__init__(base=base, chars=True)
         self.last = 50

     def target_from_input_data(self, input_data):
         return input_data + list(reversed(input_data)) + input_data
gym/envs/algorithmic/reverse.py

@@ -2,11 +2,12 @@
 Task is to reverse content over the input tape.
 http://arxiv.org/abs/1511.07275
 """

 from gym.envs.algorithmic import algorithmic_env

+
 class ReverseEnv(algorithmic_env.TapeAlgorithmicEnv):
     MIN_REWARD_SHORTFALL_FOR_PROMOTION = -.1

     def __init__(self, base=2):
         super(ReverseEnv, self).__init__(base=base, chars=True, starting_min_length=1)
         self.last = 50
gym/envs/algorithmic/reversed_addition.py

@@ -1,7 +1,7 @@
 from __future__ import division
-import numpy as np
 from gym.envs.algorithmic import algorithmic_env

+
 class ReversedAdditionEnv(algorithmic_env.GridAlgorithmicEnv):
     def __init__(self, rows=2, base=3):
         super(ReversedAdditionEnv, self).__init__(rows=rows, base=base, chars=False)
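For orientation, here is a minimal usage sketch of one of the environments touched above. The constructor signature and the action-space structure come straight from the diff; the module path `gym.envs.algorithmic.copy_` and the 4-tuple `step()` return are assumptions based on the legacy gym these files belong to (the algorithmic environments are not part of Gymnasium):

    # Minimal sketch, assuming a legacy gym checkout that still ships the
    # algorithmic environments (module path copy_ is an assumption).
    from gym.envs.algorithmic.copy_ import CopyEnv

    env = CopyEnv(base=5, chars=True)  # defaults shown in the diff above
    obs = env.reset()
    done = False
    while not done:
        # An action is (tape movement, write flag, predicted character),
        # matching Tuple([Discrete(len(MOVEMENTS)), Discrete(2), Discrete(base)]).
        action = env.action_space.sample()
        obs, reward, done, info = env.step(action)  # classic 4-tuple gym API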