Mirror of https://github.com/Farama-Foundation/Gymnasium.git, synced 2025-08-28 17:27:08 +00:00
Assert is a keyword, not a function
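The fix is mechanical: assert is a statement, so in assert(cond) the parentheses are just a redundant grouping of the condition. They become a real bug the moment a message is added, because assert(cond, msg) asserts a two-element tuple, and a non-empty tuple is always truthy. A minimal sketch of the pitfall (illustration only, not part of the commit):

x = -1

# Function-call style with a message builds the tuple
# (x >= 0, "x must be non-negative"). A non-empty tuple is always
# truthy, so this assertion can never fail; CPython even warns
# "SyntaxWarning: assertion is always true" for it.
assert (x >= 0, "x must be non-negative")  # passes silently

# Statement form fails as intended for x = -1.
try:
    assert x >= 0, "x must be non-negative"
except AssertionError as err:
    print("caught:", err)  # caught: x must be non-negative

Every hunk below only drops the redundant parentheses; no condition or message changes.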
@@ -41,7 +41,7 @@ class AlgorithmicEnv(Env):
     def _get_obs(self, pos=None):
         if pos is None:
             pos = self.x
-        assert(isinstance(pos, np.ndarray) and pos.shape[0] == self.inp_dim)
+        assert isinstance(pos, np.ndarray) and pos.shape[0] == self.inp_dim
         if ha(pos) not in self.content:
             self.content[ha(pos)] = self.base
         return self.content[ha(pos)]
@@ -90,7 +90,7 @@ class AlgorithmicEnv(Env):
             x_str = label + x_str
             return x_str
         else:
-            assert(False)
+            assert False


     def _render(self, mode='human', close=False):
@@ -406,7 +406,7 @@ class BipedalWalker(gym.Env):
             1.0 if self.legs[3].ground_contact else 0.0
             ]
         state += [l.fraction for l in self.lidar]
-        assert(len(state)==24)
+        assert len(state)==24

         self.scroll = pos.x - VIEWPORT_W/SCALE/5

@@ -263,7 +263,7 @@ class LunarLander(gym.Env):
             1.0 if self.legs[0].ground_contact else 0.0,
             1.0 if self.legs[1].ground_contact else 0.0
             ]
-        assert(len(state)==8)
+        assert len(state)==8

         reward = 0
         shaping = \
@@ -21,7 +21,7 @@ class OneRoundDeterministicRewardEnv(gym.Env):
         self._reset()

     def _step(self, action):
-        assert(self.action_space.contains(action))
+        assert self.action_space.contains(action)
         if action:
             reward = 1
         else:
@@ -22,7 +22,7 @@ class OneRoundNondeterministicRewardEnv(gym.Env):
         self._reset()

     def _step(self, action):
-        assert(self.action_space.contains(action))
+        assert self.action_space.contains(action)
         if action:
             #your agent should figure out that this option has expected value 2.5
             reward = random.choice([0, 5])
@@ -28,7 +28,7 @@ class TwoRoundDeterministicRewardEnv(gym.Env):
     def _step(self, action):
         rewards = [[0, 3], [1, 2]]

-        assert(self.action_space.contains(action))
+        assert self.action_space.contains(action)

         if self.firstAction is None:
             self.firstAction = action
@@ -38,7 +38,7 @@ class TwoRoundNondeterministicRewardEnv(gym.Env):
             ]
         ]

-        assert(self.action_space.contains(action))
+        assert self.action_space.contains(action)

         if self.firstAction is None:
             self.firstAction = action
@@ -65,7 +65,7 @@ class ConvergenceControl(gym.Env):
         """
         Perform some action in the environment
         """
-        assert(self.action_space.contains(action))
+        assert self.action_space.contains(action)

         lr, decay, momentum, batch_size, l1, l2 = action;

@@ -59,7 +59,7 @@ class CNNClassifierTraining(gym.Env):
         """
         Perform some action in the environment
         """
-        assert (self.action_space.contains(action))
+        assert self.action_space.contains(action)

         lr, decay, momentum, batch_size, l1, l2, convs, fcs = action

@@ -89,7 +89,7 @@ class BlackjackEnv(gym.Env):
         return [seed]

     def _step(self, action):
-        assert(self.action_space.contains(action))
+        assert self.action_space.contains(action)
         if action: # hit: add a card to players hand and return
             self.player.append(draw_card(self.np_random))
             if is_bust(self.player):
@@ -36,7 +36,7 @@ class NChainEnv(gym.Env):
         return [seed]

     def _step(self, action):
-        assert(self.action_space.contains(action))
+        assert self.action_space.contains(action)
         if self.np_random.rand() < self.slip:
             action = not action # agent slipped, reverse action taken
         if action: # 'backwards': go back to the beginning, get small reward