Assert is a keyword, not a function

Author: Jie Tang
Date: 2016-06-16 00:20:22 -07:00
parent 5372d34b37
commit 36d476224e
11 changed files with 101 additions and 101 deletions
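Why the change matters: in Python, assert is a statement, so the parentheses in the removed lines below are merely redundant. The real hazard appears once a failure message is added inside the parentheses: the expression becomes a two-element tuple, and a non-empty tuple is always truthy, so the assertion can never fail. A minimal illustration of the difference (hypothetical snippet, not taken from the Gym codebase):

x = 5

# Keyword form: the condition and the optional message are separated by a comma.
assert x > 0, "x must be positive"   # raises AssertionError when the condition is false

# Redundant parentheses around a single condition still behave correctly.
assert (x > 0)

# Pitfall of treating assert as a function: this builds the tuple
# (x > 0, "x must be positive"), which is truthy, so the assert always
# passes. CPython flags literal tuples here with "SyntaxWarning:
# assertion is always true, perhaps remove parentheses?".
assert (x > 0, "x must be positive")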

@@ -41,7 +41,7 @@ class AlgorithmicEnv(Env):
     def _get_obs(self, pos=None):
         if pos is None:
             pos = self.x
-        assert(isinstance(pos, np.ndarray) and pos.shape[0] == self.inp_dim)
+        assert isinstance(pos, np.ndarray) and pos.shape[0] == self.inp_dim
         if ha(pos) not in self.content:
             self.content[ha(pos)] = self.base
         return self.content[ha(pos)]
@@ -90,7 +90,7 @@ class AlgorithmicEnv(Env):
                 x_str = label + x_str
             return x_str
         else:
-            assert(False)
+            assert False
     def _render(self, mode='human', close=False):

@@ -406,7 +406,7 @@ class BipedalWalker(gym.Env):
             1.0 if self.legs[3].ground_contact else 0.0
             ]
         state += [l.fraction for l in self.lidar]
-        assert(len(state)==24)
+        assert len(state)==24
         self.scroll = pos.x - VIEWPORT_W/SCALE/5

@@ -263,7 +263,7 @@ class LunarLander(gym.Env):
             1.0 if self.legs[0].ground_contact else 0.0,
             1.0 if self.legs[1].ground_contact else 0.0
             ]
-        assert(len(state)==8)
+        assert len(state)==8
         reward = 0
         shaping = \

@@ -21,7 +21,7 @@ class OneRoundDeterministicRewardEnv(gym.Env):
         self._reset()
     def _step(self, action):
-        assert(self.action_space.contains(action))
+        assert self.action_space.contains(action)
         if action:
             reward = 1
         else:

@@ -22,7 +22,7 @@ class OneRoundNondeterministicRewardEnv(gym.Env):
         self._reset()
     def _step(self, action):
-        assert(self.action_space.contains(action))
+        assert self.action_space.contains(action)
         if action:
             #your agent should figure out that this option has expected value 2.5
             reward = random.choice([0, 5])

@@ -28,7 +28,7 @@ class TwoRoundDeterministicRewardEnv(gym.Env):
     def _step(self, action):
         rewards = [[0, 3], [1, 2]]
-        assert(self.action_space.contains(action))
+        assert self.action_space.contains(action)
         if self.firstAction is None:
             self.firstAction = action

@@ -38,7 +38,7 @@ class TwoRoundNondeterministicRewardEnv(gym.Env):
             ]
         ]
-        assert(self.action_space.contains(action))
+        assert self.action_space.contains(action)
         if self.firstAction is None:
             self.firstAction = action

@@ -65,7 +65,7 @@ class ConvergenceControl(gym.Env):
"""
Perform some action in the environment
"""
assert(self.action_space.contains(action))
assert self.action_space.contains(action)
lr, decay, momentum, batch_size, l1, l2 = action;

@@ -59,7 +59,7 @@ class CNNClassifierTraining(gym.Env):
"""
Perform some action in the environment
"""
assert (self.action_space.contains(action))
assert self.action_space.contains(action)
lr, decay, momentum, batch_size, l1, l2, convs, fcs = action

@@ -89,7 +89,7 @@ class BlackjackEnv(gym.Env):
         return [seed]
     def _step(self, action):
-        assert(self.action_space.contains(action))
+        assert self.action_space.contains(action)
         if action: # hit: add a card to players hand and return
             self.player.append(draw_card(self.np_random))
             if is_bust(self.player):

@@ -36,7 +36,7 @@ class NChainEnv(gym.Env):
         return [seed]
     def _step(self, action):
-        assert(self.action_space.contains(action))
+        assert self.action_space.contains(action)
         if self.np_random.rand() < self.slip:
             action = not action # agent slipped, reverse action taken
         if action: # 'backwards': go back to the beginning, get small reward