diff --git a/baselines/acer/acer_simple.py b/baselines/acer/acer_simple.py
index 3eaa6cc..0ca4fb9 100644
--- a/baselines/acer/acer_simple.py
+++ b/baselines/acer/acer_simple.py
@@ -243,7 +243,7 @@ class Runner(object):
             mb_mus.append(mus)
             mb_dones.append(self.dones)
             obs, rewards, dones, _ = self.env.step(actions)
-            # states information for statefull predictors like LSTM
+            # states information for stateful models like LSTM
             self.states = states
             self.dones = dones
             self.update_obs(obs, dones)
@@ -260,7 +260,7 @@ class Runner(object):
 
         mb_dones = np.asarray(mb_dones, dtype=np.bool).swapaxes(1, 0)
 
-        mb_masks = mb_dones # Used for statefull predictors like LSTM's to mask state when done
+        mb_masks = mb_dones # Used for stateful models like LSTMs to mask state when done
         mb_dones = mb_dones[:, 1:] # Used for calculating returns. The dones array is now aligned with rewards
 
         # shapes are now [nenv, nsteps, []]
diff --git a/baselines/acktr/kfac.py b/baselines/acktr/kfac.py
index 60af0ee..b420819 100644
--- a/baselines/acktr/kfac.py
+++ b/baselines/acktr/kfac.py
@@ -134,7 +134,7 @@ class KfacOptimizer():
         # check associated weights and bias for homogeneous coordinate representation
         # and check redundent factors
         # TO-DO: there may be a bug to detect associate bias and weights for
-        # forking layer, e.g. in inception predictors.
+        # forking layer, e.g. in inception models.
         for param in varlist:
             factorTensors[param]['assnWeights'] = None
             factorTensors[param]['assnBias'] = None