defaults are handled through registry
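
A minimal sketch of the new mechanism (simplified and illustrative, not the exact baselines code): each algorithm's defaults module now exposes a plain `defaults` dict keyed by env type, the learn function passes that dict to registry.register, and the launcher reads defaults back from the registry instead of importing the defaults module and calling an env-type-named function via getattr.

# Illustrative sketch only; names follow the diff below but the code is simplified.
registry = {}

def register(name, supports_vecenv=True, defaults={}, **kwargs):
    def _thunk(learn_fn):
        # Store the learn function together with its per-env-type defaults.
        registry[name] = dict(fn=learn_fn, supports_vecenv=supports_vecenv,
                              defaults=defaults, **kwargs)
        return learn_fn
    return _thunk

# An algorithm module now ships a dict instead of atari()/mujoco() functions ...
defaults = {'atari': dict(lrschedule='constant')}

@register('acer', defaults=defaults)
def learn(network, env, lrschedule='linear', **kwargs):
    return lrschedule

# ... and the launcher looks defaults up in the registry:
def get_learn_function_defaults(alg, env_type):
    entry = registry.get(alg)
    assert entry is not None, 'Unregistered algorithm {}'.format(alg)
    return entry['defaults'].get(env_type, {})

print(get_learn_function_defaults('acer', 'atari'))   # {'lrschedule': 'constant'}
print(get_learn_function_defaults('acer', 'mujoco'))  # {}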
baselines/acer/acer.py
@@ -16,6 +16,7 @@ from baselines.a2c.utils import EpisodeStats
 from baselines.a2c.utils import get_by_index, check_shape, avg_norm, gradient_add, q_explained_variance
 from baselines.acer.buffer import Buffer
 from baselines.acer.runner import Runner
+from baselines.acer.defaults import defaults
 
 # remove last step
 def strip(var, nenvs, nsteps, flat = False):
@@ -270,7 +271,7 @@ class Acer():
                 logger.record_tabular(name, float(val))
             logger.dump_tabular()
 
-@registry.register('acer')
+@registry.register('acer', defaults=defaults)
 def learn(network, env, seed=None, nsteps=20, total_timesteps=int(80e6), q_coef=0.5, ent_coef=0.01,
           max_grad_norm=10, lr=7e-4, lrschedule='linear', rprop_epsilon=1e-5, rprop_alpha=0.99, gamma=0.99,
           log_interval=100, buffer_size=50000, replay_ratio=4, replay_start=10000, c=10.0,
baselines/acer/defaults.py
@@ -1,4 +1,3 @@
-def atari():
-    return dict(
-        lrschedule='constant'
-    )
+defaults = {
+    'atari': dict(lrschedule='constant')
+}
baselines/acktr/acktr.py
@@ -11,6 +11,7 @@ from baselines.common.tf_util import get_session, save_variables, load_variables
 from baselines.a2c.runner import Runner
 from baselines.a2c.utils import Scheduler, find_trainable_variables
 from baselines.acktr import kfac
+from baselines.acktr.defaults import defaults
 
 
 class Model(object):
@@ -90,7 +91,7 @@ class Model(object):
         self.initial_state = step_model.initial_state
         tf.global_variables_initializer().run(session=sess)
 
-@registry.register('acktr')
+@registry.register('acktr', defaults=defaults)
 def learn(network, env, seed, total_timesteps=int(40e6), gamma=0.99, log_interval=1, nprocs=32, nsteps=20,
           ent_coef=0.01, vf_coef=0.5, vf_fisher_coef=1.0, lr=0.25, max_grad_norm=0.5,
           kfac_clip=0.001, save_interval=None, lrschedule='linear', load_path=None, is_async=True, **network_kwargs):
baselines/acktr/defaults.py
@@ -1,5 +1,6 @@
-def mujoco():
-    return dict(
+defaults = {
+    'mujoco' : dict(
         nsteps=2500,
         value_network='copy'
     )
+}
baselines/common/cmd_util.py
@@ -16,7 +16,7 @@ from baselines.common import set_global_seeds
 from baselines.common.atari_wrappers import make_atari, wrap_deepmind
 from baselines.common.vec_env.subproc_vec_env import SubprocVecEnv
 from baselines.common.vec_env.dummy_vec_env import DummyVecEnv
 from baselines.common.vec_env.vec_normalize import VecNormalize
 from baselines.common.vec_env.vec_frame_stack import VecFrameStack
 
 from baselines.common import retro_wrappers
@@ -46,6 +46,8 @@ def make_vec_env(env_id, env_type, num_env, seed, wrapper_kwargs=None, start_ind
+    if frame_stack_size > 1:
+        venv = VecFrameStack(venv, frame_stack_size)
 
     return venv
 
 
 def env_thunk(env_id, env_type, subrank=0, seed=None, reward_scale=1.0, gamestate=None, wrapper_kwargs={}):
     mpi_rank = MPI.COMM_WORLD.Get_rank() if MPI else 0
baselines/deepq/deepq.py
@@ -18,6 +18,7 @@ from baselines.deepq.utils import ObservationInput
 
 from baselines.common.tf_util import get_session
 from baselines.deepq.models import build_q_func
+from baselines.deepq.defaults import defaults
 
 
 class ActWrapper(object):
@@ -92,7 +93,7 @@ def load_act(path):
     return ActWrapper.load_act(path)
 
 
-@registry.register('deepq', supports_vecenvs=False)
+@registry.register('deepq', supports_vecenvs=False, defaults=defaults)
 def learn(env,
           network,
          seed=None,
baselines/deepq/defaults.py
@@ -16,6 +16,8 @@ def atari():
         dueling=True
     )
 
 def retro():
     return atari()
 
+defaults = {
+    'atari': atari(),
+    'retro': atari()
+}
baselines/ppo2/defaults.py
@@ -1,5 +1,5 @@
-def mujoco():
-    return dict(
+defaults = {
+    'mujoco': dict(
         nsteps=2048,
         nminibatches=32,
         lam=0.95,
@@ -10,13 +10,13 @@ def mujoco():
         lr=lambda f: 3e-4 * f,
         cliprange=0.2,
         value_network='copy'
-    )
+    ),
 
-def atari():
-    return dict(
+    'atari': dict(
         nsteps=128, nminibatches=4,
         lam=0.95, gamma=0.99, noptepochs=4, log_interval=1,
         ent_coef=.01,
         lr=lambda f : f * 2.5e-4,
         cliprange=lambda f : f * 0.1,
     )
+}
baselines/ppo2/ppo2.py
@@ -15,6 +15,7 @@ from baselines.common.mpi_adam_optimizer import MpiAdamOptimizer
 from mpi4py import MPI
 from baselines.common.tf_util import initialize
 from baselines.common.mpi_util import sync_from_root
+from baselines.ppo2.defaults import defaults
 
 class Model(object):
     """
@@ -218,7 +219,7 @@ def constfn(val):
         return val
     return f
 
-@registry.register('ppo2')
+@registry.register('ppo2', defaults=defaults)
 def learn(*, network, env, total_timesteps, eval_env = None, seed=None, nsteps=2048, ent_coef=0.0, lr=3e-4,
           vf_coef=0.5, max_grad_norm=0.5, gamma=0.99, lam=0.95,
           log_interval=10, nminibatches=4, noptepochs=4, cliprange=0.2,
baselines/registry.py
@@ -1,7 +1,7 @@
 from baselines import logger
 registry = {}
 
-def register(name, supports_vecenv=True, **kwargs):
+def register(name, supports_vecenv=True, defaults={}, **kwargs):
     def get_fn_entrypoint(fn):
         import inspect
         return '.'.join([inspect.getmodule(fn).__name__, fn.__name__])
@@ -15,6 +15,7 @@ def register(name, supports_vecenv=True, **kwargs):
         registry[name] = dict(
             fn = learn_fn,
             supports_vecenv=supports_vecenv,
+            defaults=defaults,
             **kwargs
         )
         return learn_fn
baselines/run.py
@@ -5,7 +5,7 @@ import gym
 from collections import defaultdict
 import numpy as np
 
 from baselines.common.vec_env.vec_frame_stack import VecFrameStack
 from baselines.common.vec_env.vec_normalize import VecNormalize
 from baselines.common.cmd_util import common_arg_parser, parse_unknown_args, make_vec_env, env_thunk
 from baselines import logger
 from baselines.registry import registry
@@ -86,6 +86,7 @@ def build_env(args):
 
     env_type, env_id = get_env_type(args.env)
+    assert alg in registry, 'Unknown algorithm {}'.format(alg)
 
     if env_type in {'atari', 'retro'}:
         frame_stack_size = 4
     else:
@@ -141,13 +142,10 @@ def get_learn_function(alg):
 
 
 def get_learn_function_defaults(alg, env_type):
-    try:
-        alg_defaults = get_alg_module(alg, 'defaults')
-        kwargs = getattr(alg_defaults, env_type)()
-    except (ImportError, AttributeError):
-        kwargs = {}
-    return kwargs
+    entry = registry.get(alg)
+    assert entry is not None, 'Unregistered algorithm {}'.format(alg)
+    return entry['defaults'].get(env_type, {})
 
 
 
 def parse_cmdline_kwargs(args):
baselines/trpo_mpi/defaults.py
@@ -28,3 +28,9 @@ def mujoco():
         vf_stepsize=1e-3,
         normalize_observations=True,
     )
+
+defaults = {
+    'atari': atari(),
+    'mujoco': mujoco(),
+}
+
baselines/trpo_mpi/trpo_mpi.py
@@ -13,6 +13,8 @@ from baselines.common.input import observation_placeholder
 from baselines.common.policies import build_policy
 from contextlib import contextmanager
+
+from baselines.trpo_mpi.defaults import defaults
 
 def traj_segment_generator(pi, env, horizon, stochastic):
     # Initialize state variables
     t = 0
@@ -82,7 +84,7 @@ def add_vtarg_and_adv(seg, gamma, lam):
         gaelam[t] = lastgaelam = delta + gamma * lam * nonterminal * lastgaelam
     seg["tdlamret"] = seg["adv"] + seg["vpred"]
 
-@registry.register('trpo_mpi', supports_vecenvs=False)
+@registry.register('trpo_mpi', supports_vecenvs=False, defaults=defaults)
 def learn(*,
         network,
         env,