use algorithm registry - staging for internal benchmarks
@@ -0,0 +1,12 @@
# explicitly import sub-packages to register algorithms

import baselines.a2c.a2c
import baselines.acer.acer
import baselines.acktr.acktr
import baselines.deepq.deepq
import baselines.ddpg.ddpg
import baselines.ppo2.ppo2

# not really sure why flake8 complains only about trpo_mpi here...
import baselines.trpo_mpi.trpo_mpi # noqa: F401
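These imports matter because registration happens as a side effect of importing each algorithm sub-package: the `@registry.register(...)` decorator (added in `baselines/registry.py` later in this diff) runs at import time and records the `learn` function. A minimal sketch of that pattern, using toy names that are not part of the commit:

```python
# registry_sketch.py -- toy illustration of registration-by-import (hypothetical names).
REGISTRY = {}

def register(name):
    def _thunk(fn):
        REGISTRY[name] = fn   # recorded the moment the decorated module is imported
        return fn
    return _thunk

# A module such as toy_alg.py would contain:
#
#   @register('toy')
#   def learn(env, total_timesteps):
#       ...
#
# 'toy' only appears in REGISTRY once `import toy_alg` has executed the decorator,
# which is exactly why the new module above imports every algorithm sub-package explicitly.
```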
|
@@ -2,13 +2,12 @@ import time
|
||||
import functools
|
||||
import tensorflow as tf
|
||||
|
||||
from baselines import logger
|
||||
from baselines import logger, registry
|
||||
|
||||
from baselines.common import set_global_seeds, explained_variance
|
||||
from baselines.common import tf_util
|
||||
from baselines.common.policies import build_policy
|
||||
|
||||
|
||||
from baselines.a2c.utils import Scheduler, find_trainable_variables
|
||||
from baselines.a2c.runner import Runner
|
||||
|
||||
@@ -114,6 +113,7 @@ class Model(object):
|
||||
tf.global_variables_initializer().run(session=sess)
|
||||
|
||||
|
||||
@registry.register('a2c')
|
||||
def learn(
|
||||
network,
|
||||
env,
|
||||
|
@@ -2,11 +2,12 @@ import time
|
||||
import functools
|
||||
import numpy as np
|
||||
import tensorflow as tf
|
||||
from baselines import logger
|
||||
from baselines import logger, registry
|
||||
|
||||
from baselines.common import set_global_seeds
|
||||
from baselines.common.policies import build_policy
|
||||
from baselines.common.tf_util import get_session, save_variables
|
||||
from baselines.common.vec_env.vec_frame_stack import VecFrameStack
|
||||
|
||||
from baselines.a2c.utils import batch_to_seq, seq_to_batch
|
||||
from baselines.a2c.utils import cat_entropy_softmax
|
||||
@@ -15,6 +16,7 @@ from baselines.a2c.utils import EpisodeStats
|
||||
from baselines.a2c.utils import get_by_index, check_shape, avg_norm, gradient_add, q_explained_variance
|
||||
from baselines.acer.buffer import Buffer
|
||||
from baselines.acer.runner import Runner
|
||||
from baselines.acer.defaults import defaults
|
||||
|
||||
# remove last step
|
||||
def strip(var, nenvs, nsteps, flat = False):
|
||||
@@ -55,8 +57,7 @@ def q_retrace(R, D, q_i, v, rho_i, nenvs, nsteps, gamma):
|
||||
# return tf.minimum(1 + eps_clip, tf.maximum(1 - eps_clip, ratio))
|
||||
|
||||
class Model(object):
|
||||
def __init__(self, policy, ob_space, ac_space, nenvs, nsteps, nstack, num_procs,
|
||||
ent_coef, q_coef, gamma, max_grad_norm, lr,
|
||||
def __init__(self, policy, ob_space, ac_space, nenvs, nsteps, ent_coef, q_coef, gamma, max_grad_norm, lr,
|
||||
rprop_alpha, rprop_epsilon, total_timesteps, lrschedule,
|
||||
c, trust_region, alpha, delta):
|
||||
|
||||
@@ -71,8 +72,8 @@ class Model(object):
|
||||
LR = tf.placeholder(tf.float32, [])
|
||||
eps = 1e-6
|
||||
|
||||
step_ob_placeholder = tf.placeholder(dtype=ob_space.dtype, shape=(nenvs,) + ob_space.shape[:-1] + (ob_space.shape[-1] * nstack,))
|
||||
train_ob_placeholder = tf.placeholder(dtype=ob_space.dtype, shape=(nenvs*(nsteps+1),) + ob_space.shape[:-1] + (ob_space.shape[-1] * nstack,))
|
||||
step_ob_placeholder = tf.placeholder(dtype=ob_space.dtype, shape=(nenvs,) + ob_space.shape)
|
||||
train_ob_placeholder = tf.placeholder(dtype=ob_space.dtype, shape=(nenvs*(nsteps+1),) + ob_space.shape)
|
||||
with tf.variable_scope('acer_model', reuse=tf.AUTO_REUSE):
|
||||
|
||||
step_model = policy(observ_placeholder=step_ob_placeholder, sess=sess)
|
||||
@@ -247,6 +248,7 @@ class Acer():
|
||||
# get obs, actions, rewards, mus, dones from buffer.
|
||||
obs, actions, rewards, mus, dones, masks = buffer.get()
|
||||
|
||||
|
||||
# reshape stuff correctly
|
||||
obs = obs.reshape(runner.batch_ob_shape)
|
||||
actions = actions.reshape([runner.nbatch])
|
||||
@@ -269,8 +271,8 @@ class Acer():
|
||||
logger.record_tabular(name, float(val))
|
||||
logger.dump_tabular()
|
||||
|
||||
|
||||
def learn(network, env, seed=None, nsteps=20, nstack=4, total_timesteps=int(80e6), q_coef=0.5, ent_coef=0.01,
|
||||
@registry.register('acer', defaults=defaults)
|
||||
def learn(network, env, seed=None, nsteps=20, total_timesteps=int(80e6), q_coef=0.5, ent_coef=0.01,
|
||||
max_grad_norm=10, lr=7e-4, lrschedule='linear', rprop_epsilon=1e-5, rprop_alpha=0.99, gamma=0.99,
|
||||
log_interval=100, buffer_size=50000, replay_ratio=4, replay_start=10000, c=10.0,
|
||||
trust_region=True, alpha=0.99, delta=1, load_path=None, **network_kwargs):
|
||||
@@ -342,21 +344,24 @@ def learn(network, env, seed=None, nsteps=20, nstack=4, total_timesteps=int(80e6
|
||||
print("Running Acer Simple")
|
||||
print(locals())
|
||||
set_global_seeds(seed)
|
||||
policy = build_policy(env, network, estimate_q=True, **network_kwargs)
|
||||
if not isinstance(env, VecFrameStack):
|
||||
env = VecFrameStack(env, 1)
|
||||
|
||||
policy = build_policy(env, network, estimate_q=True, **network_kwargs)
|
||||
nenvs = env.num_envs
|
||||
ob_space = env.observation_space
|
||||
ac_space = env.action_space
|
||||
num_procs = len(env.remotes) if hasattr(env, 'remotes') else 1# HACK
|
||||
model = Model(policy=policy, ob_space=ob_space, ac_space=ac_space, nenvs=nenvs, nsteps=nsteps, nstack=nstack,
|
||||
num_procs=num_procs, ent_coef=ent_coef, q_coef=q_coef, gamma=gamma,
|
||||
|
||||
nstack = env.nstack
|
||||
model = Model(policy=policy, ob_space=ob_space, ac_space=ac_space, nenvs=nenvs, nsteps=nsteps,
|
||||
ent_coef=ent_coef, q_coef=q_coef, gamma=gamma,
|
||||
max_grad_norm=max_grad_norm, lr=lr, rprop_alpha=rprop_alpha, rprop_epsilon=rprop_epsilon,
|
||||
total_timesteps=total_timesteps, lrschedule=lrschedule, c=c,
|
||||
trust_region=trust_region, alpha=alpha, delta=delta)
|
||||
|
||||
runner = Runner(env=env, model=model, nsteps=nsteps, nstack=nstack)
|
||||
runner = Runner(env=env, model=model, nsteps=nsteps)
|
||||
if replay_ratio > 0:
|
||||
buffer = Buffer(env=env, nsteps=nsteps, nstack=nstack, size=buffer_size)
|
||||
buffer = Buffer(env=env, nsteps=nsteps, size=buffer_size)
|
||||
else:
|
||||
buffer = None
|
||||
nbatch = nenvs*nsteps
|
||||
|
@@ -2,11 +2,16 @@ import numpy as np
|
||||
|
||||
class Buffer(object):
|
||||
# gets obs, actions, rewards, mu's, (states, masks), dones
|
||||
def __init__(self, env, nsteps, nstack, size=50000):
|
||||
def __init__(self, env, nsteps, size=50000):
|
||||
self.nenv = env.num_envs
|
||||
self.nsteps = nsteps
|
||||
self.nh, self.nw, self.nc = env.observation_space.shape
|
||||
self.nstack = nstack
|
||||
# self.nh, self.nw, self.nc = env.observation_space.shape
|
||||
self.obs_shape = env.observation_space.shape
|
||||
self.obs_dtype = env.observation_space.dtype
|
||||
self.ac_dtype = env.action_space.dtype
|
||||
self.nc = self.obs_shape[-1]
|
||||
self.nstack = env.nstack
|
||||
self.nc //= self.nstack
|
||||
self.nbatch = self.nenv * self.nsteps
|
||||
self.size = size // (self.nsteps) # Each loc contains nenv * nsteps frames, thus total buffer is nenv * size frames
|
||||
|
||||
@@ -33,22 +38,11 @@ class Buffer(object):
|
||||
# Generate stacked frames
|
||||
def decode(self, enc_obs, dones):
|
||||
# enc_obs has shape [nenvs, nsteps + nstack, nh, nw, nc]
|
||||
# dones has shape [nenvs, nsteps, nh, nw, nc]
|
||||
# dones has shape [nenvs, nsteps]
|
||||
# returns stacked obs of shape [nenv, (nsteps + 1), nh, nw, nstack*nc]
|
||||
nstack, nenv, nsteps, nh, nw, nc = self.nstack, self.nenv, self.nsteps, self.nh, self.nw, self.nc
|
||||
y = np.empty([nsteps + nstack - 1, nenv, 1, 1, 1], dtype=np.float32)
|
||||
obs = np.zeros([nstack, nsteps + nstack, nenv, nh, nw, nc], dtype=np.uint8)
|
||||
x = np.reshape(enc_obs, [nenv, nsteps + nstack, nh, nw, nc]).swapaxes(1,
|
||||
0) # [nsteps + nstack, nenv, nh, nw, nc]
|
||||
y[3:] = np.reshape(1.0 - dones, [nenv, nsteps, 1, 1, 1]).swapaxes(1, 0) # keep
|
||||
y[:3] = 1.0
|
||||
# y = np.reshape(1 - dones, [nenvs, nsteps, 1, 1, 1])
|
||||
for i in range(nstack):
|
||||
obs[-(i + 1), i:] = x
|
||||
# obs[:,i:,:,:,-(i+1),:] = x
|
||||
x = x[:-1] * y
|
||||
y = y[1:]
|
||||
return np.reshape(obs[:, 3:].transpose((2, 1, 3, 4, 0, 5)), [nenv, (nsteps + 1), nh, nw, nstack * nc])
|
||||
|
||||
return _stack_obs(enc_obs, dones,
|
||||
nsteps=self.nsteps)
|
||||
|
||||
def put(self, enc_obs, actions, rewards, mus, dones, masks):
|
||||
# enc_obs [nenv, (nsteps + nstack), nh, nw, nc]
|
||||
@@ -56,8 +50,8 @@ class Buffer(object):
|
||||
# mus [nenv, nsteps, nact]
|
||||
|
||||
if self.enc_obs is None:
|
||||
self.enc_obs = np.empty([self.size] + list(enc_obs.shape), dtype=np.uint8)
|
||||
self.actions = np.empty([self.size] + list(actions.shape), dtype=np.int32)
|
||||
self.enc_obs = np.empty([self.size] + list(enc_obs.shape), dtype=self.obs_dtype)
|
||||
self.actions = np.empty([self.size] + list(actions.shape), dtype=self.ac_dtype)
|
||||
self.rewards = np.empty([self.size] + list(rewards.shape), dtype=np.float32)
|
||||
self.mus = np.empty([self.size] + list(mus.shape), dtype=np.float32)
|
||||
self.dones = np.empty([self.size] + list(dones.shape), dtype=np.bool)
|
||||
@@ -101,3 +95,62 @@ class Buffer(object):
|
||||
mus = take(self.mus)
|
||||
masks = take(self.masks)
|
||||
return obs, actions, rewards, mus, dones, masks
|
||||
|
||||
|
||||
|
||||
def _stack_obs_ref(enc_obs, dones, nsteps):
|
||||
nenv = enc_obs.shape[0]
|
||||
nstack = enc_obs.shape[1] - nsteps
|
||||
nh, nw, nc = enc_obs.shape[2:]
|
||||
obs_dtype = enc_obs.dtype
|
||||
obs_shape = (nh, nw, nc*nstack)
|
||||
|
||||
mask = np.empty([nsteps + nstack - 1, nenv, 1, 1, 1], dtype=np.float32)
|
||||
obs = np.zeros([nstack, nsteps + nstack, nenv, nh, nw, nc], dtype=obs_dtype)
|
||||
x = np.reshape(enc_obs, [nenv, nsteps + nstack, nh, nw, nc]).swapaxes(1, 0) # [nsteps + nstack, nenv, nh, nw, nc]
|
||||
|
||||
mask[nstack-1:] = np.reshape(1.0 - dones, [nenv, nsteps, 1, 1, 1]).swapaxes(1, 0) # keep
|
||||
mask[:nstack-1] = 1.0
|
||||
|
||||
# y = np.reshape(1 - dones, [nenvs, nsteps, 1, 1, 1])
|
||||
for i in range(nstack):
|
||||
obs[-(i + 1), i:] = x
|
||||
# obs[:,i:,:,:,-(i+1),:] = x
|
||||
x = x[:-1] * mask
|
||||
mask = mask[1:]
|
||||
|
||||
return np.reshape(obs[:, (nstack-1):].transpose((2, 1, 3, 4, 0, 5)), (nenv, (nsteps + 1)) + obs_shape)
|
||||
|
||||
def _stack_obs(enc_obs, dones, nsteps):
|
||||
nenv = enc_obs.shape[0]
|
||||
nstack = enc_obs.shape[1] - nsteps
|
||||
nc = enc_obs.shape[-1]
|
||||
|
||||
obs_ = np.zeros((nenv, nsteps + 1) + enc_obs.shape[2:-1] + (enc_obs.shape[-1] * nstack, ), dtype=enc_obs.dtype)
|
||||
mask = np.ones((nenv, nsteps+1), dtype=enc_obs.dtype)
|
||||
mask[:, 1:] = 1.0 - dones
|
||||
mask = mask.reshape(mask.shape + tuple(np.ones(len(enc_obs.shape)-2, dtype=np.uint8)))
|
||||
|
||||
for i in range(nstack-1, -1, -1):
|
||||
obs_[..., i * nc : (i + 1) * nc] = enc_obs[:, i : i + nsteps + 1, :]
|
||||
if i < nstack-1:
|
||||
obs_[..., i * nc : (i + 1) * nc] *= mask
|
||||
mask[:, 1:, ...] *= mask[:, :-1, ...]
|
||||
|
||||
return obs_
|
||||
|
||||
def test_stack_obs():
|
||||
nstack = 7
|
||||
nenv = 1
|
||||
nsteps = 5
|
||||
|
||||
obs_shape = (2, 3, nstack)
|
||||
|
||||
enc_obs_shape = (nenv, nsteps + nstack) + obs_shape[:-1] + (1,)
|
||||
enc_obs = np.random.random(enc_obs_shape)
|
||||
dones = np.random.randint(low=0, high=2, size=(nenv, nsteps))
|
||||
|
||||
stacked_obs_ref = _stack_obs_ref(enc_obs, dones, nsteps=nsteps)
|
||||
stacked_obs_test = _stack_obs(enc_obs, dones, nsteps=nsteps)
|
||||
|
||||
np.testing.assert_allclose(stacked_obs_ref, stacked_obs_test)
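For intuition about what the vectorized `_stack_obs` above produces, here is a small hedged walk-through with toy shapes; it assumes `_stack_obs` as defined above is in scope (its exact import path is not shown in the diff):

```python
import numpy as np

# Toy setting: 1 env, 2 steps, 3-frame stack, 1x1 "images" with a single channel,
# so every value is easy to read. No episode ends, so the dones mask is all zeros.
nenv, nsteps, nstack = 1, 2, 3
enc_obs = np.arange(nenv * (nsteps + nstack), dtype=np.float32).reshape(
    nenv, nsteps + nstack, 1, 1, 1)
dones = np.zeros((nenv, nsteps), dtype=np.float32)

stacked = _stack_obs(enc_obs, dones, nsteps=nsteps)
print(stacked.shape)        # (1, 3, 1, 1, 3): (nenv, nsteps + 1, nh, nw, nstack * nc)
print(stacked[0, 0, 0, 0])  # [0. 1. 2.]: the current frame (2) and its two predecessors for step 0
print(stacked[0, 2, 0, 0])  # [2. 3. 4.]: likewise for the last step
# With nonzero dones, frames from before an episode boundary are zeroed out
# instead of being stacked across the boundary.
```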
|
||||
|
@@ -1,4 +1,3 @@
def atari():
    return dict(
        lrschedule='constant'
    )
defaults = {
    'atari': dict(lrschedule='constant')
}
|
||||
|
@@ -1,30 +1,31 @@
|
||||
import numpy as np
|
||||
from baselines.common.runners import AbstractEnvRunner
|
||||
from baselines.common.vec_env.vec_frame_stack import VecFrameStack
|
||||
from gym import spaces
|
||||
|
||||
|
||||
class Runner(AbstractEnvRunner):
|
||||
|
||||
def __init__(self, env, model, nsteps, nstack):
|
||||
def __init__(self, env, model, nsteps):
|
||||
super().__init__(env=env, model=model, nsteps=nsteps)
|
||||
self.nstack = nstack
|
||||
nh, nw, nc = env.observation_space.shape
|
||||
self.nc = nc # nc = 1 for atari, but just in case
|
||||
assert isinstance(env.action_space, spaces.Discrete), 'This ACER implementation works only with discrete action spaces!'
|
||||
assert isinstance(env, VecFrameStack)
|
||||
|
||||
self.nact = env.action_space.n
|
||||
nenv = self.nenv
|
||||
self.nbatch = nenv * nsteps
|
||||
self.batch_ob_shape = (nenv*(nsteps+1), nh, nw, nc*nstack)
|
||||
self.obs = np.zeros((nenv, nh, nw, nc * nstack), dtype=np.uint8)
|
||||
obs = env.reset()
|
||||
self.update_obs(obs)
|
||||
self.batch_ob_shape = (nenv*(nsteps+1),) + env.observation_space.shape
|
||||
|
||||
self.obs = env.reset()
|
||||
self.obs_dtype = env.observation_space.dtype
|
||||
self.ac_dtype = env.action_space.dtype
|
||||
self.nstack = self.env.nstack
|
||||
self.nc = self.batch_ob_shape[-1] // self.nstack
|
||||
|
||||
def update_obs(self, obs, dones=None):
|
||||
#self.obs = obs
|
||||
if dones is not None:
|
||||
self.obs *= (1 - dones.astype(np.uint8))[:, None, None, None]
|
||||
self.obs = np.roll(self.obs, shift=-self.nc, axis=3)
|
||||
self.obs[:, :, :, -self.nc:] = obs[:, :, :, :]
|
||||
|
||||
def run(self):
|
||||
enc_obs = np.split(self.obs, self.nstack, axis=3) # so now list of obs steps
|
||||
# enc_obs = np.split(self.obs, self.nstack, axis=3) # so now list of obs steps
|
||||
enc_obs = np.split(self.env.stackedobs, self.env.nstack, axis=-1)
|
||||
mb_obs, mb_actions, mb_mus, mb_dones, mb_rewards = [], [], [], [], []
|
||||
for _ in range(self.nsteps):
|
||||
actions, mus, states = self.model._step(self.obs, S=self.states, M=self.dones)
|
||||
@@ -36,15 +37,15 @@ class Runner(AbstractEnvRunner):
|
||||
# states information for statefull models like LSTM
|
||||
self.states = states
|
||||
self.dones = dones
|
||||
self.update_obs(obs, dones)
|
||||
self.obs = obs
|
||||
mb_rewards.append(rewards)
|
||||
enc_obs.append(obs)
|
||||
enc_obs.append(obs[..., -self.nc:])
|
||||
mb_obs.append(np.copy(self.obs))
|
||||
mb_dones.append(self.dones)
|
||||
|
||||
enc_obs = np.asarray(enc_obs, dtype=np.uint8).swapaxes(1, 0)
|
||||
mb_obs = np.asarray(mb_obs, dtype=np.uint8).swapaxes(1, 0)
|
||||
mb_actions = np.asarray(mb_actions, dtype=np.int32).swapaxes(1, 0)
|
||||
enc_obs = np.asarray(enc_obs, dtype=self.obs_dtype).swapaxes(1, 0)
|
||||
mb_obs = np.asarray(mb_obs, dtype=self.obs_dtype).swapaxes(1, 0)
|
||||
mb_actions = np.asarray(mb_actions, dtype=self.ac_dtype).swapaxes(1, 0)
|
||||
mb_rewards = np.asarray(mb_rewards, dtype=np.float32).swapaxes(1, 0)
|
||||
mb_mus = np.asarray(mb_mus, dtype=np.float32).swapaxes(1, 0)
|
||||
|
||||
|
@@ -2,7 +2,7 @@ import os.path as osp
|
||||
import time
|
||||
import functools
|
||||
import tensorflow as tf
|
||||
from baselines import logger
|
||||
from baselines import logger, registry
|
||||
|
||||
from baselines.common import set_global_seeds, explained_variance
|
||||
from baselines.common.policies import build_policy
|
||||
@@ -11,6 +11,7 @@ from baselines.common.tf_util import get_session, save_variables, load_variables
|
||||
from baselines.a2c.runner import Runner
|
||||
from baselines.a2c.utils import Scheduler, find_trainable_variables
|
||||
from baselines.acktr import kfac
|
||||
from baselines.acktr.defaults import defaults
|
||||
|
||||
|
||||
class Model(object):
|
||||
@@ -90,6 +91,7 @@ class Model(object):
|
||||
self.initial_state = step_model.initial_state
|
||||
tf.global_variables_initializer().run(session=sess)
|
||||
|
||||
@registry.register('acktr', defaults=defaults)
|
||||
def learn(network, env, seed, total_timesteps=int(40e6), gamma=0.99, log_interval=1, nprocs=32, nsteps=20,
|
||||
ent_coef=0.01, vf_coef=0.5, vf_fisher_coef=1.0, lr=0.25, max_grad_norm=0.5,
|
||||
kfac_clip=0.001, save_interval=None, lrschedule='linear', load_path=None, is_async=True, **network_kwargs):
|
||||
|
@@ -1,5 +1,6 @@
def mujoco():
    return dict(
defaults = {
    'mujoco': dict(
        nsteps=2500,
        value_network='copy'
    )
}
|
||||
|
@@ -16,30 +16,64 @@ from baselines.common import set_global_seeds
|
||||
from baselines.common.atari_wrappers import make_atari, wrap_deepmind
|
||||
from baselines.common.vec_env.subproc_vec_env import SubprocVecEnv
|
||||
from baselines.common.vec_env.dummy_vec_env import DummyVecEnv
|
||||
from baselines.common.retro_wrappers import RewardScaler
|
||||
from baselines.common.vec_env.vec_frame_stack import VecFrameStack
|
||||
|
||||
from baselines.common import retro_wrappers
|
||||
|
||||
def make_vec_env(env_id, env_type, num_env, seed, wrapper_kwargs=None, start_index=0, reward_scale=1.0):
|
||||
def make_vec_env(env_id, env_type, num_env, seed, wrapper_kwargs=None, start_index=0, reward_scale=1.0, gamestate=None, frame_stack_size=1):
|
||||
"""
|
||||
Create a wrapped, monitored SubprocVecEnv for Atari and MuJoCo.
|
||||
Create a wrapped, monitored SubprocVecEnv
|
||||
"""
|
||||
if wrapper_kwargs is None: wrapper_kwargs = {}
|
||||
mpi_rank = MPI.COMM_WORLD.Get_rank() if MPI else 0
|
||||
def make_env(rank): # pylint: disable=C0111
|
||||
def _thunk():
|
||||
env = make_atari(env_id) if env_type == 'atari' else gym.make(env_id)
|
||||
env.seed(seed + 10000*mpi_rank + rank if seed is not None else None)
|
||||
env = Monitor(env,
|
||||
logger.get_dir() and os.path.join(logger.get_dir(), str(mpi_rank) + '.' + str(rank)),
|
||||
allow_early_resets=True)
|
||||
seed = seed + 10000 * mpi_rank if seed is not None else None
|
||||
def make_thunk(rank):
|
||||
return lambda: make_env(
|
||||
env_id=env_id,
|
||||
env_type=env_type,
|
||||
subrank = rank,
|
||||
seed=seed,
|
||||
reward_scale=reward_scale,
|
||||
gamestate=gamestate
|
||||
)
|
||||
|
||||
if env_type == 'atari': return wrap_deepmind(env, **wrapper_kwargs)
|
||||
elif reward_scale != 1: return RewardScaler(env, reward_scale)
|
||||
else: return env
|
||||
return _thunk
|
||||
set_global_seeds(seed)
|
||||
if num_env > 1: return SubprocVecEnv([make_env(i + start_index) for i in range(num_env)])
|
||||
else: return DummyVecEnv([make_env(start_index)])
|
||||
if num_env > 1:
|
||||
venv = SubprocVecEnv([make_thunk(i + start_index) for i in range(num_env)])
|
||||
else:
|
||||
venv = DummyVecEnv([make_thunk(start_index)])
|
||||
|
||||
if frame_stack_size > 1:
|
||||
venv = VecFrameStack(venv, frame_stack_size)
|
||||
|
||||
return venv
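A hedged usage sketch of the reworked `make_vec_env` (the frame stack is now applied once, at the vectorized level, via `frame_stack_size`, rather than per algorithm); the environment id is just an example and the printed shape assumes the default `wrap_deepmind` preprocessing:

```python
from baselines.common.cmd_util import make_vec_env

# 4 Atari workers, seeded, with the usual 4-frame stack applied by VecFrameStack.
venv = make_vec_env('BreakoutNoFrameskip-v4', 'atari', num_env=4, seed=0,
                    frame_stack_size=4)
obs = venv.reset()
print(obs.shape)  # (4, 84, 84, 4) with the default wrap_deepmind settings
```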
|
||||
|
||||
|
||||
|
||||
def make_env(env_id, env_type, subrank=0, seed=None, reward_scale=1.0, gamestate=None, wrapper_kwargs={}):
|
||||
mpi_rank = MPI.COMM_WORLD.Get_rank() if MPI else 0
|
||||
if env_type == 'atari':
|
||||
env = make_atari(env_id)
|
||||
elif env_type == 'retro':
|
||||
import retro
|
||||
gamestate = gamestate or retro.State.DEFAULT
|
||||
env = retro_wrappers.make_retro(game=env_id, max_episode_steps=10000, use_restricted_actions=retro.Actions.DISCRETE, state=gamestate)
|
||||
else:
|
||||
env = gym.make(env_id)
|
||||
|
||||
env.seed(seed + subrank if seed is not None else None)
|
||||
env = Monitor(env,
|
||||
logger.get_dir() and os.path.join(logger.get_dir(), str(mpi_rank) + '.' + str(subrank)),
|
||||
allow_early_resets=True)
|
||||
|
||||
if env_type == 'atari':
|
||||
return wrap_deepmind(env, **wrapper_kwargs)
|
||||
elif reward_scale != 1:
|
||||
return retro_wrappers.RewardScaler(env, reward_scale)
|
||||
else:
|
||||
return env
|
||||
|
||||
|
||||
|
||||
def make_mujoco_env(env_id, seed, reward_scale=1.0):
|
||||
"""
|
||||
|
@@ -1,98 +0,0 @@
|
||||
from .running_stat import RunningStat
|
||||
from collections import deque
|
||||
import numpy as np
|
||||
|
||||
class Filter(object):
|
||||
def __call__(self, x, update=True):
|
||||
raise NotImplementedError
|
||||
def reset(self):
|
||||
pass
|
||||
|
||||
class IdentityFilter(Filter):
|
||||
def __call__(self, x, update=True):
|
||||
return x
|
||||
|
||||
class CompositionFilter(Filter):
|
||||
def __init__(self, fs):
|
||||
self.fs = fs
|
||||
def __call__(self, x, update=True):
|
||||
for f in self.fs:
|
||||
x = f(x)
|
||||
return x
|
||||
def output_shape(self, input_space):
|
||||
out = input_space.shape
|
||||
for f in self.fs:
|
||||
out = f.output_shape(out)
|
||||
return out
|
||||
|
||||
class ZFilter(Filter):
|
||||
"""
|
||||
y = (x-mean)/std
|
||||
using running estimates of mean,std
|
||||
"""
|
||||
|
||||
def __init__(self, shape, demean=True, destd=True, clip=10.0):
|
||||
self.demean = demean
|
||||
self.destd = destd
|
||||
self.clip = clip
|
||||
|
||||
self.rs = RunningStat(shape)
|
||||
|
||||
def __call__(self, x, update=True):
|
||||
if update: self.rs.push(x)
|
||||
if self.demean:
|
||||
x = x - self.rs.mean
|
||||
if self.destd:
|
||||
x = x / (self.rs.std+1e-8)
|
||||
if self.clip:
|
||||
x = np.clip(x, -self.clip, self.clip)
|
||||
return x
|
||||
def output_shape(self, input_space):
|
||||
return input_space.shape
|
||||
|
||||
class AddClock(Filter):
|
||||
def __init__(self):
|
||||
self.count = 0
|
||||
def reset(self):
|
||||
self.count = 0
|
||||
def __call__(self, x, update=True):
|
||||
return np.append(x, self.count/100.0)
|
||||
def output_shape(self, input_space):
|
||||
return (input_space.shape[0]+1,)
|
||||
|
||||
class FlattenFilter(Filter):
|
||||
def __call__(self, x, update=True):
|
||||
return x.ravel()
|
||||
def output_shape(self, input_space):
|
||||
return (int(np.prod(input_space.shape)),)
|
||||
|
||||
class Ind2OneHotFilter(Filter):
|
||||
def __init__(self, n):
|
||||
self.n = n
|
||||
def __call__(self, x, update=True):
|
||||
out = np.zeros(self.n)
|
||||
out[x] = 1
|
||||
return out
|
||||
def output_shape(self, input_space):
|
||||
return (input_space.n,)
|
||||
|
||||
class DivFilter(Filter):
|
||||
def __init__(self, divisor):
|
||||
self.divisor = divisor
|
||||
def __call__(self, x, update=True):
|
||||
return x / self.divisor
|
||||
def output_shape(self, input_space):
|
||||
return input_space.shape
|
||||
|
||||
class StackFilter(Filter):
|
||||
def __init__(self, length):
|
||||
self.stack = deque(maxlen=length)
|
||||
def reset(self):
|
||||
self.stack.clear()
|
||||
def __call__(self, x, update=True):
|
||||
self.stack.append(x)
|
||||
while len(self.stack) < self.stack.maxlen:
|
||||
self.stack.append(x)
|
||||
return np.concatenate(self.stack, axis=-1)
|
||||
def output_shape(self, input_space):
|
||||
return input_space.shape[:-1] + (input_space.shape[-1] * self.stack.maxlen,)
|
@@ -1,46 +0,0 @@
|
||||
import numpy as np
|
||||
|
||||
# http://www.johndcook.com/blog/standard_deviation/
|
||||
class RunningStat(object):
|
||||
def __init__(self, shape):
|
||||
self._n = 0
|
||||
self._M = np.zeros(shape)
|
||||
self._S = np.zeros(shape)
|
||||
def push(self, x):
|
||||
x = np.asarray(x)
|
||||
assert x.shape == self._M.shape
|
||||
self._n += 1
|
||||
if self._n == 1:
|
||||
self._M[...] = x
|
||||
else:
|
||||
oldM = self._M.copy()
|
||||
self._M[...] = oldM + (x - oldM)/self._n
|
||||
self._S[...] = self._S + (x - oldM)*(x - self._M)
|
||||
@property
|
||||
def n(self):
|
||||
return self._n
|
||||
@property
|
||||
def mean(self):
|
||||
return self._M
|
||||
@property
|
||||
def var(self):
|
||||
return self._S/(self._n - 1) if self._n > 1 else np.square(self._M)
|
||||
@property
|
||||
def std(self):
|
||||
return np.sqrt(self.var)
|
||||
@property
|
||||
def shape(self):
|
||||
return self._M.shape
|
||||
|
||||
def test_running_stat():
|
||||
for shp in ((), (3,), (3,4)):
|
||||
li = []
|
||||
rs = RunningStat(shp)
|
||||
for _ in range(5):
|
||||
val = np.random.randn(*shp)
|
||||
rs.push(val)
|
||||
li.append(val)
|
||||
m = np.mean(li, axis=0)
|
||||
assert np.allclose(rs.mean, m)
|
||||
v = np.square(m) if (len(li) == 1) else np.var(li, ddof=1, axis=0)
|
||||
assert np.allclose(rs.var, v)
|
@@ -13,6 +13,7 @@ common_kwargs = dict(
|
||||
|
||||
learn_kwargs = {
|
||||
'a2c' : dict(nsteps=32, value_network='copy', lr=0.05),
|
||||
'acer': dict(value_network='copy'),
|
||||
'acktr': dict(nsteps=32, value_network='copy', is_async=False),
|
||||
'deepq': dict(total_timesteps=20000),
|
||||
'ppo2': dict(value_network='copy'),
|
||||
@@ -40,4 +41,4 @@ def test_cartpole(alg):
|
||||
reward_per_episode_test(env_fn, learn_fn, 100)
|
||||
|
||||
if __name__ == '__main__':
|
||||
test_cartpole('deepq')
|
||||
test_cartpole('acer')
|
||||
|
@@ -20,8 +20,8 @@ learn_kwargs = {
|
||||
}
|
||||
|
||||
|
||||
algos_disc = ['a2c', 'deepq', 'ppo2', 'trpo_mpi']
|
||||
algos_cont = ['a2c', 'ddpg', 'ppo2', 'trpo_mpi']
|
||||
algos_disc = ['a2c', 'acktr', 'deepq', 'ppo2', 'trpo_mpi']
|
||||
algos_cont = ['a2c', 'acktr', 'ddpg', 'ppo2', 'trpo_mpi']
|
||||
|
||||
@pytest.mark.slow
|
||||
@pytest.mark.parametrize("alg", algos_disc)
|
||||
|
@@ -17,8 +17,7 @@ common_kwargs = {
|
||||
|
||||
learn_args = {
|
||||
'a2c': dict(total_timesteps=50000),
|
||||
# TODO need to resolve inference (step) API differences for acer; also slow
|
||||
# 'acer': dict(seed=0, total_timesteps=1000),
|
||||
'acer': dict(total_timesteps=20000),
|
||||
'deepq': dict(total_timesteps=5000),
|
||||
'acktr': dict(total_timesteps=30000),
|
||||
'ppo2': dict(total_timesteps=50000, lr=1e-3, nsteps=128, ent_coef=0.0),
|
||||
@@ -47,4 +46,4 @@ def test_mnist(alg):
|
||||
simple_test(env_fn, learn_fn, 0.6)
|
||||
|
||||
if __name__ == '__main__':
|
||||
test_mnist('deepq')
|
||||
test_mnist('acer')
|
||||
|
@@ -17,6 +17,7 @@ learn_kwargs = {
|
||||
'deepq': {},
|
||||
'a2c': {},
|
||||
'acktr': {},
|
||||
'acer': {},
|
||||
'ppo2': {'nminibatches': 1, 'nsteps': 10},
|
||||
'trpo_mpi': {},
|
||||
}
|
||||
@@ -37,7 +38,7 @@ def test_serialization(learn_fn, network_fn):
|
||||
'''
|
||||
|
||||
|
||||
if network_fn.endswith('lstm') and learn_fn in ['acktr', 'trpo_mpi', 'deepq']:
|
||||
if network_fn.endswith('lstm') and learn_fn in ['acer', 'acktr', 'trpo_mpi', 'deepq']:
|
||||
# TODO make acktr work with recurrent policies
|
||||
# and test
|
||||
# github issue: https://github.com/openai/baselines/issues/660
|
||||
|
@@ -10,11 +10,11 @@ from baselines.ddpg.noise import AdaptiveParamNoiseSpec, NormalActionNoise, Orns
|
||||
|
||||
import baselines.common.tf_util as U
|
||||
|
||||
from baselines import logger
|
||||
from baselines import logger, registry
|
||||
import numpy as np
|
||||
from mpi4py import MPI
|
||||
|
||||
|
||||
@registry.register('ddpg')
|
||||
def learn(network, env,
|
||||
seed=None,
|
||||
total_timesteps=None,
|
||||
|
@@ -8,7 +8,7 @@ import numpy as np
|
||||
|
||||
import baselines.common.tf_util as U
|
||||
from baselines.common.tf_util import load_variables, save_variables
|
||||
from baselines import logger
|
||||
from baselines import logger, registry
|
||||
from baselines.common.schedules import LinearSchedule
|
||||
from baselines.common import set_global_seeds
|
||||
|
||||
@@ -18,6 +18,7 @@ from baselines.deepq.utils import ObservationInput
|
||||
|
||||
from baselines.common.tf_util import get_session
|
||||
from baselines.deepq.models import build_q_func
|
||||
from baselines.deepq.defaults import defaults
|
||||
|
||||
|
||||
class ActWrapper(object):
|
||||
@@ -92,6 +93,7 @@ def load_act(path):
|
||||
return ActWrapper.load_act(path)
|
||||
|
||||
|
||||
@registry.register('deepq', supports_vecenv=False, defaults=defaults)
|
||||
def learn(env,
|
||||
network,
|
||||
seed=None,
|
||||
@@ -124,16 +126,12 @@ def learn(env,
|
||||
-------
|
||||
env: gym.Env
|
||||
environment to train on
|
||||
q_func: (tf.Variable, int, str, bool) -> tf.Variable
|
||||
the model that takes the following inputs:
|
||||
observation_in: object
|
||||
the output of observation placeholder
|
||||
num_actions: int
|
||||
number of actions
|
||||
scope: str
|
||||
reuse: bool
|
||||
should be passed to outer variable scope
|
||||
and returns a tensor of shape (batch_size, num_actions) with values of every action.
|
||||
network: string or a function
|
||||
neural network to use as a q function approximator. If string, has to be one of the names of registered models in baselines.common.models
|
||||
(mlp, cnn, conv_only). If a function, should take an observation tensor and return a latent variable tensor, which
|
||||
will be mapped to the Q function heads (see build_q_func in baselines.deepq.models for details on that)
|
||||
seed: int or None
|
||||
prng seed. The runs with the same seed "should" give the same results. If None, no seeding is used.
|
||||
lr: float
|
||||
learning rate for adam optimizer
|
||||
total_timesteps: int
|
||||
|
@@ -16,6 +16,8 @@ def atari():
|
||||
dueling=True
|
||||
)
|
||||
|
||||
def retro():
|
||||
return atari()
|
||||
|
||||
defaults = {
|
||||
'atari': atari(),
'retro': atari()
|
||||
}
|
||||
|
@@ -30,3 +30,51 @@ python -m baselines.her.experiment.train --num_cpu 19
|
||||
This will require a machine with a sufficient number of physical CPU cores. In our experiments,
|
||||
we used [Azure's D15v2 instances](https://docs.microsoft.com/en-us/azure/virtual-machines/linux/sizes),
|
||||
which have 20 physical cores. We only scheduled the experiment on 19 of those to leave some head-room on the system.
|
||||
|
||||
|
||||
## Hindsight Experience Replay with Demonstrations
|
||||
Using pre-recorded demonstrations to overcome the exploration problem in HER-based reinforcement learning.
|
||||
For details, please read the [paper](https://arxiv.org/pdf/1709.10089.pdf).
|
||||
|
||||
### Getting started
|
||||
The first step is to generate the demonstration dataset. This can be done in two ways: either use a VR system with physical VR trackers to manipulate the arm, or, more simply, write a script that carries out the task. Some tasks are complex enough (e.g. Fetch Push) that hardcoding a script is difficult, but our focus here is on providing an algorithm that lets the agent learn from demonstrations, not on how the demonstrations are generated, so the data collection method is left to the reader's choice.
|
||||
|
||||
We provide a script for the Fetch Pick and Place task; to generate demonstrations for it, execute:
|
||||
```bash
|
||||
python experiment/data_generation/fetch_data_generation.py
|
||||
```
|
||||
This outputs the ```data_fetch_random_100.npz``` file, which is our data file.
|
||||
|
||||
#### Configuration
|
||||
The provided configuration trains an agent with HER but without demonstrations. For the HER algorithm to learn from demonstrations, a few parameters need to change; to do that, set:
|
||||
|
||||
* bc_loss: 1 - whether or not to use the behavior cloning loss as an auxiliary loss
|
||||
* q_filter: 1 - whether or not a Q value filter should be used on the Actor outputs
|
||||
* num_demo: 100 - number of expert demo episodes
|
||||
* demo_batch_size: 128 - number of samples to be used from the demonstrations buffer, per mpi thread
|
||||
* prm_loss_weight: 0.001 - Weight corresponding to the primary loss
|
||||
* aux_loss_weight: 0.0078 - Weight corresponding to the auxiliary loss, also called the cloning loss
|
||||
|
||||
Apart from these changes, the reported results also use the following configuration changes (a combined sketch of all of these overrides is shown after the list below):
|
||||
|
||||
* n_cycles: 20 - per epoch
|
||||
* batch_size: 1024 - per mpi thread, total batch size
|
||||
* random_eps: 0.1 - percentage of time a random action is taken
|
||||
* noise_eps: 0.1 - std of gaussian noise added to not-completely-random actions
|
||||
|
||||
Now training an agent with pre-recorded demonstrations:
|
||||
```bash
|
||||
python -m baselines.her.experiment.train --env=FetchPickAndPlace-v0 --n_epochs=1000 --demo_file=/Path/to/demo_file.npz --num_cpu=1
|
||||
```
|
||||
|
||||
This will train a DDPG+HER agent on the `FetchPickAndPlace` environment by using previously generated demonstration data.
|
||||
To inspect what the agent has learned, use the play script as described above.
|
||||
|
||||
### Results
|
||||
Training with demonstrations helps overcome the exploration problem and achieves faster and better convergence. The following graphs contrast training with and without demonstration data; we report the mean Q values vs. epoch and the success rate vs. epoch:
|
||||
|
||||
|
||||
<div class="imgcap" align="middle">
|
||||
<center><img src="../../data/fetchPickAndPlaceContrast.png"></center>
|
||||
<div class="thecap" align="middle"><b>Training results for Fetch Pick and Place task constrasting between training with and without demonstration data.</b></div>
|
||||
</div>
|
||||
|
@@ -6,7 +6,7 @@ from tensorflow.contrib.staging import StagingArea
|
||||
|
||||
from baselines import logger
|
||||
from baselines.her.util import (
|
||||
import_function, store_args, flatten_grads, transitions_in_episode_batch)
|
||||
import_function, store_args, flatten_grads, transitions_in_episode_batch, convert_episode_to_batch_major)
|
||||
from baselines.her.normalizer import Normalizer
|
||||
from baselines.her.replay_buffer import ReplayBuffer
|
||||
from baselines.common.mpi_adam import MpiAdam
|
||||
@@ -16,13 +16,17 @@ def dims_to_shapes(input_dims):
|
||||
return {key: tuple([val]) if val > 0 else tuple() for key, val in input_dims.items()}
|
||||
|
||||
|
||||
global demoBuffer #buffer for demonstrations
|
||||
|
||||
class DDPG(object):
|
||||
@store_args
|
||||
def __init__(self, input_dims, buffer_size, hidden, layers, network_class, polyak, batch_size,
|
||||
Q_lr, pi_lr, norm_eps, norm_clip, max_u, action_l2, clip_obs, scope, T,
|
||||
rollout_batch_size, subtract_goals, relative_goals, clip_pos_returns, clip_return,
|
||||
bc_loss, q_filter, num_demo, demo_batch_size, prm_loss_weight, aux_loss_weight,
|
||||
sample_transitions, gamma, reuse=False, **kwargs):
|
||||
"""Implementation of DDPG that is used in combination with Hindsight Experience Replay (HER).
|
||||
Added functionality to use demonstrations for training to overcome the exploration problem.
|
||||
|
||||
Args:
|
||||
input_dims (dict of ints): dimensions for the observation (o), the goal (g), and the
|
||||
@@ -50,6 +54,12 @@ class DDPG(object):
|
||||
sample_transitions (function) function that samples from the replay buffer
|
||||
gamma (float): gamma used for Q learning updates
|
||||
reuse (boolean): whether or not the networks should be reused
|
||||
bc_loss: whether or not the behavior cloning loss should be used as an auxiliary loss
q_filter: whether or not a filter on the Q value update should be used when training with demonstrations
num_demo: number of demonstration episodes to be used in the demonstration buffer
demo_batch_size: number of samples to be used from the demonstrations buffer, per MPI thread
prm_loss_weight: weight corresponding to the primary loss
aux_loss_weight: weight corresponding to the auxiliary loss, also called the cloning loss
|
||||
"""
|
||||
if self.clip_return is None:
|
||||
self.clip_return = np.inf
|
||||
@@ -92,6 +102,9 @@ class DDPG(object):
|
||||
buffer_size = (self.buffer_size // self.rollout_batch_size) * self.rollout_batch_size
|
||||
self.buffer = ReplayBuffer(buffer_shapes, buffer_size, self.T, self.sample_transitions)
|
||||
|
||||
global demoBuffer
|
||||
demoBuffer = ReplayBuffer(buffer_shapes, buffer_size, self.T, self.sample_transitions) #initialize the demo buffer; in the same way as the primary data buffer
|
||||
|
||||
def _random_action(self, n):
|
||||
return np.random.uniform(low=-self.max_u, high=self.max_u, size=(n, self.dimu))
|
||||
|
||||
@@ -138,6 +151,57 @@ class DDPG(object):
|
||||
else:
|
||||
return ret
|
||||
|
||||
def initDemoBuffer(self, demoDataFile, update_stats=True): #function that initializes the demo buffer
|
||||
|
||||
demoData = np.load(demoDataFile) #load the demonstration data from data file
|
||||
info_keys = [key.replace('info_', '') for key in self.input_dims.keys() if key.startswith('info_')]
|
||||
info_values = [np.empty((self.T, 1, self.input_dims['info_' + key]), np.float32) for key in info_keys]
|
||||
|
||||
for epsd in range(self.num_demo): # we initialize the whole demo buffer at the start of the training
|
||||
obs, acts, goals, achieved_goals = [], [] ,[] ,[]
|
||||
i = 0
|
||||
for transition in range(self.T):
|
||||
obs.append([demoData['obs'][epsd ][transition].get('observation')])
|
||||
acts.append([demoData['acs'][epsd][transition]])
|
||||
goals.append([demoData['obs'][epsd][transition].get('desired_goal')])
|
||||
achieved_goals.append([demoData['obs'][epsd][transition].get('achieved_goal')])
|
||||
for idx, key in enumerate(info_keys):
|
||||
info_values[idx][transition, i] = demoData['info'][epsd][transition][key]
|
||||
|
||||
obs.append([demoData['obs'][epsd][self.T].get('observation')])
|
||||
achieved_goals.append([demoData['obs'][epsd][self.T].get('achieved_goal')])
|
||||
|
||||
episode = dict(o=obs,
|
||||
u=acts,
|
||||
g=goals,
|
||||
ag=achieved_goals)
|
||||
for key, value in zip(info_keys, info_values):
|
||||
episode['info_{}'.format(key)] = value
|
||||
|
||||
episode = convert_episode_to_batch_major(episode)
|
||||
global demoBuffer
|
||||
demoBuffer.store_episode(episode) # create the observation dict and append them into the demonstration buffer
|
||||
|
||||
print("Demo buffer size currently ", demoBuffer.get_current_size()) #print out the demonstration buffer size
|
||||
|
||||
if update_stats:
|
||||
# add transitions to normalizer to normalize the demo data as well
|
||||
episode['o_2'] = episode['o'][:, 1:, :]
|
||||
episode['ag_2'] = episode['ag'][:, 1:, :]
|
||||
num_normalizing_transitions = transitions_in_episode_batch(episode)
|
||||
transitions = self.sample_transitions(episode, num_normalizing_transitions)
|
||||
|
||||
o, o_2, g, ag = transitions['o'], transitions['o_2'], transitions['g'], transitions['ag']
|
||||
transitions['o'], transitions['g'] = self._preprocess_og(o, ag, g)
|
||||
# No need to preprocess the o_2 and g_2 since this is only used for stats
|
||||
|
||||
self.o_stats.update(transitions['o'])
|
||||
self.g_stats.update(transitions['g'])
|
||||
|
||||
self.o_stats.recompute_stats()
|
||||
self.g_stats.recompute_stats()
|
||||
episode.clear()
|
||||
|
||||
def store_episode(self, episode_batch, update_stats=True):
|
||||
"""
|
||||
episode_batch: array of batch_size x (T or T+1) x dim_key
|
||||
@@ -185,7 +249,18 @@ class DDPG(object):
|
||||
self.pi_adam.update(pi_grad, self.pi_lr)
|
||||
|
||||
def sample_batch(self):
|
||||
transitions = self.buffer.sample(self.batch_size)
|
||||
if self.bc_loss: #use demonstration buffer to sample as well if bc_loss flag is set TRUE
|
||||
transitions = self.buffer.sample(self.batch_size - self.demo_batch_size)
|
||||
global demoBuffer
|
||||
transitionsDemo = demoBuffer.sample(self.demo_batch_size) #sample from the demo buffer
|
||||
for k, values in transitionsDemo.items():
|
||||
rolloutV = transitions[k].tolist()
|
||||
for v in values:
|
||||
rolloutV.append(v.tolist())
|
||||
transitions[k] = np.array(rolloutV)
|
||||
else:
|
||||
transitions = self.buffer.sample(self.batch_size) #otherwise only sample from primary buffer
|
||||
|
||||
o, o_2, g = transitions['o'], transitions['o_2'], transitions['g']
|
||||
ag, ag_2 = transitions['ag'], transitions['ag_2']
|
||||
transitions['o'], transitions['g'] = self._preprocess_og(o, ag, g)
|
||||
@@ -248,6 +323,9 @@ class DDPG(object):
|
||||
for i, key in enumerate(self.stage_shapes.keys())])
|
||||
batch_tf['r'] = tf.reshape(batch_tf['r'], [-1, 1])
|
||||
|
||||
#choose only the demo buffer samples
|
||||
mask = np.concatenate((np.zeros(self.batch_size - self.demo_batch_size), np.ones(self.demo_batch_size)), axis = 0)
|
||||
|
||||
# networks
|
||||
with tf.variable_scope('main') as vs:
|
||||
if reuse:
|
||||
@@ -270,6 +348,25 @@ class DDPG(object):
|
||||
clip_range = (-self.clip_return, 0. if self.clip_pos_returns else np.inf)
|
||||
target_tf = tf.clip_by_value(batch_tf['r'] + self.gamma * target_Q_pi_tf, *clip_range)
|
||||
self.Q_loss_tf = tf.reduce_mean(tf.square(tf.stop_gradient(target_tf) - self.main.Q_tf))
|
||||
|
||||
if self.bc_loss ==1 and self.q_filter == 1 : # train with demonstrations and use bc_loss and q_filter both
|
||||
maskMain = tf.reshape(tf.boolean_mask(self.main.Q_tf > self.main.Q_pi_tf, mask), [-1]) #where is the demonstrator action better than actor action according to the critic? choose those samples only
|
||||
#define the cloning loss on the actor's actions only on the samples which adhere to the above masks
|
||||
self.cloning_loss_tf = tf.reduce_sum(tf.square(tf.boolean_mask(tf.boolean_mask((self.main.pi_tf), mask), maskMain, axis=0) - tf.boolean_mask(tf.boolean_mask((batch_tf['u']), mask), maskMain, axis=0)))
|
||||
self.pi_loss_tf = -self.prm_loss_weight * tf.reduce_mean(self.main.Q_pi_tf) #primary loss scaled by its respective weight prm_loss_weight
|
||||
self.pi_loss_tf += self.prm_loss_weight * self.action_l2 * tf.reduce_mean(tf.square(self.main.pi_tf / self.max_u)) #L2 loss on action values scaled by the same weight prm_loss_weight
|
||||
self.pi_loss_tf += self.aux_loss_weight * self.cloning_loss_tf #adding the cloning loss to the actor loss as an auxilliary loss scaled by its weight aux_loss_weight
|
||||
|
||||
elif self.bc_loss == 1 and self.q_filter == 0: # train with demonstrations without q_filter
|
||||
self.cloning_loss_tf = tf.reduce_sum(tf.square(tf.boolean_mask((self.main.pi_tf), mask) - tf.boolean_mask((batch_tf['u']), mask)))
|
||||
self.pi_loss_tf = -self.prm_loss_weight * tf.reduce_mean(self.main.Q_pi_tf)
|
||||
self.pi_loss_tf += self.prm_loss_weight * self.action_l2 * tf.reduce_mean(tf.square(self.main.pi_tf / self.max_u))
|
||||
self.pi_loss_tf += self.aux_loss_weight * self.cloning_loss_tf
|
||||
|
||||
else: #If not training with demonstrations
|
||||
self.pi_loss_tf = -tf.reduce_mean(self.main.Q_pi_tf)
|
||||
self.pi_loss_tf += self.action_l2 * tf.reduce_mean(tf.square(self.main.pi_tf / self.max_u))
|
||||
|
||||
self.pi_loss_tf = -tf.reduce_mean(self.main.Q_pi_tf)
|
||||
self.pi_loss_tf += self.action_l2 * tf.reduce_mean(tf.square(self.main.pi_tf / self.max_u))
|
||||
Q_grads_tf = tf.gradients(self.Q_loss_tf, self._vars('main/Q'))
|
||||
|
@@ -44,6 +44,13 @@ DEFAULT_PARAMS = {
|
||||
# normalization
|
||||
'norm_eps': 0.01, # epsilon used for observation normalization
|
||||
'norm_clip': 5, # normalized observations are cropped to this values
|
||||
|
||||
'bc_loss': 0,  # whether or not to use the behavior cloning loss as an auxiliary loss
'q_filter': 0,  # whether or not a Q value filter should be used on the actor outputs
'num_demo': 100,  # number of expert demo episodes
'demo_batch_size': 128,  # number of samples to be used from the demonstrations buffer, per MPI thread (128/1024 or 32/256)
'prm_loss_weight': 0.001,  # weight corresponding to the primary loss
'aux_loss_weight': 0.0078,  # weight corresponding to the auxiliary loss, also called the cloning loss
|
||||
}
|
||||
|
||||
|
||||
@@ -145,6 +152,12 @@ def configure_ddpg(dims, params, reuse=False, use_mpi=True, clip_return=True):
|
||||
'subtract_goals': simple_goal_subtract,
|
||||
'sample_transitions': sample_her_transitions,
|
||||
'gamma': gamma,
|
||||
'bc_loss': params['bc_loss'],
|
||||
'q_filter': params['q_filter'],
|
||||
'num_demo': params['num_demo'],
|
||||
'demo_batch_size': params['demo_batch_size'],
|
||||
'prm_loss_weight': params['prm_loss_weight'],
|
||||
'aux_loss_weight': params['aux_loss_weight'],
|
||||
})
|
||||
ddpg_params['info'] = {
|
||||
'env_name': params['env_name'],
|
||||
|
@@ -0,0 +1,149 @@
|
||||
import gym
|
||||
import time
|
||||
import random
|
||||
import numpy as np
|
||||
import rospy
|
||||
import roslaunch
|
||||
|
||||
from random import randint
|
||||
from std_srvs.srv import Empty
|
||||
from sensor_msgs.msg import JointState
|
||||
from geometry_msgs.msg import PoseStamped
|
||||
from geometry_msgs.msg import Pose
|
||||
from std_msgs.msg import Float64
|
||||
from controller_manager_msgs.srv import SwitchController
|
||||
from gym.utils import seeding
|
||||
|
||||
|
||||
"""Data generation for the case of a single block pick and place in Fetch Env"""
|
||||
|
||||
actions = []
|
||||
observations = []
|
||||
infos = []
|
||||
|
||||
def main():
|
||||
env = gym.make('FetchPickAndPlace-v0')
|
||||
numItr = 100
|
||||
initStateSpace = "random"
|
||||
env.reset()
|
||||
print("Reset!")
|
||||
while len(actions) < numItr:
|
||||
obs = env.reset()
|
||||
print("ITERATION NUMBER ", len(actions))
|
||||
goToGoal(env, obs)
|
||||
|
||||
|
||||
fileName = "data_fetch"
|
||||
fileName += "_" + initStateSpace
|
||||
fileName += "_" + str(numItr)
|
||||
fileName += ".npz"
|
||||
|
||||
np.savez_compressed(fileName, acs=actions, obs=observations, info=infos) # save the file
|
||||
|
||||
def goToGoal(env, lastObs):
|
||||
|
||||
goal = lastObs['desired_goal']
|
||||
objectPos = lastObs['observation'][3:6]
|
||||
gripperPos = lastObs['observation'][:3]
|
||||
gripperState = lastObs['observation'][9:11]
|
||||
object_rel_pos = lastObs['observation'][6:9]
|
||||
episodeAcs = []
|
||||
episodeObs = []
|
||||
episodeInfo = []
|
||||
|
||||
object_oriented_goal = object_rel_pos.copy()
|
||||
object_oriented_goal[2] += 0.03 # first make the gripper go slightly above the object
|
||||
|
||||
timeStep = 0 #count the total number of timesteps
|
||||
episodeObs.append(lastObs)
|
||||
|
||||
while np.linalg.norm(object_oriented_goal) >= 0.005 and timeStep <= env._max_episode_steps:
|
||||
env.render()
|
||||
action = [0, 0, 0, 0]
|
||||
object_oriented_goal = object_rel_pos.copy()
|
||||
object_oriented_goal[2] += 0.03
|
||||
|
||||
for i in range(len(object_oriented_goal)):
|
||||
action[i] = object_oriented_goal[i]*6
|
||||
|
||||
action[len(action)-1] = 0.05 #open
|
||||
|
||||
obsDataNew, reward, done, info = env.step(action)
|
||||
timeStep += 1
|
||||
|
||||
episodeAcs.append(action)
|
||||
episodeInfo.append(info)
|
||||
episodeObs.append(obsDataNew)
|
||||
|
||||
objectPos = obsDataNew['observation'][3:6]
|
||||
gripperPos = obsDataNew['observation'][:3]
|
||||
gripperState = obsDataNew['observation'][9:11]
|
||||
object_rel_pos = obsDataNew['observation'][6:9]
|
||||
|
||||
while np.linalg.norm(object_rel_pos) >= 0.005 and timeStep <= env._max_episode_steps :
|
||||
env.render()
|
||||
action = [0, 0, 0, 0]
|
||||
for i in range(len(object_rel_pos)):
|
||||
action[i] = object_rel_pos[i]*6
|
||||
|
||||
action[len(action)-1] = -0.005
|
||||
|
||||
obsDataNew, reward, done, info = env.step(action)
|
||||
timeStep += 1
|
||||
|
||||
episodeAcs.append(action)
|
||||
episodeInfo.append(info)
|
||||
episodeObs.append(obsDataNew)
|
||||
|
||||
objectPos = obsDataNew['observation'][3:6]
|
||||
gripperPos = obsDataNew['observation'][:3]
|
||||
gripperState = obsDataNew['observation'][9:11]
|
||||
object_rel_pos = obsDataNew['observation'][6:9]
|
||||
|
||||
|
||||
while np.linalg.norm(goal - objectPos) >= 0.01 and timeStep <= env._max_episode_steps :
|
||||
env.render()
|
||||
action = [0, 0, 0, 0]
|
||||
for i in range(len(goal - objectPos)):
|
||||
action[i] = (goal - objectPos)[i]*6
|
||||
|
||||
action[len(action)-1] = -0.005
|
||||
|
||||
obsDataNew, reward, done, info = env.step(action)
|
||||
timeStep += 1
|
||||
|
||||
episodeAcs.append(action)
|
||||
episodeInfo.append(info)
|
||||
episodeObs.append(obsDataNew)
|
||||
|
||||
objectPos = obsDataNew['observation'][3:6]
|
||||
gripperPos = obsDataNew['observation'][:3]
|
||||
gripperState = obsDataNew['observation'][9:11]
|
||||
object_rel_pos = obsDataNew['observation'][6:9]
|
||||
|
||||
while True: #limit the number of timesteps in the episode to a fixed duration
|
||||
env.render()
|
||||
action = [0, 0, 0, 0]
|
||||
action[len(action)-1] = -0.005 # keep the gripper closed
|
||||
|
||||
obsDataNew, reward, done, info = env.step(action)
|
||||
timeStep += 1
|
||||
|
||||
episodeAcs.append(action)
|
||||
episodeInfo.append(info)
|
||||
episodeObs.append(obsDataNew)
|
||||
|
||||
objectPos = obsDataNew['observation'][3:6]
|
||||
gripperPos = obsDataNew['observation'][:3]
|
||||
gripperState = obsDataNew['observation'][9:11]
|
||||
object_rel_pos = obsDataNew['observation'][6:9]
|
||||
|
||||
if timeStep >= env._max_episode_steps: break
|
||||
|
||||
actions.append(episodeAcs)
|
||||
observations.append(episodeObs)
|
||||
infos.append(episodeInfo)
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
main()
|
@@ -26,7 +26,7 @@ def mpi_average(value):
|
||||
|
||||
def train(policy, rollout_worker, evaluator,
|
||||
n_epochs, n_test_rollouts, n_cycles, n_batches, policy_save_interval,
|
||||
save_policies, **kwargs):
|
||||
save_policies, demo_file, **kwargs):
|
||||
rank = MPI.COMM_WORLD.Get_rank()
|
||||
|
||||
latest_policy_path = os.path.join(logger.get_dir(), 'policy_latest.pkl')
|
||||
@@ -35,6 +35,8 @@ def train(policy, rollout_worker, evaluator,
|
||||
|
||||
logger.info("Training...")
|
||||
best_success_rate = -1
|
||||
|
||||
if policy.bc_loss == 1: policy.initDemoBuffer(demo_file) #initialize demo buffer if training with demonstrations
|
||||
for epoch in range(n_epochs):
|
||||
# train
|
||||
rollout_worker.clear_history()
|
||||
@@ -84,7 +86,7 @@ def train(policy, rollout_worker, evaluator,
|
||||
|
||||
def launch(
|
||||
env, logdir, n_epochs, num_cpu, seed, replay_strategy, policy_save_interval, clip_return,
|
||||
override_params={}, save_policies=True
|
||||
demo_file, override_params={}, save_policies=True
|
||||
):
|
||||
# Fork for multi-CPU MPI implementation.
|
||||
if num_cpu > 1:
|
||||
@@ -171,7 +173,7 @@ def launch(
|
||||
logdir=logdir, policy=policy, rollout_worker=rollout_worker,
|
||||
evaluator=evaluator, n_epochs=n_epochs, n_test_rollouts=params['n_test_rollouts'],
|
||||
n_cycles=params['n_cycles'], n_batches=params['n_batches'],
|
||||
policy_save_interval=policy_save_interval, save_policies=save_policies)
|
||||
policy_save_interval=policy_save_interval, save_policies=save_policies, demo_file=demo_file)
|
||||
|
||||
|
||||
@click.command()
|
||||
@@ -183,6 +185,7 @@ def launch(
|
||||
@click.option('--policy_save_interval', type=int, default=5, help='the interval with which policy pickles are saved. If set to 0, only the best and latest policy will be pickled.')
|
||||
@click.option('--replay_strategy', type=click.Choice(['future', 'none']), default='future', help='the HER replay strategy to be used. "future" uses HER, "none" disables HER.')
|
||||
@click.option('--clip_return', type=int, default=1, help='whether or not returns should be clipped')
|
||||
@click.option('--demo_file', type=str, default = 'PATH/TO/DEMO/DATA/FILE.npz', help='demo data file path')
|
||||
def main(**kwargs):
|
||||
launch(**kwargs)
|
||||
|
||||
|
@@ -1,5 +1,5 @@
|
||||
def mujoco():
|
||||
return dict(
|
||||
defaults = {
|
||||
'mujoco': dict(
|
||||
nsteps=2048,
|
||||
nminibatches=32,
|
||||
lam=0.95,
|
||||
@@ -10,13 +10,13 @@ def mujoco():
|
||||
lr=lambda f: 3e-4 * f,
|
||||
cliprange=0.2,
|
||||
value_network='copy'
|
||||
)
|
||||
),
|
||||
|
||||
def atari():
|
||||
return dict(
|
||||
'atari': dict(
|
||||
nsteps=128, nminibatches=4,
|
||||
lam=0.95, gamma=0.99, noptepochs=4, log_interval=1,
|
||||
ent_coef=.01,
|
||||
lr=lambda f : f * 2.5e-4,
|
||||
cliprange=lambda f : f * 0.1,
|
||||
)
|
||||
}
|
||||
|
@@ -4,7 +4,7 @@ import functools
|
||||
import numpy as np
|
||||
import os.path as osp
|
||||
import tensorflow as tf
|
||||
from baselines import logger
|
||||
from baselines import logger, registry
|
||||
from collections import deque
|
||||
from baselines.common import explained_variance, set_global_seeds
|
||||
from baselines.common.policies import build_policy
|
||||
@@ -15,6 +15,7 @@ from baselines.common.mpi_adam_optimizer import MpiAdamOptimizer
|
||||
from mpi4py import MPI
|
||||
from baselines.common.tf_util import initialize
|
||||
from baselines.common.mpi_util import sync_from_root
|
||||
from baselines.ppo2.defaults import defaults
|
||||
|
||||
class Model(object):
|
||||
"""
|
||||
@@ -218,6 +219,7 @@ def constfn(val):
|
||||
return val
|
||||
return f
|
||||
|
||||
@registry.register('ppo2', defaults=defaults)
|
||||
def learn(*, network, env, total_timesteps, eval_env = None, seed=None, nsteps=2048, ent_coef=0.0, lr=3e-4,
|
||||
vf_coef=0.5, max_grad_norm=0.5, gamma=0.99, lam=0.95,
|
||||
log_interval=10, nminibatches=4, noptepochs=4, cliprange=0.2,
|
||||
|
baselines/registry.py (new file, 22 lines added)
@@ -0,0 +1,22 @@
from baselines import logger
registry = {}

def register(name, supports_vecenv=True, defaults={}, **kwargs):
    def get_fn_entrypoint(fn):
        import inspect
        return '.'.join([inspect.getmodule(fn).__name__, fn.__name__])

    def _thunk(learn_fn):
        old_entry = registry.get(name)
        if old_entry is not None:
            logger.warn('Re-registering learn function {} (old entrypoint {}, new entrypoint {})'.format(
                name, get_fn_entrypoint(old_entry['fn']), get_fn_entrypoint(learn_fn)))

        registry[name] = dict(
            fn=learn_fn,
            supports_vecenv=supports_vecenv,
            defaults=defaults,
            **kwargs
        )
        return learn_fn
    return _thunk
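A hedged sketch of how the new registry is meant to be used; the decorator and the dict above are real, while the algorithm name and `learn` function below are made up for illustration:

```python
from baselines import registry

# Registering: runs at import time, which is why the algorithm sub-packages
# are imported explicitly at the top of this commit.
@registry.register('my_alg', defaults={'atari': dict(lr=1e-4)})
def learn(network, env, total_timesteps, **kwargs):
    ...

# Looking up: roughly what baselines/run.py now does instead of import_module tricks.
entry = registry.registry['my_alg']
learn_fn = entry['fn']                               # the decorated learn()
atari_defaults = entry['defaults'].get('atari', {})  # {'lr': 0.0001}
supports_vecenv = entry['supports_vecenv']           # True unless overridden
```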
@@ -3,17 +3,12 @@ import multiprocessing
|
||||
import os.path as osp
|
||||
import gym
|
||||
from collections import defaultdict
|
||||
import tensorflow as tf
|
||||
import numpy as np
|
||||
|
||||
from baselines.common.vec_env.vec_frame_stack import VecFrameStack
|
||||
from baselines.common.cmd_util import common_arg_parser, parse_unknown_args, make_vec_env
|
||||
from baselines.common.tf_util import get_session
|
||||
from baselines import bench, logger
|
||||
from importlib import import_module
|
||||
|
||||
from baselines.common.vec_env.vec_normalize import VecNormalize
|
||||
from baselines.common import atari_wrappers, retro_wrappers
|
||||
from baselines.common.cmd_util import common_arg_parser, parse_unknown_args, make_vec_env, env_thunk
|
||||
from baselines import logger
|
||||
from baselines.registry import registry
|
||||
|
||||
try:
|
||||
from mpi4py import MPI
|
||||
@@ -87,50 +82,23 @@ def build_env(args):
|
||||
if sys.platform == 'darwin': ncpu //= 2
|
||||
nenv = args.num_env or ncpu
|
||||
alg = args.alg
|
||||
rank = MPI.COMM_WORLD.Get_rank() if MPI else 0
|
||||
seed = args.seed
|
||||
|
||||
env_type, env_id = get_env_type(args.env)
|
||||
assert alg in registry, 'Unknown algorithm {}'.format(alg)
|
||||
|
||||
if env_type == 'atari':
|
||||
if alg == 'acer':
|
||||
env = make_vec_env(env_id, env_type, nenv, seed)
|
||||
elif alg == 'deepq':
|
||||
env = atari_wrappers.make_atari(env_id)
|
||||
env.seed(seed)
|
||||
env = bench.Monitor(env, logger.get_dir())
|
||||
env = atari_wrappers.wrap_deepmind(env, frame_stack=True)
|
||||
elif alg == 'trpo_mpi':
|
||||
env = atari_wrappers.make_atari(env_id)
|
||||
env.seed(seed)
|
||||
env = bench.Monitor(env, logger.get_dir() and osp.join(logger.get_dir(), str(rank)))
|
||||
env = atari_wrappers.wrap_deepmind(env)
|
||||
# TODO check if the second seeding is necessary, and eventually remove
|
||||
env.seed(seed)
|
||||
else:
|
||||
frame_stack_size = 4
|
||||
env = VecFrameStack(make_vec_env(env_id, env_type, nenv, seed), frame_stack_size)
|
||||
|
||||
elif env_type == 'retro':
|
||||
import retro
|
||||
gamestate = args.gamestate or retro.State.DEFAULT
|
||||
env = retro_wrappers.make_retro(game=args.env, state=gamestate, max_episode_steps=10000,
|
||||
use_restricted_actions=retro.Actions.DISCRETE)
|
||||
env.seed(args.seed)
|
||||
env = bench.Monitor(env, logger.get_dir())
|
||||
env = retro_wrappers.wrap_deepmind_retro(env)
|
||||
|
||||
if env_type in {'atari', 'retro'}:
|
||||
frame_stack_size = 4
|
||||
else:
|
||||
config = tf.ConfigProto(allow_soft_placement=True,
|
||||
intra_op_parallelism_threads=1,
|
||||
inter_op_parallelism_threads=1)
|
||||
config.gpu_options.allow_growth = True
|
||||
get_session(config=config)
|
||||
frame_stack_size = 1
|
||||
|
||||
env = make_vec_env(env_id, env_type, args.num_env or 1, seed, reward_scale=args.reward_scale)
|
||||
if registry[alg]['supports_vecenv']:
|
||||
env = make_vec_env(env_id, env_type, nenv, seed, gamestate=args.gamestate, reward_scale=args.reward_scale, frame_stack_size=frame_stack_size)
|
||||
else:
|
||||
env = env_thunk(env_id, env_type, seed=seed, wrapper_kwargs={'frame_stack': frame_stack_size > 1})
|
||||
|
||||
if env_type == 'mujoco':
|
||||
env = VecNormalize(env)
|
||||
if env_type == 'mujoco' and registry[alg]['supports_vecenv']:
|
||||
env = VecNormalize(env)
|
||||
|
||||
return env
|
||||
|
||||
@@ -157,29 +125,27 @@ def get_default_network(env_type):
|
||||
return 'mlp'
|
||||
|
||||
def get_alg_module(alg, submodule=None):
|
||||
submodule = submodule or alg
|
||||
try:
|
||||
# first try to import the alg module from baselines
|
||||
alg_module = import_module('.'.join(['baselines', alg, submodule]))
|
||||
except ImportError:
|
||||
# then from rl_algs
|
||||
alg_module = import_module('.'.join(['rl_' + 'algs', alg, submodule]))
|
||||
import inspect
|
||||
entry = registry.get(alg)
|
||||
assert entry is not None, 'Unregistered algorithm {}'.format(alg)
|
||||
module = inspect.getmodule(entry['fn']).__name__
|
||||
if submodule is not None:
|
||||
module = '.'.join([module, submodule])
|
||||
return module
|
||||
|
||||
return alg_module
|
||||
|
||||
|
||||
def get_learn_function(alg):
|
||||
return get_alg_module(alg).learn
|
||||
entry = registry.get(alg)
|
||||
assert entry is not None, 'Unregistered algorithm {}'.format(alg)
|
||||
return entry['fn']
|
||||
|
||||
|
||||
def get_learn_function_defaults(alg, env_type):
|
||||
try:
|
||||
alg_defaults = get_alg_module(alg, 'defaults')
|
||||
kwargs = getattr(alg_defaults, env_type)()
|
||||
except (ImportError, AttributeError):
|
||||
kwargs = {}
|
||||
return kwargs
|
||||
|
||||
entry = registry.get(alg)
|
||||
assert entry is not None, 'Unregistered algorithm {}'.format(alg)
|
||||
return entry['defaults'].get(env_type, {})
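With these helpers, the rest of the script resolves everything through the registry. A hedged sketch of the calls, assuming this file is `baselines/run.py` and that `ppo2` is registered as in this commit (the values in the comments are illustrative):

```python
from baselines.run import get_learn_function, get_learn_function_defaults, get_alg_module

alg, env_type = 'ppo2', 'atari'
learn = get_learn_function(alg)                          # the registry entry's 'fn'
alg_kwargs = get_learn_function_defaults(alg, env_type)  # e.g. {'nsteps': 128, 'nminibatches': 4, ...}
module_name = get_alg_module(alg)                        # e.g. 'baselines.ppo2.ppo2', via inspect.getmodule
```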
|
||||
|
||||
|
||||
|
||||
def parse_cmdline_kwargs(args):
|
||||
@@ -213,6 +179,7 @@ def main():
|
||||
rank = MPI.COMM_WORLD.Get_rank()
|
||||
|
||||
model, env = train(args, extra_args)
|
||||
|
||||
env.close()
|
||||
|
||||
if args.save_path is not None and rank == 0:
|
||||
|
@@ -28,3 +28,9 @@ def mujoco():
|
||||
vf_stepsize=1e-3,
|
||||
normalize_observations=True,
|
||||
)
|
||||
|
||||
defaults = {
|
||||
'atari': atari(),
|
||||
'mujoco': mujoco(),
|
||||
}
|
||||
|
||||
|
@@ -1,5 +1,5 @@
|
||||
from baselines.common import explained_variance, zipsame, dataset
|
||||
from baselines import logger
|
||||
from baselines import logger, registry
|
||||
import baselines.common.tf_util as U
|
||||
import tensorflow as tf, numpy as np
|
||||
import time
|
||||
@@ -13,6 +13,8 @@ from baselines.common.input import observation_placeholder
|
||||
from baselines.common.policies import build_policy
|
||||
from contextlib import contextmanager
|
||||
|
||||
from baselines.trpo_mpi.defaults import defaults
|
||||
|
||||
def traj_segment_generator(pi, env, horizon, stochastic):
|
||||
# Initialize state variables
|
||||
t = 0
|
||||
@@ -82,6 +84,7 @@ def add_vtarg_and_adv(seg, gamma, lam):
|
||||
gaelam[t] = lastgaelam = delta + gamma * lam * nonterminal * lastgaelam
|
||||
seg["tdlamret"] = seg["adv"] + seg["vpred"]
|
||||
|
||||
@registry.register('trpo_mpi', supports_vecenv=False, defaults=defaults)
|
||||
def learn(*,
|
||||
network,
|
||||
env,
|
||||
|
BIN data/fetchPickAndPlaceContrast.png (new binary file, 68 KiB; not shown)