import numpy as np


def make_sample_her_transitions(replay_strategy, replay_k, reward_fun):
    """Creates a sample function that can be used for HER experience replay.

    Args:
        replay_strategy (in ['future', 'none']): the HER replay strategy; if set to 'none',
            regular DDPG experience replay is used
        replay_k (int): the ratio between HER replays and regular replays (e.g. k = 4 -> 4 times
            as many HER replays as regular replays are used)
        reward_fun (function): function to re-compute the reward with substituted goals
    """
    if replay_strategy == 'future':
        future_p = 1 - (1. / (1 + replay_k))
    else:  # 'replay_strategy' == 'none'
        future_p = 0
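    # With the 'future' strategy, future_p is the fraction of sampled transitions
    # whose goal is replaced by a later achieved goal; e.g. replay_k = 4 gives
    # future_p = 1 - 1/(1 + 4) = 0.8, i.e. four HER replays per regular replay.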

    def _sample_her_transitions(episode_batch, batch_size_in_transitions):
        """episode_batch is {key: array(buffer_size x T x dim_key)}
        """
        T = episode_batch['u'].shape[1]
        rollout_batch_size = episode_batch['u'].shape[0]
        batch_size = batch_size_in_transitions

        # Select which episodes and time steps to use.
        episode_idxs = np.random.randint(0, rollout_batch_size, batch_size)
        t_samples = np.random.randint(T, size=batch_size)
        transitions = {key: episode_batch[key][episode_idxs, t_samples].copy()
                       for key in episode_batch.keys()}
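        # Fancy indexing with (episode_idxs, t_samples) picks a single transition
        # per sample, so each entry of `transitions` has shape (batch_size, dim_key).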

        # Select future time indexes proportional with probability future_p. These
        # will be used for HER replay by substituting in future goals.
        her_indexes = np.where(np.random.uniform(size=batch_size) < future_p)
        future_offset = np.random.uniform(size=batch_size) * (T - t_samples)
        future_offset = future_offset.astype(int)
        future_t = (t_samples + 1 + future_offset)[her_indexes]
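        # future_offset is uniform over [0, T - t_samples) before flooring, so
        # future_t falls in [t_samples + 1, T]: the substituted goal is always one
        # achieved strictly after the sampled step.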
# Replace goal with achieved goal but only for the previously-selected
|
|
# HER transitions (as defined by her_indexes). For the other transitions,
|
|
# keep the original goal.
|
|
future_ag = episode_batch['ag'][episode_idxs[her_indexes], future_t]
|
|
transitions['g'][her_indexes] = future_ag
|
|
|
|
# Reconstruct info dictionary for reward computation.
|
|
info = {}
|
|
for key, value in transitions.items():
|
|
if key.startswith('info_'):
|
|
info[key.replace('info_', '')] = value
|
|
|
|
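
        # Here 'ag_2' follows the replay buffer's key convention and holds the
        # achieved goal at the next time step, which the reward function compares
        # against the (possibly substituted) desired goal 'g'.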
# Re-compute reward since we may have substituted the goal.
|
|
reward_params = {k: transitions[k] for k in ['ag_2', 'g']}
|
|
reward_params['info'] = info
|
|
transitions['r'] = reward_fun(**reward_params)
|
|
|
|
transitions = {k: transitions[k].reshape(batch_size, *transitions[k].shape[1:])
|
|
for k in transitions.keys()}
|
|
|
|
assert(transitions['u'].shape[0] == batch_size_in_transitions)
|
|
|
|
return transitions
|
|
|
|
return _sample_her_transitions
|
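

# A minimal usage sketch, assuming the buffer layout this sampler expects:
# per-episode arrays keyed by 'u' (actions over T steps), 'g' (desired goals),
# 'ag' (achieved goals over T + 1 steps), 'ag_2' (next-step achieved goals), and a
# reward function with signature reward_fun(ag_2, g, info). The shapes and the toy
# sparse reward below are illustrative assumptions, not fixed by this module.
if __name__ == '__main__':
    buffer_size, T, goal_dim, action_dim = 10, 5, 3, 4

    def toy_reward_fun(ag_2, g, info):
        # Sparse reward: 0 if the achieved goal is within 0.05 of the desired goal, else -1.
        return -(np.linalg.norm(ag_2 - g, axis=-1) > 0.05).astype(np.float32)

    ag = np.random.randn(buffer_size, T + 1, goal_dim)
    episode_batch = {
        'u': np.random.randn(buffer_size, T, action_dim),
        'g': np.random.randn(buffer_size, T, goal_dim),
        'ag': ag,
        'ag_2': ag[:, 1:, :],  # achieved goal after each transition
    }

    sample_transitions = make_sample_her_transitions(
        replay_strategy='future', replay_k=4, reward_fun=toy_reward_fun)
    batch = sample_transitions(episode_batch, batch_size_in_transitions=32)
    print({k: v.shape for k, v in batch.items()})  # every value has leading dim 32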