* add some docstrings * start making big changes * state machine redesign * sampling seems to work * some reorg * fixed sampling of real vals * json conversion * made it possible to register new commands got nontrivial version of Pred working * consolidate command definitions * add more macro blocks * revived visualization * rename Userdata -> CmdInterpreter make AlgoSmInstance subclass of SmInstance that uses appropriate userdata argument * replace userdata by ci when appropriate * minor test fixes * revamped handmade dir, can run ppo_metal * seed to avoid random test failure * implement AlgoAgent * Autogenerated object that performs all ops and macros * more CmdRecorder changes * move files around * move MatchProb and JtftProb * remove obsolete * fix tests involving AlgoAgent (pending the next commit on ppo_metal code) * ppo_metal: reduce duplication in policy_gen, make sess an attribute of PpoAgent and StochasticPolicy instead of using get_default_session everywhere. * maze_env reformatting, move algo_search script (but stil broken) * move agent.py * fix test on handcrafted agents * tuning/fixing ppo_metal baseline * minor * Fix ppo_metal baseline * Don’t set epcount, tcount unless they’re being used * get rid of old ppo_metal baseline * fixes for handmade/run.py tuning * fix codegen ppo * fix handmade ppo hps * fix test, go back to safe_div * switch to more complex filtering * make sure all handcrafted algos have finite probability * train to maximize logprob of provided samples Trex changes to avoid segfault * AlgoSm also includes global hyperparams * don’t duplicate global hyperparam defaults * create generic_ob_ac_space function * use sorted list of outkeys * revive tsne * todo changes * determinism test * todo + test fix * remove a few deprecated files, rename other tests so they don’t run automatically, fix real test failure * continuous control with codegen * continuous control with codegen * implement continuous action space algodistr * ppo with trex RUN 
BENCHMARKS * wrap trex in a monitor * dummy commit to RUN BENCHMARKS * adding monitor to trex env RUN BENCHMARKS * adding monitor to trex RUN BENCHMARKS * include monitor into trex env RUN BENCHMARKS * generate nll and predmean using Distribution node * dummy commit to RUN BENCHMARKS * include pybullet into baselines optional dependencies * dummy commit to RUN BENCHMARKS * install games for cron rcall user RUN BENCHMARKS * add --yes flag to install.py in rcall config for cron user RUN BENCHMARKS * both continuous and discrete versions seem to run * fixes to monitor to work with vecenv-like info and rewards RUN BENCHMARKS * dummy commit to RUN BENCHMARKS * removed shape check from one-hot encoding logic in distributions.CategoricalPd * reset logger configuration in codegen/handmade/run.py to be in-line with baselines RUN BENCHMARKS * merged peterz_codegen_benchmarks RUN BENCHMARKS * skip tests RUN BENCHMARKS * working on test failures * save benchmark dicts RUN BENCHMARK * merged peterz_codegen_benchmark RUN BENCHMARKS * add get_git_commit_message to the baselines.common.console_util * dummy commit to RUN BENCHMARKS * merged fixes from peterz_codegen_benchmark RUN BENCHMARKS * fixing failure in test_algo_nll WIP * test_algo_nll passes with both ppo and softq * re-enabled tests * run trex on gpus for 100k total (horizon=100k / 16) RUN BENCHMARKS * merged latest peterz_codegen_benchmarks RUN BENCHMARKS * fixing codegen test failures (logging-related) * fixed name collision in run-benchmarks-new.py RUN BENCHMARKS * fixed name collision in run-benchmarks-new.py RUN BENCHMARKS * fixed import in node_filters.py * test_algo_search passes * some cleanup * dummy commit to RUN BENCHMARKS * merge fast fail for subprocvecenv RUN BENCHMARKS * use SubprocVecEnv in sonic_prob * added deprecation note to shmem_vec_env * allow indexing of distributions * add timeout to pipeline.yaml * typo in pipeline.yml * run tests with --forked option * resolved merge conflict in 
rl_algs.bench.benchmarks * re-enable parallel tests * fix remaining merge conflicts and syntax * Update trex_prob.py * fixes to ResultsWriter * take baselines/run.py from peterz_codegen branch * actually save stuff to file in VecMonitor RUN BENCHMARKS * enable parallel tests * merge stricter flake8 * merge peterz_codegen_benchmark, resolve conflicts * autopep8 * remove traces of Monitor from trex env, check shapes before encoding in CategoricalPd * asserts and warnings to make q -> distribution change more explicit * fixed assert in CategoricalPd * add header to vec_monitor output file RUN BENCHMARKS * make VecMonitor write header to the output file * remove deprecation message from shmem_vec_env RUN BENCHMARKS * autopep8 * proper shape test in distributions.py * ResultsWriter can take dict headers * dummy commit to RUN BENCHMARKS * replace assert len(qs)==1 with warning RUN BENCHMARKS * removed pdb from ppo2 RUN BENCHMARKS
165 lines
5.9 KiB
Python
165 lines
5.9 KiB
Python
import re
|
|
import os.path as osp
|
|
import os
|
|
# Absolute directory containing this file (useful for locating sibling data files).
SCRIPT_DIR = os.path.dirname(os.path.abspath(__file__))

# The 7 Atari games used in Mnih et al. (2013).
_atari7 = ['BeamRider', 'Breakout', 'Enduro', 'Pong', 'Qbert', 'Seaquest', 'SpaceInvaders']
# 7 Atari games emphasizing exploration (see the AtariExploration10M benchmark below).
_atariexpl7 = ['Freeway', 'Gravitar', 'MontezumaRevenge', 'Pitfall', 'PrivateEye', 'Solaris', 'Venture']

# Global registry of benchmark dicts; populated via register_benchmark().
_BENCHMARKS = []

# Matches a trailing gym version suffix such as "-v2" (used to derive task descriptions).
remove_version_re = re.compile(r'-v\d+$')
|
def register_benchmark(benchmark):
    """Add a benchmark dict to the global registry.

    Each benchmark must carry a unique 'name'; tasks without a 'desc' get one
    derived from their env_id with the gym version suffix stripped.

    Raises:
        ValueError: if a benchmark with the same name was already registered.
    """
    if any(existing['name'] == benchmark['name'] for existing in _BENCHMARKS):
        raise ValueError('Benchmark with name %s already registered!' % benchmark['name'])

    # automatically add a description if it is not present
    if 'tasks' in benchmark:
        for task in benchmark['tasks']:
            task.setdefault('desc', remove_version_re.sub('', task['env_id']))
    _BENCHMARKS.append(benchmark)
|
def list_benchmarks():
    """Return the names of all benchmarks registered so far."""
    return [entry['name'] for entry in _BENCHMARKS]
|
def get_benchmark(benchmark_name):
    """Look up a registered benchmark by name.

    Raises:
        ValueError: if no benchmark with that name exists.
    """
    match = next((entry for entry in _BENCHMARKS if entry['name'] == benchmark_name), None)
    if match is None:
        raise ValueError('%s not found! Known benchmarks: %s' % (benchmark_name, list_benchmarks()))
    return match
|
def get_task(benchmark, env_id):
    """Get a task by env_id. Return None if the benchmark doesn't have the env"""
    for task in benchmark['tasks']:
        if task['env_id'] == env_id:
            return task
    return None
|
def find_task_for_env_id_in_any_benchmark(env_id):
    """Search all registered benchmarks for a task with the given env_id.

    Returns:
        (benchmark, task) for the first match, or (None, None) if absent.
    """
    for benchmark in _BENCHMARKS:
        hit = next((t for t in benchmark['tasks'] if t['env_id'] == env_id), None)
        if hit is not None:
            return benchmark, hit
    return None, None
|
# Suffix appended to each Atari game name to form its gym env id.
_ATARI_SUFFIX = 'NoFrameskip-v4'

register_benchmark({
    'name': 'Atari50M',
    'description': '7 Atari games from Mnih et al. (2013), with pixel observations, 50M timesteps',
    'tasks': [
        {'desc': game, 'env_id': game + _ATARI_SUFFIX, 'trials': 2, 'num_timesteps': 50000000}
        for game in _atari7
    ],
})

register_benchmark({
    'name': 'Atari10M',
    'description': '7 Atari games from Mnih et al. (2013), with pixel observations, 10M timesteps',
    'tasks': [
        {'desc': game, 'env_id': game + _ATARI_SUFFIX, 'trials': 6, 'num_timesteps': 10000000}
        for game in _atari7
    ],
})

register_benchmark({
    'name': 'Atari1Hr',
    'description': '7 Atari games from Mnih et al. (2013), with pixel observations, 1 hour of walltime',
    'tasks': [
        # budgeted by wall-clock seconds rather than timesteps
        {'desc': game, 'env_id': game + _ATARI_SUFFIX, 'trials': 2, 'num_seconds': 3600}
        for game in _atari7
    ],
})

register_benchmark({
    'name': 'AtariExploration10M',
    'description': '7 Atari games emphasizing exploration, with pixel observations, 10M timesteps',
    'tasks': [
        {'desc': game, 'env_id': game + _ATARI_SUFFIX, 'trials': 2, 'num_timesteps': 10000000}
        for game in _atariexpl7
    ],
})
|
# MuJoCo

# Small 2D MuJoCo control tasks (v2 envs).
_mujocosmall = [
    'InvertedDoublePendulum-v2', 'InvertedPendulum-v2',
    'HalfCheetah-v2', 'Hopper-v2', 'Walker2d-v2',
    'Reacher-v2', 'Swimmer-v2']

register_benchmark({
    'name': 'Mujoco1M',
    'description': 'Some small 2D MuJoCo tasks, run for 1M timesteps',
    'tasks': [
        {'env_id': env_name, 'trials': 6, 'num_timesteps': 1000000}
        for env_name in _mujocosmall
    ],
})

register_benchmark({
    'name': 'MujocoWalkers',
    'description': 'MuJoCo forward walkers, run for 8M, humanoid 100M',
    'tasks': [
        {'env_id': 'Hopper-v1', 'trials': 4, 'num_timesteps': 8000000},
        {'env_id': 'Walker2d-v1', 'trials': 4, 'num_timesteps': 8000000},
        {'env_id': 'Humanoid-v1', 'trials': 4, 'num_timesteps': 100000000},
    ],
})
|
# Bullet

# MuJoCo-like locomotion/control tasks re-implemented in PyBullet.
_bulletsmall = [
    'InvertedDoublePendulum', 'InvertedPendulum', 'HalfCheetah', 'Reacher', 'Walker2D', 'Hopper', 'Ant'
]
_bulletsmall = [e + 'BulletEnv-v0' for e in _bulletsmall]

register_benchmark({
    'name': 'Bullet1M',
    # FIX: description previously claimed 6 tasks, but _bulletsmall has 7 entries.
    'description': '7 mujoco-like tasks from bullet, 1M steps',
    'tasks': [{'env_id': e, 'trials': 6, 'num_timesteps': int(1e6)} for e in _bulletsmall]
})
|
# Roboschool

register_benchmark({
    'name': 'Roboschool8M',
    'description': 'Small 2D tasks, up to 30 minutes to complete on 8 cores',
    'tasks': [
        {'env_id': 'RoboschoolReacher-v1', 'trials': 4, 'num_timesteps': 2000000},
        {'env_id': 'RoboschoolAnt-v1', 'trials': 4, 'num_timesteps': 8000000},
        {'env_id': 'RoboschoolHalfCheetah-v1', 'trials': 4, 'num_timesteps': 8000000},
        {'env_id': 'RoboschoolHopper-v1', 'trials': 4, 'num_timesteps': 8000000},
        {'env_id': 'RoboschoolWalker2d-v1', 'trials': 4, 'num_timesteps': 8000000},
    ],
})

register_benchmark({
    'name': 'RoboschoolHarder',
    'description': 'Test your might!!! Up to 12 hours on 32 cores',
    'tasks': [
        {'env_id': 'RoboschoolHumanoid-v1', 'trials': 4, 'num_timesteps': 100000000},
        {'env_id': 'RoboschoolHumanoidFlagrun-v1', 'trials': 4, 'num_timesteps': 200000000},
        {'env_id': 'RoboschoolHumanoidFlagrunHarder-v1', 'trials': 4, 'num_timesteps': 400000000},
    ],
})
|
# Other

# Large Atari suite; despite the name, the list holds 47 games.
_atari50 = [  # actually 47
    'Alien', 'Amidar', 'Assault', 'Asterix', 'Asteroids',
    'Atlantis', 'BankHeist', 'BattleZone', 'BeamRider', 'Bowling',
    'Breakout', 'Centipede', 'ChopperCommand', 'CrazyClimber',
    'DemonAttack', 'DoubleDunk', 'Enduro', 'FishingDerby', 'Freeway',
    'Frostbite', 'Gopher', 'Gravitar', 'IceHockey', 'Jamesbond',
    'Kangaroo', 'Krull', 'KungFuMaster', 'MontezumaRevenge', 'MsPacman',
    'NameThisGame', 'Pitfall', 'Pong', 'PrivateEye', 'Qbert',
    'RoadRunner', 'Robotank', 'Seaquest', 'SpaceInvaders', 'StarGunner',
    'Tennis', 'TimePilot', 'Tutankham', 'UpNDown', 'Venture',
    'VideoPinball', 'WizardOfWor', 'Zaxxon',
]

register_benchmark({
    'name': 'Atari50_10M',
    'description': '47 Atari games from Mnih et al. (2013), with pixel observations, 10M timesteps',
    'tasks': [
        {'desc': game, 'env_id': game + _ATARI_SUFFIX, 'trials': 2, 'num_timesteps': 10000000}
        for game in _atari50
    ],
})
|
# HER DDPG

register_benchmark({
    'name': 'HerDdpg',
    'description': 'Smoke-test only benchmark of HER',
    # single trial, no timestep budget: just verifies the pipeline runs
    'tasks': [{'trials': 1, 'env_id': 'FetchReach-v1'}],
})