Mirror of https://github.com/Farama-Foundation/Gymnasium.git, synced 2025-08-19 13:32:03 +00:00.
#!/usr/bin/env python
#
# Run all the tasks on a benchmark using a random agent.
#
# This script assumes you have set an OPENAI_GYM_API_KEY environment
# variable. You can find your API key in the web interface:
# https://gym.openai.com/settings/profile.
#
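# Example invocation (illustrative only: the script filename and API key are
# placeholders, and any benchmark id registered with this version of gym works,
# e.g. the Atari7Ram-v0 id used in the help text below):
#
#   OPENAI_GYM_API_KEY=<your-key> python benchmark_runner.py -b Atari7Ram-v0 -v -t /tmp/gym-results/
#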
import argparse
import logging
import os
import sys

import gym
from gym import wrappers

# In modules, use `logger = logging.getLogger(__name__)`
logger = logging.getLogger()
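# Note: messages from this root logger only appear if some handler is attached
# (gym releases of this era install one at import time; otherwise an explicit
# logging.basicConfig() call would be needed for the INFO output to show up).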


def main():
    parser = argparse.ArgumentParser(description=None)
    parser.add_argument('-b', '--benchmark-id', help='id of benchmark to run e.g. Atari7Ram-v0')
    parser.add_argument('-v', '--verbose', action='count', dest='verbosity', default=0, help='Set verbosity.')
    parser.add_argument('-t', '--training-dir', default="/tmp/gym-results/", help='What directory to upload.')
    args = parser.parse_args()

    if args.verbosity == 0:
        logger.setLevel(logging.INFO)
    elif args.verbosity >= 1:
        logger.setLevel(logging.DEBUG)

    benchmark_id = args.benchmark_id
    if benchmark_id is None:
        logger.info("Must supply a valid benchmark")
        return 1

    try:
        benchmark = gym.benchmark_spec(benchmark_id)
    except Exception:
        logger.info("Invalid benchmark")
        return 1

    # Run every task in the benchmark for its prescribed number of trials,
    # recording each trial with the Monitor wrapper under the training dir.
    for task in benchmark.tasks:
        logger.info("Running on env: {}".format(task.env_id))
        for trial in range(task.trials):
            env = gym.make(task.env_id)
            training_dir_name = "{}/{}-{}".format(args.training_dir, task.env_id, trial)
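            # At this commit, wrappers.Monitor is a factory that takes the output
            # directory and returns a Monitor class, hence the two-step call below;
            # later gym releases construct the wrapper directly, roughly as
            # wrappers.Monitor(env, training_dir_name).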
            env = wrappers.Monitor(training_dir_name)(env)
            env.reset()
            # Act randomly until the task's timestep budget is exhausted,
            # resetting the environment whenever an episode finishes.
            for _ in range(task.max_timesteps):
                o, r, done, _ = env.step(env.action_space.sample())
                if done:
                    env.reset()
            env.close()

    logger.info("""Done running, upload results using the following command:

python -c "import gym; gym.upload('{}', benchmark_id='{}', algorithm_id='(unknown)')"

""".rstrip().format(args.training_dir, benchmark_id))

    return 0


if __name__ == '__main__':
    sys.exit(main())