Move LivePlot under examples for now

This commit is contained in:
Greg Brockman
2016-05-30 22:02:37 -07:00
parent 7c530804cc
commit 1004e1ce39
3 changed files with 32 additions and 69 deletions

View File

@@ -1,65 +0,0 @@
import logging
import os, sys
import gym
from gym.monitoring.live_plot import LivePlot
# The world's simplest agent!
class RandomAgent(object):
    """An agent that ignores what it sees and samples actions uniformly."""

    def __init__(self, action_space):
        # Keep a handle on the space so act() can draw samples from it.
        self.action_space = action_space

    def act(self, observation, reward, done):
        """Return the next action; observation, reward and done are ignored."""
        return self.action_space.sample()
if __name__ == '__main__':
    # Optional logger setup. Switch the level to logging.DEBUG or
    # logging.WARN to change how chatty the run is.
    logger = logging.getLogger()
    logger.setLevel(logging.INFO)

    # Environment id comes from the command line, defaulting to CartPole.
    if len(sys.argv) < 2:
        env_id = 'CartPole-v0'
    else:
        env_id = sys.argv[1]
    env = gym.make(env_id)

    # Directory the monitor writes to. It may already exist (monitor files
    # are namespaced), or be a fresh tempfile.mkdtemp() directory.
    outdir = '/tmp/random-agent-results'
    env.monitor.start(outdir, force=True, seed=0)

    # Optional live graph of the agent's performance; call plotter.plot()
    # to refresh it.
    plotter = LivePlot(outdir)

    # Must be created *after* the monitor starts: the monitor's seeding
    # swaps in a new action_space with the right pseudorandom generator.
    agent = RandomAgent(env.action_space)

    episode_count = 100
    max_steps = 200
    reward = 0
    done = False

    for episode in range(episode_count):
        observation = env.reset()
        for step in range(max_steps):
            action = agent.act(observation, reward, done)
            observation, reward, done, _ = env.step(action)
            if done:
                break
        plotter.plot()
        env.render()

    # Dump result info to disk
    env.monitor.close()

    # Upload to the scoreboard. We could also do this from another
    # process if we wanted.
    logger.info("Successfully ran RandomAgent. Now trying to upload results to the scoreboard. If it breaks, you can always just try re-uploading the same results.")
    gym.upload(outdir, algorithm_id='random')

View File

@@ -1,4 +1,4 @@
import gym
import gym
import matplotlib
import matplotlib.pyplot as plt
@@ -30,10 +30,39 @@ class LivePlot(object):
data = results[self.data_key]
#only update plot if data is different (plot calls are expensive)
if data != self._last_data:
if data != self._last_data:
self._last_data = data
plt.plot(data, color=self.line_color)
# pause so matplotlib will display
# may want to figure out matplotlib animation or use a different library in the future
plt.pause(0.000001)
plt.pause(0.000001)
if __name__ == '__main__':
    # Smoke-test LivePlot against random CartPole rollouts.
    env = gym.make('CartPole-v0')
    outdir = '/tmp/random-agent-results'
    env.monitor.start(outdir, force=True, seed=0)

    # The plotter reads the monitor's result files under outdir; call
    # plotter.plot() to refresh the graph.
    plotter = LivePlot(outdir)

    episode_count = 100
    max_steps = 200
    reward = 0
    done = False

    for episode in range(episode_count):
        observation = env.reset()
        for step in range(max_steps):
            # Actions are sampled uniformly straight from the action space.
            observation, reward, done, _ = env.step(env.action_space.sample())
            if done:
                break
        plotter.plot()
        env.render()

    # Dump result info to disk
    env.monitor.close()

View File

@@ -1,4 +1,3 @@
from gym.monitoring.monitor import Monitor, load_results, _open_monitors
from gym.monitoring.stats_recorder import StatsRecorder
from gym.monitoring.video_recorder import VideoRecorder
from gym.monitoring.live_plot import LivePlot