$$ \huge{\underline{\textbf{ Classical Control with DQN }}} $$$$ \large{\textbf{MountainCar - Pendulum - CartPole - Acrobot - MoonLander}} $$


Introduction

This notebook presents a basic Deep Q-Network (DQN) used to solve OpenAI Gym Classic Control environments such as Mountain Car, Inverted Pendulum and so on. For educational purposes the DQN here has no target network; for practical applications you should use one (a minimal sketch is shown at the end of this introduction).

The environments solved in this notebook are:

  • MountainCar-v0 - modified to remove 200 step time limit
  • Pendulum-v0 - modified with discrete actions and converted state space
  • CartPole-v0
  • Acrobot-v1
  • LunarLander-v2

I tried to keep pre-processing and hyperparameters the same across all environments, so some hyperparameters will differ from their optimal values. A special note goes to the frame-skip parameter, which is set to 4 for Mountain Car and Moon Lander and omitted otherwise; it can be set to 2 for all environments and they should still train OK. As for training iterations, I used 25k everywhere with the exception of LunarLander (200k), which also gets a longer epsilon decay.
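
For reference, here is a minimal sketch of how a target network would slot into the training update used later in this notebook. It is not used anywhere below: target_model is a hypothetical frozen copy of the online approximator, copy_weights is a hypothetical helper, and note that the TFNeuralNet class defined below resets the default graph in its constructor, so hosting two copies would need a small change (e.g. one tf.Graph per instance).

import numpy as np

TARGET_SYNC_EVERY = 1000                               # sync period (assumed value)

def dqn_update_with_target(model, target_model, mem, batch_size, gamma, tts):
    # Same update as in q_learning() below, but bootstrapping from a frozen copy.
    states, actions, rewards, n_states, dones, _ = mem.get_batch(batch_size)
    q_next = target_model.eval(n_states)               # bootstrap from the frozen copy
    targets = rewards + gamma * np.max(q_next, axis=-1)
    targets[dones] = rewards[dones]                    # terminal transitions keep R only
    model.train(states, actions, targets)
    if tts % TARGET_SYNC_EVERY == 0:                   # periodically copy online -> target
        copy_weights(model, target_model)              # hypothetical helper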

Imports

General imports

In [1]:
import os
import datetime
import itertools
import collections
import numpy as np
import matplotlib.pyplot as plt

Import OpenAI Gym

In [2]:
import gym

Initialise TensorFlow

In [3]:
import tensorflow as tf
gpu_options = tf.GPUOptions(allow_growth=True)  # init TF ...
config=tf.ConfigProto(gpu_options=gpu_options)  # w/o taking ...
with tf.Session(config=config): pass            # all GPU memory

Import helpers for plotting (source file: helpers.py)

In [4]:
import helpers
import importlib
importlib.reload(helpers);

Q-Learning

Epsilon-greedy policy

In [5]:
def policy(st, model, eps):
    if np.random.rand() > eps:
        q_values = model.eval(np.stack([st]))
        return np.argmax(q_values)
    else:
        return env.action_space.sample()   # note: uses the global 'env'

Main training loop. Supports resuming from pre-trained agent and callbacks.

In [6]:
def q_learning(env, frames, gamma, eps_decay_steps, eps_target,
               batch_size, model, mem, start_step=0,
               callback=None, trace=None, render=False):
    """Q-Learning, supprots resume
    
    Note: If resuming, all parameters should be identical to original call, with
        exception of 'start_step' and 'frames'.
    
    Params:
        env - environment
        frames - number of time steps to execute
        gamma - discount factor [0..1]
        eps_decay_steps - decay epsilon-greedy param over that many time steps
        eps_target - epsilon-greedy param after decay
        batch_size - neural network batch size from memory buffer
        model      - function approximator, already initialised, with methods:
                     eval(state, action) -> float
                     train(state, target) -> None
        mem - memory replay buffer
        start_step - if continuing, pass in return value (tts_) here
        callback - optional callback to execute
        trace - this object handles data logging, plotting etc.
        render - render openai gym environment?
    """
    
    def eps_schedule(tts, eps_decay_steps, eps_target):
        if tts > eps_decay_steps:
            return eps_target
        else:
            eps_per_step_change = (1-eps_target) / eps_decay_steps
            return 1.0 - tts * eps_per_step_change
    
        
    assert len(mem) >= batch_size
    
    tts_ = start_step                        # total time step
    for _ in itertools.count():              # count from 0 to infinity
        
        S = env.reset()
        episode_reward = 0                   # purely for logging
        if render: env.render()
        
        for t_ in itertools.count():         # count from 0 to infinity
            
            eps = eps_schedule(tts_, eps_decay_steps, eps_target)
            
            A = policy(S, model, eps)
            
            S_, R, done, _ = env.step(A)
            episode_reward += R
            if render: env.render()
            
            mem.append(S, A, R, S_, done)
            
            if callback is not None:
                callback(tts_, t_, S, A, R, done, eps, episode_reward, model, mem, trace)
            
            states, actions, rewards, n_states, dones, _ = mem.get_batch(batch_size)
            targets = model.eval(n_states)
            targets = rewards + gamma * np.max(targets, axis=-1)
            targets[dones] = rewards[dones]  # return of next-to-terminal state is just R
            model.train(states, actions, targets)

            S = S_
            
            tts_ += 1
            if tts_ >= start_step + frames:
                return tts_                  # so we can pick up where we left
            
            if done:
                break

Stripped-down version for evaluation. Does not train the agent.

In [7]:
def evaluate(env, model, frames=None, episodes=None, eps=0.0, render=False):
    assert frames is not None or episodes is not None
        
    total_reward = 0
    
    tts_ = 0                                 # total time step
    for e_ in itertools.count():             # count from 0 to infinity
        if episodes is not None and e_ >= episodes:
            return total_reward
        
        S = env.reset()
        if render: env.render()
        
        for t_ in itertools.count():         # count from 0 to infinity
            
            A = policy(S, model, eps)
            
            S_, R, done, _ = env.step(A)
            total_reward += R
            if render: env.render()
    
            S = S_
            
            tts_ += 1
            if frames is not None and tts_ >= frames:
                return total_reward
            
            if done:
                break

Stripped-down version to pre-fill the memory buffer with a random policy.

In [8]:
def mem_fill(env, mem, steps=None, episodes=None, render=False):
        
    # Fill memory buffer using random policy
    tts_ = 0
    for e_ in itertools.count():
        if episodes is not None and e_ >= episodes:
            return
        
        S = env.reset()
        if render: env.render()
        
        for t_ in itertools.count():
        
            A = env.action_space.sample()    # random policy
            S_, R, done, _ = env.step(A)
            if render: env.render()
                
            mem.append(S, A, R, S_, done)
            
            S = S_
            
            tts_ += 1
            if steps is not None and tts_ >= steps:
                return
            
            if done:
                break

Function Approximators and Memory

Definition of a simple neural network with two hidden layers. As a side note, for classic control tasks tile coding works much better.

In [9]:
class TFNeuralNet():
    def __init__(self, nb_in, nb_hid_1, nb_hid_2, nb_out, lr):
        self.nb_in = nb_in
        self.nb_hid_1 = nb_hid_1
        self.nb_hid_2 = nb_hid_2
        self.nb_out = nb_out
        
        tf.reset_default_graph()
              
        self._x = tf.placeholder(name='xx', shape=[None, nb_in], dtype=tf.float32)
        self._y = tf.placeholder(name='yy', shape=[None, nb_out], dtype=tf.float32)

        self._h_hid_1 = tf.layers.dense(self._x, units=nb_hid_1,
                                        activation=tf.nn.relu, name='Hidden_1')
        self._h_hid_2 = tf.layers.dense(self._h_hid_1, units=nb_hid_2,
                                        activation=tf.nn.relu, name='Hidden_2')
        self._y_hat = tf.layers.dense(self._h_hid_2, units=nb_out,
                                      activation=None, name='Output')
        self._loss = tf.losses.mean_squared_error(self._y, self._y_hat)

        self._optimizer = tf.train.RMSPropOptimizer(learning_rate=lr)
        self._train_op = self._optimizer.minimize(self._loss)

        self._sess = tf.Session()
        self._sess.run(tf.global_variables_initializer())
        
    def backward(self, x, y):
        assert x.ndim == y.ndim == 2
        _, y_hat, loss = self._sess.run([self._train_op, self._y_hat, self._loss],
                                         feed_dict={self._x: x, self._y:y})
        return y_hat, loss
    
    def forward(self, x):
        return self._sess.run(self._y_hat, feed_dict={self._x: x})
    
    def save(self, filepath):
        saver = tf.train.Saver()
        saver.save(self._sess, filepath)
        
    def load(self, filepath):
        saver = tf.train.Saver()
        saver.restore(self._sess, filepath)

Function approximator wrapper. Mostly performs sanity check and input/output normalisation.

In [10]:
class TFFunctApprox():

    def __init__(self, model, st_low, st_high, rew_mean, rew_std, nb_actions):
        """Q-function approximator using Keras model

        Args:
            model: TFNeuralNet model
        """
        st_low = np.array(st_low)
        st_high = np.array(st_high)
        self._model = model
        
        assert st_low.ndim == 1 and st_low.shape == st_high.shape
        
        if len(st_low) != model.nb_in:
            raise ValueError('Input shape does not match state_space shape')

        if nb_actions != model.nb_out:
            raise ValueError('Output shape does not match action_space shape')

        # normalise inputs
        self._offsets = st_low + (st_high - st_low) / 2
        self._scales = 1 / ((st_high - st_low) / 2)
        
        self._rew_mean = rew_mean
        self._rew_std = rew_std

    def eval(self, states):
        assert isinstance(states, np.ndarray)
        assert states.ndim == 2

        inputs = (states - self._offsets) * self._scales

        y_hat = self._model.forward(inputs)
        
        return y_hat * self._rew_std + self._rew_mean   # de-normalise outputs

    def train(self, states, actions, targets):
        
        assert isinstance(states, np.ndarray)
        assert isinstance(actions, np.ndarray)
        assert isinstance(targets, np.ndarray)
        assert states.ndim == 2
        assert actions.ndim == 1
        assert targets.ndim == 1
        assert len(states) == len(actions) == len(targets)
        
        
        targets = (targets-self._rew_mean) / self._rew_std    # normalise

        inputs = (states - self._offsets) * self._scales
        all_targets = self._model.forward(inputs)       # already in normalised space
        all_targets[np.arange(len(all_targets)), actions] = targets
        self._model.backward(inputs, all_targets)
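
To make the normalisation above concrete, here is a small worked example (the bounds below are assumptions, roughly MountainCar's position range [-1.2, 0.6] and velocity range [-0.07, 0.07]):

import numpy as np

# Worked example of the input normalisation in TFFunctApprox (assumed bounds).
st_low  = np.array([-1.2, -0.07])
st_high = np.array([ 0.6,  0.07])

offsets = st_low + (st_high - st_low) / 2     # [-0.3, 0.0] -> centre of each range
scales  = 1 / ((st_high - st_low) / 2)        # [1/0.9, 1/0.07] -> inverse half-ranges

state = np.array([0.6, -0.07])                # right edge of position, lowest velocity
print((state - offsets) * scales)             # -> [ 1. -1.], i.e. mapped into [-1, 1]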

Memory replay buffer. Reasonably fast implementation that doesn't copy data when picking batches - this will be useful for Atari. A Python deque is very slow for this.

In [11]:
class Memory:
    """Circular buffer for DQN memory reply. Fairly fast."""

    def __init__(self, max_len, state_shape, state_dtype):
        """
        Args:
            max_len: maximum capacity
        """
        assert isinstance(max_len, int)
        assert max_len > 0

        self.max_len = max_len                      # maximum length        
        self._curr_insert_ptr = 0                   # index to insert next data sample
        self._curr_len = 0                          # number of currently stored elements

        state_arr_shape = [max_len] + list(state_shape)

        self._hist_St = np.zeros(state_arr_shape, dtype=state_dtype)
        self._hist_At = np.zeros(max_len, dtype=int)
        self._hist_Rt_1 = np.zeros(max_len, dtype=float)
        self._hist_St_1 = np.zeros(state_arr_shape, dtype=state_dtype)
        self._hist_done_1 = np.zeros(max_len, dtype=bool)

    def append(self, St, At, Rt_1, St_1, done_1):
        """Add one sample to memory, override oldest if max_len reached.

        Args:
            St [np.ndarray]   - state
            At [int]          - action
            Rt_1 [float]      - reward
            St_1 [np.ndarray] - next state
            done_1 [bool]       - next state terminal?
        """
        self._hist_St[self._curr_insert_ptr] = St
        self._hist_At[self._curr_insert_ptr] = At
        self._hist_Rt_1[self._curr_insert_ptr] = Rt_1
        self._hist_St_1[self._curr_insert_ptr] = St_1
        self._hist_done_1[self._curr_insert_ptr] = done_1
        
        if self._curr_len < self.max_len:                 # keep track of current length
            self._curr_len += 1
            
        self._curr_insert_ptr += 1                        # increment insertion pointer
        if self._curr_insert_ptr >= self.max_len:         # roll to zero if needed
            self._curr_insert_ptr = 0

    def __len__(self):
        """Number of samples in memory, 0 <= length <= max_len"""
        return self._curr_len

    def get_batch(self, batch_len):
        """Sample batch of data, with repetition

        Args:
            batch_len: nb of samples to pick

        Returns:
            states, actions, rewards, next_states, next_done, indices
            Each returned element is np.ndarray with length == batch_len
        """
        assert self._curr_len > 0
        assert batch_len > 0

        
        indices = np.random.randint(        # randint much faster than np.random.sample
            low=0, high=self._curr_len, size=batch_len, dtype=int)

        states = np.take(self._hist_St, indices, axis=0)
        actions = np.take(self._hist_At, indices, axis=0)
        rewards_1 = np.take(self._hist_Rt_1, indices, axis=0)
        states_1 = np.take(self._hist_St_1, indices, axis=0)
        dones_1 = np.take(self._hist_done_1, indices, axis=0)

        return states, actions, rewards_1, states_1, dones_1, indices


    
    def pick_last(self, nb):
        """Pick last nb elements from memory
        
        Returns:
            states, actions, rewards, next_states, done_1, indices
            Each returned element is np.ndarray with length == nb
        """
        assert nb <= self._curr_len
        
        start = self._curr_insert_ptr - nb                # inclusive
        end = self._curr_insert_ptr                       # not inclusive
        indices = np.array(range(start,end), dtype=int)   # indices to pick, can be neg.
        indices[indices < 0] += self._curr_len            # loop negative to positive
        
        states = np.take(self._hist_St, indices, axis=0)
        actions = np.take(self._hist_At, indices, axis=0)
        rewards_1 = np.take(self._hist_Rt_1, indices, axis=0)
        states_1 = np.take(self._hist_St_1, indices, axis=0)
        dones_1 = np.take(self._hist_done_1, indices, axis=0)
        
        return states, actions, rewards_1, states_1, dones_1, indices
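
A minimal usage sketch of the buffer above (shapes and numbers below are assumptions, matching the 2-D states used later for MountainCar and Pendulum):

import numpy as np

# Sketch only: fill a small buffer with fake transitions and sample a batch.
mem_demo = Memory(max_len=1000, state_shape=(2,), state_dtype=float)
for _ in range(50):
    s = np.random.uniform(-1, 1, size=2)                     # fake state
    s_next = s + np.random.normal(scale=0.01, size=2)        # fake next state
    mem_demo.append(St=s, At=np.random.randint(3), Rt_1=-1.0, St_1=s_next, done_1=False)

states, actions, rewards, n_states, dones, idx = mem_demo.get_batch(8)
print(states.shape, actions.shape, rewards.shape)            # (8, 2) (8,) (8,)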
    

Experiment Setup

Environment wrapper to skip frames. Also known as action repeat. Really helps with some environments, e.g. mountain car.

In [12]:
class WrapFrameSkip():
    def __init__(self, env, frameskip):
        assert frameskip >= 1
        self._env = env
        self._frameskip = frameskip
        self.observation_space = env.observation_space
        self.action_space = env.action_space
    
    def reset(self):
        return self._env.reset()
    
    def step(self, action):
        sum_rew = 0
        for _ in range(self._frameskip):
            obs, rew, done, info = self._env.step(action)
            sum_rew += rew
            if done: break
        return obs, sum_rew, done, info
    
    def render(self, mode='human'):
        return self._env.render(mode=mode)
        
    def close(self):
        self._env.close()               

This object is a basic data logger. If you don't want to generate plots, you can mostly ignore it.

In [13]:
class Trace():
    def __init__(self, eval_every, render=False, test_states=None, state_labels=None):
        
        if test_states is not None:
            assert test_states.ndim == 2
            
        self.enable_plotting = False
        
        self.eval_every = eval_every
        self.test_states = test_states
        self.state_labels = state_labels
        
        self.tstep = 0
        self.total_tstep = 0
        
        self.q_values = collections.OrderedDict()
        self.ep_rewards = collections.defaultdict(float)
        self.last_ep_reward = None
        
        self.states = []
        self.actions = []
        self.rewards = []  # t+1
        self.dones = []    # t+1
        self.epsilons = []

Callback which is called every single training iteration. It does the following:

  • fill up the trace object with basic logging data
  • print training progress
  • perform evaluation of test_states
  • optionally plot the agent state
In [14]:
def callback(total_time_step, tstep, st, act, rew_, done_,
             eps, ep_reward, model, memory, trace):
    """Called from gradient_MC after every episode.
    
    Params:
        episode [int] - episode number
        tstep [int]   - timestep within episode
        model [obj]   - function approximator
        trace [list]  - list to write results to"""
    
    assert total_time_step == trace.total_tstep
    
    trace.tstep = tstep
    
    trace.states.append(st)
    trace.actions.append(act)
    trace.rewards.append(rew_)
    trace.dones.append(done_)
    trace.epsilons.append(eps)
        
    if done_:
        trace.ep_rewards[total_time_step] = ep_reward
        trace.last_ep_reward = ep_reward
            
    #
    #   Print, Evaluate, Plot
    #
    if (trace.eval_every is not None) and (trace.total_tstep % trace.eval_every == 0):
        
        last_ep_rew = trace.last_ep_reward
        reward_str = str(round(last_ep_rew, 3)) if last_ep_rew is not None else 'None'
        print(f'wall: {datetime.datetime.now().strftime("%H:%M:%S")}   '
              f'ep: {len(trace.ep_rewards):3}   tstep: {tstep:4}   '
              f'total tstep: {trace.total_tstep:6}   '
              f'eps: {eps:5.3f}   reward: {reward_str}   ')

        if len(st) == 2:
            # We are working with 2D environment,
            # eval. Q-Value function across whole state space
            q_arr = helpers.eval_state_action_space(model, env, split=[128,128])
            trace.q_values[trace.total_tstep] = q_arr
        else:
            # Environment is not 2D,
            # eval. on pre-defined random sample of states
            if trace.test_states is not None:
                y_hat = model.eval(trace.test_states)
                trace.q_values[trace.total_tstep] = y_hat

        if trace.enable_plotting:
            helpers.plot_all(env, model, memory, trace)
            print('■'*80)

    trace.total_tstep += 1

MountainCar

Start with the Mountain Car environment.

Notes:

  • OpenAI Gym imposes a 200-step limit, which makes this environment extremely hard to solve with DQN - we remove this limit
  • Frame skip (aka action repeat) is technically not necessary, but makes training much easier; a good value is frameskip=4

The crux of Mountain Car is that the agent gets an extremely sparse reward only on a successful exit. The shortest possible exit trajectory is roughly 120-130 steps. If we impose the 200-step limit and don't repeat actions, the agent has to perform a series of 120-130 actions almost exactly right in a row, with very little margin for error; if it doesn't succeed, it gets reset back to the start at step 200. Doing this with a random exploratory policy is virtually impossible.

Note that Mountain Car can still be solved easily with linear function approximation (local updates) and optimistic initialisation, which facilitates continued exploration; this is not possible with DQN. A rough sketch of that alternative follows.
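
For contrast, a rough sketch of what that alternative might look like (not used anywhere in this notebook; all constants and shapes below are assumptions). With Mountain Car's reward of -1 per step, zero-initialised linear weights are already optimistic, which keeps the agent exploring until it finds the exit:

import numpy as np

# Sketch only: linear Q-function over tile-coded features with optimistic (zero) init.
N_TILINGS, N_TILES = 8, 8                         # 8 tilings of 8x8 tiles (assumed)

def tile_features(s, low, high, n_tilings=N_TILINGS, n_tiles=N_TILES):
    """Return one active tile index per tiling for a 2-D state."""
    s = (np.asarray(s) - low) / (high - low)      # scale state to [0, 1]
    idx = []
    for t in range(n_tilings):
        offset = t / (n_tilings * n_tiles)        # shift each tiling by a fraction of a tile
        coords = np.floor((s + offset) * n_tiles).clip(0, n_tiles - 1).astype(int)
        idx.append(t * n_tiles * n_tiles + coords[0] * n_tiles + coords[1])
    return np.array(idx)

n_actions = 3
w = np.zeros((N_TILINGS * N_TILES * N_TILES, n_actions))   # zero init is optimistic here

def q(s, low, high):
    return w[tile_features(s, low, high)].sum(axis=0)       # Q(s, .) for all actions

# Semi-gradient Q-learning update for one transition (S, A, R, S_, done):
#     feats  = tile_features(S, low, high)
#     target = R if done else R + gamma * q(S_, low, high).max()
#     w[feats, A] += (alpha / N_TILINGS) * (target - q(S, low, high)[A])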

In [15]:
def experiment_mountaincar():
    neural_net = TFNeuralNet(nb_in=2, nb_hid_1=64, nb_hid_2=64, nb_out=3, lr=0.00025)
    
    model = TFFunctApprox(neural_net,
                          env.observation_space.low,
                          env.observation_space.high,
                          rew_mean=-50,
                          rew_std=15,
                          nb_actions=env.action_space.n)
    
    mem = Memory(max_len=100000, state_shape=(2,), state_dtype=float)
    mem_fill(env, mem, steps=10000)
    test_states, _, _, _, _, _ = mem.get_batch(10)
    
    trace = Trace(eval_every=1000, test_states=test_states)
    
    return trace, model, mem

Set up the experiment

In [16]:
env = gym.make('MountainCar-v0').env  # remove 200 step limit
env = WrapFrameSkip(env, frameskip=4)
trace, model, mem = experiment_mountaincar()
# trace.enable_plotting = True
WARN: gym.spaces.Box autodetected dtype as <class 'numpy.float32'>. Please provide explicit dtype.

Train the agent

In [17]:
tts = q_learning(env, frames=25000, gamma=.99,
                 eps_decay_steps=20000, eps_target=0.1, batch_size=4096,
                 model=model, mem=mem, callback=callback, trace=trace)
wall: 10:03:27   ep:   0   tstep:    0   total tstep:      0   eps: 1.000   reward: None   
wall: 10:03:32   ep:   0   tstep: 1000   total tstep:   1000   eps: 0.955   reward: None   
wall: 10:03:37   ep:   3   tstep:   75   total tstep:   2000   eps: 0.910   reward: -1222.0   
wall: 10:03:42   ep:   8   tstep:   29   total tstep:   3000   eps: 0.865   reward: -452.0   
wall: 10:03:47   ep:  15   tstep:   35   total tstep:   4000   eps: 0.820   reward: -409.0   
wall: 10:03:51   ep:  22   tstep:  108   total tstep:   5000   eps: 0.775   reward: -486.0   
wall: 10:03:56   ep:  33   tstep:   82   total tstep:   6000   eps: 0.730   reward: -299.0   
wall: 10:04:01   ep:  42   tstep:   68   total tstep:   7000   eps: 0.685   reward: -359.0   
wall: 10:04:06   ep:  53   tstep:   69   total tstep:   8000   eps: 0.640   reward: -236.0   
wall: 10:04:11   ep:  68   tstep:   35   total tstep:   9000   eps: 0.595   reward: -235.0   
wall: 10:04:16   ep:  84   tstep:   25   total tstep:  10000   eps: 0.550   reward: -327.0   
wall: 10:04:21   ep: 102   tstep:   21   total tstep:  11000   eps: 0.505   reward: -204.0   
wall: 10:04:26   ep: 121   tstep:   38   total tstep:  12000   eps: 0.460   reward: -167.0   
wall: 10:04:32   ep: 143   tstep:   21   total tstep:  13000   eps: 0.415   reward: -200.0   
wall: 10:04:37   ep: 165   tstep:   26   total tstep:  14000   eps: 0.370   reward: -230.0   
wall: 10:04:42   ep: 190   tstep:   15   total tstep:  15000   eps: 0.325   reward: -153.0   
wall: 10:04:47   ep: 215   tstep:   28   total tstep:  16000   eps: 0.280   reward: -166.0   
wall: 10:04:53   ep: 242   tstep:   11   total tstep:  17000   eps: 0.235   reward: -203.0   
wall: 10:04:58   ep: 269   tstep:    0   total tstep:  18000   eps: 0.190   reward: -140.0   
wall: 10:05:03   ep: 298   tstep:   24   total tstep:  19000   eps: 0.145   reward: -197.0   
wall: 10:05:08   ep: 328   tstep:   23   total tstep:  20000   eps: 0.100   reward: -177.0   
wall: 10:05:13   ep: 362   tstep:   11   total tstep:  21000   eps: 0.100   reward: -210.0   
wall: 10:05:18   ep: 394   tstep:   25   total tstep:  22000   eps: 0.100   reward: -85.0   
wall: 10:05:24   ep: 427   tstep:   28   total tstep:  23000   eps: 0.100   reward: -115.0   
wall: 10:05:30   ep: 458   tstep:   28   total tstep:  24000   eps: 0.100   reward: -106.0   

Optional: train some more

In [18]:
# tts = q_learning(env, frames=5000, gamma=.99,
#                  eps_decay_steps=20000, eps_target=0.1, batch_size=4096,
#                  model=model, mem=mem, start_step=tts, callback=callback, trace=trace)

Optional: plot the agent state

In [19]:
# helpers.plot_all(env, model, mem, trace, print_=True)

Save weights

In [20]:
model._model.save('./tf_models/MountainCar.ckpt')

Load weights

In [21]:
model._model.load('./tf_models/MountainCar.ckpt')
INFO:tensorflow:Restoring parameters from ./tf_models/MountainCar.ckpt

Enjoy trained agent

In [22]:
# In Jupyter, press square '■' in top menu to quit animation
try: evaluate(env, model, frames=float('inf'), eps=0.0, render=True)
except KeyboardInterrupt: pass
finally: env.close()

Result

Expected agent behaviour after 15000 iterations

If you enable plotting, then the output should look roughly as follows

wall: 16:45:30   ep:  91   tstep:   22   total tstep:  15000   eps: 0.325   reward: -134.0

Where:

  • Q_Max - plot of the q-function maximum over possible actions
  • Trajectory - top-down view of the agent trajectory. The agent starts roughly in the centre and exits to the right. Colour indicates action, see the Policy plot
  • Q Values - plot of average q-values for a set of randomly selected states - shows how the q-values develop during training
  • Policy - top-down plot of the agent policy. Green is accelerate right, red is accelerate left, blue is neutral
  • Memory Buffer - snapshot of the memory buffer, same format as the Trajectory plot
  • Episode Rewards - individual episode rewards (blue dots) and their running average (yellow) over the training period

Pendulum

Pendulum is, in my opinion, actually quite a bit easier than Mountain Car, mainly because of its rich reward signal.

Notes:

  • OpenAI Pendulum has continuous actions, while basic DQN requires discrete actions - we discretise them
  • by default the state is a 3-vector [cos(angle), sin(angle), velocity] - to be able to plot the whole state space we reduce it to [angle, velocity]
In [41]:
class Pendulum2DEnv():
    def __init__(self):
        self._env = gym.make('Pendulum-v0')
        
        self.observation_space = gym.spaces.Box(
            low=np.array([-np.pi, -8.0]), high=np.array([np.pi, 8.0]), dtype=np.float32 )
        self.action_space = gym.spaces.Discrete(n=3)
        
    def reset(self):
        cos, sin, vel = self._env.reset()
        theta = np.arctan2(sin, cos)
        return np.array([theta, vel])
        
    def step(self, action):
        torques = [-2.0, 0.0, 2.0]
        # torques = [-2.0, -.5, 0.0, .5, 2.0]
        joint_effort = torques[action]
        
        obs, rew, done, _ = self._env.step([joint_effort])
        cos, sin, vel = obs
        theta = np.arctan2(sin, cos)
        return np.array([theta, vel]), rew, done, obs
    
    def render(self, mode='human'):
        return self._env.render(mode=mode)
        
    def close(self):
        self._env.close()
In [42]:
def experiment_pendulum():
    neural_net = TFNeuralNet(nb_in=2, nb_hid_1=64, nb_hid_2=64, nb_out=3, lr=0.00025)
    
    model = TFFunctApprox(neural_net,
                          env.observation_space.low,
                          env.observation_space.high,
                          rew_mean=-210,
                          rew_std=50,
                          nb_actions=env.action_space.n)
    
    mem = Memory(max_len=100000, state_shape=(2,), state_dtype=float)
    mem_fill(env, mem, steps=10000)
    test_states, _, _, _, _, _ = mem.get_batch(10)
    
    trace = Trace(eval_every=1000, test_states=test_states)
    
    return trace, model, mem

Set up the experiment

In [43]:
env = Pendulum2DEnv()
trace, model, mem = experiment_pendulum()
# trace.enable_plotting = True

Train the agent

In [44]:
tts = q_learning(env, frames=25000, gamma=.99,
                 eps_decay_steps=20000, eps_target=0.1, batch_size=4096,
                 model=model, mem=mem, callback=callback, trace=trace)
wall: 10:11:12   ep:   0   tstep:    0   total tstep:      0   eps: 1.000   reward: None   
wall: 10:11:16   ep:   5   tstep:    0   total tstep:   1000   eps: 0.955   reward: -932.75   
wall: 10:11:21   ep:  10   tstep:    0   total tstep:   2000   eps: 0.910   reward: -1349.562   
wall: 10:11:26   ep:  15   tstep:    0   total tstep:   3000   eps: 0.865   reward: -1182.014   
wall: 10:11:31   ep:  20   tstep:    0   total tstep:   4000   eps: 0.820   reward: -765.12   
wall: 10:11:36   ep:  25   tstep:    0   total tstep:   5000   eps: 0.775   reward: -758.568   
wall: 10:11:41   ep:  30   tstep:    0   total tstep:   6000   eps: 0.730   reward: -971.488   
wall: 10:11:47   ep:  35   tstep:    0   total tstep:   7000   eps: 0.685   reward: -867.18   
wall: 10:11:52   ep:  40   tstep:    0   total tstep:   8000   eps: 0.640   reward: -1113.014   
wall: 10:11:57   ep:  45   tstep:    0   total tstep:   9000   eps: 0.595   reward: -662.024   
wall: 10:12:03   ep:  50   tstep:    0   total tstep:  10000   eps: 0.550   reward: -527.786   
wall: 10:12:08   ep:  55   tstep:    0   total tstep:  11000   eps: 0.505   reward: -370.842   
wall: 10:12:14   ep:  60   tstep:    0   total tstep:  12000   eps: 0.460   reward: -254.44   
wall: 10:12:19   ep:  65   tstep:    0   total tstep:  13000   eps: 0.415   reward: -370.899   
wall: 10:12:24   ep:  70   tstep:    0   total tstep:  14000   eps: 0.370   reward: -252.474   
wall: 10:12:30   ep:  75   tstep:    0   total tstep:  15000   eps: 0.325   reward: -123.758   
wall: 10:12:36   ep:  80   tstep:    0   total tstep:  16000   eps: 0.280   reward: -524.958   
wall: 10:12:41   ep:  85   tstep:    0   total tstep:  17000   eps: 0.235   reward: -123.024   
wall: 10:12:47   ep:  90   tstep:    0   total tstep:  18000   eps: 0.190   reward: -128.003   
wall: 10:12:52   ep:  95   tstep:    0   total tstep:  19000   eps: 0.145   reward: -119.915   
wall: 10:12:57   ep: 100   tstep:    0   total tstep:  20000   eps: 0.100   reward: -116.671   
wall: 10:13:03   ep: 105   tstep:    0   total tstep:  21000   eps: 0.100   reward: -350.965   
wall: 10:13:08   ep: 110   tstep:    0   total tstep:  22000   eps: 0.100   reward: -3.165   
wall: 10:13:14   ep: 115   tstep:    0   total tstep:  23000   eps: 0.100   reward: -241.848   
wall: 10:13:20   ep: 120   tstep:    0   total tstep:  24000   eps: 0.100   reward: -125.71   

Optional: train some more

In [45]:
# tts = q_learning(env, frames=5000, gamma=.99,
#                  eps_decay_steps=20000, eps_target=0.1, batch_size=4096,
#                  model=model, mem=mem, start_step=tts, callback=callback, trace=trace)

Optional: plot the agent state

In [46]:
# helpers.plot_all(env, model, mem, trace, print_=True)

Save weights

In [47]:
model._model.save('./tf_models/Pendulum.ckpt')

Load weights

In [48]:
model._model.load('./tf_models/Pendulum.ckpt')
INFO:tensorflow:Restoring parameters from ./tf_models/Pendulum.ckpt

Enjoy trained agent

In [49]:
# In Jupyter, press square '■' in top menu to quit animation
try: evaluate(env, model, frames=float('inf'), eps=0.0, render=True)
except KeyboardInterrupt: pass
finally: env.close()

Result

Expected agent behaviour after 25000 iterations

If you enable plotting, then the output should look roughly as follows

wall: 08:13:49   ep: 250   tstep:   99   total tstep:  25000   eps: 0.100   reward: -227.530

Where:

  • Q_Max - plot of the q-function maximum over possible actions
  • Trajectory - top-down view of the agent trajectory. The agent starts roughly in the centre and exits to the right. Colour indicates action, see the Policy plot
  • Q Values - plot of average q-values for a set of randomly selected states - shows how the q-values develop during training
  • Policy - top-down plot of the agent policy. Green is accelerate right, red is accelerate left, blue is neutral
  • Memory Buffer - snapshot of the memory buffer, same format as the Trajectory plot
  • Episode Rewards - individual episode rewards (blue dots) and their running average (yellow) over the training period

Cartpole

Everybody loves cartpole!

Notes:

  • this environment no longer has a 2D state space, so we can't plot all of it :( - as a result the plots look different

This environment has a peculiar dynamic where the positive reward, together with the uncorrected maximisation bias in the Q-function (we don't use Double DQN), causes a runaway Q-value effect where the Q-values increase continuously. This is OK for now, because the RELATIVE Q-values are still good enough to select the correct action. I think using a target network along with Double DQN would help with this problem; a rough sketch of the Double-DQN target is shown below.
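
For reference, a minimal sketch of the Double-DQN target (not used in this notebook; target_model is again a hypothetical frozen copy of the approximator). The action is selected by the online network and evaluated by the frozen one, which removes most of the maximisation bias:

import numpy as np

# Sketch only: Double-DQN target computation (assumes a frozen copy
# 'target_model' of the online approximator - hypothetical here).
def double_dqn_targets(model, target_model, rewards, n_states, dones, gamma):
    best_a = np.argmax(model.eval(n_states), axis=-1)           # action choice: online net
    q_next = target_model.eval(n_states)                        # action value: frozen net
    targets = rewards + gamma * q_next[np.arange(len(q_next)), best_a]
    targets[dones] = rewards[dones]                             # terminal: target is just R
    return targets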

In [51]:
def experiment_cartpole():
    neural_net = TFNeuralNet(nb_in=4, nb_hid_1=64, nb_hid_2=64, nb_out=2, lr=0.00025)
    
    model = TFFunctApprox(neural_net,
                          st_low=np.array([-.2, -1.0, -0.15, -1.0]),
                          st_high=np.array([.2, 1.0, 0.15, 1.0]),
                          rew_mean=0,
                          rew_std=1,
                          nb_actions=env.action_space.n)
    
    mem = Memory(max_len=100000, state_shape=(4,), state_dtype=float)
    mem_fill(env, mem, steps=10000)
    test_states, _, _, _, _, _ = mem.get_batch(10)
    
    trace = Trace(eval_every=1000, test_states=test_states)
    
    return trace, model, mem

Set up the experiment

In [52]:
env = gym.make('CartPole-v0')
trace, model, mem = experiment_cartpole()
# trace.enable_plotting = True

Train the agent

In [53]:
tts = q_learning(env, frames=25000, gamma=.99,
                 eps_decay_steps=20000, eps_target=0.1, batch_size=4096,
                 model=model, mem=mem, callback=callback, trace=trace)
wall: 10:14:17   ep:   0   tstep:    0   total tstep:      0   eps: 1.000   reward: None   
wall: 10:14:21   ep:  47   tstep:    1   total tstep:   1000   eps: 0.955   reward: 9.0   
wall: 10:14:26   ep:  90   tstep:   10   total tstep:   2000   eps: 0.910   reward: 47.0   
wall: 10:14:31   ep: 131   tstep:    5   total tstep:   3000   eps: 0.865   reward: 27.0   
wall: 10:14:36   ep: 156   tstep:   68   total tstep:   4000   eps: 0.820   reward: 29.0   
wall: 10:14:42   ep: 181   tstep:    5   total tstep:   5000   eps: 0.775   reward: 41.0   
wall: 10:14:47   ep: 205   tstep:   71   total tstep:   6000   eps: 0.730   reward: 16.0   
wall: 10:14:52   ep: 225   tstep:   50   total tstep:   7000   eps: 0.685   reward: 31.0   
wall: 10:14:57   ep: 241   tstep:  151   total tstep:   8000   eps: 0.640   reward: 10.0   
wall: 10:15:03   ep: 255   tstep:   93   total tstep:   9000   eps: 0.595   reward: 80.0   
wall: 10:15:08   ep: 267   tstep:   55   total tstep:  10000   eps: 0.550   reward: 134.0   
wall: 10:15:13   ep: 276   tstep:  144   total tstep:  11000   eps: 0.505   reward: 101.0   
wall: 10:15:18   ep: 285   tstep:   38   total tstep:  12000   eps: 0.460   reward: 194.0   
wall: 10:15:24   ep: 292   tstep:   74   total tstep:  13000   eps: 0.415   reward: 200.0   
wall: 10:15:29   ep: 298   tstep:   16   total tstep:  14000   eps: 0.370   reward: 200.0   
wall: 10:15:34   ep: 304   tstep:   68   total tstep:  15000   eps: 0.325   reward: 143.0   
wall: 10:15:40   ep: 309   tstep:   68   total tstep:  16000   eps: 0.280   reward: 200.0   
wall: 10:15:45   ep: 314   tstep:  125   total tstep:  17000   eps: 0.235   reward: 200.0   
wall: 10:15:51   ep: 319   tstep:  125   total tstep:  18000   eps: 0.190   reward: 200.0   
wall: 10:15:56   ep: 324   tstep:  125   total tstep:  19000   eps: 0.145   reward: 200.0   
wall: 10:16:02   ep: 329   tstep:  125   total tstep:  20000   eps: 0.100   reward: 200.0   
wall: 10:16:07   ep: 334   tstep:  125   total tstep:  21000   eps: 0.100   reward: 200.0   
wall: 10:16:13   ep: 339   tstep:  125   total tstep:  22000   eps: 0.100   reward: 200.0   
wall: 10:16:19   ep: 344   tstep:  125   total tstep:  23000   eps: 0.100   reward: 200.0   
wall: 10:16:24   ep: 349   tstep:  125   total tstep:  24000   eps: 0.100   reward: 200.0   

Optional: train some more

In [54]:
# tts = q_learning(env, frames=5000, gamma=.99,
#                  eps_decay_steps=20000, eps_target=0.1, batch_size=4096,
#                  model=model, mem=mem, start_step=tts, callback=callback, trace=trace)

Optional: plot the agent state

In [55]:
# helpers.plot_all(env, model, mem, trace, print_=True)

Save weights

In [56]:
model._model.save('./tf_models/CartPole.ckpt')

Load weights

In [57]:
model._model.load('./tf_models/CartPole.ckpt')
INFO:tensorflow:Restoring parameters from ./tf_models/CartPole.ckpt

Enjoy trained agent

In [58]:
# In Jupyter, press square '■' in top menu to quit animation
try: evaluate(env, model, frames=float('inf'), eps=0.0, render=True)
except KeyboardInterrupt: pass
finally: env.close()

Result

Expected agent behaviour after 25000 iterations

If you enable plotting, then the output should look roughly as follows

wall: 10:16:24   ep: 349   tstep:  125   total tstep:  24000   eps: 0.100   reward: 200.0

Where:

  • Q Values - plot of average q-values for a set of states - shows how the q-values develop during training
  • Episode Rewards - individual episode rewards (blue dots) and their running average (yellow) over the training period
  • Trajectory - set of state variables over time within one or more episodes

Acrobot

This is a variation of the pendulum environment.

In [60]:
def experiment_acrobot():
    neural_net = TFNeuralNet(nb_in=6, nb_hid_1=64, nb_hid_2=64, nb_out=3, lr=0.00025)
    
    model = TFFunctApprox(neural_net,
                          env.observation_space.low,
                          env.observation_space.high,
                          rew_mean=-70,
                          rew_std=10,
                          nb_actions=env.action_space.n)
    
    mem = Memory(max_len=100000, state_shape=(6,), state_dtype=float)
    mem_fill(env, mem, steps=10000)
    test_states, _, _, _, _, _ = mem.get_batch(10)
    
    trace = Trace(eval_every=1000, test_states=test_states)
    
    return trace, model, mem

Set up the experiment

In [61]:
env = gym.make('Acrobot-v1')
trace, model, mem = experiment_acrobot()
# trace.enable_plotting = True
WARN: gym.spaces.Box autodetected dtype as <class 'numpy.float32'>. Please provide explicit dtype.

Train the agent

In [62]:
tts = q_learning(env, frames=25000, gamma=.99,
                 eps_decay_steps=20000, eps_target=0.1, batch_size=4096,
                 model=model, mem=mem, callback=callback, trace=trace)
wall: 10:18:58   ep:   0   tstep:    0   total tstep:      0   eps: 1.000   reward: None   
wall: 10:19:03   ep:   2   tstep:    0   total tstep:   1000   eps: 0.955   reward: -500.0   
wall: 10:19:09   ep:   4   tstep:    0   total tstep:   2000   eps: 0.910   reward: -500.0   
wall: 10:19:15   ep:   6   tstep:    0   total tstep:   3000   eps: 0.865   reward: -500.0   
wall: 10:19:20   ep:   8   tstep:    0   total tstep:   4000   eps: 0.820   reward: -500.0   
wall: 10:19:26   ep:  10   tstep:  270   total tstep:   5000   eps: 0.775   reward: -287.0   
wall: 10:19:32   ep:  13   tstep:   25   total tstep:   6000   eps: 0.730   reward: -354.0   
wall: 10:19:38   ep:  16   tstep:   56   total tstep:   7000   eps: 0.685   reward: -337.0   
wall: 10:19:44   ep:  20   tstep:   61   total tstep:   8000   eps: 0.640   reward: -286.0   
wall: 10:19:50   ep:  25   tstep:   54   total tstep:   9000   eps: 0.595   reward: -241.0   
wall: 10:19:55   ep:  30   tstep:  100   total tstep:  10000   eps: 0.550   reward: -163.0   
wall: 10:20:01   ep:  36   tstep:   88   total tstep:  11000   eps: 0.505   reward: -118.0   
wall: 10:20:07   ep:  42   tstep:  145   total tstep:  12000   eps: 0.460   reward: -175.0   
wall: 10:20:13   ep:  49   tstep:  106   total tstep:  13000   eps: 0.415   reward: -105.0   
wall: 10:20:19   ep:  56   tstep:  116   total tstep:  14000   eps: 0.370   reward: -133.0   
wall: 10:20:26   ep:  64   tstep:   76   total tstep:  15000   eps: 0.325   reward: -141.0   
wall: 10:20:32   ep:  70   tstep:   30   total tstep:  16000   eps: 0.280   reward: -147.0   
wall: 10:20:38   ep:  77   tstep:   46   total tstep:  17000   eps: 0.235   reward: -120.0   
wall: 10:20:44   ep:  84   tstep:  141   total tstep:  18000   eps: 0.190   reward: -142.0   
wall: 10:20:51   ep:  93   tstep:   74   total tstep:  19000   eps: 0.145   reward: -100.0   
wall: 10:20:57   ep: 103   tstep:   80   total tstep:  20000   eps: 0.100   reward: -87.0   
wall: 10:21:03   ep: 114   tstep:   44   total tstep:  21000   eps: 0.100   reward: -88.0   
wall: 10:21:10   ep: 126   tstep:   59   total tstep:  22000   eps: 0.100   reward: -83.0   
wall: 10:21:16   ep: 133   tstep:  153   total tstep:  23000   eps: 0.100   reward: -153.0   
wall: 10:21:23   ep: 142   tstep:   82   total tstep:  24000   eps: 0.100   reward: -105.0   

Optional: train some more

In [63]:
# tts = q_learning(env, frames=5000, gamma=.99,
#                  eps_decay_steps=20000, eps_target=0.1, batch_size=4096,
#                  model=model, mem=mem, start_step=tts, callback=callback, trace=trace)

Optional: plot the agent state

In [64]:
# helpers.plot_all(env, model, mem, trace, print_=True)

Save weights

In [65]:
model._model.save('./tf_models/Acrobot.ckpt')

Load weights

In [66]:
model._model.load('./tf_models/Acrobot.ckpt')
INFO:tensorflow:Restoring parameters from ./tf_models/Acrobot.ckpt

Enjoy trained agent

In [67]:
# In Jupyter, press square '■' in top menu to quit animation
try: evaluate(env, model, frames=float('inf'), eps=0.0, render=True)
except KeyboardInterrupt: pass
finally: env.close()

Result

Expected agent behaviour after 25000 iterations

If you enable plotting, then the output should look roughly as follows

wall: 09:23:11   ep: 332   tstep:   31   total tstep:  24000   eps: 0.100   reward: -90.0

Where:

  • Q Values - plot of average q-values for a set of states - shows how the q-values develop during training
  • Episode Rewards - individual episode rewards (blue dots) and their running average (yellow) over the training period
  • Trajectory - set of state variables over time within one or more episodes

Lunar Lander

In [68]:
def experiment_lunarlander():
    neural_net = TFNeuralNet(nb_in=8, nb_hid_1=64, nb_hid_2=64, nb_out=4, lr=0.00025)
    
    model = TFFunctApprox(neural_net,
                          st_low=np.array([-1., -1., -1., -1., -1., -1., -1., -1.]),
                          st_high=np.array([ 1.,  1.,  1.,  1.,  1.,  1.,  1.,  1.]),
                          rew_mean=0,
                          rew_std=1,
                          nb_actions=env.action_space.n)
    
    mem = Memory(max_len=100000, state_shape=(8,), state_dtype=float)
    mem_fill(env, mem, steps=10000)
    
    trace = Trace(eval_every=1000,
                  test_states=np.array([[0, 1.4, 0, 0, 0, 0, 0, 0],     # init
                                        [0, 0.7, 0, 0, 0, 0, 0, 0],     # half way
                                        [0, 0.0, 0, 0, 0, 0, 0, 0],]),  # landing pad
                  state_labels=['Pos.x', 'Pos.y', 'Vel.x', 'Vel.y',
                                'Angle', 'Ang. Vel', 'Left Leg', 'Right Leg'])
    
    return trace, model, mem

Set up the experiment

In [69]:
env = gym.make('LunarLander-v2')
#env = WrapFrameSkip(env, frameskip=4)
trace, model, mem = experiment_lunarlander()
# trace.enable_plotting = True

Train the agent

In [70]:
tts = q_learning(env, frames=200000, gamma=.99,
                 eps_decay_steps=50000, eps_target=0.1, batch_size=4096,
                 model=model, mem=mem, callback=callback, trace=trace)
wall: 10:22:41   ep:   0   tstep:    0   total tstep:      0   eps: 1.000   reward: None   
wall: 10:22:46   ep:  10   tstep:   20   total tstep:   1000   eps: 0.982   reward: -115.29   
wall: 10:22:52   ep:  21   tstep:   31   total tstep:   2000   eps: 0.964   reward: -271.343   
wall: 10:22:58   ep:  31   tstep:   33   total tstep:   3000   eps: 0.946   reward: -218.339   
wall: 10:23:03   ep:  40   tstep:  101   total tstep:   4000   eps: 0.928   reward: -206.543   
wall: 10:23:09   ep:  52   tstep:   18   total tstep:   5000   eps: 0.910   reward: -112.62   
wall: 10:23:15   ep:  63   tstep:   30   total tstep:   6000   eps: 0.892   reward: -101.06   
wall: 10:23:21   ep:  74   tstep:   13   total tstep:   7000   eps: 0.874   reward: -188.096   
wall: 10:23:27   ep:  83   tstep:   27   total tstep:   8000   eps: 0.856   reward: -126.304   
wall: 10:23:33   ep:  93   tstep:   92   total tstep:   9000   eps: 0.838   reward: -106.536   
wall: 10:23:39   ep: 104   tstep:   19   total tstep:  10000   eps: 0.820   reward: -73.858   
wall: 10:23:45   ep: 113   tstep:   33   total tstep:  11000   eps: 0.802   reward: -295.832   
wall: 10:23:51   ep: 124   tstep:   81   total tstep:  12000   eps: 0.784   reward: -77.515   
wall: 10:23:57   ep: 133   tstep:  111   total tstep:  13000   eps: 0.766   reward: -132.166   
wall: 10:24:03   ep: 142   tstep:   16   total tstep:  14000   eps: 0.748   reward: -69.937   
wall: 10:24:10   ep: 146   tstep:  650   total tstep:  15000   eps: 0.730   reward: -46.965   
wall: 10:24:16   ep: 153   tstep:   35   total tstep:  16000   eps: 0.712   reward: -81.782   
wall: 10:24:23   ep: 160   tstep:  137   total tstep:  17000   eps: 0.694   reward: -230.064   
wall: 10:24:29   ep: 168   tstep:   45   total tstep:  18000   eps: 0.676   reward: -25.934   
wall: 10:24:35   ep: 177   tstep:   47   total tstep:  19000   eps: 0.658   reward: -77.265   
wall: 10:24:41   ep: 186   tstep:   36   total tstep:  20000   eps: 0.640   reward: -78.109   
wall: 10:24:48   ep: 193   tstep:  130   total tstep:  21000   eps: 0.622   reward: -22.37   
wall: 10:24:54   ep: 201   tstep:  135   total tstep:  22000   eps: 0.604   reward: -0.823   
wall: 10:25:00   ep: 210   tstep:    8   total tstep:  23000   eps: 0.586   reward: -39.496   
wall: 10:25:07   ep: 214   tstep:  577   total tstep:  24000   eps: 0.568   reward: -26.854   
wall: 10:25:14   ep: 216   tstep:  706   total tstep:  25000   eps: 0.550   reward: -34.765   
wall: 10:25:22   ep: 217   tstep:  706   total tstep:  26000   eps: 0.532   reward: 111.061   
wall: 10:25:29   ep: 222   tstep:  154   total tstep:  27000   eps: 0.514   reward: -107.679   
wall: 10:25:36   ep: 226   tstep:  131   total tstep:  28000   eps: 0.496   reward: -298.987   
wall: 10:25:43   ep: 230   tstep:  218   total tstep:  29000   eps: 0.478   reward: -209.019   
wall: 10:25:49   ep: 235   tstep:   75   total tstep:  30000   eps: 0.460   reward: -169.042   
wall: 10:25:56   ep: 239   tstep:   24   total tstep:  31000   eps: 0.442   reward: -161.023   
wall: 10:26:03   ep: 242   tstep:  379   total tstep:  32000   eps: 0.424   reward: -45.139   
wall: 10:26:11   ep: 244   tstep:  333   total tstep:  33000   eps: 0.406   reward: -30.302   
wall: 10:26:19   ep: 245   tstep:  996   total tstep:  34000   eps: 0.388   reward: -182.147   
wall: 10:26:27   ep: 246   tstep:  996   total tstep:  35000   eps: 0.370   reward: -2.856   
wall: 10:26:34   ep: 250   tstep:  187   total tstep:  36000   eps: 0.352   reward: -3.681   
wall: 10:26:41   ep: 251   tstep:  496   total tstep:  37000   eps: 0.334   reward: -287.423   
wall: 10:26:50   ep: 252   tstep:  496   total tstep:  38000   eps: 0.316   reward: -25.345   
wall: 10:26:58   ep: 253   tstep:  496   total tstep:  39000   eps: 0.298   reward: 5.965   
wall: 10:27:05   ep: 255   tstep:  325   total tstep:  40000   eps: 0.280   reward: -91.694   
wall: 10:27:14   ep: 256   tstep:  325   total tstep:  41000   eps: 0.262   reward: -15.338   
wall: 10:27:21   ep: 257   tstep:  325   total tstep:  42000   eps: 0.244   reward: 4.851   
wall: 10:27:30   ep: 258   tstep:  325   total tstep:  43000   eps: 0.226   reward: 44.291   
wall: 10:27:37   ep: 259   tstep:  325   total tstep:  44000   eps: 0.208   reward: 144.273   
wall: 10:27:46   ep: 260   tstep:  325   total tstep:  45000   eps: 0.190   reward: 94.646   
wall: 10:27:54   ep: 261   tstep:  325   total tstep:  46000   eps: 0.172   reward: 28.112   
wall: 10:28:02   ep: 262   tstep:  325   total tstep:  47000   eps: 0.154   reward: 62.216   
wall: 10:28:10   ep: 263   tstep:  399   total tstep:  48000   eps: 0.136   reward: -167.148   
wall: 10:28:18   ep: 264   tstep:  740   total tstep:  49000   eps: 0.118   reward: 216.762   
wall: 10:28:27   ep: 265   tstep:  740   total tstep:  50000   eps: 0.100   reward: 116.882   
wall: 10:28:35   ep: 266   tstep:  740   total tstep:  51000   eps: 0.100   reward: 110.934   
wall: 10:28:43   ep: 267   tstep:  740   total tstep:  52000   eps: 0.100   reward: 37.194   
wall: 10:28:52   ep: 268   tstep:  740   total tstep:  53000   eps: 0.100   reward: -26.579   
wall: 10:29:00   ep: 269   tstep:  740   total tstep:  54000   eps: 0.100   reward: 45.556   
wall: 10:29:09   ep: 270   tstep:  740   total tstep:  55000   eps: 0.100   reward: 38.381   
wall: 10:29:17   ep: 271   tstep:  740   total tstep:  56000   eps: 0.100   reward: -55.859   
wall: 10:29:25   ep: 272   tstep:  740   total tstep:  57000   eps: 0.100   reward: -37.105   
wall: 10:29:33   ep: 274   tstep:  232   total tstep:  58000   eps: 0.100   reward: 255.776   
wall: 10:29:42   ep: 275   tstep:  232   total tstep:  59000   eps: 0.100   reward: -13.596   
wall: 10:29:50   ep: 276   tstep:  232   total tstep:  60000   eps: 0.100   reward: 21.605   
wall: 10:29:59   ep: 277   tstep:  956   total tstep:  61000   eps: 0.100   reward: -178.159   
wall: 10:30:08   ep: 278   tstep:  956   total tstep:  62000   eps: 0.100   reward: -20.156   
wall: 10:30:17   ep: 279   tstep:  956   total tstep:  63000   eps: 0.100   reward: 35.256   
wall: 10:30:26   ep: 280   tstep:  956   total tstep:  64000   eps: 0.100   reward: 24.137   
wall: 10:30:34   ep: 281   tstep:  956   total tstep:  65000   eps: 0.100   reward: 36.587   
wall: 10:30:42   ep: 282   tstep:  956   total tstep:  66000   eps: 0.100   reward: 45.871   
wall: 10:30:49   ep: 284   tstep:  392   total tstep:  67000   eps: 0.100   reward: 215.709   
wall: 10:30:58   ep: 285   tstep:  392   total tstep:  68000   eps: 0.100   reward: 90.141   
wall: 10:31:06   ep: 286   tstep:  931   total tstep:  69000   eps: 0.100   reward: 160.203   
wall: 10:31:14   ep: 287   tstep:  931   total tstep:  70000   eps: 0.100   reward: 81.186   
wall: 10:31:22   ep: 289   tstep:    4   total tstep:  71000   eps: 0.100   reward: 209.053   
wall: 10:31:31   ep: 290   tstep:    4   total tstep:  72000   eps: 0.100   reward: 18.395   
wall: 10:31:40   ep: 291   tstep:    4   total tstep:  73000   eps: 0.100   reward: 73.769   
wall: 10:31:48   ep: 292   tstep:    4   total tstep:  74000   eps: 0.100   reward: 118.818   
wall: 10:31:56   ep: 293   tstep:    4   total tstep:  75000   eps: 0.100   reward: 79.98   
wall: 10:32:04   ep: 294   tstep:  667   total tstep:  76000   eps: 0.100   reward: 260.134   
wall: 10:32:12   ep: 295   tstep:  667   total tstep:  77000   eps: 0.100   reward: 81.324   
wall: 10:32:20   ep: 296   tstep:  667   total tstep:  78000   eps: 0.100   reward: 121.995   
wall: 10:32:29   ep: 297   tstep:  667   total tstep:  79000   eps: 0.100   reward: 42.91   
wall: 10:32:37   ep: 298   tstep:  667   total tstep:  80000   eps: 0.100   reward: 108.989   
wall: 10:32:46   ep: 299   tstep:  667   total tstep:  81000   eps: 0.100   reward: 132.24   
wall: 10:32:54   ep: 300   tstep:  667   total tstep:  82000   eps: 0.100   reward: 56.982   
wall: 10:33:02   ep: 301   tstep:  667   total tstep:  83000   eps: 0.100   reward: 96.389   
wall: 10:33:10   ep: 302   tstep:  667   total tstep:  84000   eps: 0.100   reward: 70.521   
wall: 10:33:19   ep: 303   tstep:  667   total tstep:  85000   eps: 0.100   reward: 86.212   
wall: 10:33:27   ep: 304   tstep:  667   total tstep:  86000   eps: 0.100   reward: 107.187   
wall: 10:33:35   ep: 305   tstep:  667   total tstep:  87000   eps: 0.100   reward: 93.593   
wall: 10:33:44   ep: 306   tstep:  667   total tstep:  88000   eps: 0.100   reward: 72.983   
wall: 10:33:52   ep: 307   tstep:  667   total tstep:  89000   eps: 0.100   reward: 94.76   
wall: 10:34:00   ep: 308   tstep:  667   total tstep:  90000   eps: 0.100   reward: 39.705   
wall: 10:34:09   ep: 309   tstep:  938   total tstep:  91000   eps: 0.100   reward: 131.125   
wall: 10:34:16   ep: 311   tstep:  544   total tstep:  92000   eps: 0.100   reward: -50.488   
wall: 10:34:24   ep: 312   tstep:  544   total tstep:  93000   eps: 0.100   reward: -13.787   
wall: 10:34:32   ep: 313   tstep:  544   total tstep:  94000   eps: 0.100   reward: 120.073   
wall: 10:34:41   ep: 314   tstep:  544   total tstep:  95000   eps: 0.100   reward: 91.413   
wall: 10:34:48   ep: 315   tstep:  544   total tstep:  96000   eps: 0.100   reward: 112.728   
wall: 10:34:57   ep: 316   tstep:  786   total tstep:  97000   eps: 0.100   reward: 193.951   
wall: 10:35:05   ep: 317   tstep:  786   total tstep:  98000   eps: 0.100   reward: 36.57   
wall: 10:35:13   ep: 318   tstep:  924   total tstep:  99000   eps: 0.100   reward: 153.127   
wall: 10:35:21   ep: 319   tstep:  924   total tstep: 100000   eps: 0.100   reward: 80.755   
wall: 10:35:29   ep: 321   tstep:  506   total tstep: 101000   eps: 0.100   reward: 15.686   
wall: 10:35:36   ep: 322   tstep:  506   total tstep: 102000   eps: 0.100   reward: 162.326   
wall: 10:35:45   ep: 323   tstep:  602   total tstep: 103000   eps: 0.100   reward: 194.451   
wall: 10:35:52   ep: 325   tstep:   44   total tstep: 104000   eps: 0.100   reward: 208.562   
wall: 10:36:00   ep: 326   tstep:   44   total tstep: 105000   eps: 0.100   reward: 88.54   
wall: 10:36:08   ep: 327   tstep:   44   total tstep: 106000   eps: 0.100   reward: 127.951   
wall: 10:36:15   ep: 328   tstep:  641   total tstep: 107000   eps: 0.100   reward: 192.668   
wall: 10:36:23   ep: 329   tstep:  641   total tstep: 108000   eps: 0.100   reward: 48.12   
wall: 10:36:31   ep: 331   tstep:   99   total tstep: 109000   eps: 0.100   reward: 237.257   
wall: 10:36:39   ep: 332   tstep:   99   total tstep: 110000   eps: 0.100   reward: -10.114   
wall: 10:36:48   ep: 333   tstep:   99   total tstep: 111000   eps: 0.100   reward: 48.975   
wall: 10:36:56   ep: 335   tstep:   52   total tstep: 112000   eps: 0.100   reward: -304.066   
wall: 10:37:03   ep: 336   tstep:  616   total tstep: 113000   eps: 0.100   reward: 220.41   
wall: 10:37:11   ep: 337   tstep:  917   total tstep: 114000   eps: 0.100   reward: 147.43   
wall: 10:37:19   ep: 338   tstep:  971   total tstep: 115000   eps: 0.100   reward: 188.87   
wall: 10:37:26   ep: 339   tstep:  971   total tstep: 116000   eps: 0.100   reward: 35.325   
wall: 10:37:33   ep: 341   tstep:  460   total tstep: 117000   eps: 0.100   reward: 133.817   
wall: 10:37:41   ep: 342   tstep:  460   total tstep: 118000   eps: 0.100   reward: 74.843   
wall: 10:37:49   ep: 344   tstep:  616   total tstep: 119000   eps: 0.100   reward: 2.771   
wall: 10:37:56   ep: 346   tstep:   53   total tstep: 120000   eps: 0.100   reward: 150.66   
wall: 10:38:03   ep: 348   tstep:  569   total tstep: 121000   eps: 0.100   reward: -60.741   
wall: 10:38:11   ep: 350   tstep:  164   total tstep: 122000   eps: 0.100   reward: 222.941   
wall: 10:38:18   ep: 352   tstep:  383   total tstep: 123000   eps: 0.100   reward: 291.767   
wall: 10:38:26   ep: 354   tstep:   26   total tstep: 124000   eps: 0.100   reward: 205.84   
wall: 10:38:34   ep: 355   tstep:  388   total tstep: 125000   eps: 0.100   reward: 205.104   
wall: 10:38:42   ep: 357   tstep:  290   total tstep: 126000   eps: 0.100   reward: 265.188   
wall: 10:38:49   ep: 359   tstep:  325   total tstep: 127000   eps: 0.100   reward: 246.072   
wall: 10:38:56   ep: 362   tstep:  228   total tstep: 128000   eps: 0.100   reward: 261.931   
wall: 10:39:04   ep: 364   tstep:  212   total tstep: 129000   eps: 0.100   reward: 224.424   
wall: 10:39:11   ep: 365   tstep:  750   total tstep: 130000   eps: 0.100   reward: 241.802   
wall: 10:39:19   ep: 367   tstep:  325   total tstep: 131000   eps: 0.100   reward: 171.074   
wall: 10:39:26   ep: 369   tstep:   55   total tstep: 132000   eps: 0.100   reward: 12.651   
wall: 10:39:33   ep: 370   tstep:  874   total tstep: 133000   eps: 0.100   reward: -53.672   
wall: 10:39:41   ep: 372   tstep:  586   total tstep: 134000   eps: 0.100   reward: 262.182   
wall: 10:39:48   ep: 375   tstep:   82   total tstep: 135000   eps: 0.100   reward: 257.8   
wall: 10:39:56   ep: 378   tstep:   11   total tstep: 136000   eps: 0.100   reward: -228.043   
wall: 10:40:03   ep: 380   tstep:  284   total tstep: 137000   eps: 0.100   reward: 225.393   
wall: 10:40:10   ep: 383   tstep:  122   total tstep: 138000   eps: 0.100   reward: 17.938   
wall: 10:40:18   ep: 384   tstep:  122   total tstep: 139000   eps: 0.100   reward: 81.568   
wall: 10:40:25   ep: 389   tstep:   38   total tstep: 140000   eps: 0.100   reward: 295.639   
wall: 10:40:33   ep: 390   tstep:   38   total tstep: 141000   eps: 0.100   reward: 101.856   
wall: 10:40:40   ep: 394   tstep:  152   total tstep: 142000   eps: 0.100   reward: -29.112   
wall: 10:40:47   ep: 395   tstep:  152   total tstep: 143000   eps: 0.100   reward: 29.062   
wall: 10:40:55   ep: 396   tstep:  362   total tstep: 144000   eps: 0.100   reward: 211.48   
wall: 10:41:03   ep: 398   tstep:  291   total tstep: 145000   eps: 0.100   reward: -194.895   
wall: 10:41:10   ep: 401   tstep:  117   total tstep: 146000   eps: 0.100   reward: -204.992   
wall: 10:41:18   ep: 402   tstep:  117   total tstep: 147000   eps: 0.100   reward: 90.494   
wall: 10:41:25   ep: 405   tstep:   73   total tstep: 148000   eps: 0.100   reward: 162.904   
wall: 10:41:32   ep: 406   tstep:  714   total tstep: 149000   eps: 0.100   reward: 181.402   
wall: 10:41:40   ep: 408   tstep:  310   total tstep: 150000   eps: 0.100   reward: 260.454   
wall: 10:41:47   ep: 411   tstep:   48   total tstep: 151000   eps: 0.100   reward: 268.048   
wall: 10:41:54   ep: 412   tstep:  572   total tstep: 152000   eps: 0.100   reward: 217.864   
wall: 10:42:02   ep: 415   tstep:  269   total tstep: 153000   eps: 0.100   reward: 259.907   
wall: 10:42:09   ep: 419   tstep:   71   total tstep: 154000   eps: 0.100   reward: -105.182   
wall: 10:42:16   ep: 421   tstep:  122   total tstep: 155000   eps: 0.100   reward: 233.517   
wall: 10:42:24   ep: 423   tstep:   25   total tstep: 156000   eps: 0.100   reward: -311.613   
wall: 10:42:32   ep: 424   tstep:  445   total tstep: 157000   eps: 0.100   reward: 234.478   
wall: 10:42:39   ep: 427   tstep:   39   total tstep: 158000   eps: 0.100   reward: 226.218   
wall: 10:42:47   ep: 428   tstep:  107   total tstep: 159000   eps: 0.100   reward: 242.43   
wall: 10:42:54   ep: 429   tstep:  522   total tstep: 160000   eps: 0.100   reward: 242.87   
wall: 10:43:01   ep: 430   tstep:  522   total tstep: 161000   eps: 0.100   reward: 146.522   
wall: 10:43:08   ep: 432   tstep:  753   total tstep: 162000   eps: 0.100   reward: 255.673   
wall: 10:43:16   ep: 434   tstep:  492   total tstep: 163000   eps: 0.100   reward: 262.537   
wall: 10:43:22   ep: 436   tstep:  289   total tstep: 164000   eps: 0.100   reward: 217.805   
wall: 10:43:29   ep: 439   tstep:  200   total tstep: 165000   eps: 0.100   reward: 235.555   
wall: 10:43:37   ep: 442   tstep:   30   total tstep: 166000   eps: 0.100   reward: 215.799   
wall: 10:43:44   ep: 444   tstep:  240   total tstep: 167000   eps: 0.100   reward: 274.606   
wall: 10:43:51   ep: 445   tstep:  240   total tstep: 168000   eps: 0.100   reward: 94.49   
wall: 10:43:59   ep: 446   tstep:  796   total tstep: 169000   eps: 0.100   reward: 234.705   
wall: 10:44:06   ep: 449   tstep:   60   total tstep: 170000   eps: 0.100   reward: 117.05   
wall: 10:44:13   ep: 451   tstep:  199   total tstep: 171000   eps: 0.100   reward: 249.601   
wall: 10:44:21   ep: 453   tstep:  144   total tstep: 172000   eps: 0.100   reward: 188.276   
wall: 10:44:29   ep: 454   tstep:  144   total tstep: 173000   eps: 0.100   reward: 38.014   
wall: 10:44:37   ep: 456   tstep:   73   total tstep: 174000   eps: 0.100   reward: 274.643   
wall: 10:44:44   ep: 457   tstep:  458   total tstep: 175000   eps: 0.100   reward: 235.782   
wall: 10:44:52   ep: 459   tstep:  178   total tstep: 176000   eps: 0.100   reward: 224.325   
wall: 10:45:00   ep: 460   tstep:  828   total tstep: 177000   eps: 0.100   reward: 206.592   
wall: 10:45:07   ep: 463   tstep:   20   total tstep: 178000   eps: 0.100   reward: 203.23   
wall: 10:45:14   ep: 465   tstep:  105   total tstep: 179000   eps: 0.100   reward: 242.451   
wall: 10:45:22   ep: 466   tstep:  557   total tstep: 180000   eps: 0.100   reward: 213.594   
wall: 10:45:29   ep: 468   tstep:  482   total tstep: 181000   eps: 0.100   reward: 260.273   
wall: 10:45:36   ep: 471   tstep:   36   total tstep: 182000   eps: 0.100   reward: 254.961   
wall: 10:45:44   ep: 473   tstep:  173   total tstep: 183000   eps: 0.100   reward: 237.072   
wall: 10:45:51   ep: 474   tstep:  302   total tstep: 184000   eps: 0.100   reward: 168.218   
wall: 10:45:59   ep: 477   tstep:    1   total tstep: 185000   eps: 0.100   reward: 254.415   
wall: 10:46:07   ep: 478   tstep:  205   total tstep: 186000   eps: 0.100   reward: 166.87   
wall: 10:46:14   ep: 480   tstep:   23   total tstep: 187000   eps: 0.100   reward: 219.167   
wall: 10:46:22   ep: 481   tstep:   23   total tstep: 188000   eps: 0.100   reward: 121.224   
wall: 10:46:29   ep: 482   tstep:  398   total tstep: 189000   eps: 0.100   reward: 233.602   
wall: 10:46:37   ep: 483   tstep:  398   total tstep: 190000   eps: 0.100   reward: 94.814   
wall: 10:46:45   ep: 485   tstep:  332   total tstep: 191000   eps: 0.100   reward: -101.423   
wall: 10:46:52   ep: 486   tstep:  332   total tstep: 192000   eps: 0.100   reward: 110.194   
wall: 10:47:00   ep: 487   tstep:  332   total tstep: 193000   eps: 0.100   reward: 50.283   
wall: 10:47:07   ep: 490   tstep:  115   total tstep: 194000   eps: 0.100   reward: 261.25   
wall: 10:47:14   ep: 491   tstep:  454   total tstep: 195000   eps: 0.100   reward: 229.827   
wall: 10:47:21   ep: 493   tstep:  501   total tstep: 196000   eps: 0.100   reward: 227.933   
wall: 10:47:29   ep: 494   tstep:  501   total tstep: 197000   eps: 0.100   reward: 130.779   
wall: 10:47:37   ep: 496   tstep:  149   total tstep: 198000   eps: 0.100   reward: 221.184   
wall: 10:47:44   ep: 498   tstep:   10   total tstep: 199000   eps: 0.100   reward: -65.76   

Optional: train some more

In [71]:
# tts = q_learning(env, frames=5000, gamma=.99,
#                  eps_decay_steps=50000, eps_target=0.1, batch_size=4096,
#                  model=model, mem=mem, start_step=tts, callback=callback, trace=trace)

Optional: plot the agent state

In [72]:
# helpers.plot_all(env, model, mem, trace, print_=True)

Save weights

In [73]:
model._model.save('./tf_models/LunarLander.ckpt')

Load weights

In [74]:
model._model.load('./tf_models/LunarLander.ckpt')
INFO:tensorflow:Restoring parameters from ./tf_models/LunarLander.ckpt
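
The exact save/load code lives in the model wrapper defined earlier in this notebook. Judging by the Restoring parameters log line it is backed by tf.train.Saver; below is a minimal sketch of what such a wrapper could look like. The class and attribute names here are assumptions for illustration, not the notebook's actual implementation.

In [ ]:
# Hypothetical sketch of a checkpoint wrapper backed by tf.train.Saver.
# Names (TFCheckpointMixin, self._sess) are assumptions, not the notebook's code.
class TFCheckpointMixin:
    def __init__(self, sess):
        self._sess = sess                        # tf.Session used by the model
        self._saver = tf.train.Saver()           # covers all saveable variables

    def save(self, path):
        self._saver.save(self._sess, path)       # writes checkpoint files for 'path'

    def load(self, path):
        self._saver.restore(self._sess, path)    # logs 'INFO:tensorflow:Restoring parameters ...'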

Enjoy trained agent

In [75]:
# In Jupyter, press the square '■' button in the top menu to stop the animation
try: evaluate(env, model, frames=float('inf'), eps=0.0, render=True)
except KeyboardInterrupt: pass
finally: env.close()

Result

Expected agent behaviour after 200,000 training iterations

If you enable plotting, the output after training should look roughly as follows


Where:

  • Q Values - average Q-values over a fixed set of states, showing how the value estimates develop during training (a sketch of how such a plot could be produced follows this list)
  • Episode Rewards - individual episode rewards (blue dots) and their running average (yellow) over the training period
  • Trajectory - state variables plotted over time within one or more episodes
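
Below is a minimal sketch of how such a Q-value plot could be produced. It is not the notebook's helpers.plot_all code: probe_states and q_history are hypothetical names, and it assumes model.eval accepts a batch of states, as it does in the epsilon-greedy policy above.

In [ ]:
# Hypothetical sketch, not the notebook's actual plotting code.
# probe_states: a fixed batch of states picked once, before training starts
# (sampled from the observation space here for simplicity; states drawn from
# the replay memory would also work).
probe_states = np.array([env.observation_space.sample() for _ in range(256)])

def mean_max_q(model, states):
    """Average over states of the max-over-actions Q-value."""
    q_values = model.eval(states)               # assumed shape: [n_states, n_actions]
    return np.max(q_values, axis=1).mean()

# Collect (total_tstep, mean_max_q) pairs, e.g. from the training callback,
# then plot them to obtain a curve similar to the 'Q Values' panel:
q_history = [(0, mean_max_q(model, probe_states))]
plt.plot([t for t, q in q_history], [q for t, q in q_history])
plt.xlabel('total tstep'); plt.ylabel('average max Q'); plt.show()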

Render HTML5 Video

This section only renders the HTML5 animations shown above; it can be skipped.

In [ ]:
import time

import numpy as np
import matplotlib.pyplot as plt

from matplotlib import animation, rc
from IPython.display import HTML
In [ ]:
def frames_render(env, frames, episodes, eps, model, callback=None, trace=None, render=True, sleep=0):

    rendered_frames = []
    
    def policy(st, model, eps):
        if np.random.rand() > eps:
            stack = np.stack([st])  # add batch dimension: [1, state_size]
            q_values = model.eval(stack)
            return np.argmax(q_values)
        else:
            return env.action_space.sample()
        
    total_reward = 0
    
    tts_ = 0                                 # total time step
    for e_ in itertools.count():             # count from 0 to infinity
        
        S = env.reset()
        
        if render:
            rendered_frames.append(env.render(mode='rgb_array'))
            #env.render()
            time.sleep(sleep)
        
        for t_ in itertools.count():         # count from 0 to infinity
            
            # print(e_, t_)
            
            A = policy(S, model, eps)
            
            S_, R, done, info = env.step(A)
            
            total_reward += R  # accumulate episode reward (returned alongside the frames)
            
            if render:
                rendered_frames.append(env.render(mode='rgb_array'))
                #env.render()
                time.sleep(sleep)
            
            if callback is not None:
                callback(tts_, e_, t_, S, A, R, done, eps, model, None, trace)
    
            if done:
                break
                
            if frames is not None and tts_ >= frames:
                return rendered_frames, total_reward
                
            S = S_
                
            tts_ += 1
            
        if episodes is not None and e_ >= episodes-1:
            return rendered_frames, total_reward
In [ ]:
env.close()
In [ ]:
evaluate(env, frames=None, episodes=1, eps=0.05, model=model, render=True)
In [ ]:
rendered_frames, total_reward = frames_render(env, frames=None, episodes=1, eps=0.05, model=model, render=True)
In [ ]:
plt.ioff()

fig = plt.figure(figsize=(rendered_frames[0].shape[1] / 72.0,
                          rendered_frames[0].shape[0] / 72.0), dpi = 72)
ax = fig.add_subplot(111);

patch = ax.imshow(rendered_frames[0])

plt.tick_params(axis='both', which='both', bottom=False, top=False, left=False,
                right=False, labelbottom=False, labelleft=False)
In [ ]:
def animate(i):
    patch.set_data(rendered_frames[i])
In [ ]:
anim = animation.FuncAnimation(fig, animate, frames=len(rendered_frames), interval=20, repeat=True)
In [ ]:
HTML(anim.to_html5_video())
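
If the inline HTML5 player is not needed, the animation can also be written straight to disk with matplotlib's animation API. A minimal sketch, assuming ffmpeg is installed; the output path and fps value are arbitrary choices, not part of the original notebook:

In [ ]:
# Optional alternative: save the animation as an mp4 file instead of embedding it.
# Requires ffmpeg on the system; path and fps are arbitrary choices.
# anim.save('./videos/LunarLander.mp4', writer='ffmpeg', fps=50)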
