
Introduction

This notebook presents a convolutional neural network in PyTorch used to solve the Street View House Numbers (SVHN) task.

This is a replication of "Multi-digit Number Recognition from Street View Imagery using Deep Convolutional Neural Networks" (Goodfellow et al., 2013).

Config

Download and extract SVHN dataset in Format 1 (train.tar.gz, test.tar.gz, extra.tar.gz)

In [1]:
dataset_location = '/home/marcin/Datasets/SVHN'  # .../train/1.png
model_save_location = './models'

Imports

In [2]:
import os
import time
import pickle
import pathlib
import datetime

import numpy as np
import matplotlib.pyplot as plt

import h5py  # required to open .mat files in SVHN dataset
In [3]:
import PIL
import PIL.Image
In [4]:
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.utils.data
import torchvision

Dataset

In [5]:
dataset_path = pathlib.Path(dataset_location)
assert os.path.isfile(dataset_path / 'extra/1.png')
assert os.path.isfile(dataset_path / 'train/1.png')
assert os.path.isfile(dataset_path / 'test/1.png')

Functions to read .mat files with labels and bounding boxes.

In [6]:
def read_name(f, index):
    """Decode string from HDF5 file."""
    assert isinstance(f, h5py.File)
    assert index == int(index)
    ref = f['/digitStruct/name'][index][0]
    return ''.join(chr(v[0]) for v in f[ref])
In [7]:
def read_digits_raw(f, index):
    """Decode digits and bounding boxes from HDF5 file."""
    assert isinstance(f, h5py.File)
    assert index == int(index)
    
    ref = f['/digitStruct/bbox'][index].item()
    ddd = {}
    for key in ['label', 'left', 'top', 'width', 'height']:
        dset = f[ref][key]
        if len(dset) == 1:
            ddd[key] = [ int(dset[0][0]) ]
        else:
            ddd[key] = []
            for i in range(len(dset)):
                ref2 = dset[i][0]
                ddd[key].append( int(f[ref2][0][0]) )
    return ddd
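
For a single image this returns one entry per digit; for a three-digit number such as '210' the result looks roughly like this (box values hypothetical; note that SVHN encodes the digit 0 as label 10):

{'label': [2, 1, 10], 'left': [246, 323, 401], 'top': [77, 81, 79],
 'width': [81, 96, 92], 'height': [219, 219, 219]}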
In [8]:
def get_label(ddict):
    """Convert raw digit info into len-5 label and single bounding box"""
    assert isinstance(ddict, dict)
    
    # construct proper label for NN training
    # image '210' -> [3, 2, 1, 10, 0, 0]
    #                 ^  ^  ^  ^   ^--^-- "0, 0" pad with '0' (no digit)
    #                 |  ---------------- "210" house number, 0 encoded as 10
    #                 ------------------- "3" is number of digits
    label = ddict['label'].copy()
    label = [len(label)] + label + [0]*(5-len(label))
    
    left = min(ddict['left'])
    top = min(ddict['top'])
    right = max(l+w for l, w in zip(ddict['left'], ddict['width']))
    bottom = max(t+h for t, h in zip(ddict['top'], ddict['height']))
    return tuple(label), (left, top, right, bottom)
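
A quick sanity check of get_label on a hand-built ddict for the house number '210' (box coordinates hypothetical):

sample = {'label': [2, 1, 10], 'left': [10, 20, 30], 'top': [5, 6, 5],
          'width': [12, 12, 12], 'height': [20, 20, 20]}
label, bbox = get_label(sample)
print(label)  # (3, 2, 1, 10, 0, 0)
print(bbox)   # (10, 5, 42, 26)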
In [9]:
def read_mat_file(filepath):
    """Open .mat file and read all the metadata."""
    assert isinstance(filepath, (str, pathlib.PosixPath))
    
    print(filepath)
    
    meta = {'names':[], 'labels':[], 'bboxes':[]}
    with h5py.File(filepath, 'r') as f:
        length = len(f['/digitStruct/name'])
        for i in range(length):
            name = read_name(f, i)
            ddict = read_digits_raw(f, i)
            label, bbox = get_label(ddict)
            meta['names'].append(name)
            meta['labels'].append(label)
            meta['bboxes'].append(bbox)
            if i % 1000 == 0 or i == length-1:
                print(f'{i:6d} / {length}')
    return meta
In [10]:
def open_or_generate(name):
    """Either load .pkl, or if doesn't exit generate it and open."""
    assert name in ('extra', 'test', 'train')
    
    fname = name+'.pkl'
    if os.path.exists(dataset_path / fname):
        with open(dataset_path / fname, 'rb') as f:
            meta = pickle.load(f)
            print(f'Loaded:{fname}')
    else:
        print(f'Generating {fname}:')
        meta = read_mat_file(dataset_path / name / 'digitStruct.mat')
        with open(dataset_path / fname, 'wb') as f:
            pickle.dump(meta, f)
    
    return meta

Note: this may take around one hour to complete, but only on the first run.

In [11]:
# Convert label/bbox data to friendly format
extra_meta = open_or_generate('extra')
test_meta = open_or_generate('test')
train_meta = open_or_generate('train')

# Add folder information
extra_meta['folders'] = ['extra'] * len(extra_meta['names'])
test_meta['folders'] = ['test'] * len(test_meta['names'])
train_meta['folders'] = ['train'] * len(train_meta['names'])

# Add 'extra' to 'train' data
train_meta['names'].extend(extra_meta['names'])
train_meta['labels'].extend(extra_meta['labels'])
train_meta['bboxes'].extend(extra_meta['bboxes'])
train_meta['folders'].extend(extra_meta['folders'])
del extra_meta
Loaded:extra.pkl
Loaded:test.pkl
Loaded:train.pkl

Define dataset class

In [12]:
class SVHNDataset(torch.utils.data.Dataset):
    def __init__(self, dataset_path, metadata, transforms=None):
        assert isinstance(dataset_path, (str, pathlib.PosixPath))
        assert isinstance(metadata, dict)
        assert set(metadata.keys()) == {'bboxes','folders','labels','names'}
        assert len(metadata['names']) == len(metadata['labels'])
        assert len(metadata['names']) == len(metadata['bboxes'])
        assert len(metadata['names']) == len(metadata['folders'])
        assert transforms is None or \
               isinstance(transforms, torchvision.transforms.Compose)
        
        self.dataset_path = pathlib.PosixPath(dataset_path)
        self.metadata = metadata
        self.transforms = transforms
    
    def __len__(self):
        return len(self.metadata['names'])
    
    def __getitem__(self, index):
        image_name = self.metadata['names'][index]      # e.g. '1.png'
        image_folder = self.metadata['folders'][index]  # e.g. 'test'
        label = self.metadata['labels'][index]  # e.g. (3, 2, 1, 10, 0, 0)
        bbox = self.metadata['bboxes'][index]   # [left, top, right, bottom]
        
        # Figure out crop box
        left, top, right, bottom = bbox
        width, height = right - left, bottom - top
        crop_left =   int(round(left   - .15*width))
        crop_top =    int(round(top    - .15*height))
        crop_right =  int(round(right  + .15*width))
        crop_bottom = int(round(bottom + .15*height))
        
        img = PIL.Image.open(self.dataset_path / image_folder / image_name)
        img2 = img.crop(box=(crop_left, crop_top, crop_right, crop_bottom))
        res = img2.resize((64, 64))
        if self.transforms is not None:
            res = self.transforms(res)
        
        return res, label

Create temporary dataset to play with

In [13]:
dataset = SVHNDataset(dataset_path, train_meta)
In [14]:
img, label = dataset[200000]
print(f'Label: {label}')
display(img)
Label: (3, 3, 3, 2, 0, 0)
In [15]:
del dataset

Create actual datasets for training

In [16]:
transforms_train = torchvision.transforms.Compose([
    torchvision.transforms.RandomCrop([54, 54]),
    torchvision.transforms.ToTensor(),
    torchvision.transforms.Normalize([0.5, 0.5, 0.5], [0.5, 0.5, 0.5])
])

transforms_valid = torchvision.transforms.Compose([
    torchvision.transforms.CenterCrop([54, 54]),
    torchvision.transforms.ToTensor(),
    torchvision.transforms.Normalize([0.5, 0.5, 0.5], [0.5, 0.5, 0.5])
])

dataset_train = SVHNDataset(dataset_path, train_meta, transforms_train)
dataset_valid = SVHNDataset(dataset_path, test_meta, transforms_valid)
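
To eyeball a transformed sample, the normalization can be undone before plotting (a minimal sketch using the objects defined above):

img_t, label = dataset_train[0]                       # tensor in [-1, 1], shape [3, 54, 54]
img_np = img_t.permute(1, 2, 0).numpy() * 0.5 + 0.5   # back to [0, 1] for matplotlib
plt.imshow(img_np); plt.title(str(label)); plt.show()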

And dataloaders as well

In [17]:
batch_size = 32

train_loader = torch.utils.data.DataLoader(
    dataset_train, batch_size=batch_size, shuffle=True,
    num_workers=6, pin_memory=True)
valid_loader = torch.utils.data.DataLoader(
    dataset_valid, batch_size=128, shuffle=False,
    num_workers=6, pin_memory=False)
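
Note that each sample's label is a 6-tuple of ints, so the default collate function transposes a batch into a list of six tensors of shape [batch_size], one per output head; the training loop below relies on this. A quick shape check (a sketch):

images, targets = next(iter(train_loader))
print(images.shape)      # torch.Size([32, 3, 54, 54])
print(len(targets))      # 6 -> length, digit1 ... digit5
print(targets[0].shape)  # torch.Size([32])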

Model

In [18]:
class SVHNModel(torch.nn.Module):
    
    def __init__(self):
        def _block(in_channels, out_channels, stride):
            """Helper to build CNN blocks."""
            return nn.Sequential(
                nn.Conv2d(in_channels=in_channels, out_channels=out_channels,
                          kernel_size=5, padding=2),
                nn.BatchNorm2d(num_features=out_channels),
                nn.ReLU(),
                nn.MaxPool2d(kernel_size=2, stride=stride, padding=1),
                nn.Dropout(0.2)
            )
    
        super().__init__()
        
        self.block1 = _block(in_channels=  3, out_channels= 48, stride=2)
        self.block2 = _block(in_channels= 48, out_channels= 64, stride=1)
        self.block3 = _block(in_channels= 64, out_channels=128, stride=2)
        self.block4 = _block(in_channels=128, out_channels=160, stride=1)
        self.block5 = _block(in_channels=160, out_channels=192, stride=2)
        self.block6 = _block(in_channels=192, out_channels=192, stride=1)
        self.block7 = _block(in_channels=192, out_channels=192, stride=2)
        self.block8 = _block(in_channels=192, out_channels=192, stride=1)
        self.fc1 = nn.Sequential(nn.Linear(192 * 7 * 7, 3072), nn.ReLU())
        self.fc2 = nn.Sequential(nn.Linear(3072, 3072), nn.ReLU())
        
        self.length = nn.Sequential(nn.Linear(3072, 7))
        self.digit1 = nn.Sequential(nn.Linear(3072, 11))
        self.digit2 = nn.Sequential(nn.Linear(3072, 11))
        self.digit3 = nn.Sequential(nn.Linear(3072, 11))
        self.digit4 = nn.Sequential(nn.Linear(3072, 11))
        self.digit5 = nn.Sequential(nn.Linear(3072, 11))
    
    def forward(self, x):
        x = self.block1(x)
        x = self.block2(x)
        x = self.block3(x)
        x = self.block4(x)
        x = self.block5(x)
        x = self.block6(x)
        x = self.block7(x)
        x = self.block8(x)
        x = x.view(x.size(0), 192*7*7)
        x = self.fc1(x)
        x = self.fc2(x)

        length = self.length(x)  #  logits!
        digit1 = self.digit1(x)
        digit2 = self.digit2(x)
        digit3 = self.digit3(x)
        digit4 = self.digit4(x)
        digit5 = self.digit5(x)
        
        return length, digit1, digit2, digit3, digit4, digit5    
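
With 54x54 inputs, the four stride-2 pooling blocks (each pool has padding 1) bring the feature map down to 7x7, which is where the 192*7*7 figure in fc1 comes from. A dummy forward pass confirms the head shapes (a sketch):

with torch.no_grad():
    outs = SVHNModel()(torch.zeros(2, 3, 54, 54))
print([tuple(o.shape) for o in outs])
# [(2, 7), (2, 11), (2, 11), (2, 11), (2, 11), (2, 11)]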

Custom loss function

In [19]:
def custom_loss(logits, targets):
    """Custom loss function.
    
    Params:
        logits (list): with following members:
            logits[0] (torch.Tensor): length, shape [n_batch, 7], LOGITS!
            logits[1] (torch.Tensor): digit1, shape [n_batch, 11], LOGITS!
            ...
        targets (list): with members:
            targets[0] (torch.Tensor): length target, shape [n_batch]
            targets[1] (torch.Tensor): digit1 target, shape [n_batch]
            ...
    """
    assert len(logits) == len(targets)
    length_ce = F.cross_entropy(logits[0], targets[0])
    digit1_ce = F.cross_entropy(logits[1], targets[1])
    digit2_ce = F.cross_entropy(logits[2], targets[2])
    digit3_ce = F.cross_entropy(logits[3], targets[3])
    digit4_ce = F.cross_entropy(logits[4], targets[4])
    digit5_ce = F.cross_entropy(logits[5], targets[5])
    loss = length_ce + digit1_ce + digit2_ce + \
           digit3_ce + digit4_ce + digit5_ce
    assert list(loss.size()) == []
    return loss  # tensor!
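
The total loss is simply the sum of six independent cross-entropies, one per head. A smoke test on random tensors (shapes only, not real data):

logits = [torch.randn(4, 7)] + [torch.randn(4, 11) for _ in range(5)]
targets = [torch.randint(0, 7, (4,))] + [torch.randint(0, 11, (4,)) for _ in range(5)]
print(custom_loss(logits, targets))  # scalar tensor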

Custom accuracy function

In [20]:
def custom_acc_sum(logits, targets):
    """Custom accuracy function.
    
    Params:
        same as custom_loss()
    """
    assert len(logits) == len(targets)
    
    length_predictions = logits[0].max(dim=1)[1]
    digit1_predictions = logits[1].max(dim=1)[1]
    digit2_predictions = logits[2].max(dim=1)[1]
    digit3_predictions = logits[3].max(dim=1)[1]
    digit4_predictions = logits[4].max(dim=1)[1]
    digit5_predictions = logits[5].max(dim=1)[1]
    
    accumulate = torch.ones_like(targets[0], dtype=torch.uint8)
    accumulate &= length_predictions.eq(targets[0])
    accumulate &= digit1_predictions.eq(targets[1])
    accumulate &= digit2_predictions.eq(targets[2])
    accumulate &= digit3_predictions.eq(targets[3])
    accumulate &= digit4_predictions.eq(targets[4])
    accumulate &= digit5_predictions.eq(targets[5])
    
    accuracy_sum = accumulate.sum()
    return accuracy_sum  # tensor!
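
Because the per-sample flags are ANDed across all six heads, a sample only counts as correct when the length and all five digit slots match, i.e. whole-sequence accuracy. Feeding one-hot "logits" built from the targets themselves should therefore score a perfect batch (a sketch):

targets = [torch.randint(0, 7, (4,))] + [torch.randint(0, 11, (4,)) for _ in range(5)]
perfect = [F.one_hot(t, n).float() for t, n in zip(targets, [7] + [11]*5)]
print(custom_acc_sum(perfect, targets))  # tensor(4) -> all four samples correct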

Get PyTorch device

In [21]:
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")

Instantiate the model

In [22]:
model = SVHNModel()
model.to(device)
optimizer = torch.optim.SGD(
    model.parameters(), lr=0.01, momentum=0.9, weight_decay=0.0005)
scheduler = torch.optim.lr_scheduler.StepLR(
    optimizer, step_size=10000, gamma=0.9)
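
Since scheduler.step() is called once per minibatch in the loop below, step_size=10000 means the learning rate decays by a factor of 0.9 every 10,000 training steps, i.e. lr = 0.01 * 0.9 ** (step // 10000); this matches the drops visible in the log at steps 10,000 and 20,000.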

Evaluate function

In [23]:
def evaluate(model, data_loader):
    model.eval()
    accuracy_sum = 0
    with torch.no_grad():
        for images, targets in data_loader:
            images = images.to(device)
            targets = [t.to(device) for t in targets]
            
            logits = model(images)
            acc = custom_acc_sum(logits, targets)
            accuracy_sum += acc.item()
    accuracy = accuracy_sum / len(data_loader.dataset)
    return accuracy

Train loop

In [24]:
os.makedirs(model_save_location, exist_ok=True)
In [25]:
print_every = 100
eval_every = 1000

step = 0
best_accuracy = 0
time_start = time.time()

for epoch in range(3):
    
    print(f'Training Epoch {epoch}...')
    model.train()
    for images, targets in train_loader:
        images = images.to(device)
        targets = [t.to(device) for t in targets]
        
        model.train()
        optimizer.zero_grad()
        logits = model(images)
        loss = custom_loss(logits, targets)
        loss.backward()
        optimizer.step()
        scheduler.step()
        

        step += 1

        if step % print_every == 0:
            dtnow = datetime.datetime.now()
            lr = scheduler.get_lr()[0]
            dtime = time.time() - time_start
            img_per_sec = batch_size * print_every / dtime
            print(f'  step: {step:7d}   loss: {loss.item():.6f}   '
                  f'lr: {lr:.6f}   i/s: {img_per_sec:7.2f}')
            time_start = time.time()
        
        if step % eval_every == 0:
            
            print('Evaluating...')
            accuracy = evaluate(model, valid_loader)
            print(f'  accuracy: {accuracy:.4f}')
            
            if accuracy > best_accuracy:
                best_accuracy = accuracy
                model_path = pathlib.PosixPath(model_save_location)
                torch.save(model.state_dict(), model_path / f'model_{step}')
            
            print('Training...')
Training Epoch 0...
  step:     100   loss: 7.401640   lr: 0.010000   i/s:  854.09
  step:     200   loss: 7.190207   lr: 0.010000   i/s: 1030.37
  step:     300   loss: 6.371806   lr: 0.010000   i/s: 1066.78
  step:     400   loss: 6.779932   lr: 0.010000   i/s: 1061.85
  step:     500   loss: 7.264554   lr: 0.010000   i/s: 1069.39
  step:     600   loss: 6.446972   lr: 0.010000   i/s: 1072.06
  step:     700   loss: 6.449721   lr: 0.010000   i/s: 1022.47
  step:     800   loss: 6.281842   lr: 0.010000   i/s: 1046.71
  step:     900   loss: 5.938114   lr: 0.010000   i/s: 1032.28
  step:    1000   loss: 5.916947   lr: 0.010000   i/s:  991.43
Evaluating...
  accuracy: 0.0694
Training...
  step:    1100   loss: 5.644208   lr: 0.010000   i/s:  388.32
  step:    1200   loss: 5.459051   lr: 0.010000   i/s:  960.80
  step:    1300   loss: 4.829540   lr: 0.010000   i/s:  996.09
  step:    1400   loss: 5.078192   lr: 0.010000   i/s:  971.94
  step:    1500   loss: 5.588068   lr: 0.010000   i/s: 1049.77
  step:    1600   loss: 5.007460   lr: 0.010000   i/s: 1013.82
  step:    1700   loss: 4.824934   lr: 0.010000   i/s: 1040.74
  step:    1800   loss: 4.935504   lr: 0.010000   i/s: 1061.68
  step:    1900   loss: 4.612381   lr: 0.010000   i/s:  992.53
  step:    2000   loss: 3.305910   lr: 0.010000   i/s:  962.54
Evaluating...
  accuracy: 0.2704
Training...
  step:    2100   loss: 4.393053   lr: 0.010000   i/s:  422.83
  step:    2200   loss: 4.240945   lr: 0.010000   i/s: 1002.94
  step:    2300   loss: 2.937595   lr: 0.010000   i/s:  993.45
  step:    2400   loss: 3.259427   lr: 0.010000   i/s: 1029.44
  step:    2500   loss: 3.005823   lr: 0.010000   i/s:  998.62
  step:    2600   loss: 2.166938   lr: 0.010000   i/s:  961.51
  step:    2700   loss: 2.812975   lr: 0.010000   i/s:  987.19
  step:    2800   loss: 2.467856   lr: 0.010000   i/s:  978.88
  step:    2900   loss: 3.406239   lr: 0.010000   i/s:  983.28
  step:    3000   loss: 2.745082   lr: 0.010000   i/s:  949.98
Evaluating...
  accuracy: 0.6435
Training...
  step:    3100   loss: 1.782341   lr: 0.010000   i/s:  443.00
  step:    3200   loss: 1.559707   lr: 0.010000   i/s: 1037.79
  step:    3300   loss: 2.777167   lr: 0.010000   i/s: 1023.67
  step:    3400   loss: 1.807530   lr: 0.010000   i/s: 1000.98
  step:    3500   loss: 1.169201   lr: 0.010000   i/s: 1035.72
  step:    3600   loss: 1.313325   lr: 0.010000   i/s:  986.18
  step:    3700   loss: 1.078747   lr: 0.010000   i/s:  990.44
  step:    3800   loss: 0.775579   lr: 0.010000   i/s:  945.11
  step:    3900   loss: 1.446685   lr: 0.010000   i/s: 1004.49
  step:    4000   loss: 1.214472   lr: 0.010000   i/s:  984.77
Evaluating...
  accuracy: 0.7061
Training...
  step:    4100   loss: 1.145120   lr: 0.010000   i/s:  406.99
  step:    4200   loss: 1.026030   lr: 0.010000   i/s: 1044.03
  step:    4300   loss: 1.048183   lr: 0.010000   i/s: 1039.08
  step:    4400   loss: 1.248956   lr: 0.010000   i/s:  988.14
  step:    4500   loss: 1.577457   lr: 0.010000   i/s: 1046.55
  step:    4600   loss: 1.753427   lr: 0.010000   i/s: 1046.21
  step:    4700   loss: 2.271700   lr: 0.010000   i/s: 1035.67
  step:    4800   loss: 1.322761   lr: 0.010000   i/s:  996.96
  step:    4900   loss: 0.958066   lr: 0.010000   i/s: 1042.63
  step:    5000   loss: 2.174776   lr: 0.010000   i/s:  985.36
Evaluating...
  accuracy: 0.7358
Training...
  step:    5100   loss: 1.030763   lr: 0.010000   i/s:  406.58
  step:    5200   loss: 0.818460   lr: 0.010000   i/s:  964.99
  step:    5300   loss: 0.705028   lr: 0.010000   i/s: 1034.38
  step:    5400   loss: 0.547387   lr: 0.010000   i/s: 1012.53
  step:    5500   loss: 0.883970   lr: 0.010000   i/s:  975.82
  step:    5600   loss: 1.818056   lr: 0.010000   i/s: 1018.23
  step:    5700   loss: 1.106099   lr: 0.010000   i/s:  990.36
  step:    5800   loss: 0.550250   lr: 0.010000   i/s:  985.19
  step:    5900   loss: 1.230123   lr: 0.010000   i/s: 1037.23
  step:    6000   loss: 1.819298   lr: 0.010000   i/s: 1041.18
Evaluating...
  accuracy: 0.8320
Training...
  step:    6100   loss: 1.042244   lr: 0.010000   i/s:  426.67
  step:    6200   loss: 0.863782   lr: 0.010000   i/s: 1024.50
  step:    6300   loss: 0.883616   lr: 0.010000   i/s:  966.47
  step:    6400   loss: 0.973729   lr: 0.010000   i/s:  969.21
  step:    6500   loss: 1.370933   lr: 0.010000   i/s: 1012.88
  step:    6600   loss: 1.295136   lr: 0.010000   i/s:  980.15
  step:    6700   loss: 2.294033   lr: 0.010000   i/s: 1033.45
  step:    6800   loss: 0.824266   lr: 0.010000   i/s: 1042.88
  step:    6900   loss: 1.632266   lr: 0.010000   i/s: 1035.05
  step:    7000   loss: 1.054014   lr: 0.010000   i/s:  974.15
Evaluating...
  accuracy: 0.8242
Training...
  step:    7100   loss: 1.058752   lr: 0.010000   i/s:  456.05
  step:    7200   loss: 0.871401   lr: 0.010000   i/s:  989.67
  step:    7300   loss: 1.888838   lr: 0.010000   i/s: 1043.98
Training Epoch 1...
  step:    7400   loss: 0.443430   lr: 0.010000   i/s:  913.24
  step:    7500   loss: 1.590154   lr: 0.010000   i/s: 1021.41
  step:    7600   loss: 1.425861   lr: 0.010000   i/s:  975.07
  step:    7700   loss: 0.621780   lr: 0.010000   i/s: 1042.50
  step:    7800   loss: 1.464213   lr: 0.010000   i/s: 1009.43
  step:    7900   loss: 0.722460   lr: 0.010000   i/s:  985.64
  step:    8000   loss: 0.897502   lr: 0.010000   i/s:  987.09
Evaluating...
  accuracy: 0.8528
Training...
  step:    8100   loss: 0.753870   lr: 0.010000   i/s:  418.73
  step:    8200   loss: 0.759861   lr: 0.010000   i/s: 1035.73
  step:    8300   loss: 0.469332   lr: 0.010000   i/s:  990.57
  step:    8400   loss: 0.987330   lr: 0.010000   i/s: 1016.41
  step:    8500   loss: 0.934088   lr: 0.010000   i/s:  973.05
  step:    8600   loss: 0.747963   lr: 0.010000   i/s:  967.62
  step:    8700   loss: 0.971788   lr: 0.010000   i/s:  982.59
  step:    8800   loss: 0.389881   lr: 0.010000   i/s:  983.42
  step:    8900   loss: 0.948968   lr: 0.010000   i/s: 1028.58
  step:    9000   loss: 0.761076   lr: 0.010000   i/s: 1017.40
Evaluating...
  accuracy: 0.8613
Training...
  step:    9100   loss: 0.925314   lr: 0.010000   i/s:  431.43
  step:    9200   loss: 0.409237   lr: 0.010000   i/s: 1032.03
  step:    9300   loss: 0.967408   lr: 0.010000   i/s: 1030.05
  step:    9400   loss: 0.525338   lr: 0.010000   i/s: 1030.07
  step:    9500   loss: 0.758896   lr: 0.010000   i/s: 1038.89
  step:    9600   loss: 1.168363   lr: 0.010000   i/s: 1012.51
  step:    9700   loss: 0.460460   lr: 0.010000   i/s:  970.48
  step:    9800   loss: 0.526057   lr: 0.010000   i/s: 1041.16
  step:    9900   loss: 1.087378   lr: 0.010000   i/s: 1040.62
  step:   10000   loss: 0.450504   lr: 0.010000   i/s: 1001.08
Evaluating...
  accuracy: 0.8685
Training...
  step:   10100   loss: 0.334797   lr: 0.009000   i/s:  403.07
  step:   10200   loss: 0.557356   lr: 0.009000   i/s:  973.46
  step:   10300   loss: 0.455093   lr: 0.009000   i/s: 1037.15
  step:   10400   loss: 0.149972   lr: 0.009000   i/s: 1040.11
  step:   10500   loss: 0.450200   lr: 0.009000   i/s: 1041.61
  step:   10600   loss: 1.307869   lr: 0.009000   i/s:  981.89
  step:   10700   loss: 0.874643   lr: 0.009000   i/s:  958.40
  step:   10800   loss: 0.187484   lr: 0.009000   i/s: 1028.29
  step:   10900   loss: 0.906819   lr: 0.009000   i/s:  967.84
  step:   11000   loss: 0.600397   lr: 0.009000   i/s: 1032.26
Evaluating...
  accuracy: 0.8581
Training...
  step:   11100   loss: 0.457243   lr: 0.009000   i/s:  425.77
  step:   11200   loss: 0.620792   lr: 0.009000   i/s: 1037.48
  step:   11300   loss: 0.788553   lr: 0.009000   i/s: 1004.13
  step:   11400   loss: 0.612838   lr: 0.009000   i/s: 1015.23
  step:   11500   loss: 0.761436   lr: 0.009000   i/s:  980.06
  step:   11600   loss: 1.145185   lr: 0.009000   i/s:  989.70
  step:   11700   loss: 0.587807   lr: 0.009000   i/s: 1026.33
  step:   11800   loss: 0.374947   lr: 0.009000   i/s:  997.14
  step:   11900   loss: 0.535011   lr: 0.009000   i/s:  988.77
  step:   12000   loss: 0.919092   lr: 0.009000   i/s: 1035.59
Evaluating...
  accuracy: 0.8804
Training...
  step:   12100   loss: 0.530406   lr: 0.009000   i/s:  415.01
  step:   12200   loss: 0.943522   lr: 0.009000   i/s: 1014.67
  step:   12300   loss: 0.290869   lr: 0.009000   i/s:  983.37
  step:   12400   loss: 0.418768   lr: 0.009000   i/s:  973.32
  step:   12500   loss: 0.387725   lr: 0.009000   i/s:  973.51
  step:   12600   loss: 1.092985   lr: 0.009000   i/s:  930.71
  step:   12700   loss: 0.307091   lr: 0.009000   i/s: 1012.94
  step:   12800   loss: 0.295358   lr: 0.009000   i/s: 1026.72
  step:   12900   loss: 0.608418   lr: 0.009000   i/s: 1016.48
  step:   13000   loss: 0.427352   lr: 0.009000   i/s: 1036.79
Evaluating...
  accuracy: 0.8913
Training...
  step:   13100   loss: 0.447693   lr: 0.009000   i/s:  400.71
  step:   13200   loss: 0.189442   lr: 0.009000   i/s:  995.68
  step:   13300   loss: 0.515071   lr: 0.009000   i/s: 1002.99
  step:   13400   loss: 0.425247   lr: 0.009000   i/s:  964.00
  step:   13500   loss: 0.945437   lr: 0.009000   i/s: 1032.98
  step:   13600   loss: 1.293150   lr: 0.009000   i/s: 1033.67
  step:   13700   loss: 1.305258   lr: 0.009000   i/s: 1027.65
  step:   13800   loss: 0.362586   lr: 0.009000   i/s: 1016.37
  step:   13900   loss: 0.403313   lr: 0.009000   i/s: 1024.22
  step:   14000   loss: 0.399584   lr: 0.009000   i/s: 1005.55
Evaluating...
  accuracy: 0.8864
Training...
  step:   14100   loss: 0.207920   lr: 0.009000   i/s:  421.31
  step:   14200   loss: 0.574743   lr: 0.009000   i/s: 1032.52
  step:   14300   loss: 1.355771   lr: 0.009000   i/s: 1029.60
  step:   14400   loss: 0.892763   lr: 0.009000   i/s: 1027.03
  step:   14500   loss: 0.755069   lr: 0.009000   i/s: 1024.56
  step:   14600   loss: 0.611209   lr: 0.009000   i/s: 1027.53
  step:   14700   loss: 1.013487   lr: 0.009000   i/s: 1030.17
Training Epoch 2...
  step:   14800   loss: 0.242172   lr: 0.009000   i/s:  915.02
  step:   14900   loss: 0.575221   lr: 0.009000   i/s: 1039.00
  step:   15000   loss: 1.704033   lr: 0.009000   i/s: 1042.70
Evaluating...
  accuracy: 0.8921
Training...
  step:   15100   loss: 0.189718   lr: 0.009000   i/s:  438.08
  step:   15200   loss: 0.234260   lr: 0.009000   i/s: 1039.08
  step:   15300   loss: 0.443983   lr: 0.009000   i/s: 1035.92
  step:   15400   loss: 0.443126   lr: 0.009000   i/s: 1035.63
  step:   15500   loss: 0.282271   lr: 0.009000   i/s:  990.64
  step:   15600   loss: 1.088677   lr: 0.009000   i/s: 1038.22
  step:   15700   loss: 0.519746   lr: 0.009000   i/s: 1035.23
  step:   15800   loss: 0.681998   lr: 0.009000   i/s: 1037.49
  step:   15900   loss: 0.525874   lr: 0.009000   i/s: 1037.64
  step:   16000   loss: 0.427543   lr: 0.009000   i/s: 1033.11
Evaluating...
  accuracy: 0.8983
Training...
  step:   16100   loss: 0.556764   lr: 0.009000   i/s:  439.16
  step:   16200   loss: 0.208572   lr: 0.009000   i/s: 1035.11
  step:   16300   loss: 1.919777   lr: 0.009000   i/s: 1042.30
  step:   16400   loss: 0.510058   lr: 0.009000   i/s: 1042.38
  step:   16500   loss: 0.419661   lr: 0.009000   i/s: 1034.21
  step:   16600   loss: 0.799362   lr: 0.009000   i/s: 1039.67
  step:   16700   loss: 0.204670   lr: 0.009000   i/s:  997.10
  step:   16800   loss: 0.500988   lr: 0.009000   i/s: 1030.67
  step:   16900   loss: 0.712977   lr: 0.009000   i/s: 1033.84
  step:   17000   loss: 0.174459   lr: 0.009000   i/s: 1035.19
Evaluating...
  accuracy: 0.8957
Training...
  step:   17100   loss: 0.225670   lr: 0.009000   i/s:  450.50
  step:   17200   loss: 0.604853   lr: 0.009000   i/s: 1034.77
  step:   17300   loss: 0.238381   lr: 0.009000   i/s: 1037.01
  step:   17400   loss: 0.287099   lr: 0.009000   i/s: 1038.43
  step:   17500   loss: 1.350337   lr: 0.009000   i/s: 1041.36
  step:   17600   loss: 0.323076   lr: 0.009000   i/s: 1039.37
  step:   17700   loss: 1.056839   lr: 0.009000   i/s: 1039.22
  step:   17800   loss: 0.290084   lr: 0.009000   i/s:  985.49
  step:   17900   loss: 0.265543   lr: 0.009000   i/s: 1027.72
  step:   18000   loss: 0.369357   lr: 0.009000   i/s: 1034.57
Evaluating...
  accuracy: 0.8848
Training...
  step:   18100   loss: 0.293914   lr: 0.009000   i/s:  438.93
  step:   18200   loss: 0.681480   lr: 0.009000   i/s: 1031.91
  step:   18300   loss: 0.275915   lr: 0.009000   i/s: 1035.97
  step:   18400   loss: 0.516870   lr: 0.009000   i/s: 1040.32
  step:   18500   loss: 0.512865   lr: 0.009000   i/s: 1038.09
  step:   18600   loss: 0.279998   lr: 0.009000   i/s: 1038.29
  step:   18700   loss: 0.188850   lr: 0.009000   i/s: 1035.71
  step:   18800   loss: 0.581584   lr: 0.009000   i/s: 1037.45
  step:   18900   loss: 0.754903   lr: 0.009000   i/s: 1039.61
  step:   19000   loss: 0.587499   lr: 0.009000   i/s: 1039.13
Evaluating...
  accuracy: 0.8904
Training...
  step:   19100   loss: 0.214593   lr: 0.009000   i/s:  444.66
  step:   19200   loss: 0.824621   lr: 0.009000   i/s: 1039.32
  step:   19300   loss: 0.639391   lr: 0.009000   i/s: 1036.49
  step:   19400   loss: 0.307804   lr: 0.009000   i/s: 1038.45
  step:   19500   loss: 0.170342   lr: 0.009000   i/s: 1039.58
  step:   19600   loss: 0.442691   lr: 0.009000   i/s: 1039.51
  step:   19700   loss: 0.656529   lr: 0.009000   i/s: 1031.29
  step:   19800   loss: 0.497308   lr: 0.009000   i/s:  996.97
  step:   19900   loss: 0.390402   lr: 0.009000   i/s: 1030.80
  step:   20000   loss: 0.181043   lr: 0.009000   i/s: 1025.86
Evaluating...
  accuracy: 0.9037
Training...
  step:   20100   loss: 0.279833   lr: 0.008100   i/s:  420.01
  step:   20200   loss: 0.387441   lr: 0.008100   i/s: 1026.11
  step:   20300   loss: 0.614445   lr: 0.008100   i/s: 1035.44
  step:   20400   loss: 1.887640   lr: 0.008100   i/s: 1035.64
  step:   20500   loss: 0.467831   lr: 0.008100   i/s: 1035.79
  step:   20600   loss: 0.890244   lr: 0.008100   i/s: 1022.09
  step:   20700   loss: 0.299031   lr: 0.008100   i/s: 1020.53
  step:   20800   loss: 0.341366   lr: 0.008100   i/s: 1035.15
  step:   20900   loss: 0.367442   lr: 0.008100   i/s: 1026.69
  step:   21000   loss: 0.216183   lr: 0.008100   i/s: 1020.25
Evaluating...
  accuracy: 0.8963
Training...
  step:   21100   loss: 1.481229   lr: 0.008100   i/s:  454.19
  step:   21200   loss: 0.068218   lr: 0.008100   i/s: 1037.78
  step:   21300   loss: 0.410192   lr: 0.008100   i/s: 1038.15
  step:   21400   loss: 0.807058   lr: 0.008100   i/s: 1037.22
  step:   21500   loss: 0.477383   lr: 0.008100   i/s: 1036.73
  step:   21600   loss: 1.209804   lr: 0.008100   i/s: 1037.13
  step:   21700   loss: 1.116107   lr: 0.008100   i/s: 1040.38
  step:   21800   loss: 0.131164   lr: 0.008100   i/s: 1042.21
  step:   21900   loss: 1.593124   lr: 0.008100   i/s: 1035.39
  step:   22000   loss: 0.226311   lr: 0.008100   i/s: 1015.62
Evaluating...
  accuracy: 0.9071
Training...
  step:   22100   loss: 0.339608   lr: 0.008100   i/s:  422.11

Inference

Open a dataset for inference (the train split is reused here)

In [131]:
dataset = SVHNDataset(dataset_path, train_meta)

Define transforms

In [132]:
transforms = torchvision.transforms.Compose([
    torchvision.transforms.CenterCrop([54, 54]),
    torchvision.transforms.ToTensor(),
    torchvision.transforms.Normalize([0.5, 0.5, 0.5], [0.5, 0.5, 0.5])
])

Load model

In [133]:
model = SVHNModel()
model.to(device)
model_path = pathlib.PosixPath(model_save_location) / 'model_22000'
model.load_state_dict(torch.load(model_path))
model.eval();
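
The model_22000 checkpoint is the last one saved during the run above, corresponding to the best validation accuracy in the log (0.9071 at step 22,000).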

Helper to decode model output

In [134]:
def decode(logits):
    predictions = []
    for i in range(len(logits)):
        pred = logits[i].max(dim=1)[1].cpu().numpy()
        predictions.append(pred)
    return np.array(predictions).T

Pick a sample image

In [135]:
img_pil, label = dataset[24]
print('true label:', label)
print('image type:', type(img_pil))
display(img_pil)
image type: <class 'PIL.Image.Image'>

Run model

In [136]:
img_tensor = transforms(img_pil)
with torch.no_grad():
    logits = model(img_tensor.unsqueeze(dim=0).to(device))

Show result

In [137]:
result = decode(logits)  # np.ndarray
result
Out[137]:
array([[ 3,  6, 10,  1,  0,  0]])
  • first item is 'number length'
  • '0' encodes 'no digit'
  • '10' encodes 'digit 0'
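
A small helper (not part of the original code) to render a decoded row as a string:

def to_string(row):
    """Convert one decoded row, e.g. [3, 6, 10, 1, 0, 0], to '601'."""
    length = row[0]
    return ''.join(str(d % 10) for d in row[1:1 + length])  # 10 encodes digit 0

print(to_string(result[0]))  # -> '601'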