Compare commits

7 Commits

| Author | SHA1 | Date |
|---|---|---|
|  | 40c16548b2 |  |
|  | 17251edfda |  |
|  | bfef06d720 |  |
|  | 7cf35d7357 |  |
|  | 65d478ef1b |  |
|  | cd9cc8ce8b |  |
|  | 0af9320571 |  |
BIN  models/model_A-Z_v2.onnx  Normal file (binary file not shown)
BIN  models/model_A-Z_v2.pth  Normal file (binary file not shown)
@@ -36,11 +36,28 @@ def circle_intersection(x0, y0, r0, x1, y1, r1):
 class MirrorKeypoints:
     def __call__(self, sample):
+        if sample.shape[0] == 0:
+            return sample
         if random.random() > 0.5:
             return sample
-        # flip the keypoints tensor
-        sample = 1 - sample
+        # flip the x coordinates
+        sample[:, :, 0] *= -1
+
+        # switch hands (left becomes right and vice versa)
+        left, right, n = 12, 33, 21
+        if isinstance(sample, np.ndarray):  # For testing purposes only
+            sample[:, left:left+n, :], sample[:, right:right+n, :] = sample[:, right:right+n, :], sample[:, left:left+n, :].copy()
+        else:
+            sample[:, left:left+n, :], sample[:, right:right+n, :] = sample[:, right:right+n, :], sample[:, left:left+n, :].clone()
+
+        # switch pose keypoints
+        sample[:, [1, 2], :] = sample[:, [2, 1], :]      # eyes
+        sample[:, [3, 4], :] = sample[:, [4, 3], :]      # ears
+        sample[:, [6, 7], :] = sample[:, [7, 6], :]      # shoulders
+        sample[:, [8, 9], :] = sample[:, [9, 8], :]      # elbows
+        sample[:, [10, 11], :] = sample[:, [11, 10], :]  # wrists
+
         return sample
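The new MirrorKeypoints body (presumably in src/augmentations.py, given the import added in src/train.py below) flips the x coordinates and swaps the two hand blocks, assuming a fixed landmark layout where one hand block starts at index 12, the other at index 33, and each hand has 21 points. A minimal standalone check of that swap, using NumPy as in the isinstance branch above; the total of 54 landmarks is an assumption made only for this sketch:

```python
import numpy as np

# Hypothetical layout for this sketch: frames x 54 landmarks x (x, y).
left, right, n = 12, 33, 21
sample = np.random.rand(5, 54, 2).astype(np.float32)

mirrored = sample.copy()
mirrored[:, :, 0] *= -1  # flip the x coordinates
# swap the two hand blocks (copies avoid aliasing between the two slice assignments)
mirrored[:, left:left+n, :], mirrored[:, right:right+n, :] = (
    mirrored[:, right:right+n, :].copy(),
    mirrored[:, left:left+n, :].copy(),
)

# the block starting at index 12 now holds the x-flipped block that started at index 33
assert np.allclose(mirrored[:, left:left+n, 0], -sample[:, right:right+n, 0])
assert np.allclose(mirrored[:, left:left+n, 1], sample[:, right:right+n, 1])
print("mirror swap consistent")
```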
@@ -124,4 +141,16 @@ class NoiseAugmentation:
     def __call__(self, sample):
         # add noise to the keypoints
         sample = sample + torch.randn(sample.shape) * self.noise
         return sample
+
+
+# augmentation to rotate all keypoints around 0,0
+class RotateAugmentation:
+    def __call__(self, sample):
+        # generate a random angle between -13 and 13 degrees
+        angle_max = 13.0
+        angle = math.radians(random.uniform(a=-angle_max, b=angle_max))
+        # rotate the keypoints around 0,0
+        new_sample = sample
+        new_sample[:, :, 0] = sample[:, :, 0]*math.cos(angle) - sample[:, :, 1]*math.sin(angle)
+        new_sample[:, :, 1] = sample[:, :, 0]*math.sin(angle) + sample[:, :, 1]*math.cos(angle)
+        return new_sample
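One thing to watch in RotateAugmentation: `new_sample = sample` only creates a new reference, so the first assignment overwrites `sample[:, :, 0]` before the original x values are used to compute the rotated y coordinates. A clone-first variant with the same interface (a sketch, not the repository's code) avoids that:

```python
import math
import random

import torch


class RotateAugmentationFixed:
    """Sketch of the rotation augmentation without aliasing the input tensor."""

    def __init__(self, angle_max=13.0):
        self.angle_max = angle_max

    def __call__(self, sample):
        # random angle between -angle_max and angle_max degrees
        angle = math.radians(random.uniform(-self.angle_max, self.angle_max))
        cos_a, sin_a = math.cos(angle), math.sin(angle)
        x, y = sample[:, :, 0], sample[:, :, 1]
        # write into a clone so x and y still view the original values
        new_sample = sample.clone()
        new_sample[:, :, 0] = x * cos_a - y * sin_a
        new_sample[:, :, 1] = x * sin_a + y * cos_a
        return new_sample
```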
@@ -7,7 +7,7 @@ from src.model import SPOTER
 from src.identifiers import LANDMARKS
 
 # set parameters of the model
-model_name = 'model_A-Z'
+model_name = 'model_A-Z_v2'
 num_classes = 26
 
 # load PyTorch model from .pth file
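The hunk above comes from a script whose surrounding lines are not shown; since a new models/model_A-Z_v2.onnx binary is added in the same commits, it plausibly converts the loaded .pth checkpoint to ONNX. A generic sketch of that step, reusing the SPOTER constructor and LANDMARKS list seen elsewhere in this diff; the dummy sequence length of 30 and the export arguments are assumptions, not the script's actual code:

```python
import torch

from src.identifiers import LANDMARKS
from src.model import SPOTER

# set parameters of the model
model_name = 'model_A-Z_v2'
num_classes = 26

# load PyTorch model from .pth file and switch to inference mode
model = SPOTER(num_classes=num_classes, hidden_dim=len(LANDMARKS) * 2)
model.load_state_dict(torch.load(f'models/{model_name}.pth', map_location='cpu'))
model.eval()

# dummy input: (frames, landmarks, xy); 30 frames is an arbitrary choice
dummy = torch.rand(30, len(LANDMARKS), 2)

torch.onnx.export(
    model, dummy, f'models/{model_name}.onnx',
    input_names=['keypoints'], output_names=['logits'],
    dynamic_axes={'keypoints': {0: 'frames'}},  # allow variable-length clips
)
```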
src/model.py (27 changed lines)

@@ -1,7 +1,6 @@
 ### SPOTER model implementation from the paper "SPOTER: Sign Pose-based Transformer for Sign Language Recognition from Sequence of Skeletal Data"
 
 import copy
-import math
 from typing import Optional
 
 import torch
@@ -39,20 +38,7 @@ class SPOTERTransformerDecoderLayer(nn.TransformerDecoderLayer):
 
         return tgt
 
-class PositionalEmbedding(nn.Module):
-    def __init__(self, d_model, max_len=60):
-        super().__init__()
-        pe = torch.zeros(max_len, d_model)
-        position = torch.arange(0, max_len, dtype=torch.float).unsqueeze(1)
-        div_term = torch.exp(torch.arange(0, d_model, 2).float() * (-math.log(10000.0) / d_model))
-        pe[:, 0::2] = torch.sin(position * div_term)
-        pe[:, 1::2] = torch.cos(position * div_term)
-        pe = pe.unsqueeze(0).transpose(0, 1)
-        self.register_buffer('pe', pe)
-
-    def forward(self, x):
-        return x + self.pe[:x.size(0), :]
-
 class SPOTER(nn.Module):
     """
     Implementation of the SPOTER (Sign POse-based TransformER) architecture for sign language recognition from sequence
@@ -62,9 +48,8 @@ class SPOTER(nn.Module):
     def __init__(self, num_classes, hidden_dim=55):
         super().__init__()
 
-        self.pos = PositionalEmbedding(hidden_dim)
-
+        self.row_embed = nn.Parameter(torch.rand(50, hidden_dim))
+        self.pos = nn.Parameter(torch.cat([self.row_embed[0].unsqueeze(0).repeat(1, 1, 1)], dim=-1).flatten(0, 1).unsqueeze(0))
         self.class_query = nn.Parameter(torch.rand(1, hidden_dim))
         self.transformer = nn.Transformer(hidden_dim, 9, 6, 6)
         self.linear_class = nn.Linear(hidden_dim, num_classes)
@@ -76,13 +61,7 @@ class SPOTER(nn.Module):
 
     def forward(self, inputs):
         h = torch.unsqueeze(inputs.flatten(start_dim=1), 1).float()
-        # add positional encoding
-        h = self.pos(h)
-
-        # add class query
-        h = self.transformer(h, self.class_query.unsqueeze(0)).transpose(0, 1)
-
-        # get class prediction
+        h = self.transformer(self.pos + h, self.class_query.unsqueeze(0)).transpose(0, 1)
         res = self.linear_class(h)
 
         return res
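For reference, a standalone shape check of the rewritten forward pass above. This is only a sketch: plain tensors stand in for self.pos and self.class_query, and the sizes (54 landmarks with x and y, so hidden_dim = 108, divisible by the 9 attention heads) are assumptions, not values taken from the repository:

```python
import torch
import torch.nn as nn

hidden_dim, num_classes, seq_len = 108, 26, 30

pos = torch.rand(1, 1, hidden_dim)        # stands in for self.pos, broadcasts over frames
class_query = torch.rand(1, hidden_dim)   # stands in for self.class_query
transformer = nn.Transformer(hidden_dim, 9, 6, 6)
linear_class = nn.Linear(hidden_dim, num_classes)

inputs = torch.rand(seq_len, hidden_dim // 2, 2)                     # (frames, landmarks, xy)
h = torch.unsqueeze(inputs.flatten(start_dim=1), 1).float()          # (frames, 1, hidden_dim)
h = transformer(pos + h, class_query.unsqueeze(0)).transpose(0, 1)   # (1, 1, hidden_dim)
res = linear_class(h)

print(res.shape)  # torch.Size([1, 1, 26])
```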
src/train.py (18 changed lines)
@@ -8,7 +8,7 @@ import torch.optim as optim
 from torch.utils.data import DataLoader
 from torchvision import transforms
 
-from src.augmentations import MirrorKeypoints, Z_augmentation, NoiseAugmentation
+from src.augmentations import MirrorKeypoints, Z_augmentation, NoiseAugmentation, RotateAugmentation
 from src.datasets.finger_spelling_dataset import FingerSpellingDataset
 from src.identifiers import LANDMARKS
 from src.model import SPOTER
@@ -29,12 +29,16 @@ def train():
     g = torch.Generator()
     g.manual_seed(379)
 
-    device = torch.device("cuda:0")
-
     spoter_model = SPOTER(num_classes=26, hidden_dim=len(LANDMARKS) * 2)
 
+    # use cuda if available
+    if torch.cuda.is_available():
+        device = torch.device("cuda:0")
+    else:
+        device = torch.device("cpu")
+
     spoter_model.train(True)
     spoter_model.to(device)
 
     criterion = nn.CrossEntropyLoss()
     criterion_bad = CustomLoss()
@@ -45,7 +49,7 @@ def train():
     if not os.path.exists("checkpoints"):
         os.makedirs("checkpoints")
 
-    transform = transforms.Compose([MirrorKeypoints(), NoiseAugmentation(noise=0.1)])
+    transform = transforms.Compose([MirrorKeypoints(), NoiseAugmentation(noise=0.1), RotateAugmentation()])
 
     train_set = FingerSpellingDataset("data/fingerspelling/data/", bad_data_folder="", keypoints_identifier=LANDMARKS, subset="train", transform=transform)
     train_loader = DataLoader(train_set, shuffle=True, generator=g)
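A quick smoke test of the extended augmentation pipeline wired in above (a sketch only; the 54-landmark layout and the 30-frame clip length are assumptions for illustration):

```python
import torch
from torchvision import transforms

from src.augmentations import MirrorKeypoints, NoiseAugmentation, RotateAugmentation

transform = transforms.Compose([MirrorKeypoints(), NoiseAugmentation(noise=0.1), RotateAugmentation()])

dummy = torch.rand(30, 54, 2)   # (frames, landmarks, xy)
augmented = transform(dummy)
print(augmented.shape)          # torch.Size([30, 54, 2]) -- the augmentations keep the shape
```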
@@ -124,9 +128,9 @@ def train():
         if val_acc > best_val_acc:
             best_val_acc = val_acc
             epochs_without_improvement = 0
-            if epoch > 55:
+            if epoch > 45:
                 top_val_acc = val_acc
-                top_train_acc = train_acc
+                top_train_acc = pred_correct / pred_all
                 checkpoint_index = epoch
                 torch.save(spoter_model.state_dict(), f"checkpoints/spoter_{epoch}.pth")
         else:
File diff suppressed because one or more lines are too long
@@ -27,7 +27,7 @@ frame_height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
 keypoints = []
 
 spoter_model = SPOTER(num_classes=26, hidden_dim=len(LANDMARKS) * 2)
-spoter_model.load_state_dict(torch.load('models/spoter_76.pth', map_location=torch.device('cpu')))
+spoter_model.load_state_dict(torch.load('models/model_A-Z_v2.pth', map_location=torch.device('cpu')))
 
 # get values of the landmarks as a list of integers
 values = []
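To close the loop, a hypothetical final step of the capture script above: running the loaded model_A-Z_v2 checkpoint on the collected keypoints. This is a sketch, not the script's code; it assumes `keypoints` holds one (num_landmarks, 2) array per captured frame and that the 26 class indices map to the letters A-Z:

```python
import numpy as np
import torch

spoter_model.eval()
with torch.no_grad():
    clip = torch.from_numpy(np.stack(keypoints)).float()   # (frames, landmarks, 2)
    logits = spoter_model(clip)                             # (1, 1, 26)
    predicted = int(logits.argmax(dim=-1))                  # class index 0..25
    print(chr(ord('A') + predicted))                        # assumed A-Z mapping
```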