
Commit 5264dc6

Merge commit (2 parents: a89453c + a152e8a)

File tree: 18 files changed (+683 −4 lines)

README.md — 2 additions & 2 deletions

````diff
@@ -30,12 +30,12 @@ Here are the performance metrics (accuracy on AffectNet, AFEW and VGAF), F1-scor
 Please note, that we report the accuracies for AFEW and VGAF only on the subsets, in which MTCNN detects facial regions. The code contains also computation of overall accuracy on the complete testing set, which is slightly lower due to the absence of faces or failed face detection.
 
 ## Usage
-A special [python package](python-package) was prepared to simplify the usage of our models for face expression recognition and extraction of visual emotional embeddings. It can be installed via pip:
+Special [python packages](python-package) were prepared to simplify the usage of our models for face expression recognition and extraction of visual emotional embeddings. It can be installed via pip:
 ```
 pip install hsemotion
 ```
 
-In order to run our code on the datasets, please prepare them firstly using our TensorFlow notebooks: [train_emotions.ipynb](src/train_emotions.ipynb), [AFEW_train.ipynb](src/AFEW_train.ipynb) and [VGAF_train.ipynb](src/VGAF_train.ipynb).
+In order to run our code on the datasets, please prepare them firstly using our TensorFlow notebooks: [train_emotions.ipynb](src/affectnet/train_emotions.ipynb), [AFEW_train.ipynb](src/AFEW_train.ipynb) and [VGAF_train.ipynb](src/VGAF_train.ipynb).
 
 If you want to run our mobile application, please, run the following scripts inside [mobile_app](mobile_app) folder:
 ```
````
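For context, here is a minimal sketch of how the pip-installed package might be used end to end with the MTCNN face detection the README mentions. The `facenet_pytorch` detector, the `hsemotion.facial_emotions` import path, and the `test.jpg` file are illustrative assumptions, not part of this commit:

```python
# Minimal usage sketch (assumptions: facenet-pytorch for MTCNN detection,
# a local 'test.jpg', and the hsemotion.facial_emotions import path).
import numpy as np
from PIL import Image
from facenet_pytorch import MTCNN
from hsemotion.facial_emotions import HSEmotionRecognizer

img = np.array(Image.open('test.jpg').convert('RGB'))

# Detect the most prominent face; the README reports accuracy only on
# images where MTCNN finds a facial region.
mtcnn = MTCNN(keep_all=False)
bounding_boxes, _ = mtcnn.detect(img)
if bounding_boxes is not None:
    x1, y1, x2, y2 = (int(v) for v in bounding_boxes[0])
    face_img = img[y1:y2, x1:x2, :]

    # Classify the facial expression of the cropped RGB face.
    fer = HSEmotionRecognizer(model_name='enet_b0_8_best_vgaf', device='cpu')
    emotion, scores = fer.predict_emotions(face_img, logits=False)
    print(emotion, scores)
```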
Five binary files changed (contents not shown); two of them are 29.3 MB and 29.4 MB.

File renamed without changes.
New file — 119 additions & 0 deletions

```python
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import os
import urllib.request

import numpy as np
from PIL import Image
import torch
from torchvision import transforms
import timm  # the checkpoints are pickled timm models, so timm must be importable


def get_model_path(model_name):
    # Download the pre-trained checkpoint into ~/.hsemotion on first use and cache it.
    model_file = model_name + '.pt'
    cache_dir = os.path.join(os.path.expanduser('~'), '.hsemotion')
    os.makedirs(cache_dir, exist_ok=True)
    fpath = os.path.join(cache_dir, model_file)
    if not os.path.isfile(fpath):
        url = 'https://github.com/HSE-asavchenko/face-emotion-recognition/blob/main/models/affectnet_emotions/' + model_file + '?raw=true'
        print('Downloading', model_name, 'from', url)
        urllib.request.urlretrieve(url, fpath)
    return fpath


class HSEmotionRecognizer:
    # Supported values of model_name: enet_b0_8_best_vgaf, enet_b0_8_best_afew,
    # enet_b2_8, enet_b0_8_va_mtl, enet_b2_7
    def __init__(self, model_name='enet_b0_8_best_vgaf', device='cpu'):
        self.device = device
        # Multi-task ('_mtl') models produce two extra outputs after the emotion logits.
        self.is_mtl = '_mtl' in model_name
        if '_7' in model_name:
            self.idx_to_class = {0: 'Anger', 1: 'Disgust', 2: 'Fear', 3: 'Happiness',
                                 4: 'Neutral', 5: 'Sadness', 6: 'Surprise'}
        else:
            self.idx_to_class = {0: 'Anger', 1: 'Contempt', 2: 'Disgust', 3: 'Fear',
                                 4: 'Happiness', 5: 'Neutral', 6: 'Sadness', 7: 'Surprise'}

        # EfficientNet-B0 models expect 224x224 inputs, the B2 models 260x260.
        self.img_size = 224 if '_b0_' in model_name else 260
        self.test_transforms = transforms.Compose([
            transforms.Resize((self.img_size, self.img_size)),
            transforms.ToTensor(),
            transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                 std=[0.229, 0.224, 0.225])
        ])

        path = get_model_path(model_name)
        model = torch.load(path)
        # Save the weights of the final linear layer so logits can still be computed
        # from features, then replace it with Identity so the network returns embeddings.
        if isinstance(model.classifier, torch.nn.Sequential):
            self.classifier_weights = model.classifier[0].weight.cpu().data.numpy()
            self.classifier_bias = model.classifier[0].bias.cpu().data.numpy()
        else:
            self.classifier_weights = model.classifier.weight.cpu().data.numpy()
            self.classifier_bias = model.classifier.bias.cpu().data.numpy()
        model.classifier = torch.nn.Identity()
        model = model.to(device)
        self.model = model.eval()
        print(path, self.test_transforms)

    def get_probab(self, features):
        # Apply the saved linear classifier: logits = features @ W^T + b.
        return np.dot(features, np.transpose(self.classifier_weights)) + self.classifier_bias

    def extract_features(self, face_img):
        # face_img: a cropped RGB face as a (H, W, 3) numpy array.
        img_tensor = self.test_transforms(Image.fromarray(face_img))
        img_tensor.unsqueeze_(0)
        features = self.model(img_tensor.to(self.device))
        return features.data.cpu().numpy()

    def predict_emotions(self, face_img, logits=True):
        features = self.extract_features(face_img)
        scores = self.get_probab(features)[0]
        # For multi-task models, only the leading outputs are emotion logits.
        x = scores[:-2] if self.is_mtl else scores
        pred = np.argmax(x)
        if not logits:
            # Numerically stable softmax over the emotion logits.
            e_x = np.exp(x - np.max(x))
            e_x = e_x / e_x.sum()
            if self.is_mtl:
                scores[:-2] = e_x
            else:
                scores = e_x
        return self.idx_to_class[pred], scores

    def extract_multi_features(self, face_img_list):
        # Batched version of extract_features for a list of face images.
        imgs = [self.test_transforms(Image.fromarray(face_img)) for face_img in face_img_list]
        features = self.model(torch.stack(imgs, dim=0).to(self.device))
        return features.data.cpu().numpy()

    def predict_multi_emotions(self, face_img_list, logits=True):
        features = self.extract_multi_features(face_img_list)
        scores = self.get_probab(features)
        x = scores[:, :-2] if self.is_mtl else scores
        preds = np.argmax(x, axis=1)
        if not logits:
            # Row-wise numerically stable softmax.
            e_x = np.exp(x - np.max(x, axis=1)[:, np.newaxis])
            e_x = e_x / e_x.sum(axis=1)[:, None]
            if self.is_mtl:
                scores[:, :-2] = e_x
            else:
                scores = e_x
        return [self.idx_to_class[pred] for pred in preds], scores
```
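A short sketch of embedding extraction and batch prediction with the class above; `face1.jpg` and `face2.jpg` are hypothetical pre-cropped RGB face images, and the import path is assumed from the hsemotion package named in the README:

```python
# Sketch: emotional embeddings and batch prediction with HSEmotionRecognizer.
# 'face1.jpg'/'face2.jpg' are hypothetical, already-cropped RGB face images.
import numpy as np
from PIL import Image
from hsemotion.facial_emotions import HSEmotionRecognizer  # assumed import path

fer = HSEmotionRecognizer(model_name='enet_b0_8_va_mtl', device='cpu')

faces = [np.array(Image.open(f).convert('RGB')) for f in ('face1.jpg', 'face2.jpg')]

# Visual emotional embeddings from the EfficientNet backbone
# (1280-dimensional for the B0 models).
embeddings = fer.extract_multi_features(faces)
print(embeddings.shape)

# Batch prediction. With logits=False the eight emotion scores become
# softmax probabilities; for this '_va_mtl' model the last two outputs
# (valence and arousal, judging by the name) stay as raw regressions.
labels, scores = fer.predict_multi_emotions(faces, logits=False)
for label, row in zip(labels, scores):
    print(label, row[:-2], row[-2:])
```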
File renamed without changes.
