
Commit 0b401bf

9.21 Update

1 parent ba5c26b, commit 0b401bf

12 files changed: 143,828 additions & 0 deletions

emotion_data_0.1.ipynb

Lines changed: 1134 additions & 0 deletions
Large diffs are not rendered by default.

emotion_labeling.ipynb

Lines changed: 370 additions & 0 deletions
@@ -0,0 +1,370 @@
Explanation of this code:
http://www.paulvangent.com/2016/04/01/emotion-recognition-with-python-opencv-and-a-face-dataset/
# 1. Labeling Dataset
```python
import cv2
import random
import numpy as np
from shutil import copyfile
import glob

# Define emotion order; each class label is its index in this list
emotions = ["neutral", "anger", "contempt", "disgust", "fear", "happy", "sadness", "surprise"]
#participants = glob.glob("source_emotion/*")  # Returns a list of all folders with participant numbers

emotion = 'surprise'
```
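The numeric label attached to each image later on is simply the position of its emotion in this list, so the eight classes map to the integers 0 through 7, for example:

```python
emotions.index("neutral")   # 0
emotions.index("surprise")  # 7
```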
```python
data = {}

def get_files(emotion):
    """Get the file list for one emotion, shuffle it randomly and split it 80/20."""
    files = glob.glob("dataset_test/%s/*" % emotion)
    random.shuffle(files)
    training = files[:int(len(files) * 0.8)]     # first 80% of the file list
    prediction = files[-int(len(files) * 0.2):]  # last 20% of the file list
    return training, prediction

def make_sets():
    training_data = []
    training_labels = []
    prediction_data = []
    prediction_labels = []
    for emotion in emotions:
        training, prediction = get_files(emotion)
        # Append data to the training and prediction lists, and generate labels 0-7
        for item in training:
            image = cv2.imread(item)                        # open image
            gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)  # convert to grayscale (note: gray is not what gets appended)
            training_data.append(image)                     # append the image array to the training data list
            training_labels.append(emotions.index(emotion))

        for item in prediction:  # repeat the process above for the prediction set
            image = cv2.imread(item)
            gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
            prediction_data.append(image)
            prediction_labels.append(emotions.index(emotion))

    return training_data, training_labels, prediction_data, prediction_labels

training_data, training_labels, prediction_data, prediction_labels = make_sets()
```
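One detail worth noting in `get_files`: because both slice bounds are truncated with `int()`, a file can end up in neither set when the folder size is not a multiple of five (with 9 files, the first slice takes 7 and the second takes only the last 1, so one file is never used). A minimal alternative that splits around a single cut point, shown here as a sketch rather than what this commit runs:

```python
def get_files(emotion):
    """Shuffle one emotion's files and split them 80/20 without losing any file."""
    files = glob.glob("dataset_test/%s/*" % emotion)
    random.shuffle(files)
    split = int(len(files) * 0.8)        # single cut point
    return files[:split], files[split:]  # every file lands in exactly one set
```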
Checking the shape of the stacked training array (this cell was executed after the NumPy-conversion cell further down; execution counts 4 vs. 3):

```python
X.shape
```

Output:

```
(3483, 48, 48, 3)
```
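The trailing 3 in this shape shows that the stacked images are still three-channel BGR: `make_sets` computes a grayscale copy but appends the original `image`. If single-channel input is what the later reshape expects, one option is to read the files as grayscale in the first place; a sketch under that assumption (same `dataset_test` layout, loop body as in `make_sets`):

```python
# Sketch: store one channel per image so the stacked array comes out as (N, 48, 48).
for item in training:
    gray = cv2.imread(item, cv2.IMREAD_GRAYSCALE)  # shape (48, 48)
    training_data.append(gray)
    training_labels.append(emotions.index(emotion))
```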
# 2. Training with TFLearn (AlexNet)
```python
from __future__ import division, print_function, absolute_import

import tflearn
from tflearn.data_utils import shuffle, to_categorical
from tflearn.layers.core import input_data, dropout, fully_connected
from tflearn.layers.conv import conv_2d, max_pool_2d
from tflearn.layers.normalization import local_response_normalization
from tflearn.layers.estimator import regression
from tflearn.data_preprocessing import ImagePreprocessing
from tflearn.data_augmentation import ImageAugmentation
```
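`ImagePreprocessing` and `ImageAugmentation` are imported here but never used in this notebook. If they were wired in, it would typically look like the sketch below; the particular normalization and flip choices are illustrative assumptions, not something this commit does:

```python
# Sketch: attach preprocessing and augmentation to the input layer.
img_prep = ImagePreprocessing()
img_prep.add_featurewise_zero_center()   # subtract the mean pixel value
img_prep.add_featurewise_stdnorm()       # scale to unit standard deviation

img_aug = ImageAugmentation()
img_aug.add_random_flip_leftright()      # horizontal flips keep emotion labels valid

network = input_data(shape=[None, 48, 48, 1],
                     data_preprocessing=img_prep,
                     data_augmentation=img_aug,
                     name='input')
```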
```python
# Convert all data into NumPy arrays
X, Y, X_test, Y_test = np.array(training_data), np.array(training_labels), np.array(prediction_data), np.array(prediction_labels)
```
Data shuffle code (kept in a markdown cell in the notebook, so it is not executed):

```python
# Shuffle the data
def randomize(dataset, labels):
    permutation = np.random.permutation(labels.shape[0])
    shuffled_dataset = dataset[permutation, :, :]
    shuffled_labels = labels[permutation]
    return shuffled_dataset, shuffled_labels

X, Y = randomize(X, Y)
X_test, Y_test = randomize(X_test, Y_test)
#test_dataset, test_labels = randomize(test_dataset, test_labels)
#valid_dataset, valid_labels = randomize(valid_dataset, valid_labels)
```
```python
# Shuffling and one-hot encoding
X, Y = shuffle(X, Y)

#Y = to_categorical(Y, 8)
#Y_test = to_categorical(Y_test, 8)

def dense_to_one_hot(labels_dense, num_classes=8):
    """Convert class labels from scalars to one-hot vectors."""
    num_labels = labels_dense.shape[0]
    index_offset = np.arange(num_labels) * num_classes
    labels_one_hot = np.zeros((num_labels, num_classes))
    labels_one_hot.flat[index_offset + labels_dense.ravel()] = 1
    return labels_one_hot

Y = dense_to_one_hot(Y)
Y_test = dense_to_one_hot(Y_test)
```
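A quick check of what `dense_to_one_hot` produces, using a small made-up label array (the values are illustrative; the indices follow the `emotions` list):

```python
sample_labels = np.array([0, 5, 7])  # neutral, happy, surprise
dense_to_one_hot(sample_labels)
# array([[ 1.,  0.,  0.,  0.,  0.,  0.,  0.,  0.],
#        [ 0.,  0.,  0.,  0.,  0.,  1.,  0.,  0.],
#        [ 0.,  0.,  0.,  0.,  0.,  0.,  0.,  1.]])
```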
```python
X_test.shape
```

Output:

```
(867, 48, 48, 3)
```
```python
# Reshape the datasets to IMAGE_SIZE x IMAGE_SIZE single-channel arrays
IMAGE_SIZE = 48

X = X.reshape([-1, IMAGE_SIZE, IMAGE_SIZE, 1])
X_test = X_test.reshape([-1, IMAGE_SIZE, IMAGE_SIZE, 1])
```
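Note that `reshape` does not drop the colour channels: `X` still holds three-channel images, so reshaping `(3483, 48, 48, 3)` into `[-1, 48, 48, 1]` triples the first axis to 10449 rows while `Y` keeps 3483. A small sanity check, added here as a suggestion rather than part of the original run, would catch the mismatch before training:

```python
# Sanity check: one label per sample on the first axis.
assert X.shape[0] == Y.shape[0], "sample/label mismatch: %s vs %s" % (X.shape, Y.shape)
assert X_test.shape[0] == Y_test.shape[0]
```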
```python
# Building the convolutional network
network = input_data(shape=[None, IMAGE_SIZE, IMAGE_SIZE, 1], name='input')
network = conv_2d(network, 32, 3, activation='relu', regularizer="L2")
network = max_pool_2d(network, 2)
network = local_response_normalization(network)
#network = conv_2d(network, 64, 3, activation='relu', regularizer="L2")
#network = max_pool_2d(network, 2)
#network = local_response_normalization(network)
#network = fully_connected(network, 128, activation='tanh')
network = fully_connected(network, 64, activation='tanh')
network = dropout(network, 0.8)
#network = fully_connected(network, 256, activation='tanh')
network = fully_connected(network, 128, activation='tanh')
network = dropout(network, 0.8)
network = fully_connected(network, 8, activation='softmax')
network = regression(network, optimizer='adam', learning_rate=0.01,
                     loss='categorical_crossentropy', name='target')

# Training
model = tflearn.DNN(network, tensorboard_verbose=0)
model.fit({'input': X}, {'target': Y}, n_epoch=20,
          validation_set=({'input': X_test}, {'target': Y_test}),
          snapshot_step=100, show_metric=True, run_id='convnet_mnist')
```

Output (stdout):

```
---------------------------------
Run id: convnet_mnist
Log directory: /tmp/tflearn_logs/
```

Output (stderr):

```
Exception in thread Thread-17:
Traceback (most recent call last):
  File "/home/ryan/anaconda2/lib/python2.7/threading.py", line 801, in __bootstrap_inner
    self.run()
  File "/home/ryan/anaconda2/lib/python2.7/threading.py", line 754, in run
    self.__target(*self.__args, **self.__kwargs)
  File "/home/ryan/anaconda2/lib/python2.7/site-packages/tflearn/data_flow.py", line 183, in fill_feed_dict_queue
    data = self.retrieve_data(batch_ids)
  File "/home/ryan/anaconda2/lib/python2.7/site-packages/tflearn/data_flow.py", line 218, in retrieve_data
    utils.slice_array(self.feed_dict[key], batch_ids)
  File "/home/ryan/anaconda2/lib/python2.7/site-packages/tflearn/utils.py", line 166, in slice_array
    return X[start]
IndexError: index 8769 is out of bounds for axis 0 with size 3483

Exception in thread Thread-19:
Traceback (most recent call last):
  File "/home/ryan/anaconda2/lib/python2.7/threading.py", line 801, in __bootstrap_inner
    self.run()
  File "/home/ryan/anaconda2/lib/python2.7/threading.py", line 754, in run
    self.__target(*self.__args, **self.__kwargs)
  File "/home/ryan/anaconda2/lib/python2.7/site-packages/tflearn/data_flow.py", line 183, in fill_feed_dict_queue
    data = self.retrieve_data(batch_ids)
  File "/home/ryan/anaconda2/lib/python2.7/site-packages/tflearn/data_flow.py", line 218, in retrieve_data
    utils.slice_array(self.feed_dict[key], batch_ids)
  File "/home/ryan/anaconda2/lib/python2.7/site-packages/tflearn/utils.py", line 166, in slice_array
    return X[start]
IndexError: index 8902 is out of bounds for axis 0 with size 3483
```

Output (stdout, continued):

```
---------------------------------
Training samples: 20898
Validation samples: 5202
--
```
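The `IndexError ... with size 3483` is consistent with the shape mismatch noted above: after the reshape, `X` has 10449 rows while `Y` still has 3483, so batch indices drawn from the larger array run past the end of the label array (an inference from the recorded shapes, not something the commit states). Once the shapes agree and a run finishes, the fitted model can be saved and queried through TFLearn's `DNN` API; the file name below is only an example:

```python
model.save('emotion_convnet.tflearn')      # write the trained weights to disk
model.load('emotion_convnet.tflearn')      # restore them into the same graph
probs = model.predict(X_test[:1])          # per-class probabilities over the 8 emotions
print(emotions[int(np.argmax(probs[0]))])  # map the argmax back to an emotion name
```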
Notebook metadata: Python 2 kernel (IPython, Python 2.7.11), nbformat 4.0.
