
Commit 646fdbf

Training script
1 parent f428584 commit 646fdbf

File tree

1 file changed: +139 -0 lines changed


facial_expression.py

Lines changed: 139 additions & 0 deletions
@@ -0,0 +1,139 @@
# -*- coding: utf-8 -*-
"""
@authors: jaydeep thik, Vasudev Purandare
"""

import tensorflow as tf
from tensorflow import keras
#from tensorflow.keras.models import Sequential
#from tensorflow.keras.layers import Dense, Conv2D, MaxPooling2D, Dropout, Flatten
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd

#---------------------------------------------------------------------------------------------------------------------------------
def generate_dataset():

    """generate train/validation/test arrays from the FER2013 csv"""

    df = pd.read_csv("./fer2013/fer2013.csv")

    # FER2013 tags each row with its split via the "Usage" column.
    train_samples = df[df['Usage'] == "Training"]
    validation_samples = df[df["Usage"] == "PublicTest"]
    test_samples = df[df["Usage"] == "PrivateTest"]

    y_train = train_samples.emotion.astype(np.int32).values
    y_valid = validation_samples.emotion.astype(np.int32).values
    y_test = test_samples.emotion.astype(np.int32).values

    # Each "pixels" entry is a space-separated string of 48*48 grayscale values.
    X_train = np.array([np.fromstring(image, np.uint8, sep=" ").reshape((48, 48)) for image in train_samples.pixels])
    X_valid = np.array([np.fromstring(image, np.uint8, sep=" ").reshape((48, 48)) for image in validation_samples.pixels])
    X_test = np.array([np.fromstring(image, np.uint8, sep=" ").reshape((48, 48)) for image in test_samples.pixels])

    return X_train, y_train, X_valid, y_valid, X_test, y_test

#---------------------------------------------------------------------------------------------------------------------------------

def generate_model(lr=0.001):

    """build and compile the CNN classifier"""

    with tf.device('/gpu:0'):

        model = keras.models.Sequential()

        # Conv block 1: 64 filters on the 48x48x1 grayscale input.
        model.add(keras.layers.Conv2D(64, (3, 3), input_shape=(48, 48, 1), padding="same"))
        model.add(keras.layers.BatchNormalization())
        model.add(keras.layers.Activation('relu'))
        model.add(keras.layers.MaxPooling2D())
        model.add(keras.layers.Dropout(0.20))

        # Conv block 2
        model.add(keras.layers.Conv2D(128, (5, 5), padding='same'))
        model.add(keras.layers.BatchNormalization())
        model.add(keras.layers.Activation('relu'))
        model.add(keras.layers.MaxPooling2D())
        model.add(keras.layers.Dropout(0.20))

        # Conv block 3
        model.add(keras.layers.Conv2D(512, (3, 3), padding="same"))
        model.add(keras.layers.BatchNormalization())
        model.add(keras.layers.Activation('relu'))
        model.add(keras.layers.MaxPooling2D())
        model.add(keras.layers.Dropout(0.20))

        # Conv block 4
        model.add(keras.layers.Conv2D(512, (3, 3), padding="same"))
        model.add(keras.layers.BatchNormalization())
        model.add(keras.layers.Activation('relu'))
        model.add(keras.layers.MaxPooling2D())
        model.add(keras.layers.Dropout(0.25))

        # Conv block 5
        model.add(keras.layers.Conv2D(256, (3, 3), activation='relu', padding='same'))
        model.add(keras.layers.Conv2D(128, (3, 3), padding='same', activation='relu'))
        model.add(keras.layers.MaxPooling2D())
        model.add(keras.layers.Dropout(0.25))

        # Classifier head.
        #model.add(keras.layers.GlobalAveragePooling2D())
        model.add(keras.layers.Flatten())
        model.add(keras.layers.Dense(256))
        model.add(keras.layers.BatchNormalization())
        model.add(keras.layers.Activation('relu'))
        model.add(keras.layers.Dropout(0.5))

        # Dense -> BatchNorm -> ReLU, matching the block above (ReLU applied once, after BatchNorm).
        model.add(keras.layers.Dense(512))
        model.add(keras.layers.BatchNormalization())
        model.add(keras.layers.Activation('relu'))
        model.add(keras.layers.Dropout(0.5))

        # Output layer: one unit per expression class in the training data.
        model.add(keras.layers.Dense(4, activation='softmax'))

        model.compile(loss="categorical_crossentropy", optimizer=keras.optimizers.Adam(lr=lr), metrics=['accuracy'])
    return model

#---------------------------------------------------------------------------------------------------------------------------------

if __name__ == "__main__":

    # Alternative path: train directly on the FER2013 csv arrays.
    #df = pd.read_csv("./fer2013/fer2013.csv")
    #X_train, y_train, X_valid, y_valid, X_test, y_test = generate_dataset()
    #X_train = X_train.reshape((-1, 48, 48, 1)).astype(np.float32)
    #X_valid = X_valid.reshape((-1, 48, 48, 1)).astype(np.float32)
    #X_test = X_test.reshape((-1, 48, 48, 1)).astype(np.float32)
    #X_train_std = X_train / 255.
    #X_valid_std = X_valid / 255.
    #X_test_std = X_test / 255.

    # Augment the training images; only rescale the test images.
    train_datagen = keras.preprocessing.image.ImageDataGenerator(
        rescale=1./255,
        shear_range=0.2,
        zoom_range=0.2,
        horizontal_flip=True)

    test_datagen = keras.preprocessing.image.ImageDataGenerator(rescale=1./255)

    train_generator = train_datagen.flow_from_directory(
        'hackdataset/train',
        target_size=(48, 48),
        color_mode='grayscale',
        batch_size=128,
        class_mode='categorical')

    test_generator = test_datagen.flow_from_directory(
        'hackdataset/test',
        target_size=(48, 48),
        color_mode='grayscale',
        batch_size=128,
        class_mode='categorical')

    model = generate_model(0.01)
    with tf.device("/gpu:0"):
        #history = model.fit(X_train_std, y_train, batch_size=128, epochs=35, validation_data=(X_valid_std, y_valid), shuffle=True)
        history = model.fit_generator(train_generator, steps_per_epoch=200, epochs=35,
                                      validation_data=test_generator, validation_steps=80)
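
The commit leaves the csv/array training path commented out in __main__. As a rough sketch of how that path could be wired up (not part of this commit): the integer emotion labels returned by generate_dataset() would need one-hot encoding, since the model is compiled with categorical_crossentropy, and the final Dense layer would need one unit per emotion class in the csv (FER2013 defines 7, while generate_model() currently outputs 4). The keras.utils.to_categorical call and the variable names below are illustrative assumptions.

# Hypothetical usage sketch, not part of this commit: training on the FER2013
# arrays instead of the directory generators. Assumes ./fer2013/fer2013.csv is
# present and that generate_model()'s output layer matches the label count.
X_train, y_train, X_valid, y_valid, X_test, y_test = generate_dataset()

X_train_std = X_train.reshape((-1, 48, 48, 1)).astype(np.float32) / 255.
X_valid_std = X_valid.reshape((-1, 48, 48, 1)).astype(np.float32) / 255.

# categorical_crossentropy expects one-hot targets, not integer class ids.
y_train_cat = keras.utils.to_categorical(y_train)
y_valid_cat = keras.utils.to_categorical(y_valid)

model = generate_model(0.001)
history = model.fit(X_train_std, y_train_cat, batch_size=128, epochs=35,
                    validation_data=(X_valid_std, y_valid_cat), shuffle=True)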
