Rend19 committed
Commit 827241b · verified · 1 Parent(s): 614aa8c
Files changed (4)
  1. app.py +213 -0
  2. requirements.txt +5 -0
  3. resnet_v2_attributes.pickle +3 -0
  4. resnet_v2_model.h5 +3 -0
app.py ADDED
@@ -0,0 +1,213 @@
+ import gradio as gr
+ from gradio_webrtc import WebRTC
+ import os
+ import pickle
+ import numpy as np
+ import cv2
+ import tensorflow as tf
+ from sklearn.preprocessing import OneHotEncoder
+ from tensorflow.keras.callbacks import EarlyStopping
+ from tensorflow.keras import Model
+ from tensorflow.image import resize
+ from tensorflow.keras.preprocessing.image import load_img, img_to_array
+ from keras_vggface.vggface import VGGFace
+ from tensorflow.keras.models import Sequential
+ from tensorflow.keras.layers import (
+     RandomFlip, RandomRotation, RandomBrightness, RandomContrast,
+     RandomTranslation, Input, Dense, Flatten
+ )
+ from sklearn.model_selection import train_test_split
+ 
+ class ResNetModel:
+     def __init__(self):
+         # Augmentation pipeline applied to the training images.
+         self.data_augmentation = Sequential([
+             RandomBrightness(factor=0.2),
+             RandomContrast(factor=0.2),
+             RandomFlip("horizontal"),
+             RandomTranslation(height_factor=0.1, width_factor=0.1),
+             RandomRotation(factor=0.1)
+         ])
+ 
+     def load_data(self, data_path: str, input_size: tuple = (224, 224)):
+         """Build train/val/test splits from a folder with one sub-directory of images per identity."""
+         image_list = []
+         class_list = []
+         self.input_size = input_size
+         faceCascade = cv2.CascadeClassifier('Cascades/haarcascade_frontalface_default.xml')
+         for label in os.listdir(data_path):
+             # Skip identities with fewer than 5 images.
+             if len(os.listdir(os.path.join(data_path, label))) < 5:
+                 continue
+             # Use at most 10 images per identity.
+             for j, filename in enumerate(os.listdir(os.path.join(data_path, label))):
+                 if j >= 10:
+                     break
+                 filename = os.path.join(data_path, label, filename)
+                 print(f"Index {j}. {filename}")
+                 image = load_img(filename)
+                 image = np.array(image)
+                 faces = faceCascade.detectMultiScale(
+                     image,
+                     scaleFactor=1.2,
+                     minNeighbors=5,
+                     minSize=(20, 20)
+                 )
+                 # Crop every detected face and resize it to the model input size.
+                 for (x, y, w, h) in faces:
+                     if w == 0 or h == 0:
+                         continue
+                     image_roi = image[y:y+h, x:x+w]
+                     image_roi = img_to_array(image_roi)
+                     image_roi = resize(image_roi, input_size)
+ 
+                     image_list.append(image_roi)
+                     class_list.append(label)
+ 
+         os.system("cls")  # clear the console (Windows-only)
+         # One-hot encode the identity labels.
+         encoder = OneHotEncoder(sparse_output=False)
+         class_list = encoder.fit_transform(np.array(class_list).reshape(-1, 1))
+ 
+         image_list = np.asarray(image_list)
+         self.label_names = encoder.categories_[0]
+ 
+         # 70% train, 15% validation, 15% test.
+         X_train, X_temp, y_train, y_temp = train_test_split(image_list, class_list, test_size=0.3)
+         X_val, X_test, y_val, y_test = train_test_split(X_temp, y_temp, test_size=0.5)
+ 
+         self.X_train = X_train
+         self.y_train = y_train
+         self.X_val = X_val
+         self.y_val = y_val
+         self.X_test = X_test
+         self.y_test = y_test
+         print(f"Train dataset len: {X_train.shape[0]}")
+         print(f"Val dataset len: {X_val.shape[0]}")
+         print(f"Test dataset len: {X_test.shape[0]}")
+         print(f"Sample image shape: {X_train[0].shape}")
+ 
+     def save(self, save_path):
+         # Drop the in-memory datasets so only the model and its metadata are persisted.
+         self.X_train = []
+         self.y_train = []
+         self.X_val = []
+         self.y_val = []
+         self.X_test = []
+         self.y_test = []
+ 
+         model_save_path = save_path + "_model.h5"
+         self.model.save(model_save_path)
+ 
+         # Save the rest of the class attributes
+         with open(save_path + "_attributes.pickle", "wb") as file:
+             pickle.dump({
+                 "data_augmentation": self.data_augmentation,
+                 "label_names": self.label_names,
+                 "input_size": self.input_size,
+             }, file)
+ 
+     @classmethod
+     def load(cls, save_path):
+         with open(save_path + "_attributes.pickle", "rb") as file:
+             attributes = pickle.load(file)
+ 
+         instance = cls()
+         instance.data_augmentation = attributes["data_augmentation"]
+         instance.label_names = attributes["label_names"]
+         instance.input_size = attributes["input_size"]
+ 
+         # Load the Keras model
+         model_save_path = save_path + "_model.h5"
+         instance.model = tf.keras.models.load_model(model_save_path)
+ 
+         print(f"Model and attributes loaded from {save_path}")
+         return instance
+ 
+     def fit(self, model_name: str, augmentation=True, save_path=None, batch_size=64, epochs=10):
+         inputs = Input(shape=self.X_train[0].shape)
+         layer = self.data_augmentation(inputs) if augmentation else inputs
+ 
+         # Frozen pretrained VGGFace backbone; only the classification head is trained.
+         base_model = VGGFace(model=model_name, include_top=False, input_shape=self.X_train[0].shape, pooling="avg")
+         base_model.trainable = False
+ 
+         layer = base_model(layer)
+ 
+         layer = Flatten(name="Flatten")(layer)
+         out = Dense(len(self.label_names), activation="softmax")(layer)
+ 
+         model = Model(inputs, out)
+         model.compile(optimizer="adam", metrics=["accuracy"], loss="categorical_crossentropy")
+ 
+         model.summary()
+ 
+         self.history = model.fit(
+             self.X_train, self.y_train,
+             batch_size=batch_size,
+             epochs=epochs,
+             validation_data=(self.X_val, self.y_val),
+             callbacks=[EarlyStopping(monitor="val_loss", patience=3)]
+         )
+ 
+         self.model = model
+         if save_path is not None:
+             self.save(save_path)
+ 
+         loss, accuracy = model.evaluate(self.X_test, self.y_test)
+         print(f"Test accuracy: {accuracy * 100:.2f}")
+         print(f"Test loss: {loss:.2f}")
+         return model
+ 
+     def predict(self, predict_image):
+         # Classify a single face crop; returns (label, confidence).
+         predict_image = img_to_array(predict_image)
+         predict_image = resize(predict_image, self.input_size)
+         predict_image = np.expand_dims(predict_image, axis=0)
+         predict_label = self.model.predict(predict_image)
+         return self.label_names[np.argmax(predict_label)], np.max(predict_label)
+ 
+ # Load the trained model shipped with this Space (resnet_v2_model.h5 + resnet_v2_attributes.pickle).
+ faceRecognition = ResNetModel.load("resnet_v2")
+ 
+ def predict(image):
+     # Per-frame webcam callback: detect faces, classify each crop, and draw box + label.
+     faceCascade = cv2.CascadeClassifier('Cascades/haarcascade_frontalface_default.xml')
+     frame = cv2.flip(image, 1)
+     faces = faceCascade.detectMultiScale(
+         frame,
+         scaleFactor=1.2,
+         minNeighbors=5,
+         minSize=(20, 20)
+     )
+     for (x, y, w, h) in faces:
+         cv2.rectangle(frame, (x, y), (x+w, y+h), (0, 0, 255), 2)
+         roi_color = frame[y:y+h, x:x+w]
+         roi_color = cv2.flip(roi_color, 1)
+         label, conf = faceRecognition.predict(roi_color)
+         label = label[:10] + "..." if len(label) > 10 else label
+         text = "{label} : {conf:.3f}".format(label=label, conf=conf)
+ 
+         cv2.putText(frame, text, (x, y-10), cv2.FONT_HERSHEY_SIMPLEX, 0.6, (0, 0, 255), 2)
+     return frame
+ 
+ css = """.my-group {max-width: 600px !important; max-height: 600px !important;}
+ .my-column {display: flex !important; justify-content: center !important; align-items: center !important;}"""
+ rtc_configuration = {
+     "frameRate": {"ideal": 10}
+ }
+ 
+ with gr.Blocks(css=css) as demo:
+     gr.HTML(
+         """
+         <h1 style='text-align: center'>
+             Face Recognition Using FaceNet LB01 - Kelompok 8
+         </h1>
+         <ul style='text-align: center'>
+             <li><h4>2602082452 - Rendy Susanto</h4></li>
+             <li><h4>2602090031 - Rafael Juviano Joesoef</h4></li>
+             <li><h4>2602091122 - Owen Tamashi Buntoro</h4></li>
+         </ul>
+         """
+     )
+     with gr.Column(elem_classes=["my-column"]):
+         with gr.Group(elem_classes=["my-group"]):
+             image = WebRTC(label="Stream", rtc_configuration=rtc_configuration)
+ 
+         # Stream webcam frames through the face-recognition callback and back to the widget.
+         image.stream(
+             fn=predict, inputs=[image], outputs=[image]
+         )
+ 
+ if __name__ == "__main__":
+     demo.launch(share=True)
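
Note: at runtime app.py only calls ResNetModel.load("resnet_v2"); the training helpers (load_data, fit, save) are shipped but never invoked by the Space. A minimal sketch of how the bundled resnet_v2_model.h5 and resnet_v2_attributes.pickle could have been produced offline with the ResNetModel class above; the dataset/ folder layout (one sub-directory of face images per identity) and the "resnet50" backbone name are assumptions, not part of this commit:

    # Hypothetical offline training run; path and backbone name are illustrative only.
    model = ResNetModel()
    model.load_data("dataset/", input_size=(224, 224))   # expects one sub-folder per identity
    model.fit(model_name="resnet50", augmentation=True,
              save_path="resnet_v2", batch_size=64, epochs=10)
    # fit() calls save(), which writes resnet_v2_model.h5 and resnet_v2_attributes.pickle.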
requirements.txt ADDED
@@ -0,0 +1,5 @@
+ tensorflow==2.13.0
+ keras==2.12
+ keras-vggface==0.6
+ keras-applications==1.0.8
+ scikit-learn==1.6.0
resnet_v2_attributes.pickle ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:676641cbbbce10d3ea01f32b9b2f4e3faff753c13aff97347b7ccbf6d3fc109f
+ size 65347
resnet_v2_model.h5 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:9454c3de3db3b11a72676f6bde91b8ad7c17939c3faac2d378ab95843cba8c58
+ size 105037696