Goutham204 committed on
Commit
6d7f84d
·
verified ·
1 Parent(s): b5c012d

Upload 2 files

Browse files
EGFE (Tkinter).ipynb ADDED
@@ -0,0 +1,191 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "cells": [
3
+ {
4
+ "cell_type": "code",
5
+ "execution_count": 1,
6
+ "id": "acf43a2d-909c-4268-9eba-742d82f4d983",
7
+ "metadata": {},
8
+ "outputs": [],
9
+ "source": [
10
+ "import threading\n",
11
+ "import cv2\n",
12
+ "import numpy as np\n",
13
+ "import tkinter as tk\n",
14
+ "from tkinter import Label\n",
15
+ "from PIL import Image, ImageTk\n",
16
+ "import pyttsx3\n",
17
+ "from keras.models import load_model\n",
18
+ "import os"
19
+ ]
20
+ },
21
+ {
22
+ "cell_type": "code",
23
+ "execution_count": 3,
24
+ "id": "19b13281-85bf-4381-9d5f-f503b1658a8f",
25
+ "metadata": {},
26
+ "outputs": [],
27
+ "source": [
28
# --- Model, speech engine, and shared state --------------------------------
model = load_model('emotion_model-099.keras')
engine = pyttsx3.init()

frame_count = 0               # frames seen so far; drives the predict cadence
predict_interval = 5          # run the classifier once every N frames
current_emotion_label = "Neutral"   # last predicted label (updated by worker thread)
last_spoken = None            # last label announced via TTS, to avoid repeats

# Emoji image per class index; indices line up with emotion_labels below.
# NOTE(review): absolute Windows paths — confirm these exist on the target box.
emoji_path = {
    0: 'D:/New download/Emoji/Angry.png',
    1: 'D:/New download/Emoji/Disgusted.png',
    2: 'D:/New download/Emoji/Fear.png',
    3: 'D:/New download/Emoji/Happy.png',
    4: 'D:/New download/Emoji/Neutral.png',
    5: 'D:/New download/Emoji/Sad.png',
    6: 'D:/New download/Emoji/Surprised.png'
}

# Class-index -> human-readable label, in the model's output order.
emotion_labels = ['Angry', 'Disgust', 'Fear', 'Happy', 'Neutral', 'Sad', 'Surprise']

# Haar cascade used for face localisation before classification.
face_cascade = cv2.CascadeClassifier(r'D:\New download\haarcascade_frontalface_default.xml')
48
+ ]
49
+ },
50
+ {
51
+ "cell_type": "code",
52
+ "execution_count": 5,
53
+ "id": "30c4b41c-27df-4819-8914-fa3a7a459f43",
54
+ "metadata": {},
55
+ "outputs": [],
56
+ "source": [
57
def preprocess_face(face_img):
    """Convert a BGR face crop into the model's input tensor.

    Grayscales, resizes to 100x100, scales to [0, 1] float32, and adds
    batch and channel axes, returning shape (1, 100, 100, 1).
    """
    grayscale = cv2.cvtColor(face_img, cv2.COLOR_BGR2GRAY)
    scaled = cv2.resize(grayscale, (100, 100)).astype('float32') / 255.0
    return scaled.reshape(1, 100, 100, 1)
63
+ "\n",
64
def overlay_emoji_on_frame(frame, emoji_img, x, y, w, h):
    """Blend *emoji_img* onto ``frame`` over the face box (x, y, w, h).

    A BGRA emoji is alpha-composited; a plain BGR emoji is blended 50/50.
    Returns the (mutated) frame for caller convenience.
    """
    emoji_resized = cv2.resize(emoji_img, (w, h))
    # Bug fix: a single-channel (grayscale) emoji has no third axis, so
    # `shape[2]` raised IndexError and `addWeighted` would shape-mismatch.
    # Promote it to 3-channel BGR first.
    if emoji_resized.ndim == 2:
        emoji_resized = cv2.cvtColor(emoji_resized, cv2.COLOR_GRAY2BGR)
    if emoji_resized.shape[2] == 4:
        emoji_rgb = emoji_resized[:, :, :3]
        # Alpha in [0, 1], with a trailing axis so it broadcasts over B, G, R —
        # equivalent to the previous explicit per-channel loop, in one pass.
        alpha = (emoji_resized[:, :, 3] / 255.0)[:, :, None]
        roi = frame[y:y + h, x:x + w]
        frame[y:y + h, x:x + w] = alpha * emoji_rgb + (1 - alpha) * roi
    else:
        frame[y:y + h, x:x + w] = cv2.addWeighted(frame[y:y + h, x:x + w], 0.5, emoji_resized, 0.5, 0)
    return frame
75
+ "\n",
76
def predict_emotion_async(face):
    """Classify *face* and publish the result via ``current_emotion_label``.

    Designed to run on a worker thread so the Tk event loop is not blocked
    by model inference; the UI thread only reads the global it writes.
    """
    global current_emotion_label
    scores = model.predict(preprocess_face(face))
    current_emotion_label = emotion_labels[int(np.argmax(scores))]
82
+ ]
83
+ },
84
+ {
85
+ "cell_type": "code",
86
+ "execution_count": 7,
87
+ "id": "7c4211fd-e1a4-4600-aa05-687f8c3202e3",
88
+ "metadata": {},
89
+ "outputs": [
90
+ {
91
+ "name": "stdout",
92
+ "output_type": "stream",
93
+ "text": [
94
+ "\u001b[1m1/1\u001b[0m \u001b[32m━━━━━━━━━━━━━━━━━━━━\u001b[0m\u001b[37m\u001b[0m \u001b[1m1s\u001b[0m 810ms/step\n"
95
+ ]
96
+ }
97
+ ],
98
+ "source": [
99
# Cache of decoded emoji images keyed by class index, so cv2.imread and the
# os.path.exists probe are not repeated for every face on every frame.
_emoji_cache = {}


def _get_emoji(label):
    """Return the cached (possibly BGRA) emoji image for *label*, or None."""
    idx = emotion_labels.index(label)
    if idx not in _emoji_cache:
        path = emoji_path.get(idx)
        _emoji_cache[idx] = cv2.imread(path, -1) if path and os.path.exists(path) else None
    return _emoji_cache[idx]


def update_frame():
    """Grab one webcam frame, annotate faces with emotion + emoji, reschedule.

    Runs on the Tk event loop via ``after``; model inference is delegated to
    short-lived worker threads every ``predict_interval`` frames.
    """
    global frame_count, last_spoken, current_emotion_label

    ret, frame = cap.read()
    if not ret:
        # Stop rescheduling on capture failure (the preview simply freezes).
        print("Error: Failed to capture video feed.")
        return

    frame = cv2.resize(frame, (320, 240))
    gray_frame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    faces = face_cascade.detectMultiScale(gray_frame, scaleFactor=1.3, minNeighbors=5)

    for (x, y, w, h) in faces:
        face = frame[y:y + h, x:x + w]

        if frame_count % predict_interval == 0:
            # daemon=True: an in-flight prediction must not keep the
            # interpreter alive after the window is closed.
            threading.Thread(target=predict_emotion_async, args=(face,), daemon=True).start()

        cv2.putText(frame, current_emotion_label, (x, y - 10), cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 255, 255), 2)
        cv2.rectangle(frame, (x, y), (x + w, y + h), (255, 0, 0), 2)

        emoji_img = _get_emoji(current_emotion_label)
        if emoji_img is not None:
            frame = overlay_emoji_on_frame(frame, emoji_img, x, y, w, h)

        if current_emotion_label != last_spoken:
            last_spoken = current_emotion_label
            # NOTE(review): runAndWait() blocks the Tk event loop until speech
            # finishes, stalling the preview; consider a dedicated TTS thread.
            engine.say(current_emotion_label)
            engine.runAndWait()

    # Tk wants RGB; OpenCV delivers BGR.
    frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
    img = Image.fromarray(frame)
    imgtk = ImageTk.PhotoImage(image=img)

    # Keep a reference on the widget so the PhotoImage is not garbage-collected.
    lblVideo.imgtk = imgtk
    lblVideo.configure(image=imgtk)

    frame_count += 1
    lblVideo.after(10, update_frame)
139
+ "\n",
140
# --- GUI construction and main loop ----------------------------------------
root = tk.Tk()
root.title("Emoji Generator from Facial Expression")
root.config(bg='khaki')
root.geometry('400x350')

heading = Label(root,text="Emoji Generator from\nFacial Expression",font=('Times New Roman', 24, 'bold'),justify="center",bg="khaki",fg="saddle brown")
heading.pack(pady=10)

# Video preview target; update_frame() paints each captured frame into it.
lblVideo = Label(root)
lblVideo.pack()

cap = cv2.VideoCapture(0)

try:
    update_frame()
    root.mainloop()
finally:
    # Bug fix: release the camera and OpenCV windows even if mainloop (or the
    # first update_frame call) raises — previously these lines were skipped.
    cap.release()
    cv2.destroyAllWindows()
159
+ ]
160
+ },
161
+ {
162
+ "cell_type": "code",
163
+ "execution_count": null,
164
+ "id": "cebe5ae5-1d4f-45b7-8b08-f4739a4d030b",
165
+ "metadata": {},
166
+ "outputs": [],
167
+ "source": []
168
+ }
169
+ ],
170
+ "metadata": {
171
+ "kernelspec": {
172
+ "display_name": "Python 3 (ipykernel)",
173
+ "language": "python",
174
+ "name": "python3"
175
+ },
176
+ "language_info": {
177
+ "codemirror_mode": {
178
+ "name": "ipython",
179
+ "version": 3
180
+ },
181
+ "file_extension": ".py",
182
+ "mimetype": "text/x-python",
183
+ "name": "python",
184
+ "nbconvert_exporter": "python",
185
+ "pygments_lexer": "ipython3",
186
+ "version": "3.12.7"
187
+ }
188
+ },
189
+ "nbformat": 4,
190
+ "nbformat_minor": 5
191
+ }
Emoji Generator from Facial Expression.ipynb ADDED
The diff for this file is too large to render. See raw diff