import streamlit as st
from PIL import Image, ImageDraw, ImageOps
import cv2
import numpy as np
import pandas as pd
import dlib
import requests
import os
from io import BytesIO
from facenet_pytorch import MTCNN
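# Assumed runtime dependencies (package names, e.g. for a requirements.txt):
# streamlit, Pillow, opencv-python-headless, numpy, pandas, dlib, requests,
# facenet-pytorch. Note that dlib builds from source on install and needs
# cmake available in the build environment.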
# ---------------------------------------------------------------
# Cache Models
# st.cache_resource keeps the loaded detectors in memory across Streamlit
# reruns instead of reinstantiating them on every interaction.
@st.cache_resource
def load_mtcnn_model():
    return MTCNN(keep_all=True)

@st.cache_resource
def load_dlib_detector():
    return dlib.get_frontal_face_detector()
# ---------------------------------------------------------------
# Face Detection Functions
def detect_faces_opencv(image):
    cv_image = cv2.cvtColor(np.array(image), cv2.COLOR_RGB2BGR)
    gray = cv2.cvtColor(cv_image, cv2.COLOR_BGR2GRAY)
    face_cascade = cv2.CascadeClassifier(cv2.data.haarcascades + "haarcascade_frontalface_default.xml")
    faces = face_cascade.detectMultiScale(gray, scaleFactor=1.1, minNeighbors=5)
    result = []
    for idx, (x, y, w, h) in enumerate(faces):
        cv2.rectangle(cv_image, (x, y), (x + w, y + h), (0, 255, 0), 2)
        cv2.putText(cv_image, f"Face {idx+1}", (x, y - 10), cv2.FONT_HERSHEY_SIMPLEX, 0.6, (0, 255, 0), 2)
        result.append({
            "Face ID": f"Face {idx+1}",
            "X": x,
            "Y": y,
            "W": w,
            "H": h,
            "Confidence": "N/A"  # Haar cascade detections do not come with a confidence score
        })
    cv_image = cv2.cvtColor(cv_image, cv2.COLOR_BGR2RGB)
    return cv_image, result
def detect_faces_mtcnn(image):
    mtcnn = load_mtcnn_model()
    boxes, probs = mtcnn.detect(image, landmarks=False)
    result = []
    draw_image = image.copy()
    draw = ImageDraw.Draw(draw_image)
    if boxes is not None:
        for idx, (box, prob) in enumerate(zip(boxes, probs)):
            x1, y1, x2, y2 = [int(v) for v in box]
            draw.rectangle([x1, y1, x2, y2], outline="blue", width=2)
            draw.text((x1, y1 - 15), f"Face {idx+1}", fill="blue")
            result.append({
                "Face ID": f"Face {idx+1}",
                "X": x1,
                "Y": y1,
                "W": x2 - x1,
                "H": y2 - y1,
                "Confidence": f"{prob:.2f}"
            })
    return draw_image, result
def detect_faces_dlib(image):
    dlib_detector = load_dlib_detector()
    gray_image = cv2.cvtColor(np.array(image), cv2.COLOR_RGB2GRAY)
    # adjust_threshold=-1 makes dlib return weaker candidates along with their
    # scores; the score > 0.0 filter below restores roughly the default cutoff.
    dets, scores, _ = dlib_detector.run(gray_image, 1, -1)
    result = []
    draw_image = image.copy()
    draw = ImageDraw.Draw(draw_image)
    for d, score in zip(dets, scores):
        if score > 0.0:
            face_id = len(result) + 1  # number only the faces that pass the score filter
            x1, y1, x2, y2 = d.left(), d.top(), d.right(), d.bottom()
            draw.rectangle([x1, y1, x2, y2], outline="red", width=2)
            draw.text((x1, y1 - 15), f"Face {face_id}", fill="red")
            result.append({
                "Face ID": f"Face {face_id}",
                "X": x1,
                "Y": y1,
                "W": x2 - x1,
                "H": y2 - y1,
                "Confidence": f"{score:.2f}"
            })
    return draw_image, result
# ---------------------------------------------------------------
# Face Cropping
def get_face_crops(image, boxes):
    faces = []
    for box in boxes:
        x, y, w, h = box["X"], box["Y"], box["W"], box["H"]
        cropped_face = image.crop((x, y, x + w, y + h))
        cropped_face = ImageOps.fit(cropped_face, (80, 80))
        faces.append(cropped_face)
    return faces
# ---------------------------------------------------------------
# Main Page UI
st.header("👤 Skin Tone Bias in Face Detection")
st.markdown(
    """
    This page explores potential **skin tone bias** in face detection models like **Dlib**, **MTCNN**, and **OpenCV**.
    Upload group photos with varying skin tones to observe detection differences.
    """
)

model_choice = st.selectbox("Select a Face Detection Model", ["Dlib", "MTCNN", "OpenCV"])
input_method = st.selectbox("Select Input Method", ["Default Images", "Upload Image", "Use Image URL"])
image = None

# Image Loading
if input_method == "Upload Image":
    uploaded_file = st.file_uploader("Upload a group image", type=["jpg", "jpeg", "png"])
    if uploaded_file:
        image = Image.open(uploaded_file).convert("RGB")
elif input_method == "Use Image URL":
    image_url = st.text_input("Paste an image URL")
    if image_url:
        try:
            # timeout keeps the app from hanging on unreachable URLs
            response = requests.get(image_url, timeout=10)
            response.raise_for_status()
            image = Image.open(BytesIO(response.content)).convert("RGB")
        except Exception:
            st.error("Couldn't load image from the provided URL.")
elif input_method == "Default Images":
    default_path = "default_images/skin_tone_bias"
    if os.path.exists(default_path):
        default_images = sorted([f for f in os.listdir(default_path) if f.lower().endswith((".jpg", ".jpeg", ".png"))])
        if default_images:
            st.markdown("### Default Images")
            cols = st.columns(3)
            selected_image_name = st.session_state.get('selected_default_image', None)
            for idx, img_name in enumerate(default_images):
                img_path = os.path.join(default_path, img_name)
                with cols[idx % 3]:
                    img = Image.open(img_path)
                    st.image(img, use_container_width=True)
                    if st.button(f"Select {chr(65+idx)}", key=f"select_{idx}"):
                        st.session_state.selected_default_image = img_name
                        selected_image_name = img_name
            if selected_image_name:
                st.success(f"Selected Image: {selected_image_name}")
                image = Image.open(os.path.join(default_path, selected_image_name)).convert("RGB")
        else:
            st.warning("No images found in 'default_images/skin_tone_bias'.")
    else:
        st.warning("Folder 'default_images/skin_tone_bias' does not exist.")
# Image Preview
if image is not None:
    st.image(image, caption="Input Image", use_container_width=True)

# Detection
if st.button("Detect Faces"):
    if image is None:
        st.warning("Please provide an image before detection.")
    else:
        with st.spinner(f"Detecting faces using {model_choice}..."):
            if model_choice == "OpenCV":
                draw_image, result = detect_faces_opencv(image)
            elif model_choice == "MTCNN":
                draw_image, result = detect_faces_mtcnn(image)
            elif model_choice == "Dlib":
                draw_image, result = detect_faces_dlib(image)

        if result:
            st.success(f"Detected {len(result)} face(s) with {model_choice}")
            st.image(draw_image, caption=f"{model_choice} Detection Output", use_container_width=True)
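            # Optional sketch (not in the original app): show the detection data as a
            # sortable table as well; the pandas import at the top is otherwise unused.
            st.markdown("### Detection Summary")
            st.dataframe(pd.DataFrame(result), use_container_width=True)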
            face_images = get_face_crops(image, result)
            st.markdown("### Cropped Face Previews")
            num_faces = len(face_images)
            cols_per_row = 3
            for i in range(0, num_faces, cols_per_row):
                cols = st.columns(cols_per_row)
                for j in range(cols_per_row):
                    if i + j < num_faces:
                        with cols[j]:
                            st.image(face_images[i + j], use_container_width=True)
                            st.markdown(
                                f"<div style='text-align: center;'>"
                                f"<b>{result[i + j]['Face ID']}</b><br>"
                                f"Confidence: <code>{result[i + j]['Confidence']}</code>"
                                f"</div>",
                                unsafe_allow_html=True
                            )
        else:
            st.warning(f"No faces detected by {model_choice}. Try a different model or image.")