This commit deletes the `photo_tagger_refactored.py`, `run.sh`, and test files (`test_basic.py`, `test_deepface_gui.py`, `test_face_recognition.py`) that are no longer in use. The removal of these files streamlines the project structure and eliminates legacy code, paving the way for future enhancements and a cleaner codebase. The README has been updated to reflect these changes, ensuring clarity on the current state of the project.
717 lines
32 KiB
Python
717 lines
32 KiB
Python
#!/usr/bin/env python3
|
|
"""
|
|
DeepFace GUI Test Application
|
|
|
|
GUI version of test_deepface_only.py that shows face comparison results
|
|
with left panel for reference faces and right panel for comparison faces with confidence scores.
|
|
"""
|
|
|
|
import os
|
|
import sys
|
|
import time
|
|
import tkinter as tk
|
|
from tkinter import ttk, messagebox, filedialog
|
|
from pathlib import Path
|
|
from typing import List, Dict, Tuple, Optional
|
|
import numpy as np
|
|
from PIL import Image, ImageTk
|
|
|
|
# Suppress TensorFlow warnings and CUDA errors
|
|
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
|
|
import warnings
|
|
warnings.filterwarnings('ignore')
|
|
|
|
# DeepFace library
|
|
from deepface import DeepFace
|
|
|
|
# Face recognition library
|
|
import face_recognition
|
|
|
|
# Supported image formats
|
|
SUPPORTED_FORMATS = {'.jpg', '.jpeg', '.png', '.bmp', '.tiff', '.tif'}
|
|
|
|
|
|
class FaceComparisonGUI:
|
|
"""GUI application for DeepFace face comparison testing"""
|
|
|
|
def __init__(self):
    """Create the main window, initialise result storage and build the GUI."""
    root = tk.Tk()
    root.title("Face Comparison Test - DeepFace vs face_recognition")
    root.geometry("2000x1000")
    root.minsize(1200, 800)
    self.root = root

    # Per-library results collected during a processing run.
    self.deepface_faces = []          # DeepFace faces from all images
    self.facerec_faces = []           # face_recognition faces from all images
    self.deepface_similarities = []   # DeepFace similarity results
    self.facerec_similarities = []    # face_recognition similarity results
    self.processing_times = {}        # per-photo timing information

    # Build all widgets.
    self.setup_gui()
|
|
|
|
def setup_gui(self):
    """Set up the GUI layout.

    Builds, inside one resizable grid:
      * a control row (folder / reference / detector / threshold inputs
        plus the Process button),
      * a progress bar and a status line,
      * three side-by-side scrollable panels (DeepFace results,
        face_recognition results, comparison/timing results).
    Each panel is a Canvas holding an inner Frame; later code adds rows to
    self.left/middle/right_scrollable_frame.
    """
    # Main frame
    main_frame = ttk.Frame(self.root, padding="10")
    main_frame.grid(row=0, column=0, sticky=(tk.W, tk.E, tk.N, tk.S))

    # Configure grid weights
    self.root.columnconfigure(0, weight=1)
    self.root.rowconfigure(0, weight=1)
    main_frame.columnconfigure(0, weight=1)
    main_frame.rowconfigure(2, weight=1)  # Make the content area expandable

    # Title
    title_label = ttk.Label(main_frame, text="Face Comparison Test - DeepFace vs face_recognition",
                            font=("Arial", 16, "bold"))
    title_label.grid(row=0, column=0, columnspan=3, pady=(0, 10))

    # Control panel
    control_frame = ttk.Frame(main_frame)
    control_frame.grid(row=1, column=0, columnspan=3, sticky=(tk.W, tk.E), pady=(0, 5))

    # Folder selection
    ttk.Label(control_frame, text="Test Folder:").grid(row=0, column=0, padx=(0, 5))
    self.folder_var = tk.StringVar(value="demo_photos/testdeepface/")
    folder_entry = ttk.Entry(control_frame, textvariable=self.folder_var, width=40)
    folder_entry.grid(row=0, column=1, padx=(0, 5))

    browse_btn = ttk.Button(control_frame, text="Browse", command=self.browse_folder)
    browse_btn.grid(row=0, column=2, padx=(0, 10))

    # Reference image selection
    # NOTE(review): self.reference_var is created here but never read by the
    # processing code in this file — presumably kept for a planned feature.
    ttk.Label(control_frame, text="Reference Image:").grid(row=0, column=3, padx=(10, 5))
    self.reference_var = tk.StringVar(value="2019-11-22_0011.JPG")
    reference_entry = ttk.Entry(control_frame, textvariable=self.reference_var, width=20)
    reference_entry.grid(row=0, column=4, padx=(0, 5))

    # Face detector selection (backend string passed to DeepFace in
    # process_with_deepface)
    ttk.Label(control_frame, text="Detector:").grid(row=0, column=5, padx=(10, 5))
    self.detector_var = tk.StringVar(value="retinaface")
    detector_combo = ttk.Combobox(control_frame, textvariable=self.detector_var,
                                  values=["retinaface", "mtcnn", "opencv", "ssd"],
                                  state="readonly", width=10)
    detector_combo.grid(row=0, column=6, padx=(0, 5))

    # Similarity threshold (percent; parsed as float in process_images)
    ttk.Label(control_frame, text="Threshold:").grid(row=0, column=7, padx=(10, 5))
    self.threshold_var = tk.StringVar(value="60")
    threshold_entry = ttk.Entry(control_frame, textvariable=self.threshold_var, width=8)
    threshold_entry.grid(row=0, column=8, padx=(0, 5))

    # Process button
    process_btn = ttk.Button(control_frame, text="Process Images",
                             command=self.process_images, style="Accent.TButton")
    process_btn.grid(row=0, column=9, padx=(10, 0))

    # Progress bar
    self.progress_var = tk.DoubleVar()
    self.progress_bar = ttk.Progressbar(control_frame, variable=self.progress_var,
                                        maximum=100, length=200)
    self.progress_bar.grid(row=1, column=0, columnspan=10, sticky=(tk.W, tk.E), pady=(5, 0))

    # Status label
    self.status_var = tk.StringVar(value="Ready to process images")
    status_label = ttk.Label(control_frame, textvariable=self.status_var)
    status_label.grid(row=2, column=0, columnspan=10, pady=(5, 0))

    # Main content area with three panels
    content_frame = ttk.Frame(main_frame)
    content_frame.grid(row=2, column=0, columnspan=3, sticky=(tk.W, tk.E, tk.N, tk.S), pady=(10, 0))
    content_frame.columnconfigure(0, weight=1)
    content_frame.columnconfigure(1, weight=1)
    content_frame.columnconfigure(2, weight=1)
    content_frame.rowconfigure(0, weight=1)

    # Left panel - DeepFace results
    left_frame = ttk.LabelFrame(content_frame, text="DeepFace Results", padding="5")
    left_frame.grid(row=0, column=0, sticky=(tk.W, tk.E, tk.N, tk.S), padx=(0, 5))
    left_frame.columnconfigure(0, weight=1)
    left_frame.rowconfigure(0, weight=1)

    # Left panel scrollable area
    left_canvas = tk.Canvas(left_frame, bg="white")
    left_scrollbar = ttk.Scrollbar(left_frame, orient="vertical", command=left_canvas.yview)
    self.left_scrollable_frame = ttk.Frame(left_canvas)

    # Grow the scrollregion whenever the inner frame changes size.
    self.left_scrollable_frame.bind(
        "<Configure>",
        lambda e: left_canvas.configure(scrollregion=left_canvas.bbox("all"))
    )

    left_canvas.create_window((0, 0), window=self.left_scrollable_frame, anchor="nw")
    left_canvas.configure(yscrollcommand=left_scrollbar.set)

    left_canvas.grid(row=0, column=0, sticky=(tk.W, tk.E, tk.N, tk.S))
    left_scrollbar.grid(row=0, column=1, sticky=(tk.N, tk.S))

    # Middle panel - face_recognition results
    middle_frame = ttk.LabelFrame(content_frame, text="face_recognition Results", padding="5")
    middle_frame.grid(row=0, column=1, sticky=(tk.W, tk.E, tk.N, tk.S), padx=(5, 5))
    middle_frame.columnconfigure(0, weight=1)
    middle_frame.rowconfigure(0, weight=1)

    # Right panel - Comparison Results
    right_frame = ttk.LabelFrame(content_frame, text="Comparison Results", padding="5")
    right_frame.grid(row=0, column=2, sticky=(tk.W, tk.E, tk.N, tk.S), padx=(5, 0))
    right_frame.columnconfigure(0, weight=1)
    right_frame.rowconfigure(0, weight=1)

    # Middle panel scrollable area
    middle_canvas = tk.Canvas(middle_frame, bg="white")
    middle_scrollbar = ttk.Scrollbar(middle_frame, orient="vertical", command=middle_canvas.yview)
    self.middle_scrollable_frame = ttk.Frame(middle_canvas)

    self.middle_scrollable_frame.bind(
        "<Configure>",
        lambda e: middle_canvas.configure(scrollregion=middle_canvas.bbox("all"))
    )

    middle_canvas.create_window((0, 0), window=self.middle_scrollable_frame, anchor="nw")
    middle_canvas.configure(yscrollcommand=middle_scrollbar.set)

    middle_canvas.grid(row=0, column=0, sticky=(tk.W, tk.E, tk.N, tk.S))
    middle_scrollbar.grid(row=0, column=1, sticky=(tk.N, tk.S))

    # Right panel scrollable area
    right_canvas = tk.Canvas(right_frame, bg="white")
    right_scrollbar = ttk.Scrollbar(right_frame, orient="vertical", command=right_canvas.yview)
    self.right_scrollable_frame = ttk.Frame(right_canvas)

    self.right_scrollable_frame.bind(
        "<Configure>",
        lambda e: right_canvas.configure(scrollregion=right_canvas.bbox("all"))
    )

    right_canvas.create_window((0, 0), window=self.right_scrollable_frame, anchor="nw")
    right_canvas.configure(yscrollcommand=right_scrollbar.set)

    right_canvas.grid(row=0, column=0, sticky=(tk.W, tk.E, tk.N, tk.S))
    right_scrollbar.grid(row=0, column=1, sticky=(tk.N, tk.S))

    # Bind mousewheel to all canvases
    # NOTE(review): event.delta/120 is the Windows/macOS convention; X11
    # delivers scrolling as Button-4/5 events instead — confirm the target
    # platform if Linux support is expected.
    def _on_mousewheel(event):
        left_canvas.yview_scroll(int(-1*(event.delta/120)), "units")
        middle_canvas.yview_scroll(int(-1*(event.delta/120)), "units")
        right_canvas.yview_scroll(int(-1*(event.delta/120)), "units")

    left_canvas.bind("<MouseWheel>", _on_mousewheel)
    middle_canvas.bind("<MouseWheel>", _on_mousewheel)
    right_canvas.bind("<MouseWheel>", _on_mousewheel)
|
|
|
|
def browse_folder(self):
    """Ask the user for a directory of test images and store the choice.

    A cancelled dialog returns an empty string, which leaves the current
    folder value untouched.
    """
    if folder := filedialog.askdirectory(initialdir="demo_photos/"):
        self.folder_var.set(folder)
|
|
|
|
def update_status(self, message: str):
    """Show *message* in the status label and repaint immediately.

    update_idletasks() forces Tk to redraw mid-run: the long processing
    loop executes on the GUI thread, so without it the label would only
    refresh after processing finishes.
    """
    self.status_var.set(message)
    self.root.update_idletasks()
|
|
|
|
def update_progress(self, value: float):
    """Set the progress bar to *value* (0-100) and repaint immediately.

    See update_status for why update_idletasks() is needed here.
    """
    self.progress_var.set(value)
    self.root.update_idletasks()
|
|
|
|
def get_image_files(self, folder_path: str) -> List[str]:
    """Recursively collect supported image paths under *folder_path*.

    Raises FileNotFoundError when the folder does not exist; otherwise
    returns a sorted list of path strings whose extension (case-insensitive)
    is in SUPPORTED_FORMATS.
    """
    root = Path(folder_path)
    if not root.exists():
        raise FileNotFoundError(f"Folder not found: {folder_path}")

    return sorted(
        str(entry)
        for entry in root.rglob("*")
        if entry.is_file() and entry.suffix.lower() in SUPPORTED_FORMATS
    )
|
|
|
|
def process_with_deepface(self, image_path: str, detector: str = "retinaface") -> Dict:
    """Process image with DeepFace library.

    Runs DeepFace.represent() with the ArcFace model and the chosen detector
    backend, then repackages each detection into this app's face-dict format.

    Returns:
        {'faces': [face_dict, ...], 'encodings': [np.ndarray, ...]} — both
        lists empty when no face is found or any error occurs. Errors are
        printed, never raised.
    """
    try:
        # Use DeepFace.represent() to get proper face detection with regions
        # Using selected detector for face detection
        results = DeepFace.represent(
            img_path=image_path,
            model_name='ArcFace',  # Best accuracy model
            detector_backend=detector,  # User-selected detector
            enforce_detection=False,  # Don't fail if no faces
            align=True  # Face alignment for better accuracy
        )

        if not results:
            print(f"No faces found in {Path(image_path).name}")
            return {'faces': [], 'encodings': []}

        print(f"Found {len(results)} faces in {Path(image_path).name}")

        # Convert to our format
        faces = []
        encodings = []

        for i, result in enumerate(results):
            try:
                # Extract face region info from DeepFace result
                # DeepFace uses 'facial_area' instead of 'region'
                # (assumes each result dict has 'facial_area'/'face_confidence'/
                # 'embedding' keys — TODO confirm against the installed
                # DeepFace version)
                facial_area = result.get('facial_area', {})
                face_confidence = result.get('face_confidence', 0.0)

                # Create face data with proper bounding box.
                # 'location' mirrors face_recognition's (top, right, bottom,
                # left) tuple, derived from facial_area's x/y/w/h values.
                face_data = {
                    'image_path': image_path,
                    'face_id': f"df_{Path(image_path).stem}_{i}",
                    'location': (facial_area.get('y', 0), facial_area.get('x', 0) + facial_area.get('w', 0),
                                 facial_area.get('y', 0) + facial_area.get('h', 0), facial_area.get('x', 0)),
                    'bbox': facial_area,
                    'encoding': np.array(result['embedding']),
                    'confidence': face_confidence
                }
                faces.append(face_data)
                encodings.append(np.array(result['embedding']))

                print(f"Face {i}: facial_area={facial_area}, confidence={face_confidence:.2f}, embedding shape={np.array(result['embedding']).shape}")

            except Exception as e:
                # Skip just this face; keep processing the remaining results.
                print(f"Error processing face {i}: {e}")
                continue

        return {
            'faces': faces,
            'encodings': encodings
        }

    except Exception as e:
        # Any DeepFace/backend failure degrades to "no faces" for this image.
        print(f"DeepFace error on {image_path}: {e}")
        return {'faces': [], 'encodings': []}
|
|
|
|
def process_with_face_recognition(self, image_path: str) -> Dict:
    """Detect and encode faces in one image using face_recognition.

    Returns:
        {'faces': [face_dict, ...], 'encodings': [np.ndarray, ...]} — both
        lists empty when nothing is detected or on error. Errors are printed,
        never raised.
    """
    try:
        image = face_recognition.load_image_file(image_path)

        # HOG detector: much faster than the CNN model, adequate for this test.
        face_locations = face_recognition.face_locations(image, model="hog")

        if not face_locations:
            print(f"No faces found in {Path(image_path).name} (face_recognition)")
            return {'faces': [], 'encodings': []}

        print(f"Found {len(face_locations)} faces in {Path(image_path).name} (face_recognition)")

        face_encodings = face_recognition.face_encodings(image, face_locations)

        faces, encodings = [], []
        for idx, (location, encoding) in enumerate(zip(face_locations, face_encodings)):
            try:
                # face_recognition reports (top, right, bottom, left).
                top, right, bottom, left = location
                vector = np.array(encoding)

                faces.append({
                    'image_path': image_path,
                    'face_id': f"fr_{Path(image_path).stem}_{idx}",
                    'location': location,
                    'bbox': {'x': left, 'y': top, 'w': right - left, 'h': bottom - top},
                    'encoding': vector,
                    # face_recognition doesn't provide confidence scores
                    'confidence': 1.0
                })
                encodings.append(vector)

                print(f"Face {idx}: location={location}, encoding shape={vector.shape}")

            except Exception as exc:
                # Skip just this face; keep processing the rest.
                print(f"Error processing face {idx}: {exc}")
                continue

        return {'faces': faces, 'encodings': encodings}

    except Exception as exc:
        print(f"face_recognition error on {image_path}: {exc}")
        return {'faces': [], 'encodings': []}
|
|
|
|
def extract_face_thumbnail(self, face_data: Dict, size: Tuple[int, int] = (150, 150)) -> ImageTk.PhotoImage:
    """Crop the face described by *face_data* out of its source image.

    The crop is padded by 20px, clamped to the image bounds, and force-resized
    to exactly *size* (aspect ratio is deliberately not preserved so every
    thumbnail in a panel has identical dimensions).

    Returns:
        A Tk PhotoImage; on any failure a light-gray placeholder of the same
        size is returned instead of raising (the error is printed).
    """
    try:
        # BUG FIX: the original called Image.open() without closing it, leaking
        # a file handle per thumbnail until GC ran. A context manager closes it
        # deterministically (crop() loads the pixel data before the file closes).
        with Image.open(face_data['image_path']) as image:
            # Extract face region from the stored bbox.
            bbox = face_data['bbox']
            left = bbox.get('x', 0)
            top = bbox.get('y', 0)
            right = left + bbox.get('w', 0)
            bottom = top + bbox.get('h', 0)

            # Add padding, clamped to the image borders.
            padding = 20
            left = max(0, left - padding)
            top = max(0, top - padding)
            right = min(image.width, right + padding)
            bottom = min(image.height, bottom + padding)

            face_crop = image.crop((left, top, right, bottom))

        # FORCE resize to exact size (thumbnail() would keep the aspect ratio).
        face_crop = face_crop.resize(size, Image.Resampling.LANCZOS)

        return ImageTk.PhotoImage(face_crop)

    except Exception as e:
        print(f"Error extracting thumbnail for {face_data['face_id']}: {e}")
        # Return a placeholder so the GUI row can still be laid out.
        placeholder = Image.new('RGB', size, color='lightgray')
        return ImageTk.PhotoImage(placeholder)
|
|
|
|
def calculate_face_similarity(self, encoding1: np.ndarray, encoding2: np.ndarray) -> float:
    """Map the cosine similarity of two embeddings onto a 0-100 confidence.

    Cosine similarity in [-1, 1] is rescaled linearly to [0, 100], so higher
    values mean more similar faces. Mismatched-length inputs or any failure
    yield 0.0 instead of raising.
    """
    try:
        vec_a = np.array(encoding1).flatten()
        vec_b = np.array(encoding2).flatten()

        # Embeddings from different models have different sizes and cannot
        # be compared directly.
        if len(vec_a) != len(vec_b):
            print(f"Warning: Encoding length mismatch: {len(vec_a)} vs {len(vec_b)}")
            return 0.0

        # Unit-normalise; the epsilon guards against zero-length vectors.
        eps = 1e-8
        unit_a = vec_a / (np.linalg.norm(vec_a) + eps)
        unit_b = vec_b / (np.linalg.norm(vec_b) + eps)

        # Cosine similarity, clamped to the valid [-1, 1] range before
        # rescaling to a percentage.
        cosine = np.clip(np.dot(unit_a, unit_b), -1.0, 1.0)
        return max(0, min(100, (cosine + 1) * 50))

    except Exception as exc:
        print(f"Error calculating similarity: {exc}")
        return 0.0
|
|
|
|
def process_images(self):
    """Run the full comparison pipeline over every image in the chosen folder.

    Steps: reset previous state and widgets, enumerate images, process each
    one with both libraries (recording per-photo timings), group mutually
    similar faces per library, then render all three panels. Any error is
    reported via a messagebox instead of propagating.
    """
    try:
        # Clear previous results
        self.deepface_faces = []
        self.facerec_faces = []
        self.deepface_similarities = []
        self.facerec_similarities = []
        self.processing_times = {}

        # Clear all three GUI panels
        for panel in (self.left_scrollable_frame,
                      self.middle_scrollable_frame,
                      self.right_scrollable_frame):
            for widget in panel.winfo_children():
                widget.destroy()

        folder_path = self.folder_var.get()
        threshold = float(self.threshold_var.get())

        if not folder_path:
            messagebox.showerror("Error", "Please specify folder path")
            return

        self.update_status("Getting image files...")
        self.update_progress(10)

        # Get all image files
        image_files = self.get_image_files(folder_path)
        if not image_files:
            messagebox.showerror("Error", "No image files found in the specified folder")
            return

        # Get selected detector
        detector = self.detector_var.get()

        self.update_status("Processing all images with both DeepFace and face_recognition...")
        self.update_progress(20)

        # Process all images with both libraries
        for i, image_path in enumerate(image_files):
            filename = Path(image_path).name
            # BUG FIX: the status/print messages below used to contain the
            # literal text "(unknown)" — the filename placeholder was missing
            # from the f-strings even though `filename` was computed for it.
            self.update_status(f"Processing {filename}...")
            self.update_progress(20 + (i / len(image_files)) * 50)

            # Process with DeepFace
            start_time = time.time()
            deepface_result = self.process_with_deepface(image_path, detector)
            deepface_time = time.time() - start_time

            # Process with face_recognition
            start_time = time.time()
            facerec_result = self.process_with_face_recognition(image_path)
            facerec_time = time.time() - start_time

            # Store timing information
            self.processing_times[filename] = {
                'deepface_time': deepface_time,
                'facerec_time': facerec_time,
                'total_time': deepface_time + facerec_time
            }

            # Store results
            self.deepface_faces.extend(deepface_result['faces'])
            self.facerec_faces.extend(facerec_result['faces'])

            print(f"Processed {filename}: DeepFace={deepface_time:.2f}s, face_recognition={facerec_time:.2f}s")

        if not self.deepface_faces and not self.facerec_faces:
            messagebox.showwarning("Warning", "No faces found in any images")
            return

        self.update_status("Calculating face similarities...")
        self.update_progress(75)

        # Same grouping logic for both libraries (previously duplicated inline).
        self.deepface_similarities = self._build_similarity_groups(self.deepface_faces, threshold)
        self.facerec_similarities = self._build_similarity_groups(self.facerec_faces, threshold)

        self.update_status("Displaying results...")
        self.update_progress(95)

        # Display results in GUI
        self.display_results()

        total_deepface_faces = len(self.deepface_faces)
        total_facerec_faces = len(self.facerec_faces)
        avg_deepface_time = sum(t['deepface_time'] for t in self.processing_times.values()) / len(self.processing_times)
        avg_facerec_time = sum(t['facerec_time'] for t in self.processing_times.values()) / len(self.processing_times)

        self.update_status(f"Complete! DeepFace: {total_deepface_faces} faces ({avg_deepface_time:.2f}s avg), face_recognition: {total_facerec_faces} faces ({avg_facerec_time:.2f}s avg)")
        self.update_progress(100)

    except Exception as e:
        messagebox.showerror("Error", f"Processing failed: {str(e)}")
        self.update_status("Error occurred during processing")
        print(f"Error: {e}")
        import traceback
        traceback.print_exc()

def _build_similarity_groups(self, faces: List[Dict], threshold: float) -> List[Dict]:
    """For each face, collect every OTHER face whose pairwise confidence
    meets *threshold*, sorted by confidence descending.

    Returns a list of {'face': face_dict, 'similarities': [{'face', 'confidence'}, ...]}.
    """
    groups = []
    for i, face1 in enumerate(faces):
        similarities = []
        for j, face2 in enumerate(faces):
            if i == j:
                continue  # don't compare a face with itself
            confidence = self.calculate_face_similarity(face1['encoding'], face2['encoding'])
            if confidence >= threshold:
                similarities.append({'face': face2, 'confidence': confidence})
        # Sort by confidence (highest first)
        similarities.sort(key=lambda x: x['confidence'], reverse=True)
        groups.append({'face': face1, 'similarities': similarities})
    return groups
|
|
|
|
def display_results(self):
    """Render all three panels from the most recent processing run."""
    panels = (
        (self.deepface_similarities, self.left_scrollable_frame, "DeepFace"),
        (self.facerec_similarities, self.middle_scrollable_frame, "face_recognition"),
    )
    for similarities, frame, library in panels:
        self.display_library_results(similarities, frame, library)

    # Right-hand panel: timing comparison between the two libraries.
    self.display_timing_comparison()
|
|
|
|
def display_library_results(self, similarities_list: List[Dict], parent_frame, library_name: str):
    """Populate *parent_frame* with one row per detected face.

    Each row shows an 80x80 thumbnail plus id / source image / similar-face
    count labels. *library_name* is accepted for symmetry with the caller
    but not rendered.
    """
    for row, entry in enumerate(similarities_list):
        face = entry['face']

        row_frame = ttk.Frame(parent_frame)
        row_frame.grid(row=row, column=0, sticky=(tk.W, tk.E), pady=5, padx=5)

        # Thumbnail — keep a reference on the widget or Tk will discard it.
        photo = self.extract_face_thumbnail(face, size=(80, 80))
        photo_label = ttk.Label(row_frame, image=photo)
        photo_label.image = photo
        photo_label.grid(row=0, column=0, padx=5, pady=5)

        details = ttk.Frame(row_frame)
        details.grid(row=0, column=1, sticky=(tk.W, tk.E), padx=5)

        # (text, font) per detail line, in display order.
        lines = (
            (f"Face {row+1}", ("Arial", 10, "bold")),
            (f"ID: {face['face_id']}", ("Arial", 8)),
            (f"Image: {Path(face['image_path']).name}", ("Arial", 8)),
            (f"Similar: {len(entry['similarities'])}", ("Arial", 8, "bold")),
        )
        for line_row, (text, font) in enumerate(lines):
            ttk.Label(details, text=text, font=font).grid(row=line_row, column=0, sticky=tk.W, pady=1)
|
|
|
|
def display_timing_comparison(self):
    """Render the right-hand panel: aggregate and per-photo timing stats.

    Does nothing when no images have been processed yet.
    """
    if not self.processing_times:
        return

    # Create summary frame
    summary_frame = ttk.LabelFrame(self.right_scrollable_frame, text="Processing Times Summary")
    summary_frame.grid(row=0, column=0, sticky=(tk.W, tk.E), pady=5, padx=5)

    # Calculate averages
    total_deepface_time = sum(t['deepface_time'] for t in self.processing_times.values())
    total_facerec_time = sum(t['facerec_time'] for t in self.processing_times.values())
    avg_deepface_time = total_deepface_time / len(self.processing_times)
    avg_facerec_time = total_facerec_time / len(self.processing_times)

    # Summary statistics
    ttk.Label(summary_frame, text=f"Total Images: {len(self.processing_times)}", font=("Arial", 10, "bold")).grid(row=0, column=0, sticky=tk.W, pady=2)
    ttk.Label(summary_frame, text=f"DeepFace Avg: {avg_deepface_time:.2f}s", font=("Arial", 9)).grid(row=1, column=0, sticky=tk.W, pady=1)
    ttk.Label(summary_frame, text=f"face_recognition Avg: {avg_facerec_time:.2f}s", font=("Arial", 9)).grid(row=2, column=0, sticky=tk.W, pady=1)

    # BUG FIX: the original set speed_ratio to 0 when avg_facerec_time was 0
    # and then hit the else-branch, computing 1/speed_ratio -> ZeroDivisionError.
    # Only compute and show the "X is N.Nx faster" line when both averages are
    # positive.
    if avg_deepface_time > 0 and avg_facerec_time > 0:
        speed_ratio = avg_deepface_time / avg_facerec_time
        if speed_ratio > 1:
            faster_lib = "face_recognition"
            speed_text = f"{speed_ratio:.1f}x faster"
        else:
            faster_lib = "DeepFace"
            speed_text = f"{1/speed_ratio:.1f}x faster"
        ttk.Label(summary_frame, text=f"{faster_lib} is {speed_text}", font=("Arial", 9, "bold"), foreground="green").grid(row=3, column=0, sticky=tk.W, pady=2)

    # Individual photo timings
    timing_frame = ttk.LabelFrame(self.right_scrollable_frame, text="Per-Photo Timing")
    timing_frame.grid(row=1, column=0, sticky=(tk.W, tk.E), pady=5, padx=5)

    for row, (filename, times) in enumerate(sorted(self.processing_times.items())):
        ttk.Label(timing_frame, text=f"{filename[:20]}...", font=("Arial", 8)).grid(row=row, column=0, sticky=tk.W, pady=1)
        ttk.Label(timing_frame, text=f"DF: {times['deepface_time']:.2f}s", font=("Arial", 8)).grid(row=row, column=1, sticky=tk.W, pady=1, padx=(5,0))
        ttk.Label(timing_frame, text=f"FR: {times['facerec_time']:.2f}s", font=("Arial", 8)).grid(row=row, column=2, sticky=tk.W, pady=1, padx=(5,0))
|
|
|
|
def display_comparison_faces(self, ref_index: int, similarities: List[Dict]):
    """Show the best matches for reference face *ref_index* in the right panel.

    At most eight matches are rendered to keep the panel uncluttered; each
    shows a 120x120 thumbnail and a colour-coded confidence percentage.
    """
    comp_frame = ttk.LabelFrame(self.right_scrollable_frame,
                                text=f"Matches for Reference Face {ref_index + 1}")
    comp_frame.grid(row=ref_index, column=0, sticky=(tk.W, tk.E), pady=10, padx=10)

    # Cap the number of rows to avoid clutter.
    for row, match in enumerate(similarities[:8]):
        face = match['face']
        confidence = match['confidence']

        entry_frame = ttk.Frame(comp_frame)
        entry_frame.grid(row=row, column=0, sticky=(tk.W, tk.E), pady=5, padx=10)

        # Thumbnail — keep a reference on the widget so Tk doesn't discard it.
        photo = self.extract_face_thumbnail(face, size=(120, 120))
        photo_label = ttk.Label(entry_frame, image=photo)
        photo_label.image = photo
        photo_label.grid(row=0, column=0, padx=10, pady=5)

        info = ttk.Frame(entry_frame)
        info.grid(row=0, column=1, sticky=(tk.W, tk.E), padx=10)

        # Colour-code confidence: >=80 green, >=60 orange, otherwise red.
        colour = "green" if confidence >= 80 else "orange" if confidence >= 60 else "red"

        ttk.Label(info, text=f"{confidence:.1f}%",
                  font=("Arial", 14, "bold"), foreground=colour).grid(row=0, column=0, sticky=tk.W, pady=2)
        ttk.Label(info, text=f"ID: {face['face_id']}", font=("Arial", 10)).grid(row=1, column=0, sticky=tk.W, pady=2)
        ttk.Label(info, text=f"Image: {Path(face['image_path']).name}", font=("Arial", 10)).grid(row=2, column=0, sticky=tk.W, pady=2)
|
|
|
|
def run(self):
    """Enter the Tk main event loop (blocks until the window is closed)."""
    self.root.mainloop()
|
|
|
|
|
|
def main():
    """Entry point: verify the face libraries are installed, then launch the GUI.

    Exits with status 1 (after printing an install hint or a traceback) on a
    missing dependency or an unhandled GUI error.
    """
    # Fail early with an install hint if either face library is missing.
    try:
        from deepface import DeepFace
    except ImportError as exc:
        print(f"Error: Missing required dependency: {exc}")
        print("Please install with: pip install deepface")
        sys.exit(1)

    try:
        import face_recognition
    except ImportError as exc:
        print(f"Error: Missing required dependency: {exc}")
        print("Please install with: pip install face_recognition")
        sys.exit(1)

    # Quiet TensorFlow and generic warnings (also done at module import time;
    # repeated here in case main() is invoked from another module).
    import os
    import warnings
    os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
    warnings.filterwarnings('ignore')

    try:
        # Create and run the GUI.
        FaceComparisonGUI().run()
    except Exception as exc:
        print(f"GUI Error: {exc}")
        import traceback
        traceback.print_exc()
        sys.exit(1)
|
|
|
|
|
|
if __name__ == "__main__":
    # Launch the GUI only when executed as a script, not on import.
    main()
|