Remove deprecated files and refactor codebase for improved maintainability
This commit deletes the `photo_tagger_refactored.py`, `run.sh`, and test files (`test_basic.py`, `test_deepface_gui.py`, `test_face_recognition.py`) that are no longer in use. The removal of these files streamlines the project structure and eliminates legacy code, paving the way for future enhancements and a cleaner codebase. The README has been updated to reflect these changes, ensuring clarity on the current state of the project.
This commit is contained in:
parent
ac5507c560
commit
e49b567afa
@ -84,9 +84,9 @@ class PhotoTagger:
|
||||
return self.photo_manager.extract_photo_date(photo_path)
|
||||
|
||||
# Face processing methods (delegated)
|
||||
def process_faces(self, limit: int = DEFAULT_PROCESSING_LIMIT, model: str = DEFAULT_FACE_DETECTION_MODEL) -> int:
|
||||
"""Process unprocessed photos for faces"""
|
||||
return self.face_processor.process_faces(limit, model)
|
||||
def process_faces(self, limit: int = DEFAULT_PROCESSING_LIMIT, model: str = DEFAULT_FACE_DETECTION_MODEL, progress_callback=None, stop_event=None) -> int:
|
||||
"""Process unprocessed photos for faces with optional progress and cancellation"""
|
||||
return self.face_processor.process_faces(limit, model, progress_callback, stop_event)
|
||||
|
||||
def _extract_face_crop(self, photo_path: str, location: tuple, face_id: int) -> str:
|
||||
"""Extract and save individual face crop for identification (legacy compatibility)"""
|
||||
@ -205,11 +205,11 @@ class PhotoTagger:
|
||||
"""Callback to scan a folder from the dashboard."""
|
||||
return self.scan_folder(folder_path, recursive)
|
||||
|
||||
def _dashboard_process(self, limit_value: Optional[int]) -> int:
|
||||
"""Callback to process faces from the dashboard with optional limit."""
|
||||
def _dashboard_process(self, limit_value: Optional[int], progress_callback=None, stop_event=None) -> int:
|
||||
"""Callback to process faces from the dashboard with optional limit, progress, cancel."""
|
||||
if limit_value is None:
|
||||
return self.process_faces()
|
||||
return self.process_faces(limit=limit_value)
|
||||
return self.process_faces(progress_callback=progress_callback, stop_event=stop_event)
|
||||
return self.process_faces(limit=limit_value, progress_callback=progress_callback, stop_event=stop_event)
|
||||
|
||||
def _dashboard_identify(self, batch_value: Optional[int]) -> int:
|
||||
"""Callback to identify faces from the dashboard with optional batch (show_faces is always True)."""
|
||||
|
||||
@ -1,364 +0,0 @@
|
||||
#!/usr/bin/env python3
|
||||
"""
|
||||
PunimTag CLI - Minimal Photo Face Tagger (Refactored)
|
||||
Simple command-line tool for face recognition and photo tagging
|
||||
"""
|
||||
|
||||
import os
|
||||
import sys
|
||||
import argparse
|
||||
import threading
|
||||
from typing import List, Dict, Tuple, Optional
|
||||
|
||||
# Import our new modules
|
||||
from config import (
|
||||
DEFAULT_DB_PATH, DEFAULT_FACE_DETECTION_MODEL, DEFAULT_FACE_TOLERANCE,
|
||||
DEFAULT_BATCH_SIZE, DEFAULT_PROCESSING_LIMIT
|
||||
)
|
||||
from database import DatabaseManager
|
||||
from face_processing import FaceProcessor
|
||||
from photo_management import PhotoManager
|
||||
from tag_management import TagManager
|
||||
from search_stats import SearchStats
|
||||
from gui_core import GUICore
|
||||
|
||||
|
||||
class PhotoTagger:
|
||||
"""Main PhotoTagger class - orchestrates all functionality"""
|
||||
|
||||
def __init__(self, db_path: str = DEFAULT_DB_PATH, verbose: int = 0, debug: bool = False):
|
||||
"""Initialize the photo tagger with database and all managers"""
|
||||
self.db_path = db_path
|
||||
self.verbose = verbose
|
||||
self.debug = debug
|
||||
|
||||
# Initialize all managers
|
||||
self.db = DatabaseManager(db_path, verbose)
|
||||
self.face_processor = FaceProcessor(self.db, verbose)
|
||||
self.photo_manager = PhotoManager(self.db, verbose)
|
||||
self.tag_manager = TagManager(self.db, verbose)
|
||||
self.search_stats = SearchStats(self.db, verbose)
|
||||
self.gui_core = GUICore()
|
||||
|
||||
# Legacy compatibility - expose some methods directly
|
||||
self._face_encoding_cache = {}
|
||||
self._image_cache = {}
|
||||
self._db_connection = None
|
||||
self._db_lock = threading.Lock()
|
||||
|
||||
def cleanup(self):
|
||||
"""Clean up resources and close connections"""
|
||||
self.face_processor.cleanup_face_crops()
|
||||
self.db.close_db_connection()
|
||||
|
||||
# Database methods (delegated)
|
||||
def get_db_connection(self):
|
||||
"""Get database connection (legacy compatibility)"""
|
||||
return self.db.get_db_connection()
|
||||
|
||||
def close_db_connection(self):
|
||||
"""Close database connection (legacy compatibility)"""
|
||||
self.db.close_db_connection()
|
||||
|
||||
def init_database(self):
|
||||
"""Initialize database (legacy compatibility)"""
|
||||
self.db.init_database()
|
||||
|
||||
# Photo management methods (delegated)
|
||||
def scan_folder(self, folder_path: str, recursive: bool = True) -> int:
|
||||
"""Scan folder for photos and add to database"""
|
||||
return self.photo_manager.scan_folder(folder_path, recursive)
|
||||
|
||||
def _extract_photo_date(self, photo_path: str) -> Optional[str]:
|
||||
"""Extract date taken from photo EXIF data (legacy compatibility)"""
|
||||
return self.photo_manager.extract_photo_date(photo_path)
|
||||
|
||||
# Face processing methods (delegated)
|
||||
def process_faces(self, limit: int = DEFAULT_PROCESSING_LIMIT, model: str = DEFAULT_FACE_DETECTION_MODEL) -> int:
|
||||
"""Process unprocessed photos for faces"""
|
||||
return self.face_processor.process_faces(limit, model)
|
||||
|
||||
def _extract_face_crop(self, photo_path: str, location: tuple, face_id: int) -> str:
|
||||
"""Extract and save individual face crop for identification (legacy compatibility)"""
|
||||
return self.face_processor._extract_face_crop(photo_path, location, face_id)
|
||||
|
||||
def _create_comparison_image(self, unid_crop_path: str, match_crop_path: str, person_name: str, confidence: float) -> str:
|
||||
"""Create a side-by-side comparison image (legacy compatibility)"""
|
||||
return self.face_processor._create_comparison_image(unid_crop_path, match_crop_path, person_name, confidence)
|
||||
|
||||
def _calculate_face_quality_score(self, image, face_location: tuple) -> float:
|
||||
"""Calculate face quality score (legacy compatibility)"""
|
||||
return self.face_processor._calculate_face_quality_score(image, face_location)
|
||||
|
||||
def _add_person_encoding(self, person_id: int, face_id: int, encoding, quality_score: float):
|
||||
"""Add a face encoding to a person's encoding collection (legacy compatibility)"""
|
||||
self.face_processor.add_person_encoding(person_id, face_id, encoding, quality_score)
|
||||
|
||||
def _get_person_encodings(self, person_id: int, min_quality: float = 0.3):
|
||||
"""Get all high-quality encodings for a person (legacy compatibility)"""
|
||||
return self.face_processor.get_person_encodings(person_id, min_quality)
|
||||
|
||||
def _update_person_encodings(self, person_id: int):
|
||||
"""Update person encodings when a face is identified (legacy compatibility)"""
|
||||
self.face_processor.update_person_encodings(person_id)
|
||||
|
||||
def _calculate_adaptive_tolerance(self, base_tolerance: float, face_quality: float, match_confidence: float = None) -> float:
|
||||
"""Calculate adaptive tolerance (legacy compatibility)"""
|
||||
return self.face_processor._calculate_adaptive_tolerance(base_tolerance, face_quality, match_confidence)
|
||||
|
||||
def _get_filtered_similar_faces(self, face_id: int, tolerance: float, include_same_photo: bool = False, face_status: dict = None):
|
||||
"""Get similar faces with filtering (legacy compatibility)"""
|
||||
return self.face_processor._get_filtered_similar_faces(face_id, tolerance, include_same_photo, face_status)
|
||||
|
||||
def _filter_unique_faces(self, faces: List[Dict]):
|
||||
"""Filter faces to show only unique ones (legacy compatibility)"""
|
||||
return self.face_processor._filter_unique_faces(faces)
|
||||
|
||||
def _filter_unique_faces_from_list(self, faces_list: List[tuple]):
|
||||
"""Filter face list to show only unique ones (legacy compatibility)"""
|
||||
return self.face_processor._filter_unique_faces_from_list(faces_list)
|
||||
|
||||
def find_similar_faces(self, face_id: int = None, tolerance: float = DEFAULT_FACE_TOLERANCE, include_same_photo: bool = False):
|
||||
"""Find similar faces across all photos"""
|
||||
return self.face_processor.find_similar_faces(face_id, tolerance, include_same_photo)
|
||||
|
||||
def auto_identify_matches(self, tolerance: float = DEFAULT_FACE_TOLERANCE, confirm: bool = True, show_faces: bool = False, include_same_photo: bool = False) -> int:
|
||||
"""Automatically identify faces that match already identified faces"""
|
||||
# This would need to be implemented in the face_processing module
|
||||
# For now, return 0
|
||||
print("⚠️ Auto-identify matches not yet implemented in refactored version")
|
||||
return 0
|
||||
|
||||
# Tag management methods (delegated)
|
||||
def add_tags(self, photo_pattern: str = None, batch_size: int = DEFAULT_BATCH_SIZE) -> int:
|
||||
"""Add custom tags to photos"""
|
||||
return self.tag_manager.add_tags_to_photos(photo_pattern, batch_size)
|
||||
|
||||
def _deduplicate_tags(self, tag_list):
|
||||
"""Remove duplicate tags from a list (legacy compatibility)"""
|
||||
return self.tag_manager.deduplicate_tags(tag_list)
|
||||
|
||||
def _parse_tags_string(self, tags_string):
|
||||
"""Parse a comma-separated tags string (legacy compatibility)"""
|
||||
return self.tag_manager.parse_tags_string(tags_string)
|
||||
|
||||
def _get_tag_id_by_name(self, tag_name, tag_name_to_id_map):
|
||||
"""Get tag ID by name (legacy compatibility)"""
|
||||
return self.db.get_tag_id_by_name(tag_name, tag_name_to_id_map)
|
||||
|
||||
def _get_tag_name_by_id(self, tag_id, tag_id_to_name_map):
|
||||
"""Get tag name by ID (legacy compatibility)"""
|
||||
return self.db.get_tag_name_by_id(tag_id, tag_id_to_name_map)
|
||||
|
||||
def _load_tag_mappings(self):
|
||||
"""Load tag name to ID and ID to name mappings (legacy compatibility)"""
|
||||
return self.db.load_tag_mappings()
|
||||
|
||||
def _get_existing_tag_ids_for_photo(self, photo_id):
|
||||
"""Get list of tag IDs for a photo (legacy compatibility)"""
|
||||
return self.db.get_existing_tag_ids_for_photo(photo_id)
|
||||
|
||||
def _show_people_list(self, cursor=None):
|
||||
"""Show list of people in database (legacy compatibility)"""
|
||||
return self.db.show_people_list(cursor)
|
||||
|
||||
# Search and statistics methods (delegated)
|
||||
def search_faces(self, person_name: str):
|
||||
"""Search for photos containing a specific person"""
|
||||
return self.search_stats.search_faces(person_name)
|
||||
|
||||
def stats(self):
|
||||
"""Show database statistics"""
|
||||
return self.search_stats.print_statistics()
|
||||
|
||||
# GUI methods (legacy compatibility - these would need to be implemented)
|
||||
def identify_faces(self, batch_size: int = DEFAULT_BATCH_SIZE, show_faces: bool = False, tolerance: float = DEFAULT_FACE_TOLERANCE,
|
||||
date_from: str = None, date_to: str = None, date_processed_from: str = None, date_processed_to: str = None) -> int:
|
||||
"""Interactive face identification with GUI"""
|
||||
print("⚠️ Face identification GUI not yet implemented in refactored version")
|
||||
return 0
|
||||
|
||||
def tag_management(self) -> int:
|
||||
"""Tag management GUI"""
|
||||
print("⚠️ Tag management GUI not yet implemented in refactored version")
|
||||
return 0
|
||||
|
||||
def modifyidentified(self) -> int:
|
||||
"""Modify identified faces GUI"""
|
||||
print("⚠️ Face modification GUI not yet implemented in refactored version")
|
||||
return 0
|
||||
|
||||
def _setup_window_size_saving(self, root, config_file="gui_config.json"):
|
||||
"""Set up window size saving functionality (legacy compatibility)"""
|
||||
return self.gui_core.setup_window_size_saving(root, config_file)
|
||||
|
||||
def _display_similar_faces_in_panel(self, parent_frame, similar_faces_data, face_vars, face_images, face_crops, current_face_id=None, face_selection_states=None, data_cache=None):
|
||||
"""Display similar faces in panel (legacy compatibility)"""
|
||||
print("⚠️ Similar faces panel not yet implemented in refactored version")
|
||||
return None
|
||||
|
||||
def _create_photo_icon(self, canvas, photo_path, icon_size=20, icon_x=None, icon_y=None, callback=None):
|
||||
"""Create a small photo icon on a canvas (legacy compatibility)"""
|
||||
return self.gui_core.create_photo_icon(canvas, photo_path, icon_size, icon_x, icon_y, callback)
|
||||
|
||||
def _get_confidence_description(self, confidence_pct: float) -> str:
|
||||
"""Get human-readable confidence description (legacy compatibility)"""
|
||||
return self.face_processor._get_confidence_description(confidence_pct)
|
||||
|
||||
# Cache management (legacy compatibility)
|
||||
def _clear_caches(self):
|
||||
"""Clear all caches to free memory (legacy compatibility)"""
|
||||
self.face_processor._clear_caches()
|
||||
|
||||
def _cleanup_face_crops(self, current_face_crop_path=None):
|
||||
"""Clean up face crop files and caches (legacy compatibility)"""
|
||||
self.face_processor.cleanup_face_crops(current_face_crop_path)
|
||||
|
||||
@property
|
||||
def _face_encoding_cache(self):
|
||||
"""Face encoding cache (legacy compatibility)"""
|
||||
return self.face_processor._face_encoding_cache
|
||||
|
||||
@property
|
||||
def _image_cache(self):
|
||||
"""Image cache (legacy compatibility)"""
|
||||
return self.face_processor._image_cache
|
||||
|
||||
|
||||
def main():
|
||||
"""Main CLI interface"""
|
||||
parser = argparse.ArgumentParser(
|
||||
description="PunimTag CLI - Simple photo face tagger (Refactored)",
|
||||
formatter_class=argparse.RawDescriptionHelpFormatter,
|
||||
epilog="""
|
||||
Examples:
|
||||
photo_tagger_refactored.py scan /path/to/photos # Scan folder for photos
|
||||
photo_tagger_refactored.py process --limit 20 # Process 20 photos for faces
|
||||
photo_tagger_refactored.py identify --batch 10 # Identify 10 faces interactively
|
||||
photo_tagger_refactored.py auto-match # Auto-identify matching faces
|
||||
photo_tagger_refactored.py modifyidentified # Show and Modify identified faces
|
||||
photo_tagger_refactored.py match 15 # Find faces similar to face ID 15
|
||||
photo_tagger_refactored.py tag --pattern "vacation" # Tag photos matching pattern
|
||||
photo_tagger_refactored.py search "John" # Find photos with John
|
||||
photo_tagger_refactored.py tag-manager # Open tag management GUI
|
||||
photo_tagger_refactored.py stats # Show statistics
|
||||
"""
|
||||
)
|
||||
|
||||
parser.add_argument('command',
|
||||
choices=['scan', 'process', 'identify', 'tag', 'search', 'stats', 'match', 'auto-match', 'modifyidentified', 'tag-manager'],
|
||||
help='Command to execute')
|
||||
|
||||
parser.add_argument('target', nargs='?',
|
||||
help='Target folder (scan), person name (search), or pattern (tag)')
|
||||
|
||||
parser.add_argument('--db', default=DEFAULT_DB_PATH,
|
||||
help=f'Database file path (default: {DEFAULT_DB_PATH})')
|
||||
|
||||
parser.add_argument('--limit', type=int, default=DEFAULT_PROCESSING_LIMIT,
|
||||
help=f'Batch size limit for processing (default: {DEFAULT_PROCESSING_LIMIT})')
|
||||
|
||||
parser.add_argument('--batch', type=int, default=DEFAULT_BATCH_SIZE,
|
||||
help=f'Batch size for identification (default: {DEFAULT_BATCH_SIZE})')
|
||||
|
||||
parser.add_argument('--pattern',
|
||||
help='Pattern for filtering photos when tagging')
|
||||
|
||||
parser.add_argument('--model', choices=['hog', 'cnn'], default=DEFAULT_FACE_DETECTION_MODEL,
|
||||
help=f'Face detection model: hog (faster) or cnn (more accurate) (default: {DEFAULT_FACE_DETECTION_MODEL})')
|
||||
|
||||
parser.add_argument('--recursive', action='store_true',
|
||||
help='Scan folders recursively')
|
||||
|
||||
parser.add_argument('--show-faces', action='store_true',
|
||||
help='Show individual face crops during identification')
|
||||
|
||||
parser.add_argument('--tolerance', type=float, default=DEFAULT_FACE_TOLERANCE,
|
||||
help=f'Face matching tolerance (0.0-1.0, lower = stricter, default: {DEFAULT_FACE_TOLERANCE})')
|
||||
|
||||
parser.add_argument('--auto', action='store_true',
|
||||
help='Auto-identify high-confidence matches without confirmation')
|
||||
|
||||
parser.add_argument('--include-twins', action='store_true',
|
||||
help='Include same-photo matching (for twins or multiple instances)')
|
||||
|
||||
parser.add_argument('-v', '--verbose', action='count', default=0,
|
||||
help='Increase verbosity (-v, -vv, -vvv for more detail)')
|
||||
|
||||
parser.add_argument('--debug', action='store_true',
|
||||
help='Enable line-by-line debugging with pdb')
|
||||
|
||||
args = parser.parse_args()
|
||||
|
||||
# Initialize tagger
|
||||
tagger = PhotoTagger(args.db, args.verbose, args.debug)
|
||||
|
||||
try:
|
||||
if args.command == 'scan':
|
||||
if not args.target:
|
||||
print("❌ Please specify a folder to scan")
|
||||
return 1
|
||||
tagger.scan_folder(args.target, args.recursive)
|
||||
|
||||
elif args.command == 'process':
|
||||
tagger.process_faces(args.limit, args.model)
|
||||
|
||||
elif args.command == 'identify':
|
||||
show_faces = getattr(args, 'show_faces', False)
|
||||
tagger.identify_faces(args.batch, show_faces, args.tolerance)
|
||||
|
||||
elif args.command == 'tag':
|
||||
tagger.add_tags(args.pattern or args.target, args.batch)
|
||||
|
||||
elif args.command == 'search':
|
||||
if not args.target:
|
||||
print("❌ Please specify a person name to search for")
|
||||
return 1
|
||||
tagger.search_faces(args.target)
|
||||
|
||||
elif args.command == 'stats':
|
||||
tagger.stats()
|
||||
|
||||
elif args.command == 'match':
|
||||
if args.target and args.target.isdigit():
|
||||
face_id = int(args.target)
|
||||
matches = tagger.find_similar_faces(face_id, args.tolerance)
|
||||
if matches:
|
||||
print(f"\n🎯 Found {len(matches)} similar faces:")
|
||||
for match in matches:
|
||||
person_name = "Unknown" if match.get('person_id') is None else f"Person ID {match.get('person_id')}"
|
||||
print(f" 📸 {match.get('filename', 'Unknown')} - {person_name} (confidence: {(1-match.get('distance', 1)):.1%})")
|
||||
else:
|
||||
print("🔍 No similar faces found")
|
||||
else:
|
||||
print("❌ Please specify a face ID number to find matches for")
|
||||
|
||||
elif args.command == 'auto-match':
|
||||
show_faces = getattr(args, 'show_faces', False)
|
||||
include_twins = getattr(args, 'include_twins', False)
|
||||
tagger.auto_identify_matches(args.tolerance, not args.auto, show_faces, include_twins)
|
||||
|
||||
elif args.command == 'modifyidentified':
|
||||
tagger.modifyidentified()
|
||||
|
||||
elif args.command == 'tag-manager':
|
||||
tagger.tag_management()
|
||||
|
||||
return 0
|
||||
|
||||
except KeyboardInterrupt:
|
||||
print("\n\n⚠️ Interrupted by user")
|
||||
return 1
|
||||
except Exception as e:
|
||||
print(f"❌ Error: {e}")
|
||||
if args.debug:
|
||||
import traceback
|
||||
traceback.print_exc()
|
||||
return 1
|
||||
finally:
|
||||
# Always cleanup resources
|
||||
tagger.cleanup()
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
sys.exit(main())
|
||||
34
run.sh
34
run.sh
@ -1,34 +0,0 @@
|
||||
#!/bin/bash
|
||||
# PunimTag Runner Script
|
||||
# Automatically activates virtual environment and runs commands
|
||||
|
||||
# Check if virtual environment exists
|
||||
if [ ! -d "venv" ]; then
|
||||
echo "❌ Virtual environment not found!"
|
||||
echo "Run: python3 -m venv venv && source venv/bin/activate && python3 setup.py"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
# Activate virtual environment
|
||||
source venv/bin/activate
|
||||
|
||||
# Check if no arguments provided
|
||||
if [ $# -eq 0 ]; then
|
||||
echo "🎯 PunimTag CLI"
|
||||
echo "Usage: ./run.sh <command> [arguments]"
|
||||
echo ""
|
||||
echo "Examples:"
|
||||
echo " ./run.sh scan /path/to/photos --recursive"
|
||||
echo " ./run.sh process --limit 20"
|
||||
echo " ./run.sh identify --batch 10"
|
||||
echo " ./run.sh search 'John'"
|
||||
echo " ./run.sh stats"
|
||||
echo ""
|
||||
echo "Or run directly:"
|
||||
echo " source venv/bin/activate"
|
||||
echo " python3 photo_tagger.py --help"
|
||||
exit 0
|
||||
fi
|
||||
|
||||
# Run the command
|
||||
python3 photo_tagger.py "$@"
|
||||
142
test_basic.py
142
test_basic.py
@ -1,142 +0,0 @@
|
||||
#!/usr/bin/env python3
|
||||
"""
|
||||
Basic test for photo_tagger.py without face recognition dependencies
|
||||
Tests database initialization and basic functionality
|
||||
"""
|
||||
|
||||
import sys
|
||||
import os
|
||||
import tempfile
|
||||
import sqlite3
|
||||
|
||||
# Add current directory to path
|
||||
sys.path.insert(0, '.')
|
||||
|
||||
def test_database_init():
|
||||
"""Test database initialization without face recognition"""
|
||||
# Create temporary database
|
||||
with tempfile.NamedTemporaryFile(suffix='.db', delete=False) as tmp:
|
||||
test_db = tmp.name
|
||||
|
||||
try:
|
||||
# Import and test database creation
|
||||
from photo_tagger import PhotoTagger
|
||||
|
||||
# This should fail because face_recognition is not installed
|
||||
# But we can test the import and class structure
|
||||
print("✅ PhotoTagger class imported successfully")
|
||||
|
||||
# Test basic database initialization
|
||||
conn = sqlite3.connect(test_db)
|
||||
cursor = conn.cursor()
|
||||
|
||||
# Create the tables manually to test schema
|
||||
cursor.execute('''
|
||||
CREATE TABLE IF NOT EXISTS photos (
|
||||
id INTEGER PRIMARY KEY AUTOINCREMENT,
|
||||
path TEXT UNIQUE NOT NULL,
|
||||
filename TEXT NOT NULL,
|
||||
date_added DATETIME DEFAULT CURRENT_TIMESTAMP,
|
||||
processed BOOLEAN DEFAULT 0
|
||||
)
|
||||
''')
|
||||
|
||||
cursor.execute('''
|
||||
CREATE TABLE IF NOT EXISTS people (
|
||||
id INTEGER PRIMARY KEY AUTOINCREMENT,
|
||||
name TEXT UNIQUE NOT NULL,
|
||||
created_date DATETIME DEFAULT CURRENT_TIMESTAMP
|
||||
)
|
||||
''')
|
||||
|
||||
conn.commit()
|
||||
|
||||
# Test basic operations
|
||||
cursor.execute("INSERT INTO photos (path, filename) VALUES (?, ?)",
|
||||
("/test/path.jpg", "test.jpg"))
|
||||
cursor.execute("INSERT INTO people (name) VALUES (?)", ("Test Person",))
|
||||
|
||||
cursor.execute("SELECT COUNT(*) FROM photos")
|
||||
photo_count = cursor.fetchone()[0]
|
||||
|
||||
cursor.execute("SELECT COUNT(*) FROM people")
|
||||
people_count = cursor.fetchone()[0]
|
||||
|
||||
conn.close()
|
||||
|
||||
print(f"✅ Database schema created successfully")
|
||||
print(f"✅ Test data inserted: {photo_count} photos, {people_count} people")
|
||||
|
||||
return True
|
||||
|
||||
except ImportError as e:
|
||||
print(f"⚠️ Import error (expected): {e}")
|
||||
print("✅ This is expected without face_recognition installed")
|
||||
return True
|
||||
except Exception as e:
|
||||
print(f"❌ Unexpected error: {e}")
|
||||
return False
|
||||
finally:
|
||||
# Clean up
|
||||
if os.path.exists(test_db):
|
||||
os.unlink(test_db)
|
||||
|
||||
def test_cli_structure():
|
||||
"""Test CLI argument parsing structure"""
|
||||
try:
|
||||
import argparse
|
||||
|
||||
# Test if our argument parser structure is valid
|
||||
parser = argparse.ArgumentParser(description="Test parser")
|
||||
parser.add_argument('command', choices=['scan', 'process', 'identify', 'tag', 'search', 'stats'])
|
||||
parser.add_argument('target', nargs='?')
|
||||
parser.add_argument('--db', default='photos.db')
|
||||
parser.add_argument('--limit', type=int, default=50)
|
||||
|
||||
# Test parsing
|
||||
args = parser.parse_args(['stats'])
|
||||
print(f"✅ CLI argument parsing works: command={args.command}")
|
||||
|
||||
return True
|
||||
except Exception as e:
|
||||
print(f"❌ CLI structure error: {e}")
|
||||
return False
|
||||
|
||||
def main():
|
||||
"""Run basic tests"""
|
||||
print("🧪 Running Basic Tests for PunimTag CLI")
|
||||
print("=" * 50)
|
||||
|
||||
tests = [
|
||||
("Database Schema", test_database_init),
|
||||
("CLI Structure", test_cli_structure),
|
||||
]
|
||||
|
||||
passed = 0
|
||||
total = len(tests)
|
||||
|
||||
for test_name, test_func in tests:
|
||||
print(f"\n📋 Testing: {test_name}")
|
||||
try:
|
||||
if test_func():
|
||||
print(f"✅ {test_name}: PASSED")
|
||||
passed += 1
|
||||
else:
|
||||
print(f"❌ {test_name}: FAILED")
|
||||
except Exception as e:
|
||||
print(f"❌ {test_name}: ERROR - {e}")
|
||||
|
||||
print(f"\n📊 Results: {passed}/{total} tests passed")
|
||||
|
||||
if passed == total:
|
||||
print("🎉 All basic tests passed!")
|
||||
print("\n📦 Next steps:")
|
||||
print("1. Install dependencies: pip install -r requirements.txt")
|
||||
print("2. Test full functionality: python photo_tagger.py stats")
|
||||
return 0
|
||||
else:
|
||||
print("⚠️ Some tests failed")
|
||||
return 1
|
||||
|
||||
if __name__ == "__main__":
|
||||
sys.exit(main())
|
||||
716
test_deepface_gui.py
Normal file
716
test_deepface_gui.py
Normal file
@ -0,0 +1,716 @@
|
||||
#!/usr/bin/env python3
|
||||
"""
|
||||
DeepFace GUI Test Application
|
||||
|
||||
GUI version of test_deepface_only.py that shows face comparison results
|
||||
with left panel for reference faces and right panel for comparison faces with confidence scores.
|
||||
"""
|
||||
|
||||
import os
|
||||
import sys
|
||||
import time
|
||||
import tkinter as tk
|
||||
from tkinter import ttk, messagebox, filedialog
|
||||
from pathlib import Path
|
||||
from typing import List, Dict, Tuple, Optional
|
||||
import numpy as np
|
||||
from PIL import Image, ImageTk
|
||||
|
||||
# Suppress TensorFlow warnings and CUDA errors
|
||||
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
|
||||
import warnings
|
||||
warnings.filterwarnings('ignore')
|
||||
|
||||
# DeepFace library
|
||||
from deepface import DeepFace
|
||||
|
||||
# Face recognition library
|
||||
import face_recognition
|
||||
|
||||
# Supported image formats
|
||||
SUPPORTED_FORMATS = {'.jpg', '.jpeg', '.png', '.bmp', '.tiff', '.tif'}
|
||||
|
||||
|
||||
class FaceComparisonGUI:
|
||||
"""GUI application for DeepFace face comparison testing"""
|
||||
|
||||
def __init__(self):
|
||||
self.root = tk.Tk()
|
||||
self.root.title("Face Comparison Test - DeepFace vs face_recognition")
|
||||
self.root.geometry("2000x1000")
|
||||
self.root.minsize(1200, 800)
|
||||
|
||||
# Data storage
|
||||
self.deepface_faces = [] # DeepFace faces from all images
|
||||
self.facerec_faces = [] # face_recognition faces from all images
|
||||
self.deepface_similarities = [] # DeepFace similarity results
|
||||
self.facerec_similarities = [] # face_recognition similarity results
|
||||
self.processing_times = {} # Timing information for each photo
|
||||
|
||||
# GUI components
|
||||
self.setup_gui()
|
||||
|
||||
def setup_gui(self):
|
||||
"""Set up the GUI layout"""
|
||||
# Main frame
|
||||
main_frame = ttk.Frame(self.root, padding="10")
|
||||
main_frame.grid(row=0, column=0, sticky=(tk.W, tk.E, tk.N, tk.S))
|
||||
|
||||
# Configure grid weights
|
||||
self.root.columnconfigure(0, weight=1)
|
||||
self.root.rowconfigure(0, weight=1)
|
||||
main_frame.columnconfigure(0, weight=1)
|
||||
main_frame.rowconfigure(2, weight=1) # Make the content area expandable
|
||||
|
||||
# Title
|
||||
title_label = ttk.Label(main_frame, text="Face Comparison Test - DeepFace vs face_recognition",
|
||||
font=("Arial", 16, "bold"))
|
||||
title_label.grid(row=0, column=0, columnspan=3, pady=(0, 10))
|
||||
|
||||
# Control panel
|
||||
control_frame = ttk.Frame(main_frame)
|
||||
control_frame.grid(row=1, column=0, columnspan=3, sticky=(tk.W, tk.E), pady=(0, 5))
|
||||
|
||||
# Folder selection
|
||||
ttk.Label(control_frame, text="Test Folder:").grid(row=0, column=0, padx=(0, 5))
|
||||
self.folder_var = tk.StringVar(value="demo_photos/testdeepface/")
|
||||
folder_entry = ttk.Entry(control_frame, textvariable=self.folder_var, width=40)
|
||||
folder_entry.grid(row=0, column=1, padx=(0, 5))
|
||||
|
||||
browse_btn = ttk.Button(control_frame, text="Browse", command=self.browse_folder)
|
||||
browse_btn.grid(row=0, column=2, padx=(0, 10))
|
||||
|
||||
# Reference image selection
|
||||
ttk.Label(control_frame, text="Reference Image:").grid(row=0, column=3, padx=(10, 5))
|
||||
self.reference_var = tk.StringVar(value="2019-11-22_0011.JPG")
|
||||
reference_entry = ttk.Entry(control_frame, textvariable=self.reference_var, width=20)
|
||||
reference_entry.grid(row=0, column=4, padx=(0, 5))
|
||||
|
||||
# Face detector selection
|
||||
ttk.Label(control_frame, text="Detector:").grid(row=0, column=5, padx=(10, 5))
|
||||
self.detector_var = tk.StringVar(value="retinaface")
|
||||
detector_combo = ttk.Combobox(control_frame, textvariable=self.detector_var,
|
||||
values=["retinaface", "mtcnn", "opencv", "ssd"],
|
||||
state="readonly", width=10)
|
||||
detector_combo.grid(row=0, column=6, padx=(0, 5))
|
||||
|
||||
# Similarity threshold
|
||||
ttk.Label(control_frame, text="Threshold:").grid(row=0, column=7, padx=(10, 5))
|
||||
self.threshold_var = tk.StringVar(value="60")
|
||||
threshold_entry = ttk.Entry(control_frame, textvariable=self.threshold_var, width=8)
|
||||
threshold_entry.grid(row=0, column=8, padx=(0, 5))
|
||||
|
||||
# Process button
|
||||
process_btn = ttk.Button(control_frame, text="Process Images",
|
||||
command=self.process_images, style="Accent.TButton")
|
||||
process_btn.grid(row=0, column=9, padx=(10, 0))
|
||||
|
||||
# Progress bar
|
||||
self.progress_var = tk.DoubleVar()
|
||||
self.progress_bar = ttk.Progressbar(control_frame, variable=self.progress_var,
|
||||
maximum=100, length=200)
|
||||
self.progress_bar.grid(row=1, column=0, columnspan=10, sticky=(tk.W, tk.E), pady=(5, 0))
|
||||
|
||||
# Status label
|
||||
self.status_var = tk.StringVar(value="Ready to process images")
|
||||
status_label = ttk.Label(control_frame, textvariable=self.status_var)
|
||||
status_label.grid(row=2, column=0, columnspan=10, pady=(5, 0))
|
||||
|
||||
# Main content area with three panels
|
||||
content_frame = ttk.Frame(main_frame)
|
||||
content_frame.grid(row=2, column=0, columnspan=3, sticky=(tk.W, tk.E, tk.N, tk.S), pady=(10, 0))
|
||||
content_frame.columnconfigure(0, weight=1)
|
||||
content_frame.columnconfigure(1, weight=1)
|
||||
content_frame.columnconfigure(2, weight=1)
|
||||
content_frame.rowconfigure(0, weight=1)
|
||||
|
||||
# Left panel - DeepFace results
|
||||
left_frame = ttk.LabelFrame(content_frame, text="DeepFace Results", padding="5")
|
||||
left_frame.grid(row=0, column=0, sticky=(tk.W, tk.E, tk.N, tk.S), padx=(0, 5))
|
||||
left_frame.columnconfigure(0, weight=1)
|
||||
left_frame.rowconfigure(0, weight=1)
|
||||
|
||||
# Left panel scrollable area
|
||||
left_canvas = tk.Canvas(left_frame, bg="white")
|
||||
left_scrollbar = ttk.Scrollbar(left_frame, orient="vertical", command=left_canvas.yview)
|
||||
self.left_scrollable_frame = ttk.Frame(left_canvas)
|
||||
|
||||
self.left_scrollable_frame.bind(
|
||||
"<Configure>",
|
||||
lambda e: left_canvas.configure(scrollregion=left_canvas.bbox("all"))
|
||||
)
|
||||
|
||||
left_canvas.create_window((0, 0), window=self.left_scrollable_frame, anchor="nw")
|
||||
left_canvas.configure(yscrollcommand=left_scrollbar.set)
|
||||
|
||||
left_canvas.grid(row=0, column=0, sticky=(tk.W, tk.E, tk.N, tk.S))
|
||||
left_scrollbar.grid(row=0, column=1, sticky=(tk.N, tk.S))
|
||||
|
||||
# Middle panel - face_recognition results
|
||||
middle_frame = ttk.LabelFrame(content_frame, text="face_recognition Results", padding="5")
|
||||
middle_frame.grid(row=0, column=1, sticky=(tk.W, tk.E, tk.N, tk.S), padx=(5, 5))
|
||||
middle_frame.columnconfigure(0, weight=1)
|
||||
middle_frame.rowconfigure(0, weight=1)
|
||||
|
||||
# Right panel - Comparison Results
|
||||
right_frame = ttk.LabelFrame(content_frame, text="Comparison Results", padding="5")
|
||||
right_frame.grid(row=0, column=2, sticky=(tk.W, tk.E, tk.N, tk.S), padx=(5, 0))
|
||||
right_frame.columnconfigure(0, weight=1)
|
||||
right_frame.rowconfigure(0, weight=1)
|
||||
|
||||
# Middle panel scrollable area
|
||||
middle_canvas = tk.Canvas(middle_frame, bg="white")
|
||||
middle_scrollbar = ttk.Scrollbar(middle_frame, orient="vertical", command=middle_canvas.yview)
|
||||
self.middle_scrollable_frame = ttk.Frame(middle_canvas)
|
||||
|
||||
self.middle_scrollable_frame.bind(
|
||||
"<Configure>",
|
||||
lambda e: middle_canvas.configure(scrollregion=middle_canvas.bbox("all"))
|
||||
)
|
||||
|
||||
middle_canvas.create_window((0, 0), window=self.middle_scrollable_frame, anchor="nw")
|
||||
middle_canvas.configure(yscrollcommand=middle_scrollbar.set)
|
||||
|
||||
middle_canvas.grid(row=0, column=0, sticky=(tk.W, tk.E, tk.N, tk.S))
|
||||
middle_scrollbar.grid(row=0, column=1, sticky=(tk.N, tk.S))
|
||||
|
||||
# Right panel scrollable area
|
||||
right_canvas = tk.Canvas(right_frame, bg="white")
|
||||
right_scrollbar = ttk.Scrollbar(right_frame, orient="vertical", command=right_canvas.yview)
|
||||
self.right_scrollable_frame = ttk.Frame(right_canvas)
|
||||
|
||||
self.right_scrollable_frame.bind(
|
||||
"<Configure>",
|
||||
lambda e: right_canvas.configure(scrollregion=right_canvas.bbox("all"))
|
||||
)
|
||||
|
||||
right_canvas.create_window((0, 0), window=self.right_scrollable_frame, anchor="nw")
|
||||
right_canvas.configure(yscrollcommand=right_scrollbar.set)
|
||||
|
||||
right_canvas.grid(row=0, column=0, sticky=(tk.W, tk.E, tk.N, tk.S))
|
||||
right_scrollbar.grid(row=0, column=1, sticky=(tk.N, tk.S))
|
||||
|
||||
# Bind mousewheel to all canvases
|
||||
def _on_mousewheel(event):
|
||||
left_canvas.yview_scroll(int(-1*(event.delta/120)), "units")
|
||||
middle_canvas.yview_scroll(int(-1*(event.delta/120)), "units")
|
||||
right_canvas.yview_scroll(int(-1*(event.delta/120)), "units")
|
||||
|
||||
left_canvas.bind("<MouseWheel>", _on_mousewheel)
|
||||
middle_canvas.bind("<MouseWheel>", _on_mousewheel)
|
||||
right_canvas.bind("<MouseWheel>", _on_mousewheel)
|
||||
|
||||
def browse_folder(self):
|
||||
"""Browse for folder containing test images"""
|
||||
folder = filedialog.askdirectory(initialdir="demo_photos/")
|
||||
if folder:
|
||||
self.folder_var.set(folder)
|
||||
|
||||
    def update_status(self, message: str):
        """Update status message.

        Sets the status-bar text and forces Tk to process pending idle
        tasks so the new message is painted immediately during long work.
        """
        self.status_var.set(message)
        self.root.update_idletasks()
|
||||
|
||||
    def update_progress(self, value: float):
        """Update progress bar.

        Args:
            value: Progress value (0-100) for the determinate progress bar.
        """
        self.progress_var.set(value)
        self.root.update_idletasks()
|
||||
|
||||
def get_image_files(self, folder_path: str) -> List[str]:
|
||||
"""Get all supported image files from folder"""
|
||||
folder = Path(folder_path)
|
||||
if not folder.exists():
|
||||
raise FileNotFoundError(f"Folder not found: {folder_path}")
|
||||
|
||||
image_files = []
|
||||
for file_path in folder.rglob("*"):
|
||||
if file_path.is_file() and file_path.suffix.lower() in SUPPORTED_FORMATS:
|
||||
image_files.append(str(file_path))
|
||||
|
||||
return sorted(image_files)
|
||||
|
||||
    def process_with_deepface(self, image_path: str, detector: str = "retinaface") -> Dict:
        """Process image with DeepFace library.

        Runs DeepFace.represent() on *image_path* and converts each detected
        face into this tool's common face-dict format.

        Args:
            image_path: Path to the image file to analyse.
            detector: DeepFace detector backend name (user-selected).

        Returns:
            Dict with 'faces' (list of face dicts) and 'encodings'
            (list of np.ndarray embeddings); both lists are empty when
            no faces are found or on any error.
        """
        try:
            # Use DeepFace.represent() to get proper face detection with regions
            # Using selected detector for face detection
            results = DeepFace.represent(
                img_path=image_path,
                model_name='ArcFace',  # Best accuracy model
                detector_backend=detector,  # User-selected detector
                enforce_detection=False,  # Don't fail if no faces
                align=True  # Face alignment for better accuracy
            )

            if not results:
                print(f"No faces found in {Path(image_path).name}")
                return {'faces': [], 'encodings': []}

            print(f"Found {len(results)} faces in {Path(image_path).name}")

            # Convert to our format
            faces = []
            encodings = []

            for i, result in enumerate(results):
                try:
                    # Extract face region info from DeepFace result
                    # DeepFace uses 'facial_area' instead of 'region'
                    facial_area = result.get('facial_area', {})
                    face_confidence = result.get('face_confidence', 0.0)

                    # Create face data with proper bounding box.
                    # 'location' mirrors face_recognition's (top, right, bottom, left) order.
                    face_data = {
                        'image_path': image_path,
                        'face_id': f"df_{Path(image_path).stem}_{i}",
                        'location': (facial_area.get('y', 0), facial_area.get('x', 0) + facial_area.get('w', 0),
                                     facial_area.get('y', 0) + facial_area.get('h', 0), facial_area.get('x', 0)),
                        'bbox': facial_area,
                        'encoding': np.array(result['embedding']),
                        'confidence': face_confidence
                    }
                    faces.append(face_data)
                    encodings.append(np.array(result['embedding']))

                    print(f"Face {i}: facial_area={facial_area}, confidence={face_confidence:.2f}, embedding shape={np.array(result['embedding']).shape}")

                except Exception as e:
                    # Skip individual faces that fail conversion; keep the rest.
                    print(f"Error processing face {i}: {e}")
                    continue

            return {
                'faces': faces,
                'encodings': encodings
            }

        except Exception as e:
            # Broad catch: DeepFace raises a variety of backend-specific errors.
            print(f"DeepFace error on {image_path}: {e}")
            return {'faces': [], 'encodings': []}
|
||||
|
||||
    def process_with_face_recognition(self, image_path: str) -> Dict:
        """Process image with face_recognition library.

        Detects faces with the HOG detector (faster than CNN) and computes
        encodings, returning them in the same face-dict format as
        process_with_deepface.

        Args:
            image_path: Path to the image file to analyse.

        Returns:
            Dict with 'faces' and 'encodings' lists; both empty when no
            faces are found or on any error.
        """
        try:
            # Load image
            image = face_recognition.load_image_file(image_path)

            # Find face locations
            face_locations = face_recognition.face_locations(image, model="hog")  # Use HOG model for speed

            if not face_locations:
                print(f"No faces found in {Path(image_path).name} (face_recognition)")
                return {'faces': [], 'encodings': []}

            print(f"Found {len(face_locations)} faces in {Path(image_path).name} (face_recognition)")

            # Get face encodings
            face_encodings = face_recognition.face_encodings(image, face_locations)

            # Convert to our format
            faces = []
            encodings = []

            for i, (face_location, face_encoding) in enumerate(zip(face_locations, face_encodings)):
                try:
                    # face_recognition returns (top, right, bottom, left)
                    top, right, bottom, left = face_location

                    # Create face data with proper bounding box
                    face_data = {
                        'image_path': image_path,
                        'face_id': f"fr_{Path(image_path).stem}_{i}",
                        'location': face_location,
                        'bbox': {'x': left, 'y': top, 'w': right - left, 'h': bottom - top},
                        'encoding': np.array(face_encoding),
                        'confidence': 1.0  # face_recognition doesn't provide confidence scores
                    }
                    faces.append(face_data)
                    encodings.append(np.array(face_encoding))

                    print(f"Face {i}: location={face_location}, encoding shape={np.array(face_encoding).shape}")

                except Exception as e:
                    # Skip faces that fail conversion; keep processing the rest.
                    print(f"Error processing face {i}: {e}")
                    continue

            return {
                'faces': faces,
                'encodings': encodings
            }

        except Exception as e:
            print(f"face_recognition error on {image_path}: {e}")
            return {'faces': [], 'encodings': []}
|
||||
|
||||
    def extract_face_thumbnail(self, face_data: Dict, size: Tuple[int, int] = (150, 150)) -> ImageTk.PhotoImage:
        """Extract face thumbnail from image.

        Crops the face bbox (plus a fixed 20px margin, clamped to the image
        bounds) from the original photo and resizes it to exactly *size*.

        Args:
            face_data: Face dict containing 'image_path', 'bbox' (x/y/w/h
                keys) and 'face_id'.
            size: Exact (width, height) of the returned thumbnail.

        Returns:
            ImageTk.PhotoImage of the face crop; a light-gray placeholder
            of the same size if anything fails.
        """
        try:
            # Load original image
            image = Image.open(face_data['image_path'])

            # Extract face region
            bbox = face_data['bbox']
            left = bbox.get('x', 0)
            top = bbox.get('y', 0)
            right = left + bbox.get('w', 0)
            bottom = top + bbox.get('h', 0)

            # Add padding (clamped so the crop box never leaves the image)
            padding = 20
            left = max(0, left - padding)
            top = max(0, top - padding)
            right = min(image.width, right + padding)
            bottom = min(image.height, bottom + padding)

            # Crop face
            face_crop = image.crop((left, top, right, bottom))

            # FORCE resize to exact size (don't use thumbnail which maintains aspect ratio)
            face_crop = face_crop.resize(size, Image.Resampling.LANCZOS)

            print(f"DEBUG: Created thumbnail of size {face_crop.size} for {face_data['face_id']}")

            # Convert to PhotoImage
            return ImageTk.PhotoImage(face_crop)

        except Exception as e:
            print(f"Error extracting thumbnail for {face_data['face_id']}: {e}")
            # Return a placeholder image
            placeholder = Image.new('RGB', size, color='lightgray')
            return ImageTk.PhotoImage(placeholder)
|
||||
|
||||
def calculate_face_similarity(self, encoding1: np.ndarray, encoding2: np.ndarray) -> float:
|
||||
"""Calculate similarity between two face encodings using cosine similarity"""
|
||||
try:
|
||||
# Ensure encodings are numpy arrays
|
||||
enc1 = np.array(encoding1).flatten()
|
||||
enc2 = np.array(encoding2).flatten()
|
||||
|
||||
# Check if encodings have the same length
|
||||
if len(enc1) != len(enc2):
|
||||
print(f"Warning: Encoding length mismatch: {len(enc1)} vs {len(enc2)}")
|
||||
return 0.0
|
||||
|
||||
# Normalize encodings
|
||||
enc1_norm = enc1 / (np.linalg.norm(enc1) + 1e-8) # Add small epsilon to avoid division by zero
|
||||
enc2_norm = enc2 / (np.linalg.norm(enc2) + 1e-8)
|
||||
|
||||
# Calculate cosine similarity
|
||||
cosine_sim = np.dot(enc1_norm, enc2_norm)
|
||||
|
||||
# Clamp cosine similarity to valid range [-1, 1]
|
||||
cosine_sim = np.clip(cosine_sim, -1.0, 1.0)
|
||||
|
||||
# Convert to confidence percentage (0-100)
|
||||
# For face recognition, we typically want values between 0-100%
|
||||
# where higher values mean more similar faces
|
||||
confidence = max(0, min(100, (cosine_sim + 1) * 50)) # Scale from [-1,1] to [0,100]
|
||||
|
||||
return confidence
|
||||
|
||||
except Exception as e:
|
||||
print(f"Error calculating similarity: {e}")
|
||||
return 0.0
|
||||
|
||||
    def process_images(self):
        """Process all images and perform face comparison.

        Full pipeline: clears previous state and GUI panels, validates the
        folder/threshold inputs, runs BOTH libraries over every image in
        the selected folder (recording per-image wall-clock timings),
        computes pairwise similarities above the chosen threshold for each
        library separately, and renders everything via display_results().
        All errors are reported through a message box rather than raised.
        """
        try:
            # Clear previous results
            self.deepface_faces = []
            self.facerec_faces = []
            self.deepface_similarities = []
            self.facerec_similarities = []
            self.processing_times = {}

            # Clear GUI panels
            for widget in self.left_scrollable_frame.winfo_children():
                widget.destroy()
            for widget in self.middle_scrollable_frame.winfo_children():
                widget.destroy()
            for widget in self.right_scrollable_frame.winfo_children():
                widget.destroy()

            folder_path = self.folder_var.get()
            threshold = float(self.threshold_var.get())

            if not folder_path:
                messagebox.showerror("Error", "Please specify folder path")
                return

            self.update_status("Getting image files...")
            self.update_progress(10)

            # Get all image files
            image_files = self.get_image_files(folder_path)
            if not image_files:
                messagebox.showerror("Error", "No image files found in the specified folder")
                return

            # Get selected detector
            detector = self.detector_var.get()

            self.update_status(f"Processing all images with both DeepFace and face_recognition...")
            self.update_progress(20)

            # Process all images with both libraries
            for i, image_path in enumerate(image_files):
                filename = Path(image_path).name
                # NOTE(review): 'filename' is computed above but not interpolated
                # into this message — presumably intended f"Processing {filename}...";
                # confirm before changing.
                self.update_status(f"Processing (unknown)...")
                # Spread per-image progress across the 20-70% band.
                progress = 20 + (i / len(image_files)) * 50
                self.update_progress(progress)

                # Process with DeepFace
                start_time = time.time()
                deepface_result = self.process_with_deepface(image_path, detector)
                deepface_time = time.time() - start_time

                # Process with face_recognition
                start_time = time.time()
                facerec_result = self.process_with_face_recognition(image_path)
                facerec_time = time.time() - start_time

                # Store timing information
                self.processing_times[filename] = {
                    'deepface_time': deepface_time,
                    'facerec_time': facerec_time,
                    'total_time': deepface_time + facerec_time
                }

                # Store results
                self.deepface_faces.extend(deepface_result['faces'])
                self.facerec_faces.extend(facerec_result['faces'])

                print(f"Processed (unknown): DeepFace={deepface_time:.2f}s, face_recognition={facerec_time:.2f}s")

            if not self.deepface_faces and not self.facerec_faces:
                messagebox.showwarning("Warning", "No faces found in any images")
                return

            self.update_status("Calculating face similarities...")
            self.update_progress(75)

            # Calculate similarities for DeepFace (all-pairs, O(n^2) in face count)
            for i, face1 in enumerate(self.deepface_faces):
                similarities = []
                for j, face2 in enumerate(self.deepface_faces):
                    if i != j:  # Don't compare face with itself
                        confidence = self.calculate_face_similarity(
                            face1['encoding'], face2['encoding']
                        )
                        if confidence >= threshold:  # Only include faces above threshold
                            similarities.append({
                                'face': face2,
                                'confidence': confidence
                            })

                # Sort by confidence (highest first)
                similarities.sort(key=lambda x: x['confidence'], reverse=True)
                self.deepface_similarities.append({
                    'face': face1,
                    'similarities': similarities
                })

            # Calculate similarities for face_recognition
            for i, face1 in enumerate(self.facerec_faces):
                similarities = []
                for j, face2 in enumerate(self.facerec_faces):
                    if i != j:  # Don't compare face with itself
                        confidence = self.calculate_face_similarity(
                            face1['encoding'], face2['encoding']
                        )
                        if confidence >= threshold:  # Only include faces above threshold
                            similarities.append({
                                'face': face2,
                                'confidence': confidence
                            })

                # Sort by confidence (highest first)
                similarities.sort(key=lambda x: x['confidence'], reverse=True)
                self.facerec_similarities.append({
                    'face': face1,
                    'similarities': similarities
                })

            self.update_status("Displaying results...")
            self.update_progress(95)

            # Display results in GUI
            self.display_results()

            total_deepface_faces = len(self.deepface_faces)
            total_facerec_faces = len(self.facerec_faces)
            avg_deepface_time = sum(t['deepface_time'] for t in self.processing_times.values()) / len(self.processing_times)
            avg_facerec_time = sum(t['facerec_time'] for t in self.processing_times.values()) / len(self.processing_times)

            self.update_status(f"Complete! DeepFace: {total_deepface_faces} faces ({avg_deepface_time:.2f}s avg), face_recognition: {total_facerec_faces} faces ({avg_facerec_time:.2f}s avg)")
            self.update_progress(100)

        except Exception as e:
            messagebox.showerror("Error", f"Processing failed: {str(e)}")
            self.update_status("Error occurred during processing")
            print(f"Error: {e}")
            import traceback
            traceback.print_exc()
|
||||
|
||||
    def display_results(self):
        """Display the face comparison results in the GUI panels.

        Left panel: DeepFace matches; middle panel: face_recognition
        matches; right panel: per-library timing comparison.
        """
        # Display DeepFace results in left panel
        self.display_library_results(self.deepface_similarities, self.left_scrollable_frame, "DeepFace")

        # Display face_recognition results in middle panel
        self.display_library_results(self.facerec_similarities, self.middle_scrollable_frame, "face_recognition")

        # Display timing comparison in right panel
        self.display_timing_comparison()
|
||||
|
||||
    def display_library_results(self, similarities_list: List[Dict], parent_frame, library_name: str):
        """Display results for a specific library.

        Adds one row per detected face to *parent_frame*: an 80x80
        thumbnail plus id/image labels and the count of similar faces
        that passed the threshold.

        Args:
            similarities_list: List of {'face': ..., 'similarities': [...]}.
            parent_frame: Tk container the rows are gridded into.
            library_name: Library label (not used in the current layout).
        """
        for i, result in enumerate(similarities_list):
            face = result['face']

            # Create frame for this face
            face_frame = ttk.Frame(parent_frame)
            face_frame.grid(row=i, column=0, sticky=(tk.W, tk.E), pady=5, padx=5)

            # Face thumbnail
            thumbnail = self.extract_face_thumbnail(face, size=(80, 80))
            thumbnail_label = ttk.Label(face_frame, image=thumbnail)
            thumbnail_label.image = thumbnail  # Keep a reference so Tk doesn't garbage-collect the image
            thumbnail_label.grid(row=0, column=0, padx=5, pady=5)

            # Face info
            info_frame = ttk.Frame(face_frame)
            info_frame.grid(row=0, column=1, sticky=(tk.W, tk.E), padx=5)

            ttk.Label(info_frame, text=f"Face {i+1}", font=("Arial", 10, "bold")).grid(row=0, column=0, sticky=tk.W, pady=1)
            ttk.Label(info_frame, text=f"ID: {face['face_id']}", font=("Arial", 8)).grid(row=1, column=0, sticky=tk.W, pady=1)
            ttk.Label(info_frame, text=f"Image: {Path(face['image_path']).name}", font=("Arial", 8)).grid(row=2, column=0, sticky=tk.W, pady=1)

            # Show number of similar faces
            similar_count = len(result['similarities'])
            ttk.Label(info_frame, text=f"Similar: {similar_count}", font=("Arial", 8, "bold")).grid(row=3, column=0, sticky=tk.W, pady=1)
|
||||
|
||||
def display_timing_comparison(self):
|
||||
"""Display timing comparison between libraries"""
|
||||
if not self.processing_times:
|
||||
return
|
||||
|
||||
# Create summary frame
|
||||
summary_frame = ttk.LabelFrame(self.right_scrollable_frame, text="Processing Times Summary")
|
||||
summary_frame.grid(row=0, column=0, sticky=(tk.W, tk.E), pady=5, padx=5)
|
||||
|
||||
# Calculate averages
|
||||
total_deepface_time = sum(t['deepface_time'] for t in self.processing_times.values())
|
||||
total_facerec_time = sum(t['facerec_time'] for t in self.processing_times.values())
|
||||
avg_deepface_time = total_deepface_time / len(self.processing_times)
|
||||
avg_facerec_time = total_facerec_time / len(self.processing_times)
|
||||
|
||||
# Summary statistics
|
||||
ttk.Label(summary_frame, text=f"Total Images: {len(self.processing_times)}", font=("Arial", 10, "bold")).grid(row=0, column=0, sticky=tk.W, pady=2)
|
||||
ttk.Label(summary_frame, text=f"DeepFace Avg: {avg_deepface_time:.2f}s", font=("Arial", 9)).grid(row=1, column=0, sticky=tk.W, pady=1)
|
||||
ttk.Label(summary_frame, text=f"face_recognition Avg: {avg_facerec_time:.2f}s", font=("Arial", 9)).grid(row=2, column=0, sticky=tk.W, pady=1)
|
||||
|
||||
speed_ratio = avg_deepface_time / avg_facerec_time if avg_facerec_time > 0 else 0
|
||||
if speed_ratio > 1:
|
||||
faster_lib = "face_recognition"
|
||||
speed_text = f"{speed_ratio:.1f}x faster"
|
||||
else:
|
||||
faster_lib = "DeepFace"
|
||||
speed_text = f"{1/speed_ratio:.1f}x faster"
|
||||
|
||||
ttk.Label(summary_frame, text=f"{faster_lib} is {speed_text}", font=("Arial", 9, "bold"), foreground="green").grid(row=3, column=0, sticky=tk.W, pady=2)
|
||||
|
||||
# Individual photo timings
|
||||
timing_frame = ttk.LabelFrame(self.right_scrollable_frame, text="Per-Photo Timing")
|
||||
timing_frame.grid(row=1, column=0, sticky=(tk.W, tk.E), pady=5, padx=5)
|
||||
|
||||
row = 0
|
||||
for filename, times in sorted(self.processing_times.items()):
|
||||
ttk.Label(timing_frame, text=f"{filename[:20]}...", font=("Arial", 8)).grid(row=row, column=0, sticky=tk.W, pady=1)
|
||||
ttk.Label(timing_frame, text=f"DF: {times['deepface_time']:.2f}s", font=("Arial", 8)).grid(row=row, column=1, sticky=tk.W, pady=1, padx=(5,0))
|
||||
ttk.Label(timing_frame, text=f"FR: {times['facerec_time']:.2f}s", font=("Arial", 8)).grid(row=row, column=2, sticky=tk.W, pady=1, padx=(5,0))
|
||||
row += 1
|
||||
|
||||
    def display_comparison_faces(self, ref_index: int, similarities: List[Dict]):
        """Display comparison faces for a specific reference face.

        Renders up to 8 of the best matches (120x120 thumbnail plus a
        colour-coded confidence label) in the right panel for the
        reference face at *ref_index*.

        Args:
            ref_index: Zero-based index of the reference face; used for
                the frame title and its grid row.
            similarities: List of {'face': ..., 'confidence': ...},
                expected sorted best-first.
        """
        # Create frame for this reference face's comparisons
        comp_frame = ttk.LabelFrame(self.right_scrollable_frame,
                                   text=f"Matches for Reference Face {ref_index + 1}")
        comp_frame.grid(row=ref_index, column=0, sticky=(tk.W, tk.E), pady=10, padx=10)

        # Display top matches (limit to avoid too much clutter)
        max_matches = min(8, len(similarities))

        for i in range(max_matches):
            sim_data = similarities[i]
            face = sim_data['face']
            confidence = sim_data['confidence']

            # Create frame for this comparison face
            face_frame = ttk.Frame(comp_frame)
            face_frame.grid(row=i, column=0, sticky=(tk.W, tk.E), pady=5, padx=10)

            # Face thumbnail
            thumbnail = self.extract_face_thumbnail(face, size=(120, 120))
            thumbnail_label = ttk.Label(face_frame, image=thumbnail)
            thumbnail_label.image = thumbnail  # Keep a reference so Tk doesn't garbage-collect the image
            thumbnail_label.grid(row=0, column=0, padx=10, pady=5)

            # Face info with confidence
            info_frame = ttk.Frame(face_frame)
            info_frame.grid(row=0, column=1, sticky=(tk.W, tk.E), padx=10)

            # Confidence with color coding: green >= 80%, orange >= 60%, red below
            confidence_text = f"{confidence:.1f}%"
            if confidence >= 80:
                confidence_color = "green"
            elif confidence >= 60:
                confidence_color = "orange"
            else:
                confidence_color = "red"

            ttk.Label(info_frame, text=confidence_text,
                     font=("Arial", 14, "bold"), foreground=confidence_color).grid(row=0, column=0, sticky=tk.W, pady=2)
            ttk.Label(info_frame, text=f"ID: {face['face_id']}", font=("Arial", 10)).grid(row=1, column=0, sticky=tk.W, pady=2)
            ttk.Label(info_frame, text=f"Image: {Path(face['image_path']).name}", font=("Arial", 10)).grid(row=2, column=0, sticky=tk.W, pady=2)
|
||||
|
||||
    def run(self):
        """Start the GUI application.

        Blocks in Tk's main event loop until the window is closed.
        """
        self.root.mainloop()
|
||||
|
||||
|
||||
def main():
    """Main entry point.

    Verifies that both face libraries are installed, silences TensorFlow
    log noise, and launches the comparison GUI. Exits with status 1 on
    missing dependencies or a fatal GUI error.

    Fix: TF_CPP_MIN_LOG_LEVEL must be set BEFORE deepface (and therefore
    TensorFlow) is imported — TensorFlow reads the variable at import
    time, so the previous placement after the import had no effect.
    """
    # Suppress TensorFlow warnings and errors (must precede the deepface import)
    import os
    os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'  # Suppress TensorFlow warnings
    import warnings
    warnings.filterwarnings('ignore')

    # Check dependencies
    try:
        from deepface import DeepFace
    except ImportError as e:
        print(f"Error: Missing required dependency: {e}")
        print("Please install with: pip install deepface")
        sys.exit(1)

    try:
        import face_recognition
    except ImportError as e:
        print(f"Error: Missing required dependency: {e}")
        print("Please install with: pip install face_recognition")
        sys.exit(1)

    try:
        # Create and run GUI
        app = FaceComparisonGUI()
        app.run()
    except Exception as e:
        print(f"GUI Error: {e}")
        import traceback
        traceback.print_exc()
        sys.exit(1)
|
||||
|
||||
|
||||
# Script entry point: launch the face-comparison GUI.
if __name__ == "__main__":
    main()
|
||||
523
test_face_recognition.py
Executable file
523
test_face_recognition.py
Executable file
@ -0,0 +1,523 @@
|
||||
#!/usr/bin/env python3
|
||||
"""
|
||||
Face Recognition Comparison Test Script
|
||||
|
||||
Compares face_recognition vs deepface on a folder of photos.
|
||||
Tests accuracy and performance without modifying existing database.
|
||||
|
||||
Usage:
|
||||
python test_face_recognition.py /path/to/photos [--save-crops] [--save-matrices] [--verbose]
|
||||
|
||||
Example:
|
||||
python test_face_recognition.py demo_photos/ --save-crops --verbose
|
||||
"""
|
||||
|
||||
import os
|
||||
import sys
|
||||
import time
|
||||
import argparse
|
||||
import tempfile
|
||||
from pathlib import Path
|
||||
from typing import List, Dict, Tuple, Optional
|
||||
import numpy as np
|
||||
import pandas as pd
|
||||
from PIL import Image
|
||||
|
||||
# Face recognition libraries
|
||||
import face_recognition
|
||||
from deepface import DeepFace
|
||||
|
||||
# Supported image formats
|
||||
SUPPORTED_FORMATS = {'.jpg', '.jpeg', '.png', '.bmp', '.tiff', '.tif'}
|
||||
|
||||
|
||||
class FaceRecognitionTester:
|
||||
"""Test and compare face recognition libraries"""
|
||||
|
||||
def __init__(self, verbose: bool = False):
|
||||
self.verbose = verbose
|
||||
self.results = {
|
||||
'face_recognition': {'faces': [], 'times': [], 'encodings': []},
|
||||
'deepface': {'faces': [], 'times': [], 'encodings': []}
|
||||
}
|
||||
|
||||
def log(self, message: str, level: str = "INFO"):
|
||||
"""Print log message with timestamp"""
|
||||
if self.verbose or level == "ERROR":
|
||||
timestamp = time.strftime("%H:%M:%S")
|
||||
print(f"[{timestamp}] {level}: {message}")
|
||||
|
||||
def get_image_files(self, folder_path: str) -> List[str]:
|
||||
"""Get all supported image files from folder"""
|
||||
folder = Path(folder_path)
|
||||
if not folder.exists():
|
||||
raise FileNotFoundError(f"Folder not found: {folder_path}")
|
||||
|
||||
image_files = []
|
||||
for file_path in folder.rglob("*"):
|
||||
if file_path.is_file() and file_path.suffix.lower() in SUPPORTED_FORMATS:
|
||||
image_files.append(str(file_path))
|
||||
|
||||
self.log(f"Found {len(image_files)} image files")
|
||||
return sorted(image_files)
|
||||
|
||||
    def process_with_face_recognition(self, image_path: str) -> Dict:
        """Process image with face_recognition library.

        Uses the CNN detector (slower but more accurate than HOG) and
        returns the detected faces, their encodings, and the wall-clock
        processing time.

        Args:
            image_path: Path to the image to analyse.

        Returns:
            Dict with 'faces', 'encodings' and 'processing_time' (seconds);
            the lists are empty on error or when no faces are found.
        """
        start_time = time.time()

        try:
            # Load image
            image = face_recognition.load_image_file(image_path)

            # Detect faces using CNN model (more accurate than HOG)
            face_locations = face_recognition.face_locations(image, model="cnn")

            if not face_locations:
                return {'faces': [], 'encodings': [], 'processing_time': time.time() - start_time}

            # Get face encodings
            face_encodings = face_recognition.face_encodings(image, face_locations)

            # Convert to our format
            faces = []
            encodings = []

            for i, (location, encoding) in enumerate(zip(face_locations, face_encodings)):
                # face_recognition locations are (top, right, bottom, left)
                top, right, bottom, left = location
                face_data = {
                    'image_path': image_path,
                    'face_id': f"fr_{Path(image_path).stem}_{i}",
                    'location': location,
                    'bbox': {'top': top, 'right': right, 'bottom': bottom, 'left': left},
                    'encoding': encoding
                }
                faces.append(face_data)
                encodings.append(encoding)

            processing_time = time.time() - start_time
            self.log(f"face_recognition: Found {len(faces)} faces in {processing_time:.2f}s")

            return {
                'faces': faces,
                'encodings': encodings,
                'processing_time': processing_time
            }

        except Exception as e:
            self.log(f"face_recognition error on {image_path}: {e}", "ERROR")
            return {'faces': [], 'encodings': [], 'processing_time': time.time() - start_time}
|
||||
|
||||
    def process_with_deepface(self, image_path: str) -> Dict:
        """Process image with deepface library.

        Runs DeepFace.represent() with the ArcFace model and retinaface
        detector, returning faces, embeddings and wall-clock time.

        Args:
            image_path: Path to the image to analyse.

        Returns:
            Dict with 'faces', 'encodings' and 'processing_time' (seconds);
            the lists are empty on error or when no faces are found.
        """
        start_time = time.time()

        try:
            # Use DeepFace to detect and encode faces
            results = DeepFace.represent(
                img_path=image_path,
                model_name='ArcFace',  # Best accuracy model
                detector_backend='retinaface',  # Best detection
                enforce_detection=False,  # Don't fail if no faces
                align=True  # Face alignment for better accuracy
            )

            if not results:
                return {'faces': [], 'encodings': [], 'processing_time': time.time() - start_time}

            # Convert to our format
            faces = []
            encodings = []

            for i, result in enumerate(results):
                # Extract face region info
                # NOTE(review): the GUI implementation reads 'facial_area'
                # here instead of 'region' — the key name depends on the
                # installed DeepFace version; confirm which one applies.
                region = result.get('region', {})
                # 'location' mirrors face_recognition's (top, right, bottom, left) tuple
                face_data = {
                    'image_path': image_path,
                    'face_id': f"df_{Path(image_path).stem}_{i}",
                    'location': (region.get('y', 0), region.get('x', 0) + region.get('w', 0),
                                 region.get('y', 0) + region.get('h', 0), region.get('x', 0)),
                    'bbox': region,
                    'encoding': np.array(result['embedding'])
                }
                faces.append(face_data)
                encodings.append(np.array(result['embedding']))

            processing_time = time.time() - start_time
            self.log(f"deepface: Found {len(faces)} faces in {processing_time:.2f}s")

            return {
                'faces': faces,
                'encodings': encodings,
                'processing_time': processing_time
            }

        except Exception as e:
            self.log(f"deepface error on {image_path}: {e}", "ERROR")
            return {'faces': [], 'encodings': [], 'processing_time': time.time() - start_time}
|
||||
|
||||
def calculate_similarity_matrix(self, encodings: List[np.ndarray], method: str) -> np.ndarray:
|
||||
"""Calculate similarity matrix between all face encodings"""
|
||||
n_faces = len(encodings)
|
||||
if n_faces == 0:
|
||||
return np.array([])
|
||||
|
||||
similarity_matrix = np.zeros((n_faces, n_faces))
|
||||
|
||||
for i in range(n_faces):
|
||||
for j in range(n_faces):
|
||||
if i == j:
|
||||
similarity_matrix[i, j] = 0.0 # Same face
|
||||
else:
|
||||
if method == 'face_recognition':
|
||||
# Use face_recognition distance (lower = more similar)
|
||||
distance = face_recognition.face_distance([encodings[i]], encodings[j])[0]
|
||||
similarity_matrix[i, j] = distance
|
||||
else: # deepface
|
||||
# Use cosine distance for ArcFace embeddings
|
||||
enc1_norm = encodings[i] / np.linalg.norm(encodings[i])
|
||||
enc2_norm = encodings[j] / np.linalg.norm(encodings[j])
|
||||
cosine_sim = np.dot(enc1_norm, enc2_norm)
|
||||
cosine_distance = 1 - cosine_sim
|
||||
similarity_matrix[i, j] = cosine_distance
|
||||
|
||||
return similarity_matrix
|
||||
|
||||
def find_top_matches(self, similarity_matrix: np.ndarray, faces: List[Dict],
|
||||
method: str, top_k: int = 5) -> List[Dict]:
|
||||
"""Find top matches for each face"""
|
||||
top_matches = []
|
||||
|
||||
for i, face in enumerate(faces):
|
||||
if i >= similarity_matrix.shape[0]:
|
||||
continue
|
||||
|
||||
# Get distances to all other faces
|
||||
distances = similarity_matrix[i, :]
|
||||
|
||||
# Find top matches (excluding self)
|
||||
if method == 'face_recognition':
|
||||
# Lower distance = more similar
|
||||
sorted_indices = np.argsort(distances)
|
||||
else: # deepface
|
||||
# Lower cosine distance = more similar
|
||||
sorted_indices = np.argsort(distances)
|
||||
|
||||
matches = []
|
||||
for idx in sorted_indices[1:top_k+1]: # Skip self (index 0)
|
||||
if idx < len(faces):
|
||||
other_face = faces[idx]
|
||||
distance = distances[idx]
|
||||
|
||||
# Convert to confidence percentage for display
|
||||
if method == 'face_recognition':
|
||||
confidence = max(0, (1 - distance) * 100)
|
||||
else: # deepface
|
||||
confidence = max(0, (1 - distance) * 100)
|
||||
|
||||
matches.append({
|
||||
'face_id': other_face['face_id'],
|
||||
'image_path': other_face['image_path'],
|
||||
'distance': distance,
|
||||
'confidence': confidence
|
||||
})
|
||||
|
||||
top_matches.append({
|
||||
'query_face': face,
|
||||
'matches': matches
|
||||
})
|
||||
|
||||
return top_matches
|
||||
|
||||
    def save_face_crops(self, faces: List[Dict], output_dir: str, method: str):
        """Save face crops for manual inspection.

        Writes each face (its bbox plus a 20px margin, clamped to the
        image bounds) as a JPEG under
        <output_dir>/face_crops/<method>/<face_id>.jpg. Failures on
        individual faces are logged and skipped.

        Args:
            faces: Face dicts from either processing method.
            output_dir: Root output directory.
            method: 'face_recognition' or 'deepface' — selects which bbox
                format to read ((top, right, bottom, left) tuple vs an
                x/y/w/h dict).
        """
        crops_dir = Path(output_dir) / "face_crops" / method
        crops_dir.mkdir(parents=True, exist_ok=True)

        for face in faces:
            try:
                # Load original image
                image = Image.open(face['image_path'])

                # Extract face region (bbox layout differs per library)
                if method == 'face_recognition':
                    top, right, bottom, left = face['location']
                else:  # deepface
                    bbox = face['bbox']
                    left = bbox.get('x', 0)
                    top = bbox.get('y', 0)
                    right = left + bbox.get('w', 0)
                    bottom = top + bbox.get('h', 0)

                # Add padding (clamped so the crop never leaves the image)
                padding = 20
                left = max(0, left - padding)
                top = max(0, top - padding)
                right = min(image.width, right + padding)
                bottom = min(image.height, bottom + padding)

                # Crop and save
                face_crop = image.crop((left, top, right, bottom))
                crop_path = crops_dir / f"{face['face_id']}.jpg"
                face_crop.save(crop_path, "JPEG", quality=95)

            except Exception as e:
                # Best-effort: log and continue with the remaining faces.
                self.log(f"Error saving crop for {face['face_id']}: {e}", "ERROR")
|
||||
|
||||
def save_similarity_matrices(self, fr_matrix: np.ndarray, df_matrix: np.ndarray,
|
||||
fr_faces: List[Dict], df_faces: List[Dict], output_dir: str):
|
||||
"""Save similarity matrices as CSV files"""
|
||||
matrices_dir = Path(output_dir) / "similarity_matrices"
|
||||
matrices_dir.mkdir(parents=True, exist_ok=True)
|
||||
|
||||
# Save face_recognition matrix
|
||||
if fr_matrix.size > 0:
|
||||
fr_df = pd.DataFrame(fr_matrix,
|
||||
index=[f['face_id'] for f in fr_faces],
|
||||
columns=[f['face_id'] for f in fr_faces])
|
||||
fr_df.to_csv(matrices_dir / "face_recognition_similarity.csv")
|
||||
|
||||
# Save deepface matrix
|
||||
if df_matrix.size > 0:
|
||||
df_df = pd.DataFrame(df_matrix,
|
||||
index=[f['face_id'] for f in df_faces],
|
||||
columns=[f['face_id'] for f in df_faces])
|
||||
df_df.to_csv(matrices_dir / "deepface_similarity.csv")
|
||||
|
||||
def generate_report(self, fr_results: Dict, df_results: Dict,
                    fr_matches: List[Dict], df_matches: List[Dict],
                    output_dir: Optional[str] = None) -> str:
    """Generate a side-by-side comparison report for the two face backends.

    Args:
        fr_results: face_recognition results dict with 'faces' and 'times' lists.
        df_results: deepface results dict with 'faces' and 'times' lists.
        fr_matches: per-face match lists (face_recognition), as produced by
            find_top_matches.
        df_matches: per-face match lists (deepface).
        output_dir: if given, the report is also written to
            <output_dir>/comparison_report.txt (UTF-8).

    Returns:
        The full report as a single newline-joined string.
    """
    report_lines = []
    report_lines.append("=" * 60)
    report_lines.append("FACE RECOGNITION COMPARISON REPORT")
    report_lines.append("=" * 60)
    report_lines.append("")

    # Summary statistics
    fr_total_faces = len(fr_results['faces'])
    df_total_faces = len(df_results['faces'])
    fr_total_time = sum(fr_results['times'])
    df_total_time = sum(df_results['times'])

    report_lines.append("SUMMARY STATISTICS:")
    report_lines.append(f" face_recognition: {fr_total_faces} faces in {fr_total_time:.2f}s")
    report_lines.append(f" deepface: {df_total_faces} faces in {df_total_time:.2f}s")
    # Guard against ZeroDivisionError when no images were processed (or the
    # timer reported 0.0s for every image).
    if fr_total_time > 0:
        report_lines.append(f" Speed ratio: {df_total_time/fr_total_time:.1f}x slower (deepface)")
    else:
        report_lines.append(" Speed ratio: n/a (no face_recognition processing time recorded)")
    report_lines.append("")

    def high_confidence_matches(matches: List[Dict], threshold: float = 70.0) -> List[Dict]:
        """Flatten match lists, keeping only pairs at/above `threshold` confidence."""
        kept = []
        for match_data in matches:
            for match in match_data['matches']:
                if match['confidence'] >= threshold:
                    kept.append({
                        'query': match_data['query_face']['face_id'],
                        'match': match['face_id'],
                        'confidence': match['confidence'],
                        'query_image': match_data['query_face']['image_path'],
                        'match_image': match['image_path']
                    })
        return kept

    fr_high_conf = high_confidence_matches(fr_matches)
    df_high_conf = high_confidence_matches(df_matches)

    report_lines.append("HIGH CONFIDENCE MATCHES (≥70%):")
    report_lines.append(f" face_recognition: {len(fr_high_conf)} matches")
    report_lines.append(f" deepface: {len(df_high_conf)} matches")
    report_lines.append("")

    # Show top matches for manual inspection
    report_lines.append("TOP MATCHES FOR MANUAL INSPECTION:")
    report_lines.append("")

    def append_top_matches(title: str, matches: List[Dict]) -> None:
        """Append the first 3 query faces and their top 3 matches for one backend."""
        report_lines.append(f"{title} top matches:")
        for match_data in matches[:3]:  # Show first 3 faces
            query_face = match_data['query_face']
            report_lines.append(f" Query: {query_face['face_id']} ({Path(query_face['image_path']).name})")
            for match in match_data['matches'][:3]:  # Top 3 matches
                report_lines.append(f" → {match['face_id']}: {match['confidence']:.1f}% ({Path(match['image_path']).name})")
            report_lines.append("")

    append_top_matches("face_recognition", fr_matches)
    append_top_matches("deepface", df_matches)

    # Recommendations — heuristic flags for the human reviewer.
    report_lines.append("RECOMMENDATIONS:")
    if len(fr_high_conf) > len(df_high_conf) * 1.5:
        report_lines.append(" ⚠️ face_recognition shows significantly more high-confidence matches")
        report_lines.append(" This may indicate more false positives")
    if df_total_time > fr_total_time * 3:
        report_lines.append(" ⚠️ deepface is significantly slower")
        report_lines.append(" Consider GPU acceleration or faster models")
    if df_total_faces > fr_total_faces:
        report_lines.append(" ✅ deepface detected more faces")
        report_lines.append(" Better face detection in difficult conditions")

    report_lines.append("")
    report_lines.append("=" * 60)

    report_text = "\n".join(report_lines)

    # Save report if output directory specified
    if output_dir:
        report_path = Path(output_dir) / "comparison_report.txt"
        # Explicit UTF-8: the report contains non-ASCII symbols (≥, ⚠️, ✅);
        # the platform default encoding (e.g. cp1252 on Windows) would raise
        # UnicodeEncodeError.
        with open(report_path, 'w', encoding='utf-8') as f:
            f.write(report_text)
        self.log(f"Report saved to: {report_path}")

    return report_text
def run_test(self, folder_path: str, save_crops: bool = False,
             save_matrices: bool = False) -> Dict:
    """Run the complete face recognition comparison test.

    Detects faces in every image under `folder_path` with both backends,
    computes similarity matrices and top matches, optionally saves crops
    and matrices, then prints and returns the comparison results.
    """
    self.log(f"Starting face recognition test on: {folder_path}")

    # Collect the photos to test; an empty folder is a hard error.
    image_files = self.get_image_files(folder_path)
    if not image_files:
        raise ValueError("No image files found in the specified folder")

    # Create the results directory only when artifacts were requested.
    output_dir = None
    if save_crops or save_matrices:
        output_dir = Path(folder_path).parent / "test_results"
        output_dir.mkdir(exist_ok=True)

    # Run both detectors over every image, accumulating faces/times/encodings
    # into self.results — face_recognition first, then deepface.
    backends = (
        ('face_recognition', self.process_with_face_recognition),
        ('deepface', self.process_with_deepface),
    )
    for method, processor in backends:
        self.log(f"Processing images with {method}...")
        bucket = self.results[method]
        for image_path in image_files:
            outcome = processor(image_path)
            bucket['faces'].extend(outcome['faces'])
            bucket['times'].append(outcome['processing_time'])
            bucket['encodings'].extend(outcome['encodings'])

    # Pairwise similarity between all detected faces, per backend.
    self.log("Calculating similarity matrices...")
    fr_matrix = self.calculate_similarity_matrix(
        self.results['face_recognition']['encodings'], 'face_recognition')
    df_matrix = self.calculate_similarity_matrix(
        self.results['deepface']['encodings'], 'deepface')

    # Rank the best matches for each face.
    fr_matches = self.find_top_matches(
        fr_matrix, self.results['face_recognition']['faces'], 'face_recognition')
    df_matches = self.find_top_matches(
        df_matrix, self.results['deepface']['faces'], 'deepface')

    # Optional artifacts for manual inspection.
    if save_crops and output_dir:
        self.log("Saving face crops...")
        for method in ('face_recognition', 'deepface'):
            self.save_face_crops(self.results[method]['faces'], str(output_dir), method)

    if save_matrices and output_dir:
        self.log("Saving similarity matrices...")
        self.save_similarity_matrices(
            fr_matrix, df_matrix,
            self.results['face_recognition']['faces'],
            self.results['deepface']['faces'],
            str(output_dir),
        )

    # Build, display, and (when output_dir is set) persist the report.
    report = self.generate_report(
        self.results['face_recognition'], self.results['deepface'],
        fr_matches, df_matches, str(output_dir) if output_dir else None,
    )
    print(report)

    return {
        'face_recognition': {
            'faces': self.results['face_recognition']['faces'],
            'matches': fr_matches,
            'matrix': fr_matrix,
        },
        'deepface': {
            'faces': self.results['deepface']['faces'],
            'matches': df_matches,
            'matrix': df_matrix,
        },
    }
def main():
    """Main CLI entry point"""
    parser = argparse.ArgumentParser(
        description="Compare face_recognition vs deepface on a folder of photos",
        formatter_class=argparse.RawDescriptionHelpFormatter,
        epilog="""
Examples:
  python test_face_recognition.py demo_photos/
  python test_face_recognition.py demo_photos/ --save-crops --verbose
  python test_face_recognition.py demo_photos/ --save-matrices --save-crops
"""
    )

    parser.add_argument('folder', help='Path to folder containing photos to test')
    # All remaining options are boolean flags — register them table-driven.
    flag_specs = (
        (('--save-crops',), 'Save face crops for manual inspection'),
        (('--save-matrices',), 'Save similarity matrices as CSV files'),
        (('--verbose', '-v'), 'Enable verbose logging'),
    )
    for names, help_text in flag_specs:
        parser.add_argument(*names, action='store_true', help=help_text)

    args = parser.parse_args()

    # Guard clause: refuse to run against a folder that does not exist.
    if not os.path.exists(args.folder):
        print(f"Error: Folder not found: {args.folder}")
        sys.exit(1)

    # Probe both heavy dependencies up front so the user gets one actionable
    # message instead of a mid-run ImportError.
    try:
        import face_recognition  # noqa: F401 -- availability check only
        from deepface import DeepFace  # noqa: F401 -- availability check only
    except ImportError as e:
        print(f"Error: Missing required dependency: {e}")
        print("Please install with: pip install face_recognition deepface")
        sys.exit(1)

    try:
        runner = FaceRecognitionTester(verbose=args.verbose)
        runner.run_test(
            args.folder,
            save_crops=args.save_crops,
            save_matrices=args.save_matrices,
        )

        print("\n✅ Test completed successfully!")
        if args.save_crops or args.save_matrices:
            print(f"📁 Results saved to: {Path(args.folder).parent / 'test_results'}")

    except Exception as e:
        print(f"❌ Test failed: {e}")
        if args.verbose:
            import traceback
            traceback.print_exc()
        sys.exit(1)
|
||||
# Standard script entry point: run the CLI only when this module is executed
# directly, not when it is imported.
if __name__ == "__main__":
    main()
|
||||
Loading…
x
Reference in New Issue
Block a user