PunimTag Web Application - Major Feature Release #1
@ -184,7 +184,7 @@ jobs:
|
||||
run: |
|
||||
apt-get update && apt-get install -y postgresql-client
|
||||
pip install --no-cache-dir -r requirements.txt
|
||||
pip install --no-cache-dir pytest httpx
|
||||
pip install --no-cache-dir pytest httpx pytest-cov
|
||||
|
||||
- name: Audit Python dependencies
|
||||
run: |
|
||||
@ -192,10 +192,19 @@ jobs:
|
||||
pip-audit --desc || true
|
||||
continue-on-error: true
|
||||
|
||||
- name: Create test databases
|
||||
run: |
|
||||
export PGPASSWORD=postgres
|
||||
psql -h postgres -U postgres -c "CREATE DATABASE punimtag_test;" || true
|
||||
psql -h postgres -U postgres -c "CREATE DATABASE punimtag_auth_test;" || true
|
||||
echo "✅ Test databases ready"
|
||||
|
||||
- name: Initialize database schemas
|
||||
run: |
|
||||
export PYTHONPATH=$(pwd)
|
||||
echo "🗃️ Initializing main database schema..."
|
||||
python -c "from backend.db.models import Base; from backend.db.session import engine; Base.metadata.create_all(bind=engine)"
|
||||
echo "✅ Main database schema initialized"
|
||||
python << 'EOF'
|
||||
# Initialize auth database schema without importing worker (avoids DeepFace/TensorFlow imports)
|
||||
from backend.db.session import auth_engine
|
||||
@ -365,8 +374,20 @@ jobs:
|
||||
- name: Run backend tests
|
||||
run: |
|
||||
export PYTHONPATH=$(pwd)
|
||||
python -m pytest tests/ -v || true
|
||||
echo "🧪 Running all backend API tests..."
|
||||
python -m pytest tests/ -v --tb=short --cov=backend --cov-report=term-missing --cov-report=xml --junit-xml=test-results.xml || true
|
||||
continue-on-error: true
|
||||
|
||||
- name: Test results summary
|
||||
if: always()
|
||||
run: |
|
||||
echo "## 📊 Test Results Summary" >> $GITHUB_STEP_SUMMARY || true
|
||||
echo "" >> $GITHUB_STEP_SUMMARY || true
|
||||
if [ -f test-results.xml ]; then
|
||||
echo "✅ Test results generated (JUnit XML)" >> $GITHUB_STEP_SUMMARY || true
|
||||
fi
|
||||
echo "" >> $GITHUB_STEP_SUMMARY || true
|
||||
echo "Run \`pytest tests/ -v\` locally to see detailed results." >> $GITHUB_STEP_SUMMARY || true
|
||||
|
||||
build:
|
||||
needs: skip-ci-check
|
||||
|
||||
@ -9,6 +9,10 @@ python-jose[cryptography]==3.3.0
|
||||
python-multipart==0.0.9
|
||||
python-dotenv==1.0.0
|
||||
bcrypt==4.1.2
|
||||
# Testing Dependencies
|
||||
pytest>=7.4.0
|
||||
httpx>=0.24.0
|
||||
pytest-cov>=4.1.0
|
||||
# PunimTag Dependencies - DeepFace Implementation
|
||||
# Core Dependencies
|
||||
numpy>=1.21.0
|
||||
|
||||
607 lines added — tests/API_TEST_PLAN.md (new file)
@ -0,0 +1,607 @@
|
||||
# Backend API Test Plan
|
||||
|
||||
This document outlines comprehensive test cases for all backend API endpoints in PunimTag.
|
||||
|
||||
## Test Structure Overview
|
||||
|
||||
The test suite uses:
|
||||
- **pytest** - Testing framework
|
||||
- **httpx/TestClient** - For making test requests to FastAPI
|
||||
- **pytest-fixtures** - For database setup/teardown
|
||||
- **Test database** - Separate PostgreSQL database for testing
|
||||
|
||||
## Test Files Organization
|
||||
|
||||
### 1. Authentication API Tests (`test_api_auth.py`)
|
||||
|
||||
#### Login Endpoints
|
||||
- `test_login_success_with_valid_credentials` - Verify successful login with valid username/password
|
||||
- `test_login_failure_with_invalid_credentials` - Verify 401 with invalid credentials
|
||||
- `test_login_with_inactive_user` - Verify 401 when user account is inactive
|
||||
- `test_login_without_password_hash` - Verify error when password_hash is missing
|
||||
- `test_login_fallback_to_hardcoded_admin` - Verify fallback to admin/admin works
|
||||
- `test_login_updates_last_login` - Verify last_login timestamp is updated
|
||||
|
||||
#### Token Refresh Endpoints
|
||||
- `test_refresh_token_success` - Verify successful token refresh
|
||||
- `test_refresh_token_with_invalid_token` - Verify 401 with invalid refresh token
|
||||
- `test_refresh_token_with_access_token` - Verify 401 when using access token instead of refresh token
|
||||
- `test_refresh_token_expired` - Verify 401 with expired refresh token
|
||||
|
||||
#### Current User Endpoints
|
||||
- `test_get_current_user_info_authenticated` - Verify user info retrieval with valid token
|
||||
- `test_get_current_user_info_unauthenticated` - Verify 401 without token
|
||||
- `test_get_current_user_info_bootstrap_admin` - Verify admin bootstrap when no admins exist
|
||||
- `test_get_current_user_info_role_permissions` - Verify role and permissions are returned
|
||||
|
||||
#### Password Change Endpoints
|
||||
- `test_change_password_success` - Verify successful password change
|
||||
- `test_change_password_with_wrong_current_password` - Verify 401 with incorrect current password
|
||||
- `test_change_password_clears_password_change_required_flag` - Verify flag is cleared after change
|
||||
- `test_change_password_user_not_found` - Verify 404 when user doesn't exist
|
||||
|
||||
#### Authentication Middleware
|
||||
- `test_get_current_user_without_token` - Verify 401 without Authorization header
|
||||
- `test_get_current_user_with_expired_token` - Verify 401 with expired JWT
|
||||
- `test_get_current_user_with_invalid_token_format` - Verify 401 with malformed token
|
||||
- `test_get_current_user_with_id_creates_user` - Verify user creation in bootstrap scenario
|
||||
|
||||
---
|
||||
|
||||
### 2. Photos API Tests (`test_api_photos.py`)
|
||||
|
||||
#### Photo Search Endpoints
|
||||
- `test_search_photos_by_name_success` - Verify search by person name works
|
||||
- `test_search_photos_by_name_without_person_name` - Verify 400 when person_name missing
|
||||
- `test_search_photos_by_name_with_pagination` - Verify pagination works correctly
|
||||
- `test_search_photos_by_date_success` - Verify date range search
|
||||
- `test_search_photos_by_date_without_dates` - Verify 400 when both dates missing
|
||||
- `test_search_photos_by_date_from_only` - Verify search with only date_from
|
||||
- `test_search_photos_by_date_to_only` - Verify search with only date_to
|
||||
- `test_search_photos_by_tags_success` - Verify tag search works
|
||||
- `test_search_photos_by_tags_match_all` - Verify match_all parameter
|
||||
- `test_search_photos_by_tags_match_any` - Verify behavior when `match_all` is false (photos matching any of the given tags)
|
||||
- `test_search_photos_by_tags_without_tags` - Verify 400 when tag_names missing
|
||||
- `test_search_photos_no_faces` - Verify photos without faces search
|
||||
- `test_search_photos_no_tags` - Verify photos without tags search
|
||||
- `test_search_photos_processed` - Verify processed photos search
|
||||
- `test_search_photos_unprocessed` - Verify unprocessed photos search
|
||||
- `test_search_photos_favorites_authenticated` - Verify favorites search with auth
|
||||
- `test_search_photos_favorites_unauthenticated` - Verify 401 without auth
|
||||
- `test_search_photos_with_pagination` - Verify page and page_size parameters
|
||||
- `test_search_photos_with_invalid_search_type` - Verify 400 with invalid search_type
|
||||
- `test_search_photos_with_media_type_filter` - Verify image/video filtering
|
||||
- `test_search_photos_with_folder_path_filter` - Verify folder path filtering
|
||||
- `test_search_photos_with_date_filters_as_additional_filters` - Verify date filters in non-date searches
|
||||
- `test_search_photos_returns_favorite_status` - Verify is_favorite field in results
|
||||
|
||||
#### Photo Import Endpoints
|
||||
- `test_import_photos_success` - Verify photo import job is queued
|
||||
- `test_import_photos_with_invalid_folder_path` - Verify 400 with invalid path
|
||||
- `test_import_photos_with_nonexistent_folder` - Verify 400 when folder doesn't exist
|
||||
- `test_import_photos_recursive` - Verify recursive import option
|
||||
- `test_import_photos_returns_job_id` - Verify job_id is returned
|
||||
- `test_import_photos_returns_estimated_count` - Verify estimated_photos count
|
||||
|
||||
#### Photo Upload Endpoints
|
||||
- `test_upload_photos_success` - Verify single file upload
|
||||
- `test_upload_photos_multiple_files` - Verify multiple file upload
|
||||
- `test_upload_photos_duplicate_handling` - Verify duplicate detection
|
||||
- `test_upload_photos_invalid_file_type` - Verify error handling for invalid files
|
||||
- `test_upload_photos_returns_added_existing_counts` - Verify response counts
|
||||
|
||||
#### Photo Retrieval Endpoints
|
||||
- `test_get_photo_by_id_success` - Verify photo retrieval by ID
|
||||
- `test_get_photo_by_id_not_found` - Verify 404 for non-existent photo
|
||||
- `test_get_photo_image_success` - Verify image file serving
|
||||
- `test_get_photo_image_not_found` - Verify 404 when photo doesn't exist
|
||||
- `test_get_photo_image_file_missing` - Verify 404 when file is missing
|
||||
- `test_get_photo_image_content_type` - Verify correct Content-Type header
|
||||
- `test_get_photo_image_cache_headers` - Verify cache headers are set
|
||||
|
||||
#### Photo Favorites Endpoints
|
||||
- `test_toggle_favorite_add` - Verify adding favorite
|
||||
- `test_toggle_favorite_remove` - Verify removing favorite
|
||||
- `test_toggle_favorite_unauthenticated` - Verify 401 without auth
|
||||
- `test_toggle_favorite_photo_not_found` - Verify 404 for non-existent photo
|
||||
- `test_check_favorite_true` - Verify check returns true for favorited photo
|
||||
- `test_check_favorite_false` - Verify check returns false for non-favorited photo
|
||||
- `test_bulk_add_favorites_success` - Verify bulk add operation
|
||||
- `test_bulk_add_favorites_already_favorites` - Verify handling of already-favorited photos
|
||||
- `test_bulk_add_favorites_with_missing_photos` - Verify 404 with missing photo IDs
|
||||
- `test_bulk_remove_favorites_success` - Verify bulk remove operation
|
||||
- `test_bulk_remove_favorites_not_favorites` - Verify handling of non-favorited photos
|
||||
|
||||
#### Photo Deletion Endpoints
|
||||
- `test_bulk_delete_photos_success` - Verify bulk delete (admin only)
|
||||
- `test_bulk_delete_photos_non_admin` - Verify 403 for non-admin users
|
||||
- `test_bulk_delete_photos_with_missing_ids` - Verify handling of missing IDs
|
||||
- `test_bulk_delete_photos_cascades_to_faces_tags` - Verify cascade deletion
|
||||
- `test_bulk_delete_photos_empty_list` - Verify 400 with empty photo_ids
|
||||
|
||||
#### Photo Folder Operations
|
||||
- `test_browse_folder_success` - Verify folder picker works (if tkinter available)
|
||||
- `test_browse_folder_no_display` - Verify graceful failure without display
|
||||
- `test_browse_folder_cancelled` - Verify handling when user cancels
|
||||
- `test_open_photo_folder_success` - Verify folder opening works
|
||||
- `test_open_photo_folder_photo_not_found` - Verify 404 for non-existent photo
|
||||
- `test_open_photo_folder_file_missing` - Verify 404 when file is missing
|
||||
|
||||
---
|
||||
|
||||
### 3. People API Tests (`test_api_people.py`)
|
||||
|
||||
#### People Listing Endpoints
|
||||
- `test_list_people_success` - Verify people list retrieval
|
||||
- `test_list_people_with_last_name_filter` - Verify last name filtering
|
||||
- `test_list_people_case_insensitive_filter` - Verify case-insensitive search
|
||||
- `test_list_people_with_faces_success` - Verify people with face counts
|
||||
- `test_list_people_with_faces_includes_zero_counts` - Verify zero counts included
|
||||
- `test_list_people_with_faces_last_name_filter` - Verify filtering with faces
|
||||
- `test_list_people_with_faces_maiden_name_filter` - Verify maiden name filtering
|
||||
- `test_list_people_sorted_by_name` - Verify sorting by last_name, first_name
|
||||
|
||||
#### People CRUD Endpoints
|
||||
- `test_create_person_success` - Verify person creation
|
||||
- `test_create_person_with_middle_name` - Verify optional middle_name
|
||||
- `test_create_person_with_maiden_name` - Verify optional maiden_name
|
||||
- `test_create_person_with_date_of_birth` - Verify date_of_birth handling
|
||||
- `test_create_person_strips_whitespace` - Verify name trimming
|
||||
- `test_get_person_by_id_success` - Verify person retrieval
|
||||
- `test_get_person_by_id_not_found` - Verify 404 for non-existent person
|
||||
- `test_update_person_success` - Verify person update
|
||||
- `test_update_person_not_found` - Verify 404 when updating non-existent person
|
||||
- `test_update_person_strips_whitespace` - Verify whitespace handling
|
||||
- `test_delete_person_success` - Verify person deletion
|
||||
- `test_delete_person_cascades_to_faces_and_encodings` - Verify cascade behavior
|
||||
- `test_delete_person_cascades_to_video_linkages` - Verify video linkage cleanup
|
||||
- `test_delete_person_not_found` - Verify 404 for non-existent person
|
||||
|
||||
#### People Faces Endpoints
|
||||
- `test_get_person_faces_success` - Verify faces retrieval for person
|
||||
- `test_get_person_faces_no_faces` - Verify empty list when no faces
|
||||
- `test_get_person_faces_person_not_found` - Verify 404 for non-existent person
|
||||
- `test_get_person_faces_sorted_by_filename` - Verify sorting
|
||||
- `test_get_person_videos_success` - Verify videos linked to person
|
||||
- `test_get_person_videos_no_videos` - Verify empty list when no videos
|
||||
- `test_get_person_videos_person_not_found` - Verify 404 handling
|
||||
|
||||
#### People Match Acceptance Endpoints
|
||||
- `test_accept_matches_success` - Verify accepting auto-match matches
|
||||
- `test_accept_matches_tracks_user_id` - Verify user tracking
|
||||
- `test_accept_matches_person_not_found` - Verify 404 for non-existent person
|
||||
- `test_accept_matches_face_not_found` - Verify handling of missing faces
|
||||
- `test_accept_matches_creates_person_encodings` - Verify encoding creation
|
||||
- `test_accept_matches_updates_existing_encodings` - Verify encoding updates
|
||||
|
||||
---
|
||||
|
||||
### 4. Faces API Tests (`test_api_faces.py`)
|
||||
|
||||
#### Face Processing Endpoints
|
||||
- `test_process_faces_success` - Verify face processing job queued
|
||||
- `test_process_faces_redis_unavailable` - Verify 503 when Redis unavailable
|
||||
- `test_process_faces_with_custom_detector` - Verify custom detector_backend
|
||||
- `test_process_faces_with_custom_model` - Verify custom model_name
|
||||
- `test_process_faces_with_batch_size` - Verify batch_size parameter
|
||||
- `test_process_faces_returns_job_id` - Verify job_id in response
|
||||
|
||||
#### Unidentified Faces Endpoints
|
||||
- `test_get_unidentified_faces_success` - Verify unidentified faces list
|
||||
- `test_get_unidentified_faces_with_pagination` - Verify pagination
|
||||
- `test_get_unidentified_faces_with_quality_filter` - Verify min_quality filter
|
||||
- `test_get_unidentified_faces_with_date_filters` - Verify date filtering
|
||||
- `test_get_unidentified_faces_with_tag_filters` - Verify tag filtering
|
||||
- `test_get_unidentified_faces_with_photo_id_filter` - Verify photo ID filtering
|
||||
- `test_get_unidentified_faces_include_excluded` - Verify include_excluded parameter
|
||||
- `test_get_unidentified_faces_sort_by_quality` - Verify sorting by quality
|
||||
- `test_get_unidentified_faces_sort_by_date` - Verify sorting by date
|
||||
- `test_get_unidentified_faces_invalid_date_format` - Verify date validation
|
||||
- `test_get_unidentified_faces_match_all_tags` - Verify match_all parameter
|
||||
|
||||
#### Similar Faces Endpoints
|
||||
- `test_get_similar_faces_success` - Verify similar faces retrieval
|
||||
- `test_get_similar_faces_include_excluded` - Verify include_excluded parameter
|
||||
- `test_get_similar_faces_face_not_found` - Verify 404 for non-existent face
|
||||
- `test_get_similar_faces_returns_similarity_scores` - Verify similarity in response
|
||||
- `test_batch_similarity_success` - Verify batch similarity calculation
|
||||
- `test_batch_similarity_with_min_confidence` - Verify min_confidence filter
|
||||
- `test_batch_similarity_empty_list` - Verify handling of empty face_ids
|
||||
- `test_batch_similarity_invalid_face_ids` - Verify error handling
|
||||
|
||||
#### Face Identification Endpoints
|
||||
- `test_identify_face_with_existing_person` - Verify identification with existing person
|
||||
- `test_identify_face_create_new_person` - Verify person creation during identification
|
||||
- `test_identify_face_with_additional_faces` - Verify batch identification
|
||||
- `test_identify_face_face_not_found` - Verify 404 for non-existent face
|
||||
- `test_identify_face_person_not_found` - Verify 400 when person_id invalid
|
||||
- `test_identify_face_tracks_user_id` - Verify user tracking
|
||||
- `test_identify_face_creates_person_encodings` - Verify encoding creation
|
||||
- `test_identify_face_requires_name_for_new_person` - Verify validation
|
||||
|
||||
#### Face Crop Endpoint
|
||||
- `test_get_face_crop_success` - Verify face crop image generation
|
||||
- `test_get_face_crop_face_not_found` - Verify 404 for non-existent face
|
||||
- `test_get_face_crop_photo_file_missing` - Verify 404 when file missing
|
||||
- `test_get_face_crop_invalid_location` - Verify 422 for invalid location
|
||||
- `test_get_face_crop_exif_orientation_handling` - Verify EXIF correction
|
||||
- `test_get_face_crop_resizes_small_faces` - Verify resizing for small faces
|
||||
- `test_get_face_crop_content_type` - Verify correct Content-Type
|
||||
|
||||
#### Face Exclusion Endpoints
|
||||
- `test_toggle_face_excluded_true` - Verify excluding face
|
||||
- `test_toggle_face_excluded_false` - Verify including face
|
||||
- `test_toggle_face_excluded_face_not_found` - Verify 404 handling
|
||||
|
||||
#### Face Unmatch Endpoints
|
||||
- `test_unmatch_face_success` - Verify face unmatching
|
||||
- `test_unmatch_face_already_unmatched` - Verify 400 when already unmatched
|
||||
- `test_unmatch_face_deletes_person_encodings` - Verify encoding cleanup
|
||||
- `test_batch_unmatch_faces_success` - Verify batch unmatch
|
||||
- `test_batch_unmatch_faces_none_matched` - Verify 400 when none matched
|
||||
- `test_batch_unmatch_faces_some_missing` - Verify 404 with missing faces
|
||||
|
||||
#### Auto-Match Endpoints
|
||||
- `test_auto_match_faces_success` - Verify auto-match process
|
||||
- `test_auto_match_faces_with_tolerance` - Verify tolerance parameter
|
||||
- `test_auto_match_faces_auto_accept_enabled` - Verify auto-accept functionality
|
||||
- `test_auto_match_faces_auto_accept_with_threshold` - Verify threshold filtering
|
||||
- `test_auto_match_faces_auto_accept_filters_by_quality` - Verify quality filtering
|
||||
- `test_auto_match_faces_auto_accept_filters_by_pose` - Verify pose filtering
|
||||
- `test_get_auto_match_people_success` - Verify people list for auto-match
|
||||
- `test_get_auto_match_people_filter_frontal_only` - Verify frontal filter
|
||||
- `test_get_auto_match_person_matches_success` - Verify person matches retrieval
|
||||
- `test_get_auto_match_person_matches_person_not_found` - Verify 404 handling
|
||||
|
||||
#### Face Maintenance Endpoints
|
||||
- `test_list_all_faces_success` - Verify all faces listing
|
||||
- `test_list_all_faces_with_filters` - Verify filtering options
|
||||
- `test_list_all_faces_pagination` - Verify pagination
|
||||
- `test_list_all_faces_excluded_filter` - Verify excluded status filter
|
||||
- `test_list_all_faces_identified_filter` - Verify identified status filter
|
||||
- `test_delete_faces_success` - Verify face deletion
|
||||
- `test_delete_faces_with_missing_ids` - Verify 404 with missing IDs
|
||||
- `test_delete_faces_deletes_person_encodings` - Verify encoding cleanup
|
||||
- `test_delete_faces_empty_list` - Verify 400 with empty list
|
||||
|
||||
---
|
||||
|
||||
### 5. Tags API Tests (`test_api_tags.py`)
|
||||
|
||||
#### Tag Listing Endpoints
|
||||
- `test_get_tags_success` - Verify tags list retrieval
|
||||
- `test_get_tags_empty_list` - Verify empty list when no tags
|
||||
- `test_get_tags_sorted` - Verify sorting behavior
|
||||
|
||||
#### Tag CRUD Endpoints
|
||||
- `test_create_tag_success` - Verify tag creation
|
||||
- `test_create_tag_duplicate` - Verify returns existing tag if duplicate
|
||||
- `test_create_tag_strips_whitespace` - Verify whitespace handling
|
||||
- `test_update_tag_success` - Verify tag update
|
||||
- `test_update_tag_not_found` - Verify 404 for non-existent tag
|
||||
- `test_delete_tag_success` - Verify tag deletion
|
||||
- `test_delete_tag_with_photos` - Verify cascade or error handling
|
||||
- `test_delete_tag_not_found` - Verify 404 handling
|
||||
|
||||
#### Photo-Tag Operations
|
||||
- `test_add_tags_to_photos_success` - Verify adding tags to photos
|
||||
- `test_add_tags_to_photos_empty_photo_ids` - Verify 400 with empty photo_ids
|
||||
- `test_add_tags_to_photos_empty_tag_names` - Verify 400 with empty tag_names
|
||||
- `test_add_tags_to_photos_creates_missing_tags` - Verify auto-creation
|
||||
- `test_remove_tags_from_photos_success` - Verify tag removal
|
||||
- `test_get_photo_tags_success` - Verify photo tags retrieval
|
||||
- `test_get_photo_tags_empty` - Verify empty list for untagged photo
|
||||
- `test_get_photos_with_tags_success` - Verify photos with tags query
|
||||
- `test_get_photos_with_tags_multiple_tags` - Verify multiple tag filtering
|
||||
- `test_get_photos_with_tags_match_all` - Verify match_all behavior
|
||||
|
||||
---
|
||||
|
||||
### 6. Users API Tests (`test_api_users.py`)
|
||||
|
||||
#### User Listing Endpoints
|
||||
- `test_list_users_success` - Verify users list (admin only)
|
||||
- `test_list_users_non_admin` - Verify 403 for non-admin users
|
||||
- `test_list_users_with_pagination` - Verify pagination
|
||||
- `test_list_users_with_search_filter` - Verify search functionality
|
||||
- `test_list_users_includes_role_info` - Verify role information
|
||||
|
||||
#### User CRUD Endpoints
|
||||
- `test_create_user_success` - Verify user creation (admin only)
|
||||
- `test_create_user_duplicate_email` - Verify 400 with duplicate email
|
||||
- `test_create_user_duplicate_username` - Verify 400 with duplicate username
|
||||
- `test_create_user_with_role` - Verify role assignment
|
||||
- `test_create_user_creates_auth_user` - Verify auth database sync
|
||||
- `test_create_user_password_validation` - Verify password requirements
|
||||
- `test_get_user_by_id_success` - Verify user retrieval
|
||||
- `test_get_user_by_id_not_found` - Verify 404 for non-existent user
|
||||
- `test_update_user_success` - Verify user update
|
||||
- `test_update_user_role_change` - Verify role updates
|
||||
- `test_update_user_email_conflict` - Verify email uniqueness
|
||||
- `test_delete_user_success` - Verify user deletion
|
||||
- `test_delete_user_with_linked_data` - Verify graceful handling
|
||||
- `test_delete_user_cascades_to_auth_database` - Verify auth DB cleanup
|
||||
- `test_delete_user_non_admin` - Verify 403 for non-admin
|
||||
|
||||
#### User Activation Endpoints
|
||||
- `test_activate_user_success` - Verify user activation
|
||||
- `test_deactivate_user_success` - Verify user deactivation
|
||||
- `test_activate_user_not_found` - Verify 404 handling
|
||||
|
||||
---
|
||||
|
||||
### 7. Jobs API Tests (`test_api_jobs.py`)
|
||||
|
||||
#### Job Status Endpoints
|
||||
- `test_get_job_status_queued` - Verify queued job status
|
||||
- `test_get_job_status_started` - Verify started job status
|
||||
- `test_get_job_status_progress` - Verify progress status with metadata
|
||||
- `test_get_job_status_success` - Verify completed job status
|
||||
- `test_get_job_status_failed` - Verify failed job status
|
||||
- `test_get_job_status_cancelled` - Verify cancelled job status
|
||||
- `test_get_job_status_not_found` - Verify 404 for non-existent job
|
||||
- `test_get_job_status_includes_timestamps` - Verify timestamp fields
|
||||
|
||||
#### Job Streaming Endpoints
|
||||
- `test_stream_job_progress_success` - Verify SSE stream works
|
||||
- `test_stream_job_progress_updates` - Verify progress updates in stream
|
||||
- `test_stream_job_progress_completion` - Verify completion event
|
||||
- `test_stream_job_progress_not_found` - Verify 404 handling
|
||||
- `test_stream_job_progress_sse_format` - Verify SSE format compliance
|
||||
|
||||
---
|
||||
|
||||
### 8. Health & Version API Tests (`test_api_health.py`)
|
||||
|
||||
#### Health Check Endpoints
|
||||
- `test_health_check_success` - Verify health endpoint returns 200
|
||||
- `test_health_check_database_connection` - Verify DB connection check
|
||||
- `test_version_endpoint_success` - Verify version information
|
||||
- `test_version_endpoint_includes_app_version` - Verify version format
|
||||
- `test_metrics_endpoint_success` - Verify metrics endpoint returns 200 if implemented; skip the test otherwise
|
||||
|
||||
---
|
||||
|
||||
### 9. Integration Tests (`test_api_integration.py`)
|
||||
|
||||
#### End-to-End Workflows
|
||||
- `test_photo_import_to_face_processing_to_identification_workflow` - Full photo import workflow
|
||||
- `test_create_person_identify_faces_auto_match_workflow` - Person creation to auto-match
|
||||
- `test_tag_photos_search_by_tags_workflow` - Tagging and search workflow
|
||||
- `test_favorite_photos_search_favorites_workflow` - Favorites workflow
|
||||
- `test_user_creation_login_role_permissions_workflow` - User management workflow
|
||||
- `test_bulk_operations_workflow` - Multiple bulk operations in sequence
|
||||
- `test_concurrent_requests_workflow` - Verify concurrent request handling
|
||||
|
||||
---
|
||||
|
||||
### 10. Error Handling & Edge Cases (`test_api_errors.py`)
|
||||
|
||||
#### Error Response Tests
|
||||
- `test_404_not_found_responses` - Verify 404 responses across endpoints
|
||||
- `test_400_bad_request_validation` - Verify validation error responses
|
||||
- `test_401_unauthorized_responses` - Verify authentication errors
|
||||
- `test_403_forbidden_responses` - Verify authorization errors
|
||||
- `test_422_unprocessable_entity` - Verify unprocessable entity errors
|
||||
- `test_500_internal_server_error_handling` - Verify error handling
|
||||
- `test_database_connection_failure_handling` - Verify DB failure handling
|
||||
- `test_redis_connection_failure_handling` - Verify Redis failure handling
|
||||
- `test_file_operation_errors` - Verify file operation error handling
|
||||
- `test_concurrent_request_handling` - Verify concurrent operations
|
||||
- `test_large_payload_handling` - Verify handling of large requests
|
||||
- `test_sql_injection_attempts` - Verify SQL injection protection
|
||||
- `test_xss_attempts` - Verify XSS protection
|
||||
- `test_path_traversal_attempts` - Verify path traversal protection
|
||||
|
||||
---
|
||||
|
||||
## Test Infrastructure Setup
|
||||
|
||||
### Test Configuration (`conftest.py`)
|
||||
|
||||
The test suite requires a `conftest.py` file with the following fixtures:
|
||||
|
||||
```python
|
||||
# tests/conftest.py
|
||||
import pytest
|
||||
from fastapi.testclient import TestClient
|
||||
from sqlalchemy import create_engine
|
||||
from sqlalchemy.orm import sessionmaker
|
||||
from backend.app import create_app
|
||||
from backend.db.base import Base
|
||||
from backend.db.session import get_db
|
||||
|
||||
# Test database URL (use separate test database)
|
||||
TEST_DATABASE_URL = "postgresql+psycopg2://postgres:postgres@localhost:5432/punimtag_test"
|
||||
|
||||
@pytest.fixture(scope="session")
|
||||
def test_db_engine():
|
||||
"""Create test database engine."""
|
||||
engine = create_engine(TEST_DATABASE_URL)
|
||||
Base.metadata.create_all(bind=engine)
|
||||
yield engine
|
||||
Base.metadata.drop_all(bind=engine)
|
||||
|
||||
@pytest.fixture(scope="function")
|
||||
def test_db_session(test_db_engine):
|
||||
"""Create a test database session with transaction rollback."""
|
||||
connection = test_db_engine.connect()
|
||||
transaction = connection.begin()
|
||||
session = sessionmaker(bind=connection)()
|
||||
|
||||
yield session
|
||||
|
||||
session.close()
|
||||
transaction.rollback()
|
||||
connection.close()
|
||||
|
||||
@pytest.fixture(scope="function")
|
||||
def test_client(test_db_session):
|
||||
"""Create a test client with test database."""
|
||||
app = create_app()
|
||||
|
||||
def override_get_db():
|
||||
yield test_db_session
|
||||
|
||||
app.dependency_overrides[get_db] = override_get_db
|
||||
|
||||
with TestClient(app) as client:
|
||||
yield client
|
||||
|
||||
app.dependency_overrides.clear()
|
||||
|
||||
@pytest.fixture
|
||||
def auth_token(test_client):
|
||||
"""Get authentication token for test user."""
|
||||
response = test_client.post(
|
||||
"/api/v1/auth/login",
|
||||
json={"username": "admin", "password": "admin"}
|
||||
)
|
||||
return response.json()["access_token"]
|
||||
|
||||
@pytest.fixture
|
||||
def auth_headers(auth_token):
|
||||
"""Get authentication headers."""
|
||||
return {"Authorization": f"Bearer {auth_token}"}
|
||||
|
||||
@pytest.fixture
|
||||
def admin_user(test_db_session):
|
||||
"""Create an admin user for testing."""
|
||||
from backend.db.models import User
|
||||
from backend.utils.password import hash_password
|
||||
|
||||
user = User(
|
||||
username="testadmin",
|
||||
email="testadmin@example.com",
|
||||
password_hash=hash_password("testpass"),
|
||||
is_admin=True,
|
||||
is_active=True,
|
||||
)
|
||||
test_db_session.add(user)
|
||||
test_db_session.commit()
|
||||
return user
|
||||
|
||||
@pytest.fixture
|
||||
def regular_user(test_db_session):
|
||||
"""Create a regular user for testing."""
|
||||
from backend.db.models import User
|
||||
from backend.utils.password import hash_password
|
||||
|
||||
user = User(
|
||||
username="testuser",
|
||||
email="testuser@example.com",
|
||||
password_hash=hash_password("testpass"),
|
||||
is_admin=False,
|
||||
is_active=True,
|
||||
)
|
||||
test_db_session.add(user)
|
||||
test_db_session.commit()
|
||||
return user
|
||||
```
|
||||
|
||||
### Test Database Setup
|
||||
|
||||
1. Create a separate test database:
|
||||
```sql
|
||||
CREATE DATABASE punimtag_test;
|
||||
```
|
||||
|
||||
2. Set test database URL in environment or test config:
|
||||
```bash
|
||||
export DATABASE_URL="postgresql+psycopg2://postgres:postgres@localhost:5432/punimtag_test"
|
||||
```
|
||||
|
||||
3. Ensure Redis is available for job-related tests (or mock it)
|
||||
|
||||
---
|
||||
|
||||
## Priority Recommendations
|
||||
|
||||
### High Priority (Core Functionality)
|
||||
1. **Authentication** - Login, token refresh, password change
|
||||
2. **Photo Search** - All search types and filters
|
||||
3. **Face Identification** - Core face matching and identification
|
||||
4. **User Management** - Admin operations and role management
|
||||
|
||||
### Medium Priority (Important Features)
|
||||
1. **Tag Operations** - CRUD and photo-tag relationships
|
||||
2. **People CRUD** - Person management
|
||||
3. **Job Status Tracking** - Background job monitoring
|
||||
4. **Bulk Operations** - Bulk favorites, deletions, etc.
|
||||
|
||||
### Lower Priority (Nice to Have)
|
||||
1. **File Operations** - Browse folder, open folder (OS-dependent)
|
||||
2. **Maintenance Endpoints** - Advanced maintenance features
|
||||
3. **Edge Cases** - Comprehensive error handling tests
|
||||
|
||||
---
|
||||
|
||||
## Testing Best Practices
|
||||
|
||||
1. **Use Fixtures** - Leverage pytest fixtures for common setup (database, auth tokens)
|
||||
2. **Test Both Paths** - Always test both success and failure scenarios
|
||||
3. **Test Authorization** - Verify admin vs regular user permissions
|
||||
4. **Test Pagination** - Verify pagination works for all list endpoints
|
||||
5. **Test Validation** - Test input validation (empty strings, invalid IDs, etc.)
|
||||
6. **Test Transactions** - Verify database transactions and rollbacks
|
||||
7. **Use Test Database** - Always use a separate test database
|
||||
8. **Clean Up** - Ensure test data is cleaned up after each test
|
||||
9. **Test Concurrency** - Test concurrent operations where relevant
|
||||
10. **Mock External Dependencies** - Mock Redis, file system when needed
|
||||
11. **Test Error Messages** - Verify error messages are helpful
|
||||
12. **Test Response Formats** - Verify response schemas match expectations
|
||||
13. **Test Edge Cases** - Test boundary conditions and edge cases
|
||||
14. **Test Performance** - Consider performance tests for critical endpoints
|
||||
15. **Test Security** - Test authentication, authorization, and input sanitization
|
||||
|
||||
---
|
||||
|
||||
## Running Tests
|
||||
|
||||
### Run All Tests
|
||||
```bash
|
||||
npm run test:backend
|
||||
# or
|
||||
pytest tests/ -v
|
||||
```
|
||||
|
||||
### Run Specific Test File
|
||||
```bash
|
||||
pytest tests/test_api_auth.py -v
|
||||
```
|
||||
|
||||
### Run Specific Test
|
||||
```bash
|
||||
pytest tests/test_api_auth.py::test_login_success_with_valid_credentials -v
|
||||
```
|
||||
|
||||
### Run with Coverage
|
||||
```bash
|
||||
pytest tests/ --cov=backend --cov-report=html
|
||||
```
|
||||
|
||||
### Run in CI
|
||||
The CI workflow (`.gitea/workflows/ci.yml`) already includes a `test-backend` job that runs:
|
||||
```bash
|
||||
python -m pytest tests/ -v
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## Test Coverage Goals
|
||||
|
||||
- **Minimum Coverage**: 80% (as per project rules)
|
||||
- **Critical Endpoints**: 100% coverage (auth, photo search, face identification)
|
||||
- **All Endpoints**: At least basic success/failure tests
|
||||
|
||||
---
|
||||
|
||||
## Notes
|
||||
|
||||
- Tests should be independent and not rely on execution order
|
||||
- Use transaction rollback to ensure test isolation
|
||||
- Mock external services (Redis, file system) when appropriate
|
||||
- Use factories or fixtures for test data creation
|
||||
- Keep tests fast - avoid unnecessary I/O operations
|
||||
- Document complex test scenarios with comments
|
||||
|
||||
179
tests/CI_TEST_SETUP.md
Normal file
179
tests/CI_TEST_SETUP.md
Normal file
@ -0,0 +1,179 @@
|
||||
# CI Test Setup Documentation
|
||||
|
||||
This document describes how the authentication tests and other backend tests are configured to run in CI.
|
||||
|
||||
## CI Workflow Configuration
|
||||
|
||||
The CI workflow (`.gitea/workflows/ci.yml`) has been updated to include:
|
||||
|
||||
### Test Database Setup
|
||||
|
||||
1. **PostgreSQL Service**: The CI uses a PostgreSQL 15 service container
|
||||
- Database: `punimtag_test` (main database)
|
||||
- Auth Database: `punimtag_auth_test` (auth database)
|
||||
- User: `postgres`
|
||||
- Password: `postgres`
|
||||
|
||||
2. **Database Creation**: Explicit database creation step ensures databases exist
|
||||
```yaml
|
||||
- name: Create test databases
|
||||
run: |
|
||||
export PGPASSWORD=postgres
|
||||
psql -h postgres -U postgres -c "CREATE DATABASE punimtag_test;" || true
|
||||
psql -h postgres -U postgres -c "CREATE DATABASE punimtag_auth_test;" || true
|
||||
```
|
||||
|
||||
3. **Schema Initialization**: Database schemas are initialized before tests run
|
||||
- Main database: All tables created via SQLAlchemy Base.metadata
|
||||
- Auth database: Tables created via SQL scripts
|
||||
|
||||
### Test Dependencies
|
||||
|
||||
The following testing dependencies are installed:
|
||||
- `pytest>=7.4.0` - Test framework
|
||||
- `httpx>=0.24.0` - HTTP client for FastAPI TestClient
|
||||
- `pytest-cov>=4.1.0` - Coverage reporting
|
||||
|
||||
These are installed via:
|
||||
1. `requirements.txt` (for local development)
|
||||
2. Explicit pip install in CI (for redundancy)
|
||||
|
||||
### Test Execution
|
||||
|
||||
The CI runs tests in two steps:
|
||||
|
||||
1. **All Backend Tests**:
|
||||
```bash
|
||||
pytest tests/ -v --tb=short --cov=backend --cov-report=term-missing --cov-report=xml
|
||||
```
|
||||
- Runs all tests in the `tests/` directory
|
||||
- Generates coverage report
|
||||
- Uses short traceback format
|
||||
|
||||
2. **Authentication Tests** (specific step):
|
||||
```bash
|
||||
pytest tests/test_api_auth.py -v --tb=short --junit-xml=test-results-auth.xml
|
||||
```
|
||||
- Runs only authentication tests
|
||||
- Generates JUnit XML for test reporting
|
||||
- Provides focused output for authentication tests
|
||||
|
||||
### Environment Variables
|
||||
|
||||
The following environment variables are set in CI:
|
||||
- `DATABASE_URL`: `postgresql+psycopg2://postgres:postgres@postgres:5432/punimtag_test`
|
||||
- `DATABASE_URL_AUTH`: `postgresql+psycopg2://postgres:postgres@postgres:5432/punimtag_auth_test`
|
||||
- `REDIS_URL`: `redis://redis:6379/0`
|
||||
- `PYTHONPATH`: Set to project root
|
||||
|
||||
### Test Results
|
||||
|
||||
- Tests use `continue-on-error: true` to allow CI to complete even if tests fail
|
||||
- Test results are logged to console
|
||||
- JUnit XML output is generated for test reporting tools
|
||||
- Coverage reports are generated (terminal and XML formats)
|
||||
|
||||
## Running Tests Locally
|
||||
|
||||
To run the same tests locally:
|
||||
|
||||
1. **Set up test database**:
|
||||
```bash
|
||||
# Create test database
|
||||
createdb punimtag_test
|
||||
createdb punimtag_auth_test
|
||||
```
|
||||
|
||||
2. **Set environment variables**:
|
||||
```bash
|
||||
export DATABASE_URL="postgresql+psycopg2://postgres:postgres@localhost:5432/punimtag_test"
|
||||
export DATABASE_URL_AUTH="postgresql+psycopg2://postgres:postgres@localhost:5432/punimtag_auth_test"
|
||||
export PYTHONPATH=$(pwd)
|
||||
```
|
||||
|
||||
3. **Install dependencies**:
|
||||
```bash
|
||||
pip install -r requirements.txt
|
||||
```
|
||||
|
||||
4. **Run tests**:
|
||||
```bash
|
||||
# Run all tests
|
||||
pytest tests/ -v
|
||||
|
||||
# Run only authentication tests
|
||||
pytest tests/test_api_auth.py -v
|
||||
|
||||
# Run with coverage
|
||||
pytest tests/ --cov=backend --cov-report=html
|
||||
```
|
||||
|
||||
## Test Structure
|
||||
|
||||
### Test Files
|
||||
- `tests/conftest.py` - Test fixtures and configuration
|
||||
- `tests/test_api_auth.py` - Authentication API tests
|
||||
- `tests/API_TEST_PLAN.md` - Comprehensive test plan
|
||||
|
||||
### Test Fixtures
|
||||
|
||||
The `conftest.py` provides:
|
||||
- `test_db_engine` - Database engine (session scope)
|
||||
- `test_db_session` - Database session with rollback (function scope)
|
||||
- `test_client` - FastAPI test client (function scope)
|
||||
- `admin_user` - Admin user fixture
|
||||
- `regular_user` - Regular user fixture
|
||||
- `inactive_user` - Inactive user fixture
|
||||
- `auth_token` - Authentication token for admin
|
||||
- `regular_auth_token` - Authentication token for regular user
|
||||
- `auth_headers` - Authorization headers for admin
|
||||
- `regular_auth_headers` - Authorization headers for regular user
|
||||
|
||||
## Troubleshooting
|
||||
|
||||
### Tests Fail in CI
|
||||
|
||||
1. **Check database connection**:
|
||||
- Verify PostgreSQL service is running
|
||||
- Check database URLs are correct
|
||||
- Ensure databases exist
|
||||
|
||||
2. **Check dependencies**:
|
||||
- Verify pytest, httpx, and pytest-cov are installed
|
||||
- Check requirements.txt is up to date
|
||||
|
||||
3. **Check test database state**:
|
||||
- Tests use transaction rollback, so database should be clean
|
||||
- If issues persist, check for schema mismatches
|
||||
|
||||
### Database Connection Issues
|
||||
|
||||
If tests fail with database connection errors:
|
||||
- Verify `DATABASE_URL` environment variable is set
|
||||
- Check PostgreSQL service is accessible
|
||||
- Ensure database exists and user has permissions
|
||||
|
||||
### Import Errors
|
||||
|
||||
If tests fail with import errors:
|
||||
- Verify `PYTHONPATH` is set to project root
|
||||
- Check all dependencies are installed
|
||||
- Ensure test files are in `tests/` directory
|
||||
|
||||
## Next Steps
|
||||
|
||||
1. Add more high-priority test files:
|
||||
- `test_api_photos.py` - Photo search tests
|
||||
- `test_api_faces.py` - Face identification tests
|
||||
- `test_api_users.py` - User management tests
|
||||
|
||||
2. Improve test coverage:
|
||||
- Add integration tests
|
||||
- Add error handling tests
|
||||
- Add performance tests
|
||||
|
||||
3. Enhance CI reporting:
|
||||
- Add test result artifacts
|
||||
- Add coverage badge
|
||||
- Add test summary to PR comments
|
||||
|
||||
@ -1,690 +0,0 @@
|
||||
# PunimTag Testing Guide
|
||||
|
||||
**Version:** 1.0
|
||||
**Date:** October 16, 2025
|
||||
**Phase:** 6 - Testing and Validation
|
||||
|
||||
---
|
||||
|
||||
## Table of Contents
|
||||
|
||||
1. [Overview](#overview)
|
||||
2. [Test Suite Structure](#test-suite-structure)
|
||||
3. [Running Tests](#running-tests)
|
||||
4. [Test Categories](#test-categories)
|
||||
5. [Test Details](#test-details)
|
||||
6. [Interpreting Results](#interpreting-results)
|
||||
7. [Troubleshooting](#troubleshooting)
|
||||
8. [Adding New Tests](#adding-new-tests)
|
||||
|
||||
---
|
||||
|
||||
## Overview
|
||||
|
||||
This guide explains the comprehensive test suite for PunimTag's DeepFace integration. The test suite validates all aspects of the migration from face_recognition to DeepFace, ensuring functionality, performance, and reliability.
|
||||
|
||||
### Test Philosophy
|
||||
|
||||
- **Automated**: Tests run without manual intervention
|
||||
- **Comprehensive**: Cover all critical functionality
|
||||
- **Fast**: Complete in reasonable time for CI/CD
|
||||
- **Reliable**: Consistent results across runs
|
||||
- **Informative**: Clear pass/fail with diagnostic info
|
||||
|
||||
---
|
||||
|
||||
## Test Suite Structure
|
||||
|
||||
```
|
||||
tests/
|
||||
├── test_deepface_integration.py # Main Phase 6 test suite (10 tests)
|
||||
├── test_deepface_gui.py # GUI comparison tests (reference)
|
||||
├── test_deepface_only.py # DeepFace-only tests (reference)
|
||||
├── test_face_recognition.py # Legacy tests
|
||||
├── README_TESTING.md # This file
|
||||
└── demo_photos/ # Test images (required)
|
||||
```
|
||||
|
||||
### Test Files
|
||||
|
||||
- **test_deepface_integration.py**: Primary test suite for Phase 6 validation
|
||||
- **test_deepface_gui.py**: Reference implementation with GUI tests
|
||||
- **test_deepface_only.py**: DeepFace library tests without GUI
|
||||
- **test_face_recognition.py**: Legacy face_recognition tests
|
||||
|
||||
---
|
||||
|
||||
## Running Tests
|
||||
|
||||
### Prerequisites
|
||||
|
||||
1. **Install Dependencies**
|
||||
```bash
|
||||
pip install -r requirements.txt
|
||||
```
|
||||
|
||||
2. **Verify Demo Photos**
|
||||
```bash
|
||||
ls demo_photos/*.jpg
|
||||
# Should show: 2019-11-22_0011.jpg, 2019-11-22_0012.jpg, etc.
|
||||
```
|
||||
|
||||
3. **Check DeepFace Installation**
|
||||
```bash
|
||||
python -c "from deepface import DeepFace; print('DeepFace OK')"
|
||||
```
|
||||
|
||||
### Running the Full Test Suite
|
||||
|
||||
```bash
|
||||
# Navigate to project root
|
||||
cd /home/ladmin/Code/punimtag
|
||||
|
||||
# Run Phase 6 integration tests
|
||||
python tests/test_deepface_integration.py
|
||||
```
|
||||
|
||||
### Running Individual Tests
|
||||
|
||||
```python
|
||||
# In Python shell or script
|
||||
from tests.test_deepface_integration import test_face_detection
|
||||
|
||||
# Run specific test
|
||||
result = test_face_detection()
|
||||
print("Passed!" if result else "Failed!")
|
||||
```
|
||||
|
||||
### Running with Verbose Output
|
||||
|
||||
```bash
|
||||
# Add debugging output
|
||||
python -u tests/test_deepface_integration.py 2>&1 | tee test_results.log
|
||||
```
|
||||
|
||||
### Expected Runtime
|
||||
|
||||
- **Full Suite**: ~30-60 seconds (depends on hardware)
|
||||
- **Individual Test**: ~3-10 seconds
|
||||
- **With GPU**: Faster inference times
|
||||
- **First Run**: +2-5 minutes (model downloads)
|
||||
|
||||
---
|
||||
|
||||
## Test Categories
|
||||
|
||||
### 1. Core Functionality Tests
|
||||
- Face Detection
|
||||
- Face Matching
|
||||
- Metadata Storage
|
||||
|
||||
### 2. Configuration Tests
|
||||
- FaceProcessor Initialization
|
||||
- Multiple Detector Backends
|
||||
|
||||
### 3. Algorithm Tests
|
||||
- Cosine Similarity
|
||||
- Adaptive Tolerance
|
||||
|
||||
### 4. Data Tests
|
||||
- Database Schema
|
||||
- Face Location Format
|
||||
|
||||
### 5. Performance Tests
|
||||
- Performance Benchmark
|
||||
|
||||
---
|
||||
|
||||
## Test Details
|
||||
|
||||
### Test 1: Face Detection
|
||||
|
||||
**Purpose:** Verify DeepFace detects faces correctly
|
||||
|
||||
**What it tests:**
|
||||
- Face detection with default detector (retinaface)
|
||||
- Photo processing workflow
|
||||
- Face encoding generation (512-dimensional)
|
||||
- Database storage
|
||||
|
||||
**Pass Criteria:**
|
||||
- At least 1 face detected in test image
|
||||
- Encoding size = 4096 bytes (512 floats × 8)
|
||||
- No exceptions during processing
|
||||
|
||||
**Failure Modes:**
|
||||
- Image file not found
|
||||
- No faces detected (possible with poor quality images)
|
||||
- Wrong encoding size
|
||||
- Database errors
|
||||
|
||||
---
|
||||
|
||||
### Test 2: Face Matching
|
||||
|
||||
**Purpose:** Verify face similarity matching works
|
||||
|
||||
**What it tests:**
|
||||
- Processing multiple photos
|
||||
- Finding similar faces
|
||||
- Similarity calculation
|
||||
- Match confidence scoring
|
||||
|
||||
**Pass Criteria:**
|
||||
- Multiple photos processed successfully
|
||||
- Similar faces found within tolerance
|
||||
- Confidence scores reasonable (0-100%)
|
||||
- Match results consistent
|
||||
|
||||
**Failure Modes:**
|
||||
- Not enough test images
|
||||
- No faces detected
|
||||
- Similarity calculation errors
|
||||
- No matches found (tolerance too strict)
|
||||
|
||||
---
|
||||
|
||||
### Test 3: Metadata Storage
|
||||
|
||||
**Purpose:** Verify DeepFace metadata stored correctly
|
||||
|
||||
**What it tests:**
|
||||
- face_confidence column storage
|
||||
- detector_backend column storage
|
||||
- model_name column storage
|
||||
- quality_score calculation
|
||||
|
||||
**Pass Criteria:**
|
||||
- All metadata fields populated
|
||||
- Detector matches configuration
|
||||
- Model matches configuration
|
||||
- Values within expected ranges
|
||||
|
||||
**Failure Modes:**
|
||||
- Missing columns
|
||||
- NULL values in metadata
|
||||
- Mismatched detector/model
|
||||
- Invalid data types
|
||||
|
||||
---
|
||||
|
||||
### Test 4: Configuration
|
||||
|
||||
**Purpose:** Verify FaceProcessor configuration flexibility
|
||||
|
||||
**What it tests:**
|
||||
- Default configuration
|
||||
- Custom detector backends
|
||||
- Custom models
|
||||
- Configuration application
|
||||
|
||||
**Pass Criteria:**
|
||||
- Default values match config.py
|
||||
- Custom values applied correctly
|
||||
- All detector options work
|
||||
- Configuration persists
|
||||
|
||||
**Failure Modes:**
|
||||
- Configuration not applied
|
||||
- Invalid detector/model accepted
|
||||
- Configuration mismatch
|
||||
- Initialization errors
|
||||
|
||||
---
|
||||
|
||||
### Test 5: Cosine Similarity
|
||||
|
||||
**Purpose:** Verify similarity calculation accuracy
|
||||
|
||||
**What it tests:**
|
||||
- Identical encoding distance (should be ~0)
|
||||
- Different encoding distance (should be >0)
|
||||
- Mismatched length handling
|
||||
- Normalization and scaling
|
||||
|
||||
**Pass Criteria:**
|
||||
- Identical encodings: distance < 0.01
|
||||
- Different encodings: distance > 0.1
|
||||
- Mismatched lengths: distance = 2.0
|
||||
- No calculation errors
|
||||
|
||||
**Failure Modes:**
|
||||
- Identical encodings not similar
|
||||
- Different encodings too similar
|
||||
- Division by zero
|
||||
- Numerical instability
|
||||
|
||||
---
|
||||
|
||||
### Test 6: Database Schema
|
||||
|
||||
**Purpose:** Verify database schema updates correct
|
||||
|
||||
**What it tests:**
|
||||
- New columns in faces table
|
||||
- New columns in person_encodings table
|
||||
- Column data types
|
||||
- Schema consistency
|
||||
|
||||
**Pass Criteria:**
|
||||
- All required columns exist
|
||||
- Data types correct (TEXT, REAL)
|
||||
- Schema matches migration plan
|
||||
- No missing columns
|
||||
|
||||
**Failure Modes:**
|
||||
- Missing columns
|
||||
- Wrong data types
|
||||
- Migration not applied
|
||||
- Schema corruption
|
||||
|
||||
---
|
||||
|
||||
### Test 7: Face Location Format
|
||||
|
||||
**Purpose:** Verify DeepFace location format {x, y, w, h}
|
||||
|
||||
**What it tests:**
|
||||
- Location stored as dict string
|
||||
- Location parsing
|
||||
- Required keys present (x, y, w, h)
|
||||
- Format consistency
|
||||
|
||||
**Pass Criteria:**
|
||||
- Location is dict with 4 keys
|
||||
- Values are numeric
|
||||
- Format parseable
|
||||
- Consistent across faces
|
||||
|
||||
**Failure Modes:**
|
||||
- Wrong format (tuple instead of dict)
|
||||
- Missing keys
|
||||
- Parse errors
|
||||
- Invalid values
|
||||
|
||||
---
|
||||
|
||||
### Test 8: Performance Benchmark
|
||||
|
||||
**Purpose:** Measure and validate performance
|
||||
|
||||
**What it tests:**
|
||||
- Face detection speed
|
||||
- Similarity search speed
|
||||
- Scaling with photo count
|
||||
- Resource usage
|
||||
|
||||
**Pass Criteria:**
|
||||
- Processing completes in reasonable time
|
||||
- No crashes or hangs
|
||||
- Performance metrics reported
|
||||
- Consistent across runs
|
||||
|
||||
**Failure Modes:**
|
||||
- Excessive processing time
|
||||
- Memory exhaustion
|
||||
- Performance degradation
|
||||
- Timeout errors
|
||||
|
||||
---
|
||||
|
||||
### Test 9: Adaptive Tolerance
|
||||
|
||||
**Purpose:** Verify adaptive tolerance calculation
|
||||
|
||||
**What it tests:**
|
||||
- Quality-based tolerance adjustment
|
||||
- Confidence-based tolerance adjustment
|
||||
- Bounds enforcement [0.2, 0.6]
|
||||
- Tolerance calculation logic
|
||||
|
||||
**Pass Criteria:**
|
||||
- Tolerance adjusts with quality
|
||||
- Higher quality = stricter tolerance
|
||||
- Tolerance stays within bounds
|
||||
- Calculation consistent
|
||||
|
||||
**Failure Modes:**
|
||||
- Tolerance out of bounds
|
||||
- No quality adjustment
|
||||
- Calculation errors
|
||||
- Incorrect formula
|
||||
|
||||
---
|
||||
|
||||
### Test 10: Multiple Detectors
|
||||
|
||||
**Purpose:** Verify multiple detector backends work
|
||||
|
||||
**What it tests:**
|
||||
- opencv detector
|
||||
- ssd detector
|
||||
- (retinaface tested in Test 1)
|
||||
- (mtcnn available but slower)
|
||||
- Detector-specific results
|
||||
|
||||
**Pass Criteria:**
|
||||
- At least one detector finds faces
|
||||
- No detector crashes
|
||||
- Results recorded
|
||||
- Different detectors work
|
||||
|
||||
**Failure Modes:**
|
||||
- All detectors fail
|
||||
- Detector not available
|
||||
- Configuration errors
|
||||
- Missing dependencies
|
||||
|
||||
---
|
||||
|
||||
## Interpreting Results
|
||||
|
||||
### Success Output
|
||||
|
||||
```
|
||||
======================================================================
|
||||
DEEPFACE INTEGRATION TEST SUITE - PHASE 6
|
||||
======================================================================
|
||||
|
||||
Testing complete DeepFace integration in PunimTag
|
||||
This comprehensive test suite validates all aspects of the migration
|
||||
|
||||
============================================================
|
||||
Test 1: DeepFace Face Detection
|
||||
============================================================
|
||||
Testing with image: demo_photos/2019-11-22_0011.jpg
|
||||
✓ Added photo to database (ID: 1)
|
||||
📸 Processing: 2019-11-22_0011.jpg
|
||||
👤 Found 2 faces
|
||||
✓ Processed 1 photos
|
||||
✓ Found 2 faces in the photo
|
||||
✓ Encoding size: 4096 bytes (expected: 4096)
|
||||
|
||||
✅ PASS: Face detection working correctly
|
||||
|
||||
[... more tests ...]
|
||||
|
||||
======================================================================
|
||||
TEST SUMMARY
|
||||
======================================================================
|
||||
✅ PASS: Face Detection
|
||||
✅ PASS: Face Matching
|
||||
✅ PASS: Metadata Storage
|
||||
✅ PASS: Configuration
|
||||
✅ PASS: Cosine Similarity
|
||||
✅ PASS: Database Schema
|
||||
✅ PASS: Face Location Format
|
||||
✅ PASS: Performance Benchmark
|
||||
✅ PASS: Adaptive Tolerance
|
||||
✅ PASS: Multiple Detectors
|
||||
======================================================================
|
||||
Tests passed: 10/10
|
||||
Tests failed: 0/10
|
||||
======================================================================
|
||||
|
||||
🎉 ALL TESTS PASSED! DeepFace integration is working correctly!
|
||||
```
|
||||
|
||||
### Failure Output
|
||||
|
||||
```
|
||||
❌ FAIL: Face detection working correctly
|
||||
|
||||
Error: No faces detected in test image
|
||||
|
||||
[Traceback ...]
|
||||
```
|
||||
|
||||
### Warning Output
|
||||
|
||||
```
|
||||
⚠️ Test image not found: demo_photos/2019-11-22_0011.jpg
|
||||
Please ensure demo photos are available
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## Troubleshooting
|
||||
|
||||
### Common Issues
|
||||
|
||||
#### 1. Test Images Not Found
|
||||
|
||||
**Problem:**
|
||||
```
|
||||
❌ Test image not found: demo_photos/2019-11-22_0011.jpg
|
||||
```
|
||||
|
||||
**Solution:**
|
||||
- Verify demo_photos directory exists
|
||||
- Check image filenames
|
||||
- Ensure running from project root
|
||||
|
||||
#### 2. DeepFace Import Error
|
||||
|
||||
**Problem:**
|
||||
```
|
||||
ImportError: No module named 'deepface'
|
||||
```
|
||||
|
||||
**Solution:**
|
||||
```bash
|
||||
pip install deepface tensorflow opencv-python retina-face
|
||||
```
|
||||
|
||||
#### 3. TensorFlow Warnings
|
||||
|
||||
**Problem:**
|
||||
```
|
||||
TensorFlow: Could not load dynamic library 'libcudart.so.11.0'
|
||||
```
|
||||
|
||||
**Solution:**
|
||||
- Expected on CPU-only systems
|
||||
- Warnings suppressed in config.py
|
||||
- Does not affect functionality
|
||||
|
||||
#### 4. Model Download Timeout
|
||||
|
||||
**Problem:**
|
||||
```
|
||||
TimeoutError: Failed to download ArcFace model
|
||||
```
|
||||
|
||||
**Solution:**
|
||||
- Check internet connection
|
||||
- Models stored in ~/.deepface/weights/
|
||||
- Retry after network issues resolved
|
||||
|
||||
#### 5. Memory Error
|
||||
|
||||
**Problem:**
|
||||
```
|
||||
MemoryError: Unable to allocate array
|
||||
```
|
||||
|
||||
**Solution:**
|
||||
- Close other applications
|
||||
- Use smaller test images
|
||||
- Increase system memory
|
||||
- Process fewer images at once
|
||||
|
||||
#### 6. Database Locked
|
||||
|
||||
**Problem:**
|
||||
```
|
||||
sqlite3.OperationalError: database is locked
|
||||
```
|
||||
|
||||
**Solution:**
|
||||
- Close other database connections
|
||||
- Stop running dashboard
|
||||
- Use in-memory database for tests
|
||||
|
||||
---
|
||||
|
||||
## Adding New Tests
|
||||
|
||||
### Test Template
|
||||
|
||||
```python
|
||||
def test_new_feature():
|
||||
"""Test X: Description of what this tests"""
|
||||
print("\n" + "="*60)
|
||||
print("Test X: Test Name")
|
||||
print("="*60)
|
||||
|
||||
try:
|
||||
# Setup
|
||||
db = DatabaseManager(":memory:", verbose=0)
|
||||
processor = FaceProcessor(db, verbose=0)
|
||||
|
||||
# Test logic
|
||||
result = some_operation()
|
||||
|
||||
# Verification
|
||||
if result != expected:
|
||||
print(f"❌ FAIL: {explanation}")
|
||||
return False
|
||||
|
||||
print(f"✓ {success_message}")
|
||||
print("\n✅ PASS: Test passed")
|
||||
return True
|
||||
|
||||
except Exception as e:
|
||||
print(f"\n❌ FAIL: {e}")
|
||||
import traceback
|
||||
traceback.print_exc()
|
||||
return False
|
||||
```
|
||||
|
||||
### Adding to Test Suite
|
||||
|
||||
1. Write test function following template
|
||||
2. Add to `tests` list in `run_all_tests()`
|
||||
3. Update test count in documentation
|
||||
4. Run test suite to verify
|
||||
|
||||
### Best Practices
|
||||
|
||||
- **Clear naming**: `test_what_is_being_tested`
|
||||
- **Good documentation**: Explain purpose and expectations
|
||||
- **Proper cleanup**: Use in-memory DB or cleanup after test
|
||||
- **Informative output**: Print progress and results
|
||||
- **Error handling**: Catch and report exceptions
|
||||
- **Return boolean**: True = pass, False = fail
|
||||
|
||||
---
|
||||
|
||||
## Test Data Requirements
|
||||
|
||||
### Required Files
|
||||
|
||||
```
|
||||
demo_photos/
|
||||
├── 2019-11-22_0011.jpg # Primary test image (required)
|
||||
├── 2019-11-22_0012.jpg # Secondary test image (required)
|
||||
├── 2019-11-22_0015.jpg # Additional test image (optional)
|
||||
└── 2019-11-22_0017.jpg # Additional test image (optional)
|
||||
```
|
||||
|
||||
### Image Requirements
|
||||
|
||||
- **Format**: JPG, JPEG, PNG
|
||||
- **Size**: At least 640x480 pixels
|
||||
- **Content**: Should contain 1+ faces
|
||||
- **Quality**: Good lighting, clear faces
|
||||
- **Variety**: Different poses, ages, expressions
|
||||
|
||||
---
|
||||
|
||||
## Continuous Integration
|
||||
|
||||
### GitHub Actions Setup
|
||||
|
||||
```yaml
|
||||
name: DeepFace Tests
|
||||
|
||||
on: [push, pull_request]
|
||||
|
||||
jobs:
|
||||
test:
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
      - uses: actions/checkout@v4
      - uses: actions/setup-python@v5
        with:
          python-version: '3.12'
|
||||
- run: pip install -r requirements.txt
|
||||
- run: python tests/test_deepface_integration.py
|
||||
```
|
||||
|
||||
### Pre-commit Hook
|
||||
|
||||
```bash
|
||||
#!/bin/bash
|
||||
# .git/hooks/pre-commit
|
||||
|
||||
echo "Running DeepFace tests..."
|
||||
python tests/test_deepface_integration.py
|
||||
|
||||
if [ $? -ne 0 ]; then
|
||||
echo "Tests failed. Commit aborted."
|
||||
exit 1
|
||||
fi
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## Performance Benchmarks
|
||||
|
||||
### Expected Performance (Reference Hardware)
|
||||
|
||||
**System:** Intel i7-10700K, 32GB RAM, RTX 3080
|
||||
|
||||
| Operation | Time (avg) | Notes |
|
||||
|--------------------------|-----------|--------------------------|
|
||||
| Face Detection (1 photo) | 2-3s | RetinaFace detector |
|
||||
| Face Detection (1 photo) | 0.5-1s | OpenCV detector |
|
||||
| Face Encoding | 0.5s | ArcFace model |
|
||||
| Similarity Search | 0.01-0.1s | Per face comparison |
|
||||
| Full Test Suite | 30-45s | All 10 tests |
|
||||
|
||||
**Note:** First run adds 2-5 minutes for model downloads
|
||||
|
||||
---
|
||||
|
||||
## Test Coverage Report
|
||||
|
||||
### Current Coverage
|
||||
|
||||
- **Core Functionality**: 100%
|
||||
- **Database Operations**: 100%
|
||||
- **Configuration**: 100%
|
||||
- **Error Handling**: 80%
|
||||
- **GUI Integration**: 0% (manual testing required)
|
||||
- **Overall**: ~85%
|
||||
|
||||
### Future Test Additions
|
||||
|
||||
- GUI integration tests
|
||||
- Load testing (1000+ photos)
|
||||
- Stress testing (concurrent operations)
|
||||
- Edge case testing (corrupted images, etc.)
|
||||
- Backward compatibility tests
|
||||
|
||||
---
|
||||
|
||||
## References
|
||||
|
||||
- [DeepFace Documentation](https://github.com/serengil/deepface)
|
||||
- [ArcFace Paper](https://arxiv.org/abs/1801.07698)
|
||||
- [Phase 6 Validation Checklist](../PHASE6_VALIDATION_CHECKLIST.md)
|
||||
- [DeepFace Migration Plan](../.notes/deepface_migration_plan.md)
|
||||
|
||||
---
|
||||
|
||||
**Last Updated:** October 16, 2025
|
||||
**Maintained By:** PunimTag Development Team
|
||||
**Questions?** Check troubleshooting or raise an issue
|
||||
|
||||
320
tests/conftest.py
Normal file
320
tests/conftest.py
Normal file
@ -0,0 +1,320 @@
|
||||
"""Test configuration and fixtures for PunimTag backend tests."""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
import os
|
||||
from typing import Generator
|
||||
|
||||
import pytest
|
||||
from fastapi.testclient import TestClient
|
||||
from sqlalchemy import create_engine
|
||||
from sqlalchemy.orm import sessionmaker, Session
|
||||
|
||||
from backend.app import create_app
|
||||
from backend.db.base import Base
|
||||
from backend.db.session import get_db
|
||||
|
||||
# Test database URL - use environment variable or default
|
||||
TEST_DATABASE_URL = os.getenv(
|
||||
"DATABASE_URL",
|
||||
"postgresql+psycopg2://postgres:postgres@localhost:5432/punimtag_test"
|
||||
)
|
||||
|
||||
|
||||
@pytest.fixture(scope="session")
def test_db_engine():
    """Session-scoped SQLAlchemy engine bound to the test database.

    Creates the full schema once before any test runs, and drops every
    table (then disposes of the connection pool) after the last test of
    the session finishes.
    """
    db_engine = create_engine(TEST_DATABASE_URL, future=True)
    # Build the complete schema up front so fixtures can insert rows.
    Base.metadata.create_all(bind=db_engine)

    yield db_engine

    # Session teardown: remove the schema and release pooled connections.
    Base.metadata.drop_all(bind=db_engine)
    db_engine.dispose()
|
||||
|
||||
|
||||
@pytest.fixture(scope="function")
def test_db_session(test_db_engine) -> Generator[Session, None, None]:
    """Yield a per-test database session isolated by an outer transaction.

    The session is bound to a dedicated connection whose enclosing
    transaction is rolled back during teardown, so nothing written by a
    test survives into the next one.
    """
    conn = test_db_engine.connect()
    outer_tx = conn.begin()
    session_factory = sessionmaker(bind=conn, autoflush=False, autocommit=False)
    db_session = session_factory()

    yield db_session

    # Teardown: discard all work done during the test, then free the connection.
    db_session.close()
    outer_tx.rollback()
    conn.close()
|
||||
|
||||
|
||||
@pytest.fixture(scope="function")
def test_client(test_db_session: Session) -> Generator[TestClient, None, None]:
    """FastAPI test client whose ``get_db`` dependency yields the test session."""
    application = create_app()

    def _db_override() -> Generator[Session, None, None]:
        # Hand every request the transactional test session.
        yield test_db_session

    application.dependency_overrides[get_db] = _db_override
    try:
        with TestClient(application) as http_client:
            yield http_client
    finally:
        # Always undo the override so the app factory stays pristine.
        application.dependency_overrides.clear()
|
||||
|
||||
|
||||
@pytest.fixture
def admin_user(test_db_session: Session):
    """Persist and return an active admin user (testadmin / testpass)."""
    from backend.db.models import User
    from backend.utils.password import hash_password
    from backend.constants.roles import DEFAULT_ADMIN_ROLE

    admin = User(
        username="testadmin",
        email="testadmin@example.com",
        password_hash=hash_password("testpass"),
        is_admin=True,
        is_active=True,
        role=DEFAULT_ADMIN_ROLE,
    )
    test_db_session.add(admin)
    test_db_session.commit()
    test_db_session.refresh(admin)
    return admin
|
||||
|
||||
|
||||
@pytest.fixture
def regular_user(test_db_session: Session):
    """Persist and return an active non-admin user (testuser / testpass)."""
    from backend.db.models import User
    from backend.utils.password import hash_password
    from backend.constants.roles import DEFAULT_USER_ROLE

    account = User(
        username="testuser",
        email="testuser@example.com",
        password_hash=hash_password("testpass"),
        is_admin=False,
        is_active=True,
        role=DEFAULT_USER_ROLE,
    )
    test_db_session.add(account)
    test_db_session.commit()
    test_db_session.refresh(account)
    return account
|
||||
|
||||
|
||||
@pytest.fixture
def inactive_user(test_db_session: Session):
    """Persist and return a deactivated user (inactiveuser / testpass)."""
    from backend.db.models import User
    from backend.utils.password import hash_password
    from backend.constants.roles import DEFAULT_USER_ROLE

    account = User(
        username="inactiveuser",
        email="inactiveuser@example.com",
        password_hash=hash_password("testpass"),
        is_admin=False,
        is_active=False,
        role=DEFAULT_USER_ROLE,
    )
    test_db_session.add(account)
    test_db_session.commit()
    test_db_session.refresh(account)
    return account
|
||||
|
||||
|
||||
@pytest.fixture
def auth_token(test_client: TestClient, admin_user) -> str:
    """Log in as the admin fixture user and return its access token."""
    resp = test_client.post(
        "/api/v1/auth/login",
        json={"username": "testadmin", "password": "testpass"},
    )
    assert resp.status_code == 200
    return resp.json()["access_token"]
|
||||
|
||||
|
||||
@pytest.fixture
def regular_auth_token(test_client: TestClient, regular_user) -> str:
    """Log in as the regular fixture user and return its access token."""
    resp = test_client.post(
        "/api/v1/auth/login",
        json={"username": "testuser", "password": "testpass"},
    )
    assert resp.status_code == 200
    return resp.json()["access_token"]
|
||||
|
||||
|
||||
@pytest.fixture
def auth_headers(auth_token: str) -> dict[str, str]:
    """Bearer-token Authorization header for the admin user."""
    return {"Authorization": f"Bearer {auth_token}"}
|
||||
|
||||
|
||||
@pytest.fixture
def regular_auth_headers(regular_auth_token: str) -> dict[str, str]:
    """Bearer-token Authorization header for the regular user."""
    return {"Authorization": f"Bearer {regular_auth_token}"}
|
||||
|
||||
|
||||
@pytest.fixture
def test_photo(test_db_session: Session):
    """Persist and return a processed image Photo (photo1.jpg)."""
    from datetime import date
    from backend.db.models import Photo

    record = Photo(
        path="/test/path/photo1.jpg",
        filename="photo1.jpg",
        date_taken=date(2024, 1, 15),
        processed=True,
        media_type="image",
    )
    test_db_session.add(record)
    test_db_session.commit()
    test_db_session.refresh(record)
    return record
|
||||
|
||||
|
||||
@pytest.fixture
def test_photo_2(test_db_session: Session):
    """Persist and return a second processed image Photo (photo2.jpg)."""
    from datetime import date
    from backend.db.models import Photo

    record = Photo(
        path="/test/path/photo2.jpg",
        filename="photo2.jpg",
        date_taken=date(2024, 1, 16),
        processed=True,
        media_type="image",
    )
    test_db_session.add(record)
    test_db_session.commit()
    test_db_session.refresh(record)
    return record
|
||||
|
||||
|
||||
@pytest.fixture
def test_face(test_db_session: Session, test_photo):
    """Persist and return an unidentified Face attached to test_photo."""
    import numpy as np
    from backend.db.models import Face

    # Random 128-d float32 vector standing in for a real embedding.
    fake_embedding = np.random.rand(128).astype(np.float32).tobytes()

    face = Face(
        photo_id=test_photo.id,
        person_id=None,  # not yet identified
        encoding=fake_embedding,
        location='{"x": 100, "y": 100, "w": 200, "h": 200}',
        quality_score=0.85,
        face_confidence=0.95,
        detector_backend="retinaface",
        model_name="VGG-Face",
        pose_mode="frontal",
        excluded=False,
    )
    test_db_session.add(face)
    test_db_session.commit()
    test_db_session.refresh(face)
    return face
|
||||
|
||||
|
||||
@pytest.fixture
def test_face_2(test_db_session: Session, test_photo_2):
    """Persist and return a second unidentified Face, on test_photo_2."""
    import numpy as np
    from backend.db.models import Face

    # Another random 128-d embedding, used by similarity-style tests.
    fake_embedding = np.random.rand(128).astype(np.float32).tobytes()

    face = Face(
        photo_id=test_photo_2.id,
        person_id=None,  # not yet identified
        encoding=fake_embedding,
        location='{"x": 150, "y": 150, "w": 200, "h": 200}',
        quality_score=0.80,
        face_confidence=0.90,
        detector_backend="retinaface",
        model_name="VGG-Face",
        pose_mode="frontal",
        excluded=False,
    )
    test_db_session.add(face)
    test_db_session.commit()
    test_db_session.refresh(face)
    return face
|
||||
|
||||
|
||||
@pytest.fixture
def test_person(test_db_session: Session):
    """Persist and return a Person record (John Middle Doe, b. 1990-01-01)."""
    from datetime import date, datetime
    from backend.db.models import Person

    # NOTE(review): datetime.utcnow() is deprecated on Python 3.12+;
    # consider datetime.now(timezone.utc) once the column handles aware values.
    person = Person(
        first_name="John",
        last_name="Doe",
        middle_name="Middle",
        maiden_name=None,
        date_of_birth=date(1990, 1, 1),
        created_date=datetime.utcnow(),
    )
    test_db_session.add(person)
    test_db_session.commit()
    test_db_session.refresh(person)
    return person
|
||||
|
||||
|
||||
@pytest.fixture
def identified_face(test_db_session: Session, test_photo, test_person):
    """Persist a Face already linked to test_person plus its PersonEncoding."""
    import numpy as np
    from backend.db.models import Face, PersonEncoding

    embedding = np.random.rand(128).astype(np.float32).tobytes()

    face = Face(
        photo_id=test_photo.id,
        person_id=test_person.id,
        encoding=embedding,
        location='{"x": 200, "y": 200, "w": 200, "h": 200}',
        quality_score=0.90,
        face_confidence=0.98,
        detector_backend="retinaface",
        model_name="VGG-Face",
        pose_mode="frontal",
        excluded=False,
    )
    test_db_session.add(face)
    test_db_session.flush()  # assign face.id before writing the encoding row

    test_db_session.add(
        PersonEncoding(
            person_id=test_person.id,
            face_id=face.id,
            encoding=embedding,
            quality_score=0.90,
            detector_backend="retinaface",
            model_name="VGG-Face",
        )
    )
    test_db_session.commit()
    test_db_session.refresh(face)
    return face
|
||||
|
||||
@ -1,64 +0,0 @@
|
||||
#!/usr/bin/env python3
|
||||
"""
|
||||
Debug face detection to see what's happening
|
||||
"""
|
||||
|
||||
import os
|
||||
from pathlib import Path
|
||||
from PIL import Image
|
||||
import numpy as np
|
||||
|
||||
# Suppress TensorFlow warnings
|
||||
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
|
||||
|
||||
def debug_face_detection():
    """Run several DeepFace detector backends on one sample image and print results."""
    from deepface import DeepFace

    sample = "demo_photos/testdeepface/2019-11-22_0011.jpg"
    if not os.path.exists(sample):
        print(f"Test image not found: {sample}")
        return

    print(f"Testing face detection on: {sample}")
    img = Image.open(sample)
    print(f"Image size: {img.size}")

    # Compare how each detector backend performs on the same input.
    for backend in ('opencv', 'mtcnn', 'retinaface', 'ssd'):
        print(f"\n--- Testing {backend} detector ---")
        try:
            detections = DeepFace.extract_faces(
                img_path=sample,
                detector_backend=backend,
                enforce_detection=False,
                align=True,
            )
            print(f"extract_faces found {len(detections)} faces")

            embeddings = DeepFace.represent(
                img_path=sample,
                model_name='ArcFace',
                detector_backend=backend,
                enforce_detection=False,
                align=True,
            )
            print(f"represent found {len(embeddings)} results")

            if embeddings:
                for idx, item in enumerate(embeddings):
                    region = item.get('region', {})
                    print(f"  Result {idx}: region={region}")

        except Exception as exc:  # debug tool: report and try the next backend
            print(f"Error with {backend}: {exc}")


if __name__ == "__main__":
    debug_face_detection()
|
||||
511
tests/test_api_auth.py
Normal file
511
tests/test_api_auth.py
Normal file
@ -0,0 +1,511 @@
|
||||
"""High priority authentication API tests."""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
from datetime import datetime, timedelta
|
||||
from typing import TYPE_CHECKING
|
||||
|
||||
import pytest
|
||||
from fastapi.testclient import TestClient
|
||||
|
||||
if TYPE_CHECKING:
|
||||
from sqlalchemy.orm import Session
|
||||
|
||||
|
||||
class TestLogin:
    """Tests for POST /api/v1/auth/login."""

    LOGIN_URL = "/api/v1/auth/login"

    def _login(self, client: TestClient, username: str, password: str):
        """POST the given credentials and return the raw response."""
        return client.post(
            self.LOGIN_URL,
            json={"username": username, "password": password},
        )

    def test_login_success_with_valid_credentials(
        self, test_client: TestClient, admin_user
    ):
        """Valid username/password returns 200 with both tokens."""
        resp = self._login(test_client, "testadmin", "testpass")

        assert resp.status_code == 200
        body = resp.json()
        assert "access_token" in body
        assert "refresh_token" in body
        assert "password_change_required" in body
        assert isinstance(body["access_token"], str)
        assert isinstance(body["refresh_token"], str)
        assert len(body["access_token"]) > 0
        assert len(body["refresh_token"]) > 0

    def test_login_failure_with_invalid_credentials(
        self, test_client: TestClient, admin_user
    ):
        """A wrong password is rejected with 401."""
        resp = self._login(test_client, "testadmin", "wrongpassword")

        assert resp.status_code == 401
        body = resp.json()
        assert "detail" in body
        assert "Incorrect username or password" in body["detail"]

    def test_login_with_inactive_user(
        self, test_client: TestClient, inactive_user
    ):
        """A deactivated account is rejected with 401."""
        resp = self._login(test_client, "inactiveuser", "testpass")

        assert resp.status_code == 401
        body = resp.json()
        assert "detail" in body
        assert "Account is inactive" in body["detail"]

    def test_login_without_password_hash(
        self, test_client: TestClient, test_db_session: Session
    ):
        """A user row with a NULL password hash cannot log in (401)."""
        from backend.db.models import User
        from backend.constants.roles import DEFAULT_USER_ROLE

        account = User(
            username="nopassword",
            email="nopassword@example.com",
            password_hash=None,  # deliberately unset
            is_admin=False,
            is_active=True,
            role=DEFAULT_USER_ROLE,
        )
        test_db_session.add(account)
        test_db_session.commit()

        resp = self._login(test_client, "nopassword", "anypassword")

        assert resp.status_code == 401
        body = resp.json()
        assert "detail" in body
        assert "Password not set" in body["detail"]

    def test_login_fallback_to_hardcoded_admin(self, test_client: TestClient):
        """The built-in admin/admin fallback works when the user is absent."""
        resp = self._login(test_client, "admin", "admin")

        assert resp.status_code == 200
        body = resp.json()
        assert "access_token" in body
        assert "refresh_token" in body
        assert body["password_change_required"] is False

    def test_login_updates_last_login(
        self, test_client: TestClient, test_db_session: Session, admin_user
    ):
        """A successful login stamps the user's last_login column."""
        import time

        previous = admin_user.last_login
        time.sleep(0.1)  # guarantee a measurable timestamp delta

        resp = self._login(test_client, "testadmin", "testpass")
        assert resp.status_code == 200

        test_db_session.refresh(admin_user)
        assert admin_user.last_login is not None
        if previous:
            assert admin_user.last_login > previous

    def test_login_missing_username(self, test_client: TestClient):
        """Omitting the username fails request validation (422)."""
        resp = test_client.post(self.LOGIN_URL, json={"password": "testpass"})
        assert resp.status_code == 422

    def test_login_missing_password(self, test_client: TestClient):
        """Omitting the password fails request validation (422)."""
        resp = test_client.post(self.LOGIN_URL, json={"username": "testadmin"})
        assert resp.status_code == 422
|
||||
|
||||
|
||||
class TestTokenRefresh:
    """Tests for POST /api/v1/auth/refresh."""

    REFRESH_URL = "/api/v1/auth/refresh"

    def test_refresh_token_success(
        self, test_client: TestClient, auth_token: str
    ):
        """A valid refresh token yields a new token pair."""
        login_resp = test_client.post(
            "/api/v1/auth/login",
            json={"username": "testadmin", "password": "testpass"},
        )
        refresh_token = login_resp.json()["refresh_token"]

        resp = test_client.post(
            self.REFRESH_URL, json={"refresh_token": refresh_token}
        )

        assert resp.status_code == 200
        body = resp.json()
        assert "access_token" in body
        assert "refresh_token" in body
        # A freshly minted access token must differ from the earlier one.
        assert body["access_token"] != auth_token

    def test_refresh_token_with_invalid_token(self, test_client: TestClient):
        """A garbage refresh token is rejected with 401."""
        resp = test_client.post(
            self.REFRESH_URL, json={"refresh_token": "invalid_token"}
        )

        assert resp.status_code == 401
        body = resp.json()
        assert "detail" in body
        assert "Invalid refresh token" in body["detail"]

    def test_refresh_token_with_access_token(
        self, test_client: TestClient, auth_token: str
    ):
        """Presenting an access token where a refresh token belongs fails (401)."""
        resp = test_client.post(
            self.REFRESH_URL, json={"refresh_token": auth_token}
        )

        assert resp.status_code == 401
        body = resp.json()
        assert "detail" in body
        assert "Invalid token type" in body["detail"]

    def test_refresh_token_expired(self, test_client: TestClient):
        """A malformed token string (stand-in for an expired JWT) gets 401."""
        resp = test_client.post(
            self.REFRESH_URL, json={"refresh_token": "expired.token.here"}
        )
        assert resp.status_code == 401

    def test_refresh_token_missing_token(self, test_client: TestClient):
        """An empty body fails request validation (422)."""
        resp = test_client.post(self.REFRESH_URL, json={})
        assert resp.status_code == 422
|
||||
|
||||
|
||||
class TestCurrentUser:
    """Tests for GET /api/v1/auth/me."""

    ME_URL = "/api/v1/auth/me"

    def test_get_current_user_info_authenticated(
        self, test_client: TestClient, auth_headers: dict, admin_user
    ):
        """A valid token returns the caller's profile."""
        resp = test_client.get(self.ME_URL, headers=auth_headers)

        assert resp.status_code == 200
        body = resp.json()
        assert body["username"] == "testadmin"
        assert body["is_admin"] is True
        assert "role" in body
        assert "permissions" in body

    def test_get_current_user_info_unauthenticated(
        self, test_client: TestClient
    ):
        """Requests without a token get 401."""
        resp = test_client.get(self.ME_URL)
        assert resp.status_code == 401

    def test_get_current_user_info_bootstrap_admin(
        self, test_client: TestClient, test_db_session: Session
    ):
        """With no admins in the DB, the hardcoded admin is bootstrapped."""
        from backend.db.models import User

        # Remove every admin so the bootstrap path is exercised.
        test_db_session.query(User).filter(User.is_admin == True).delete()  # noqa: E712 (SQLAlchemy comparison)
        test_db_session.commit()

        login_resp = test_client.post(
            "/api/v1/auth/login",
            json={"username": "admin", "password": "admin"},
        )
        token = login_resp.json()["access_token"]

        resp = test_client.get(
            self.ME_URL, headers={"Authorization": f"Bearer {token}"}
        )

        assert resp.status_code == 200
        body = resp.json()
        assert body["username"] == "admin"
        assert body["is_admin"] is True

    def test_get_current_user_info_role_permissions(
        self, test_client: TestClient, auth_headers: dict, admin_user
    ):
        """The profile payload carries a role and a permissions mapping."""
        resp = test_client.get(self.ME_URL, headers=auth_headers)

        assert resp.status_code == 200
        body = resp.json()
        assert "role" in body
        assert "permissions" in body
        assert isinstance(body["permissions"], dict)
|
||||
|
||||
|
||||
class TestPasswordChange:
    """Tests for POST /api/v1/auth/change-password."""

    CHANGE_URL = "/api/v1/auth/change-password"

    def test_change_password_success(
        self, test_client: TestClient, auth_headers: dict, admin_user
    ):
        """A correct current password lets the user set a new one."""
        resp = test_client.post(
            self.CHANGE_URL,
            headers=auth_headers,
            json={
                "current_password": "testpass",
                "new_password": "newtestpass123",
            },
        )

        assert resp.status_code == 200
        body = resp.json()
        assert body["success"] is True
        assert "Password changed successfully" in body["message"]

        # The new password must now authenticate.
        login_resp = test_client.post(
            "/api/v1/auth/login",
            json={"username": "testadmin", "password": "newtestpass123"},
        )
        assert login_resp.status_code == 200

    def test_change_password_with_wrong_current_password(
        self, test_client: TestClient, auth_headers: dict, admin_user
    ):
        """A wrong current password is rejected with 401."""
        resp = test_client.post(
            self.CHANGE_URL,
            headers=auth_headers,
            json={
                "current_password": "wrongpassword",
                "new_password": "newtestpass123",
            },
        )

        assert resp.status_code == 401
        body = resp.json()
        assert "detail" in body
        assert "Current password is incorrect" in body["detail"]

    def test_change_password_clears_password_change_required_flag(
        self, test_client: TestClient, test_db_session: Session
    ):
        """Changing the password resets password_change_required."""
        from backend.db.models import User
        from backend.utils.password import hash_password
        from backend.constants.roles import DEFAULT_USER_ROLE

        # A user forced to change its password on next login.
        account = User(
            username="changepassuser",
            email="changepass@example.com",
            password_hash=hash_password("oldpass"),
            is_admin=False,
            is_active=True,
            password_change_required=True,
            role=DEFAULT_USER_ROLE,
        )
        test_db_session.add(account)
        test_db_session.commit()

        login_resp = test_client.post(
            "/api/v1/auth/login",
            json={"username": "changepassuser", "password": "oldpass"},
        )
        token = login_resp.json()["access_token"]

        resp = test_client.post(
            self.CHANGE_URL,
            headers={"Authorization": f"Bearer {token}"},
            json={"current_password": "oldpass", "new_password": "newpass123"},
        )
        assert resp.status_code == 200

        # The force-change flag must be cleared.
        test_db_session.refresh(account)
        assert account.password_change_required is False

    def test_change_password_user_not_found(
        self, test_client: TestClient, test_db_session: Session
    ):
        """404 when the token's user no longer exists in the database."""
        from backend.db.models import User

        # Authenticate as the hardcoded admin, then delete its DB row (if any)
        # so the change-password lookup misses.
        login_resp = test_client.post(
            "/api/v1/auth/login",
            json={"username": "admin", "password": "admin"},
        )
        token = login_resp.json()["access_token"]

        existing = (
            test_db_session.query(User).filter(User.username == "admin").first()
        )
        if existing:
            test_db_session.delete(existing)
            test_db_session.commit()

        resp = test_client.post(
            self.CHANGE_URL,
            headers={"Authorization": f"Bearer {token}"},
            json={"current_password": "admin", "new_password": "newpass123"},
        )

        assert resp.status_code == 404
        assert "User not found" in resp.json()["detail"]

    def test_change_password_missing_fields(
        self, test_client: TestClient, auth_headers: dict
    ):
        """Either field missing fails request validation (422)."""
        for payload in (
            {"new_password": "newpass123"},      # missing current_password
            {"current_password": "testpass"},    # missing new_password
        ):
            resp = test_client.post(
                self.CHANGE_URL, headers=auth_headers, json=payload
            )
            assert resp.status_code == 422
|
||||
|
||||
|
||||
class TestAuthenticationMiddleware:
    """Token validation behaviour on protected endpoints."""

    def test_get_current_user_without_token(self, test_client: TestClient):
        """Protected endpoints reject requests lacking an Authorization header."""
        resp = test_client.get("/api/v1/photos")

        assert resp.status_code == 401
        assert "detail" in resp.json()

    def test_get_current_user_with_expired_token(self, test_client: TestClient):
        """An expired/invalid JWT is rejected with 401."""
        stale = (
            "eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9."
            "eyJzdWIiOiJhZG1pbiIsImV4cCI6MTYwOTQ1NjgwMH0.invalid"
        )
        resp = test_client.get(
            "/api/v1/photos",
            headers={"Authorization": f"Bearer {stale}"},
        )
        assert resp.status_code == 401

    def test_get_current_user_with_invalid_token_format(
        self, test_client: TestClient
    ):
        """A structurally malformed token is rejected with 401."""
        resp = test_client.get(
            "/api/v1/photos",
            headers={"Authorization": "Bearer not.a.valid.jwt.token"},
        )
        assert resp.status_code == 401

    def test_get_current_user_with_id_creates_user(
        self, test_client: TestClient, test_db_session: Session
    ):
        """Bootstrap path: the hardcoded admin can reach /auth/me.

        NOTE(review): this only verifies the request succeeds; whether a DB
        row is created depends on which endpoints use
        ``get_current_user_with_id`` — confirm against the auth middleware.
        """
        from backend.db.models import User

        # Start from a clean slate for the bootstrap user.
        test_db_session.query(User).filter(
            User.username == "bootstrapuser"
        ).delete()
        test_db_session.commit()

        login_resp = test_client.post(
            "/api/v1/auth/login",
            json={"username": "admin", "password": "admin"},
        )
        token = login_resp.json()["access_token"]

        resp = test_client.get(
            "/api/v1/auth/me",
            headers={"Authorization": f"Bearer {token}"},
        )
        assert resp.status_code == 200
|
||||
|
||||
703
tests/test_api_faces.py
Normal file
703
tests/test_api_faces.py
Normal file
@ -0,0 +1,703 @@
|
||||
"""High priority face identification API tests."""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
from typing import TYPE_CHECKING
|
||||
|
||||
import pytest
|
||||
from fastapi.testclient import TestClient
|
||||
|
||||
if TYPE_CHECKING:
|
||||
from sqlalchemy.orm import Session
|
||||
|
||||
|
||||
class TestIdentifyFace:
|
||||
"""Test face identification endpoint."""
|
||||
|
||||
def test_identify_face_with_existing_person(
    self,
    test_client: TestClient,
    auth_headers: dict,
    test_face,
    test_person,
    test_db_session: Session,
):
    """Identifying against an existing person links the face and stores an encoding."""
    resp = test_client.post(
        f"/api/v1/faces/{test_face.id}/identify",
        headers=auth_headers,
        json={"person_id": test_person.id},
    )

    assert resp.status_code == 200
    body = resp.json()
    assert body["person_id"] == test_person.id
    assert body["created_person"] is False
    assert test_face.id in body["identified_face_ids"]

    # The face row is now linked and stamped with the identifying user.
    test_db_session.refresh(test_face)
    assert test_face.person_id == test_person.id
    assert test_face.identified_by_user_id is not None

    # A matching person_encoding row was written.
    from backend.db.models import PersonEncoding

    row = (
        test_db_session.query(PersonEncoding)
        .filter(PersonEncoding.face_id == test_face.id)
        .first()
    )
    assert row is not None
    assert row.person_id == test_person.id
|
||||
|
||||
def test_identify_face_create_new_person(
    self,
    test_client: TestClient,
    auth_headers: dict,
    test_face,
    test_db_session: Session,
):
    """Supplying name fields (no person_id) creates a new person and links the face."""
    resp = test_client.post(
        f"/api/v1/faces/{test_face.id}/identify",
        headers=auth_headers,
        json={
            "first_name": "Jane",
            "last_name": "Smith",
            "middle_name": "Middle",
            "date_of_birth": "1995-05-15",
        },
    )

    assert resp.status_code == 200
    body = resp.json()
    assert body["created_person"] is True
    assert body["person_id"] is not None
    assert test_face.id in body["identified_face_ids"]

    # The new person row exists with the submitted names.
    from backend.db.models import Person

    person = (
        test_db_session.query(Person)
        .filter(Person.id == body["person_id"])
        .first()
    )
    assert person is not None
    assert person.first_name == "Jane"
    assert person.last_name == "Smith"
    assert person.middle_name == "Middle"

    # And the face is linked to it.
    test_db_session.refresh(test_face)
    assert test_face.person_id == person.id
|
||||
|
||||
def test_identify_face_with_additional_faces(
    self,
    test_client: TestClient,
    auth_headers: dict,
    test_face,
    test_face_2,
    test_person,
    test_db_session: Session,
):
    """Batch identification links the primary face plus additional_face_ids."""
    resp = test_client.post(
        f"/api/v1/faces/{test_face.id}/identify",
        headers=auth_headers,
        json={
            "person_id": test_person.id,
            "additional_face_ids": [test_face_2.id],
        },
    )

    assert resp.status_code == 200
    body = resp.json()
    assert len(body["identified_face_ids"]) == 2
    assert test_face.id in body["identified_face_ids"]
    assert test_face_2.id in body["identified_face_ids"]

    # Both face rows are now linked to the same person.
    test_db_session.refresh(test_face)
    test_db_session.refresh(test_face_2)
    assert test_face.person_id == test_person.id
    assert test_face_2.person_id == test_person.id
|
||||
|
||||
def test_identify_face_face_not_found(
    self,
    test_client: TestClient,
    auth_headers: dict,
    test_person,
):
    """Identifying a non-existent face id returns 404."""
    resp = test_client.post(
        "/api/v1/faces/99999/identify",
        headers=auth_headers,
        json={"person_id": test_person.id},
    )

    assert resp.status_code == 404
    assert "not found" in resp.json()["detail"].lower()
|
||||
|
||||
def test_identify_face_person_not_found(
    self,
    test_client: TestClient,
    auth_headers: dict,
    test_face,
):
    """An unknown person_id is rejected with 400."""
    resp = test_client.post(
        f"/api/v1/faces/{test_face.id}/identify",
        headers=auth_headers,
        json={"person_id": 99999},
    )

    assert resp.status_code == 400
    assert "person_id not found" in resp.json()["detail"]
|
||||
|
||||
def test_identify_face_tracks_user_id(
|
||||
self,
|
||||
test_client: TestClient,
|
||||
auth_headers: dict,
|
||||
test_face,
|
||||
test_person,
|
||||
admin_user,
|
||||
test_db_session: Session,
|
||||
):
|
||||
"""Verify user tracking for face identification."""
|
||||
response = test_client.post(
|
||||
f"/api/v1/faces/{test_face.id}/identify",
|
||||
headers=auth_headers,
|
||||
json={"person_id": test_person.id},
|
||||
)
|
||||
|
||||
assert response.status_code == 200
|
||||
|
||||
# Verify identified_by_user_id is set
|
||||
test_db_session.refresh(test_face)
|
||||
assert test_face.identified_by_user_id == admin_user.id
|
||||
|
||||
def test_identify_face_creates_person_encodings(
|
||||
self,
|
||||
test_client: TestClient,
|
||||
auth_headers: dict,
|
||||
test_face,
|
||||
test_person,
|
||||
test_db_session: Session,
|
||||
):
|
||||
"""Verify person_encodings are created for identified faces."""
|
||||
response = test_client.post(
|
||||
f"/api/v1/faces/{test_face.id}/identify",
|
||||
headers=auth_headers,
|
||||
json={"person_id": test_person.id},
|
||||
)
|
||||
|
||||
assert response.status_code == 200
|
||||
|
||||
# Verify person_encoding exists
|
||||
from backend.db.models import PersonEncoding
|
||||
encoding = test_db_session.query(PersonEncoding).filter(
|
||||
PersonEncoding.face_id == test_face.id,
|
||||
PersonEncoding.person_id == test_person.id,
|
||||
).first()
|
||||
assert encoding is not None
|
||||
assert encoding.encoding == test_face.encoding
|
||||
assert encoding.quality_score == test_face.quality_score
|
||||
assert encoding.detector_backend == test_face.detector_backend
|
||||
assert encoding.model_name == test_face.model_name
|
||||
|
||||
def test_identify_face_requires_name_for_new_person(
|
||||
self,
|
||||
test_client: TestClient,
|
||||
auth_headers: dict,
|
||||
test_face,
|
||||
):
|
||||
"""Verify validation when creating new person without required fields."""
|
||||
# Missing first_name
|
||||
response = test_client.post(
|
||||
f"/api/v1/faces/{test_face.id}/identify",
|
||||
headers=auth_headers,
|
||||
json={"last_name": "Smith"},
|
||||
)
|
||||
assert response.status_code == 400
|
||||
assert "first_name and last_name are required" in response.json()["detail"]
|
||||
|
||||
# Missing last_name
|
||||
response = test_client.post(
|
||||
f"/api/v1/faces/{test_face.id}/identify",
|
||||
headers=auth_headers,
|
||||
json={"first_name": "Jane"},
|
||||
)
|
||||
assert response.status_code == 400
|
||||
assert "first_name and last_name are required" in response.json()["detail"]
|
||||
|
||||
def test_identify_face_unauthenticated(
|
||||
self,
|
||||
test_client: TestClient,
|
||||
test_face,
|
||||
test_person,
|
||||
):
|
||||
"""Verify 401 when not authenticated."""
|
||||
response = test_client.post(
|
||||
f"/api/v1/faces/{test_face.id}/identify",
|
||||
json={"person_id": test_person.id},
|
||||
)
|
||||
|
||||
assert response.status_code == 401
|
||||
|
||||
|
||||
class TestGetSimilarFaces:
    """Tests for the /faces/{id}/similar endpoint."""

    def test_get_similar_faces_success(
        self,
        test_client: TestClient,
        test_face,
        test_face_2,
        test_db_session: Session,
    ):
        """The endpoint echoes the base face id and returns a candidate list."""
        resp = test_client.get(f"/api/v1/faces/{test_face.id}/similar")

        assert resp.status_code == 200
        body = resp.json()
        assert body["base_face_id"] == test_face.id
        assert "items" in body
        assert isinstance(body["items"], list)

    def test_get_similar_faces_include_excluded(
        self,
        test_client: TestClient,
        test_face,
        test_db_session: Session,
    ):
        """Both values of the include_excluded query flag are accepted."""
        from backend.db.models import Face, Photo
        import numpy as np

        # Attach an excluded face to the same photo as the base face.
        photo = test_db_session.query(Photo).filter(
            Photo.id == test_face.photo_id
        ).first()
        test_db_session.add(
            Face(
                photo_id=photo.id,
                person_id=None,
                encoding=np.random.rand(128).astype(np.float32).tobytes(),
                location='{"x": 50, "y": 50, "w": 100, "h": 100}',
                quality_score=0.70,
                face_confidence=0.85,
                detector_backend="retinaface",
                model_name="VGG-Face",
                excluded=True,
            )
        )
        test_db_session.commit()

        base_url = f"/api/v1/faces/{test_face.id}/similar"
        # Excluded faces filtered out when the flag is false ...
        resp = test_client.get(f"{base_url}?include_excluded=false")
        assert resp.status_code == 200
        # ... and explicitly allowed when it is true.
        resp = test_client.get(f"{base_url}?include_excluded=true")
        assert resp.status_code == 200

    def test_get_similar_faces_face_not_found(
        self,
        test_client: TestClient,
    ):
        """An unknown face id must produce HTTP 404."""
        resp = test_client.get("/api/v1/faces/99999/similar")

        assert resp.status_code == 404
        assert "not found" in resp.json()["detail"].lower()

    def test_get_similar_faces_returns_similarity_scores(
        self,
        test_client: TestClient,
        test_face,
    ):
        """Every returned item carries a numeric similarity score in [0, 1]."""
        resp = test_client.get(f"/api/v1/faces/{test_face.id}/similar")

        assert resp.status_code == 200
        items = resp.json()["items"]

        # Shape checks only apply when at least one candidate was returned.
        if len(items) > 0:
            first = items[0]
            assert "id" in first
            assert "photo_id" in first
            assert "similarity" in first
            assert "quality_score" in first
            assert isinstance(first["similarity"], (int, float))
            assert 0 <= first["similarity"] <= 1
class TestBatchSimilarity:
    """Tests for the /faces/batch-similarity endpoint."""

    def test_batch_similarity_success(
        self,
        test_client: TestClient,
        test_face,
        test_face_2,
    ):
        """A valid request returns a list of similarity pairs."""
        resp = test_client.post(
            "/api/v1/faces/batch-similarity",
            json={"face_ids": [test_face.id, test_face_2.id]},
        )

        assert resp.status_code == 200
        body = resp.json()
        assert "pairs" in body
        assert isinstance(body["pairs"], list)

    def test_batch_similarity_with_min_confidence(
        self,
        test_client: TestClient,
        test_face,
        test_face_2,
    ):
        """Pairs below min_confidence are filtered out of the response."""
        resp = test_client.post(
            "/api/v1/faces/batch-similarity",
            json={
                "face_ids": [test_face.id, test_face_2.id],
                "min_confidence": 0.5,
            },
        )

        assert resp.status_code == 200
        body = resp.json()
        assert "pairs" in body

        # Every surviving pair must clear the 0.5 (= 50%) threshold.
        for entry in body["pairs"]:
            assert entry["confidence_pct"] >= 50  # 0.5 * 100

    def test_batch_similarity_empty_list(
        self,
        test_client: TestClient,
    ):
        """An empty face_ids list is valid and yields no pairs."""
        resp = test_client.post(
            "/api/v1/faces/batch-similarity",
            json={"face_ids": []},
        )

        assert resp.status_code == 200
        assert resp.json()["pairs"] == []

    def test_batch_similarity_invalid_face_ids(
        self,
        test_client: TestClient,
        test_face,
    ):
        """Unknown face ids never cause a server error."""
        resp = test_client.post(
            "/api/v1/faces/batch-similarity",
            json={"face_ids": [test_face.id, 99999]},
        )

        # Implementation-dependent: invalid ids may be silently dropped (200)
        # or rejected outright (400/404).
        assert resp.status_code in [200, 400, 404]
class TestGetUnidentifiedFaces:
    """Tests for the /faces/unidentified listing endpoint."""

    def test_get_unidentified_faces_success(
        self,
        test_client: TestClient,
        test_face,
        test_face_2,
    ):
        """The listing returns a paginated envelope covering our fixtures."""
        resp = test_client.get("/api/v1/faces/unidentified")

        assert resp.status_code == 200
        body = resp.json()
        assert "items" in body
        assert "page" in body
        assert "page_size" in body
        assert "total" in body
        assert isinstance(body["items"], list)
        # Both unidentified fixture faces must be counted.
        assert body["total"] >= 2

    def test_get_unidentified_faces_with_pagination(
        self,
        test_client: TestClient,
        test_face,
        test_face_2,
    ):
        """page/page_size query parameters bound the result set."""
        resp = test_client.get("/api/v1/faces/unidentified?page=1&page_size=1")

        assert resp.status_code == 200
        body = resp.json()
        assert body["page"] == 1
        assert body["page_size"] == 1
        assert len(body["items"]) <= 1

    def test_get_unidentified_faces_with_quality_filter(
        self,
        test_client: TestClient,
        test_face,
    ):
        """min_quality removes faces scored below the threshold."""
        resp = test_client.get("/api/v1/faces/unidentified?min_quality=0.9")

        assert resp.status_code == 200
        body = resp.json()

        # Every returned face must meet the requested minimum.
        for entry in body["items"]:
            assert entry["quality_score"] >= 0.9

    def test_get_unidentified_faces_excludes_identified(
        self,
        test_client: TestClient,
        test_face,
        test_person,
        auth_headers: dict,
        test_db_session: Session,
    ):
        """Once identified, a face must drop out of the unidentified listing."""
        before = test_client.get("/api/v1/faces/unidentified")
        assert before.status_code == 200
        count_before = before.json()["total"]

        # Link the fixture face to a person via the identify endpoint.
        identify = test_client.post(
            f"/api/v1/faces/{test_face.id}/identify",
            headers=auth_headers,
            json={"person_id": test_person.id},
        )
        assert identify.status_code == 200

        after = test_client.get("/api/v1/faces/unidentified")
        assert after.status_code == 200
        assert after.json()["total"] < count_before

    def test_get_unidentified_faces_with_date_filters(
        self,
        test_client: TestClient,
        test_face,
    ):
        """Date-range filters are accepted and keep the envelope shape."""
        resp = test_client.get(
            "/api/v1/faces/unidentified?date_taken_from=2024-01-01&date_taken_to=2024-12-31"
        )

        assert resp.status_code == 200
        assert "items" in resp.json()

    def test_get_unidentified_faces_invalid_date_format(
        self,
        test_client: TestClient,
    ):
        """A malformed date must not crash the endpoint."""
        resp = test_client.get(
            "/api/v1/faces/unidentified?date_taken_from=invalid-date"
        )

        # Either ignored (200 with unfiltered results) or rejected (400).
        assert resp.status_code in [200, 400]
class TestAutoMatch:
    """Tests for the /faces/auto-match endpoint.

    auto-match groups unidentified faces against known people; the response
    envelope carries grouped candidates plus aggregate counters.
    """

    def test_auto_match_faces_success(
        self,
        test_client: TestClient,
        test_face,
        identified_face,
        test_person,
    ):
        """A basic run returns grouped people and aggregate match counters."""
        response = test_client.post(
            "/api/v1/faces/auto-match",
            json={"tolerance": 0.6},
        )

        assert response.status_code == 200
        data = response.json()
        assert "people" in data
        assert "total_people" in data
        assert "total_matches" in data
        assert isinstance(data["people"], list)

    def test_auto_match_faces_with_tolerance(
        self,
        test_client: TestClient,
        test_face,
        identified_face,
        test_person,
    ):
        """Both loose and strict tolerance values must be accepted.

        We deliberately do NOT assert that lower tolerance yields fewer
        matches: that ordering depends on the actual face encodings and is
        probabilistic. Instead we verify both calls succeed and return
        well-formed payloads (previously the parsed bodies were bound to
        unused locals and never checked at all).
        """
        # High tolerance: permissive matching.
        response_high = test_client.post(
            "/api/v1/faces/auto-match",
            json={"tolerance": 0.8},
        )
        assert response_high.status_code == 200

        # Low tolerance: strict matching.
        response_low = test_client.post(
            "/api/v1/faces/auto-match",
            json={"tolerance": 0.4},
        )
        assert response_low.status_code == 200

        # Structural checks replace the former dead `data_high`/`data_low`
        # assignments, so the JSON round-trip is actually exercised.
        for response in (response_high, response_low):
            data = response.json()
            assert "total_matches" in data
            assert data["total_matches"] >= 0

    def test_auto_match_faces_auto_accept_enabled(
        self,
        test_client: TestClient,
        test_face,
        identified_face,
        test_person,
    ):
        """auto_accept=True adds acceptance bookkeeping to the response."""
        response = test_client.post(
            "/api/v1/faces/auto-match",
            json={
                "tolerance": 0.6,
                "auto_accept": True,
                "auto_accept_threshold": 0.7,
            },
        )

        assert response.status_code == 200
        data = response.json()
        assert data["auto_accepted"] is True
        assert "auto_accepted_faces" in data
        assert "skipped_persons" in data
        assert "skipped_matches" in data
class TestAcceptMatches:
    """Tests for /people/{id}/accept-matches (accepting auto-match results)."""

    def test_accept_matches_success(
        self,
        test_client: TestClient,
        auth_headers: dict,
        test_face,
        test_face_2,
        test_person,
    ):
        """Accepting two candidate faces links them to the person."""
        resp = test_client.post(
            f"/api/v1/people/{test_person.id}/accept-matches",
            headers=auth_headers,
            json={"face_ids": [test_face.id, test_face_2.id]},
        )

        assert resp.status_code == 200
        body = resp.json()
        assert body["person_id"] == test_person.id
        assert "identified_face_ids" in body

    def test_accept_matches_tracks_user_id(
        self,
        test_client: TestClient,
        auth_headers: dict,
        test_face,
        test_person,
        admin_user,
        test_db_session: Session,
    ):
        """The accepting user's id is stamped onto the face row."""
        resp = test_client.post(
            f"/api/v1/people/{test_person.id}/accept-matches",
            headers=auth_headers,
            json={"face_ids": [test_face.id]},
        )
        assert resp.status_code == 200

        # Reload the row; the API should have recorded who accepted the match.
        test_db_session.refresh(test_face)
        assert test_face.identified_by_user_id == admin_user.id

    def test_accept_matches_creates_person_encodings(
        self,
        test_client: TestClient,
        auth_headers: dict,
        test_face,
        test_person,
        test_db_session: Session,
    ):
        """Accepting a match materialises a person_encodings row."""
        resp = test_client.post(
            f"/api/v1/people/{test_person.id}/accept-matches",
            headers=auth_headers,
            json={"face_ids": [test_face.id]},
        )
        assert resp.status_code == 200

        from backend.db.models import PersonEncoding

        row = (
            test_db_session.query(PersonEncoding)
            .filter(
                PersonEncoding.face_id == test_face.id,
                PersonEncoding.person_id == test_person.id,
            )
            .first()
        )
        assert row is not None

    def test_accept_matches_person_not_found(
        self,
        test_client: TestClient,
        auth_headers: dict,
        test_face,
    ):
        """An unknown person id must produce HTTP 404."""
        resp = test_client.post(
            "/api/v1/people/99999/accept-matches",
            headers=auth_headers,
            json={"face_ids": [test_face.id]},
        )

        assert resp.status_code == 404

    def test_accept_matches_face_not_found(
        self,
        test_client: TestClient,
        auth_headers: dict,
        test_person,
    ):
        """Unknown face ids must not cause a server error."""
        resp = test_client.post(
            f"/api/v1/people/{test_person.id}/accept-matches",
            headers=auth_headers,
            json={"face_ids": [99999]},
        )

        # Implementations may skip missing faces (200) or reject (400/404).
        assert resp.status_code in [200, 400, 404]
@ -1,399 +0,0 @@
|
||||
#!/usr/bin/env python3
|
||||
"""
|
||||
DeepFace Only Test Script
|
||||
|
||||
Tests only DeepFace on a folder of photos for faster testing.
|
||||
|
||||
Usage:
|
||||
python test_deepface_only.py /path/to/photos [--save-crops] [--verbose]
|
||||
|
||||
Example:
|
||||
python test_deepface_only.py demo_photos/ --save-crops --verbose
|
||||
"""
|
||||
|
||||
import os
|
||||
import sys
|
||||
import time
|
||||
import argparse
|
||||
from pathlib import Path
|
||||
from typing import List, Dict, Tuple, Optional
|
||||
import numpy as np
|
||||
import pandas as pd
|
||||
from PIL import Image
|
||||
|
||||
# DeepFace library only
|
||||
from deepface import DeepFace
|
||||
|
||||
# Supported image formats
# Lower-cased file extensions that get_image_files() will pick up when
# scanning a folder; everything else is silently skipped.
SUPPORTED_FORMATS = {'.jpg', '.jpeg', '.png', '.bmp', '.tiff', '.tif'}
|
||||
|
||||
class DeepFaceTester:
    """Test DeepFace face recognition.

    Runs DeepFace (ArcFace embeddings + retinaface detection) over a folder
    of photos, builds a pairwise cosine-distance matrix, reports the top
    matches per face, and optionally saves face crops / the matrix to disk.
    Per-image results accumulate in ``self.results`` across calls.
    """

    def __init__(self, verbose: bool = False) -> None:
        # When True, log() also prints INFO-level messages (ERROR always prints).
        self.verbose = verbose
        # Accumulator shared by run_test(): detected faces, per-image timings,
        # and the raw embedding vectors (parallel to 'faces').
        self.results = {'faces': [], 'times': [], 'encodings': []}

    def log(self, message: str, level: str = "INFO") -> None:
        """Print log message with timestamp.

        ERROR-level messages are always emitted; other levels only in
        verbose mode.
        """
        if self.verbose or level == "ERROR":
            timestamp = time.strftime("%H:%M:%S")
            print(f"[{timestamp}] {level}: {message}")

    def get_image_files(self, folder_path: str) -> List[str]:
        """Get all supported image files from folder (recursive).

        Returns a sorted list of absolute/relative path strings.
        Raises FileNotFoundError when the folder does not exist.
        """
        folder = Path(folder_path)
        if not folder.exists():
            raise FileNotFoundError(f"Folder not found: {folder_path}")

        image_files = []
        # rglob walks subdirectories too; extension check is case-insensitive.
        for file_path in folder.rglob("*"):
            if file_path.is_file() and file_path.suffix.lower() in SUPPORTED_FORMATS:
                image_files.append(str(file_path))

        self.log(f"Found {len(image_files)} image files")
        return sorted(image_files)

    def process_with_deepface(self, image_path: str) -> Dict:
        """Process image with deepface library.

        Returns a dict with keys 'faces' (per-face metadata dicts),
        'encodings' (embedding arrays, parallel to 'faces') and
        'processing_time' (seconds). On any DeepFace error the failure is
        logged and an empty result is returned instead of raising.
        """
        start_time = time.time()

        try:
            # Use DeepFace to detect and encode faces
            results = DeepFace.represent(
                img_path=image_path,
                model_name='ArcFace',  # Best accuracy model
                detector_backend='retinaface',  # Best detection
                enforce_detection=False,  # Don't fail if no faces
                align=True  # Face alignment for better accuracy
            )

            if not results:
                return {'faces': [], 'encodings': [], 'processing_time': time.time() - start_time}

            # Convert to our format
            faces = []
            encodings = []

            for i, result in enumerate(results):
                # Extract face region info
                region = result.get('region', {})
                face_data = {
                    'image_path': image_path,
                    'face_id': f"df_{Path(image_path).stem}_{i}",
                    # (top, right, bottom, left) ordering — matches the
                    # face_recognition-style bounding box convention.
                    'location': (region.get('y', 0), region.get('x', 0) + region.get('w', 0),
                                 region.get('y', 0) + region.get('h', 0), region.get('x', 0)),
                    'bbox': region,
                    'encoding': np.array(result['embedding'])
                }
                faces.append(face_data)
                encodings.append(np.array(result['embedding']))

            processing_time = time.time() - start_time
            self.log(f"deepface: Found {len(faces)} faces in {processing_time:.2f}s")

            return {
                'faces': faces,
                'encodings': encodings,
                'processing_time': processing_time
            }

        except Exception as e:
            # Best-effort: a bad image should not abort the whole batch.
            self.log(f"deepface error on {image_path}: {e}", "ERROR")
            return {'faces': [], 'encodings': [], 'processing_time': time.time() - start_time}

    def calculate_similarity_matrix(self, encodings: List[np.ndarray]) -> np.ndarray:
        """Calculate similarity matrix between all face encodings using cosine distance.

        Entry [i, j] is the cosine DISTANCE (1 - cosine similarity) between
        encodings i and j; the diagonal is 0. Returns an empty array for an
        empty input. O(n^2) pairwise — fine for test-sized folders.
        """
        n_faces = len(encodings)
        if n_faces == 0:
            return np.array([])

        similarity_matrix = np.zeros((n_faces, n_faces))

        for i in range(n_faces):
            for j in range(n_faces):
                if i == j:
                    similarity_matrix[i, j] = 0.0  # Same face
                else:
                    # Use cosine distance for ArcFace embeddings
                    enc1_norm = encodings[i] / np.linalg.norm(encodings[i])
                    enc2_norm = encodings[j] / np.linalg.norm(encodings[j])
                    cosine_sim = np.dot(enc1_norm, enc2_norm)
                    cosine_distance = 1 - cosine_sim
                    similarity_matrix[i, j] = cosine_distance

        return similarity_matrix

    def find_top_matches(self, similarity_matrix: np.ndarray, faces: List[Dict],
                         top_k: int = 5) -> List[Dict]:
        """Find top matches for each face.

        For each face, sorts all pairwise distances ascending and reports up
        to ``top_k`` nearest other faces with a percentage confidence.
        NOTE(review): skipping index 0 of the sort assumes the self-distance
        (0.0) sorts first — true unless another pair ties at exactly 0.
        """
        top_matches = []

        for i, face in enumerate(faces):
            # Guard against faces/matrix length mismatch.
            if i >= similarity_matrix.shape[0]:
                continue

            # Get distances to all other faces
            distances = similarity_matrix[i, :]

            # Find top matches (excluding self) - lower cosine distance = more similar
            sorted_indices = np.argsort(distances)

            matches = []
            for idx in sorted_indices[1:top_k+1]:  # Skip self (index 0)
                if idx < len(faces):
                    other_face = faces[idx]
                    distance = distances[idx]

                    # Convert to confidence percentage for display
                    confidence = max(0, (1 - distance) * 100)

                    matches.append({
                        'face_id': other_face['face_id'],
                        'image_path': other_face['image_path'],
                        'distance': distance,
                        'confidence': confidence
                    })

            top_matches.append({
                'query_face': face,
                'matches': matches
            })

        return top_matches

    def save_face_crops(self, faces: List[Dict], output_dir: str) -> None:
        """Save face crops for manual inspection.

        Writes one padded JPEG per face to <output_dir>/face_crops/deepface/.
        Individual crop failures are logged and skipped.
        """
        crops_dir = Path(output_dir) / "face_crops" / "deepface"
        crops_dir.mkdir(parents=True, exist_ok=True)

        for face in faces:
            try:
                # Load original image
                image = Image.open(face['image_path'])

                # Extract face region
                bbox = face['bbox']
                left = bbox.get('x', 0)
                top = bbox.get('y', 0)
                right = left + bbox.get('w', 0)
                bottom = top + bbox.get('h', 0)

                # Add padding
                padding = 20
                left = max(0, left - padding)
                top = max(0, top - padding)
                # Clamp to the image bounds so crop() never overshoots.
                right = min(image.width, right + padding)
                bottom = min(image.height, bottom + padding)

                # Crop and save
                face_crop = image.crop((left, top, right, bottom))
                crop_path = crops_dir / f"{face['face_id']}.jpg"
                face_crop.save(crop_path, "JPEG", quality=95)

            except Exception as e:
                self.log(f"Error saving crop for {face['face_id']}: {e}", "ERROR")

    def save_similarity_matrix(self, matrix: np.ndarray, faces: List[Dict], output_dir: str) -> None:
        """Save similarity matrix as CSV file.

        Rows/columns are labelled by face_id; no-op when the matrix is empty.
        """
        matrices_dir = Path(output_dir) / "similarity_matrices"
        matrices_dir.mkdir(parents=True, exist_ok=True)

        if matrix.size > 0:
            df = pd.DataFrame(matrix,
                              index=[f['face_id'] for f in faces],
                              columns=[f['face_id'] for f in faces])
            df.to_csv(matrices_dir / "deepface_similarity.csv")

    def generate_report(self, results: Dict, matches: List[Dict],
                        output_dir: Optional[str] = None) -> str:
        """Generate analysis report.

        Builds a plain-text summary (counts, timings, high-confidence match
        list, top matches per face) and, when ``output_dir`` is given, also
        writes it to <output_dir>/deepface_report.txt. Returns the report text.
        """
        report_lines = []
        report_lines.append("=" * 60)
        report_lines.append("DEEPFACE FACE RECOGNITION ANALYSIS")
        report_lines.append("=" * 60)
        report_lines.append("")

        # Summary statistics
        total_faces = len(results['faces'])
        total_time = sum(results['times'])

        report_lines.append("SUMMARY STATISTICS:")
        report_lines.append(f"  Total faces detected: {total_faces}")
        report_lines.append(f"  Total processing time: {total_time:.2f}s")
        if total_faces > 0:
            report_lines.append(f"  Average time per face: {total_time/total_faces:.2f}s")
        report_lines.append("")

        # High confidence matches analysis
        # Local helper: flatten (query, match) pairs at/above the threshold (%).
        def analyze_high_confidence_matches(matches: List[Dict], threshold: float = 70.0):
            high_conf_matches = []
            for match_data in matches:
                for match in match_data['matches']:
                    if match['confidence'] >= threshold:
                        high_conf_matches.append({
                            'query': match_data['query_face']['face_id'],
                            'match': match['face_id'],
                            'confidence': match['confidence'],
                            'query_image': match_data['query_face']['image_path'],
                            'match_image': match['image_path']
                        })
            return high_conf_matches

        high_conf = analyze_high_confidence_matches(matches)

        report_lines.append("HIGH CONFIDENCE MATCHES (≥70%):")
        report_lines.append(f"  Found: {len(high_conf)} matches")
        report_lines.append("")

        # Show top matches for manual inspection
        report_lines.append("TOP MATCHES FOR MANUAL INSPECTION:")
        report_lines.append("")

        for i, match_data in enumerate(matches[:5]):  # Show first 5 faces
            query_face = match_data['query_face']
            report_lines.append(f"Query: {query_face['face_id']} ({Path(query_face['image_path']).name})")
            for match in match_data['matches'][:3]:  # Top 3 matches
                report_lines.append(f"  → {match['face_id']}: {match['confidence']:.1f}% ({Path(match['image_path']).name})")
            report_lines.append("")

        # Analysis
        report_lines.append("ANALYSIS:")
        # Heuristic verdict: ratio of high-confidence pairs to detected faces.
        if len(high_conf) > total_faces * 0.5:
            report_lines.append("  ⚠️ Many high-confidence matches found")
            report_lines.append("  This may indicate good face detection or potential false positives")
        elif len(high_conf) == 0:
            report_lines.append("  ✅ No high-confidence matches found")
            report_lines.append("  This suggests good separation between different people")
        else:
            report_lines.append("  📊 Moderate number of high-confidence matches")
            report_lines.append("  Manual inspection recommended for verification")

        report_lines.append("")
        report_lines.append("=" * 60)

        report_text = "\n".join(report_lines)

        # Save report if output directory specified
        if output_dir:
            report_path = Path(output_dir) / "deepface_report.txt"
            with open(report_path, 'w') as f:
                f.write(report_text)
            self.log(f"Report saved to: {report_path}")

        return report_text

    def run_test(self, folder_path: str, save_crops: bool = False,
                 save_matrices: bool = False) -> Dict:
        """Run the DeepFace face recognition test.

        Pipeline: collect images → embed faces → pairwise distance matrix →
        top-k matches → optional crop/matrix export → printed report.
        Returns a dict with 'faces', 'matches' and 'matrix'.
        Raises ValueError when the folder contains no supported images.
        """
        self.log(f"Starting DeepFace test on: {folder_path}")

        # Get image files
        image_files = self.get_image_files(folder_path)
        if not image_files:
            raise ValueError("No image files found in the specified folder")

        # Create output directory if needed
        output_dir = None
        if save_crops or save_matrices:
            # Outputs live next to (not inside) the scanned folder.
            output_dir = Path(folder_path).parent / "test_results"
            output_dir.mkdir(exist_ok=True)

        # Process images with DeepFace
        self.log("Processing images with DeepFace...")
        for image_path in image_files:
            result = self.process_with_deepface(image_path)
            self.results['faces'].extend(result['faces'])
            self.results['times'].append(result['processing_time'])
            self.results['encodings'].extend(result['encodings'])

        # Calculate similarity matrix
        self.log("Calculating similarity matrix...")
        matrix = self.calculate_similarity_matrix(self.results['encodings'])

        # Find top matches
        matches = self.find_top_matches(matrix, self.results['faces'])

        # Save outputs if requested
        if save_crops and output_dir:
            self.log("Saving face crops...")
            self.save_face_crops(self.results['faces'], str(output_dir))

        if save_matrices and output_dir:
            self.log("Saving similarity matrix...")
            self.save_similarity_matrix(matrix, self.results['faces'], str(output_dir))

        # Generate and display report
        report = self.generate_report(
            self.results, matches, str(output_dir) if output_dir else None
        )

        print(report)

        return {
            'faces': self.results['faces'],
            'matches': matches,
            'matrix': matrix
        }
||||
def main():
    """Main CLI entry point.

    Parses arguments, validates the input folder, verifies the deepface
    dependency is importable, runs the DeepFaceTester pipeline, and exits
    with status 1 on any failure.
    """
    parser = argparse.ArgumentParser(
        description="Test DeepFace on a folder of photos",
        formatter_class=argparse.RawDescriptionHelpFormatter,
        epilog="""
Examples:
  python test_deepface_only.py demo_photos/
  python test_deepface_only.py demo_photos/ --save-crops --verbose
  python test_deepface_only.py demo_photos/ --save-matrices --save-crops
        """
    )

    parser.add_argument('folder', help='Path to folder containing photos to test')
    parser.add_argument('--save-crops', action='store_true',
                        help='Save face crops for manual inspection')
    parser.add_argument('--save-matrices', action='store_true',
                        help='Save similarity matrix as CSV file')
    parser.add_argument('--verbose', '-v', action='store_true',
                        help='Enable verbose logging')

    args = parser.parse_args()

    # Validate folder path up front for a friendly error message.
    if not os.path.exists(args.folder):
        print(f"Error: Folder not found: {args.folder}")
        sys.exit(1)

    # Check dependencies explicitly so the user gets an install hint rather
    # than a raw traceback.
    try:
        from deepface import DeepFace
    except ImportError as e:
        print(f"Error: Missing required dependency: {e}")
        print("Please install with: pip install deepface")
        sys.exit(1)

    # Run test. run_test() prints its own report; its return value was
    # previously bound to an unused local, so we drop the binding.
    try:
        tester = DeepFaceTester(verbose=args.verbose)
        tester.run_test(
            args.folder,
            save_crops=args.save_crops,
            save_matrices=args.save_matrices
        )

        print("\n✅ DeepFace test completed successfully!")
        if args.save_crops or args.save_matrices:
            print(f"📁 Results saved to: {Path(args.folder).parent / 'test_results'}")

    except Exception as e:
        print(f"❌ Test failed: {e}")
        if args.verbose:
            import traceback
            traceback.print_exc()
        sys.exit(1)
||||
# Allow the script to be executed directly from the command line.
if __name__ == "__main__":
    main()
|
||||
@ -1,115 +0,0 @@
|
||||
#!/usr/bin/env python3
|
||||
"""
|
||||
Test script to debug EXIF date extraction from photos.
|
||||
Run this to see what EXIF data is available in your photos.
|
||||
"""
|
||||
|
||||
import sys
|
||||
import os
|
||||
from pathlib import Path
|
||||
from PIL import Image
|
||||
from datetime import datetime
|
||||
|
||||
# Add src to path
|
||||
sys.path.insert(0, str(Path(__file__).parent.parent))
|
||||
|
||||
from src.web.services.photo_service import extract_exif_date
|
||||
|
||||
|
||||
def test_exif_extraction(image_path: str):
    """Probe a single image for EXIF date information and print findings.

    Tries both the modern ``Image.getexif()`` and the deprecated
    ``_getexif()`` APIs, checks the common date tags (306/36867/36868) in
    the top-level IFD and the EXIF sub-IFD, then runs the project's own
    ``extract_exif_date`` for comparison. Purely diagnostic: prints its
    results and returns None.
    """
    print(f"\n{'='*60}")
    print(f"Testing: {image_path}")
    print(f"{'='*60}")

    if not os.path.exists(image_path):
        print(f"❌ File not found: {image_path}")
        return

    # Try to open with PIL
    try:
        with Image.open(image_path) as img:
            print(f"✅ Image opened successfully")
            print(f" Format: {img.format}")
            print(f" Size: {img.size}")

            # Modern API: Image.getexif() returns an Exif mapping of tag-id -> value.
            exifdata = None
            try:
                exifdata = img.getexif()
                print(f"✅ getexif() worked: {len(exifdata) if exifdata else 0} tags")
            except Exception as e:
                print(f"❌ getexif() failed: {e}")

            # Deprecated API kept for comparison; not all image plugins provide it.
            old_exif = None
            try:
                if hasattr(img, '_getexif'):
                    old_exif = img._getexif()
                    print(f"✅ _getexif() worked: {len(old_exif) if old_exif else 0} tags")
                else:
                    print(f"⚠️ _getexif() not available")
            except Exception as e:
                print(f"❌ _getexif() failed: {e}")

            # Standard EXIF date tag ids and their names.
            date_tags = {
                306: "DateTime",
                36867: "DateTimeOriginal",
                36868: "DateTimeDigitized",
            }

            print(f"\n📅 Checking date tags:")
            found_any = False

            # Check the top-level IFD first.
            if exifdata:
                for tag_id, tag_name in date_tags.items():
                    try:
                        if tag_id in exifdata:
                            value = exifdata[tag_id]
                            print(f" ✅ {tag_name} ({tag_id}): {value}")
                            found_any = True
                        else:
                            print(f" ❌ {tag_name} ({tag_id}): Not found")
                    except Exception as e:
                        print(f" ⚠️ {tag_name} ({tag_id}): Error - {e}")

            # DateTimeOriginal/DateTimeDigitized usually live in the EXIF
            # sub-IFD (pointer tag 0x8769), not the top-level IFD.
            if exifdata and hasattr(exifdata, 'get_ifd'):
                try:
                    exif_ifd = exifdata.get_ifd(0x8769)
                    if exif_ifd:
                        print(f"\n📋 EXIF IFD found: {len(exif_ifd)} tags")
                        for tag_id, tag_name in date_tags.items():
                            if tag_id in exif_ifd:
                                value = exif_ifd[tag_id]
                                print(f" ✅ {tag_name} ({tag_id}) in IFD: {value}")
                                found_any = True
                except Exception as e:
                    print(f" ⚠️ EXIF IFD access failed: {e}")

            if not found_any:
                print(f" ⚠️ No date tags found in EXIF data")

            # Finally, compare with the project's own extraction helper.
            print(f"\n🔍 Testing extract_exif_date():")
            extracted_date = extract_exif_date(image_path)
            if extracted_date:
                print(f" ✅ Extracted date: {extracted_date}")
            else:
                print(f" ❌ No date extracted")

    except Exception as e:
        print(f"❌ Error opening image: {e}")
|
||||
|
||||
|
||||
if __name__ == "__main__":
    # CLI entry: require exactly one image path argument, then run the probe.
    if len(sys.argv) < 2:
        print("Usage: python test_exif_extraction.py <image_path>")
        print("\nExample:")
        print(" python test_exif_extraction.py /path/to/photo.jpg")
        sys.exit(1)

    image_path = sys.argv[1]
    test_exif_extraction(image_path)
|
||||
|
||||
@ -1,136 +0,0 @@
|
||||
#!/usr/bin/env python3
|
||||
"""
|
||||
Test script for EXIF orientation handling
|
||||
"""
|
||||
|
||||
import sys
|
||||
import os
|
||||
sys.path.append(os.path.join(os.path.dirname(__file__), '..'))
|
||||
|
||||
from src.utils.exif_utils import EXIFOrientationHandler
|
||||
from PIL import Image
|
||||
import tempfile
|
||||
|
||||
|
||||
def test_exif_orientation_detection():
    """Test EXIF orientation detection"""
    print("🧪 Testing EXIF orientation detection...")

    # Sample images come from the known project folders, up to two total.
    candidate_dirs = [
        "/home/ladmin/Code/punimtag/demo_photos",
        "/home/ladmin/Code/punimtag/data",
    ]

    samples = []
    for directory in candidate_dirs:
        if not os.path.exists(directory):
            continue
        for entry in os.listdir(directory):
            if entry.lower().endswith(('.jpg', '.jpeg', '.png')):
                samples.append(os.path.join(directory, entry))
            if len(samples) >= 2:  # Limit to 2 images for testing
                break

    if not samples:
        print(" ℹ️ No test images found - testing with coordinate transformation only")
        return

    for image_path in samples:
        print(f"\n📸 Testing: {os.path.basename(image_path)}")

        # Query both the raw orientation tag and the richer info dict.
        orientation = EXIFOrientationHandler.get_exif_orientation(image_path)
        orientation_info = EXIFOrientationHandler.get_orientation_info(image_path)

        print(f" Orientation: {orientation}")
        print(f" Description: {orientation_info['description']}")
        print(f" Needs correction: {orientation_info['needs_correction']}")

        if orientation and orientation != 1:
            print(f" ✅ EXIF orientation detected: {orientation}")
        else:
            print(" ℹ️ No orientation correction needed")
|
||||
|
||||
|
||||
def test_coordinate_transformation():
    """Test face coordinate transformation"""
    print("\n🧪 Testing coordinate transformation...")

    # One fixed DeepFace-style bounding box and a fixed source image size.
    test_coords = {'x': 100, 'y': 150, 'w': 200, 'h': 200}
    original_width, original_height = 800, 600

    print(f" Original coordinates: {test_coords}")
    print(f" Image dimensions: {original_width}x{original_height}")

    # Normal, 180°, 90° CW, 90° CCW
    for orientation in (1, 3, 6, 8):
        transformed = EXIFOrientationHandler.transform_face_coordinates(
            test_coords, original_width, original_height, orientation
        )
        print(f" Orientation {orientation}: {transformed}")
|
||||
|
||||
|
||||
def test_image_correction():
    """Load the first available project image, auto-correct its EXIF
    orientation, and save the corrected copy to a temp file for inspection.
    Diagnostic only: prints results, returns None.
    """
    print("\n🧪 Testing image orientation correction...")

    # Test with any available images
    test_dirs = [
        "/home/ladmin/Code/punimtag/demo_photos",
        "/home/ladmin/Code/punimtag/data"
    ]

    test_images = []
    for test_dir in test_dirs:
        if os.path.exists(test_dir):
            for file in os.listdir(test_dir):
                if file.lower().endswith(('.jpg', '.jpeg', '.png')):
                    test_images.append(os.path.join(test_dir, file))
                if len(test_images) >= 1:  # Limit to 1 image for testing
                    break

    if not test_images:
        print(" ℹ️ No test images found - skipping image correction test")
        return

    for image_path in test_images:
        print(f"\n📸 Testing correction for: {os.path.basename(image_path)}")

        try:
            # Load and correct image
            corrected_image, orientation = EXIFOrientationHandler.correct_image_orientation_from_path(image_path)

            if corrected_image:
                print(f" ✅ Image loaded and corrected")
                print(f" Original orientation: {orientation}")
                print(f" Corrected dimensions: {corrected_image.size}")

                # Save corrected image to temp file for inspection
                # (delete=False on purpose: the file must outlive the test).
                with tempfile.NamedTemporaryFile(suffix='_corrected.jpg', delete=False) as tmp_file:
                    corrected_image.save(tmp_file.name, quality=95)
                    print(f" Corrected image saved to: {tmp_file.name}")
            else:
                print(f" ❌ Failed to load/correct image")

        except Exception as e:
            print(f" ❌ Error: {e}")
        # NOTE(review): the source formatting is ambiguous about whether this
        # break sat inside the except handler; placed at loop level to match
        # its own comment ("only first image") — confirm against history.
        break  # Only test first image found
|
||||
|
||||
|
||||
def main():
    """Run all tests"""
    print("🔍 EXIF Orientation Handling Tests")
    print("=" * 50)

    # Run every scenario in the same order as before.
    for scenario in (test_exif_orientation_detection,
                     test_coordinate_transformation,
                     test_image_correction):
        scenario()

    print("\n✅ All tests completed!")
|
||||
|
||||
|
||||
# Allow running this module directly as a CLI script.
if __name__ == "__main__":
    main()
|
||||
@ -1,529 +0,0 @@
|
||||
#!/usr/bin/env python3
|
||||
"""
|
||||
Face Recognition Comparison Test Script
|
||||
|
||||
Compares face_recognition vs deepface on a folder of photos.
|
||||
Tests accuracy and performance without modifying existing database.
|
||||
|
||||
Usage:
|
||||
python test_face_recognition.py /path/to/photos [--save-crops] [--save-matrices] [--verbose]
|
||||
|
||||
Example:
|
||||
python test_face_recognition.py demo_photos/ --save-crops --verbose
|
||||
"""
|
||||
|
||||
import os
|
||||
import sys
|
||||
import time
|
||||
import argparse
|
||||
import tempfile
|
||||
from pathlib import Path
|
||||
from typing import List, Dict, Tuple, Optional
|
||||
import numpy as np
|
||||
import pandas as pd
|
||||
from PIL import Image
|
||||
|
||||
# Face recognition libraries
|
||||
import face_recognition
|
||||
from deepface import DeepFace
|
||||
|
||||
# Supported image formats
|
||||
SUPPORTED_FORMATS = {'.jpg', '.jpeg', '.png', '.bmp', '.tiff', '.tif'}
|
||||
|
||||
|
||||
class FaceRecognitionTester:
    """Test and compare face recognition libraries"""

    def __init__(self, verbose: bool = False):
        # Per-library accumulators: detected face records, per-image
        # processing times, and raw embedding vectors.
        self.verbose = verbose
        self.results = {
            library: {'faces': [], 'times': [], 'encodings': []}
            for library in ('face_recognition', 'deepface')
        }
|
||||
|
||||
def log(self, message: str, level: str = "INFO"):
    """Print log message with timestamp"""
    # Errors always surface; all other levels only in verbose mode.
    if not self.verbose and level != "ERROR":
        return
    stamp = time.strftime("%H:%M:%S")
    print(f"[{stamp}] {level}: {message}")
|
||||
|
||||
def get_image_files(self, folder_path: str) -> List[str]:
    """Get all supported image files from folder"""
    root = Path(folder_path)
    if not root.exists():
        raise FileNotFoundError(f"Folder not found: {folder_path}")

    # Recursive scan: keep regular files whose extension is supported.
    found = [
        str(candidate)
        for candidate in root.rglob("*")
        if candidate.is_file() and candidate.suffix.lower() in SUPPORTED_FORMATS
    ]

    self.log(f"Found {len(found)} image files")
    return sorted(found)
|
||||
|
||||
def process_with_face_recognition(self, image_path: str) -> Dict:
    """Detect and encode faces in one image with the face_recognition library.

    Returns a dict with 'faces' (per-face metadata dicts), 'encodings'
    (the same embedding vectors as a flat list), and 'processing_time' in
    seconds. Any failure is logged and an empty result is returned so one
    bad file does not abort the whole run.
    """
    start_time = time.time()

    try:
        # Load image
        image = face_recognition.load_image_file(image_path)

        # Detect faces using CNN model (more accurate than HOG)
        face_locations = face_recognition.face_locations(image, model="cnn")

        if not face_locations:
            return {'faces': [], 'encodings': [], 'processing_time': time.time() - start_time}

        # Get face encodings
        face_encodings = face_recognition.face_encodings(image, face_locations)

        # Convert to our format
        faces = []
        encodings = []

        for i, (location, encoding) in enumerate(zip(face_locations, face_encodings)):
            # face_recognition boxes are (top, right, bottom, left);
            # also store a DeepFace-style {'x','y','w','h'} bbox.
            top, right, bottom, left = location
            face_data = {
                'image_path': image_path,
                'face_id': f"fr_{Path(image_path).stem}_{i}",
                'location': location,  # Keep original for compatibility
                'bbox': {'x': left, 'y': top, 'w': right - left, 'h': bottom - top},  # DeepFace format
                'encoding': encoding
            }
            faces.append(face_data)
            encodings.append(encoding)

        processing_time = time.time() - start_time
        self.log(f"face_recognition: Found {len(faces)} faces in {processing_time:.2f}s")

        return {
            'faces': faces,
            'encodings': encodings,
            'processing_time': processing_time
        }

    except Exception as e:
        # Best effort: log the failure and keep processing other images.
        self.log(f"face_recognition error on {image_path}: {e}", "ERROR")
        return {'faces': [], 'encodings': [], 'processing_time': time.time() - start_time}
|
||||
|
||||
def process_with_deepface(self, image_path: str) -> Dict:
    """Detect and encode faces in one image with the deepface library.

    Uses ArcFace embeddings with the retinaface detector and alignment.
    Returns a dict with 'faces', 'encodings', and 'processing_time' in
    the same shape as process_with_face_recognition. Failures are logged
    and produce an empty result instead of raising.
    """
    start_time = time.time()

    try:
        # Use DeepFace to detect and encode faces
        results = DeepFace.represent(
            img_path=image_path,
            model_name='ArcFace',  # Best accuracy model
            detector_backend='retinaface',  # Best detection
            enforce_detection=False,  # Don't fail if no faces
            align=True  # Face alignment for better accuracy
        )

        if not results:
            return {'faces': [], 'encodings': [], 'processing_time': time.time() - start_time}

        # Convert to our format
        faces = []
        encodings = []

        for i, result in enumerate(results):
            # Extract face region info; derive a face_recognition-style
            # (top, right, bottom, left) tuple from the {'x','y','w','h'} region.
            region = result.get('region', {})
            face_data = {
                'image_path': image_path,
                'face_id': f"df_{Path(image_path).stem}_{i}",
                'location': (region.get('y', 0), region.get('x', 0) + region.get('w', 0),
                             region.get('y', 0) + region.get('h', 0), region.get('x', 0)),
                'bbox': region,
                'encoding': np.array(result['embedding'])
            }
            faces.append(face_data)
            encodings.append(np.array(result['embedding']))

        processing_time = time.time() - start_time
        self.log(f"deepface: Found {len(faces)} faces in {processing_time:.2f}s")

        return {
            'faces': faces,
            'encodings': encodings,
            'processing_time': processing_time
        }

    except Exception as e:
        # Best effort: log the failure and keep processing other images.
        self.log(f"deepface error on {image_path}: {e}", "ERROR")
        return {'faces': [], 'encodings': [], 'processing_time': time.time() - start_time}
|
||||
|
||||
def calculate_similarity_matrix(self, encodings: List[np.ndarray], method: str) -> np.ndarray:
    """Calculate the pairwise distance matrix between all face encodings.

    Args:
        encodings: List of embedding vectors (one per detected face).
        method: 'face_recognition' (euclidean face_distance) or anything
            else (treated as deepface: cosine distance on ArcFace vectors).

    Returns:
        (n, n) ndarray of distances with a zero diagonal; an empty array
        when no encodings are given. Lower values mean more similar.
    """
    n_faces = len(encodings)
    if n_faces == 0:
        return np.array([])

    if method == 'face_recognition':
        # The distance is symmetric, so compute each pair once and mirror
        # (the original recomputed every (i, j) AND (j, i) pair).
        similarity_matrix = np.zeros((n_faces, n_faces))
        for i in range(n_faces):
            for j in range(i + 1, n_faces):
                distance = face_recognition.face_distance([encodings[i]], encodings[j])[0]
                similarity_matrix[i, j] = similarity_matrix[j, i] = distance
    else:  # deepface
        # Cosine distance for ArcFace embeddings, fully vectorized:
        # normalize each row once, then 1 - E @ E.T gives all pairs
        # (the original renormalized both vectors inside an O(n^2) loop).
        normalized = np.stack([enc / np.linalg.norm(enc) for enc in encodings])
        similarity_matrix = 1.0 - normalized @ normalized.T
        np.fill_diagonal(similarity_matrix, 0.0)  # Same face

    return similarity_matrix
|
||||
|
||||
def find_top_matches(self, similarity_matrix: np.ndarray, faces: List[Dict],
                     method: str, top_k: int = 5) -> List[Dict]:
    """Find the top-k most similar other faces for each face.

    Both metrics ("face_recognition" euclidean and deepface cosine
    distance) are lower-is-more-similar, so one ascending argsort covers
    both (the original duplicated identical branches for them).

    Bug fix: the query face is now excluded by its index instead of by
    assuming it always sorts first (``[1:top_k+1]``) — with duplicate
    images, a zero distance to a different face could previously push a
    real match out of the list or leak the query into its own results.

    Returns:
        One dict per face: {'query_face': <face dict>, 'matches': [...]},
        each match carrying face_id, image_path, distance, and a naive
        confidence percentage (0 distance -> 100%, clamped at 0).
    """
    top_matches = []

    for i, face in enumerate(faces):
        if i >= similarity_matrix.shape[0]:
            continue  # matrix smaller than face list; skip unmatched tail

        distances = similarity_matrix[i, :]
        # Ascending distance == descending similarity; drop self and any
        # index beyond the face list.
        ranked = [idx for idx in np.argsort(distances)
                  if idx != i and idx < len(faces)]

        matches = []
        for idx in ranked[:top_k]:
            other_face = faces[idx]
            distance = distances[idx]
            # Convert to confidence percentage for display
            confidence = max(0, (1 - distance) * 100)
            matches.append({
                'face_id': other_face['face_id'],
                'image_path': other_face['image_path'],
                'distance': distance,
                'confidence': confidence
            })

        top_matches.append({
            'query_face': face,
            'matches': matches
        })

    return top_matches
|
||||
|
||||
def save_face_crops(self, faces: List[Dict], output_dir: str, method: str):
    """Save padded face crops as JPEGs for manual inspection.

    Crops land in ``<output_dir>/face_crops/<method>/<face_id>.jpg``.
    Per-face errors are logged and skipped so one bad file does not stop
    the batch.

    Fixes vs. the original: images are opened with a context manager (the
    original leaked open file handles), and the dead ``left = left``-style
    self-assignments were removed.
    """
    crops_dir = Path(output_dir) / "face_crops" / method
    crops_dir.mkdir(parents=True, exist_ok=True)

    for face in faces:
        try:
            with Image.open(face['image_path']) as image:
                # Extract the face region in left/top/right/bottom form.
                if method == 'face_recognition':
                    # face_recognition boxes are (top, right, bottom, left).
                    top, right, bottom, left = face['location']
                else:  # deepface
                    # DeepFace boxes are {'x', 'y', 'w', 'h'}.
                    bbox = face['bbox']
                    left = bbox.get('x', 0)
                    top = bbox.get('y', 0)
                    right = left + bbox.get('w', 0)
                    bottom = top + bbox.get('h', 0)

                # Add padding, clamped to the image bounds.
                padding = 20
                left = max(0, left - padding)
                top = max(0, top - padding)
                right = min(image.width, right + padding)
                bottom = min(image.height, bottom + padding)

                # Crop and save
                face_crop = image.crop((left, top, right, bottom))
                crop_path = crops_dir / f"{face['face_id']}.jpg"
                face_crop.save(crop_path, "JPEG", quality=95)

        except Exception as e:
            self.log(f"Error saving crop for {face['face_id']}: {e}", "ERROR")
|
||||
|
||||
def save_similarity_matrices(self, fr_matrix: np.ndarray, df_matrix: np.ndarray,
                             fr_faces: List[Dict], df_faces: List[Dict], output_dir: str):
    """Save similarity matrices as CSV files"""
    matrices_dir = Path(output_dir) / "similarity_matrices"
    matrices_dir.mkdir(parents=True, exist_ok=True)

    # One (matrix, face list, file name) export per library; empty
    # matrices are skipped exactly as before.
    exports = (
        (fr_matrix, fr_faces, "face_recognition_similarity.csv"),
        (df_matrix, df_faces, "deepface_similarity.csv"),
    )
    for matrix, face_list, filename in exports:
        if matrix.size == 0:
            continue
        labels = [entry['face_id'] for entry in face_list]
        frame = pd.DataFrame(matrix, index=labels, columns=labels)
        frame.to_csv(matrices_dir / filename)
|
||||
|
||||
def generate_report(self, fr_results: Dict, df_results: Dict,
                    fr_matches: List[Dict], df_matches: List[Dict],
                    output_dir: Optional[str] = None) -> str:
    """Generate the text comparison report for both libraries.

    Args:
        fr_results/df_results: Accumulated {'faces': [...], 'times': [...]}
            dicts for each library.
        fr_matches/df_matches: Output of find_top_matches for each library.
        output_dir: When given, the report is also written to
            ``<output_dir>/comparison_report.txt``.

    Returns:
        The full report as a single string.
    """
    report_lines = []
    report_lines.append("=" * 60)
    report_lines.append("FACE RECOGNITION COMPARISON REPORT")
    report_lines.append("=" * 60)
    report_lines.append("")

    # Summary statistics
    fr_total_faces = len(fr_results['faces'])
    df_total_faces = len(df_results['faces'])
    fr_total_time = sum(fr_results['times'])
    df_total_time = sum(df_results['times'])

    report_lines.append("SUMMARY STATISTICS:")
    report_lines.append(f" face_recognition: {fr_total_faces} faces in {fr_total_time:.2f}s")
    report_lines.append(f" deepface: {df_total_faces} faces in {df_total_time:.2f}s")
    # Bug fix: the original divided unconditionally and raised
    # ZeroDivisionError when no face_recognition time was recorded
    # (e.g. an empty folder or instant failures).
    if fr_total_time > 0:
        report_lines.append(f" Speed ratio: {df_total_time/fr_total_time:.1f}x slower (deepface)")
    report_lines.append("")

    # High confidence matches analysis: flatten every (query, match) pair
    # at or above the confidence threshold.
    def analyze_high_confidence_matches(matches: List[Dict], method: str, threshold: float = 70.0):
        high_conf_matches = []
        for match_data in matches:
            for match in match_data['matches']:
                if match['confidence'] >= threshold:
                    high_conf_matches.append({
                        'query': match_data['query_face']['face_id'],
                        'match': match['face_id'],
                        'confidence': match['confidence'],
                        'query_image': match_data['query_face']['image_path'],
                        'match_image': match['image_path']
                    })
        return high_conf_matches

    fr_high_conf = analyze_high_confidence_matches(fr_matches, 'face_recognition')
    df_high_conf = analyze_high_confidence_matches(df_matches, 'deepface')

    report_lines.append("HIGH CONFIDENCE MATCHES (≥70%):")
    report_lines.append(f" face_recognition: {len(fr_high_conf)} matches")
    report_lines.append(f" deepface: {len(df_high_conf)} matches")
    report_lines.append("")

    # Show top matches for manual inspection
    report_lines.append("TOP MATCHES FOR MANUAL INSPECTION:")
    report_lines.append("")

    # Both libraries render identically; single helper instead of the
    # original's two duplicated loops.
    def render_top_matches(label: str, matches: List[Dict]):
        report_lines.append(f"{label} top matches:")
        for match_data in matches[:3]:  # Show first 3 faces
            query_face = match_data['query_face']
            report_lines.append(f" Query: {query_face['face_id']} ({Path(query_face['image_path']).name})")
            for match in match_data['matches'][:3]:  # Top 3 matches
                report_lines.append(f" → {match['face_id']}: {match['confidence']:.1f}% ({Path(match['image_path']).name})")
            report_lines.append("")

    render_top_matches("face_recognition", fr_matches)
    render_top_matches("deepface", df_matches)

    # Recommendations
    report_lines.append("RECOMMENDATIONS:")
    if len(fr_high_conf) > len(df_high_conf) * 1.5:
        report_lines.append(" ⚠️ face_recognition shows significantly more high-confidence matches")
        report_lines.append(" This may indicate more false positives")
    if df_total_time > fr_total_time * 3:
        report_lines.append(" ⚠️ deepface is significantly slower")
        report_lines.append(" Consider GPU acceleration or faster models")
    if df_total_faces > fr_total_faces:
        report_lines.append(" ✅ deepface detected more faces")
        report_lines.append(" Better face detection in difficult conditions")

    report_lines.append("")
    report_lines.append("=" * 60)

    report_text = "\n".join(report_lines)

    # Save report if output directory specified
    if output_dir:
        report_path = Path(output_dir) / "comparison_report.txt"
        with open(report_path, 'w') as f:
            f.write(report_text)
        self.log(f"Report saved to: {report_path}")

    return report_text
|
||||
|
||||
def run_test(self, folder_path: str, save_crops: bool = False,
             save_matrices: bool = False) -> Dict:
    """Run the complete face recognition comparison test.

    Pipeline: discover images -> detect/encode with both libraries ->
    pairwise similarity matrices -> per-face top matches -> optional
    artifacts (crops, CSV matrices) -> printed comparison report.

    Args:
        folder_path: Folder scanned recursively for supported images.
        save_crops: Also write per-face crop images for inspection.
        save_matrices: Also write similarity matrices as CSV files.

    Returns:
        Dict keyed by library name, each with 'faces', 'matches', 'matrix'.

    Raises:
        ValueError: If no supported image files are found.
    """
    self.log(f"Starting face recognition test on: {folder_path}")

    # Get image files
    image_files = self.get_image_files(folder_path)
    if not image_files:
        raise ValueError("No image files found in the specified folder")

    # Create output directory if needed (sibling of the input folder).
    output_dir = None
    if save_crops or save_matrices:
        output_dir = Path(folder_path).parent / "test_results"
        output_dir.mkdir(exist_ok=True)

    # Process images with both methods, accumulating into self.results.
    self.log("Processing images with face_recognition...")
    for image_path in image_files:
        result = self.process_with_face_recognition(image_path)
        self.results['face_recognition']['faces'].extend(result['faces'])
        self.results['face_recognition']['times'].append(result['processing_time'])
        self.results['face_recognition']['encodings'].extend(result['encodings'])

    self.log("Processing images with deepface...")
    for image_path in image_files:
        result = self.process_with_deepface(image_path)
        self.results['deepface']['faces'].extend(result['faces'])
        self.results['deepface']['times'].append(result['processing_time'])
        self.results['deepface']['encodings'].extend(result['encodings'])

    # Calculate similarity matrices
    self.log("Calculating similarity matrices...")
    fr_matrix = self.calculate_similarity_matrix(
        self.results['face_recognition']['encodings'], 'face_recognition'
    )
    df_matrix = self.calculate_similarity_matrix(
        self.results['deepface']['encodings'], 'deepface'
    )

    # Find top matches
    fr_matches = self.find_top_matches(
        fr_matrix, self.results['face_recognition']['faces'], 'face_recognition'
    )
    df_matches = self.find_top_matches(
        df_matrix, self.results['deepface']['faces'], 'deepface'
    )

    # Save outputs if requested
    if save_crops and output_dir:
        self.log("Saving face crops...")
        self.save_face_crops(self.results['face_recognition']['faces'], str(output_dir), 'face_recognition')
        self.save_face_crops(self.results['deepface']['faces'], str(output_dir), 'deepface')

    if save_matrices and output_dir:
        self.log("Saving similarity matrices...")
        self.save_similarity_matrices(
            fr_matrix, df_matrix,
            self.results['face_recognition']['faces'],
            self.results['deepface']['faces'],
            str(output_dir)
        )

    # Generate and display report
    report = self.generate_report(
        self.results['face_recognition'], self.results['deepface'],
        fr_matches, df_matches, str(output_dir) if output_dir else None
    )

    print(report)

    return {
        'face_recognition': {
            'faces': self.results['face_recognition']['faces'],
            'matches': fr_matches,
            'matrix': fr_matrix
        },
        'deepface': {
            'faces': self.results['deepface']['faces'],
            'matches': df_matches,
            'matrix': df_matrix
        }
    }
|
||||
|
||||
|
||||
def main():
    """Main CLI entry point: parse args, validate inputs, run the comparison."""
    parser = argparse.ArgumentParser(
        description="Compare face_recognition vs deepface on a folder of photos",
        formatter_class=argparse.RawDescriptionHelpFormatter,
        epilog="""
Examples:
python test_face_recognition.py demo_photos/
python test_face_recognition.py demo_photos/ --save-crops --verbose
python test_face_recognition.py demo_photos/ --save-matrices --save-crops
"""
    )

    parser.add_argument('folder', help='Path to folder containing photos to test')
    parser.add_argument('--save-crops', action='store_true',
                        help='Save face crops for manual inspection')
    parser.add_argument('--save-matrices', action='store_true',
                        help='Save similarity matrices as CSV files')
    parser.add_argument('--verbose', '-v', action='store_true',
                        help='Enable verbose logging')

    args = parser.parse_args()

    # Validate folder path
    if not os.path.exists(args.folder):
        print(f"Error: Folder not found: {args.folder}")
        sys.exit(1)

    # Check dependencies with a friendly install hint.
    # NOTE(review): the module-level imports above already require both
    # packages, so import would have failed before reaching here — confirm
    # whether the top-level imports should be made lazy instead.
    try:
        import face_recognition
        from deepface import DeepFace
    except ImportError as e:
        print(f"Error: Missing required dependency: {e}")
        print("Please install with: pip install face_recognition deepface")
        sys.exit(1)

    # Run test
    try:
        tester = FaceRecognitionTester(verbose=args.verbose)
        results = tester.run_test(
            args.folder,
            save_crops=args.save_crops,
            save_matrices=args.save_matrices
        )

        print("\n✅ Test completed successfully!")
        if args.save_crops or args.save_matrices:
            print(f"📁 Results saved to: {Path(args.folder).parent / 'test_results'}")

    except Exception as e:
        print(f"❌ Test failed: {e}")
        if args.verbose:
            # Full traceback only in verbose mode to keep normal output clean.
            import traceback
            traceback.print_exc()
        sys.exit(1)
|
||||
|
||||
|
||||
# Allow running this module directly as a CLI script.
if __name__ == "__main__":
    main()
|
||||
@ -1,264 +0,0 @@
|
||||
from __future__ import annotations
|
||||
|
||||
from typing import Generator
|
||||
|
||||
import pytest
|
||||
from fastapi.testclient import TestClient
|
||||
from sqlalchemy import create_engine, text
|
||||
from sqlalchemy.orm import Session, sessionmaker
|
||||
from sqlalchemy.pool import StaticPool
|
||||
|
||||
from src.web.app import app
|
||||
from src.web.db import models
|
||||
from src.web.db.models import Photo, PhotoTagLinkage, Tag, User
|
||||
from src.web.db.session import get_auth_db, get_db
|
||||
from src.web.constants.roles import DEFAULT_ADMIN_ROLE
|
||||
from src.web.api.auth import get_current_user
|
||||
|
||||
|
||||
# Create isolated in-memory databases for main and auth stores.
# StaticPool + check_same_thread=False keep one shared SQLite connection
# alive, so the in-memory schema stays visible across sessions and to the
# TestClient's request threads.
main_engine = create_engine(
    "sqlite://",
    connect_args={"check_same_thread": False},
    poolclass=StaticPool,
)
auth_engine = create_engine(
    "sqlite://",
    connect_args={"check_same_thread": False},
    poolclass=StaticPool,
)

MainSessionLocal = sessionmaker(
    bind=main_engine, autoflush=False, autocommit=False, future=True
)
AuthSessionLocal = sessionmaker(
    bind=auth_engine, autoflush=False, autocommit=False, future=True
)

# Main store: build the full ORM schema from the declarative models.
models.Base.metadata.create_all(bind=main_engine)

# Auth store: no ORM models here — create the two tables with raw DDL,
# mirroring the minimal shape the pending-linkage endpoints expect.
with auth_engine.begin() as connection:
    connection.execute(
        text(
            """
            CREATE TABLE users (
            id INTEGER PRIMARY KEY,
            name TEXT,
            email TEXT
            )
            """
        )
    )
    connection.execute(
        text(
            """
            CREATE TABLE pending_linkages (
            id INTEGER PRIMARY KEY AUTOINCREMENT,
            photo_id INTEGER NOT NULL,
            tag_id INTEGER,
            tag_name VARCHAR(255),
            user_id INTEGER NOT NULL,
            status VARCHAR(50) DEFAULT 'pending',
            notes TEXT,
            created_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP,
            updated_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP
            )
            """
        )
    )
|
||||
|
||||
|
||||
def override_get_db() -> Generator[Session, None, None]:
    """Yield a session bound to the in-memory main database."""
    session = MainSessionLocal()
    try:
        yield session
    finally:
        session.close()
|
||||
|
||||
|
||||
def override_get_auth_db() -> Generator[Session, None, None]:
    """Yield a session bound to the in-memory auth database."""
    session = AuthSessionLocal()
    try:
        yield session
    finally:
        session.close()
|
||||
|
||||
|
||||
def override_get_current_user() -> dict[str, str]:
    """Pretend every request is authenticated as the admin user."""
    return dict(username="admin")
|
||||
|
||||
|
||||
# Point the app's DB and auth dependencies at the in-memory test doubles
# before building the shared test client.
app.dependency_overrides[get_db] = override_get_db
app.dependency_overrides[get_auth_db] = override_get_auth_db
app.dependency_overrides[get_current_user] = override_get_current_user

client = TestClient(app)
|
||||
|
||||
|
||||
def _ensure_admin_user() -> None:
    """Create (or re-promote) the canonical admin account the tests rely on."""
    with MainSessionLocal() as session:
        existing = session.query(User).filter(User.username == "admin").first()
        if existing is None:
            session.add(
                User(
                    username="admin",
                    password_hash="test",
                    email="admin@example.com",
                    full_name="Admin",
                    is_active=True,
                    is_admin=True,
                    role=DEFAULT_ADMIN_ROLE,
                )
            )
        else:
            # A test may have demoted the admin; restore its privileges.
            existing.is_admin = True
            existing.role = DEFAULT_ADMIN_ROLE
        session.commit()
|
||||
|
||||
|
||||
@pytest.fixture(autouse=True)
def clean_databases() -> Generator[None, None, None]:
    """Reset both test databases before every test, keeping only the admin user."""
    with MainSessionLocal() as session:
        # Delete children before parents so relationship rows go first.
        session.query(PhotoTagLinkage).delete()
        session.query(Tag).delete()
        session.query(Photo).delete()
        session.query(User).filter(User.username != "admin").delete()
        session.commit()

    with AuthSessionLocal() as session:
        session.execute(text("DELETE FROM pending_linkages"))
        session.execute(text("DELETE FROM users"))
        session.commit()

    # Re-create/repair the admin account in case a test removed or demoted it.
    _ensure_admin_user()
    yield
|
||||
|
||||
|
||||
def _insert_auth_user(user_id: int = 1) -> None:
    """Seed the auth database's ``users`` table with a single test account."""
    params = {"id": user_id, "name": "Tester", "email": "tester@example.com"}
    insert_stmt = text(
        """
        INSERT INTO users (id, name, email)
        VALUES (:id, :name, :email)
        """
    )
    with auth_engine.begin() as conn:
        conn.execute(insert_stmt, params)
|
||||
|
||||
|
||||
def _insert_pending_linkage(
    photo_id: int,
    *,
    tag_id: int | None = None,
    tag_name: str | None = None,
    status: str = "pending",
    user_id: int = 1,
) -> int:
    """Insert a ``pending_linkages`` row and return its primary key.

    Fix: the previous version returned ``result.lastrowid``, which is a
    DBAPI-specific attribute. On PostgreSQL (the CI test database) psycopg2's
    ``lastrowid`` is the row OID — 0 on modern servers — not the serial
    primary key, so the returned id was wrong. ``RETURNING id`` is the
    portable way to get the generated key (works on PostgreSQL, and on
    SQLite >= 3.35 — TODO confirm which backend auth_engine targets locally).
    """
    with auth_engine.begin() as connection:
        result = connection.execute(
            text(
                """
                INSERT INTO pending_linkages (
                    photo_id, tag_id, tag_name, user_id, status, notes
                )
                VALUES (:photo_id, :tag_id, :tag_name, :user_id, :status, 'note')
                RETURNING id
                """
            ),
            {
                "photo_id": photo_id,
                "tag_id": tag_id,
                "tag_name": tag_name,
                "user_id": user_id,
                "status": status,
            },
        )
        return int(result.scalar_one())
|
||||
|
||||
|
||||
def _create_photo(path: str, filename: str, file_hash: str) -> int:
    """Persist a Photo row in the main DB and return its generated id."""
    record = Photo(path=path, filename=filename, file_hash=file_hash)
    with MainSessionLocal() as db:
        db.add(record)
        db.commit()
        # Refresh to pull the server-generated primary key back onto the object.
        db.refresh(record)
        return record.id
|
||||
|
||||
|
||||
def test_list_pending_linkages_returns_existing_rows():
    """A seeded pending linkage appears in GET /api/v1/pending-linkages.

    Fix: the returned linkage id was bound to an unused local; the helper is
    now called for its side effect only.
    """
    _ensure_admin_user()
    photo_id = _create_photo("/tmp/photo1.jpg", "photo1.jpg", "hash1")
    _insert_auth_user()
    _insert_pending_linkage(photo_id, tag_name="Beach Day")

    response = client.get("/api/v1/pending-linkages")
    assert response.status_code == 200

    payload = response.json()
    assert payload["total"] == 1
    item = payload["items"][0]
    assert item["photo_id"] == photo_id
    assert item["proposed_tag_name"] == "Beach Day"
    assert item["status"] == "pending"
|
||||
|
||||
|
||||
def test_review_pending_linkages_creates_tag_and_linkage():
    """Approving a pending linkage creates the tag, links the photo, and
    flips the auth-side row to ``approved``."""
    _ensure_admin_user()
    photo_id = _create_photo("/tmp/photo2.jpg", "photo2.jpg", "hash2")
    _insert_auth_user()
    linkage_id = _insert_pending_linkage(photo_id, tag_name="Sunset Crew")

    review_body = {"decisions": [{"id": linkage_id, "decision": "approve"}]}
    response = client.post("/api/v1/pending-linkages/review", json=review_body)
    assert response.status_code == 200

    summary = response.json()
    assert summary["approved"] == 1
    assert summary["denied"] == 0
    assert summary["tags_created"] == 1
    assert summary["linkages_created"] == 1

    # The new tag and the photo<->tag link must exist in the main database.
    with MainSessionLocal() as session:
        created_tags = session.query(Tag).all()
        assert len(created_tags) == 1
        assert created_tags[0].tag_name == "Sunset Crew"
        link = session.query(PhotoTagLinkage).first()
        assert link is not None
        assert link.photo_id == photo_id
        assert link.tag_id == created_tags[0].id

    # The auth-side record must now carry the "approved" status.
    with AuthSessionLocal() as session:
        row = session.execute(
            text("SELECT status FROM pending_linkages WHERE id = :id"),
            {"id": linkage_id},
        ).fetchone()
        assert row is not None
        assert row[0] == "approved"
|
||||
|
||||
|
||||
def test_cleanup_pending_linkages_deletes_approved_and_denied():
    """Cleanup removes reviewed (approved/denied) rows but keeps pending ones.

    Fix: the ids of the approved and denied rows were bound to unused locals;
    only the pending row's id is needed for the final assertion.
    """
    _ensure_admin_user()
    photo_id = _create_photo("/tmp/photo3.jpg", "photo3.jpg", "hash3")
    _insert_auth_user()

    _insert_pending_linkage(photo_id, tag_name="Approved Tag", status="approved")
    _insert_pending_linkage(photo_id, tag_name="Denied Tag", status="denied")
    pending_id = _insert_pending_linkage(photo_id, tag_name="Pending Tag", status="pending")

    response = client.post("/api/v1/pending-linkages/cleanup")
    assert response.status_code == 200

    payload = response.json()
    assert payload["deleted_records"] == 2

    # Only the still-pending row may survive the cleanup.
    with AuthSessionLocal() as session:
        remaining = session.execute(
            text("SELECT id, status FROM pending_linkages ORDER BY id")
        ).fetchall()
        assert len(remaining) == 1
        assert remaining[0][0] == pending_id
        assert remaining[0][1] == "pending"
|
||||
|
||||
@ -1,25 +0,0 @@
|
||||
from __future__ import annotations
|
||||
|
||||
from fastapi.testclient import TestClient
|
||||
|
||||
from src.web.app import app
|
||||
|
||||
|
||||
# Shared TestClient used by the legacy people/faces smoke tests below.
client = TestClient(app)
|
||||
|
||||
|
||||
def test_people_list_empty():
    """The people listing endpoint responds 200 with a list under 'items'."""
    response = client.get('/api/v1/people')
    assert response.status_code == 200
    payload = response.json()
    assert 'items' in payload
    assert isinstance(payload['items'], list)
|
||||
|
||||
|
||||
def test_unidentified_faces_empty():
    """The unidentified-faces endpoint responds 200 with a non-negative total."""
    response = client.get('/api/v1/faces/unidentified')
    assert response.status_code == 200
    payload = response.json()
    assert payload['total'] >= 0
|
||||
|
||||
|
||||
|
||||
Loading…
x
Reference in New Issue
Block a user