diff --git a/admin-frontend/public/enable-dev-mode.html b/admin-frontend/public/enable-dev-mode.html
new file mode 100644
index 0000000..c4e659a
--- /dev/null
+++ b/admin-frontend/public/enable-dev-mode.html
@@ -0,0 +1,67 @@
+
+
+
+ Enable Developer Mode
+
+
+
+
+
Enable Developer Mode
+
Click the button below to enable Developer Mode for PunimTag.
+
Enable Developer Mode
+
+
+
+
+
+
+
diff --git a/admin-frontend/src/api/faces.ts b/admin-frontend/src/api/faces.ts
index 608f3ac..af9bcf0 100644
--- a/admin-frontend/src/api/faces.ts
+++ b/admin-frontend/src/api/faces.ts
@@ -39,11 +39,27 @@ export interface SimilarFaceItem {
quality_score: number
filename: string
pose_mode?: string
+ debug_info?: {
+ encoding_length: number
+ encoding_min: number
+ encoding_max: number
+ encoding_mean: number
+ encoding_std: number
+ encoding_first_10: number[]
+ }
}
export interface SimilarFacesResponse {
base_face_id: number
items: SimilarFaceItem[]
+ debug_info?: {
+ encoding_length: number
+ encoding_min: number
+ encoding_max: number
+ encoding_mean: number
+ encoding_std: number
+ encoding_first_10: number[]
+ }
}
export interface FaceSimilarityPair {
@@ -97,6 +113,7 @@ export interface AutoMatchRequest {
tolerance: number
auto_accept?: boolean
auto_accept_threshold?: number
+ use_distance_based_thresholds?: boolean
}
export interface AutoMatchFaceItem {
@@ -217,11 +234,25 @@ export const facesApi = {
})
return response.data
},
- getSimilar: async (faceId: number, includeExcluded?: boolean): Promise => {
+ getSimilar: async (faceId: number, includeExcluded?: boolean, debug?: boolean): Promise<SimilarFacesResponse> => {
const response = await apiClient.get(`/api/v1/faces/${faceId}/similar`, {
- params: { include_excluded: includeExcluded || false },
+ params: { include_excluded: includeExcluded || false, debug: debug || false },
})
- return response.data
+ const data = response.data
+
+ // Log debug info to browser console if available
+ if (debug && data.debug_info) {
+ console.log('🔍 Base Face Encoding Debug Info:', data.debug_info)
+ }
+ if (debug && data.items) {
+ data.items.forEach((item, index) => {
+ if (item.debug_info) {
+ console.log(`🔍 Similar Face ${index + 1} (ID: ${item.id}) Encoding Debug Info:`, item.debug_info)
+ }
+ })
+ }
+
+ return data
},
batchSimilarity: async (request: BatchSimilarityRequest): Promise => {
const response = await apiClient.post('/api/v1/faces/batch-similarity', request)
diff --git a/admin-frontend/src/pages/AutoMatch.tsx b/admin-frontend/src/pages/AutoMatch.tsx
index 13e55f0..45a0b17 100644
--- a/admin-frontend/src/pages/AutoMatch.tsx
+++ b/admin-frontend/src/pages/AutoMatch.tsx
@@ -7,7 +7,8 @@ import peopleApi, { Person } from '../api/people'
import { apiClient } from '../api/client'
import { useDeveloperMode } from '../context/DeveloperModeContext'
-const DEFAULT_TOLERANCE = 0.5
+const DEFAULT_TOLERANCE = 0.6 // Default for regular auto-match (more lenient)
+const RUN_AUTO_MATCH_TOLERANCE = 0.5 // Tolerance for Run auto-match button (stricter)
export default function AutoMatch() {
const { isDeveloperMode } = useDeveloperMode()
@@ -451,9 +452,10 @@ export default function AutoMatch() {
setBusy(true)
try {
const response = await facesApi.autoMatch({
- tolerance,
+ tolerance: RUN_AUTO_MATCH_TOLERANCE, // Use 0.5 for Run auto-match button (stricter)
auto_accept: true,
- auto_accept_threshold: autoAcceptThreshold
+ auto_accept_threshold: autoAcceptThreshold,
+ use_distance_based_thresholds: true // Enable distance-based thresholds for Run auto-match button
})
// Show summary if auto-accept was performed
@@ -758,7 +760,7 @@ export default function AutoMatch() {
)}
- ℹ️ Auto-Match Criteria: Only faces with similarity higher than 70% and picture quality higher than 50% will be auto-matched. Profile faces are excluded for better accuracy.
+ ℹ️ Auto-Match Criteria: Only faces with similarity higher than 85% and picture quality higher than 50% will be auto-matched. Profile faces are excluded for better accuracy.
diff --git a/admin-frontend/src/pages/Help.tsx b/admin-frontend/src/pages/Help.tsx
index b1df125..dd6b035 100644
--- a/admin-frontend/src/pages/Help.tsx
+++ b/admin-frontend/src/pages/Help.tsx
@@ -474,7 +474,7 @@ function AutoMatchPageHelp({ onBack }: { onBack: () => void }) {
Click "🚀 Run Auto-Match" button
The system will automatically match unidentified faces to identified people based on:
- Similarity higher than 70%
+ Similarity higher than 85%
Picture quality higher than 50%
Profile faces are excluded for better accuracy
diff --git a/admin-frontend/src/pages/Identify.tsx b/admin-frontend/src/pages/Identify.tsx
index 1adc2ec..72a92d0 100644
--- a/admin-frontend/src/pages/Identify.tsx
+++ b/admin-frontend/src/pages/Identify.tsx
@@ -348,7 +348,8 @@ export default function Identify() {
return
}
try {
- const res = await facesApi.getSimilar(faceId, includeExcludedFaces)
+ // Enable debug mode to log encoding info to browser console
+ const res = await facesApi.getSimilar(faceId, includeExcludedFaces, true)
setSimilar(res.items || [])
setSelectedSimilar({})
} catch (error) {
diff --git a/backend/api/faces.py b/backend/api/faces.py
index 8bafeda..61d4e83 100644
--- a/backend/api/faces.py
+++ b/backend/api/faces.py
@@ -90,9 +90,9 @@ def process_faces(request: ProcessFacesRequest) -> ProcessFacesResponse:
job_timeout="1h", # Long timeout for face processing
)
- print(f"[Faces API] Enqueued face processing job: {job.id}")
- print(f"[Faces API] Job status: {job.get_status()}")
- print(f"[Faces API] Queue length: {len(queue)}")
+ import logging
+ logger = logging.getLogger(__name__)
+ logger.info(f"Enqueued face processing job: {job.id}, status: {job.get_status()}, queue length: {len(queue)}")
return ProcessFacesResponse(
job_id=job.id,
@@ -197,12 +197,14 @@ def get_unidentified_faces(
def get_similar_faces(
face_id: int,
include_excluded: bool = Query(False, description="Include excluded faces in results"),
+ debug: bool = Query(False, description="Include debug information (encoding stats) in response"),
db: Session = Depends(get_db)
) -> SimilarFacesResponse:
"""Return similar unidentified faces for a given face."""
import logging
+ import numpy as np
logger = logging.getLogger(__name__)
- logger.info(f"API: get_similar_faces called for face_id={face_id}, include_excluded={include_excluded}")
+ logger.info(f"API: get_similar_faces called for face_id={face_id}, include_excluded={include_excluded}, debug={debug}")
# Validate face exists
base = db.query(Face).filter(Face.id == face_id).first()
@@ -210,9 +212,23 @@ def get_similar_faces(
logger.warning(f"API: Face {face_id} not found")
raise HTTPException(status_code=status.HTTP_404_NOT_FOUND, detail=f"Face {face_id} not found")
+ # Load base encoding for debug info if needed
+ base_debug_info = None
+ if debug:
+ from backend.services.face_service import load_face_encoding
+ base_enc = load_face_encoding(base.encoding)
+ base_debug_info = {
+ "encoding_length": len(base_enc),
+ "encoding_min": float(np.min(base_enc)),
+ "encoding_max": float(np.max(base_enc)),
+ "encoding_mean": float(np.mean(base_enc)),
+ "encoding_std": float(np.std(base_enc)),
+ "encoding_first_10": [float(x) for x in base_enc[:10].tolist()],
+ }
+
logger.info(f"API: Calling find_similar_faces for face_id={face_id}, include_excluded={include_excluded}")
# Use 0.6 tolerance for Identify People (more lenient for manual review)
- results = find_similar_faces(db, face_id, tolerance=0.6, include_excluded=include_excluded)
+ results = find_similar_faces(db, face_id, tolerance=0.6, include_excluded=include_excluded, debug=debug)
logger.info(f"API: find_similar_faces returned {len(results)} results")
items = [
@@ -224,12 +240,13 @@ def get_similar_faces(
quality_score=float(f.quality_score),
filename=f.photo.filename if f.photo else "unknown",
pose_mode=getattr(f, "pose_mode", None) or "frontal",
+ debug_info=debug_info if debug else None,
)
- for f, distance, confidence_pct in results
+ for f, distance, confidence_pct, debug_info in results
]
logger.info(f"API: Returning {len(items)} items for face_id={face_id}")
- return SimilarFacesResponse(base_face_id=face_id, items=items)
+ return SimilarFacesResponse(base_face_id=face_id, items=items, debug_info=base_debug_info)
@router.post("/batch-similarity", response_model=BatchSimilarityResponse)
@@ -438,7 +455,9 @@ def get_face_crop(face_id: int, db: Session = Depends(get_db)) -> Response:
except HTTPException:
raise
except Exception as e:
- print(f"[Faces API] get_face_crop error for face {face_id}: {e}")
+ import logging
+ logger = logging.getLogger(__name__)
+ logger.error(f"get_face_crop error for face {face_id}: {e}")
raise HTTPException(
status_code=status.HTTP_500_INTERNAL_SERVER_ERROR,
detail=f"Failed to extract face crop: {str(e)}",
@@ -610,10 +629,12 @@ def auto_match_faces(
# Find matches for all identified people
# Filter by frontal reference faces if auto_accept enabled
+ # Use distance-based thresholds only when auto_accept is enabled (Run auto-match button)
matches_data = find_auto_match_matches(
db,
tolerance=request.tolerance,
- filter_frontal_only=request.auto_accept
+ filter_frontal_only=request.auto_accept,
+ use_distance_based_thresholds=request.use_distance_based_thresholds or request.auto_accept
)
# If auto_accept enabled, process matches automatically
@@ -647,7 +668,9 @@ def auto_match_faces(
)
auto_accepted_faces += identified_count
except Exception as e:
- print(f"Error auto-accepting matches for person {person_id}: {e}")
+ import logging
+ logger = logging.getLogger(__name__)
+ logger.error(f"Error auto-accepting matches for person {person_id}: {e}")
if not matches_data:
return AutoMatchResponse(
@@ -750,7 +773,7 @@ def auto_match_faces(
@router.get("/auto-match/people", response_model=AutoMatchPeopleResponse)
def get_auto_match_people(
filter_frontal_only: bool = Query(False, description="Only include frontal/tilted reference faces"),
- tolerance: float = Query(0.5, ge=0.0, le=1.0, description="Tolerance threshold"),
+ tolerance: float = Query(0.6, ge=0.0, le=1.0, description="Tolerance threshold (default 0.6 for regular auto-match)"),
db: Session = Depends(get_db),
) -> AutoMatchPeopleResponse:
"""Get list of people for auto-match (without matches) - fast initial load.
@@ -813,7 +836,7 @@ def get_auto_match_people(
@router.get("/auto-match/people/{person_id}/matches", response_model=AutoMatchPersonMatchesResponse)
def get_auto_match_person_matches(
person_id: int,
- tolerance: float = Query(0.5, ge=0.0, le=1.0, description="Tolerance threshold"),
+ tolerance: float = Query(0.6, ge=0.0, le=1.0, description="Tolerance threshold (default 0.6 for regular auto-match)"),
filter_frontal_only: bool = Query(False, description="Only return frontal/tilted faces"),
db: Session = Depends(get_db),
) -> AutoMatchPersonMatchesResponse:
diff --git a/backend/schemas/faces.py b/backend/schemas/faces.py
index 768ccba..19bb8b1 100644
--- a/backend/schemas/faces.py
+++ b/backend/schemas/faces.py
@@ -89,6 +89,7 @@ class SimilarFaceItem(BaseModel):
quality_score: float
filename: str
pose_mode: Optional[str] = Field("frontal", description="Pose classification (frontal, profile_left, etc.)")
+ debug_info: Optional[dict] = Field(None, description="Debug information (encoding stats) when debug mode is enabled")
class SimilarFacesResponse(BaseModel):
@@ -98,6 +99,7 @@ class SimilarFacesResponse(BaseModel):
base_face_id: int
items: list[SimilarFaceItem]
+ debug_info: Optional[dict] = Field(None, description="Debug information (base face encoding stats) when debug mode is enabled")
class BatchSimilarityRequest(BaseModel):
@@ -215,6 +217,7 @@ class AutoMatchRequest(BaseModel):
tolerance: float = Field(0.5, ge=0.0, le=1.0, description="Tolerance threshold (lower = stricter matching)")
auto_accept: bool = Field(False, description="Enable automatic acceptance of matching faces")
auto_accept_threshold: float = Field(70.0, ge=0.0, le=100.0, description="Similarity threshold for auto-acceptance (0-100%)")
+ use_distance_based_thresholds: bool = Field(False, description="Use distance-based confidence thresholds (stricter for borderline distances)")
class AutoMatchFaceItem(BaseModel):
diff --git a/backend/services/face_service.py b/backend/services/face_service.py
index da30331..334c409 100644
--- a/backend/services/face_service.py
+++ b/backend/services/face_service.py
@@ -631,17 +631,21 @@ def process_photo_faces(
if face_width is None:
face_width = matched_pose_face.get('face_width')
pose_mode = PoseDetector.classify_pose_mode(
- yaw_angle, pitch_angle, roll_angle, face_width
+ yaw_angle, pitch_angle, roll_angle, face_width, landmarks
)
else:
- # Can't calculate yaw, use face_width
+ # Can't calculate yaw, use face_width and landmarks for single-eye detection
pose_mode = PoseDetector.classify_pose_mode(
- yaw_angle, pitch_angle, roll_angle, face_width
+ yaw_angle, pitch_angle, roll_angle, face_width, landmarks
)
elif face_width is not None:
# No landmarks available, use face_width only
+ # Try to get landmarks from matched_pose_face if available
+ landmarks_for_classification = None
+ if matched_pose_face:
+ landmarks_for_classification = matched_pose_face.get('landmarks')
pose_mode = PoseDetector.classify_pose_mode(
- yaw_angle, pitch_angle, roll_angle, face_width
+ yaw_angle, pitch_angle, roll_angle, face_width, landmarks_for_classification
)
else:
# No landmarks and no face_width, use default
@@ -1746,7 +1750,6 @@ def calculate_cosine_distance(encoding1: np.ndarray, encoding2: np.ndarray) -> f
# Normalize encodings (matching desktop exactly)
norm1 = np.linalg.norm(enc1)
norm2 = np.linalg.norm(enc2)
-
if norm1 == 0 or norm2 == 0:
return 2.0
@@ -1769,6 +1772,32 @@ def calculate_cosine_distance(encoding1: np.ndarray, encoding2: np.ndarray) -> f
return 2.0 # Maximum distance on error
+def get_distance_based_min_confidence(distance: float) -> float:
+ """Get minimum confidence threshold based on distance.
+
+ For borderline distances, require higher confidence to reduce false positives.
+ This is used only when use_distance_based_thresholds=True (e.g., in auto-match).
+
+ Args:
+ distance: Cosine distance between faces (0 = identical, 2 = opposite)
+
+ Returns:
+ Minimum confidence percentage (0-100) required for this distance
+ """
+ if distance <= 0.15:
+ # Very close matches: standard threshold
+ return 50.0
+ elif distance <= 0.20:
+ # Borderline matches: require higher confidence
+ return 70.0
+ elif distance <= 0.25:
+ # Near threshold: require very high confidence
+ return 85.0
+ else:
+ # Far matches: require extremely high confidence
+ return 95.0
+
+
def calculate_adaptive_tolerance(base_tolerance: float, face_quality: float) -> float:
"""Calculate adaptive tolerance based on face quality, matching desktop exactly."""
# Start with base tolerance
@@ -1936,7 +1965,9 @@ def find_similar_faces(
include_excluded: bool = False, # Include excluded faces in results
filter_small_faces: bool = False, # Filter out small faces (for auto-match)
min_face_size_ratio: float = 0.005, # Minimum face size ratio (0.5% of image)
-) -> List[Tuple[Face, float, float]]: # Returns (face, distance, confidence_pct)
+ debug: bool = False, # Include debug information (encoding stats)
+ use_distance_based_thresholds: bool = False, # Use distance-based confidence thresholds (for auto-match)
+) -> List[Tuple[Face, float, float, dict | None]]: # Returns (face, distance, confidence_pct, debug_info)
"""Find similar faces matching desktop logic exactly.
Desktop flow:
@@ -1963,6 +1994,7 @@ def find_similar_faces(
base: Face = db.query(Face).filter(Face.id == face_id).first()
if not base:
return []
+
# Load base encoding - auto-detect dtype (supports both float32 and float64)
base_enc = load_face_encoding(base.encoding)
@@ -1986,11 +2018,24 @@ def find_similar_faces(
)
matches: List[Tuple[Face, float, float]] = []
+
for f in all_faces:
# Load other encoding - auto-detect dtype (supports both float32 and float64)
other_enc = load_face_encoding(f.encoding)
other_enc = other_enc.copy() # Make a copy to avoid buffer issues
+ # Calculate debug info if requested
+ debug_info = None
+ if debug:
+ debug_info = {
+ "encoding_length": len(other_enc),
+ "encoding_min": float(np.min(other_enc)),
+ "encoding_max": float(np.max(other_enc)),
+ "encoding_mean": float(np.mean(other_enc)),
+ "encoding_std": float(np.std(other_enc)),
+ "encoding_first_10": [float(x) for x in other_enc[:10].tolist()],
+ }
+
other_quality = float(f.quality_score) if f.quality_score is not None else 0.5
# Calculate adaptive tolerance based on both face qualities (matching desktop exactly)
@@ -2001,7 +2046,6 @@ def find_similar_faces(
distance = calculate_cosine_distance(base_enc, other_enc)
# Filter by distance <= adaptive_tolerance (matching desktop find_similar_faces)
- # DEBUG: Log tolerance usage for troubleshooting
if distance <= adaptive_tolerance:
# Get photo info (desktop does this in find_similar_faces)
if f.photo:
@@ -2012,13 +2056,16 @@ def find_similar_faces(
# Desktop _get_filtered_similar_faces filters by:
# 1. person_id is None (unidentified)
# 2. confidence >= 50% (increased from 40% to reduce false matches)
+ # OR confidence >= distance-based threshold if use_distance_based_thresholds=True
is_unidentified = f.person_id is None
- # DEBUG: Log all faces that pass distance check
- if is_unidentified:
- print(f"DEBUG: Face {f.id} - distance={distance:.4f}, adaptive_tolerance={adaptive_tolerance:.4f}, base_tolerance={tolerance:.4f}, confidence={confidence_pct:.2f}%, passed_distance={distance <= adaptive_tolerance}, passed_confidence={confidence_pct >= 50}")
+ # Calculate minimum confidence threshold
+ if use_distance_based_thresholds:
+ min_confidence = get_distance_based_min_confidence(distance)
+ else:
+ min_confidence = 50.0 # Standard threshold
- if is_unidentified and confidence_pct >= 50:
+ if is_unidentified and confidence_pct >= min_confidence:
# Filter by excluded status if not including excluded faces
if not include_excluded and getattr(f, "excluded", False):
continue
@@ -2036,7 +2083,7 @@ def find_similar_faces(
# Return calibrated confidence percentage (matching desktop)
# Desktop displays confidence_pct directly from _get_calibrated_confidence
- matches.append((f, distance, confidence_pct))
+ matches.append((f, distance, confidence_pct, debug_info))
# Sort by distance (lower is better) - matching desktop
matches.sort(key=lambda x: x[1])
@@ -2197,6 +2244,7 @@ def find_auto_match_matches(
db: Session,
tolerance: float = 0.5,
filter_frontal_only: bool = False,
+ use_distance_based_thresholds: bool = False, # Use distance-based confidence thresholds
) -> List[Tuple[int, int, Face, List[Tuple[Face, float, float]]]]:
"""Find auto-match matches for all identified people, matching desktop logic exactly.
@@ -2289,27 +2337,30 @@ def find_auto_match_matches(
for person_id, reference_face, person_name in person_faces_list:
reference_face_id = reference_face.id
- # Check if reference face is too small (exclude from auto-match)
- reference_photo = db.query(Photo).filter(Photo.id == reference_face.photo_id).first()
- if reference_photo:
- ref_size_ratio = _calculate_face_size_ratio(reference_face, reference_photo)
- if ref_size_ratio < MIN_AUTO_MATCH_FACE_SIZE_RATIO:
- # Skip this person - reference face is too small
- continue
+ # TEMPORARILY DISABLED: Check if reference face is too small (exclude from auto-match)
+ # reference_photo = db.query(Photo).filter(Photo.id == reference_face.photo_id).first()
+ # if reference_photo:
+ # ref_size_ratio = _calculate_face_size_ratio(reference_face, reference_photo)
+ # if ref_size_ratio < MIN_AUTO_MATCH_FACE_SIZE_RATIO:
+ # # Skip this person - reference face is too small
+ # continue
# Use find_similar_faces which matches desktop _get_filtered_similar_faces logic
# Desktop: similar_faces = self.face_processor._get_filtered_similar_faces(
# reference_face_id, tolerance, include_same_photo=False, face_status=None)
# This filters by: person_id is None (unidentified), confidence >= 50% (increased from 40%), sorts by distance
# Auto-match always excludes excluded faces
- # filter_small_faces=True to exclude small match faces
- similar_faces = find_similar_faces(
+ # TEMPORARILY DISABLED: filter_small_faces=True to exclude small match faces
+ similar_faces_with_debug = find_similar_faces(
db, reference_face_id, tolerance=tolerance,
filter_frontal_only=filter_frontal_only,
include_excluded=False, # Auto-match always excludes excluded faces
- filter_small_faces=True, # Exclude small faces from auto-match
- min_face_size_ratio=MIN_AUTO_MATCH_FACE_SIZE_RATIO
+ filter_small_faces=False, # TEMPORARILY DISABLED: Exclude small faces from auto-match
+ min_face_size_ratio=MIN_AUTO_MATCH_FACE_SIZE_RATIO,
+ use_distance_based_thresholds=use_distance_based_thresholds # Use distance-based thresholds if enabled
)
+ # Strip debug_info for internal use
+ similar_faces = [(f, dist, conf) for f, dist, conf, _ in similar_faces_with_debug]
if similar_faces:
results.append((person_id, reference_face_id, reference_face, similar_faces))
@@ -2453,14 +2504,13 @@ def get_auto_match_person_matches(
# Find similar faces using existing function
# Auto-match always excludes excluded faces
- # DEBUG: Log tolerance being used
- print(f"DEBUG get_auto_match_person_matches: person_id={person_id}, tolerance={tolerance}, reference_face_id={reference_face.id}")
- similar_faces = find_similar_faces(
+ similar_faces_with_debug = find_similar_faces(
db, reference_face.id, tolerance=tolerance,
filter_frontal_only=filter_frontal_only,
include_excluded=False # Auto-match always excludes excluded faces
)
- print(f"DEBUG get_auto_match_person_matches: Found {len(similar_faces)} matches for person {person_id} with tolerance {tolerance}")
+ # Strip debug_info for internal use
+ similar_faces = [(f, dist, conf) for f, dist, conf, _ in similar_faces_with_debug]
return similar_faces
diff --git a/src/utils/pose_detection.py b/src/utils/pose_detection.py
index 8a93829..fb8214e 100644
--- a/src/utils/pose_detection.py
+++ b/src/utils/pose_detection.py
@@ -22,7 +22,7 @@ class PoseDetector:
"""Detect face pose (yaw, pitch, roll) using RetinaFace landmarks"""
# Thresholds for pose detection (in degrees)
- PROFILE_YAW_THRESHOLD = 30.0 # Faces with |yaw| >= 30° are considered profile
+ PROFILE_YAW_THRESHOLD = 15.0 # Faces with |yaw| >= 15° are considered profile
EXTREME_YAW_THRESHOLD = 60.0 # Faces with |yaw| >= 60° are extreme profile
PITCH_THRESHOLD = 20.0 # Faces with |pitch| >= 20° are looking up/down
@@ -39,7 +39,7 @@ class PoseDetector:
Args:
yaw_threshold: Yaw angle threshold for profile detection (degrees)
- Default: 30.0
+ Default: 15.0
pitch_threshold: Pitch angle threshold for up/down detection (degrees)
Default: 20.0
roll_threshold: Roll angle threshold for tilt detection (degrees)
@@ -53,17 +53,24 @@ class PoseDetector:
self.roll_threshold = roll_threshold or self.ROLL_THRESHOLD
@staticmethod
- def detect_faces_with_landmarks(img_path: str) -> Dict:
+ def detect_faces_with_landmarks(img_path: str, filter_estimated_landmarks: bool = False) -> Dict:
"""Detect faces using RetinaFace directly
+ Args:
+ img_path: Path to image file
+ filter_estimated_landmarks: If True, remove landmarks that appear to be estimated
+ (e.g., hidden eye in profile views) rather than actually visible.
+ Uses heuristics: if eyes are very close together (< 20px) and
+ yaw calculation suggests extreme profile, mark hidden eye as None.
+
Returns:
Dictionary with face keys and landmark data:
{
'face_1': {
'facial_area': {'x': x, 'y': y, 'w': w, 'h': h},
'landmarks': {
- 'left_eye': (x, y),
- 'right_eye': (x, y),
+ 'left_eye': (x, y) or None,
+ 'right_eye': (x, y) or None,
'nose': (x, y),
'left_mouth': (x, y),
'right_mouth': (x, y)
@@ -76,6 +83,42 @@ class PoseDetector:
return {}
faces = RetinaFace.detect_faces(img_path)
+
+ # Post-process to filter estimated landmarks if requested
+ if filter_estimated_landmarks:
+ for face_key, face_data in faces.items():
+ landmarks = face_data.get('landmarks', {})
+ if not landmarks:
+ continue
+
+ left_eye = landmarks.get('left_eye')
+ right_eye = landmarks.get('right_eye')
+ nose = landmarks.get('nose')
+
+ # Check if both eyes are present and very close together (profile view)
+ if left_eye and right_eye and nose:
+ face_width = abs(right_eye[0] - left_eye[0])
+
+ # If eyes are very close (< 20px), likely a profile view
+ if face_width < 20.0:
+ # Calculate which eye is likely hidden based on nose position
+ eye_mid_x = (left_eye[0] + right_eye[0]) / 2
+ nose_x = nose[0]
+
+ # If nose is closer to left eye, right eye is likely hidden (face turned left)
+ # If nose is closer to right eye, left eye is likely hidden (face turned right)
+ dist_to_left = abs(nose_x - left_eye[0])
+ dist_to_right = abs(nose_x - right_eye[0])
+
+ if dist_to_left < dist_to_right:
+ # Nose closer to left eye = face turned left = right eye hidden
+ landmarks['right_eye'] = None
+ else:
+ # Nose closer to right eye = face turned right = left eye hidden
+ landmarks['left_eye'] = None
+
+ face_data['landmarks'] = landmarks
+
return faces
@staticmethod
@@ -260,7 +303,8 @@ class PoseDetector:
def classify_pose_mode(yaw: Optional[float],
pitch: Optional[float],
roll: Optional[float],
- face_width: Optional[float] = None) -> str:
+ face_width: Optional[float] = None,
+ landmarks: Optional[Dict] = None) -> str:
"""Classify face pose mode from all three angles and optionally face width
Args:
@@ -268,8 +312,10 @@ class PoseDetector:
pitch: Pitch angle in degrees
roll: Roll angle in degrees
face_width: Face width in pixels (eye distance). Used as indicator for profile detection.
- If face_width < 25px, indicates profile view. When yaw is available but < 30°,
+ If face_width < 20px, indicates profile view. When yaw is available but < 15°,
face_width can override yaw if it suggests profile (face_width < 25px).
+ landmarks: Optional facial landmarks dictionary. Used to detect single-eye visibility
+ for extreme profile views where only one eye is visible.
Returns:
Pose mode classification string:
@@ -279,6 +325,28 @@ class PoseDetector:
- 'tilted_left', 'tilted_right': roll variations
- Combined modes: e.g., 'profile_left_looking_up'
"""
+ # Check for single-eye visibility to infer profile direction
+ # This handles extreme profile views where only one eye is visible
+ if landmarks:
+ left_eye = landmarks.get('left_eye')
+ right_eye = landmarks.get('right_eye')
+
+ # Only right eye visible -> face turned left -> profile_left
+ if left_eye is None and right_eye is not None:
+ # Infer profile_left when only right eye is visible
+ inferred_profile = "profile_left"
+ # Only left eye visible -> face turned right -> profile_right
+ elif left_eye is not None and right_eye is None:
+ # Infer profile_right when only left eye is visible
+ inferred_profile = "profile_right"
+ # No eyes visible -> extreme profile, default to profile_left
+ elif left_eye is None and right_eye is None:
+ inferred_profile = "profile_left"
+ else:
+ inferred_profile = None # Both eyes visible, use normal logic
+ else:
+ inferred_profile = None
+
# Default to frontal if angles unknown
yaw_original = yaw
if yaw is None:
@@ -290,20 +358,23 @@ class PoseDetector:
# Face width threshold for profile detection (in pixels)
# Profile faces have very small eye distance (< 25 pixels typically)
- PROFILE_FACE_WIDTH_THRESHOLD = 10.0 #25.0
+ PROFILE_FACE_WIDTH_THRESHOLD = 20.0
# Yaw classification - PRIMARY INDICATOR
- # Use yaw angle as the primary indicator (30° threshold)
+ # Use yaw angle as the primary indicator (15° threshold)
abs_yaw = abs(yaw)
# Primary classification based on yaw angle
- if abs_yaw < 30.0:
+ if abs_yaw < 15.0:
# Yaw indicates frontal view
- # Trust yaw when it's available and reasonable (< 30°)
+ # Trust yaw when it's available and reasonable (< 15°)
# Only use face_width as fallback when yaw is unavailable (None)
if yaw_original is None:
- # Yaw unavailable - use face_width as fallback
- if face_width is not None:
+ # Yaw unavailable - check for single-eye visibility first
+ if inferred_profile is not None:
+ # Single eye visible or no eyes visible -> use inferred profile direction
+ yaw_mode = inferred_profile
+ elif face_width is not None:
if face_width < PROFILE_FACE_WIDTH_THRESHOLD:
# Face width suggests profile view - use it when yaw is unavailable
yaw_mode = "profile_left" # Default direction when yaw unavailable
@@ -311,16 +382,14 @@ class PoseDetector:
# Face width is normal (>= 25px) - likely frontal
yaw_mode = "frontal"
else:
- # Both yaw and face_width unavailable - cannot determine reliably
- # This usually means landmarks are incomplete (missing nose and/or eyes)
- # For extreme profile views, both eyes might not be visible, which would
- # cause face_width to be None. In this case, we cannot reliably determine
- # pose without additional indicators (like face bounding box aspect ratio).
- # Default to frontal (conservative approach), but this might misclassify
- # some extreme profile faces.
- yaw_mode = "frontal"
+ # Both yaw and face_width unavailable - check if we inferred profile from landmarks
+ if inferred_profile is not None:
+ yaw_mode = inferred_profile
+ else:
+ # Cannot determine reliably - default to frontal
+ yaw_mode = "frontal"
else:
- # Yaw is available and < 30° - but still check face_width
+ # Yaw is available and < 15° - but still check face_width
# If face_width is very small (< 25px), it suggests profile even with small yaw
if face_width is not None:
if face_width < PROFILE_FACE_WIDTH_THRESHOLD:
@@ -332,11 +401,11 @@ class PoseDetector:
else:
# No face_width provided - trust yaw, classify as frontal
yaw_mode = "frontal"
- elif yaw <= -30.0:
- # abs_yaw >= 30.0 and yaw is negative - profile left
+ elif yaw <= -15.0:
+ # abs_yaw >= 15.0 and yaw is negative - profile left
yaw_mode = "profile_left" # Negative yaw = face turned left = left profile visible
- elif yaw >= 30.0:
- # abs_yaw >= 30.0 and yaw is positive - profile right
+ elif yaw >= 15.0:
+ # abs_yaw >= 15.0 and yaw is positive - profile right
yaw_mode = "profile_right" # Positive yaw = face turned right = right profile visible
else:
# This should never be reached, but handle edge case
@@ -411,8 +480,8 @@ class PoseDetector:
# Calculate face width (eye distance) for profile detection
face_width = self.calculate_face_width_from_landmarks(landmarks)
- # Classify pose mode (using face width as additional indicator)
- pose_mode = self.classify_pose_mode(yaw_angle, pitch_angle, roll_angle, face_width)
+ # Classify pose mode (using face width and landmarks as additional indicators)
+ pose_mode = self.classify_pose_mode(yaw_angle, pitch_angle, roll_angle, face_width, landmarks)
# Normalize facial_area format (RetinaFace returns list [x, y, w, h] or dict)
facial_area_raw = face_data.get('facial_area', {})
diff --git a/tests/README.md b/tests/README.md
index be8a184..3510603 100644
--- a/tests/README.md
+++ b/tests/README.md
@@ -111,3 +111,4 @@ In CI (GitHub Actions/Gitea Actions), test results appear in:
+
diff --git a/viewer-frontend/scripts/install-dependencies.sh b/viewer-frontend/scripts/install-dependencies.sh
index 4165168..ab64403 100755
--- a/viewer-frontend/scripts/install-dependencies.sh
+++ b/viewer-frontend/scripts/install-dependencies.sh
@@ -207,3 +207,4 @@ echo ""
+
diff --git a/viewer-frontend/scripts/test-prisma-query.ts b/viewer-frontend/scripts/test-prisma-query.ts
index 13a11f1..6b37630 100644
--- a/viewer-frontend/scripts/test-prisma-query.ts
+++ b/viewer-frontend/scripts/test-prisma-query.ts
@@ -148,3 +148,4 @@ testQueries()
+
diff --git a/viewer-frontend/scripts/with-sharp-libpath.sh b/viewer-frontend/scripts/with-sharp-libpath.sh
index f1dc94c..6677835 100755
--- a/viewer-frontend/scripts/with-sharp-libpath.sh
+++ b/viewer-frontend/scripts/with-sharp-libpath.sh
@@ -25,3 +25,4 @@ fi
+