chore: Remove Alembic migration files and configuration

This commit deletes the Alembic migration files and configuration, including the alembic.ini file, env.py, and various migration scripts. This cleanup is part of the transition to a new database management approach, ensuring that outdated migration artifacts do not interfere with future development. The requirements.txt file has also been updated to remove the Alembic dependency. No functional changes to the application are introduced in this commit.
This commit is contained in:
tanyar09 2025-11-10 13:36:51 -05:00
parent ea3d06a3d5
commit ac07932e14
18 changed files with 244 additions and 647 deletions

View File

@ -1,117 +0,0 @@
# A generic, single database configuration.
[alembic]
# path to migration scripts
script_location = alembic
# template used to generate migration file names; The default value is %%(rev)s_%%(slug)s
# Uncomment the line below if you want the files to be prepended with date and time
# file_template = %%(year)d_%%(month).2d_%%(day).2d_%%(hour).2d%%(minute).2d-%%(rev)s_%%(slug)s
# sys.path path, will be prepended to sys.path if present.
# defaults to the current working directory.
prepend_sys_path = .
# timezone to use when rendering the date within the migration file
# as well as the filename.
# If specified, requires the python-dateutil library that can be
# installed by adding `alembic[tz]` to the pip requirements
# string value is passed to dateutil.tz.gettz()
# leave blank for localtime
# timezone =
# max length of characters to apply to the
# "slug" field
# truncate_slug_length = 40
# set to 'true' to run the environment during
# the 'revision' command, regardless of autogenerate
# revision_environment = false
# set to 'true' to allow .pyc and .pyo files without
# a source .py file to be detected as revisions in the
# versions/ directory
# sourceless = false
# version location specification; This defaults
# to alembic/versions. When using multiple version
# directories, initial revisions must be specified with --version-path.
# The path separator used here should be the separator specified by "version_path_separator" below.
# version_locations = %(here)s/bar:%(here)s/bat:alembic/versions
# version path separator; As mentioned above, this is the character used to split
# version_locations. The default within new alembic.ini files is "os", which uses os.pathsep.
# If this key is omitted entirely, it falls back to the legacy behavior of splitting on spaces and/or commas.
# Valid values for version_path_separator are:
#
# version_path_separator = :
# version_path_separator = ;
# version_path_separator = space
version_path_separator = os # Use os.pathsep. Default configuration used for new projects.
# set to 'true' to search source files recursively
# in each "version_locations" directory
# new in Alembic version 1.10
# recursive_version_locations = false
# the output encoding used when revision files
# are written from script.py.mako
# output_encoding = utf-8
# sqlalchemy.url - default database URL used for migrations.
# Override on the command line with: alembic -x db_url=sqlite:///data/punimtag.db <command>
sqlalchemy.url = sqlite:///data/punimtag.db
[post_write_hooks]
# post_write_hooks defines scripts or Python functions that are run
# on newly generated revision scripts. See the documentation for further
# detail and examples
# format using "black" - use the console_scripts runner, against the "black" entrypoint
# hooks = black
# black.type = console_scripts
# black.entrypoint = black
# black.options = -l 79 REVISION_SCRIPT_FILENAME
# lint with attempts to fix using "ruff" - use the exec runner, execute a binary
# hooks = ruff
# ruff.type = exec
# ruff.executable = %(here)s/.venv/bin/ruff
# ruff.options = --fix REVISION_SCRIPT_FILENAME
# Logging configuration
[loggers]
keys = root,sqlalchemy,alembic
[handlers]
keys = console
[formatters]
keys = generic
[logger_root]
level = WARN
handlers = console
qualname =
[logger_sqlalchemy]
level = WARN
handlers =
qualname = sqlalchemy.engine
[logger_alembic]
level = INFO
handlers =
qualname = alembic
[handler_console]
class = StreamHandler
args = (sys.stderr,)
level = NOTSET
formatter = generic
[formatter_generic]
format = %(levelname)-5.5s [%(name)s] %(message)s
datefmt = %H:%M:%S

View File

@ -1,79 +0,0 @@
"""Alembic environment configuration."""
from logging.config import fileConfig
from sqlalchemy import engine_from_config, pool
from alembic import context
# Import models for autogenerate
from src.web.db.models import Base
# this is the Alembic Config object, which provides
# access to the values within the .ini file in use.
config = context.config

# Interpret the config file for Python logging.
# This line sets up loggers basically.
if config.config_file_name is not None:
    fileConfig(config.config_file_name)

# add your model's MetaData object here
# for 'autogenerate' support
target_metadata = Base.metadata

# other values from the config, defined by the needs of env.py,
# can be acquired:
# my_important_option = config.get_main_option("my_important_option")
# ... etc.
def run_migrations_offline() -> None:
    """Run migrations in 'offline' mode.

    Offline mode renders the migration operations as SQL text instead
    of executing them over a live Engine, so no DBAPI driver needs to
    be installed. Calls to context.execute() emit the given string to
    the script output. The database URL comes straight from the
    [alembic] section of the ini file.
    """
    context.configure(
        url=config.get_main_option("sqlalchemy.url"),
        target_metadata=target_metadata,
        literal_binds=True,
        dialect_opts={"paramstyle": "named"},
    )

    with context.begin_transaction():
        context.run_migrations()
def run_migrations_online() -> None:
    """Run migrations in 'online' mode.

    Builds a SQLAlchemy Engine from the ini-file settings and applies
    the migrations over a real database connection.
    """
    ini_section = config.get_section(config.config_ini_section, {})
    engine = engine_from_config(
        ini_section,
        prefix="sqlalchemy.",
        poolclass=pool.NullPool,  # one-shot connection; no pooling needed
    )

    with engine.connect() as connection:
        context.configure(
            connection=connection,
            target_metadata=target_metadata,
        )
        with context.begin_transaction():
            context.run_migrations()
# Entry point: Alembic runs this module and selects offline (SQL script
# generation) or online (live connection) execution based on the CLI flags.
if context.is_offline_mode():
    run_migrations_offline()
else:
    run_migrations_online()

View File

@ -1,27 +0,0 @@
"""${message}
Revision ID: ${up_revision}
Revises: ${down_revision | comma,n}
Create Date: ${create_date}
"""
from typing import Sequence, Union
from alembic import op
import sqlalchemy as sa
${imports if imports else ""}
# revision identifiers, used by Alembic.
revision: str = ${repr(up_revision)}
down_revision: Union[str, None] = ${repr(down_revision)}
branch_labels: Union[str, Sequence[str], None] = ${repr(branch_labels)}
depends_on: Union[str, Sequence[str], None] = ${repr(depends_on)}
def upgrade() -> None:
${upgrades if upgrades else "pass"}
def downgrade() -> None:
${downgrades if downgrades else "pass"}

View File

@ -1,30 +0,0 @@
"""add processed column to photos
Revision ID: add_processed_to_photos_20251103
Revises: 4d53a59b0e41
Create Date: 2025-11-03
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = 'add_processed_to_photos_20251103'
down_revision = '4d53a59b0e41'
branch_labels = None
depends_on = None


def upgrade() -> None:
    """Add a non-nullable ``processed`` flag to ``photos`` and index it."""
    # server_default=sa.false() backfills existing rows so the NOT NULL
    # constraint is satisfiable in a single ALTER.
    op.add_column('photos', sa.Column('processed', sa.Boolean(), nullable=False, server_default=sa.false()))
    # Drop the server default once existing rows are backfilled, so the
    # database no longer supplies an implicit DB-side default on insert.
    op.alter_column('photos', 'processed', server_default=None)
    op.create_index('ix_photos_processed', 'photos', ['processed'], unique=False)


def downgrade() -> None:
    """Remove the ``processed`` column and its index from ``photos``."""
    op.drop_index('ix_photos_processed', table_name='photos')
    op.drop_column('photos', 'processed')

View File

@ -1,25 +0,0 @@
"""add landmarks column to faces
Revision ID: add_landmarks_to_faces_20251106
Revises: add_processed_to_photos_20251103
Create Date: 2025-11-06
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = 'add_landmarks_to_faces_20251106'
down_revision = 'add_processed_to_photos_20251103'
branch_labels = None
depends_on = None


def upgrade() -> None:
    """Add a nullable ``landmarks`` text column to ``faces``.

    Nullable because existing face rows have no landmark data.
    (Presumably serialized landmark coordinates — format not visible here.)
    """
    op.add_column('faces', sa.Column('landmarks', sa.Text(), nullable=True))


def downgrade() -> None:
    """Drop the ``landmarks`` column from ``faces``."""
    op.drop_column('faces', 'landmarks')

View File

@ -1,143 +0,0 @@
"""Initial schema
Revision ID: 4d53a59b0e41
Revises:
Create Date: 2025-10-31 12:03:50.406080
"""
from typing import Sequence, Union
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision: str = '4d53a59b0e41'
down_revision: Union[str, None] = None  # root migration: no parent revision
branch_labels: Union[str, Sequence[str], None] = None
depends_on: Union[str, Sequence[str], None] = None
def upgrade() -> None:
    """Create the initial schema.

    Tables: people, photos, tags, faces (FK -> people/photos),
    photo_tags (association), person_embeddings (FK -> people/faces),
    plus their indexes. Parent tables are created before children so
    the foreign keys resolve.
    """
    # ### commands auto generated by Alembic - please adjust! ###
    op.create_table('people',
    sa.Column('id', sa.Integer(), nullable=False),
    sa.Column('display_name', sa.String(length=256), nullable=False),
    sa.Column('given_name', sa.String(length=128), nullable=True),
    sa.Column('family_name', sa.String(length=128), nullable=True),
    sa.Column('notes', sa.Text(), nullable=True),
    sa.Column('created_at', sa.DateTime(), nullable=False),
    sa.PrimaryKeyConstraint('id')
    )
    op.create_index(op.f('ix_people_display_name'), 'people', ['display_name'], unique=False)
    op.create_index(op.f('ix_people_id'), 'people', ['id'], unique=False)
    op.create_table('photos',
    sa.Column('id', sa.Integer(), nullable=False),
    sa.Column('path', sa.String(length=2048), nullable=False),
    sa.Column('filename', sa.String(length=512), nullable=False),
    sa.Column('checksum', sa.String(length=64), nullable=True),
    sa.Column('date_added', sa.DateTime(), nullable=False),
    sa.Column('date_taken', sa.DateTime(), nullable=True),
    sa.Column('width', sa.Integer(), nullable=True),
    sa.Column('height', sa.Integer(), nullable=True),
    sa.Column('mime_type', sa.String(length=128), nullable=True),
    sa.PrimaryKeyConstraint('id')
    )
    # path and checksum are unique: one row per file, deduped by content hash.
    op.create_index(op.f('ix_photos_checksum'), 'photos', ['checksum'], unique=True)
    op.create_index(op.f('ix_photos_date_taken'), 'photos', ['date_taken'], unique=False)
    op.create_index(op.f('ix_photos_id'), 'photos', ['id'], unique=False)
    op.create_index(op.f('ix_photos_path'), 'photos', ['path'], unique=True)
    op.create_table('tags',
    sa.Column('id', sa.Integer(), nullable=False),
    sa.Column('tag', sa.String(length=128), nullable=False),
    sa.Column('created_at', sa.DateTime(), nullable=False),
    sa.PrimaryKeyConstraint('id')
    )
    op.create_index(op.f('ix_tags_id'), 'tags', ['id'], unique=False)
    op.create_index(op.f('ix_tags_tag'), 'tags', ['tag'], unique=True)
    op.create_table('faces',
    sa.Column('id', sa.Integer(), nullable=False),
    sa.Column('photo_id', sa.Integer(), nullable=False),
    sa.Column('person_id', sa.Integer(), nullable=True),
    sa.Column('bbox_x', sa.Integer(), nullable=False),
    sa.Column('bbox_y', sa.Integer(), nullable=False),
    sa.Column('bbox_w', sa.Integer(), nullable=False),
    sa.Column('bbox_h', sa.Integer(), nullable=False),
    sa.Column('embedding', sa.LargeBinary(), nullable=False),
    sa.Column('confidence', sa.Integer(), nullable=True),
    sa.Column('quality', sa.Integer(), nullable=True),
    sa.Column('model', sa.String(length=64), nullable=True),
    sa.Column('detector', sa.String(length=64), nullable=True),
    sa.Column('created_at', sa.DateTime(), nullable=False),
    sa.ForeignKeyConstraint(['person_id'], ['people.id'], ),
    sa.ForeignKeyConstraint(['photo_id'], ['photos.id'], ),
    sa.PrimaryKeyConstraint('id')
    )
    # NOTE(review): 'idx_faces_quality' and op.f('ix_faces_quality') both
    # index faces.quality — appears redundant; confirm before pruning.
    op.create_index('idx_faces_quality', 'faces', ['quality'], unique=False)
    op.create_index(op.f('ix_faces_id'), 'faces', ['id'], unique=False)
    op.create_index(op.f('ix_faces_person_id'), 'faces', ['person_id'], unique=False)
    op.create_index(op.f('ix_faces_photo_id'), 'faces', ['photo_id'], unique=False)
    op.create_index(op.f('ix_faces_quality'), 'faces', ['quality'], unique=False)
    op.create_table('photo_tags',
    sa.Column('photo_id', sa.Integer(), nullable=False),
    sa.Column('tag_id', sa.Integer(), nullable=False),
    sa.Column('created_at', sa.DateTime(), nullable=False),
    sa.ForeignKeyConstraint(['photo_id'], ['photos.id'], ),
    sa.ForeignKeyConstraint(['tag_id'], ['tags.id'], ),
    sa.PrimaryKeyConstraint('photo_id', 'tag_id'),
    sa.UniqueConstraint('photo_id', 'tag_id', name='uq_photo_tag')
    )
    op.create_index('idx_photo_tags_photo', 'photo_tags', ['photo_id'], unique=False)
    op.create_index('idx_photo_tags_tag', 'photo_tags', ['tag_id'], unique=False)
    op.create_table('person_embeddings',
    sa.Column('id', sa.Integer(), nullable=False),
    sa.Column('person_id', sa.Integer(), nullable=False),
    sa.Column('face_id', sa.Integer(), nullable=False),
    sa.Column('embedding', sa.LargeBinary(), nullable=False),
    sa.Column('quality', sa.Integer(), nullable=True),
    sa.Column('model', sa.String(length=64), nullable=True),
    sa.Column('created_at', sa.DateTime(), nullable=False),
    sa.ForeignKeyConstraint(['face_id'], ['faces.id'], ),
    sa.ForeignKeyConstraint(['person_id'], ['people.id'], ),
    sa.PrimaryKeyConstraint('id')
    )
    # NOTE(review): the idx_* and op.f('ix_*') pairs below each index the
    # same column (person_id, quality) — likely redundant; confirm.
    op.create_index('idx_person_embeddings_person', 'person_embeddings', ['person_id'], unique=False)
    op.create_index('idx_person_embeddings_quality', 'person_embeddings', ['quality'], unique=False)
    op.create_index(op.f('ix_person_embeddings_face_id'), 'person_embeddings', ['face_id'], unique=False)
    op.create_index(op.f('ix_person_embeddings_id'), 'person_embeddings', ['id'], unique=False)
    op.create_index(op.f('ix_person_embeddings_person_id'), 'person_embeddings', ['person_id'], unique=False)
    op.create_index(op.f('ix_person_embeddings_quality'), 'person_embeddings', ['quality'], unique=False)
    # ### end Alembic commands ###
def downgrade() -> None:
    """Drop the entire initial schema.

    Reverses upgrade() exactly: indexes are dropped before their table,
    and child tables (person_embeddings, photo_tags, faces) before the
    parent tables they reference.
    """
    # ### commands auto generated by Alembic - please adjust! ###
    op.drop_index(op.f('ix_person_embeddings_quality'), table_name='person_embeddings')
    op.drop_index(op.f('ix_person_embeddings_person_id'), table_name='person_embeddings')
    op.drop_index(op.f('ix_person_embeddings_id'), table_name='person_embeddings')
    op.drop_index(op.f('ix_person_embeddings_face_id'), table_name='person_embeddings')
    op.drop_index('idx_person_embeddings_quality', table_name='person_embeddings')
    op.drop_index('idx_person_embeddings_person', table_name='person_embeddings')
    op.drop_table('person_embeddings')
    op.drop_index('idx_photo_tags_tag', table_name='photo_tags')
    op.drop_index('idx_photo_tags_photo', table_name='photo_tags')
    op.drop_table('photo_tags')
    op.drop_index(op.f('ix_faces_quality'), table_name='faces')
    op.drop_index(op.f('ix_faces_photo_id'), table_name='faces')
    op.drop_index(op.f('ix_faces_person_id'), table_name='faces')
    op.drop_index(op.f('ix_faces_id'), table_name='faces')
    op.drop_index('idx_faces_quality', table_name='faces')
    op.drop_table('faces')
    op.drop_index(op.f('ix_tags_tag'), table_name='tags')
    op.drop_index(op.f('ix_tags_id'), table_name='tags')
    op.drop_table('tags')
    op.drop_index(op.f('ix_photos_path'), table_name='photos')
    op.drop_index(op.f('ix_photos_id'), table_name='photos')
    op.drop_index(op.f('ix_photos_date_taken'), table_name='photos')
    op.drop_index(op.f('ix_photos_checksum'), table_name='photos')
    op.drop_table('photos')
    op.drop_index(op.f('ix_people_id'), table_name='people')
    op.drop_index(op.f('ix_people_display_name'), table_name='people')
    op.drop_table('people')
    # ### end Alembic commands ###

View File

@ -1,5 +1,6 @@
import { BrowserRouter, Routes, Route, Navigate } from 'react-router-dom'
import { AuthProvider, useAuth } from './context/AuthContext'
import { DeveloperModeProvider } from './context/DeveloperModeContext'
import Login from './pages/Login'
import Dashboard from './pages/Dashboard'
import Search from './pages/Search'
@ -49,9 +50,11 @@ function AppRoutes() {
function App() {
return (
<AuthProvider>
<BrowserRouter>
<AppRoutes />
</BrowserRouter>
<DeveloperModeProvider>
<BrowserRouter>
<AppRoutes />
</BrowserRouter>
</DeveloperModeProvider>
</AuthProvider>
)
}

View File

@ -0,0 +1,42 @@
import { createContext, useContext, useState, useEffect, ReactNode } from 'react'
// Shape of the developer-mode context value: the current flag plus a
// setter that also persists the choice (see the provider below).
interface DeveloperModeContextType {
  isDeveloperMode: boolean
  setDeveloperMode: (enabled: boolean) => void
}

// Default is undefined so consumers can detect a missing provider.
const DeveloperModeContext = createContext<DeveloperModeContextType | undefined>(undefined)

// localStorage key under which the developer-mode flag is persisted.
const STORAGE_KEY = 'punimtag_developer_mode'
export function DeveloperModeProvider({ children }: { children: ReactNode }) {
  // Start disabled; any persisted value is applied right after mount.
  const [isDeveloperMode, setIsDeveloperMode] = useState<boolean>(false)

  // Restore the persisted flag from localStorage once, on mount.
  useEffect(() => {
    const saved = localStorage.getItem(STORAGE_KEY)
    if (saved !== null) {
      setIsDeveloperMode(saved === 'true')
    }
  }, [])

  // Update state and persist the choice in one step.
  const setDeveloperMode = (enabled: boolean) => {
    localStorage.setItem(STORAGE_KEY, String(enabled))
    setIsDeveloperMode(enabled)
  }

  return (
    <DeveloperModeContext.Provider value={{ isDeveloperMode, setDeveloperMode }}>
      {children}
    </DeveloperModeContext.Provider>
  )
}
export function useDeveloperMode() {
const context = useContext(DeveloperModeContext)
if (context === undefined) {
throw new Error('useDeveloperMode must be used within a DeveloperModeProvider')
}
return context
}

View File

@ -131,6 +131,10 @@ export default function AutoMatch() {
if (summary) {
alert(summary)
}
// Reload faces after auto-accept to remove auto-accepted faces from the list
await loadAutoMatch()
return
}
if (response.people.length === 0) {

View File

@ -35,7 +35,6 @@ export default function Identify() {
const [imageLoading, setImageLoading] = useState(false)
const [filtersCollapsed, setFiltersCollapsed] = useState(false)
const [loadingFaces, setLoadingFaces] = useState(false)
const [loadingProgress, setLoadingProgress] = useState({ current: 0, total: 0, message: '' })
// Store form data per face ID (matching desktop behavior)
const [faceFormData, setFaceFormData] = useState<Record<number, {
@ -58,7 +57,6 @@ export default function Identify() {
const loadFaces = async () => {
setLoadingFaces(true)
setLoadingProgress({ current: 0, total: 0, message: 'Loading faces...' })
try {
const res = await facesApi.getUnidentified({
@ -73,7 +71,6 @@ export default function Identify() {
// Apply unique faces filter if enabled
if (uniqueFacesOnly) {
setLoadingProgress({ current: 0, total: res.items.length, message: 'Filtering unique faces...' })
const filtered = await filterUniqueFaces(res.items)
setFaces(filtered)
setTotal(filtered.length)
@ -84,7 +81,6 @@ export default function Identify() {
setCurrentIdx(0)
} finally {
setLoadingFaces(false)
setLoadingProgress({ current: 0, total: 0, message: '' })
}
}
@ -102,40 +98,17 @@ export default function Identify() {
similarityMap.set(face.id, new Set<number>())
}
// Update progress - loading all faces once
setLoadingProgress({
current: 0,
total: faces.length,
message: 'Loading all faces from database...'
})
try {
// Get all face IDs
const faceIds = faces.map(f => f.id)
// Update progress - calculating similarities
setLoadingProgress({
current: 0,
total: faces.length,
message: `Calculating similarities for ${faces.length} faces (this may take a while)...`
})
// Call batch similarity endpoint - loads all faces once from DB
// Note: This is where the heavy computation happens (comparing N faces to M faces)
// The progress bar will show 0% during this time as we can't track backend progress
const batchRes = await facesApi.batchSimilarity({
face_ids: faceIds,
min_confidence: 60.0
})
// Update progress - calculation complete, now processing results
const totalPairs = batchRes.pairs.length
setLoadingProgress({
current: 0,
total: totalPairs,
message: `Similarity calculation complete! Processing ${totalPairs} results...`
})
// Build similarity map from batch results
// Note: results include similarities to all faces in DB, but we only care about
// similarities between faces in the current list
@ -144,14 +117,8 @@ export default function Identify() {
// Only include pairs where both faces are in the current list
if (!faceMap.has(pair.face_id_1) || !faceMap.has(pair.face_id_2)) {
processedPairs++
// Update progress every 100 pairs or at the end
if (processedPairs % 100 === 0 || processedPairs === totalPairs) {
setLoadingProgress({
current: processedPairs,
total: totalPairs,
message: `Processing similarity results... (${processedPairs} / ${totalPairs})`
})
// Allow UI to update
// Allow UI to update periodically
if (processedPairs % 100 === 0 || processedPairs === batchRes.pairs.length) {
await new Promise(resolve => setTimeout(resolve, 0))
}
continue
@ -167,14 +134,8 @@ export default function Identify() {
similarityMap.set(pair.face_id_2, set2)
processedPairs++
// Update progress every 100 pairs or at the end
if (processedPairs % 100 === 0 || processedPairs === totalPairs) {
setLoadingProgress({
current: processedPairs,
total: totalPairs,
message: `Processing similarity results... (${processedPairs} / ${totalPairs})`
})
// Allow UI to update
// Allow UI to update periodically
if (processedPairs % 100 === 0 || processedPairs === batchRes.pairs.length) {
await new Promise(resolve => setTimeout(resolve, 0))
}
}
@ -434,57 +395,25 @@ export default function Identify() {
<div>
<h1 className="text-2xl font-bold text-gray-900 mb-4">Identify</h1>
{/* Loading Progress Bar */}
{loadingFaces && (
<div className="bg-white rounded-lg shadow p-4 mb-4">
<div className="flex items-center justify-between mb-2">
<span className="text-sm font-medium text-gray-700">
{loadingProgress.message || 'Loading faces...'}
</span>
{loadingProgress.total > 0 && (
<span className="text-sm text-gray-500">
{loadingProgress.current} / {loadingProgress.total}
{loadingProgress.total > 0 && (
<span className="ml-1">
({Math.round((loadingProgress.current / loadingProgress.total) * 100)}%)
</span>
)}
</span>
)}
</div>
<div className="w-full bg-gray-200 rounded-full h-2.5">
{loadingProgress.total > 0 ? (
<div
className="bg-blue-600 h-2.5 rounded-full transition-all duration-300"
style={{
width: `${Math.max(1, (loadingProgress.current / loadingProgress.total) * 100)}%`
}}
/>
) : (
<div className="relative h-2.5 overflow-hidden rounded-full bg-gray-200">
<div
className="absolute h-2.5 bg-blue-600 rounded-full"
style={{
width: '30%',
animation: 'slide 1.5s ease-in-out infinite',
left: '-30%'
}}
/>
<style>{`
@keyframes slide {
0% { left: -30%; }
100% { left: 100%; }
}
`}</style>
</div>
)}
</div>
</div>
)}
<div className="grid grid-cols-12 gap-4">
{/* Left: Controls and current face */}
<div className="col-span-4">
{/* Unique Faces Checkbox - Outside Filters */}
<div className="bg-white rounded-lg shadow mb-4 p-4">
<label className="flex items-center gap-2">
<input
type="checkbox"
checked={uniqueFacesOnly}
onChange={(e) => setUniqueFacesOnly(e.target.checked)}
className="rounded"
/>
<span className="text-sm font-medium text-gray-700">Unique faces only</span>
</label>
<p className="text-xs text-gray-500 mt-1 ml-6">
Hide duplicates with 60% match confidence
</p>
</div>
<div className="bg-white rounded-lg shadow mb-4">
<div className="flex items-center justify-between p-4 border-b cursor-pointer hover:bg-gray-50" onClick={() => setFiltersCollapsed(!filtersCollapsed)}>
<h2 className="text-lg font-semibold text-gray-900">Filters</h2>
@ -545,20 +474,6 @@ export default function Identify() {
</select>
</div>
</div>
<div className="mt-3 pt-3 border-t">
<label className="flex items-center gap-2">
<input
type="checkbox"
checked={uniqueFacesOnly}
onChange={(e) => setUniqueFacesOnly(e.target.checked)}
className="rounded"
/>
<span className="text-sm text-gray-700">Unique faces only</span>
</label>
<p className="text-xs text-gray-500 mt-1 ml-6">
Hide duplicates with 60% match confidence
</p>
</div>
<div className="mt-4 pt-3 border-t">
<button
onClick={loadFaces}

View File

@ -413,7 +413,7 @@ export default function Modify() {
value={lastNameFilter}
onChange={(e) => setLastNameFilter(e.target.value)}
onKeyDown={(e) => e.key === 'Enter' && handleSearch()}
placeholder="Type Last Name"
placeholder="Type Last Name or Maiden Name"
className="flex-1 px-3 py-2 border border-gray-300 rounded-md focus:outline-none focus:ring-2 focus:ring-blue-500"
/>
<button
@ -429,7 +429,7 @@ export default function Modify() {
Clear
</button>
</div>
<p className="text-xs text-gray-500">Type Last Name</p>
<p className="text-xs text-gray-500">Search by Last Name or Maiden Name</p>
</div>
{/* People list */}

View File

@ -1,6 +1,7 @@
import { useState, useRef, useEffect } from 'react'
import { facesApi, ProcessFacesRequest } from '../api/faces'
import { jobsApi, JobResponse, JobStatus } from '../api/jobs'
import { useDeveloperMode } from '../context/DeveloperModeContext'
interface JobProgress {
id: string
@ -17,6 +18,7 @@ const DETECTOR_OPTIONS = ['retinaface', 'mtcnn', 'opencv', 'ssd']
const MODEL_OPTIONS = ['ArcFace', 'Facenet', 'Facenet512', 'VGG-Face']
export default function Process() {
const { isDeveloperMode } = useDeveloperMode()
const [batchSize, setBatchSize] = useState<number | undefined>(undefined)
const [detectorBackend, setDetectorBackend] = useState('retinaface')
const [modelName, setModelName] = useState('ArcFace')
@ -84,26 +86,22 @@ export default function Process() {
try {
// Call API to cancel the job
const result = await jobsApi.cancelJob(currentJob.id)
console.log('Job cancellation:', result)
console.log('Job cancellation requested:', result)
// Close SSE stream
if (eventSourceRef.current) {
eventSourceRef.current.close()
eventSourceRef.current = null
}
// Update job status to show cancellation is in progress
setCurrentJob({
...currentJob,
status: JobStatus.PROGRESS,
message: 'Cancellation requested - finishing current photo...',
})
// Update UI state
setIsProcessing(false)
setError(`Job cancelled: ${result.message}`)
// Don't close SSE stream yet - keep it open to wait for job to actually stop
// The job will finish the current photo, then stop and send a final status update
// The SSE stream handler will close the stream when job status becomes SUCCESS or FAILURE
// Update job status
if (currentJob) {
setCurrentJob({
...currentJob,
status: JobStatus.FAILURE,
message: 'Cancelled by user',
})
}
// Set a flag to indicate cancellation was requested
// This will be checked in the SSE handler
setError(null) // Clear any previous errors
} catch (err: any) {
console.error('Error cancelling job:', err)
setError(err.response?.data?.detail || err.message || 'Failed to cancel job')
@ -155,6 +153,11 @@ export default function Process() {
eventSource.close()
eventSourceRef.current = null
// Show cancellation message if job was cancelled
if (data.message && (data.message.includes('Cancelled') || data.message.includes('cancelled'))) {
setError(`Job cancelled: ${data.message}`)
}
// Fetch final job result to get processing stats
if (jobStatus === JobStatus.SUCCESS) {
fetchJobResult(jobId)
@ -255,57 +258,61 @@ export default function Process() {
</p>
</div>
{/* Detector Backend */}
<div>
<label
htmlFor="detector-backend"
className="block text-sm font-medium text-gray-700 mb-2"
>
Face Detector
</label>
<select
id="detector-backend"
value={detectorBackend}
onChange={(e) => setDetectorBackend(e.target.value)}
className="w-full px-3 py-2 border border-gray-300 rounded-md shadow-sm focus:outline-none focus:ring-blue-500 focus:border-blue-500"
disabled={isProcessing}
>
{DETECTOR_OPTIONS.map((option) => (
<option key={option} value={option}>
{option.charAt(0).toUpperCase() + option.slice(1)}
</option>
))}
</select>
<p className="mt-1 text-sm text-gray-500">
RetinaFace recommended for best accuracy
</p>
</div>
{/* Detector Backend - Only visible in developer mode */}
{isDeveloperMode && (
<div>
<label
htmlFor="detector-backend"
className="block text-sm font-medium text-gray-700 mb-2"
>
Face Detector
</label>
<select
id="detector-backend"
value={detectorBackend}
onChange={(e) => setDetectorBackend(e.target.value)}
className="w-full px-3 py-2 border border-gray-300 rounded-md shadow-sm focus:outline-none focus:ring-blue-500 focus:border-blue-500"
disabled={isProcessing}
>
{DETECTOR_OPTIONS.map((option) => (
<option key={option} value={option}>
{option.charAt(0).toUpperCase() + option.slice(1)}
</option>
))}
</select>
<p className="mt-1 text-sm text-gray-500">
RetinaFace recommended for best accuracy
</p>
</div>
)}
{/* Model Name */}
<div>
<label
htmlFor="model-name"
className="block text-sm font-medium text-gray-700 mb-2"
>
Recognition Model
</label>
<select
id="model-name"
value={modelName}
onChange={(e) => setModelName(e.target.value)}
className="w-full px-3 py-2 border border-gray-300 rounded-md shadow-sm focus:outline-none focus:ring-blue-500 focus:border-blue-500"
disabled={isProcessing}
>
{MODEL_OPTIONS.map((option) => (
<option key={option} value={option}>
{option}
</option>
))}
</select>
<p className="mt-1 text-gray-500">
ArcFace recommended for best accuracy
</p>
</div>
{/* Model Name - Only visible in developer mode */}
{isDeveloperMode && (
<div>
<label
htmlFor="model-name"
className="block text-sm font-medium text-gray-700 mb-2"
>
Recognition Model
</label>
<select
id="model-name"
value={modelName}
onChange={(e) => setModelName(e.target.value)}
className="w-full px-3 py-2 border border-gray-300 rounded-md shadow-sm focus:outline-none focus:ring-blue-500 focus:border-blue-500"
disabled={isProcessing}
>
{MODEL_OPTIONS.map((option) => (
<option key={option} value={option}>
{option}
</option>
))}
</select>
<p className="mt-1 text-gray-500">
ArcFace recommended for best accuracy
</p>
</div>
)}
{/* Control Buttons */}
<div className="flex gap-2 pt-4">

View File

@ -1,9 +1,37 @@
import { useDeveloperMode } from '../context/DeveloperModeContext'
export default function Settings() {
const { isDeveloperMode, setDeveloperMode } = useDeveloperMode()
return (
<div>
<h1 className="text-2xl font-bold text-gray-900 mb-4">Settings</h1>
<div className="bg-white rounded-lg shadow p-6">
<p className="text-gray-600">Settings panel coming soon.</p>
<div className="bg-white rounded-lg shadow p-6 mb-4">
<h2 className="text-lg font-semibold text-gray-900 mb-4">Developer Options</h2>
<div className="flex items-center justify-between py-3 border-b border-gray-200">
<div className="flex-1">
<label htmlFor="developer-mode" className="text-sm font-medium text-gray-700">
Developer Mode
</label>
<p className="text-xs text-gray-500 mt-1">
Enable developer features. Additional features will be available when enabled.
</p>
</div>
<div className="ml-4">
<label className="relative inline-flex items-center cursor-pointer">
<input
type="checkbox"
id="developer-mode"
checked={isDeveloperMode}
onChange={(e) => setDeveloperMode(e.target.checked)}
className="sr-only peer"
/>
<div className="w-11 h-6 bg-gray-200 peer-focus:outline-none peer-focus:ring-4 peer-focus:ring-blue-300 rounded-full peer peer-checked:after:translate-x-full peer-checked:after:border-white after:content-[''] after:absolute after:top-[2px] after:left-[2px] after:bg-white after:border-gray-300 after:border after:rounded-full after:h-5 after:w-5 after:transition-all peer-checked:bg-blue-600"></div>
</label>
</div>
</div>
</div>
</div>
)

View File

@ -3,7 +3,6 @@ uvicorn[standard]==0.30.6
pydantic==2.9.1
SQLAlchemy==2.0.36
psycopg2-binary==2.9.9
alembic==1.13.2
redis==5.0.8
rq==1.16.2
python-jose[cryptography]==3.3.0

View File

@ -52,6 +52,11 @@ def get_job(job_id: str) -> JobResponse:
message = job.meta.get("message", "") if job.meta else ""
# Check if job was cancelled
if job.meta and job.meta.get("cancelled", False):
job_status = JobStatus.FAILURE
message = job.meta.get("message", "Cancelled by user")
# If job failed, include error message
if rq_status == "failed" and job.exc_info:
# Extract error message from exception info
@ -95,16 +100,28 @@ def stream_job_progress(job_id: str):
"failed": JobStatus.FAILURE,
}
job_status = status_map.get(job.get_status(), JobStatus.PENDING)
progress = 0
if job_status == JobStatus.STARTED or job_status == JobStatus.PROGRESS:
# Check if job was cancelled first
if job.meta and job.meta.get("cancelled", False):
job_status = JobStatus.FAILURE
message = job.meta.get("message", "Cancelled by user")
progress = job.meta.get("progress", 0) if job.meta else 0
elif job_status == JobStatus.SUCCESS:
progress = 100
elif job_status == JobStatus.FAILURE:
else:
progress = 0
if job_status == JobStatus.STARTED:
# Job is running - show progress if available
progress = job.meta.get("progress", 0) if job.meta else 0
# Map to PROGRESS status if we have actual progress
if progress > 0:
job_status = JobStatus.PROGRESS
elif job_status == JobStatus.PROGRESS:
progress = job.meta.get("progress", 0) if job.meta else 0
elif job_status == JobStatus.SUCCESS:
progress = 100
elif job_status == JobStatus.FAILURE:
progress = 0
message = job.meta.get("message", "") if job.meta else ""
message = job.meta.get("message", "") if job.meta else ""
# Only send event if progress or message changed
if progress != last_progress or message != last_message:

View File

@ -44,12 +44,12 @@ def list_people(
@router.get("/with-faces", response_model=PeopleWithFacesListResponse)
def list_people_with_faces(
last_name: str | None = Query(None, description="Filter by last name (case-insensitive)"),
last_name: str | None = Query(None, description="Filter by last name or maiden name (case-insensitive)"),
db: Session = Depends(get_db),
) -> PeopleWithFacesListResponse:
"""List all people with face counts, sorted by last_name, first_name.
Optionally filter by last_name if provided (case-insensitive search).
Optionally filter by last_name or maiden_name if provided (case-insensitive search).
Only returns people who have at least one face.
"""
# Query people with face counts
@ -64,8 +64,12 @@ def list_people_with_faces(
)
if last_name:
# Case-insensitive search on last_name
query = query.filter(func.lower(Person.last_name).contains(func.lower(last_name)))
# Case-insensitive search on both last_name and maiden_name
search_term = last_name.lower()
query = query.filter(
(func.lower(Person.last_name).contains(search_term)) |
((Person.maiden_name.isnot(None)) & (func.lower(Person.maiden_name).contains(search_term)))
)
results = query.order_by(Person.last_name.asc(), Person.first_name.asc()).all()

View File

@ -338,7 +338,7 @@ def process_photo_faces(
try:
pose_faces = pose_detector.detect_pose_faces(face_detection_path)
if pose_faces:
print(f"[FaceService] Pose detection: found {len(pose_faces)} faces with pose data")
print(f"[FaceService] Pose detection for {photo.filename}: found {len(pose_faces)} faces with pose data")
except Exception as e:
print(f"[FaceService] ⚠️ Pose detection failed for {photo.filename}: {e}, using defaults")
pose_faces = []
@ -348,7 +348,7 @@ def process_photo_faces(
pose_detector_local = PoseDetector()
pose_faces = pose_detector_local.detect_pose_faces(face_detection_path)
if pose_faces:
print(f"[FaceService] Pose detection: found {len(pose_faces)} faces with pose data")
print(f"[FaceService] Pose detection for {photo.filename}: found {len(pose_faces)} faces with pose data")
except Exception as e:
print(f"[FaceService] ⚠️ Pose detection failed for {photo.filename}: {e}, using defaults")
pose_faces = []
@ -1058,14 +1058,19 @@ def process_unprocessed_photos(
if check_cancelled():
print(f"[FaceService] Job cancelled at photo {idx}/{total}")
if update_progress:
update_progress(
idx - 1,
total,
"Cancelled by user",
total_faces_detected,
total_faces_stored,
)
break
try:
update_progress(
idx - 1,
total,
"Cancelled by user",
total_faces_detected,
total_faces_stored,
)
except KeyboardInterrupt:
# Expected when cancellation is detected
pass
# Raise KeyboardInterrupt to signal cancellation to the task handler
raise KeyboardInterrupt("Job cancelled by user")
try:
# Update progress before processing each photo
@ -1102,28 +1107,8 @@ def process_unprocessed_photos(
first_photo_time = time.time() - first_photo_start
print(f"[FaceService] First photo completed in {first_photo_time:.2f}s")
# Check for cancellation AFTER finishing the current photo completely
# This allows the current photo to complete (including pose detection and DB commit),
# then stops before the next one
if check_cancelled():
print(f"[FaceService] Job cancelled after finishing photo {idx}/{total}")
# Update progress to show cancellation status
if update_progress:
try:
update_progress(
idx,
total,
"Cancelled by user - finished current photo",
total_faces_detected,
total_faces_stored,
)
except KeyboardInterrupt:
# If update_progress raises KeyboardInterrupt, that's expected
# The cancellation check already happened, so we're good
pass
break
# Update progress only if NOT cancelled (to avoid unnecessary KeyboardInterrupt)
# Update progress to show completion (including pose detection)
# This happens AFTER the entire photo processing is complete
if update_progress:
try:
update_progress(
@ -1134,12 +1119,21 @@ def process_unprocessed_photos(
total_faces_stored,
)
except KeyboardInterrupt:
# If cancellation was detected during update_progress, check again and break
# If cancellation was detected during update_progress, check again
if check_cancelled():
print(f"[FaceService] Job cancelled during progress update after photo {idx}/{total}")
break
# Raise KeyboardInterrupt to signal cancellation to the task handler
raise KeyboardInterrupt("Job cancelled by user after completing current photo")
# Re-raise if it wasn't a cancellation
raise
# Check for cancellation AFTER updating progress (photo is fully complete)
# This ensures the entire photo processing is done (including pose detection and DB commit),
# and the progress shows "Completed", then stops before the next one
if check_cancelled():
print(f"[FaceService] Job cancelled after completing photo {idx}/{total} (including pose detection)")
# Raise KeyboardInterrupt to signal cancellation to the task handler
raise KeyboardInterrupt("Job cancelled by user after completing current photo")
except KeyboardInterrupt:
# Cancellation was requested - stop processing gracefully
print(f"[FaceService] Job cancelled during processing of photo {idx}/{total}")

View File

@ -211,8 +211,13 @@ def process_faces_task(
try:
job.meta = job.meta or {}
job.meta.update({
"message": "Cancelled by user",
"progress": job.meta.get("progress", 0),
"message": "Cancelled by user - finished current photo",
"cancelled": True,
"processed": job.meta.get("processed", photos_processed),
"total": job.meta.get("total", 0),
"faces_detected": job.meta.get("faces_detected", total_faces_detected),
"faces_stored": job.meta.get("faces_stored", total_faces_stored),
})
job.save_meta()
except Exception: