punimtag/docs/testing-standards.md
2025-08-15 00:57:39 -08:00

12 KiB

PunimTag Testing Standards

Overview

This document defines the standards for writing and organizing tests in PunimTag.

Test Organization

Directory Structure

tests/
├── unit/              # Unit tests for individual functions
├── integration/       # Integration tests for API endpoints
├── e2e/              # End-to-end tests for complete workflows
├── fixtures/          # Test data and fixtures
├── utils/             # Test utilities and helpers
└── conftest.py        # pytest configuration and shared fixtures

Test File Naming

  • Unit Tests: test_<module_name>.py
  • Integration Tests: test_<feature>_integration.py
  • E2E Tests: test_<workflow>_e2e.py
  • Test Utilities: <utility_name>_helpers.py (avoid the test_ prefix so pytest does not collect helper modules as test files)

Test Categories

Unit Tests

Test individual functions and classes in isolation.

# tests/unit/test_face_recognition.py
import pytest
from src.utils.face_recognition import detect_faces, encode_face

def test_detect_faces_with_valid_image():
    """A known-good fixture image should yield at least one detected face."""
    detected = detect_faces("tests/fixtures/valid_face.jpg")

    assert detected
    for box in detected:
        # Every detection must expose its bounding-box coordinates.
        assert hasattr(box, 'left')
        assert hasattr(box, 'top')

def test_detect_faces_with_no_faces():
    """An image that contains no faces should produce an empty result."""
    detected = detect_faces("tests/fixtures/no_faces.jpg")

    assert not detected

def test_encode_face_with_valid_face():
    """A valid face image should encode to a 128-dimensional float vector."""
    # NOTE(review): load_test_face_image() is assumed to be a shared test
    # helper (e.g. from tests/utils) — confirm it is imported in real code.
    encoding = encode_face(load_test_face_image())

    assert len(encoding) == 128
    for component in encoding:
        assert isinstance(component, float)

Integration Tests

Test API endpoints and database interactions.

# tests/integration/test_photo_api.py
import pytest
from src.app import app

@pytest.fixture
def client():
    """Yield a Flask test client configured against the test database."""
    app.config.update(TESTING=True, DATABASE='test.db')

    with app.test_client() as test_client:
        yield test_client

def test_get_photos_endpoint(client):
    """GET /photos returns 200 with a success flag and a photos list."""
    response = client.get('/photos')

    assert response.status_code == 200
    data = response.get_json()
    # Compare booleans with `is`, not `==` (PEP 8 / flake8 E712).
    assert data['success'] is True
    assert 'photos' in data

def test_create_photo_endpoint(client):
    """POST /photos creates a photo and returns 201 with its new id."""
    photo_data = {
        'filename': 'test.jpg',
        'path': '/test/path/test.jpg'
    }

    response = client.post('/photos', json=photo_data)

    assert response.status_code == 201
    data = response.get_json()
    # Compare booleans with `is`, not `==` (PEP 8 / flake8 E712).
    assert data['success'] is True
    assert 'photo_id' in data

def test_get_photo_not_found(client):
    """GET /photos/<id> for a missing photo returns 404 with an error body."""
    response = client.get('/photos/99999')

    assert response.status_code == 404
    data = response.get_json()
    # Compare booleans with `is`, not `==` (PEP 8 / flake8 E712).
    assert data['success'] is False
    assert 'error' in data

End-to-End Tests

Test complete user workflows.

# tests/e2e/test_photo_workflow.py
import pytest
from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC

@pytest.fixture
def driver():
    """Provide a Chrome WebDriver for E2E tests; quit it on teardown."""
    browser = webdriver.Chrome()
    # A generous implicit wait keeps individual find_element calls simple.
    browser.implicitly_wait(10)
    yield browser
    browser.quit()

def test_upload_and_identify_photo(driver):
    """End-to-end: upload a photo, open its details, and tag a face."""
    wait_long = WebDriverWait(driver, 30)
    wait_short = WebDriverWait(driver, 10)

    # Open the application root page.
    driver.get("http://localhost:5000")

    # Submit a fixture image through the upload input.
    driver.find_element(By.ID, "photo-upload").send_keys(
        "tests/fixtures/test_photo.jpg"
    )

    # A photo card appears once the upload has been processed.
    wait_long.until(
        EC.presence_of_element_located((By.CLASS_NAME, "photo-card"))
    )

    # Open the detail view for the uploaded photo.
    driver.find_element(By.CLASS_NAME, "photo-card").click()
    wait_short.until(
        EC.presence_of_element_located((By.ID, "photoDetails"))
    )

    # At least one face should have been detected in the fixture image.
    assert len(driver.find_elements(By.CLASS_NAME, "face-item")) > 0

    # Type a name for the first face and submit the identification.
    driver.find_element(By.CLASS_NAME, "face-name-input").send_keys("Test Person")
    driver.find_element(By.CLASS_NAME, "identify-face-btn").click()

    # The face label should update to the submitted name.
    wait_short.until(
        EC.text_to_be_present_in_element((By.CLASS_NAME, "face-name"), "Test Person")
    )

Test Fixtures

Database Fixtures

# tests/conftest.py
import pytest
import sqlite3
import tempfile
import os

@pytest.fixture
def test_db():
    """Create a temporary SQLite database with the PunimTag test schema.

    Yields:
        Path to the database file; the file is removed on teardown.
    """
    db_fd, db_path = tempfile.mkstemp(suffix='.db')

    conn = sqlite3.connect(db_path)
    try:
        cursor = conn.cursor()

        # One row per imported photo.
        cursor.execute('''
            CREATE TABLE images (
                id INTEGER PRIMARY KEY,
                filename TEXT NOT NULL,
                path TEXT NOT NULL,
                date_taken TEXT
            )
        ''')

        # One row per detected face. The bounding-box columns are quoted
        # because LEFT/RIGHT/FULL are SQL keywords (reserved for joins in
        # SQLite 3.39+); quoting keeps the schema portable.
        cursor.execute('''
            CREATE TABLE faces (
                id INTEGER PRIMARY KEY,
                image_id INTEGER,
                person_id INTEGER,
                encoding BLOB,
                "left" INTEGER,
                "top" INTEGER,
                "right" INTEGER,
                "bottom" INTEGER
            )
        ''')

        conn.commit()
    finally:
        # Close even if schema creation fails, so the temp file isn't leaked
        # with an open connection.
        conn.close()

    yield db_path

    # Teardown: release the mkstemp descriptor and delete the file.
    os.close(db_fd)
    os.unlink(db_path)

@pytest.fixture
def sample_photos(test_db):
    """Insert three known photos into the test database and return them."""
    rows = [
        ('photo1.jpg', '/test/path/photo1.jpg', '2023-01-01'),
        ('photo2.jpg', '/test/path/photo2.jpg', '2023-01-02'),
        ('photo3.jpg', '/test/path/photo3.jpg', '2023-01-03')
    ]

    connection = sqlite3.connect(test_db)
    # Connection.executemany creates and runs a cursor internally.
    connection.executemany(
        'INSERT INTO images (filename, path, date_taken) VALUES (?, ?, ?)',
        rows
    )
    connection.commit()
    connection.close()

    return rows

Mock Fixtures

# tests/conftest.py
import pytest
from unittest.mock import Mock, patch

@pytest.fixture
def mock_face_recognition():
    """Patch the face-recognition helpers with canned return values."""
    with patch('src.utils.face_recognition.detect_faces') as fake_detect, \
         patch('src.utils.face_recognition.encode_face') as fake_encode:
        # One face box and a constant 128-dimensional encoding.
        fake_detect.return_value = [
            Mock(left=100, top=100, right=200, bottom=200)
        ]
        fake_encode.return_value = [0.1] * 128

        yield {
            'detect': fake_detect,
            'encode': fake_encode
        }

@pytest.fixture
def mock_file_system():
    """Patch file-system checks so every path looks like a 1 MB file."""
    with patch('os.path.exists') as fake_exists, \
         patch('os.path.getsize') as fake_size:
        fake_exists.return_value = True
        fake_size.return_value = 1024 * 1024  # 1MB

        yield {
            'exists': fake_exists,
            'size': fake_size
        }

Test Data Management

Test Images

# tests/fixtures/test_images.py
import os
from PIL import Image
import numpy as np

def create_test_image(width=100, height=100, filename="test.jpg"):
    """Create a small synthetic image under tests/fixtures and return its path.

    Args:
        width: Image width in pixels (the face pattern assumes >= 100).
        height: Image height in pixels (the face pattern assumes >= 100).
        filename: Name of the file written inside tests/fixtures.

    Returns:
        The path of the saved image.
    """
    # Start from a solid red canvas.
    image = Image.new('RGB', (width, height), color='red')

    # Paint a crude face-like pattern so detectors have something to find.
    pixels = np.array(image)
    pixels[30:70, 40:60] = [255, 255, 255]  # White face
    pixels[40:50, 45:55] = [0, 0, 0]        # Black eyes

    test_image = Image.fromarray(pixels)
    # Bug fix: the original ignored `filename` and saved to a garbled
    # literal path; build the destination from the argument instead.
    test_path = f"tests/fixtures/{filename}"
    test_image.save(test_path)

    return test_path

def cleanup_test_images():
    """Delete every generated image file inside the fixtures directory."""
    fixture_dir = "tests/fixtures"
    image_suffixes = ('.jpg', '.png', '.jpeg')
    for entry in os.listdir(fixture_dir):
        if entry.endswith(image_suffixes):
            os.remove(os.path.join(fixture_dir, entry))

Performance Testing

Load Testing

# tests/performance/test_load.py
import pytest
import time
import concurrent.futures
from src.app import app

def test_concurrent_photo_requests():
    """Ten parallel GET /photos requests should all succeed quickly."""
    client = app.test_client()

    def fetch():
        return client.get('/photos?page=1&per_page=20')

    # Fire 10 requests at once and collect every response.
    with concurrent.futures.ThreadPoolExecutor(max_workers=10) as pool:
        pending = [pool.submit(fetch) for _ in range(10)]
        responses = [task.result() for task in pending]

    for resp in responses:
        assert resp.status_code == 200

    # Rough latency check: five sequential requests, under 1s on average.
    started = time.time()
    for _ in range(5):
        client.get('/photos?page=1&per_page=20')
    elapsed = time.time() - started

    assert elapsed / 5 < 1.0  # Should respond within 1 second

def test_large_photo_collection():
    """Test performance with a large photo collection.

    Placeholder: intentionally empty until a large fixture dataset exists.
    Mark with @pytest.mark.slow (or skip) once implemented so it does not
    run in the default suite.
    """
    # This would require setting up a large test dataset
    pass

Test Configuration

pytest Configuration

# pytest.ini
[pytest]
testpaths = tests
python_files = test_*.py
python_classes = Test*
python_functions = test_*
addopts =
    -v
    --tb=short
    --strict-markers
    --disable-warnings
markers =
    unit: Unit tests
    integration: Integration tests
    e2e: End-to-end tests
    slow: Slow running tests
    performance: Performance tests

Test Environment Variables

# tests/conftest.py
import os

import pytest  # Bug fix: the snippet used @pytest.fixture without importing pytest.

@pytest.fixture(autouse=True)
def test_environment():
    """Point the app at test resources via environment variables.

    Saves the previous values and restores them on teardown so tests do not
    leak state into each other (the original cleaned up TESTING only and
    left DATABASE_PATH and PHOTOS_DIR set).
    """
    keys = ('TESTING', 'DATABASE_PATH', 'PHOTOS_DIR')
    saved = {key: os.environ.get(key) for key in keys}

    os.environ['TESTING'] = 'true'
    os.environ['DATABASE_PATH'] = 'test.db'
    os.environ['PHOTOS_DIR'] = 'tests/fixtures/photos'

    yield

    # Restore (or remove) every variable we touched.
    for key, value in saved.items():
        if value is None:
            os.environ.pop(key, None)
        else:
            os.environ[key] = value

Code Coverage

Coverage Configuration

# .coveragerc
[run]
source = src
omit =
    */tests/*
    */venv/*
    */__pycache__/*
    */migrations/*

[report]
exclude_lines =
    pragma: no cover
    def __repr__
    raise AssertionError
    raise NotImplementedError
    if 0:
    if __name__ == .__main__.:

Coverage Testing

# tests/test_coverage.py
import pytest
import coverage

def test_code_coverage():
    """Ensure code coverage meets the minimum requirement (80%).

    Measures coverage while exercising a representative endpoint, then
    asserts on the total percentage.
    """
    cov = coverage.Coverage()
    cov.start()

    # Exercise the application under measurement.
    from src.app import app
    client = app.test_client()
    client.get('/photos')

    cov.stop()
    cov.save()

    # Bug fix: the original called cov.report() twice, printing the report
    # twice; Coverage.report() already returns the total percentage.
    total_coverage = cov.report()
    assert total_coverage >= 80.0  # Minimum 80% coverage

Continuous Integration

GitHub Actions

# .github/workflows/test.yml
name: Tests

on: [push, pull_request]

jobs:
  test:
    runs-on: ubuntu-latest

    steps:
      - uses: actions/checkout@v4

      - name: Set up Python
        uses: actions/setup-python@v5
        with:
          # Quote the version: an unquoted 3.10 would be parsed by YAML as
          # the float 3.1.
          python-version: "3.11"

      - name: Install dependencies
        run: |
          pip install -r requirements.txt
          pip install pytest pytest-cov

      - name: Run tests
        run: |
          pytest tests/ --cov=src --cov-report=xml

      - name: Upload coverage
        uses: codecov/codecov-action@v4
        with:
          file: ./coverage.xml

Best Practices

Test Naming

  • Use descriptive test names that explain what is being tested
  • Follow the pattern: test_<function>_<scenario>_<expected_result>
  • Example: test_detect_faces_with_multiple_faces_returns_correct_count

Test Independence

  • Each test should be independent and not rely on other tests
  • Use fixtures to set up test data
  • Clean up after each test

Test Data

  • Use realistic but minimal test data
  • Create helper functions for generating test data
  • Keep test data in fixtures directory

Error Testing

  • Test both success and failure scenarios
  • Test edge cases and boundary conditions
  • Test error handling and recovery

Performance

  • Keep tests fast and efficient
  • Use mocking for slow operations
  • Separate slow tests with @pytest.mark.slow

Documentation

  • Document complex test scenarios
  • Explain the purpose of each test
  • Keep test code readable and maintainable