fix: handle concurrent table creation race in SQLite (#151)

This commit is contained in:
Alexander Whitestone
2026-03-08 13:27:11 -04:00
committed by GitHub
parent ae3bb1cc21
commit 8dbce25183
5 changed files with 44 additions and 8 deletions

View File

@@ -2,9 +2,9 @@ name: Tests
on:
push:
branches: ["**"]
branches: [main]
pull_request:
branches: ["**"]
branches: [main]
jobs:
lint:
@@ -17,7 +17,7 @@ jobs:
python-version: "3.11"
- name: Install linters
run: pip install black==23.12.1 isort==5.13.2 bandit==1.7.5
run: pip install black==23.12.1 isort==5.13.2 bandit==1.8.0
- name: Check formatting (black)
run: black --check --line-length 100 src/ tests/

View File

@@ -28,7 +28,7 @@ COPY pyproject.toml poetry.lock ./
# Install deps directly from lock file (no virtualenv, no export plugin needed)
RUN poetry config virtualenvs.create false && \
poetry install --only main --extras telegram --extras discord --no-interaction
poetry install --only main --extras telegram --extras discord --no-root --no-interaction
# ── Stage 2: Runtime ───────────────────────────────────────────────────────
FROM python:3.12-slim AS base

View File

@@ -1,8 +1,13 @@
#!/usr/bin/env bash
# Pre-commit hook: run tests with a wall-clock limit.
# Blocks the commit if tests fail or take too long.
# Pre-commit hook: lint + test with a wall-clock limit.
# Blocks the commit if formatting, imports, or tests fail.
# Current baseline: ~18s wall-clock. Limit set to 30s for headroom.
echo "Auto-formatting (black + isort)..."
poetry run python -m black --line-length 100 src/ tests/ --quiet
poetry run isort --profile black --line-length 100 src/ tests/ --quiet 2>/dev/null
git add -u
MAX_SECONDS=30
echo "Running tests (${MAX_SECONDS}s limit)..."

View File

@@ -1,9 +1,18 @@
import logging
from pathlib import Path
from sqlalchemy import create_engine
from sqlalchemy.exc import OperationalError
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import Session, sessionmaker
logger = logging.getLogger(__name__)
SQLALCHEMY_DATABASE_URL = "sqlite:///./data/timmy_calm.db"
# Ensure the data directory exists before creating the engine
Path("./data").mkdir(parents=True, exist_ok=True)
engine = create_engine(SQLALCHEMY_DATABASE_URL, connect_args={"check_same_thread": False})
SessionLocal = sessionmaker(autocommit=False, autoflush=False, bind=engine)
@@ -12,8 +21,11 @@ Base = declarative_base()
def create_tables():
"""Create all tables defined by models that have imported Base."""
Base.metadata.create_all(bind=engine)
"""Create all tables idempotently (safe under pytest-xdist concurrency)."""
try:
Base.metadata.create_all(bind=engine)
except OperationalError as exc:
logger.debug("Table creation skipped (already exists): %s", exc)
def get_db():

View File

@@ -235,3 +235,22 @@ def test_reorder_promote_later_to_next(client: TestClient, db_session: Session):
assert db_session.query(Task).filter(Task.id == task_now.id).first().state == TaskState.NOW
assert db_session.query(Task).filter(Task.id == task_later1.id).first().state == TaskState.NEXT
assert db_session.query(Task).filter(Task.id == task_later2.id).first().state == TaskState.LATER
def test_create_tables_idempotent_under_concurrency():
"""Calling create_tables() when tables already exist must not crash.
This covers the race where multiple pytest-xdist workers (or app
processes) import the calm routes module simultaneously and each
calls create_tables() against the same SQLite file.
"""
from unittest.mock import patch
from sqlalchemy.exc import OperationalError
from dashboard.models.database import create_tables
fake_error = OperationalError("CREATE TABLE", {}, Exception("table tasks already exists"))
with patch("dashboard.models.database.Base.metadata.create_all", side_effect=fake_error):
# Must not raise — the OperationalError is caught and logged
create_tables()