Compare commits
7 Commits
005-ui-pol
...
009-login-
| Author | SHA1 | Date | |
|---|---|---|---|
| 7a835d3172 | |||
| f3e0021ee8 | |||
| 354c85292d | |||
| 265b967f6b | |||
| 355014f975 | |||
| 6092a4454e | |||
| 28df9a1261 |
@@ -19,3 +19,11 @@ JWT_SECRET_KEY=change-me-to-a-long-random-string
|
||||
JWT_EXPIRY_SECONDS=86400
|
||||
OWNER_USERNAME=owner
|
||||
OWNER_PASSWORD=change-me
|
||||
|
||||
# Login brute-force protection
|
||||
LOGIN_MAX_FAILURES=5
|
||||
LOGIN_WINDOW_SECONDS=300
|
||||
LOGIN_COOLDOWN_SECONDS=900
|
||||
# Comma-separated IPs/CIDRs of trusted upstream proxies (e.g. nginx ingress pod CIDR).
|
||||
# Leave empty when not behind a reverse proxy.
|
||||
LOGIN_TRUSTED_PROXY_IPS=
|
||||
|
||||
36
.env.test.example
Normal file
36
.env.test.example
Normal file
@@ -0,0 +1,36 @@
|
||||
# Integration test environment variables
|
||||
# Used when running pytest directly on the host (outside Docker).
|
||||
#
|
||||
# Start test services first:
|
||||
# docker compose -f docker-compose.test.yml up -d postgres-test minio-test minio-init-test
|
||||
#
|
||||
# Then source this file and run tests:
|
||||
# export $(grep -v '^#' .env.test.example | xargs)
|
||||
# cd api && python -m pytest tests/integration/ -v
|
||||
|
||||
# PostgreSQL test database (postgres-test container on host port 5433)
|
||||
TEST_DATABASE_URL=postgresql+asyncpg://reactbin:reactbin@localhost:5433/reactbin_test
|
||||
DATABASE_URL=postgresql+asyncpg://reactbin:reactbin@localhost:5433/reactbin_test
|
||||
|
||||
# MinIO test instance (minio-test container on host port 9002)
|
||||
S3_ENDPOINT_URL=http://localhost:9002
|
||||
S3_BUCKET_NAME=reactbin-test
|
||||
S3_ACCESS_KEY_ID=minioadmin
|
||||
S3_SECRET_ACCESS_KEY=minioadmin
|
||||
S3_REGION=us-east-1
|
||||
|
||||
# Auth (test values — not for production)
|
||||
JWT_SECRET_KEY=test-secret-key-for-testing-only
|
||||
OWNER_USERNAME=testowner
|
||||
OWNER_PASSWORD=testpassword
|
||||
|
||||
# API
|
||||
API_BASE_URL=http://localhost:8000
|
||||
MAX_UPLOAD_BYTES=52428800
|
||||
|
||||
# Login brute-force protection
|
||||
LOGIN_MAX_FAILURES=5
|
||||
LOGIN_WINDOW_SECONDS=300
|
||||
LOGIN_COOLDOWN_SECONDS=900
|
||||
# Comma-separated IPs/CIDRs of trusted upstream proxies; leave empty for direct connections.
|
||||
LOGIN_TRUSTED_PROXY_IPS=
|
||||
4
.gitignore
vendored
4
.gitignore
vendored
@@ -1,7 +1,11 @@
|
||||
# Developer notes
|
||||
notes/
|
||||
|
||||
# Environment
|
||||
.env
|
||||
.env.*
|
||||
!.env.example
|
||||
!.env.test.example
|
||||
|
||||
# Python
|
||||
__pycache__/
|
||||
|
||||
@@ -1 +1,3 @@
|
||||
{"feature_directory": "specs/005-ui-polish"}
|
||||
{
|
||||
"feature_directory": "specs/009-login-rate-limiting"
|
||||
}
|
||||
|
||||
@@ -1,8 +1,8 @@
|
||||
<!--
|
||||
SYNC IMPACT REPORT
|
||||
==================
|
||||
Version change: 1.1.1 → 1.2.0
|
||||
Ratified: 2026-05-01 | Last amended: 2026-05-03
|
||||
Version change: 1.2.0 → 1.3.0
|
||||
Ratified: 2026-05-01 | Last amended: 2026-05-06
|
||||
|
||||
Principles introduced (first population from docs/CONSTITUTION.md):
|
||||
- §2 Architecture Principles (6 sub-principles)
|
||||
@@ -94,10 +94,11 @@ The constitution acknowledges all three; the spec governs which is built.
|
||||
|
||||
### 2.5 Database abstraction
|
||||
|
||||
PostgreSQL is the Phase 1 database. All DB access MUST go through a repository
|
||||
layer (one repository class per domain aggregate). Raw SQL or an ORM is
|
||||
acceptable, but no query logic MAY live outside a repository. This makes the
|
||||
planned PostgreSQL → SQLite refactor a repository-layer change only.
|
||||
PostgreSQL is the database. All DB access MUST go through a repository layer
|
||||
(one repository class per domain aggregate). Raw SQL or an ORM is acceptable,
|
||||
but no query logic MAY live outside a repository. No alternative database
|
||||
engine (SQLite, DuckDB, in-memory substitutes) MAY be used in integration
|
||||
tests — dialect differences mask production bugs.
|
||||
|
||||
### 2.6 No speculative abstraction
|
||||
|
||||
@@ -179,8 +180,11 @@ before any implementation step.
|
||||
### 5.2 Test pyramid
|
||||
|
||||
- **Unit tests** — pure logic, repository mocks, no I/O
|
||||
- **Integration tests** — API routes tested against a real (test) database
|
||||
and a real (test) S3-compatible bucket (e.g. MinIO in Docker)
|
||||
- **Integration tests** — API routes tested against a real PostgreSQL instance
|
||||
and a real S3-compatible bucket (e.g. MinIO in Docker). SQLite and other
|
||||
in-memory database substitutes are **prohibited** — PostgreSQL-specific
|
||||
behaviour (GROUP BY enforcement, JSON operators, constraint handling) MUST
|
||||
be exercised by the test suite.
|
||||
- **E2E tests** — Angular + API, minimal set covering the core happy paths
|
||||
|
||||
Unit and integration tests are required. E2E tests are best-effort in v1.
|
||||
@@ -284,7 +288,8 @@ Phase 1 design is complete.
|
||||
| 1.1.0 | 2026-05-02 | Adopted into Spec Kit memory; fixed duplicate §4.3 → §4.4; strengthened "should" language to MUST/MUST NOT; added §9 Governance |
|
||||
| 1.1.1 | 2026-05-03 | Clarify that the only acceptable form of image transformation or editing is thumbnail generation |
|
||||
| 1.2.0 | 2026-05-03 | §2.4: Mark Phase 2 (JWT bearer auth) complete, reword phase status; §6: Add PyJWT to tech stack table; §8: Remove username/password auth from out-of-scope (now shipped) |
|
||||
| 1.3.0 | 2026-05-06 | §2.5: Remove planned PostgreSQL → SQLite refactor note; prohibit alternative database engines in integration tests. §5.2: Explicitly require PostgreSQL for integration tests; prohibit SQLite — a production HAVING/GROUP BY bug was masked by SQLite's permissive dialect. |
|
||||
|
||||
---
|
||||
|
||||
**Version**: 1.2.0 | **Ratified**: 2026-05-01 | **Last Amended**: 2026-05-03
|
||||
**Version**: 1.3.0 | **Ratified**: 2026-05-01 | **Last Amended**: 2026-05-06
|
||||
|
||||
@@ -1,5 +1,5 @@
|
||||
<!-- SPECKIT START -->
|
||||
For additional context about technologies to be used, project structure,
|
||||
shell commands, and other important information, read the current plan at
|
||||
`specs/005-ui-polish/plan.md`.
|
||||
`specs/009-login-rate-limiting/plan.md`.
|
||||
<!-- SPECKIT END -->
|
||||
|
||||
7
Makefile
Normal file
7
Makefile
Normal file
@@ -0,0 +1,7 @@
|
||||
.PHONY: test-unit test-integration
|
||||
|
||||
test-unit:
|
||||
cd api && python -m pytest tests/unit/ -v
|
||||
|
||||
test-integration:
|
||||
docker compose -f docker-compose.test.yml run --rm api-test
|
||||
91
api/app/auth/rate_limiter.py
Normal file
91
api/app/auth/rate_limiter.py
Normal file
@@ -0,0 +1,91 @@
|
||||
import ipaddress
|
||||
import logging
|
||||
import time
|
||||
from dataclasses import dataclass, field
|
||||
from ipaddress import IPv4Network, IPv6Network
|
||||
from threading import Lock
|
||||
|
||||
from starlette.requests import Request
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
def get_client_ip(
|
||||
request: Request,
|
||||
trusted_networks: list[IPv4Network | IPv6Network],
|
||||
) -> str:
|
||||
"""Return the resolved client IP, honouring X-Forwarded-For when the
|
||||
TCP peer is a trusted upstream proxy. Falls back to the TCP peer address
|
||||
when no trusted networks are configured or the peer is not in the list."""
|
||||
peer = request.client.host if request.client else "unknown"
|
||||
if trusted_networks and peer != "unknown":
|
||||
try:
|
||||
peer_addr = ipaddress.ip_address(peer)
|
||||
if any(peer_addr in net for net in trusted_networks):
|
||||
xff = request.headers.get("X-Forwarded-For", "").split(",")[0].strip()
|
||||
if xff:
|
||||
return xff
|
||||
real_ip = request.headers.get("X-Real-IP", "").strip()
|
||||
if real_ip:
|
||||
return real_ip
|
||||
except ValueError:
|
||||
pass
|
||||
return peer
|
||||
|
||||
|
||||
@dataclass
|
||||
class _Record:
|
||||
failures: int = 0
|
||||
window_start: float = field(default_factory=time.time)
|
||||
blocked_until: float = 0.0
|
||||
|
||||
|
||||
class LoginRateLimiter:
|
||||
def __init__(
|
||||
self,
|
||||
max_failures: int = 5,
|
||||
window_seconds: int = 300,
|
||||
cooldown_seconds: int = 900,
|
||||
) -> None:
|
||||
self._max = max_failures
|
||||
self._window = window_seconds
|
||||
self._cooldown = cooldown_seconds
|
||||
self._store: dict[str, _Record] = {}
|
||||
self._lock = Lock()
|
||||
|
||||
@property
|
||||
def cooldown_seconds(self) -> int:
|
||||
return self._cooldown
|
||||
|
||||
def is_blocked(self, ip: str) -> bool:
|
||||
now = time.time()
|
||||
with self._lock:
|
||||
rec = self._store.get(ip)
|
||||
if rec is None:
|
||||
return False
|
||||
if rec.blocked_until > now:
|
||||
return True
|
||||
if rec.blocked_until > 0:
|
||||
del self._store[ip]
|
||||
return False
|
||||
|
||||
def record_failure(self, ip: str) -> None:
|
||||
now = time.time()
|
||||
with self._lock:
|
||||
rec = self._store.get(ip)
|
||||
if rec is None:
|
||||
rec = _Record(window_start=now)
|
||||
self._store[ip] = rec
|
||||
if now - rec.window_start > self._window:
|
||||
rec.failures = 0
|
||||
rec.window_start = now
|
||||
rec.failures += 1
|
||||
if rec.failures >= self._max:
|
||||
rec.blocked_until = now + self._cooldown
|
||||
logger.warning(
|
||||
"Login blocked for %s after %d failures", ip, rec.failures
|
||||
)
|
||||
|
||||
def record_success(self, ip: str) -> None:
|
||||
with self._lock:
|
||||
self._store.pop(ip, None)
|
||||
@@ -18,6 +18,10 @@ class Settings(BaseSettings):
|
||||
jwt_expiry_seconds: int = 86400
|
||||
owner_username: str
|
||||
owner_password: str
|
||||
login_max_failures: int = 5
|
||||
login_window_seconds: int = 300
|
||||
login_cooldown_seconds: int = 900
|
||||
login_trusted_proxy_ips: str = ""
|
||||
|
||||
|
||||
@lru_cache
|
||||
|
||||
@@ -1,4 +1,4 @@
|
||||
from sqlalchemy.ext.asyncio import create_async_engine, AsyncSession, async_sessionmaker
|
||||
from sqlalchemy.ext.asyncio import async_sessionmaker, create_async_engine
|
||||
from sqlalchemy.orm import DeclarativeBase
|
||||
|
||||
from app.config import get_settings
|
||||
|
||||
@@ -1,17 +1,30 @@
|
||||
from contextlib import asynccontextmanager
|
||||
import ipaddress
|
||||
from contextlib import asynccontextmanager, suppress
|
||||
|
||||
from fastapi import FastAPI, Request
|
||||
from fastapi.exceptions import HTTPException
|
||||
from fastapi.responses import JSONResponse
|
||||
|
||||
from app.auth.rate_limiter import LoginRateLimiter
|
||||
from app.config import get_settings
|
||||
from app.database import Base, get_engine
|
||||
|
||||
|
||||
@asynccontextmanager
|
||||
async def lifespan(application: FastAPI):
|
||||
get_settings()
|
||||
# Verify DB connection and run migrations on startup
|
||||
settings = get_settings()
|
||||
application.state.login_rate_limiter = LoginRateLimiter(
|
||||
max_failures=settings.login_max_failures,
|
||||
window_seconds=settings.login_window_seconds,
|
||||
cooldown_seconds=settings.login_cooldown_seconds,
|
||||
)
|
||||
trusted_networks = []
|
||||
for part in settings.login_trusted_proxy_ips.split(","):
|
||||
part = part.strip()
|
||||
if part:
|
||||
with suppress(ValueError):
|
||||
trusted_networks.append(ipaddress.ip_network(part, strict=False))
|
||||
application.state.login_trusted_networks = trusted_networks
|
||||
engine = get_engine()
|
||||
async with engine.begin() as conn:
|
||||
# In production, Alembic handles migrations; this is a dev convenience
|
||||
@@ -22,6 +35,10 @@ async def lifespan(application: FastAPI):
|
||||
|
||||
app = FastAPI(title="Reactbin API", version="1.0.0", lifespan=lifespan)
|
||||
|
||||
# Defaults so app.state is populated even when lifespan doesn't run (e.g. tests)
|
||||
app.state.login_rate_limiter = LoginRateLimiter()
|
||||
app.state.login_trusted_networks = []
|
||||
|
||||
|
||||
@app.exception_handler(HTTPException)
|
||||
async def http_exception_handler(request: Request, exc: HTTPException):
|
||||
|
||||
@@ -1,7 +1,7 @@
|
||||
import uuid
|
||||
from datetime import datetime, timezone
|
||||
from datetime import UTC, datetime
|
||||
|
||||
from sqlalchemy import String, Integer, BigInteger, DateTime, ForeignKey, UniqueConstraint, Index
|
||||
from sqlalchemy import BigInteger, DateTime, ForeignKey, Index, Integer, String, UniqueConstraint
|
||||
from sqlalchemy.dialects.postgresql import UUID
|
||||
from sqlalchemy.orm import Mapped, mapped_column, relationship
|
||||
|
||||
@@ -9,7 +9,7 @@ from app.database import Base
|
||||
|
||||
|
||||
def _utcnow() -> datetime:
|
||||
return datetime.now(timezone.utc)
|
||||
return datetime.now(UTC)
|
||||
|
||||
|
||||
class Image(Base):
|
||||
@@ -24,9 +24,13 @@ class Image(Base):
|
||||
height: Mapped[int] = mapped_column(Integer, nullable=False)
|
||||
storage_key: Mapped[str] = mapped_column(String(64), nullable=False)
|
||||
thumbnail_key: Mapped[str | None] = mapped_column(String(70), nullable=True, default=None)
|
||||
created_at: Mapped[datetime] = mapped_column(DateTime(timezone=True), default=_utcnow, nullable=False)
|
||||
created_at: Mapped[datetime] = mapped_column(
|
||||
DateTime(timezone=True), default=_utcnow, nullable=False
|
||||
)
|
||||
|
||||
image_tags: Mapped[list["ImageTag"]] = relationship(back_populates="image", cascade="all, delete-orphan")
|
||||
image_tags: Mapped[list["ImageTag"]] = relationship(
|
||||
back_populates="image", cascade="all, delete-orphan"
|
||||
)
|
||||
|
||||
@property
|
||||
def tags(self) -> list[str]:
|
||||
@@ -38,7 +42,9 @@ class Tag(Base):
|
||||
|
||||
id: Mapped[uuid.UUID] = mapped_column(UUID(as_uuid=True), primary_key=True, default=uuid.uuid4)
|
||||
name: Mapped[str] = mapped_column(String(64), unique=True, nullable=False, index=True)
|
||||
created_at: Mapped[datetime] = mapped_column(DateTime(timezone=True), default=_utcnow, nullable=False)
|
||||
created_at: Mapped[datetime] = mapped_column(
|
||||
DateTime(timezone=True), default=_utcnow, nullable=False
|
||||
)
|
||||
|
||||
image_tags: Mapped[list["ImageTag"]] = relationship(back_populates="tag")
|
||||
|
||||
|
||||
@@ -1,5 +1,4 @@
|
||||
import uuid
|
||||
from typing import Optional
|
||||
|
||||
from sqlalchemy import select
|
||||
from sqlalchemy.ext.asyncio import AsyncSession
|
||||
@@ -12,15 +11,19 @@ class ImageRepository:
|
||||
def __init__(self, session: AsyncSession) -> None:
|
||||
self._session = session
|
||||
|
||||
async def get_by_hash(self, hash_hex: str) -> Optional[Image]:
|
||||
async def get_by_hash(self, hash_hex: str) -> Image | None:
|
||||
result = await self._session.execute(
|
||||
select(Image).where(Image.hash == hash_hex).options(selectinload(Image.image_tags).selectinload(ImageTag.tag))
|
||||
select(Image)
|
||||
.where(Image.hash == hash_hex)
|
||||
.options(selectinload(Image.image_tags).selectinload(ImageTag.tag))
|
||||
)
|
||||
return result.scalar_one_or_none()
|
||||
|
||||
async def get_by_id(self, image_id: uuid.UUID) -> Optional[Image]:
|
||||
async def get_by_id(self, image_id: uuid.UUID) -> Image | None:
|
||||
result = await self._session.execute(
|
||||
select(Image).where(Image.id == image_id).options(selectinload(Image.image_tags).selectinload(ImageTag.tag))
|
||||
select(Image)
|
||||
.where(Image.id == image_id)
|
||||
.options(selectinload(Image.image_tags).selectinload(ImageTag.tag))
|
||||
)
|
||||
return result.scalar_one_or_none()
|
||||
|
||||
@@ -57,7 +60,7 @@ class ImageRepository:
|
||||
limit: int = 50,
|
||||
offset: int = 0,
|
||||
) -> tuple[list[Image], int]:
|
||||
from sqlalchemy import func, and_
|
||||
from sqlalchemy import func
|
||||
|
||||
base_query = select(Image).options(
|
||||
selectinload(Image.image_tags).selectinload(ImageTag.tag)
|
||||
|
||||
@@ -1,7 +1,7 @@
|
||||
import re
|
||||
import uuid
|
||||
|
||||
from sqlalchemy import select, func
|
||||
from sqlalchemy import func, select
|
||||
from sqlalchemy.ext.asyncio import AsyncSession
|
||||
|
||||
from app.models import Image, ImageTag, Tag
|
||||
@@ -76,6 +76,8 @@ class TagRepository:
|
||||
prefix: str | None = None,
|
||||
limit: int = 100,
|
||||
offset: int = 0,
|
||||
sort: str = "name",
|
||||
min_count: int = 0,
|
||||
) -> tuple[list[dict], int]:
|
||||
count_subq = (
|
||||
select(func.count(ImageTag.image_id))
|
||||
@@ -87,12 +89,16 @@ class TagRepository:
|
||||
query = select(Tag, count_subq.label("image_count"))
|
||||
if prefix:
|
||||
query = query.where(Tag.name.like(f"{prefix}%"))
|
||||
if min_count > 0:
|
||||
query = query.where(count_subq >= min_count)
|
||||
|
||||
total_query = select(func.count()).select_from(query.subquery())
|
||||
total_result = await self._session.execute(total_query)
|
||||
total = total_result.scalar_one()
|
||||
|
||||
paginated = query.order_by(Tag.name).limit(limit).offset(offset)
|
||||
order = [count_subq.desc(), Tag.name.asc()] if sort == "count_desc" else [Tag.name.asc()]
|
||||
|
||||
paginated = query.order_by(*order).limit(limit).offset(offset)
|
||||
rows = await self._session.execute(paginated)
|
||||
|
||||
items = [
|
||||
|
||||
@@ -1,7 +1,9 @@
|
||||
from fastapi import APIRouter, Depends, HTTPException
|
||||
from fastapi import APIRouter, Depends, HTTPException, Request
|
||||
from fastapi.responses import JSONResponse
|
||||
from pydantic import BaseModel
|
||||
|
||||
from app.auth.jwt_provider import JWTAuthProvider
|
||||
from app.auth.rate_limiter import LoginRateLimiter, get_client_ip
|
||||
from app.dependencies import get_jwt_auth
|
||||
|
||||
router = APIRouter(tags=["auth"])
|
||||
@@ -19,12 +21,32 @@ class TokenResponse(BaseModel):
|
||||
|
||||
|
||||
@router.post("/auth/token", response_model=TokenResponse)
|
||||
async def login(body: LoginRequest, auth: JWTAuthProvider = Depends(get_jwt_auth)):
|
||||
async def login(
|
||||
request: Request,
|
||||
body: LoginRequest,
|
||||
auth: JWTAuthProvider = Depends(get_jwt_auth),
|
||||
):
|
||||
limiter: LoginRateLimiter = request.app.state.login_rate_limiter
|
||||
ip: str = get_client_ip(request, request.app.state.login_trusted_networks)
|
||||
|
||||
if limiter.is_blocked(ip):
|
||||
return JSONResponse(
|
||||
status_code=429,
|
||||
content={
|
||||
"detail": "Too many failed login attempts. Please try again later.",
|
||||
"code": "login_rate_limited",
|
||||
},
|
||||
headers={"Retry-After": str(limiter.cooldown_seconds)},
|
||||
)
|
||||
|
||||
if not auth.verify_credentials(body.username, body.password):
|
||||
limiter.record_failure(ip)
|
||||
raise HTTPException(
|
||||
status_code=401,
|
||||
detail={"detail": "Invalid credentials", "code": "invalid_credentials"},
|
||||
)
|
||||
|
||||
limiter.record_success(ip)
|
||||
token = auth.create_token()
|
||||
return TokenResponse(
|
||||
access_token=token,
|
||||
|
||||
@@ -12,9 +12,13 @@ async def list_tags(
|
||||
q: str | None = None,
|
||||
limit: int = 100,
|
||||
offset: int = 0,
|
||||
sort: str = "name",
|
||||
min_count: int = 0,
|
||||
db: AsyncSession = Depends(get_db),
|
||||
):
|
||||
limit = min(limit, 200)
|
||||
limit = min(limit, 500)
|
||||
tag_repo = TagRepository(db)
|
||||
items, total = await tag_repo.list_tags(prefix=q, limit=limit, offset=offset)
|
||||
items, total = await tag_repo.list_tags(
|
||||
prefix=q, limit=limit, offset=offset, sort=sort, min_count=min_count
|
||||
)
|
||||
return {"items": items, "total": total, "limit": limit, "offset": offset}
|
||||
|
||||
@@ -1,19 +1,20 @@
|
||||
import os
|
||||
|
||||
import pytest
|
||||
import pytest_asyncio
|
||||
from httpx import AsyncClient, ASGITransport
|
||||
from sqlalchemy.ext.asyncio import create_async_engine, AsyncSession, async_sessionmaker
|
||||
from httpx import ASGITransport, AsyncClient
|
||||
from sqlalchemy.ext.asyncio import async_sessionmaker, create_async_engine
|
||||
|
||||
# Provide required settings for the test environment before any app imports resolve them
|
||||
os.environ.setdefault("JWT_SECRET_KEY", "test-secret-key-for-testing-only")
|
||||
os.environ.setdefault("OWNER_USERNAME", "testowner")
|
||||
os.environ.setdefault("OWNER_PASSWORD", "testpassword")
|
||||
|
||||
from app.main import app
|
||||
from app.config import get_settings
|
||||
from app.database import Base
|
||||
from app.dependencies import get_db, get_storage, get_auth
|
||||
from app.auth.jwt_provider import JWTAuthProvider
|
||||
from app.auth.jwt_provider import JWTAuthProvider # noqa: E402
|
||||
from app.config import get_settings # noqa: E402
|
||||
from app.database import Base # noqa: E402
|
||||
from app.dependencies import get_auth, get_db, get_storage # noqa: E402
|
||||
from app.main import app # noqa: E402
|
||||
|
||||
# Bust the LRU cache so get_settings() picks up the env vars set above
|
||||
get_settings.cache_clear()
|
||||
@@ -26,8 +27,6 @@ _TEST_OWNER_PASSWORD = os.environ["OWNER_PASSWORD"]
|
||||
@pytest_asyncio.fixture(scope="session", loop_scope="session")
|
||||
async def engine():
|
||||
settings = get_settings()
|
||||
# Use a separate test database URL if TEST_DATABASE_URL is set
|
||||
import os
|
||||
db_url = os.getenv("TEST_DATABASE_URL", settings.database_url)
|
||||
eng = create_async_engine(db_url, echo=False)
|
||||
async with eng.begin() as conn:
|
||||
@@ -48,8 +47,8 @@ async def db_session(engine):
|
||||
|
||||
@pytest_asyncio.fixture
|
||||
async def client(db_session):
|
||||
from app.storage.s3_backend import S3StorageBackend
|
||||
from app.auth.noop import NoOpAuthProvider
|
||||
from app.storage.s3_backend import S3StorageBackend
|
||||
|
||||
storage = S3StorageBackend()
|
||||
auth = NoOpAuthProvider()
|
||||
@@ -108,3 +107,15 @@ async def authed_client(db_session, jwt_auth_provider):
|
||||
yield c, valid_token
|
||||
|
||||
app.dependency_overrides.clear()
|
||||
|
||||
|
||||
def pytest_configure(config):
|
||||
db_url = os.getenv("TEST_DATABASE_URL") or os.getenv("DATABASE_URL", "")
|
||||
if not db_url.startswith("postgresql+asyncpg://"):
|
||||
pytest.exit(
|
||||
"Integration tests require a PostgreSQL database "
|
||||
"(postgresql+asyncpg://...). "
|
||||
"Set TEST_DATABASE_URL or DATABASE_URL accordingly. "
|
||||
f"Got: {db_url!r}",
|
||||
returncode=1,
|
||||
)
|
||||
|
||||
@@ -19,15 +19,18 @@ def _minimal_jpeg_v2() -> bytes:
|
||||
|
||||
|
||||
@pytest.mark.asyncio
|
||||
async def test_delete_removes_record(client):
|
||||
async def test_delete_removes_record(authed_client):
|
||||
client, token = authed_client
|
||||
headers = {"Authorization": f"Bearer {token}"}
|
||||
data = _minimal_jpeg_v2()
|
||||
upload = await client.post(
|
||||
"/api/v1/images",
|
||||
files={"file": ("del-test.jpg", io.BytesIO(data), "image/jpeg")},
|
||||
headers=headers,
|
||||
)
|
||||
image_id = upload.json()["id"]
|
||||
|
||||
delete_resp = await client.delete(f"/api/v1/images/{image_id}")
|
||||
delete_resp = await client.delete(f"/api/v1/images/{image_id}", headers=headers)
|
||||
assert delete_resp.status_code == 204
|
||||
|
||||
get_resp = await client.get(f"/api/v1/images/{image_id}")
|
||||
@@ -36,17 +39,19 @@ async def test_delete_removes_record(client):
|
||||
|
||||
|
||||
@pytest.mark.asyncio
|
||||
async def test_delete_removes_storage_object(client):
|
||||
async def test_delete_removes_storage_object(authed_client):
|
||||
client, token = authed_client
|
||||
headers = {"Authorization": f"Bearer {token}"}
|
||||
data = _minimal_jpeg_v2() + b"\x00"
|
||||
upload = await client.post(
|
||||
"/api/v1/images",
|
||||
files={"file": ("del-storage-test.jpg", io.BytesIO(data), "image/jpeg")},
|
||||
headers=headers,
|
||||
)
|
||||
assert upload.status_code in (200, 201)
|
||||
image_id = upload.json()["id"]
|
||||
storage_key = upload.json()["hash"]
|
||||
|
||||
delete_resp = await client.delete(f"/api/v1/images/{image_id}")
|
||||
delete_resp = await client.delete(f"/api/v1/images/{image_id}", headers=headers)
|
||||
assert delete_resp.status_code == 204
|
||||
|
||||
# Confirm storage redirect no longer works (404 since record is gone)
|
||||
@@ -55,15 +60,21 @@ async def test_delete_removes_storage_object(client):
|
||||
|
||||
|
||||
@pytest.mark.asyncio
|
||||
async def test_delete_unknown_id_returns_404(client):
|
||||
response = await client.delete(f"/api/v1/images/{uuid.uuid4()}")
|
||||
async def test_delete_unknown_id_returns_404(authed_client):
|
||||
client, token = authed_client
|
||||
response = await client.delete(
|
||||
f"/api/v1/images/{uuid.uuid4()}",
|
||||
headers={"Authorization": f"Bearer {token}"},
|
||||
)
|
||||
assert response.status_code == 404
|
||||
body = response.json()
|
||||
assert body["code"] == "image_not_found"
|
||||
|
||||
|
||||
@pytest.mark.asyncio
|
||||
async def test_delete_removes_thumbnail(client):
|
||||
async def test_delete_removes_thumbnail(authed_client):
|
||||
client, token = authed_client
|
||||
headers = {"Authorization": f"Bearer {token}"}
|
||||
buf = io.BytesIO()
|
||||
PILImage.new("RGB", (200, 150), color=(60, 90, 120)).save(buf, format="JPEG")
|
||||
data = buf.getvalue()
|
||||
@@ -71,12 +82,13 @@ async def test_delete_removes_thumbnail(client):
|
||||
upload = await client.post(
|
||||
"/api/v1/images",
|
||||
files={"file": ("thumb-del.jpg", io.BytesIO(data), "image/jpeg")},
|
||||
headers=headers,
|
||||
)
|
||||
assert upload.status_code == 201
|
||||
image_id = upload.json()["id"]
|
||||
assert upload.json()["thumbnail_key"] is not None
|
||||
|
||||
delete_resp = await client.delete(f"/api/v1/images/{image_id}")
|
||||
delete_resp = await client.delete(f"/api/v1/images/{image_id}", headers=headers)
|
||||
assert delete_resp.status_code == 204
|
||||
|
||||
thumb_resp = await client.get(f"/api/v1/images/{image_id}/thumbnail")
|
||||
|
||||
121
api/tests/integration/test_login_rate_limit.py
Normal file
121
api/tests/integration/test_login_rate_limit.py
Normal file
@@ -0,0 +1,121 @@
|
||||
import os
|
||||
|
||||
import pytest
|
||||
from httpx import AsyncClient
|
||||
|
||||
from app.auth.rate_limiter import LoginRateLimiter
|
||||
from app.main import app
|
||||
|
||||
BAD_CREDS = {"username": "attacker", "password": "wrong"}
|
||||
VALID_CREDS = {
|
||||
"username": os.environ.get("OWNER_USERNAME", "testowner"),
|
||||
"password": os.environ.get("OWNER_PASSWORD", "testpassword"),
|
||||
}
|
||||
|
||||
|
||||
def _fresh_limiter():
|
||||
return LoginRateLimiter(max_failures=3, window_seconds=60, cooldown_seconds=30)
|
||||
|
||||
|
||||
@pytest.mark.asyncio
|
||||
async def test_repeated_failures_trigger_429(client: AsyncClient):
|
||||
original_limiter = app.state.login_rate_limiter
|
||||
original_networks = app.state.login_trusted_networks
|
||||
app.state.login_rate_limiter = _fresh_limiter()
|
||||
app.state.login_trusted_networks = []
|
||||
try:
|
||||
for _ in range(3):
|
||||
await client.post("/api/v1/auth/token", json=BAD_CREDS)
|
||||
resp = await client.post("/api/v1/auth/token", json=BAD_CREDS)
|
||||
assert resp.status_code == 429
|
||||
assert resp.json()["code"] == "login_rate_limited"
|
||||
finally:
|
||||
app.state.login_rate_limiter = original_limiter
|
||||
app.state.login_trusted_networks = original_networks
|
||||
|
||||
|
||||
@pytest.mark.asyncio
|
||||
async def test_success_resets_counter(client: AsyncClient):
|
||||
original_limiter = app.state.login_rate_limiter
|
||||
original_networks = app.state.login_trusted_networks
|
||||
app.state.login_rate_limiter = _fresh_limiter()
|
||||
app.state.login_trusted_networks = []
|
||||
try:
|
||||
for _ in range(2):
|
||||
await client.post("/api/v1/auth/token", json=BAD_CREDS)
|
||||
await client.post("/api/v1/auth/token", json=VALID_CREDS)
|
||||
for _ in range(3):
|
||||
resp = await client.post("/api/v1/auth/token", json=BAD_CREDS)
|
||||
assert resp.status_code == 401, "counter should have reset after success"
|
||||
finally:
|
||||
app.state.login_rate_limiter = original_limiter
|
||||
app.state.login_trusted_networks = original_networks
|
||||
|
||||
|
||||
@pytest.mark.asyncio
|
||||
async def test_429_has_retry_after_header(client: AsyncClient):
|
||||
original_limiter = app.state.login_rate_limiter
|
||||
original_networks = app.state.login_trusted_networks
|
||||
app.state.login_rate_limiter = _fresh_limiter()
|
||||
app.state.login_trusted_networks = []
|
||||
try:
|
||||
for _ in range(3):
|
||||
await client.post("/api/v1/auth/token", json=BAD_CREDS)
|
||||
resp = await client.post("/api/v1/auth/token", json=BAD_CREDS)
|
||||
assert resp.status_code == 429
|
||||
assert "Retry-After" in resp.headers
|
||||
assert int(resp.headers["Retry-After"]) > 0
|
||||
finally:
|
||||
app.state.login_rate_limiter = original_limiter
|
||||
app.state.login_trusted_networks = original_networks
|
||||
|
||||
|
||||
@pytest.mark.asyncio
|
||||
async def test_429_body_shape(client: AsyncClient):
|
||||
original_limiter = app.state.login_rate_limiter
|
||||
original_networks = app.state.login_trusted_networks
|
||||
app.state.login_rate_limiter = _fresh_limiter()
|
||||
app.state.login_trusted_networks = []
|
||||
try:
|
||||
for _ in range(3):
|
||||
await client.post("/api/v1/auth/token", json=BAD_CREDS)
|
||||
resp = await client.post("/api/v1/auth/token", json=BAD_CREDS)
|
||||
assert resp.status_code == 429
|
||||
assert resp.json() == {
|
||||
"detail": "Too many failed login attempts. Please try again later.",
|
||||
"code": "login_rate_limited",
|
||||
}
|
||||
finally:
|
||||
app.state.login_rate_limiter = original_limiter
|
||||
app.state.login_trusted_networks = original_networks
|
||||
|
||||
|
||||
@pytest.mark.asyncio
|
||||
async def test_xff_header_ignored_when_no_trusted_networks(client: AsyncClient):
|
||||
original_limiter = app.state.login_rate_limiter
|
||||
original_networks = app.state.login_trusted_networks
|
||||
app.state.login_rate_limiter = _fresh_limiter()
|
||||
app.state.login_trusted_networks = []
|
||||
try:
|
||||
# Send 3 failures all claiming to be "1.2.3.4" via XFF
|
||||
for _ in range(3):
|
||||
await client.post(
|
||||
"/api/v1/auth/token",
|
||||
json=BAD_CREDS,
|
||||
headers={"X-Forwarded-For": "1.2.3.4"},
|
||||
)
|
||||
# 4th request with a *different* XFF — if XFF were trusted, this
|
||||
# would appear to be a fresh IP and get 401. Since XFF is ignored,
|
||||
# the real peer ("testclient") is blocked and we get 429.
|
||||
resp = await client.post(
|
||||
"/api/v1/auth/token",
|
||||
json=BAD_CREDS,
|
||||
headers={"X-Forwarded-For": "9.9.9.9"},
|
||||
)
|
||||
assert resp.status_code == 429, (
|
||||
"XFF should be ignored when no trusted networks are configured; "
|
||||
"expected real peer to be blocked"
|
||||
)
|
||||
finally:
|
||||
app.state.login_rate_limiter = original_limiter
|
||||
app.state.login_trusted_networks = original_networks
|
||||
@@ -3,7 +3,6 @@ US3 regression tests: all read endpoints must remain accessible without a token
|
||||
even after require_auth is applied to write endpoints.
|
||||
"""
|
||||
import io
|
||||
import uuid
|
||||
|
||||
import pytest
|
||||
|
||||
|
||||
@@ -3,6 +3,7 @@ T041 — GET /api/v1/images?tags=cat,funny → only images with both tags
|
||||
T042 — same query excludes images with only one matching tag
|
||||
"""
|
||||
import io
|
||||
|
||||
import pytest
|
||||
|
||||
|
||||
@@ -15,7 +16,9 @@ def _minimal_gif() -> bytes:
|
||||
|
||||
|
||||
@pytest.mark.asyncio
|
||||
async def test_and_filter_returns_only_matching_images(client):
|
||||
async def test_and_filter_returns_only_matching_images(authed_client):
|
||||
client, token = authed_client
|
||||
headers = {"Authorization": f"Bearer {token}"}
|
||||
data = _minimal_gif()
|
||||
|
||||
# Image with both tags
|
||||
@@ -23,6 +26,7 @@ async def test_and_filter_returns_only_matching_images(client):
|
||||
"/api/v1/images",
|
||||
files={"file": ("both.gif", io.BytesIO(data), "image/gif")},
|
||||
data={"tags": "andcat,andfunny"},
|
||||
headers=headers,
|
||||
)
|
||||
both_id = r_both.json()["id"]
|
||||
|
||||
@@ -31,6 +35,7 @@ async def test_and_filter_returns_only_matching_images(client):
|
||||
"/api/v1/images",
|
||||
files={"file": ("one.gif", io.BytesIO(data + b"\x00"), "image/gif")},
|
||||
data={"tags": "andcat"},
|
||||
headers=headers,
|
||||
)
|
||||
|
||||
response = await client.get("/api/v1/images?tags=andcat,andfunny")
|
||||
@@ -42,7 +47,9 @@ async def test_and_filter_returns_only_matching_images(client):
|
||||
|
||||
|
||||
@pytest.mark.asyncio
|
||||
async def test_filter_excludes_partial_tag_match(client):
|
||||
async def test_filter_excludes_partial_tag_match(authed_client):
|
||||
client, token = authed_client
|
||||
headers = {"Authorization": f"Bearer {token}"}
|
||||
data = _minimal_gif()
|
||||
|
||||
# Image with only "exclcat"
|
||||
@@ -50,6 +57,7 @@ async def test_filter_excludes_partial_tag_match(client):
|
||||
"/api/v1/images",
|
||||
files={"file": ("partial.gif", io.BytesIO(data + b"\x01"), "image/gif")},
|
||||
data={"tags": "exclcat"},
|
||||
headers=headers,
|
||||
)
|
||||
|
||||
# Filter requires both exclcat and exclother
|
||||
|
||||
@@ -29,11 +29,13 @@ def _minimal_webp() -> bytes:
|
||||
|
||||
|
||||
@pytest.mark.asyncio
|
||||
async def test_file_returns_200_with_content(client):
|
||||
async def test_file_returns_200_with_content(authed_client):
|
||||
client, token = authed_client
|
||||
data = _minimal_webp()
|
||||
upload = await client.post(
|
||||
"/api/v1/images",
|
||||
files={"file": ("img.webp", io.BytesIO(data), "image/webp")},
|
||||
headers={"Authorization": f"Bearer {token}"},
|
||||
)
|
||||
assert upload.status_code in (200, 201)
|
||||
upload_body = upload.json()
|
||||
@@ -57,11 +59,13 @@ async def test_file_unknown_id_returns_404(client):
|
||||
|
||||
|
||||
@pytest.mark.asyncio
|
||||
async def test_file_response_exposes_no_storage_details(client):
|
||||
async def test_file_response_exposes_no_storage_details(authed_client):
|
||||
client, token = authed_client
|
||||
data = _minimal_webp()
|
||||
upload = await client.post(
|
||||
"/api/v1/images",
|
||||
files={"file": ("img.webp", io.BytesIO(data), "image/webp")},
|
||||
headers={"Authorization": f"Bearer {token}"},
|
||||
)
|
||||
assert upload.status_code in (200, 201)
|
||||
image_id = upload.json()["id"]
|
||||
@@ -75,11 +79,13 @@ async def test_file_response_exposes_no_storage_details(client):
|
||||
|
||||
|
||||
@pytest.mark.asyncio
|
||||
async def test_thumbnail_returns_webp(client):
|
||||
async def test_thumbnail_returns_webp(authed_client):
|
||||
client, token = authed_client
|
||||
data = _real_jpeg()
|
||||
upload = await client.post(
|
||||
"/api/v1/images",
|
||||
files={"file": ("t.jpg", io.BytesIO(data), "image/jpeg")},
|
||||
headers={"Authorization": f"Bearer {token}"},
|
||||
)
|
||||
assert upload.status_code == 201
|
||||
body = upload.json()
|
||||
@@ -95,11 +101,13 @@ async def test_thumbnail_returns_webp(client):
|
||||
|
||||
|
||||
@pytest.mark.asyncio
|
||||
async def test_thumbnail_fallback_returns_original(client, db_session):
|
||||
async def test_thumbnail_fallback_returns_original(authed_client, db_session):
|
||||
client, token = authed_client
|
||||
data = _real_jpeg()
|
||||
upload = await client.post(
|
||||
"/api/v1/images",
|
||||
files={"file": ("fallback.jpg", io.BytesIO(data), "image/jpeg")},
|
||||
headers={"Authorization": f"Bearer {token}"},
|
||||
)
|
||||
assert upload.status_code == 201
|
||||
image_id = upload.json()["id"]
|
||||
|
||||
@@ -5,13 +5,17 @@ T057 — PATCH replaces tags, old tags unlinked, new tags upserted
|
||||
T058 — PATCH with invalid tag → 422 invalid_tag
|
||||
T073 — GET /api/v1/tags returns all tags alphabetically with correct image_count
|
||||
T074 — GET /api/v1/tags?q=ca returns only tags prefixed "ca"
|
||||
T001 — GET /api/v1/tags?sort=count_desc returns tags ordered highest-count-first
|
||||
T002 — GET /api/v1/tags?min_count=N excludes tags with image_count < N
|
||||
"""
|
||||
import io
|
||||
|
||||
import pytest
|
||||
|
||||
|
||||
def _minimal_png() -> bytes:
|
||||
import struct, zlib
|
||||
import struct
|
||||
import zlib
|
||||
def chunk(name: bytes, data: bytes) -> bytes:
|
||||
c = name + data
|
||||
return struct.pack(">I", len(data)) + c + struct.pack(">I", zlib.crc32(c) & 0xFFFFFFFF)
|
||||
@@ -27,12 +31,14 @@ def _minimal_png() -> bytes:
|
||||
|
||||
|
||||
@pytest.mark.asyncio
|
||||
async def test_upload_with_tags_persists_tags(client):
|
||||
async def test_upload_with_tags_persists_tags(authed_client):
|
||||
client, token = authed_client
|
||||
data = _minimal_png()
|
||||
response = await client.post(
|
||||
"/api/v1/images",
|
||||
files={"file": ("img.png", io.BytesIO(data), "image/png")},
|
||||
data={"tags": "cat,funny"},
|
||||
headers={"Authorization": f"Bearer {token}"},
|
||||
)
|
||||
assert response.status_code == 201
|
||||
body = response.json()
|
||||
@@ -40,12 +46,15 @@ async def test_upload_with_tags_persists_tags(client):
|
||||
|
||||
|
||||
@pytest.mark.asyncio
|
||||
async def test_duplicate_upload_tags_unchanged(client):
|
||||
async def test_duplicate_upload_tags_unchanged(authed_client):
|
||||
client, token = authed_client
|
||||
headers = {"Authorization": f"Bearer {token}"}
|
||||
data = _minimal_png()
|
||||
r1 = await client.post(
|
||||
"/api/v1/images",
|
||||
files={"file": ("img.png", io.BytesIO(data), "image/png")},
|
||||
data={"tags": "original-tag"},
|
||||
headers=headers,
|
||||
)
|
||||
assert r1.status_code in (200, 201)
|
||||
original_tags = set(r1.json()["tags"])
|
||||
@@ -54,6 +63,7 @@ async def test_duplicate_upload_tags_unchanged(client):
|
||||
"/api/v1/images",
|
||||
files={"file": ("img.png", io.BytesIO(data), "image/png")},
|
||||
data={"tags": "different-tag"},
|
||||
headers=headers,
|
||||
)
|
||||
assert r2.status_code == 200
|
||||
assert r2.json()["duplicate"] is True
|
||||
@@ -61,18 +71,22 @@ async def test_duplicate_upload_tags_unchanged(client):
|
||||
|
||||
|
||||
@pytest.mark.asyncio
|
||||
async def test_patch_replaces_tag_set(client):
|
||||
async def test_patch_replaces_tag_set(authed_client):
|
||||
client, token = authed_client
|
||||
headers = {"Authorization": f"Bearer {token}"}
|
||||
data = _minimal_png()
|
||||
r1 = await client.post(
|
||||
"/api/v1/images",
|
||||
files={"file": ("patch-test.png", io.BytesIO(data), "image/png")},
|
||||
data={"tags": "old-tag"},
|
||||
headers=headers,
|
||||
)
|
||||
image_id = r1.json()["id"]
|
||||
|
||||
patch = await client.patch(
|
||||
f"/api/v1/images/{image_id}/tags",
|
||||
json={"tags": ["new-tag", "another"]},
|
||||
headers=headers,
|
||||
)
|
||||
assert patch.status_code == 200
|
||||
body = patch.json()
|
||||
@@ -81,17 +95,21 @@ async def test_patch_replaces_tag_set(client):
|
||||
|
||||
|
||||
@pytest.mark.asyncio
|
||||
async def test_patch_invalid_tag_returns_422(client):
|
||||
async def test_patch_invalid_tag_returns_422(authed_client):
|
||||
client, token = authed_client
|
||||
headers = {"Authorization": f"Bearer {token}"}
|
||||
data = _minimal_png()
|
||||
r1 = await client.post(
|
||||
"/api/v1/images",
|
||||
files={"file": ("invalid-tag-test.png", io.BytesIO(data), "image/png")},
|
||||
headers=headers,
|
||||
)
|
||||
image_id = r1.json()["id"]
|
||||
|
||||
patch = await client.patch(
|
||||
f"/api/v1/images/{image_id}/tags",
|
||||
json={"tags": ["valid", "INVALID TAG WITH SPACES!"]},
|
||||
headers=headers,
|
||||
)
|
||||
assert patch.status_code == 422
|
||||
body = patch.json()
|
||||
@@ -99,12 +117,14 @@ async def test_patch_invalid_tag_returns_422(client):
|
||||
|
||||
|
||||
@pytest.mark.asyncio
|
||||
async def test_list_tags_alphabetical_with_counts(client):
|
||||
async def test_list_tags_alphabetical_with_counts(authed_client):
|
||||
client, token = authed_client
|
||||
data = _minimal_png()
|
||||
await client.post(
|
||||
"/api/v1/images",
|
||||
files={"file": ("tag-list-test.png", io.BytesIO(data), "image/png")},
|
||||
data={"tags": "zebra,apple"},
|
||||
headers={"Authorization": f"Bearer {token}"},
|
||||
)
|
||||
response = await client.get("/api/v1/tags")
|
||||
assert response.status_code == 200
|
||||
@@ -117,12 +137,14 @@ async def test_list_tags_alphabetical_with_counts(client):
|
||||
|
||||
|
||||
@pytest.mark.asyncio
|
||||
async def test_list_tags_prefix_filter(client):
|
||||
async def test_list_tags_prefix_filter(authed_client):
|
||||
client, token = authed_client
|
||||
data = _minimal_png()
|
||||
await client.post(
|
||||
"/api/v1/images",
|
||||
files={"file": ("prefix-test.png", io.BytesIO(data), "image/png")},
|
||||
data={"tags": "cat,catfish,caterpillar,dog"},
|
||||
headers={"Authorization": f"Bearer {token}"},
|
||||
)
|
||||
response = await client.get("/api/v1/tags?q=cat")
|
||||
assert response.status_code == 200
|
||||
@@ -130,3 +152,70 @@ async def test_list_tags_prefix_filter(client):
|
||||
for item in body["items"]:
|
||||
assert item["name"].startswith("cat")
|
||||
assert not any(item["name"] == "dog" for item in body["items"])
|
||||
|
||||
|
||||
def _unique_png(seed: int) -> bytes:
|
||||
"""Generate a 1x1 PNG with a seed-determined pixel so each seed produces a distinct hash."""
|
||||
import struct
|
||||
import zlib
|
||||
def chunk(name: bytes, data: bytes) -> bytes:
|
||||
c = name + data
|
||||
return struct.pack(">I", len(data)) + c + struct.pack(">I", zlib.crc32(c) & 0xFFFFFFFF)
|
||||
ihdr = struct.pack(">IIBBBBB", 1, 1, 8, 2, 0, 0, 0)
|
||||
r, g, b = (seed * 37) % 256, (seed * 53) % 256, (seed * 71) % 256
|
||||
idat_data = zlib.compress(bytes([0, r, g, b]))
|
||||
return (
|
||||
b"\x89PNG\r\n\x1a\n"
|
||||
+ chunk(b"IHDR", ihdr)
|
||||
+ chunk(b"IDAT", idat_data)
|
||||
+ chunk(b"IEND", b"")
|
||||
)
|
||||
|
||||
|
||||
@pytest.mark.asyncio
|
||||
async def test_list_tags_sort_count_desc(authed_client):
|
||||
client, token = authed_client
|
||||
headers = {"Authorization": f"Bearer {token}"}
|
||||
# popular-sort-tag appears on 2 images, rare-sort-tag on 1 — verify count_desc ordering
|
||||
for seed in (100, 101):
|
||||
await client.post(
|
||||
"/api/v1/images",
|
||||
files={"file": (f"sort-{seed}.png", io.BytesIO(_unique_png(seed)), "image/png")},
|
||||
data={"tags": "popular-sort-tag,rare-sort-tag" if seed == 100 else "popular-sort-tag"},
|
||||
headers=headers,
|
||||
)
|
||||
response = await client.get("/api/v1/tags?sort=count_desc")
|
||||
assert response.status_code == 200
|
||||
items = response.json()["items"]
|
||||
sort_items = [i for i in items if i["name"] in ("popular-sort-tag", "rare-sort-tag")]
|
||||
assert len(sort_items) == 2
|
||||
# popular-sort-tag (count=2) must come before rare-sort-tag (count=1)
|
||||
names = [i["name"] for i in sort_items]
|
||||
assert names.index("popular-sort-tag") < names.index("rare-sort-tag")
|
||||
# Counts must be non-increasing
|
||||
counts = [i["image_count"] for i in items]
|
||||
assert counts == sorted(counts, reverse=True)
|
||||
|
||||
|
||||
@pytest.mark.asyncio
|
||||
async def test_list_tags_min_count_excludes_below_threshold(authed_client):
|
||||
client, token = authed_client
|
||||
headers = {"Authorization": f"Bearer {token}"}
|
||||
# common-min-tag appears on 2 images, uncommon-min-tag on 1
|
||||
for seed in (200, 201):
|
||||
await client.post(
|
||||
"/api/v1/images",
|
||||
files={"file": (f"min-{seed}.png", io.BytesIO(_unique_png(seed)), "image/png")},
|
||||
data={"tags": "common-min-tag,uncommon-min-tag" if seed == 200 else "common-min-tag"},
|
||||
headers=headers,
|
||||
)
|
||||
# min_count=2 should exclude uncommon-min-tag (count=1) but keep common-min-tag (count=2)
|
||||
response = await client.get("/api/v1/tags?min_count=2")
|
||||
assert response.status_code == 200
|
||||
items = response.json()["items"]
|
||||
names = [i["name"] for i in items]
|
||||
assert "common-min-tag" in names
|
||||
assert "uncommon-min-tag" not in names
|
||||
# All returned tags must have image_count >= 2
|
||||
for item in items:
|
||||
assert item["image_count"] >= 2
|
||||
|
||||
@@ -6,6 +6,7 @@ T029 — file > MAX_UPLOAD_BYTES → 422 file_too_large
|
||||
T079 — GET /api/v1/images/{id} 404 → error envelope shape
|
||||
"""
|
||||
import io
|
||||
import uuid
|
||||
from unittest.mock import patch
|
||||
|
||||
import pytest
|
||||
@@ -27,11 +28,13 @@ def _minimal_jpeg() -> bytes:
|
||||
|
||||
|
||||
@pytest.mark.asyncio
|
||||
async def test_upload_new_image_returns_201(client):
|
||||
async def test_upload_new_image_returns_201(authed_client):
|
||||
client, token = authed_client
|
||||
data = _minimal_jpeg()
|
||||
response = await client.post(
|
||||
"/api/v1/images",
|
||||
files={"file": ("test.jpg", io.BytesIO(data), "image/jpeg")},
|
||||
headers={"Authorization": f"Bearer {token}"},
|
||||
)
|
||||
assert response.status_code == 201
|
||||
body = response.json()
|
||||
@@ -44,12 +47,15 @@ async def test_upload_new_image_returns_201(client):
|
||||
|
||||
|
||||
@pytest.mark.asyncio
|
||||
async def test_upload_duplicate_returns_200_with_flag(client):
|
||||
async def test_upload_duplicate_returns_200_with_flag(authed_client):
|
||||
client, token = authed_client
|
||||
data = _minimal_jpeg()
|
||||
headers = {"Authorization": f"Bearer {token}"}
|
||||
# First upload
|
||||
r1 = await client.post(
|
||||
"/api/v1/images",
|
||||
files={"file": ("test.jpg", io.BytesIO(data), "image/jpeg")},
|
||||
headers=headers,
|
||||
)
|
||||
assert r1.status_code in (200, 201)
|
||||
|
||||
@@ -57,6 +63,7 @@ async def test_upload_duplicate_returns_200_with_flag(client):
|
||||
r2 = await client.post(
|
||||
"/api/v1/images",
|
||||
files={"file": ("test.jpg", io.BytesIO(data), "image/jpeg")},
|
||||
headers=headers,
|
||||
)
|
||||
assert r2.status_code == 200
|
||||
body = r2.json()
|
||||
@@ -65,10 +72,12 @@ async def test_upload_duplicate_returns_200_with_flag(client):
|
||||
|
||||
|
||||
@pytest.mark.asyncio
|
||||
async def test_upload_invalid_mime_type_returns_422(client):
|
||||
async def test_upload_invalid_mime_type_returns_422(authed_client):
|
||||
client, token = authed_client
|
||||
response = await client.post(
|
||||
"/api/v1/images",
|
||||
files={"file": ("doc.pdf", io.BytesIO(b"%PDF-1.4"), "application/pdf")},
|
||||
headers={"Authorization": f"Bearer {token}"},
|
||||
)
|
||||
assert response.status_code == 422
|
||||
body = response.json()
|
||||
@@ -77,10 +86,12 @@ async def test_upload_invalid_mime_type_returns_422(client):
|
||||
|
||||
|
||||
@pytest.mark.asyncio
|
||||
async def test_upload_oversized_file_returns_422(client):
|
||||
async def test_upload_oversized_file_returns_422(authed_client):
|
||||
import os
|
||||
|
||||
from app.config import get_settings
|
||||
|
||||
client, token = authed_client
|
||||
os.environ["MAX_UPLOAD_BYTES"] = "10"
|
||||
get_settings.cache_clear()
|
||||
|
||||
@@ -88,6 +99,7 @@ async def test_upload_oversized_file_returns_422(client):
|
||||
response = await client.post(
|
||||
"/api/v1/images",
|
||||
files={"file": ("big.jpg", io.BytesIO(b"x" * 11), "image/jpeg")},
|
||||
headers={"Authorization": f"Bearer {token}"},
|
||||
)
|
||||
assert response.status_code == 422
|
||||
body = response.json()
|
||||
@@ -99,7 +111,6 @@ async def test_upload_oversized_file_returns_422(client):
|
||||
|
||||
@pytest.mark.asyncio
|
||||
async def test_get_unknown_image_returns_404_with_envelope(client):
|
||||
import uuid
|
||||
response = await client.get(f"/api/v1/images/{uuid.uuid4()}")
|
||||
assert response.status_code == 404
|
||||
body = response.json()
|
||||
@@ -108,11 +119,13 @@ async def test_get_unknown_image_returns_404_with_envelope(client):
|
||||
|
||||
|
||||
@pytest.mark.asyncio
|
||||
async def test_upload_returns_thumbnail_key(client):
|
||||
async def test_upload_returns_thumbnail_key(authed_client):
|
||||
client, token = authed_client
|
||||
data = _real_jpeg(color=(100, 150, 200))
|
||||
response = await client.post(
|
||||
"/api/v1/images",
|
||||
files={"file": ("thumb_test.jpg", io.BytesIO(data), "image/jpeg")},
|
||||
headers={"Authorization": f"Bearer {token}"},
|
||||
)
|
||||
assert response.status_code == 201
|
||||
body = response.json()
|
||||
@@ -122,17 +135,21 @@ async def test_upload_returns_thumbnail_key(client):
|
||||
|
||||
|
||||
@pytest.mark.asyncio
|
||||
async def test_duplicate_upload_reuses_thumbnail_key(client):
|
||||
async def test_duplicate_upload_reuses_thumbnail_key(authed_client):
|
||||
client, token = authed_client
|
||||
headers = {"Authorization": f"Bearer {token}"}
|
||||
data = _real_jpeg(color=(200, 100, 50))
|
||||
r1 = await client.post(
|
||||
"/api/v1/images",
|
||||
files={"file": ("dup.jpg", io.BytesIO(data), "image/jpeg")},
|
||||
headers=headers,
|
||||
)
|
||||
assert r1.status_code in (200, 201)
|
||||
|
||||
r2 = await client.post(
|
||||
"/api/v1/images",
|
||||
files={"file": ("dup.jpg", io.BytesIO(data), "image/jpeg")},
|
||||
headers=headers,
|
||||
)
|
||||
assert r2.status_code == 200
|
||||
|
||||
@@ -143,12 +160,14 @@ async def test_duplicate_upload_reuses_thumbnail_key(client):
|
||||
|
||||
|
||||
@pytest.mark.asyncio
|
||||
async def test_upload_succeeds_when_thumbnail_fails(client):
|
||||
async def test_upload_succeeds_when_thumbnail_fails(authed_client):
|
||||
client, token = authed_client
|
||||
data = _real_jpeg(color=(50, 200, 150))
|
||||
with patch("app.routers.images.generate_thumbnail", side_effect=RuntimeError("simulated")):
|
||||
response = await client.post(
|
||||
"/api/v1/images",
|
||||
files={"file": ("no_thumb.jpg", io.BytesIO(data), "image/jpeg")},
|
||||
headers={"Authorization": f"Bearer {token}"},
|
||||
)
|
||||
assert response.status_code in (200, 201)
|
||||
body = response.json()
|
||||
|
||||
@@ -1,5 +1,3 @@
|
||||
import os
|
||||
import pytest
|
||||
|
||||
|
||||
_BASE_ENV = {
|
||||
@@ -26,6 +24,7 @@ def test_settings_load_from_env(monkeypatch):
|
||||
|
||||
# Import inside test to pick up monkeypatched env
|
||||
import importlib
|
||||
|
||||
import app.config as config_module
|
||||
importlib.reload(config_module)
|
||||
|
||||
@@ -42,6 +41,7 @@ def test_settings_max_upload_bytes_override(monkeypatch):
|
||||
_apply_env(monkeypatch, {"MAX_UPLOAD_BYTES": "10485760"})
|
||||
|
||||
import importlib
|
||||
|
||||
import app.config as config_module
|
||||
importlib.reload(config_module)
|
||||
|
||||
@@ -53,6 +53,7 @@ def test_settings_jwt_expiry_override(monkeypatch):
|
||||
_apply_env(monkeypatch, {"JWT_EXPIRY_SECONDS": "3600"})
|
||||
|
||||
import importlib
|
||||
|
||||
import app.config as config_module
|
||||
importlib.reload(config_module)
|
||||
|
||||
|
||||
@@ -1,4 +1,5 @@
|
||||
import hashlib
|
||||
|
||||
from app.utils import compute_sha256
|
||||
|
||||
|
||||
|
||||
@@ -1,6 +1,5 @@
|
||||
import time
|
||||
import pytest
|
||||
import jwt as pyjwt
|
||||
import pytest
|
||||
from fastapi import HTTPException
|
||||
|
||||
from app.auth.jwt_provider import JWTAuthProvider
|
||||
|
||||
98
api/tests/unit/test_rate_limiter.py
Normal file
98
api/tests/unit/test_rate_limiter.py
Normal file
@@ -0,0 +1,98 @@
|
||||
import ipaddress
|
||||
from unittest.mock import MagicMock
|
||||
|
||||
from starlette.requests import Request
|
||||
|
||||
from app.auth.rate_limiter import LoginRateLimiter, get_client_ip
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# LoginRateLimiter tests
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
|
||||
def make_limiter():
|
||||
return LoginRateLimiter(max_failures=3, window_seconds=60, cooldown_seconds=300)
|
||||
|
||||
|
||||
def test_not_blocked_initially():
|
||||
assert make_limiter().is_blocked("1.2.3.4") is False
|
||||
|
||||
|
||||
def test_blocked_after_threshold():
|
||||
limiter = make_limiter()
|
||||
for _ in range(3):
|
||||
limiter.record_failure("1.2.3.4")
|
||||
assert limiter.is_blocked("1.2.3.4") is True
|
||||
|
||||
|
||||
def test_success_clears_failures():
|
||||
limiter = make_limiter()
|
||||
limiter.record_failure("1.2.3.4")
|
||||
limiter.record_failure("1.2.3.4")
|
||||
limiter.record_success("1.2.3.4")
|
||||
assert limiter.is_blocked("1.2.3.4") is False
|
||||
|
||||
|
||||
def test_ips_are_isolated():
|
||||
limiter = make_limiter()
|
||||
for _ in range(3):
|
||||
limiter.record_failure("1.1.1.1")
|
||||
assert limiter.is_blocked("2.2.2.2") is False
|
||||
|
||||
|
||||
def test_window_resets_after_expiry():
|
||||
import time
|
||||
|
||||
limiter = LoginRateLimiter(max_failures=3, window_seconds=0, cooldown_seconds=300)
|
||||
limiter.record_failure("1.2.3.4")
|
||||
limiter.record_failure("1.2.3.4")
|
||||
time.sleep(0.01)
|
||||
limiter.record_failure("1.2.3.4")
|
||||
# window expired — counter reset on third call, so failures = 1, not 3
|
||||
assert limiter.is_blocked("1.2.3.4") is False
|
||||
|
||||
|
||||
def test_log_warning_on_lockout(caplog):
|
||||
import logging
|
||||
|
||||
limiter = make_limiter()
|
||||
with caplog.at_level(logging.WARNING, logger="app.auth.rate_limiter"):
|
||||
for _ in range(3):
|
||||
limiter.record_failure("5.6.7.8")
|
||||
assert "Login blocked" in caplog.text
|
||||
assert "5.6.7.8" in caplog.text
|
||||
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# get_client_ip tests
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
|
||||
def make_request(peer: str, headers: dict) -> MagicMock:
|
||||
req = MagicMock(spec=Request)
|
||||
req.client.host = peer
|
||||
req.headers = headers
|
||||
return req
|
||||
|
||||
|
||||
def test_get_client_ip_no_trusted_networks_returns_peer():
|
||||
req = make_request("203.0.113.1", {"X-Forwarded-For": "10.0.0.1"})
|
||||
assert get_client_ip(req, []) == "203.0.113.1"
|
||||
|
||||
|
||||
def test_get_client_ip_trusted_peer_uses_xff():
|
||||
req = make_request("10.0.0.1", {"X-Forwarded-For": "203.0.113.5"})
|
||||
nets = [ipaddress.ip_network("10.0.0.0/8")]
|
||||
assert get_client_ip(req, nets) == "203.0.113.5"
|
||||
|
||||
|
||||
def test_get_client_ip_untrusted_peer_ignores_xff():
|
||||
req = make_request("8.8.8.8", {"X-Forwarded-For": "203.0.113.5"})
|
||||
nets = [ipaddress.ip_network("10.0.0.0/8")]
|
||||
assert get_client_ip(req, nets) == "8.8.8.8"
|
||||
|
||||
|
||||
def test_get_client_ip_trusted_peer_falls_back_to_real_ip():
|
||||
req = make_request("10.0.0.1", {"X-Real-IP": "203.0.113.9"})
|
||||
nets = [ipaddress.ip_network("10.0.0.0/8")]
|
||||
assert get_client_ip(req, nets) == "203.0.113.9"
|
||||
@@ -3,6 +3,7 @@ T037 — tag normalisation: uppercase → lowercase, whitespace stripped
|
||||
T038 — tag validation: rejects names > 64 chars, invalid chars
|
||||
"""
|
||||
import pytest
|
||||
|
||||
from app.repositories.tag_repo import TagRepository
|
||||
|
||||
|
||||
|
||||
@@ -1,5 +1,6 @@
|
||||
import pytest
|
||||
from app.validation import validate_mime_type, validate_file_size, MimeTypeError, FileSizeError
|
||||
|
||||
from app.validation import FileSizeError, MimeTypeError, validate_file_size, validate_mime_type
|
||||
|
||||
ACCEPTED_TYPES = ["image/jpeg", "image/png", "image/gif", "image/webp"]
|
||||
REJECTED_TYPES = ["application/pdf", "video/mp4", "text/plain", "application/octet-stream"]
|
||||
|
||||
67
docker-compose.test.yml
Normal file
67
docker-compose.test.yml
Normal file
@@ -0,0 +1,67 @@
|
||||
services:
|
||||
postgres-test:
|
||||
image: postgres:16-alpine
|
||||
environment:
|
||||
POSTGRES_USER: reactbin
|
||||
POSTGRES_PASSWORD: reactbin
|
||||
POSTGRES_DB: reactbin_test
|
||||
ports:
|
||||
- "5433:5432"
|
||||
healthcheck:
|
||||
test: ["CMD-SHELL", "pg_isready -U reactbin"]
|
||||
interval: 5s
|
||||
timeout: 5s
|
||||
retries: 5
|
||||
|
||||
minio-test:
|
||||
image: minio/minio:latest
|
||||
command: server /data --console-address ":9001"
|
||||
environment:
|
||||
MINIO_ROOT_USER: minioadmin
|
||||
MINIO_ROOT_PASSWORD: minioadmin
|
||||
ports:
|
||||
- "9002:9000"
|
||||
- "9003:9001"
|
||||
healthcheck:
|
||||
test: ["CMD", "mc", "ready", "local"]
|
||||
interval: 5s
|
||||
timeout: 5s
|
||||
retries: 5
|
||||
|
||||
minio-init-test:
|
||||
image: minio/mc:latest
|
||||
depends_on:
|
||||
minio-test:
|
||||
condition: service_healthy
|
||||
environment:
|
||||
MINIO_ROOT_USER: minioadmin
|
||||
MINIO_ROOT_PASSWORD: minioadmin
|
||||
entrypoint: >
|
||||
/bin/sh -c "
|
||||
mc alias set local http://minio-test:9000 $$MINIO_ROOT_USER $$MINIO_ROOT_PASSWORD &&
|
||||
mc mb --ignore-existing local/reactbin-test
|
||||
"
|
||||
|
||||
api-test:
|
||||
build:
|
||||
context: ./api
|
||||
environment:
|
||||
TEST_DATABASE_URL: postgresql+asyncpg://reactbin:reactbin@postgres-test:5432/reactbin_test
|
||||
DATABASE_URL: postgresql+asyncpg://reactbin:reactbin@postgres-test:5432/reactbin_test
|
||||
S3_ENDPOINT_URL: http://minio-test:9000
|
||||
S3_BUCKET_NAME: reactbin-test
|
||||
S3_ACCESS_KEY_ID: minioadmin
|
||||
S3_SECRET_ACCESS_KEY: minioadmin
|
||||
S3_REGION: us-east-1
|
||||
JWT_SECRET_KEY: test-secret-key-for-testing-only
|
||||
OWNER_USERNAME: testowner
|
||||
OWNER_PASSWORD: testpassword
|
||||
API_BASE_URL: http://localhost:8000
|
||||
MAX_UPLOAD_BYTES: "52428800"
|
||||
depends_on:
|
||||
postgres-test:
|
||||
condition: service_healthy
|
||||
minio-init-test:
|
||||
condition: service_completed_successfully
|
||||
command: ["python", "-m", "pytest", "tests/", "-v"]
|
||||
working_dir: /app
|
||||
34
specs/006-header-nav-signout/checklists/requirements.md
Normal file
34
specs/006-header-nav-signout/checklists/requirements.md
Normal file
@@ -0,0 +1,34 @@
|
||||
# Specification Quality Checklist: Header Navigation & Sign-Out Destination
|
||||
|
||||
**Purpose**: Validate specification completeness and quality before proceeding to planning
|
||||
**Created**: 2026-05-03
|
||||
**Feature**: [spec.md](../spec.md)
|
||||
|
||||
## Content Quality
|
||||
|
||||
- [X] No implementation details (languages, frameworks, APIs)
|
||||
- [X] Focused on user value and business needs
|
||||
- [X] Written for non-technical stakeholders
|
||||
- [X] All mandatory sections completed
|
||||
|
||||
## Requirement Completeness
|
||||
|
||||
- [X] No [NEEDS CLARIFICATION] markers remain
|
||||
- [X] Requirements are testable and unambiguous
|
||||
- [X] Success criteria are measurable
|
||||
- [X] Success criteria are technology-agnostic (no implementation details)
|
||||
- [X] All acceptance scenarios are defined
|
||||
- [X] Edge cases are identified
|
||||
- [X] Scope is clearly bounded
|
||||
- [X] Dependencies and assumptions identified
|
||||
|
||||
## Feature Readiness
|
||||
|
||||
- [X] All functional requirements have clear acceptance criteria
|
||||
- [X] User scenarios cover primary flows
|
||||
- [X] Feature meets measurable outcomes defined in Success Criteria
|
||||
- [X] No implementation details leak into specification
|
||||
|
||||
## Notes
|
||||
|
||||
- All items pass. Both changes are small, independent, and clearly bounded. Spec is ready for `/speckit-plan`.
|
||||
25
specs/006-header-nav-signout/plan.md
Normal file
25
specs/006-header-nav-signout/plan.md
Normal file
@@ -0,0 +1,25 @@
|
||||
# Implementation Plan: Header Navigation & Sign-Out Destination
|
||||
|
||||
**Branch**: `006-header-nav-signout` | **Date**: 2026-05-03 | **Spec**: [spec.md](spec.md)
|
||||
|
||||
## Summary
|
||||
|
||||
Two targeted changes to `ui/src/app/app.component.ts`:
|
||||
1. Wrap the header app-name text in a router link to `/`.
|
||||
2. Change the post-sign-out navigation target from `/login` to `/`.
|
||||
|
||||
No API changes. No new dependencies. One component file affected.
|
||||
|
||||
## Technical Context
|
||||
|
||||
**Language/Version**: TypeScript 5 / Angular 19 (standalone components)
|
||||
**Affected files**: `ui/src/app/app.component.ts`, `ui/src/app/app.component.spec.ts`
|
||||
**Testing**: Karma / Jasmine
|
||||
|
||||
## Constitution Check
|
||||
|
||||
| Principle | Status |
|
||||
|-----------|--------|
|
||||
| §2.1 Strict separation of concerns | ✅ UI-only change |
|
||||
| §5.1 TDD non-negotiable | ✅ Tests written first |
|
||||
| §7.3 Linting non-optional | ✅ ng lint gate in tasks |
|
||||
69
specs/006-header-nav-signout/spec.md
Normal file
69
specs/006-header-nav-signout/spec.md
Normal file
@@ -0,0 +1,69 @@
|
||||
# Feature Specification: Header Navigation & Sign-Out Destination
|
||||
|
||||
**Feature Branch**: `006-header-nav-signout`
|
||||
**Created**: 2026-05-03
|
||||
**Status**: Draft
|
||||
**Input**: User description: "Simple updates to the UI. Site title in the header should link to the base URL so that it's quick to get back to the main grid view from any sub-page now or in the future. When a user signs out, they should be sent back to the grid view instead of the sign in form."
|
||||
|
||||
## User Scenarios & Testing *(mandatory)*
|
||||
|
||||
### User Story 1 — Header Title Links to Grid (Priority: P1)
|
||||
|
||||
The owner (or any visitor) is on a sub-page — an image detail page, the upload form, or any future page — and wants to return to the main image grid. Clicking the application name in the header takes them there immediately, without needing the browser back button or a dedicated navigation link.
|
||||
|
||||
**Why this priority**: The header title is always visible on every page and is the most natural home-navigation affordance. Making it functional costs almost nothing and benefits every session.
|
||||
|
||||
**Independent Test**: Open any sub-page (e.g., an image detail page). Click the application title in the header. Confirm the image grid loads. Works identically whether logged in or not.
|
||||
|
||||
**Acceptance Scenarios**:
|
||||
|
||||
1. **Given** the user is on any page other than the grid, **When** they click the application title in the header, **Then** they are taken to the image grid view.
|
||||
2. **Given** the user is already on the grid view, **When** they click the application title, **Then** the page either reloads the grid or stays on it — no error, no blank page.
|
||||
3. **Given** the user is not logged in and is on a public detail page, **When** they click the application title, **Then** they are taken to the grid (which is publicly visible) without being redirected to login.
|
||||
|
||||
---
|
||||
|
||||
### User Story 2 — Sign Out Lands on Grid (Priority: P1)
|
||||
|
||||
The owner signs out of the application and is returned to the image grid rather than the login page. Since the grid is publicly accessible, there is no need to force a redirect to login — the owner can choose to sign back in if they wish.
|
||||
|
||||
**Why this priority**: Sending a signed-out user to the login page is unnecessary friction for a personal tool where the grid content is public. It also makes the sign-out flow feel punitive rather than neutral.
|
||||
|
||||
**Independent Test**: Log in and sign out from the header. Confirm the image grid is shown, not the login form. Confirm the grid shows images in read-only mode (no write controls).
|
||||
|
||||
**Acceptance Scenarios**:
|
||||
|
||||
1. **Given** the user is signed in, **When** they click the sign-out control, **Then** their session ends and they are taken to the image grid view.
|
||||
2. **Given** the user has landed on the grid after signing out, **When** they view the page, **Then** tag-edit and delete controls are not shown (consistent with unauthenticated behaviour already in place).
|
||||
3. **Given** the user signs out from a sub-page (e.g., detail page), **When** the sign-out completes, **Then** they are taken to the grid — not to the page they were on, and not to the login form.
|
||||
|
||||
---
|
||||
|
||||
### Edge Cases
|
||||
|
||||
- What happens if the grid is unavailable when the user clicks the title? The navigation attempt is made; any existing error-state handling on the grid covers this.
|
||||
- What if a future page introduces an auth-required route? The header title links to the grid unconditionally; auth guards on specific routes handle their own redirects independently.
|
||||
|
||||
## Requirements *(mandatory)*
|
||||
|
||||
### Functional Requirements
|
||||
|
||||
- **FR-001**: The application title displayed in the persistent header MUST be a navigable link that takes the user to the image grid view from any page in the application.
|
||||
- **FR-002**: The title link MUST be accessible to both authenticated and unauthenticated users; it MUST NOT trigger a login redirect.
|
||||
- **FR-003**: After a user successfully signs out, the application MUST navigate them to the image grid view.
|
||||
- **FR-004**: The sign-out destination MUST be the grid view regardless of which page the user was on when they signed out.
|
||||
|
||||
## Success Criteria *(mandatory)*
|
||||
|
||||
### Measurable Outcomes
|
||||
|
||||
- **SC-001**: From any page, the image grid is reachable in exactly one click via the header title — no intermediate pages or redirects.
|
||||
- **SC-002**: After signing out, the user sees the image grid (not the login page) in 100% of sign-out flows.
|
||||
- **SC-003**: The title link functions correctly for both authenticated and unauthenticated sessions — verified across the grid, detail, and upload pages.
|
||||
|
||||
## Assumptions
|
||||
|
||||
- The image grid at the root URL is publicly accessible without authentication (confirmed: existing behaviour shows images to unauthenticated visitors).
|
||||
- The application title ("Reactbin") is already rendered as a text element in the persistent header from the UI polish work; this spec adds navigation behaviour to it, not a new visual element.
|
||||
- No change is made to the login redirect behaviour for protected routes (e.g., navigating directly to `/upload` while logged out still redirects to login as before).
|
||||
- The sign-out action clears the session as already implemented; only the post-sign-out destination changes.
|
||||
15
specs/006-header-nav-signout/tasks.md
Normal file
15
specs/006-header-nav-signout/tasks.md
Normal file
@@ -0,0 +1,15 @@
|
||||
# Tasks: Header Navigation & Sign-Out Destination
|
||||
|
||||
## Phase 1: Tests (TDD — write first)
|
||||
|
||||
- [X] T001 Add component tests for header title routerLink to `/` and sign-out navigation to `/` in `ui/src/app/app.component.spec.ts`
|
||||
|
||||
## Phase 2: Implementation
|
||||
|
||||
- [X] T002 Wrap the `.app-name` span in a `routerLink="/"` anchor in `ui/src/app/app.component.ts`
|
||||
- [X] T003 Change `onLogout()` navigation target from `/login` to `/` in `ui/src/app/app.component.ts`
|
||||
|
||||
## Phase 3: Validation
|
||||
|
||||
- [X] T004 Run `ng lint` in `ui/` — zero violations
|
||||
- [X] T005 Run `ng build` in `ui/` — zero errors
|
||||
34
specs/007-tag-browser/checklists/requirements.md
Normal file
34
specs/007-tag-browser/checklists/requirements.md
Normal file
@@ -0,0 +1,34 @@
|
||||
# Specification Quality Checklist: Tag Browser
|
||||
|
||||
**Purpose**: Validate specification completeness and quality before proceeding to planning
|
||||
**Created**: 2026-05-06
|
||||
**Feature**: [spec.md](../spec.md)
|
||||
|
||||
## Content Quality
|
||||
|
||||
- [X] No implementation details (languages, frameworks, APIs)
|
||||
- [X] Focused on user value and business needs
|
||||
- [X] Written for non-technical stakeholders
|
||||
- [X] All mandatory sections completed
|
||||
|
||||
## Requirement Completeness
|
||||
|
||||
- [X] No [NEEDS CLARIFICATION] markers remain
|
||||
- [X] Requirements are testable and unambiguous
|
||||
- [X] Success criteria are measurable
|
||||
- [X] Success criteria are technology-agnostic (no implementation details)
|
||||
- [X] All acceptance scenarios are defined
|
||||
- [X] Edge cases are identified
|
||||
- [X] Scope is clearly bounded
|
||||
- [X] Dependencies and assumptions identified
|
||||
|
||||
## Feature Readiness
|
||||
|
||||
- [X] All functional requirements have clear acceptance criteria
|
||||
- [X] User scenarios cover primary flows
|
||||
- [X] Feature meets measurable outcomes defined in Success Criteria
|
||||
- [X] No implementation details leak into specification
|
||||
|
||||
## Notes
|
||||
|
||||
- All items pass. Feature is small and well-bounded — two P1 stories (browse + navigate) form the core MVP; P2 (discoverability link) is a natural follow-on. No clarifications needed. Ready for `/speckit-plan`.
|
||||
58
specs/007-tag-browser/contracts/tags-endpoint.md
Normal file
58
specs/007-tag-browser/contracts/tags-endpoint.md
Normal file
@@ -0,0 +1,58 @@
|
||||
# Contract: GET /api/v1/tags (enhanced)
|
||||
|
||||
## Overview
|
||||
|
||||
Extends the existing tags list endpoint with two new optional query parameters. All existing behaviour is preserved when the new parameters are omitted.
|
||||
|
||||
## Request
|
||||
|
||||
```
|
||||
GET /api/v1/tags
|
||||
```
|
||||
|
||||
### Query Parameters
|
||||
|
||||
| Parameter | Type | Default | Description |
|
||||
|------------|---------|----------|-------------|
|
||||
| `q` | string | — | Filter tags by name prefix (existing) |
|
||||
| `limit` | integer | 100 | Max items to return; capped at 200 (existing) |
|
||||
| `offset` | integer | 0 | Pagination offset (existing) |
|
||||
| `sort` | string | `name` | Sort order: `name` (alphabetical asc) or `count_desc` (image count descending, alphabetical secondary) |
|
||||
| `min_count`| integer | 0 | Exclude tags with fewer than this many images. Use `1` to hide zero-count tags. |
|
||||
|
||||
### Authentication
|
||||
|
||||
Not required. Public endpoint.
|
||||
|
||||
## Response
|
||||
|
||||
```json
|
||||
{
|
||||
"items": [
|
||||
{ "id": "uuid", "name": "string", "image_count": 0 }
|
||||
],
|
||||
"total": 0,
|
||||
"limit": 100,
|
||||
"offset": 0
|
||||
}
|
||||
```
|
||||
|
||||
No changes to the response shape.
|
||||
|
||||
## Tag Browser Usage
|
||||
|
||||
The tag browser component calls:
|
||||
|
||||
```
|
||||
GET /api/v1/tags?sort=count_desc&min_count=1&limit=500
|
||||
```
|
||||
|
||||
`limit=500` is a safe upper bound for a personal library. If `total` exceeds `limit` in the response, the component logs a warning but renders what it received (no pagination UI required at this scale).
|
||||
|
||||
## Library Autocomplete Usage (unchanged)
|
||||
|
||||
```
|
||||
GET /api/v1/tags?q=<prefix>&limit=10
|
||||
```
|
||||
|
||||
Uses neither `sort` nor `min_count` — default behaviour is unchanged.
|
||||
23
specs/007-tag-browser/data-model.md
Normal file
23
specs/007-tag-browser/data-model.md
Normal file
@@ -0,0 +1,23 @@
|
||||
# Data Model: Tag Browser
|
||||
|
||||
No schema changes are required for this feature. All data needed to power the tag browser already exists.
|
||||
|
||||
## Derived Entity: Tag with Count
|
||||
|
||||
The tag browser displays a **read-only, derived view** of existing data:
|
||||
|
||||
| Field | Source | Notes |
|
||||
|-------|--------|-------|
|
||||
| `name` | `tags.name` | Lowercase, normalised string |
|
||||
| `image_count` | `COUNT(image_tags.image_id) WHERE image_tags.tag_id = tags.id` | Computed at query time |
|
||||
|
||||
This is exactly the shape already returned by `GET /api/v1/tags` as `{"id", "name", "image_count"}`.
|
||||
|
||||
## What Changes
|
||||
|
||||
The query in `TagRepository.list_tags()` gains two optional behaviours:
|
||||
|
||||
1. **Sort by count descending** — adds `ORDER BY image_count DESC, name ASC` (count-desc primary, alphabetical secondary) instead of the current `ORDER BY name ASC`.
|
||||
2. **Exclude low-count tags** — adds `HAVING image_count >= min_count` (or an equivalent `WHERE` on the subquery) when a positive `min_count` is requested; `min_count=1` excludes zero-count tags.
|
||||
|
||||
No new tables, columns, indexes, or migrations are needed.
|
||||
96
specs/007-tag-browser/plan.md
Normal file
96
specs/007-tag-browser/plan.md
Normal file
@@ -0,0 +1,96 @@
|
||||
# Implementation Plan: Tag Browser
|
||||
|
||||
**Branch**: `007-tag-browser` | **Date**: 2026-05-06 | **Spec**: [spec.md](spec.md)
|
||||
**Input**: Feature specification from `specs/007-tag-browser/spec.md`
|
||||
|
||||
## Summary
|
||||
|
||||
Add a `/tags` page that lists every tag with its image count, sorted by popularity, each linking to the filtered library view. Requires: (1) two new query parameters on the existing `/api/v1/tags` endpoint to support sort-by-count and zero-count exclusion, (2) query-parameter-driven filtering on the library route so tag browser links deep-link correctly, (3) a new `TagBrowserComponent`, and (4) a navigation entry point from the library.
|
||||
|
||||
## Technical Context
|
||||
|
||||
**Language/Version**: Python 3.12 (API), TypeScript strict / Angular 19 (UI)
|
||||
**Primary Dependencies**: FastAPI, SQLAlchemy 2.x async, Angular standalone components
|
||||
**Storage**: PostgreSQL (read-only for this feature — no schema changes)
|
||||
**Testing**: pytest + httpx (API integration), Jasmine/Karma (Angular unit)
|
||||
**Target Platform**: Web (same stack as all prior features)
|
||||
**Project Type**: Web service + SPA
|
||||
**Performance Goals**: Tag list page load perceived as instant (same bar as library)
|
||||
**Constraints**: No schema changes; no new dependencies; counts must be accurate at page-load time
|
||||
**Scale/Scope**: Personal library — tag count is bounded; no pagination UI needed for tag browser, but the API call uses existing paginated endpoint
|
||||
|
||||
## Constitution Check
|
||||
|
||||
| Principle | Status | Notes |
|
||||
|-----------|--------|-------|
|
||||
| §2.1 Strict separation of concerns | ✅ | UI calls API; API owns all DB logic |
|
||||
| §2.5 Repository layer | ✅ | All query changes go in `TagRepository.list_tags()` |
|
||||
| §2.6 No speculative abstraction | ✅ | No new interfaces; extends existing repo method |
|
||||
| §3.1 API versioning `/api/v1/` | ✅ | Modifying existing versioned endpoint |
|
||||
| §3.2 OpenAPI as contract | ✅ | New query params documented via FastAPI |
|
||||
| §3.3 Error shape | ✅ | No new error paths |
|
||||
| §3.4 Pagination | ✅ | Existing endpoint already paginates; tag browser fetches with `limit=500` (safe upper bound for a personal library) |
|
||||
| §4.1 Tags lowercase normalised | ✅ | No change to tag creation/normalisation |
|
||||
| §5.1 TDD non-negotiable | ✅ | Tests written before implementation in tasks |
|
||||
| §5.3 Tests colocated | ✅ | API tests in `api/tests/`, Angular spec next to component |
|
||||
| §6 Tech stack | ✅ | No new dependencies |
|
||||
| §7.3 Linting/formatting enforced | ✅ | `ng lint` + `ruff` gates in tasks |
|
||||
|
||||
**Gate**: All principles pass. Phase 0 research not required — no unknowns.
|
||||
|
||||
## Project Structure
|
||||
|
||||
### Documentation (this feature)
|
||||
|
||||
```text
|
||||
specs/007-tag-browser/
|
||||
├── plan.md ← this file
|
||||
├── research.md ← not required (no unknowns)
|
||||
├── data-model.md ← see below (derived data, no schema changes)
|
||||
├── contracts/
|
||||
│ └── tags-endpoint.md ← enhanced GET /api/v1/tags contract
|
||||
└── tasks.md ← generated by /speckit-tasks
|
||||
```
|
||||
|
||||
### Source Code Changes
|
||||
|
||||
```text
|
||||
api/
|
||||
├── app/
|
||||
│ ├── repositories/
|
||||
│ │ └── tag_repo.py ← extend list_tags() with sort + min_count params
|
||||
│ └── routers/
|
||||
│ └── tags.py ← expose sort + min_count as query params
|
||||
└── tests/
|
||||
├── integration/
|
||||
│ └── test_tags.py ← new tests: sort=count_desc, min_count=1
|
||||
└── unit/
|
||||
└── test_tags.py ← unit tests for repo sort/filter logic (if applicable)
|
||||
|
||||
ui/src/app/
|
||||
├── tags/
|
||||
│ ├── tags.component.ts ← new TagBrowserComponent
|
||||
│ └── tags.component.spec.ts ← component tests
|
||||
├── services/
|
||||
│ └── tag.service.ts ← add sort param to list() method
|
||||
├── library/
|
||||
│ └── library.component.ts ← read ?tags= query param on init; add /tags nav link
|
||||
└── app.routes.ts ← add /tags route (lazy-loaded)
|
||||
```
|
||||
|
||||
## Design Decisions
|
||||
|
||||
### API: extend existing endpoint rather than add new one
|
||||
|
||||
The `/api/v1/tags` endpoint already returns tags with `image_count`. Two new optional query parameters make it serve the tag browser without breaking existing callers (the library autocomplete uses the endpoint unchanged):
|
||||
|
||||
- `sort`: `name` (default, current behaviour) | `count_desc` (tag browser use case)
|
||||
- `min_count`: integer, default `0` (all tags, current behaviour) | `1` (excludes zero-count tags)
|
||||
|
||||
### Library: query param deep-linking
|
||||
|
||||
The library component currently manages `activeFilters` in memory only. Adding `?tags=cat,funny` query parameter support (read on `ngOnInit` via `ActivatedRoute`) allows the tag browser to link directly to a pre-filtered library view. The library already uses `addFilter()` / `applyFilter()` internally — reading from query params simply pre-populates `activeFilters` before the initial `load()` call. Navigation from within the library that changes filters should update the URL to keep it shareable, but that is a polish concern — minimum requirement is that arriving at `/?tags=cat` shows the cat-filtered library.
|
||||
|
||||
### Tag browser UI layout
|
||||
|
||||
A responsive chip/card grid sorted by count descending. Each item shows the tag name and count. Each item is a `routerLink` to `/?tags=<name>`. Follows the existing design token system (`--surface`, `--accent`, `--chip` styles). Empty state if no tags exist.
|
||||
45
specs/007-tag-browser/quickstart.md
Normal file
45
specs/007-tag-browser/quickstart.md
Normal file
@@ -0,0 +1,45 @@
|
||||
# Quickstart: Tag Browser
|
||||
|
||||
## Verifying the feature end-to-end
|
||||
|
||||
### Prerequisites
|
||||
|
||||
- Docker stack running (`docker compose up`)
|
||||
- At least 3 images uploaded with different tags (e.g., `cat`, `funny`, `reaction`)
|
||||
- At least one image with two tags (e.g., both `cat` and `funny`)
|
||||
|
||||
### Scenario 1 — Tag browser shows all tags with correct counts
|
||||
|
||||
1. Open the app (not logged in).
|
||||
2. Navigate to `/tags`.
|
||||
3. **Expected**: A list of tags is shown. Each tag displays the number of images with that tag. Tags are ordered from most images to fewest.
|
||||
4. Verify: Count next to `cat` matches the number of images actually tagged `cat`.
|
||||
5. Verify: Tags with zero images are not shown.
|
||||
|
||||
### Scenario 2 — Clicking a tag navigates to the filtered library
|
||||
|
||||
1. On the `/tags` page, click the `cat` tag.
|
||||
2. **Expected**: Navigated to the library (`/`) showing only images tagged `cat`.
|
||||
3. Verify: The active filter chip shows `cat` in the library.
|
||||
|
||||
### Scenario 3 — Library page links to tag browser
|
||||
|
||||
1. Navigate to `/` (library, logged in or out).
|
||||
2. **Expected**: A link or button labelled "Browse by tag" (or similar) is visible.
|
||||
3. Click it.
|
||||
4. **Expected**: The tag browser page loads.
|
||||
|
||||
### Scenario 4 — Empty state
|
||||
|
||||
1. If the library has no images at all, navigate to `/tags`.
|
||||
2. **Expected**: An empty state message is shown rather than a blank page or error.
|
||||
|
||||
### API verification
|
||||
|
||||
```bash
|
||||
# Sorted by count, zero-count tags excluded
|
||||
curl "http://localhost:8000/api/v1/tags?sort=count_desc&min_count=1"
|
||||
|
||||
# Existing autocomplete behaviour unchanged
|
||||
curl "http://localhost:8000/api/v1/tags?q=ca&limit=10"
|
||||
```
|
||||
95
specs/007-tag-browser/spec.md
Normal file
95
specs/007-tag-browser/spec.md
Normal file
@@ -0,0 +1,95 @@
|
||||
# Feature Specification: Tag Browser
|
||||
|
||||
**Feature Branch**: `007-tag-browser`
|
||||
**Created**: 2026-05-06
|
||||
**Status**: Draft
|
||||
**Input**: User description: "A page that lists all tags with their image counts so that users don't have to guess at searches to find image categories/tags"
|
||||
|
||||
## User Scenarios & Testing *(mandatory)*
|
||||
|
||||
### User Story 1 — Browse All Tags (Priority: P1)
|
||||
|
||||
The owner (or any visitor) wants to know what categories of images exist in the library without having to type guesses into a search box. They navigate to the tag browser page and see every tag in the library alongside the number of images associated with it, sorted so the most-used tags appear first.
|
||||
|
||||
**Why this priority**: This is the entire purpose of the feature. A visitor who doesn't know what tags exist has no way to discover them otherwise — the tag filter on the library page only helps when you already know what to type.
|
||||
|
||||
**Independent Test**: Navigate to the tag browser page without being logged in. Confirm every tag in the library is shown with its image count, ordered from highest to lowest count.
|
||||
|
||||
**Acceptance Scenarios**:
|
||||
|
||||
1. **Given** the library contains images with various tags, **When** a visitor opens the tag browser page, **Then** every tag in the library is listed with the number of images that carry that tag.
|
||||
2. **Given** the tag list is displayed, **When** the visitor looks at the ordering, **Then** tags with more images appear before tags with fewer images.
|
||||
3. **Given** the visitor is not logged in, **When** they open the tag browser page, **Then** the page loads and displays tags without requiring authentication.
|
||||
|
||||
---
|
||||
|
||||
### User Story 2 — Navigate from Tag to Library (Priority: P1)
|
||||
|
||||
A visitor sees a tag they are interested in and wants to view the images in that category. Clicking a tag on the tag browser page takes them directly to the library filtered to that tag, without requiring them to retype it.
|
||||
|
||||
**Why this priority**: The tag browser page has no value as a dead end. Each tag must be a link to the filtered library view — that is the core action the page enables. Treated as P1 because the browse and navigate actions together form the minimum useful feature.
|
||||
|
||||
**Independent Test**: Click any tag on the tag browser page. Confirm the library view opens showing only images carrying that tag.
|
||||
|
||||
**Acceptance Scenarios**:
|
||||
|
||||
1. **Given** the tag browser is showing a list of tags, **When** the visitor clicks a tag, **Then** they are taken to the library view filtered to show only images with that tag.
|
||||
2. **Given** the visitor clicks a tag with a count of one, **When** the library loads, **Then** exactly one image is shown.
|
||||
|
||||
---
|
||||
|
||||
### User Story 3 — Reach the Tag Browser from the Library (Priority: P2)
|
||||
|
||||
The owner is browsing the image library and wants to switch to the tag browser to explore by category. A navigation element on the library page makes the tag browser discoverable without requiring the visitor to type the URL directly.
|
||||
|
||||
**Why this priority**: The tag browser is only useful if visitors can find it. A direct entry point from the library is the most natural discovery path; however, the core value of browsing and navigating tags is independently deliverable without it.
|
||||
|
||||
**Independent Test**: Load the library page. Confirm a visible link or button leads to the tag browser and navigates correctly when clicked.
|
||||
|
||||
**Acceptance Scenarios**:
|
||||
|
||||
1. **Given** the visitor is on the library page, **When** they look for a way to browse by tag, **Then** a visible link or button leads them to the tag browser.
|
||||
2. **Given** the visitor clicks that link, **When** the tag browser loads, **Then** all tags and counts are shown as expected.
|
||||
|
||||
---
|
||||
|
||||
### Edge Cases
|
||||
|
||||
- What if there are no tags in the library at all? The page displays an appropriate empty state message rather than a blank page or error.
|
||||
- What if a tag has been removed from all images (count reaches zero)? Tags with a count of zero are not shown on the tag browser page.
|
||||
- What if the library contains a very large number of distinct tags? The page renders all of them without truncation; pagination is not required at personal library scale.
|
||||
- What if two tags share the same count? An alphabetical secondary sort is acceptable — no specific tie-breaking order was requested.
|
||||
|
||||
## Requirements *(mandatory)*
|
||||
|
||||
### Functional Requirements
|
||||
|
||||
- **FR-001**: The application MUST provide a dedicated tag browser page accessible at a stable URL.
|
||||
- **FR-002**: The tag browser page MUST display every tag that exists in the library with at least one associated image, each shown with its current image count.
|
||||
- **FR-003**: Tags with an image count of zero MUST NOT appear on the tag browser page.
|
||||
- **FR-004**: Tags MUST be ordered from highest image count to lowest image count.
|
||||
- **FR-005**: Each tag on the tag browser page MUST be a navigable link that takes the visitor to the library view filtered to that tag.
|
||||
- **FR-006**: The tag browser page MUST be publicly accessible without authentication.
|
||||
- **FR-007**: The library page MUST include a discoverable navigation element leading to the tag browser page.
|
||||
|
||||
### Key Entities
|
||||
|
||||
- **Tag with count**: A tag label paired with the number of images currently carrying that tag. No new stored data — counts are derived from existing image–tag relationships at read time.
|
||||
|
||||
## Success Criteria *(mandatory)*
|
||||
|
||||
### Measurable Outcomes
|
||||
|
||||
- **SC-001**: Every tag present in the library with at least one image appears on the tag browser page — 0% omission rate.
|
||||
- **SC-002**: The image count displayed next to each tag matches the actual number of images with that tag — 100% accuracy.
|
||||
- **SC-003**: Clicking any tag on the tag browser navigates to the correctly filtered library view in 100% of cases.
|
||||
- **SC-004**: The tag browser page loads successfully without authentication — verified by opening it while logged out.
|
||||
- **SC-005**: A visitor can go from the library page to the tag browser and on to a filtered library view in three interactions or fewer.
|
||||
|
||||
## Assumptions
|
||||
|
||||
- Tags are already a first-class concept in the library — images can have multiple tags and the data needed to derive counts already exists. No schema changes are required.
|
||||
- The library page already supports filtering by tag (via the existing search/filter mechanism); the tag browser links into that existing behaviour.
|
||||
- Alphabetical secondary sort for equal-count tags is acceptable.
|
||||
- Pagination of the tag list is out of scope for a personal image library.
|
||||
- Creating, renaming, or deleting tags from the tag browser page is out of scope; it is a read-only view.
|
||||
152
specs/007-tag-browser/tasks.md
Normal file
152
specs/007-tag-browser/tasks.md
Normal file
@@ -0,0 +1,152 @@
|
||||
# Tasks: Tag Browser
|
||||
|
||||
**Input**: Design documents from `specs/007-tag-browser/`
|
||||
**Prerequisites**: plan.md ✅, spec.md ✅, data-model.md ✅, contracts/ ✅, quickstart.md ✅
|
||||
|
||||
**Tests**: TDD is non-negotiable (§5.1). Every implementation task is preceded by a failing-test task. Test tasks MUST be written and confirmed failing before the corresponding implementation task begins.
|
||||
|
||||
**Organization**: Foundational API + service changes first (block all stories), then one phase per user story.
|
||||
|
||||
## Format: `[ID] [P?] [Story] Description`
|
||||
|
||||
- **[P]**: Can run in parallel with other [P] tasks in the same phase
|
||||
- **[Story]**: Which user story this task belongs to
|
||||
- Exact file paths included in every task description
|
||||
|
||||
---
|
||||
|
||||
## Phase 1: Setup
|
||||
|
||||
No new project structure required. The existing layout accommodates all changes.
|
||||
|
||||
---
|
||||
|
||||
## Phase 2: Foundational — API Enhancement & Service Update
|
||||
|
||||
**Purpose**: Extend `GET /api/v1/tags` with `sort` and `min_count` query parameters; update the Angular `TagService` to pass them. All three user stories depend on the API returning tags sorted by count with zero-count tags excluded.
|
||||
|
||||
**⚠️ CRITICAL**: No user story work can begin until this phase is complete.
|
||||
|
||||
- [X] T001 [P] Write failing API integration tests for `sort=count_desc` and `min_count=1` params in `api/tests/integration/test_tags.py` — assert response is ordered highest-count-first and excludes zero-count tags
|
||||
- [X] T002 [P] Write failing spec for updated `TagService.list()` accepting `sort` and `minCount` params in `ui/src/app/services/tag.service.spec.ts` — final signature: `list(prefix = '', limit = 100, offset = 0, sort?: string, minCount?: number)`
|
||||
- [X] T003 Extend `TagRepository.list_tags()` in `api/app/repositories/tag_repo.py` — add `sort: str = "name"` and `min_count: int = 0` params; apply `ORDER BY image_count DESC, name ASC` when `sort="count_desc"`; apply `HAVING image_count >= min_count` filter — run AFTER T001 (TDD)
|
||||
- [X] T004 Expose `sort` and `min_count` as optional query params in `api/app/routers/tags.py` — pass through to `tag_repo.list_tags()` — run AFTER T003
|
||||
- [X] T005 Update `TagService.list()` in `ui/src/app/services/tag.service.ts` — final signature: `list(prefix = '', limit = 100, offset = 0, sort?: string, minCount?: number)`; include `sort` and `min_count` in `HttpParams` when provided — run AFTER T002 (TDD)
|
||||
|
||||
**Execution order**: T001 ∥ T002 → T003 (after T001), T005 (after T002) → T004 (after T003)
|
||||
|
||||
**Checkpoint**: `GET /api/v1/tags?sort=count_desc&min_count=1` returns tags sorted by image count descending with zero-count tags excluded. `TagService.list()` passes the new params.
|
||||
|
||||
---
|
||||
|
||||
## Phase 3: User Story 1 — Browse All Tags (Priority: P1) 🎯 MVP
|
||||
|
||||
**Goal**: A `/tags` page that lists every tag (with count ≥ 1) sorted from most-used to least-used, with loading skeleton, empty state, and error state matching the existing design system.
|
||||
|
||||
**Independent Test**: Navigate to `/tags` while logged out. Confirm every tag with at least one image is shown with its count, ordered by count descending. Confirm the empty state appears when no tags exist.
|
||||
|
||||
### Tests for User Story 1
|
||||
|
||||
- [X] T006 [US1] Write failing spec for `TagBrowserComponent` in `ui/src/app/tags/tags.component.spec.ts` covering: (a) skeleton shown while loading, (b) tag list rendered with name and count after load, (c) tags ordered by count descending, (d) empty state shown when tag list is empty, (e) error state shown on fetch failure with retry button, (f) each rendered tag element has an `href` of `/?tags=<tagname>` (FR-005 coverage), (g) component renders when `AuthService` is not present / user is unauthenticated (FR-006 coverage)
|
||||
|
||||
### Implementation for User Story 1
|
||||
|
||||
- [X] T007 [US1] Create `TagBrowserComponent` in `ui/src/app/tags/tags.component.ts` — standalone component; on init call `tagService.list('', 500, 0, 'count_desc', 1)` (positional order matches T005 signature); display tag chips with name + count; each chip is a `routerLink="/"` with `[queryParams]="{tags: tag.name}"` so the href renders as `/?tags=<name>`; include skeleton loading state (reuse `.skeleton` class from global styles), empty state, and error state with retry; apply design tokens throughout
|
||||
- [X] T008 [P] [US1] Add `/tags` lazy route to `ui/src/app/app.routes.ts` — load `TagBrowserComponent`; no auth guard (public route)
|
||||
|
||||
**Checkpoint**: `/tags` renders a count-sorted tag list (zero-count tags excluded), visible without authentication.
|
||||
|
||||
---
|
||||
|
||||
## Phase 4: User Story 2 — Navigate from Tag to Library (Priority: P1)
|
||||
|
||||
**Goal**: Clicking a tag on the tag browser navigates to the library pre-filtered to that tag. Requires the library to read `?tags=<name>` from the URL on init and apply it as an active filter before the first image load.
|
||||
|
||||
**Independent Test**: Navigate directly to `/?tags=cat` in the browser. Confirm the library loads showing only images tagged `cat` and the `cat` chip appears in the active filter bar.
|
||||
|
||||
### Tests for User Story 2
|
||||
|
||||
- [X] T009 [US2] Write failing spec for `LibraryComponent` reading `?tags=` query param in `ui/src/app/library/library.component.spec.ts` — assert that when the component initialises with `?tags=cat` in the URL, `activeFilters` contains `['cat']` and `imageService.list` is called with `['cat']`
|
||||
|
||||
### Implementation for User Story 2
|
||||
|
||||
- [X] T010 [US2] Update `LibraryComponent` in `ui/src/app/library/library.component.ts` — inject `ActivatedRoute`; in `ngOnInit`, read `snapshot.queryParamMap.get('tags')`; if present, split by comma, set `activeFilters` before calling `load()` so the first fetch is already filtered
|
||||
|
||||
**Checkpoint**: Navigating to `/?tags=cat` from the tag browser shows the correctly filtered library.
|
||||
|
||||
---
|
||||
|
||||
## Phase 5: User Story 3 — Tag Browser Discoverable from Library (Priority: P2)
|
||||
|
||||
**Goal**: A visible "Browse tags" link in the library page header navigates to `/tags`. Makes the tag browser discoverable without requiring the user to type the URL.
|
||||
|
||||
**Independent Test**: Load the library page. Confirm a link to `/tags` is visible in the header and navigates correctly when clicked.
|
||||
|
||||
### Tests for User Story 3
|
||||
|
||||
- [X] T011 [US3] Write failing spec for library nav link to `/tags` in `ui/src/app/library/library.component.spec.ts` — assert a link element with `href="/tags"` is present in the rendered header
|
||||
|
||||
### Implementation for User Story 3
|
||||
|
||||
- [X] T012 [US3] Add "Browse tags" `routerLink="/tags"` link to `LibraryComponent` header in `ui/src/app/library/library.component.ts` — place alongside the existing Upload button; style consistently with the existing header button pattern
|
||||
|
||||
**Checkpoint**: All three user stories are independently functional.
|
||||
|
||||
---
|
||||
|
||||
## Phase 6: Polish & Cross-Cutting Concerns
|
||||
|
||||
- [X] T013 [P] Run `ruff check api/app/ api/tests/` and fix any violations
|
||||
- [X] T014 [P] Run `ng lint` in `ui/` — zero violations required
|
||||
- [X] T015 Run `ng build` in `ui/` — zero errors required
|
||||
|
||||
---
|
||||
|
||||
## Dependencies & Execution Order
|
||||
|
||||
### Phase Dependencies
|
||||
|
||||
- **Phase 2 (Foundational)**: Blocks all user story phases — must complete first
|
||||
- **Phase 3 (US1)**: Depends on Phase 2 — TagBrowserComponent needs the sorted tag endpoint
|
||||
- **Phase 4 (US2)**: Depends on Phase 2 — library deep-link needs no API change, but should follow US1 for coherent testing
|
||||
- **Phase 5 (US3)**: Depends on Phase 3 (needs the `/tags` route to exist for the link to be meaningful)
|
||||
- **Phase 6 (Polish)**: Depends on all prior phases
|
||||
|
||||
### Within Phase 2
|
||||
|
||||
- T001 ∥ T002 (different repos, both write failing tests)
|
||||
- T003 after T001 (TDD: failing test must exist first)
|
||||
- T005 after T002 (TDD: failing test must exist first)
|
||||
- T003 ∥ T005 (different repos, after their respective tests)
|
||||
- T004 after T003 (router wraps repo)
|
||||
|
||||
### Execution Order (Phase 2)
|
||||
|
||||
```
|
||||
Step 1 (parallel): T001, T002
|
||||
Step 2 (parallel): T003 (after T001), T005 (after T002)
|
||||
Step 3: T004 (after T003)
|
||||
```
|
||||
|
||||
### Parallel Opportunities (Phases 3–5)
|
||||
|
||||
- T007 and T008 are parallel within Phase 3
|
||||
|
||||
---
|
||||
|
||||
## Implementation Strategy
|
||||
|
||||
### MVP (US1 + US2 — both P1)
|
||||
|
||||
1. Complete Phase 2 (Foundational)
|
||||
2. Complete Phase 3 (US1 — TagBrowserComponent)
|
||||
3. Complete Phase 4 (US2 — library deep-link)
|
||||
4. **Validate**: Navigate from tag browser → library → confirm pre-filtered results
|
||||
5. Phases 5–6 add discoverability and polish
|
||||
|
||||
### Incremental Delivery
|
||||
|
||||
- After Phase 3: `/tags` page is live and usable (visitors can browse tags)
|
||||
- After Phase 4: clicking a tag works end-to-end (browse → filtered library)
|
||||
- After Phase 5: tag browser is discoverable from the library without typing the URL
|
||||
- After Phase 6: lint and build clean, ready for merge
|
||||
236
specs/008-postgres-integration-tests/plan.md
Normal file
236
specs/008-postgres-integration-tests/plan.md
Normal file
@@ -0,0 +1,236 @@
|
||||
# Implementation Plan: PostgreSQL Integration Test Infrastructure
|
||||
|
||||
**Branch**: `008-postgres-integration-tests` | **Date**: 2026-05-06 | **Spec**: specs/008-postgres-integration-tests/spec.md
|
||||
**Input**: Feature specification from `specs/008-postgres-integration-tests/spec.md`
|
||||
|
||||
---
|
||||
|
||||
## Summary
|
||||
|
||||
Enforce the constitution's PostgreSQL mandate (§2.5, §5.2 v1.3.0) for integration tests. Three concrete deliverables: (1) a fast-fail guard in `conftest.py` that rejects non-PostgreSQL URLs before any test collects, (2) a `docker-compose.test.yml` that provides isolated `postgres-test` and `minio-test` services and an `api-test` runner, and (3) a `Makefile` + `.env.test.example` that document the canonical test commands.
|
||||
|
||||
---
|
||||
|
||||
## Technical Context
|
||||
|
||||
**Language/Version**: Python 3.12, Docker Compose v2
|
||||
**Primary Dependencies**: pytest, pytest-asyncio, asyncpg, SQLAlchemy 2.x (all already in `pyproject.toml [dev]`)
|
||||
**Storage**: PostgreSQL 16-alpine (test instance), MinIO (test instance)
|
||||
**Testing**: pytest — this feature *is* the test infrastructure change
|
||||
**Target Platform**: Developer workstation (Linux/macOS) with Docker
|
||||
**Project Type**: Infrastructure / developer-experience
|
||||
**Performance Goals**: Guard exits in < 2 s; full integration suite continues to run in < 60 s
|
||||
**Constraints**: Must not break the existing dev compose stack; no changes to application source code
|
||||
**Scale/Scope**: One guard, one compose file, one Makefile, one env example
|
||||
|
||||
---
|
||||
|
||||
## Constitution Check
|
||||
|
||||
| Principle | Status | Notes |
|
||||
|-----------|--------|-------|
|
||||
| §2.5 Database abstraction — no alternative DB in integration tests | ✅ ENFORCED | This feature implements the enforcement |
|
||||
| §5.1 TDD — failing test before implementation | ✅ | Guard itself is tested by running with a bad URL before adding the guard |
|
||||
| §5.2 Test pyramid — integration tests use real PostgreSQL | ✅ ENFORCED | docker-compose.test.yml provides the real instance |
|
||||
| §5.4 CI must pass before task is done | ✅ | Verified by running the full suite via compose |
|
||||
| §6 Tech stack — asyncpg driver, Docker Compose | ✅ | No new technologies introduced |
|
||||
| §7.1 One-command local start | ✅ | `docker compose -f docker-compose.test.yml run --rm api-test` |
|
||||
| §7.2 Environment config via env vars | ✅ | .env.test.example documents all vars |
|
||||
| §7.3 Linting not optional | ✅ | ruff will run as part of task validation |
|
||||
|
||||
No violations.
|
||||
|
||||
---
|
||||
|
||||
## Project Structure
|
||||
|
||||
### Documentation (this feature)
|
||||
|
||||
```text
|
||||
specs/008-postgres-integration-tests/
|
||||
├── plan.md ← this file
|
||||
├── research.md ← decisions made above
|
||||
├── spec.md ← feature specification
|
||||
└── tasks.md ← generated by /speckit-tasks
|
||||
```
|
||||
|
||||
### Source changes
|
||||
|
||||
```text
|
||||
# New files
|
||||
docker-compose.test.yml ← isolated test services + api-test runner
|
||||
.env.test.example ← documents test environment variables
|
||||
Makefile ← test-unit / test-integration targets
|
||||
|
||||
# Modified files
|
||||
api/tests/integration/conftest.py ← add postgresql+asyncpg:// dialect guard
|
||||
```
|
||||
|
||||
No application source files (`api/app/`) are modified. No UI files are touched.
|
||||
|
||||
---
|
||||
|
||||
## Detailed Design
|
||||
|
||||
### 1. conftest.py — dialect guard
|
||||
|
||||
Add a module-level `pytest_configure` hook at the top of `api/tests/integration/conftest.py`. It resolves the database URL the same way the `engine` fixture does (prefer `TEST_DATABASE_URL`, fall back to the `DATABASE_URL` environment variable) and calls `pytest.exit()` if the scheme is not `postgresql+asyncpg`:
|
||||
|
||||
```python
|
||||
def pytest_configure(config):
|
||||
import os
|
||||
db_url = os.getenv("TEST_DATABASE_URL") or os.getenv("DATABASE_URL", "")
|
||||
if not db_url.startswith("postgresql+asyncpg://"):
|
||||
pytest.exit(
|
||||
"Integration tests require a PostgreSQL database "
|
||||
"(postgresql+asyncpg://...). "
|
||||
"Set TEST_DATABASE_URL or DATABASE_URL accordingly. "
|
||||
f"Got: {db_url!r}",
|
||||
returncode=1,
|
||||
)
|
||||
```
|
||||
|
||||
The hook runs before any fixture or collection, giving an immediate, unambiguous error.
|
||||
|
||||
**Note**: This guard goes in `api/tests/integration/conftest.py` only, not in `api/tests/conftest.py`, so that unit tests (which use no database) are unaffected.
|
||||
|
||||
### 2. docker-compose.test.yml
|
||||
|
||||
```yaml
|
||||
services:
|
||||
postgres-test:
|
||||
image: postgres:16-alpine
|
||||
environment:
|
||||
POSTGRES_USER: reactbin
|
||||
POSTGRES_PASSWORD: reactbin
|
||||
POSTGRES_DB: reactbin_test
|
||||
ports:
|
||||
- "5433:5432"
|
||||
healthcheck:
|
||||
test: ["CMD-SHELL", "pg_isready -U reactbin"]
|
||||
interval: 5s
|
||||
timeout: 5s
|
||||
retries: 5
|
||||
|
||||
minio-test:
|
||||
image: minio/minio:latest
|
||||
command: server /data --console-address ":9001"
|
||||
environment:
|
||||
MINIO_ROOT_USER: minioadmin
|
||||
MINIO_ROOT_PASSWORD: minioadmin
|
||||
ports:
|
||||
- "9002:9000"
|
||||
- "9003:9001"
|
||||
healthcheck:
|
||||
test: ["CMD", "mc", "ready", "local"]
|
||||
interval: 5s
|
||||
timeout: 5s
|
||||
retries: 5
|
||||
|
||||
minio-init-test:
|
||||
image: minio/mc:latest
|
||||
depends_on:
|
||||
minio-test:
|
||||
condition: service_healthy
|
||||
environment:
|
||||
MINIO_ROOT_USER: minioadmin
|
||||
MINIO_ROOT_PASSWORD: minioadmin
|
||||
entrypoint: >
|
||||
/bin/sh -c "
|
||||
mc alias set local http://minio-test:9000 $$MINIO_ROOT_USER $$MINIO_ROOT_PASSWORD &&
|
||||
mc mb --ignore-existing local/reactbin-test
|
||||
"
|
||||
|
||||
api-test:
|
||||
build:
|
||||
context: ./api
|
||||
environment:
|
||||
TEST_DATABASE_URL: postgresql+asyncpg://reactbin:reactbin@postgres-test:5432/reactbin_test
|
||||
DATABASE_URL: postgresql+asyncpg://reactbin:reactbin@postgres-test:5432/reactbin_test
|
||||
S3_ENDPOINT_URL: http://minio-test:9000
|
||||
S3_BUCKET_NAME: reactbin-test
|
||||
S3_ACCESS_KEY_ID: minioadmin
|
||||
S3_SECRET_ACCESS_KEY: minioadmin
|
||||
S3_REGION: us-east-1
|
||||
JWT_SECRET_KEY: test-secret-key-for-testing-only
|
||||
OWNER_USERNAME: testowner
|
||||
OWNER_PASSWORD: testpassword
|
||||
API_BASE_URL: http://localhost:8000
|
||||
MAX_UPLOAD_BYTES: "52428800"
|
||||
depends_on:
|
||||
postgres-test:
|
||||
condition: service_healthy
|
||||
minio-init-test:
|
||||
condition: service_completed_successfully
|
||||
command: ["python", "-m", "pytest", "tests/", "-v"]
|
||||
working_dir: /app
|
||||
```
|
||||
|
||||
### 3. .env.test.example
|
||||
|
||||
Documents the variables needed to run integration tests from the host (with postgres-test and minio-test already running via compose):
|
||||
|
||||
```bash
|
||||
# Integration test environment — used when running pytest directly on the host
|
||||
# Start test services first: docker compose -f docker-compose.test.yml up -d postgres-test minio-test minio-init-test
|
||||
|
||||
TEST_DATABASE_URL=postgresql+asyncpg://reactbin:reactbin@localhost:5433/reactbin_test
|
||||
DATABASE_URL=postgresql+asyncpg://reactbin:reactbin@localhost:5433/reactbin_test
|
||||
S3_ENDPOINT_URL=http://localhost:9002
|
||||
S3_BUCKET_NAME=reactbin-test
|
||||
S3_ACCESS_KEY_ID=minioadmin
|
||||
S3_SECRET_ACCESS_KEY=minioadmin
|
||||
S3_REGION=us-east-1
|
||||
JWT_SECRET_KEY=test-secret-key-for-testing-only
|
||||
OWNER_USERNAME=testowner
|
||||
OWNER_PASSWORD=testpassword
|
||||
API_BASE_URL=http://localhost:8000
|
||||
MAX_UPLOAD_BYTES=52428800
|
||||
```
|
||||
|
||||
### 4. Makefile
|
||||
|
||||
```makefile
|
||||
.PHONY: test-unit test-integration
|
||||
|
||||
test-unit:
|
||||
cd api && python -m pytest tests/unit/ -v
|
||||
|
||||
test-integration:
|
||||
docker compose -f docker-compose.test.yml run --rm api-test
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## Phase Breakdown
|
||||
|
||||
### Phase 1: Guard (FR-001) — US1
|
||||
|
||||
- Write a failing test: run `pytest api/tests/integration/` with `TEST_DATABASE_URL=sqlite+aiosqlite:///test.db` — confirm it does NOT exit early (test that the guard is absent)
|
||||
- Add `pytest_configure` guard to `api/tests/integration/conftest.py`
|
||||
- Verify: running with SQLite URL now exits immediately with the correct message
|
||||
- Verify: running with a PostgreSQL URL proceeds normally
|
||||
|
||||
### Phase 2: Docker Compose test stack (FR-002, FR-003) — US2
|
||||
|
||||
- Write `docker-compose.test.yml` with `postgres-test`, `minio-test`, `minio-init-test`, `api-test`
|
||||
- Run `docker compose -f docker-compose.test.yml run --rm api-test` — all tests pass
|
||||
- Confirm dev stack (port 5432, 9000) is unaffected
|
||||
|
||||
### Phase 3: Documentation (FR-004, FR-005) — US3
|
||||
|
||||
- Write `.env.test.example`
|
||||
- Write `Makefile` with `test-unit` and `test-integration`
|
||||
- Verify `make test-unit` runs unit tests without Docker
|
||||
- Verify `make test-integration` invokes the compose command
|
||||
|
||||
### Phase 4: Polish
|
||||
|
||||
- `ruff check api/app/ api/tests/` — zero violations
|
||||
- `ng lint` is unaffected (no UI changes)
|
||||
|
||||
---
|
||||
|
||||
## No data model or API contracts
|
||||
|
||||
This feature touches only developer tooling. No new API endpoints, database schema changes, or UI components.
|
||||
38
specs/008-postgres-integration-tests/quickstart.md
Normal file
38
specs/008-postgres-integration-tests/quickstart.md
Normal file
@@ -0,0 +1,38 @@
|
||||
# Quickstart: Integration Test Infrastructure
|
||||
|
||||
## Run the full integration test suite (Docker, recommended)
|
||||
|
||||
```bash
|
||||
docker compose -f docker-compose.test.yml run --rm api-test
|
||||
```
|
||||
|
||||
Test services start automatically. The command exits with pytest's return code.
|
||||
|
||||
## Run unit tests only (no Docker required)
|
||||
|
||||
```bash
|
||||
make test-unit
|
||||
# or directly:
|
||||
cd api && python -m pytest tests/unit/ -v
|
||||
```
|
||||
|
||||
## Run integration tests from the host (test services must be running)
|
||||
|
||||
```bash
|
||||
# Start test services
|
||||
docker compose -f docker-compose.test.yml up -d postgres-test minio-test minio-init-test
|
||||
|
||||
# Copy and source test env vars
|
||||
cp .env.test.example .env.test
|
||||
export $(grep -v '^#' .env.test | xargs)
|
||||
|
||||
# Run tests
|
||||
cd api && python -m pytest tests/integration/ -v
|
||||
```
|
||||
|
||||
## Validate the guard works
|
||||
|
||||
```bash
|
||||
TEST_DATABASE_URL=sqlite+aiosqlite:///test.db python -m pytest api/tests/integration/
|
||||
# Expected: exits immediately with "Integration tests require a PostgreSQL database (postgresql+asyncpg://...)"
|
||||
```
|
||||
55
specs/008-postgres-integration-tests/research.md
Normal file
55
specs/008-postgres-integration-tests/research.md
Normal file
@@ -0,0 +1,55 @@
|
||||
# Research: PostgreSQL Integration Test Infrastructure
|
||||
|
||||
## Decision 1: How to enforce the PostgreSQL dialect in conftest.py
|
||||
|
||||
**Decision**: Add a `pytest_configure` hook (or a module-level guard in `conftest.py`) that calls `pytest.exit()` if the resolved database URL does not start with `postgresql+asyncpg://`.
|
||||
|
||||
**Rationale**: `pytest_configure` runs before collection, giving the clearest possible failure signal. A module-level assertion would also work but produces a less readable traceback. `pytest.exit()` with a human-readable message is the idiomatic approach.
|
||||
|
||||
**Alternatives considered**:
|
||||
- A custom pytest plugin in a separate file — unnecessary complexity for a one-liner guard.
|
||||
- Raising an exception in the `engine` fixture — runs too late (after collection); developers see confusing fixture errors instead of a clear message.
|
||||
|
||||
---
|
||||
|
||||
## Decision 2: Separate docker-compose.test.yml vs profiles in docker-compose.yml
|
||||
|
||||
**Decision**: Use a standalone `docker-compose.test.yml` at the repo root.
|
||||
|
||||
**Rationale**: Docker Compose profiles require the developer to remember `--profile test` on every command. A separate file is explicit and self-contained. The test file can define its own service names and ports without touching the dev compose file at all.
|
||||
|
||||
**Alternatives considered**:
|
||||
- `docker-compose.yml` with a `test` profile — profile discovery is non-obvious; modifying the dev file risks breaking the dev stack.
|
||||
- A `docker-compose.override.yml` — override files apply automatically to `docker compose up`, which is the opposite of what we want for tests.
|
||||
|
||||
---
|
||||
|
||||
## Decision 3: Port assignments for test services
|
||||
|
||||
**Decision**:
|
||||
- `postgres-test`: host port 5433 (standard offset from dev 5432)
|
||||
- `minio-test` API: host port 9002 (offset from dev 9000)
|
||||
- `minio-test` console: host port 9003 (offset from dev 9001)
|
||||
|
||||
**Rationale**: Predictable offsets make it easy to remember. Developers running both stacks simultaneously won't hit port conflicts.
|
||||
|
||||
---
|
||||
|
||||
## Decision 4: S3 isolation strategy for tests
|
||||
|
||||
**Decision**: The `api-test` service sets `S3_BUCKET_NAME=reactbin-test` pointing to the dedicated `minio-test` instance. The `minio-init-test` sidecar creates that bucket before tests run.
|
||||
|
||||
**Rationale**: The existing conftest already manages database isolation via `create_all` / `drop_all`. MinIO requires bucket pre-creation (same as dev). A dedicated test bucket on a dedicated test MinIO instance gives full isolation. No changes to application storage code are needed.
|
||||
|
||||
---
|
||||
|
||||
## Decision 5: Makefile vs shell scripts
|
||||
|
||||
**Decision**: A `Makefile` at the repo root with `test-unit` and `test-integration` targets.
|
||||
|
||||
**Rationale**: `make` is universally available on Linux/macOS developer machines. The targets are short wrappers that document the canonical test invocation. No build logic; just convenience aliases.
|
||||
|
||||
**Alternatives considered**:
|
||||
- Shell scripts (`scripts/test.sh`) — no discoverability; `make help` is more ergonomic.
|
||||
- `package.json` scripts — wrong tool for a Python/Docker project.
|
||||
- `justfile` — not universally installed.
|
||||
95
specs/008-postgres-integration-tests/spec.md
Normal file
95
specs/008-postgres-integration-tests/spec.md
Normal file
@@ -0,0 +1,95 @@
|
||||
# Feature Specification: PostgreSQL Integration Test Infrastructure
|
||||
|
||||
**Feature Branch**: `008-postgres-integration-tests`
|
||||
**Created**: 2026-05-06
|
||||
**Status**: Draft
|
||||
|
||||
---
|
||||
|
||||
## Overview
|
||||
|
||||
Integration tests currently permit any SQLAlchemy-compatible database URL, including SQLite. This allowed a real production bug (incorrect `HAVING` without `GROUP BY`) to ship undetected because SQLite's permissive dialect did not reject it. The project constitution (§2.5, §5.2 v1.3.0) now explicitly mandates PostgreSQL for integration tests. This feature enforces that mandate with infrastructure and guardrails.
|
||||
|
||||
---
|
||||
|
||||
## User Scenarios & Testing
|
||||
|
||||
### User Story 1 — Integration tests are enforced to run against PostgreSQL (Priority: P1)
|
||||
|
||||
A developer running `pytest` against a non-PostgreSQL database URL receives an immediate, descriptive failure before any test runs.
|
||||
|
||||
**Why this priority**: Directly addresses the production bug that prompted this feature. Without this, the constitution mandate has no teeth.
|
||||
|
||||
**Independent Test**: Set `TEST_DATABASE_URL=sqlite+aiosqlite:///test.db` and run `pytest api/tests/integration/`. Confirm pytest exits immediately with a message identifying the dialect problem and naming the required scheme.
|
||||
|
||||
**Acceptance Scenarios**:
|
||||
|
||||
1. **Given** `TEST_DATABASE_URL` is set to a SQLite URL, **When** `pytest api/tests/integration/` is invoked, **Then** pytest exits before collecting any test with an error message identifying the required `postgresql+asyncpg://` scheme.
|
||||
2. **Given** `DATABASE_URL` is unset and `TEST_DATABASE_URL` is unset, **When** pytest is invoked, **Then** pytest exits with a clear message about the missing database URL.
|
||||
3. **Given** `TEST_DATABASE_URL` is a valid `postgresql+asyncpg://` URL, **When** pytest is invoked, **Then** tests collect and run normally.
|
||||
|
||||
---
|
||||
|
||||
### User Story 2 — One-command integration test run against isolated services (Priority: P1)
|
||||
|
||||
A developer can run the entire integration test suite against dedicated, isolated PostgreSQL and MinIO instances with a single command.
|
||||
|
||||
**Why this priority**: Without this, the PostgreSQL requirement is mandated but impractical — developers have no easy way to satisfy it.
|
||||
|
||||
**Independent Test**: From the repo root with Docker available, run `docker compose -f docker-compose.test.yml run --rm api-test`. Confirm all integration tests pass, test containers start and stop cleanly, and dev database/bucket are untouched.
|
||||
|
||||
**Acceptance Scenarios**:
|
||||
|
||||
1. **Given** Docker is running and dev services are stopped, **When** the test command is run, **Then** isolated `postgres-test` and `minio-test` services start, all tests run against them, and the command exits with pytest's return code.
|
||||
2. **Given** dev services are running on their normal ports, **When** the test command is run, **Then** test services use different ports (5433, 9002/9003) and do not interfere with the dev stack.
|
||||
3. **Given** any test data is written during the run, **When** the test run completes, **Then** the entire test schema is dropped (conftest teardown is unchanged).
|
||||
|
||||
---
|
||||
|
||||
### User Story 3 — Test infrastructure is documented (Priority: P2)
|
||||
|
||||
A developer new to the project can understand how to run unit tests vs integration tests without reading the source code.
|
||||
|
||||
**Independent Test**: Read `.env.test.example` and `Makefile`. Confirm all required environment variables are documented and `make test-unit` / `make test-integration` targets are present.
|
||||
|
||||
**Acceptance Scenarios**:
|
||||
|
||||
1. **Given** a fresh clone, **When** the developer reads `.env.test.example`, **Then** they see every variable needed to run integration tests outside Docker, with example values.
|
||||
2. **Given** the Makefile, **When** the developer runs `make test-unit`, **Then** the pytest unit suite runs without requiring Docker.
|
||||
3. **Given** the Makefile, **When** the developer runs `make test-integration`, **Then** the Docker Compose test command runs.
|
||||
|
||||
---
|
||||
|
||||
### Edge Cases
|
||||
|
||||
- What if `TEST_DATABASE_URL` is set but malformed? — The guard should still catch a non-PostgreSQL scheme; asyncpg will raise its own error for a malformed URL.
|
||||
- What if Docker is not available? — `make test-integration` fails at the Docker level with Docker's own error; the Makefile does not need to guard for this.
|
||||
- What if the test PostgreSQL port (5433) is already in use? — Standard Docker port conflict error; no special handling needed.
|
||||
|
||||
---
|
||||
|
||||
## Requirements
|
||||
|
||||
### Functional Requirements
|
||||
|
||||
- **FR-001**: `conftest.py` MUST assert the resolved database URL starts with `postgresql+asyncpg://` and call `pytest.exit()` with a descriptive message before any test collects.
|
||||
- **FR-002**: A `docker-compose.test.yml` MUST define isolated `postgres-test` (port 5433) and `minio-test` (ports 9002/9003) services and an `api-test` runner service.
|
||||
- **FR-003**: The `api-test` service MUST set `TEST_DATABASE_URL` pointing to `postgres-test` and all S3 env vars pointing to `minio-test`.
|
||||
- **FR-004**: A `.env.test.example` MUST document all environment variables required to run integration tests outside Docker.
|
||||
- **FR-005**: A `Makefile` MUST provide `test-unit` and `test-integration` targets.
|
||||
|
||||
---
|
||||
|
||||
## Success Criteria
|
||||
|
||||
- **SC-001**: Running `pytest api/tests/integration/` with a SQLite URL exits in under 2 seconds with a clear error message — no tests run.
|
||||
- **SC-002**: `docker compose -f docker-compose.test.yml run --rm api-test` completes successfully with all integration tests passing.
|
||||
- **SC-003**: Dev services (postgres on 5432, minio on 9000) are unaffected when the test command runs.
|
||||
|
||||
---
|
||||
|
||||
## Assumptions
|
||||
|
||||
- Docker Compose v2 (`docker compose`) is available in the developer environment.
|
||||
- The existing `conftest.py` `engine` fixture (session-scoped `create_all` / `drop_all`) continues to handle schema lifecycle; no per-test transaction rollback mechanism is introduced.
|
||||
- CI/CD pipeline configuration is out of scope for this feature.
|
||||
113
specs/008-postgres-integration-tests/tasks.md
Normal file
113
specs/008-postgres-integration-tests/tasks.md
Normal file
@@ -0,0 +1,113 @@
|
||||
# Tasks: PostgreSQL Integration Test Infrastructure
|
||||
|
||||
**Input**: Design documents from `specs/008-postgres-integration-tests/`
|
||||
**Prerequisites**: plan.md ✅, spec.md ✅, research.md ✅, quickstart.md ✅
|
||||
|
||||
**Tests**: TDD is non-negotiable (§5.1). For infrastructure tasks the "failing test" is a verification step that confirms the thing being built is absent before building it, then confirms it works after. Every user story has an explicit TDD red step before its implementation task.
|
||||
|
||||
**Organization**: No foundational blocking phase — all three user stories touch independent files and can proceed in order.
|
||||
|
||||
## Format: `[ID] [P?] [Story] Description`
|
||||
|
||||
- **[P]**: Can run in parallel with other [P] tasks in the same phase
|
||||
- **[Story]**: Which user story this task belongs to
|
||||
- Exact file paths included in every task description
|
||||
|
||||
---
|
||||
|
||||
## Phase 1: Setup
|
||||
|
||||
No new project structure required. The existing layout accommodates all changes.
|
||||
|
||||
---
|
||||
|
||||
## Phase 2: User Story 1 — Dialect guard in conftest (Priority: P1) 🎯 MVP
|
||||
|
||||
**Goal**: `pytest api/tests/integration/` exits immediately with a clear message if the database URL is not `postgresql+asyncpg://`.
|
||||
|
||||
**Independent Test**: Run `TEST_DATABASE_URL=sqlite+aiosqlite:///test.db python -m pytest api/tests/integration/ -q` — command exits in < 2 s with the error message `Integration tests require postgresql+asyncpg://` and no tests are collected.
|
||||
|
||||
- [X] T001 [US1] Confirm guard is absent (TDD red): from `api/`, run `TEST_DATABASE_URL=sqlite+aiosqlite:///test.db python -m pytest tests/integration/ -q --co 2>&1 | head -20` — observe that tests ARE collected and note the count (guard not yet in place)
|
||||
- [X] T002 [US1] Add `pytest_configure` hook to `api/tests/integration/conftest.py` — resolve URL via `os.getenv("TEST_DATABASE_URL") or os.getenv("DATABASE_URL", "")`, call `pytest.exit("Integration tests require postgresql+asyncpg://...", returncode=1)` if URL does not start with `postgresql+asyncpg://`; place hook before any imports that depend on the database URL
|
||||
- [X] T003 [US1] Verify guard works (TDD green): run `TEST_DATABASE_URL=sqlite+aiosqlite:///test.db python -m pytest api/tests/integration/ -q` — confirm immediate exit with the correct error message and zero tests collected; also confirm a valid `postgresql+asyncpg://` URL does not trigger the guard
|
||||
|
||||
**Checkpoint**: Dialect-mismatched test runs are blocked before any test collects.
|
||||
|
||||
---
|
||||
|
||||
## Phase 3: User Story 2 — Docker Compose test stack (Priority: P1)
|
||||
|
||||
**Goal**: `docker compose -f docker-compose.test.yml run --rm api-test` runs the full integration suite against isolated PostgreSQL and MinIO services on different ports than the dev stack.
|
||||
|
||||
**Independent Test**: Run `docker compose -f docker-compose.test.yml run --rm api-test` from the repo root — all tests pass; verify `docker compose ps` shows dev services (if running) are unaffected on their original ports.
|
||||
|
||||
- [X] T004 [US2] Confirm compose file is absent (TDD red): run `test -f docker-compose.test.yml && echo EXISTS || echo ABSENT` — confirm output is `ABSENT`
|
||||
- [X] T005 [US2] Create `docker-compose.test.yml` at the repo root with four services: `postgres-test` (image `postgres:16-alpine`, host port 5433, db `reactbin_test`), `minio-test` (image `minio/minio:latest`, host ports 9002/9003), `minio-init-test` (creates bucket `reactbin-test`, depends on `minio-test` healthy), and `api-test` (builds from `./api`, runs `python -m pytest tests/ -v`, depends on `postgres-test` healthy and `minio-init-test` completed, environment sets `TEST_DATABASE_URL=postgresql+asyncpg://reactbin:reactbin@postgres-test:5432/reactbin_test`, `DATABASE_URL` to same value, and all S3 vars pointing to `minio-test:9000` with bucket `reactbin-test`) — follow exact design in `specs/008-postgres-integration-tests/plan.md`
|
||||
- [X] T006 [US2] Verify compose stack (TDD green): run `docker compose -f docker-compose.test.yml run --rm api-test` — confirm all integration tests pass; confirm no errors about missing env vars or connection failures
|
||||
|
||||
**Checkpoint**: Full integration suite runs against real PostgreSQL via one command.
|
||||
|
||||
---
|
||||
|
||||
## Phase 4: User Story 3 — Test documentation (Priority: P2)
|
||||
|
||||
**Goal**: `.env.test.example` and `Makefile` document how to run both test tiers.
|
||||
|
||||
**Independent Test**: Read `.env.test.example` — all variables needed for integration tests are present with example values. Run `make test-unit` — pytest unit suite runs without Docker and passes.
|
||||
|
||||
- [X] T007 [P] [US3] Create `.env.test.example` at the repo root documenting all variables required to run integration tests outside Docker: `TEST_DATABASE_URL`, `DATABASE_URL`, `S3_ENDPOINT_URL`, `S3_BUCKET_NAME`, `S3_ACCESS_KEY_ID`, `S3_SECRET_ACCESS_KEY`, `S3_REGION`, `JWT_SECRET_KEY`, `OWNER_USERNAME`, `OWNER_PASSWORD`, `API_BASE_URL`, `MAX_UPLOAD_BYTES` — with example values pointing to `localhost:5433` and `localhost:9002` (test service ports); include a comment explaining how to start test services first — follow exact design in `specs/008-postgres-integration-tests/plan.md`
|
||||
- [X] T008 [P] [US3] Create `Makefile` at the repo root with `.PHONY: test-unit test-integration`, `test-unit` target running `cd api && python -m pytest tests/unit/ -v`, and `test-integration` target running `docker compose -f docker-compose.test.yml run --rm api-test`
|
||||
- [X] T009 [US3] Verify `make test-unit` — unit tests pass without Docker (validates the Makefile target and confirms unit tests have no Docker dependency)
|
||||
- [X] T010 Verify `make test-integration` — Docker integration suite passes end-to-end (cross-story verification: exercises the US2 compose stack via the US3 Makefile target)
|
||||
|
||||
**Checkpoint**: All three user stories independently functional.
|
||||
|
||||
---
|
||||
|
||||
## Phase 5: Polish & Cross-Cutting Concerns
|
||||
|
||||
- [X] T011 Run `ruff check api/app/ api/tests/` — zero violations (conftest change must pass ruff; fix any issues)
|
||||
|
||||
---
|
||||
|
||||
## Dependencies & Execution Order
|
||||
|
||||
### Phase Dependencies
|
||||
|
||||
- **Phase 2 (US1)**: No external dependencies — can start immediately
|
||||
- **Phase 3 (US2)**: Depends on Phase 2 (guard must be in place so the compose stack run exercises it)
|
||||
- **Phase 4 (US3)**: T007 and T008 are independent file writes (can run in parallel with each other after Phase 3); T009 requires T008; T010 requires T008 and T006
|
||||
- **Phase 5 (Polish)**: Depends on all prior phases
|
||||
|
||||
### Within Phase 4
|
||||
|
||||
- T007 ∥ T008 (different files, no dependency)
|
||||
- T009 after T008 (Makefile must exist)
|
||||
- T010 after T008 and T006 (requires both Makefile and compose stack)
|
||||
|
||||
### Execution Order Summary
|
||||
|
||||
```
|
||||
Step 1: T001, T002, T003 (sequential — TDD for guard)
|
||||
Step 2: T004, T005, T006 (sequential — TDD for compose stack)
|
||||
Step 3 (parallel): T007, T008
|
||||
Step 4: T009 (after T008), T010 (after T008 + T006)
|
||||
Step 5: T011
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## Implementation Strategy
|
||||
|
||||
### MVP (US1 — the guard)
|
||||
|
||||
1. Complete T001–T003
|
||||
2. **Validate**: SQLite URL is blocked; PostgreSQL URL proceeds
|
||||
3. US2 and US3 add the infrastructure to make the mandate practical
|
||||
|
||||
### Incremental Delivery
|
||||
|
||||
- After Phase 2: Dialect bugs are caught immediately — core safety net is in place
|
||||
- After Phase 3: Full integration suite runs against PostgreSQL via one Docker command
|
||||
- After Phase 4: Both test tiers are documented and accessible via `make`
|
||||
- After Phase 5: Lint clean, ready for merge
|
||||
34
specs/009-login-rate-limiting/checklists/requirements.md
Normal file
34
specs/009-login-rate-limiting/checklists/requirements.md
Normal file
@@ -0,0 +1,34 @@
|
||||
# Specification Quality Checklist: Login Brute-Force Protection
|
||||
|
||||
**Purpose**: Validate specification completeness and quality before proceeding to planning
|
||||
**Created**: 2026-05-06
|
||||
**Feature**: [spec.md](../spec.md)
|
||||
|
||||
## Content Quality
|
||||
|
||||
- [X] No implementation details (languages, frameworks, APIs)
|
||||
- [X] Focused on user value and business needs
|
||||
- [X] Written for non-technical stakeholders
|
||||
- [X] All mandatory sections completed
|
||||
|
||||
## Requirement Completeness
|
||||
|
||||
- [X] No [NEEDS CLARIFICATION] markers remain
|
||||
- [X] Requirements are testable and unambiguous
|
||||
- [X] Success criteria are measurable
|
||||
- [X] Success criteria are technology-agnostic (no implementation details)
|
||||
- [X] All acceptance scenarios are defined
|
||||
- [X] Edge cases are identified
|
||||
- [X] Scope is clearly bounded
|
||||
- [X] Dependencies and assumptions identified
|
||||
|
||||
## Feature Readiness
|
||||
|
||||
- [X] All functional requirements have clear acceptance criteria
|
||||
- [X] User scenarios cover primary flows
|
||||
- [X] Feature meets measurable outcomes defined in Success Criteria
|
||||
- [X] No implementation details leak into specification
|
||||
|
||||
## Notes
|
||||
|
||||
- All items pass. Spec is ready for `/speckit-plan`.
|
||||
85
specs/009-login-rate-limiting/contracts/auth.md
Normal file
85
specs/009-login-rate-limiting/contracts/auth.md
Normal file
@@ -0,0 +1,85 @@
|
||||
# API Contract: Authentication
|
||||
|
||||
## POST /api/v1/auth/token
|
||||
|
||||
Authenticates the owner and returns a JWT access token.
|
||||
|
||||
**This endpoint is modified by feature 009** to enforce brute-force protection.
|
||||
All previous behaviour is preserved. One new response code (429) is added.
|
||||
|
||||
### Request
|
||||
|
||||
```
|
||||
POST /api/v1/auth/token
|
||||
Content-Type: application/json
|
||||
```
|
||||
|
||||
```json
|
||||
{
|
||||
"username": "string",
|
||||
"password": "string"
|
||||
}
|
||||
```
|
||||
|
||||
### Responses
|
||||
|
||||
#### 200 OK — Credentials accepted
|
||||
|
||||
```json
|
||||
{
|
||||
"access_token": "<jwt>",
|
||||
"token_type": "bearer",
|
||||
"expires_in": 86400
|
||||
}
|
||||
```
|
||||
|
||||
Side effect: resets the failure counter for the caller's IP address.
|
||||
|
||||
---
|
||||
|
||||
#### 401 Unauthorized — Credentials rejected
|
||||
|
||||
```json
|
||||
{
|
||||
"detail": "Invalid credentials",
|
||||
"code": "invalid_credentials"
|
||||
}
|
||||
```
|
||||
|
||||
Side effect: increments the failure counter for the caller's IP address. If the
|
||||
counter reaches `LOGIN_MAX_FAILURES`, subsequent requests from this IP will receive
|
||||
429 until the cooldown expires.
|
||||
|
||||
---
|
||||
|
||||
#### 429 Too Many Requests — Source blocked after repeated failures
|
||||
|
||||
**This response is new in feature 009.**
|
||||
|
||||
```
|
||||
HTTP/1.1 429 Too Many Requests
|
||||
Retry-After: 900
|
||||
Content-Type: application/json
|
||||
```
|
||||
|
||||
```json
|
||||
{
|
||||
"detail": "Too many failed login attempts. Please try again later.",
|
||||
"code": "login_rate_limited"
|
||||
}
|
||||
```
|
||||
|
||||
The `Retry-After` header value is the configured cooldown duration in seconds (default: 900).
|
||||
It reflects the maximum possible wait, not the exact remaining lockout time.
|
||||
|
||||
No credentials are verified when this response is returned — the request is
|
||||
rejected before authentication is attempted.
|
||||
|
||||
---
|
||||
|
||||
### Notes
|
||||
|
||||
- The failure counter is keyed by the resolved client IP address: the TCP peer by default, with forwarded headers (`X-Forwarded-For` / `X-Real-IP`) honoured only when the peer is a trusted proxy configured via `LOGIN_TRUSTED_PROXY_IPS`.
|
||||
- Threshold values (`LOGIN_MAX_FAILURES`, `LOGIN_WINDOW_SECONDS`, `LOGIN_COOLDOWN_SECONDS`)
|
||||
are not disclosed in any response.
|
||||
- Counters are in-memory and reset on process restart.
|
||||
53
specs/009-login-rate-limiting/data-model.md
Normal file
53
specs/009-login-rate-limiting/data-model.md
Normal file
@@ -0,0 +1,53 @@
|
||||
# Data Model: Login Brute-Force Protection
|
||||
|
||||
## Overview
|
||||
|
||||
This feature introduces no new database tables. The only data entity is a transient,
|
||||
in-memory rate-limit record that does not survive process restarts. This is intentional
|
||||
(see research.md Decision 3).
|
||||
|
||||
---
|
||||
|
||||
## Entity: Rate-Limit Record (in-memory only)
|
||||
|
||||
| Field | Type | Description |
|
||||
|----------------|---------|-----------------------------------------------------------------------------|
|
||||
| `failures` | int | Count of consecutive failed login attempts in the current window |
|
||||
| `window_start` | float | Unix timestamp marking when the current counting window began |
|
||||
| `blocked_until`| float | Unix timestamp after which the source is no longer blocked; 0.0 if not blocked |
|
||||
|
||||
**Keyed by**: resolved client IP address string (e.g., `"192.168.1.1"`); see `get_client_ip()` in `rate_limiter.py` for resolution logic
|
||||
|
||||
**Lifecycle**:
|
||||
1. Record is created on the first failed login from a source.
|
||||
2. `failures` increments on each subsequent failure within the window.
|
||||
3. When `failures >= LOGIN_MAX_FAILURES`, `blocked_until` is set to `now + LOGIN_COOLDOWN_SECONDS`.
|
||||
4. When `blocked_until` has passed, the record is deleted on the next request from that source.
|
||||
5. A successful login deletes the record immediately (failure counter reset).
|
||||
6. If `now - window_start > LOGIN_WINDOW_SECONDS` without triggering lockout, the counter resets within the existing record.
|
||||
|
||||
**State machine**:
|
||||
|
||||
```
|
||||
[no record]
|
||||
│ first failure
|
||||
▼
|
||||
[tracking] ──── failure N ≥ max ────► [blocked]
|
||||
│ │
|
||||
│ success / window expires │ cooldown expires
|
||||
▼ ▼
|
||||
[no record] ◄─────────────────────── [no record]
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## Configuration Entity: Rate-Limit Settings
|
||||
|
||||
Stored as environment variables; loaded via `app.config.Settings`:
|
||||
|
||||
| Env Var | Default | Description |
|
||||
|----------------------------|---------|----------------------------------------------------------|
|
||||
| `LOGIN_MAX_FAILURES` | `5` | Failures within window before lockout |
|
||||
| `LOGIN_WINDOW_SECONDS` | `300` | Rolling window duration in seconds (5 minutes) |
|
||||
| `LOGIN_COOLDOWN_SECONDS` | `900` | Lockout duration in seconds after threshold exceeded (15 minutes) |
|
||||
| `LOGIN_TRUSTED_PROXY_IPS` | `""` | Comma-separated IPs/CIDRs of trusted upstream proxies (e.g., `10.0.0.0/8`); empty = disabled |
|
||||
388
specs/009-login-rate-limiting/plan.md
Normal file
388
specs/009-login-rate-limiting/plan.md
Normal file
@@ -0,0 +1,388 @@
|
||||
# Implementation Plan: Login Brute-Force Protection
|
||||
|
||||
**Branch**: `009-login-rate-limiting` | **Date**: 2026-05-06 | **Spec**: [spec.md](spec.md)
|
||||
**Input**: Feature specification from `specs/009-login-rate-limiting/spec.md`
|
||||
|
||||
## Summary
|
||||
|
||||
Add failure-counting brute-force protection to the login endpoint (`POST /api/v1/auth/token`).
|
||||
After a configurable number of consecutive failed attempts from the same resolved client IP,
|
||||
the endpoint returns HTTP 429 with a `Retry-After` header for a configurable cooldown period.
|
||||
A successful login resets the counter. All thresholds are configurable via environment variables.
|
||||
When deployed behind a reverse proxy (nginx, Kubernetes ingress), a `LOGIN_TRUSTED_PROXY_IPS`
|
||||
setting enables extraction of the real client IP from `X-Forwarded-For`. No new infrastructure
|
||||
(no Redis, no new DB table) — counters live in process memory.
|
||||
|
||||
---
|
||||
|
||||
## Technical Context
|
||||
|
||||
**Language/Version**: Python 3.12+
|
||||
**Primary Dependencies**: FastAPI, pydantic-settings (already in use); no new dependencies added
|
||||
**Storage**: In-memory `dict` (no persistence across restarts — intentional)
|
||||
**Testing**: pytest + pytest-asyncio (existing test infrastructure)
|
||||
**Target Platform**: Linux server (Docker)
|
||||
**Project Type**: Web service (API only — this feature has no UI surface)
|
||||
**Performance Goals**: Rate limiter adds negligible overhead (dict lookup + lock acquisition; sub-millisecond)
|
||||
**Constraints**: Must not add new runtime service dependencies; must not change any auth behaviour for non-blocked sources
|
||||
**Scale/Scope**: Single process, single user; in-memory store is sufficient
|
||||
|
||||
---
|
||||
|
||||
## Constitution Check
|
||||
|
||||
| Principle | Status | Notes |
|
||||
|-----------|--------|-------|
|
||||
| §2.4 Auth abstraction (AuthProvider interface) | ✅ Pass | Rate limiter is a guard *before* `JWTAuthProvider.verify_credentials()`, not a bypass of the interface |
|
||||
| §2.5 DB abstraction (repository layer) | ✅ Pass | No database access; in-memory only |
|
||||
| §2.6 No speculative abstraction | ✅ Pass | Concrete `LoginRateLimiter` class, no interface; only one implementation planned |
|
||||
| §3.3 Error envelope (`detail` + `code`) | ✅ Pass | 429 response uses `{"detail": "...", "code": "login_rate_limited"}` |
|
||||
| §5.1 TDD | ✅ Required | Tasks follow red → green order |
|
||||
| §5.2 Integration tests against PostgreSQL | ✅ Pass | Integration test for the login endpoint will run against the Docker PostgreSQL stack |
|
||||
| §7.2 Environment configuration | ✅ Pass | `LOGIN_MAX_FAILURES`, `LOGIN_WINDOW_SECONDS`, `LOGIN_COOLDOWN_SECONDS`, `LOGIN_TRUSTED_PROXY_IPS` from env vars |
|
||||
| §7.3 Linting (ruff) | ✅ Required | All new files must pass `ruff check` |
|
||||
|
||||
**Gate result**: No violations. Cleared to proceed.
|
||||
|
||||
---
|
||||
|
||||
## Project Structure
|
||||
|
||||
### Documentation (this feature)
|
||||
|
||||
```text
|
||||
specs/009-login-rate-limiting/
|
||||
├── plan.md ← this file
|
||||
├── research.md ← decisions on approach
|
||||
├── data-model.md ← rate-limit record entity
|
||||
├── quickstart.md ← curl runbook
|
||||
├── contracts/
|
||||
│ └── auth.md ← updated POST /api/v1/auth/token with 429
|
||||
└── tasks.md ← generated by /speckit-tasks
|
||||
```
|
||||
|
||||
### Source Code Changes
|
||||
|
||||
```text
|
||||
api/
|
||||
├── app/
|
||||
│ ├── auth/
|
||||
│ │ ├── rate_limiter.py ← NEW: LoginRateLimiter class
|
||||
│ │ ├── jwt_provider.py (unchanged)
|
||||
│ │ ├── noop.py (unchanged)
|
||||
│ │ └── provider.py (unchanged)
|
||||
│ ├── config.py ← add login_max_failures, login_window_seconds, login_cooldown_seconds, login_trusted_proxy_ips
|
||||
│ ├── main.py ← init LoginRateLimiter in lifespan, attach to app.state
|
||||
│ └── routers/
|
||||
│ └── auth.py ← check rate limit before auth, record outcome
|
||||
└── tests/
|
||||
├── unit/
|
||||
│ └── test_rate_limiter.py ← NEW: unit tests for LoginRateLimiter logic
|
||||
└── integration/
|
||||
└── test_login_rate_limit.py ← NEW: integration tests for 429 behaviour via HTTP
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## Implementation Detail
|
||||
|
||||
### `api/app/auth/rate_limiter.py`
|
||||
|
||||
```python
|
||||
import ipaddress
|
||||
import logging
|
||||
import time
|
||||
from dataclasses import dataclass, field
|
||||
from ipaddress import IPv4Network, IPv6Network
|
||||
from threading import Lock
|
||||
|
||||
from starlette.requests import Request
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
def get_client_ip(
|
||||
request: Request,
|
||||
trusted_networks: list[IPv4Network | IPv6Network],
|
||||
) -> str:
|
||||
"""Return the resolved client IP, honouring X-Forwarded-For when the
|
||||
TCP peer is a trusted upstream proxy. Falls back to the TCP peer address
|
||||
when no trusted networks are configured or the peer is not in the list."""
|
||||
peer = request.client.host if request.client else "unknown"
|
||||
if trusted_networks and peer != "unknown":
|
||||
try:
|
||||
peer_addr = ipaddress.ip_address(peer)
|
||||
if any(peer_addr in net for net in trusted_networks):
|
||||
xff = request.headers.get("X-Forwarded-For", "").split(",")[0].strip()
|
||||
if xff:
|
||||
return xff
|
||||
real_ip = request.headers.get("X-Real-IP", "").strip()
|
||||
if real_ip:
|
||||
return real_ip
|
||||
except ValueError:
|
||||
pass
|
||||
return peer
|
||||
|
||||
|
||||
@dataclass
class _Record:
    """Per-source tracking state. Timestamps are Unix seconds (``time.time()``)."""

    failures: int = 0  # consecutive failed attempts in the current window
    window_start: float = field(default_factory=time.time)  # when the window began
    blocked_until: float = 0.0  # unblock time; 0.0 means "not blocked"


class LoginRateLimiter:
    """Thread-safe, in-memory login-failure counter keyed by client IP.

    After ``max_failures`` failures within ``window_seconds`` the IP is
    blocked for ``cooldown_seconds``; a successful login clears the counter.
    State is process-local and deliberately not persisted (counters reset on
    restart). Stale records (expired cooldown OR lapsed counting window) are
    evicted on the IP's next ``is_blocked`` check; records for IPs that never
    return linger until restart — acceptable for a single-user deployment.
    """

    _log = logging.getLogger(__name__)

    def __init__(
        self,
        max_failures: int = 5,
        window_seconds: int = 300,
        cooldown_seconds: int = 900,
    ) -> None:
        self._max = max_failures
        self._window = window_seconds
        self._cooldown = cooldown_seconds
        self._store: dict[str, _Record] = {}
        self._lock = Lock()

    @property
    def cooldown_seconds(self) -> int:
        """Configured cooldown duration in seconds (used for ``Retry-After``)."""
        return self._cooldown

    def is_blocked(self, ip: str) -> bool:
        """Return True while ``ip`` is inside its cooldown period.

        Side effect: evicts the record once its cooldown has been served or
        its counting window has lapsed without a lockout, so entries from
        returning IPs do not accumulate indefinitely.
        """
        now = time.time()
        with self._lock:
            rec = self._store.get(ip)
            if rec is None:
                return False
            if rec.blocked_until > now:
                return True
            if rec.blocked_until > 0 or now - rec.window_start > self._window:
                # Cooldown served, or window lapsed below threshold —
                # either way the record is stale; drop it.
                del self._store[ip]
            return False

    def record_failure(self, ip: str) -> None:
        """Count one failed login from ``ip``; start the cooldown at threshold."""
        now = time.time()
        with self._lock:
            rec = self._store.get(ip)
            if rec is None:
                rec = _Record(window_start=now)
                self._store[ip] = rec
            if now - rec.window_start > self._window:
                # Window lapsed without lockout: restart the count.
                rec.failures = 0
                rec.window_start = now
            rec.failures += 1
            if rec.failures >= self._max:
                rec.blocked_until = now + self._cooldown
                self._log.warning(
                    "Login blocked for %s after %d failures", ip, rec.failures
                )

    def record_success(self, ip: str) -> None:
        """Reset the failure counter for ``ip`` after a successful login."""
        with self._lock:
            self._store.pop(ip, None)
|
||||
```
|
||||
|
||||
### `api/app/config.py` additions
|
||||
|
||||
```python
|
||||
# Login brute-force protection (see specs/009-login-rate-limiting/).
login_max_failures: int = 5  # failures within the window before lockout
login_window_seconds: int = 300  # counting-window duration (5 minutes)
login_cooldown_seconds: int = 900  # lockout duration after threshold (15 minutes)
login_trusted_proxy_ips: str = ""  # comma-separated IPs/CIDRs; empty = disabled
|
||||
```
|
||||
|
||||
### `api/app/main.py` lifespan update
|
||||
|
||||
```python
|
||||
import ipaddress
import logging

from app.auth.rate_limiter import LoginRateLimiter


@asynccontextmanager
async def lifespan(application: FastAPI):
    """Application lifespan: build the login rate limiter and trusted-proxy
    network list from settings, then run the existing DB setup."""
    settings = get_settings()
    application.state.login_rate_limiter = LoginRateLimiter(
        max_failures=settings.login_max_failures,
        window_seconds=settings.login_window_seconds,
        cooldown_seconds=settings.login_cooldown_seconds,
    )
    trusted_networks = []
    for part in settings.login_trusted_proxy_ips.split(","):
        part = part.strip()
        if part:
            try:
                trusted_networks.append(ipaddress.ip_network(part, strict=False))
            except ValueError:
                # A typo here would silently disable proxy-aware IP
                # resolution, so surface it instead of skipping quietly.
                logging.getLogger(__name__).warning(
                    "Ignoring invalid LOGIN_TRUSTED_PROXY_IPS entry: %r", part
                )
    application.state.login_trusted_networks = trusted_networks
    # ... existing DB setup unchanged
    engine = get_engine()
    async with engine.begin() as conn:
        await conn.run_sync(Base.metadata.create_all)
    yield
    await engine.dispose()
|
||||
|
||||
### `api/app/routers/auth.py` update
|
||||
|
||||
```python
|
||||
from fastapi import APIRouter, Depends, HTTPException, Request
|
||||
from fastapi.responses import JSONResponse
|
||||
from pydantic import BaseModel
|
||||
|
||||
from app.auth.jwt_provider import JWTAuthProvider
|
||||
from app.auth.rate_limiter import LoginRateLimiter, get_client_ip
|
||||
from app.dependencies import get_jwt_auth
|
||||
|
||||
router = APIRouter(tags=["auth"])
|
||||
|
||||
|
||||
class LoginRequest(BaseModel):
    """Request body for POST /api/v1/auth/token (see contracts/auth.md)."""

    username: str
    password: str


class TokenResponse(BaseModel):
    """Successful login response body (200 OK in contracts/auth.md)."""

    access_token: str
    token_type: str = "bearer"  # always "bearer"
    expires_in: int  # token lifetime in seconds
|
||||
|
||||
|
||||
@router.post("/auth/token", response_model=TokenResponse)
async def login(
    request: Request,
    body: LoginRequest,
    auth: JWTAuthProvider = Depends(get_jwt_auth),
):
    """Authenticate the owner and return a JWT access token.

    Order matters: the rate-limit check runs *before* credential
    verification, so a blocked source receives 429 without any credentials
    being examined (per contracts/auth.md).
    """
    limiter: LoginRateLimiter = request.app.state.login_rate_limiter
    # Resolve the caller's IP: TCP peer, or forwarded header when the peer
    # is a trusted proxy (see get_client_ip).
    ip: str = get_client_ip(request, request.app.state.login_trusted_networks)

    if limiter.is_blocked(ip):
        # 429 path: constitution §3.3 error envelope + Retry-After header
        # set to the configured cooldown (maximum wait, not exact remaining).
        return JSONResponse(
            status_code=429,
            content={
                "detail": "Too many failed login attempts. Please try again later.",
                "code": "login_rate_limited",
            },
            headers={"Retry-After": str(limiter.cooldown_seconds)},
        )

    if not auth.verify_credentials(body.username, body.password):
        limiter.record_failure(ip)  # counts toward this IP's lockout threshold
        raise HTTPException(
            status_code=401,
            detail={"detail": "Invalid credentials", "code": "invalid_credentials"},
        )

    limiter.record_success(ip)  # successful login clears this IP's counter
    token = auth.create_token()
    return TokenResponse(
        access_token=token,
        token_type="bearer",
        # NOTE(review): reaches into a private attribute of JWTAuthProvider —
        # consider exposing a public `expiry_seconds` property instead.
        expires_in=auth._expiry_seconds,
    )
|
||||
```
|
||||
|
||||
### `api/tests/unit/test_rate_limiter.py` (representative cases)
|
||||
|
||||
```python
|
||||
import time
import pytest
from app.auth.rate_limiter import LoginRateLimiter


def test_not_blocked_initially():
    # A source with no recorded failures must never be blocked.
    limiter = LoginRateLimiter(max_failures=3, window_seconds=60, cooldown_seconds=300)
    assert limiter.is_blocked("1.2.3.4") is False


def test_blocked_after_threshold():
    # Reaching max_failures flips the source into the blocked state.
    limiter = LoginRateLimiter(max_failures=3, window_seconds=60, cooldown_seconds=300)
    for _ in range(3):
        limiter.record_failure("1.2.3.4")
    assert limiter.is_blocked("1.2.3.4") is True


def test_success_clears_failures():
    # A successful login resets the counter before the threshold is reached.
    limiter = LoginRateLimiter(max_failures=3, window_seconds=60, cooldown_seconds=300)
    limiter.record_failure("1.2.3.4")
    limiter.record_failure("1.2.3.4")
    limiter.record_success("1.2.3.4")
    assert limiter.is_blocked("1.2.3.4") is False


def test_ips_are_isolated():
    # Failures recorded against one IP must not affect another.
    limiter = LoginRateLimiter(max_failures=2, window_seconds=60, cooldown_seconds=300)
    limiter.record_failure("1.1.1.1")
    limiter.record_failure("1.1.1.1")
    assert limiter.is_blocked("2.2.2.2") is False
|
||||
```
|
||||
|
||||
### `api/tests/integration/test_login_rate_limit.py` (representative cases)
|
||||
|
||||
```python
|
||||
import pytest
from httpx import AsyncClient

# Uses the 'client' fixture from conftest. NOTE(review): the fixture is
# described as using NoOpAuthProvider — if that provider accepts *any*
# credentials, BAD_CREDS would never produce failures and this test could
# not reach 429. Confirm the endpoint resolves JWTAuthProvider (via
# get_jwt_auth) under the fixture so bad credentials really yield 401.
# The login endpoint reads its limiter from app.state, so we need the
# full ASGI app.

BAD_CREDS = {"username": "attacker", "password": "wrong"}


@pytest.mark.asyncio
async def test_repeated_failures_trigger_429(client: AsyncClient):
    # Swap in a limiter with a low threshold so the test stays fast; the
    # original is restored in 'finally' to avoid leaking state into other
    # tests.
    from app.auth.rate_limiter import LoginRateLimiter
    from app.main import app
    original = app.state.login_rate_limiter
    app.state.login_rate_limiter = LoginRateLimiter(
        max_failures=3, window_seconds=60, cooldown_seconds=30
    )
    try:
        for _ in range(3):
            await client.post("/api/v1/auth/token", json=BAD_CREDS)
        resp = await client.post("/api/v1/auth/token", json=BAD_CREDS)
        assert resp.status_code == 429
        assert resp.json()["code"] == "login_rate_limited"
        assert "Retry-After" in resp.headers
    finally:
        app.state.login_rate_limiter = original
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## Implementation Phases
|
||||
|
||||
### Phase 1 (MVP — P1): Blocking after repeated failures
|
||||
|
||||
1. Add `login_max_failures`, `login_window_seconds`, `login_cooldown_seconds`, `login_trusted_proxy_ips` to `api/app/config.py`
|
||||
2. Create `api/app/auth/rate_limiter.py` with `LoginRateLimiter` and `get_client_ip()`
|
||||
3. Initialize rate limiter and parse trusted networks in `api/app/main.py` lifespan; attach both to `app.state`
|
||||
4. Update `api/app/routers/auth.py` to resolve client IP via `get_client_ip()`, then check + record outcomes
|
||||
5. Unit tests: `api/tests/unit/test_rate_limiter.py`
|
||||
6. Integration tests: `api/tests/integration/test_login_rate_limit.py`
|
||||
|
||||
### Phase 2 (US2 — observability): Logging and response hints
|
||||
|
||||
Delivered as part of Phase 1 (the `logger.warning(...)` call and `Retry-After` header
|
||||
are embedded in the same implementation). No separate phase needed.
|
||||
|
||||
---
|
||||
|
||||
## Environment Variables to Add to `.env.example`
|
||||
|
||||
```dotenv
|
||||
# Login brute-force protection
|
||||
LOGIN_MAX_FAILURES=5
|
||||
LOGIN_WINDOW_SECONDS=300
|
||||
LOGIN_COOLDOWN_SECONDS=900
|
||||
# Comma-separated IPs/CIDRs of trusted upstream proxies (e.g. nginx ingress pod CIDR).
|
||||
# Leave empty when not behind a reverse proxy.
|
||||
LOGIN_TRUSTED_PROXY_IPS=
|
||||
```
|
||||
|
||||
These are optional (have defaults) so existing `.env` files without them continue working.
|
||||
112
specs/009-login-rate-limiting/quickstart.md
Normal file
112
specs/009-login-rate-limiting/quickstart.md
Normal file
@@ -0,0 +1,112 @@
|
||||
# Quickstart: Login Brute-Force Protection
|
||||
|
||||
## Prerequisites
|
||||
|
||||
- API running (via `docker compose up` or locally with `.env` set)
|
||||
- `curl` available
|
||||
|
||||
---
|
||||
|
||||
## Scenario 1: Trigger the rate limiter
|
||||
|
||||
Send 6 consecutive failed login attempts (default threshold is 5):
|
||||
|
||||
```bash
|
||||
for i in $(seq 1 6); do
|
||||
echo "Attempt $i:"
|
||||
curl -s -o /dev/null -w "%{http_code}\n" \
|
||||
-X POST http://localhost:8000/api/v1/auth/token \
|
||||
-H "Content-Type: application/json" \
|
||||
-d '{"username": "wrong", "password": "wrong"}'
|
||||
done
|
||||
```
|
||||
|
||||
Expected output:
|
||||
```
|
||||
Attempt 1: 401
|
||||
Attempt 2: 401
|
||||
Attempt 3: 401
|
||||
Attempt 4: 401
|
||||
Attempt 5: 401
|
||||
Attempt 6: 429
|
||||
```
|
||||
|
||||
The 6th attempt returns 429. Inspect the headers:
|
||||
|
||||
```bash
|
||||
curl -i -X POST http://localhost:8000/api/v1/auth/token \
|
||||
-H "Content-Type: application/json" \
|
||||
-d '{"username": "wrong", "password": "wrong"}'
|
||||
```
|
||||
|
||||
Expected headers include:
|
||||
```
|
||||
HTTP/1.1 429 Too Many Requests
|
||||
Retry-After: 900
|
||||
```
|
||||
|
||||
Expected body:
|
||||
```json
|
||||
{"detail": "Too many failed login attempts. Please try again later.", "code": "login_rate_limited"}
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## Scenario 2: Successful login resets the counter
|
||||
|
||||
Make some failed attempts, then log in with valid credentials:
|
||||
|
||||
```bash
|
||||
# Fail twice
|
||||
for i in 1 2; do
|
||||
curl -s -o /dev/null -w "fail $i: %{http_code}\n" \
|
||||
-X POST http://localhost:8000/api/v1/auth/token \
|
||||
-H "Content-Type: application/json" \
|
||||
-d '{"username": "wrong", "password": "wrong"}'
|
||||
done
|
||||
|
||||
# Succeed — resets counter
|
||||
curl -s -o /dev/null -w "success: %{http_code}\n" \
|
||||
-X POST http://localhost:8000/api/v1/auth/token \
|
||||
-H "Content-Type: application/json" \
|
||||
-d '{"username": "'"$OWNER_USERNAME"'", "password": "'"$OWNER_PASSWORD"'"}'
|
||||
|
||||
# Now fail 5 more times — counter was reset, so no 429 yet
|
||||
for i in $(seq 1 5); do
|
||||
curl -s -o /dev/null -w "fail after reset $i: %{http_code}\n" \
|
||||
-X POST http://localhost:8000/api/v1/auth/token \
|
||||
-H "Content-Type: application/json" \
|
||||
-d '{"username": "wrong", "password": "wrong"}'
|
||||
done
|
||||
```
|
||||
|
||||
Expected: all "fail after reset" lines return 401 (not 429), confirming the counter was reset.
|
||||
|
||||
---
|
||||
|
||||
## Scenario 3: Observe log output
|
||||
|
||||
While triggering the rate limiter (Scenario 1), watch API logs:
|
||||
|
||||
```bash
|
||||
docker compose logs -f api
|
||||
```
|
||||
|
||||
After the threshold is crossed you should see a line like:
|
||||
|
||||
```
|
||||
WARNING app.auth.rate_limiter:rate_limiter.py:NN Login blocked for 172.18.0.1 after 5 failures
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## Environment variable overrides
|
||||
|
||||
To test with a lower threshold without code changes:
|
||||
|
||||
```bash
|
||||
LOGIN_MAX_FAILURES=2 LOGIN_WINDOW_SECONDS=60 LOGIN_COOLDOWN_SECONDS=30 \
|
||||
uvicorn app.main:app --reload
|
||||
```
|
||||
|
||||
Then only 2 failures trigger the lockout, and it clears after 30 seconds.
|
||||
67
specs/009-login-rate-limiting/research.md
Normal file
67
specs/009-login-rate-limiting/research.md
Normal file
@@ -0,0 +1,67 @@
|
||||
# Research: Login Brute-Force Protection
|
||||
|
||||
## Decision 1: Library vs. custom implementation
|
||||
|
||||
**Decision**: Custom in-memory failure tracker (no new library dependency)
|
||||
|
||||
**Rationale**: The requirement is to count *failed* login attempts specifically and reset on success — not to rate-limit all requests regardless of outcome. Popular libraries like `slowapi` count all requests to a route, which would break FR-004 (reset on success) without significant workarounds. A purpose-built 60-line class is simpler, more auditable, and has no dependency footprint.
|
||||
|
||||
**Alternatives considered**:
|
||||
- `slowapi` (built on `limits`): Counts all requests, not failures. Requires patching the exception handler to decrement on success — fragile and non-obvious.
|
||||
- `slowapi` with a custom key function: Could be done, but the library's storage model doesn't expose a "reset this key" API in a clean way.
|
||||
- Redis-backed counter: Overkill for a single-user personal app with one instance. No new infrastructure justified.
|
||||
|
||||
---
|
||||
|
||||
## Decision 2: Fixed window vs. sliding window
|
||||
|
||||
**Decision**: Fixed window with per-source reset on successful login
|
||||
|
||||
**Rationale**: Fixed window is simpler to implement correctly and sufficient for this use case. The main attack — rapid sequential guessing — is fully addressed. The known "burst at window boundary" weakness is irrelevant here because: (a) the cooldown period is separate from the counting window, and (b) a successful login resets the counter entirely.
|
||||
|
||||
**Alternatives considered**:
|
||||
- Sliding window: More accurate, but adds complexity (requires storing timestamps of each request). The marginal security benefit doesn't justify the implementation cost for a personal single-user app.
|
||||
|
||||
---
|
||||
|
||||
## Decision 3: In-memory backing store
|
||||
|
||||
**Decision**: Python `dict` keyed by source IP, protected by a threading `Lock`
|
||||
|
||||
**Rationale**: The application runs as a single process. In-memory storage means counters reset on restart — this is acceptable and matches the "fail open" assumption in the spec. No new infrastructure (Redis, database table) is required.
|
||||
|
||||
**Alternatives considered**:
|
||||
- Database-backed counters: Persistent across restarts, but adds a DB round-trip to every login request (including successful ones). Not justified.
|
||||
- Redis: Distributed-safe and persistent, but requires a new service dependency. Out of scope for a personal single-instance app.
|
||||
|
||||
---
|
||||
|
||||
## Decision 4: Source identifier
|
||||
|
||||
**Decision**: `request.client.host` (the TCP peer address) by default; forwarded headers are honoured only when the peer matches a trusted proxy configured via `LOGIN_TRUSTED_PROXY_IPS`
|
||||
|
||||
**Rationale**: The spec explicitly states not to trust `X-Forwarded-For` headers unless the app is known to be behind a trusted proxy. `request.client.host` in Starlette/FastAPI is the actual TCP peer IP — it cannot be spoofed by an attacker sending arbitrary headers.
|
||||
|
||||
**Alternatives considered**:
|
||||
- `X-Forwarded-For` first value: Spoofable if the app is not behind a trusted proxy (attacker can set arbitrary header values).
|
||||
- `X-Real-IP`: Same spoofing concern.
|
||||
|
||||
---
|
||||
|
||||
## Decision 5: 429 response and Retry-After header
|
||||
|
||||
**Decision**: Return HTTP 429 with `{"detail": "...", "code": "login_rate_limited"}` and a `Retry-After` header set to the configured cooldown duration in seconds
|
||||
|
||||
**Rationale**: HTTP 429 is the standard "Too Many Requests" status. The `Retry-After` header is explicitly mentioned in the spec (US2 acceptance scenario) and is explicitly permitted by RFC 6585 for rate-limit responses (the RFC states a 429 response MAY include it). Setting it to the *configured* cooldown (not the exact remaining time) satisfies FR-005: it doesn't reveal precise expiry, just the maximum wait. The response body follows §3.3 of the constitution (error envelope with `detail` and `code`).
|
||||
|
||||
---
|
||||
|
||||
## Decision 6: Default threshold values
|
||||
|
||||
**Decision**: `LOGIN_MAX_FAILURES=5`, `LOGIN_WINDOW_SECONDS=300` (5 min), `LOGIN_COOLDOWN_SECONDS=900` (15 min)
|
||||
|
||||
**Rationale**: Industry standard for web apps. 5 attempts is enough for legitimate typos but makes brute-force infeasible at human scale. A 5-minute counting window matches typical "I fat-fingered my password" retry patterns. A 15-minute cooldown is a meaningful deterrent without locking out a legitimate owner indefinitely.
|
||||
|
||||
**Alternatives considered**:
|
||||
- 3 failures / 60 s window / 300 s cooldown: More aggressive, but too likely to lock out the legitimate owner on a bad day.
|
||||
- 10 failures: Too permissive for a brute-force defense.
|
||||
84
specs/009-login-rate-limiting/spec.md
Normal file
84
specs/009-login-rate-limiting/spec.md
Normal file
@@ -0,0 +1,84 @@
|
||||
# Feature Specification: Login Brute-Force Protection
|
||||
|
||||
**Feature Branch**: `009-login-rate-limiting`
|
||||
**Created**: 2026-05-06
|
||||
**Status**: Draft
|
||||
**Input**: User description: "Login API endpoints should be rate limited or otherwise protected against brute force attacks"
|
||||
|
||||
## User Scenarios & Testing *(mandatory)*
|
||||
|
||||
### User Story 1 - Repeated failed logins are blocked (Priority: P1)
|
||||
|
||||
An attacker (or misconfigured client) sending many rapid login attempts with the wrong password is slowed or blocked before they can exhaustively guess credentials. After a threshold number of consecutive failures from the same source, the system refuses further attempts for a cooldown period and returns a clear, non-leaking error.
|
||||
|
||||
**Why this priority**: Directly prevents credential-stuffing and brute-force attacks against the sole privileged account. Without this, the owner account is exposed to automated password guessing with no friction.
|
||||
|
||||
**Independent Test**: Send more than the allowed number of failed login requests in quick succession and confirm that subsequent attempts are rejected with a rate-limit or lockout response — without knowing or changing the real password.
|
||||
|
||||
**Acceptance Scenarios**:
|
||||
|
||||
1. **Given** an attacker sends N+1 failed login attempts within the configured window, **When** the (N+1)th request arrives, **Then** the system returns an error response indicating the request is blocked (not the normal "invalid credentials" error) and does not process the login attempt.
|
||||
2. **Given** a legitimate user has been temporarily blocked after too many failures, **When** the cooldown period elapses and they retry with the correct password, **Then** they are logged in successfully.
|
||||
3. **Given** a legitimate user makes a few failed attempts and then waits beyond the cooldown window, **When** they retry within the next window, **Then** their failure counter resets and they are not blocked.
|
||||
|
||||
---
|
||||
|
||||
### User Story 2 - Operators can observe and reason about blocking activity (Priority: P2)
|
||||
|
||||
When the protection triggers, the system produces enough observable signal (log entries, response metadata) that an operator can confirm the feature is working, diagnose false positives, and tune thresholds — without exposing sensitive details to the client.
|
||||
|
||||
**Why this priority**: Invisible security controls are unmanageable. Operators need to know the system is doing what it claims, and blocked legitimate users need a clear (but not exploitable) explanation.
|
||||
|
||||
**Independent Test**: Trigger the rate limiter and confirm that: (a) the response body or headers communicate that the request was blocked and when the client may retry; (b) the server logs an entry identifying the blocked source and the reason.
|
||||
|
||||
**Acceptance Scenarios**:
|
||||
|
||||
1. **Given** a source is blocked, **When** they receive the rejection response, **Then** the response indicates they should wait before retrying (e.g., a `Retry-After` hint) without disclosing the exact threshold values.
|
||||
2. **Given** the rate limiter fires, **When** an operator inspects server logs, **Then** there is a log entry at WARNING level or above recording the blocked source and timestamp.
|
||||
|
||||
---
|
||||
|
||||
### Edge Cases
|
||||
|
||||
- What happens when a distributed attacker rotates IPs to avoid per-IP limits?
|
||||
- How does the system behave if the backing store for rate-limit counters is temporarily unavailable — does it fail open (allow all) or fail closed (block all)?
|
||||
- Are IPv6 addresses and IPv4-mapped-IPv6 addresses treated consistently?
|
||||
- Does a successful login reset the failure counter for that source?
|
||||
- What happens if many legitimate users share a NAT/proxy IP (e.g., corporate network)?
|
||||
- What if `TRUSTED_PROXY_IPS` is configured to include an IP that an external attacker controls? (An attacker could then spoof `X-Forwarded-For` and rotate fake source IPs to bypass the rate limiter — operators must only list genuinely trusted upstream infrastructure.)
|
||||
|
||||
## Requirements *(mandatory)*
|
||||
|
||||
### Functional Requirements
|
||||
|
||||
- **FR-001**: The system MUST enforce a maximum number of failed login attempts per source identifier (the resolved client IP address) within a rolling time window before blocking further attempts.
|
||||
- **FR-002**: Once a source exceeds the failure threshold, the system MUST reject subsequent login requests for a configurable cooldown period, returning a distinct response (not the normal invalid-credentials response).
|
||||
- **FR-003**: After the cooldown period expires, the system MUST permit the source to attempt login again, resetting its failure count.
|
||||
- **FR-004**: A successful login MUST reset the failure counter for that source, preventing accumulation of old failures from blocking future legitimate access.
|
||||
- **FR-005**: The rejection response MUST NOT reveal the specific threshold values or remaining lockout duration in a way that aids an attacker in timing their attempts, but MUST provide enough information (e.g., "try again later") for a legitimate user to understand the situation.
|
||||
- **FR-006**: The system MUST log a structured warning event whenever a source is blocked, including the source identifier and timestamp.
|
||||
- **FR-007**: Rate-limit thresholds (maximum attempts, window duration, cooldown duration) MUST be configurable without code changes.
|
||||
- **FR-008**: The system MUST support a configurable list of trusted upstream proxy IP addresses and CIDR ranges. When the TCP peer address matches a trusted proxy, the resolved client IP MUST be extracted from the `X-Forwarded-For` request header (first entry) or, if absent, `X-Real-IP`. When no trusted proxies are configured, the TCP peer address MUST be used directly and forwarded-IP headers MUST be ignored.
|
||||
|
||||
### Key Entities
|
||||
|
||||
- **Rate-limit record**: Tracks the number of consecutive failures and the window start time for a given source identifier; expires automatically after the cooldown period.
|
||||
- **Source identifier**: The resolved client IP address used to key rate-limit records. When `LOGIN_TRUSTED_PROXY_IPS` is empty (default), this is the TCP peer address. When one or more proxy IPs/CIDRs are configured and the TCP peer matches, the first `X-Forwarded-For` entry (or `X-Real-IP`) is used instead.
|
||||
|
||||
## Success Criteria *(mandatory)*
|
||||
|
||||
### Measurable Outcomes
|
||||
|
||||
- **SC-001**: An automated script sending 100 consecutive failed login requests completes with at least 90 of those requests rejected after the threshold is crossed — verified in a controlled test environment.
|
||||
- **SC-002**: A legitimate user who has been temporarily blocked can successfully log in within 5 minutes of the cooldown period expiring without any manual intervention.
|
||||
- **SC-003**: Zero information about threshold values or exact lockout expiry is present in blocked response bodies or headers.
|
||||
- **SC-004**: Every blocking event produces a corresponding log entry; 100% of triggered blocking events are observable in logs during testing.
|
||||
|
||||
## Assumptions
|
||||
|
||||
- The application has a single login endpoint used by all clients (the owner login introduced in feature 004).
|
||||
- Source identification uses the resolved client IP address. By default (when `LOGIN_TRUSTED_PROXY_IPS` is empty) this is the TCP peer address. When one or more proxy IPs/CIDRs are configured, the first entry of `X-Forwarded-For` (or `X-Real-IP`) is used instead — but only when the TCP peer is in the trusted list, preventing header spoofing by external clients.
|
||||
- If the rate-limit backing store is unavailable, the system fails open (allows the attempt through) rather than blocking all logins — this preserves the owner's access, which is critical for a single-user admin application.
|
||||
- No CAPTCHA or multi-factor step is in scope; protection is purely count/time-based.
|
||||
- The feature targets the login endpoint only; other endpoints are out of scope.
|
||||
- The single-user nature of the app means IP-based identification is sufficient — there is no need for per-username lockout, and using IP (rather than username) avoids contributing to username enumeration risk.
|
||||
120
specs/009-login-rate-limiting/tasks.md
Normal file
120
specs/009-login-rate-limiting/tasks.md
Normal file
@@ -0,0 +1,120 @@
|
||||
# Tasks: Login Brute-Force Protection
|
||||
|
||||
**Input**: Design documents from `specs/009-login-rate-limiting/`
|
||||
**Prerequisites**: plan.md ✅, spec.md ✅, research.md ✅, data-model.md ✅, contracts/auth.md ✅, quickstart.md ✅
|
||||
|
||||
**Tests**: TDD is non-negotiable (§5.1). Every test task appears before the implementation task it covers. For each red step, run the test and confirm it fails before proceeding to the implementation.
|
||||
|
||||
**Organization**: Phase 1 adds env vars; Phase 2 adds config fields (shared by both stories); Phase 3 implements the core blocking behaviour (US1 MVP); Phase 4 adds observability-specific test coverage (US2); Phase 5 is polish.
|
||||
|
||||
## Format: `[ID] [P?] [Story] Description`
|
||||
|
||||
- **[P]**: Can run in parallel with other [P] tasks in the same phase
|
||||
- **[Story]**: Which user story this task belongs to
|
||||
- Exact file paths included in every task description
|
||||
|
||||
---
|
||||
|
||||
## Phase 1: Setup
|
||||
|
||||
- [X] T001 Add a `# Login brute-force protection` comment block with `LOGIN_MAX_FAILURES=5`, `LOGIN_WINDOW_SECONDS=300`, `LOGIN_COOLDOWN_SECONDS=900`, and `LOGIN_TRUSTED_PROXY_IPS=` (empty by default, with an inline comment explaining it accepts comma-separated IPs/CIDRs) to both `.env.example` and `.env.test.example` at the repo root
|
||||
|
||||
---
|
||||
|
||||
## Phase 2: Foundational
|
||||
|
||||
**Purpose**: Add the four new settings fields — required before any story implementation.
|
||||
|
||||
- [X] T002 Add `login_max_failures: int = 5`, `login_window_seconds: int = 300`, `login_cooldown_seconds: int = 900`, `login_trusted_proxy_ips: str = ""` to the `Settings` class in `api/app/config.py` (append after `owner_password`)
|
||||
|
||||
**Checkpoint**: `api/app/config.py` accepts all four new env vars with defaults.
|
||||
|
||||
---
|
||||
|
||||
## Phase 3: User Story 1 — Repeated failed logins are blocked (Priority: P1) 🎯 MVP
|
||||
|
||||
**Goal**: After `LOGIN_MAX_FAILURES` consecutive failed login attempts from the same source IP within `LOGIN_WINDOW_SECONDS`, `POST /api/v1/auth/token` returns HTTP 429 for `LOGIN_COOLDOWN_SECONDS`. A successful login resets the counter.
|
||||
|
||||
**Independent Test**: `cd api && python -m pytest tests/unit/test_rate_limiter.py tests/integration/test_login_rate_limit.py::test_repeated_failures_trigger_429 tests/integration/test_login_rate_limit.py::test_success_resets_counter tests/integration/test_login_rate_limit.py::test_429_has_retry_after_header tests/integration/test_login_rate_limit.py::test_xff_header_ignored_when_no_trusted_networks -v` — all pass.
|
||||
|
||||
### Tests for User Story 1 (TDD red — write first, confirm failure before T005)
|
||||
|
||||
- [X] T003 [P] [US1] Create `api/tests/unit/test_rate_limiter.py` with ten failing unit tests — import `LoginRateLimiter` and `get_client_ip` from `app.auth.rate_limiter`; for `LoginRateLimiter` (instantiate with `max_failures=3, window_seconds=60, cooldown_seconds=300`): `test_not_blocked_initially`, `test_blocked_after_threshold`, `test_success_clears_failures`, `test_ips_are_isolated`, `test_window_resets_after_expiry`, `test_log_warning_on_lockout` (caplog at WARNING level: call `record_failure()` until threshold, assert `"Login blocked" in caplog.text` and IP in log output); for `get_client_ip` (construct a mock using `from unittest.mock import MagicMock` and `from starlette.requests import Request`: `req = MagicMock(spec=Request); req.client.host = "10.0.0.1"; req.headers = {"X-Forwarded-For": "203.0.113.5"}`): `test_get_client_ip_no_trusted_networks_returns_peer` (empty `trusted_networks=[]` → returns `req.client.host`), `test_get_client_ip_trusted_peer_uses_xff` (peer `"10.0.0.1"` in trusted CIDR `"10.0.0.0/8"` → returns `"203.0.113.5"`), `test_get_client_ip_untrusted_peer_ignores_xff` (peer `"8.8.8.8"` not in trusted CIDR `"10.0.0.0/8"` → returns `"8.8.8.8"` despite XFF), `test_get_client_ip_trusted_peer_falls_back_to_real_ip` (peer trusted, no XFF header, `X-Real-IP: "203.0.113.9"` → returns `"203.0.113.9"`); run `python -m pytest tests/unit/test_rate_limiter.py -v` and confirm `ImportError` or `ModuleNotFoundError` (red)
|
||||
- [X] T004 [P] [US1] Create `api/tests/integration/test_login_rate_limit.py` with four failing integration tests; each must override both `app.state.login_rate_limiter` (fresh `LoginRateLimiter(max_failures=3, window_seconds=60, cooldown_seconds=30)`) and `app.state.login_trusted_networks` (set to `[]` for all four tests — the `ASGITransport` peer is `"testclient"`, not a valid IP, so trusted-network matching can't be exercised here; proxy extraction is fully covered by T003 unit tests) via try/finally: (1) `test_repeated_failures_trigger_429` — POST three bad-credential requests then assert fourth returns 429 with `resp.json()["code"] == "login_rate_limited"`; (2) `test_success_resets_counter` — two failures → one valid login using `{"username": os.environ["OWNER_USERNAME"], "password": os.environ["OWNER_PASSWORD"]}` (matching conftest.py defaults: `testowner`/`testpassword`) → three more failures → assert all three return 401, not 429; (3) `test_429_has_retry_after_header` — trigger lockout (three failures), then assert `"Retry-After" in resp.headers` and `int(resp.headers["Retry-After"]) > 0`; (4) `test_xff_header_ignored_when_no_trusted_networks` — send three bad-cred requests with `headers={"X-Forwarded-For": "1.2.3.4"}` then a fourth with `headers={"X-Forwarded-For": "9.9.9.9"}` — assert the fourth returns 429 (not 401), proving the limiter tracked the real peer `"testclient"` for all requests and XFF was ignored; run `python -m pytest tests/integration/test_login_rate_limit.py -v` and confirm failure (red)
|
||||
|
||||
### Implementation for User Story 1
|
||||
|
||||
- [X] T005 [US1] Create `api/app/auth/rate_limiter.py` with two exports: (1) `get_client_ip(request: Request, trusted_networks: list[IPv4Network | IPv6Network]) -> str` — imports `ipaddress`, `from ipaddress import IPv4Network, IPv6Network`, `from starlette.requests import Request`; extracts `peer = request.client.host if request.client else "unknown"`; if `trusted_networks` is non-empty and peer is parseable as an IP address and falls within any trusted network, returns first `X-Forwarded-For` entry (strip whitespace) or `X-Real-IP` value, otherwise returns `peer`; wraps `ipaddress.ip_address(peer)` in `try/except ValueError` and falls back to `peer` on error; (2) `LoginRateLimiter` class: `__init__(self, max_failures: int = 5, window_seconds: int = 300, cooldown_seconds: int = 900)` storing params as `_max`, `_window`, `_cooldown`; `_store: dict[str, _Record]` and `_lock: threading.Lock`; `@dataclass _Record` with `failures: int = 0`, `window_start: float = field(default_factory=time.time)`, `blocked_until: float = 0.0`; `is_blocked(ip: str) -> bool`, `record_failure(ip: str) -> None` (logs WARNING on lockout), `record_success(ip: str) -> None`, `cooldown_seconds` property; stdlib imports: `import ipaddress, logging, time`, `from dataclasses import dataclass, field`, `from threading import Lock`
|
||||
- [X] T006 [US1] Update `api/app/main.py` lifespan: add `import ipaddress` at top; import `LoginRateLimiter` from `app.auth.rate_limiter`; inside `lifespan` before `engine = get_engine()`, consolidate to `settings = get_settings()` (remove the existing bare `get_settings()` call), then set `application.state.login_rate_limiter = LoginRateLimiter(max_failures=settings.login_max_failures, window_seconds=settings.login_window_seconds, cooldown_seconds=settings.login_cooldown_seconds)`; then parse `settings.login_trusted_proxy_ips` — split on `","`, strip each part, skip empty strings, call `ipaddress.ip_network(part, strict=False)` inside a `try/except ValueError` (skip invalid entries silently), collect results into `trusted_networks: list`; set `application.state.login_trusted_networks = trusted_networks`
|
||||
- [X] T007 [US1] Update `api/app/routers/auth.py` login endpoint: add `Request` to FastAPI imports and add `from fastapi.responses import JSONResponse`; add `from app.auth.rate_limiter import LoginRateLimiter, get_client_ip`; add `request: Request` as first parameter to `login()`; extract `limiter: LoginRateLimiter = request.app.state.login_rate_limiter` and `ip: str = get_client_ip(request, request.app.state.login_trusted_networks)`; add guard block — if `limiter.is_blocked(ip)`: return `JSONResponse(status_code=429, content={"detail": "Too many failed login attempts. Please try again later.", "code": "login_rate_limited"}, headers={"Retry-After": str(limiter.cooldown_seconds)})`; after `verify_credentials` returns False: call `limiter.record_failure(ip)` before the existing `HTTPException`; after `auth.create_token()`: call `limiter.record_success(ip)` before returning `TokenResponse`
|
||||
- [X] T008 [US1] Verify TDD green: run `cd api && python -m pytest tests/unit/test_rate_limiter.py -v` — all 10 pass; run `make test-integration` — all tests pass including `test_repeated_failures_trigger_429`, `test_success_resets_counter`, `test_429_has_retry_after_header`, and `test_xff_header_ignored_when_no_trusted_networks`
|
||||
|
||||
**Checkpoint**: Brute-force blocking is live. Automated repeated failures are stopped after threshold; the owner can still log in after cooldown; unit and integration tests pass.
|
||||
|
||||
---
|
||||
|
||||
## Phase 4: User Story 2 — Operators can observe blocking activity (Priority: P2)
|
||||
|
||||
**Goal**: The 429 response includes a `Retry-After` header with a positive integer; the response body `code` is `"login_rate_limited"` and contains no threshold numeric values; server logs a WARNING when blocking triggers.
|
||||
|
||||
**Independent Test**: Trigger the rate limiter (already works from Phase 3) and assert `Retry-After` header is present in the response and `code` field is `"login_rate_limited"`.
|
||||
|
||||
### Tests for User Story 2 (TDD red — extend existing file)
|
||||
|
||||
- [X] T009 [US2] Add one test to `api/tests/integration/test_login_rate_limit.py` targeting observability properties not yet covered: `test_429_body_shape` — override `app.state.login_rate_limiter` with a fresh `LoginRateLimiter(max_failures=3, window_seconds=60, cooldown_seconds=30)` via try/finally (same isolation pattern as T004), trigger lockout (three failures), then assert `resp.json() == {"detail": "Too many failed login attempts. Please try again later.", "code": "login_rate_limited"}` (exact match — confirms no threshold values leak and shape is correct); confirm this test is green immediately against the US1 implementation (T007 already returns this exact body)
|
||||
|
||||
**Checkpoint**: US2 observability properties are explicitly exercised by integration tests; a future regression in the Retry-After header or code field will be caught.
|
||||
|
||||
---
|
||||
|
||||
## Phase 5: Polish & Cross-Cutting Concerns
|
||||
|
||||
- [X] T010 Run `cd api && ruff check app/auth/rate_limiter.py app/routers/auth.py app/config.py app/main.py tests/unit/test_rate_limiter.py tests/integration/test_login_rate_limit.py` — fix any violations
|
||||
|
||||
---
|
||||
|
||||
## Dependencies & Execution Order
|
||||
|
||||
### Phase Dependencies
|
||||
|
||||
- **Phase 1 (Setup)**: No external dependencies — can start immediately
|
||||
- **Phase 2 (Foundational)**: No external dependencies — can start immediately (parallel with Phase 1)
|
||||
- **Phase 3 (US1)**: Depends on Phase 2 (T002 must exist before T006 can use `settings.login_max_failures`)
|
||||
- **Phase 4 (US2)**: Depends on Phase 3 (tests verify behaviour implemented in T007)
|
||||
- **Phase 5 (Polish)**: Depends on all prior phases
|
||||
|
||||
### Within Phase 3
|
||||
|
||||
- T003 ∥ T004 (different files, no dependency — write tests in parallel)
|
||||
- T005 after T003, T004 (implement after tests confirm they fail)
|
||||
- T006 ∥ T007 after T005 (both import from `rate_limiter.py`; write to different files — `main.py` and `auth.py`; T006 sets `app.state.login_trusted_networks` which T007's router reads)
|
||||
- T008 after T005, T006, T007 (verify all pass)
|
||||
|
||||
### Execution Order Summary
|
||||
|
||||
```
|
||||
Step 1: T001 ∥ T002 (setup + foundational — parallel, different files)
|
||||
Step 2: T003 ∥ T004 (write failing tests — parallel)
|
||||
Step 3: T005 (implement LoginRateLimiter — after red tests confirmed)
|
||||
Step 4: T006 ∥ T007 (wire limiter into app — parallel, different files)
|
||||
Step 5: T008 (verify green)
|
||||
Step 6: T009 (US2 observability tests — verify green immediately)
|
||||
Step 7: T010 (ruff clean)
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## Implementation Strategy
|
||||
|
||||
### MVP (US1 — the blocker)
|
||||
|
||||
1. Complete T001–T002 (config setup)
|
||||
2. Complete T003–T008 (core blocking)
|
||||
3. **Validate**: Run `make test-integration` — all 88 existing tests still pass; 4 new rate-limit integration tests pass
|
||||
4. US2 adds verification coverage for already-implemented observability features
|
||||
|
||||
### Incremental Delivery
|
||||
|
||||
- After Phase 3: Brute-force attacks on the login endpoint are blocked — core security net is in place
|
||||
- After Phase 4: Observability properties are explicitly tested — regressions in headers/logs will be caught
|
||||
- After Phase 5: Lint clean, ready for merge
|
||||
@@ -63,7 +63,7 @@ describe('AppComponent', () => {
|
||||
expect(btn).toBeNull();
|
||||
});
|
||||
|
||||
it('onLogout calls auth.logout and navigates to /login', () => {
|
||||
it('onLogout calls auth.logout and navigates to / (grid)', () => {
|
||||
authSpy.isAuthenticated.and.returnValue(true);
|
||||
const fixture = TestBed.createComponent(AppComponent);
|
||||
fixture.detectChanges();
|
||||
@@ -71,7 +71,16 @@ describe('AppComponent', () => {
|
||||
spyOn(router, 'navigate');
|
||||
fixture.componentInstance.onLogout();
|
||||
expect(authSpy.logout).toHaveBeenCalled();
|
||||
expect(router.navigate).toHaveBeenCalledWith(['/login']);
|
||||
expect(router.navigate).toHaveBeenCalledWith(['/']);
|
||||
});
|
||||
|
||||
it('header app-name is a link to /', () => {
|
||||
authSpy.isAuthenticated.and.returnValue(false);
|
||||
const fixture = TestBed.createComponent(AppComponent);
|
||||
fixture.detectChanges();
|
||||
const link = (fixture.nativeElement as HTMLElement).querySelector('a.app-name') as HTMLAnchorElement;
|
||||
expect(link).not.toBeNull();
|
||||
expect(link.getAttribute('href')).toBe('/');
|
||||
});
|
||||
|
||||
it('header height is 48px', () => {
|
||||
|
||||
@@ -1,15 +1,15 @@
|
||||
import { Component } from '@angular/core';
|
||||
import { CommonModule } from '@angular/common';
|
||||
import { Router, RouterOutlet } from '@angular/router';
|
||||
import { Router, RouterLink, RouterOutlet } from '@angular/router';
|
||||
import { AuthService } from './auth/auth.service';
|
||||
|
||||
@Component({
|
||||
selector: 'app-root',
|
||||
standalone: true,
|
||||
imports: [CommonModule, RouterOutlet],
|
||||
imports: [CommonModule, RouterLink, RouterOutlet],
|
||||
template: `
|
||||
<header class="app-header">
|
||||
<span class="app-name">Reactbin</span>
|
||||
<a routerLink="/" class="app-name">Reactbin</a>
|
||||
<button *ngIf="auth.isAuthenticated()" class="logout-btn" (click)="onLogout()">Sign out</button>
|
||||
</header>
|
||||
<router-outlet />
|
||||
@@ -25,7 +25,7 @@ import { AuthService } from './auth/auth.service';
|
||||
background: var(--surface);
|
||||
border-bottom: 1px solid var(--border);
|
||||
}
|
||||
.app-name { font-weight: 600; font-size: 1rem; color: var(--text); letter-spacing: 0.02em; }
|
||||
.app-name { font-weight: 600; font-size: 1rem; color: var(--text); letter-spacing: 0.02em; text-decoration: none; }
|
||||
.logout-btn {
|
||||
background: none;
|
||||
border: 1px solid var(--border);
|
||||
@@ -46,6 +46,6 @@ export class AppComponent {
|
||||
|
||||
onLogout(): void {
|
||||
this.auth.logout();
|
||||
this.router.navigate(['/login']);
|
||||
this.router.navigate(['/']);
|
||||
}
|
||||
}
|
||||
|
||||
@@ -18,6 +18,11 @@ export const routes: Routes = [
|
||||
loadComponent: () =>
|
||||
import('./upload/upload.component').then((m) => m.UploadComponent),
|
||||
},
|
||||
{
|
||||
path: 'tags',
|
||||
loadComponent: () =>
|
||||
import('./tags/tags.component').then((m) => m.TagsComponent),
|
||||
},
|
||||
{
|
||||
path: 'images/:id',
|
||||
loadComponent: () =>
|
||||
|
||||
@@ -1,5 +1,5 @@
|
||||
import { TestBed } from '@angular/core/testing';
|
||||
import { provideRouter } from '@angular/router';
|
||||
import { provideRouter, ActivatedRoute } from '@angular/router';
|
||||
import { provideHttpClient } from '@angular/common/http';
|
||||
import { provideHttpClientTesting } from '@angular/common/http/testing';
|
||||
import { of } from 'rxjs';
|
||||
@@ -7,6 +7,16 @@ import { LibraryComponent } from './library.component';
|
||||
import { ImageService } from '../services/image.service';
|
||||
import { routes } from '../app.routes';
|
||||
|
||||
function makeActivatedRoute(queryParams: Record<string, string> = {}) {
|
||||
return {
|
||||
snapshot: {
|
||||
queryParamMap: {
|
||||
get: (key: string) => queryParams[key] ?? null,
|
||||
},
|
||||
},
|
||||
};
|
||||
}
|
||||
|
||||
const EMPTY_PAGE = { items: [], total: 0, limit: 50, offset: 0 };
|
||||
const ONE_IMAGE = {
|
||||
items: [{ id: '1', filename: 'a.jpg', tags: ['cat'], hash: '', mime_type: 'image/jpeg', size_bytes: 1, width: 1, height: 1, storage_key: '', thumbnail_key: null, created_at: '' }],
|
||||
@@ -107,4 +117,32 @@ describe('LibraryComponent', () => {
|
||||
fixture.componentInstance.onImgError(event);
|
||||
expect(imgEl.src).toBe(originalSrc);
|
||||
});
|
||||
|
||||
it('pre-populates activeFilters from ?tags= query param on init', () => {
|
||||
TestBed.overrideProvider(ActivatedRoute, { useValue: makeActivatedRoute({ tags: 'cat,funny' }) });
|
||||
const fixture = TestBed.createComponent(LibraryComponent);
|
||||
const imgSvc = TestBed.inject(ImageService);
|
||||
const listSpy = spyOn(imgSvc, 'list').and.returnValue(of(EMPTY_PAGE));
|
||||
fixture.detectChanges();
|
||||
expect(fixture.componentInstance.activeFilters).toEqual(['cat', 'funny']);
|
||||
expect(listSpy).toHaveBeenCalledWith(['cat', 'funny'], jasmine.any(Number), jasmine.any(Number));
|
||||
});
|
||||
|
||||
it('does not set activeFilters when no ?tags= param present', () => {
|
||||
TestBed.overrideProvider(ActivatedRoute, { useValue: makeActivatedRoute() });
|
||||
const fixture = TestBed.createComponent(LibraryComponent);
|
||||
const imgSvc = TestBed.inject(ImageService);
|
||||
spyOn(imgSvc, 'list').and.returnValue(of(EMPTY_PAGE));
|
||||
fixture.detectChanges();
|
||||
expect(fixture.componentInstance.activeFilters).toEqual([]);
|
||||
});
|
||||
|
||||
it('header contains a link to /tags', () => {
|
||||
const fixture = TestBed.createComponent(LibraryComponent);
|
||||
const imgSvc = TestBed.inject(ImageService);
|
||||
spyOn(imgSvc, 'list').and.returnValue(of(EMPTY_PAGE));
|
||||
fixture.detectChanges();
|
||||
const link = (fixture.nativeElement as HTMLElement).querySelector('a[href="/tags"]');
|
||||
expect(link).not.toBeNull();
|
||||
});
|
||||
});
|
||||
|
||||
@@ -5,7 +5,7 @@ import {
|
||||
ChangeDetectorRef,
|
||||
} from '@angular/core';
|
||||
import { CommonModule } from '@angular/common';
|
||||
import { Router, RouterLink } from '@angular/router';
|
||||
import { Router, RouterLink, ActivatedRoute } from '@angular/router';
|
||||
import { Subject, debounceTime, distinctUntilChanged, share, timer } from 'rxjs';
|
||||
import { takeUntil } from 'rxjs/operators';
|
||||
import { ImageRecord, ImageService } from '../services/image.service';
|
||||
@@ -22,7 +22,10 @@ const PLACEHOLDER_SVG = `data:image/svg+xml,<svg xmlns="http://www.w3.org/2000/s
|
||||
<div class="library">
|
||||
<header>
|
||||
<h1>Reactbin</h1>
|
||||
<button class="upload-btn" (click)="router.navigate(['/upload'])">Upload</button>
|
||||
<div class="header-actions">
|
||||
<a routerLink="/tags" class="tags-link">Browse tags</a>
|
||||
<button class="upload-btn" (click)="router.navigate(['/upload'])">Upload</button>
|
||||
</div>
|
||||
</header>
|
||||
|
||||
<div class="filter-bar">
|
||||
@@ -88,6 +91,9 @@ const PLACEHOLDER_SVG = `data:image/svg+xml,<svg xmlns="http://www.w3.org/2000/s
|
||||
styles: [`
|
||||
.library { max-width: 1200px; margin: 0 auto; padding: 24px 16px; }
|
||||
header { display: flex; justify-content: space-between; align-items: center; margin-bottom: 20px; }
|
||||
.header-actions { display: flex; align-items: center; gap: 12px; }
|
||||
.tags-link { color: var(--text-muted); text-decoration: none; font-size: 0.9rem; transition: color var(--transition); }
|
||||
.tags-link:hover { color: var(--text); }
|
||||
.upload-btn { padding: 8px 20px; background: var(--accent); color: var(--accent-text); border: none; border-radius: var(--radius); cursor: pointer; font-weight: 600; }
|
||||
.filter-bar { position: relative; margin-bottom: 24px; }
|
||||
.filter-bar input { width: 100%; padding: 10px; background: var(--surface); border: 1px solid var(--border); color: var(--text); border-radius: var(--radius); }
|
||||
@@ -134,9 +140,14 @@ export class LibraryComponent implements OnInit {
|
||||
private tagService: TagService,
|
||||
public router: Router,
|
||||
private cdr: ChangeDetectorRef,
|
||||
private route: ActivatedRoute,
|
||||
) {}
|
||||
|
||||
ngOnInit(): void {
|
||||
const tagsParam = this.route.snapshot.queryParamMap.get('tags');
|
||||
if (tagsParam) {
|
||||
this.activeFilters = tagsParam.split(',').map((t) => t.trim()).filter((t) => t.length > 0);
|
||||
}
|
||||
this.load();
|
||||
this.filterChange$.pipe(debounceTime(300), distinctUntilChanged()).subscribe((q) => {
|
||||
if (q) {
|
||||
|
||||
@@ -30,4 +30,26 @@ describe('TagService', () => {
|
||||
expect(req.request.params.has('q')).toBeFalse();
|
||||
req.flush({ items: [], total: 0, limit: 100, offset: 0 });
|
||||
});
|
||||
|
||||
it('should include sort param when provided', () => {
|
||||
service.list('', 100, 0, 'count_desc').subscribe();
|
||||
const req = httpMock.expectOne((r) => r.url === '/api/v1/tags');
|
||||
expect(req.request.params.get('sort')).toBe('count_desc');
|
||||
req.flush({ items: [], total: 0, limit: 100, offset: 0 });
|
||||
});
|
||||
|
||||
it('should include min_count param when minCount is provided', () => {
|
||||
service.list('', 500, 0, 'count_desc', 1).subscribe();
|
||||
const req = httpMock.expectOne((r) => r.url === '/api/v1/tags');
|
||||
expect(req.request.params.get('min_count')).toBe('1');
|
||||
req.flush({ items: [], total: 0, limit: 500, offset: 0 });
|
||||
});
|
||||
|
||||
it('should omit sort and min_count when not provided', () => {
|
||||
service.list('cat').subscribe();
|
||||
const req = httpMock.expectOne((r) => r.url === '/api/v1/tags');
|
||||
expect(req.request.params.has('sort')).toBeFalse();
|
||||
expect(req.request.params.has('min_count')).toBeFalse();
|
||||
req.flush({ items: [], total: 0, limit: 100, offset: 0 });
|
||||
});
|
||||
});
|
||||
|
||||
@@ -21,11 +21,17 @@ export class TagService {
|
||||
|
||||
// Angular injects HttpClient; used by list() below for API requests.
constructor(private http: HttpClient) {}
|
||||
|
||||
list(prefix?: string, limit = 100, offset = 0): Observable<TagListResponse> {
|
||||
list(prefix = '', limit = 100, offset = 0, sort?: string, minCount?: number): Observable<TagListResponse> {
|
||||
let params = new HttpParams().set('limit', limit).set('offset', offset);
|
||||
if (prefix) {
|
||||
params = params.set('q', prefix);
|
||||
}
|
||||
if (sort) {
|
||||
params = params.set('sort', sort);
|
||||
}
|
||||
if (minCount !== undefined) {
|
||||
params = params.set('min_count', minCount);
|
||||
}
|
||||
return this.http.get<TagListResponse>(`${this.base}/tags`, { params });
|
||||
}
|
||||
}
|
||||
|
||||
102
ui/src/app/tags/tags.component.spec.ts
Normal file
102
ui/src/app/tags/tags.component.spec.ts
Normal file
@@ -0,0 +1,102 @@
|
||||
import { TestBed } from '@angular/core/testing';
|
||||
import { provideRouter } from '@angular/router';
|
||||
import { Subject, of, throwError } from 'rxjs';
|
||||
import { TagsComponent } from './tags.component';
|
||||
import { TagService, TagListResponse } from '../services/tag.service';
|
||||
import { routes } from '../app.routes';
|
||||
|
||||
const TAGS_PAGE = (items: { name: string; image_count: number }[]): TagListResponse => ({
|
||||
items: items.map((t, i) => ({ id: String(i), ...t })),
|
||||
total: items.length,
|
||||
limit: 500,
|
||||
offset: 0,
|
||||
});
|
||||
|
||||
describe('TagsComponent', () => {
|
||||
let tagSvc: jasmine.SpyObj<TagService>;
|
||||
|
||||
beforeEach(async () => {
|
||||
tagSvc = jasmine.createSpyObj('TagService', ['list']);
|
||||
|
||||
await TestBed.configureTestingModule({
|
||||
imports: [TagsComponent],
|
||||
providers: [
|
||||
{ provide: TagService, useValue: tagSvc },
|
||||
provideRouter(routes),
|
||||
],
|
||||
}).compileComponents();
|
||||
});
|
||||
|
||||
it('shows skeleton while loading', () => {
|
||||
// list() never resolves during this test
|
||||
tagSvc.list.and.returnValue(new Subject<never>().asObservable());
|
||||
const fixture = TestBed.createComponent(TagsComponent);
|
||||
fixture.componentInstance.showSpinner = true;
|
||||
fixture.detectChanges();
|
||||
expect((fixture.nativeElement as HTMLElement).querySelector('.skeleton')).not.toBeNull();
|
||||
});
|
||||
|
||||
it('renders tag list with name and count after load', () => {
|
||||
tagSvc.list.and.returnValue(of(TAGS_PAGE([
|
||||
{ name: 'cat', image_count: 5 },
|
||||
{ name: 'dog', image_count: 2 },
|
||||
])));
|
||||
const fixture = TestBed.createComponent(TagsComponent);
|
||||
fixture.detectChanges();
|
||||
const items = (fixture.nativeElement as HTMLElement).querySelectorAll('.tag-item');
|
||||
expect(items.length).toBe(2);
|
||||
expect(items[0].textContent).toContain('cat');
|
||||
expect(items[0].textContent).toContain('5');
|
||||
});
|
||||
|
||||
it('tags are ordered by count descending (service is called with count_desc)', () => {
|
||||
tagSvc.list.and.returnValue(of(TAGS_PAGE([])));
|
||||
const fixture = TestBed.createComponent(TagsComponent);
|
||||
fixture.detectChanges();
|
||||
expect(tagSvc.list).toHaveBeenCalledWith('', 500, 0, 'count_desc', 1);
|
||||
});
|
||||
|
||||
it('shows empty state when tag list is empty', () => {
|
||||
tagSvc.list.and.returnValue(of(TAGS_PAGE([])));
|
||||
const fixture = TestBed.createComponent(TagsComponent);
|
||||
fixture.detectChanges();
|
||||
expect((fixture.nativeElement as HTMLElement).querySelector('.empty-state')).not.toBeNull();
|
||||
});
|
||||
|
||||
it('shows error state on fetch failure', () => {
|
||||
tagSvc.list.and.returnValue(throwError(() => new Error('network')));
|
||||
const fixture = TestBed.createComponent(TagsComponent);
|
||||
fixture.detectChanges();
|
||||
expect((fixture.nativeElement as HTMLElement).querySelector('.error-card')).not.toBeNull();
|
||||
});
|
||||
|
||||
it('retry button in error state calls load again', () => {
|
||||
tagSvc.list.and.returnValue(throwError(() => new Error('network')));
|
||||
const fixture = TestBed.createComponent(TagsComponent);
|
||||
fixture.detectChanges();
|
||||
spyOn(fixture.componentInstance, 'load');
|
||||
const btn = (fixture.nativeElement as HTMLElement).querySelector('.error-card .retry-btn') as HTMLButtonElement;
|
||||
expect(btn).not.toBeNull();
|
||||
btn.click();
|
||||
expect(fixture.componentInstance.load).toHaveBeenCalled();
|
||||
});
|
||||
|
||||
it('each tag item links to /?tags=<tagname>', () => {
|
||||
tagSvc.list.and.returnValue(of(TAGS_PAGE([
|
||||
{ name: 'funny', image_count: 3 },
|
||||
])));
|
||||
const fixture = TestBed.createComponent(TagsComponent);
|
||||
fixture.detectChanges();
|
||||
const link = (fixture.nativeElement as HTMLElement).querySelector('.tag-item a') as HTMLAnchorElement;
|
||||
expect(link).not.toBeNull();
|
||||
expect(link.getAttribute('href')).toBe('/?tags=funny');
|
||||
});
|
||||
|
||||
it('renders without requiring authentication', () => {
|
||||
tagSvc.list.and.returnValue(of(TAGS_PAGE([{ name: 'test', image_count: 1 }])));
|
||||
// No AuthService injected — component must not depend on it
|
||||
const fixture = TestBed.createComponent(TagsComponent);
|
||||
expect(() => fixture.detectChanges()).not.toThrow();
|
||||
expect((fixture.nativeElement as HTMLElement).querySelector('.tag-item')).not.toBeNull();
|
||||
});
|
||||
});
|
||||
96
ui/src/app/tags/tags.component.ts
Normal file
96
ui/src/app/tags/tags.component.ts
Normal file
@@ -0,0 +1,96 @@
|
||||
import { Component, OnInit, ChangeDetectionStrategy, ChangeDetectorRef } from '@angular/core';
|
||||
import { CommonModule } from '@angular/common';
|
||||
import { RouterLink } from '@angular/router';
|
||||
import { TagRecord, TagService } from '../services/tag.service';
|
||||
|
||||
@Component({
|
||||
selector: 'app-tags',
|
||||
standalone: true,
|
||||
imports: [CommonModule, RouterLink],
|
||||
changeDetection: ChangeDetectionStrategy.OnPush,
|
||||
template: `
|
||||
<div class="tags-page">
|
||||
<header class="tags-header">
|
||||
<h1>Browse Tags</h1>
|
||||
<a routerLink="/" class="back-link">← Library</a>
|
||||
</header>
|
||||
|
||||
<!-- Skeleton -->
|
||||
<div *ngIf="showSpinner" class="tag-grid">
|
||||
<div *ngFor="let _ of skeletonItems" class="tag-item skeleton tag-skeleton"></div>
|
||||
</div>
|
||||
|
||||
<!-- Error -->
|
||||
<div *ngIf="error && !showSpinner" class="error-card">
|
||||
<p>Failed to load tags. Please check your connection.</p>
|
||||
<button class="retry-btn" (click)="load()">Retry</button>
|
||||
</div>
|
||||
|
||||
<!-- Empty -->
|
||||
<div *ngIf="!showSpinner && !error && tags.length === 0" class="empty-state">
|
||||
<span class="empty-icon">✦</span>
|
||||
<p>No tags yet. Upload some images and add tags to get started.</p>
|
||||
</div>
|
||||
|
||||
<!-- Tag grid -->
|
||||
<div *ngIf="!showSpinner && !error && tags.length > 0" class="tag-grid">
|
||||
<div *ngFor="let tag of tags" class="tag-item">
|
||||
<a [routerLink]="['/']" [queryParams]="{ tags: tag.name }" class="tag-link">
|
||||
<span class="tag-name">{{ tag.name }}</span>
|
||||
<span class="tag-count">{{ tag.image_count }}</span>
|
||||
</a>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
`,
|
||||
styles: [`
|
||||
.tags-page { max-width: 1200px; margin: 0 auto; padding: 24px 16px; }
|
||||
.tags-header { display: flex; justify-content: space-between; align-items: center; margin-bottom: 24px; }
|
||||
.tags-header h1 { margin: 0; }
|
||||
.back-link { color: var(--text-muted); text-decoration: none; font-size: 0.9rem; }
|
||||
.back-link:hover { color: var(--text); }
|
||||
.tag-grid { display: grid; grid-template-columns: repeat(auto-fill, minmax(160px, 1fr)); gap: 10px; }
|
||||
.tag-item { background: var(--surface); border: 1px solid var(--border); border-radius: var(--radius); transition: border-color var(--transition); }
|
||||
.tag-item:hover { border-color: var(--border-focus); }
|
||||
.tag-skeleton { height: 56px; }
|
||||
.tag-link { display: flex; justify-content: space-between; align-items: center; padding: 12px 16px; text-decoration: none; color: var(--text); }
|
||||
.tag-name { font-size: 0.95rem; font-weight: 500; }
|
||||
.tag-count { font-size: 0.8rem; color: var(--text-muted); background: var(--surface-raised); padding: 2px 8px; border-radius: var(--radius-chip); }
|
||||
.empty-state { text-align: center; padding: 60px 0; color: var(--text-muted); }
|
||||
.empty-icon { display: block; font-size: 2rem; margin-bottom: 12px; }
|
||||
.error-card { text-align: center; padding: 40px; background: var(--surface); border-radius: var(--radius); border: 1px solid var(--border); }
|
||||
.error-card p { color: var(--text-muted); margin-bottom: 16px; }
|
||||
.retry-btn { padding: 8px 24px; background: var(--surface-raised); color: var(--text); border: 1px solid var(--border); border-radius: var(--radius); cursor: pointer; transition: border-color var(--transition); }
|
||||
.retry-btn:hover { border-color: var(--border-focus); }
|
||||
`],
|
||||
})
|
||||
export class TagsComponent implements OnInit {
|
||||
tags: TagRecord[] = [];
|
||||
showSpinner = false;
|
||||
error = false;
|
||||
readonly skeletonItems = Array(12).fill(null);
|
||||
|
||||
constructor(private tagService: TagService, private cdr: ChangeDetectorRef) {}
|
||||
|
||||
ngOnInit(): void {
|
||||
this.load();
|
||||
}
|
||||
|
||||
load(): void {
|
||||
this.error = false;
|
||||
this.showSpinner = true;
|
||||
this.cdr.markForCheck();
|
||||
this.tagService.list('', 500, 0, 'count_desc', 1).subscribe({
|
||||
next: (res) => {
|
||||
this.tags = res.items;
|
||||
this.showSpinner = false;
|
||||
this.cdr.markForCheck();
|
||||
},
|
||||
error: () => {
|
||||
this.showSpinner = false;
|
||||
this.error = true;
|
||||
this.cdr.markForCheck();
|
||||
},
|
||||
});
|
||||
}
|
||||
}
|
||||
Reference in New Issue
Block a user