ai/ai
1
0
Fork 0

init: a properly working version

This commit is contained in:
Arthur K. 2026-02-27 08:29:00 +03:00
commit 13aed7fc8c
Signed by: wzray
GPG key ID: B97F30FDC4636357
49 changed files with 17203 additions and 0 deletions

7
.gitignore vendored Normal file
View file

@ -0,0 +1,7 @@
.ruff_cache/
.mypy_cache/
__pycache__/
api_ref
.venv/
.pytest_cache/
x.py

21
Dockerfile Normal file
View file

@ -0,0 +1,21 @@
# Runtime image for the AI router (served by uvicorn, see CMD below).
FROM python:3.14-slim

# No .pyc files, unbuffered logs; router state lives under /data.
ENV PYTHONDONTWRITEBYTECODE=1 \
    PYTHONUNBUFFERED=1 \
    AI_ROUTER_DATA_DIR=/data

WORKDIR /app

# Install dependencies with uv from pyproject.toml only, so the layer is
# cached independently of application-code changes.
RUN pip install --no-cache-dir uv
COPY pyproject.toml ./
# --no-install-project: dependencies only; app code is copied separately below.
RUN uv sync --no-dev --no-install-project
# Put the project virtualenv first on PATH so "uvicorn" resolves inside it.
ENV PATH="/app/.venv/bin:$PATH"

COPY app ./app

VOLUME ["/data"]
EXPOSE 80
CMD ["uvicorn", "app.main:app", "--host", "0.0.0.0", "--port", "80"]

1
app/__init__.py Normal file
View file

@ -0,0 +1 @@
"""AI router package."""

1
app/api/__init__.py Normal file
View file

@ -0,0 +1 @@
"""API package."""

234
app/api/mappers.py Normal file
View file

@ -0,0 +1,234 @@
"""Mapping functions between external and internal types."""
from __future__ import annotations
from app.api.schemas import (
ChatCompletionChoice,
ChatCompletionChunk,
ChatCompletionChunkChoice,
ChatCompletionChunkDelta,
ChatCompletionMessage,
ChatCompletionResponse,
ChatCompletionsRequest,
ModelObject,
ModelsResponse,
)
from app.core.types import CoreChatRequest, CoreChunk, CoreMessage, CoreModel
def to_core_chat_request(payload: ChatCompletionsRequest) -> CoreChatRequest:
    """Normalize an external chat-completions request into a CoreChatRequest.

    Unknown top-level fields survive in ``extra``; when the flat
    ``reasoning_effort`` / ``reasoning_summary`` fields are absent, they are
    backfilled from the nested ``reasoning`` object if it carries string
    ``effort`` / ``summary`` entries.
    """
    extras = dict(payload.model_extra or {})
    reasoning_effort = payload.reasoning_effort
    reasoning_summary = payload.reasoning_summary
    if isinstance(payload.reasoning, dict):
        # Nested reasoning object only fills gaps; explicit flat fields win.
        if reasoning_effort is None and isinstance(
            payload.reasoning.get("effort"), str
        ):
            reasoning_effort = payload.reasoning["effort"]
        if reasoning_summary is None and isinstance(
            payload.reasoning.get("summary"), str
        ):
            reasoning_summary = payload.reasoning["summary"]
    return CoreChatRequest(
        model=payload.model,
        stream=payload.stream,
        messages=[
            CoreMessage(
                role=m.role,
                content=m.content,
                name=m.name,
                tool_call_id=m.tool_call_id,
                audio=m.audio,
                refusal=m.refusal,
                reasoning_content=m.reasoning_content,
                tool_calls=m.tool_calls,
                function_call=m.function_call,
                # Per-message unknown fields are preserved as well.
                extra=dict(m.model_extra or {}),
            )
            for m in payload.messages
        ],
        audio=payload.audio,
        frequency_penalty=payload.frequency_penalty,
        logit_bias=payload.logit_bias,
        logprobs=payload.logprobs,
        max_completion_tokens=payload.max_completion_tokens,
        max_tokens=payload.max_tokens,
        metadata=payload.metadata,
        provider=payload.provider,
        plugins=payload.plugins,
        session_id=payload.session_id,
        trace=payload.trace,
        modalities=list(payload.modalities) if payload.modalities is not None else None,
        models=payload.models,
        n=payload.n,
        parallel_tool_calls=payload.parallel_tool_calls,
        prediction=payload.prediction,
        presence_penalty=payload.presence_penalty,
        prompt_cache_key=payload.prompt_cache_key,
        prompt_cache_retention=payload.prompt_cache_retention,
        reasoning_effort=reasoning_effort,
        reasoning_summary=reasoning_summary,
        reasoning=payload.reasoning,
        response_format=payload.response_format,
        safety_identifier=payload.safety_identifier,
        seed=payload.seed,
        service_tier=payload.service_tier,
        stop=payload.stop,
        store=payload.store,
        stream_options=payload.stream_options,
        temperature=payload.temperature,
        debug=payload.debug,
        image_config=payload.image_config,
        tool_choice=payload.tool_choice,
        tools=payload.tools,
        top_logprobs=payload.top_logprobs,
        top_p=payload.top_p,
        user=payload.user,
        verbosity=payload.verbosity,
        web_search_options=payload.web_search_options,
        extra=extras,
    )
def to_api_chunk(
    *,
    chunk: CoreChunk,
    routed_model: str,
    chunk_id: str,
    created: int,
) -> ChatCompletionChunk:
    """Convert one core stream chunk into an OpenAI-style streaming chunk."""
    delta = ChatCompletionChunkDelta(
        role=chunk.role,
        content=chunk.content,
        reasoning_content=chunk.reasoning_content,
        reasoning_details=chunk.reasoning_details,
        tool_calls=chunk.tool_calls,
    )
    choice = ChatCompletionChunkChoice(
        index=chunk.index,
        delta=delta,
        finish_reason=chunk.finish_reason,
    )
    return ChatCompletionChunk(
        id=chunk_id,
        created=created,
        model=routed_model,
        choices=[choice],
    )
def to_chat_completion_response(
    *,
    chunks: list[CoreChunk],
    routed_model: str,
    completion_id: str,
    created: int,
) -> ChatCompletionResponse:
    """Collapse a complete chunk stream into one non-streaming response.

    Text and reasoning deltas are concatenated, reasoning details and tool
    call fragments accumulated (the latter merged back into whole calls),
    and the last non-None finish reason wins.
    """
    content_acc = ""
    reasoning_acc = ""
    detail_acc: list[dict[str, object]] = []
    call_acc: list[dict[str, object]] = []
    last_finish: str | None = None
    for piece in chunks:
        content_acc += piece.content or ""
        reasoning_acc += piece.reasoning_content or ""
        if piece.reasoning_details:
            detail_acc.extend(piece.reasoning_details)
        if piece.tool_calls:
            call_acc.extend(piece.tool_calls)
        if piece.finish_reason is not None:
            last_finish = piece.finish_reason
    message = ChatCompletionMessage(
        content=content_acc,
        reasoning=reasoning_acc or None,
        reasoning_content=reasoning_acc or None,
        reasoning_details=detail_acc or None,
        tool_calls=_merge_tool_call_deltas(call_acc) or None,
    )
    return ChatCompletionResponse(
        id=completion_id,
        created=created,
        model=routed_model,
        choices=[
            ChatCompletionChoice(index=0, message=message, finish_reason=last_finish)
        ],
    )
def _merge_tool_call_deltas(
deltas: list[dict[str, object]],
) -> list[dict[str, object]]:
merged: dict[int, dict[str, object]] = {}
for delta in deltas:
index = delta.get("index")
if not isinstance(index, int):
continue
current = merged.setdefault(index, {"index": index, "type": "function"})
call_id = delta.get("id")
if isinstance(call_id, str):
current["id"] = call_id
function = delta.get("function")
if not isinstance(function, dict):
continue
current_function = current.setdefault("function", {})
if not isinstance(current_function, dict):
continue
name = function.get("name")
if isinstance(name, str):
current_function["name"] = name
arguments = function.get("arguments")
if isinstance(arguments, str):
previous = current_function.get("arguments")
if isinstance(previous, str):
current_function["arguments"] = previous + arguments
else:
current_function["arguments"] = arguments
return [merged[key] for key in sorted(merged)]
def to_models_response(models: list[CoreModel]) -> ModelsResponse:
    """Wrap core models into the OpenAI-style model-list envelope."""
    entries: list[ModelObject] = []
    for core_model in models:
        entries.append(
            ModelObject(
                id=core_model.id,
                created=core_model.created,
                owned_by=core_model.owned_by,
                name=_format_model_name(core_model),
                description=core_model.description,
                context_length=core_model.context_length,
                architecture=core_model.architecture,
                pricing=core_model.pricing,
                supported_parameters=core_model.supported_parameters,
                settings=core_model.settings,
                opencode=core_model.opencode,
            )
        )
    return ModelsResponse(data=entries)
def _format_model_name(model: CoreModel) -> str | None:
if model.name is None:
return None
provider_label = model.provider_display_name
if provider_label is None:
provider_name, _, _ = model.id.partition("/")
provider_label = provider_name or None
if provider_label is None:
return model.name
return f"{provider_label}: {model.name}"

134
app/api/middleware.py Normal file
View file

@ -0,0 +1,134 @@
"""HTTP middleware utilities for API layer."""
from __future__ import annotations
import json
import logging
import os
import time
from collections.abc import AsyncIterator
from typing import Any
from fastapi import FastAPI, Request
from starlette.responses import StreamingResponse
# Dedicated access logger, configured once at import time.
logger = logging.getLogger("ai.http")
if not logger.handlers:
    # Only attach a handler if none exists yet, so repeated imports (e.g.
    # under an auto-reloader) do not duplicate log lines.
    handler = logging.StreamHandler()
    handler.setFormatter(
        logging.Formatter(
            "%(asctime)s %(levelname)s %(name)s %(message)s",
            "%Y-%m-%d %H:%M:%S",
        )
    )
    logger.addHandler(handler)
    logger.setLevel(logging.INFO)
    # Keep access logs out of the root logger's handlers.
    logger.propagate = False
def install_request_logging(app: FastAPI) -> None:
    """Install an HTTP middleware that logs requests.

    Error responses (status >= 400) and unhandled exceptions are always
    logged. Full request/response body logging is opt-in via the
    ``AI_REQUEST_LOG_ENABLED`` env var ("1"/"true"/"yes"/"on"); logged bodies
    are capped at ``AI_REQUEST_LOG_MAX_BODY_CHARS`` characters (minimum 1024,
    default 20000).
    """
    enabled = os.getenv("AI_REQUEST_LOG_ENABLED", "false").strip().lower()
    detailed_logging_enabled = enabled in {"1", "true", "yes", "on"}
    max_len_raw = os.getenv("AI_REQUEST_LOG_MAX_BODY_CHARS", "20000").strip()
    try:
        max_body_chars = max(1024, int(max_len_raw))
    except ValueError:
        max_body_chars = 20000

    @app.middleware("http")
    async def request_logging_middleware(request: Request, call_next):
        started_at = time.perf_counter()
        body_text = ""
        if detailed_logging_enabled and request.method in {"POST", "PUT", "PATCH"}:
            # Starlette caches the body, so downstream handlers can re-read it.
            body_bytes = await request.body()
            body_text = _format_body(body_bytes)
        try:
            response = await call_next(request)
        except Exception:
            elapsed_ms = (time.perf_counter() - started_at) * 1000
            logger.exception(
                "http request failed method=%s path=%s duration_ms=%.2f query=%s body=%s",
                request.method,
                request.url.path,
                elapsed_ms,
                request.url.query or "-",
                # Fix: truncate here too, like the other log paths, so a huge
                # request body cannot produce an unbounded log line.
                _truncate(body_text or "-", max_body_chars),
            )
            raise
        elapsed_ms = (time.perf_counter() - started_at) * 1000
        if response.status_code >= 400:
            # Error responses are logged even when detailed logging is off.
            response_body = await _read_response_body(response)
            logger.warning(
                "http request error method=%s path=%s status=%s duration_ms=%.2f query=%s body=%s response_body=%s",
                request.method,
                request.url.path,
                response.status_code,
                elapsed_ms,
                request.url.query or "-",
                _truncate(body_text or "-", max_body_chars),
                _truncate(response_body or "-", max_body_chars),
            )
            return response
        if not detailed_logging_enabled:
            return response
        logger.info(
            "http request method=%s path=%s status=%s duration_ms=%.2f query=%s body=%s",
            request.method,
            request.url.path,
            response.status_code,
            elapsed_ms,
            request.url.query or "-",
            _truncate(body_text or "-", max_body_chars),
        )
        return response
def _format_body(body: bytes) -> str:
if not body:
return ""
try:
parsed: Any = json.loads(body)
text = json.dumps(parsed, ensure_ascii=True, separators=(",", ":"))
except Exception:
text = body.decode("utf-8", errors="replace")
return text
def _truncate(value: str, limit: int) -> str:
if len(value) <= limit:
return value
return f"{value[:limit]}...[truncated {len(value) - limit} chars]"
async def _read_response_body(response: Any) -> str:
    """Capture a response body for logging without losing it for the client.

    Buffered bodies are formatted directly; explicit StreamingResponses are
    not drained. When only a body iterator exists, it is consumed and then
    replaced with a single-shot iterator holding the same bytes.
    """
    buffered = getattr(response, "body", None)
    if isinstance(buffered, (bytes, bytearray)):
        return _format_body(bytes(buffered))
    if isinstance(response, StreamingResponse):
        return "<streaming-response>"
    iterator = getattr(response, "body_iterator", None)
    if iterator is None:
        return ""
    collected = bytearray()
    async for piece in iterator:
        if isinstance(piece, bytes):
            collected += piece
        else:
            collected += str(piece).encode("utf-8", errors="replace")
    raw = bytes(collected)
    response.body_iterator = _iterate_once(raw)
    return _format_body(raw)
async def _iterate_once(payload: bytes) -> AsyncIterator[bytes]:
    # Single-shot async iterator used to restore a consumed body_iterator.
    yield payload

132
app/api/routes.py Normal file
View file

@ -0,0 +1,132 @@
"""HTTP routes for the external API."""
from __future__ import annotations
import json
import logging
import time
from uuid import uuid4
from fastapi import APIRouter, Depends, HTTPException, Response
from fastapi.responses import JSONResponse, StreamingResponse
from app.api.mappers import (
to_api_chunk,
to_chat_completion_response,
to_core_chat_request,
to_models_response,
)
from app.api.schemas import ChatCompletionsRequest, ErrorBody, ErrorEnvelope
from app.core.errors import RouterError
from app.core.router import RouterCore
from app.core.types import CoreChunk
from app.dependencies import get_router_core
router = APIRouter()
logger = logging.getLogger(__name__)
@router.get("/v1/models")
@router.get("/models")
async def list_models(core: RouterCore = Depends(get_router_core)) -> JSONResponse:
    """Return the aggregated model catalog in OpenAI list format."""
    try:
        models = await core.list_models()
    except RouterError as exc:
        raise _to_http_exception(exc) from exc
    return JSONResponse(
        content=to_models_response(models).model_dump(exclude_none=True)
    )
@router.post("/v1/chat/completions")
@router.post("/chat/completions")
async def chat_completions(
    payload: ChatCompletionsRequest,
    core: RouterCore = Depends(get_router_core),
) -> Response:
    """Handle chat completions, streaming (SSE) or buffered per ``stream``.

    The provider is resolved up-front so routing errors become HTTP errors
    before any streaming begins; errors after the stream has started can
    only be reported in-band as SSE events.
    """
    _log_extra_params(payload)
    core_request = to_core_chat_request(payload)
    try:
        # Fail fast on unknown provider / disallowed model.
        core.resolve_provider(core_request.model)
    except RouterError as exc:
        raise _to_http_exception(exc) from exc
    stream_id = f"chatcmpl-{uuid4().hex}"
    created = int(time.time())
    if not payload.stream:
        # Non-streaming: drain the provider stream, then merge all chunks
        # into a single chat.completion response.
        chunks: list[CoreChunk] = []
        try:
            async for internal_chunk in core.stream_chat(core_request):
                chunks.append(internal_chunk)
        except RouterError as exc:
            raise _to_http_exception(exc) from exc
        except Exception as exc:
            logger.exception("chat.completions non-stream failed")
            raise HTTPException(
                status_code=500,
                detail=ErrorEnvelope(
                    error=ErrorBody(message=str(exc), type="internal_server_error")
                ).model_dump(),
            ) from exc
        completion = to_chat_completion_response(
            chunks=chunks,
            routed_model=payload.model,
            completion_id=stream_id,
            created=created,
        )
        return JSONResponse(content=completion.model_dump(exclude_none=True))

    async def event_stream():
        # Streaming: forward each chunk as an SSE data line; always finish
        # with the [DONE] sentinel, even after an in-band error.
        try:
            async for internal_chunk in core.stream_chat(core_request):
                chunk = to_api_chunk(
                    chunk=internal_chunk,
                    routed_model=payload.model,
                    chunk_id=stream_id,
                    created=created,
                )
                yield _sse_line(chunk.model_dump_json(exclude_none=True))
        except RouterError as exc:
            error = ErrorEnvelope(error=ErrorBody(message=exc.message, type=exc.code))
            yield _sse_line(error.model_dump_json())
        except Exception as exc:
            logger.exception("chat.completions stream failed")
            error = ErrorEnvelope(
                error=ErrorBody(message=str(exc), type="internal_server_error")
            )
            yield _sse_line(error.model_dump_json())
        yield _sse_line("[DONE]")

    return StreamingResponse(event_stream(), media_type="text/event-stream")
def _sse_line(payload: str) -> str:
return f"data: {payload}\n\n"
def _to_http_exception(exc: RouterError) -> HTTPException:
    """Map a domain RouterError to an HTTPException with an error envelope.

    Uses ``model_dump()`` directly: the original serialized to JSON and
    immediately parsed it back (``json.loads(model_dump_json())``), which is
    a needless round-trip for this str-only payload.
    """
    envelope = ErrorEnvelope(error=ErrorBody(message=exc.message, type=exc.code))
    return HTTPException(status_code=exc.status_code, detail=envelope.model_dump())
def _log_extra_params(payload: ChatCompletionsRequest) -> None:
top_extra = payload.model_extra or {}
if top_extra:
logger.error("chat.completions request extra params: %s", top_extra)
for index, message in enumerate(payload.messages):
message_extra = message.model_extra or {}
if message_extra:
logger.error(
"chat.completions message[%s] extra params: %s",
index,
message_extra,
)

175
app/api/schemas.py Normal file
View file

@ -0,0 +1,175 @@
"""External API request/response schemas."""
from __future__ import annotations
from typing import Any, Literal
from pydantic import AliasChoices, BaseModel, ConfigDict, Field
class ChatMessage(BaseModel):
    """One incoming chat message; unknown fields are kept (``extra="allow"``)."""

    model_config = ConfigDict(extra="allow")

    role: Literal["developer", "system", "user", "assistant", "tool", "function"]
    # Content may be a plain string or structured parts; left untyped on purpose.
    content: Any | None = None
    name: str | None = None
    tool_call_id: str | None = None
    audio: dict[str, Any] | None = None
    refusal: str | None = None
    reasoning_content: str | None = None
    tool_calls: list[dict[str, Any]] | None = None
    function_call: dict[str, Any] | None = None
class ChatCompletionsRequest(BaseModel):
    """OpenAI-style chat-completions request body.

    Unknown fields are preserved (``extra="allow"``) and forwarded downstream
    via the core request's ``extra`` mapping.
    """

    model_config = ConfigDict(extra="allow")

    model: str
    messages: list[ChatMessage]
    # NOTE: streaming is the default here (unlike the upstream OpenAI API).
    stream: bool = True
    audio: dict[str, Any] | None = None
    frequency_penalty: float | None = None
    logit_bias: dict[str, float] | None = None
    logprobs: bool | None = None
    max_completion_tokens: int | None = None
    max_tokens: int | None = None
    metadata: dict[str, str] | None = None
    provider: dict[str, Any] | None = None
    plugins: list[dict[str, Any]] | None = None
    session_id: str | None = None
    trace: dict[str, Any] | None = None
    modalities: list[Literal["text", "image"]] | None = None
    models: list[Any] | None = None
    n: int | None = None
    parallel_tool_calls: bool | None = None
    prediction: dict[str, Any] | None = None
    presence_penalty: float | None = None
    prompt_cache_key: str | None = None
    prompt_cache_retention: Literal["in-memory", "24h"] | None = None
    reasoning_effort: (
        Literal["none", "minimal", "low", "medium", "high", "xhigh"] | None
    ) = None
    # Accepts both snake_case and camelCase spellings from clients.
    reasoning_summary: Literal["auto", "concise", "detailed"] | None = Field(
        default=None,
        validation_alias=AliasChoices("reasoning_summary", "reasoningSummary"),
    )
    # Nested reasoning object; may carry effort/summary (see mappers).
    reasoning: dict[str, Any] | None = None
    response_format: dict[str, Any] | None = None
    safety_identifier: str | None = None
    seed: int | None = None
    service_tier: Literal["auto", "default", "flex", "scale", "priority"] | None = None
    stop: str | list[str] | None = None
    store: bool | None = None
    stream_options: dict[str, Any] | None = None
    debug: dict[str, Any] | None = None
    image_config: dict[str, Any] | None = None
    temperature: float | None = None
    tool_choice: str | dict[str, Any] | None = None
    tools: list[dict[str, Any]] | None = None
    top_logprobs: int | None = None
    top_p: float | None = None
    user: str | None = None
    verbosity: Literal["low", "medium", "high"] | None = None
    web_search_options: dict[str, Any] | None = None
class ChatCompletionChunkDelta(BaseModel):
    """Incremental message fragment inside a streaming chunk choice."""

    model_config = ConfigDict(extra="forbid")

    role: str | None = None
    content: str | None = None
    reasoning_content: str | None = None
    reasoning_details: list[dict[str, Any]] | None = None
    tool_calls: list[dict[str, Any]] | None = None
class ChatCompletionChunkChoice(BaseModel):
    """One choice entry in a streaming chunk."""

    model_config = ConfigDict(extra="forbid")

    index: int
    delta: ChatCompletionChunkDelta
    finish_reason: str | None = None
class ChatCompletionChunk(BaseModel):
    """Streaming response chunk (``object == "chat.completion.chunk"``)."""

    model_config = ConfigDict(extra="forbid")

    id: str
    object: str = "chat.completion.chunk"
    created: int
    model: str
    choices: list[ChatCompletionChunkChoice]
class ChatCompletionMessage(BaseModel):
    """Assembled assistant message in a non-streaming response."""

    model_config = ConfigDict(extra="forbid")

    role: str = "assistant"
    content: str
    # reasoning and reasoning_content carry the same text (client-dependent key).
    reasoning: str | None = None
    reasoning_content: str | None = None
    reasoning_details: list[dict[str, Any]] | None = None
    refusal: str | None = None
    tool_calls: list[dict[str, Any]] | None = None
    function_call: dict[str, Any] | None = None
    audio: dict[str, Any] | None = None
    annotations: list[dict[str, Any]] | None = None
class ChatCompletionChoice(BaseModel):
    """One choice entry in a non-streaming response."""

    model_config = ConfigDict(extra="forbid")

    index: int
    message: ChatCompletionMessage
    finish_reason: str | None = None
    logprobs: dict[str, Any] | None = None
class ChatCompletionResponse(BaseModel):
    """Complete non-streaming response (``object == "chat.completion"``)."""

    model_config = ConfigDict(extra="forbid")

    id: str
    object: str = "chat.completion"
    created: int
    model: str
    choices: list[ChatCompletionChoice]
    usage: dict[str, Any] | None = None
    service_tier: str | None = None
    system_fingerprint: str | None = None
class ErrorBody(BaseModel):
    """Error payload: human-readable message plus machine-readable type code."""

    model_config = ConfigDict(extra="forbid")

    message: str
    type: str
class ErrorEnvelope(BaseModel):
    """OpenAI-style error wrapper: ``{"error": {...}}``."""

    model_config = ConfigDict(extra="forbid")

    error: ErrorBody
class ModelObject(BaseModel):
    """One entry of the /models listing (OpenAI model object plus extras)."""

    model_config = ConfigDict(extra="forbid")

    id: str
    object: Literal["model"] = "model"
    created: int = 0
    owned_by: str = "wzray"
    name: str | None = None
    description: str | None = None
    context_length: int | None = None
    architecture: dict[str, Any] | None = None
    pricing: dict[str, Any] | None = None
    supported_parameters: list[str] | None = None
    settings: dict[str, Any] | None = None
    opencode: dict[str, Any] | None = None
class ModelsResponse(BaseModel):
    """List envelope for /models (``object == "list"``)."""

    model_config = ConfigDict(extra="forbid")

    object: Literal["list"] = "list"
    data: list[ModelObject]

1
app/config/__init__.py Normal file
View file

@ -0,0 +1 @@
"""Configuration package."""

44
app/config/loader.py Normal file
View file

@ -0,0 +1,44 @@
"""Configuration loading and validation."""
from __future__ import annotations
from pathlib import Path
import yaml
from app.config.models import AppConfig, AuthConfig, LoadedConfig, LoadedProviderConfig
def load_config(config_path: Path, auth_path: Path) -> LoadedConfig:
    """Load and validate the app and auth YAML files into a LoadedConfig.

    Every provider declared in the app config must have a matching entry in
    the auth config; otherwise a ValueError is raised.
    """
    app_config = AppConfig.model_validate(_read_yaml(config_path))
    auth_config = AuthConfig.model_validate(_read_yaml(auth_path))
    loaded: dict[str, LoadedProviderConfig] = {}
    for name, provider in app_config.providers.items():
        auth = auth_config.providers.get(name)
        if auth is None:
            raise ValueError(f"Missing auth entry for provider '{name}'")
        loaded[name] = LoadedProviderConfig(
            name=name,
            url=provider.url,
            type=provider.type,
            display_name=provider.name,
            models=provider.models,
            whitelist=provider.whitelist,
            blacklist=provider.blacklist,
            auth=auth,
        )
    return LoadedConfig(providers=loaded)
def _read_yaml(path: Path) -> dict:
    """Parse *path* as YAML, requiring a top-level mapping."""
    with path.open("r", encoding="utf-8") as handle:
        parsed = yaml.safe_load(handle)
    if isinstance(parsed, dict):
        return parsed
    raise ValueError(f"YAML file '{path}' must contain an object")

75
app/config/models.py Normal file
View file

@ -0,0 +1,75 @@
"""Configuration models for providers and auth."""
from __future__ import annotations
from typing import Literal
from pydantic import BaseModel, ConfigDict, Field
# Which upstream wire protocol a provider speaks.
ProviderType = Literal["openai-completions", "codex-responses"]
class ProviderConfig(BaseModel):
    """One provider entry from the app config file."""

    model_config = ConfigDict(extra="forbid")

    url: str
    type: ProviderType
    # Optional human-readable display name.
    name: str | None = None
    # Per-model string overrides keyed by model id.
    models: dict[str, dict[str, str]] | None = None
    whitelist: list[str] | None = None
    blacklist: list[str] | None = None
class AppConfig(BaseModel):
    """Root of the app config file: providers keyed by name."""

    model_config = ConfigDict(extra="forbid")

    providers: dict[str, ProviderConfig] = Field(default_factory=dict)
class TokenAuth(BaseModel):
    """Static API-token credential."""

    model_config = ConfigDict(extra="forbid")

    token: str
class OAuthAuth(BaseModel):
    """OAuth credential pair with an expiry timestamp."""

    model_config = ConfigDict(extra="forbid")

    access: str
    refresh: str
    # Expiry of the access token; units not encoded here — see consumers.
    expires: int
class UrlAuth(BaseModel):
    """Credential expressed as a URL (e.g. one embedding a secret)."""

    model_config = ConfigDict(extra="forbid")

    url: str
# Union of the supported provider credential shapes.
ProviderAuth = TokenAuth | OAuthAuth | UrlAuth
class AuthConfig(BaseModel):
    """Root of the auth config file: credentials keyed by provider name."""

    model_config = ConfigDict(extra="forbid")

    providers: dict[str, ProviderAuth] = Field(default_factory=dict)
class LoadedProviderConfig(BaseModel):
    """Provider config merged with its auth entry (see loader.load_config)."""

    model_config = ConfigDict(extra="forbid")

    name: str
    url: str
    type: ProviderType
    display_name: str | None = None
    models: dict[str, dict[str, str]] | None = None
    whitelist: list[str] | None = None
    blacklist: list[str] | None = None
    auth: ProviderAuth
class LoadedConfig(BaseModel):
    """Fully merged configuration: loaded providers keyed by name."""

    model_config = ConfigDict(extra="forbid")

    providers: dict[str, LoadedProviderConfig] = Field(default_factory=dict)

1
app/core/__init__.py Normal file
View file

@ -0,0 +1 @@
"""Core package."""

39
app/core/errors.py Normal file
View file

@ -0,0 +1,39 @@
"""Domain errors for routing and provider handling."""
from __future__ import annotations
class RouterError(Exception):
    """Base class for router errors."""

    # Machine-readable error code surfaced in API error bodies.
    code = "router_error"
    # HTTP status used when the error is converted to a response.
    status_code = 500

    def __init__(self, message: str) -> None:
        super().__init__(message)
        # Kept as an attribute so handlers can read it without str(exc).
        self.message = message
class InvalidModelError(RouterError):
    """Routed model identifier is invalid; code ``invalid_model``, HTTP 400."""

    code = "invalid_model"
    status_code = 400
class ProviderNotFoundError(RouterError):
    """No provider matches the model's provider prefix; HTTP 404."""

    code = "provider_not_found"
    status_code = 404
class ModelNotAllowedError(RouterError):
    """Model rejected by provider policy; code ``model_not_allowed``, HTTP 400."""

    code = "model_not_allowed"
    status_code = 400
class ProviderNotImplementedError(RouterError):
    """Provider type has no implementation; HTTP 501."""

    code = "provider_not_implemented"
    status_code = 501
class UpstreamProviderError(RouterError):
    """Upstream provider failed; surfaced as HTTP 502 (bad gateway)."""

    code = "upstream_provider_error"
    status_code = 502

306
app/core/models_dev.py Normal file
View file

@ -0,0 +1,306 @@
"""models.dev catalog lookup and model enrichment."""
from __future__ import annotations
import asyncio
from collections.abc import Awaitable, Callable
from datetime import datetime, timezone
import logging
import time
from typing import Any
from urllib.parse import urlparse
import httpx
from app.core.types import CoreModel, ProviderModel
logger = logging.getLogger(__name__)
class ModelsDevCatalog:
    """Fetches and caches models.dev provider catalog."""

    def __init__(
        self,
        *,
        catalog_url: str = "https://models.dev/api.json",
        cache_ttl_seconds: float = 600.0,
        timeout_seconds: float = 10.0,
        fetch_catalog: Callable[[], Awaitable[dict[str, Any]]] | None = None,
    ) -> None:
        # fetch_catalog is injectable (e.g. for tests); defaults to HTTP fetch.
        self._catalog_url = catalog_url
        self._cache_ttl_seconds = cache_ttl_seconds
        self._timeout_seconds = timeout_seconds
        self._fetch_catalog = fetch_catalog or self._fetch_catalog_http
        # Cached catalog and its monotonic-clock expiry; refreshes are
        # serialized by _catalog_lock.
        self._catalog: dict[str, dict[str, Any]] | None = None
        self._catalog_expires_at = 0.0
        self._catalog_lock = asyncio.Lock()

    async def get_provider_models(
        self, *, provider_name: str, provider_url: str
    ) -> tuple[str | None, dict[str, dict[str, Any]]]:
        """Return (provider display name, models mapping) for one provider.

        Returns ``(None, {})`` when the catalog is empty or the provider
        cannot be resolved; malformed model entries are filtered out.
        """
        catalog = await self._get_catalog()
        if not catalog:
            return None, {}
        provider_id = _resolve_provider_id(
            catalog,
            provider_name=provider_name,
            provider_url=provider_url,
        )
        if provider_id is None:
            return None, {}
        provider = _as_dict(catalog.get(provider_id))
        provider_display_name = _as_str(provider.get("name"))
        raw_models = provider.get("models")
        if not isinstance(raw_models, dict):
            return provider_display_name, {}
        return (
            provider_display_name,
            {
                model_id: model
                for model_id, model in raw_models.items()
                if isinstance(model_id, str) and isinstance(model, dict)
            },
        )

    async def _get_catalog(self) -> dict[str, dict[str, Any]]:
        """Return the cached catalog, refetching once the TTL has expired."""
        now = time.monotonic()
        if self._catalog is not None and now < self._catalog_expires_at:
            return self._catalog
        async with self._catalog_lock:
            # Re-check under the lock: another task may have refreshed first.
            now = time.monotonic()
            if self._catalog is not None and now < self._catalog_expires_at:
                return self._catalog
            try:
                fetched = await self._fetch_catalog()
                self._catalog = _coerce_catalog(fetched)
            except Exception:
                # Cache an empty catalog for one TTL so a failing upstream
                # is not re-queried on every request.
                logger.exception("failed to fetch models.dev catalog")
                self._catalog = {}
            self._catalog_expires_at = now + self._cache_ttl_seconds
            return self._catalog

    async def _fetch_catalog_http(self) -> dict[str, Any]:
        """Fetch the catalog JSON over HTTP; non-dict payloads become {}."""
        async with httpx.AsyncClient(timeout=self._timeout_seconds) as client:
            response = await client.get(self._catalog_url)
            response.raise_for_status()
            payload = response.json()
            return payload if isinstance(payload, dict) else {}
def to_core_model(
    *,
    provider_name: str,
    provider_model: ProviderModel,
    models_dev_model: dict[str, Any] | None = None,
    provider_display_name: str | None = None,
    model_override: dict[str, Any] | None = None,
) -> CoreModel:
    """Build a CoreModel by layering metadata sources over the provider model.

    Precedence per field: explicit config override, then models.dev catalog
    data, then whatever the provider itself reported. The resulting id is
    namespaced as "<provider_name>/<provider model id>".
    """
    override_name = _as_str(model_override.get("name")) if model_override else None
    models_dev_name = (
        _as_str(models_dev_model.get("name")) if models_dev_model else None
    )
    name = override_name or models_dev_name or provider_model.name
    models_dev_context_length = (
        _context_length_from_models_dev(models_dev_model) if models_dev_model else None
    )
    context_length = models_dev_context_length or provider_model.context_length
    models_dev_architecture = (
        _architecture_from_models_dev(models_dev_model) if models_dev_model else None
    )
    architecture = models_dev_architecture or provider_model.architecture
    models_dev_pricing = (
        _pricing_from_models_dev(models_dev_model) if models_dev_model else None
    )
    pricing = models_dev_pricing or provider_model.pricing
    models_dev_supported_parameters = (
        _supported_parameters_from_models_dev(models_dev_model)
        if models_dev_model
        else None
    )
    supported_parameters = (
        models_dev_supported_parameters or provider_model.supported_parameters
    )
    models_dev_created = (
        _created_from_models_dev(models_dev_model) if models_dev_model else None
    )
    created = models_dev_created or provider_model.created
    models_dev_owned_by = (
        _as_str(models_dev_model.get("provider")) if models_dev_model else None
    )
    owned_by = models_dev_owned_by or provider_model.owned_by
    return CoreModel(
        id=f"{provider_name}/{provider_model.id}",
        # Fall back to defaults when neither source provides a value.
        created=created or 0,
        owned_by=owned_by or "wzray",
        name=name,
        provider_display_name=provider_display_name,
        description=provider_model.description,
        context_length=context_length,
        architecture=architecture,
        pricing=pricing,
        supported_parameters=supported_parameters,
        settings=provider_model.settings,
        opencode=provider_model.opencode,
        config_override=model_override,
    )
def _coerce_catalog(raw: dict[str, Any]) -> dict[str, dict[str, Any]]:
catalog: dict[str, dict[str, Any]] = {}
for key, value in raw.items():
if isinstance(key, str) and isinstance(value, dict):
catalog[key] = value
return catalog
def _resolve_provider_id(
    catalog: dict[str, dict[str, Any]], *, provider_name: str, provider_url: str
) -> str | None:
    """Find the catalog key for a configured provider.

    Resolution order: exact id match, then API-host match against the
    catalog's ``api`` URLs, then fuzzy name matching where one normalized
    token must be a prefix of the other. Fuzzy candidates are ranked by
    smallest length difference, then by longest catalog id.
    """
    if provider_name in catalog:
        return provider_name
    provider_host = _host(provider_url)
    if provider_host is not None:
        for provider_id, provider in catalog.items():
            api_url = _as_str(provider.get("api"))
            if api_url is None:
                continue
            if _host(api_url) == provider_host:
                return provider_id
    normalized_name = _normalize_token(provider_name)
    # (length difference, -id length, id) tuples: sorting picks the closest,
    # longest match first.
    candidates: list[tuple[int, int, str]] = []
    for provider_id in catalog:
        normalized_id = _normalize_token(provider_id)
        if not normalized_id:
            continue
        if normalized_name.startswith(normalized_id) or normalized_id.startswith(
            normalized_name
        ):
            candidates.append(
                (
                    abs(len(normalized_name) - len(normalized_id)),
                    -len(normalized_id),
                    provider_id,
                )
            )
    if candidates:
        candidates.sort()
        return candidates[0][2]
    return None
def _context_length_from_models_dev(model: dict[str, Any]) -> int | None:
    """Pull the integer context window from a models.dev ``limit`` mapping."""
    limit = model.get("limit")
    if not isinstance(limit, dict):
        return None
    context = limit.get("context")
    return context if isinstance(context, int) else None
def _architecture_from_models_dev(model: dict[str, Any]) -> dict[str, Any] | None:
    """Assemble an architecture dict (modalities, family) from models.dev data.

    Returns None when no usable field is present.
    """
    architecture: dict[str, Any] = {}
    modalities = model.get("modalities")
    if not isinstance(modalities, dict):
        modalities = {}
    for direction in ("input", "output"):
        values = modalities.get(direction)
        if isinstance(values, list):
            architecture[f"{direction}_modalities"] = [
                str(entry) for entry in values if isinstance(entry, str)
            ]
    family = model.get("family")
    if isinstance(family, str):
        architecture["family"] = family
    return architecture or None
def _pricing_from_models_dev(model: dict[str, Any]) -> dict[str, Any] | None:
cost = model.get("cost")
return cost if isinstance(cost, dict) else None
def _supported_parameters_from_models_dev(model: dict[str, Any]) -> list[str] | None:
    """Derive a sorted list of supported request parameters from models.dev
    capability flags; None when nothing is advertised."""
    supported: set[str] = set()
    # Capability flags must be the literal True (same strict check as _as_bool).
    if model.get("reasoning") is True:
        supported |= {"reasoning_effort", "reasoning_summary"}
    if model.get("tool_call") is True:
        supported |= {"tools", "tool_choice", "parallel_tool_calls"}
    if model.get("structured_output") is True:
        supported.add("response_format")
    limit = model.get("limit")
    if isinstance(limit, dict) and isinstance(limit.get("output"), int):
        supported |= {"max_tokens", "max_completion_tokens"}
    if isinstance(model.get("modalities"), dict):
        supported.add("modalities")
    return sorted(supported) or None
def _created_from_models_dev(model: dict[str, Any]) -> int | None:
    """Convert a models.dev ISO ``release_date`` into a unix timestamp."""
    release_date = model.get("release_date")
    if not isinstance(release_date, str):
        return None
    try:
        moment = datetime.fromisoformat(release_date)
    except ValueError:
        return None
    # Treat naive dates as UTC so the timestamp is host-timezone independent.
    if moment.tzinfo is None:
        moment = moment.replace(tzinfo=timezone.utc)
    else:
        moment = moment.astimezone(timezone.utc)
    return int(moment.timestamp())
def _host(url: str) -> str | None:
try:
parsed = urlparse(url)
except ValueError:
return None
if not parsed.hostname:
return None
return parsed.hostname.lower()
def _normalize_token(value: str) -> str:
return "".join(ch for ch in value.lower() if ch.isalnum())
def _as_dict(raw: Any) -> dict[str, Any]:
return raw if isinstance(raw, dict) else {}
def _as_str(value: Any) -> str | None:
return value if isinstance(value, str) else None
def _as_bool(value: Any) -> bool:
return value is True

173
app/core/router.py Normal file
View file

@ -0,0 +1,173 @@
"""Core routing logic that selects provider by model prefix."""
from __future__ import annotations
import asyncio
from collections.abc import AsyncIterator
import logging
import time
from typing import Any
from app.core.models_dev import ModelsDevCatalog, to_core_model
from app.core.errors import InvalidModelError, ProviderNotFoundError
from app.core.types import (
CoreChatRequest,
CoreChunk,
CoreModel,
ProviderChatRequest,
ProviderModel,
)
from app.providers.base import BaseProvider
logger = logging.getLogger(__name__)
class RouterCore:
"""Routes normalized requests to a specific provider instance."""
    def __init__(
        self,
        providers: dict[str, BaseProvider],
        *,
        models_cache_ttl_seconds: float = 600.0,
        models_dev_catalog: ModelsDevCatalog | None = None,
    ) -> None:
        """Create a router over named provider instances.

        Args:
            providers: provider name -> provider implementation.
            models_cache_ttl_seconds: how long the aggregated model list is
                cached before being rebuilt.
            models_dev_catalog: injectable catalog client; a fresh
                ModelsDevCatalog is created when omitted.
        """
        self.providers = providers
        self._models_cache_ttl_seconds = models_cache_ttl_seconds
        self._models_dev_catalog = models_dev_catalog or ModelsDevCatalog()
        # Cached list_models() result and its monotonic-clock expiry;
        # refreshes are serialized by _models_cache_lock.
        self._models_cache: list[CoreModel] | None = None
        self._models_cache_expires_at = 0.0
        self._models_cache_lock = asyncio.Lock()
    def resolve_provider(self, routed_model: str) -> tuple[BaseProvider, str]:
        """Split a routed model id and return (provider, upstream model id).

        Raises ProviderNotFoundError when the provider prefix is unknown;
        the provider's own ensure_model_allowed may also raise.
        """
        provider_name, upstream_model = split_routed_model(routed_model)
        provider = self.providers.get(provider_name)
        if provider is None:
            raise ProviderNotFoundError(f"Unknown provider '{provider_name}'")
        provider.ensure_model_allowed(upstream_model)
        return provider, upstream_model
async def stream_chat(self, request: CoreChatRequest) -> AsyncIterator[CoreChunk]:
provider, upstream_model = self.resolve_provider(request.model)
provider_request = ProviderChatRequest(
model=upstream_model,
messages=request.messages,
stream=request.stream,
audio=request.audio,
frequency_penalty=request.frequency_penalty,
logit_bias=request.logit_bias,
logprobs=request.logprobs,
max_completion_tokens=request.max_completion_tokens,
max_tokens=request.max_tokens,
metadata=request.metadata,
provider=request.provider,
plugins=request.plugins,
session_id=request.session_id,
trace=request.trace,
modalities=request.modalities,
models=request.models,
n=request.n,
parallel_tool_calls=request.parallel_tool_calls,
prediction=request.prediction,
presence_penalty=request.presence_penalty,
prompt_cache_key=request.prompt_cache_key,
prompt_cache_retention=request.prompt_cache_retention,
reasoning_effort=request.reasoning_effort,
reasoning_summary=request.reasoning_summary,
reasoning=request.reasoning,
response_format=request.response_format,
safety_identifier=request.safety_identifier,
seed=request.seed,
service_tier=request.service_tier,
stop=request.stop,
store=request.store,
stream_options=request.stream_options,
temperature=request.temperature,
debug=request.debug,
image_config=request.image_config,
tool_choice=request.tool_choice,
tools=request.tools,
top_logprobs=request.top_logprobs,
top_p=request.top_p,
user=request.user,
verbosity=request.verbosity,
web_search_options=request.web_search_options,
extra=request.extra,
)
async for chunk in provider.stream_chat(provider_request):
yield chunk
async def list_models(self) -> list[CoreModel]:
now = time.monotonic()
if self._models_cache is not None and now < self._models_cache_expires_at:
return list(self._models_cache)
async with self._models_cache_lock:
now = time.monotonic()
if self._models_cache is not None and now < self._models_cache_expires_at:
return list(self._models_cache)
models: list[CoreModel] = []
for provider_name, provider in self.providers.items():
try:
provider_models = await provider.list_models()
except Exception as exc:
logger.exception(
"models listing failed provider=%s base_url=%s error=%s",
provider_name,
provider.base_url,
repr(exc),
)
continue
(
provider_display_name,
models_dev_models,
) = await self._models_dev_catalog.get_provider_models(
provider_name=provider_name,
provider_url=provider.base_url,
)
for model in provider_models:
if not provider.is_model_allowed(model.id):
continue
model_override = None
if provider.models_config is not None:
raw_override = provider.models_config.get(model.id)
if isinstance(raw_override, dict):
model_override = raw_override
provider_display_name = (
provider.display_name or provider_display_name
)
models.append(
to_core_model(
provider_name=provider_name,
provider_model=model,
models_dev_model=models_dev_models.get(model.id),
provider_display_name=provider_display_name,
model_override=model_override,
)
)
self._models_cache = list(models)
self._models_cache_expires_at = (
time.monotonic() + self._models_cache_ttl_seconds
)
return list(models)
def split_routed_model(routed_model: str) -> tuple[str, str]:
    """Split a routed model id into (provider name, upstream model id).

    The provider is everything before the first '/'; the remainder (which
    may itself contain '/') is the upstream model id.  Both parts are
    stripped of surrounding whitespace.

    Raises:
        InvalidModelError: When the 'provider/model' shape is violated.
    """
    if "/" not in routed_model:
        raise InvalidModelError(
            "Model must have format 'provider/model', e.g. 'kilocode/minimax/minimax-m2.5:free'"
        )
    provider_name, _, upstream_model = routed_model.partition("/")
    provider_name = provider_name.strip()
    upstream_model = upstream_model.strip()
    if not provider_name:
        raise InvalidModelError("Provider name in model cannot be empty")
    if not upstream_model:
        raise InvalidModelError("Upstream model in model cannot be empty")
    return provider_name, upstream_model

158
app/core/types.py Normal file
View file

@ -0,0 +1,158 @@
"""Internal normalized request/response types."""
from __future__ import annotations
from dataclasses import dataclass, field
from typing import Any
@dataclass(slots=True)
class CoreMessage:
    """One chat message in the internal normalized format."""
    role: str  # chat role, e.g. "assistant"
    content: Any | None = None  # text or structured content parts
    name: str | None = None
    tool_call_id: str | None = None  # links a tool result to its call
    audio: dict[str, Any] | None = None
    refusal: str | None = None
    reasoning_content: str | None = None
    tool_calls: list[dict[str, Any]] | None = None
    function_call: dict[str, Any] | None = None
    # Unrecognized fields are preserved here for pass-through.
    extra: dict[str, Any] = field(default_factory=dict)
@dataclass(slots=True)
class CoreChatRequest:
    """Normalized chat-completions request.

    ``model`` still carries the routed id ('provider/model'); the router
    strips the provider prefix before building a ProviderChatRequest.
    """
    model: str
    messages: list[CoreMessage]
    stream: bool = True
    audio: dict[str, Any] | None = None
    frequency_penalty: float | None = None
    logit_bias: dict[str, float] | None = None
    logprobs: bool | None = None
    max_completion_tokens: int | None = None
    max_tokens: int | None = None
    metadata: dict[str, str] | None = None
    provider: dict[str, Any] | None = None
    plugins: list[dict[str, Any]] | None = None
    session_id: str | None = None
    trace: dict[str, Any] | None = None
    modalities: list[str] | None = None
    models: list[Any] | None = None
    n: int | None = None
    parallel_tool_calls: bool | None = None
    prediction: dict[str, Any] | None = None
    presence_penalty: float | None = None
    prompt_cache_key: str | None = None
    prompt_cache_retention: str | None = None
    reasoning_effort: str | None = None
    reasoning_summary: str | None = None
    reasoning: dict[str, Any] | None = None
    response_format: dict[str, Any] | None = None
    safety_identifier: str | None = None
    seed: int | None = None
    service_tier: str | None = None
    stop: str | list[str] | None = None
    store: bool | None = None
    stream_options: dict[str, Any] | None = None
    temperature: float | None = None
    debug: dict[str, Any] | None = None
    image_config: dict[str, Any] | None = None
    tool_choice: str | dict[str, Any] | None = None
    tools: list[dict[str, Any]] | None = None
    top_logprobs: int | None = None
    top_p: float | None = None
    user: str | None = None
    verbosity: str | None = None
    web_search_options: dict[str, Any] | None = None
    # Unrecognized request fields preserved for pass-through.
    extra: dict[str, Any] = field(default_factory=dict)
@dataclass(slots=True)
class ProviderChatRequest:
    """Chat request as delivered to a single provider.

    Same shape as CoreChatRequest, except ``model`` is the upstream model
    id (the router has already stripped the 'provider/' prefix).
    """
    model: str
    messages: list[CoreMessage]
    stream: bool = True
    audio: dict[str, Any] | None = None
    frequency_penalty: float | None = None
    logit_bias: dict[str, float] | None = None
    logprobs: bool | None = None
    max_completion_tokens: int | None = None
    max_tokens: int | None = None
    metadata: dict[str, str] | None = None
    provider: dict[str, Any] | None = None
    plugins: list[dict[str, Any]] | None = None
    session_id: str | None = None
    trace: dict[str, Any] | None = None
    modalities: list[str] | None = None
    models: list[Any] | None = None
    n: int | None = None
    parallel_tool_calls: bool | None = None
    prediction: dict[str, Any] | None = None
    presence_penalty: float | None = None
    prompt_cache_key: str | None = None
    prompt_cache_retention: str | None = None
    reasoning_effort: str | None = None
    reasoning_summary: str | None = None
    reasoning: dict[str, Any] | None = None
    response_format: dict[str, Any] | None = None
    safety_identifier: str | None = None
    seed: int | None = None
    service_tier: str | None = None
    stop: str | list[str] | None = None
    store: bool | None = None
    stream_options: dict[str, Any] | None = None
    temperature: float | None = None
    debug: dict[str, Any] | None = None
    image_config: dict[str, Any] | None = None
    tool_choice: str | dict[str, Any] | None = None
    tools: list[dict[str, Any]] | None = None
    top_logprobs: int | None = None
    top_p: float | None = None
    user: str | None = None
    verbosity: str | None = None
    web_search_options: dict[str, Any] | None = None
    # Unrecognized request fields preserved for pass-through.
    extra: dict[str, Any] = field(default_factory=dict)
@dataclass(slots=True)
class ProviderModel:
    """A model as reported by an upstream provider's listing endpoint."""
    id: str
    name: str | None = None
    created: int | None = None
    description: str | None = None
    context_length: int | None = None
    architecture: dict[str, Any] | None = None
    pricing: dict[str, Any] | None = None
    supported_parameters: list[str] | None = None
    settings: dict[str, Any] | None = None
    opencode: dict[str, Any] | None = None
    owned_by: str | None = None
@dataclass(slots=True)
class CoreModel:
    """Model entry exposed by the router's /models listing."""
    id: str
    object: str = "model"  # OpenAI-style object tag
    created: int = 0
    owned_by: str = "wzray"  # default owner label for routed models
    name: str | None = None
    provider_display_name: str | None = None
    description: str | None = None
    context_length: int | None = None
    architecture: dict[str, Any] | None = None
    pricing: dict[str, Any] | None = None
    supported_parameters: list[str] | None = None
    settings: dict[str, Any] | None = None
    opencode: dict[str, Any] | None = None
    # Raw per-model override taken from the provider's models config.
    config_override: dict[str, Any] | None = None
@dataclass(slots=True)
class CoreChunk:
    """One streaming delta in the internal normalized format."""
    index: int = 0  # choice index
    role: str | None = None  # set once at stream start (e.g. "assistant")
    content: str | None = None
    reasoning_content: str | None = None
    reasoning_details: list[dict[str, Any]] | None = None
    tool_calls: list[dict[str, Any]] | None = None
    finish_reason: str | None = None  # set only on the terminal chunk

26
app/dependencies.py Normal file
View file

@ -0,0 +1,26 @@
"""Dependency wiring for app services."""
from __future__ import annotations
import os
from functools import lru_cache
from pathlib import Path
from app.config.loader import load_config
from app.core.router import RouterCore
from app.providers.factory import build_provider_registry
def _resolve_paths() -> tuple[Path, Path]:
    """Resolve config and auth file paths from environment variables.

    ``AI_ROUTER_DATA_DIR`` (default ".") is the base directory;
    ``AI_ROUTER_CONFIG`` / ``AI_ROUTER_AUTH`` override the individual files.
    """
    base_dir = Path(os.getenv("AI_ROUTER_DATA_DIR", "."))
    config_env = os.getenv("AI_ROUTER_CONFIG")
    auth_env = os.getenv("AI_ROUTER_AUTH")
    config_path = Path(config_env) if config_env is not None else base_dir / "config.yml"
    auth_path = Path(auth_env) if auth_env is not None else base_dir / "auth.yml"
    return config_path, auth_path
@lru_cache
def get_router_core() -> RouterCore:
    """Build and return the process-wide RouterCore.

    ``lru_cache`` with no arguments makes this a lazy singleton: config is
    loaded and the provider registry is built on the first call only.
    """
    config_path, auth_path = _resolve_paths()
    loaded = load_config(config_path=config_path, auth_path=auth_path)
    providers = build_provider_registry(loaded)
    return RouterCore(providers=providers)

18
app/main.py Normal file
View file

@ -0,0 +1,18 @@
"""Application entrypoint."""
from __future__ import annotations
from fastapi import FastAPI
from app.api.middleware import install_request_logging
from app.api.routes import router as api_router
def create_app() -> FastAPI:
    """Build the FastAPI app with request logging and API routes installed."""
    app = FastAPI(title="AI Router", version="0.1.0")
    install_request_logging(app)
    app.include_router(api_router)
    return app
# Module-level ASGI application, referenced by uvicorn as "app.main:app".
app = create_app()

View file

@ -0,0 +1 @@
"""Provider implementations."""

65
app/providers/base.py Normal file
View file

@ -0,0 +1,65 @@
"""Provider abstraction and shared behavior."""
from __future__ import annotations
from abc import ABC, abstractmethod
from collections.abc import AsyncIterator
from typing import TYPE_CHECKING
from typing import Any
from app.core.errors import ModelNotAllowedError
from app.core.types import CoreChunk, ProviderChatRequest, ProviderModel
if TYPE_CHECKING:
from app.config.models import LoadedProviderConfig
class BaseProvider(ABC):
    """Common interface plus shared allow-list behavior for providers."""
    def __init__(
        self,
        *,
        name: str,
        base_url: str,
        api_type: str,
        whitelist: list[str] | None = None,
        blacklist: list[str] | None = None,
    ) -> None:
        self.name = name
        self.base_url = base_url
        self.api_type = api_type
        self.whitelist = whitelist
        self.blacklist = blacklist
        # Optional presentation/config attributes, filled in after
        # construction by the provider factory.
        self.display_name: str | None = None
        self.models_config: dict[str, dict[str, Any]] | None = None
    def ensure_model_allowed(self, model: str) -> None:
        """Raise ModelNotAllowedError when *model* fails the allow-lists."""
        passes_whitelist = self.whitelist is None or model in self.whitelist
        if not passes_whitelist:
            raise ModelNotAllowedError(
                f"Model '{model}' is not in whitelist for provider '{self.name}'"
            )
        is_blacklisted = self.blacklist is not None and model in self.blacklist
        if is_blacklisted:
            raise ModelNotAllowedError(
                f"Model '{model}' is blacklisted for provider '{self.name}'"
            )
    def is_model_allowed(self, model: str) -> bool:
        """Non-raising variant of ``ensure_model_allowed``."""
        passes_whitelist = self.whitelist is None or model in self.whitelist
        passes_blacklist = self.blacklist is None or model not in self.blacklist
        return passes_whitelist and passes_blacklist
    @classmethod
    @abstractmethod
    def from_config(cls, config: "LoadedProviderConfig") -> "BaseProvider":
        """Create provider instance from merged runtime config."""
    @abstractmethod
    def stream_chat(self, request: ProviderChatRequest) -> AsyncIterator[CoreChunk]:
        """Execute streaming chat request and yield internal chunks."""
    @abstractmethod
    async def list_models(self) -> list[ProviderModel]:
        """List upstream models supported by provider."""

View file

@ -0,0 +1,5 @@
"""Codex-responses provider package."""
from app.providers.codex_responses.provider import CodexResponsesProvider
__all__ = ["CodexResponsesProvider"]

View file

@ -0,0 +1,84 @@
"""Model list mapping for codex responses provider."""
from __future__ import annotations
from typing import Any
from app.core.types import ProviderModel
from app.providers.codex_responses.utils import _to_dict
def _coerce_model_items(raw: Any) -> list[dict[str, Any]]:
    """Normalize a models-list response of unknown shape into a list of dicts.

    Precedence: ``raw.data`` attribute, then ``raw.models`` attribute, then
    *raw* itself when it is a list, then the ``"models"`` / ``"data"`` keys
    when *raw* is a dict.  Anything else yields an empty list.
    """
    attr_candidates = (
        getattr(raw, "data", None),
        getattr(raw, "models", None),
        raw if isinstance(raw, list) else None,
    )
    for candidate in attr_candidates:
        if isinstance(candidate, list):
            return [_to_dict(item) for item in candidate]
    if isinstance(raw, dict):
        for key in ("models", "data"):
            value = raw.get(key)
            if isinstance(value, list):
                return [_to_dict(item) for item in value]
    return []
def _to_provider_model(raw: dict[str, Any]) -> ProviderModel:
    """Map one raw model dict from the codex listing into a ProviderModel.

    Tolerates several upstream shapes via fallbacks: id falls back to
    "slug", name to "display_name", and context_length to
    "context_window" and then top_provider.context_length.

    Raises:
        ValueError: When neither "id" nor "slug" is a string.
    """
    model_id = raw.get("id")
    if not isinstance(model_id, str):
        # Some listings use "slug" instead of "id".
        model_id = raw.get("slug") if isinstance(raw.get("slug"), str) else None
        if model_id is None:
            raise ValueError("Codex model item missing id/slug")
    arch = raw.get("architecture")
    if isinstance(arch, dict):
        # Drop the tokenizer field; the rest is passed through verbatim.
        arch = {k: v for k, v in arch.items() if k != "tokenizer"}
    else:
        # No architecture dict: synthesize one from input_modalities if present.
        input_modalities = raw.get("input_modalities")
        if isinstance(input_modalities, list):
            arch = {"input_modalities": input_modalities}
        else:
            arch = None
    supported = raw.get("supported_parameters")
    supported_parameters = None
    if isinstance(supported, list):
        supported_parameters = [str(v) for v in supported]
    # Context length fallback chain: context_length -> context_window ->
    # top_provider.context_length -> None.
    context_length = raw.get("context_length")
    if not isinstance(context_length, int):
        context_length = raw.get("context_window")
        if not isinstance(context_length, int):
            top_provider = raw.get("top_provider")
            if isinstance(top_provider, dict) and isinstance(
                top_provider.get("context_length"), int
            ):
                context_length = top_provider.get("context_length")
            else:
                context_length = None
    return ProviderModel(
        id=model_id,
        name=(
            raw.get("name")
            if isinstance(raw.get("name"), str)
            else raw.get("display_name")
            if isinstance(raw.get("display_name"), str)
            else None
        ),
        description=raw.get("description")
        if isinstance(raw.get("description"), str)
        else None,
        context_length=context_length,
        architecture=arch,
        pricing=raw.get("pricing") if isinstance(raw.get("pricing"), dict) else None,
        supported_parameters=supported_parameters,
        settings=raw.get("settings") if isinstance(raw.get("settings"), dict) else None,
        opencode=raw.get("opencode") if isinstance(raw.get("opencode"), dict) else None,
        created=raw.get("created") if isinstance(raw.get("created"), int) else None,
        owned_by=raw.get("owned_by") if isinstance(raw.get("owned_by"), str) else None,
    )

View file

@ -0,0 +1,129 @@
"""OAuth helper for codex responses provider."""
from __future__ import annotations
import asyncio
import base64
import json
import time
from dataclasses import dataclass
from typing import Any
import httpx
from app.config.models import OAuthAuth
# JWT claim key under which OpenAI access tokens carry auth metadata
# (including the ChatGPT account id).
JWT_AUTH_CLAIM_PATH = "https://api.openai.com/auth"
# Token endpoint used for the refresh_token grant.
OAUTH_TOKEN_URL = "https://auth.openai.com/oauth/token"
@dataclass(slots=True)
class OAuthData:
    """Resolved oauth credentials: bearer token plus ready-to-send headers."""
    token: str  # current access token
    headers: dict[str, str]  # Authorization (and account id) headers
class CodexOAuthProvider:
    """Keeps oauth state and refreshes access token when required."""
    def __init__(
        self,
        *,
        auth: OAuthAuth,
        timeout_seconds: float = 30.0,
        token_url: str = OAUTH_TOKEN_URL,
    ) -> None:
        self._access = auth.access
        self._refresh = auth.refresh
        # Expiry timestamp; compared against epoch milliseconds in
        # _is_expired.
        self._expires = auth.expires
        # Both ids are decoded from the access token's JWT claims.
        self._client_id = _extract_client_id(self._access)
        self._account_id = _extract_account_id(self._access)
        self._timeout_seconds = timeout_seconds
        self._token_url = token_url
        # Guards refresh so concurrent callers trigger a single request.
        self._lock = asyncio.Lock()
    async def get(self) -> OAuthData:
        """Return current credentials, refreshing first when near expiry."""
        if self._is_expired():
            async with self._lock:
                # Re-check under the lock: another task may have refreshed.
                if self._is_expired():
                    await self._refresh_token()
        headers = {"Authorization": f"Bearer {self._access}"}
        if self._account_id:
            headers["ChatGPT-Account-Id"] = self._account_id
        return OAuthData(token=self._access, headers=headers)
    async def get_headers(self) -> dict[str, str]:
        """Convenience wrapper returning only the auth headers."""
        oauth = await self.get()
        return oauth.headers
    def _is_expired(self) -> bool:
        # Consider the token expired 60 seconds early to avoid races at
        # the boundary; timestamps are epoch milliseconds.
        return int(time.time() * 1000) >= self._expires - 60_000
    async def _refresh_token(self) -> None:
        """Exchange the refresh token for a new access token.

        Raises:
            ValueError: On an HTTP error status or a response without a
                string access_token.
        """
        payload: dict[str, Any] = {
            "grant_type": "refresh_token",
            "refresh_token": self._refresh,
        }
        if self._client_id:
            payload["client_id"] = self._client_id
        async with httpx.AsyncClient(timeout=self._timeout_seconds) as client:
            response = await client.post(self._token_url, json=payload)
        if response.status_code >= 400:
            raise ValueError(f"OAuth refresh failed with status {response.status_code}")
        data = response.json()
        access = data.get("access_token")
        refresh = data.get("refresh_token")
        expires_in = data.get("expires_in")
        if not isinstance(access, str):
            raise ValueError("OAuth refresh response missing access_token")
        self._access = access
        if isinstance(refresh, str):
            # The server may rotate the refresh token; keep the latest.
            self._refresh = refresh
        if isinstance(expires_in, int):
            self._expires = int(time.time() * 1000) + expires_in * 1000
        # Re-derive ids from the new token; keep the old client_id as a
        # fallback when the new token omits it.
        self._account_id = _extract_account_id(self._access)
        self._client_id = _extract_client_id(self._access) or self._client_id
def _extract_account_id(token: str) -> str | None:
    """Pull the ChatGPT account id from the JWT auth claim, if present."""
    payload = _decode_jwt_payload(token)
    if payload is None:
        return None
    auth_claim = payload.get(JWT_AUTH_CLAIM_PATH)
    if isinstance(auth_claim, dict):
        account_id = auth_claim.get("chatgpt_account_id")
        if isinstance(account_id, str):
            return account_id
    return None
def _extract_client_id(token: str) -> str | None:
    """Pull the oauth client_id claim from the JWT payload, if present."""
    payload = _decode_jwt_payload(token)
    if payload is not None:
        client_id = payload.get("client_id")
        if isinstance(client_id, str):
            return client_id
    return None
def _decode_jwt_payload(token: str) -> dict[str, Any] | None:
parts = token.split(".")
if len(parts) != 3:
return None
try:
payload = _b64url_decode(parts[1])
parsed = json.loads(payload)
except Exception:
return None
return parsed if isinstance(parsed, dict) else None
def _b64url_decode(data: str) -> str:
    """Decode URL-safe base64 text, tolerating missing '=' padding."""
    padding = "=" * (-len(data) % 4)
    return base64.urlsafe_b64decode(data + padding).decode("utf-8")

View file

@ -0,0 +1,144 @@
"""Codex responses provider implementation."""
from __future__ import annotations
from collections.abc import AsyncIterator
import logging
import random
import string
from typing import TYPE_CHECKING, Any, cast
from openai import AsyncOpenAI, OpenAIError
from app.config.models import LoadedProviderConfig, OAuthAuth, UrlAuth
from app.core.errors import UpstreamProviderError
from app.core.types import CoreChunk, ProviderChatRequest, ProviderModel
from app.providers.base import BaseProvider
from app.providers.codex_responses.models import _coerce_model_items, _to_provider_model
from app.providers.codex_responses.oauth import CodexOAuthProvider
from app.providers.codex_responses.stream import _map_response_stream_to_chunks
from app.providers.codex_responses.translator import build_responses_create_args
from app.providers.token_url_auth import TokenUrlAuthProvider
from app.providers.registry import provider
if TYPE_CHECKING:
from openai.resources.responses.responses import AsyncResponses
logger = logging.getLogger(__name__)
@provider(type="codex-responses")
class CodexResponsesProvider(BaseProvider):
    """Provider that talks to OpenAI responses API with oauth auth."""
    def __init__(
        self,
        *,
        name: str,
        base_url: str,
        oauth: CodexOAuthProvider | TokenUrlAuthProvider,
        client_version: str = "0.100.0",
        whitelist: list[str] | None = None,
        blacklist: list[str] | None = None,
        client: Any | None = None,  # injectable for tests
    ) -> None:
        super().__init__(
            name=name,
            base_url=base_url,
            api_type="codex-responses",
            whitelist=whitelist,
            blacklist=blacklist,
        )
        self._oauth = oauth
        self._client_version = client_version
        # The responses endpoint lives under the "/codex" path prefix.
        responses_base_url = f"{base_url.rstrip('/')}/codex"
        # Real auth is supplied per-request via extra_headers; the api_key
        # here is a placeholder the SDK requires.
        self._client = client or AsyncOpenAI(
            api_key="oauth", base_url=responses_base_url
        )
    @classmethod
    def from_config(cls, config: LoadedProviderConfig) -> BaseProvider:
        """Build the provider from config, choosing the auth backend.

        Raises:
            ValueError: When the config's auth is neither oauth nor url.
        """
        oauth: CodexOAuthProvider | TokenUrlAuthProvider
        if isinstance(config.auth, OAuthAuth):
            oauth = CodexOAuthProvider(auth=config.auth)
        elif isinstance(config.auth, UrlAuth):
            oauth = TokenUrlAuthProvider(token_url=config.auth.url)
        else:
            raise ValueError(
                f"Provider '{config.name}' type 'codex-responses' requires oauth or url auth"
            )
        provider = cls(
            name=config.name,
            base_url=config.url,
            oauth=oauth,
            whitelist=config.whitelist,
            blacklist=config.blacklist,
        )
        provider.display_name = config.display_name
        provider.models_config = config.models
        return provider
    async def stream_chat(
        self, request: ProviderChatRequest
    ) -> AsyncIterator[CoreChunk]:
        """Stream a responses-API chat, mapped to internal chunks.

        Raises:
            UpstreamProviderError: On any SDK or stream-mapping failure.
        """
        auth = await self._oauth.get_headers()
        try:
            create_args, extra_body = build_responses_create_args(request)
            extra_headers = dict(auth)
            # Per-request identification headers expected upstream.
            # NOTE(review): session_id uses `random`, not `secrets` —
            # presumably fine as a non-secret correlation id.
            extra_headers.update(
                {
                    "session_id": "ses_"
                    + "".join(
                        random.choice(string.ascii_letters + string.digits)
                        for _ in range(28)
                    ),
                    "originator": "opencode",
                }
            )
            stream = await cast("AsyncResponses", self._client.responses).create(
                **create_args,
                extra_headers=extra_headers,
                extra_body=extra_body,
            )
            async for chunk in _map_response_stream_to_chunks(
                stream, provider_name=self.name
            ):
                yield chunk
        except OpenAIError as exc:
            logger.exception("codex upstream openai error provider=%s", self.name)
            raise UpstreamProviderError(
                f"Provider '{self.name}' upstream request failed: {exc}"
            ) from exc
        except Exception as exc:
            logger.exception("codex provider stream failure provider=%s", self.name)
            raise UpstreamProviderError(
                f"Provider '{self.name}' failed while streaming: {exc}"
            ) from exc
    async def list_models(self) -> list[ProviderModel]:
        """List upstream models, skipping items without an id or slug.

        Raises:
            UpstreamProviderError: On any SDK or mapping failure.
        """
        auth = await self._oauth.get_headers()
        try:
            response = await self._client.models.list(
                extra_headers=auth,
                extra_query={"client_version": self._client_version},
            )
            items = _coerce_model_items(response)
            return [
                _to_provider_model(item)
                for item in items
                if item.get("id") or item.get("slug")
            ]
        except OpenAIError as exc:
            logger.exception("codex models openai error provider=%s", self.name)
            raise UpstreamProviderError(
                f"Provider '{self.name}' models request failed: {exc}"
            ) from exc
        except Exception as exc:
            logger.exception("codex models failure provider=%s", self.name)
            raise UpstreamProviderError(
                f"Provider '{self.name}' failed while listing models: {exc}"
            ) from exc

View file

@ -0,0 +1,518 @@
"""Streaming event mapping for codex responses provider."""
from __future__ import annotations
from collections.abc import AsyncIterator, Mapping
from dataclasses import dataclass, field
from typing import Any
from app.core.errors import UpstreamProviderError
from app.core.types import CoreChunk
from app.providers.codex_responses.utils import _as_dict, _as_str
# Responses-API event types that carry incremental assistant text.
_TEXT_DELTA_EVENTS = {"response.output_text.delta", "response.refusal.delta"}
# Event types that carry the finalized text for a content part.
_TEXT_DONE_EVENTS = {"response.output_text.done", "response.refusal.done"}
# Event types that carry tool-call argument deltas or final arguments.
_TOOL_CALL_DELTA_EVENTS = {
    "response.function_call_arguments.delta",
    "response.function_call_arguments.done",
    "response.custom_tool_call_input.delta",
    "response.custom_tool_call_input.done",
}
# Output item types that count as tool calls for finish_reason purposes
# but are not mapped to function-call chunks.
_NON_FUNCTION_TOOL_CALL_ITEMS = {
    "custom_tool_call",
    "computer_call",
    "code_interpreter_call",
    "web_search_call",
    "file_search_call",
    "shell_call",
    "apply_patch_call",
    "mcp_call",
}
@dataclass(slots=True)
class _ResponseStreamState:
    """Mutable per-stream bookkeeping for event-to-chunk mapping."""
    sent_role: bool = False  # assistant role chunk emitted yet?
    emitted_text: bool = False  # any content text emitted?
    emitted_reasoning: bool = False  # any reasoning text emitted?
    saw_tool_call: bool = False  # influences finish_reason on completion
    next_tool_call_index: int = 0  # next free tool-call slot index
    # Tool calls whose final arguments were already emitted.
    tool_call_finalized: set[str] = field(default_factory=set)
    # item_id -> function name, once known.
    tool_call_names: dict[str, str] = field(default_factory=dict)
    # item_id -> assigned tool-call index.
    tool_call_indexes: dict[str, int] = field(default_factory=dict)
    # Argument deltas buffered before the function name is known.
    pending_tool_arguments: dict[str, str] = field(default_factory=dict)
    # Argument deltas already streamed out, per item_id.
    emitted_tool_arguments: dict[str, str] = field(default_factory=dict)
    # (item_id, content_index) pairs whose text was already emitted.
    emitted_content_keys: set[tuple[str, int]] = field(default_factory=set)
    # (item_id, content_index) pairs that produced streaming deltas.
    saw_delta_keys: set[tuple[str, int]] = field(default_factory=set)
    # Reasoning item ids whose encrypted details were already emitted.
    emitted_reasoning_item_ids: set[str] = field(default_factory=set)
async def _map_response_stream_to_chunks(
    stream: AsyncIterator[Any], *, provider_name: str
) -> AsyncIterator[CoreChunk]:
    """Translate responses-API stream events into internal CoreChunks.

    Dedup state in ``_ResponseStreamState`` ensures each piece of text,
    reasoning, and tool-call data is emitted exactly once even though the
    upstream sends both delta and done/summary events.

    Raises:
        UpstreamProviderError: When the upstream reports 'response.failed'.
    """
    state = _ResponseStreamState()
    async for event in stream:
        event_type = _event_type(event)
        # Incremental assistant text (including refusals).
        if event_type in _TEXT_DELTA_EVENTS:
            for chunk in _handle_text_delta(event, state):
                yield chunk
            continue
        # Incremental reasoning summary text.
        if event_type == "response.reasoning_summary_text.delta":
            for chunk in _emit_reasoning_chunk(_first_string(event, "delta"), state):
                yield chunk
            continue
        if event_type == "response.reasoning_summary_text.done":
            details_chunk = _reasoning_summary_detail_chunk(event)
            if details_chunk is not None:
                for chunk in _wrap_assistant_chunk(details_chunk, state):
                    yield chunk
            # Fallback: emit the full summary text only if no deltas
            # produced reasoning text earlier.
            if not state.emitted_reasoning:
                for chunk in _emit_reasoning_chunk(_first_string(event, "text"), state):
                    yield chunk
            continue
        # Encrypted reasoning details appear as soon as the item is added.
        if event_type == "response.output_item.added":
            for chunk in _handle_output_item_added(event, state):
                yield chunk
            continue
        # Finalized text; emitted only when no deltas were streamed.
        if event_type in _TEXT_DONE_EVENTS:
            for chunk in _handle_text_done(event, state):
                yield chunk
            continue
        if event_type == "response.content_part.done":
            for chunk in _handle_content_part_done(event, state):
                yield chunk
            continue
        if event_type == "response.output_item.done":
            for chunk in _handle_output_item_done(event, state):
                yield chunk
            continue
        if event_type in _TOOL_CALL_DELTA_EVENTS:
            state.saw_tool_call = True
            tool_chunk = _tool_call_delta_from_event(
                event,
                event_type=event_type,
                state=state,
            )
            if tool_chunk is not None:
                for chunk in _wrap_assistant_chunk(tool_chunk, state):
                    yield chunk
            continue
        if event_type == "response.completed":
            # Tool-only turns finish with "tool_calls"; otherwise "stop".
            finish_reason = (
                "tool_calls"
                if state.saw_tool_call and not state.emitted_text
                else "stop"
            )
            yield CoreChunk(finish_reason=finish_reason)
            continue
        if event_type == "response.incomplete":
            reason = _extract_incomplete_finish_reason(event)
            yield CoreChunk(finish_reason=reason)
            continue
        if event_type == "response.failed":
            raise UpstreamProviderError(
                f"Provider '{provider_name}' upstream response failed"
            )
def _event_type(event: Any) -> str:
    """Return the event's ``type`` attribute, or "" when absent/non-string."""
    event_type = getattr(event, "type", None)
    return event_type if isinstance(event_type, str) else ""
def _ensure_assistant_role(state: _ResponseStreamState) -> CoreChunk | None:
    """Return the one-time assistant role chunk, or None once it was sent."""
    if not state.sent_role:
        state.sent_role = True
        return CoreChunk(role="assistant")
    return None
def _wrap_assistant_chunk(
    chunk: CoreChunk | None, state: _ResponseStreamState
) -> list[CoreChunk]:
    """Prefix *chunk* with the role chunk if it has not been emitted yet."""
    if chunk is None:
        return []
    chunks: list[CoreChunk] = []
    role_chunk = _ensure_assistant_role(state)
    if role_chunk is not None:
        chunks.append(role_chunk)
    chunks.append(chunk)
    return chunks
def _emit_text_chunk(text: str | None, state: _ResponseStreamState) -> list[CoreChunk]:
    """Wrap non-empty *text* as a content chunk, marking text as emitted."""
    if not text:
        return []
    chunk = CoreChunk(content=text)
    state.emitted_text = True
    return _wrap_assistant_chunk(chunk, state)
def _emit_reasoning_chunk(
    text: str | None, state: _ResponseStreamState
) -> list[CoreChunk]:
    """Wrap non-empty reasoning *text* as a chunk, marking it as emitted."""
    if not text:
        return []
    chunk = CoreChunk(reasoning_content=text)
    state.emitted_reasoning = True
    return _wrap_assistant_chunk(chunk, state)
def _handle_text_delta(event: Any, state: _ResponseStreamState) -> list[CoreChunk]:
    """Emit a streaming text delta and record its (item, content) key."""
    content_key = _content_key(event)
    if content_key is not None:
        # Remember that this part streamed deltas, so its done-event is
        # not emitted a second time.
        state.saw_delta_keys.add(content_key)
    return _emit_text_chunk(_first_string(event, "delta"), state)
def _handle_text_done(event: Any, state: _ResponseStreamState) -> list[CoreChunk]:
    """On text-done, emit the full text only if no deltas streamed for it."""
    content_key = _content_key(event)
    streamed_already = content_key is not None and content_key in state.saw_delta_keys
    if streamed_already:
        state.emitted_content_keys.add(content_key)
        return []
    chunks = _emit_text_chunk(_first_string(event, "text", "refusal"), state)
    if content_key is not None:
        state.emitted_content_keys.add(content_key)
    return chunks
def _content_part_text(part: Mapping[str, Any]) -> str | None:
    """Extract the string payload from an output_text/refusal content part."""
    part_type = part.get("type")
    if part_type == "output_text":
        value = part.get("text")
    elif part_type == "refusal":
        value = part.get("refusal")
    else:
        return None
    return value if isinstance(value, str) else None
def _handle_content_part_done(
    event: Any, state: _ResponseStreamState
) -> list[CoreChunk]:
    """Fallback emit for a content part that produced no deltas or done text."""
    content_key = _content_key(event)
    if content_key is None:
        return []
    if content_key in state.emitted_content_keys or content_key in state.saw_delta_keys:
        return []
    part_text = _content_part_text(_as_dict(getattr(event, "part", None)))
    chunks = _emit_text_chunk(part_text, state)
    if chunks:
        state.emitted_content_keys.add(content_key)
    return chunks
def _handle_output_item_done(
    event: Any, state: _ResponseStreamState
) -> list[CoreChunk]:
    """Dispatch a finished output item to the right chunk emitter.

    Reasoning items yield encrypted-detail chunks, function calls yield
    tool-call chunks, other tool-like items only flip ``saw_tool_call``,
    and message items emit any content parts not already streamed.
    """
    item = _as_dict(getattr(event, "item", None))
    item_type = _as_str(item.get("type"))
    if item_type == "reasoning":
        reasoning_chunk = _reasoning_encrypted_detail_chunk(
            item=item,
            item_id=_as_str(item.get("id")),
            output_index=getattr(event, "output_index", None),
            state=state,
        )
        return _wrap_assistant_chunk(reasoning_chunk, state)
    if item_type == "function_call":
        state.saw_tool_call = True
        return _wrap_assistant_chunk(
            _tool_call_delta_from_item(item, state=state), state
        )
    if item_type in _NON_FUNCTION_TOOL_CALL_ITEMS:
        # Not mapped to chunks, but still affects the finish_reason.
        state.saw_tool_call = True
        return []
    if item_type != "message":
        return []
    item_id = _as_str(item.get("id"))
    content = item.get("content")
    if item_id is None or not isinstance(content, list):
        return []
    # Emit only the content parts that neither streamed deltas nor were
    # already emitted via done events.
    chunks: list[CoreChunk] = []
    for idx, part in enumerate(content):
        key = (item_id, idx)
        if key in state.emitted_content_keys or key in state.saw_delta_keys:
            continue
        text = _content_part_text(_as_dict(part))
        emitted = _emit_text_chunk(text, state)
        if emitted:
            chunks.extend(emitted)
            state.emitted_content_keys.add(key)
    return chunks
def _handle_output_item_added(
    event: Any, state: _ResponseStreamState
) -> list[CoreChunk]:
    """Emit encrypted reasoning details as soon as a reasoning item appears."""
    item = _as_dict(getattr(event, "item", None))
    if _as_str(item.get("type")) != "reasoning":
        return []
    detail_chunk = _reasoning_encrypted_detail_chunk(
        item=item,
        item_id=_as_str(item.get("id")),
        output_index=getattr(event, "output_index", None),
        state=state,
    )
    return _wrap_assistant_chunk(detail_chunk, state)
def _reasoning_encrypted_detail_chunk(
    *,
    item: dict[str, Any],
    item_id: str | None,
    output_index: Any,
    state: _ResponseStreamState,
) -> CoreChunk | None:
    """Build a reasoning.encrypted detail chunk from a reasoning item.

    Returns None when the item has no encrypted content or its id was
    already emitted (this handler runs on both 'added' and 'done' events).
    """
    encrypted = _as_str(item.get("encrypted_content"))
    if encrypted is None:
        return None
    if item_id is not None:
        # Dedup by item id across added/done events.
        if item_id in state.emitted_reasoning_item_ids:
            return None
        state.emitted_reasoning_item_ids.add(item_id)
    detail: dict[str, Any] = {
        "type": "reasoning.encrypted",
        "data": encrypted,
        "format": "openai-responses-v1",
    }
    if item_id is not None:
        detail["id"] = item_id
    if isinstance(output_index, int):
        detail["index"] = output_index
    return CoreChunk(reasoning_details=[detail])
def _reasoning_summary_detail_chunk(event: Any) -> CoreChunk | None:
    """Build a reasoning.summary detail chunk from a summary-done event."""
    summary_text = _first_string(event, "text")
    if not summary_text:
        return None
    detail: dict[str, Any] = {
        "type": "reasoning.summary",
        "summary": summary_text,
        "format": "openai-responses-v1",
    }
    event_item_id = _as_str(getattr(event, "item_id", None))
    if event_item_id is not None:
        detail["id"] = event_item_id
    index = getattr(event, "summary_index", None)
    if isinstance(index, int):
        detail["index"] = index
    return CoreChunk(reasoning_details=[detail])
def _content_key(event: Any) -> tuple[str, int] | None:
    """Identify a content part as (item_id, content_index), when both exist."""
    item_id = _as_str(getattr(event, "item_id", None))
    content_index = getattr(event, "content_index", None)
    if item_id is not None and isinstance(content_index, int):
        return (item_id, content_index)
    return None
def _first_string(event: Any, *field_names: str) -> str | None:
for name in field_names:
value = getattr(event, name, None)
if isinstance(value, str):
return value
return None
def _extract_incomplete_finish_reason(event: Any) -> str:
    """Map an incomplete-response reason onto an OpenAI-style finish_reason."""
    response_dict = _as_dict(getattr(event, "response", None))
    details_dict = _as_dict(response_dict.get("incomplete_details"))
    reason = _as_str(details_dict.get("reason"))
    if reason == "max_output_tokens":
        return "length"
    if reason == "content_filter":
        return "content_filter"
    # Unknown or missing reasons degrade to a plain stop.
    return "stop"
def _tool_call_delta_from_event(
    event: Any,
    *,
    event_type: str,
    state: _ResponseStreamState,
) -> CoreChunk | None:
    """Translate tool-call argument events into tool-call delta chunks.

    Handles both the incremental ``*.delta`` events and the terminal
    ``*.done`` events for function and custom tool calls.  Argument text
    that arrives before the tool name is known is buffered in *state* and
    flushed on the done event; text already streamed out is tracked so the
    done event never re-emits it.  Returns None when nothing is emittable.
    """
    item_id = _as_str(getattr(event, "item_id", None))
    if item_id is None:
        return None
    index = _get_tool_call_index(item_id, state)
    if event_type in {
        "response.function_call_arguments.delta",
        "response.custom_tool_call_input.delta",
    }:
        if item_id in state.tool_call_finalized:
            return None
        arguments_delta = _as_str(getattr(event, "delta", None))
        if not arguments_delta:
            return None
        function_name = state.tool_call_names.get(item_id)
        if function_name is None:
            # Name unknown yet: buffer the text instead of emitting it.
            state.pending_tool_arguments[item_id] = (
                state.pending_tool_arguments.get(item_id, "") + arguments_delta
            )
            return None
        # Record what went out so the done event can emit only the tail.
        state.emitted_tool_arguments[item_id] = (
            state.emitted_tool_arguments.get(item_id, "") + arguments_delta
        )
        return _build_tool_call_chunk(
            item_id=item_id,
            index=index,
            name=function_name,
            arguments=arguments_delta,
        )
    if event_type in {
        "response.function_call_arguments.done",
        "response.custom_tool_call_input.done",
    }:
        name = _as_str(getattr(event, "name", None))
        # Function calls carry "arguments"; custom tool calls carry "input".
        arguments = _as_str(getattr(event, "arguments", None)) or _as_str(
            getattr(event, "input", None)
        )
        if item_id in state.tool_call_finalized:
            return None
        if name:
            state.tool_call_names[item_id] = name
        function_name = state.tool_call_names.get(item_id)
        if function_name is None:
            # Never learned a name for this call; nothing emittable.
            return None
        buffered = state.pending_tool_arguments.pop(item_id, "")
        emitted = state.emitted_tool_arguments.pop(item_id, "")
        done_arguments = _resolve_done_tool_arguments(
            arguments=arguments,
            buffered=buffered,
            emitted=emitted,
        )
        state.tool_call_finalized.add(item_id)
        if done_arguments is None:
            return None
        return _build_tool_call_chunk(
            item_id=item_id,
            index=index,
            name=function_name,
            arguments=done_arguments,
        )
    return None
def _tool_call_delta_from_item(
    item: dict[str, Any],
    *,
    state: _ResponseStreamState,
) -> CoreChunk | None:
    """Finalize a completed function_call output item as a tool-call chunk.

    Returns None for non-function items, items missing id/name, items
    already finalized, or when no argument text remains to emit.
    """
    if _as_str(item.get("type")) != "function_call":
        return None
    item_id = _as_str(item.get("id"))
    name = _as_str(item.get("name"))
    if item_id is None or name is None:
        return None
    if item_id in state.tool_call_finalized:
        return None
    index = _get_tool_call_index(item_id, state)
    state.tool_call_names[item_id] = name
    arguments = _as_str(item.get("arguments"))
    # Merge text buffered before the name was known with what was already
    # streamed out, keeping only the not-yet-emitted portion.
    buffered = state.pending_tool_arguments.pop(item_id, "")
    emitted = state.emitted_tool_arguments.pop(item_id, "")
    function_arguments = _resolve_done_tool_arguments(
        arguments=arguments,
        buffered=buffered,
        emitted=emitted,
    )
    if function_arguments is None:
        return None
    # NOTE: finalized is marked only when a chunk is actually emitted here,
    # unlike the event path which marks it before the None check.
    state.tool_call_finalized.add(item_id)
    return _build_tool_call_chunk(
        item_id=item_id,
        index=index,
        name=name,
        arguments=function_arguments,
    )
def _get_tool_call_index(item_id: str, state: _ResponseStreamState) -> int:
index = state.tool_call_indexes.get(item_id)
if index is not None:
return index
index = state.next_tool_call_index
state.tool_call_indexes[item_id] = index
state.next_tool_call_index += 1
return index
def _build_tool_call_chunk(
    *, item_id: str, index: int, name: str, arguments: str
) -> CoreChunk:
    """Wrap a single function tool-call delta in a CoreChunk."""
    call: dict[str, Any] = {
        "index": index,
        "id": item_id,
        "type": "function",
        "function": {"name": name, "arguments": arguments},
    }
    return CoreChunk(tool_calls=[call])
def _tool_arguments_tail(arguments: str | None, emitted: str) -> str:
if arguments is None:
return ""
if not emitted:
return arguments
if arguments.startswith(emitted):
return arguments[len(emitted) :]
return ""
def _resolve_done_tool_arguments(
*, arguments: str | None, buffered: str, emitted: str
) -> str | None:
if buffered:
if arguments is None:
return buffered
return arguments
if arguments is None:
return None if emitted else ""
if not emitted:
return arguments
tail = _tool_arguments_tail(arguments, emitted)
return tail or None

View file

@ -0,0 +1,656 @@
"""Translate chat-completions requests to OpenAI Responses create params."""
from __future__ import annotations
import json
from typing import Any, cast
from openai.types.responses.easy_input_message_param import EasyInputMessageParam
from openai.types.responses.response_create_params import (
ResponseCreateParamsStreaming,
ToolChoice,
)
from openai.types.responses.response_format_text_config_param import (
ResponseFormatTextConfigParam,
)
from openai.types.responses.response_function_tool_call_param import (
ResponseFunctionToolCallParam,
)
from openai.types.responses.response_function_call_output_item_list_param import (
ResponseFunctionCallOutputItemListParam,
)
from openai.types.responses.response_input_content_param import (
ResponseInputContentParam,
)
from openai.types.responses.response_input_param import (
FunctionCallOutput,
ResponseInputItemParam,
ResponseInputParam,
)
from openai.types.responses.response_text_config_param import ResponseTextConfigParam
from openai.types.responses.tool_param import ToolParam
from openai.types.shared_params.reasoning import Reasoning
from app.core.types import CoreMessage, ProviderChatRequest
def build_responses_create_args(
    request: ProviderChatRequest,
) -> tuple[ResponseCreateParamsStreaming, dict[str, Any]]:
    """Translate a provider chat request into Responses API create params.

    Returns the streaming ``create()`` kwargs plus an ``extra_body`` dict
    of passthrough params the Responses SDK has no first-class field for.
    """
    messages = list(request.messages)
    # The first system/developer message is lifted into top-level instructions.
    instructions = _pop_instruction_message(messages)
    args: ResponseCreateParamsStreaming = {
        "model": request.model,
        "input": _build_input_items(messages),
        "stream": True,
        # store=False keeps the call stateless; the encrypted reasoning
        # content is requested in the stream output instead.
        "store": False,
        "include": ["reasoning.encrypted_content"],
        "parallel_tool_calls": request.parallel_tool_calls
        if request.parallel_tool_calls is not None
        else True,
    }
    if instructions is not None:
        args["instructions"] = instructions
    if request.metadata is not None:
        args["metadata"] = request.metadata
    if request.prompt_cache_retention is not None:
        args["prompt_cache_retention"] = cast(Any, request.prompt_cache_retention)
    if request.safety_identifier is not None:
        args["safety_identifier"] = request.safety_identifier
    if request.service_tier is not None:
        args["service_tier"] = cast(Any, request.service_tier)
    if request.top_p is not None:
        args["top_p"] = request.top_p
    if request.tools is not None:
        args["tools"] = _build_tools(request.tools)
    if request.tool_choice is not None:
        args["tool_choice"] = _build_tool_choice(request.tool_choice)
    reasoning = _build_reasoning(request)
    if reasoning is not None:
        args["reasoning"] = reasoning
    text_config = _build_text_config(request)
    if text_config is not None:
        args["text"] = text_config
    return args, _build_extra_body(request)
def _build_extra_body(request: ProviderChatRequest) -> dict[str, Any]:
extra_body = dict(request.extra)
if request.provider is not None:
extra_body["provider"] = request.provider
if request.plugins is not None:
extra_body["plugins"] = request.plugins
if request.session_id is not None:
extra_body["session_id"] = request.session_id
if request.trace is not None:
extra_body["trace"] = request.trace
if request.models is not None:
extra_body["models"] = request.models
if request.debug is not None:
extra_body["debug"] = request.debug
if request.image_config is not None:
extra_body["image_config"] = request.image_config
for key in (
"metadata",
"prompt_cache_retention",
"safety_identifier",
"service_tier",
"top_p",
"tools",
"tool_choice",
"reasoning",
"response_format",
"verbosity",
):
extra_body.pop(key, None)
return extra_body
def _pop_instruction_message(messages: list[CoreMessage]) -> str:
for index, message in enumerate(messages):
if message.role not in {"developer", "system"}:
continue
text = _extract_text(message.content)
if text:
messages.pop(index)
return text
return "You are a helpful assistant."
def _build_input_items(messages: list[CoreMessage]) -> ResponseInputParam:
    """Convert chat messages into Responses input items.

    Raises ValueError for roles this translation does not support.
    """
    items: list[ResponseInputItemParam] = []
    for message in messages:
        # An explicit item type in message.extra bypasses role-based mapping.
        explicit_type = _as_str(message.extra.get("type"))
        if explicit_type is not None and explicit_type != "message":
            items.append(_build_explicit_item(message, explicit_type))
            continue
        if message.role in {"developer", "system"}:
            # developer collapses onto the system role.
            system_message = _build_message_item(message, role_override="system")
            if system_message is not None:
                items.append(system_message)
            continue
        if message.role in {"user", "assistant"}:
            chat_message = _build_message_item(message)
            if chat_message is not None:
                items.append(chat_message)
            if message.role == "assistant":
                # Assistant tool calls travel as separate function_call items.
                items.extend(_build_function_call_items(message))
            continue
        if message.role in {"tool", "function"}:
            items.append(_build_function_output(message))
            continue
        raise ValueError(f"Unsupported message role: {message.role}")
    return items
def _build_message_item(
    message: CoreMessage, role_override: str | None = None
) -> EasyInputMessageParam | None:
    """Build an easy input message item, or None when content is empty."""
    effective_role = role_override or message.role
    parts = _build_message_content(message.content, effective_role)
    if parts is None:
        return None
    # The cast is purely for the type checker; at runtime this is a plain dict.
    return cast(
        "EasyInputMessageParam",
        {"type": "message", "role": effective_role, "content": parts},
    )
def _build_message_content(
    content: Any,
    role: str,
) -> list[dict[str, Any]] | None:
    """Normalize chat message content into Responses content parts.

    Accepts a plain string, a single part dict, or a list of part dicts.
    Returns None when nothing convertible remains, so the caller can skip
    the message.  Unknown part types are silently dropped.  Raises
    ValueError for shapes that are not str/dict/list.
    """
    if content is None or content == "":
        return None
    if isinstance(content, str):
        # Assistant text maps to output_text; everything else is input_text.
        text_part_type = "output_text" if role == "assistant" else "input_text"
        return [{"type": text_part_type, "text": content}]
    if isinstance(content, dict):
        content = [content]
    if not isinstance(content, list):
        raise ValueError("Unsupported message content for responses input")
    out: list[dict[str, Any]] = []
    for part in content:
        if not isinstance(part, dict):
            continue
        part_type = part.get("type")
        cache_control = _extract_cache_control(part)
        # Chat-style text part.
        if part_type == "text" and isinstance(part.get("text"), str):
            text_part_type = "output_text" if role == "assistant" else "input_text"
            text_item: dict[str, Any] = {"type": text_part_type, "text": part["text"]}
            if cache_control is not None:
                text_item["cache_control"] = cache_control
            out.append(text_item)
            continue
        # Already Responses-native text parts pass through unchanged.
        if part_type in {"input_text", "output_text"} and isinstance(
            part.get("text"), str
        ):
            text_item = {"type": part_type, "text": part["text"]}
            if cache_control is not None:
                text_item["cache_control"] = cache_control
            out.append(text_item)
            continue
        # Refusals are re-sent as plain input text.
        if part_type == "refusal" and isinstance(part.get("refusal"), str):
            refusal_item: dict[str, Any] = {
                "type": "input_text",
                "text": part["refusal"],
            }
            if cache_control is not None:
                refusal_item["cache_control"] = cache_control
            out.append(refusal_item)
            continue
        # Chat-style image part: {"image_url": {"url": ..., "detail": ...}}.
        if part_type == "image_url" and isinstance(part.get("image_url"), dict):
            image = part["image_url"]
            image_item: dict[str, Any] = {"type": "input_image", "detail": "auto"}
            if isinstance(image.get("url"), str):
                image_item["image_url"] = image["url"]
            if image.get("detail") in {"low", "high", "auto"}:
                image_item["detail"] = image["detail"]
            if cache_control is not None:
                image_item["cache_control"] = cache_control
            out.append(image_item)
            continue
        # Responses-native image part; None-valued fields are stripped.
        if part_type == "input_image":
            image_item = {
                "type": "input_image",
                "image_url": part.get("image_url"),
                "file_id": part.get("file_id"),
                "detail": part.get("detail")
                if part.get("detail") in {"low", "high", "auto"}
                else "auto",
            }
            if cache_control is not None:
                image_item["cache_control"] = cache_control
            out.append({k: v for k, v in image_item.items() if v is not None})
            continue
        # Base64 audio; only known formats are forwarded, others are dropped.
        if part_type == "input_audio" and isinstance(part.get("input_audio"), dict):
            audio = part["input_audio"]
            audio_format = audio.get("format")
            if isinstance(audio.get("data"), str) and audio_format in {
                "wav",
                "mp3",
                "flac",
                "m4a",
                "ogg",
                "aiff",
                "aac",
                "pcm16",
                "pcm24",
            }:
                audio_item: dict[str, Any] = {
                    "type": "input_audio",
                    "input_audio": {
                        "data": audio["data"],
                        "format": audio_format,
                    },
                }
                if cache_control is not None:
                    audio_item["cache_control"] = cache_control
                out.append(audio_item)
            continue
        # Chat-style wrapped file part: {"file": {...}}.
        if part_type == "file" and isinstance(part.get("file"), dict):
            wrapped = part["file"]
            file_item: dict[str, Any] = {"type": "input_file"}
            if isinstance(wrapped.get("file_data"), str):
                file_item["file_data"] = wrapped["file_data"]
            if isinstance(wrapped.get("file_id"), str):
                file_item["file_id"] = wrapped["file_id"]
            if isinstance(wrapped.get("filename"), str):
                file_item["filename"] = wrapped["filename"]
            if isinstance(wrapped.get("file_url"), str):
                file_item["file_url"] = wrapped["file_url"]
            if cache_control is not None:
                file_item["cache_control"] = cache_control
            out.append(file_item)
            continue
        # Responses-native file part; None-valued fields are stripped.
        if part_type == "input_file":
            file_item = {
                "type": "input_file",
                "file_data": part.get("file_data"),
                "file_id": part.get("file_id"),
                "filename": part.get("filename"),
                "file_url": part.get("file_url"),
            }
            if cache_control is not None:
                file_item["cache_control"] = cache_control
            out.append({k: v for k, v in file_item.items() if v is not None})
            continue
        # Video URLs are forwarded as file URLs.
        if part_type in {"video_url", "input_video"} and isinstance(
            part.get("video_url"), dict
        ):
            video = part["video_url"]
            if isinstance(video.get("url"), str):
                video_item: dict[str, Any] = {
                    "type": "input_file",
                    "file_url": video["url"],
                }
                if cache_control is not None:
                    video_item["cache_control"] = cache_control
                out.append(video_item)
            continue
    if out:
        return out
    return None
def _build_function_call_items(
    message: CoreMessage,
) -> list[ResponseFunctionToolCallParam]:
    """Emit function_call items for an assistant message's tool calls.

    Covers both modern ``tool_calls`` entries and the legacy
    ``function_call`` field; entries missing an id or name are skipped.
    """
    items: list[ResponseFunctionToolCallParam] = []
    if message.tool_calls:
        for tool_call in message.tool_calls:
            if not isinstance(tool_call, dict) or tool_call.get("type") != "function":
                continue
            function = tool_call.get("function")
            if not isinstance(function, dict):
                continue
            call_id = _as_str(tool_call.get("id"))
            name = _as_str(function.get("name"))
            if call_id is None or name is None:
                continue
            items.append(
                {
                    "type": "function_call",
                    "call_id": call_id,
                    "name": name,
                    "arguments": _json_string(function.get("arguments")),
                }
            )
    if message.function_call is not None and isinstance(message.function_call, dict):
        name = _as_str(message.function_call.get("name"))
        if name is not None:
            # Legacy function_call carries no reliable id; fall back through
            # several candidates, ending with the function name itself.
            call_id = (
                _as_str(message.function_call.get("id"))
                or _as_str(message.extra.get("call_id"))
                or message.name
                or name
            )
            items.append(
                {
                    "type": "function_call",
                    "call_id": call_id,
                    "name": name,
                    "arguments": _json_string(message.function_call.get("arguments")),
                }
            )
    return items
def _build_function_output(message: CoreMessage) -> FunctionCallOutput:
    """Build a function_call_output item for a tool/function message.

    Raises ValueError when no call id can be resolved.
    """
    call_id = (
        message.tool_call_id
        or _as_str(message.extra.get("call_id"))
        or message.name
        or _as_str(message.extra.get("name"))
    )
    if call_id is None:
        raise ValueError("tool/function message requires call_id")
    output = _normalize_tool_output(message.content)
    return {
        "type": "function_call_output",
        "call_id": call_id,
        "output": cast(Any, output),
    }
def _build_explicit_item(
    message: CoreMessage, item_type: str
) -> ResponseInputItemParam:
    """Build a Responses input item whose type was given explicitly.

    Supports item_reference, function_call_output / custom_tool_call_output,
    and function_call.  Raises ValueError for any other type or for missing
    required fields.
    """
    if item_type == "item_reference":
        ref_item: dict[str, str] = {
            "type": "item_reference",
            "id": _require_str(message.extra.get("id"), "item_reference requires id"),
        }
        return cast("ResponseInputItemParam", ref_item)
    if item_type in {"function_call_output", "custom_tool_call_output"}:
        call_id = (
            message.tool_call_id
            or _as_str(message.extra.get("call_id"))
            or message.name
            or _as_str(message.extra.get("name"))
        )
        if call_id is None:
            raise ValueError(f"{item_type} requires call_id")
        output_item: dict[str, Any] = {
            "type": item_type,
            "call_id": call_id,
            "output": _normalize_tool_output(message.content),
        }
        # Forward any additional extra fields that do not collide.
        for key, value in message.extra.items():
            if key != "type" and key not in output_item:
                output_item[key] = value
        return cast("ResponseInputItemParam", output_item)
    if item_type == "function_call":
        name = _as_str(message.extra.get("name")) or message.name
        if name is None:
            raise ValueError("function_call requires name")
        call_id = (
            message.tool_call_id
            or _as_str(message.extra.get("call_id"))
            or _as_str(message.extra.get("id"))
            or name
        )
        function_call_item: dict[str, Any] = {
            "type": "function_call",
            "call_id": call_id,
            "name": name,
            "arguments": _json_string(message.content),
        }
        for key, value in message.extra.items():
            if key != "type" and key not in function_call_item:
                function_call_item[key] = value
        return cast("ResponseInputItemParam", function_call_item)
    raise ValueError(f"Unsupported explicit item type: {item_type}")
def _normalize_tool_output(
content: Any,
) -> str | ResponseFunctionCallOutputItemListParam:
if isinstance(content, str):
return content
if isinstance(content, list):
converted = _build_message_content(content, "tool")
if converted is None:
return ""
return cast("ResponseFunctionCallOutputItemListParam", converted)
if content is None:
return ""
return json.dumps(content, ensure_ascii=True)
def _build_reasoning(request: ProviderChatRequest) -> Reasoning | None:
effort = request.reasoning_effort
summary = request.reasoning_summary
if effort is None and isinstance(request.reasoning, dict):
reasoning_effort = request.reasoning.get("effort")
if isinstance(reasoning_effort, str):
effort = reasoning_effort
if summary is None and isinstance(request.reasoning, dict):
reasoning_summary = request.reasoning.get("summary")
if isinstance(reasoning_summary, str):
summary = reasoning_summary
if summary is None:
if request.verbosity == "low":
summary = "concise"
elif request.verbosity in {"medium", "high"}:
summary = "detailed"
if effort is None and summary is None:
return None
reasoning: Reasoning = {}
if effort is not None:
reasoning["effort"] = cast(Any, effort)
if summary is not None:
reasoning["summary"] = cast(Any, summary)
return reasoning
def _extract_cache_control(part: dict[str, Any]) -> dict[str, Any] | None:
cache_control = part.get("cache_control")
if not isinstance(cache_control, dict):
return None
cache_type = cache_control.get("type")
if cache_type != "ephemeral":
return None
out: dict[str, Any] = {"type": "ephemeral"}
ttl = cache_control.get("ttl")
if ttl in {"5m", "1h"}:
out["ttl"] = ttl
return out
def _build_text_config(request: ProviderChatRequest) -> ResponseTextConfigParam | None:
if request.response_format is None and request.verbosity is None:
return None
text_config: ResponseTextConfigParam = {}
if request.response_format is not None:
text_config["format"] = _convert_response_format(request.response_format)
if request.verbosity is not None:
text_config["verbosity"] = cast(Any, request.verbosity)
return text_config
def _convert_response_format(
    response_format: dict[str, Any],
) -> ResponseFormatTextConfigParam:
    """Map a chat response_format onto the Responses text format config.

    json_schema wrappers are flattened; anything else passes through.
    """
    fmt_type = _as_str(response_format.get("type"))
    schema_wrapper = response_format.get("json_schema")
    if fmt_type == "json_schema" and isinstance(schema_wrapper, dict):
        converted: dict[str, Any] = {"type": "json_schema"}
        for key, expected in (
            ("name", str),
            ("description", str),
            ("schema", dict),
            ("strict", bool),
        ):
            if isinstance(schema_wrapper.get(key), expected):
                converted[key] = schema_wrapper[key]
        return cast("ResponseFormatTextConfigParam", converted)
    return cast("ResponseFormatTextConfigParam", response_format)
def _build_tools(tools: list[dict[str, Any]]) -> list[ToolParam]:
    """Translate chat-completions tool specs into Responses tool params.

    Function and custom tools are remapped; any other dict passes through.
    """
    converted: list[ToolParam] = []
    for raw in tools:
        if not isinstance(raw, dict):
            continue
        raw_type = _as_str(raw.get("type"))
        function = raw.get("function")
        custom = raw.get("custom")
        if raw_type == "function" and isinstance(function, dict):
            fn_name = _as_str(function.get("name"))
            if fn_name is None:
                # A function tool without a name cannot be expressed.
                continue
            parameters = function.get("parameters")
            strict = function.get("strict")
            entry: dict[str, Any] = {
                "type": "function",
                "name": fn_name,
                "parameters": parameters if isinstance(parameters, dict) else None,
                "strict": strict if isinstance(strict, bool) else False,
            }
            description = function.get("description")
            if isinstance(description, str):
                entry["description"] = description
            converted.append(cast("ToolParam", entry))
        elif raw_type == "custom" and isinstance(custom, dict):
            custom_name = _as_str(custom.get("name"))
            if custom_name is None:
                continue
            entry = {"type": "custom", "name": custom_name}
            if isinstance(custom.get("description"), str):
                entry["description"] = custom["description"]
            if isinstance(custom.get("format"), dict):
                entry["format"] = custom["format"]
            converted.append(cast("ToolParam", entry))
        else:
            converted.append(cast("ToolParam", raw))
    return converted
def _build_tool_choice(choice: str | dict[str, Any]) -> ToolChoice:
    """Translate a chat tool_choice value into the Responses equivalent.

    Strings pass through; named function/custom choices and allowed_tools
    are remapped; anything unrecognized passes through as-is.
    """
    if isinstance(choice, str):
        return cast("ToolChoice", choice)
    if not isinstance(choice, dict):
        return "auto"
    kind = _as_str(choice.get("type"))
    if kind in {"function", "custom"}:
        payload = choice.get(kind)
        if isinstance(payload, dict):
            name = _as_str(payload.get("name"))
            if name is not None:
                return cast("ToolChoice", {"type": kind, "name": name})
    elif kind == "allowed_tools":
        allowed = choice.get("allowed_tools")
        if isinstance(allowed, dict):
            mode = allowed.get("mode")
            tools = allowed.get("tools")
            if mode in {"auto", "required"} and isinstance(tools, list):
                return {
                    "type": "allowed_tools",
                    "mode": mode,
                    "tools": [
                        cast("dict[str, object]", entry)
                        for entry in tools
                        if isinstance(entry, dict)
                    ],
                }
    return cast("ToolChoice", choice)
def _extract_text(content: Any) -> str:
if isinstance(content, str):
return content
if not isinstance(content, list):
return ""
parts: list[str] = []
for part in content:
if not isinstance(part, dict):
continue
if part.get("type") == "text" and isinstance(part.get("text"), str):
parts.append(part["text"])
return "\n".join(parts)
def _json_string(value: Any) -> str:
if isinstance(value, str):
return value
if value is None:
return "{}"
return json.dumps(value, ensure_ascii=True)
def _as_str(value: Any) -> str | None:
return value if isinstance(value, str) else None
def _require_str(value: Any, error_message: str) -> str:
if isinstance(value, str):
return value
raise ValueError(error_message)

View file

@ -0,0 +1,36 @@
"""Shared helpers for codex responses provider."""
from __future__ import annotations
import logging
from typing import Any
logger = logging.getLogger(__name__)
def _log_ignored_extra(extra: dict[str, Any], *, provider_name: str) -> None:
if not extra:
return
logger.error(
"provider '%s' ignored unsupported extra params: %s",
provider_name,
extra,
)
def _to_dict(raw: Any) -> dict[str, Any]:
if hasattr(raw, "model_dump"):
dumped = raw.model_dump()
if isinstance(dumped, dict):
return dumped
if isinstance(raw, dict):
return raw
return {}
def _as_dict(raw: Any) -> dict[str, Any]:
    """Alias of ``_to_dict`` kept for call-site readability."""
    return _to_dict(raw)
def _as_str(value: Any) -> str | None:
return value if isinstance(value, str) else None

21
app/providers/factory.py Normal file
View file

@ -0,0 +1,21 @@
"""Provider construction from loaded config."""
from __future__ import annotations
from app.config.models import LoadedConfig, LoadedProviderConfig
from app.providers.base import BaseProvider
import app.providers.codex_responses.provider # noqa: F401
import app.providers.openai_completions.provider # noqa: F401
from app.providers.registry import get_provider_class
def build_provider_registry(config: LoadedConfig) -> dict[str, BaseProvider]:
    """Instantiate one provider per configured entry, keyed by provider name."""
    return {
        name: _build_provider(provider_config)
        for name, provider_config in config.providers.items()
    }
def _build_provider(provider: LoadedProviderConfig) -> BaseProvider:
    """Look up the class registered for ``provider.type`` and build from config."""
    provider_cls = get_provider_class(provider.type)
    return provider_cls.from_config(provider)

View file

@ -0,0 +1,5 @@
"""OpenAI-completions provider package."""
from app.providers.openai_completions.provider import OpenAICompletionsProvider
__all__ = ["OpenAICompletionsProvider"]

View file

@ -0,0 +1,436 @@
"""OpenAI-compatible chat completions provider."""
from __future__ import annotations
from collections.abc import AsyncIterator
import logging
from typing import Any, Protocol, cast
from openai import AsyncOpenAI, OpenAIError
from app.config.models import LoadedProviderConfig, TokenAuth, UrlAuth
from app.core.errors import UpstreamProviderError
from app.core.types import CoreChunk, CoreMessage, ProviderChatRequest, ProviderModel
from app.providers.base import BaseProvider
from app.providers.token_url_auth import TokenUrlAuthProvider
from app.providers.registry import provider
logger = logging.getLogger(__name__)
class _BearerAuthProvider(Protocol):
    """Structural interface: anything that can produce auth headers."""

    async def get_headers(self) -> dict[str, str]: ...
class _StaticBearerAuthProvider:
def __init__(self, token: str) -> None:
self._token = token
async def get_headers(self) -> dict[str, str]:
return {"Authorization": f"Bearer {self._token}"}
@provider(type="openai-completions")
class OpenAICompletionsProvider(BaseProvider):
    """Provider that talks to OpenAI-compatible /chat/completions APIs."""

    def __init__(
        self,
        *,
        name: str,
        base_url: str,
        token: str | None = None,
        whitelist: list[str] | None = None,
        blacklist: list[str] | None = None,
        auth_provider: _BearerAuthProvider | None = None,
        client: Any | None = None,
    ) -> None:
        """Create the provider.

        Either *auth_provider* or a static *token* must be given (ValueError
        otherwise).  *client* may inject a pre-built SDK client.
        """
        super().__init__(
            name=name,
            base_url=base_url,
            api_type="openai-completions",
            whitelist=whitelist,
            blacklist=blacklist,
        )
        if auth_provider is None:
            if token is None:
                raise ValueError(
                    f"Provider '{name}' type 'openai-completions' requires auth provider or token"
                )
            auth_provider = _StaticBearerAuthProvider(token)
        self._auth_provider = auth_provider
        # Real credentials are supplied per-request via extra_headers, so the
        # SDK client is constructed with a placeholder key.
        self._client = client or AsyncOpenAI(api_key="placeholder", base_url=base_url)

    @classmethod
    def from_config(cls, config: LoadedProviderConfig) -> BaseProvider:
        """Build a provider instance from loaded configuration.

        Raises ValueError when the config's auth is neither token nor URL auth.
        """
        auth_provider: _BearerAuthProvider
        if isinstance(config.auth, TokenAuth):
            auth_provider = _StaticBearerAuthProvider(config.auth.token)
        elif isinstance(config.auth, UrlAuth):
            auth_provider = TokenUrlAuthProvider(token_url=config.auth.url)
        else:
            raise ValueError(
                f"Provider '{config.name}' type 'openai-completions' requires token or url auth"
            )
        provider = cls(
            name=config.name,
            base_url=config.url,
            whitelist=config.whitelist,
            blacklist=config.blacklist,
            auth_provider=auth_provider,
        )
        provider.display_name = config.display_name
        provider.models_config = config.models
        return provider

    async def stream_chat(
        self, request: ProviderChatRequest
    ) -> AsyncIterator[CoreChunk]:
        """Stream upstream chat-completion deltas as CoreChunk values.

        Raises UpstreamProviderError on any SDK or streaming failure.
        """
        try:
            auth_headers = await self._auth_provider.get_headers()
            extra_body, ignored_extra = _build_openai_extra_payload(request)
            sent_assistant_role = False
            stream = await cast(Any, self._client.chat.completions).create(
                model=request.model,
                messages=[_to_chat_message(m) for m in request.messages],
                stream=True,
                extra_headers=auth_headers,
                extra_body=extra_body,
                audio=request.audio,
                frequency_penalty=request.frequency_penalty,
                logit_bias=request.logit_bias,
                logprobs=request.logprobs,
                max_completion_tokens=request.max_completion_tokens,
                max_tokens=request.max_tokens,
                metadata=request.metadata,
                modalities=request.modalities,
                n=request.n,
                parallel_tool_calls=request.parallel_tool_calls,
                prediction=request.prediction,
                presence_penalty=request.presence_penalty,
                prompt_cache_key=request.prompt_cache_key,
                prompt_cache_retention=request.prompt_cache_retention,
                reasoning_effort=request.reasoning_effort,
                response_format=request.response_format,
                safety_identifier=request.safety_identifier,
                seed=request.seed,
                service_tier=request.service_tier,
                stop=request.stop,
                store=request.store,
                stream_options=request.stream_options,
                temperature=request.temperature,
                tool_choice=request.tool_choice,
                tools=request.tools,
                top_logprobs=request.top_logprobs,
                top_p=request.top_p,
                user=request.user,
                verbosity=request.verbosity,
                web_search_options=request.web_search_options,
            )
            _log_ignored_extra(ignored_extra, provider_name=self.name)
            async for chunk in stream:
                choices = getattr(chunk, "choices", [])
                for idx, choice in enumerate(choices):
                    delta = getattr(choice, "delta", None)
                    role = _delta_str(delta, "role")
                    content = _delta_str(delta, "content")
                    reasoning_content = _extract_reasoning_content(delta)
                    reasoning_details = _extract_reasoning_details(delta)
                    tool_calls = _extract_tool_calls(delta)
                    finish_reason = getattr(choice, "finish_reason", None)
                    choice_index = getattr(choice, "index", idx)
                    if role == "assistant":
                        sent_assistant_role = True
                    elif (
                        not sent_assistant_role
                        and role is None
                        and (
                            content is not None
                            or reasoning_content is not None
                            or reasoning_details is not None
                            or tool_calls is not None
                        )
                    ):
                        # Upstream sent payload before any role delta:
                        # synthesize the assistant role chunk first.
                        sent_assistant_role = True
                        yield CoreChunk(index=choice_index, role="assistant")
                    if (
                        role is None
                        and content is None
                        and reasoning_content is None
                        and reasoning_details is None
                        and tool_calls is None
                        and finish_reason is None
                    ):
                        # Nothing usable in this delta; drop empty chunks.
                        continue
                    yield CoreChunk(
                        index=choice_index,
                        role=role,
                        content=content,
                        reasoning_content=reasoning_content,
                        reasoning_details=reasoning_details,
                        tool_calls=tool_calls,
                        finish_reason=finish_reason,
                    )
        except OpenAIError as exc:
            raise UpstreamProviderError(
                f"Provider '{self.name}' upstream request failed: {exc}"
            ) from exc
        except Exception as exc:
            raise UpstreamProviderError(
                f"Provider '{self.name}' failed while streaming: {exc}"
            ) from exc

    async def list_models(self) -> list[ProviderModel]:
        """List models from the upstream /models endpoint.

        Raises UpstreamProviderError on failure; entries without an id are
        skipped.
        """
        try:
            auth_headers = await self._auth_provider.get_headers()
            response = await self._client.models.list(extra_headers=auth_headers)
            items = _coerce_model_items(response)
            return [_to_provider_model(item) for item in items if item.get("id")]
        except OpenAIError as exc:
            raise UpstreamProviderError(
                f"Provider '{self.name}' models request failed: {exc}"
            ) from exc
        except Exception as exc:
            raise UpstreamProviderError(
                f"Provider '{self.name}' failed while listing models: {exc}"
            ) from exc
def _to_chat_message(message: CoreMessage) -> dict[str, Any]:
out: dict[str, Any] = {"role": message.role}
if message.content is not None:
out["content"] = message.content
if message.name is not None:
out["name"] = message.name
if message.tool_call_id is not None:
out["tool_call_id"] = message.tool_call_id
if message.audio is not None:
out["audio"] = message.audio
if message.refusal is not None:
out["refusal"] = message.refusal
if message.tool_calls is not None:
out["tool_calls"] = message.tool_calls
if message.function_call is not None:
out["function_call"] = message.function_call
if message.reasoning_content is not None:
out["reasoning_content"] = message.reasoning_content
return out
def _delta_value(delta: Any, field: str) -> Any:
if delta is None:
return None
if isinstance(delta, dict):
return delta.get(field)
return getattr(delta, field, None)
def _delta_str(delta: Any, field: str) -> str | None:
    """Like ``_delta_value``, but only passes through str results."""
    value = _delta_value(delta, field)
    if isinstance(value, str):
        return value
    return None
def _extract_reasoning_content(delta: Any) -> str | None:
    """Pull reasoning text out of a delta, trying several known spellings."""
    # Flat string fields first; `reasoning` itself may be a plain string.
    for field in ("reasoning_content", "reasoning", "reasoning_text"):
        value = _delta_value(delta, field)
        if isinstance(value, str):
            return value
    # Otherwise `reasoning` may be an object/dict carrying nested text.
    reasoning_obj = _to_dict(_delta_value(delta, "reasoning"))
    for field in ("content", "text", "summary"):
        value = reasoning_obj.get(field)
        if isinstance(value, str):
            return value
    return None
def _extract_tool_calls(delta: Any) -> list[dict[str, Any]] | None:
    """Normalize a delta's tool_calls entries into plain dicts, or None."""
    raw = _delta_value(delta, "tool_calls")
    if not isinstance(raw, list):
        return None
    normalized = [_to_dict(entry) for entry in raw]
    return normalized if normalized else None
def _extract_reasoning_details(delta: Any) -> list[dict[str, Any]] | None:
    """Gather reasoning detail dicts from both flat and nested locations."""
    candidates: list[Any] = []
    flat = _delta_value(delta, "reasoning_details")
    if isinstance(flat, list):
        candidates.extend(flat)
    nested = _to_dict(_delta_value(delta, "reasoning")).get("details")
    if isinstance(nested, list):
        candidates.extend(nested)
    # Drop entries that do not convert to a non-empty dict.
    details = [detail for detail in map(_to_dict, candidates) if detail]
    return details or None
def _build_openai_extra_payload(
    request: ProviderChatRequest,
) -> tuple[dict[str, Any], dict[str, Any]]:
    """Split routing extras into (upstream ``extra_body``, ignored leftovers).

    Each pass-through field is taken from the typed request attribute first
    and falls back to a correctly-typed entry of the same name inside
    ``request.extra``. Keys consumed here are removed from the leftover dict.
    """
    # (field name, expected type in request.extra), in payload insertion order.
    passthrough: tuple[tuple[str, type], ...] = (
        ("provider", dict),
        ("plugins", list),
        ("session_id", str),
        ("trace", dict),
        ("debug", dict),
        ("image_config", dict),
        ("models", list),
    )
    extra = request.extra
    extra_body: dict[str, Any] = {}
    for name, expected in passthrough:
        value = getattr(request, name) or (
            extra.get(name) if isinstance(extra.get(name), expected) else None
        )
        if value is not None:
            extra_body[name] = value
    reasoning_value = _build_reasoning_payload(request)
    if reasoning_value is not None:
        extra_body["reasoning"] = reasoning_value
    ignored_extra = dict(request.extra)
    for name, _expected in passthrough:
        ignored_extra.pop(name, None)
    return extra_body, ignored_extra
def _build_reasoning_payload(request: ProviderChatRequest) -> dict[str, Any] | None:
effort = request.reasoning_effort
summary = request.reasoning_summary
reasoning = request.reasoning
if isinstance(reasoning, dict):
if effort is None and isinstance(reasoning.get("effort"), str):
effort = reasoning["effort"]
if summary is None and isinstance(reasoning.get("summary"), str):
summary = reasoning["summary"]
if effort is None and summary is None:
return None
out: dict[str, Any] = {}
if effort is not None:
out["effort"] = effort
if summary is not None:
out["summary"] = summary
return out
def _log_ignored_extra(extra: dict[str, Any], *, provider_name: str) -> None:
if not extra:
return
logger.error(
"provider '%s' ignored unsupported extra params: %s",
provider_name,
extra,
)
def _coerce_model_items(raw: Any) -> list[dict[str, Any]]:
data = getattr(raw, "data", None)
if isinstance(data, list):
return [_to_dict(item) for item in data]
if isinstance(raw, list):
return [_to_dict(item) for item in raw]
return []
def _to_dict(raw: Any) -> dict[str, Any]:
if hasattr(raw, "model_dump"):
dumped = raw.model_dump()
if isinstance(dumped, dict):
return dumped
if isinstance(raw, dict):
return raw
return {}
def _to_provider_model(raw: dict[str, Any]) -> ProviderModel:
    """Convert one raw upstream model dict into a ProviderModel.

    Fields with missing or wrongly-typed values are dropped (set to None)
    rather than propagated. Raises KeyError if "id" is absent.
    """
    arch = raw.get("architecture")
    if isinstance(arch, dict):
        # Strip "tokenizer": upstream implementation detail, not exposed.
        arch = {k: v for k, v in arch.items() if k != "tokenizer"}
    else:
        arch = None
    supported = raw.get("supported_parameters")
    supported_parameters = None
    if isinstance(supported, list):
        supported_parameters = [str(v) for v in supported]
    context_length = raw.get("context_length")
    if not isinstance(context_length, int):
        # Fall back to the top provider's context length when the model-level
        # value is missing or malformed.
        top_provider = raw.get("top_provider")
        if isinstance(top_provider, dict) and isinstance(
            top_provider.get("context_length"), int
        ):
            context_length = top_provider.get("context_length")
        else:
            context_length = None
    return ProviderModel(
        id=str(raw["id"]),
        name=raw.get("name") if isinstance(raw.get("name"), str) else None,
        description=raw.get("description")
        if isinstance(raw.get("description"), str)
        else None,
        context_length=context_length,
        architecture=arch,
        pricing=raw.get("pricing") if isinstance(raw.get("pricing"), dict) else None,
        supported_parameters=supported_parameters,
        settings=raw.get("settings") if isinstance(raw.get("settings"), dict) else None,
        opencode=raw.get("opencode") if isinstance(raw.get("opencode"), dict) else None,
        created=raw.get("created") if isinstance(raw.get("created"), int) else None,
        owned_by=raw.get("owned_by") if isinstance(raw.get("owned_by"), str) else None,
    )

34
app/providers/registry.py Normal file
View file

@ -0,0 +1,34 @@
"""Provider class registry with decorator-based registration."""
from __future__ import annotations
import builtins
from typing import TYPE_CHECKING
if TYPE_CHECKING:
from app.config.models import ProviderType
from app.providers.base import BaseProvider
# Maps a provider-type string to its provider class; populated via @provider.
_PROVIDER_REGISTRY: dict[str, builtins.type["BaseProvider"]] = {}
def provider(*, type: "ProviderType"):
    """Class decorator registering a provider implementation under *type*.

    Raises ValueError when the type string was already registered.
    """
    def register(
        cls: builtins.type["BaseProvider"],
    ) -> builtins.type["BaseProvider"]:
        if type in _PROVIDER_REGISTRY:
            raise ValueError(f"Provider type '{type}' is already registered")
        _PROVIDER_REGISTRY[type] = cls
        return cls
    return register
def get_provider_class(provider_type: str) -> builtins.type["BaseProvider"]:
    """Look up the registered provider class; ValueError for unknown types."""
    registered = _PROVIDER_REGISTRY.get(provider_type)
    if registered is not None:
        return registered
    raise ValueError(f"Unsupported provider type: {provider_type}")

View file

@ -0,0 +1,36 @@
"""Auth helper for fetching bearer token from external URL."""
from __future__ import annotations
import httpx
class TokenUrlAuthProvider:
    """Fetches bearer token from URL on each request."""

    def __init__(self, *, token_url: str, timeout_seconds: float = 600.0) -> None:
        # URL expected to return a JSON object containing a "token" field.
        self._token_url = token_url
        self._timeout_seconds = timeout_seconds

    async def get_headers(self) -> dict[str, str]:
        """Return an Authorization header with a freshly fetched bearer token."""
        token = await self._fetch_token()
        return {"Authorization": f"Bearer {token}"}

    async def _fetch_token(self) -> str:
        """Fetch and validate the token from the configured URL.

        Raises:
            ValueError: on an HTTP error status, a non-JSON body, a JSON body
                without a usable "token" field, or an empty token.
        """
        async with httpx.AsyncClient(timeout=self._timeout_seconds) as client:
            response = await client.get(self._token_url)
            if response.status_code >= 400:
                raise ValueError(
                    f"Token URL auth failed with status {response.status_code}"
                )
            try:
                # ValueError: body is not JSON. KeyError: JSON object without
                # "token". TypeError: JSON body is not an object (e.g. a list).
                # The previous code caught only ValueError, so malformed-but-
                # valid JSON leaked raw KeyError/TypeError to callers.
                token = response.json()["token"]
            except (ValueError, KeyError, TypeError) as err:
                raise ValueError(
                    f"Token URL auth returned invalid response: {response.content}"
                ) from err
            if not token:
                raise ValueError("Token URL auth returned empty token")
            return token

24
config.yml Normal file
View file

@ -0,0 +1,24 @@
providers:
wzray:
url: http://127.0.0.1:8000/v1
type: openai-completions
name: Wzray
models:
openai/gpt-5:
name: GPT-5
zai:
url: https://api.z.ai/api/coding/paas/v4
type: openai-completions
kilocode:
url: https://api.kilo.ai/api/openrouter
type: openai-completions
whitelist:
- minimax/minimax-m2.5:free
opencode:
url: https://opencode.ai/zen/v1
type: openai-completions
blacklist:
- glm-5-free
codex:
url: https://chatgpt.com/backend-api
type: codex-responses

File diff suppressed because it is too large Load diff

8513
docs/Responses schema.md Normal file

File diff suppressed because it is too large Load diff

88
docs/TODO.md Normal file
View file

@ -0,0 +1,88 @@
# Codex Router Alignment Plan
## Confirmed Scope (from latest requirements)
- Keep `parallel_tool_calls: true` in outbound responses payloads.
- Do not send `prompt_cache_key` from the router for now.
- Always send `include: ["reasoning.encrypted_content"]`.
- Header work now: remove self-added duplicate headers only.
- Message payload work now: stop string `content` serialization and send content parts like the good sample.
## Deferred (intentionally postponed)
- Full header parity with the golden capture (transport/runtime-level UA and low-level accept-encoding parity).
- Full one-to-one `input` history shape parity (`type` omission strategy for message items).
- Recovering or synthesizing top-level developer message from upstream chat-completions schema.
- End-to-end reasoning item roundtrip parity in history (`type: reasoning` pass-through and replay behavior).
- Prompt cache implementation strategy and lifecycle management.
## Feasible Path For Deferred Items
1. Header parity
- Keep current sdk-based client for now.
- If exact parity is required, switch codex provider transport from `AsyncOpenAI` to a custom `httpx` SSE client and set an explicit header allowlist.
2. Input history shape parity
- Add a translator mode that emits implicit message items (`{"role":...,"content":...}`) without `type: "message"`.
- Keep explicit item support for `function_call` and `function_call_output` unchanged.
3. Developer message availability
- Add optional request extension field(s) in `model_extra`, e.g. `opencode_developer_message` or `opencode_input_items`.
- Use extension when provided; otherwise keep current first-system/developer-to-instructions behavior.
4. Reasoning item roundtrip
- Accept explicit inbound items with `extra.type == "reasoning"` and pass through `encrypted_content` + `summary` to responses `input`.
- Keep chat-completions output contract unchanged; reasoning passthrough is input-side only unless a dedicated raw endpoint is added.
5. Prompt cache strategy
- Keep disabled by default.
- Add optional feature flag for deterministic hash-based key generation once cache policy is agreed.
## Schema.md Gap Breakdown (planning only, no implementation yet)
### Legend
- `Supported` = already implemented.
- `Partial` = partly implemented but not schema-complete.
- `Missing` = not implemented yet.
| # | Area | What it does for users | Current status | Decision from latest review | Notes / planned behavior |
|---|---|---|---|---|---|
| 1 | Extra request controls (`provider`, `plugins`, `session_id`, `trace`, `models`, `debug`, `image_config`) | Lets users steer upstream routing, observability, plugin behavior, and image/provider-specific behavior directly from request body. | Missing | Explain each field first, then choose individually | Keep pass-through design: accept fields in API schema, preserve in internal request, forward when provider supports. |
| 2 | `reasoning` object in request (`reasoning.effort`, `reasoning.summary`) | Standard schema-compatible way to request reasoning effort and summary verbosity. | Partial (we use flat `reasoning_effort` / `reasoning_summary`) | Must support | Add canonical `reasoning` object support while preserving backward compatibility with current flat aliases. Define precedence rules if both forms are provided. |
| 3 | `modalities` alignment (`text`/`image`) | Controls output modalities users request. Must match schema contract exactly. | Partial / mismatched (`text`/`audio` now) | Must support schema behavior | Change request schema and internal mapping to `text`/`image` for the public API; ensure providers receive compatible values. |
| 4 | Full message content parts (audio/video/cache-control variants) | Enables multi-part multimodal inputs (audio, video, richer text metadata) and cache hints on message parts. | Partial | Must support | Expand accepted message content item parsing and translator mapping for all schema item variants, including preservation of unknown-but-valid provider fields where safe. |
| 5 | Assistant response extensions (`reasoning`, `reasoning_details`, `images`) | Returns richer assistant payloads: plain reasoning, structured reasoning metadata, and generated image outputs. | Missing | Must support | Extend response schemas and mappers so these fields can be emitted in non-streaming and streaming-compatible forms. |
| 6 | Encrypted reasoning passthrough (`reasoning_details` with encrypted data) | Exposes encrypted reasoning blocks from upstream exactly as received for advanced clients/debugging/replay. | Missing | High priority, must support | Capture encrypted reasoning items from responses stream (`response.output_item.*` for `type=reasoning`) and surface in API output as raw/structured reasoning details without lossy transformation. |
| 7 | Usage passthrough fidelity | Users should receive full upstream usage payload (raw), not a reduced subset. | Partial | Needed: pass full raw usage through | Do not over-normalize; preserve upstream usage object as-is when available. If upstream omits usage, return `null`/missing naturally. |
| 8 | Detailed HTTP error matrix parity | Strictly maps many status codes exactly like reference schema. | Partial | Not required now | Keep current error strategy unless product requirements change. |
| 9 | Optional `model` when `models` routing is used | OpenRouter-style multi-model router behavior. | Missing | Not required for this project | Keep `model` required in our API for now. |
## Field-by-field reference for item #1 (for product decision)
| Field | User-visible purpose | Typical payload shape | Risk/complexity |
|---|---|---|---|
| `provider` | Control provider routing policy (allow/deny fallback, specific providers, price/perf constraints). | Object with routing knobs (order/only/ignore, pricing, latency/throughput prefs). | Medium-High (router semantics + validation + provider compatibility). |
| `plugins` | Enable optional behavior modules (web search/moderation/auto-router/etc). | Array of plugin descriptors with `id` and optional settings. | Medium (validation + pass-through + provider-specific effects). |
| `session_id` (body) | Group related requests for observability/conversation continuity. | String (usually short opaque id). | Low (mostly passthrough + precedence with headers if both exist). |
| `trace` | Attach tracing metadata for distributed observability. | Object (`trace_id`, `span_name`, etc + custom keys). | Low-Medium (schema + passthrough). |
| `models` | Candidate model set for automatic selection/router behavior. | Array of model identifiers/patterns. | Medium-High (changes model resolution flow). |
| `debug` | Request debug payloads (e.g., transformed upstream request echo in stream). | Object flags like `echo_upstream_body`. | Medium (security/sensitivity review required). |
| `image_config` | Provider/model-specific image generation tuning options. | Arbitrary object map by provider/model conventions. | Medium (loosely-typed passthrough plus safety limits). |
## Execution order when implementation starts (agreed priorities)
1. Encrypted reasoning + reasoning details output path (#6 + #5 core subset).
2. Full usage passthrough fidelity (#7).
3. Request `reasoning` object support (#2).
4. Modalities contract alignment to schema (`text`/`image`) (#3).
5. Message content multimodal expansion (#4).
6. Decide and then implement selected item-#1 controls (`provider/plugins/session_id/trace/models/debug/image_config`).
## Implementation Steps (current)
1. Update codex translator payload fields:
- remove `prompt_cache_key`
- add mandatory `include`
2. Update message content serialization:
- serialize string message content as `[{"type":"input_text","text":...}]`
- preserve empty-content filtering behavior
3. Update codex provider header handling:
- avoid mutating oauth headers in place
- remove self-added duplicate `user-agent` header
4. Update/extend tests for new payload contract.
5. Run full `pytest` and fix regressions until green.

39
opencode/README.md Normal file
View file

@ -0,0 +1,39 @@
# OpenCode Wzray Plugin
Minimal OpenCode plugin that connects to your local API.
## What It Does
- Adds one provider in OpenCode config: `wzray`
- Uses OpenAI-compatible transport
- Fetches models from your API `GET /v1/models`
- Uses `WZRAY_API_KEY` as bearer token when set
## Defaults
- Base URL: `http://127.0.0.1:8000/v1`
- Provider key: `wzray`
- Fallback model list contains one safe model (`openai/gpt-5`)
## Environment Variables
- `WZRAY_API_BASE_URL` (optional)
- `AI_API_BASE_URL` (optional fallback)
- `WZRAY_API_KEY` (optional)
## Install
From this directory:
```bash
chmod +x ./install_opencode.sh
./install_opencode.sh
```
This copies plugin files to:
- `~/.config/opencode/plugin/opencode/`
And ensures `opencode.json` contains:
- `./plugin/opencode/plugin_wzray.ts`

View file

@ -0,0 +1,69 @@
#!/usr/bin/env bash
# Install the wzray OpenCode plugin: copy the plugin sources into the user's
# OpenCode config directory and register the entry in opencode.json.
set -euo pipefail
# Directory containing this script (source of the plugin files to copy).
SCRIPT_DIR="$(cd "$(dirname "$0")" && pwd)"
OPENCODE_DIR="${XDG_CONFIG_HOME:-$HOME/.config}/opencode"
PLUGIN_DIR="$OPENCODE_DIR/plugin/opencode"
CONFIG_FILE="$OPENCODE_DIR/opencode.json"
# Path recorded in opencode.json, relative to the OpenCode config directory.
PLUGIN_ENTRY="./plugin/opencode/plugin_wzray.ts"
mkdir -p "$PLUGIN_DIR"
cp "$SCRIPT_DIR/plugin_wzray.ts" "$PLUGIN_DIR/plugin_wzray.ts"
cp "$SCRIPT_DIR/models_wzray.ts" "$PLUGIN_DIR/models_wzray.ts"
# No existing config: write a fresh opencode.json and stop here.
if [[ ! -f "$CONFIG_FILE" ]]; then
cat > "$CONFIG_FILE" <<EOF
{
"\$schema": "https://opencode.ai/config.json",
"plugin": [
"$PLUGIN_ENTRY"
]
}
EOF
echo "Installed plugin and created $CONFIG_FILE"
exit 0
fi
# Existing config: a JS runtime is required because opencode.json may use
# relaxed JSON (comments, trailing commas) that strict parsers reject.
RUNTIME=""
if command -v node >/dev/null 2>&1; then
RUNTIME="node"
elif command -v bun >/dev/null 2>&1; then
RUNTIME="bun"
else
echo "Error: node or bun is required to update relaxed opencode.json" >&2
exit 1
fi
# Evaluate the config as a JS expression (tolerates relaxed JSON), append the
# plugin entry when missing, and rewrite the file as strict pretty JSON.
"$RUNTIME" - "$CONFIG_FILE" "$PLUGIN_ENTRY" <<'JS'
const fs = require("node:fs");
const configPath = process.argv[2];
const entry = process.argv[3];
const source = fs.readFileSync(configPath, "utf8");
let data;
try {
data = new Function(`"use strict"; return (${source});`)();
} catch (error) {
console.error(`Failed to parse ${configPath}: ${String(error)}`);
process.exit(1);
}
if (!data || typeof data !== "object" || Array.isArray(data)) {
data = {};
}
const plugins = Array.isArray(data.plugin) ? data.plugin : [];
if (!plugins.includes(entry)) {
plugins.push(entry);
}
data.plugin = plugins;
fs.writeFileSync(configPath, `${JSON.stringify(data, null, 2)}\n`, "utf8");
JS
echo "Installed plugin files to $PLUGIN_DIR"
echo "Updated $CONFIG_FILE"
echo "Optional envs: WZRAY_API_BASE_URL (default http://127.0.0.1:8000/v1), WZRAY_API_KEY"

53
opencode/models_wzray.ts Normal file
View file

@ -0,0 +1,53 @@
// Shape of a single model entry and of the map returned by the models API.
type ModelInfo = { name: string };
type AvailableModels = Record<string, ModelInfo>;
// Minimal ambient declaration so this file typechecks without @types/node.
declare const process: {
  env: Record<string, string | undefined>;
};
// Used when the models endpoint is unreachable or yields no usable entries.
const FALLBACK_MODELS: AvailableModels = {
  "openai/gpt-5.3-codex": { name: "OpenAI: GPT-5.3 Codex" },
};
/** Bearer token for the wzray API, if configured in the environment. */
function getApiKey(): string | undefined {
  return process.env.WZRAY_API_KEY;
}
/** Strip trailing slashes and guarantee the base URL ends with "/v1". */
function normalizeBaseUrl(baseUrl: string): string {
  const withoutSlash = baseUrl.replace(/\/+$/, "");
  if (withoutSlash.endsWith("/v1")) {
    return withoutSlash;
  }
  return `${withoutSlash}/v1`;
}
/** Build request headers, attaching Authorization only when an API key exists. */
function createHeaders(): Record<string, string> {
  const headers: Record<string, string> = { "Content-Type": "application/json" };
  const apiKey = getApiKey();
  if (apiKey) {
    headers.Authorization = `Bearer ${apiKey}`;
  }
  return headers;
}
/**
 * Fetch the model list from `GET {base}/models`.
 *
 * Falls back to FALLBACK_MODELS on any network/HTTP failure or when the
 * payload yields no usable entries — this function never throws.
 */
export async function getAvailableModels(baseUrl: string): Promise<AvailableModels> {
  const modelsUrl = `${normalizeBaseUrl(baseUrl)}/models`;
  try {
    const response = await fetch(modelsUrl, { headers: createHeaders() });
    if (!response.ok) return FALLBACK_MODELS;
    const payload = (await response.json()) as {
      data?: Array<{ id?: string; name?: string }>;
    };
    const data = Array.isArray(payload.data) ? payload.data : [];
    const models: AvailableModels = {};
    for (const item of data) {
      if (!item?.id) continue;
      // Prefer the display name; fall back to the raw model id.
      models[item.id] = { name: item.name || item.id };
    }
    return Object.keys(models).length > 0 ? models : FALLBACK_MODELS;
  } catch {
    return FALLBACK_MODELS;
  }
}
export { FALLBACK_MODELS };
export type { AvailableModels, ModelInfo };

55
opencode/plugin_wzray.ts Normal file
View file

@ -0,0 +1,55 @@
// @ts-ignore
import type { Plugin, PluginInput } from "@opencode-ai/plugin";
import { FALLBACK_MODELS, getAvailableModels } from "./models_wzray";
declare const process: {
env: Record<string, string | undefined>;
};
const SCHEMA = "https://opencode.ai/config.json";
const NPM_PACKAGE = "@ai-sdk/openai-compatible";
const PROVIDER_KEY = "wzray";
const PROVIDER_NAME = "AI Router";
const DEFAULT_BASE_URL = "https://ai.wzray.com/v1";
/** Base URL for the wzray API; env overrides fall back to the hosted default. */
function getBaseUrl(): string {
  return (
    process.env.WZRAY_API_BASE_URL ||
    process.env.AI_API_BASE_URL ||
    DEFAULT_BASE_URL
  );
}
/**
 * API key for the provider config. When the env var is unset, return the
 * literal "{env:WZRAY_API_KEY}" placeholder so OpenCode resolves it itself.
 */
function getApiKey(): string {
  return process.env.WZRAY_API_KEY || "{env:WZRAY_API_KEY}";
}
/** Build the OpenCode provider entry for the wzray API with the given models. */
function createProviderConfig(models: Record<string, { name: string }>) {
  return {
    schema: SCHEMA,
    npm: NPM_PACKAGE,
    name: PROVIDER_NAME,
    options: {
      baseURL: getBaseUrl(),
      apiKey: getApiKey(),
    },
    models,
  };
}
/** Add the wzray provider to the OpenCode config unless it is already present. */
async function configure(config: any): Promise<void> {
  if (!config.provider) config.provider = {};
  // Respect a user-defined provider with the same key: do not overwrite it.
  if (config.provider[PROVIDER_KEY]) return;
  const baseUrl = getBaseUrl();
  const models = await getAvailableModels(baseUrl);
  config.provider[PROVIDER_KEY] = createProviderConfig(
    Object.keys(models).length > 0 ? models : FALLBACK_MODELS,
  );
}
const WzrayProviderPlugin: Plugin = async (_input: PluginInput) => {
return { config: configure };
};
export default WzrayProviderPlugin;

21
pyproject.toml Normal file
View file

@ -0,0 +1,21 @@
[project]
name = "ai-router"
version = "0.1.0"
description = "AI model router with OpenAI-compatible API"
requires-python = ">=3.14"
dependencies = [
"fastapi>=0.109.0",
"uvicorn[standard]>=0.27.0",
"pydantic>=2.5.0",
"httpx>=0.26.0",
"pyyaml>=6.0",
"openai>=1.108.0",
]
[dependency-groups]
dev = [
"mypy>=1.19.1",
"pytest>=8.0.0",
"ruff>=0.2.0",
"types-pyyaml>=6.0.12.20250915",
]

8
tests/conftest.py Normal file
View file

@ -0,0 +1,8 @@
from __future__ import annotations
import sys
from pathlib import Path
# Make the repository root importable so tests can `import app.*`
# without installing the package.
ROOT = Path(__file__).resolve().parents[1]
if str(ROOT) not in sys.path:
    sys.path.insert(0, str(ROOT))

View file

@ -0,0 +1,362 @@
from __future__ import annotations
from collections.abc import AsyncIterator
from fastapi.testclient import TestClient
from app.core.models_dev import ModelsDevCatalog
from app.core.router import RouterCore
from app.core.types import CoreChunk, ProviderChatRequest, ProviderModel
from app.dependencies import get_router_core
from app.main import create_app
from app.providers.base import BaseProvider
class _StreamingProvider(BaseProvider):
    """Fake provider that streams a fixed three-chunk reply and records requests."""

    def __init__(self) -> None:
        super().__init__(
            name="kilocode",
            base_url="https://api.kilo.ai",
            api_type="openai-completions",
        )
        # Model ids passed to stream_chat, in call order.
        self.models_seen: list[str] = []
        # Most recent request, for assertions on forwarded fields.
        self.last_request: ProviderChatRequest | None = None

    @classmethod
    def from_config(cls, config):
        # Tests construct this provider directly; config loading is unsupported.
        raise NotImplementedError

    async def stream_chat(
        self, request: ProviderChatRequest
    ) -> AsyncIterator[CoreChunk]:
        """Record the request, then emit role -> content -> finish chunks."""
        self.models_seen.append(request.model)
        self.last_request = request
        yield CoreChunk(role="assistant")
        yield CoreChunk(content="Hello")
        yield CoreChunk(finish_reason="stop")

    async def list_models(self) -> list[ProviderModel]:
        """Expose a single fixed model for the /models aggregation tests."""
        return [ProviderModel(id="minimax/minimax-m2.5:free", name="MiniMax")]
class _ReasoningProvider(_StreamingProvider):
    """Variant of _StreamingProvider that also streams reasoning chunks."""

    async def stream_chat(
        self, request: ProviderChatRequest
    ) -> AsyncIterator[CoreChunk]:
        """Emit role, a reasoning chunk (encrypted + summary details), content, finish."""
        self.models_seen.append(request.model)
        self.last_request = request
        yield CoreChunk(role="assistant")
        yield CoreChunk(
            reasoning_content="**Plan**",
            reasoning_details=[
                {
                    "type": "reasoning.encrypted",
                    "data": "enc_123",
                    "id": "rs_1",
                    "format": "openai-responses-v1",
                },
                {
                    "type": "reasoning.summary",
                    "summary": "**Plan**",
                    "id": "rs_1",
                    "format": "openai-responses-v1",
                },
            ],
        )
        yield CoreChunk(content="Hello")
        yield CoreChunk(finish_reason="stop")
class _ModelsDevCatalogWithProviderName(ModelsDevCatalog):
    """Stub catalog returning a fixed provider display name and model metadata."""

    def __init__(self) -> None:
        # The fetch callback must never run; _never_called raises if it does.
        super().__init__(fetch_catalog=lambda: _never_called())

    async def get_provider_models(
        self, *, provider_name: str, provider_url: str
    ) -> tuple[str | None, dict[str, dict[str, object]]]:
        """Return the canned ("Kilo AI", metadata) pair regardless of inputs."""
        return "Kilo AI", {
            "minimax/minimax-m2.5:free": {
                "name": "MiniMax",
                "release_date": "2026-01-15",
            }
        }
async def _never_called() -> dict[str, object]:
    """Guard coroutine: fails loudly if the stub catalog ever fetches for real."""
    raise RuntimeError("not expected")
def _parse_sse_data(raw: str) -> list[str]:
out: list[str] = []
for line in raw.splitlines():
if line.startswith("data: "):
out.append(line[6:])
return out
def test_chat_completions_stream_success() -> None:
    """Streaming completions emit SSE chunks, end with [DONE], and strip the provider prefix."""
    app = create_app()
    provider = _StreamingProvider()
    core = RouterCore(providers={"kilocode": provider})
    app.dependency_overrides[get_router_core] = lambda: core
    client = TestClient(app)
    response = client.post(
        "/v1/chat/completions",
        json={
            "model": "kilocode/minimax/minimax-m2.5:free",
            "messages": [{"role": "user", "content": "hi"}],
            "stream": True,
        },
    )
    assert response.status_code == 200
    payloads = _parse_sse_data(response.text)
    assert payloads[-1] == "[DONE]"
    assert '"content":"Hello"' in payloads[1]
    # The provider receives the model id without the "kilocode/" routing prefix.
    assert provider.models_seen == ["minimax/minimax-m2.5:free"]
def test_chat_completions_unknown_provider_returns_404() -> None:
    """An unregistered provider prefix yields 404 with a typed error body."""
    app = create_app()
    core = RouterCore(providers={})
    app.dependency_overrides[get_router_core] = lambda: core
    client = TestClient(app)
    response = client.post(
        "/v1/chat/completions",
        json={
            "model": "missing/gpt-4.1",
            "messages": [{"role": "user", "content": "hi"}],
            "stream": True,
        },
    )
    assert response.status_code == 404
    assert response.json()["detail"]["error"]["type"] == "provider_not_found"
def test_chat_completions_non_stream_returns_chat_completion_object() -> None:
    """With stream=False the streamed chunks are aggregated into one chat.completion."""
    app = create_app()
    provider = _StreamingProvider()
    core = RouterCore(providers={"kilocode": provider})
    app.dependency_overrides[get_router_core] = lambda: core
    client = TestClient(app)
    response = client.post(
        "/v1/chat/completions",
        json={
            "model": "kilocode/minimax/minimax-m2.5:free",
            "messages": [{"role": "user", "content": "hi"}],
            "stream": False,
        },
    )
    assert response.status_code == 200
    body = response.json()
    assert body["object"] == "chat.completion"
    assert body["choices"][0]["message"]["content"] == "Hello"
def test_chat_completions_non_stream_includes_reasoning_details() -> None:
    """Reasoning text and structured details from the provider survive aggregation intact."""
    app = create_app()
    provider = _ReasoningProvider()
    core = RouterCore(providers={"kilocode": provider})
    app.dependency_overrides[get_router_core] = lambda: core
    client = TestClient(app)
    response = client.post(
        "/v1/chat/completions",
        json={
            "model": "kilocode/minimax/minimax-m2.5:free",
            "messages": [{"role": "user", "content": "hi"}],
            "stream": False,
        },
    )
    assert response.status_code == 200
    body = response.json()
    message = body["choices"][0]["message"]
    # Reasoning text is exposed under both field names for client compatibility.
    assert message["reasoning"] == "**Plan**"
    assert message["reasoning_content"] == "**Plan**"
    assert message["reasoning_details"] == [
        {
            "type": "reasoning.encrypted",
            "data": "enc_123",
            "id": "rs_1",
            "format": "openai-responses-v1",
        },
        {
            "type": "reasoning.summary",
            "summary": "**Plan**",
            "id": "rs_1",
            "format": "openai-responses-v1",
        },
    ]
def test_chat_completions_supports_unversioned_path() -> None:
    """/chat/completions (no /v1 prefix) works, and the reasoning object maps to flat fields."""
    app = create_app()
    provider = _StreamingProvider()
    core = RouterCore(providers={"kilocode": provider})
    app.dependency_overrides[get_router_core] = lambda: core
    client = TestClient(app)
    response = client.post(
        "/chat/completions",
        json={
            "model": "kilocode/minimax/minimax-m2.5:free",
            "messages": [{"role": "user", "content": "hi"}],
            "stream": True,
            "reasoning": {"effort": "low", "summary": "auto"},
        },
    )
    assert response.status_code == 200
    payloads = _parse_sse_data(response.text)
    assert payloads[-1] == "[DONE]"
    assert provider.last_request is not None
    assert provider.last_request.reasoning_effort == "low"
    assert provider.last_request.reasoning_summary == "auto"
def test_chat_completions_accepts_temporary_extra_params() -> None:
    """Unknown extra fields on messages and the request body are tolerated, not rejected."""
    app = create_app()
    provider = _StreamingProvider()
    core = RouterCore(providers={"kilocode": provider})
    app.dependency_overrides[get_router_core] = lambda: core
    client = TestClient(app)
    response = client.post(
        "/v1/chat/completions",
        json={
            "model": "kilocode/minimax/minimax-m2.5:free",
            "messages": [
                {
                    "role": "user",
                    "content": "hi",
                    "reasoning_content": "temporary",
                }
            ],
            "reasoning_summary": "auto",
            "stream": False,
        },
    )
    assert response.status_code == 200
def test_chat_completions_accepts_reasoning_summary_camel_alias() -> None:
    """The camelCase alias reasoningSummary maps onto reasoning_summary."""
    app = create_app()
    provider = _StreamingProvider()
    core = RouterCore(providers={"kilocode": provider})
    app.dependency_overrides[get_router_core] = lambda: core
    client = TestClient(app)
    response = client.post(
        "/v1/chat/completions",
        json={
            "model": "kilocode/minimax/minimax-m2.5:free",
            "messages": [{"role": "user", "content": "hi"}],
            "reasoningSummary": "detailed",
            "stream": False,
        },
    )
    assert response.status_code == 200
    assert provider.last_request is not None
    assert provider.last_request.reasoning_summary == "detailed"
def test_chat_completions_accepts_schema_modalities() -> None:
    """Schema-conformant modalities (text/image) are forwarded to the provider unchanged."""
    app = create_app()
    provider = _StreamingProvider()
    core = RouterCore(providers={"kilocode": provider})
    app.dependency_overrides[get_router_core] = lambda: core
    client = TestClient(app)
    response = client.post(
        "/v1/chat/completions",
        json={
            "model": "kilocode/minimax/minimax-m2.5:free",
            "messages": [{"role": "user", "content": "hi"}],
            "modalities": ["text", "image"],
            "stream": False,
        },
    )
    assert response.status_code == 200
    assert provider.last_request is not None
    assert provider.last_request.modalities == ["text", "image"]
def test_chat_completions_accepts_schema_router_fields() -> None:
    """All router pass-through fields reach the provider request unmodified."""
    app = create_app()
    provider = _StreamingProvider()
    core = RouterCore(providers={"kilocode": provider})
    app.dependency_overrides[get_router_core] = lambda: core
    client = TestClient(app)
    response = client.post(
        "/v1/chat/completions",
        json={
            "model": "kilocode/minimax/minimax-m2.5:free",
            "messages": [{"role": "user", "content": "hi"}],
            "provider": {"allow_fallbacks": False},
            "plugins": [{"id": "web", "enabled": True}],
            "session_id": "ses_123",
            "trace": {"trace_id": "tr_1"},
            "models": ["openai/gpt-5"],
            "debug": {"echo_upstream_body": True},
            "image_config": {"size": "1024x1024"},
            "reasoning": {"effort": "high", "summary": "detailed"},
            "temperature": 0.1,
            "stream": False,
        },
    )
    assert response.status_code == 200
    assert provider.last_request is not None
    assert provider.last_request.provider == {"allow_fallbacks": False}
    assert provider.last_request.plugins == [{"id": "web", "enabled": True}]
    assert provider.last_request.session_id == "ses_123"
    assert provider.last_request.trace == {"trace_id": "tr_1"}
    assert provider.last_request.models == ["openai/gpt-5"]
    assert provider.last_request.debug == {"echo_upstream_body": True}
    assert provider.last_request.image_config == {"size": "1024x1024"}
    assert provider.last_request.reasoning == {"effort": "high", "summary": "detailed"}
    assert provider.last_request.temperature == 0.1
def test_models_endpoint_returns_aggregated_models() -> None:
    """/models lists provider models with routing-prefixed ids and catalog display names."""
    app = create_app()
    provider = _StreamingProvider()
    core = RouterCore(
        providers={"kilocode": provider},
        models_dev_catalog=_ModelsDevCatalogWithProviderName(),
    )
    app.dependency_overrides[get_router_core] = lambda: core
    client = TestClient(app)
    response = client.get("/models")
    assert response.status_code == 200
    body = response.json()
    assert body["object"] == "list"
    assert body["data"][0]["id"] == "kilocode/minimax/minimax-m2.5:free"
    assert body["data"][0]["object"] == "model"
    assert body["data"][0]["created"] > 0
    assert body["data"][0]["owned_by"] == "wzray"
    # Display name combines the catalog's provider name with the model name.
    assert body["data"][0]["name"] == "Kilo AI: MiniMax"
def test_models_endpoint_supports_v1_path() -> None:
    """The versioned /v1/models alias serves the same listing endpoint."""
    app = create_app()
    provider = _StreamingProvider()
    core = RouterCore(providers={"kilocode": provider})
    app.dependency_overrides[get_router_core] = lambda: core
    client = TestClient(app)
    response = client.get("/v1/models")
    assert response.status_code == 200

View file

@ -0,0 +1,369 @@
from __future__ import annotations
import pytest
from typing import Any, cast
from app.core.types import CoreMessage, ProviderChatRequest
from app.providers.codex_responses.translator import build_responses_create_args
def test_build_responses_create_args_maps_core_fields() -> None:
    """Exhaustive mapping check: each supported ProviderChatRequest field must
    be translated into the corresponding Responses-API ``create`` kwarg, and
    unknown ``extra`` keys must be returned as ignored."""
    request = ProviderChatRequest(
        model="gpt-5-codex",
        messages=[
            CoreMessage(role="developer", content="Follow policy."),
            CoreMessage(
                role="system", content=[{"type": "text", "text": "System rule."}]
            ),
            CoreMessage(role="user", content="Hello"),
            CoreMessage(
                role="assistant",
                content="Calling tool",
                tool_calls=[
                    {
                        "id": "call_1",
                        "type": "function",
                        "function": {"name": "lookup", "arguments": {"x": 1}},
                    }
                ],
            ),
            CoreMessage(role="tool", tool_call_id="call_1", content={"ok": True}),
        ],
        metadata={"team": "infra"},
        prompt_cache_key="cache-key",
        prompt_cache_retention="24h",
        safety_identifier="user-123",
        service_tier="default",
        parallel_tool_calls=False,
        top_p=0.9,
        tools=[
            {
                "type": "function",
                "function": {
                    "name": "lookup",
                    "description": "Find data",
                    "parameters": {"type": "object", "properties": {}},
                    "strict": False,
                },
            }
        ],
        tool_choice={"type": "function", "function": {"name": "lookup"}},
        response_format={
            "type": "json_schema",
            "json_schema": {
                "name": "shape",
                "schema": {"type": "object", "properties": {}, "required": []},
                "strict": True,
            },
        },
        verbosity="high",
        reasoning_effort="medium",
        extra={"reasoningSummary": "ignored"},
    )
    args, ignored_extra = build_responses_create_args(request)
    args_dict = cast(dict[str, Any], args)
    # Top-level kwargs expected by the Responses API.
    assert args_dict["model"] == "gpt-5-codex"
    assert args_dict["stream"] is True
    assert args_dict["store"] is False
    assert args_dict["instructions"] == "Follow policy."
    assert args_dict["include"] == ["reasoning.encrypted_content"]
    assert args_dict["parallel_tool_calls"] is False
    # prompt_cache_key is deliberately dropped by the translator.
    assert "prompt_cache_key" not in args_dict
    assert args_dict["prompt_cache_retention"] == "24h"
    assert args_dict["metadata"] == {"team": "infra"}
    assert args_dict["top_p"] == 0.9
    # "high" verbosity maps to a "detailed" reasoning summary.
    assert args_dict["reasoning"] == {"effort": "medium", "summary": "detailed"}
    assert args_dict["text"] == {
        "format": {
            "type": "json_schema",
            "name": "shape",
            "schema": {"type": "object", "properties": {}, "required": []},
            "strict": True,
        },
        "verbosity": "high",
    }
    # Message list translation: roles, content part types, and tool plumbing.
    items = args_dict["input"]
    assert items[0] == {
        "type": "message",
        "role": "system",
        "content": [{"type": "input_text", "text": "System rule."}],
    }
    assert items[1] == {
        "type": "message",
        "role": "user",
        "content": [{"type": "input_text", "text": "Hello"}],
    }
    assert items[2] == {
        "type": "message",
        "role": "assistant",
        "content": [{"type": "output_text", "text": "Calling tool"}],
    }
    assert items[3] == {
        "type": "function_call",
        "call_id": "call_1",
        "name": "lookup",
        "arguments": '{"x": 1}',
    }
    assert items[4] == {
        "type": "function_call_output",
        "call_id": "call_1",
        "output": '{"ok": true}',
    }
    assert ignored_extra == {"reasoningSummary": "ignored"}
def test_build_responses_create_args_supports_explicit_items() -> None:
    """Messages carrying an explicit item ``type`` in ``extra`` are passed
    through as raw Responses input items instead of being re-translated."""
    request = ProviderChatRequest(
        model="gpt-5-codex",
        messages=[
            CoreMessage(role="developer", content="Rules"),
            CoreMessage(role="user", content="Hi"),
            CoreMessage(
                role="assistant",
                content=[{"type": "text", "text": "continuing"}],
                extra={"type": "item_reference", "id": "msg_123"},
            ),
            CoreMessage(
                role="assistant",
                content={"result": 1},
                extra={"type": "function_call", "name": "apply_patch", "id": "call_2"},
            ),
        ],
    )
    args, _ = build_responses_create_args(request)
    args_dict = cast(dict[str, Any], args)
    assert args_dict["input"][1] == {"type": "item_reference", "id": "msg_123"}
    # The dict content is serialized to JSON and the id doubles as call_id.
    assert args_dict["input"][2] == {
        "type": "function_call",
        "call_id": "call_2",
        "name": "apply_patch",
        "arguments": '{"result": 1}',
        "id": "call_2",
    }
def test_build_responses_create_args_tool_choice_allowed_tools() -> None:
    """A nested ``allowed_tools`` tool_choice is flattened into the
    Responses-API shape (mode/tools hoisted to the top level)."""
    request = ProviderChatRequest(
        model="gpt-5-codex",
        messages=[
            CoreMessage(role="developer", content="Rules"),
            CoreMessage(role="user", content="Hi"),
        ],
        tool_choice={
            "type": "allowed_tools",
            "allowed_tools": {
                "mode": "required",
                "tools": [{"type": "function", "name": "run"}],
            },
        },
    )
    args, _ = build_responses_create_args(request)
    args_dict = cast(dict[str, Any], args)
    assert args_dict["tool_choice"] == {
        "type": "allowed_tools",
        "mode": "required",
        "tools": [{"type": "function", "name": "run"}],
    }
def test_build_responses_create_args_maps_verbosity_to_reasoning_summary() -> None:
    """With only ``verbosity`` set, the translator derives the reasoning
    summary level from it ("low" -> "concise") and forwards verbosity."""
    request = ProviderChatRequest(
        model="gpt-5-codex",
        messages=[
            CoreMessage(role="developer", content="Rules"),
            CoreMessage(role="user", content="Hi"),
        ],
        verbosity="low",
    )
    args, _ = build_responses_create_args(request)
    args_dict = cast(dict[str, Any], args)
    assert args_dict["reasoning"] == {"summary": "concise"}
    assert args_dict["text"] == {"verbosity": "low"}
def test_build_responses_create_args_allows_missing_instructions() -> None:
    """Without a developer/system message a default assistant instruction
    string is supplied."""
    request = ProviderChatRequest(
        model="gpt-5-codex",
        messages=[CoreMessage(role="user", content="hello")],
    )
    args, _ = build_responses_create_args(request)
    args_dict = cast(dict[str, Any], args)
    assert args_dict["instructions"] == "You are a helpful assistant."
    assert args_dict["input"] == [
        {
            "type": "message",
            "role": "user",
            "content": [{"type": "input_text", "text": "hello"}],
        }
    ]
def test_build_responses_create_args_maps_followup_developer_to_system_message() -> (
    None
):
    """Only the first developer message becomes ``instructions``; later ones
    are demoted to system messages in the input list."""
    request = ProviderChatRequest(
        model="gpt-5-codex",
        messages=[
            CoreMessage(role="developer", content="Primary rules"),
            CoreMessage(role="developer", content="Secondary rules"),
            CoreMessage(role="user", content="hello"),
        ],
    )
    args, _ = build_responses_create_args(request)
    args_dict = cast(dict[str, Any], args)
    assert args_dict["instructions"] == "Primary rules"
    assert args_dict["input"] == [
        {
            "type": "message",
            "role": "system",
            "content": [{"type": "input_text", "text": "Secondary rules"}],
        },
        {
            "type": "message",
            "role": "user",
            "content": [{"type": "input_text", "text": "hello"}],
        },
    ]
def test_build_responses_create_args_skips_empty_assistant_messages() -> None:
    """Assistant messages with empty content are dropped from the input list."""
    request = ProviderChatRequest(
        model="gpt-5-codex",
        messages=[
            CoreMessage(role="developer", content="Rules"),
            CoreMessage(role="assistant", content=""),
            CoreMessage(role="user", content="hello"),
        ],
    )
    args, _ = build_responses_create_args(request)
    args_dict = cast(dict[str, Any], args)
    assert args_dict["input"] == [
        {
            "type": "message",
            "role": "user",
            "content": [{"type": "input_text", "text": "hello"}],
        }
    ]
def test_build_responses_create_args_maps_reasoning_object() -> None:
    """A pre-shaped ``reasoning`` dict is forwarded verbatim."""
    request = ProviderChatRequest(
        model="gpt-5-codex",
        messages=[
            CoreMessage(role="developer", content="Rules"),
            CoreMessage(role="user", content="hello"),
        ],
        reasoning={"effort": "low", "summary": "concise"},
    )
    args, _ = build_responses_create_args(request)
    args_dict = cast(dict[str, Any], args)
    assert args_dict["reasoning"] == {"effort": "low", "summary": "concise"}
def test_build_responses_create_args_keeps_flat_reasoning_precedence() -> None:
    """Flat ``reasoning_effort``/``reasoning_summary`` fields win over the
    nested ``reasoning`` object when both are present."""
    request = ProviderChatRequest(
        model="gpt-5-codex",
        messages=[
            CoreMessage(role="developer", content="Rules"),
            CoreMessage(role="user", content="hello"),
        ],
        reasoning_effort="high",
        reasoning_summary="detailed",
        reasoning={"effort": "low", "summary": "concise"},
    )
    args, _ = build_responses_create_args(request)
    args_dict = cast(dict[str, Any], args)
    assert args_dict["reasoning"] == {"effort": "high", "summary": "detailed"}
def test_build_responses_create_args_supports_multimodal_content_parts() -> None:
    """Chat-completions content parts (text/image/audio/video/file) are
    translated to Responses input parts; video falls back to ``input_file``."""
    request = ProviderChatRequest(
        model="gpt-5-codex",
        messages=[
            CoreMessage(role="developer", content="Rules"),
            CoreMessage(
                role="user",
                content=[
                    {
                        "type": "text",
                        "text": "Describe media",
                        "cache_control": {"type": "ephemeral", "ttl": "5m"},
                    },
                    {
                        "type": "image_url",
                        "image_url": {
                            "url": "https://example.com/image.png",
                            "detail": "high",
                        },
                    },
                    {
                        "type": "input_audio",
                        "input_audio": {"data": "ZmFrZQ==", "format": "wav"},
                    },
                    {
                        "type": "video_url",
                        "video_url": {"url": "https://example.com/video.mp4"},
                    },
                    {
                        "type": "input_file",
                        "file_url": "https://example.com/doc.pdf",
                        "filename": "doc.pdf",
                    },
                ],
            ),
        ],
    )
    args, _ = build_responses_create_args(request)
    args_dict = cast(dict[str, Any], args)
    parts = args_dict["input"][0]["content"]
    assert parts == [
        {
            # cache_control survives the text-part translation untouched.
            "type": "input_text",
            "text": "Describe media",
            "cache_control": {"type": "ephemeral", "ttl": "5m"},
        },
        {
            # Nested image_url object is flattened into a plain URL string.
            "type": "input_image",
            "detail": "high",
            "image_url": "https://example.com/image.png",
        },
        {
            "type": "input_audio",
            "input_audio": {"data": "ZmFrZQ==", "format": "wav"},
        },
        {
            # Video has no native Responses part; it is sent as a file URL.
            "type": "input_file",
            "file_url": "https://example.com/video.mp4",
        },
        {
            "type": "input_file",
            "file_url": "https://example.com/doc.pdf",
            "filename": "doc.pdf",
        },
    ]
def test_build_responses_create_args_raises_when_tool_output_missing_call_id() -> None:
    """A tool message without ``tool_call_id`` cannot be translated and must
    raise a ValueError."""
    request = ProviderChatRequest(
        model="gpt-5-codex",
        messages=[
            CoreMessage(role="developer", content="Rules"),
            CoreMessage(role="tool", content="result"),
        ],
    )
    with pytest.raises(ValueError, match="requires call_id"):
        build_responses_create_args(request)

View file

@ -0,0 +1,152 @@
from __future__ import annotations
from pathlib import Path
import pytest
import yaml
from pydantic import ValidationError
from app.config.loader import load_config
from app.config.models import OAuthAuth, TokenAuth, UrlAuth
def _write_yaml(path: Path, data: dict) -> None:
    """Serialize *data* to YAML and write it to *path* as UTF-8 text."""
    serialized = yaml.safe_dump(data)
    path.write_text(serialized, encoding="utf-8")
def test_load_config_success(tmp_path: Path) -> None:
    """Happy path: provider config plus a token auth entry load into a
    TokenAuth-backed provider with all overrides applied."""
    config_path = tmp_path / "config.yml"
    auth_path = tmp_path / "auth.yml"
    _write_yaml(
        config_path,
        {
            "providers": {
                "kilocode": {
                    "url": "https://api.kilo.ai/api/openrouter",
                    "type": "openai-completions",
                    "name": "Kilo Override",
                    "models": {"minimax/minimax-m2.5:free": {"name": "MiniMax Custom"}},
                    "whitelist": ["minimax/minimax-m2.5:free"],
                }
            }
        },
    )
    _write_yaml(
        auth_path,
        {"providers": {"kilocode": {"token": "public"}}},
    )
    loaded = load_config(config_path=config_path, auth_path=auth_path)
    provider = loaded.providers["kilocode"]
    assert isinstance(provider.auth, TokenAuth)
    assert provider.type == "openai-completions"
    assert provider.auth.token == "public"
    assert provider.display_name == "Kilo Override"
    assert provider.models == {"minimax/minimax-m2.5:free": {"name": "MiniMax Custom"}}
    assert provider.whitelist == ["minimax/minimax-m2.5:free"]
def test_load_config_requires_auth_entry(tmp_path: Path) -> None:
    """Every configured provider must have a matching auth entry."""
    config_path = tmp_path / "config.yml"
    auth_path = tmp_path / "auth.yml"
    _write_yaml(
        config_path,
        {
            "providers": {
                "kilocode": {"url": "https://api.kilo.ai", "type": "openai-completions"}
            }
        },
    )
    _write_yaml(auth_path, {"providers": {}})
    with pytest.raises(ValueError, match="Missing auth entry"):
        load_config(config_path=config_path, auth_path=auth_path)
def test_load_config_rejects_incomplete_oauth_auth(tmp_path: Path) -> None:
    """An auth entry with only ``refresh`` matches no auth schema and fails
    pydantic validation."""
    config_path = tmp_path / "config.yml"
    auth_path = tmp_path / "auth.yml"
    _write_yaml(
        config_path,
        {
            "providers": {
                "kilocode": {"url": "https://api.kilo.ai", "type": "openai-completions"}
            }
        },
    )
    _write_yaml(auth_path, {"providers": {"kilocode": {"refresh": "rt_abc"}}})
    with pytest.raises(ValidationError):
        load_config(config_path=config_path, auth_path=auth_path)
def test_load_config_supports_oauth_auth(tmp_path: Path) -> None:
    """access/refresh/expires auth fields load as an OAuthAuth instance."""
    config_path = tmp_path / "config.yml"
    auth_path = tmp_path / "auth.yml"
    _write_yaml(
        config_path,
        {
            "providers": {
                "codex": {
                    "url": "https://chatgpt.com/backend-api",
                    "type": "codex-responses",
                }
            }
        },
    )
    _write_yaml(
        auth_path,
        {
            "providers": {
                "codex": {
                    "access": "acc",
                    "refresh": "ref",
                    "expires": 123,
                }
            }
        },
    )
    loaded = load_config(config_path=config_path, auth_path=auth_path)
    provider = loaded.providers["codex"]
    assert isinstance(provider.auth, OAuthAuth)
    assert provider.auth.access == "acc"
    assert provider.auth.refresh == "ref"
    assert provider.auth.expires == 123
def test_load_config_supports_url_auth(tmp_path: Path) -> None:
    """An auth entry containing only ``url`` loads as UrlAuth."""
    config_path = tmp_path / "config.yml"
    auth_path = tmp_path / "auth.yml"
    _write_yaml(
        config_path,
        {
            "providers": {
                "kilo": {
                    "url": "https://api.kilo.ai/api/openrouter",
                    "type": "openai-completions",
                }
            }
        },
    )
    _write_yaml(
        auth_path,
        {
            "providers": {
                "kilo": {
                    "url": "https://auth.local/token",
                }
            }
        },
    )
    loaded = load_config(config_path=config_path, auth_path=auth_path)
    provider = loaded.providers["kilo"]
    assert isinstance(provider.auth, UrlAuth)
    assert provider.auth.url == "https://auth.local/token"

View file

@ -0,0 +1,330 @@
from __future__ import annotations
import asyncio
from collections.abc import AsyncIterator
import pytest
from app.core.errors import (
InvalidModelError,
ModelNotAllowedError,
ProviderNotFoundError,
)
from app.core.models_dev import ModelsDevCatalog
from app.core.router import RouterCore, split_routed_model
from app.core.types import (
CoreChatRequest,
CoreChunk,
CoreMessage,
ProviderChatRequest,
ProviderModel,
)
from app.providers.base import BaseProvider
class StubProvider(BaseProvider):
    """Test double: records chat requests and list_models call counts while
    yielding a fixed two-chunk stream and a single fixed model."""

    def __init__(self, **kwargs) -> None:
        super().__init__(api_type="stub", **kwargs)
        # Captured ProviderChatRequest objects, in call order.
        self.requests: list[ProviderChatRequest] = []
        # Number of times list_models() has been invoked (for cache tests).
        self.list_models_calls = 0

    @classmethod
    def from_config(cls, config):
        """Not used in tests; stubs are constructed directly."""
        raise NotImplementedError

    async def stream_chat(
        self, request: ProviderChatRequest
    ) -> AsyncIterator[CoreChunk]:
        """Record the request, then yield one content chunk and a stop chunk."""
        self.requests.append(request)
        yield CoreChunk(content="hello")
        yield CoreChunk(finish_reason="stop")

    async def list_models(self) -> list[ProviderModel]:
        """Count the call and return a single fixed model."""
        self.list_models_calls += 1
        return [ProviderModel(id="model-a", name="Model A", context_length=123)]
class StubModelsDevCatalog(ModelsDevCatalog):
    """Catalog double that records lookups and returns canned metadata
    without ever hitting the real fetch path."""

    def __init__(self, payload: dict[str, dict[str, object]] | None = None) -> None:
        # The fetcher must never run; _never_called raises if awaited.
        super().__init__(fetch_catalog=lambda: _never_called())
        self.payload = payload or {}
        # (provider_name, provider_url) pairs, in lookup order.
        self.calls: list[tuple[str, str]] = []
        self.provider_display_name = "Stub Provider"

    async def get_provider_models(
        self, *, provider_name: str, provider_url: str
    ) -> tuple[str | None, dict[str, dict[str, object]]]:
        """Record the lookup and return the fixed display name and payload."""
        self.calls.append((provider_name, provider_url))
        return self.provider_display_name, self.payload
async def _never_called() -> dict[str, object]:
    """Sentinel coroutine for the stub catalog; fails loudly if awaited."""
    raise RuntimeError("not expected")
class MissingFieldsProvider(BaseProvider):
    """Provider double whose single model carries only an id, so every other
    field must be filled from the models.dev catalog."""

    def __init__(self, **kwargs) -> None:
        super().__init__(api_type="stub", **kwargs)

    @classmethod
    def from_config(cls, config):
        """Not used in tests; stubs are constructed directly."""
        raise NotImplementedError

    async def stream_chat(
        self, request: ProviderChatRequest
    ) -> AsyncIterator[CoreChunk]:
        yield CoreChunk(finish_reason="stop")

    async def list_models(self) -> list[ProviderModel]:
        # Bare model: only the id, no name/pricing/context metadata.
        return [ProviderModel(id="model-x")]
class CompleteFieldsProvider(BaseProvider):
    """Provider double whose model already has every metadata field set, used
    to verify catalog data still takes precedence."""

    def __init__(self, **kwargs) -> None:
        super().__init__(api_type="stub", **kwargs)

    @classmethod
    def from_config(cls, config):
        """Not used in tests; stubs are constructed directly."""
        raise NotImplementedError

    async def stream_chat(
        self, request: ProviderChatRequest
    ) -> AsyncIterator[CoreChunk]:
        yield CoreChunk(finish_reason="stop")

    async def list_models(self) -> list[ProviderModel]:
        return [
            ProviderModel(
                id="model-a",
                name="Model A",
                created=1,
                context_length=4096,
                architecture={"input_modalities": ["text"]},
                pricing={"input": 1.0, "output": 2.0},
                supported_parameters=["max_tokens"],
                owned_by="kilo",
            )
        ]
class FailingModelsProvider(BaseProvider):
    """Provider double whose list_models always raises, for testing that a
    failing provider is skipped during aggregation."""

    def __init__(self, **kwargs) -> None:
        super().__init__(api_type="stub", **kwargs)

    @classmethod
    def from_config(cls, config):
        """Not used in tests; stubs are constructed directly."""
        raise NotImplementedError

    async def stream_chat(
        self, request: ProviderChatRequest
    ) -> AsyncIterator[CoreChunk]:
        yield CoreChunk(finish_reason="stop")

    async def list_models(self) -> list[ProviderModel]:
        raise RuntimeError("upstream unavailable")
def _collect(async_iter: AsyncIterator[CoreChunk]) -> list[CoreChunk]:
async def _inner() -> list[CoreChunk]:
out: list[CoreChunk] = []
async for item in async_iter:
out.append(item)
return out
return asyncio.run(_inner())
def test_split_routed_model_success() -> None:
    """A routed id splits at the first slash into provider key and model id."""
    provider_name, model_id = split_routed_model("kilocode/minimax/minimax-m2.5:free")
    assert provider_name == "kilocode"
    assert model_id == "minimax/minimax-m2.5:free"
@pytest.mark.parametrize("model", ["", "kilocode", "/gpt-4", "kilocode/"])
def test_split_routed_model_invalid(model: str) -> None:
    """Empty ids, ids without a slash, and ids with an empty side all raise."""
    with pytest.raises(InvalidModelError):
        split_routed_model(model)
def test_resolve_unknown_provider() -> None:
    """Resolving a model for an unregistered provider key raises."""
    core = RouterCore(providers={})
    with pytest.raises(ProviderNotFoundError):
        core.resolve_provider("missing/gpt-4")
def test_resolve_provider_validates_whitelist() -> None:
    """A model outside the provider's whitelist is rejected at resolve time."""
    provider = StubProvider(
        name="kilo", base_url="https://kilo", whitelist=["gpt-4"], blacklist=None
    )
    core = RouterCore(providers={"kilo": provider})
    with pytest.raises(ModelNotAllowedError):
        core.resolve_provider("kilo/not-allowed")
def test_stream_chat_routes_to_provider_model_without_prefix() -> None:
    """The provider prefix is stripped before the request reaches the
    provider, and the provider's chunks are passed through unchanged."""
    provider = StubProvider(
        name="kilo", base_url="https://kilo", whitelist=None, blacklist=None
    )
    core = RouterCore(providers={"kilo": provider})
    req = CoreChatRequest(
        model="kilo/minimax/minimax-m2.5:free",
        messages=[CoreMessage(role="user", content="ping")],
    )
    chunks = _collect(core.stream_chat(req))
    assert [c.content for c in chunks if c.content] == ["hello"]
    assert chunks[-1].finish_reason == "stop"
    # Provider must see the model id with the "kilo/" prefix removed.
    assert provider.requests[0].model == "minimax/minimax-m2.5:free"
def test_list_models_prefixes_provider_and_applies_defaults() -> None:
    """Aggregated ids are provider-prefixed and missing fields get defaults
    (object="model", created=0, owned_by="wzray")."""
    provider = StubProvider(
        name="kilo", base_url="https://kilo", whitelist=None, blacklist=None
    )
    core = RouterCore(providers={"kilo": provider})
    models = asyncio.run(core.list_models())
    assert len(models) == 1
    assert models[0].id == "kilo/model-a"
    assert models[0].object == "model"
    assert models[0].created == 0
    assert models[0].owned_by == "wzray"
def test_list_models_respects_whitelist() -> None:
    """Models not on the whitelist are filtered out of the listing."""
    provider = StubProvider(
        name="kilo", base_url="https://kilo", whitelist=["x"], blacklist=None
    )
    core = RouterCore(providers={"kilo": provider})
    models = asyncio.run(core.list_models())
    assert models == []
def test_list_models_uses_ttl_cache() -> None:
    """A second list_models call within the TTL hits the cache, not the
    provider."""
    provider = StubProvider(
        name="kilo", base_url="https://kilo", whitelist=None, blacklist=None
    )
    core = RouterCore(providers={"kilo": provider})
    first = asyncio.run(core.list_models())
    second = asyncio.run(core.list_models())
    assert len(first) == 1
    assert len(second) == 1
    assert provider.list_models_calls == 1
def test_list_models_cache_can_be_disabled_with_zero_ttl() -> None:
    """With a zero TTL every list_models call reaches the provider."""
    provider = StubProvider(
        name="kilo", base_url="https://kilo", whitelist=None, blacklist=None
    )
    core = RouterCore(providers={"kilo": provider}, models_cache_ttl_seconds=0.0)
    asyncio.run(core.list_models())
    asyncio.run(core.list_models())
    assert provider.list_models_calls == 2
def test_list_models_enriches_missing_fields_from_models_dev() -> None:
    """A bare provider model (id only) gets name/context/architecture/pricing
    and a derived supported_parameters list from the models.dev catalog."""
    provider = MissingFieldsProvider(
        name="zai",
        base_url="https://api.z.ai/api/coding/paas/v4",
        whitelist=None,
        blacklist=None,
    )
    catalog = StubModelsDevCatalog(
        {
            "model-x": {
                "name": "Model X",
                "limit": {"context": 65536, "output": 4096},
                "modalities": {"input": ["text"], "output": ["text"]},
                "family": "glm",
                "cost": {"input": 1.0, "output": 2.0},
                "tool_call": True,
                "reasoning": True,
                "structured_output": True,
                "release_date": "2026-01-15",
                "provider": "z-ai",
            }
        }
    )
    core = RouterCore(providers={"zai": provider}, models_dev_catalog=catalog)
    models = asyncio.run(core.list_models())
    assert len(models) == 1
    assert models[0].id == "zai/model-x"
    assert models[0].name == "Model X"
    assert models[0].context_length == 65536
    assert models[0].architecture == {
        "input_modalities": ["text"],
        "output_modalities": ["text"],
        "family": "glm",
    }
    assert models[0].pricing == {"input": 1.0, "output": 2.0}
    assert models[0].owned_by == "z-ai"
    # created is derived from the release_date, so it must be positive.
    assert models[0].created > 0
    # Derived from the tool_call/reasoning/structured_output capability flags.
    assert models[0].supported_parameters == [
        "max_completion_tokens",
        "max_tokens",
        "modalities",
        "parallel_tool_calls",
        "reasoning_effort",
        "reasoning_summary",
        "response_format",
        "tool_choice",
        "tools",
    ]
    assert models[0].provider_display_name == "Stub Provider"
    assert catalog.calls == [("zai", "https://api.z.ai/api/coding/paas/v4")]
def test_list_models_always_calls_models_dev_and_prefers_it() -> None:
    """Catalog metadata overrides provider-supplied fields even when the
    provider already populated them."""
    provider = CompleteFieldsProvider(
        name="kilo", base_url="https://kilo", whitelist=None, blacklist=None
    )
    catalog = StubModelsDevCatalog({"model-a": {"name": "Never Used"}})
    core = RouterCore(providers={"kilo": provider}, models_dev_catalog=catalog)
    models = asyncio.run(core.list_models())
    assert len(models) == 1
    assert models[0].name == "Never Used"
    assert catalog.calls == [("kilo", "https://kilo")]
def test_list_models_skips_failed_provider_and_returns_others() -> None:
    """One provider failing to list models must not break the aggregate."""
    good = StubProvider(
        name="good", base_url="https://good", whitelist=None, blacklist=None
    )
    bad = FailingModelsProvider(
        name="bad", base_url="https://bad", whitelist=None, blacklist=None
    )
    core = RouterCore(providers={"good": good, "bad": bad})
    models = asyncio.run(core.list_models())
    assert len(models) == 1
    assert models[0].id == "good/model-a"
def test_list_models_respects_provider_and_model_name_overrides() -> None:
    """Names configured on the provider win over catalog-supplied names."""
    provider = StubProvider(
        name="kilo", base_url="https://kilo", whitelist=None, blacklist=None
    )
    provider.display_name = "Configured Provider"
    provider.models_config = {"model-a": {"name": "Configured Model"}}
    catalog = StubModelsDevCatalog({"model-a": {"name": "ModelsDev Model"}})
    core = RouterCore(providers={"kilo": provider}, models_dev_catalog=catalog)
    models = asyncio.run(core.list_models())
    assert len(models) == 1
    assert models[0].name == "Configured Model"
    assert models[0].provider_display_name == "Configured Provider"

View file

@ -0,0 +1,541 @@
from __future__ import annotations
import asyncio
from collections.abc import AsyncIterator
from typing import Any, cast
import pytest
from app.config.models import LoadedProviderConfig, OAuthAuth, TokenAuth, UrlAuth
from app.core.errors import UpstreamProviderError
from app.core.types import CoreMessage, ProviderChatRequest
from app.providers.codex_responses.oauth import OAuthData
from app.providers.codex_responses.provider import CodexResponsesProvider
class _FakeEvent:
def __init__(
self,
*,
type: str,
delta: str | None = None,
text: str | None = None,
name: str | None = None,
arguments: str | None = None,
refusal: str | None = None,
item_id: str | None = None,
content_index: int | None = None,
output_index: int | None = None,
summary_index: int | None = None,
item: dict[str, Any] | None = None,
part: dict[str, Any] | None = None,
response: dict[str, Any] | None = None,
) -> None:
self.type = type
self.delta = delta
self.text = text
self.name = name
self.arguments = arguments
self.refusal = refusal
self.item_id = item_id
self.content_index = content_index
self.output_index = output_index
self.summary_index = summary_index
self.item = item
self.part = part
self.response = response
class _FakeStream:
def __init__(self, events: list[_FakeEvent]) -> None:
self._events = events
def __aiter__(self) -> AsyncIterator[_FakeEvent]:
async def _it() -> AsyncIterator[_FakeEvent]:
for event in self._events:
yield event
return _it()
class _FakeResponses:
    """responses.create stub: captures the payload and headers of the last
    call and returns a fixed two-delta stream ending in completion."""

    def __init__(self) -> None:
        # Full kwargs of the most recent create() call.
        self.last_payload: dict | None = None
        # extra_headers of the most recent create() call.
        self.last_headers: dict[str, str] | None = None

    async def create(self, **kwargs):
        self.last_payload = kwargs
        self.last_headers = kwargs.get("extra_headers")
        return _FakeStream(
            [
                _FakeEvent(type="response.output_text.delta", delta="Hello"),
                _FakeEvent(type="response.output_text.delta", delta=" world"),
                _FakeEvent(type="response.completed"),
            ]
        )
class _FakeModels:
    """models.list stub: records headers and returns one fixed model entry."""

    def __init__(self) -> None:
        # extra_headers of the most recent list() call.
        self.last_headers: dict[str, str] | None = None

    async def list(self, **kwargs):
        self.last_headers = kwargs.get("extra_headers")

        # Minimal response shape: only the .data attribute is read by callers.
        class _Resp:
            data = [
                {
                    "slug": "gpt-5-codex",
                    "display_name": "GPT-5 Codex",
                    "context_window": 400000,
                    "input_modalities": ["text"],
                }
            ]

        return _Resp()
class _FakeClient:
    """Happy-path OpenAI client stub with working responses and models APIs."""

    def __init__(self) -> None:
        self.responses = _FakeResponses()
        self.models = _FakeModels()
class _FakeOauth:
    """OAuth stub returning a fixed access token and bearer header."""

    async def get(self) -> OAuthData:
        return OAuthData(token="acc", headers={"Authorization": "Bearer acc"})

    async def get_headers(self) -> dict[str, str]:
        return {"Authorization": "Bearer acc"}
class _StaticUrlAuth:
async def get_headers(self) -> dict[str, str]:
return {"Authorization": "Bearer fetched-token"}
class _FailingResponses:
    """responses.create stub that always raises, to exercise error wrapping."""

    async def create(self, **kwargs):
        raise RuntimeError("boom")
class _FailingClient:
    """Client stub whose responses and models APIs both raise on use."""

    def __init__(self) -> None:
        self.responses = _FailingResponses()
        self.models = _FailingModels()
class _FailingModels:
    """models.list stub that always raises, to exercise error wrapping."""

    async def list(self, **kwargs):
        raise RuntimeError("boom")
class _CustomResponses:
    """responses.create stub that streams a caller-supplied event sequence."""

    def __init__(self, events: list[_FakeEvent]) -> None:
        self._events = events

    async def create(self, **kwargs):
        return _FakeStream(self._events)
class _CustomClient:
    """Client stub whose responses API replays caller-supplied events."""

    def __init__(self, events: list[_FakeEvent]) -> None:
        self.responses = _CustomResponses(events)
        self.models = _FakeModels()
def _collect(async_iter) -> list:
async def _inner() -> list:
out = []
async for item in async_iter:
out.append(item)
return out
return asyncio.run(_inner())
def test_codex_provider_streams_openai_responses_events() -> None:
    """Text deltas stream through as content chunks, and the upstream call
    carries the auth, originator, and session headers."""
    client = _FakeClient()
    provider = CodexResponsesProvider(
        name="codex",
        base_url="https://chatgpt.com/backend-api",
        oauth=cast(Any, _FakeOauth()),
        client=client,
    )
    req = ProviderChatRequest(
        model="gpt-5-codex",
        messages=[
            CoreMessage(role="developer", content="Be concise."),
            CoreMessage(role="user", content="Say hello"),
        ],
    )
    chunks = _collect(provider.stream_chat(req))
    assert chunks[0].role == "assistant"
    assert chunks[1].content == "Hello"
    assert chunks[2].content == " world"
    assert chunks[-1].finish_reason == "stop"
    assert client.responses.last_headers is not None
    assert client.responses.last_headers["Authorization"] == "Bearer acc"
    assert client.responses.last_headers["originator"] == "opencode"
    assert client.responses.last_headers["session_id"].startswith("ses_")
    payload = client.responses.last_payload
    assert payload is not None
    assert payload["model"] == "gpt-5-codex"
    assert payload["stream"] is True
def test_codex_provider_wraps_upstream_errors() -> None:
    """Upstream failures during streaming surface as UpstreamProviderError."""
    provider = CodexResponsesProvider(
        name="codex",
        base_url="https://chatgpt.com/backend-api",
        oauth=cast(Any, _FakeOauth()),
        client=_FailingClient(),
    )
    req = ProviderChatRequest(
        model="gpt-5-codex",
        messages=[
            CoreMessage(role="system", content="Follow instructions."),
            CoreMessage(role="user", content="Say hello"),
        ],
    )
    with pytest.raises(UpstreamProviderError, match="failed while streaming"):
        _collect(provider.stream_chat(req))
def test_codex_provider_allows_missing_instructions_message() -> None:
    """Requests without a developer/system message fall back to the default
    assistant instructions."""
    client = _FakeClient()
    provider = CodexResponsesProvider(
        name="codex",
        base_url="https://chatgpt.com/backend-api",
        oauth=cast(Any, _FakeOauth()),
        client=client,
    )
    req = ProviderChatRequest(
        model="gpt-5-codex",
        messages=[CoreMessage(role="user", content="Say hello")],
    )
    chunks = _collect(provider.stream_chat(req))
    assert chunks[0].role == "assistant"
    assert chunks[-1].finish_reason == "stop"
    assert client.responses.last_payload is not None
    assert (
        client.responses.last_payload["instructions"] == "You are a helpful assistant."
    )
def test_codex_provider_requires_oauth_in_from_config() -> None:
    """Token auth is not accepted by the codex provider factory."""
    config = LoadedProviderConfig(
        name="codex",
        url="https://chatgpt.com/backend-api",
        type="codex-responses",
        auth=TokenAuth(token="bad"),
    )
    with pytest.raises(ValueError, match="requires oauth or url auth"):
        CodexResponsesProvider.from_config(config)
def test_codex_provider_lists_models() -> None:
    """models.list entries map to ProviderModel fields, and auth headers are
    forwarded to the upstream call."""
    client = _FakeClient()
    provider = CodexResponsesProvider(
        name="codex",
        base_url="https://chatgpt.com/backend-api",
        oauth=cast(Any, _FakeOauth()),
        client=client,
    )
    models = asyncio.run(provider.list_models())
    assert len(models) == 1
    assert models[0].id == "gpt-5-codex"
    assert models[0].architecture == {"input_modalities": ["text"]}
    assert models[0].context_length == 400000
    assert models[0].name == "GPT-5 Codex"
    assert client.models.last_headers == {"Authorization": "Bearer acc"}
def test_codex_provider_wraps_model_list_errors() -> None:
    """Failures during model listing surface as UpstreamProviderError."""
    provider = CodexResponsesProvider(
        name="codex",
        base_url="https://chatgpt.com/backend-api",
        oauth=cast(Any, _FakeOauth()),
        client=_FailingClient(),
    )
    with pytest.raises(UpstreamProviderError, match="failed while listing models"):
        asyncio.run(provider.list_models())
def test_codex_provider_from_config_success() -> None:
    """OAuth auth config builds a CodexResponsesProvider."""
    config = LoadedProviderConfig(
        name="codex",
        url="https://chatgpt.com/backend-api",
        type="codex-responses",
        auth=OAuthAuth(access="a", refresh="r", expires=9999999999999),
    )
    provider = CodexResponsesProvider.from_config(config)
    assert isinstance(provider, CodexResponsesProvider)
def test_codex_provider_from_config_supports_url_auth() -> None:
    """URL auth config also builds a CodexResponsesProvider."""
    config = LoadedProviderConfig(
        name="codex",
        url="https://chatgpt.com/backend-api",
        type="codex-responses",
        auth=UrlAuth(url="https://auth.local/token"),
    )
    provider = CodexResponsesProvider.from_config(config)
    assert isinstance(provider, CodexResponsesProvider)
def test_codex_provider_uses_url_auth_headers() -> None:
    """Headers fetched via URL auth are forwarded to the streaming call."""
    client = _FakeClient()
    provider = CodexResponsesProvider(
        name="codex",
        base_url="https://chatgpt.com/backend-api",
        oauth=cast(Any, _StaticUrlAuth()),
        client=client,
    )
    req = ProviderChatRequest(
        model="gpt-5-codex",
        messages=[CoreMessage(role="user", content="hello")],
    )
    _collect(provider.stream_chat(req))
    assert client.responses.last_headers is not None
    assert client.responses.last_headers["Authorization"] == "Bearer fetched-token"
def test_codex_provider_emits_text_from_output_item_done_without_deltas() -> None:
    """If no text deltas were streamed, the final output_item.done payload is
    emitted as the content chunk instead."""
    provider = CodexResponsesProvider(
        name="codex",
        base_url="https://chatgpt.com/backend-api",
        oauth=cast(Any, _FakeOauth()),
        client=_CustomClient(
            [
                _FakeEvent(
                    type="response.output_item.done",
                    item={
                        "type": "message",
                        "id": "msg_1",
                        "content": [{"type": "output_text", "text": "Hello from done"}],
                    },
                ),
                _FakeEvent(type="response.completed"),
            ]
        ),
    )
    req = ProviderChatRequest(
        model="gpt-5-codex",
        messages=[
            CoreMessage(role="developer", content="Be concise."),
            CoreMessage(role="user", content="Say hello"),
        ],
    )
    chunks = _collect(provider.stream_chat(req))
    assert chunks[0].role == "assistant"
    assert chunks[1].content == "Hello from done"
    assert chunks[-1].finish_reason == "stop"
def test_codex_provider_does_not_duplicate_done_after_delta() -> None:
    """A text.done event matching already-streamed deltas must not emit the
    same text a second time."""
    provider = CodexResponsesProvider(
        name="codex",
        base_url="https://chatgpt.com/backend-api",
        oauth=cast(Any, _FakeOauth()),
        client=_CustomClient(
            [
                _FakeEvent(
                    type="response.output_text.delta",
                    item_id="msg_1",
                    content_index=0,
                    delta="Hi",
                ),
                _FakeEvent(
                    type="response.output_text.done",
                    item_id="msg_1",
                    content_index=0,
                    text="Hi",
                ),
                _FakeEvent(type="response.completed"),
            ]
        ),
    )
    req = ProviderChatRequest(
        model="gpt-5-codex",
        messages=[
            CoreMessage(role="developer", content="Be concise."),
            CoreMessage(role="user", content="Say hello"),
        ],
    )
    chunks = _collect(provider.stream_chat(req))
    content_chunks = [c.content for c in chunks if c.content is not None]
    assert content_chunks == ["Hi"]
def test_codex_provider_maps_incomplete_reason_to_length_finish() -> None:
    """response.incomplete with reason max_output_tokens finishes as
    "length"."""
    provider = CodexResponsesProvider(
        name="codex",
        base_url="https://chatgpt.com/backend-api",
        oauth=cast(Any, _FakeOauth()),
        client=_CustomClient(
            [
                _FakeEvent(
                    type="response.incomplete",
                    response={"incomplete_details": {"reason": "max_output_tokens"}},
                )
            ]
        ),
    )
    req = ProviderChatRequest(
        model="gpt-5-codex",
        messages=[
            CoreMessage(role="developer", content="Be concise."),
            CoreMessage(role="user", content="Long output"),
        ],
    )
    chunks = _collect(provider.stream_chat(req))
    assert chunks[-1].finish_reason == "length"
def test_codex_provider_marks_tool_calls_finish_reason() -> None:
    """function_call_arguments events become one tool_calls chunk and force
    the "tool_calls" finish reason."""
    provider = CodexResponsesProvider(
        name="codex",
        base_url="https://chatgpt.com/backend-api",
        oauth=cast(Any, _FakeOauth()),
        client=_CustomClient(
            [
                _FakeEvent(
                    type="response.function_call_arguments.delta",
                    item_id="call_1",
                    delta="{",
                ),
                _FakeEvent(
                    type="response.function_call_arguments.done",
                    item_id="call_1",
                    name="question",
                    arguments='{"questions":[]}',
                ),
                _FakeEvent(type="response.completed"),
            ]
        ),
    )
    req = ProviderChatRequest(
        model="gpt-5-codex",
        messages=[
            CoreMessage(role="developer", content="Be concise."),
            CoreMessage(role="user", content="Use tools"),
        ],
    )
    chunks = _collect(provider.stream_chat(req))
    tool_chunks = [c for c in chunks if c.tool_calls]
    assert len(tool_chunks) == 1
    assert tool_chunks[0].tool_calls == [
        {
            "index": 0,
            "id": "call_1",
            "type": "function",
            "function": {"name": "question", "arguments": '{"questions":[]}'},
        }
    ]
    assert chunks[-1].finish_reason == "tool_calls"
def test_codex_provider_streams_reasoning_summary_delta() -> None:
    """Reasoning summary deltas surface as reasoning_content on the stream."""
    events = [
        _FakeEvent(type="response.reasoning_summary_text.delta", delta="thinking..."),
        _FakeEvent(type="response.completed"),
    ]
    provider = CodexResponsesProvider(
        name="codex",
        base_url="https://chatgpt.com/backend-api",
        oauth=cast(Any, _FakeOauth()),
        client=_CustomClient(events),
    )
    request = ProviderChatRequest(
        model="gpt-5-codex",
        messages=[
            CoreMessage(role="developer", content="Be concise."),
            CoreMessage(role="user", content="Think first"),
        ],
    )
    streamed = _collect(provider.stream_chat(request))
    assert streamed[0].role == "assistant"
    assert streamed[1].reasoning_content == "thinking..."
    assert streamed[-1].finish_reason == "stop"
def test_codex_provider_streams_reasoning_details_with_encrypted_content() -> None:
    """Encrypted reasoning items and summary text both land in reasoning_details."""
    events = [
        _FakeEvent(
            type="response.output_item.added",
            output_index=0,
            item={
                "id": "rs_1",
                "type": "reasoning",
                "encrypted_content": "enc_123",
            },
        ),
        _FakeEvent(
            type="response.reasoning_summary_text.done",
            item_id="rs_1",
            summary_index=0,
            text="**Plan**",
        ),
        _FakeEvent(type="response.completed"),
    ]
    provider = CodexResponsesProvider(
        name="codex",
        base_url="https://chatgpt.com/backend-api",
        oauth=cast(Any, _FakeOauth()),
        client=_CustomClient(events),
    )
    request = ProviderChatRequest(
        model="gpt-5-codex",
        messages=[
            CoreMessage(role="developer", content="Be concise."),
            CoreMessage(role="user", content="Think first"),
        ],
    )
    streamed = _collect(provider.stream_chat(request))
    # Flatten every dict-shaped detail emitted across the whole stream.
    details = []
    for chunk in streamed:
        for entry in chunk.reasoning_details or []:
            if isinstance(entry, dict):
                details.append(entry)
    assert {entry.get("type") for entry in details} == {
        "reasoning.encrypted",
        "reasoning.summary",
    }
    encrypted = next(e for e in details if e.get("type") == "reasoning.encrypted")
    assert encrypted["data"] == "enc_123"
    assert encrypted["id"] == "rs_1"
    assert encrypted["format"] == "openai-responses-v1"
    summary = next(e for e in details if e.get("type") == "reasoning.summary")
    assert summary["summary"] == "**Plan**"
    assert summary["id"] == "rs_1"
    assert summary["format"] == "openai-responses-v1"
    texts = [c.reasoning_content for c in streamed if c.reasoning_content]
    assert texts == ["**Plan**"]
    assert streamed[-1].finish_reason == "stop"

View file

@ -0,0 +1,442 @@
from __future__ import annotations
import asyncio
from collections.abc import AsyncIterator
import pytest
from app.config.models import LoadedProviderConfig, OAuthAuth, UrlAuth
from app.core.errors import UpstreamProviderError
from app.core.types import CoreMessage, ProviderChatRequest
from app.providers.openai_completions.provider import OpenAICompletionsProvider
class _FakeDelta:
def __init__(
self,
role: str | None = None,
content: str | None = None,
reasoning_content: str | None = None,
reasoning: object | None = None,
tool_calls: list[dict[str, object]] | None = None,
) -> None:
self.role = role
self.content = content
self.reasoning_content = reasoning_content
self.reasoning = reasoning
self.tool_calls = tool_calls
class _FakeChoice:
def __init__(
self,
*,
index: int,
delta: _FakeDelta | None = None,
finish_reason: str | None = None,
) -> None:
self.index = index
self.delta = delta
self.finish_reason = finish_reason
class _FakeChunk:
def __init__(self, choices: list[_FakeChoice]) -> None:
self.choices = choices
class _FakeStream:
def __init__(self, chunks: list[_FakeChunk]) -> None:
self._chunks = chunks
def __aiter__(self) -> AsyncIterator[_FakeChunk]:
async def _it() -> AsyncIterator[_FakeChunk]:
for chunk in self._chunks:
yield chunk
return _it()
class _FakeCompletions:
    """Records the last create() kwargs and replays a canned three-chunk stream."""

    def __init__(self) -> None:
        # Last payload passed to create(); None until the first call.
        self.payload: dict | None = None

    async def create(self, **kwargs):
        self.payload = kwargs
        scripted = [
            _FakeChunk([_FakeChoice(index=0, delta=_FakeDelta(role="assistant"))]),
            _FakeChunk([_FakeChoice(index=0, delta=_FakeDelta(content="Hello"))]),
            _FakeChunk([_FakeChoice(index=0, finish_reason="stop")]),
        ]
        return _FakeStream(scripted)
class _ReasoningCompletions:
    """Replays a stream with a reasoning delta, a tool call, then a finish."""

    async def create(self, **kwargs):
        role_chunk = _FakeChunk(
            [_FakeChoice(index=0, delta=_FakeDelta(role="assistant"))]
        )
        reasoning_chunk = _FakeChunk(
            [
                _FakeChoice(
                    index=0,
                    delta=_FakeDelta(reasoning={"summary": "thinking"}),
                )
            ]
        )
        tool_call = {
            "index": 0,
            "id": "call_1",
            "type": "function",
            "function": {
                "name": "question",
                "arguments": "{",
            },
        }
        tool_chunk = _FakeChunk(
            [_FakeChoice(index=0, delta=_FakeDelta(tool_calls=[tool_call]))]
        )
        finish_chunk = _FakeChunk(
            [_FakeChoice(index=0, finish_reason="tool_calls")]
        )
        return _FakeStream([role_chunk, reasoning_chunk, tool_chunk, finish_chunk])
class _FakeChat:
    """Mimics the SDK's `client.chat` namespace for the happy-path stream."""

    def __init__(self) -> None:
        self.completions = _FakeCompletions()
class _ReasoningChat:
    """`client.chat` namespace wired to the reasoning/tool-call scripted stream."""

    def __init__(self) -> None:
        self.completions = _ReasoningCompletions()
class _FakeClient:
    """Top-level fake OpenAI client: happy-path chat plus a canned models API."""

    def __init__(self) -> None:
        self.chat = _FakeChat()
        self.models = _FakeModels()
class _ReasoningClient:
    """Fake OpenAI client whose chat stream emits reasoning and tool calls."""

    def __init__(self) -> None:
        self.chat = _ReasoningChat()
        self.models = _FakeModels()
class _FakeModels:
def __init__(self) -> None:
self.last_kwargs: dict[str, object] | None = None
async def list(self, **kwargs):
self.last_kwargs = kwargs
class _Resp:
data = [
{
"id": "minimax/minimax-m2.5:free",
"name": "MiniMax",
"description": "desc",
"context_length": 2048,
"architecture": {
"input_modalities": ["text"],
"tokenizer": "Other",
},
"pricing": {"prompt": "0"},
"supported_parameters": ["max_tokens"],
"settings": {"foo": "bar"},
"opencode": {"family": "x"},
}
]
return _Resp()
class _FailingCompletions:
async def create(self, **kwargs):
raise RuntimeError("boom")
class _FailingChat:
    """`client.chat` namespace whose completions endpoint always raises."""

    def __init__(self) -> None:
        self.completions = _FailingCompletions()
class _FailingClient:
    """Fake OpenAI client where both chat and models endpoints raise."""

    def __init__(self) -> None:
        self.chat = _FailingChat()
        self.models = _FailingModels()
class _FailingModels:
async def list(self, **kwargs):
raise RuntimeError("boom")
class _StaticAuth:
async def get_headers(self) -> dict[str, str]:
return {"Authorization": "Bearer fetched-token"}
def _collect(async_iter) -> list:
async def _inner() -> list:
out = []
async for item in async_iter:
out.append(item)
return out
return asyncio.run(_inner())
def test_openai_completions_provider_streams_internal_chunks() -> None:
    """Happy path: deltas map to internal chunks and request params are forwarded."""
    fake_client = _FakeClient()
    provider = OpenAICompletionsProvider(
        name="kilo",
        base_url="https://api.kilo.ai/api/openrouter",
        token="public",
        client=fake_client,
    )
    request = ProviderChatRequest(
        model="minimax/minimax-m2.5:free",
        messages=[CoreMessage(role="user", content="hello")],
        top_p=0.9,
        max_tokens=123,
    )
    streamed = _collect(provider.stream_chat(request))
    assert streamed[0].role == "assistant"
    assert streamed[1].content == "Hello"
    assert streamed[2].finish_reason == "stop"
    sent = fake_client.chat.completions.payload
    assert sent is not None
    assert sent["model"] == "minimax/minimax-m2.5:free"
    assert sent["stream"] is True
    assert sent["top_p"] == 0.9
    assert sent["max_tokens"] == 123
    assert sent["messages"] == [{"role": "user", "content": "hello"}]
    assert sent["extra_headers"] == {"Authorization": "Bearer public"}
def test_openai_completions_provider_streams_reasoning_details() -> None:
    """A reasoning delta carrying `details` is surfaced on the internal chunk."""

    class _DetailsCompletions:
        async def create(self, **kwargs):
            delta = _FakeDelta(
                reasoning={
                    "details": [
                        {
                            "type": "reasoning.encrypted",
                            "data": "enc_123",
                            "format": "openai-responses-v1",
                        }
                    ]
                }
            )
            return _FakeStream(
                [
                    _FakeChunk([_FakeChoice(index=0, delta=delta)]),
                    _FakeChunk([_FakeChoice(index=0, finish_reason="stop")]),
                ]
            )

    class _DetailsChat:
        def __init__(self) -> None:
            self.completions = _DetailsCompletions()

    class _DetailsClient:
        def __init__(self) -> None:
            self.chat = _DetailsChat()
            self.models = _FakeModels()

    provider = OpenAICompletionsProvider(
        name="kilo",
        base_url="https://api.kilo.ai/api/openrouter",
        token="public",
        client=_DetailsClient(),
    )
    request = ProviderChatRequest(
        model="minimax/minimax-m2.5:free",
        messages=[CoreMessage(role="user", content="hello")],
    )
    streamed = _collect(provider.stream_chat(request))
    assert streamed[0].role == "assistant"
    assert streamed[1].reasoning_details == [
        {
            "type": "reasoning.encrypted",
            "data": "enc_123",
            "format": "openai-responses-v1",
        }
    ]
    assert streamed[2].finish_reason == "stop"
def test_openai_completions_provider_wraps_upstream_error() -> None:
    """SDK failures during streaming surface as UpstreamProviderError."""
    provider = OpenAICompletionsProvider(
        name="kilo",
        base_url="https://api.kilo.ai/api/openrouter",
        token="public",
        client=_FailingClient(),
    )
    request = ProviderChatRequest(
        model="minimax/minimax-m2.5:free",
        messages=[CoreMessage(role="user", content="hello")],
    )
    with pytest.raises(UpstreamProviderError, match="failed while streaming"):
        _collect(provider.stream_chat(request))
def test_openai_completions_provider_streams_reasoning_and_tool_calls() -> None:
    """Reasoning summaries and tool-call deltas both pass through the stream."""
    provider = OpenAICompletionsProvider(
        name="kilo",
        base_url="https://api.kilo.ai/api/openrouter",
        token="public",
        client=_ReasoningClient(),
    )
    request = ProviderChatRequest(
        model="minimax/minimax-m2.5:free",
        messages=[CoreMessage(role="user", content="hello")],
    )
    streamed = _collect(provider.stream_chat(request))
    assert streamed[0].role == "assistant"
    assert streamed[1].reasoning_content == "thinking"
    expected_call = {
        "index": 0,
        "id": "call_1",
        "type": "function",
        "function": {"name": "question", "arguments": "{"},
    }
    assert streamed[2].tool_calls == [expected_call]
    assert streamed[3].finish_reason == "tool_calls"
def test_openai_completions_provider_lists_models() -> None:
    """list_models maps upstream entries and sends the static auth header."""
    fake_client = _FakeClient()
    provider = OpenAICompletionsProvider(
        name="kilo",
        base_url="https://api.kilo.ai/api/openrouter",
        token="public",
        client=fake_client,
    )
    listed = asyncio.run(provider.list_models())
    assert len(listed) == 1
    first = listed[0]
    assert first.id == "minimax/minimax-m2.5:free"
    assert first.context_length == 2048
    assert first.architecture == {"input_modalities": ["text"]}
    assert fake_client.models.last_kwargs == {
        "extra_headers": {"Authorization": "Bearer public"}
    }
def test_openai_completions_provider_wraps_model_list_errors() -> None:
    """Failures from the upstream models API become UpstreamProviderError."""
    provider = OpenAICompletionsProvider(
        name="kilo",
        base_url="https://api.kilo.ai/api/openrouter",
        token="public",
        client=_FailingClient(),
    )
    with pytest.raises(UpstreamProviderError, match="failed while listing models"):
        asyncio.run(provider.list_models())
def test_openai_completions_provider_from_config_supports_url_auth() -> None:
    """from_config accepts url-based auth and builds a provider instance."""
    cfg = LoadedProviderConfig(
        name="kilo",
        url="https://api.kilo.ai/api/openrouter",
        type="openai-completions",
        auth=UrlAuth(url="https://auth.local/token"),
    )
    built = OpenAICompletionsProvider.from_config(cfg)
    assert isinstance(built, OpenAICompletionsProvider)
def test_openai_completions_provider_from_config_rejects_oauth() -> None:
    """from_config refuses OAuth credentials for this provider type."""
    cfg = LoadedProviderConfig(
        name="kilo",
        url="https://api.kilo.ai/api/openrouter",
        type="openai-completions",
        auth=OAuthAuth(access="acc", refresh="ref", expires=1),
    )
    with pytest.raises(ValueError, match="requires token or url auth"):
        OpenAICompletionsProvider.from_config(cfg)
def test_openai_completions_provider_uses_custom_auth_provider() -> None:
    """A custom auth provider supplies the Authorization header per request."""
    fake_client = _FakeClient()
    provider = OpenAICompletionsProvider(
        name="kilo",
        base_url="https://api.kilo.ai/api/openrouter",
        auth_provider=_StaticAuth(),
        client=fake_client,
    )
    request = ProviderChatRequest(
        model="minimax/minimax-m2.5:free",
        messages=[CoreMessage(role="user", content="hello")],
    )
    _collect(provider.stream_chat(request))
    sent = fake_client.chat.completions.payload
    assert sent is not None
    assert sent["extra_headers"] == {"Authorization": "Bearer fetched-token"}
def test_openai_completions_provider_passes_schema_fields_via_extra_body() -> None:
    """OpenRouter-specific request fields are forwarded through extra_body."""
    fake_client = _FakeClient()
    provider = OpenAICompletionsProvider(
        name="kilo",
        base_url="https://api.kilo.ai/api/openrouter",
        token="public",
        client=fake_client,
    )
    request = ProviderChatRequest(
        model="minimax/minimax-m2.5:free",
        messages=[CoreMessage(role="user", content="hello")],
        reasoning={"effort": "high", "summary": "detailed"},
        provider={"allow_fallbacks": False},
        plugins=[{"id": "web", "enabled": True}],
        session_id="ses_123",
        trace={"trace_id": "tr_1"},
        models=["openai/gpt-5"],
        debug={"echo_upstream_body": True},
        image_config={"size": "1024x1024"},
    )
    _collect(provider.stream_chat(request))
    sent = fake_client.chat.completions.payload
    assert sent is not None
    expected_extra_body = {
        "reasoning": {"effort": "high", "summary": "detailed"},
        "provider": {"allow_fallbacks": False},
        "plugins": [{"id": "web", "enabled": True}],
        "session_id": "ses_123",
        "trace": {"trace_id": "tr_1"},
        "models": ["openai/gpt-5"],
        "debug": {"echo_upstream_body": True},
        "image_config": {"size": "1024x1024"},
    }
    assert sent["extra_body"] == expected_extra_body

647
uv.lock generated Normal file
View file

@ -0,0 +1,647 @@
version = 1
revision = 3
requires-python = ">=3.14"
[[package]]
name = "ai-router"
version = "0.1.0"
source = { virtual = "." }
dependencies = [
{ name = "fastapi" },
{ name = "httpx" },
{ name = "openai" },
{ name = "pydantic" },
{ name = "pyyaml" },
{ name = "uvicorn", extra = ["standard"] },
]
[package.dev-dependencies]
dev = [
{ name = "mypy" },
{ name = "pytest" },
{ name = "ruff" },
{ name = "types-pyyaml" },
]
[package.metadata]
requires-dist = [
{ name = "fastapi", specifier = ">=0.109.0" },
{ name = "httpx", specifier = ">=0.26.0" },
{ name = "openai", specifier = ">=1.108.0" },
{ name = "pydantic", specifier = ">=2.5.0" },
{ name = "pyyaml", specifier = ">=6.0" },
{ name = "uvicorn", extras = ["standard"], specifier = ">=0.27.0" },
]
[package.metadata.requires-dev]
dev = [
{ name = "mypy", specifier = ">=1.19.1" },
{ name = "pytest", specifier = ">=8.0.0" },
{ name = "ruff", specifier = ">=0.2.0" },
{ name = "types-pyyaml", specifier = ">=6.0.12.20250915" },
]
[[package]]
name = "annotated-doc"
version = "0.0.4"
source = { registry = "https://pypi.org/simple" }
sdist = { url = "https://files.pythonhosted.org/packages/57/ba/046ceea27344560984e26a590f90bc7f4a75b06701f653222458922b558c/annotated_doc-0.0.4.tar.gz", hash = "sha256:fbcda96e87e9c92ad167c2e53839e57503ecfda18804ea28102353485033faa4", size = 7288, upload-time = "2025-11-10T22:07:42.062Z" }
wheels = [
{ url = "https://files.pythonhosted.org/packages/1e/d3/26bf1008eb3d2daa8ef4cacc7f3bfdc11818d111f7e2d0201bc6e3b49d45/annotated_doc-0.0.4-py3-none-any.whl", hash = "sha256:571ac1dc6991c450b25a9c2d84a3705e2ae7a53467b5d111c24fa8baabbed320", size = 5303, upload-time = "2025-11-10T22:07:40.673Z" },
]
[[package]]
name = "annotated-types"
version = "0.7.0"
source = { registry = "https://pypi.org/simple" }
sdist = { url = "https://files.pythonhosted.org/packages/ee/67/531ea369ba64dcff5ec9c3402f9f51bf748cec26dde048a2f973a4eea7f5/annotated_types-0.7.0.tar.gz", hash = "sha256:aff07c09a53a08bc8cfccb9c85b05f1aa9a2a6f23728d790723543408344ce89", size = 16081, upload-time = "2024-05-20T21:33:25.928Z" }
wheels = [
{ url = "https://files.pythonhosted.org/packages/78/b6/6307fbef88d9b5ee7421e68d78a9f162e0da4900bc5f5793f6d3d0e34fb8/annotated_types-0.7.0-py3-none-any.whl", hash = "sha256:1f02e8b43a8fbbc3f3e0d4f0f4bfc8131bcb4eebe8849b8e5c773f3a1c582a53", size = 13643, upload-time = "2024-05-20T21:33:24.1Z" },
]
[[package]]
name = "anyio"
version = "4.12.1"
source = { registry = "https://pypi.org/simple" }
dependencies = [
{ name = "idna" },
]
sdist = { url = "https://files.pythonhosted.org/packages/96/f0/5eb65b2bb0d09ac6776f2eb54adee6abe8228ea05b20a5ad0e4945de8aac/anyio-4.12.1.tar.gz", hash = "sha256:41cfcc3a4c85d3f05c932da7c26d0201ac36f72abd4435ba90d0464a3ffed703", size = 228685, upload-time = "2026-01-06T11:45:21.246Z" }
wheels = [
{ url = "https://files.pythonhosted.org/packages/38/0e/27be9fdef66e72d64c0cdc3cc2823101b80585f8119b5c112c2e8f5f7dab/anyio-4.12.1-py3-none-any.whl", hash = "sha256:d405828884fc140aa80a3c667b8beed277f1dfedec42ba031bd6ac3db606ab6c", size = 113592, upload-time = "2026-01-06T11:45:19.497Z" },
]
[[package]]
name = "certifi"
version = "2026.2.25"
source = { registry = "https://pypi.org/simple" }
sdist = { url = "https://files.pythonhosted.org/packages/af/2d/7bf41579a8986e348fa033a31cdd0e4121114f6bce2457e8876010b092dd/certifi-2026.2.25.tar.gz", hash = "sha256:e887ab5cee78ea814d3472169153c2d12cd43b14bd03329a39a9c6e2e80bfba7", size = 155029, upload-time = "2026-02-25T02:54:17.342Z" }
wheels = [
{ url = "https://files.pythonhosted.org/packages/9a/3c/c17fb3ca2d9c3acff52e30b309f538586f9f5b9c9cf454f3845fc9af4881/certifi-2026.2.25-py3-none-any.whl", hash = "sha256:027692e4402ad994f1c42e52a4997a9763c646b73e4096e4d5d6db8af1d6f0fa", size = 153684, upload-time = "2026-02-25T02:54:15.766Z" },
]
[[package]]
name = "click"
version = "8.3.1"
source = { registry = "https://pypi.org/simple" }
dependencies = [
{ name = "colorama", marker = "sys_platform == 'win32'" },
]
sdist = { url = "https://files.pythonhosted.org/packages/3d/fa/656b739db8587d7b5dfa22e22ed02566950fbfbcdc20311993483657a5c0/click-8.3.1.tar.gz", hash = "sha256:12ff4785d337a1bb490bb7e9c2b1ee5da3112e94a8622f26a6c77f5d2fc6842a", size = 295065, upload-time = "2025-11-15T20:45:42.706Z" }
wheels = [
{ url = "https://files.pythonhosted.org/packages/98/78/01c019cdb5d6498122777c1a43056ebb3ebfeef2076d9d026bfe15583b2b/click-8.3.1-py3-none-any.whl", hash = "sha256:981153a64e25f12d547d3426c367a4857371575ee7ad18df2a6183ab0545b2a6", size = 108274, upload-time = "2025-11-15T20:45:41.139Z" },
]
[[package]]
name = "colorama"
version = "0.4.6"
source = { registry = "https://pypi.org/simple" }
sdist = { url = "https://files.pythonhosted.org/packages/d8/53/6f443c9a4a8358a93a6792e2acffb9d9d5cb0a5cfd8802644b7b1c9a02e4/colorama-0.4.6.tar.gz", hash = "sha256:08695f5cb7ed6e0531a20572697297273c47b8cae5a63ffc6d6ed5c201be6e44", size = 27697, upload-time = "2022-10-25T02:36:22.414Z" }
wheels = [
{ url = "https://files.pythonhosted.org/packages/d1/d6/3965ed04c63042e047cb6a3e6ed1a63a35087b6a609aa3a15ed8ac56c221/colorama-0.4.6-py2.py3-none-any.whl", hash = "sha256:4f1d9991f5acc0ca119f9d443620b77f9d6b33703e51011c16baf57afb285fc6", size = 25335, upload-time = "2022-10-25T02:36:20.889Z" },
]
[[package]]
name = "distro"
version = "1.9.0"
source = { registry = "https://pypi.org/simple" }
sdist = { url = "https://files.pythonhosted.org/packages/fc/f8/98eea607f65de6527f8a2e8885fc8015d3e6f5775df186e443e0964a11c3/distro-1.9.0.tar.gz", hash = "sha256:2fa77c6fd8940f116ee1d6b94a2f90b13b5ea8d019b98bc8bafdcabcdd9bdbed", size = 60722, upload-time = "2023-12-24T09:54:32.31Z" }
wheels = [
{ url = "https://files.pythonhosted.org/packages/12/b3/231ffd4ab1fc9d679809f356cebee130ac7daa00d6d6f3206dd4fd137e9e/distro-1.9.0-py3-none-any.whl", hash = "sha256:7bffd925d65168f85027d8da9af6bddab658135b840670a223589bc0c8ef02b2", size = 20277, upload-time = "2023-12-24T09:54:30.421Z" },
]
[[package]]
name = "fastapi"
version = "0.133.1"
source = { registry = "https://pypi.org/simple" }
dependencies = [
{ name = "annotated-doc" },
{ name = "pydantic" },
{ name = "starlette" },
{ name = "typing-extensions" },
{ name = "typing-inspection" },
]
sdist = { url = "https://files.pythonhosted.org/packages/22/6f/0eafed8349eea1fa462238b54a624c8b408cd1ba2795c8e64aa6c34f8ab7/fastapi-0.133.1.tar.gz", hash = "sha256:ed152a45912f102592976fde6cbce7dae1a8a1053da94202e51dd35d184fadd6", size = 378741, upload-time = "2026-02-25T18:18:17.398Z" }
wheels = [
{ url = "https://files.pythonhosted.org/packages/d2/c9/a175a7779f3599dfa4adfc97a6ce0e157237b3d7941538604aadaf97bfb6/fastapi-0.133.1-py3-none-any.whl", hash = "sha256:658f34ba334605b1617a65adf2ea6461901bdb9af3a3080d63ff791ecf7dc2e2", size = 109029, upload-time = "2026-02-25T18:18:18.578Z" },
]
[[package]]
name = "h11"
version = "0.16.0"
source = { registry = "https://pypi.org/simple" }
sdist = { url = "https://files.pythonhosted.org/packages/01/ee/02a2c011bdab74c6fb3c75474d40b3052059d95df7e73351460c8588d963/h11-0.16.0.tar.gz", hash = "sha256:4e35b956cf45792e4caa5885e69fba00bdbc6ffafbfa020300e549b208ee5ff1", size = 101250, upload-time = "2025-04-24T03:35:25.427Z" }
wheels = [
{ url = "https://files.pythonhosted.org/packages/04/4b/29cac41a4d98d144bf5f6d33995617b185d14b22401f75ca86f384e87ff1/h11-0.16.0-py3-none-any.whl", hash = "sha256:63cf8bbe7522de3bf65932fda1d9c2772064ffb3dae62d55932da54b31cb6c86", size = 37515, upload-time = "2025-04-24T03:35:24.344Z" },
]
[[package]]
name = "httpcore"
version = "1.0.9"
source = { registry = "https://pypi.org/simple" }
dependencies = [
{ name = "certifi" },
{ name = "h11" },
]
sdist = { url = "https://files.pythonhosted.org/packages/06/94/82699a10bca87a5556c9c59b5963f2d039dbd239f25bc2a63907a05a14cb/httpcore-1.0.9.tar.gz", hash = "sha256:6e34463af53fd2ab5d807f399a9b45ea31c3dfa2276f15a2c3f00afff6e176e8", size = 85484, upload-time = "2025-04-24T22:06:22.219Z" }
wheels = [
{ url = "https://files.pythonhosted.org/packages/7e/f5/f66802a942d491edb555dd61e3a9961140fd64c90bce1eafd741609d334d/httpcore-1.0.9-py3-none-any.whl", hash = "sha256:2d400746a40668fc9dec9810239072b40b4484b640a8c38fd654a024c7a1bf55", size = 78784, upload-time = "2025-04-24T22:06:20.566Z" },
]
[[package]]
name = "httptools"
version = "0.7.1"
source = { registry = "https://pypi.org/simple" }
sdist = { url = "https://files.pythonhosted.org/packages/b5/46/120a669232c7bdedb9d52d4aeae7e6c7dfe151e99dc70802e2fc7a5e1993/httptools-0.7.1.tar.gz", hash = "sha256:abd72556974f8e7c74a259655924a717a2365b236c882c3f6f8a45fe94703ac9", size = 258961, upload-time = "2025-10-10T03:55:08.559Z" }
wheels = [
{ url = "https://files.pythonhosted.org/packages/34/50/9d095fcbb6de2d523e027a2f304d4551855c2f46e0b82befd718b8b20056/httptools-0.7.1-cp314-cp314-macosx_10_13_universal2.whl", hash = "sha256:c08fe65728b8d70b6923ce31e3956f859d5e1e8548e6f22ec520a962c6757270", size = 203619, upload-time = "2025-10-10T03:54:54.321Z" },
{ url = "https://files.pythonhosted.org/packages/07/f0/89720dc5139ae54b03f861b5e2c55a37dba9a5da7d51e1e824a1f343627f/httptools-0.7.1-cp314-cp314-macosx_11_0_arm64.whl", hash = "sha256:7aea2e3c3953521c3c51106ee11487a910d45586e351202474d45472db7d72d3", size = 108714, upload-time = "2025-10-10T03:54:55.163Z" },
{ url = "https://files.pythonhosted.org/packages/b3/cb/eea88506f191fb552c11787c23f9a405f4c7b0c5799bf73f2249cd4f5228/httptools-0.7.1-cp314-cp314-manylinux1_x86_64.manylinux_2_28_x86_64.manylinux_2_5_x86_64.whl", hash = "sha256:0e68b8582f4ea9166be62926077a3334064d422cf08ab87d8b74664f8e9058e1", size = 472909, upload-time = "2025-10-10T03:54:56.056Z" },
{ url = "https://files.pythonhosted.org/packages/e0/4a/a548bdfae6369c0d078bab5769f7b66f17f1bfaa6fa28f81d6be6959066b/httptools-0.7.1-cp314-cp314-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:df091cf961a3be783d6aebae963cc9b71e00d57fa6f149025075217bc6a55a7b", size = 470831, upload-time = "2025-10-10T03:54:57.219Z" },
{ url = "https://files.pythonhosted.org/packages/4d/31/14df99e1c43bd132eec921c2e7e11cda7852f65619bc0fc5bdc2d0cb126c/httptools-0.7.1-cp314-cp314-musllinux_1_2_aarch64.whl", hash = "sha256:f084813239e1eb403ddacd06a30de3d3e09a9b76e7894dcda2b22f8a726e9c60", size = 452631, upload-time = "2025-10-10T03:54:58.219Z" },
{ url = "https://files.pythonhosted.org/packages/22/d2/b7e131f7be8d854d48cb6d048113c30f9a46dca0c9a8b08fcb3fcd588cdc/httptools-0.7.1-cp314-cp314-musllinux_1_2_x86_64.whl", hash = "sha256:7347714368fb2b335e9063bc2b96f2f87a9ceffcd9758ac295f8bbcd3ffbc0ca", size = 452910, upload-time = "2025-10-10T03:54:59.366Z" },
{ url = "https://files.pythonhosted.org/packages/53/cf/878f3b91e4e6e011eff6d1fa9ca39f7eb17d19c9d7971b04873734112f30/httptools-0.7.1-cp314-cp314-win_amd64.whl", hash = "sha256:cfabda2a5bb85aa2a904ce06d974a3f30fb36cc63d7feaddec05d2050acede96", size = 88205, upload-time = "2025-10-10T03:55:00.389Z" },
]
[[package]]
name = "httpx"
version = "0.28.1"
source = { registry = "https://pypi.org/simple" }
dependencies = [
{ name = "anyio" },
{ name = "certifi" },
{ name = "httpcore" },
{ name = "idna" },
]
sdist = { url = "https://files.pythonhosted.org/packages/b1/df/48c586a5fe32a0f01324ee087459e112ebb7224f646c0b5023f5e79e9956/httpx-0.28.1.tar.gz", hash = "sha256:75e98c5f16b0f35b567856f597f06ff2270a374470a5c2392242528e3e3e42fc", size = 141406, upload-time = "2024-12-06T15:37:23.222Z" }
wheels = [
{ url = "https://files.pythonhosted.org/packages/2a/39/e50c7c3a983047577ee07d2a9e53faf5a69493943ec3f6a384bdc792deb2/httpx-0.28.1-py3-none-any.whl", hash = "sha256:d909fcccc110f8c7faf814ca82a9a4d816bc5a6dbfea25d6591d6985b8ba59ad", size = 73517, upload-time = "2024-12-06T15:37:21.509Z" },
]
[[package]]
name = "idna"
version = "3.11"
source = { registry = "https://pypi.org/simple" }
sdist = { url = "https://files.pythonhosted.org/packages/6f/6d/0703ccc57f3a7233505399edb88de3cbd678da106337b9fcde432b65ed60/idna-3.11.tar.gz", hash = "sha256:795dafcc9c04ed0c1fb032c2aa73654d8e8c5023a7df64a53f39190ada629902", size = 194582, upload-time = "2025-10-12T14:55:20.501Z" }
wheels = [
{ url = "https://files.pythonhosted.org/packages/0e/61/66938bbb5fc52dbdf84594873d5b51fb1f7c7794e9c0f5bd885f30bc507b/idna-3.11-py3-none-any.whl", hash = "sha256:771a87f49d9defaf64091e6e6fe9c18d4833f140bd19464795bc32d966ca37ea", size = 71008, upload-time = "2025-10-12T14:55:18.883Z" },
]
[[package]]
name = "iniconfig"
version = "2.3.0"
source = { registry = "https://pypi.org/simple" }
sdist = { url = "https://files.pythonhosted.org/packages/72/34/14ca021ce8e5dfedc35312d08ba8bf51fdd999c576889fc2c24cb97f4f10/iniconfig-2.3.0.tar.gz", hash = "sha256:c76315c77db068650d49c5b56314774a7804df16fee4402c1f19d6d15d8c4730", size = 20503, upload-time = "2025-10-18T21:55:43.219Z" }
wheels = [
{ url = "https://files.pythonhosted.org/packages/cb/b1/3846dd7f199d53cb17f49cba7e651e9ce294d8497c8c150530ed11865bb8/iniconfig-2.3.0-py3-none-any.whl", hash = "sha256:f631c04d2c48c52b84d0d0549c99ff3859c98df65b3101406327ecc7d53fbf12", size = 7484, upload-time = "2025-10-18T21:55:41.639Z" },
]
[[package]]
name = "jiter"
version = "0.13.0"
source = { registry = "https://pypi.org/simple" }
sdist = { url = "https://files.pythonhosted.org/packages/0d/5e/4ec91646aee381d01cdb9974e30882c9cd3b8c5d1079d6b5ff4af522439a/jiter-0.13.0.tar.gz", hash = "sha256:f2839f9c2c7e2dffc1bc5929a510e14ce0a946be9365fd1219e7ef342dae14f4", size = 164847, upload-time = "2026-02-02T12:37:56.441Z" }
wheels = [
{ url = "https://files.pythonhosted.org/packages/6e/f5/f1997e987211f6f9bd71b8083047b316208b4aca0b529bb5f8c96c89ef3e/jiter-0.13.0-cp314-cp314-macosx_10_12_x86_64.whl", hash = "sha256:cc5223ab19fe25e2f0bf2643204ad7318896fe3729bf12fde41b77bfc4fafff0", size = 308804, upload-time = "2026-02-02T12:36:43.496Z" },
{ url = "https://files.pythonhosted.org/packages/cd/8f/5482a7677731fd44881f0204981ce2d7175db271f82cba2085dd2212e095/jiter-0.13.0-cp314-cp314-macosx_11_0_arm64.whl", hash = "sha256:9776ebe51713acf438fd9b4405fcd86893ae5d03487546dae7f34993217f8a91", size = 318787, upload-time = "2026-02-02T12:36:45.071Z" },
{ url = "https://files.pythonhosted.org/packages/f3/b9/7257ac59778f1cd025b26a23c5520a36a424f7f1b068f2442a5b499b7464/jiter-0.13.0-cp314-cp314-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:879e768938e7b49b5e90b7e3fecc0dbec01b8cb89595861fb39a8967c5220d09", size = 353880, upload-time = "2026-02-02T12:36:47.365Z" },
{ url = "https://files.pythonhosted.org/packages/c3/87/719eec4a3f0841dad99e3d3604ee4cba36af4419a76f3cb0b8e2e691ad67/jiter-0.13.0-cp314-cp314-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:682161a67adea11e3aae9038c06c8b4a9a71023228767477d683f69903ebc607", size = 366702, upload-time = "2026-02-02T12:36:48.871Z" },
{ url = "https://files.pythonhosted.org/packages/d2/65/415f0a75cf6921e43365a1bc227c565cb949caca8b7532776e430cbaa530/jiter-0.13.0-cp314-cp314-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:a13b68cd1cd8cc9de8f244ebae18ccb3e4067ad205220ef324c39181e23bbf66", size = 486319, upload-time = "2026-02-02T12:36:53.006Z" },
{ url = "https://files.pythonhosted.org/packages/54/a2/9e12b48e82c6bbc6081fd81abf915e1443add1b13d8fc586e1d90bb02bb8/jiter-0.13.0-cp314-cp314-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:87ce0f14c6c08892b610686ae8be350bf368467b6acd5085a5b65441e2bf36d2", size = 372289, upload-time = "2026-02-02T12:36:54.593Z" },
{ url = "https://files.pythonhosted.org/packages/4e/c1/e4693f107a1789a239c759a432e9afc592366f04e901470c2af89cfd28e1/jiter-0.13.0-cp314-cp314-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0c365005b05505a90d1c47856420980d0237adf82f70c4aff7aebd3c1cc143ad", size = 360165, upload-time = "2026-02-02T12:36:56.112Z" },
{ url = "https://files.pythonhosted.org/packages/17/08/91b9ea976c1c758240614bd88442681a87672eebc3d9a6dde476874e706b/jiter-0.13.0-cp314-cp314-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:1317fdffd16f5873e46ce27d0e0f7f4f90f0cdf1d86bf6abeaea9f63ca2c401d", size = 389634, upload-time = "2026-02-02T12:36:57.495Z" },
{ url = "https://files.pythonhosted.org/packages/18/23/58325ef99390d6d40427ed6005bf1ad54f2577866594bcf13ce55675f87d/jiter-0.13.0-cp314-cp314-musllinux_1_1_aarch64.whl", hash = "sha256:c05b450d37ba0c9e21c77fef1f205f56bcee2330bddca68d344baebfc55ae0df", size = 514933, upload-time = "2026-02-02T12:36:58.909Z" },
{ url = "https://files.pythonhosted.org/packages/5b/25/69f1120c7c395fd276c3996bb8adefa9c6b84c12bb7111e5c6ccdcd8526d/jiter-0.13.0-cp314-cp314-musllinux_1_1_x86_64.whl", hash = "sha256:775e10de3849d0631a97c603f996f518159272db00fdda0a780f81752255ee9d", size = 548842, upload-time = "2026-02-02T12:37:00.433Z" },
{ url = "https://files.pythonhosted.org/packages/18/05/981c9669d86850c5fbb0d9e62bba144787f9fba84546ba43d624ee27ef29/jiter-0.13.0-cp314-cp314-win32.whl", hash = "sha256:632bf7c1d28421c00dd8bbb8a3bac5663e1f57d5cd5ed962bce3c73bf62608e6", size = 202108, upload-time = "2026-02-02T12:37:01.718Z" },
{ url = "https://files.pythonhosted.org/packages/8d/96/cdcf54dd0b0341db7d25413229888a346c7130bd20820530905fdb65727b/jiter-0.13.0-cp314-cp314-win_amd64.whl", hash = "sha256:f22ef501c3f87ede88f23f9b11e608581c14f04db59b6a801f354397ae13739f", size = 204027, upload-time = "2026-02-02T12:37:03.075Z" },
{ url = "https://files.pythonhosted.org/packages/fb/f9/724bcaaab7a3cd727031fe4f6995cb86c4bd344909177c186699c8dec51a/jiter-0.13.0-cp314-cp314-win_arm64.whl", hash = "sha256:07b75fe09a4ee8e0c606200622e571e44943f47254f95e2436c8bdcaceb36d7d", size = 187199, upload-time = "2026-02-02T12:37:04.414Z" },
{ url = "https://files.pythonhosted.org/packages/62/92/1661d8b9fd6a3d7a2d89831db26fe3c1509a287d83ad7838831c7b7a5c7e/jiter-0.13.0-cp314-cp314t-macosx_11_0_arm64.whl", hash = "sha256:964538479359059a35fb400e769295d4b315ae61e4105396d355a12f7fef09f0", size = 318423, upload-time = "2026-02-02T12:37:05.806Z" },
{ url = "https://files.pythonhosted.org/packages/4f/3b/f77d342a54d4ebcd128e520fc58ec2f5b30a423b0fd26acdfc0c6fef8e26/jiter-0.13.0-cp314-cp314t-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e104da1db1c0991b3eaed391ccd650ae8d947eab1480c733e5a3fb28d4313e40", size = 351438, upload-time = "2026-02-02T12:37:07.189Z" },
{ url = "https://files.pythonhosted.org/packages/76/b3/ba9a69f0e4209bd3331470c723c2f5509e6f0482e416b612431a5061ed71/jiter-0.13.0-cp314-cp314t-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:0e3a5f0cde8ff433b8e88e41aa40131455420fb3649a3c7abdda6145f8cb7202", size = 364774, upload-time = "2026-02-02T12:37:08.579Z" },
{ url = "https://files.pythonhosted.org/packages/b3/16/6cdb31fa342932602458dbb631bfbd47f601e03d2e4950740e0b2100b570/jiter-0.13.0-cp314-cp314t-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:57aab48f40be1db920a582b30b116fe2435d184f77f0e4226f546794cedd9cf0", size = 487238, upload-time = "2026-02-02T12:37:10.066Z" },
{ url = "https://files.pythonhosted.org/packages/ed/b1/956cc7abaca8d95c13aa8d6c9b3f3797241c246cd6e792934cc4c8b250d2/jiter-0.13.0-cp314-cp314t-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:7772115877c53f62beeb8fd853cab692dbc04374ef623b30f997959a4c0e7e95", size = 372892, upload-time = "2026-02-02T12:37:11.656Z" },
{ url = "https://files.pythonhosted.org/packages/26/c4/97ecde8b1e74f67b8598c57c6fccf6df86ea7861ed29da84629cdbba76c4/jiter-0.13.0-cp314-cp314t-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1211427574b17b633cfceba5040de8081e5abf114f7a7602f73d2e16f9fdaa59", size = 360309, upload-time = "2026-02-02T12:37:13.244Z" },
{ url = "https://files.pythonhosted.org/packages/4b/d7/eabe3cf46715854ccc80be2cd78dd4c36aedeb30751dbf85a1d08c14373c/jiter-0.13.0-cp314-cp314t-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:7beae3a3d3b5212d3a55d2961db3c292e02e302feb43fce6a3f7a31b90ea6dfe", size = 389607, upload-time = "2026-02-02T12:37:14.881Z" },
{ url = "https://files.pythonhosted.org/packages/df/2d/03963fc0804e6109b82decfb9974eb92df3797fe7222428cae12f8ccaa0c/jiter-0.13.0-cp314-cp314t-musllinux_1_1_aarch64.whl", hash = "sha256:e5562a0f0e90a6223b704163ea28e831bd3a9faa3512a711f031611e6b06c939", size = 514986, upload-time = "2026-02-02T12:37:16.326Z" },
{ url = "https://files.pythonhosted.org/packages/f6/6c/8c83b45eb3eb1c1e18d841fe30b4b5bc5619d781267ca9bc03e005d8fd0a/jiter-0.13.0-cp314-cp314t-musllinux_1_1_x86_64.whl", hash = "sha256:6c26a424569a59140fb51160a56df13f438a2b0967365e987889186d5fc2f6f9", size = 548756, upload-time = "2026-02-02T12:37:17.736Z" },
{ url = "https://files.pythonhosted.org/packages/47/66/eea81dfff765ed66c68fd2ed8c96245109e13c896c2a5015c7839c92367e/jiter-0.13.0-cp314-cp314t-win32.whl", hash = "sha256:24dc96eca9f84da4131cdf87a95e6ce36765c3b156fc9ae33280873b1c32d5f6", size = 201196, upload-time = "2026-02-02T12:37:19.101Z" },
{ url = "https://files.pythonhosted.org/packages/ff/32/4ac9c7a76402f8f00d00842a7f6b83b284d0cf7c1e9d4227bc95aa6d17fa/jiter-0.13.0-cp314-cp314t-win_amd64.whl", hash = "sha256:0a8d76c7524087272c8ae913f5d9d608bd839154b62c4322ef65723d2e5bb0b8", size = 204215, upload-time = "2026-02-02T12:37:20.495Z" },
{ url = "https://files.pythonhosted.org/packages/f9/8e/7def204fea9f9be8b3c21a6f2dd6c020cf56c7d5ff753e0e23ed7f9ea57e/jiter-0.13.0-cp314-cp314t-win_arm64.whl", hash = "sha256:2c26cf47e2cad140fa23b6d58d435a7c0161f5c514284802f25e87fddfe11024", size = 187152, upload-time = "2026-02-02T12:37:22.124Z" },
]
[[package]]
name = "librt"
version = "0.8.1"
source = { registry = "https://pypi.org/simple" }
sdist = { url = "https://files.pythonhosted.org/packages/56/9c/b4b0c54d84da4a94b37bd44151e46d5e583c9534c7e02250b961b1b6d8a8/librt-0.8.1.tar.gz", hash = "sha256:be46a14693955b3bd96014ccbdb8339ee8c9346fbe11c1b78901b55125f14c73", size = 177471, upload-time = "2026-02-17T16:13:06.101Z" }
wheels = [
{ url = "https://files.pythonhosted.org/packages/c9/6a/907ef6800f7bca71b525a05f1839b21f708c09043b1c6aa77b6b827b3996/librt-0.8.1-cp314-cp314-macosx_10_13_x86_64.whl", hash = "sha256:6cfa7fe54fd4d1f47130017351a959fe5804bda7a0bc7e07a2cdbc3fdd28d34f", size = 66081, upload-time = "2026-02-17T16:12:12.766Z" },
{ url = "https://files.pythonhosted.org/packages/1b/18/25e991cd5640c9fb0f8d91b18797b29066b792f17bf8493da183bf5caabe/librt-0.8.1-cp314-cp314-macosx_11_0_arm64.whl", hash = "sha256:228c2409c079f8c11fb2e5d7b277077f694cb93443eb760e00b3b83cb8b3176c", size = 68309, upload-time = "2026-02-17T16:12:13.756Z" },
{ url = "https://files.pythonhosted.org/packages/a4/36/46820d03f058cfb5a9de5940640ba03165ed8aded69e0733c417bb04df34/librt-0.8.1-cp314-cp314-manylinux1_i686.manylinux_2_28_i686.manylinux_2_5_i686.whl", hash = "sha256:7aae78ab5e3206181780e56912d1b9bb9f90a7249ce12f0e8bf531d0462dd0fc", size = 196804, upload-time = "2026-02-17T16:12:14.818Z" },
{ url = "https://files.pythonhosted.org/packages/59/18/5dd0d3b87b8ff9c061849fbdb347758d1f724b9a82241aa908e0ec54ccd0/librt-0.8.1-cp314-cp314-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:172d57ec04346b047ca6af181e1ea4858086c80bdf455f61994c4aa6fc3f866c", size = 206907, upload-time = "2026-02-17T16:12:16.513Z" },
{ url = "https://files.pythonhosted.org/packages/d1/96/ef04902aad1424fd7299b62d1890e803e6ab4018c3044dca5922319c4b97/librt-0.8.1-cp314-cp314-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:6b1977c4ea97ce5eb7755a78fae68d87e4102e4aaf54985e8b56806849cc06a3", size = 221217, upload-time = "2026-02-17T16:12:17.906Z" },
{ url = "https://files.pythonhosted.org/packages/6d/ff/7e01f2dda84a8f5d280637a2e5827210a8acca9a567a54507ef1c75b342d/librt-0.8.1-cp314-cp314-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:10c42e1f6fd06733ef65ae7bebce2872bcafd8d6e6b0a08fe0a05a23b044fb14", size = 214622, upload-time = "2026-02-17T16:12:19.108Z" },
{ url = "https://files.pythonhosted.org/packages/1e/8c/5b093d08a13946034fed57619742f790faf77058558b14ca36a6e331161e/librt-0.8.1-cp314-cp314-musllinux_1_2_aarch64.whl", hash = "sha256:4c8dfa264b9193c4ee19113c985c95f876fae5e51f731494fc4e0cf594990ba7", size = 221987, upload-time = "2026-02-17T16:12:20.331Z" },
{ url = "https://files.pythonhosted.org/packages/d3/cc/86b0b3b151d40920ad45a94ce0171dec1aebba8a9d72bb3fa00c73ab25dd/librt-0.8.1-cp314-cp314-musllinux_1_2_i686.whl", hash = "sha256:01170b6729a438f0dedc4a26ed342e3dc4f02d1000b4b19f980e1877f0c297e6", size = 215132, upload-time = "2026-02-17T16:12:21.54Z" },
{ url = "https://files.pythonhosted.org/packages/fc/be/8588164a46edf1e69858d952654e216a9a91174688eeefb9efbb38a9c799/librt-0.8.1-cp314-cp314-musllinux_1_2_riscv64.whl", hash = "sha256:7b02679a0d783bdae30d443025b94465d8c3dc512f32f5b5031f93f57ac32071", size = 215195, upload-time = "2026-02-17T16:12:23.073Z" },
{ url = "https://files.pythonhosted.org/packages/f5/f2/0b9279bea735c734d69344ecfe056c1ba211694a72df10f568745c899c76/librt-0.8.1-cp314-cp314-musllinux_1_2_x86_64.whl", hash = "sha256:190b109bb69592a3401fe1ffdea41a2e73370ace2ffdc4a0e8e2b39cdea81b78", size = 237946, upload-time = "2026-02-17T16:12:24.275Z" },
{ url = "https://files.pythonhosted.org/packages/e9/cc/5f2a34fbc8aeb35314a3641f9956fa9051a947424652fad9882be7a97949/librt-0.8.1-cp314-cp314-win32.whl", hash = "sha256:e70a57ecf89a0f64c24e37f38d3fe217a58169d2fe6ed6d70554964042474023", size = 50689, upload-time = "2026-02-17T16:12:25.766Z" },
{ url = "https://files.pythonhosted.org/packages/a0/76/cd4d010ab2147339ca2b93e959c3686e964edc6de66ddacc935c325883d7/librt-0.8.1-cp314-cp314-win_amd64.whl", hash = "sha256:7e2f3edca35664499fbb36e4770650c4bd4a08abc1f4458eab9df4ec56389730", size = 57875, upload-time = "2026-02-17T16:12:27.465Z" },
{ url = "https://files.pythonhosted.org/packages/84/0f/2143cb3c3ca48bd3379dcd11817163ca50781927c4537345d608b5045998/librt-0.8.1-cp314-cp314-win_arm64.whl", hash = "sha256:0d2f82168e55ddefd27c01c654ce52379c0750ddc31ee86b4b266bcf4d65f2a3", size = 48058, upload-time = "2026-02-17T16:12:28.556Z" },
{ url = "https://files.pythonhosted.org/packages/d2/0e/9b23a87e37baf00311c3efe6b48d6b6c168c29902dfc3f04c338372fd7db/librt-0.8.1-cp314-cp314t-macosx_10_13_x86_64.whl", hash = "sha256:2c74a2da57a094bd48d03fa5d196da83d2815678385d2978657499063709abe1", size = 68313, upload-time = "2026-02-17T16:12:29.659Z" },
{ url = "https://files.pythonhosted.org/packages/db/9a/859c41e5a4f1c84200a7d2b92f586aa27133c8243b6cac9926f6e54d01b9/librt-0.8.1-cp314-cp314t-macosx_11_0_arm64.whl", hash = "sha256:a355d99c4c0d8e5b770313b8b247411ed40949ca44e33e46a4789b9293a907ee", size = 70994, upload-time = "2026-02-17T16:12:31.516Z" },
{ url = "https://files.pythonhosted.org/packages/4c/28/10605366ee599ed34223ac2bf66404c6fb59399f47108215d16d5ad751a8/librt-0.8.1-cp314-cp314t-manylinux1_i686.manylinux_2_28_i686.manylinux_2_5_i686.whl", hash = "sha256:2eb345e8b33fb748227409c9f1233d4df354d6e54091f0e8fc53acdb2ffedeb7", size = 220770, upload-time = "2026-02-17T16:12:33.294Z" },
{ url = "https://files.pythonhosted.org/packages/af/8d/16ed8fd452dafae9c48d17a6bc1ee3e818fd40ef718d149a8eff2c9f4ea2/librt-0.8.1-cp314-cp314t-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:9be2f15e53ce4e83cc08adc29b26fb5978db62ef2a366fbdf716c8a6c8901040", size = 235409, upload-time = "2026-02-17T16:12:35.443Z" },
{ url = "https://files.pythonhosted.org/packages/89/1b/7bdf3e49349c134b25db816e4a3db6b94a47ac69d7d46b1e682c2c4949be/librt-0.8.1-cp314-cp314t-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:785ae29c1f5c6e7c2cde2c7c0e148147f4503da3abc5d44d482068da5322fd9e", size = 246473, upload-time = "2026-02-17T16:12:36.656Z" },
{ url = "https://files.pythonhosted.org/packages/4e/8a/91fab8e4fd2a24930a17188c7af5380eb27b203d72101c9cc000dbdfd95a/librt-0.8.1-cp314-cp314t-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:1d3a7da44baf692f0c6aeb5b2a09c5e6fc7a703bca9ffa337ddd2e2da53f7732", size = 238866, upload-time = "2026-02-17T16:12:37.849Z" },
{ url = "https://files.pythonhosted.org/packages/b9/e0/c45a098843fc7c07e18a7f8a24ca8496aecbf7bdcd54980c6ca1aaa79a8e/librt-0.8.1-cp314-cp314t-musllinux_1_2_aarch64.whl", hash = "sha256:5fc48998000cbc39ec0d5311312dda93ecf92b39aaf184c5e817d5d440b29624", size = 250248, upload-time = "2026-02-17T16:12:39.445Z" },
{ url = "https://files.pythonhosted.org/packages/82/30/07627de23036640c952cce0c1fe78972e77d7d2f8fd54fa5ef4554ff4a56/librt-0.8.1-cp314-cp314t-musllinux_1_2_i686.whl", hash = "sha256:e96baa6820280077a78244b2e06e416480ed859bbd8e5d641cf5742919d8beb4", size = 240629, upload-time = "2026-02-17T16:12:40.889Z" },
{ url = "https://files.pythonhosted.org/packages/fb/c1/55bfe1ee3542eba055616f9098eaf6eddb966efb0ca0f44eaa4aba327307/librt-0.8.1-cp314-cp314t-musllinux_1_2_riscv64.whl", hash = "sha256:31362dbfe297b23590530007062c32c6f6176f6099646bb2c95ab1b00a57c382", size = 239615, upload-time = "2026-02-17T16:12:42.446Z" },
{ url = "https://files.pythonhosted.org/packages/2b/39/191d3d28abc26c9099b19852e6c99f7f6d400b82fa5a4e80291bd3803e19/librt-0.8.1-cp314-cp314t-musllinux_1_2_x86_64.whl", hash = "sha256:cc3656283d11540ab0ea01978378e73e10002145117055e03722417aeab30994", size = 263001, upload-time = "2026-02-17T16:12:43.627Z" },
{ url = "https://files.pythonhosted.org/packages/b9/eb/7697f60fbe7042ab4e88f4ee6af496b7f222fffb0a4e3593ef1f29f81652/librt-0.8.1-cp314-cp314t-win32.whl", hash = "sha256:738f08021b3142c2918c03692608baed43bc51144c29e35807682f8070ee2a3a", size = 51328, upload-time = "2026-02-17T16:12:45.148Z" },
{ url = "https://files.pythonhosted.org/packages/7c/72/34bf2eb7a15414a23e5e70ecb9440c1d3179f393d9349338a91e2781c0fb/librt-0.8.1-cp314-cp314t-win_amd64.whl", hash = "sha256:89815a22daf9c51884fb5dbe4f1ef65ee6a146e0b6a8df05f753e2e4a9359bf4", size = 58722, upload-time = "2026-02-17T16:12:46.85Z" },
{ url = "https://files.pythonhosted.org/packages/b2/c8/d148e041732d631fc76036f8b30fae4e77b027a1e95b7a84bb522481a940/librt-0.8.1-cp314-cp314t-win_arm64.whl", hash = "sha256:bf512a71a23504ed08103a13c941f763db13fb11177beb3d9244c98c29fb4a61", size = 48755, upload-time = "2026-02-17T16:12:47.943Z" },
]
[[package]]
name = "mypy"
version = "1.19.1"
source = { registry = "https://pypi.org/simple" }
dependencies = [
{ name = "librt", marker = "platform_python_implementation != 'PyPy'" },
{ name = "mypy-extensions" },
{ name = "pathspec" },
{ name = "typing-extensions" },
]
sdist = { url = "https://files.pythonhosted.org/packages/f5/db/4efed9504bc01309ab9c2da7e352cc223569f05478012b5d9ece38fd44d2/mypy-1.19.1.tar.gz", hash = "sha256:19d88bb05303fe63f71dd2c6270daca27cb9401c4ca8255fe50d1d920e0eb9ba", size = 3582404, upload-time = "2025-12-15T05:03:48.42Z" }
wheels = [
{ url = "https://files.pythonhosted.org/packages/de/eb/b83e75f4c820c4247a58580ef86fcd35165028f191e7e1ba57128c52782d/mypy-1.19.1-cp314-cp314-macosx_10_15_x86_64.whl", hash = "sha256:06e6170bd5836770e8104c8fdd58e5e725cfeb309f0a6c681a811f557e97eac1", size = 13199744, upload-time = "2025-12-15T05:03:30.823Z" },
{ url = "https://files.pythonhosted.org/packages/94/28/52785ab7bfa165f87fcbb61547a93f98bb20e7f82f90f165a1f69bce7b3d/mypy-1.19.1-cp314-cp314-macosx_11_0_arm64.whl", hash = "sha256:804bd67b8054a85447c8954215a906d6eff9cabeabe493fb6334b24f4bfff718", size = 12215815, upload-time = "2025-12-15T05:02:42.323Z" },
{ url = "https://files.pythonhosted.org/packages/0a/c6/bdd60774a0dbfb05122e3e925f2e9e846c009e479dcec4821dad881f5b52/mypy-1.19.1-cp314-cp314-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:21761006a7f497cb0d4de3d8ef4ca70532256688b0523eee02baf9eec895e27b", size = 12740047, upload-time = "2025-12-15T05:03:33.168Z" },
{ url = "https://files.pythonhosted.org/packages/32/2a/66ba933fe6c76bd40d1fe916a83f04fed253152f451a877520b3c4a5e41e/mypy-1.19.1-cp314-cp314-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:28902ee51f12e0f19e1e16fbe2f8f06b6637f482c459dd393efddd0ec7f82045", size = 13601998, upload-time = "2025-12-15T05:03:13.056Z" },
{ url = "https://files.pythonhosted.org/packages/e3/da/5055c63e377c5c2418760411fd6a63ee2b96cf95397259038756c042574f/mypy-1.19.1-cp314-cp314-musllinux_1_2_x86_64.whl", hash = "sha256:481daf36a4c443332e2ae9c137dfee878fcea781a2e3f895d54bd3002a900957", size = 13807476, upload-time = "2025-12-15T05:03:17.977Z" },
{ url = "https://files.pythonhosted.org/packages/cd/09/4ebd873390a063176f06b0dbf1f7783dd87bd120eae7727fa4ae4179b685/mypy-1.19.1-cp314-cp314-win_amd64.whl", hash = "sha256:8bb5c6f6d043655e055be9b542aa5f3bdd30e4f3589163e85f93f3640060509f", size = 10281872, upload-time = "2025-12-15T05:03:05.549Z" },
{ url = "https://files.pythonhosted.org/packages/8d/f4/4ce9a05ce5ded1de3ec1c1d96cf9f9504a04e54ce0ed55cfa38619a32b8d/mypy-1.19.1-py3-none-any.whl", hash = "sha256:f1235f5ea01b7db5468d53ece6aaddf1ad0b88d9e7462b86ef96fe04995d7247", size = 2471239, upload-time = "2025-12-15T05:03:07.248Z" },
]
[[package]]
name = "mypy-extensions"
version = "1.1.0"
source = { registry = "https://pypi.org/simple" }
sdist = { url = "https://files.pythonhosted.org/packages/a2/6e/371856a3fb9d31ca8dac321cda606860fa4548858c0cc45d9d1d4ca2628b/mypy_extensions-1.1.0.tar.gz", hash = "sha256:52e68efc3284861e772bbcd66823fde5ae21fd2fdb51c62a211403730b916558", size = 6343, upload-time = "2025-04-22T14:54:24.164Z" }
wheels = [
{ url = "https://files.pythonhosted.org/packages/79/7b/2c79738432f5c924bef5071f933bcc9efd0473bac3b4aa584a6f7c1c8df8/mypy_extensions-1.1.0-py3-none-any.whl", hash = "sha256:1be4cccdb0f2482337c4743e60421de3a356cd97508abadd57d47403e94f5505", size = 4963, upload-time = "2025-04-22T14:54:22.983Z" },
]
[[package]]
name = "openai"
version = "2.24.0"
source = { registry = "https://pypi.org/simple" }
dependencies = [
{ name = "anyio" },
{ name = "distro" },
{ name = "httpx" },
{ name = "jiter" },
{ name = "pydantic" },
{ name = "sniffio" },
{ name = "tqdm" },
{ name = "typing-extensions" },
]
sdist = { url = "https://files.pythonhosted.org/packages/55/13/17e87641b89b74552ed408a92b231283786523edddc95f3545809fab673c/openai-2.24.0.tar.gz", hash = "sha256:1e5769f540dbd01cb33bc4716a23e67b9d695161a734aff9c5f925e2bf99a673", size = 658717, upload-time = "2026-02-24T20:02:07.958Z" }
wheels = [
{ url = "https://files.pythonhosted.org/packages/c9/30/844dc675ee6902579b8eef01ed23917cc9319a1c9c0c14ec6e39340c96d0/openai-2.24.0-py3-none-any.whl", hash = "sha256:fed30480d7d6c884303287bde864980a4b137b60553ffbcf9ab4a233b7a73d94", size = 1120122, upload-time = "2026-02-24T20:02:05.669Z" },
]
[[package]]
name = "packaging"
version = "26.0"
source = { registry = "https://pypi.org/simple" }
sdist = { url = "https://files.pythonhosted.org/packages/65/ee/299d360cdc32edc7d2cf530f3accf79c4fca01e96ffc950d8a52213bd8e4/packaging-26.0.tar.gz", hash = "sha256:00243ae351a257117b6a241061796684b084ed1c516a08c48a3f7e147a9d80b4", size = 143416, upload-time = "2026-01-21T20:50:39.064Z" }
wheels = [
{ url = "https://files.pythonhosted.org/packages/b7/b9/c538f279a4e237a006a2c98387d081e9eb060d203d8ed34467cc0f0b9b53/packaging-26.0-py3-none-any.whl", hash = "sha256:b36f1fef9334a5588b4166f8bcd26a14e521f2b55e6b9de3aaa80d3ff7a37529", size = 74366, upload-time = "2026-01-21T20:50:37.788Z" },
]
[[package]]
name = "pathspec"
version = "1.0.4"
source = { registry = "https://pypi.org/simple" }
sdist = { url = "https://files.pythonhosted.org/packages/fa/36/e27608899f9b8d4dff0617b2d9ab17ca5608956ca44461ac14ac48b44015/pathspec-1.0.4.tar.gz", hash = "sha256:0210e2ae8a21a9137c0d470578cb0e595af87edaa6ebf12ff176f14a02e0e645", size = 131200, upload-time = "2026-01-27T03:59:46.938Z" }
wheels = [
{ url = "https://files.pythonhosted.org/packages/ef/3c/2c197d226f9ea224a9ab8d197933f9da0ae0aac5b6e0f884e2b8d9c8e9f7/pathspec-1.0.4-py3-none-any.whl", hash = "sha256:fb6ae2fd4e7c921a165808a552060e722767cfa526f99ca5156ed2ce45a5c723", size = 55206, upload-time = "2026-01-27T03:59:45.137Z" },
]
[[package]]
name = "pluggy"
version = "1.6.0"
source = { registry = "https://pypi.org/simple" }
sdist = { url = "https://files.pythonhosted.org/packages/f9/e2/3e91f31a7d2b083fe6ef3fa267035b518369d9511ffab804f839851d2779/pluggy-1.6.0.tar.gz", hash = "sha256:7dcc130b76258d33b90f61b658791dede3486c3e6bfb003ee5c9bfb396dd22f3", size = 69412, upload-time = "2025-05-15T12:30:07.975Z" }
wheels = [
{ url = "https://files.pythonhosted.org/packages/54/20/4d324d65cc6d9205fabedc306948156824eb9f0ee1633355a8f7ec5c66bf/pluggy-1.6.0-py3-none-any.whl", hash = "sha256:e920276dd6813095e9377c0bc5566d94c932c33b27a3e3945d8389c374dd4746", size = 20538, upload-time = "2025-05-15T12:30:06.134Z" },
]
[[package]]
name = "pydantic"
version = "2.12.5"
source = { registry = "https://pypi.org/simple" }
dependencies = [
{ name = "annotated-types" },
{ name = "pydantic-core" },
{ name = "typing-extensions" },
{ name = "typing-inspection" },
]
sdist = { url = "https://files.pythonhosted.org/packages/69/44/36f1a6e523abc58ae5f928898e4aca2e0ea509b5aa6f6f392a5d882be928/pydantic-2.12.5.tar.gz", hash = "sha256:4d351024c75c0f085a9febbb665ce8c0c6ec5d30e903bdb6394b7ede26aebb49", size = 821591, upload-time = "2025-11-26T15:11:46.471Z" }
wheels = [
{ url = "https://files.pythonhosted.org/packages/5a/87/b70ad306ebb6f9b585f114d0ac2137d792b48be34d732d60e597c2f8465a/pydantic-2.12.5-py3-none-any.whl", hash = "sha256:e561593fccf61e8a20fc46dfc2dfe075b8be7d0188df33f221ad1f0139180f9d", size = 463580, upload-time = "2025-11-26T15:11:44.605Z" },
]
[[package]]
name = "pydantic-core"
version = "2.41.5"
source = { registry = "https://pypi.org/simple" }
dependencies = [
{ name = "typing-extensions" },
]
sdist = { url = "https://files.pythonhosted.org/packages/71/70/23b021c950c2addd24ec408e9ab05d59b035b39d97cdc1130e1bce647bb6/pydantic_core-2.41.5.tar.gz", hash = "sha256:08daa51ea16ad373ffd5e7606252cc32f07bc72b28284b6bc9c6df804816476e", size = 460952, upload-time = "2025-11-04T13:43:49.098Z" }
wheels = [
{ url = "https://files.pythonhosted.org/packages/ea/28/46b7c5c9635ae96ea0fbb779e271a38129df2550f763937659ee6c5dbc65/pydantic_core-2.41.5-cp314-cp314-macosx_10_12_x86_64.whl", hash = "sha256:3f37a19d7ebcdd20b96485056ba9e8b304e27d9904d233d7b1015db320e51f0a", size = 2119622, upload-time = "2025-11-04T13:40:56.68Z" },
{ url = "https://files.pythonhosted.org/packages/74/1a/145646e5687e8d9a1e8d09acb278c8535ebe9e972e1f162ed338a622f193/pydantic_core-2.41.5-cp314-cp314-macosx_11_0_arm64.whl", hash = "sha256:1d1d9764366c73f996edd17abb6d9d7649a7eb690006ab6adbda117717099b14", size = 1891725, upload-time = "2025-11-04T13:40:58.807Z" },
{ url = "https://files.pythonhosted.org/packages/23/04/e89c29e267b8060b40dca97bfc64a19b2a3cf99018167ea1677d96368273/pydantic_core-2.41.5-cp314-cp314-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:25e1c2af0fce638d5f1988b686f3b3ea8cd7de5f244ca147c777769e798a9cd1", size = 1915040, upload-time = "2025-11-04T13:41:00.853Z" },
{ url = "https://files.pythonhosted.org/packages/84/a3/15a82ac7bd97992a82257f777b3583d3e84bdb06ba6858f745daa2ec8a85/pydantic_core-2.41.5-cp314-cp314-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:506d766a8727beef16b7adaeb8ee6217c64fc813646b424d0804d67c16eddb66", size = 2063691, upload-time = "2025-11-04T13:41:03.504Z" },
{ url = "https://files.pythonhosted.org/packages/74/9b/0046701313c6ef08c0c1cf0e028c67c770a4e1275ca73131563c5f2a310a/pydantic_core-2.41.5-cp314-cp314-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:4819fa52133c9aa3c387b3328f25c1facc356491e6135b459f1de698ff64d869", size = 2213897, upload-time = "2025-11-04T13:41:05.804Z" },
{ url = "https://files.pythonhosted.org/packages/8a/cd/6bac76ecd1b27e75a95ca3a9a559c643b3afcd2dd62086d4b7a32a18b169/pydantic_core-2.41.5-cp314-cp314-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:2b761d210c9ea91feda40d25b4efe82a1707da2ef62901466a42492c028553a2", size = 2333302, upload-time = "2025-11-04T13:41:07.809Z" },
{ url = "https://files.pythonhosted.org/packages/4c/d2/ef2074dc020dd6e109611a8be4449b98cd25e1b9b8a303c2f0fca2f2bcf7/pydantic_core-2.41.5-cp314-cp314-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:22f0fb8c1c583a3b6f24df2470833b40207e907b90c928cc8d3594b76f874375", size = 2064877, upload-time = "2025-11-04T13:41:09.827Z" },
{ url = "https://files.pythonhosted.org/packages/18/66/e9db17a9a763d72f03de903883c057b2592c09509ccfe468187f2a2eef29/pydantic_core-2.41.5-cp314-cp314-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:2782c870e99878c634505236d81e5443092fba820f0373997ff75f90f68cd553", size = 2180680, upload-time = "2025-11-04T13:41:12.379Z" },
{ url = "https://files.pythonhosted.org/packages/d3/9e/3ce66cebb929f3ced22be85d4c2399b8e85b622db77dad36b73c5387f8f8/pydantic_core-2.41.5-cp314-cp314-musllinux_1_1_aarch64.whl", hash = "sha256:0177272f88ab8312479336e1d777f6b124537d47f2123f89cb37e0accea97f90", size = 2138960, upload-time = "2025-11-04T13:41:14.627Z" },
{ url = "https://files.pythonhosted.org/packages/a6/62/205a998f4327d2079326b01abee48e502ea739d174f0a89295c481a2272e/pydantic_core-2.41.5-cp314-cp314-musllinux_1_1_armv7l.whl", hash = "sha256:63510af5e38f8955b8ee5687740d6ebf7c2a0886d15a6d65c32814613681bc07", size = 2339102, upload-time = "2025-11-04T13:41:16.868Z" },
{ url = "https://files.pythonhosted.org/packages/3c/0d/f05e79471e889d74d3d88f5bd20d0ed189ad94c2423d81ff8d0000aab4ff/pydantic_core-2.41.5-cp314-cp314-musllinux_1_1_x86_64.whl", hash = "sha256:e56ba91f47764cc14f1daacd723e3e82d1a89d783f0f5afe9c364b8bb491ccdb", size = 2326039, upload-time = "2025-11-04T13:41:18.934Z" },
{ url = "https://files.pythonhosted.org/packages/ec/e1/e08a6208bb100da7e0c4b288eed624a703f4d129bde2da475721a80cab32/pydantic_core-2.41.5-cp314-cp314-win32.whl", hash = "sha256:aec5cf2fd867b4ff45b9959f8b20ea3993fc93e63c7363fe6851424c8a7e7c23", size = 1995126, upload-time = "2025-11-04T13:41:21.418Z" },
{ url = "https://files.pythonhosted.org/packages/48/5d/56ba7b24e9557f99c9237e29f5c09913c81eeb2f3217e40e922353668092/pydantic_core-2.41.5-cp314-cp314-win_amd64.whl", hash = "sha256:8e7c86f27c585ef37c35e56a96363ab8de4e549a95512445b85c96d3e2f7c1bf", size = 2015489, upload-time = "2025-11-04T13:41:24.076Z" },
{ url = "https://files.pythonhosted.org/packages/4e/bb/f7a190991ec9e3e0ba22e4993d8755bbc4a32925c0b5b42775c03e8148f9/pydantic_core-2.41.5-cp314-cp314-win_arm64.whl", hash = "sha256:e672ba74fbc2dc8eea59fb6d4aed6845e6905fc2a8afe93175d94a83ba2a01a0", size = 1977288, upload-time = "2025-11-04T13:41:26.33Z" },
{ url = "https://files.pythonhosted.org/packages/92/ed/77542d0c51538e32e15afe7899d79efce4b81eee631d99850edc2f5e9349/pydantic_core-2.41.5-cp314-cp314t-macosx_10_12_x86_64.whl", hash = "sha256:8566def80554c3faa0e65ac30ab0932b9e3a5cd7f8323764303d468e5c37595a", size = 2120255, upload-time = "2025-11-04T13:41:28.569Z" },
{ url = "https://files.pythonhosted.org/packages/bb/3d/6913dde84d5be21e284439676168b28d8bbba5600d838b9dca99de0fad71/pydantic_core-2.41.5-cp314-cp314t-macosx_11_0_arm64.whl", hash = "sha256:b80aa5095cd3109962a298ce14110ae16b8c1aece8b72f9dafe81cf597ad80b3", size = 1863760, upload-time = "2025-11-04T13:41:31.055Z" },
{ url = "https://files.pythonhosted.org/packages/5a/f0/e5e6b99d4191da102f2b0eb9687aaa7f5bea5d9964071a84effc3e40f997/pydantic_core-2.41.5-cp314-cp314t-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3006c3dd9ba34b0c094c544c6006cc79e87d8612999f1a5d43b769b89181f23c", size = 1878092, upload-time = "2025-11-04T13:41:33.21Z" },
{ url = "https://files.pythonhosted.org/packages/71/48/36fb760642d568925953bcc8116455513d6e34c4beaa37544118c36aba6d/pydantic_core-2.41.5-cp314-cp314t-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:72f6c8b11857a856bcfa48c86f5368439f74453563f951e473514579d44aa612", size = 2053385, upload-time = "2025-11-04T13:41:35.508Z" },
{ url = "https://files.pythonhosted.org/packages/20/25/92dc684dd8eb75a234bc1c764b4210cf2646479d54b47bf46061657292a8/pydantic_core-2.41.5-cp314-cp314t-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:5cb1b2f9742240e4bb26b652a5aeb840aa4b417c7748b6f8387927bc6e45e40d", size = 2218832, upload-time = "2025-11-04T13:41:37.732Z" },
{ url = "https://files.pythonhosted.org/packages/e2/09/f53e0b05023d3e30357d82eb35835d0f6340ca344720a4599cd663dca599/pydantic_core-2.41.5-cp314-cp314t-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:bd3d54f38609ff308209bd43acea66061494157703364ae40c951f83ba99a1a9", size = 2327585, upload-time = "2025-11-04T13:41:40Z" },
{ url = "https://files.pythonhosted.org/packages/aa/4e/2ae1aa85d6af35a39b236b1b1641de73f5a6ac4d5a7509f77b814885760c/pydantic_core-2.41.5-cp314-cp314t-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2ff4321e56e879ee8d2a879501c8e469414d948f4aba74a2d4593184eb326660", size = 2041078, upload-time = "2025-11-04T13:41:42.323Z" },
{ url = "https://files.pythonhosted.org/packages/cd/13/2e215f17f0ef326fc72afe94776edb77525142c693767fc347ed6288728d/pydantic_core-2.41.5-cp314-cp314t-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:d0d2568a8c11bf8225044aa94409e21da0cb09dcdafe9ecd10250b2baad531a9", size = 2173914, upload-time = "2025-11-04T13:41:45.221Z" },
{ url = "https://files.pythonhosted.org/packages/02/7a/f999a6dcbcd0e5660bc348a3991c8915ce6599f4f2c6ac22f01d7a10816c/pydantic_core-2.41.5-cp314-cp314t-musllinux_1_1_aarch64.whl", hash = "sha256:a39455728aabd58ceabb03c90e12f71fd30fa69615760a075b9fec596456ccc3", size = 2129560, upload-time = "2025-11-04T13:41:47.474Z" },
{ url = "https://files.pythonhosted.org/packages/3a/b1/6c990ac65e3b4c079a4fb9f5b05f5b013afa0f4ed6780a3dd236d2cbdc64/pydantic_core-2.41.5-cp314-cp314t-musllinux_1_1_armv7l.whl", hash = "sha256:239edca560d05757817c13dc17c50766136d21f7cd0fac50295499ae24f90fdf", size = 2329244, upload-time = "2025-11-04T13:41:49.992Z" },
{ url = "https://files.pythonhosted.org/packages/d9/02/3c562f3a51afd4d88fff8dffb1771b30cfdfd79befd9883ee094f5b6c0d8/pydantic_core-2.41.5-cp314-cp314t-musllinux_1_1_x86_64.whl", hash = "sha256:2a5e06546e19f24c6a96a129142a75cee553cc018ffee48a460059b1185f4470", size = 2331955, upload-time = "2025-11-04T13:41:54.079Z" },
{ url = "https://files.pythonhosted.org/packages/5c/96/5fb7d8c3c17bc8c62fdb031c47d77a1af698f1d7a406b0f79aaa1338f9ad/pydantic_core-2.41.5-cp314-cp314t-win32.whl", hash = "sha256:b4ececa40ac28afa90871c2cc2b9ffd2ff0bf749380fbdf57d165fd23da353aa", size = 1988906, upload-time = "2025-11-04T13:41:56.606Z" },
{ url = "https://files.pythonhosted.org/packages/22/ed/182129d83032702912c2e2d8bbe33c036f342cc735737064668585dac28f/pydantic_core-2.41.5-cp314-cp314t-win_amd64.whl", hash = "sha256:80aa89cad80b32a912a65332f64a4450ed00966111b6615ca6816153d3585a8c", size = 1981607, upload-time = "2025-11-04T13:41:58.889Z" },
{ url = "https://files.pythonhosted.org/packages/9f/ed/068e41660b832bb0b1aa5b58011dea2a3fe0ba7861ff38c4d4904c1c1a99/pydantic_core-2.41.5-cp314-cp314t-win_arm64.whl", hash = "sha256:35b44f37a3199f771c3eaa53051bc8a70cd7b54f333531c59e29fd4db5d15008", size = 1974769, upload-time = "2025-11-04T13:42:01.186Z" },
]
[[package]]
name = "pygments"
version = "2.19.2"
source = { registry = "https://pypi.org/simple" }
sdist = { url = "https://files.pythonhosted.org/packages/b0/77/a5b8c569bf593b0140bde72ea885a803b82086995367bf2037de0159d924/pygments-2.19.2.tar.gz", hash = "sha256:636cb2477cec7f8952536970bc533bc43743542f70392ae026374600add5b887", size = 4968631, upload-time = "2025-06-21T13:39:12.283Z" }
wheels = [
{ url = "https://files.pythonhosted.org/packages/c7/21/705964c7812476f378728bdf590ca4b771ec72385c533964653c68e86bdc/pygments-2.19.2-py3-none-any.whl", hash = "sha256:86540386c03d588bb81d44bc3928634ff26449851e99741617ecb9037ee5ec0b", size = 1225217, upload-time = "2025-06-21T13:39:07.939Z" },
]
[[package]]
name = "pytest"
version = "9.0.2"
source = { registry = "https://pypi.org/simple" }
dependencies = [
{ name = "colorama", marker = "sys_platform == 'win32'" },
{ name = "iniconfig" },
{ name = "packaging" },
{ name = "pluggy" },
{ name = "pygments" },
]
sdist = { url = "https://files.pythonhosted.org/packages/d1/db/7ef3487e0fb0049ddb5ce41d3a49c235bf9ad299b6a25d5780a89f19230f/pytest-9.0.2.tar.gz", hash = "sha256:75186651a92bd89611d1d9fc20f0b4345fd827c41ccd5c299a868a05d70edf11", size = 1568901, upload-time = "2025-12-06T21:30:51.014Z" }
wheels = [
{ url = "https://files.pythonhosted.org/packages/3b/ab/b3226f0bd7cdcf710fbede2b3548584366da3b19b5021e74f5bde2a8fa3f/pytest-9.0.2-py3-none-any.whl", hash = "sha256:711ffd45bf766d5264d487b917733b453d917afd2b0ad65223959f59089f875b", size = 374801, upload-time = "2025-12-06T21:30:49.154Z" },
]
[[package]]
name = "python-dotenv"
version = "1.2.1"
source = { registry = "https://pypi.org/simple" }
sdist = { url = "https://files.pythonhosted.org/packages/f0/26/19cadc79a718c5edbec86fd4919a6b6d3f681039a2f6d66d14be94e75fb9/python_dotenv-1.2.1.tar.gz", hash = "sha256:42667e897e16ab0d66954af0e60a9caa94f0fd4ecf3aaf6d2d260eec1aa36ad6", size = 44221, upload-time = "2025-10-26T15:12:10.434Z" }
wheels = [
{ url = "https://files.pythonhosted.org/packages/14/1b/a298b06749107c305e1fe0f814c6c74aea7b2f1e10989cb30f544a1b3253/python_dotenv-1.2.1-py3-none-any.whl", hash = "sha256:b81ee9561e9ca4004139c6cbba3a238c32b03e4894671e181b671e8cb8425d61", size = 21230, upload-time = "2025-10-26T15:12:09.109Z" },
]
[[package]]
name = "pyyaml"
version = "6.0.3"
source = { registry = "https://pypi.org/simple" }
sdist = { url = "https://files.pythonhosted.org/packages/05/8e/961c0007c59b8dd7729d542c61a4d537767a59645b82a0b521206e1e25c2/pyyaml-6.0.3.tar.gz", hash = "sha256:d76623373421df22fb4cf8817020cbb7ef15c725b9d5e45f17e189bfc384190f", size = 130960, upload-time = "2025-09-25T21:33:16.546Z" }
wheels = [
{ url = "https://files.pythonhosted.org/packages/9d/8c/f4bd7f6465179953d3ac9bc44ac1a8a3e6122cf8ada906b4f96c60172d43/pyyaml-6.0.3-cp314-cp314-macosx_10_13_x86_64.whl", hash = "sha256:8d1fab6bb153a416f9aeb4b8763bc0f22a5586065f86f7664fc23339fc1c1fac", size = 181814, upload-time = "2025-09-25T21:32:35.712Z" },
{ url = "https://files.pythonhosted.org/packages/bd/9c/4d95bb87eb2063d20db7b60faa3840c1b18025517ae857371c4dd55a6b3a/pyyaml-6.0.3-cp314-cp314-macosx_11_0_arm64.whl", hash = "sha256:34d5fcd24b8445fadc33f9cf348c1047101756fd760b4dacb5c3e99755703310", size = 173809, upload-time = "2025-09-25T21:32:36.789Z" },
{ url = "https://files.pythonhosted.org/packages/92/b5/47e807c2623074914e29dabd16cbbdd4bf5e9b2db9f8090fa64411fc5382/pyyaml-6.0.3-cp314-cp314-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:501a031947e3a9025ed4405a168e6ef5ae3126c59f90ce0cd6f2bfc477be31b7", size = 766454, upload-time = "2025-09-25T21:32:37.966Z" },
{ url = "https://files.pythonhosted.org/packages/02/9e/e5e9b168be58564121efb3de6859c452fccde0ab093d8438905899a3a483/pyyaml-6.0.3-cp314-cp314-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:b3bc83488de33889877a0f2543ade9f70c67d66d9ebb4ac959502e12de895788", size = 836355, upload-time = "2025-09-25T21:32:39.178Z" },
{ url = "https://files.pythonhosted.org/packages/88/f9/16491d7ed2a919954993e48aa941b200f38040928474c9e85ea9e64222c3/pyyaml-6.0.3-cp314-cp314-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:c458b6d084f9b935061bc36216e8a69a7e293a2f1e68bf956dcd9e6cbcd143f5", size = 794175, upload-time = "2025-09-25T21:32:40.865Z" },
{ url = "https://files.pythonhosted.org/packages/dd/3f/5989debef34dc6397317802b527dbbafb2b4760878a53d4166579111411e/pyyaml-6.0.3-cp314-cp314-musllinux_1_2_aarch64.whl", hash = "sha256:7c6610def4f163542a622a73fb39f534f8c101d690126992300bf3207eab9764", size = 755228, upload-time = "2025-09-25T21:32:42.084Z" },
{ url = "https://files.pythonhosted.org/packages/d7/ce/af88a49043cd2e265be63d083fc75b27b6ed062f5f9fd6cdc223ad62f03e/pyyaml-6.0.3-cp314-cp314-musllinux_1_2_x86_64.whl", hash = "sha256:5190d403f121660ce8d1d2c1bb2ef1bd05b5f68533fc5c2ea899bd15f4399b35", size = 789194, upload-time = "2025-09-25T21:32:43.362Z" },
{ url = "https://files.pythonhosted.org/packages/23/20/bb6982b26a40bb43951265ba29d4c246ef0ff59c9fdcdf0ed04e0687de4d/pyyaml-6.0.3-cp314-cp314-win_amd64.whl", hash = "sha256:4a2e8cebe2ff6ab7d1050ecd59c25d4c8bd7e6f400f5f82b96557ac0abafd0ac", size = 156429, upload-time = "2025-09-25T21:32:57.844Z" },
{ url = "https://files.pythonhosted.org/packages/f4/f4/a4541072bb9422c8a883ab55255f918fa378ecf083f5b85e87fc2b4eda1b/pyyaml-6.0.3-cp314-cp314-win_arm64.whl", hash = "sha256:93dda82c9c22deb0a405ea4dc5f2d0cda384168e466364dec6255b293923b2f3", size = 143912, upload-time = "2025-09-25T21:32:59.247Z" },
{ url = "https://files.pythonhosted.org/packages/7c/f9/07dd09ae774e4616edf6cda684ee78f97777bdd15847253637a6f052a62f/pyyaml-6.0.3-cp314-cp314t-macosx_10_13_x86_64.whl", hash = "sha256:02893d100e99e03eda1c8fd5c441d8c60103fd175728e23e431db1b589cf5ab3", size = 189108, upload-time = "2025-09-25T21:32:44.377Z" },
{ url = "https://files.pythonhosted.org/packages/4e/78/8d08c9fb7ce09ad8c38ad533c1191cf27f7ae1effe5bb9400a46d9437fcf/pyyaml-6.0.3-cp314-cp314t-macosx_11_0_arm64.whl", hash = "sha256:c1ff362665ae507275af2853520967820d9124984e0f7466736aea23d8611fba", size = 183641, upload-time = "2025-09-25T21:32:45.407Z" },
{ url = "https://files.pythonhosted.org/packages/7b/5b/3babb19104a46945cf816d047db2788bcaf8c94527a805610b0289a01c6b/pyyaml-6.0.3-cp314-cp314t-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:6adc77889b628398debc7b65c073bcb99c4a0237b248cacaf3fe8a557563ef6c", size = 831901, upload-time = "2025-09-25T21:32:48.83Z" },
{ url = "https://files.pythonhosted.org/packages/8b/cc/dff0684d8dc44da4d22a13f35f073d558c268780ce3c6ba1b87055bb0b87/pyyaml-6.0.3-cp314-cp314t-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:a80cb027f6b349846a3bf6d73b5e95e782175e52f22108cfa17876aaeff93702", size = 861132, upload-time = "2025-09-25T21:32:50.149Z" },
{ url = "https://files.pythonhosted.org/packages/b1/5e/f77dc6b9036943e285ba76b49e118d9ea929885becb0a29ba8a7c75e29fe/pyyaml-6.0.3-cp314-cp314t-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:00c4bdeba853cc34e7dd471f16b4114f4162dc03e6b7afcc2128711f0eca823c", size = 839261, upload-time = "2025-09-25T21:32:51.808Z" },
{ url = "https://files.pythonhosted.org/packages/ce/88/a9db1376aa2a228197c58b37302f284b5617f56a5d959fd1763fb1675ce6/pyyaml-6.0.3-cp314-cp314t-musllinux_1_2_aarch64.whl", hash = "sha256:66e1674c3ef6f541c35191caae2d429b967b99e02040f5ba928632d9a7f0f065", size = 805272, upload-time = "2025-09-25T21:32:52.941Z" },
{ url = "https://files.pythonhosted.org/packages/da/92/1446574745d74df0c92e6aa4a7b0b3130706a4142b2d1a5869f2eaa423c6/pyyaml-6.0.3-cp314-cp314t-musllinux_1_2_x86_64.whl", hash = "sha256:16249ee61e95f858e83976573de0f5b2893b3677ba71c9dd36b9cf8be9ac6d65", size = 829923, upload-time = "2025-09-25T21:32:54.537Z" },
{ url = "https://files.pythonhosted.org/packages/f0/7a/1c7270340330e575b92f397352af856a8c06f230aa3e76f86b39d01b416a/pyyaml-6.0.3-cp314-cp314t-win_amd64.whl", hash = "sha256:4ad1906908f2f5ae4e5a8ddfce73c320c2a1429ec52eafd27138b7f1cbe341c9", size = 174062, upload-time = "2025-09-25T21:32:55.767Z" },
{ url = "https://files.pythonhosted.org/packages/f1/12/de94a39c2ef588c7e6455cfbe7343d3b2dc9d6b6b2f40c4c6565744c873d/pyyaml-6.0.3-cp314-cp314t-win_arm64.whl", hash = "sha256:ebc55a14a21cb14062aa4162f906cd962b28e2e9ea38f9b4391244cd8de4ae0b", size = 149341, upload-time = "2025-09-25T21:32:56.828Z" },
]
[[package]]
name = "ruff"
version = "0.15.4"
source = { registry = "https://pypi.org/simple" }
sdist = { url = "https://files.pythonhosted.org/packages/da/31/d6e536cdebb6568ae75a7f00e4b4819ae0ad2640c3604c305a0428680b0c/ruff-0.15.4.tar.gz", hash = "sha256:3412195319e42d634470cc97aa9803d07e9d5c9223b99bcb1518f0c725f26ae1", size = 4569550, upload-time = "2026-02-26T20:04:14.959Z" }
wheels = [
{ url = "https://files.pythonhosted.org/packages/f2/82/c11a03cfec3a4d26a0ea1e571f0f44be5993b923f905eeddfc397c13d360/ruff-0.15.4-py3-none-linux_armv6l.whl", hash = "sha256:a1810931c41606c686bae8b5b9a8072adac2f611bb433c0ba476acba17a332e0", size = 10453333, upload-time = "2026-02-26T20:04:20.093Z" },
{ url = "https://files.pythonhosted.org/packages/ce/5d/6a1f271f6e31dffb31855996493641edc3eef8077b883eaf007a2f1c2976/ruff-0.15.4-py3-none-macosx_10_12_x86_64.whl", hash = "sha256:5a1632c66672b8b4d3e1d1782859e98d6e0b4e70829530666644286600a33992", size = 10853356, upload-time = "2026-02-26T20:04:05.808Z" },
{ url = "https://files.pythonhosted.org/packages/b1/d8/0fab9f8842b83b1a9c2bf81b85063f65e93fb512e60effa95b0be49bfc54/ruff-0.15.4-py3-none-macosx_11_0_arm64.whl", hash = "sha256:a4386ba2cd6c0f4ff75252845906acc7c7c8e1ac567b7bc3d373686ac8c222ba", size = 10187434, upload-time = "2026-02-26T20:03:54.656Z" },
{ url = "https://files.pythonhosted.org/packages/85/cc/cc220fd9394eff5db8d94dec199eec56dd6c9f3651d8869d024867a91030/ruff-0.15.4-py3-none-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b2496488bdfd3732747558b6f95ae427ff066d1fcd054daf75f5a50674411e75", size = 10535456, upload-time = "2026-02-26T20:03:52.738Z" },
{ url = "https://files.pythonhosted.org/packages/fa/0f/bced38fa5cf24373ec767713c8e4cadc90247f3863605fb030e597878661/ruff-0.15.4-py3-none-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:3f1c4893841ff2d54cbda1b2860fa3260173df5ddd7b95d370186f8a5e66a4ac", size = 10287772, upload-time = "2026-02-26T20:04:08.138Z" },
{ url = "https://files.pythonhosted.org/packages/2b/90/58a1802d84fed15f8f281925b21ab3cecd813bde52a8ca033a4de8ab0e7a/ruff-0.15.4-py3-none-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:820b8766bd65503b6c30aaa6331e8ef3a6e564f7999c844e9a547c40179e440a", size = 11049051, upload-time = "2026-02-26T20:04:03.53Z" },
{ url = "https://files.pythonhosted.org/packages/d2/ac/b7ad36703c35f3866584564dc15f12f91cb1a26a897dc2fd13d7cb3ae1af/ruff-0.15.4-py3-none-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:c9fb74bab47139c1751f900f857fa503987253c3ef89129b24ed375e72873e85", size = 11890494, upload-time = "2026-02-26T20:04:10.497Z" },
{ url = "https://files.pythonhosted.org/packages/93/3d/3eb2f47a39a8b0da99faf9c54d3eb24720add1e886a5309d4d1be73a6380/ruff-0.15.4-py3-none-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:f80c98765949c518142b3a50a5db89343aa90f2c2bf7799de9986498ae6176db", size = 11326221, upload-time = "2026-02-26T20:04:12.84Z" },
{ url = "https://files.pythonhosted.org/packages/ff/90/bf134f4c1e5243e62690e09d63c55df948a74084c8ac3e48a88468314da6/ruff-0.15.4-py3-none-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:451a2e224151729b3b6c9ffb36aed9091b2996fe4bdbd11f47e27d8f2e8888ec", size = 11168459, upload-time = "2026-02-26T20:04:00.969Z" },
{ url = "https://files.pythonhosted.org/packages/b5/e5/a64d27688789b06b5d55162aafc32059bb8c989c61a5139a36e1368285eb/ruff-0.15.4-py3-none-manylinux_2_31_riscv64.whl", hash = "sha256:a8f157f2e583c513c4f5f896163a93198297371f34c04220daf40d133fdd4f7f", size = 11104366, upload-time = "2026-02-26T20:03:48.099Z" },
{ url = "https://files.pythonhosted.org/packages/f1/f6/32d1dcb66a2559763fc3027bdd65836cad9eb09d90f2ed6a63d8e9252b02/ruff-0.15.4-py3-none-musllinux_1_2_aarch64.whl", hash = "sha256:917cc68503357021f541e69b35361c99387cdbbf99bd0ea4aa6f28ca99ff5338", size = 10510887, upload-time = "2026-02-26T20:03:45.771Z" },
{ url = "https://files.pythonhosted.org/packages/ff/92/22d1ced50971c5b6433aed166fcef8c9343f567a94cf2b9d9089f6aa80fe/ruff-0.15.4-py3-none-musllinux_1_2_armv7l.whl", hash = "sha256:e9737c8161da79fd7cfec19f1e35620375bd8b2a50c3e77fa3d2c16f574105cc", size = 10285939, upload-time = "2026-02-26T20:04:22.42Z" },
{ url = "https://files.pythonhosted.org/packages/e6/f4/7c20aec3143837641a02509a4668fb146a642fd1211846634edc17eb5563/ruff-0.15.4-py3-none-musllinux_1_2_i686.whl", hash = "sha256:291258c917539e18f6ba40482fe31d6f5ac023994ee11d7bdafd716f2aab8a68", size = 10765471, upload-time = "2026-02-26T20:03:58.924Z" },
{ url = "https://files.pythonhosted.org/packages/d0/09/6d2f7586f09a16120aebdff8f64d962d7c4348313c77ebb29c566cefc357/ruff-0.15.4-py3-none-musllinux_1_2_x86_64.whl", hash = "sha256:3f83c45911da6f2cd5936c436cf86b9f09f09165f033a99dcf7477e34041cbc3", size = 11263382, upload-time = "2026-02-26T20:04:24.424Z" },
{ url = "https://files.pythonhosted.org/packages/1b/fa/2ef715a1cd329ef47c1a050e10dee91a9054b7ce2fcfdd6a06d139afb7ec/ruff-0.15.4-py3-none-win32.whl", hash = "sha256:65594a2d557d4ee9f02834fcdf0a28daa8b3b9f6cb2cb93846025a36db47ef22", size = 10506664, upload-time = "2026-02-26T20:03:50.56Z" },
{ url = "https://files.pythonhosted.org/packages/d0/a8/c688ef7e29983976820d18710f955751d9f4d4eb69df658af3d006e2ba3e/ruff-0.15.4-py3-none-win_amd64.whl", hash = "sha256:04196ad44f0df220c2ece5b0e959c2f37c777375ec744397d21d15b50a75264f", size = 11651048, upload-time = "2026-02-26T20:04:17.191Z" },
{ url = "https://files.pythonhosted.org/packages/3e/0a/9e1be9035b37448ce2e68c978f0591da94389ade5a5abafa4cf99985d1b2/ruff-0.15.4-py3-none-win_arm64.whl", hash = "sha256:60d5177e8cfc70e51b9c5fad936c634872a74209f934c1e79107d11787ad5453", size = 10966776, upload-time = "2026-02-26T20:03:56.908Z" },
]
[[package]]
name = "sniffio"
version = "1.3.1"
source = { registry = "https://pypi.org/simple" }
sdist = { url = "https://files.pythonhosted.org/packages/a2/87/a6771e1546d97e7e041b6ae58d80074f81b7d5121207425c964ddf5cfdbd/sniffio-1.3.1.tar.gz", hash = "sha256:f4324edc670a0f49750a81b895f35c3adb843cca46f0530f79fc1babb23789dc", size = 20372, upload-time = "2024-02-25T23:20:04.057Z" }
wheels = [
{ url = "https://files.pythonhosted.org/packages/e9/44/75a9c9421471a6c4805dbf2356f7c181a29c1879239abab1ea2cc8f38b40/sniffio-1.3.1-py3-none-any.whl", hash = "sha256:2f6da418d1f1e0fddd844478f41680e794e6051915791a034ff65e5f100525a2", size = 10235, upload-time = "2024-02-25T23:20:01.196Z" },
]
[[package]]
name = "starlette"
version = "0.52.1"
source = { registry = "https://pypi.org/simple" }
dependencies = [
{ name = "anyio" },
]
sdist = { url = "https://files.pythonhosted.org/packages/c4/68/79977123bb7be889ad680d79a40f339082c1978b5cfcf62c2d8d196873ac/starlette-0.52.1.tar.gz", hash = "sha256:834edd1b0a23167694292e94f597773bc3f89f362be6effee198165a35d62933", size = 2653702, upload-time = "2026-01-18T13:34:11.062Z" }
wheels = [
{ url = "https://files.pythonhosted.org/packages/81/0d/13d1d239a25cbfb19e740db83143e95c772a1fe10202dda4b76792b114dd/starlette-0.52.1-py3-none-any.whl", hash = "sha256:0029d43eb3d273bc4f83a08720b4912ea4b071087a3b48db01b7c839f7954d74", size = 74272, upload-time = "2026-01-18T13:34:09.188Z" },
]
[[package]]
name = "tqdm"
version = "4.67.3"
source = { registry = "https://pypi.org/simple" }
dependencies = [
{ name = "colorama", marker = "sys_platform == 'win32'" },
]
sdist = { url = "https://files.pythonhosted.org/packages/09/a9/6ba95a270c6f1fbcd8dac228323f2777d886cb206987444e4bce66338dd4/tqdm-4.67.3.tar.gz", hash = "sha256:7d825f03f89244ef73f1d4ce193cb1774a8179fd96f31d7e1dcde62092b960bb", size = 169598, upload-time = "2026-02-03T17:35:53.048Z" }
wheels = [
{ url = "https://files.pythonhosted.org/packages/16/e1/3079a9ff9b8e11b846c6ac5c8b5bfb7ff225eee721825310c91b3b50304f/tqdm-4.67.3-py3-none-any.whl", hash = "sha256:ee1e4c0e59148062281c49d80b25b67771a127c85fc9676d3be5f243206826bf", size = 78374, upload-time = "2026-02-03T17:35:50.982Z" },
]
[[package]]
name = "types-pyyaml"
version = "6.0.12.20250915"
source = { registry = "https://pypi.org/simple" }
sdist = { url = "https://files.pythonhosted.org/packages/7e/69/3c51b36d04da19b92f9e815be12753125bd8bc247ba0470a982e6979e71c/types_pyyaml-6.0.12.20250915.tar.gz", hash = "sha256:0f8b54a528c303f0e6f7165687dd33fafa81c807fcac23f632b63aa624ced1d3", size = 17522, upload-time = "2025-09-15T03:01:00.728Z" }
wheels = [
{ url = "https://files.pythonhosted.org/packages/bd/e0/1eed384f02555dde685fff1a1ac805c1c7dcb6dd019c916fe659b1c1f9ec/types_pyyaml-6.0.12.20250915-py3-none-any.whl", hash = "sha256:e7d4d9e064e89a3b3cae120b4990cd370874d2bf12fa5f46c97018dd5d3c9ab6", size = 20338, upload-time = "2025-09-15T03:00:59.218Z" },
]
[[package]]
name = "typing-extensions"
version = "4.15.0"
source = { registry = "https://pypi.org/simple" }
sdist = { url = "https://files.pythonhosted.org/packages/72/94/1a15dd82efb362ac84269196e94cf00f187f7ed21c242792a923cdb1c61f/typing_extensions-4.15.0.tar.gz", hash = "sha256:0cea48d173cc12fa28ecabc3b837ea3cf6f38c6d1136f85cbaaf598984861466", size = 109391, upload-time = "2025-08-25T13:49:26.313Z" }
wheels = [
{ url = "https://files.pythonhosted.org/packages/18/67/36e9267722cc04a6b9f15c7f3441c2363321a3ea07da7ae0c0707beb2a9c/typing_extensions-4.15.0-py3-none-any.whl", hash = "sha256:f0fa19c6845758ab08074a0cfa8b7aecb71c999ca73d62883bc25cc018c4e548", size = 44614, upload-time = "2025-08-25T13:49:24.86Z" },
]
[[package]]
name = "typing-inspection"
version = "0.4.2"
source = { registry = "https://pypi.org/simple" }
dependencies = [
{ name = "typing-extensions" },
]
sdist = { url = "https://files.pythonhosted.org/packages/55/e3/70399cb7dd41c10ac53367ae42139cf4b1ca5f36bb3dc6c9d33acdb43655/typing_inspection-0.4.2.tar.gz", hash = "sha256:ba561c48a67c5958007083d386c3295464928b01faa735ab8547c5692e87f464", size = 75949, upload-time = "2025-10-01T02:14:41.687Z" }
wheels = [
{ url = "https://files.pythonhosted.org/packages/dc/9b/47798a6c91d8bdb567fe2698fe81e0c6b7cb7ef4d13da4114b41d239f65d/typing_inspection-0.4.2-py3-none-any.whl", hash = "sha256:4ed1cacbdc298c220f1bd249ed5287caa16f34d44ef4e9c3d0cbad5b521545e7", size = 14611, upload-time = "2025-10-01T02:14:40.154Z" },
]
[[package]]
name = "uvicorn"
version = "0.41.0"
source = { registry = "https://pypi.org/simple" }
dependencies = [
{ name = "click" },
{ name = "h11" },
]
sdist = { url = "https://files.pythonhosted.org/packages/32/ce/eeb58ae4ac36fe09e3842eb02e0eb676bf2c53ae062b98f1b2531673efdd/uvicorn-0.41.0.tar.gz", hash = "sha256:09d11cf7008da33113824ee5a1c6422d89fbc2ff476540d69a34c87fab8b571a", size = 82633, upload-time = "2026-02-16T23:07:24.1Z" }
wheels = [
{ url = "https://files.pythonhosted.org/packages/83/e4/d04a086285c20886c0daad0e026f250869201013d18f81d9ff5eada73a88/uvicorn-0.41.0-py3-none-any.whl", hash = "sha256:29e35b1d2c36a04b9e180d4007ede3bcb32a85fbdfd6c6aeb3f26839de088187", size = 68783, upload-time = "2026-02-16T23:07:22.357Z" },
]
[package.optional-dependencies]
standard = [
{ name = "colorama", marker = "sys_platform == 'win32'" },
{ name = "httptools" },
{ name = "python-dotenv" },
{ name = "pyyaml" },
{ name = "uvloop", marker = "platform_python_implementation != 'PyPy' and sys_platform != 'cygwin' and sys_platform != 'win32'" },
{ name = "watchfiles" },
{ name = "websockets" },
]
[[package]]
name = "uvloop"
version = "0.22.1"
source = { registry = "https://pypi.org/simple" }
sdist = { url = "https://files.pythonhosted.org/packages/06/f0/18d39dbd1971d6d62c4629cc7fa67f74821b0dc1f5a77af43719de7936a7/uvloop-0.22.1.tar.gz", hash = "sha256:6c84bae345b9147082b17371e3dd5d42775bddce91f885499017f4607fdaf39f", size = 2443250, upload-time = "2025-10-16T22:17:19.342Z" }
wheels = [
{ url = "https://files.pythonhosted.org/packages/90/cd/b62bdeaa429758aee8de8b00ac0dd26593a9de93d302bff3d21439e9791d/uvloop-0.22.1-cp314-cp314-macosx_10_13_universal2.whl", hash = "sha256:3879b88423ec7e97cd4eba2a443aa26ed4e59b45e6b76aabf13fe2f27023a142", size = 1362067, upload-time = "2025-10-16T22:16:44.503Z" },
{ url = "https://files.pythonhosted.org/packages/0d/f8/a132124dfda0777e489ca86732e85e69afcd1ff7686647000050ba670689/uvloop-0.22.1-cp314-cp314-macosx_10_13_x86_64.whl", hash = "sha256:4baa86acedf1d62115c1dc6ad1e17134476688f08c6efd8a2ab076e815665c74", size = 752423, upload-time = "2025-10-16T22:16:45.968Z" },
{ url = "https://files.pythonhosted.org/packages/a3/94/94af78c156f88da4b3a733773ad5ba0b164393e357cc4bd0ab2e2677a7d6/uvloop-0.22.1-cp314-cp314-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:297c27d8003520596236bdb2335e6b3f649480bd09e00d1e3a99144b691d2a35", size = 4272437, upload-time = "2025-10-16T22:16:47.451Z" },
{ url = "https://files.pythonhosted.org/packages/b5/35/60249e9fd07b32c665192cec7af29e06c7cd96fa1d08b84f012a56a0b38e/uvloop-0.22.1-cp314-cp314-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:c1955d5a1dd43198244d47664a5858082a3239766a839b2102a269aaff7a4e25", size = 4292101, upload-time = "2025-10-16T22:16:49.318Z" },
{ url = "https://files.pythonhosted.org/packages/02/62/67d382dfcb25d0a98ce73c11ed1a6fba5037a1a1d533dcbb7cab033a2636/uvloop-0.22.1-cp314-cp314-musllinux_1_2_aarch64.whl", hash = "sha256:b31dc2fccbd42adc73bc4e7cdbae4fc5086cf378979e53ca5d0301838c5682c6", size = 4114158, upload-time = "2025-10-16T22:16:50.517Z" },
{ url = "https://files.pythonhosted.org/packages/f0/7a/f1171b4a882a5d13c8b7576f348acfe6074d72eaf52cccef752f748d4a9f/uvloop-0.22.1-cp314-cp314-musllinux_1_2_x86_64.whl", hash = "sha256:93f617675b2d03af4e72a5333ef89450dfaa5321303ede6e67ba9c9d26878079", size = 4177360, upload-time = "2025-10-16T22:16:52.646Z" },
{ url = "https://files.pythonhosted.org/packages/79/7b/b01414f31546caf0919da80ad57cbfe24c56b151d12af68cee1b04922ca8/uvloop-0.22.1-cp314-cp314t-macosx_10_13_universal2.whl", hash = "sha256:37554f70528f60cad66945b885eb01f1bb514f132d92b6eeed1c90fd54ed6289", size = 1454790, upload-time = "2025-10-16T22:16:54.355Z" },
{ url = "https://files.pythonhosted.org/packages/d4/31/0bb232318dd838cad3fa8fb0c68c8b40e1145b32025581975e18b11fab40/uvloop-0.22.1-cp314-cp314t-macosx_10_13_x86_64.whl", hash = "sha256:b76324e2dc033a0b2f435f33eb88ff9913c156ef78e153fb210e03c13da746b3", size = 796783, upload-time = "2025-10-16T22:16:55.906Z" },
{ url = "https://files.pythonhosted.org/packages/42/38/c9b09f3271a7a723a5de69f8e237ab8e7803183131bc57c890db0b6bb872/uvloop-0.22.1-cp314-cp314t-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:badb4d8e58ee08dad957002027830d5c3b06aea446a6a3744483c2b3b745345c", size = 4647548, upload-time = "2025-10-16T22:16:57.008Z" },
{ url = "https://files.pythonhosted.org/packages/c1/37/945b4ca0ac27e3dc4952642d4c900edd030b3da6c9634875af6e13ae80e5/uvloop-0.22.1-cp314-cp314t-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:b91328c72635f6f9e0282e4a57da7470c7350ab1c9f48546c0f2866205349d21", size = 4467065, upload-time = "2025-10-16T22:16:58.206Z" },
{ url = "https://files.pythonhosted.org/packages/97/cc/48d232f33d60e2e2e0b42f4e73455b146b76ebe216487e862700457fbf3c/uvloop-0.22.1-cp314-cp314t-musllinux_1_2_aarch64.whl", hash = "sha256:daf620c2995d193449393d6c62131b3fbd40a63bf7b307a1527856ace637fe88", size = 4328384, upload-time = "2025-10-16T22:16:59.36Z" },
{ url = "https://files.pythonhosted.org/packages/e4/16/c1fd27e9549f3c4baf1dc9c20c456cd2f822dbf8de9f463824b0c0357e06/uvloop-0.22.1-cp314-cp314t-musllinux_1_2_x86_64.whl", hash = "sha256:6cde23eeda1a25c75b2e07d39970f3374105d5eafbaab2a4482be82f272d5a5e", size = 4296730, upload-time = "2025-10-16T22:17:00.744Z" },
]
[[package]]
name = "watchfiles"
version = "1.1.1"
source = { registry = "https://pypi.org/simple" }
dependencies = [
{ name = "anyio" },
]
sdist = { url = "https://files.pythonhosted.org/packages/c2/c9/8869df9b2a2d6c59d79220a4db37679e74f807c559ffe5265e08b227a210/watchfiles-1.1.1.tar.gz", hash = "sha256:a173cb5c16c4f40ab19cecf48a534c409f7ea983ab8fed0741304a1c0a31b3f2", size = 94440, upload-time = "2025-10-14T15:06:21.08Z" }
wheels = [
{ url = "https://files.pythonhosted.org/packages/c3/f4/0872229324ef69b2c3edec35e84bd57a1289e7d3fe74588048ed8947a323/watchfiles-1.1.1-cp314-cp314-macosx_10_12_x86_64.whl", hash = "sha256:d1715143123baeeaeadec0528bb7441103979a1d5f6fd0e1f915383fea7ea6d5", size = 404315, upload-time = "2025-10-14T15:05:26.501Z" },
{ url = "https://files.pythonhosted.org/packages/7b/22/16d5331eaed1cb107b873f6ae1b69e9ced582fcf0c59a50cd84f403b1c32/watchfiles-1.1.1-cp314-cp314-macosx_11_0_arm64.whl", hash = "sha256:39574d6370c4579d7f5d0ad940ce5b20db0e4117444e39b6d8f99db5676c52fd", size = 390869, upload-time = "2025-10-14T15:05:27.649Z" },
{ url = "https://files.pythonhosted.org/packages/b2/7e/5643bfff5acb6539b18483128fdc0ef2cccc94a5b8fbda130c823e8ed636/watchfiles-1.1.1-cp314-cp314-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:7365b92c2e69ee952902e8f70f3ba6360d0d596d9299d55d7d386df84b6941fb", size = 449919, upload-time = "2025-10-14T15:05:28.701Z" },
{ url = "https://files.pythonhosted.org/packages/51/2e/c410993ba5025a9f9357c376f48976ef0e1b1aefb73b97a5ae01a5972755/watchfiles-1.1.1-cp314-cp314-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:bfff9740c69c0e4ed32416f013f3c45e2ae42ccedd1167ef2d805c000b6c71a5", size = 460845, upload-time = "2025-10-14T15:05:30.064Z" },
{ url = "https://files.pythonhosted.org/packages/8e/a4/2df3b404469122e8680f0fcd06079317e48db58a2da2950fb45020947734/watchfiles-1.1.1-cp314-cp314-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:b27cf2eb1dda37b2089e3907d8ea92922b673c0c427886d4edc6b94d8dfe5db3", size = 489027, upload-time = "2025-10-14T15:05:31.064Z" },
{ url = "https://files.pythonhosted.org/packages/ea/84/4587ba5b1f267167ee715b7f66e6382cca6938e0a4b870adad93e44747e6/watchfiles-1.1.1-cp314-cp314-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:526e86aced14a65a5b0ec50827c745597c782ff46b571dbfe46192ab9e0b3c33", size = 595615, upload-time = "2025-10-14T15:05:32.074Z" },
{ url = "https://files.pythonhosted.org/packages/6a/0f/c6988c91d06e93cd0bb3d4a808bcf32375ca1904609835c3031799e3ecae/watchfiles-1.1.1-cp314-cp314-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:04e78dd0b6352db95507fd8cb46f39d185cf8c74e4cf1e4fbad1d3df96faf510", size = 474836, upload-time = "2025-10-14T15:05:33.209Z" },
{ url = "https://files.pythonhosted.org/packages/b4/36/ded8aebea91919485b7bbabbd14f5f359326cb5ec218cd67074d1e426d74/watchfiles-1.1.1-cp314-cp314-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5c85794a4cfa094714fb9c08d4a218375b2b95b8ed1666e8677c349906246c05", size = 455099, upload-time = "2025-10-14T15:05:34.189Z" },
{ url = "https://files.pythonhosted.org/packages/98/e0/8c9bdba88af756a2fce230dd365fab2baf927ba42cd47521ee7498fd5211/watchfiles-1.1.1-cp314-cp314-musllinux_1_1_aarch64.whl", hash = "sha256:74d5012b7630714b66be7b7b7a78855ef7ad58e8650c73afc4c076a1f480a8d6", size = 630626, upload-time = "2025-10-14T15:05:35.216Z" },
{ url = "https://files.pythonhosted.org/packages/2a/84/a95db05354bf2d19e438520d92a8ca475e578c647f78f53197f5a2f17aaf/watchfiles-1.1.1-cp314-cp314-musllinux_1_1_x86_64.whl", hash = "sha256:8fbe85cb3201c7d380d3d0b90e63d520f15d6afe217165d7f98c9c649654db81", size = 622519, upload-time = "2025-10-14T15:05:36.259Z" },
{ url = "https://files.pythonhosted.org/packages/1d/ce/d8acdc8de545de995c339be67711e474c77d643555a9bb74a9334252bd55/watchfiles-1.1.1-cp314-cp314-win32.whl", hash = "sha256:3fa0b59c92278b5a7800d3ee7733da9d096d4aabcfabb9a928918bd276ef9b9b", size = 272078, upload-time = "2025-10-14T15:05:37.63Z" },
{ url = "https://files.pythonhosted.org/packages/c4/c9/a74487f72d0451524be827e8edec251da0cc1fcf111646a511ae752e1a3d/watchfiles-1.1.1-cp314-cp314-win_amd64.whl", hash = "sha256:c2047d0b6cea13b3316bdbafbfa0c4228ae593d995030fda39089d36e64fc03a", size = 287664, upload-time = "2025-10-14T15:05:38.95Z" },
{ url = "https://files.pythonhosted.org/packages/df/b8/8ac000702cdd496cdce998c6f4ee0ca1f15977bba51bdf07d872ebdfc34c/watchfiles-1.1.1-cp314-cp314-win_arm64.whl", hash = "sha256:842178b126593addc05acf6fce960d28bc5fae7afbaa2c6c1b3a7b9460e5be02", size = 277154, upload-time = "2025-10-14T15:05:39.954Z" },
{ url = "https://files.pythonhosted.org/packages/47/a8/e3af2184707c29f0f14b1963c0aace6529f9d1b8582d5b99f31bbf42f59e/watchfiles-1.1.1-cp314-cp314t-macosx_10_12_x86_64.whl", hash = "sha256:88863fbbc1a7312972f1c511f202eb30866370ebb8493aef2812b9ff28156a21", size = 403820, upload-time = "2025-10-14T15:05:40.932Z" },
{ url = "https://files.pythonhosted.org/packages/c0/ec/e47e307c2f4bd75f9f9e8afbe3876679b18e1bcec449beca132a1c5ffb2d/watchfiles-1.1.1-cp314-cp314t-macosx_11_0_arm64.whl", hash = "sha256:55c7475190662e202c08c6c0f4d9e345a29367438cf8e8037f3155e10a88d5a5", size = 390510, upload-time = "2025-10-14T15:05:41.945Z" },
{ url = "https://files.pythonhosted.org/packages/d5/a0/ad235642118090f66e7b2f18fd5c42082418404a79205cdfca50b6309c13/watchfiles-1.1.1-cp314-cp314t-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3f53fa183d53a1d7a8852277c92b967ae99c2d4dcee2bfacff8868e6e30b15f7", size = 448408, upload-time = "2025-10-14T15:05:43.385Z" },
{ url = "https://files.pythonhosted.org/packages/df/85/97fa10fd5ff3332ae17e7e40e20784e419e28521549780869f1413742e9d/watchfiles-1.1.1-cp314-cp314t-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:6aae418a8b323732fa89721d86f39ec8f092fc2af67f4217a2b07fd3e93c6101", size = 458968, upload-time = "2025-10-14T15:05:44.404Z" },
{ url = "https://files.pythonhosted.org/packages/47/c2/9059c2e8966ea5ce678166617a7f75ecba6164375f3b288e50a40dc6d489/watchfiles-1.1.1-cp314-cp314t-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:f096076119da54a6080e8920cbdaac3dbee667eb91dcc5e5b78840b87415bd44", size = 488096, upload-time = "2025-10-14T15:05:45.398Z" },
{ url = "https://files.pythonhosted.org/packages/94/44/d90a9ec8ac309bc26db808a13e7bfc0e4e78b6fc051078a554e132e80160/watchfiles-1.1.1-cp314-cp314t-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:00485f441d183717038ed2e887a7c868154f216877653121068107b227a2f64c", size = 596040, upload-time = "2025-10-14T15:05:46.502Z" },
{ url = "https://files.pythonhosted.org/packages/95/68/4e3479b20ca305cfc561db3ed207a8a1c745ee32bf24f2026a129d0ddb6e/watchfiles-1.1.1-cp314-cp314t-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:a55f3e9e493158d7bfdb60a1165035f1cf7d320914e7b7ea83fe22c6023b58fc", size = 473847, upload-time = "2025-10-14T15:05:47.484Z" },
{ url = "https://files.pythonhosted.org/packages/4f/55/2af26693fd15165c4ff7857e38330e1b61ab8c37d15dc79118cdba115b7a/watchfiles-1.1.1-cp314-cp314t-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8c91ed27800188c2ae96d16e3149f199d62f86c7af5f5f4d2c61a3ed8cd3666c", size = 455072, upload-time = "2025-10-14T15:05:48.928Z" },
{ url = "https://files.pythonhosted.org/packages/66/1d/d0d200b10c9311ec25d2273f8aad8c3ef7cc7ea11808022501811208a750/watchfiles-1.1.1-cp314-cp314t-musllinux_1_1_aarch64.whl", hash = "sha256:311ff15a0bae3714ffb603e6ba6dbfba4065ab60865d15a6ec544133bdb21099", size = 629104, upload-time = "2025-10-14T15:05:49.908Z" },
{ url = "https://files.pythonhosted.org/packages/e3/bd/fa9bb053192491b3867ba07d2343d9f2252e00811567d30ae8d0f78136fe/watchfiles-1.1.1-cp314-cp314t-musllinux_1_1_x86_64.whl", hash = "sha256:a916a2932da8f8ab582f242c065f5c81bed3462849ca79ee357dd9551b0e9b01", size = 622112, upload-time = "2025-10-14T15:05:50.941Z" },
]
[[package]]
name = "websockets"
version = "16.0"
source = { registry = "https://pypi.org/simple" }
sdist = { url = "https://files.pythonhosted.org/packages/04/24/4b2031d72e840ce4c1ccb255f693b15c334757fc50023e4db9537080b8c4/websockets-16.0.tar.gz", hash = "sha256:5f6261a5e56e8d5c42a4497b364ea24d94d9563e8fbd44e78ac40879c60179b5", size = 179346, upload-time = "2026-01-10T09:23:47.181Z" }
wheels = [
{ url = "https://files.pythonhosted.org/packages/f3/1d/e88022630271f5bd349ed82417136281931e558d628dd52c4d8621b4a0b2/websockets-16.0-cp314-cp314-macosx_10_15_universal2.whl", hash = "sha256:8cc451a50f2aee53042ac52d2d053d08bf89bcb31ae799cb4487587661c038a0", size = 177406, upload-time = "2026-01-10T09:23:12.178Z" },
{ url = "https://files.pythonhosted.org/packages/f2/78/e63be1bf0724eeb4616efb1ae1c9044f7c3953b7957799abb5915bffd38e/websockets-16.0-cp314-cp314-macosx_10_15_x86_64.whl", hash = "sha256:daa3b6ff70a9241cf6c7fc9e949d41232d9d7d26fd3522b1ad2b4d62487e9904", size = 175085, upload-time = "2026-01-10T09:23:13.511Z" },
{ url = "https://files.pythonhosted.org/packages/bb/f4/d3c9220d818ee955ae390cf319a7c7a467beceb24f05ee7aaaa2414345ba/websockets-16.0-cp314-cp314-macosx_11_0_arm64.whl", hash = "sha256:fd3cb4adb94a2a6e2b7c0d8d05cb94e6f1c81a0cf9dc2694fb65c7e8d94c42e4", size = 175328, upload-time = "2026-01-10T09:23:14.727Z" },
{ url = "https://files.pythonhosted.org/packages/63/bc/d3e208028de777087e6fb2b122051a6ff7bbcca0d6df9d9c2bf1dd869ae9/websockets-16.0-cp314-cp314-manylinux1_x86_64.manylinux_2_28_x86_64.manylinux_2_5_x86_64.whl", hash = "sha256:781caf5e8eee67f663126490c2f96f40906594cb86b408a703630f95550a8c3e", size = 185044, upload-time = "2026-01-10T09:23:15.939Z" },
{ url = "https://files.pythonhosted.org/packages/ad/6e/9a0927ac24bd33a0a9af834d89e0abc7cfd8e13bed17a86407a66773cc0e/websockets-16.0-cp314-cp314-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:caab51a72c51973ca21fa8a18bd8165e1a0183f1ac7066a182ff27107b71e1a4", size = 186279, upload-time = "2026-01-10T09:23:17.148Z" },
{ url = "https://files.pythonhosted.org/packages/b9/ca/bf1c68440d7a868180e11be653c85959502efd3a709323230314fda6e0b3/websockets-16.0-cp314-cp314-musllinux_1_2_aarch64.whl", hash = "sha256:19c4dc84098e523fd63711e563077d39e90ec6702aff4b5d9e344a60cb3c0cb1", size = 185711, upload-time = "2026-01-10T09:23:18.372Z" },
{ url = "https://files.pythonhosted.org/packages/c4/f8/fdc34643a989561f217bb477cbc47a3a07212cbda91c0e4389c43c296ebf/websockets-16.0-cp314-cp314-musllinux_1_2_x86_64.whl", hash = "sha256:a5e18a238a2b2249c9a9235466b90e96ae4795672598a58772dd806edc7ac6d3", size = 184982, upload-time = "2026-01-10T09:23:19.652Z" },
{ url = "https://files.pythonhosted.org/packages/dd/d1/574fa27e233764dbac9c52730d63fcf2823b16f0856b3329fc6268d6ae4f/websockets-16.0-cp314-cp314-win32.whl", hash = "sha256:a069d734c4a043182729edd3e9f247c3b2a4035415a9172fd0f1b71658a320a8", size = 177915, upload-time = "2026-01-10T09:23:21.458Z" },
{ url = "https://files.pythonhosted.org/packages/8a/f1/ae6b937bf3126b5134ce1f482365fde31a357c784ac51852978768b5eff4/websockets-16.0-cp314-cp314-win_amd64.whl", hash = "sha256:c0ee0e63f23914732c6d7e0cce24915c48f3f1512ec1d079ed01fc629dab269d", size = 178381, upload-time = "2026-01-10T09:23:22.715Z" },
{ url = "https://files.pythonhosted.org/packages/06/9b/f791d1db48403e1f0a27577a6beb37afae94254a8c6f08be4a23e4930bc0/websockets-16.0-cp314-cp314t-macosx_10_15_universal2.whl", hash = "sha256:a35539cacc3febb22b8f4d4a99cc79b104226a756aa7400adc722e83b0d03244", size = 177737, upload-time = "2026-01-10T09:23:24.523Z" },
{ url = "https://files.pythonhosted.org/packages/bd/40/53ad02341fa33b3ce489023f635367a4ac98b73570102ad2cdd770dacc9a/websockets-16.0-cp314-cp314t-macosx_10_15_x86_64.whl", hash = "sha256:b784ca5de850f4ce93ec85d3269d24d4c82f22b7212023c974c401d4980ebc5e", size = 175268, upload-time = "2026-01-10T09:23:25.781Z" },
{ url = "https://files.pythonhosted.org/packages/74/9b/6158d4e459b984f949dcbbb0c5d270154c7618e11c01029b9bbd1bb4c4f9/websockets-16.0-cp314-cp314t-macosx_11_0_arm64.whl", hash = "sha256:569d01a4e7fba956c5ae4fc988f0d4e187900f5497ce46339c996dbf24f17641", size = 175486, upload-time = "2026-01-10T09:23:27.033Z" },
{ url = "https://files.pythonhosted.org/packages/e5/2d/7583b30208b639c8090206f95073646c2c9ffd66f44df967981a64f849ad/websockets-16.0-cp314-cp314t-manylinux1_x86_64.manylinux_2_28_x86_64.manylinux_2_5_x86_64.whl", hash = "sha256:50f23cdd8343b984957e4077839841146f67a3d31ab0d00e6b824e74c5b2f6e8", size = 185331, upload-time = "2026-01-10T09:23:28.259Z" },
{ url = "https://files.pythonhosted.org/packages/45/b0/cce3784eb519b7b5ad680d14b9673a31ab8dcb7aad8b64d81709d2430aa8/websockets-16.0-cp314-cp314t-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:152284a83a00c59b759697b7f9e9cddf4e3c7861dd0d964b472b70f78f89e80e", size = 186501, upload-time = "2026-01-10T09:23:29.449Z" },
{ url = "https://files.pythonhosted.org/packages/19/60/b8ebe4c7e89fb5f6cdf080623c9d92789a53636950f7abacfc33fe2b3135/websockets-16.0-cp314-cp314t-musllinux_1_2_aarch64.whl", hash = "sha256:bc59589ab64b0022385f429b94697348a6a234e8ce22544e3681b2e9331b5944", size = 186062, upload-time = "2026-01-10T09:23:31.368Z" },
{ url = "https://files.pythonhosted.org/packages/88/a8/a080593f89b0138b6cba1b28f8df5673b5506f72879322288b031337c0b8/websockets-16.0-cp314-cp314t-musllinux_1_2_x86_64.whl", hash = "sha256:32da954ffa2814258030e5a57bc73a3635463238e797c7375dc8091327434206", size = 185356, upload-time = "2026-01-10T09:23:32.627Z" },
{ url = "https://files.pythonhosted.org/packages/c2/b6/b9afed2afadddaf5ebb2afa801abf4b0868f42f8539bfe4b071b5266c9fe/websockets-16.0-cp314-cp314t-win32.whl", hash = "sha256:5a4b4cc550cb665dd8a47f868c8d04c8230f857363ad3c9caf7a0c3bf8c61ca6", size = 178085, upload-time = "2026-01-10T09:23:33.816Z" },
{ url = "https://files.pythonhosted.org/packages/9f/3e/28135a24e384493fa804216b79a6a6759a38cc4ff59118787b9fb693df93/websockets-16.0-cp314-cp314t-win_amd64.whl", hash = "sha256:b14dc141ed6d2dde437cddb216004bcac6a1df0935d79656387bd41632ba0bbd", size = 178531, upload-time = "2026-01-10T09:23:35.016Z" },
{ url = "https://files.pythonhosted.org/packages/6f/28/258ebab549c2bf3e64d2b0217b973467394a9cea8c42f70418ca2c5d0d2e/websockets-16.0-py3-none-any.whl", hash = "sha256:1637db62fad1dc833276dded54215f2c7fa46912301a24bd94d45d46a011ceec", size = 171598, upload-time = "2026-01-10T09:23:45.395Z" },
]