1
0
Fork 0

refactor!: a lot of stuff

This commit is contained in:
Arthur K. 2026-03-02 21:14:20 +03:00
parent d6396e4050
commit 0af7179596
Signed by: wzray
GPG key ID: B97F30FDC4636357
15 changed files with 663 additions and 302 deletions

12
tests/conftest.py Normal file
View file

@@ -0,0 +1,12 @@
import sys
from pathlib import Path
def _add_src_to_path() -> None:
root = Path(__file__).resolve().parents[1]
src = root / "src"
if str(src) not in sys.path:
sys.path.insert(0, str(src))
_add_src_to_path()

View file

@@ -0,0 +1,37 @@
from providers.chatgpt.registration import (
build_authorize_url,
extract_verification_code,
generate_birthdate_90s,
generate_name,
)
def test_generate_name_shape():
    """A generated name is exactly two space-separated alphabetic words."""
    full_name = generate_name()
    tokens = full_name.split(" ")
    assert len(tokens) == 2
    assert all(token.isalpha() for token in tokens)
def test_generate_birthdate_90s_range():
    """Generated birthdates stay in the 1990s with always-valid day/month."""
    month, day, year = generate_birthdate_90s()
    assert 1990 <= int(year) <= 1999
    assert 1 <= int(month) <= 12
    # Day capped at 28 so any month (incl. February) is valid.
    assert 1 <= int(day) <= 28
def test_extract_verification_code_prefers_chatgpt_phrase():
    """The code after the ChatGPT phrase wins over an earlier 6-digit run."""
    message = "foo 123456 bar Your ChatGPT code is 654321"
    assert extract_verification_code(message) == "654321"
def test_extract_verification_code_fallback_last_code():
    """Without the ChatGPT phrase, the last 6-digit run is returned."""
    message = "codes 111111 and 222222"
    assert extract_verification_code(message) == "222222"
def test_build_authorize_url_contains_required_params():
    """The authorize URL carries the OAuth/PKCE query parameters."""
    authorize_url = build_authorize_url("challenge", "state123")
    for fragment in ("response_type=code", "code_challenge=challenge", "state=state123"):
        assert fragment in authorize_url

150
tests/test_server_unit.py Normal file
View file

@@ -0,0 +1,150 @@
import asyncio
import json
import server
from providers.base import Provider, ProviderTokens
class FakeRequest:
    """Minimal request double; token_handler only reads match_info["provider"]."""

    def __init__(self, provider: str):
        self.match_info = {"provider": provider}
class FakeProvider(Provider):
    """In-memory Provider double with scriptable token, usage, and rotation.

    Records how many times ``get_token`` and ``ensure_standby_account``
    were invoked so tests can assert on the call pattern.
    """

    def __init__(
        self,
        token: str | None = "tok",
        usage: dict | None = None,
        rotate: bool = False,
    ):
        default_usage = {
            "used_percent": 10,
            "remaining_percent": 90,
            "primary_window": None,
            "secondary_window": None,
        }
        self._token = token
        self._usage = usage if usage else default_usage
        self._rotate = rotate
        # Call counters inspected by the tests.
        self.get_token_calls = 0
        self.standby_calls = 0

    @property
    def name(self) -> str:
        return "fake"

    async def get_token(self) -> str | None:
        self.get_token_calls += 1
        return self._token

    async def register_new_account(self) -> bool:
        return True

    async def get_usage_info(self, access_token: str) -> dict:
        _ = access_token
        # Return a copy so callers cannot mutate the scripted usage.
        return dict(self._usage)

    def load_tokens(self) -> ProviderTokens | None:
        return None

    def save_tokens(self, tokens: ProviderTokens) -> None:
        _ = tokens

    async def maybe_rotate_account(self, usage_percent: int) -> bool:
        _ = usage_percent
        return self._rotate

    async def ensure_standby_account(
        self, usage_percent: int, prepare_threshold: int
    ) -> None:
        _ = usage_percent, prepare_threshold
        self.standby_calls += 1
def _response_json(resp) -> dict:
return json.loads(resp.body.decode("utf-8"))
def test_parse_int_env_defaults(monkeypatch):
    """An unset environment variable yields the supplied default."""
    monkeypatch.delenv("X_TEST", raising=False)
    result = server._parse_int_env("X_TEST", 10, 1, 20)
    assert result == 10
def test_parse_int_env_invalid(monkeypatch):
    """A non-numeric value falls back to the default."""
    monkeypatch.setenv("X_TEST", "abc")
    result = server._parse_int_env("X_TEST", 10, 1, 20)
    assert result == 10
def test_parse_int_env_out_of_range(monkeypatch):
    """A value outside [min, max] falls back to the default."""
    monkeypatch.setenv("X_TEST", "999")
    result = server._parse_int_env("X_TEST", 10, 1, 20)
    assert result == 10
def test_build_limit_fields():
    """build_limit reports usage, remainder, and exhaustion/prepare flags."""
    result = server.build_limit(90, 85)
    assert result == {
        "used_percent": 90,
        "remaining_percent": 10,
        "exhausted": False,
        "needs_prepare": True,
    }
def test_get_prepare_threshold():
    """Known providers use their own threshold; unknown ones fall back to 100."""
    assert server.get_prepare_threshold("chatgpt") == server.CHATGPT_PREPARE_THRESHOLD
    assert server.get_prepare_threshold("unknown") == 100
def test_token_handler_unknown_provider(monkeypatch):
    """A provider name missing from the registry yields a 404 response."""
    monkeypatch.setattr(server, "PROVIDERS", {})
    response = asyncio.run(server.token_handler(FakeRequest("missing")))
    assert response.status == 404
def test_token_handler_success(monkeypatch):
    """A healthy provider returns its token plus limit info, 200 OK."""
    provider = FakeProvider()
    monkeypatch.setattr(server, "PROVIDERS", {"fake": provider})
    monkeypatch.setattr(server, "background_tasks", {"fake": None})
    monkeypatch.setattr(server, "get_prepare_threshold", lambda _: 80)
    response = asyncio.run(server.token_handler(FakeRequest("fake")))
    payload = _response_json(response)
    assert response.status == 200
    assert payload["token"] == "tok"
    assert payload["limit"]["needs_prepare"] is False
def test_token_handler_triggers_standby(monkeypatch):
    """Usage crossing the prepare threshold fires trigger_standby_prepare."""
    provider = FakeProvider(usage={"used_percent": 90, "remaining_percent": 10})
    monkeypatch.setattr(server, "PROVIDERS", {"fake": provider})
    monkeypatch.setattr(server, "background_tasks", {"fake": None})
    seen = {"value": False}

    def fake_trigger(name, usage_percent, reason):
        # Validate the arguments the handler passes along.
        assert name == "fake"
        assert usage_percent == 90
        assert "threshold" in reason
        seen["value"] = True

    monkeypatch.setattr(server, "get_prepare_threshold", lambda _: 80)
    monkeypatch.setattr(server, "trigger_standby_prepare", fake_trigger)
    response = asyncio.run(server.token_handler(FakeRequest("fake")))
    assert response.status == 200
    assert seen["value"] is True
def test_token_handler_rotation_path(monkeypatch):
    """Near-exhausted usage with rotation enabled re-fetches the token."""
    provider = FakeProvider(
        usage={"used_percent": 96, "remaining_percent": 4},
        rotate=True,
    )
    monkeypatch.setattr(server, "PROVIDERS", {"fake": provider})
    monkeypatch.setattr(server, "background_tasks", {"fake": None})
    monkeypatch.setattr(server, "get_prepare_threshold", lambda _: 80)
    monkeypatch.setattr(server, "trigger_standby_prepare", lambda *_: None)
    response = asyncio.run(server.token_handler(FakeRequest("fake")))
    assert response.status == 200
    # A second get_token call indicates the post-rotation re-fetch happened.
    assert provider.get_token_calls >= 2

60
tests/test_tokens_unit.py Normal file
View file

@@ -0,0 +1,60 @@
import json
from pathlib import Path
from providers.base import ProviderTokens
from providers.chatgpt import tokens as t
def test_normalize_state_backward_compatible():
    """Legacy flat token dicts are wrapped into the active/next layout."""
    legacy = {"access_token": "a", "refresh_token": "r", "expires_at": 1}
    state = t._normalize_state(legacy)
    assert state["active"]["access_token"] == "a"
    assert state["next_account"] is None
def test_promote_next_tokens(tmp_path, monkeypatch):
    """Promotion moves the standby account into the active slot."""
    monkeypatch.setattr(t, "TOKENS_FILE", tmp_path / "chatgpt_tokens.json")
    active = ProviderTokens("a1", "r1", 100)
    standby = ProviderTokens("a2", "r2", 200)
    t.save_state(active, standby)
    assert t.promote_next_tokens() is True
    promoted, remaining = t.load_state()
    assert promoted is not None
    assert promoted.access_token == "a2"
    assert remaining is None
def test_save_tokens_preserves_next(tmp_path, monkeypatch):
    """save_tokens replaces the active account without touching standby."""
    monkeypatch.setattr(t, "TOKENS_FILE", tmp_path / "chatgpt_tokens.json")
    t.save_state(ProviderTokens("a1", "r1", 100), ProviderTokens("a2", "r2", 200))
    t.save_tokens(ProviderTokens("a3", "r3", 300))
    active, standby = t.load_state()
    assert active is not None and active.access_token == "a3"
    assert standby is not None and standby.access_token == "a2"
def test_atomic_write_produces_valid_json(tmp_path, monkeypatch):
    """The state file on disk is parseable JSON with an 'active' entry."""
    target = tmp_path / "chatgpt_tokens.json"
    monkeypatch.setattr(t, "TOKENS_FILE", target)
    t.save_state(ProviderTokens("x", "y", 123), None)
    with open(target) as handle:
        data = json.load(handle)
    assert "active" in data
    assert data["active"]["access_token"] == "x"
def test_load_state_from_missing_file(tmp_path, monkeypatch):
    """A nonexistent state file loads as (None, None) rather than raising."""
    monkeypatch.setattr(t, "TOKENS_FILE", tmp_path / "missing.json")
    active, standby = t.load_state()
    assert active is None
    assert standby is None

32
tests/test_usage_unit.py Normal file
View file

@@ -0,0 +1,32 @@
from providers.chatgpt.usage import _parse_window, clamp_percent
def test_clamp_percent_bounds():
    """Values are clamped into [0, 100] and coerced to integers."""
    for raw, expected in ((-1, 0), (150, 100), (49.6, 50)):
        assert clamp_percent(raw) == expected
def test_clamp_percent_invalid():
    """Non-numeric input is treated as zero usage."""
    for bad_value in (None, "bad"):
        assert clamp_percent(bad_value) == 0
def test_parse_window_valid():
    """used_percent is coerced to int; the other fields pass through."""
    raw_window = {
        "used_percent": 34.4,
        "limit_window_seconds": 3600,
        "reset_after_seconds": 120,
        "reset_at": 999,
    }
    expected = {
        "used_percent": 34,
        "limit_window_seconds": 3600,
        "reset_after_seconds": 120,
        "reset_at": 999,
    }
    assert _parse_window(raw_window) == expected
def test_parse_window_none():
    """A missing window is propagated as None."""
    assert _parse_window(None) is None