# Coverage for projects/04-llm-adapter-shadow/tests/test_err_cases.py: 100%
# (56 statements)
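"""Error-handling tests for the shadow LLM adapter's provider chain.

These tests check that Runner falls back to the next provider on timeout,
rate-limit, and invalid-JSON errors, and that provider_error,
provider_success, and provider_chain_failed events are written to the
JSONL metrics file.
"""
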
import json

import pytest

from src.llm_adapter.errors import TimeoutError
from src.llm_adapter.provider_spi import ProviderRequest
from src.llm_adapter.providers.mock import MockProvider
from src.llm_adapter.runner import Runner
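
# Assumed MockProvider contract (inferred from its use in these tests, not a
# documented API): a provider raises the matching error, e.g. TimeoutError for
# "[TIMEOUT]", whenever the prompt contains one of its error_markers, and
# otherwise responds with "echo(<provider name>): ...".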


def _providers_for(marker: str):
    failing = MockProvider("p1", base_latency_ms=5, error_markers={marker})
    fallback = MockProvider("p2", base_latency_ms=5, error_markers=set())
    return failing, fallback


def _read_metrics(path):
    return [
        json.loads(line)
        for line in path.read_text().splitlines()
        if line.strip()
    ]
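

# Illustrative shape of the JSONL lines parsed above. The field names are
# asserted in the tests below; the values shown here are made-up examples:
#   {"event": "provider_error", "provider": "p1", "attempt": 1,
#    "error_type": "TimeoutError", "request_fingerprint": "<hash>"}
#   {"event": "provider_success", "provider": "p2", "attempt": 2,
#    "shadow_used": false}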


def test_timeout_fallback():
    p1, p2 = _providers_for("[TIMEOUT]")
    runner = Runner([p1, p2])

    response = runner.run(ProviderRequest(prompt="[TIMEOUT] hello"))
    assert response.text.startswith("echo(p2):")


def test_ratelimit_retry_fallback():
    p1, p2 = _providers_for("[RATELIMIT]")
    runner = Runner([p1, p2])

    response = runner.run(ProviderRequest(prompt="[RATELIMIT] test"))
    assert response.text.startswith("echo(p2):")


def test_invalid_json_fallback():
    p1, p2 = _providers_for("[INVALID_JSON]")
    runner = Runner([p1, p2])

    response = runner.run(ProviderRequest(prompt="[INVALID_JSON] test"))
    assert response.text.startswith("echo(p2):")


def test_timeout_fallback_records_metrics(tmp_path):
    p1, p2 = _providers_for("[TIMEOUT]")
    runner = Runner([p1, p2])

    metrics_path = tmp_path / "fallback.jsonl"
    response = runner.run(
        ProviderRequest(prompt="[TIMEOUT] metrics"),
        shadow=None,
        shadow_metrics_path=metrics_path,
    )

    assert response.text.startswith("echo(p2):")

    payloads = _read_metrics(metrics_path)
    error_event = next(item for item in payloads if item["event"] == "provider_error")
    success_event = next(
        item for item in payloads if item["event"] == "provider_success"
    )

    assert error_event["provider"] == "p1"
    assert error_event["attempt"] == 1
    assert error_event["error_type"] == "TimeoutError"
    assert error_event["request_fingerprint"]

    assert success_event["provider"] == "p2"
    assert success_event["attempt"] == 2
    assert success_event["shadow_used"] is False


def test_runner_emits_chain_failed_metric(tmp_path):
    failing1 = MockProvider("p1", base_latency_ms=5, error_markers={"[TIMEOUT]"})
    failing2 = MockProvider("p2", base_latency_ms=5, error_markers={"[TIMEOUT]"})
    runner = Runner([failing1, failing2])

    metrics_path = tmp_path / "failure.jsonl"

    with pytest.raises(TimeoutError):
        runner.run(
            ProviderRequest(prompt="[TIMEOUT] hard"),
            shadow=None,
            shadow_metrics_path=metrics_path,
        )

    payloads = _read_metrics(metrics_path)
    error_events = [item for item in payloads if item["event"] == "provider_error"]
    assert {event["provider"] for event in error_events} == {"p1", "p2"}

    chain_event = next(
        item for item in payloads if item["event"] == "provider_chain_failed"
    )
    assert chain_event["provider_attempts"] == 2
    assert chain_event["last_error_type"] == "TimeoutError"
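

# To run just this module (assuming a standard pytest setup at the repo root):
#   pytest projects/04-llm-adapter-shadow/tests/test_err_cases.py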