Coverage for projects/04-llm-adapter-shadow/tests/test_err_cases.py: 100%

56 statements  

coverage.py v7.10.7, created at 2025-09-24 01:32 +0000

import json

import pytest

from src.llm_adapter.errors import TimeoutError
from src.llm_adapter.providers.mock import MockProvider
from src.llm_adapter.runner import Runner
from src.llm_adapter.provider_spi import ProviderRequest


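# Build a primary MockProvider that errors on the given marker and a clean fallback provider.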
def _providers_for(marker: str):
    failing = MockProvider("p1", base_latency_ms=5, error_markers={marker})
    fallback = MockProvider("p2", base_latency_ms=5, error_markers=set())
    return failing, fallback


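# Parse the JSONL metrics file written by the runner, one event per non-empty line.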
def _read_metrics(path):
    return [json.loads(line) for line in path.read_text().splitlines() if line.strip()]


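# A [TIMEOUT] marker makes p1 fail, so the runner should fall back to p2.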
def test_timeout_fallback():
    p1, p2 = _providers_for("[TIMEOUT]")
    runner = Runner([p1, p2])

    response = runner.run(ProviderRequest(prompt="[TIMEOUT] hello"))
    assert response.text.startswith("echo(p2):")


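# A [RATELIMIT] error on p1 should likewise end with p2 serving the request.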
def test_ratelimit_retry_fallback():
    p1, p2 = _providers_for("[RATELIMIT]")
    runner = Runner([p1, p2])

    response = runner.run(ProviderRequest(prompt="[RATELIMIT] test"))
    assert response.text.startswith("echo(p2):")


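# An invalid-JSON error on p1 should also be absorbed by falling back to p2.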
def test_invalid_json_fallback():
    p1, p2 = _providers_for("[INVALID_JSON]")
    runner = Runner([p1, p2])

    response = runner.run(ProviderRequest(prompt="[INVALID_JSON] test"))
    assert response.text.startswith("echo(p2):")


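# Fallback should record a provider_error event for p1 and a provider_success event for p2.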
def test_timeout_fallback_records_metrics(tmp_path):
    p1, p2 = _providers_for("[TIMEOUT]")
    runner = Runner([p1, p2])

    metrics_path = tmp_path / "fallback.jsonl"
    response = runner.run(
        ProviderRequest(prompt="[TIMEOUT] metrics"),
        shadow=None,
        shadow_metrics_path=metrics_path,
    )

    assert response.text.startswith("echo(p2):")

    payloads = _read_metrics(metrics_path)
    error_event = next(item for item in payloads if item["event"] == "provider_error")
    success_event = next(item for item in payloads if item["event"] == "provider_success")

    assert error_event["provider"] == "p1"
    assert error_event["attempt"] == 1
    assert error_event["error_type"] == "TimeoutError"
    assert error_event["request_fingerprint"]

    assert success_event["provider"] == "p2"
    assert success_event["attempt"] == 2
    assert success_event["shadow_used"] is False


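# When every provider fails, the runner raises the last error and logs a provider_chain_failed event.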
def test_runner_emits_chain_failed_metric(tmp_path):
    failing1 = MockProvider("p1", base_latency_ms=5, error_markers={"[TIMEOUT]"})
    failing2 = MockProvider("p2", base_latency_ms=5, error_markers={"[TIMEOUT]"})
    runner = Runner([failing1, failing2])

    metrics_path = tmp_path / "failure.jsonl"

    with pytest.raises(TimeoutError):
        runner.run(
            ProviderRequest(prompt="[TIMEOUT] hard"),
            shadow=None,
            shadow_metrics_path=metrics_path,
        )

    payloads = _read_metrics(metrics_path)
    error_events = [item for item in payloads if item["event"] == "provider_error"]
    assert {event["provider"] for event in error_events} == {"p1", "p2"}

    chain_event = next(item for item in payloads if item["event"] == "provider_chain_failed")
    assert chain_event["provider_attempts"] == 2
    assert chain_event["last_error_type"] == "TimeoutError"