Coverage for projects/04-llm-adapter-shadow/demo_shadow.py: 0% (10 statements)
coverage.py v7.10.7, created at 2025-09-24 01:32 +0000
from src.llm_adapter.providers.mock import MockProvider
from src.llm_adapter.provider_spi import ProviderRequest
from src.llm_adapter.runner import Runner


if __name__ == "__main__":
    # Two mock providers: the Runner is built with only the primary,
    # while the shadow is supplied per-run.
    primary = MockProvider("openai-like", base_latency_ms=20)
    shadow = MockProvider("anthropic-like", base_latency_ms=15)
    runner = Runner([primary])
    # The prompt "こんにちは、世界" is Japanese for "Hello, world".
    res = runner.run(ProviderRequest(prompt="こんにちは、世界"), shadow=shadow)
    print(res.text, res.latency_ms, "ms")
    print("Shadow metrics would be written to artifacts/runs-metrics.jsonl")