Coverage for projects/04-llm-adapter-shadow/src/llm_adapter/provider_spi.py: 100% (22 statements)
« prev ^ index » next — coverage.py v7.10.7, created at 2025-09-24 01:32 +0000
1from __future__ import annotations
3from dataclasses import dataclass
4from typing import Any, Protocol
@dataclass
class ProviderRequest:
    """Input payload handed to a provider's ``invoke`` call.

    Attributes mirror the minimal information a provider needs:
    the prompt text, a generation budget, and optional extras.
    """

    # The prompt text forwarded to the provider.
    prompt: str
    # Generation budget in tokens; defaults to 256.
    max_tokens: int = 256
    # Provider-specific settings, if any; ``None`` means "no extras".
    options: dict[str, Any] | None = None
@dataclass
class TokenUsage:
    """Token counts recorded for one provider call."""

    # Tokens consumed by the input prompt.
    prompt: int
    # Tokens produced in the completion.
    completion: int

    @property
    def total(self) -> int:
        """Combined token count (prompt plus completion)."""
        return self.completion + self.prompt
@dataclass
class ProviderResponse:
    """Result returned by a provider's ``invoke`` call."""

    # Generated completion text.
    text: str
    # Token accounting for this call.
    token_usage: TokenUsage
    # Wall-clock duration of the call, in milliseconds.
    latency_ms: int
class ProviderSPI(Protocol):
    """Structural (duck-typed) interface an LLM provider must satisfy."""

    def name(self) -> str:
        """Return this provider's identifier string."""

    def capabilities(self) -> set[str]:
        """Return the set of capability labels this provider supports."""

    def invoke(self, request: ProviderRequest) -> ProviderResponse:
        """Execute *request* and return the provider's response."""
42__all__ = ["ProviderSPI", "ProviderRequest", "ProviderResponse", "TokenUsage"]