Coverage for projects/04-llm-adapter-shadow/src/llm_adapter/provider_spi.py: 100%

22 statements  

coverage.py v7.10.7, created at 2025-09-24 01:32 +0000

from __future__ import annotations

from dataclasses import dataclass
from typing import Any, Protocol


@dataclass
class ProviderRequest:
    prompt: str
    max_tokens: int = 256
    options: dict[str, Any] | None = None


@dataclass
class TokenUsage:
    prompt: int
    completion: int

    @property
    def total(self) -> int:
        return self.prompt + self.completion


@dataclass
class ProviderResponse:
    text: str
    token_usage: TokenUsage
    latency_ms: int


class ProviderSPI(Protocol):
    def name(self) -> str:
        ...

    def capabilities(self) -> set[str]:
        ...

    def invoke(self, request: ProviderRequest) -> ProviderResponse:
        ...


__all__ = ["ProviderSPI", "ProviderRequest", "ProviderResponse", "TokenUsage"]
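
Because ProviderSPI is a typing.Protocol, any class with matching method signatures satisfies it structurally, with no subclassing required. Below is a minimal sketch of a conforming provider; the EchoProvider class, its whitespace-based token counting, and the import path (inferred from the file path above) are illustrative assumptions, not part of this module.

import time

from llm_adapter.provider_spi import (  # path assumed from the report header
    ProviderRequest,
    ProviderResponse,
    ProviderSPI,
    TokenUsage,
)


class EchoProvider:
    """Illustrative provider that echoes the prompt back unchanged."""

    def name(self) -> str:
        return "echo"

    def capabilities(self) -> set[str]:
        return {"invoke"}

    def invoke(self, request: ProviderRequest) -> ProviderResponse:
        start = time.monotonic()
        text = request.prompt  # a real provider would call an LLM here
        usage = TokenUsage(
            prompt=len(request.prompt.split()),      # crude stand-in for tokenization
            completion=len(text.split()),
        )
        return ProviderResponse(
            text=text,
            token_usage=usage,
            latency_ms=int((time.monotonic() - start) * 1000),
        )


provider: ProviderSPI = EchoProvider()  # type-checks via structural typing
response = provider.invoke(ProviderRequest(prompt="hello world"))
assert response.token_usage.total == 4  # 2 prompt + 2 completion "tokens"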