Skip to content

Client

LLM client for unified access to language models.

logger module-attribute

logger = getLogger(__name__)

LLMClient

Client for interacting with LLM services in a unified way.

Source code in src/codemap/llm/client.py
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
class LLMClient:
	"""Unified entry point for talking to LLM services."""

	# The base class ships with no templates; subclasses may override this mapping.
	DEFAULT_TEMPLATES: ClassVar[dict[str, str]] = {}

	def __init__(
		self,
		config_loader: ConfigLoader,
		repo_path: Path | None = None,
	) -> None:
		"""
		Create a new LLM client.

		Args:
		    config_loader: ConfigLoader instance supplying LLM settings
		    repo_path: Optional repository path used when loading configuration
		"""
		self.config_loader = config_loader
		self.repo_path = repo_path
		# Per-instance copy so template edits never touch the class-level default.
		self._templates = dict(self.DEFAULT_TEMPLATES)

	def set_template(self, name: str, template: str) -> None:
		"""
		Register (or replace) a prompt template under the given name.

		Args:
		    name: Template name
		    template: Template content

		"""
		self._templates[name] = template

	def completion(
		self,
		messages: list[MessageDict],
		pydantic_model: type[PydanticModelT] | None = None,
	) -> str | PydanticModelT:
		"""
		Run a completion against the configured LLM.

		Args:
		    messages: Conversation messages forwarded to the LLM
		    pydantic_model: Optional Pydantic model used to validate the response

		Returns:
		    The generated text, or a validated Pydantic model instance

		Raises:
		    LLMError: If the API call fails

		"""
		# Delegate the actual request to the shared API helper.
		loader = self.config_loader
		return call_llm_api(
			messages=messages,
			pydantic_model=pydantic_model,
			config_loader=loader,
		)

DEFAULT_TEMPLATES class-attribute

DEFAULT_TEMPLATES: dict[str, str] = {}

__init__

__init__(
	config_loader: ConfigLoader,
	repo_path: Path | None = None,
) -> None

Initialize the LLM client.

Parameters:

Name Type Description Default
config_loader ConfigLoader

ConfigLoader instance to use

required
repo_path Path | None

Path to the repository (for loading configuration)

None
Source code in src/codemap/llm/client.py
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
def __init__(
	self,
	config_loader: ConfigLoader,
	repo_path: Path | None = None,
) -> None:
	"""
	Create a new LLM client.

	Args:
	    config_loader: ConfigLoader instance supplying LLM settings
	    repo_path: Optional repository path used when loading configuration
	"""
	self.config_loader = config_loader
	self.repo_path = repo_path
	# Per-instance copy so template edits never touch the class-level default.
	self._templates = dict(self.DEFAULT_TEMPLATES)

repo_path instance-attribute

repo_path = repo_path

config_loader instance-attribute

config_loader = config_loader

set_template

set_template(name: str, template: str) -> None

Set a prompt template.

Parameters:

Name Type Description Default
name str

Template name

required
template str

Template content

required
Source code in src/codemap/llm/client.py
40
41
42
43
44
45
46
47
48
49
def set_template(self, name: str, template: str) -> None:
	"""
	Register (or replace) a prompt template under the given name.

	Args:
	    name: Template name
	    template: Template content

	"""
	self._templates[name] = template

completion

completion(
	messages: list[MessageDict],
	pydantic_model: type[PydanticModelT] | None = None,
) -> str | PydanticModelT

Generate text using the configured LLM.

Parameters:

Name Type Description Default
messages list[MessageDict]

List of messages to send to the LLM

required
pydantic_model type[PydanticModelT] | None

Optional Pydantic model for response validation

None

Returns:

Type Description
str | PydanticModelT

Generated text or Pydantic model instance

Raises:

Type Description
LLMError

If the API call fails

Source code in src/codemap/llm/client.py
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
def completion(
	self,
	messages: list[MessageDict],
	pydantic_model: type[PydanticModelT] | None = None,
) -> str | PydanticModelT:
	"""
	Run a completion against the configured LLM.

	Args:
	    messages: Conversation messages forwarded to the LLM
	    pydantic_model: Optional Pydantic model used to validate the response

	Returns:
	    The generated text, or a validated Pydantic model instance

	Raises:
	    LLMError: If the API call fails

	"""
	# Delegate the actual request to the shared API helper.
	loader = self.config_loader
	return call_llm_api(
		messages=messages,
		pydantic_model=pydantic_model,
		config_loader=loader,
	)