Skip to content

Utils

Utility functions for working with LLMs.

logger module-attribute

logger = getLogger(__name__)

load_prompt_template

load_prompt_template(
	template_path: str | None,
) -> str | None

Load custom prompt template from file.

Parameters:

Name Type Description Default
template_path str | None

Path to prompt template file

required

Returns:

Type Description
str | None

Loaded template or None if loading failed

Source code in src/codemap/llm/utils.py
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
def load_prompt_template(template_path: str | None) -> str | None:
	"""
	Load custom prompt template from file.

	Args:
	    template_path: Path to prompt template file

	Returns:
	    Loaded template or None if loading failed

	"""
	if not template_path:
		return None

	try:
		template_file = Path(template_path)
		with template_file.open("r") as f:
			return f.read()
	except OSError:
		logger.warning("Could not load prompt template: %s", template_path)
		return None

LLMResponseType module-attribute

LLMResponseType = (
	dict[str, Any] | Mapping[str, Any] | object
)

is_ollama_model

is_ollama_model(model_name: str) -> bool

Check if the model name is an Ollama model.

Source code in src/codemap/llm/utils.py
40
41
42
def is_ollama_model(model_name: str) -> bool:
	"""Return True when the model name carries the Ollama provider prefix."""
	ollama_prefix = "ollama:"
	return model_name[: len(ollama_prefix)] == ollama_prefix