Services

Services provide core functionality for LLM interactions, chat history, and prompt management.

LLM Services

AWSBedrockService

Primary service for AWS Bedrock LLM integration:

from akordi_agents.services import AWSBedrockService

llm_service = AWSBedrockService()

response = llm_service.generate_response(
    prompt="What is machine learning?",
    context="Machine learning is a subset of AI...",
    system_message="You are a helpful assistant.",
    max_tokens=1000,
    temperature=0.1,
)

Configuration:

from akordi_agents.config import AWSBedrockConfig

config = AWSBedrockConfig(
    region_name="us-east-1",
    model_id="anthropic.claude-3-sonnet-20240229-v1:0",
    max_tokens=4096,
    temperature=0.1,
)

llm_service = AWSBedrockService(config=config)

LLMServiceFactory

Create LLM services dynamically:

from akordi_agents.services import LLMServiceFactory, get_llm_service

# Get default service
service = get_llm_service()

# Or use factory
service = LLMServiceFactory.create("bedrock", config={
    "model_id": "anthropic.claude-3-sonnet-20240229-v1:0"
})

Custom LLM Service

Implement the LLMServiceInterface:

from akordi_agents.core.interfaces import LLMServiceInterface
from typing import Any, Dict, Optional

class MyLLMService(LLMServiceInterface):
    def __init__(self, api_key: str):
        self.api_key = api_key

    def generate_response(
        self,
        prompt: str,
        context: Optional[str] = None,
        **kwargs
    ) -> Dict[str, Any]:
        # Your LLM API call
        response = self.call_api(prompt, context, **kwargs)

        return {
            "response": response["text"],
            "model_info": {"model_id": "custom-model"},
            "token_usage": {
                "input_tokens": response["input_tokens"],
                "output_tokens": response["output_tokens"],
            }
        }

    def get_service_name(self) -> str:
        return "my_llm_service"

Chat History Service

ChatHistoryService

Manage chat sessions and messages in DynamoDB:

from akordi_agents.services import ChatHistoryService

chat_service = ChatHistoryService()

# Create a session
session = chat_service.create_chat_session(
    user_id="user-123",
    chat_id="chat-456",
    title="Technical Discussion",
    context_files=["doc1.pdf", "doc2.pdf"],
    metadata={"topic": "AI"}
)

# Add messages
chat_service.add_message(
    chat_id="chat-456",
    actor="user",
    message="What is machine learning?",
)

chat_service.add_message(
    chat_id="chat-456",
    actor="assistant",
    message="Machine learning is a subset of AI...",
)

# Get history
messages = chat_service.get_chat_history(chat_id="chat-456", limit=50)

# List user sessions
sessions = chat_service.list_sessions(user_id="user-123")

ChatHistoryManager

Utility class for chat history operations:

from akordi_agents.utils.chat_history_utils import ChatHistoryManager

manager = ChatHistoryManager()

# Handle chat request
chat_id = manager.handle_chat_request(
    user_id="user-123",
    query="Hello!",
    chat_id=None,  # Creates new session
    context_files=["file.pdf"],
)

# Add assistant response
manager.add_assistant_response(
    chat_id=chat_id,
    response="Hi there! How can I help?",
    metadata={"tokens": 50}
)

# Get formatted history
history = manager.get_chat_history_for_llm(
    user_id="user-123",
    chat_id=chat_id
)

Prompt Service

PromptManager

Manage and customize prompts:

from akordi_agents.services import get_prompt_manager

manager = get_prompt_manager()

# Get prompt for a persona
prompt = manager.get_prompt(
    persona="construction_expert",
    context={
        "project_type": "commercial",
        "location": "Sydney"
    }
)

PromptFactory

Create prompts programmatically:

from akordi_agents.services import PromptFactory

factory = PromptFactory()

# Register custom prompt builder
factory.register("custom", CustomPromptBuilder())

# Get prompt
prompt = factory.get_prompt("custom", context={...})

Custom Prompt Builder

from akordi_agents.services import PromptBuilder

class MyPromptBuilder(PromptBuilder):
    def build(self, context: dict) -> str:
        topic = context.get("topic", "general")

        return f"""You are an expert assistant specializing in {topic}.

        Guidelines:
        - Provide accurate, detailed information
        - Use examples when helpful
        - Cite sources when possible

        Context: {context.get('background', '')}
        """

Model Service

ModelService

Manage LLM model information:

from akordi_agents.services import ModelService

model_service = ModelService()

# Get model info
info = model_service.get_model_info("anthropic.claude-3-sonnet")

# List available models
models = model_service.list_models()

Token Usage Service

TokenUsageService

Track LLM token consumption:

from akordi_agents.services.token_usage_service import TokenUsageService

token_service = TokenUsageService()

# Initialize agent tracking
token_service.initialize_agent_usage(
    agent_id="my-agent",
    model_id="claude-3-sonnet",
    metadata={"environment": "production"}
)

# Record usage
token_service.record_query_usage(
    agent_id="my-agent",
    query_text="What is AI?",
    input_tokens=50,
    output_tokens=200,
    user_id="user-123",
)

# Get statistics
stats = token_service.get_agent_usage("my-agent")
print(f"Total tokens: {stats.total_token_usage}")
print(f"LLM calls: {stats.number_of_llm_calls}")

# Get query history
history = token_service.get_query_history("my-agent", limit=10)

Search Handlers

AWSBedrockSearchHandler

Search AWS Bedrock Knowledge Base:

from akordi_agents.handlers import AWSBedrockSearchHandler

search_handler = AWSBedrockSearchHandler(
    knowledge_base_id="your-kb-id",
)

results = search_handler.search(
    query="What are the safety guidelines?",
    max_results=10,
    min_score=0.5,
)

for result in results:
    print(f"Score: {result.score}")
    print(f"Text: {result.text}")
    print(f"Metadata: {result.metadata}")

AzureSearchHandler

Search Azure Cognitive Search:

from akordi_agents.handlers import AzureSearchHandler

search_handler = AzureSearchHandler(
    endpoint="https://your-search.search.windows.net",
    index_name="your-index",
    api_key="your-api-key",
)

Custom Search Handler

from akordi_agents.core.interfaces import SearchHandlerInterface
from akordi_agents.models.types import SearchResult
from typing import List

class MySearchHandler(SearchHandlerInterface):
    def search(self, query: str, **kwargs) -> List[SearchResult]:
        # Your search implementation
        results = self.custom_search(query)

        return [
            SearchResult(
                text=r["content"],
                score=r["relevance"],
                metadata=r.get("metadata", {})
            )
            for r in results
        ]

    def get_handler_name(self) -> str:
        return "my_search_handler"

Agent Utilities

Utility functions for fetching agent configurations and building agents from DynamoDB.

get_system_prompt

Fetch system prompts from DynamoDB with caching:

from akordi_agents.utils.agent import get_system_prompt

# Get system prompt for an agent
prompt = get_system_prompt(agent_code="AP-001")

if prompt:
    print(f"System prompt: {prompt}")

get_agent_by_code

Fetch agent configuration from DynamoDB:

from akordi_agents.utils.agent import get_agent_by_code

# Get agent configuration
agent_config = get_agent_by_code("my-agent-001")

print(f"Agent name: {agent_config['name']}")
print(f"Model: {agent_config['model']}")

get_agent

Build a CustomAgent from DynamoDB configuration:

from akordi_agents.utils.agent import get_agent

# Build agent from DynamoDB configuration
agent = get_agent("my-agent-001")

# Process a request
response = agent.process_request({
    "query": "What is the project status?",
    "system_message": "You are a helpful assistant.",
})

print(response["llm_response"]["response"])

With custom LLM service:

from akordi_agents.utils.agent import get_agent
from akordi_agents.core.interfaces import LLMServiceInterface

class MyCustomLLMService(LLMServiceInterface):
    def generate_response(self, prompt, context=None, **kwargs):
        return {"response": "...", "model_info": {}, "token_usage": {}}

    def get_service_name(self):
        return "my_custom_service"

# Use custom LLM service
agent = get_agent("my-agent-001", llm_service=MyCustomLLMService())

Service Registration

Register custom services for use with AgentBuilder:

from akordi_agents.core import (
    register_llm_service,
    register_search_handler,
)

# Register services
register_llm_service("my_llm", MyLLMService)
register_search_handler("my_search", MySearchHandler)

# Use in AgentBuilder
from akordi_agents.core import AgentBuilder

agent = (
    AgentBuilder("my_agent")
    .with_llm_service("my_llm")
    .with_search_handler("my_search")
    .build()
)

Best Practices

1. Use Environment Variables

import os

# Don't hardcode credentials
service = AWSBedrockService()  # Uses AWS_REGION, etc.

# Or configure via environment
os.environ["AWS_REGION"] = "us-east-1"

2. Handle Errors Gracefully

import logging

from akordi_agents.services import ChatHistoryService

logger = logging.getLogger(__name__)

try:
    service = ChatHistoryService()
    messages = service.get_chat_history(chat_id="...")
except Exception as e:
    logger.error(f"Chat service error: {e}")
    messages = []  # Graceful fallback

3. Reuse Service Instances (Connection Pooling)

# Good: Reuse service instance
class MyApp:
    def __init__(self):
        self.llm_service = AWSBedrockService()

    def process(self, query):
        return self.llm_service.generate_response(query)

# Avoid: Creating new instances per request
def process(query):
    service = AWSBedrockService()  # Don't do this
    return service.generate_response(query)

4. Monitor Token Usage

from akordi_agents.services.token_usage_service import TokenUsageService

# Enable token tracking for cost monitoring
token_service = TokenUsageService()

# Check usage periodically against a configured budget
BUDGET_LIMIT = 1_000_000  # maximum total tokens allowed for this agent

stats = token_service.get_agent_usage("my-agent")
if stats.total_token_usage > BUDGET_LIMIT:
    logger.warning("Token budget exceeded!")