Skip to content

Quick Start

Get up and running with Akordi Agents SDK in minutes.

Your First Agent

Step 1: Create an LLM Service

from akordi_agents.services import AWSBedrockService

# Create AWS Bedrock service used as the agent's LLM backend
# (presumably picks up AWS credentials/region from the environment —
# confirm against the AWSBedrockService docs)
llm_service = AWSBedrockService()

Step 2: Create an Agent

The simplest way to create an agent:

from akordi_agents.core import create_langgraph_agent

# Minimal agent: only a name and an LLM service are required.
agent = create_langgraph_agent(
    name="my_first_agent",
    llm_service=llm_service,
)

# Process a request — the payload is a plain dict.
response = agent.process_request({
    "query": "What is machine learning?",
    "system_message": "You are a helpful AI assistant.",
    "max_tokens": 500,
})

# The generated text is nested under llm_response -> response.
print(response["llm_response"]["response"])

Adding Tools

Tools extend your agent's capabilities:

from akordi_agents.tools import Tool

class WeatherTool(Tool):
    """Example tool that reports the current weather for a location."""

    def get_name(self) -> str:
        return "get_weather"

    def get_description(self) -> str:
        return "Get the current weather for a specified location"

    def get_input_schema(self) -> dict:
        # JSON Schema: a single required string property, "location".
        location_schema = {
            "type": "string",
            "description": "City name or coordinates",
        }
        return {
            "type": "object",
            "properties": {"location": location_schema},
            "required": ["location"],
        }

    def execute(self, **kwargs) -> dict:
        place = kwargs.get("location", "Unknown")
        # Your weather API logic here
        report = {"location": place}
        report["temperature"] = "22°C"
        report["conditions"] = "Sunny"
        return report

# Create agent with tools
weather_tool = WeatherTool()

agent = create_langgraph_agent(
    name="weather_agent",
    llm_service=llm_service,
    tools=[weather_tool],
    config={
        "enable_tools": True,   # opt in to tool calling
        "temperature": 0.1,     # low temperature for more deterministic tool use
    }
)

# The agent will automatically use the tool when needed
response = agent.process_request({
    "query": "What's the weather in London?",
    "system_message": "You are a helpful weather assistant.",
})

Adding Guardrails

Protect your agent with input validation:

from akordi_agents.core.interfaces import ValidatorInterface
from akordi_agents.models.validation_models import ValidationResult, ValidationError

class ContentValidator(ValidatorInterface):
    """Validate user input for safety.

    Rejects queries containing any word from a small prohibited list.
    NOTE: matching is substring-based, so e.g. "harmful" also matches
    inside longer words — tighten to word-boundary matching if needed.
    """

    # Words that cause a query to be rejected (matched case-insensitively).
    PROHIBITED_WORDS = ("harmful", "illegal", "dangerous")

    def validate(self, data: dict) -> ValidationResult:
        """Check data["query"] against the prohibited-word list.

        Returns a ValidationResult with is_valid=False and one
        ValidationError per prohibited word found in the query.
        """
        query = data.get("query", "")
        lowered = query.lower()  # lowercase once, outside the loop
        errors = []

        for word in self.PROHIBITED_WORDS:
            if word in lowered:
                errors.append(ValidationError(
                    field="query",
                    # Include the matched word so callers can see *why*
                    # validation failed (the original f-string had no
                    # placeholder at all).
                    message=f"Query contains prohibited content: {word!r}"
                ))

        return ValidationResult(
            is_valid=len(errors) == 0,
            errors=errors
        )

    def get_validator_name(self) -> str:
        return "content_validator"

# Create agent with guardrails
validator = ContentValidator()

agent = create_langgraph_agent(
    name="safe_agent",
    llm_service=llm_service,
    validator=validator,
    config={
        "enable_validation": True,  # run the validator on incoming requests
    }
)

Using the AgentBuilder

For more control, use the AgentBuilder pattern:

from akordi_agents.core import AgentBuilder

# Fluent builder: each with_* call returns the builder; build() creates the agent.
agent = (
    AgentBuilder("advanced_agent")
    .with_llm_service_instance(llm_service)
    .with_validator_instance(validator)
    .with_tools([weather_tool])
    .with_langgraph(
        enable=True,
        config={
            "enable_validation": True,
            "enable_tools": True,
            "max_iterations": 10,  # presumably caps the agent loop — confirm semantics
            "temperature": 0.1,
        }
    )
    .build()
)

Chat History Integration

Persist conversations with DynamoDB:

# Set environment variables naming the DynamoDB tables for chat persistence
import os
os.environ["CHAT_SESSIONS_TABLE_NAME"] = "my-sessions"
os.environ["CHAT_MESSAGES_TABLE_NAME"] = "my-messages"

# Process request with chat tracking: user_id + chat_id identify the conversation
response = agent.process_request({
    "query": "Hello! What can you help me with?",
    "user_id": "user-123",
    "chat_id": "chat-456",
    "system_message": "You are a helpful assistant.",
})

# Subsequent requests in the same chat will have context
response = agent.process_request({
    "query": "Tell me more about that.",
    "user_id": "user-123",
    "chat_id": "chat-456",
})

Loading Agents from DynamoDB

Load agent configurations from DynamoDB:

import os
from akordi_agents.utils.agent import get_agent, get_system_prompt

# Set the agent table
os.environ["AKORDI_AGENT_TABLE"] = "my-agent-config-table"

# Build agent from DynamoDB configuration
agent = get_agent("my-agent-001")

# Get system prompt from DynamoDB
# NOTE(review): the agent_code "AP-001" differs from the agent id
# "my-agent-001" above — confirm the two identifiers are intentionally
# different.
system_prompt = get_system_prompt(agent_code="AP-001")

# Process a request; fall back to a generic prompt when none is configured.
response = agent.process_request({
    "query": "What is the project status?",
    "system_message": system_prompt or "You are a helpful assistant.",
})

print(response["llm_response"]["response"])

Knowledge Base Search

Integrate with AWS Bedrock Knowledge Base:

from akordi_agents.handlers import AWSBedrockSearchHandler

# Create search handler bound to a Bedrock Knowledge Base
search_handler = AWSBedrockSearchHandler(
    knowledge_base_id="your-kb-id",
)

agent = create_langgraph_agent(
    name="kb_agent",
    llm_service=llm_service,
    search_handler=search_handler,
)

# Query will search knowledge base for context
# NOTE(review): knowledge_base_id is supplied both to the handler and per
# request — confirm whether the per-request value overrides the handler's.
response = agent.process_request({
    "query": "What are the company policies on remote work?",
    "knowledge_base_id": "your-kb-id",
})

Complete Example

Here's a complete example putting it all together:

import ast
import operator
import os

from akordi_agents.core import create_langgraph_agent
from akordi_agents.services import AWSBedrockService
from akordi_agents.tools import Tool

# Configure environment (region used by the AWS SDK — adjust as needed)
os.environ["AWS_REGION"] = "us-east-1"

# Create LLM service backed by AWS Bedrock
llm_service = AWSBedrockService()

# Define a custom tool
class CalculatorTool(Tool):
    """Tool that evaluates basic arithmetic expressions safely.

    The original example used eval(), which executes arbitrary Python —
    a code-injection hole when the expression comes from an LLM or a
    user. This version parses the expression into an AST and walks it,
    allowing only numeric literals and arithmetic operators.
    """

    # Operators the evaluator will accept, mapped to their implementations.
    _OPS = {
        ast.Add: operator.add,
        ast.Sub: operator.sub,
        ast.Mult: operator.mul,
        ast.Div: operator.truediv,
        ast.FloorDiv: operator.floordiv,
        ast.Mod: operator.mod,
        ast.Pow: operator.pow,
        ast.USub: operator.neg,
        ast.UAdd: operator.pos,
    }

    def get_name(self) -> str:
        return "calculator"

    def get_description(self) -> str:
        return "Perform basic mathematical calculations"

    def get_input_schema(self) -> dict:
        return {
            "type": "object",
            "properties": {
                "expression": {
                    "type": "string",
                    "description": "Math expression to evaluate"
                }
            },
            "required": ["expression"]
        }

    @classmethod
    def _eval_node(cls, node):
        """Recursively evaluate one AST node.

        Raises ValueError for anything that is not a number, a binary
        arithmetic operation, or a unary +/-.
        """
        if isinstance(node, ast.Constant) and isinstance(node.value, (int, float)):
            return node.value
        if isinstance(node, ast.BinOp) and type(node.op) in cls._OPS:
            return cls._OPS[type(node.op)](
                cls._eval_node(node.left), cls._eval_node(node.right)
            )
        if isinstance(node, ast.UnaryOp) and type(node.op) in cls._OPS:
            return cls._OPS[type(node.op)](cls._eval_node(node.operand))
        raise ValueError(f"Unsupported expression element: {type(node).__name__}")

    def execute(self, **kwargs) -> dict:
        """Evaluate the expression.

        Returns {"result": value} on success or {"error": message} on any
        parse/evaluation failure (same contract as the original).
        """
        expr = kwargs.get("expression", "0")
        try:
            # Parse in "eval" mode (single expression) and walk the tree
            # instead of calling eval() on untrusted input.
            tree = ast.parse(expr, mode="eval")
            result = self._eval_node(tree.body)
            return {"result": result}
        except Exception as e:
            return {"error": str(e)}

# Create agent
agent = create_langgraph_agent(
    name="math_assistant",
    llm_service=llm_service,
    tools=[CalculatorTool()],
    config={
        "enable_tools": True,
        "temperature": 0.1,  # low temperature for more reliable tool use
    }
)

# Chat with the agent
response = agent.process_request({
    "query": "What is 42 * 17 + 256?",
    "system_message": "You are a helpful math assistant. Use the calculator tool for computations.",
    "max_tokens": 500,
})

# Print the model's final answer text
print(response["llm_response"]["response"])