Basic Agent Example¶
Create your first agent with the Akordi Agents SDK.
Simple Agent¶
The most basic agent uses create_langgraph_agent:
import os

from akordi_agents.core import create_langgraph_agent
from akordi_agents.services import AWSBedrockService

# Configure AWS region
os.environ["AWS_REGION"] = "us-east-1"

# Create LLM service
llm_service = AWSBedrockService()

# Create agent
agent = create_langgraph_agent(
    name="simple_agent",
    llm_service=llm_service,
)

# Process a request
response = agent.process_request({
    "query": "What is artificial intelligence?",
    "system_message": "You are a helpful AI assistant.",
    "max_tokens": 500,
})

# Print response
print(response["llm_response"]["response"])
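process_request returns a plain dictionary; the reply text sits under llm_response["response"], and the full structure (token usage, model info, errors) is covered in Response Handling below.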
Using AgentBuilder¶
For more control, use the AgentBuilder pattern:
from akordi_agents.core import AgentBuilder
from akordi_agents.services import AWSBedrockService

# Create LLM service
llm_service = AWSBedrockService()

# Build agent
agent = (
    AgentBuilder("my_agent")
    .with_llm_service_instance(llm_service)
    .with_config({
        "temperature": 0.1,
        "max_tokens": 1000,
    })
    .build()
)

# Use agent
response = agent.process_request({
    "query": "Explain machine learning",
    "system_message": "You are a tech expert.",
})
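Because the builder takes a ready-made service instance, one AWSBedrockService can back several differently-tuned agents. Here is a sketch using only the builder calls shown above:

# Two agents with different sampling settings, sharing one Bedrock client
precise_agent = (
    AgentBuilder("precise_agent")
    .with_llm_service_instance(llm_service)
    .with_config({"temperature": 0.0, "max_tokens": 500})
    .build()
)
creative_agent = (
    AgentBuilder("creative_agent")
    .with_llm_service_instance(llm_service)
    .with_config({"temperature": 0.8, "max_tokens": 1000})
    .build()
)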
CustomAgent (No LangGraph)¶
If you don't need LangGraph's workflow features, you can instantiate CustomAgent directly:

from akordi_agents.core import CustomAgent
from akordi_agents.services import AWSBedrockService

# Create agent directly
agent = CustomAgent(
    name="basic_agent",
    llm_service=AWSBedrockService(),
    config={"temperature": 0.1},
)

# Initialize with an empty context before first use
agent.initialize({})

# Process request
response = agent.process_request({
    "query": "What is Python?",
})
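Assuming CustomAgent returns the same response shape as the LangGraph agents (this page does not show otherwise), the reply is read the same way:

print(response["llm_response"]["response"])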
With Chat History¶
Enable conversation persistence:
import os

from akordi_agents.core import create_langgraph_agent
from akordi_agents.services import AWSBedrockService

# Configure DynamoDB tables
os.environ["CHAT_SESSIONS_TABLE_NAME"] = "my-sessions"
os.environ["CHAT_MESSAGES_TABLE_NAME"] = "my-messages"

# Create agent
agent = create_langgraph_agent(
    name="chat_agent",
    llm_service=AWSBedrockService(),
)

# First message
response = agent.process_request({
    "query": "Hello! My name is Alex.",
    "user_id": "user-123",
    "chat_id": "chat-456",
    "system_message": "You are a friendly assistant.",
})
print(response["llm_response"]["response"])

# Second message (remembers context)
response = agent.process_request({
    "query": "What's my name?",
    "user_id": "user-123",
    "chat_id": "chat-456",
})
print(response["llm_response"]["response"])
# Output: "Your name is Alex!"
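Since every turn repeats the same user_id and chat_id, a thin wrapper can pin them once. This is a sketch built only on the process_request payload shown above; ChatSession is not an SDK class:

class ChatSession:
    """Pins user_id/chat_id so each turn lands in the same conversation."""

    def __init__(self, agent, user_id: str, chat_id: str):
        self.agent = agent
        self.user_id = user_id
        self.chat_id = chat_id

    def ask(self, query: str, **extra) -> str:
        response = self.agent.process_request({
            "query": query,
            "user_id": self.user_id,
            "chat_id": self.chat_id,
            **extra,
        })
        return response["llm_response"]["response"]


session = ChatSession(agent, user_id="user-123", chat_id="chat-456")
print(session.ask("Hello! My name is Alex.", system_message="You are a friendly assistant."))
print(session.ask("What's my name?"))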
With Knowledge Base¶
Add knowledge base search:
from akordi_agents.core import create_langgraph_agent
from akordi_agents.services import AWSBedrockService
from akordi_agents.handlers import AWSBedrockSearchHandler

# Create search handler
search_handler = AWSBedrockSearchHandler(
    knowledge_base_id="your-kb-id",
)

# Create agent with search
agent = create_langgraph_agent(
    name="kb_agent",
    llm_service=AWSBedrockService(),
    search_handler=search_handler,
)

# Query knowledge base
response = agent.process_request({
    "query": "What are the company policies on remote work?",
    "knowledge_base_id": "your-kb-id",
})

# Response includes search results
print("Answer:", response["llm_response"]["response"])
print("Sources:", response.get("search_results", []))
Response Handling¶
Handle different response scenarios:
response = agent.process_request({
    "query": "Hello",
})

# Check success
if response.get("success"):
    # Get response text
    answer = response["llm_response"]["response"]

    # Get token usage
    tokens = response["llm_response"].get("token_usage", {})
    print(f"Tokens used: {tokens.get('total_tokens', 'N/A')}")

    # Get model info
    model = response["llm_response"].get("model_info", {})
    print(f"Model: {model.get('model_id', 'Unknown')}")
else:
    # Handle error
    error = response.get("error", "Unknown error")
    validation_errors = response.get("validation_errors", [])
    print(f"Error: {error}")
    for ve in validation_errors:
        print(f"  - {ve['field']}: {ve['message']}")
Complete Example¶
Here's a complete working example:
#!/usr/bin/env python
"""Basic agent example."""
import os

from akordi_agents.core import create_langgraph_agent
from akordi_agents.services import AWSBedrockService


def main():
    # Configure
    os.environ["AWS_REGION"] = "us-east-1"

    # Create agent
    agent = create_langgraph_agent(
        name="demo_agent",
        llm_service=AWSBedrockService(),
        config={
            "temperature": 0.1,
            "max_iterations": 5,
        },
    )

    # Interactive loop
    print("Chat with the agent (type 'quit' to exit)")
    print("-" * 50)

    while True:
        query = input("\nYou: ").strip()
        if query.lower() in ["quit", "exit", "q"]:
            break
        if not query:
            continue

        response = agent.process_request({
            "query": query,
            "system_message": "You are a helpful assistant.",
            "max_tokens": 500,
        })

        if response.get("success"):
            print(f"\nAgent: {response['llm_response']['response']}")
        else:
            print(f"\nError: {response.get('error', 'Unknown error')}")

    print("\nGoodbye!")


if __name__ == "__main__":
    main()
Running the Example¶
# Set up environment
export AWS_REGION=us-east-1
# Run the example
poetry run python examples/basic_agent.py
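Bedrock calls also need AWS credentials to be resolvable (environment variables, a shared credentials file, or an instance role). Assuming you use named profiles, for example:

export AWS_PROFILE=my-profile  # hypothetical profile name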
Agent Configuration Options¶
WorkflowConfig¶
Configure LangGraph workflow behavior:
from akordi_agents.core import create_langgraph_agent
from akordi_agents.core import create_langgraph_agent
from akordi_agents.services import AWSBedrockService

llm_service = AWSBedrockService()

agent = create_langgraph_agent(
    name="configured_agent",
    llm_service=llm_service,
    config={
        # Validation
        "enable_validation": True,  # Enable input validation

        # Tools
        "enable_tools": True,       # Enable tool use
        "max_iterations": 10,       # Max tool call iterations

        # LLM settings
        "temperature": 0.1,         # Lower = more deterministic
        "max_tokens": 2000,         # Max response length

        # Tracing
        "enable_tracing": True,     # Enable LangSmith tracing
    },
)
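The streaming pattern below constructs the typed WorkflowConfig directly. Assuming the dict keys above map one-to-one to WorkflowConfig fields (an assumption this page only confirms for enable_tracing), the typed equivalent would look like:

from akordi_agents.core.langgraph import WorkflowConfig

# Hypothetical: only enable_tracing is confirmed as a WorkflowConfig field
config = WorkflowConfig(
    enable_validation=True,
    enable_tools=True,
    max_iterations=10,
    temperature=0.1,
    max_tokens=2000,
    enable_tracing=True,
)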
Model Selection¶
Use different Bedrock models:
from akordi_agents.services import AWSBedrockService

# Claude 3 Sonnet (balanced)
llm_service = AWSBedrockService(
    model_id="anthropic.claude-3-sonnet-20240229-v1:0"
)

# Claude 3 Haiku (fast, cost-effective)
llm_service = AWSBedrockService(
    model_id="anthropic.claude-3-haiku-20240307-v1:0"
)

# Claude 3 Opus (most capable)
llm_service = AWSBedrockService(
    model_id="anthropic.claude-3-opus-20240229-v1:0"
)

# With custom region
llm_service = AWSBedrockService(
    model_id="anthropic.claude-3-sonnet-20240229-v1:0",
    region_name="ap-southeast-2",
)
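Since the model ID is just a constructor argument, it can come from configuration. A sketch using a BEDROCK_MODEL_ID environment variable (our own name, not one the SDK reads):

import os

from akordi_agents.services import AWSBedrockService

# BEDROCK_MODEL_ID is a hypothetical variable; default to Haiku if unset
model_id = os.environ.get(
    "BEDROCK_MODEL_ID", "anthropic.claude-3-haiku-20240307-v1:0"
)
llm_service = AWSBedrockService(model_id=model_id)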
Common Patterns¶
Error Handling Pattern¶
def safe_query(agent, query: str) -> str:
    """Safely query an agent with error handling."""
    try:
        response = agent.process_request({
            "query": query,
            "system_message": "You are a helpful assistant.",
            "max_tokens": 1000,
        })

        if response.get("success"):
            return response["llm_response"]["response"]
        else:
            error = response.get("error", "Unknown error")
            validation_errors = response.get("validation_errors", [])
            if validation_errors:
                return f"Validation failed: {validation_errors}"
            return f"Error: {error}"
    except Exception as e:
        return f"Exception: {str(e)}"
Streaming Pattern (Async)¶
import asyncio

from akordi_agents.core.langgraph import ToolUseWorkflow, WorkflowConfig
from akordi_agents.services import AWSBedrockService

llm_service = AWSBedrockService()


async def stream_response(query: str):
    """Stream response chunks."""
    workflow = ToolUseWorkflow(
        name="streaming_agent",
        config=WorkflowConfig(enable_tracing=False),
        llm_service=llm_service,
    )
    async for chunk in workflow.stream({"query": query}):
        print(chunk, end="", flush=True)
    print()  # Newline at end


# Run
asyncio.run(stream_response("Tell me about AI"))
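Note that asyncio.run() starts a fresh event loop, so call it only from synchronous code; inside an already-running loop (a notebook, for instance) await stream_response(...) directly.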
Related Documentation¶
- Recipes - Complete, copy-paste code examples for all patterns
- Recipes: Basic Agent - Minimal working example
- Tool Integration - Add tools to your agent
- Multi-Agent - Coordinate multiple agents
- Guardrails - Add safety controls