Tools¶
Tools extend agent capabilities by providing specific functions the LLM can use.
How to Create a Tool¶
Every tool must extend the Tool base class and implement four methods:
from akordi_agents.tools import Tool
class MyTool(Tool):
    """Minimal example tool implementing the four required ``Tool`` methods."""

    def get_name(self) -> str:
        """Return unique tool identifier."""
        return "my_tool_name"

    def get_description(self) -> str:
        """Return description for the LLM to understand when to use this tool."""
        return "Description of what this tool does"

    def get_input_schema(self) -> dict:
        """Return JSON Schema defining the tool's input parameters."""
        return {
            "type": "object",
            "properties": {
                "param1": {"type": "string", "description": "Parameter description"}
            },
            "required": ["param1"]
        }

    def execute(self, **kwargs) -> dict:
        """Execute the tool and return results."""
        # kwargs carries the schema-defined inputs; missing keys yield None.
        param1 = kwargs.get("param1")
        return {"result": f"Processed {param1}"}
Complete Tool Example¶
import os
from akordi_agents.core import create_langgraph_agent
from akordi_agents.services import AWSBedrockService
from akordi_agents.tools import Tool
# AWSBedrockService reads the region from the environment, so set it before
# the service is constructed below.
os.environ["AWS_REGION"] = "us-east-1"
class WeatherTool(Tool):
    """Get current weather for a location."""

    def get_name(self) -> str:
        """Return unique tool identifier."""
        return "get_weather"

    def get_description(self) -> str:
        """Return the description the LLM uses to decide when to call this tool."""
        return "Get the current weather conditions for a specified location"

    def get_input_schema(self) -> dict:
        """Return JSON Schema: 'location' is required, 'units' is optional."""
        return {
            "type": "object",
            "properties": {
                "location": {
                    "type": "string",
                    "description": "City name (e.g., 'London, UK')"
                },
                "units": {
                    "type": "string",
                    "enum": ["celsius", "fahrenheit"],
                    "description": "Temperature units"
                }
            },
            "required": ["location"]
        }

    def execute(self, **kwargs) -> dict:
        """Return a stubbed weather report for the requested location."""
        location = kwargs.get("location", "Unknown")
        units = kwargs.get("units", "celsius")
        # Replace with actual API call
        return {
            "location": location,
            "temperature": "22°C" if units == "celsius" else "72°F",
            "conditions": "Sunny",
            "humidity": "45%"
        }
# Create agent with tool
agent = create_langgraph_agent(
    name="weather_agent",
    llm_service=AWSBedrockService(),
    tools=[WeatherTool()],
    config={"enable_tools": True, "temperature": 0.1},
)

# The agent decides on its own when a query needs the weather tool.
weather_request = {
    "query": "What's the weather in London?",
    "system_message": "You are a weather assistant. Use the weather tool.",
}
response = agent.process_request(weather_request)
print(response["llm_response"]["response"])
Tool Interface Reference¶
All tools must implement these methods:
| Method | Return Type | Description |
|---|---|---|
get_name() |
str |
Unique tool identifier |
get_description() |
str |
Description for the LLM |
get_input_schema() |
dict |
JSON Schema for inputs |
execute(**kwargs) |
dict |
Execute and return results |
Tool Interface¶
All tools must implement these methods:
| Method | Return Type | Description |
|---|---|---|
get_name() |
str |
Unique tool identifier |
get_description() |
str |
Description for the LLM |
get_input_schema() |
dict |
JSON Schema for inputs |
execute(**kwargs) |
dict |
Execute and return results |
Input Schema¶
The input schema follows JSON Schema format:
def get_input_schema(self) -> dict:
    """Return a JSON Schema illustrating the common property kinds.

    Demonstrates string, bounded integer, enum, array, and nested object
    properties; only 'name' and 'status' are required.
    """
    return {
        "type": "object",
        "properties": {
            # String property
            "name": {
                "type": "string",
                "description": "User's name"
            },
            # Number property
            "age": {
                "type": "integer",
                "description": "User's age",
                "minimum": 0,
                "maximum": 150
            },
            # Enum property
            "status": {
                "type": "string",
                "enum": ["active", "inactive"],
                "description": "Account status"
            },
            # Array property
            "tags": {
                "type": "array",
                "items": {"type": "string"},
                "description": "User tags"
            },
            # Object property
            "address": {
                "type": "object",
                "properties": {
                    "street": {"type": "string"},
                    "city": {"type": "string"}
                }
            }
        },
        "required": ["name", "status"]
    }
Using Tools with Agents¶
With create_langgraph_agent¶
from akordi_agents.core import create_langgraph_agent

# Instantiate the tools the agent may call.
weather_tool = WeatherTool()
calculator_tool = CalculatorTool()

# Wire both tools into the agent; enable_tools turns on tool routing.
agent = create_langgraph_agent(
    name="assistant",
    llm_service=llm_service,
    tools=[weather_tool, calculator_tool],
    config={"enable_tools": True},
)

# One query can trigger multiple tool calls — the LLM picks which to use.
response = agent.process_request({
    "query": "What's the weather in Paris and what is 42 * 17?",
    "system_message": "You are a helpful assistant with weather and math tools.",
})
With AgentBuilder¶
from akordi_agents.core import AgentBuilder

# Stepwise equivalent of the fluent builder chain.
builder = AgentBuilder("tool_agent")
builder = builder.with_llm_service_instance(llm_service)
builder = builder.with_tools([weather_tool, calculator_tool])
builder = builder.with_langgraph(enable=True, config={"enable_tools": True})
agent = builder.build()
Built-in Tools¶
AkordiChatTool¶
Tool for managing chat sessions and messages:
from akordi_agents.tools import AkordiChatTool
# Built-in tool; takes no constructor arguments here.
chat_tool = AkordiChatTool()
# Operations available:
# - create_session: Create a new chat session
# - add_message: Add a message to a session
# - get_history: Get chat history
# - list_sessions: List user's sessions
# - get_session: Get session details
Tool Patterns¶
API Integration Tool¶
import requests


class APITool(Tool):
    """Call an external HTTP API using bearer-token authentication."""

    def __init__(self, api_key: str, base_url: str):
        self.api_key = api_key
        self.base_url = base_url

    def get_name(self) -> str:
        """Return unique tool identifier."""
        return "api_call"

    def get_description(self) -> str:
        """Return the description the LLM uses to decide when to call this tool."""
        return "Make API calls to external service"

    def get_input_schema(self) -> dict:
        """Return JSON Schema: 'endpoint' required; 'method' and 'data' optional."""
        return {
            "type": "object",
            "properties": {
                "endpoint": {"type": "string"},
                "method": {"type": "string", "enum": ["GET", "POST"]},
                "data": {"type": "object"}
            },
            "required": ["endpoint"]
        }

    def execute(self, **kwargs) -> dict:
        """Issue the request and return {'status', 'data'} or {'error'}."""
        endpoint = kwargs.get("endpoint")
        method = kwargs.get("method", "GET")
        data = kwargs.get("data", {})
        headers = {"Authorization": f"Bearer {self.api_key}"}
        url = f"{self.base_url}/{endpoint}"
        try:
            # Always pass a timeout: requests waits forever by default,
            # which would hang the agent on an unresponsive service.
            if method == "GET":
                response = requests.get(url, headers=headers, timeout=10)
            else:
                response = requests.post(url, json=data, headers=headers, timeout=10)
            return {
                "status": response.status_code,
                "data": response.json()
            }
        except Exception as e:
            # Report failures as data so the LLM can react instead of crashing.
            return {"error": str(e)}
Database Query Tool¶
class DatabaseTool(Tool):
    """Query a database reachable via the stored connection string."""

    def __init__(self, connection_string: str):
        self.connection_string = connection_string

    def get_name(self) -> str:
        """Return unique tool identifier."""
        return "query_database"

    def get_description(self) -> str:
        """Return the description the LLM uses to decide when to call this tool."""
        return "Query the database for information"

    def get_input_schema(self) -> dict:
        """Return JSON Schema: 'table' required; optional filters and limit."""
        return {
            "type": "object",
            "properties": {
                "table": {
                    "type": "string",
                    "description": "Table to query"
                },
                "filters": {
                    "type": "object",
                    "description": "Query filters"
                },
                "limit": {
                    "type": "integer",
                    "description": "Max results",
                    "default": 10
                }
            },
            "required": ["table"]
        }

    def execute(self, **kwargs) -> dict:
        """Run the query; placeholder — wire up your database client here."""
        # Implementation with your database client
        pass
File Processing Tool¶
import json
import csv
from io import StringIO


class FileProcessorTool(Tool):
    """Parse file content supplied as a string in JSON, CSV, or plain text."""

    def get_name(self) -> str:
        """Return unique tool identifier."""
        return "process_file"

    def get_description(self) -> str:
        """Return the description the LLM uses to decide when to call this tool."""
        return "Process and extract data from files"

    def get_input_schema(self) -> dict:
        """Return JSON Schema: both 'content' and 'format' are required."""
        return {
            "type": "object",
            "properties": {
                "content": {"type": "string"},
                "format": {
                    "type": "string",
                    "enum": ["json", "csv", "text"]
                }
            },
            "required": ["content", "format"]
        }

    def execute(self, **kwargs) -> dict:
        """Return {'data': parsed} — dict/list for JSON, list of row dicts
        for CSV, or the raw string for text."""
        content = kwargs.get("content", "")
        format_type = kwargs.get("format", "text")
        if format_type == "json":
            # NOTE(review): raises on malformed JSON — callers see the exception.
            return {"data": json.loads(content)}
        elif format_type == "csv":
            reader = csv.DictReader(StringIO(content))
            return {"data": list(reader)}
        else:
            return {"data": content}
Tool Execution Flow¶
sequenceDiagram
participant U as User
participant A as Agent
participant TD as ToolDecisionNode
participant TE as ToolExecutionNode
participant T as Tool
participant L as LLM
U->>A: "What's the weather in London?"
A->>TD: Analyze query
TD->>L: Should I use tools?
L-->>TD: Yes, use weather_tool
TD->>TE: Execute weather_tool
TE->>T: execute(location="London")
T-->>TE: {"temp": "15°C", ...}
TE->>L: Generate response with tool result
L-->>A: "The weather in London is 15°C..."
A-->>U: Response
Error Handling¶
class RobustTool(Tool):
    """Defensive execute() pattern: validate first, report errors as data.

    Only execute() is shown; the other Tool methods are omitted for brevity.
    """

    def execute(self, **kwargs) -> dict:
        try:
            # Validate inputs
            if not kwargs.get("required_param"):
                return {
                    "success": False,
                    "error": "required_param is missing"
                }
            # Execute logic
            result = self.do_work(kwargs)
            return {
                "success": True,
                "data": result
            }
        except ValueError as e:
            return {
                "success": False,
                "error": f"Invalid input: {e}"
            }
        except Exception as e:
            # Broad catch is deliberate: tool failures are returned to the
            # LLM as structured data instead of crashing the agent loop.
            return {
                "success": False,
                "error": f"Unexpected error: {e}"
            }
Best Practices¶
1. Clear Descriptions¶
Help the LLM understand when to use your tool:
# Good: Specific description
def get_description(self) -> str:
    """Specific and actionable — tells the LLM exactly when to call the tool."""
    return "Get real-time stock prices for a given ticker symbol. Use for current market data."
# Avoid: Vague description
def get_description(self) -> str:
    """Anti-example: too vague for the LLM to know when the tool applies."""
    return "Get data"
2. Validate Inputs¶
def execute(self, **kwargs) -> dict:
    """Validate inputs up front and return an error dict on bad input.

    Returns an {'error': ...} dict when 'location' is missing or shorter
    than 2 characters; otherwise falls through to the real implementation.
    """
    location = kwargs.get("location")
    if not location or len(location) < 2:
        return {"error": "Location must be at least 2 characters"}
    # Continue with execution
3. Return Structured Data¶
# Good: Structured response
return {
"success": True,
"data": {
"temperature": 22,
"unit": "celsius",
"conditions": "sunny"
}
}
# Avoid: Unstructured response
return "It's 22 degrees and sunny"
4. Handle Rate Limits¶
import time


class RateLimitedTool(Tool):
    """Throttle execute() to at most one call per min_interval seconds."""

    def __init__(self):
        # last_call holds a monotonic timestamp of the previous invocation.
        self.last_call = 0
        self.min_interval = 1.0  # seconds

    def execute(self, **kwargs) -> dict:
        # time.monotonic() is the correct clock for measuring intervals:
        # unlike time.time(), it cannot jump when the wall clock is adjusted.
        elapsed = time.monotonic() - self.last_call
        if elapsed < self.min_interval:
            time.sleep(self.min_interval - elapsed)
        self.last_call = time.monotonic()
        return self.do_work(kwargs)
5. Log Tool Usage¶
import logging

logger = logging.getLogger(__name__)


class LoggedTool(Tool):
    """Wrap execute() with entry/exit logging for observability."""

    def execute(self, **kwargs) -> dict:
        # Lazy %-style arguments defer string formatting until the record
        # is actually emitted (cheaper than f-strings when INFO is disabled).
        logger.info("Executing %s with args: %s", self.get_name(), kwargs)
        result = self.do_work(kwargs)
        logger.info("Tool %s returned: %s", self.get_name(), result)
        return result
Related Documentation¶
- Recipes: Agent with Custom Tool - Complete working example
- Recipes: Agent with Multiple Tools - Multiple tools example
- LangGraph Workflows - Use tools in workflows
- Examples - Tool integration examples
- API Reference - Complete Tool API