# Tool Integration Example

Add custom tools to extend agent capabilities.

Tool Interface

Tools subclass the Tool base class and implement four methods:

from akordi_agents.tools import Tool
from typing import Any, Dict

class MyTool(Tool):
    """Minimal tool template showing the four required methods."""

    def get_name(self) -> str:
        """Identifier the LLM uses to reference this tool."""
        return "my_tool"

    def get_description(self) -> str:
        """Summary the LLM reads when deciding whether to call the tool."""
        return "Description of what this tool does"

    def get_input_schema(self) -> Dict[str, Any]:
        """JSON schema describing the accepted parameters."""
        param1 = {"type": "string", "description": "First parameter"}
        return {
            "type": "object",
            "properties": {"param1": param1},
            "required": ["param1"],
        }

    def execute(self, **kwargs) -> Any:
        """Run the tool and return a result the agent feeds back to the LLM."""
        return {"success": True, "data": "result"}

Real Weather Tool Example

Here's the actual weather tool from examples/tools/weather_tool.py:

"""Weather Tool for fetching real-time weather data."""

import logging
import os
from typing import Any, Dict

import requests
from akordi_agents.tools import Tool

logger = logging.getLogger(__name__)


class WeatherTool(Tool):
    """Weather information tool that fetches real weather data.

    Calls the weatherapi.com current-conditions endpoint; requires the
    WEATHER_API_KEY environment variable to be set.
    """

    def get_name(self) -> str:
        return "weather_tool"

    def get_description(self) -> str:
        return (
            "Fetches current weather information for any city. "
            "Returns temperature, conditions, humidity, and wind speed."
        )

    def get_input_schema(self) -> Dict[str, Any]:
        return {
            "type": "object",
            "properties": {
                "city": {
                    "type": "string",
                    "description": "City name (e.g., 'London, UK' or 'New York, USA')",
                }
            },
            "required": ["city"],
        }

    def execute(self, **kwargs) -> Any:
        """Execute weather lookup.

        Returns a dict with ``success: True`` and the weather fields on
        success, or ``success: False`` plus an ``error`` message on failure
        (missing API key, network error, or unexpected response shape).
        """
        city = kwargs.get("city", "Auckland, New Zealand")
        api_key = os.getenv("WEATHER_API_KEY")

        # Use the module logger (defined above but previously unused)
        # instead of print(), so the host application controls verbosity
        # and output destination. Lazy %-args avoid formatting when the
        # level is disabled.
        logger.info("WeatherTool: fetching weather for %s", city)

        if not api_key:
            return {
                "success": False,
                "error": "Weather API key not configured",
                "city": city,
                "message": "Set WEATHER_API_KEY environment variable",
            }

        try:
            url = "http://api.weatherapi.com/v1/current.json"
            params = {"key": api_key, "q": city, "aqi": "no"}

            response = requests.get(url, params=params, timeout=10)
            response.raise_for_status()
            data = response.json()

            location = data["location"]
            current = data["current"]

            weather_info = {
                "success": True,
                "city": f"{location['name']}, {location['country']}",
                "temperature_c": current["temp_c"],
                "temperature_f": current["temp_f"],
                "condition": current["condition"]["text"],
                "humidity": current["humidity"],
                "wind_kph": current["wind_kph"],
                "feels_like_c": current["feelslike_c"],
                "uv_index": current["uv"],
                "local_time": location["localtime"],
            }

            logger.info(
                "Weather: %s - %s°C",
                weather_info["city"],
                weather_info["temperature_c"],
            )
            return weather_info

        except Exception as e:
            # Broad catch is deliberate: a tool should hand the agent a
            # structured error result rather than raise into the workflow.
            logger.exception("WeatherTool lookup failed for %s", city)
            return {
                "success": False,
                "error": str(e),
                "city": city,
            }

Using Tools with create_langgraph_agent

The recommended way to add tools:

from akordi_agents.core import create_langgraph_agent
from akordi_agents.services import AWSBedrockService
from examples.tools.weather_tool import WeatherTool

# Create agent with tool
agent = create_langgraph_agent(
    name="weather_agent",
    llm_service=AWSBedrockService(),
    tools=[WeatherTool()],  # tool instances the agent may call
    config={
        "enable_tools": True,   # every tool example sets this — presumably required for tool calling
        "temperature": 0.1,     # low temperature — presumably for more deterministic tool selection
        "max_iterations": 10,   # cap on workflow iterations — TODO confirm exact semantics
    }
)

# Query that triggers tool use
response = agent.process_request({
    "query": "What's the weather like in Paris?",
    "system_message": "You are a helpful weather assistant. Use the weather tool for accurate information.",
})

# process_request returns a dict; these keys appear throughout the examples.
print(response["llm_response"]["response"])
print("Tools used:", response["workflow_metadata"]["tools_used"])

Using Tools with AgentBuilder

For more explicit control:

from akordi_agents.core import AgentBuilder
from akordi_agents.services import AWSBedrockService

# Fluent builder route for explicit, step-by-step agent construction.
agent = (
    AgentBuilder("tool_agent")
    .with_llm_service_instance(AWSBedrockService())
    .with_tools([WeatherTool()])  # register tool instances
    .with_langgraph(
        enable=True,  # opt in to the LangGraph workflow
        config={
            "enable_tools": True,
            "max_iterations": 10,
        }
    )
    .build()
)

Multiple Tools

Add multiple tools for diverse capabilities:

from akordi_agents.tools import Tool

class CalculatorTool(Tool):
    """Arithmetic tool that evaluates basic math expressions."""

    def get_name(self) -> str:
        return "calculator"

    def get_description(self) -> str:
        return "Perform mathematical calculations"

    def get_input_schema(self) -> dict:
        return {
            "type": "object",
            "properties": {
                "expression": {
                    "type": "string",
                    "description": "Math expression (e.g., '2 + 2', '10 * 5')"
                }
            },
            "required": ["expression"]
        }

    def execute(self, **kwargs) -> dict:
        """Evaluate the expression; return {"success", "result"} or an error dict."""
        expression = kwargs.get("expression", "0").strip()
        # Clear error for an empty expression instead of eval's SyntaxError.
        if not expression:
            return {"success": False, "error": "Empty expression"}
        # Character whitelist keeps names/attributes out of the expression.
        allowed_chars = set("0123456789+-*/(). ")
        if not all(c in allowed_chars for c in expression):
            return {"success": False, "error": "Invalid characters"}
        try:
            # Strip builtins as defense in depth on top of the whitelist.
            # NOTE(review): '**' is still expressible with these characters,
            # so huge exponents could be slow — consider rejecting '**'.
            result = eval(expression, {"__builtins__": {}}, {})
            return {"success": True, "result": result}
        except Exception as e:
            return {"success": False, "error": str(e)}


class RiskAssessmentTool(Tool):
    """Heuristic project-risk assessment from activity and weather inputs."""

    def get_name(self) -> str:
        return "risk_assessment"

    def get_description(self) -> str:
        return "Assess project risks based on factors like weather, location, and activity type"

    def get_input_schema(self) -> dict:
        schema_properties = {
            "activity": {
                "type": "string",
                "description": "Activity to assess (e.g., 'roofing', 'excavation')"
            },
            "weather_conditions": {
                "type": "string",
                "description": "Current weather conditions"
            },
        }
        return {
            "type": "object",
            "properties": schema_properties,
            "required": ["activity"]
        }

    def execute(self, **kwargs) -> dict:
        """Return a risk level plus generic safety recommendations."""
        activity = kwargs.get("activity", "general work")
        weather = kwargs.get("weather_conditions", "unknown")

        # Hazardous-weather keywords escalate the risk level.
        hazard_terms = ("rain", "wind")
        conditions = weather.lower()
        risk_level = "High" if any(term in conditions for term in hazard_terms) else "Medium"

        return {
            "success": True,
            "activity": activity,
            "risk_level": risk_level,
            "recommendations": [
                "Ensure proper safety equipment",
                "Check weather updates regularly",
                "Have emergency protocols ready"
            ]
        }


# Create agent with multiple tools
agent = create_langgraph_agent(
    name="multi_tool_agent",
    llm_service=AWSBedrockService(),
    tools=[
        WeatherTool(),
        CalculatorTool(),
        RiskAssessmentTool(),
    ],
    config={"enable_tools": True}
)

# The LLM automatically selects appropriate tools
# (each tool's description drives selection, so keep descriptions precise).
response = agent.process_request({
    "query": "What's the weather in London? And calculate 15% of 250.",
    "system_message": "Use tools to answer accurately.",
})

Tool Execution Flow

sequenceDiagram
    participant U as User
    participant A as Agent
    participant LLM as LLM
    participant T as Tool

    U->>A: "What's the weather in London?"
    A->>LLM: Analyze query + available tools
    LLM->>A: Decision: Use weather_tool
    A->>T: execute(city="London")
    T->>A: {temperature: "15°C", conditions: "Cloudy"}
    A->>LLM: Generate response with tool data
    LLM->>A: "The weather in London is 15°C and cloudy"
    A->>U: Final response

Tool with API Integration

Create tools that call external APIs:

import requests
from akordi_agents.tools import Tool

class StockPriceTool(Tool):
    """Tool that fetches the current price for a stock ticker symbol.

    Args:
        api_key: Bearer token sent with every request to the quote API.
    """

    def __init__(self, api_key: str):
        self.api_key = api_key
        self.base_url = "https://api.example.com/v1"

    def get_name(self) -> str:
        return "get_stock_price"

    def get_description(self) -> str:
        return "Get current stock price for a ticker symbol"

    def get_input_schema(self) -> dict:
        return {
            "type": "object",
            "properties": {
                "symbol": {
                    "type": "string",
                    "description": "Stock ticker (e.g., 'AAPL', 'GOOGL')"
                }
            },
            "required": ["symbol"]
        }

    def execute(self, **kwargs) -> dict:
        """Fetch a quote; returns price/change on success or an error dict."""
        symbol = kwargs.get("symbol", "").strip().upper()
        # Guard against a missing/blank symbol so we never issue a
        # malformed request like GET /quote/.
        if not symbol:
            return {"success": False, "error": "No ticker symbol provided"}

        try:
            response = requests.get(
                f"{self.base_url}/quote/{symbol}",
                headers={"Authorization": f"Bearer {self.api_key}"},
                timeout=10
            )

            if response.status_code == 200:
                data = response.json()
                return {
                    "success": True,
                    "symbol": symbol,
                    "price": data["price"],
                    "change": data["change"],
                }
            return {"success": False, "error": f"API error: {response.status_code}"}
        except Exception as e:
            # Network failures / JSON decode errors become structured results.
            return {"success": False, "error": str(e)}

Complete Example Script

#!/usr/bin/env python
"""Tool integration example."""

import os
from dotenv import load_dotenv
from akordi_agents.core import create_langgraph_agent
from akordi_agents.services import AWSBedrockService
from akordi_agents.tools import Tool

# Load environment
load_dotenv()


class WeatherTool(Tool):
    """Demo weather tool returning canned data (no external API calls)."""

    def get_name(self) -> str:
        return "get_weather"

    def get_description(self) -> str:
        return "Get current weather for a city"

    def get_input_schema(self) -> dict:
        city_spec = {"type": "string", "description": "City name"}
        return {
            "type": "object",
            "properties": {"city": city_spec},
            "required": ["city"]
        }

    def execute(self, **kwargs) -> dict:
        """Return a fixed weather payload for the requested city."""
        city = kwargs.get("city", "Unknown")
        # In production, call real weather API
        payload = {
            "city": city,
            "temperature": "20°C",
            "conditions": "Sunny"
        }
        return {"success": True, "data": payload}


def main():
    """Build a weather agent and run a few sample queries through it."""
    agent = create_langgraph_agent(
        name="weather_agent",
        llm_service=AWSBedrockService(),
        tools=[WeatherTool()],
        config={"enable_tools": True}
    )

    # Queries chosen to exercise single- and multi-city tool calls.
    queries = (
        "What's the weather in London?",
        "Tell me about the weather in Tokyo and Paris",
        "Is it warm in Sydney today?",
    )

    for question in queries:
        print(f"\nQuery: {question}")
        print("-" * 50)

        response = agent.process_request({
            "query": question,
            "system_message": "Use the weather tool for accurate information.",
        })

        # Guard clause: report the failure and move to the next query.
        if not response.get("success"):
            print(f"Error: {response.get('error')}")
            continue

        print(f"Response: {response['llm_response']['response']}")
        print(f"Tools used: {response['workflow_metadata'].get('tools_used', [])}")


if __name__ == "__main__":
    main()

Running Examples

# Set up environment
export AWS_REGION=us-east-1
export WEATHER_API_KEY=your-api-key  # For real weather data

# Simple tool example (without LangGraph workflow)
poetry run python examples/simple_tool.py --query "What's the weather?"

# LangGraph tool example
poetry run python examples/lang_tool.py --query "What's the weather?"

# Interactive mode
poetry run python examples/langgraph_with_simple_tool.py

Best Practices

1. Clear Descriptions

LLMs use descriptions to decide when to use tools:

# Good - specific and actionable: names the data returned and its scope,
# so the LLM can match the tool to a user request.
def get_description(self) -> str:
    return "Get current weather conditions including temperature, humidity, and wind speed for any city worldwide"

# Bad - too vague for the LLM to know when the tool applies
def get_description(self) -> str:
    return "Weather tool"

2. Proper Error Handling

def execute(self, **kwargs) -> dict:
    """Run the tool, mapping each failure mode to a structured error dict."""
    try:
        # Tool logic
        result = self.do_work(**kwargs)
        return {"success": True, "data": result}
    except ValueError as e:
        # Bad arguments: report back rather than crash the agent loop.
        return {"success": False, "error": f"Invalid input: {e}"}
    except requests.Timeout:
        return {"success": False, "error": "Request timed out"}
    except Exception as e:
        # Last-resort catch so the agent always receives a result.
        return {"success": False, "error": str(e)}

3. Type Hints in Schema

def get_input_schema(self) -> dict:
    """JSON schema: 'city' is required; 'units' is an optional enum with a default."""
    return {
        "type": "object",
        "properties": {
            "city": {
                "type": "string",
                "description": "City name (e.g., 'London, UK')"
            },
            # Optional parameter constrained with "enum" and given a "default".
            "units": {
                "type": "string",
                "enum": ["celsius", "fahrenheit"],
                "default": "celsius"
            }
        },
        "required": ["city"]
    }

Next Steps