How It Works
Tool calling follows a 3-step cycle:

1. Inference with tools — You send a request with tool definitions. The model decides to call a tool and returns a `tool_calls` response.
2. Tool execution (client-side) — Your application runs the requested function and collects the result.
3. Inference with tool results — You send the tool result back to the model, which generates a final response.
Step 1: Inference with Tools
{
"model": "openai/gpt-5.4",
"messages": [
{ "role": "user", "content": "What is the weather in San Francisco?" }
],
"tools": [
{
"type": "function",
"function": {
"name": "get_weather",
"description": "Get the current weather for a location",
"parameters": {
"type": "object",
"properties": {
"location": {
"type": "string",
"description": "City name, e.g. 'San Francisco'"
},
"unit": {
"type": "string",
"enum": ["celsius", "fahrenheit"]
}
},
"required": ["location"]
}
}
}
],
"tool_choice": "auto"
}
The model responds with a `tool_calls` array and `finish_reason: "tool_calls"`:
{
"id": "chatcmpl-xxx",
"choices": [
{
"message": {
"role": "assistant",
"content": null,
"tool_calls": [
{
"id": "call_abc123",
"type": "function",
"function": {
"name": "get_weather",
"arguments": "{\"location\": \"San Francisco\", \"unit\": \"fahrenheit\"}"
}
}
]
},
"finish_reason": "tool_calls"
}
]
}
Step 2: Tool Execution (Client-Side)
Your application executes the function with the model’s arguments:

import json
# Pull the first tool call out of the Step 1 response
tool_call = response.choices[0].message.tool_calls[0]
# function.arguments is a JSON *string* — parse it before use
args = json.loads(tool_call.function.arguments)
# Execute your function
result = get_weather(
location=args["location"],
unit=args.get("unit", "fahrenheit")  # default when the model omits "unit" (it is not in "required")
)
# result = {"temperature": 72, "condition": "sunny", "unit": "fahrenheit"}
Step 3: Inference with Tool Results
Send the tool result back as a `tool` role message:
{
"model": "openai/gpt-5.4",
"messages": [
{ "role": "user", "content": "What is the weather in San Francisco?" },
{
"role": "assistant",
"content": null,
"tool_calls": [
{
"id": "call_abc123",
"type": "function",
"function": {
"name": "get_weather",
"arguments": "{\"location\": \"San Francisco\", \"unit\": \"fahrenheit\"}"
}
}
]
},
{
"role": "tool",
"tool_call_id": "call_abc123",
"content": "{\"temperature\": 72, \"condition\": \"sunny\", \"unit\": \"fahrenheit\"}"
}
],
"tools": [...]
}
Full Example
- Python (OpenAI)
- Node.js (OpenAI)
- Go
- cURL
import json
from openai import OpenAI

# Standard OpenAI client pointed at the router's OpenAI-compatible endpoint.
client = OpenAI(
    base_url="https://api.arouter.ai/v1",
    api_key="lr_live_xxxx",
)

# Define the tool (JSON Schema describes the function's parameters).
tools = [
    {
        "type": "function",
        "function": {
            "name": "get_weather",
            "description": "Get the current weather for a location",
            "parameters": {
                "type": "object",
                "properties": {
                    "location": {
                        "type": "string",
                        "description": "City name, e.g. 'San Francisco'",
                    },
                    "unit": {"type": "string", "enum": ["celsius", "fahrenheit"]},
                },
                "required": ["location"],
            },
        },
    }
]

messages = [{"role": "user", "content": "What is the weather in San Francisco?"}]

# Step 1: First inference — the model decides whether to call the tool.
response = client.chat.completions.create(
    model="openai/gpt-5.4",
    messages=messages,
    tools=tools,
    tool_choice="auto",
)
assistant_message = response.choices[0].message
# The assistant message (including its tool_calls) must be echoed back into
# the history *before* the tool result messages that answer it.
messages.append(assistant_message)

# Step 2: Execute tools client-side with the model-supplied arguments.
if assistant_message.tool_calls:
    for tool_call in assistant_message.tool_calls:
        if tool_call.function.name == "get_weather":
            # arguments is a JSON string — parse it before use.
            args = json.loads(tool_call.function.arguments)
            # Simulate weather API
            result = {"temperature": 72, "condition": "sunny", "unit": args.get("unit", "fahrenheit")}
            messages.append({
                "role": "tool",
                "tool_call_id": tool_call.id,  # must match the call it answers
                "content": json.dumps(result),
            })

# Step 3: Final inference — the model turns the tool result into an answer.
final_response = client.chat.completions.create(
    model="openai/gpt-5.4",
    messages=messages,
    tools=tools,
)
print(final_response.choices[0].message.content)
# "The current weather in San Francisco is 72°F and sunny."
import OpenAI from "openai";

// Standard OpenAI SDK pointed at the router's OpenAI-compatible endpoint.
const client = new OpenAI({
  baseURL: "https://api.arouter.ai/v1",
  apiKey: "lr_live_xxxx",
});

// Tool definition: JSON Schema describing get_weather's parameters.
const tools: OpenAI.Chat.ChatCompletionTool[] = [
  {
    type: "function",
    function: {
      name: "get_weather",
      description: "Get the current weather for a location",
      parameters: {
        type: "object",
        properties: {
          location: {
            type: "string",
            description: "City name, e.g. 'San Francisco'",
          },
          unit: { type: "string", enum: ["celsius", "fahrenheit"] },
        },
        required: ["location"],
      },
    },
  },
];

const messages: OpenAI.Chat.ChatCompletionMessageParam[] = [
  { role: "user", content: "What is the weather in San Francisco?" },
];

// Step 1: First inference — the model decides whether to call the tool.
const response = await client.chat.completions.create({
  model: "openai/gpt-5.4",
  messages,
  tools,
  tool_choice: "auto",
});

const assistantMessage = response.choices[0].message;
// Echo the assistant message (with its tool_calls) into the history
// before appending the tool results that answer it.
messages.push(assistantMessage);

// Step 2: Execute each requested tool call client-side.
if (assistantMessage.tool_calls) {
  for (const toolCall of assistantMessage.tool_calls) {
    if (toolCall.function.name === "get_weather") {
      // arguments is a JSON string — parse it before use.
      const args = JSON.parse(toolCall.function.arguments);
      // Simulated weather API response.
      const result = {
        temperature: 72,
        condition: "sunny",
        unit: args.unit ?? "fahrenheit",
      };
      messages.push({
        role: "tool",
        tool_call_id: toolCall.id, // must match the call it answers
        content: JSON.stringify(result),
      });
    }
  }
}

// Step 3: Final inference — the model turns the tool result into an answer.
const finalResponse = await client.chat.completions.create({
  model: "openai/gpt-5.4",
  messages,
  tools,
});
console.log(finalResponse.choices[0].message.content);
package main

import (
	"context"
	"encoding/json"
	"fmt"
	"log"

	"github.com/arouter-ai/arouter-go"
)

func main() {
	client := arouter.NewClient("lr_live_xxxx",
		arouter.WithBaseURL("https://api.arouter.ai/v1"),
	)

	// Define the tool (JSON Schema describes the function's parameters).
	tools := []arouter.Tool{
		{
			Type: "function",
			Function: &arouter.FunctionDefinition{
				Name:        "get_weather",
				Description: "Get the current weather for a location",
				Parameters: map[string]any{
					"type": "object",
					"properties": map[string]any{
						"location": map[string]any{
							"type":        "string",
							"description": "City name",
						},
						"unit": map[string]any{
							"type": "string",
							"enum": []string{"celsius", "fahrenheit"},
						},
					},
					"required": []string{"location"},
				},
			},
		},
	}

	messages := []arouter.Message{
		{Role: "user", Content: "What is the weather in San Francisco?"},
	}

	// Step 1: first inference — the model decides whether to call the tool.
	resp, err := client.CreateChatCompletion(context.Background(), arouter.ChatCompletionRequest{
		Model:    "openai/gpt-5.4",
		Messages: messages,
		Tools:    tools,
	})
	if err != nil {
		log.Fatal(err)
	}
	assistantMsg := resp.Choices[0].Message
	// Echo the assistant message (with its tool_calls) into the history
	// before appending the tool results that answer it.
	messages = append(messages, assistantMsg)

	// Step 2 & 3: execute each requested tool call, then ask for the final answer.
	for _, tc := range assistantMsg.ToolCalls {
		// Arguments arrive as a JSON string; don't silently ignore parse failures.
		var args map[string]string
		if err := json.Unmarshal([]byte(tc.Function.Arguments), &args); err != nil {
			log.Fatalf("invalid tool arguments: %v", err)
		}
		// "unit" is optional — default it like the Python/Node examples do,
		// instead of emitting "unit":"".
		unit := args["unit"]
		if unit == "" {
			unit = "fahrenheit"
		}
		result := fmt.Sprintf(`{"temperature":72,"condition":"sunny","unit":"%s"}`, unit)
		messages = append(messages, arouter.Message{
			Role:       "tool",
			ToolCallID: tc.ID, // must match the call it answers
			Content:    result,
		})
	}

	finalResp, err := client.CreateChatCompletion(context.Background(), arouter.ChatCompletionRequest{
		Model:    "openai/gpt-5.4",
		Messages: messages,
		Tools:    tools,
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(finalResp.Choices[0].Message.Content)
}
# Step 1: Inference with tools
# On a tool call, the response carries a tool_calls array and
# finish_reason "tool_calls" (see the response example above).
curl https://api.arouter.ai/v1/chat/completions \
-H "Authorization: Bearer lr_live_xxxx" \
-H "Content-Type: application/json" \
-d '{
"model": "openai/gpt-5.4",
"messages": [
{"role": "user", "content": "What is the weather in San Francisco?"}
],
"tools": [
{
"type": "function",
"function": {
"name": "get_weather",
"description": "Get the current weather for a location",
"parameters": {
"type": "object",
"properties": {
"location": {"type": "string"},
"unit": {"type": "string", "enum": ["celsius", "fahrenheit"]}
},
"required": ["location"]
}
}
}
],
"tool_choice": "auto"
}'
# Step 3: Send tool result
# The history must contain both the assistant message with tool_calls and a
# tool message whose tool_call_id matches the call it answers.
curl https://api.arouter.ai/v1/chat/completions \
-H "Authorization: Bearer lr_live_xxxx" \
-H "Content-Type: application/json" \
-d '{
"model": "openai/gpt-5.4",
"messages": [
{"role": "user", "content": "What is the weather in San Francisco?"},
{
"role": "assistant",
"content": null,
"tool_calls": [{"id": "call_abc123", "type": "function", "function": {"name": "get_weather", "arguments": "{\"location\": \"San Francisco\"}"}}]
},
{
"role": "tool",
"tool_call_id": "call_abc123",
"content": "{\"temperature\": 72, \"condition\": \"sunny\"}"
}
]
}'
Streaming Tool Calls
When streaming is enabled, tool call arguments are delivered incrementally via `delta.tool_calls`:
const stream = await client.chat.completions.create({
  model: "openai/gpt-5.4",
  messages: [{ role: "user", content: "What's the weather in NYC?" }],
  tools,
  stream: true,
});

// Accumulate partial tool calls keyed by `index` — a model may emit several
// parallel tool calls, each streamed as its own sequence of deltas. Reading
// only tool_calls[0] into a single buffer would interleave their arguments.
const toolCalls: { id: string; name: string; args: string }[] = [];

for await (const chunk of stream) {
  const choice = chunk.choices[0];
  for (const tc of choice?.delta?.tool_calls ?? []) {
    const entry = (toolCalls[tc.index] ??= { id: "", name: "", args: "" });
    if (tc.id) entry.id = tc.id;
    if (tc.function?.name) entry.name = tc.function.name;
    if (tc.function?.arguments) entry.args += tc.function.arguments;
  }
  if (choice?.finish_reason === "tool_calls") {
    // All arguments have been received — safe to parse each call's JSON.
    for (const { name, args } of toolCalls) {
      console.log(`Calling ${name} with:`, JSON.parse(args));
    }
  }
}
Supported Models
Use `GET /v1/models` to find models that support tool calling. Models with `tools` in their `capabilities` list support this feature.
# List available models; check each entry's capabilities for tool support.
curl https://api.arouter.ai/v1/models \
-H "Authorization: Bearer lr_live_xxxx"
- openai/gpt-5.4, openai/gpt-5.4-pro
- anthropic/claude-sonnet-4.6, anthropic/claude-opus-4.5
- google/gemini-2.5-flash, google/gemini-2.5-pro
- deepseek/deepseek-v3.2