Claude Tool Use (Function Calling)#
Tool use lets Claude call external functions during a conversation. Claude decides when to call a tool, sends a structured request, receives the result, and incorporates it into its response.
Define a tool#
Tools are JSON Schema objects describing name, purpose, and input parameters.
# Tool definitions are JSON Schema: a name, a description Claude reads to
# decide *when* to call the tool, and a schema for the expected input.
tools = [
    {
        "name": "get_weather",
        "description": "Get current weather for a location. Call this when the user asks about weather.",
        "input_schema": {
            "type": "object",
            "properties": {
                # The only required parameter.
                "location": {
                    "type": "string",
                    "description": "City and country, e.g. 'Toronto, Canada'",
                },
                # Optional; the default lives in the description, not the schema.
                "unit": {
                    "type": "string",
                    "enum": ["celsius", "fahrenheit"],
                    "description": "Temperature unit. Default celsius.",
                },
            },
            "required": ["location"],
        },
    }
]
[!TIP] Write the `description` from Claude's perspective: explain when to call the tool, not just what it does. Claude uses descriptions to decide whether to call the tool at all.
First API call#
# First request: Claude sees the tool definitions and, instead of answering
# directly, may stop with stop_reason "tool_use" and a structured tool call.
import anthropic

client = anthropic.Anthropic()

response = client.messages.create(
    model="claude-opus-4-7",
    max_tokens=1024,
    tools=tools,  # the get_weather definition from the previous section
    messages=[{"role": "user", "content": "What's the weather in Toronto?"}]
)

print(response.stop_reason)  # "tool_use" when Claude wants a tool invoked
print(response.content)      # list of TextBlock and/or ToolUseBlock
Output:
tool_use
[ToolUseBlock(id='toolu_01XVn...', input={'location': 'Toronto, Canada'}, name='get_weather', type='tool_use')]
Handle the tool call#
import json

def handle_tool_call(name: str, inputs: dict) -> str:
    """Execute the named tool and return its result as a JSON string.

    Raises ValueError for any tool name this handler does not know.
    """
    # Guard clause: reject unknown tools up front.
    if name != "get_weather":
        raise ValueError(f"Unknown tool: {name}")
    location = inputs["location"]  # KeyError if the required field is missing
    unit = inputs.get("unit", "celsius")
    # Call your real weather API here
    return json.dumps({"temp": 12, "condition": "cloudy", "unit": unit})
# Pull the first tool_use block out of the response and execute it locally.
# (next() raises StopIteration if Claude made no tool call.)
tool_use = next(b for b in response.content if b.type == "tool_use")
result = handle_tool_call(tool_use.name, tool_use.input)
Continue the conversation#
Append the assistant's response and the tool result, then call again to get the final answer.
# Replay the conversation: the original user turn, Claude's full assistant
# turn (it must include the tool_use block), then a user turn carrying the
# tool_result. tool_use_id ties the result back to the matching call.
messages = [
    {"role": "user", "content": "What's the weather in Toronto?"},
    {"role": "assistant", "content": response.content},  # include full content list
    {
        "role": "user",
        "content": [
            {
                "type": "tool_result",
                "tool_use_id": tool_use.id,
                "content": result  # string or list of content blocks
            }
        ]
    }
]

final = client.messages.create(
    model="claude-opus-4-7",
    max_tokens=1024,
    tools=tools,  # pass the same tool definitions on every call
    messages=messages
)

print(final.content[0].text)
Output:
The current weather in Toronto, Canada is 12°C and cloudy.
Full agentic loop#
def run_agent(user_message: str, tools: list, max_turns: int = 10) -> str:
    """Run a Claude tool-use loop until the model ends its turn.

    Each iteration sends the transcript, appends Claude's reply, and either
    returns the final text (stop_reason "end_turn") or executes the requested
    tools and feeds the results back (stop_reason "tool_use").

    Args:
        user_message: The initial user prompt.
        tools: Tool definitions to pass on every API call.
        max_turns: Hard ceiling on API round-trips to prevent infinite loops.

    Returns:
        Claude's final text, or a fallback string if the loop ends abnormally.
    """
    messages = [{"role": "user", "content": user_message}]
    for _ in range(max_turns):
        response = client.messages.create(
            model="claude-opus-4-7",
            max_tokens=4096,
            tools=tools,
            messages=messages,
        )
        messages.append({"role": "assistant", "content": response.content})

        if response.stop_reason == "end_turn":
            # Done: return the last text block (there may be several).
            text = [b.text for b in response.content if b.type == "text"]
            return text[-1] if text else ""

        if response.stop_reason == "tool_use":
            # Execute every requested tool and answer all of them in ONE
            # user turn — the API requires a tool_result per tool_use id.
            tool_results = []
            for block in response.content:
                if block.type != "tool_use":
                    continue
                try:
                    result_content = handle_tool_call(block.name, block.input)
                    tool_results.append({
                        "type": "tool_result",
                        "tool_use_id": block.id,
                        "content": result_content,
                    })
                except Exception as exc:
                    tool_results.append({
                        "type": "tool_result",
                        "tool_use_id": block.id,
                        "content": f"Error: {exc}",
                        "is_error": True,  # tells Claude the tool failed
                    })
            messages.append({"role": "user", "content": tool_results})
            continue

        # FIX: any other stop_reason (e.g. "max_tokens", "stop_sequence")
        # previously fell through and re-issued the identical request until
        # max_turns was exhausted. Return best-effort text instead.
        text = [b.text for b in response.content if b.type == "text"]
        return text[-1] if text else f"Stopped: {response.stop_reason}"
    return "Max turns reached"
[!WARNING] Always set a `max_turns` ceiling. Without one, a bug in your tool handler or an unexpected Claude response can loop indefinitely. 10 is a safe default for most tasks; complex agentic pipelines may need 20–50.
Parallel tool use#
Claude may call multiple tools in a single response. Handle all ToolUseBlock items in the content list.
import anthropic
def _ticker_tool(name: str, description: str) -> dict:
    """Build a tool definition whose single required input is a ticker symbol."""
    return {
        "name": name,
        "description": description,
        "input_schema": {
            "type": "object",
            "properties": {"ticker": {"type": "string"}},
            "required": ["ticker"],
        },
    }

# Two independent tools — Claude may request both in a single response.
tools = [
    _ticker_tool("get_stock_price", "Get current stock price for a ticker symbol."),
    _ticker_tool("get_company_news", "Get recent news headlines for a company."),
]
response = client.messages.create(
    model="claude-opus-4-7",
    max_tokens=1024,
    tools=tools,
    messages=[{"role": "user", "content": "What's AAPL's price and latest news?"}]
)

# Claude may return TWO tool_use blocks in one response
tool_calls = [b for b in response.content if b.type == "tool_use"]
print(f"Tool calls requested: {len(tool_calls)}")

# Handle all of them and return all results in one user turn
# (each result keyed back to its call via tool_use_id).
tool_results = []
for tc in tool_calls:
    result = handle_tool_call(tc.name, tc.input)
    tool_results.append({
        "type": "tool_result",
        "tool_use_id": tc.id,
        "content": result,
    })
Output:
Tool calls requested: 2
Disable parallel tool use#
# With disable_parallel_tool_use, Claude makes at most one tool call per
# response — useful when your tools must run strictly in sequence.
response = client.messages.create(
    model="claude-opus-4-7",
    max_tokens=1024,
    tools=tools,
    tool_choice={"type": "auto", "disable_parallel_tool_use": True},
    messages=messages
)
Error handling with is_error#
When a tool call fails, return is_error: true instead of raising an exception. Claude will acknowledge the failure and decide whether to retry or respond differently.
def safe_tool_call(name: str, inputs: dict) -> dict:
    """Run handle_tool_call, translating failures into is_error results.

    Returns a dict with a "content" key and, on failure, "is_error": True —
    ready to be merged into a tool_result block.
    """
    try:
        return {"content": handle_tool_call(name, inputs)}
    except TimeoutError:
        # Timeouts get a fixed, human-readable message.
        return {"content": "Tool timed out after 10s.", "is_error": True}
    except Exception as exc:
        # Everything else reports the exception type and message to Claude.
        return {"content": f"Tool error: {type(exc).__name__}: {exc}", "is_error": True}
# Then in your loop:
for block in response.content:
    if block.type == "tool_use":
        result = safe_tool_call(block.name, block.input)
        tool_results.append({
            "type": "tool_result",
            "tool_use_id": block.id,
            **result,  # merges "content" and, on failure, "is_error"
        })
Tool choice control#
# Force Claude to call a specific tool (useful for structured extraction)
tool_choice={"type": "tool", "name": "extract_fields"}
# Force any tool call (not end_turn)
tool_choice={"type": "any"}
# Claude decides (default)
tool_choice={"type": "auto"}
# Never use tools — return text only
tool_choice={"type": "none"}
Prompt caching with tools#
Mark your tool definitions as cacheable when they are large and reused across many calls. Cache TTL is 5 minutes (ephemeral).
tools_with_cache = [
    {
        "name": "search_docs",
        "description": "Search the documentation database...",
        "input_schema": {
            "type": "object",
            "properties": {
                "query": {"type": "string", "description": "Search query"},
                "max_results": {"type": "integer", "default": 5},
            },
            "required": ["query"],
        },
        # Ephemeral cache entry: the tool definition is written to the
        # prompt cache on first use and read back on subsequent calls.
        "cache_control": {"type": "ephemeral"},
    }
]
# Both the cached tool definitions and the cached system prompt form the
# cache prefix; the usage block reports cache hits (see output below).
response = client.messages.create(
    model="claude-opus-4-7",
    max_tokens=1024,
    tools=tools_with_cache,
    system=[
        {
            "type": "text",
            "text": "You are a documentation assistant with access to search.",
            "cache_control": {"type": "ephemeral"}  # also cache system prompt
        }
    ],
    messages=messages
)
Output (usage block when cached):
Usage(cache_creation_input_tokens=1024, cache_read_input_tokens=1024, input_tokens=52, output_tokens=80)
Tool schema best practices#
| Practice | Why |
|---|---|
| Keep descriptions short but precise | Token efficiency; Claude reads every description every turn |
| Name parameters unambiguously | `city_name` not `name` when there could be other names |
| Mark truly required fields as `required` | Prevents Claude from omitting fields you always need |
| Use `enum` for fixed choices | Avoids hallucinated values; validation is free |
| Add default in description, not schema | JSON Schema `default` is informational; Claude reads descriptions |
| Keep tool count under ~20 | Beyond ~20 tools, Claude struggles to choose; group by domain |
| Write description from Claude's POV | "Call this when the user asks about weather" not "Gets weather" |
Tool result content types#
The content field in a tool_result can be a string, or a list of content blocks (text + images):
# String (simple) — FIX: the temperature string was mojibake ("12Β°C");
# restored the intended degree sign.
{"type": "tool_result", "tool_use_id": tc.id, "content": "12°C, cloudy"}

# List with image (e.g. a chart tool that returns a plot)
{
    "type": "tool_result",
    "tool_use_id": tc.id,
    "content": [
        {"type": "text", "text": "Chart generated:"},
        {
            "type": "image",
            "source": {
                "type": "base64",
                "media_type": "image/png",
                "data": base64_png_data  # raw base64 string, no data: prefix
            }
        }
    ]
}
Streaming with tool use#
# Tool-call inputs stream as input_json_delta fragments; the complete parsed
# input dict is only available from the final message.
with client.messages.stream(
    model="claude-opus-4-7",
    max_tokens=1024,
    tools=tools,
    messages=messages,
) as stream:
    for event in stream:
        if event.type == "content_block_start":
            if event.content_block.type == "tool_use":
                print(f"\nTool call: {event.content_block.name}")
        elif event.type == "content_block_delta":
            if event.delta.type == "input_json_delta":
                # Partial JSON for the tool input, printed as it arrives.
                print(event.delta.partial_json, end="", flush=True)
        elif event.type == "message_stop":
            print()

# Get the final message for the full tool use input
final_message = stream.get_final_message()