import json

import requests

API_KEY = "your-token"  # Required for paid features
BASE_URL = "https://api.llm7.io/v1/chat/completions"

# 1) Describe the callable function
tools = [{
    "type": "function",
    "function": {
        "name": "get_weather",
        "description": "Get current weather for a location",
        "parameters": {
            "type": "object",
            "properties": {
                "location": {
                    "type": "string",
                    "description": "City and state, e.g. Boston, MA"
                },
                "unit": {
                    "type": "string",
                    "enum": ["celsius", "fahrenheit"]
                }
            },
            "required": ["location"]
        }
    }
}]

# 2) Let the model decide whether to call the function
payload = {
    "model": "openai",
    "messages": [{"role": "user", "content": "What's the weather in London?"}],
    "tools": tools,
    "tool_choice": "auto"
}

response = requests.post(
    BASE_URL,
    headers={"Authorization": f"Bearer {API_KEY}"},
    json=payload,
    timeout=30,
).json()
message = response["choices"][0]["message"]

# 3) If the model asks for a tool, supply the result and get the final reply
if message.get("tool_calls"):
    tool_call = message["tool_calls"][0]
    print(f"Model requested: {tool_call}")

    # Replace with a real weather API call
    weather_data = {
        "temperature": 12,
        "condition": "sunny",
        "unit": "celsius"
    }

    follow_up_messages = [
        {"role": "user", "content": "What's the weather in London?"},
        message,
        {
            "role": "tool",
            "tool_call_id": tool_call["id"],
            "content": json.dumps(weather_data)
        }
    ]

    final = requests.post(
        BASE_URL,
        headers={"Authorization": f"Bearer {API_KEY}"},
        json={"model": "openai", "messages": follow_up_messages},
        timeout=30,
    ).json()
    print(final["choices"][0]["message"]["content"])
else:
    print(message["content"])
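
# Note (beyond this sketch): production code should check the HTTP status
# (e.g. resp.raise_for_status()) before calling .json(), and should loop over
# every entry in message["tool_calls"], appending one {"role": "tool",
# "tool_call_id": ...} message per requested call, since an OpenAI-compatible
# server may ask for several tools in a single turn.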