OpenAI Integration
This guide shows how to use Danube tools with OpenAI’s function calling feature, enabling GPT models to execute real-world actions through Danube’s tool marketplace.

## Overview
OpenAI’s function calling allows GPT models to:
- Receive a list of available functions (tools)
- Decide which function to call based on user input
- Generate structured arguments for the function
- Process the function result and respond to the user
Prerequisites
Install the required packages:
pip install danube openai
Basic Example
Here’s a complete example of using Danube tools with OpenAI:
import json
from openai import OpenAI
from danube import DanubeClient
# Initialize clients
openai_client = OpenAI(api_key="sk-...")
danube_client = DanubeClient(api_key="dk_...")
def danube_tool_to_openai_function(tool):
    """Convert a Danube tool to the OpenAI function-calling tool format.

    Args:
        tool: A Danube tool object exposing ``id``, ``name``, ``description``
            and a ``parameters`` mapping of parameter name -> spec dict.

    Returns:
        dict: An OpenAI ``tools`` entry with a JSON Schema ``parameters``
        object built from the tool's declared parameters.
    """
    properties = {}
    required = []
    # Accepted JSON Schema primitive types; anything else falls back to "string".
    json_schema_types = {"string", "integer", "number", "boolean", "array", "object"}
    for name, param in tool.parameters.items():
        param_type = param.get("type", "string")
        properties[name] = {
            "type": param_type if param_type in json_schema_types else "string",
            "description": param.get("description", ""),
        }
        if param.get("required", False):
            required.append(name)
    return {
        "type": "function",
        "function": {
            "name": tool.id,  # Use tool ID so the call can be routed back to Danube
            "description": f"{tool.name}: {tool.description}",
            "parameters": {
                "type": "object",
                "properties": properties,
                "required": required,
            },
        },
    }
def search_and_convert_tools(query: str, limit: int = 5):
    """Search Danube tools and return them in OpenAI function format.

    Args:
        query: Free-text search query for the Danube marketplace.
        limit: Maximum number of tools to return.

    Returns:
        list[dict]: OpenAI-formatted tool definitions.
    """
    tools = danube_client.tools.search(query, limit=limit)
    return [danube_tool_to_openai_function(t) for t in tools]
def execute_tool_call(tool_call):
    """Execute a single OpenAI tool call through Danube.

    Args:
        tool_call: An OpenAI tool call whose ``function.name`` carries the
            Danube tool ID and ``function.arguments`` carries JSON-encoded
            arguments.

    Returns:
        str: The tool's output content, or an ``Error: ...`` message when
        execution fails.
    """
    tool_id = tool_call.function.name
    arguments = json.loads(tool_call.function.arguments)
    result = danube_client.tools.execute(
        tool_id=tool_id,
        parameters=arguments,
    )
    return result.content if result.success else f"Error: {result.error}"
def chat_with_tools(user_message: str, tool_query: str = None):
    """Chat with GPT using Danube tools.

    Args:
        user_message: The user's message.
        tool_query: Optional search query used to select relevant tools;
            defaults to the user message itself.

    Returns:
        str: The assistant's final response content.
    """
    # Load tools relevant to the request (fall back to the message itself)
    tools = search_and_convert_tools(tool_query or user_message)

    messages = [{"role": "user", "content": user_message}]
    response = openai_client.chat.completions.create(
        model="gpt-4-turbo-preview",
        messages=messages,
        tools=tools,
        tool_choice="auto",
    )
    assistant_message = response.choices[0].message

    # If the model requested tool calls, run them and ask for a final answer
    if assistant_message.tool_calls:
        messages.append(assistant_message)
        for tool_call in assistant_message.tool_calls:
            print(f"Executing: {tool_call.function.name}")
            result = execute_tool_call(tool_call)
            messages.append({
                "role": "tool",
                "tool_call_id": tool_call.id,
                "content": result,
            })
        # Second round-trip: let the model read the tool results
        final_response = openai_client.chat.completions.create(
            model="gpt-4-turbo-preview",
            messages=messages,
        )
        return final_response.choices[0].message.content

    return assistant_message.content
# Example usage
if __name__ == "__main__":
    response = chat_with_tools(
        "What are the top stories on Hacker News right now?",
        tool_query="hacker news",
    )
    print(response)
Full Agent Example
Here’s a more complete agent implementation with conversation history:
import json
from typing import List, Dict, Any, Optional
from openai import OpenAI
from danube import DanubeClient
class DanubeOpenAIAgent:
    """An AI agent that uses Danube tools with OpenAI.

    Keeps a running conversation history on the instance and executes any
    tool calls the model makes, up to a per-message iteration limit.
    """

    def __init__(
        self,
        openai_api_key: str,
        danube_api_key: str,
        model: str = "gpt-4-turbo-preview",
        system_prompt: str = None,
    ):
        self.openai = OpenAI(api_key=openai_api_key)
        self.danube = DanubeClient(api_key=danube_api_key)
        self.model = model
        # Full conversation history (system + user + assistant + tool messages)
        self.messages: List[Dict[str, Any]] = []
        # Tool definitions in OpenAI "tools" format
        self.available_tools: List[Dict] = []
        # Maps the sanitized OpenAI function name back to the Danube tool ID
        self.tool_id_map: Dict[str, str] = {}
        if system_prompt:
            self.messages.append({"role": "system", "content": system_prompt})

    def load_tools(self, query: str = "", service_id: str = None, limit: int = 20):
        """Load tools from Danube and convert them to OpenAI format.

        Args:
            query: Search query for the Danube marketplace.
            service_id: Restrict to one service's tools when no query is given.
            limit: Maximum number of tools to load.

        Returns:
            DanubeOpenAIAgent: ``self``, to allow chaining.
        """
        if query:
            tools = self.danube.tools.search(query, service_id=service_id, limit=limit)
        elif service_id:
            result = self.danube.services.get_tools(service_id, limit=limit)
            tools = result.tools
        else:
            tools = self.danube.tools.search("", limit=limit)

        self.available_tools = []
        self.tool_id_map = {}
        for tool in tools:
            # OpenAI requires function names to be alphanumeric + underscore.
            # NOTE(review): two tool names differing only in punctuation would
            # collide here — the later one wins; confirm tool names are unique.
            safe_name = tool.name.replace(" ", "_").replace("-", "_")
            safe_name = "".join(c for c in safe_name if c.isalnum() or c == "_")
            self.tool_id_map[safe_name] = tool.id

            # Build the JSON Schema parameter block
            properties = {}
            required = []
            for name, param in tool.parameters.items():
                param_info = param if isinstance(param, dict) else {}
                properties[name] = {
                    "type": param_info.get("type", "string"),
                    "description": param_info.get("description", ""),
                }
                if param_info.get("required"):
                    required.append(name)

            self.available_tools.append({
                "type": "function",
                "function": {
                    "name": safe_name,
                    # Truncated to stay within OpenAI's description length limit
                    "description": f"{tool.name}: {tool.description}"[:1024],
                    "parameters": {
                        "type": "object",
                        "properties": properties,
                        "required": required,
                    },
                },
            })

        print(f"Loaded {len(self.available_tools)} tools")
        return self

    def _execute_tool(self, function_name: str, arguments: Dict) -> str:
        """Execute a tool by its OpenAI function name and return the result."""
        tool_id = self.tool_id_map.get(function_name)
        if not tool_id:
            return f"Error: Unknown tool '{function_name}'"
        try:
            result = self.danube.tools.execute(tool_id=tool_id, parameters=arguments)
            return result.content if result.success else f"Error: {result.error}"
        except Exception as e:
            return f"Error executing tool: {str(e)}"

    def chat(self, user_message: str, max_iterations: int = 5) -> str:
        """Send a message and get a response, executing tools as needed.

        Args:
            user_message: The user's message.
            max_iterations: Maximum model round-trips before giving up.

        Returns:
            str: The assistant's final text response, or a notice when the
            iteration limit is reached.
        """
        self.messages.append({"role": "user", "content": user_message})

        for _ in range(max_iterations):
            response = self.openai.chat.completions.create(
                model=self.model,
                messages=self.messages,
                tools=self.available_tools if self.available_tools else None,
                tool_choice="auto" if self.available_tools else None,
            )
            assistant_message = response.choices[0].message

            # No tool calls means the model produced its final answer.
            if not assistant_message.tool_calls:
                self.messages.append({
                    "role": "assistant",
                    "content": assistant_message.content,
                })
                return assistant_message.content

            # Record the assistant turn (with tool calls) before the results
            self.messages.append(assistant_message)
            for tool_call in assistant_message.tool_calls:
                function_name = tool_call.function.name
                arguments = json.loads(tool_call.function.arguments)
                print(f"  Tool: {function_name}")
                print(f"  Args: {arguments}")
                result = self._execute_tool(function_name, arguments)
                print(f"  Result: {result[:200]}...")
                self.messages.append({
                    "role": "tool",
                    "tool_call_id": tool_call.id,
                    "content": result,
                })

        return "Max iterations reached"

    def clear_history(self):
        """Clear conversation history (keeps system prompt)."""
        self.messages = [m for m in self.messages if m.get("role") == "system"]
# Example usage
if __name__ == "__main__":
    agent = DanubeOpenAIAgent(
        openai_api_key="sk-...",
        danube_api_key="dk_...",
        system_prompt="You are a helpful assistant with access to various tools.",
    )
    # Load tools for a specific domain
    agent.load_tools(query="hacker news")

    # Chat
    response = agent.chat("What are the top 5 stories on Hacker News?")
    print(response)

    # Continue the conversation (history is preserved on the agent)
    response = agent.chat("Tell me more about the first one")
    print(response)
Dynamic Tool Loading
You can dynamically load tools based on the conversation:
def dynamic_tool_agent(user_message: str):
    """Agent that dynamically loads relevant tools for each request.

    First asks the model what kind of tools the request needs, then answers
    the request with tools matching that search query.

    Args:
        user_message: The user's request.

    Returns:
        str: The assistant's final response.
    """
    # Step 1: ask the model to classify what tools might be needed
    classification_response = openai_client.chat.completions.create(
        model="gpt-4-turbo-preview",
        messages=[{
            "role": "user",
            "content": f"""Based on this user request, what type of tools would be helpful?
Return a JSON object with a 'tool_query' field containing search terms.
User request: {user_message}""",
        }],
        response_format={"type": "json_object"},
    )
    tool_info = json.loads(classification_response.choices[0].message.content)
    tool_query = tool_info.get("tool_query", user_message)

    # Step 2: answer the request; chat_with_tools loads tools for the query.
    # Fix: the second argument must be a search-query string — previously a
    # list of converted tool definitions was passed here by mistake.
    return chat_with_tools(user_message, tool_query=tool_query)
Best Practices
1. Tool Selection
Don’t load too many tools at once - it increases token usage and can confuse the model:
# Good: load a focused set of tools for the task at hand
tools = danube_client.tools.search("email", limit=5)

# Avoid: loading everything — wastes tokens and confuses the model
tools = danube_client.tools.search("", limit=100)  # Too many
2. Error Handling
Always handle tool execution errors gracefully:
def safe_execute(tool_id: str, params: dict) -> str:
    """Execute a Danube tool, converting any failure into an error string.

    Args:
        tool_id: The Danube tool ID to execute.
        params: Parameters to pass to the tool.

    Returns:
        str: The tool output on success, otherwise a human-readable error.
    """
    try:
        result = danube_client.tools.execute(tool_id=tool_id, parameters=params)
        if result.success:
            return result.content
        return f"Tool returned error: {result.error}"
    except Exception as e:
        return f"Failed to execute tool: {str(e)}"
3. Parameter Validation
Validate parameters before execution:
def validate_and_execute(tool, arguments):
    """Check that all required parameters are present before executing a tool.

    Args:
        tool: A Danube tool object exposing ``id`` and
            ``get_required_parameters()``.
        arguments: Mapping of argument name -> value produced by the model.

    Returns:
        The execution result, or an error string listing missing parameters.
    """
    required_params = [p.name for p in tool.get_required_parameters()]
    missing = [p for p in required_params if p not in arguments]
    if missing:
        return f"Missing required parameters: {missing}"
    return danube_client.tools.execute(tool_id=tool.id, parameters=arguments)
4. Streaming Responses
For long-running tools, consider streaming:
# Stream tokens as they arrive instead of waiting for the full response
response = openai_client.chat.completions.create(
    model="gpt-4-turbo-preview",
    messages=messages,
    tools=tools,
    stream=True,
)
for chunk in response:
    # Tool-call and terminal chunks carry no text content; skip them
    if chunk.choices[0].delta.content:
        print(chunk.choices[0].delta.content, end="")
Complete Working Example
See the full example in the SDK repository:
# Run the bundled OpenAI function-calling example from the SDK repository
cd danube-python
python examples/openai_function_calling.py
