LLMs and Agents
IUDEX provides a way to view your calls to OpenAI, Anthropic, or any other foundation model. For a single request or trigger, you can view the user’s request, the logs, and the agent calls that were made.
The first step is to set up IUDEX in your code base. This will get you basic tracking for foundation model, vector db, and LLM agent frameworks.
This example uses OpenAI and LlamaIndex.
Tracking LLM Agents
Let's say you created a math agent class using llama_index.
import json
from typing import List, Optional, Sequence

from llama_index.core.llms import ChatMessage
from llama_index.core.tools import BaseTool, FunctionTool
from llama_index.llms.openai import OpenAI
from openai.types.chat import ChatCompletionMessageToolCall
def multiply(a: int, b: int) -> int:
    """Multiply two integers and return the result integer."""
    return a * b
# Expose multiply() to the LLM as an OpenAI-compatible tool; the tool name
# and schema are derived from the function signature and docstring.
multiply_tool = FunctionTool.from_defaults(fn=multiply)
def add(a: int, b: int) -> int:
    """Add two integers and return the result integer."""
    total = a + b
    return total
# Expose add() to the LLM as an OpenAI-compatible tool; the tool name and
# schema are derived from the function signature and docstring.
add_tool = FunctionTool.from_defaults(fn=add)
class YourOpenAIAgent:
    """A minimal OpenAI-backed agent that answers chat messages, executing
    any tool calls the model requests before producing a final reply.

    Tools are indexed by name; the chat history lives on the instance so
    consecutive chat() calls share context until reset() is called.
    """

    def __init__(
        self,
        tools: Optional[Sequence[BaseTool]] = None,
        llm: Optional[OpenAI] = None,
        chat_history: Optional[List[ChatMessage]] = None,
    ) -> None:
        """Store the LLM, index the tools by name, and seed the history.

        Args:
            tools: Tools the model may call; defaults to none.
            llm: Chat LLM to use; defaults to a fresh gpt-4o client.
            chat_history: Existing history to continue (kept by reference,
                matching the original behavior); defaults to a new list.
        """
        # None sentinels replace the original mutable [] defaults (which
        # were shared across every instance); the OpenAI client is likewise
        # no longer constructed at import time.
        self._llm = llm if llm is not None else OpenAI(temperature=0, model="gpt-4o")
        self._tools = {tool.metadata.name: tool for tool in (tools or ())}
        self._chat_history = chat_history if chat_history is not None else []

    def reset(self) -> None:
        """Drop all accumulated conversation history."""
        self._chat_history = []

    def chat(self, message: str) -> str:
        """Send a user message, run any requested tools, return the reply.

        The user message and every intermediate assistant/tool message are
        appended to the instance history as a side effect.
        """
        chat_history = self._chat_history
        chat_history.append(ChatMessage(role="user", content=message))
        tools = [
            tool.metadata.to_openai_tool() for _, tool in self._tools.items()
        ]
        ai_message = self._llm.chat(chat_history, tools=tools).message
        additional_kwargs = ai_message.additional_kwargs
        chat_history.append(ai_message)
        tool_calls = additional_kwargs.get("tool_calls", None)
        # If the model asked for tools, run each one, then query the model
        # again so it can compose a final answer from the tool outputs.
        if tool_calls is not None:
            for tool_call in tool_calls:
                function_message = self._call_function(tool_call)
                chat_history.append(function_message)
            ai_message = self._llm.chat(chat_history).message
            chat_history.append(ai_message)
        return ai_message.content

    def _call_function(
        self, tool_call: ChatCompletionMessageToolCall
    ) -> ChatMessage:
        """Execute one tool call and wrap its output as a 'tool' message.

        Raises:
            KeyError: if the model names a tool that was not registered.
        """
        id_ = tool_call.id
        function_call = tool_call.function
        tool = self._tools[function_call.name]
        # Arguments arrive from the OpenAI API as a JSON-encoded string.
        output = tool(**json.loads(function_call.arguments))
        return ChatMessage(
            name=function_call.name,
            content=str(output),
            role="tool",
            additional_kwargs={
                "tool_call_id": id_,
                "name": function_call.name,
            },
        )
# My math agent! Instantiated with both arithmetic tools registered.
math_agent = YourOpenAIAgent(tools=[multiply_tool, add_tool])
Now that you have an agent, you can track the agent’s calls by instrumenting the file and adding @trace
to the methods you want to track.
from iudex import instrument, trace

# Instrument the process before other application imports run so IUDEX can
# hook the LLM/agent libraries as they load.
instrument(
    service_name="math_agent",
    env="prod",
    iudex_api_key="WRITE_ONLY_IUDEX_KEY",  # only ever commit your WRITE ONLY key
)
# Other imports below iudex ...

class YourOpenAIAgent:
    """Same agent as above, with IUDEX tracing added to the methods whose
    calls should appear in the IUDEX UI (method bodies elided)."""

    def __init__(
        self,
        tools: Sequence[BaseTool] = [],
        llm: OpenAI = OpenAI(temperature=0, model="gpt-4o"),
        chat_history: List[ChatMessage] = [],
    ) -> None:
        ...

    def reset(self) -> None:
        ...

    @trace  # records each chat() call as a traced span
    def chat(self, message: str) -> str:
        ...

    @trace  # records each tool invocation as a traced span
    def _call_function(
        self, tool_call: ChatCompletionMessageToolCall
    ) -> ChatMessage:
        ...
Now you can view the agent’s calls in the IUDEX UI!