Getting Started
Quick start guide for PiMax agent diagnosis across all supported platforms
Getting Started with PiMax
PiMax integrates seamlessly with your existing agent development workflow. Simply capture traces from your code and share them with Picept for comprehensive failure analysis. This guide shows you how to instrument your agents across all supported platforms.
Supported Platforms
This guide covers integration examples for:
- LangChain/LangGraph - Multi-agent workflows with state management
- OpenAI Agent SDK - Official OpenAI assistants and tools
- Custom OpenAI Clients - Direct OpenAI API implementations
- Custom Anthropic Clients - Claude-based agent systems
Each example pairs minimal setup code with a complete, runnable implementation. All four follow the same basic pattern, sketched below.
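Every integration shares the same shape: call picept.init(...) once at startup, then decorate your agent's entry point with @picept.traced(...). Here is a minimal sketch of that pattern (the identifiers and the run_agent function are placeholders, not part of any real project):

```python
import picept

# One-time setup; replace the placeholder identifiers with your own values
picept.init(
    project_id="your-project-name",
    api_key="your-picept-api-key",
)

@picept.traced("my-agent")  # groups everything this call does under one trace
def run_agent(query: str) -> str:
    # ... your agent logic (LLM calls, tools, routing) goes here ...
    return f"echo: {query}"

run_agent("hello")
```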
Installation
Install the Picept SDK for trace collection:
pip install picept
For the latest features and development updates, check out the Picept repository on GitHub.
Platform-Specific Dependencies
Depending on your agent platform, you’ll also need:
# For OpenAI Agent SDK
pip install openai
# For LangChain/LangGraph
pip install langchain-openai
pip install langgraph
pip install openinference-instrumentation-langchain
# For Anthropic clients
pip install anthropic
# For OpenTelemetry instrumentation (recommended)
pip install opentelemetry-instrumentation-threading
pip install opentelemetry-instrumentation-asyncio
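If you would rather install everything in one step, the same dependencies collapse into a single command:

```bash
pip install picept openai anthropic \
    langchain-openai langgraph openinference-instrumentation-langchain \
    opentelemetry-instrumentation-threading opentelemetry-instrumentation-asyncio
```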
Platform Examples
1. LangChain/LangGraph Integration
Suited to complex multi-agent workflows with shared state. The example below builds a two-agent graph: a manager that handles general queries and delegates weather questions to a specialist agent.
import os
from typing import Literal, Dict, List, Any
from langchain_core.messages import HumanMessage, AIMessage, BaseMessage, SystemMessage
from langchain_openai import ChatOpenAI
from langgraph.checkpoint.memory import MemorySaver
from langgraph.graph import END, StateGraph
from langchain_core.tools import tool
from pydantic import BaseModel, Field
# Import Picept and instrumentors
import picept
from openinference.instrumentation.langchain import LangChainInstrumentor
from opentelemetry.instrumentation.threading import ThreadingInstrumentor
from opentelemetry.instrumentation.asyncio import AsyncioInstrumentor
# Initialize Picept with LangChain instrumentation
picept.init(
project_id="your-project-name",
experiment_id="langchain-experiment",
user_id="your-user-id",
session_id="agent-session",
config_id="agent-config",
context_id="agent-context",
api_key='your-picept-api-key',
# Auto-instrument LangChain operations
integrations=[
LangChainInstrumentor(), # Captures LangChain traces
ThreadingInstrumentor(), # Captures threading operations
AsyncioInstrumentor() # Captures async operations
]
)
# Set your OpenAI API key
os.environ["OPENAI_API_KEY"] = "your-openai-api-key"
@tool
def get_weather(city: str) -> str:
"""Get the current weather in a given city."""
print(f"🌤️ Getting weather for: {city}")
# Simulate weather API call
return f"The weather in {city} is sunny with 72°F"
class MessagesState(BaseModel):
messages: List[BaseMessage] = Field(default_factory=list)
current_agent: str = Field(default="manager")
def manager_agent(state: MessagesState) -> Dict[str, Any]:
"""Main coordination agent - automatically traced by Picept"""
messages = state.messages
# LangChain operations are automatically instrumented
llm = ChatOpenAI(model="gpt-4o-mini", temperature=0)
system_msg = SystemMessage(content="""You are a helpful manager assistant.
If the user asks about weather, respond with: 'I'll check the weather for you. Delegating to weather agent.'
Otherwise, try to help directly.""")
# This LLM call will be captured in traces
response = llm.invoke([system_msg] + messages)
# Route to appropriate agent
next_agent = "weather" if "weather" in str(messages[-1].content).lower() else "manager"
return {
"messages": messages + [response],
"current_agent": next_agent
}
def weather_agent(state: MessagesState) -> Dict[str, Any]:
"""Weather specialist agent - tool usage automatically traced"""
messages = state.messages
# Extract city from user query
human_queries = [msg for msg in messages if isinstance(msg, HumanMessage)]
if not human_queries:
return {
"messages": messages + [AIMessage(content="I need a weather query.")],
"current_agent": "manager"
}
query = human_queries[-1].content.lower()
# Simple city extraction logic
city = "Paris" # Default
if "weather in " in query:
parts = query.split("weather in ")
if len(parts) > 1:
city = parts[1].strip().split()[0].capitalize()
# Tool call - automatically traced by Picept
weather_result = get_weather.invoke(city)
# Format response using LLM - also traced
llm = ChatOpenAI(model="gpt-4o-mini", temperature=0)
format_prompt = f"Format this weather information nicely: {weather_result}"
formatted_response = llm.invoke([HumanMessage(content=format_prompt)])
return {
"messages": messages + [formatted_response],
"current_agent": "manager"
}
def router(state: MessagesState) -> Literal["manager", "weather", "__end__"]:
"""Route between agents based on state"""
if len(state.messages) > 6: # Prevent infinite loops
return END
if state.current_agent == "weather":
return "weather"
elif state.current_agent == "manager":
if len(state.messages) > 0 and isinstance(state.messages[-1], AIMessage):
if "delegating to weather" in state.messages[-1].content.lower():
return "weather"
return END
return "manager"
# Build the LangGraph workflow
workflow = StateGraph(MessagesState)
workflow.add_node("manager", manager_agent)
workflow.add_node("weather", weather_agent)
workflow.set_entry_point("manager")
workflow.add_conditional_edges("manager", router)
workflow.add_conditional_edges("weather", router)
checkpointer = MemorySaver()
app = workflow.compile(checkpointer=checkpointer)
@picept.traced("weather-workflow") # Custom trace for the entire workflow
def run_workflow(query: str):
"""Run the multi-agent workflow - all operations traced"""
print(f"🚀 Starting workflow with query: {query}")
initial_state = MessagesState(
messages=[HumanMessage(content=query)],
current_agent="manager"
)
config = {"configurable": {"thread_id": "weather_demo_thread"}}
# Execute workflow - all LangChain operations automatically traced
final_state = app.invoke(initial_state, config=config)
print("\n📋 Conversation History:")
for i, message in enumerate(final_state["messages"]):
if isinstance(message, HumanMessage):
print(f"{i+1}. Human: {message.content}")
elif isinstance(message, AIMessage):
print(f"{i+1}. AI: {message.content}")
return final_state
# Run the example
if __name__ == "__main__":
result = run_workflow("What is the weather in Paris?")
print("✅ Workflow complete - check Picept dashboard for trace analysis!")
2. OpenAI Agent SDK Integration
For agents built with OpenAI’s official Python SDK, using the beta Assistants API and tool calling.
import picept
from openai import OpenAI
import json
import time  # used to pause between run-status polls
# Initialize Picept for OpenAI agents
picept.init(
project_id="openai-agents",
experiment_id="assistant-experiment",
user_id="openai-user",
api_key='your-picept-api-key'
)
client = OpenAI(api_key="your-openai-api-key")
@picept.traced("openai-assistant")
def create_and_run_assistant():
"""Create an OpenAI assistant and trace its execution"""
# Create assistant with tools
assistant = client.beta.assistants.create(
name="Data Analyst",
instructions="You are a helpful data analyst. Use tools to help analyze data.",
model="gpt-4o-mini",
tools=[
{
"type": "function",
"function": {
"name": "calculate_average",
"description": "Calculate the average of a list of numbers",
"parameters": {
"type": "object",
"properties": {
"numbers": {
"type": "array",
"items": {"type": "number"},
"description": "List of numbers to average"
}
},
"required": ["numbers"]
}
}
}
]
)
# Create thread and message
thread = client.beta.threads.create()
message = client.beta.threads.messages.create(
thread_id=thread.id,
role="user",
content="Calculate the average of these numbers: 10, 20, 30, 40, 50"
)
# Run the assistant - all operations traced
run = client.beta.threads.runs.create(
thread_id=thread.id,
assistant_id=assistant.id
)
    # Poll for completion and handle tool calls
    while run.status in ['queued', 'in_progress', 'requires_action']:
        time.sleep(1)  # brief pause so we don't busy-wait against the API
        run = client.beta.threads.runs.retrieve(thread_id=thread.id, run_id=run.id)
if run.status == 'requires_action':
# Handle tool calls
tool_calls = run.required_action.submit_tool_outputs.tool_calls
tool_outputs = []
for tool_call in tool_calls:
if tool_call.function.name == "calculate_average":
args = json.loads(tool_call.function.arguments)
numbers = args["numbers"]
average = sum(numbers) / len(numbers)
tool_outputs.append({
"tool_call_id": tool_call.id,
"output": f"The average is: {average}"
})
# Submit tool outputs
run = client.beta.threads.runs.submit_tool_outputs(
thread_id=thread.id,
run_id=run.id,
tool_outputs=tool_outputs
)
# Get final messages
messages = client.beta.threads.messages.list(thread_id=thread.id)
return messages, assistant.id
# Run the assistant
if __name__ == "__main__":
messages, assistant_id = create_and_run_assistant()
print("✅ OpenAI Assistant execution traced successfully!")
3. Custom OpenAI Client Integration
For custom implementations using OpenAI’s API directly.
import picept
from openai import OpenAI
import json
# Initialize Picept
picept.init(
project_id="custom-openai",
experiment_id="custom-client",
user_id="custom-user",
api_key='your-picept-api-key'
)
client = OpenAI(api_key="your-openai-api-key")
@picept.traced("custom-openai-agent")
def run_custom_agent(user_input: str):
"""Custom OpenAI agent with manual tracing"""
# Define available tools
tools = [
{
"type": "function",
"function": {
"name": "search_knowledge_base",
"description": "Search internal knowledge base",
"parameters": {
"type": "object",
"properties": {
"query": {"type": "string", "description": "Search query"}
},
"required": ["query"]
}
}
}
]
# Initial conversation
messages = [
{"role": "system", "content": "You are a helpful assistant with access to a knowledge base."},
{"role": "user", "content": user_input}
]
# Make the API call - traced by Picept
response = client.chat.completions.create(
model="gpt-4o-mini",
messages=messages,
tools=tools,
tool_choice="auto"
)
message = response.choices[0].message
messages.append(message)
# Handle tool calls if present
if message.tool_calls:
for tool_call in message.tool_calls:
function_name = tool_call.function.name
function_args = json.loads(tool_call.function.arguments)
# Simulate tool execution
if function_name == "search_knowledge_base":
# Mock knowledge base search
search_result = f"Found information about: {function_args['query']}"
# Add tool response to conversation
messages.append({
"tool_call_id": tool_call.id,
"role": "tool",
"name": function_name,
"content": search_result
})
# Get final response with tool results
final_response = client.chat.completions.create(
model="gpt-4o-mini",
messages=messages
)
return final_response.choices[0].message.content
return message.content
# Usage example
if __name__ == "__main__":
result = run_custom_agent("Tell me about machine learning best practices")
print(f"Agent response: {result}")
print("✅ Custom OpenAI agent execution traced!")
4. Custom Anthropic Client Integration
For agents built with Claude and Anthropic’s API.
import picept
from anthropic import Anthropic
import json
# Initialize Picept for Anthropic agents
picept.init(
project_id="anthropic-agents",
experiment_id="claude-experiment",
user_id="anthropic-user",
api_key='your-picept-api-key'
)
client = Anthropic(api_key="your-anthropic-api-key")
@picept.traced("claude-agent")
def run_claude_agent(user_message: str):
"""Custom Claude agent with tool usage"""
# Define tools for Claude
tools = [
{
"name": "get_stock_price",
"description": "Get current stock price for a given symbol",
"input_schema": {
"type": "object",
"properties": {
"symbol": {
"type": "string",
"description": "Stock symbol (e.g., AAPL, GOOGL)"
}
},
"required": ["symbol"]
}
}
]
# Initial message to Claude
response = client.messages.create(
model="claude-3-5-sonnet-20241022",
max_tokens=1000,
tools=tools,
messages=[
{
"role": "user",
"content": user_message
}
]
)
# Handle tool use if Claude requests it
if response.stop_reason == "tool_use":
tool_use = next(block for block in response.content if block.type == "tool_use")
# Execute the tool
if tool_use.name == "get_stock_price":
symbol = tool_use.input["symbol"]
# Mock stock price lookup
stock_price = f"${150.00 + hash(symbol) % 100:.2f}"
# Continue conversation with tool result
            follow_up_response = client.messages.create(
                model="claude-3-5-sonnet-20241022",
                max_tokens=1000,
                tools=tools,  # required: requests containing tool_use/tool_result blocks must define tools
                messages=[
{"role": "user", "content": user_message},
{"role": "assistant", "content": response.content},
{
"role": "user",
"content": [
{
"type": "tool_result",
"tool_use_id": tool_use.id,
"content": f"Current stock price for {symbol}: {stock_price}"
}
]
}
]
)
return follow_up_response.content[0].text
return response.content[0].text
# Usage example
if __name__ == "__main__":
result = run_claude_agent("What's the current stock price for Apple?")
print(f"Claude response: {result}")
print("✅ Claude agent execution traced!")
Viewing Your Traces
After running any of these examples:
1. Visit your Picept dashboard at https://www.picept.ai/logs
2. Navigate to your project using the project_id you specified
3. Click on a log and select “Start New Analysis” in the top-right corner
4. Review the analysis of your agent execution
5. Implement the suggested optimizations to improve your agent’s reliability