from opperai import Opper
import time
opper = Opper(http_bearer="YOUR_API_KEY")

# Create a uniquely-named function (timestamp suffix avoids name collisions
# on repeated runs) and call it once so a trace is generated to inspect.
unique_name = f"research_assistant_{int(time.time())}"
created_function = opper.functions.create(
    name=unique_name,
    description="Research assistant that provides detailed analysis",
    instructions=(
        "You are a research assistant. Provide detailed, well-researched "
        "answers with multiple perspectives and cite your reasoning process."
    ),
    model="openai/gpt-4o-mini",
)
print(f"Created function: {created_function.name}")

# Invoke the function; every call is recorded as a span inside a trace.
response = opper.functions.call(
    function_id=created_function.id,
    input={
        "question": "What are the key factors that led to the success of the Tesla Model S?",
        "context": "I'm researching electric vehicle market adoption",
    },
    tags={"research_type": "automotive", "complexity": "detailed"},
)
print(f"Function call completed. Span ID: {response.span_id}")

# Spans belong to traces: look up the span to find its parent trace id.
span = opper.spans.get(span_id=response.span_id)
trace_id = span.trace_id
print(f"Generated trace ID: {trace_id}")
def print_span_hierarchy(spans, parent_id, depth):
    """Recursively print every span whose parent is *parent_id*.

    Each nesting level is indented one extra space so the tree structure
    of the trace is visible in plain-text output.
    """
    for node in spans:
        if node.parent_id != parent_id:
            continue
        print(f"{' ' * depth}├─ {node.name} ({node.duration_ms}ms)")
        print_span_hierarchy(spans, node.id, depth + 1)
# Retrieve the trace together with every span recorded under it.
trace = opper.traces.get(trace_id=trace_id)

print(f"🔍 Trace Analysis: {trace.name}")
print(f"ID: {trace.id}")
print(f"Duration: {trace.duration_ms}ms")
print(f"Status: {trace.status}")
print(f"Total Tokens: {trace.total_tokens:,}")
if trace.input:
    print(f"Input: {trace.input[:100]}...")
if trace.output:
    print(f"Output: {trace.output[:100]}...")

print(f"\n📊 Span Breakdown ({len(trace.spans)} spans):")

# Bucket the spans by type ("generation", "call", ...) for the breakdown.
span_types = {}
for item in trace.spans:
    span_types.setdefault(item.type or "unknown", []).append(item)

for kind, members in span_types.items():
    print(f"\n{kind.upper()} spans ({len(members)}):")
    for item in members:
        print(f" • {item.name}")
        if item.duration_ms:
            print(f" Duration: {item.duration_ms}ms")
        else:
            print(" Duration: N/A")
        if item.data and item.data.model:
            print(f" Model: {item.data.model}")
        if item.data and item.data.total_tokens:
            print(f" Tokens: {item.data.total_tokens}")
        if item.score:
            print(f" Score: {item.score}/10")
        if item.error:
            print(f" ❌ Error: {item.error}")
        # Attached metrics (e.g. expert feedback), when present.
        if item.metrics:
            print(" 📈 Metrics:")
            for metric in item.metrics:
                print(f" - {metric.dimension}: {metric.value}")

# Reconstruct the parent/child execution tree from parent_id links:
# spans without a parent are roots.
print("\n🔄 Execution Flow:")
for root in [s for s in trace.spans if not s.parent_id]:
    print(f"🏁 {root.name}")
    print_span_hierarchy(trace.spans, root.id, 1)

# Rough timing attribution: share of the trace spent in LLM generation,
# and the average duration of "call" spans.
print("\n⚡ Performance Insights:")
if trace.duration_ms:
    total_duration = trace.duration_ms
    generation_spans = [s for s in trace.spans if s.type == "generation"]
    if generation_spans:
        generation_time = sum(s.duration_ms for s in generation_spans if s.duration_ms)
        share = generation_time / total_duration * 100
        print(f"LLM Generation Time: {generation_time}ms ({share:.1f}%)")
    call_spans = [s for s in trace.spans if s.type == "call"]
    if call_spans:
        call_durations = [s.duration_ms for s in call_spans if s.duration_ms]
        if call_durations:
            avg_call_duration = sum(call_durations) / len(call_durations)
            print(f"Average Call Duration: {avg_call_duration:.1f}ms")

{
"id": "3c90c3cc-0d44-4b50-8888-8dd25736052a",
"start_time": "2023-11-07T05:31:56Z",
"end_time": "2023-11-07T05:31:56Z",
"duration_ms": 123,
"status": "<string>",
"name": "<string>",
"input": "<string>",
"output": "<string>",
"total_tokens": 123,
"spans": [
{
"id": "3c90c3cc-0d44-4b50-8888-8dd25736052a",
"name": "<string>",
"start_time": "2023-11-07T05:31:56Z",
"type": "generation",
"parent_id": "3c90c3cc-0d44-4b50-8888-8dd25736052a",
"end_time": "2023-11-07T05:31:56Z",
"duration_ms": 123,
"error": "<string>",
"meta": {},
"data": {
"input": "<string>",
"output": "<string>",
"total_tokens": 123,
"model": "<string>",
"instructions": "<string>",
"function": "<string>",
"tags": [
"<string>"
],
"score": 123,
"generation_id": "3c90c3cc-0d44-4b50-8888-8dd25736052a",
"observations": "<string>"
},
"metrics": [
{
"id": "3c90c3cc-0d44-4b50-8888-8dd25736052a",
"dimension": "<string>",
"value": 123,
"created_at": "2023-11-07T05:31:56Z",
"comment": "Expert feedback"
}
],
"score": 123
}
]
}

Get a trace by its id
from opperai import Opper
import time
opper = Opper(http_bearer="YOUR_API_KEY")

# Create a uniquely-named function (timestamp suffix avoids name collisions
# on repeated runs) and call it once so a trace is generated to inspect.
unique_name = f"research_assistant_{int(time.time())}"
created_function = opper.functions.create(
    name=unique_name,
    description="Research assistant that provides detailed analysis",
    instructions=(
        "You are a research assistant. Provide detailed, well-researched "
        "answers with multiple perspectives and cite your reasoning process."
    ),
    model="openai/gpt-4o-mini",
)
print(f"Created function: {created_function.name}")

# Invoke the function; every call is recorded as a span inside a trace.
response = opper.functions.call(
    function_id=created_function.id,
    input={
        "question": "What are the key factors that led to the success of the Tesla Model S?",
        "context": "I'm researching electric vehicle market adoption",
    },
    tags={"research_type": "automotive", "complexity": "detailed"},
)
print(f"Function call completed. Span ID: {response.span_id}")

# Spans belong to traces: look up the span to find its parent trace id.
span = opper.spans.get(span_id=response.span_id)
trace_id = span.trace_id
print(f"Generated trace ID: {trace_id}")
def print_span_hierarchy(spans, parent_id, depth):
    """Recursively print every span whose parent is *parent_id*.

    Each nesting level is indented one extra space so the tree structure
    of the trace is visible in plain-text output.
    """
    for node in spans:
        if node.parent_id != parent_id:
            continue
        print(f"{' ' * depth}├─ {node.name} ({node.duration_ms}ms)")
        print_span_hierarchy(spans, node.id, depth + 1)
# Retrieve the trace together with every span recorded under it.
trace = opper.traces.get(trace_id=trace_id)

print(f"🔍 Trace Analysis: {trace.name}")
print(f"ID: {trace.id}")
print(f"Duration: {trace.duration_ms}ms")
print(f"Status: {trace.status}")
print(f"Total Tokens: {trace.total_tokens:,}")
if trace.input:
    print(f"Input: {trace.input[:100]}...")
if trace.output:
    print(f"Output: {trace.output[:100]}...")

print(f"\n📊 Span Breakdown ({len(trace.spans)} spans):")

# Bucket the spans by type ("generation", "call", ...) for the breakdown.
span_types = {}
for item in trace.spans:
    span_types.setdefault(item.type or "unknown", []).append(item)

for kind, members in span_types.items():
    print(f"\n{kind.upper()} spans ({len(members)}):")
    for item in members:
        print(f" • {item.name}")
        if item.duration_ms:
            print(f" Duration: {item.duration_ms}ms")
        else:
            print(" Duration: N/A")
        if item.data and item.data.model:
            print(f" Model: {item.data.model}")
        if item.data and item.data.total_tokens:
            print(f" Tokens: {item.data.total_tokens}")
        if item.score:
            print(f" Score: {item.score}/10")
        if item.error:
            print(f" ❌ Error: {item.error}")
        # Attached metrics (e.g. expert feedback), when present.
        if item.metrics:
            print(" 📈 Metrics:")
            for metric in item.metrics:
                print(f" - {metric.dimension}: {metric.value}")

# Reconstruct the parent/child execution tree from parent_id links:
# spans without a parent are roots.
print("\n🔄 Execution Flow:")
for root in [s for s in trace.spans if not s.parent_id]:
    print(f"🏁 {root.name}")
    print_span_hierarchy(trace.spans, root.id, 1)

# Rough timing attribution: share of the trace spent in LLM generation,
# and the average duration of "call" spans.
print("\n⚡ Performance Insights:")
if trace.duration_ms:
    total_duration = trace.duration_ms
    generation_spans = [s for s in trace.spans if s.type == "generation"]
    if generation_spans:
        generation_time = sum(s.duration_ms for s in generation_spans if s.duration_ms)
        share = generation_time / total_duration * 100
        print(f"LLM Generation Time: {generation_time}ms ({share:.1f}%)")
    call_spans = [s for s in trace.spans if s.type == "call"]
    if call_spans:
        call_durations = [s.duration_ms for s in call_spans if s.duration_ms]
        if call_durations:
            avg_call_duration = sum(call_durations) / len(call_durations)
            print(f"Average Call Duration: {avg_call_duration:.1f}ms")

{
"id": "3c90c3cc-0d44-4b50-8888-8dd25736052a",
"start_time": "2023-11-07T05:31:56Z",
"end_time": "2023-11-07T05:31:56Z",
"duration_ms": 123,
"status": "<string>",
"name": "<string>",
"input": "<string>",
"output": "<string>",
"total_tokens": 123,
"spans": [
{
"id": "3c90c3cc-0d44-4b50-8888-8dd25736052a",
"name": "<string>",
"start_time": "2023-11-07T05:31:56Z",
"type": "generation",
"parent_id": "3c90c3cc-0d44-4b50-8888-8dd25736052a",
"end_time": "2023-11-07T05:31:56Z",
"duration_ms": 123,
"error": "<string>",
"meta": {},
"data": {
"input": "<string>",
"output": "<string>",
"total_tokens": 123,
"model": "<string>",
"instructions": "<string>",
"function": "<string>",
"tags": [
"<string>"
],
"score": 123,
"generation_id": "3c90c3cc-0d44-4b50-8888-8dd25736052a",
"observations": "<string>"
},
"metrics": [
{
"id": "3c90c3cc-0d44-4b50-8888-8dd25736052a",
"dimension": "<string>",
"value": 123,
"created_at": "2023-11-07T05:31:56Z",
"comment": "Expert feedback"
}
],
"score": 123
}
]
}

Bearer authentication header of the form Bearer <token>, where <token> is your auth token.
The id of the trace to get
Successful Response
The id of the trace
The start time of the trace
The end time of the trace
The duration of the trace
The status of the trace
The name of the trace, set to the name of the root span of the trace
The input of the trace, set to the input of the root span of the trace
The output of the trace, set to the output of the root span of the trace
The total tokens of the trace
The spans of the trace
Show child attributes