Getting started
Set up Blue Guardrails to monitor hallucinations in your AI application
Blue Guardrails detects hallucinations in LLM-powered applications. It analyzes your AI's responses and identifies when the model fabricates information or contradicts provided context.
This guide walks you through connecting your application to Blue Guardrails.
Create an API key
- Click API Keys in the sidebar.
- Click Create API Key.
- Enter a name, select a workspace scope, and choose an expiration period.
- Click Create.
- Copy the key using the copy button. You won't see it again after closing the dialog.
You'll also need your workspace ID. Click the copy icon next to the workspace selector at the top of the sidebar to copy it.
Install the SDK
Blue Guardrails receives traces via OpenTelemetry. The Logfire SDK (maintained by the Pydantic team) provides automatic instrumentation for popular LLM frameworks.
Install it with uv or pip:
uv add logfire

Or with pip:
pip install logfire

Some frameworks require additional dependencies. Add them as needed:
pip install 'logfire[openai]' # For OpenAI
pip install 'logfire[google-genai]' # For Google GenAI
pip install 'logfire[pydantic-ai]' # For Pydantic AI

Send your first traces
Configure the SDK to send traces to Blue Guardrails instead of Logfire's own platform. Set the environment variables before importing your LLM framework.
# Route traces to Blue Guardrails instead of Logfire's platform.
# The OTLP variables are set before the LLM framework is imported.
import os

os.environ['OTEL_EXPORTER_OTLP_TRACES_ENDPOINT'] = 'https://api.blueguardrails.com/v1/traces'
os.environ['OTEL_EXPORTER_OTLP_HEADERS'] = (
    f'x-workspace-id={YOUR_WORKSPACE_ID},'
    f'Authorization=Bearer {YOUR_API_KEY}'
)

import openai
import logfire

client = openai.Client()

# send_to_logfire=False keeps traces off Logfire's own backend; they go
# to the OTLP endpoint configured above instead.
logfire.configure(send_to_logfire=False)
logfire.instrument_openai()

conversation = [
    {'role': 'system', 'content': 'Answer the question based on the context.'},
    {'role': 'user', 'content': 'Context: Revenue in Q1 was $85 Billion.'},
    {'role': 'user', 'content': 'What was the revenue in Q1?'},
]
response = client.chat.completions.create(
    model='gpt-4o-mini',
    messages=conversation,
)
print(response.choices[0].message.content)
import os

# Point the OTLP exporter at Blue Guardrails before importing the SDK.
os.environ['OTEL_EXPORTER_OTLP_TRACES_ENDPOINT'] = 'https://api.blueguardrails.com/v1/traces'
os.environ['OTEL_EXPORTER_OTLP_HEADERS'] = (
    f'x-workspace-id={YOUR_WORKSPACE_ID},'
    f'Authorization=Bearer {YOUR_API_KEY}'
)

import anthropic
import logfire

client = anthropic.Anthropic()

logfire.configure(send_to_logfire=False)  # do not export to Logfire's backend
logfire.instrument_anthropic()

conversation = [
    {'role': 'user', 'content': 'Context: Revenue in Q1 was $85 Billion.'},
    {'role': 'user', 'content': 'What was the revenue in Q1?'},
]
response = client.messages.create(
    max_tokens=1000,
    model='claude-3-5-haiku-latest',
    system='Answer the question based on the context.',
    messages=conversation,
)
print(response.content[0].text)
import os

os.environ['OTEL_EXPORTER_OTLP_TRACES_ENDPOINT'] = 'https://api.blueguardrails.com/v1/traces'
os.environ['OTEL_EXPORTER_OTLP_HEADERS'] = (
    f'x-workspace-id={YOUR_WORKSPACE_ID},'
    f'Authorization=Bearer {YOUR_API_KEY}'
)
# Opt in to capturing message bodies — presumably the GenAI
# instrumentation redacts content without this flag; verify against the
# instrumentation docs.
os.environ['OTEL_INSTRUMENTATION_GENAI_CAPTURE_MESSAGE_CONTENT'] = 'true'

from google.genai import Client
import logfire

client = Client()

logfire.configure(send_to_logfire=False)
logfire.instrument_google_genai()

prompt = '''Answer the question based on the context.
Context: Revenue in Q1 was $85 Billion.
What was the revenue in Q1?'''
response = client.models.generate_content(model='gemini-2.0-flash', contents=[prompt])
print(response.text)
import os

os.environ['OTEL_EXPORTER_OTLP_TRACES_ENDPOINT'] = 'https://api.blueguardrails.com/v1/traces'
os.environ['OTEL_EXPORTER_OTLP_HEADERS'] = (
    f'x-workspace-id={YOUR_WORKSPACE_ID},'
    f'Authorization=Bearer {YOUR_API_KEY}'
)

import logfire
from pydantic_ai import Agent

logfire.configure(send_to_logfire=False)
logfire.instrument_pydantic_ai()

# NOTE(review): newer pydantic-ai releases rename `result_type` to
# `output_type` and `result.data` to `result.output` — confirm which SDK
# version this guide targets before updating.
agent = Agent(
    'openai:gpt-4o-mini',
    result_type=str,
    system_prompt='Answer the question based on the context.',
)
result = agent.run_sync(
    'Context: Revenue in Q1 was $85 Billion.\nWhat was the revenue in Q1?'
)
print(result.data)
import os

os.environ['OTEL_EXPORTER_OTLP_TRACES_ENDPOINT'] = 'https://api.blueguardrails.com/v1/traces'
os.environ['OTEL_EXPORTER_OTLP_HEADERS'] = (
    f'x-workspace-id={YOUR_WORKSPACE_ID},'
    f'Authorization=Bearer {YOUR_API_KEY}'
)
# Opt in to capturing message bodies for the GenAI instrumentation.
os.environ['OTEL_INSTRUMENTATION_GENAI_CAPTURE_MESSAGE_CONTENT'] = 'true'

import logfire
from haystack.dataclasses import ChatMessage
from haystack.components.generators.chat import OpenAIChatGenerator
from haystack_integrations.components.generators.anthropic import AnthropicChatGenerator
from haystack_integrations.components.generators.google_genai import GoogleGenAIChatGenerator

# Haystack ships no logfire integration of its own, so instrument the
# provider SDKs its generators wrap.
logfire.configure(send_to_logfire=False)
logfire.instrument_openai()
logfire.instrument_anthropic()
logfire.instrument_google_genai()

SYSTEM_PROMPT = 'Answer the question based on the context.'
USER_PROMPT = 'Context: Revenue in Q1 was $85 Billion.\nWhat was the revenue in Q1?'


def _conversation():
    # Fresh system + user message pair for each generator run.
    return [ChatMessage.from_system(SYSTEM_PROMPT), ChatMessage.from_user(USER_PROMPT)]


# OpenAI
openai_response = OpenAIChatGenerator(model='gpt-4o-mini').run(_conversation())
print(openai_response)

# Anthropic
anthropic_response = AnthropicChatGenerator(model='claude-3-5-haiku-latest').run(_conversation())
print(anthropic_response)

# Google GenAI
google_response = GoogleGenAIChatGenerator(model='gemini-2.0-flash').run(messages=_conversation())
print(google_response)
import os

# LangSmith reads these at import time, so set them before any
# langchain/langgraph import. LANGSMITH_OTEL_ONLY presumably suppresses
# export to LangSmith's own backend — verify against LangSmith docs.
os.environ['LANGSMITH_OTEL_ENABLED'] = 'true'
os.environ['LANGSMITH_OTEL_ONLY'] = 'true'
os.environ['LANGSMITH_TRACING'] = 'true'
os.environ['OTEL_EXPORTER_OTLP_TRACES_ENDPOINT'] = 'https://api.blueguardrails.com/v1/traces'
os.environ['OTEL_EXPORTER_OTLP_HEADERS'] = (
    f'x-workspace-id={YOUR_WORKSPACE_ID},'
    f'Authorization=Bearer {YOUR_API_KEY}'
)

import logfire
from langgraph.prebuilt import create_react_agent

logfire.configure(send_to_logfire=False)

agent = create_react_agent('openai:gpt-4o-mini', tools=[], name='agent')
result = agent.invoke({
    'messages': [{
        'role': 'user',
        'content': 'Context: Revenue in Q1 was $85 Billion.\nWhat was the revenue in Q1?'
    }]
})
print(result['messages'][-1].content)

After running your code, traces appear in Blue Guardrails within a few seconds. Open your workspace and click Dashboard to see incoming messages and hallucination metrics.