Blue Guardrails
Getting started

Getting started

Set up Blue Guardrails to monitor hallucinations in your AI application

Blue Guardrails detects hallucinations in LLM-powered applications or workflows. It analyzes the model's responses and identifies when it fabricates information or contradicts provided context.

To connect your AI application to Blue Guardrails, follow this guide. It walks through sending model inputs and outputs from your application to your Blue Guardrails workspace.

Create an API key

To send data to Blue Guardrails, you need to create an API key.

Find the API Keys section in the sidebar menu and create a new API key.

You can scope an API key to a single workspace by selecting that workspace when you create the key. A user-scoped key, by contrast, has access to every resource you have access to, across all workspaces.

Copy the API key and store it safely — the full key is displayed only once.

Install the SDK

Blue Guardrails receives data from your AI application via OpenTelemetry traces. To send traces from your application, use the Logfire SDK (developed by the Pydantic team). It provides automatic instrumentation for popular LLM providers and frameworks.

Install it with uv:

uv add logfire

Or with pip:

pip install logfire

Send your first traces

Configure the SDK to send traces to Blue Guardrails instead of Logfire's own platform. Set the environment variables before importing your LLM framework.

# Send OpenAI chat-completion traces to Blue Guardrails via the Logfire SDK.
import os

# The OTLP exporter variables must be set BEFORE importing the LLM SDK so the
# instrumentation picks them up.
# Replace YOUR_WORKSPACE_ID and YOUR_API_KEY with your own values (e.g. string
# constants) — as written these names are undefined and would raise NameError.
os.environ['OTEL_EXPORTER_OTLP_TRACES_ENDPOINT'] = 'https://api.blueguardrails.com/v1/traces'
os.environ['OTEL_EXPORTER_OTLP_HEADERS'] = (
    f'x-workspace-id={YOUR_WORKSPACE_ID},'
    f'Authorization=Bearer {YOUR_API_KEY}'
)
# NOTE(review): the OTel spec expects baggage-encoded header values, so the
# space in 'Bearer ' may need percent-encoding (%20) — confirm the exporter
# in use accepts a raw space.

import openai
import logfire

client = openai.Client()
# send_to_logfire=False keeps traces off Logfire's own platform; they go only
# to the OTLP endpoint configured above.
logfire.configure(send_to_logfire=False)
logfire.instrument_openai()

# Minimal chat completion; the instrumented client emits a trace for this call.
response = client.chat.completions.create(
    model='gpt-4o-mini',
    messages=[
        {'role': 'system', 'content': 'Answer the question based on the context.'},
        {'role': 'user', 'content': 'Context: Revenue in Q1 was $85 Billion.'},
        {'role': 'user', 'content': 'What was the revenue in Q1?'},
    ]
)
print(response.choices[0].message.content)
# Send Anthropic message traces to Blue Guardrails via the Logfire SDK.
import os

# The OTLP exporter variables must be set BEFORE importing the LLM SDK so the
# instrumentation picks them up.
# Replace YOUR_WORKSPACE_ID and YOUR_API_KEY with your own values — as written
# these names are undefined and would raise NameError.
os.environ['OTEL_EXPORTER_OTLP_TRACES_ENDPOINT'] = 'https://api.blueguardrails.com/v1/traces'
os.environ['OTEL_EXPORTER_OTLP_HEADERS'] = (
    f'x-workspace-id={YOUR_WORKSPACE_ID},'
    f'Authorization=Bearer {YOUR_API_KEY}'
)

import anthropic
import logfire

client = anthropic.Anthropic()
# send_to_logfire=False keeps traces off Logfire's own platform; they go only
# to the OTLP endpoint configured above.
logfire.configure(send_to_logfire=False)
logfire.instrument_anthropic()

# Minimal messages call; the instrumented client emits a trace for this call.
response = client.messages.create(
    max_tokens=1000,
    model='claude-3-5-haiku-latest',
    system='Answer the question based on the context.',
    messages=[
        {'role': 'user', 'content': 'Context: Revenue in Q1 was $85 Billion.'},
        {'role': 'user', 'content': 'What was the revenue in Q1?'}
    ],
)
print(response.content[0].text)
# Send Google GenAI traces to Blue Guardrails via the Logfire SDK.
import os

# The OTLP exporter variables must be set BEFORE importing the LLM SDK so the
# instrumentation picks them up.
# Replace YOUR_WORKSPACE_ID and YOUR_API_KEY with your own values — as written
# these names are undefined and would raise NameError.
os.environ['OTEL_EXPORTER_OTLP_TRACES_ENDPOINT'] = 'https://api.blueguardrails.com/v1/traces'
os.environ['OTEL_EXPORTER_OTLP_HEADERS'] = (
    f'x-workspace-id={YOUR_WORKSPACE_ID},'
    f'Authorization=Bearer {YOUR_API_KEY}'
)
# Opt in to including prompt/response text in the emitted spans; without this
# the GenAI instrumentation presumably omits message content — confirm against
# the instrumentation's docs.
os.environ['OTEL_INSTRUMENTATION_GENAI_CAPTURE_MESSAGE_CONTENT'] = 'true'

from google.genai import Client
import logfire

client = Client()
# send_to_logfire=False keeps traces off Logfire's own platform; they go only
# to the OTLP endpoint configured above.
logfire.configure(send_to_logfire=False)
logfire.instrument_google_genai()

# System instruction, context, and question are folded into one prompt string.
message = '''Answer the question based on the context.
Context: Revenue in Q1 was $85 Billion.
What was the revenue in Q1?'''
response = client.models.generate_content(model='gemini-2.0-flash', contents=[message])
print(response.text)
# Send Pydantic AI agent traces to Blue Guardrails via the Logfire SDK.
import os

# The OTLP exporter variables must be set BEFORE importing the agent framework
# so the instrumentation picks them up.
# Replace YOUR_WORKSPACE_ID and YOUR_API_KEY with your own values — as written
# these names are undefined and would raise NameError.
os.environ['OTEL_EXPORTER_OTLP_TRACES_ENDPOINT'] = 'https://api.blueguardrails.com/v1/traces'
os.environ['OTEL_EXPORTER_OTLP_HEADERS'] = (
    f'x-workspace-id={YOUR_WORKSPACE_ID},'
    f'Authorization=Bearer {YOUR_API_KEY}'
)

import logfire
from pydantic_ai import Agent

# send_to_logfire=False keeps traces off Logfire's own platform; they go only
# to the OTLP endpoint configured above.
logfire.configure(send_to_logfire=False)
logfire.instrument_pydantic_ai()

# NOTE(review): recent pydantic-ai releases renamed `result_type` to
# `output_type` and `result.data` to `result.output` — confirm which SDK
# version this guide targets.
agent = Agent(
    'openai:gpt-4o-mini',
    result_type=str,
    system_prompt='Answer the question based on the context.',
)

result = agent.run_sync(
    'Context: Revenue in Q1 was $85 Billion.\nWhat was the revenue in Q1?'
)
print(result.data)
# Send Haystack generator traces to Blue Guardrails by instrumenting the
# provider SDKs (OpenAI, Anthropic, Google GenAI) that Haystack wraps.
import os

# The OTLP exporter variables must be set BEFORE importing the LLM SDKs so the
# instrumentation picks them up.
# Replace YOUR_WORKSPACE_ID and YOUR_API_KEY with your own values — as written
# these names are undefined and would raise NameError.
os.environ['OTEL_EXPORTER_OTLP_TRACES_ENDPOINT'] = 'https://api.blueguardrails.com/v1/traces'
os.environ['OTEL_EXPORTER_OTLP_HEADERS'] = (
    f'x-workspace-id={YOUR_WORKSPACE_ID},'
    f'Authorization=Bearer {YOUR_API_KEY}'
)
# Opt in to including prompt/response text in the GenAI spans.
os.environ['OTEL_INSTRUMENTATION_GENAI_CAPTURE_MESSAGE_CONTENT'] = 'true'

import logfire
from haystack.dataclasses import ChatMessage
from haystack.components.generators.chat import OpenAIChatGenerator
from haystack_integrations.components.generators.anthropic import AnthropicChatGenerator
from haystack_integrations.components.generators.google_genai import GoogleGenAIChatGenerator

# Haystack doesn't have built-in logfire support.
# Instrument the underlying SDKs instead.
logfire.configure(send_to_logfire=False)
logfire.instrument_openai()
logfire.instrument_anthropic()
logfire.instrument_google_genai()

# OpenAI
openai_generator = OpenAIChatGenerator(model='gpt-4o-mini')
openai_response = openai_generator.run([
    ChatMessage.from_system('Answer the question based on the context.'),
    ChatMessage.from_user('Context: Revenue in Q1 was $85 Billion.\nWhat was the revenue in Q1?')
])
print(openai_response)

# Anthropic
anthropic_generator = AnthropicChatGenerator(model='claude-3-5-haiku-latest')
anthropic_response = anthropic_generator.run([
    ChatMessage.from_system('Answer the question based on the context.'),
    ChatMessage.from_user('Context: Revenue in Q1 was $85 Billion.\nWhat was the revenue in Q1?')
])
print(anthropic_response)

# Google GenAI
google_generator = GoogleGenAIChatGenerator(model='gemini-2.0-flash')
google_response = google_generator.run(messages=[
    ChatMessage.from_system('Answer the question based on the context.'),
    ChatMessage.from_user('Context: Revenue in Q1 was $85 Billion.\nWhat was the revenue in Q1?')
])
print(google_response)
# Send LangGraph agent traces to Blue Guardrails via LangSmith's OTEL export.
import os

# Set these before importing langchain
os.environ['LANGSMITH_OTEL_ENABLED'] = 'true'
# LANGSMITH_OTEL_ONLY presumably routes traces only to the OTLP endpoint and
# skips the LangSmith platform — confirm against LangSmith's docs.
os.environ['LANGSMITH_OTEL_ONLY'] = 'true'
os.environ['LANGSMITH_TRACING'] = 'true'

# Replace YOUR_WORKSPACE_ID and YOUR_API_KEY with your own values — as written
# these names are undefined and would raise NameError.
os.environ['OTEL_EXPORTER_OTLP_TRACES_ENDPOINT'] = 'https://api.blueguardrails.com/v1/traces'
os.environ['OTEL_EXPORTER_OTLP_HEADERS'] = (
    f'x-workspace-id={YOUR_WORKSPACE_ID},'
    f'Authorization=Bearer {YOUR_API_KEY}'
)

import logfire
from langgraph.prebuilt import create_react_agent

# send_to_logfire=False keeps traces off Logfire's own platform.
logfire.configure(send_to_logfire=False)

# Minimal ReAct agent with no tools; LangSmith emits the traces for this run.
agent = create_react_agent('openai:gpt-4o-mini', tools=[], name='agent')
result = agent.invoke({
    'messages': [{
        'role': 'user',
        'content': 'Context: Revenue in Q1 was $85 Billion.\nWhat was the revenue in Q1?'
    }]
})
print(result['messages'][-1].content)
# Send Claude Agent SDK traces to Blue Guardrails via LangSmith's OTEL export.
import asyncio
import os

# These environment variables need to be set BEFORE importing langsmith or claude_agent_sdk
os.environ['LANGSMITH_OTEL_ENABLED'] = 'true'
# LANGSMITH_OTEL_ONLY presumably routes traces only to the OTLP endpoint and
# skips the LangSmith platform — confirm against LangSmith's docs.
os.environ['LANGSMITH_OTEL_ONLY'] = 'true'
os.environ['LANGSMITH_TRACING'] = 'true'

# Replace YOUR_WORKSPACE_ID and YOUR_API_KEY with your own values — as written
# these names are undefined and would raise NameError.
os.environ['OTEL_EXPORTER_OTLP_TRACES_ENDPOINT'] = 'https://api.blueguardrails.com/v1/traces'
os.environ['OTEL_EXPORTER_OTLP_HEADERS'] = (
    f'x-workspace-id={YOUR_WORKSPACE_ID},'
    f'Authorization=Bearer {YOUR_API_KEY}'
)

import logfire
from claude_agent_sdk import ClaudeAgentOptions, ClaudeSDKClient
from langsmith.integrations.claude_agent_sdk import configure_claude_agent_sdk

# send_to_logfire=False keeps traces off Logfire's own platform.
logfire.configure(send_to_logfire=False)

# Instrument the Claude Agent SDK with Langsmith
configure_claude_agent_sdk()

async def main() -> None:
    """Run a single traced agent session that edits a local file."""
    # Restrict the agent to read/edit/glob tools and auto-accept its edits.
    options = ClaudeAgentOptions(
        allowed_tools=['Read', 'Edit', 'Glob'],
        permission_mode='acceptEdits',
    )

    async with ClaudeSDKClient(options=options) as client:
        await client.query(
            'Read sample_code.py and convert all numpy-style docstrings to Google-style docstrings. Edit the file in place.'
        )

        # Stream the response; blocks with `.text` are assistant text, blocks
        # with `.name` are tool invocations.
        async for message in client.receive_response():
            if hasattr(message, 'content'):
                for block in message.content:
                    if hasattr(block, 'text'):
                        print(block.text)
                    elif hasattr(block, 'name'):
                        print(f'Tool: {block.name}')

asyncio.run(main())

After running your code, traces appear in Blue Guardrails within a few seconds. Open your workspace and click Dashboard to see incoming messages and hallucination metrics.

Next steps

On this page