Blue Guardrails

Create Experiment

Run an issue-detection experiment on traced conversations.

POST
/v1/experiments

Header Parameters

x-workspace-id?string

Workspace UUID for session-authenticated calls.

Request Body

application/json

name*string

Name of the experiment

Length: 1 <= length <= 255
model*string

Target model to run inference with

Length: 1 <= length
pass_through?boolean

If true, copy existing responses instead of running inference. Useful for re-evaluating existing conversations without regenerating responses.

Default: false
conversation_ids*array<string>

Conversation IDs to include

Items: 1 <= items <= 100
reasoning_config?object

Optional reasoning/thinking configuration for the model

system_prompt_override?string

Optional system prompt to replace the original in all copied conversations

output_schema?array<object>

Optional structured output schema for the model response

Response Body

application/json

application/json

curl -X POST "https://loading/v1/experiments" \
  -H "Content-Type: application/json" \
  -d '{
    "name": "string",
    "model": "string",
    "conversation_ids": [
      "497f6eca-6276-4993-bfeb-53cbbbba6f08"
    ]
  }'
{
  "id": "497f6eca-6276-4993-bfeb-53cbbbba6f08",
  "name": "string",
  "model": "string",
  "pass_through": true,
  "conversation_ids": [
    "string"
  ],
  "created_at": "2019-08-24T14:15:22Z",
  "completed_at": "2019-08-24T14:15:22Z",
  "created_by_user_id": "209f54c4-4c33-43bc-9c6a-ef4c65ad7473",
  "created_by_user_name": "string",
  "created_by_service_account_id": "5434fcf1-291e-49ce-ac43-2f469070b2f5",
  "created_by_service_account_name": "string",
  "reasoning_config": {},
  "system_prompt_override": "string",
  "output_schema": [
    {
      "name": "string",
      "type": "string",
      "is_list": false,
      "is_optional": false
    }
  ],
  "total_items_count": 0,
  "completed_items_count": 0,
  "evaluations_expected": 0,
  "evaluations_received": 0,
  "status_summary": {
    "pending": 0,
    "in_progress": 0,
    "completed": 0,
    "failed": 0,
    "total": 0
  },
  "metrics": {
    "issue_rate": 0,
    "total_issues": 0,
    "cost_per_1k": 0,
    "issue_labels": {
      "property1": 0,
      "property2": 0
    },
    "total_input_tokens": 0,
    "total_output_tokens": 0,
    "avg_input_tokens": 0,
    "avg_output_tokens": 0,
    "avg_issues_per_evaluated_message": 0,
    "issue_free_message_count": 0,
    "total_generation_time_ms": 0,
    "avg_response_latency_ms": 0,
    "max_response_latency_ms": 0,
    "parameter_issues": {
      "property1": {
        "property1": 0,
        "property2": 0
      },
      "property2": {
        "property1": 0,
        "property2": 0
      }
    }
  }
}
{
  "detail": [
    {
      "loc": [
        "string"
      ],
      "msg": "string",
      "type": "string"
    }
  ]
}