{"id":"honeycomb-api","name":"Honeycomb","homepage":"https://www.honeycomb.io","repo_url":null,"category":"observability","subcategories":["distributed-tracing","observability","application-performance"],"tags":["honeycomb","observability","distributed-tracing","opentelemetry","debugging","production"],"what_it_does":"Observability platform for distributed systems that enables high-cardinality event exploration and debugging via a query API, supporting OpenTelemetry ingestion and powerful BubbleUp analysis.","use_cases":["Querying production trace data to identify performance bottlenecks in agent workflows","Creating automated SLO monitors and alert conditions via API","Ingesting custom telemetry events from agent execution runs for observability","Building dashboards and burn alerts programmatically for production monitoring","Correlating slow API calls with specific user attributes using high-cardinality queries"],"not_for":["Infrastructure-level metrics (use Prometheus/Datadog for host-level monitoring)","Log aggregation as primary use case (Honeycomb is trace-centric, not log-centric)","Very small teams with simple monolithic applications — overhead not justified","Compliance log archival (use dedicated log storage for long-term retention)"],"best_when":"You're debugging distributed systems and microservices and need to explore high-cardinality production data interactively, or want to build automated observability into agent execution pipelines.","avoid_when":"Your system is a simple monolith, you primarily need infrastructure metrics rather than application traces, or you already have Datadog/New Relic deeply integrated.","alternatives":["datadog-api","newrelic-api","lightstep-api","sentry-api"],"af_score":80.5,"security_score":null,"reliability_score":null,"package_type":"mcp_server","discovery_source":["github"],"priority":"low","status":"evaluated","version_evaluated":"current","last_evaluated":"2026-03-01T09:50:05.696137+00:00","performance":{"latency_p50_ms":100,"latency_p99_ms":400,"uptime_sla_percent":99.9,"rate_limits":"Query API: up to 10 concurrent queries per team; Ingest: very high throughput","data_source":"llm_estimated","measured_on":null}}