import ReplaceDatasetToken from "/snippets/replace-dataset-token.mdx"
import ReplaceDomain from "/snippets/replace-domain.mdx"
import Prerequisites from "/snippets/standard-prerequisites.mdx"
import AIInstrumentationApproaches from "/snippets/ai-instrumentation-approaches.mdx"

Quickly start capturing telemetry data from your generative AI capabilities. After installation and configuration, follow the Axiom AI engineering workflow to create, evaluate, observe, and iterate.

This page explains how to set up instrumentation with Axiom AI SDK. Expand the section below to choose the right instrumentation approach for your needs.

<AIInstrumentationApproaches />

## Install

Install Axiom AI SDK into your TypeScript project:

```bash
# Use the command for your package manager
pnpm i axiom
npm i axiom
yarn add axiom
bun add axiom
```
The `axiom` package includes the `axiom` command-line interface (CLI) for running evaluations, which you'll use to systematically test and improve your AI capabilities.

## Configure tracer

To send data to Axiom, configure a tracer. For example, use a dedicated instrumentation file and load it before the rest of your app. An example configuration for a Node.js environment:

1. Install dependencies:

   ```bash
   # Use the command for your package manager
   # pnpm
   pnpm i \
     dotenv \
     @opentelemetry/exporter-trace-otlp-http \
     @opentelemetry/resources \
     @opentelemetry/sdk-trace-node \
     @opentelemetry/semantic-conventions \
     @opentelemetry/api

   # npm
   npm i \
     dotenv \
     @opentelemetry/exporter-trace-otlp-http \
     @opentelemetry/resources \
     @opentelemetry/sdk-trace-node \
     @opentelemetry/semantic-conventions \
     @opentelemetry/api

   # yarn
   yarn add \
     dotenv \
     @opentelemetry/exporter-trace-otlp-http \
     @opentelemetry/resources \
     @opentelemetry/sdk-trace-node \
     @opentelemetry/semantic-conventions \
     @opentelemetry/api

   # bun
   bun add \
     dotenv \
     @opentelemetry/exporter-trace-otlp-http \
     @opentelemetry/resources \
     @opentelemetry/sdk-trace-node \
     @opentelemetry/semantic-conventions \
     @opentelemetry/api
   ```
2. Create an instrumentation file (for example, `src/instrumentation.ts`, which matches the import path used in the Axiom configuration file below):

   ```ts
   import { OTLPTraceExporter } from '@opentelemetry/exporter-trace-otlp-http';
   import { resourceFromAttributes } from '@opentelemetry/resources';
   import { BatchSpanProcessor, NodeTracerProvider } from '@opentelemetry/sdk-trace-node';
   import { ATTR_SERVICE_NAME } from '@opentelemetry/semantic-conventions';
   import { trace } from '@opentelemetry/api';
   import { initAxiomAI, RedactionPolicy } from 'axiom/ai';
   import type { AxiomEvalInstrumentationHook } from 'axiom/ai/config';

   const tracer = trace.getTracer('my-tracer');

   let provider: NodeTracerProvider | undefined;

   // Export a setup function typed as AxiomEvalInstrumentationHook so the
   // eval runner (and your app) can call it with the dataset, edge URL, and token
   export const setupAppInstrumentation: AxiomEvalInstrumentationHook = async ({
     dataset,
     edgeUrl,
     token,
   }) => {
     // Reuse the provider if instrumentation has already been set up
     if (provider) {
       return { provider };
     }

     if (!dataset || !edgeUrl || !token) {
       throw new Error('Missing environment variables');
     }

     const exporter = new OTLPTraceExporter({
       url: `${edgeUrl}/v1/traces`,
       headers: {
         Authorization: `Bearer ${token}`,
         'X-Axiom-Dataset': dataset,
       },
     });

     // Configure the provider to export traces to your Axiom dataset
     provider = new NodeTracerProvider({
       resource: resourceFromAttributes({
         [ATTR_SERVICE_NAME]: 'my-app', // Replace with your service name
       },
       {
         // Use the latest schema version
         // Info: https://opentelemetry.io/docs/specs/semconv/
         schemaUrl: 'https://opentelemetry.io/schemas/1.37.0',
       }),
       spanProcessors: [new BatchSpanProcessor(exporter)],
     });

     // Register the provider
     provider.register();

     // Initialize Axiom AI SDK with the configured tracer
     initAxiomAI({ tracer, redactionPolicy: RedactionPolicy.AxiomDefault });

     return { provider };
   };
   ```

For more information on specifying redaction policies, see Redaction policies.
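
The same hook can run at application startup so that production traffic uses the tracer configured above. The sketch below is an illustration, not part of the SDK: it assumes an ESM entry point at `src/index.ts`, and `./app` with `startServer` are hypothetical placeholders for your own code.

```ts
// src/index.ts (hypothetical entry point)
// Load environment variables, then register tracing before the rest of the app runs.
import 'dotenv/config';
import { setupAppInstrumentation } from './instrumentation';

await setupAppInstrumentation({
  dataset: process.env.AXIOM_DATASET,
  edgeUrl: process.env.AXIOM_EDGE_URL,
  token: process.env.AXIOM_TOKEN,
});

// Import application code only after the provider is registered,
// so that spans created by AI calls are exported to Axiom.
// `./app` and `startServer` are placeholders for your own module.
const { startServer } = await import('./app');
await startServer();
```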

## Create Axiom configuration file

The Axiom configuration file enables the evaluation framework, allowing you to run systematic tests against your AI capabilities and track improvements over time.

At the root of your project, create the Axiom configuration file `axiom.config.ts`:

```ts
import { defineConfig } from 'axiom/ai/config';
import { setupAppInstrumentation } from './src/instrumentation';

export default defineConfig({
  eval: {
    edgeUrl: process.env.AXIOM_EDGE_URL,
    token: process.env.AXIOM_TOKEN,
    dataset: process.env.AXIOM_DATASET,

    // Optional: customize which files to run
    include: ['**/*.eval.{ts,js}'],

    // Optional: exclude patterns
    exclude: [],

    // Optional: timeout for eval execution
    timeoutMs: 60_000,

    // Optional: instrumentation hook for OpenTelemetry
    // (created in the "Configure tracer" step above)
    instrumentation: (options) => setupAppInstrumentation(options),
  },
});
```
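
With this configuration, any file matching the `include` pattern (for example, `summarize.eval.ts`) is picked up by the eval runner. The sketch below only illustrates that file layout; the `Eval` helper, its import path, and the scorer shape shown here are assumptions, so check the Evaluate documentation for the exact API.

```ts
// summarize.eval.ts (illustrative only; the `Eval` API shown here is an assumption)
import { Eval } from 'axiom/ai/evals';

Eval('summarize-support-ticket', {
  // Test cases the capability is run against
  data: () => [
    { input: 'Customer cannot log in after resetting their password.', expected: 'login issue' },
  ],
  // The capability under test: replace this placeholder with a call to your own code
  task: async ({ input }) => {
    return input.toLowerCase().includes('log in') ? 'login issue' : 'other';
  },
  // Scorers grade the output against the expected value
  scorers: [
    ({ output, expected }) => ({
      name: 'matches-expected',
      score: output === expected ? 1 : 0,
    }),
  ],
});
```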

## Store environment variables

Store environment variables in a `.env` file in the root of your project:

```bash
AXIOM_EDGE_URL="https://AXIOM_DOMAIN"
AXIOM_TOKEN="API_TOKEN"
AXIOM_DATASET="DATASET_NAME"
OPENAI_API_KEY=""
GEMINI_API_KEY=""
XAI_API_KEY=""
ANTHROPIC_API_KEY=""
```

<ReplaceDomain />
<ReplaceDatasetToken />

Enter API keys for the LLM providers you want to work with.

To run offline evaluations, you can authenticate using OAuth instead of using environment variables. For more information, see [Set up and authenticate offline evaluations](/ai-engineering/evaluate/write-evaluations#prerequisites).

## What’s next?

- Build your first capability: Start prototyping with Create.
- Set up evaluations: Learn how to systematically test your capabilities with Evaluate.
- Capture production telemetry: Instrument your AI calls for observability with Observe.
