OpenAI Integration

This guide shows how to integrate Ladger with the OpenAI SDK to track costs for chat completions, embeddings, and other API calls.

Installation

npm install @ladger/sdk openai

Basic Setup

import { LadgerTracer } from '@ladger/sdk';
import OpenAI from 'openai';

const tracer = new LadgerTracer({
  apiKey: process.env.LADGER_API_KEY!,
  flowName: 'openai-chatbot',
});

const openai = new OpenAI({
  apiKey: process.env.OPENAI_API_KEY,
});

Chat Completions

Basic Chat

async function chat(message: string) {
  return tracer.trace('chat-completion', async (span) => {
    const completion = await openai.chat.completions.create({
      model: 'gpt-4o-mini',
      messages: [
        { role: 'system', content: 'You are a helpful assistant.' },
        { role: 'user', content: message },
      ],
    });

    span.recordCost({
      provider: 'openai',
      model: 'gpt-4o-mini',
      inputTokens: completion.usage?.prompt_tokens,
      outputTokens: completion.usage?.completion_tokens,
    });

    return completion.choices[0].message.content;
  });
}
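
Call the wrapper like any other async function; the tracing happens inside:

const reply = await chat('What is the capital of France?');
console.log(reply);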

With Streaming

async function chatStream(message: string) {
  return tracer.trace('chat-stream', async (span) => {
    let inputTokens = 0;
    let outputTokens = 0;

    const stream = await openai.chat.completions.create({
      model: 'gpt-4o',
      messages: [{ role: 'user', content: message }],
      stream: true,
      stream_options: { include_usage: true },
    });

    let content = '';
    for await (const chunk of stream) {
      if (chunk.choices[0]?.delta?.content) {
        content += chunk.choices[0].delta.content;
        process.stdout.write(chunk.choices[0].delta.content);
      }
      // Capture usage from the final chunk
      if (chunk.usage) {
        inputTokens = chunk.usage.prompt_tokens;
        outputTokens = chunk.usage.completion_tokens;
      }
    }

    span.recordCost({
      provider: 'openai',
      model: 'gpt-4o',
      inputTokens,
      outputTokens,
    });

    return content;
  });
}
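
Note: with stream_options: { include_usage: true }, OpenAI sends one extra final chunk whose choices array is empty and whose usage field is populated. The chunk.choices[0]?.delta?.content guard skips it cleanly while the usage check captures the totals.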

Function Calling

async function chatWithTools(message: string) {
  return tracer.trace('chat-with-tools', async (span) => {
    const completion = await openai.chat.completions.create({
      model: 'gpt-4o',
      messages: [{ role: 'user', content: message }],
      tools: [
        {
          type: 'function',
          function: {
            name: 'get_weather',
            description: 'Get current weather',
            parameters: {
              type: 'object',
              properties: {
                location: { type: 'string' },
              },
              required: ['location'],
            },
          },
        },
      ],
    });

    span.setAttributes({
      'openai.tool_calls': completion.choices[0].message.tool_calls?.length ?? 0,
      'openai.finish_reason': completion.choices[0].finish_reason,
    });

    span.recordCost({
      provider: 'openai',
      model: 'gpt-4o',
      inputTokens: completion.usage?.prompt_tokens,
      outputTokens: completion.usage?.completion_tokens,
    });

    return completion.choices[0].message;
  });
}
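
When the model does request a tool, you typically execute it and make a second completion call so the model can produce a final answer. Each round trip is billed separately, so record both. A sketch, assuming recordCost may be called more than once per span (if your Ladger setup expects one cost per span, wrap each call in its own trace); runTool is a hypothetical stand-in for your own tool dispatcher:

// The get_weather tool definition from above, extracted for reuse
const tools: OpenAI.ChatCompletionTool[] = [
  {
    type: 'function',
    function: {
      name: 'get_weather',
      description: 'Get current weather',
      parameters: {
        type: 'object',
        properties: { location: { type: 'string' } },
        required: ['location'],
      },
    },
  },
];

// Hypothetical dispatcher — replace with your real tool implementations
async function runTool(name: string, args: string): Promise<string> {
  if (name === 'get_weather') return JSON.stringify({ tempC: 18 });
  throw new Error(`Unknown tool: ${name}`);
}

async function chatWithToolLoop(message: string) {
  return tracer.trace('chat-tool-loop', async (span) => {
    const messages: OpenAI.ChatCompletionMessageParam[] = [
      { role: 'user', content: message },
    ];

    const first = await openai.chat.completions.create({ model: 'gpt-4o', messages, tools });
    span.recordCost({
      provider: 'openai',
      model: 'gpt-4o',
      inputTokens: first.usage?.prompt_tokens,
      outputTokens: first.usage?.completion_tokens,
    });

    const reply = first.choices[0].message;
    if (!reply.tool_calls?.length) return reply.content;

    // Feed each tool result back, then ask for the final answer
    messages.push(reply);
    for (const call of reply.tool_calls) {
      messages.push({
        role: 'tool',
        tool_call_id: call.id,
        content: await runTool(call.function.name, call.function.arguments),
      });
    }

    const second = await openai.chat.completions.create({ model: 'gpt-4o', messages });
    span.recordCost({
      provider: 'openai',
      model: 'gpt-4o',
      inputTokens: second.usage?.prompt_tokens,
      outputTokens: second.usage?.completion_tokens,
    });

    return second.choices[0].message.content;
  });
}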

Embeddings

async function createEmbedding(text: string) {
  return tracer.trace('create-embedding', async (span) => {
    const embedding = await openai.embeddings.create({
      model: 'text-embedding-3-small',
      input: text,
    });

    span.setAttributes({
      'embedding.dimensions': embedding.data[0].embedding.length,
    });

    span.recordCost({
      provider: 'openai',
      model: 'text-embedding-3-small',
      inputTokens: embedding.usage.total_tokens,
      // Embeddings have no output tokens
    });

    return embedding.data[0].embedding;
  });
}

Batch Embeddings

async function createBatchEmbeddings(texts: string[]) {
  return tracer.trace('batch-embeddings', async (span) => {
    const embedding = await openai.embeddings.create({
      model: 'text-embedding-3-small',
      input: texts,
    });

    span.setAttributes({
      'embedding.batch_size': texts.length,
      'embedding.dimensions': embedding.data[0].embedding.length,
    });

    span.recordCost({
      provider: 'openai',
      model: 'text-embedding-3-small',
      inputTokens: embedding.usage.total_tokens,
    });

    return embedding.data.map((d) => d.embedding);
  });
}
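
For large corpora, split the input before calling the helper above; OpenAI caps the number of inputs per embeddings request (2,048 at the time of writing — verify against the current API reference). A minimal sketch in which each chunk gets its own span and cost record:

async function embedCorpus(texts: string[], chunkSize = 2048) {
  const vectors: number[][] = [];
  for (let i = 0; i < texts.length; i += chunkSize) {
    // Reuses createBatchEmbeddings so every request is traced separately
    vectors.push(...(await createBatchEmbeddings(texts.slice(i, i + chunkSize))));
  }
  return vectors;
}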

Image Generation

async function generateImage(prompt: string) {
  return tracer.trace('generate-image', async (span) => {
    const image = await openai.images.generate({
      model: 'dall-e-3',
      prompt,
      size: '1024x1024',
      quality: 'standard',
      n: 1,
    });

    span.setAttributes({
      'image.size': '1024x1024',
      'image.quality': 'standard',
    });

    // DALL-E pricing is per image, not per token
    span.recordCost({
      provider: 'openai',
      model: 'dall-e-3',
      costUsd: 0.04, // $0.04 for 1024x1024 standard
    });

    return image.data[0].url;
  });
}
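
The hard-coded $0.04 only covers 1024x1024 at standard quality. DALL-E 3 pricing varies by size and quality, so a small lookup keeps the call site honest. The figures below are OpenAI's published per-image rates at the time of writing — verify them before relying on this:

// Per-image USD cost for dall-e-3, keyed by `${quality}:${size}`
const DALLE3_COST: Record<string, number> = {
  'standard:1024x1024': 0.04,
  'standard:1024x1792': 0.08,
  'standard:1792x1024': 0.08,
  'hd:1024x1024': 0.08,
  'hd:1024x1792': 0.12,
  'hd:1792x1024': 0.12,
};

// Usage: costUsd: DALLE3_COST[`${quality}:${size}`]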

Audio (Whisper & TTS)

Speech to Text

import fs from 'fs';

async function transcribe(audioPath: string) {
  return tracer.trace('transcribe-audio', async (span) => {
    const transcription = await openai.audio.transcriptions.create({
      file: fs.createReadStream(audioPath),
      model: 'whisper-1',
    });

    const audioStats = fs.statSync(audioPath);
    span.setAttributes({
      'audio.file_size': audioStats.size,
    });

    // Whisper bills $0.006 per minute of audio. File size alone
    // doesn't give you the duration, so this records a flat
    // one-minute placeholder; see the duration-based sketch below.
    span.recordCost({
      provider: 'openai',
      model: 'whisper-1',
      costUsd: 0.006,
    });

    return transcription.text;
  });
}
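
If your pipeline already knows the clip's duration (from upload metadata or a probe such as ffprobe), pass it in and scale the per-minute rate instead of recording the flat placeholder. A minimal sketch:

async function transcribeWithDuration(audioPath: string, durationMinutes: number) {
  return tracer.trace('transcribe-audio', async (span) => {
    const transcription = await openai.audio.transcriptions.create({
      file: fs.createReadStream(audioPath),
      model: 'whisper-1',
    });

    span.setAttributes({ 'audio.duration_minutes': durationMinutes });
    span.recordCost({
      provider: 'openai',
      model: 'whisper-1',
      costUsd: durationMinutes * 0.006, // $0.006 per minute of audio
    });

    return transcription.text;
  });
}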

Text to Speech

async function textToSpeech(text: string) {
  return tracer.trace('text-to-speech', async (span) => {
    const mp3 = await openai.audio.speech.create({
      model: 'tts-1',
      voice: 'alloy',
      input: text,
    });

    span.setAttributes({
      'tts.voice': 'alloy',
      'tts.input_length': text.length,
    });

    // TTS pricing is per character
    span.recordCost({
      provider: 'openai',
      model: 'tts-1',
      costUsd: (text.length / 1000) * 0.015, // $0.015 per 1K chars
    });

    return Buffer.from(await mp3.arrayBuffer());
  });
}

Model Pricing Reference

| Model | Input (per 1M tokens) | Output (per 1M tokens) |
| --- | --- | --- |
| gpt-4o | $5.00 | $15.00 |
| gpt-4o-mini | $0.15 | $0.60 |
| gpt-4-turbo | $10.00 | $30.00 |
| gpt-3.5-turbo | $0.50 | $1.50 |
| text-embedding-3-small | $0.02 | - |
| text-embedding-3-large | $0.13 | - |
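
If you need a dollar figure client-side (for logging or budget checks), a small helper can apply the table. This is a hypothetical utility, not part of @ladger/sdk; prefer passing token counts to recordCost as shown throughout this guide, and treat the rates as a snapshot that will drift:

// USD per 1M tokens: [input, output]
const PRICES: Record<string, [number, number]> = {
  'gpt-4o': [5.0, 15.0],
  'gpt-4o-mini': [0.15, 0.6],
  'gpt-4-turbo': [10.0, 30.0],
  'gpt-3.5-turbo': [0.5, 1.5],
  'text-embedding-3-small': [0.02, 0],
  'text-embedding-3-large': [0.13, 0],
};

function estimateCostUsd(model: string, inputTokens = 0, outputTokens = 0): number {
  const [inRate, outRate] = PRICES[model] ?? [0, 0];
  return (inputTokens * inRate + outputTokens * outRate) / 1_000_000;
}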

Complete Example

import 'dotenv/config';
import express from 'express';
import { LadgerTracer } from '@ladger/sdk';
import OpenAI from 'openai';

const tracer = new LadgerTracer({
  apiKey: process.env.LADGER_API_KEY!,
  flowName: 'openai-chatbot',
  debug: true,
});

const openai = new OpenAI();
const app = express();
app.use(express.json());

app.post('/chat', async (req, res) => {
  const { message, model = 'gpt-4o-mini' } = req.body;
  tracer.newSession();

  const response = await tracer.trace('chat-endpoint', async (span) => {
    span.setAttributes({
      'http.method': 'POST',
      'http.path': '/chat',
      'request.model': model,
    });

    const completion = await openai.chat.completions.create({
      model,
      messages: [{ role: 'user', content: message }],
    });

    span.recordCost({
      provider: 'openai',
      model,
      inputTokens: completion.usage?.prompt_tokens,
      outputTokens: completion.usage?.completion_tokens,
    });

    return completion.choices[0].message.content;
  });

  res.json({ response });
});

// Graceful shutdown
process.on('SIGTERM', async () => {
  await tracer.shutdown();
  process.exit(0);
});

app.listen(3000, () => {
  console.log('Server running on http://localhost:3000');
});

Error Handling

async function safeChat(message: string) {
  return tracer.trace('safe-chat', async (span) => {
    try {
      const completion = await openai.chat.completions.create({
        model: 'gpt-4o',
        messages: [{ role: 'user', content: message }],
      });

      span.recordCost({
        provider: 'openai',
        model: 'gpt-4o',
        inputTokens: completion.usage?.prompt_tokens,
        outputTokens: completion.usage?.completion_tokens,
      });

      return completion.choices[0].message.content;
    } catch (error) {
      if (error instanceof OpenAI.APIError) {
        span.setAttributes({
          'error.type': 'OpenAIAPIError',
          'error.status': error.status,
          'error.code': error.code,
        });
      }
      throw error;
    }
  });
}
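
Because recordCost sits on the success path, a failed request records error attributes but no spend. Rethrowing after tagging the span preserves your caller's own error handling.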

Next Steps