Anthropic Integration
This guide shows how to integrate Ladger with the Anthropic SDK to track costs for Claude model calls.
Installation
npm install @ladger/sdk @anthropic-ai/sdk
yarn add @ladger/sdk @anthropic-ai/sdk
pnpm add @ladger/sdk @anthropic-ai/sdk

Basic Setup
import { LadgerTracer } from '@ladger/sdk';
import Anthropic from '@anthropic-ai/sdk';
const tracer = new LadgerTracer({ apiKey: process.env.LADGER_API_KEY!, flowName: 'claude-assistant',});
const anthropic = new Anthropic({ apiKey: process.env.ANTHROPIC_API_KEY,});

Chat Messages
Basic Chat
async function chat(message: string) { return tracer.trace('claude-chat', async (span) => { const response = await anthropic.messages.create({ model: 'claude-3-sonnet-20240229', max_tokens: 1024, messages: [{ role: 'user', content: message }], });
span.recordCost({ provider: 'anthropic', model: 'claude-3-sonnet', inputTokens: response.usage.input_tokens, outputTokens: response.usage.output_tokens, });
// Handle response type
const textContent = response.content.find((c) => c.type === 'text'); return textContent?.text || ''; });}

With System Prompt
async function chatWithSystem(message: string, systemPrompt: string) { return tracer.trace('claude-system-chat', async (span) => { span.setAttributes({ 'system_prompt.length': systemPrompt.length, });
const response = await anthropic.messages.create({ model: 'claude-3-opus-20240229', max_tokens: 2048, system: systemPrompt, messages: [{ role: 'user', content: message }], });
span.recordCost({ provider: 'anthropic', model: 'claude-3-opus', inputTokens: response.usage.input_tokens, outputTokens: response.usage.output_tokens, });
const textContent = response.content.find((c) => c.type === 'text'); return textContent?.text || ''; });}

Streaming
async function chatStream(message: string) { return tracer.trace('claude-stream', async (span) => { let inputTokens = 0; let outputTokens = 0;
const stream = await anthropic.messages.stream({ model: 'claude-3-sonnet-20240229', max_tokens: 1024, messages: [{ role: 'user', content: message }], });
let content = '';
for await (const event of stream) { if (event.type === 'content_block_delta' && event.delta.type === 'text_delta') { content += event.delta.text; process.stdout.write(event.delta.text); }
if (event.type === 'message_delta' && event.usage) { outputTokens = event.usage.output_tokens; }
if (event.type === 'message_start' && event.message.usage) { inputTokens = event.message.usage.input_tokens; } }
span.recordCost({ provider: 'anthropic', model: 'claude-3-sonnet', inputTokens, outputTokens, });
return content; });}

Tool Use
async function chatWithTools(message: string) { return tracer.trace('claude-tools', async (span) => { const response = await anthropic.messages.create({ model: 'claude-3-sonnet-20240229', max_tokens: 1024, tools: [ { name: 'get_weather', description: 'Get the current weather in a location', input_schema: { type: 'object', properties: { location: { type: 'string', description: 'City and state', }, }, required: ['location'], }, }, ], messages: [{ role: 'user', content: message }], });
// Track tool usage
const toolUseBlocks = response.content.filter((c) => c.type === 'tool_use'); span.setAttributes({ 'anthropic.tool_calls': toolUseBlocks.length, 'anthropic.stop_reason': response.stop_reason, });
span.recordCost({ provider: 'anthropic', model: 'claude-3-sonnet', inputTokens: response.usage.input_tokens, outputTokens: response.usage.output_tokens, });
return response; });}

Vision (Image Analysis)
import fs from 'fs';
import path from 'path';
async function analyzeImage(imagePath: string, question: string) { return tracer.trace('claude-vision', async (span) => {
// Read and encode image
const imageBuffer = fs.readFileSync(imagePath); const base64Image = imageBuffer.toString('base64'); const mediaType = imagePath.endsWith('.png') ? 'image/png' : 'image/jpeg';
span.setAttributes({ 'image.size': imageBuffer.length, 'image.type': mediaType, });
const response = await anthropic.messages.create({ model: 'claude-3-sonnet-20240229', max_tokens: 1024, messages: [ { role: 'user', content: [ { type: 'image', source: { type: 'base64', media_type: mediaType, data: base64Image, }, }, { type: 'text', text: question, }, ], }, ], });
span.recordCost({ provider: 'anthropic', model: 'claude-3-sonnet', inputTokens: response.usage.input_tokens, outputTokens: response.usage.output_tokens, });
const textContent = response.content.find((c) => c.type === 'text'); return textContent?.text || ''; });}

Multi-Turn Conversations
interface Message { role: 'user' | 'assistant'; content: string;}
async function conversation(messages: Message[]) { return tracer.trace('claude-conversation', async (span) => { span.setAttributes({ 'conversation.turns': messages.length, });
const response = await anthropic.messages.create({ model: 'claude-3-sonnet-20240229', max_tokens: 1024, messages: messages, });
span.recordCost({ provider: 'anthropic', model: 'claude-3-sonnet', inputTokens: response.usage.input_tokens, outputTokens: response.usage.output_tokens, });
const textContent = response.content.find((c) => c.type === 'text'); return textContent?.text || ''; });}

Model Pricing Reference
| Model | Input | Output |
|---|---|---|
| claude-3-opus | $15.00 / 1M | $75.00 / 1M |
| claude-3-sonnet | $3.00 / 1M | $15.00 / 1M |
| claude-3-haiku | $0.25 / 1M | $1.25 / 1M |
| claude-3.5-sonnet | $3.00 / 1M | $15.00 / 1M |
Model Selection Guide
| Use Case | Recommended Model | Cost Profile |
|---|---|---|
| Quick classification | claude-3-haiku | $ |
| General assistance | claude-3-sonnet | $$ |
| Complex reasoning | claude-3-opus | $$$ |
| Balanced performance | claude-3.5-sonnet | $$ |
Complete Example
import 'dotenv/config';
import { LadgerTracer } from '@ladger/sdk';
import Anthropic from '@anthropic-ai/sdk';
const tracer = new LadgerTracer({ apiKey: process.env.LADGER_API_KEY!, flowName: 'claude-assistant', debug: true,});
const anthropic = new Anthropic();
async function intelligentRouter(query: string) {
// Parent span for entire operation
return tracer.trace('intelligent-router', async (parentSpan) => {
// Step 1: Quick classification with Haiku (fast, cheap)
const intent = await tracer.trace('classify-intent', async (span) => { const response = await anthropic.messages.create({ model: 'claude-3-haiku-20240307', max_tokens: 50, system: 'Classify the query as: simple, moderate, or complex. Respond with just the word.', messages: [{ role: 'user', content: query }], });
span.recordCost({ provider: 'anthropic', model: 'claude-3-haiku', inputTokens: response.usage.input_tokens, outputTokens: response.usage.output_tokens, });
const text = response.content.find((c) => c.type === 'text'); return text?.text?.toLowerCase().trim() || 'moderate'; }, { parent: parentSpan });
parentSpan.setAttributes({ 'routing.intent': intent });
// Step 2: Route to appropriate model
const model = intent === 'complex' ? 'claude-3-opus-20240229' : intent === 'simple' ? 'claude-3-haiku-20240307' : 'claude-3-sonnet-20240229';
// Step 3: Generate response
return tracer.trace('generate-response', async (span) => { span.setAttributes({ 'model.selected': model });
const response = await anthropic.messages.create({ model, max_tokens: 1024, messages: [{ role: 'user', content: query }], });
span.recordCost({ provider: 'anthropic', model: model.split('-20')[0], // Normalize model name
inputTokens: response.usage.input_tokens, outputTokens: response.usage.output_tokens, });
const text = response.content.find((c) => c.type === 'text'); return text?.text || ''; }, { parent: parentSpan }); });}
// Usage
async function main() { const response = await intelligentRouter('What is 2+2?'); console.log('Response:', response);
await tracer.shutdown();}
main();

Error Handling
async function safeChat(message: string) { return tracer.trace('safe-claude-chat', async (span) => { try { const response = await anthropic.messages.create({ model: 'claude-3-sonnet-20240229', max_tokens: 1024, messages: [{ role: 'user', content: message }], });
span.recordCost({ provider: 'anthropic', model: 'claude-3-sonnet', inputTokens: response.usage.input_tokens, outputTokens: response.usage.output_tokens, });
const text = response.content.find((c) => c.type === 'text'); return text?.text || ''; } catch (error) { if (error instanceof Anthropic.APIError) { span.setAttributes({ 'error.type': 'AnthropicAPIError', 'error.status': error.status, 'error.message': error.message, }); } throw error; } });}

Next Steps
- Learn about Ollama Integration for local models
- Explore Multi-Agent Examples