Loading...
Official Rax AI SDK for Node.js, Deno, Bun & Browsers
Choose your preferred package manager
npm install rax-ai
yarn add rax-ai
pnpm add rax-ai
bun add rax-ai
Requirements: Node.js 18.0.0 or higher (uses native fetch). Also works with Deno, Bun, and modern browsers.
Get up and running in under 5 minutes
import { RaxAI } from 'rax-ai';

// Set up a client, reading the API key from the environment.
const rax = new RaxAI({ apiKey: process.env.RAX_API_KEY });

// Fire off a first chat completion.
const response = await rax.chat({
  model: 'rax-4.0',
  messages: [{ role: 'user', content: 'Hello, how are you?' }],
});
console.log(response.choices[0].message.content);
Send messages and receive AI responses
import { RaxAI } from 'rax-ai';

const rax = new RaxAI({ apiKey: 'rax_your_api_key' });

// One-shot completion: a system prompt plus a single user question.
const response = await rax.chat({
  model: 'rax-4.0',
  messages: [
    { role: 'system', content: 'You are a helpful assistant.' },
    { role: 'user', content: 'What is the capital of France?' },
  ],
});

// Output: "The capital of France is Paris."
console.log(response.choices[0].message.content);

// Access usage information
console.log('Tokens used:', response.usage.total_tokens);
Get responses in real-time as they're generated
import { RaxAI } from 'rax-ai';

const rax = new RaxAI({ apiKey: 'rax_your_api_key' });

// Ask for a streamed completion instead of waiting for the full reply.
const stream = await rax.chatStream({
  model: 'rax-4.5',
  messages: [{ role: 'user', content: 'Write a short poem about coding.' }],
});

// Each chunk carries an incremental delta; print tokens as they land.
for await (const chunk of stream) {
  const delta = chunk.choices[0]?.delta?.content;
  if (delta) {
    process.stdout.write(delta);
  }
}
// For React/Browser:
// stream.on('data', (chunk) => {
//   setResponse(prev => prev + (chunk.choices[0]?.delta?.content ?? ''));
// });
Tip: Streaming provides a better user experience by showing text as it's generated, rather than waiting for the full response.
Build chatbots with conversation history
import { RaxAI } from 'rax-ai';

const rax = new RaxAI({ apiKey: 'rax_your_api_key' });

// Rolling transcript; the system prompt anchors the assistant's persona.
const messages = [
  { role: 'system', content: 'You are a helpful coding assistant.' }
];

// Send one user turn and fold the assistant's reply back into the history
// so later turns keep their conversational context.
async function chat(userMessage: string) {
  messages.push({ role: 'user', content: userMessage });

  const completion = await rax.chat({
    model: 'rax-4.5',
    messages,
  });

  const reply = completion.choices[0].message.content;
  messages.push({ role: 'assistant', content: reply });
  return reply;
}

// Multi-turn conversation
await chat('How do I reverse a string in JavaScript?');
await chat('Can you show me a more efficient way?');
await chat('What about handling unicode characters?');
Fine-tune generation with these options
// Reference snippet: every supported rax.chat() option with inline notes.
// NOTE(review): `messages: [...]` below is a placeholder, not runnable code —
// substitute a real ChatMessage[] array before executing.
import { RaxAI } from 'rax-ai';
const rax = new RaxAI({ apiKey: 'rax_your_api_key' });
const response = await rax.chat({
// Required
model: 'rax-4.5', // Model to use
messages: [...], // Conversation messages
// Optional - Generation Control
temperature: 0.7, // Creativity (0-2, default: 0.7)
max_tokens: 2000, // Max response length
top_p: 0.9, // Nucleus sampling (0-1)
// Optional - Streaming
stream: false, // Enable streaming (use chatStream instead)
// Optional - Stop Sequences
stop: ['END', '###'], // Stop generation at these sequences
});
temperature — Controls randomness. Lower = more focused, Higher = more creative
max_tokens — Maximum length of the response
top_p — Nucleus sampling - alternative to temperature
stop — Stop sequences to end generation early
Built-in error types for easy handling
// Error-handling pattern: wrap SDK calls in try/catch, then use RaxAIError's
// discriminator methods to branch on the failure category.
// (The closing brace of this catch block follows the snippet.)
import { RaxAI, RaxAIError } from 'rax-ai';
const rax = new RaxAI({ apiKey: 'rax_your_api_key' });
try {
const response = await rax.chat({
model: 'rax-4.0',
messages: [{ role: 'user', content: 'Hello!' }]
});
console.log(response.choices[0].message.content);
} catch (error) {
// instanceof narrows the unknown catch binding to the SDK error type.
if (error instanceof RaxAIError) {
// Handle specific error types
if (error.isRateLimited()) {
console.log('Rate limited. Retry after:', error.retryAfter, 'seconds');
// Implement exponential backoff
} else if (error.isAuthError()) {
console.log('Invalid API key. Please check your credentials.');
} else if (error.isValidationError()) {
console.log('Invalid request:', error.message);
} else if (error.isServerError()) {
console.log('Server error. Please try again later.');
}
// Access error details
console.log('Status:', error.status);
console.log('Code:', error.code);
console.log('Message:', error.message);
} else {
// Network or other errors
console.log('Unexpected error:', error);
}
}
isValidationError()
isAuthError()
isRateLimited()
isServerError()
Complete TypeScript definitions included
import { RaxAI, ChatMessage, ChatResponse, StreamChunk } from 'rax-ai';

// Fully typed client
const rax = new RaxAI({ apiKey: process.env.RAX_API_KEY! });

// Type-safe messages
const messages: ChatMessage[] = [
  { role: 'system', content: 'You are helpful.' },
  { role: 'user', content: 'Hello!' }
];

// Type-safe response
const response: ChatResponse = await rax.chat({
  model: 'rax-4.0',
  messages
});

// Type-safe streaming
const stream = await rax.chatStream({
  model: 'rax-4.5',
  messages
});

// FIX: TypeScript forbids type annotations in for..of / for await..of
// variable bindings ("const chunk: StreamChunk of stream" is a syntax
// error). The chunk type is inferred as StreamChunk from the
// AsyncIterable<StreamChunk> returned by chatStream.
for await (const chunk of stream) {
  // TypeScript knows the shape of chunk
  const content = chunk.choices[0]?.delta?.content;
}

// Available types:
// - RaxAIConfig: Client configuration
// - ChatMessage: Message object
// - ChatRequest: Request parameters
// - ChatResponse: Full response
// - StreamChunk: Streaming chunk
// - RaxAIError: Error class
rax-4.0 — Fast and efficient for everyday tasks
rax-4.5 — Enhanced reasoning for complex tasks
Complete API reference
// API reference in signature notation — these lines document method
// shapes and are not executable TypeScript statements.
// RaxAI Client
const rax = new RaxAI(config: RaxAIConfig);
interface RaxAIConfig {
apiKey: string; // Your API key (required)
baseUrl?: string; // Custom API URL (optional)
timeout?: number; // Request timeout in ms (default: 30000)
maxRetries?: number; // Auto-retry count (default: 2)
}
// Chat Completion
rax.chat(request: ChatRequest): Promise<ChatResponse>
// Streaming Chat
rax.chatStream(request: ChatRequest): AsyncIterable<StreamChunk>
// List Models
rax.models(): Promise<Model[]>
// Get Usage Stats
rax.usage(): Promise<UsageStats>