AI SDK Orama Provider
A provider for Vercel's AI SDK that enables seamless integration with Orama's search and chat capabilities.
Features
- 🔍 Full-text, vector, and hybrid search
- 💬 Streaming chat/QA functionality
- 🚀 Framework-agnostic
- 🔄 Real-time streaming responses
- 🖼️ Rich media search results
Installation
npm install @oramacloud/ai-sdk-provider
Quick Start
import { generateText, streamText } from 'ai';
import { oramaProvider } from '@oramacloud/ai-sdk-provider';
const provider = oramaProvider({
endpoint: process.env.ORAMA_API_URL,
apiKey: process.env.ORAMA_API_KEY,
userContext: "The user is looking for documentation help",
inferenceType: "documentation"
});
export default function Chat() {
const [messages, setMessages] = useState([]);
const [input, setInput] = useState('');
const handleSubmit = async (e) => {
e.preventDefault();
setMessages(prev => [...prev,
{ role: 'user', content: input },
{ role: 'assistant', content: '' }
]);
try {
const response = await streamText({
model: provider.ask(),
prompt: input,
temperature: 0
});
let previousLength = 0;
for await (const chunk of response.textStream) {
if (chunk) {
setMessages(prev => {
const newMessages = [...prev];
const lastMessage = newMessages[newMessages.length - 1];
const currentChunk = chunk.toString();
const newText = currentChunk.slice(previousLength);
previousLength = currentChunk.length;
lastMessage.content += newText;
return newMessages;
});
}
}
} catch (error) {
setMessages(prev => {
const newMessages = [...prev];
newMessages[newMessages.length - 1].content = 'An error occurred while processing your request.';
return newMessages;
});
}
};
return (
);
}
Configuration
Provider Configuration
interface OramaProviderConfig {
endpoint: string;
apiKey: string;
userContext?: string;
inferenceType?: "documentation" | "chat";
searchMode?: "fulltext" | "vector" | "hybrid";
searchOptions?: OramaSearchOptions;
}
Search Options
interface OramaSearchOptions {
mode?: "fulltext" | "vector" | "hybrid";
where?: Record<string, any>;
sortBy?: Array<{ property: string; order?: "asc" | "desc" }>;
facets?: Record<string, any>;
limit?: number;
boost?: Record<string, number>;
order?: "asc" | "desc";
}
Usage Examples
Chat Mode with Streaming
const provider = oramaProvider({
endpoint: process.env.ORAMA_API_URL,
apiKey: process.env.ORAMA_API_KEY,
userContext: "The user is looking for documentation help",
inferenceType: "documentation"
});
const response = await streamText({
model: provider.ask(),
prompt: "What is Orama?",
temperature: 0
});
for await (const chunk of response.textStream) {
console.log(chunk);
}
Search Mode
const provider = oramaProvider({
endpoint: process.env.ORAMA_API_URL,
apiKey: process.env.ORAMA_API_KEY,
searchMode: "fulltext",
searchOptions: {
sortBy: [{ property: "rating", order: "desc" }],
where: {
category: "documentation"
}
}
});
const response = await generateText({
model: provider.ask(),
prompt: "Search query"
});
Search Results Structure
Search results are returned with the following structure:
interface SearchResult {
text: string;
results: Array<{
document: {
title?: string;
description?: string;
image?: string;
url?: string;
releaseDate?: string;
rating?: string;
genres?: string[];
};
score: number;
}>;
finishReason: string;
usage: {
promptTokens: number;
completionTokens: number;
totalTokens: number;
};
}
Contributing
Contributions are welcome! Please feel free to submit a Pull Request.
License
Apache 2.0. Read the full license in the LICENSE file included with this package.