create-llama
Comparing version 0.3.11 to 0.3.12
@@ -11,5 +11,5 @@ ## Overview
1. [Choreography](./app/examples/choreography.py) - the agents decide themselves to delegate a task to another agent
1. [Orchestrator](./app/examples/orchestrator.py) - a central orchestrator decides which agent should execute a task
1. [Explicit Workflow](./app/examples/workflow.py) - a pre-defined workflow specific for the task is used to execute the tasks
1. [Choreography](./app/agents/choreography.py) - the agents decide themselves to delegate a task to another agent
1. [Orchestrator](./app/agents/orchestrator.py) - a central orchestrator decides which agent should execute a task
1. [Explicit Workflow](./app/agents/workflow.py) - a pre-defined workflow specific for the task is used to execute the tasks
@@ -16,0 +16,0 @@ ## Getting Started
@@ -36,3 +36,3 @@ This is a [LlamaIndex](https://www.llamaindex.ai/) multi-agents project using [Workflows](https://docs.llamaindex.ai/en/stable/understanding/workflows/).
You can start editing the API by modifying `app/api/routers/chat.py` or `app/financial_report/workflow.py`. The API auto-updates as you save the files.
You can start editing the API by modifying `app/api/routers/chat.py` or `app/workflows/financial_report.py`. The API auto-updates as you save the files.
@@ -39,0 +39,0 @@ Open [http://localhost:8000/docs](http://localhost:8000/docs) with your browser to see the Swagger UI of the API.
@@ -42,3 +42,3 @@ This is a [LlamaIndex](https://www.llamaindex.ai/) multi-agents project using [Workflows](https://docs.llamaindex.ai/en/stable/understanding/workflows/).
You can start editing the API by modifying `app/api/routers/chat.py` or `app/agents/form_filling.py`. The API auto-updates as you save the files.
You can start editing the API by modifying `app/api/routers/chat.py` or `app/workflows/form_filling.py`. The API auto-updates as you save the files.
@@ -45,0 +45,0 @@ Open [http://localhost:8000/docs](http://localhost:8000/docs) with your browser to see the Swagger UI of the API.
import { BaseToolWithCall } from "llamaindex";
import { ToolsFactory } from "llamaindex/tools/ToolsFactory";
import fs from "node:fs/promises";
import path from "node:path";
import { CodeGeneratorTool, CodeGeneratorToolParams } from "./code-generator";
@@ -9,2 +11,8 @@ import {
import { DuckDuckGoSearchTool, DuckDuckGoToolParams } from "./duckduckgo";
import {
  ExtractMissingCellsParams,
  ExtractMissingCellsTool,
  FillMissingCellsParams,
  FillMissingCellsTool,
} from "./form-filling";
import { ImgGeneratorTool, ImgGeneratorToolParams } from "./img-gen";
@@ -58,2 +66,8 @@ import { InterpreterTool, InterpreterToolParams } from "./interpreter";
  },
  form_filling: async (config: unknown) => {
    return [
      new ExtractMissingCellsTool(config as ExtractMissingCellsParams),
      new FillMissingCellsTool(config as FillMissingCellsParams),
    ];
  },
};
@@ -75,1 +89,17 @@
}
export async function getConfiguredTools(
  configPath?: string,
): Promise<BaseToolWithCall[]> {
  const configFile = path.join(configPath ?? "config", "tools.json");
  const toolConfig = JSON.parse(await fs.readFile(configFile, "utf8"));
  const tools = await createTools(toolConfig);
  return tools;
}
export async function getTool(
  toolName: string,
): Promise<BaseToolWithCall | undefined> {
  const tools = await getConfiguredTools();
  return tools.find((tool) => tool.metadata.name === toolName);
}
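The new `getConfiguredTools`/`getTool` helpers load the tool registry from `config/tools.json` at runtime. A minimal usage sketch; the import path and the `duckduckgo` tool name are illustrative assumptions:

```ts
// Sketch of a hypothetical caller, e.g. inside a workflow factory.
import { getConfiguredTools, getTool } from "./engine/tools";

export async function pickTools() {
  // Load every tool declared in config/tools.json
  const allTools = await getConfiguredTools();
  console.log(allTools.map((tool) => tool.metadata.name));

  // Or resolve a single tool by its metadata name
  const searchTool = await getTool("duckduckgo");
  if (!searchTool) {
    throw new Error("duckduckgo tool is not configured");
  }
  return [searchTool];
}
```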
@@ -16,3 +16,3 @@ import { Document } from "llamaindex";
const UPLOADED_FOLDER = "output/uploaded";
export const UPLOADED_FOLDER = "output/uploaded";
@@ -19,0 +19,0 @@ export async function storeAndParseFile(
import { Document, LLamaCloudFileService, VectorStoreIndex } from "llamaindex";
import { LlamaCloudIndex } from "llamaindex/cloud/LlamaCloudIndex";
import fs from "node:fs/promises";
import path from "node:path";
import { DocumentFile } from "../streaming/annotations";
@@ -21,4 +19,4 @@ import { parseFile, storeFile } from "./helper";
  // If the file is csv and has codeExecutorTool, we don't need to index the file.
  if (mimeType === "text/csv" && (await hasCodeExecutorTool())) {
  // Do not index csv files
  if (mimeType === "text/csv") {
    return fileMetadata;
@@ -65,12 +63,1 @@ }
}
const hasCodeExecutorTool = async () => {
  const codeExecutorTools = ["interpreter", "artifact"];
  const configFile = path.join("config", "tools.json");
  const toolConfig = JSON.parse(await fs.readFile(configFile, "utf8"));
  const localTools = toolConfig.local || {};
  // Check if local tools contains codeExecutorTools
  return codeExecutorTools.some((tool) => localTools[tool] !== undefined);
};
import { JSONValue, Message } from "ai";
import { MessageContent, MessageContentDetail } from "llamaindex";
import {
  ChatMessage,
  MessageContent,
  MessageContentDetail,
  MessageType,
} from "llamaindex";
import { UPLOADED_FOLDER } from "../documents/helper";
@@ -61,2 +67,41 @@ export type DocumentFileType = "csv" | "pdf" | "txt" | "docx";
export function convertToChatHistory(messages: Message[]): ChatMessage[] {
  if (!messages || !Array.isArray(messages)) {
    return [];
  }
  const agentHistory = retrieveAgentHistoryMessage(messages);
  if (agentHistory) {
    const previousMessages = messages.slice(0, -1);
    return [...previousMessages, agentHistory].map((msg) => ({
      role: msg.role as MessageType,
      content: msg.content,
    }));
  }
  return messages.map((msg) => ({
    role: msg.role as MessageType,
    content: msg.content,
  }));
}
function retrieveAgentHistoryMessage(
  messages: Message[],
  maxAgentMessages = 10,
): ChatMessage | null {
  const agentAnnotations = getAnnotations<{ agent: string; text: string }>(
    messages,
    { role: "assistant", type: "agent" },
  ).slice(-maxAgentMessages);
  if (agentAnnotations.length > 0) {
    const messageContent =
      "Here is the previous conversation of agents:\n" +
      agentAnnotations.map((annotation) => annotation.data.text).join("\n");
    return {
      role: "assistant",
      content: messageContent,
    };
  }
  return null;
}
function getFileContent(file: DocumentFile): string {
@@ -88,2 +133,6 @@ let defaultContent = `=====File: ${file.name}=====\n`;
  // Include local file path
  const localFilePath = `${UPLOADED_FOLDER}/${file.name}`;
  defaultContent += `Local file path (instruction: use for local tool that requires a local path): ${localFilePath}\n`;
  return defaultContent;
@@ -132,9 +181,6 @@ }
function convertAnnotations(messages: Message[]): MessageContentDetail[] {
  // annotations from the last user message that has annotations
  const annotations: Annotation[] =
    messages
      .slice()
      .reverse()
      .find((message) => message.role === "user" && message.annotations)
      ?.annotations?.map(getValidAnnotation) || [];
  // get all annotations from user messages
  const annotations: Annotation[] = messages
    .filter((message) => message.role === "user" && message.annotations)
    .flatMap((message) => message.annotations?.map(getValidAnnotation) || []);
  if (annotations.length === 0) return [];
@@ -141,0 +187,0 @@
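`convertToChatHistory` and `retrieveAgentHistoryMessage` fold the "agent" annotations streamed on earlier turns back into a single assistant message. A small sketch of the expected mapping, using hypothetical test data:

```ts
import { convertToChatHistory } from "./llamaindex/streaming/annotations";

// Hypothetical second turn: the previous assistant reply carried "agent" annotations.
const messages = [
  { id: "1", role: "user", content: "Generate the financial report" },
  {
    id: "2",
    role: "assistant",
    content: "Here is the report...",
    annotations: [
      { type: "agent", data: { agent: "researcher", text: "Collected 3 filings" } },
      { type: "agent", data: { agent: "writer", text: "Drafted the summary" } },
    ],
  },
  { id: "3", role: "user", content: "Now shorten it" },
];

const history = convertToChatHistory(messages as any);
// history ≈ [
//   { role: "user", content: "Generate the financial report" },
//   { role: "assistant", content: "Here is the report..." },
//   { role: "assistant", content: "Here is the previous conversation of agents:\nCollected 3 filings\nDrafted the summary" },
// ]
// The trailing user message is not kept in the history; it is passed to the
// workflow separately via retrieveMessageContent.
```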
@@ -1,34 +0,32 @@
import { StopEvent } from "@llamaindex/core/workflow";
import { Message, streamToResponse } from "ai";
import { Request, Response } from "express";
import { ChatResponseChunk } from "llamaindex";
import {
  convertToChatHistory,
  retrieveMessageContent,
} from "./llamaindex/streaming/annotations";
import { createWorkflow } from "./workflow/factory";
import { toDataStream, workflowEventsToStreamData } from "./workflow/stream";
import { createStreamFromWorkflowContext } from "./workflow/stream";
export const chat = async (req: Request, res: Response) => {
  try {
    const { messages, data }: { messages: Message[]; data?: any } = req.body;
    const userMessage = messages.pop();
    if (!messages || !userMessage || userMessage.role !== "user") {
    const { messages }: { messages: Message[] } = req.body;
    if (!messages || messages.length === 0) {
      return res.status(400).json({
        error:
          "messages are required in the request body and the last message must be from the user",
        error: "messages are required in the request body",
      });
    }
    const chatHistory = convertToChatHistory(messages);
    const userMessageContent = retrieveMessageContent(messages);
    const agent = createWorkflow(messages, data);
    const result = agent.run<AsyncGenerator<ChatResponseChunk>>(
      userMessage.content,
    ) as unknown as Promise<StopEvent<AsyncGenerator<ChatResponseChunk>>>;
    const workflow = await createWorkflow({ chatHistory });
    // convert the workflow events to a vercel AI stream data object
    const agentStreamData = await workflowEventsToStreamData(
      agent.streamEvents(),
    );
    // convert the workflow result to a vercel AI content stream
    const stream = toDataStream(result, {
      onFinal: () => agentStreamData.close(),
    const context = workflow.run({
      message: userMessageContent,
      streaming: true,
    });
    return streamToResponse(stream, res, {}, agentStreamData);
    const { stream, dataStream } =
      await createStreamFromWorkflowContext(context);
    return streamToResponse(stream, res, {}, dataStream);
  } catch (error) {
@@ -35,0 +33,0 @@ console.error("[LlamaIndex]", error);
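The reworked handler expects the full message list in the request body and streams the workflow output back as a Vercel AI data stream. A client-side sketch; the `/api/chat` path and port 8000 are assumptions based on the template defaults:

```ts
// Hypothetical Node 18+ client for the Express chat endpoint.
const res = await fetch("http://localhost:8000/api/chat", {
  method: "POST",
  headers: { "Content-Type": "application/json" },
  body: JSON.stringify({
    messages: [{ role: "user", content: "Summarize the uploaded report." }],
  }),
});

// The body is a streamed data stream; read it incrementally.
const reader = res.body!.getReader();
const decoder = new TextDecoder();
while (true) {
  const { value, done } = await reader.read();
  if (done) break;
  process.stdout.write(decoder.decode(value));
}
```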
import { initObservability } from "@/app/observability";
import { StopEvent } from "@llamaindex/core/workflow";
import { Message, StreamingTextResponse } from "ai";
import { ChatResponseChunk } from "llamaindex";
import { StreamingTextResponse, type Message } from "ai";
import { NextRequest, NextResponse } from "next/server";
import { initSettings } from "./engine/settings";
import {
  convertToChatHistory,
  isValidMessages,
  retrieveMessageContent,
} from "./llamaindex/streaming/annotations";
import { createWorkflow } from "./workflow/factory";
import { toDataStream, workflowEventsToStreamData } from "./workflow/stream";
import { createStreamFromWorkflowContext } from "./workflow/stream";
@@ -19,5 +22,4 @@ initObservability();
    const body = await request.json();
    const { messages, data }: { messages: Message[]; data?: any } = body;
    const userMessage = messages.pop();
    if (!messages || !userMessage || userMessage.role !== "user") {
    const { messages }: { messages: Message[]; data?: any } = body;
    if (!isValidMessages(messages)) {
      return NextResponse.json(
@@ -32,16 +34,16 @@ {
    const agent = createWorkflow(messages, data);
    // TODO: fix type in agent.run in LITS
    const result = agent.run<AsyncGenerator<ChatResponseChunk>>(
      userMessage.content,
    ) as unknown as Promise<StopEvent<AsyncGenerator<ChatResponseChunk>>>;
    // convert the workflow events to a vercel AI stream data object
    const agentStreamData = await workflowEventsToStreamData(
      agent.streamEvents(),
    );
    // convert the workflow result to a vercel AI content stream
    const stream = toDataStream(result, {
      onFinal: () => agentStreamData.close(),
    const chatHistory = convertToChatHistory(messages);
    const userMessageContent = retrieveMessageContent(messages);
    const workflow = await createWorkflow({ chatHistory });
    const context = workflow.run({
      message: userMessageContent,
      streaming: true,
    });
    return new StreamingTextResponse(stream, {}, agentStreamData);
    const { stream, dataStream } =
      await createStreamFromWorkflowContext(context);
    // Return the two streams in one response
    return new StreamingTextResponse(stream, {}, dataStream);
  } catch (error) {
@@ -48,0 +50,0 @@ console.error("[LlamaIndex]", error);
import {
  Context,
  HandlerContext,
  StartEvent,
@@ -7,3 +7,3 @@ StopEvent,
  WorkflowEvent,
} from "@llamaindex/core/workflow";
} from "@llamaindex/workflow";
import {
@@ -13,10 +13,9 @@ BaseToolWithCall,
  ChatMessage,
  ChatResponse,
  ChatResponseChunk,
  QueryEngineTool,
  Settings,
  ToolCall,
  ToolCallLLM,
  ToolCallLLMMessageOptions,
  callTool,
} from "llamaindex";
import { callTools, chatWithTools } from "./tools";
import { AgentInput, AgentRunEvent } from "./type";
@@ -32,7 +31,19 @@
export class FunctionCallingAgent extends Workflow {
type FunctionCallingAgentContextData = {
  streaming: boolean;
};
export type FunctionCallingAgentInput = AgentInput & {
  displayName: string;
};
export class FunctionCallingAgent extends Workflow<
  FunctionCallingAgentContextData,
  FunctionCallingAgentInput,
  string | AsyncGenerator<boolean | ChatResponseChunk<object>>
> {
  name: string;
  llm: ToolCallLLM;
  memory: ChatMemoryBuffer;
  tools: BaseToolWithCall[];
  tools: BaseToolWithCall[] | QueryEngineTool[];
  systemPrompt?: string;
@@ -59,3 +70,5 @@ writeEvents: boolean;
    this.llm = options.llm ?? (Settings.llm as ToolCallLLM);
    this.checkToolCallSupport();
    if (!(this.llm instanceof ToolCallLLM)) {
      throw new Error("LLM is not a ToolCallLLM");
    }
    this.memory = new ChatMemoryBuffer({
@@ -71,11 +84,23 @@ llm: this.llm,
    // add steps
    this.addStep(StartEvent<AgentInput>, this.prepareChatHistory, {
      outputs: InputEvent,
    });
    this.addStep(InputEvent, this.handleLLMInput, {
      outputs: [ToolCallEvent, StopEvent],
    });
    this.addStep(ToolCallEvent, this.handleToolCalls, {
      outputs: InputEvent,
    });
    this.addStep(
      {
        inputs: [StartEvent<AgentInput>],
        outputs: [InputEvent],
      },
      this.prepareChatHistory,
    );
    this.addStep(
      {
        inputs: [InputEvent],
        outputs: [ToolCallEvent, StopEvent],
      },
      this.handleLLMInput,
    );
    this.addStep(
      {
        inputs: [ToolCallEvent],
        outputs: [InputEvent],
      },
      this.handleToolCalls,
    );
  }
@@ -87,8 +112,8 @@
  private async prepareChatHistory(
    ctx: Context,
  prepareChatHistory = async (
    ctx: HandlerContext<FunctionCallingAgentContextData>,
    ev: StartEvent<AgentInput>,
  ): Promise<InputEvent> {
    const { message, streaming } = ev.data.input;
    ctx.set("streaming", streaming);
  ): Promise<InputEvent> => {
    const { message, streaming } = ev.data;
    ctx.data.streaming = streaming ?? false;
    this.writeEvent(`Start to work on: ${message}`, ctx);
@@ -100,111 +125,46 @@ if (this.systemPrompt) {
    return new InputEvent({ input: this.chatHistory });
  }
  };
  private async handleLLMInput(
    ctx: Context,
  handleLLMInput = async (
    ctx: HandlerContext<FunctionCallingAgentContextData>,
    ev: InputEvent,
  ): Promise<StopEvent<string | AsyncGenerator> | ToolCallEvent> {
    if (ctx.get("streaming")) {
      return await this.handleLLMInputStream(ctx, ev);
  ): Promise<StopEvent<string | AsyncGenerator> | ToolCallEvent> => {
    const toolCallResponse = await chatWithTools(
      this.llm,
      this.tools,
      this.chatHistory,
    );
    if (toolCallResponse.toolCallMessage) {
      this.memory.put(toolCallResponse.toolCallMessage);
    }
    const result = await this.llm.chat({
      messages: this.chatHistory,
      tools: this.tools,
    });
    this.memory.put(result.message);
    const toolCalls = this.getToolCallsFromResponse(result);
    if (toolCalls.length) {
      return new ToolCallEvent({ toolCalls });
    if (toolCallResponse.hasToolCall()) {
      return new ToolCallEvent({ toolCalls: toolCallResponse.toolCalls });
    }
    this.writeEvent("Finished task", ctx);
    return new StopEvent({ result: result.message.content.toString() });
  }
  private async handleLLMInputStream(
    context: Context,
    ev: InputEvent,
  ): Promise<StopEvent<AsyncGenerator> | ToolCallEvent> {
    const { llm, tools, memory } = this;
    const llmArgs = { messages: this.chatHistory, tools };
    const responseGenerator = async function* () {
      const responseStream = await llm.chat({ ...llmArgs, stream: true });
      let fullResponse = null;
      let yieldedIndicator = false;
      for await (const chunk of responseStream) {
        const hasToolCalls = chunk.options && "toolCall" in chunk.options;
        if (!hasToolCalls) {
          if (!yieldedIndicator) {
            yield false;
            yieldedIndicator = true;
          }
          yield chunk;
        } else if (!yieldedIndicator) {
          yield true;
          yieldedIndicator = true;
        }
        fullResponse = chunk;
    if (ctx.data.streaming) {
      if (!toolCallResponse.responseGenerator) {
        throw new Error("No streaming response");
      }
      if (fullResponse?.options && Object.keys(fullResponse.options).length) {
        memory.put({
          role: "assistant",
          content: "",
          options: fullResponse.options,
        });
        yield fullResponse;
      }
    };
    const generator = responseGenerator();
    const isToolCall = await generator.next();
    if (isToolCall.value) {
      const fullResponse = await generator.next();
      const toolCalls = this.getToolCallsFromResponse(
        fullResponse.value as ChatResponseChunk<ToolCallLLMMessageOptions>,
      );
      return new ToolCallEvent({ toolCalls });
      return new StopEvent(toolCallResponse.responseGenerator);
    }
    this.writeEvent("Finished task", context);
    return new StopEvent({ result: generator });
  }
    const fullResponse = await toolCallResponse.asFullResponse();
    this.memory.put(fullResponse);
    return new StopEvent(fullResponse.content.toString());
  };
  private async handleToolCalls(
    ctx: Context,
  handleToolCalls = async (
    ctx: HandlerContext<FunctionCallingAgentContextData>,
    ev: ToolCallEvent,
  ): Promise<InputEvent> {
  ): Promise<InputEvent> => {
    const { toolCalls } = ev.data;
    const toolMsgs: ChatMessage[] = [];
    const toolMsgs = await callTools({
      tools: this.tools,
      toolCalls,
      ctx,
      agentName: this.name,
    });
    for (const call of toolCalls) {
      const targetTool = this.tools.find(
        (tool) => tool.metadata.name === call.name,
      );
      // TODO: make logger optional in callTool in framework
      const toolOutput = await callTool(targetTool, call, {
        log: () => {},
        error: (...args: unknown[]) => {
          console.error(`[Tool ${call.name} Error]:`, ...args);
        },
        warn: () => {},
      });
      toolMsgs.push({
        content: JSON.stringify(toolOutput.output),
        role: "user",
        options: {
          toolResult: {
            result: toolOutput.output,
            isError: toolOutput.isError,
            id: call.id,
          },
        },
      });
    }
    for (const msg of toolMsgs) {
@@ -215,32 +175,13 @@ this.memory.put(msg);
    return new InputEvent({ input: this.memory.getMessages() });
  }
  };
  private writeEvent(msg: string, context: Context) {
  writeEvent = (
    msg: string,
    ctx: HandlerContext<FunctionCallingAgentContextData>,
  ) => {
    if (!this.writeEvents) return;
    context.writeEventToStream({
      data: new AgentRunEvent({ name: this.name, msg }),
    });
  }
  private checkToolCallSupport() {
    const { supportToolCall } = this.llm as ToolCallLLM;
    if (!supportToolCall) throw new Error("LLM does not support tool calls");
  }
  private getToolCallsFromResponse(
    response:
      | ChatResponse<ToolCallLLMMessageOptions>
      | ChatResponseChunk<ToolCallLLMMessageOptions>,
  ): ToolCall[] {
    let options;
    if ("message" in response) {
      options = response.message.options;
    } else {
      options = response.options;
    }
    if (options && "toolCall" in options) {
      return options.toolCall as ToolCall[];
    }
    return [];
  }
    ctx.sendEvent(
      new AgentRunEvent({ agent: this.name, text: msg, type: "text" }),
    );
  };
}
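Taken together, the agent now registers its steps through the object-style `addStep` signature from `@llamaindex/workflow` and exposes its handlers as bound arrow functions. A sketch of driving such an agent directly; the module paths and constructor options are assumptions based on what appears in the diff (`name`, `tools`, `systemPrompt`):

```ts
import { StopEvent } from "@llamaindex/workflow";
import { FunctionCallingAgent } from "./workflow/single-agent"; // illustrative path
import { AgentRunEvent } from "./workflow/type";
import { getConfiguredTools } from "./engine/tools"; // illustrative path

const agent = new FunctionCallingAgent({
  name: "researcher",
  tools: await getConfiguredTools(),
  systemPrompt: "You help users analyze financial reports.",
});

// run() returns a WorkflowContext, which is async-iterable over workflow events.
const context = agent.run({
  message: "What were the Q3 highlights?",
  displayName: "Researcher",
  streaming: false,
});

for await (const event of context) {
  if (event instanceof AgentRunEvent) {
    console.log(`[${event.data.agent}] ${event.data.text}`);
  } else if (event instanceof StopEvent) {
    console.log("Final answer:", event.data); // a string when streaming is false
  }
}
```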
@@ -1,8 +0,10 @@
import { StopEvent } from "@llamaindex/core/workflow";
import {
  createCallbacksTransformer,
  StopEvent,
  WorkflowContext,
  WorkflowEvent,
} from "@llamaindex/workflow";
import {
  StreamData,
  createStreamDataTransformer,
  StreamData,
  trimStartOfStreamHelper,
  type AIStreamCallbacksAndOptions,
} from "ai";
@@ -12,55 +14,65 @@ import { ChatResponseChunk } from "llamaindex";
export function toDataStream(
  result: Promise<StopEvent<AsyncGenerator<ChatResponseChunk>>>,
  callbacks?: AIStreamCallbacksAndOptions,
) {
  return toReadableStream(result)
    .pipeThrough(createCallbacksTransformer(callbacks))
    .pipeThrough(createStreamDataTransformer());
}
export async function createStreamFromWorkflowContext<Input, Output, Context>(
  context: WorkflowContext<Input, Output, Context>,
): Promise<{ stream: ReadableStream<string>; dataStream: StreamData }> {
  const trimStartOfStream = trimStartOfStreamHelper();
  const dataStream = new StreamData();
  const encoder = new TextEncoder();
  let generator: AsyncGenerator<ChatResponseChunk> | undefined;
function toReadableStream(
  result: Promise<StopEvent<AsyncGenerator<ChatResponseChunk>>>,
) {
  const trimStartOfStream = trimStartOfStreamHelper();
  return new ReadableStream<string>({
    start(controller) {
      controller.enqueue(""); // Kickstart the stream
  const closeStreams = (controller: ReadableStreamDefaultController) => {
    controller.close();
    dataStream.close();
  };
  const mainStream = new ReadableStream({
    async start(controller) {
      // Kickstart the stream by sending an empty string
      controller.enqueue(encoder.encode(""));
    },
    async pull(controller): Promise<void> {
      const stopEvent = await result;
      const generator = stopEvent.data.result;
      const { value, done } = await generator.next();
    async pull(controller) {
      while (!generator) {
        // get next event from workflow context
        const { value: event, done } =
          await context[Symbol.asyncIterator]().next();
        if (done) {
          closeStreams(controller);
          return;
        }
        generator = handleEvent(event, dataStream);
      }
      const { value: chunk, done } = await generator.next();
      if (done) {
        controller.close();
        closeStreams(controller);
        return;
      }
      const text = trimStartOfStream(value.delta ?? "");
      if (text) controller.enqueue(text);
      const text = trimStartOfStream(chunk.delta ?? "");
      if (text) {
        controller.enqueue(encoder.encode(text));
      }
    },
  });
  return {
    stream: mainStream.pipeThrough(createStreamDataTransformer()),
    dataStream,
  };
}
export async function workflowEventsToStreamData(
  events: AsyncIterable<AgentRunEvent>,
): Promise<StreamData> {
  const streamData = new StreamData();
  (async () => {
    for await (const event of events) {
      if (event instanceof AgentRunEvent) {
        const { name, msg } = event.data;
        if ((streamData as any).isClosed) {
          break;
        }
        streamData.appendMessageAnnotation({
          type: "agent",
          data: { agent: name, text: msg },
        });
      }
    }
  })();
  return streamData;
function handleEvent(
  event: WorkflowEvent<any>,
  dataStream: StreamData,
): AsyncGenerator<ChatResponseChunk> | undefined {
  // Handle for StopEvent
  if (event instanceof StopEvent) {
    return event.data as AsyncGenerator<ChatResponseChunk>;
  }
  // Handle for AgentRunEvent
  if (event instanceof AgentRunEvent) {
    dataStream.appendMessageAnnotation({
      type: "agent",
      data: event.data,
    });
  }
}
@@ -1,11 +0,24 @@
import { WorkflowEvent } from "@llamaindex/core/workflow";
import { WorkflowEvent } from "@llamaindex/workflow";
import { MessageContent } from "llamaindex";
export type AgentInput = {
  message: string;
  message: MessageContent;
  streaming?: boolean;
};
export type AgentRunEventType = "text" | "progress";
export type ProgressEventData = {
  id: string;
  total: number;
  current: number;
};
export type AgentRunEventData = ProgressEventData;
export class AgentRunEvent extends WorkflowEvent<{
  name: string;
  msg: string;
  agent: string;
  text: string;
  type: AgentRunEventType;
  data?: AgentRunEventData;
}> {}
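`AgentRunEvent` now carries an `agent`/`text` pair plus an event `type` and optional progress `data`. A small construction sketch (the values and the import path are illustrative):

```ts
import { AgentRunEvent } from "./workflow/type";

// Plain text event, as emitted by FunctionCallingAgent.writeEvent
const textEvent = new AgentRunEvent({
  agent: "researcher",
  text: "Start to work on: summarize the report",
  type: "text",
});

// Progress event, e.g. while filling missing CSV cells one by one
const progressEvent = new AgentRunEvent({
  agent: "form_filling",
  text: "Filling cell 3 of 12",
  type: "progress",
  data: { id: "fill-missing-cells", total: 12, current: 3 },
});
```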
@@ -30,3 +30,4 @@ {
    "formdata-node": "^6.0.3",
    "marked": "^14.1.2"
    "marked": "^14.1.2",
    "papaparse": "^5.4.1"
  },
@@ -37,2 +38,4 @@ "devDependencies": {
    "@types/node": "^20.9.5",
    "@llamaindex/workflow": "^0.0.3",
    "@types/papaparse": "^5.3.15",
    "concurrently": "^8.2.2",
@@ -39,0 +42,0 @@ "eslint": "^8.54.0",
@@ -21,2 +21,3 @@ {
    "@radix-ui/react-tabs": "^1.1.0",
    "@llamaindex/chat-ui": "0.0.5",
    "ai": "3.3.42",
@@ -33,2 +34,3 @@ "ajv": "^8.12.0",
    "next": "^14.2.4",
    "papaparse": "^5.4.1",
    "react": "^18.2.0",
@@ -41,4 +43,3 @@ "react-dom": "^18.2.0",
    "vaul": "^0.9.1",
    "marked": "^14.1.2",
    "@llamaindex/chat-ui": "0.0.5"
    "marked": "^14.1.2"
  },
@@ -50,2 +51,4 @@ "devDependencies": {
    "@types/uuid": "^9.0.8",
    "@llamaindex/workflow": "^0.0.3",
    "@types/papaparse": "^5.3.15",
    "autoprefixer": "^10.4.16",
@@ -52,0 +55,0 @@ "cross-env": "^7.0.3",
{
  "name": "create-llama",
  "version": "0.3.11",
  "version": "0.3.12",
  "description": "Create LlamaIndex-powered apps with one command",
@@ -5,0 +5,0 @@ "keywords": [
Long strings (supply chain risk): contains long string literals, which may be a sign of obfuscated or packed code. Found 1 instance in 1 package.