@langchain/core - npm Package Compare versions

Comparing version 0.2.2 to 0.2.3

dist/callbacks/manager.js

@@ -595,3 +595,4 @@ import { v4 as uuidv4 } from "uuid";

 options?.verbose;
-const tracingV2Enabled = getEnvironmentVariable("LANGCHAIN_TRACING_V2") === "true";
+const tracingV2Enabled = getEnvironmentVariable("LANGCHAIN_TRACING_V2") === "true" ||
+    getEnvironmentVariable("LANGSMITH_TRACING") === "true";
 const tracingEnabled = tracingV2Enabled ||

@@ -598,0 +599,0 @@ (getEnvironmentVariable("LANGCHAIN_TRACING") ?? false);
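
The hunk above changes when LangSmith ("v2") tracing turns on: either LANGCHAIN_TRACING_V2 or the newer LANGSMITH_TRACING variable now enables it. A minimal sketch of the combined check, reading process.env directly instead of the package's getEnvironmentVariable helper:

// Sketch only: mirrors the updated check in dist/callbacks/manager.js.
// Either environment variable now switches on v2 tracing.
const tracingV2Enabled =
    process.env.LANGCHAIN_TRACING_V2 === "true" ||
    process.env.LANGSMITH_TRACING === "true";
const tracingEnabled =
    tracingV2Enabled || (process.env.LANGCHAIN_TRACING ?? false);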

@@ -5,2 +5,5 @@ import { AIMessage, HumanMessage, coerceMessageLikeToMessage, } from "../messages/index.js";

 import { CallbackManager, } from "../callbacks/manager.js";
+import { isStreamEventsHandler } from "../tracers/event_stream.js";
+import { isLogStreamHandler } from "../tracers/log_stream.js";
+import { concat } from "../utils/stream.js";
 /**

@@ -128,35 +131,72 @@ * Creates a transform stream for encoding chat message chunks.

const runManagers = await callbackManager_?.handleChatModelStart(this.toJSON(), baseMessages, handledOptions.runId, undefined, extra, undefined, undefined, handledOptions.runName);
// generate results
const results = await Promise.allSettled(baseMessages.map((messageList, i) => this._generate(messageList, { ...parsedOptions, promptIndex: i }, runManagers?.[i])));
// handle results
const generations = [];
const llmOutputs = [];
await Promise.all(results.map(async (pResult, i) => {
if (pResult.status === "fulfilled") {
const result = pResult.value;
for (const generation of result.generations) {
generation.message.response_metadata = {
...generation.generationInfo,
...generation.message.response_metadata,
};
// Even if stream is not explicitly called, check if model is implicitly
// called from streamEvents() or streamLog() to get all streamed events.
// Bail out if _streamResponseChunks not overridden
const hasStreamingHandler = !!runManagers?.[0].handlers.find((handler) => {
return isStreamEventsHandler(handler) || isLogStreamHandler(handler);
});
if (hasStreamingHandler &&
baseMessages.length === 1 &&
this._streamResponseChunks !==
BaseChatModel.prototype._streamResponseChunks) {
try {
const stream = await this._streamResponseChunks(baseMessages[0], parsedOptions, runManagers?.[0]);
let aggregated;
for await (const chunk of stream) {
if (aggregated === undefined) {
aggregated = chunk;
}
else {
aggregated = concat(aggregated, chunk);
}
}
if (result.generations.length === 1) {
result.generations[0].message.response_metadata = {
...result.llmOutput,
...result.generations[0].message.response_metadata,
};
if (aggregated === undefined) {
throw new Error("Received empty response from chat model call.");
}
generations[i] = result.generations;
llmOutputs[i] = result.llmOutput;
return runManagers?.[i]?.handleLLMEnd({
generations: [result.generations],
llmOutput: result.llmOutput,
generations.push([aggregated]);
await runManagers?.[0].handleLLMEnd({
generations,
llmOutput: {},
});
}
else {
// status === "rejected"
await runManagers?.[i]?.handleLLMError(pResult.reason);
return Promise.reject(pResult.reason);
catch (e) {
await runManagers?.[0].handleLLMError(e);
throw e;
}
}));
}
else {
// generate results
const results = await Promise.allSettled(baseMessages.map((messageList, i) => this._generate(messageList, { ...parsedOptions, promptIndex: i }, runManagers?.[i])));
// handle results
await Promise.all(results.map(async (pResult, i) => {
if (pResult.status === "fulfilled") {
const result = pResult.value;
for (const generation of result.generations) {
generation.message.response_metadata = {
...generation.generationInfo,
...generation.message.response_metadata,
};
}
if (result.generations.length === 1) {
result.generations[0].message.response_metadata = {
...result.llmOutput,
...result.generations[0].message.response_metadata,
};
}
generations[i] = result.generations;
llmOutputs[i] = result.llmOutput;
return runManagers?.[i]?.handleLLMEnd({
generations: [result.generations],
llmOutput: result.llmOutput,
});
}
else {
// status === "rejected"
await runManagers?.[i]?.handleLLMError(pResult.reason);
return Promise.reject(pResult.reason);
}
}));
}
// create combined output

@@ -163,0 +203,0 @@ const output = {
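
The new branch in the chat model hunk above only runs when a streamEvents/streamLog handler is attached, exactly one message list is passed, and the model overrides _streamResponseChunks; it then consumes the stream and folds the chunks into a single generation with concat. A small, self-contained sketch of that aggregation step, assuming the public "@langchain/core/messages" and "@langchain/core/utils/stream" entrypoints:

import { AIMessageChunk } from "@langchain/core/messages";
import { concat } from "@langchain/core/utils/stream";

// Stand-in for the chunks a model's _streamResponseChunks would yield.
async function* fakeChunks() {
  for (const piece of ["RO", "AR"]) {
    yield new AIMessageChunk(piece);
  }
}

let aggregated;
for await (const chunk of fakeChunks()) {
  aggregated = aggregated === undefined ? chunk : concat(aggregated, chunk);
}
if (aggregated === undefined) {
  throw new Error("Received empty response from chat model call.");
}
console.log(aggregated.content); // "ROAR"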

@@ -5,2 +5,5 @@ import { AIMessage, getBufferString, } from "../messages/index.js";

 import { BaseLanguageModel, } from "./base.js";
+import { isStreamEventsHandler } from "../tracers/event_stream.js";
+import { isLogStreamHandler } from "../tracers/log_stream.js";
+import { concat } from "../utils/stream.js";
 /**

@@ -134,12 +137,45 @@ * LLM Wrapper. Takes in a prompt (or prompts) and returns a string.

const runManagers = await callbackManager_?.handleLLMStart(this.toJSON(), prompts, handledOptions.runId, undefined, extra, undefined, undefined, handledOptions?.runName);
// Even if stream is not explicitly called, check if model is implicitly
// called from streamEvents() or streamLog() to get all streamed events.
// Bail out if _streamResponseChunks not overridden
const hasStreamingHandler = !!runManagers?.[0].handlers.find((handler) => {
return isStreamEventsHandler(handler) || isLogStreamHandler(handler);
});
let output;
try {
output = await this._generate(prompts, parsedOptions, runManagers?.[0]);
if (hasStreamingHandler &&
prompts.length === 1 &&
this._streamResponseChunks !== BaseLLM.prototype._streamResponseChunks) {
try {
const stream = await this._streamResponseChunks(prompts[0], parsedOptions, runManagers?.[0]);
let aggregated;
for await (const chunk of stream) {
if (aggregated === undefined) {
aggregated = chunk;
}
else {
aggregated = concat(aggregated, chunk);
}
}
if (aggregated === undefined) {
throw new Error("Received empty response from chat model call.");
}
output = { generations: [[aggregated]], llmOutput: {} };
await runManagers?.[0].handleLLMEnd(output);
}
catch (e) {
await runManagers?.[0].handleLLMError(e);
throw e;
}
}
catch (err) {
await Promise.all((runManagers ?? []).map((runManager) => runManager?.handleLLMError(err)));
throw err;
else {
try {
output = await this._generate(prompts, parsedOptions, runManagers?.[0]);
}
catch (err) {
await Promise.all((runManagers ?? []).map((runManager) => runManager?.handleLLMError(err)));
throw err;
}
const flattenedOutputs = this._flattenLLMResult(output);
await Promise.all((runManagers ?? []).map((runManager, i) => runManager?.handleLLMEnd(flattenedOutputs[i])));
}
const flattenedOutputs = this._flattenLLMResult(output);
await Promise.all((runManagers ?? []).map((runManager, i) => runManager?.handleLLMEnd(flattenedOutputs[i])));
const runIds = runManagers?.map((manager) => manager.runId) || undefined;

@@ -146,0 +182,0 @@ // This defines RUN_KEY as a non-enumerable property on the output object
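
BaseLLM gets the same treatment as BaseChatModel: before generating, it checks whether any attached callback handler is the streamEvents or streamLog tracer, and only routes an invoke() call through _streamResponseChunks when that is true, a single prompt was passed, and _streamResponseChunks is overridden. A sketch of the handler check, using the tracer names that the isStreamEventsHandler / isLogStreamHandler helpers (exported further down in this diff) compare against:

// Sketch of the predicate guarding the new streaming path. The real code
// calls isStreamEventsHandler / isLogStreamHandler, which compare
// handler.name against these two tracer names.
function hasStreamingHandler(handlers) {
  return handlers.some(
    (h) => h.name === "event_stream_tracer" || h.name === "log_stream_tracer"
  );
}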

@@ -8,6 +8,8 @@ /* eslint-disable no-promise-executor-return */

import { ChatPromptTemplate } from "../../prompts/chat.js";
import { FakeListChatModel, FakeRetriever, FakeStreamingLLM, } from "../../utils/testing/index.js";
import { AIMessageChunk, HumanMessage, SystemMessage, } from "../../messages/index.js";
import { FakeChatModel, FakeLLM, FakeListChatModel, FakeRetriever, FakeStreamingLLM, } from "../../utils/testing/index.js";
import { AIMessage, AIMessageChunk, HumanMessage, SystemMessage, } from "../../messages/index.js";
import { DynamicStructuredTool, DynamicTool } from "../../tools.js";
import { Document } from "../../documents/document.js";
import { PromptTemplate } from "../../prompts/prompt.js";
import { GenerationChunk } from "../../outputs.js";
function reverse(s) {

@@ -532,11 +534,25 @@ // Reverse a string.

event: "on_llm_start",
data: {
input: "hello",
},
name: "my_model",
tags: ["my_model"],
run_id: expect.any(String),
tags: expect.arrayContaining(["my_model"]),
metadata: {
a: "b",
},
},
{
event: "on_llm_stream",
data: {
input: "hello",
chunk: {
text: "h",
},
},
run_id: expect.any(String),
name: "my_model",
tags: ["my_model"],
metadata: {
a: "b",
},
},

@@ -546,8 +562,24 @@ {

run_id: expect.any(String),
tags: expect.arrayContaining(["my_model"]),
name: "my_model",
tags: ["my_model"],
metadata: {
a: "b",
},
data: {
chunk: "h",
},
},
{
event: "on_llm_stream",
data: {
chunk: {
text: "e",
},
},
run_id: expect.any(String),
name: "my_model",
data: { chunk: "h" },
tags: ["my_model"],
metadata: {
a: "b",
},
},

@@ -557,8 +589,24 @@ {

run_id: expect.any(String),
tags: expect.arrayContaining(["my_model"]),
name: "my_model",
tags: ["my_model"],
metadata: {
a: "b",
},
data: {
chunk: "e",
},
},
{
event: "on_llm_stream",
data: {
chunk: {
text: "y",
},
},
run_id: expect.any(String),
name: "my_model",
data: { chunk: "e" },
tags: ["my_model"],
metadata: {
a: "b",
},
},

@@ -568,24 +616,30 @@ {

run_id: expect.any(String),
tags: expect.arrayContaining(["my_model"]),
name: "my_model",
tags: ["my_model"],
metadata: {
a: "b",
},
name: "my_model",
data: { chunk: "y" },
data: {
chunk: "y",
},
},
{
event: "on_llm_stream",
data: {
chunk: {
text: "!",
},
},
run_id: expect.any(String),
tags: expect.arrayContaining(["my_model"]),
name: "my_model",
tags: ["my_model"],
metadata: {
a: "b",
},
name: "my_model",
data: { chunk: "!" },
},
{
event: "on_llm_end",
event: "on_llm_stream",
run_id: expect.any(String),
name: "my_model",
run_id: expect.any(String),
tags: expect.arrayContaining(["my_model"]),
tags: ["my_model"],
metadata: {

@@ -595,2 +649,8 @@ a: "b",

data: {
chunk: "!",
},
},
{
event: "on_llm_end",
data: {
output: {

@@ -600,4 +660,4 @@ generations: [

{
text: "hey!",
generationInfo: {},
text: "hey!",
},

@@ -609,2 +669,8 @@ ],

},
run_id: expect.any(String),
name: "my_model",
tags: ["my_model"],
metadata: {
a: "b",
},
},

@@ -821,2 +887,812 @@ ]);

});
test("Chat model that supports streaming, but is invoked, should still emit on_stream events", async () => {
const template = ChatPromptTemplate.fromMessages([
["system", "You are Godzilla"],
["human", "{question}"],
]).withConfig({
runName: "my_template",
tags: ["my_template"],
});
const model = new FakeListChatModel({
responses: ["ROAR"],
}).withConfig({
metadata: { a: "b" },
tags: ["my_model"],
runName: "my_model",
});
const chain = template
.pipe(async (val, config) => {
const result = await model.invoke(val, config);
return result;
})
.withConfig({
metadata: { foo: "bar" },
tags: ["my_chain"],
runName: "my_chain",
});
const events = [];
const eventStream = await chain.streamEvents({ question: "hello" }, { version: "v2" });
for await (const event of eventStream) {
events.push(event);
}
expect(events).toEqual([
{
run_id: expect.any(String),
event: "on_chain_start",
name: "my_chain",
tags: ["my_chain"],
metadata: {
foo: "bar",
},
data: {
input: {
question: "hello",
},
},
},
{
data: { input: { question: "hello" } },
event: "on_prompt_start",
metadata: { foo: "bar" },
name: "my_template",
run_id: expect.any(String),
tags: expect.arrayContaining(["my_chain", "seq:step:1", "my_template"]),
},
{
event: "on_prompt_end",
name: "my_template",
run_id: expect.any(String),
tags: expect.arrayContaining(["seq:step:1", "my_template", "my_chain"]),
metadata: {
foo: "bar",
},
data: {
input: {
question: "hello",
},
output: await template.invoke({ question: "hello" }),
},
},
{
event: "on_chain_start",
data: {},
name: "RunnableLambda",
tags: ["seq:step:2", "my_chain"],
run_id: expect.any(String),
metadata: {
foo: "bar",
},
},
{
event: "on_chat_model_start",
name: "my_model",
run_id: expect.any(String),
tags: expect.arrayContaining(["seq:step:2", "my_model", "my_chain"]),
metadata: {
foo: "bar",
a: "b",
ls_model_type: "chat",
ls_stop: undefined,
},
data: {
input: {
messages: [
[new SystemMessage("You are Godzilla"), new HumanMessage("hello")],
],
},
},
},
{
event: "on_chat_model_stream",
run_id: expect.any(String),
tags: expect.arrayContaining(["my_chain", "my_model", "seq:step:2"]),
metadata: {
a: "b",
foo: "bar",
ls_model_type: "chat",
ls_stop: undefined,
},
name: "my_model",
data: { chunk: new AIMessageChunk("R") },
},
{
event: "on_chat_model_stream",
run_id: expect.any(String),
tags: expect.arrayContaining(["my_chain", "my_model", "seq:step:2"]),
metadata: {
a: "b",
foo: "bar",
ls_model_type: "chat",
ls_stop: undefined,
},
name: "my_model",
data: { chunk: new AIMessageChunk("O") },
},
{
event: "on_chat_model_stream",
run_id: expect.any(String),
tags: expect.arrayContaining(["my_chain", "my_model", "seq:step:2"]),
metadata: {
a: "b",
foo: "bar",
ls_model_type: "chat",
ls_stop: undefined,
},
name: "my_model",
data: { chunk: new AIMessageChunk("A") },
},
{
event: "on_chat_model_stream",
run_id: expect.any(String),
tags: expect.arrayContaining(["my_chain", "my_model", "seq:step:2"]),
metadata: {
a: "b",
foo: "bar",
ls_model_type: "chat",
ls_stop: undefined,
},
name: "my_model",
data: { chunk: new AIMessageChunk("R") },
},
{
event: "on_chat_model_end",
name: "my_model",
run_id: expect.any(String),
tags: expect.arrayContaining(["seq:step:2", "my_model", "my_chain"]),
metadata: {
foo: "bar",
a: "b",
ls_model_type: "chat",
ls_stop: undefined,
},
data: {
input: {
messages: [
[new SystemMessage("You are Godzilla"), new HumanMessage("hello")],
],
},
output: new AIMessageChunk("ROAR"),
},
},
{
event: "on_chain_stream",
run_id: expect.any(String),
tags: expect.arrayContaining(["seq:step:2", "my_chain"]),
metadata: {
foo: "bar",
},
name: "RunnableLambda",
data: { chunk: new AIMessageChunk("ROAR") },
},
{
event: "on_chain_stream",
run_id: expect.any(String),
tags: ["my_chain"],
metadata: {
foo: "bar",
},
name: "my_chain",
data: { chunk: new AIMessageChunk("ROAR") },
},
{
event: "on_chain_end",
name: "RunnableLambda",
run_id: expect.any(String),
tags: expect.arrayContaining(["seq:step:2", "my_chain"]),
metadata: {
foo: "bar",
},
data: {
input: await template.invoke({ question: "hello" }),
output: new AIMessageChunk("ROAR"),
},
},
{
event: "on_chain_end",
name: "my_chain",
run_id: expect.any(String),
tags: ["my_chain"],
metadata: {
foo: "bar",
},
data: {
output: new AIMessageChunk("ROAR"),
},
},
]);
});
test("Chat model that doesn't support streaming, but is invoked, should emit one on_stream event", async () => {
const template = ChatPromptTemplate.fromMessages([
["system", "You are Godzilla"],
["human", "{question}"],
]).withConfig({
runName: "my_template",
tags: ["my_template"],
});
const model = new FakeChatModel({}).withConfig({
metadata: { a: "b" },
tags: ["my_model"],
runName: "my_model",
});
const chain = template
.pipe(async (val, config) => {
const result = await model.invoke(val, config);
return result;
})
.withConfig({
metadata: { foo: "bar" },
tags: ["my_chain"],
runName: "my_chain",
});
const events = [];
const eventStream = await chain.streamEvents({ question: "hello" }, { version: "v2" });
for await (const event of eventStream) {
events.push(event);
}
expect(events).toEqual([
{
run_id: expect.any(String),
event: "on_chain_start",
name: "my_chain",
tags: ["my_chain"],
metadata: {
foo: "bar",
},
data: {
input: {
question: "hello",
},
},
},
{
data: { input: { question: "hello" } },
event: "on_prompt_start",
metadata: { foo: "bar" },
name: "my_template",
run_id: expect.any(String),
tags: expect.arrayContaining(["my_chain", "seq:step:1", "my_template"]),
},
{
event: "on_prompt_end",
name: "my_template",
run_id: expect.any(String),
tags: expect.arrayContaining(["seq:step:1", "my_template", "my_chain"]),
metadata: {
foo: "bar",
},
data: {
input: {
question: "hello",
},
output: await template.invoke({ question: "hello" }),
},
},
{
event: "on_chain_start",
data: {},
name: "RunnableLambda",
tags: ["seq:step:2", "my_chain"],
run_id: expect.any(String),
metadata: {
foo: "bar",
},
},
{
event: "on_chat_model_start",
name: "my_model",
run_id: expect.any(String),
tags: expect.arrayContaining(["seq:step:2", "my_model", "my_chain"]),
metadata: {
foo: "bar",
a: "b",
ls_model_type: "chat",
ls_stop: undefined,
},
data: {
input: {
messages: [
[new SystemMessage("You are Godzilla"), new HumanMessage("hello")],
],
},
},
},
{
event: "on_chat_model_stream",
run_id: expect.any(String),
tags: expect.arrayContaining(["my_chain", "my_model", "seq:step:2"]),
metadata: {
a: "b",
foo: "bar",
ls_model_type: "chat",
ls_stop: undefined,
},
name: "my_model",
data: { chunk: new AIMessageChunk("You are Godzilla\nhello") },
},
{
event: "on_chat_model_end",
name: "my_model",
run_id: expect.any(String),
tags: expect.arrayContaining(["seq:step:2", "my_model", "my_chain"]),
metadata: {
foo: "bar",
a: "b",
ls_model_type: "chat",
ls_stop: undefined,
},
data: {
input: {
messages: [
[new SystemMessage("You are Godzilla"), new HumanMessage("hello")],
],
},
output: new AIMessage("You are Godzilla\nhello"),
},
},
{
event: "on_chain_stream",
run_id: expect.any(String),
tags: expect.arrayContaining(["seq:step:2", "my_chain"]),
metadata: {
foo: "bar",
},
name: "RunnableLambda",
data: { chunk: new AIMessage("You are Godzilla\nhello") },
},
{
event: "on_chain_stream",
run_id: expect.any(String),
tags: ["my_chain"],
metadata: {
foo: "bar",
},
name: "my_chain",
data: { chunk: new AIMessage("You are Godzilla\nhello") },
},
{
event: "on_chain_end",
name: "RunnableLambda",
run_id: expect.any(String),
tags: expect.arrayContaining(["seq:step:2", "my_chain"]),
metadata: {
foo: "bar",
},
data: {
input: await template.invoke({ question: "hello" }),
output: new AIMessage("You are Godzilla\nhello"),
},
},
{
event: "on_chain_end",
name: "my_chain",
run_id: expect.any(String),
tags: ["my_chain"],
metadata: {
foo: "bar",
},
data: {
output: new AIMessage("You are Godzilla\nhello"),
},
},
]);
});
test("LLM that supports streaming, but is invoked, should still emit on_stream events", async () => {
const template = PromptTemplate.fromTemplate(`You are Godzilla\n{question}`).withConfig({
runName: "my_template",
tags: ["my_template"],
});
const model = new FakeStreamingLLM({
responses: ["ROAR"],
}).withConfig({
metadata: { a: "b" },
tags: ["my_model"],
runName: "my_model",
});
const chain = template
.pipe(async (val, config) => {
const result = await model.invoke(val, config);
return result;
})
.withConfig({
metadata: { foo: "bar" },
tags: ["my_chain"],
runName: "my_chain",
});
const events = [];
const eventStream = await chain.streamEvents({ question: "hello" }, { version: "v2" });
for await (const event of eventStream) {
events.push(event);
}
expect(events).toEqual([
{
event: "on_chain_start",
data: {
input: {
question: "hello",
},
},
name: "my_chain",
tags: ["my_chain"],
run_id: expect.any(String),
metadata: {
foo: "bar",
},
},
{
event: "on_prompt_start",
data: {
input: {
question: "hello",
},
},
name: "my_template",
tags: ["seq:step:1", "my_template", "my_chain"],
run_id: expect.any(String),
metadata: {
foo: "bar",
},
},
{
event: "on_prompt_end",
data: {
output: await template.invoke({ question: "hello" }),
input: {
question: "hello",
},
},
run_id: expect.any(String),
name: "my_template",
tags: ["seq:step:1", "my_template", "my_chain"],
metadata: {
foo: "bar",
},
},
{
event: "on_chain_start",
data: {},
name: "RunnableLambda",
tags: ["seq:step:2", "my_chain"],
run_id: expect.any(String),
metadata: {
foo: "bar",
},
},
{
event: "on_llm_start",
data: {
input: {
prompts: ["You are Godzilla\nhello"],
},
},
name: "my_model",
tags: ["seq:step:2", "my_model", "my_chain"],
run_id: expect.any(String),
metadata: {
foo: "bar",
a: "b",
},
},
{
event: "on_llm_stream",
data: {
chunk: new GenerationChunk({
text: "R",
}),
},
run_id: expect.any(String),
name: "my_model",
tags: ["seq:step:2", "my_model", "my_chain"],
metadata: {
foo: "bar",
a: "b",
},
},
{
event: "on_llm_stream",
data: {
chunk: new GenerationChunk({
text: "O",
}),
},
run_id: expect.any(String),
name: "my_model",
tags: ["seq:step:2", "my_model", "my_chain"],
metadata: {
foo: "bar",
a: "b",
},
},
{
event: "on_llm_stream",
data: {
chunk: new GenerationChunk({
text: "A",
}),
},
run_id: expect.any(String),
name: "my_model",
tags: ["seq:step:2", "my_model", "my_chain"],
metadata: {
foo: "bar",
a: "b",
},
},
{
event: "on_llm_stream",
data: {
chunk: new GenerationChunk({
text: "R",
}),
},
run_id: expect.any(String),
name: "my_model",
tags: ["seq:step:2", "my_model", "my_chain"],
metadata: {
foo: "bar",
a: "b",
},
},
{
event: "on_llm_end",
data: {
output: {
generations: [
[
{
text: "ROAR",
generationInfo: {},
},
],
],
llmOutput: {},
},
input: {
prompts: ["You are Godzilla\nhello"],
},
},
run_id: expect.any(String),
name: "my_model",
tags: ["seq:step:2", "my_model", "my_chain"],
metadata: {
foo: "bar",
a: "b",
},
},
{
event: "on_chain_stream",
run_id: expect.any(String),
name: "RunnableLambda",
tags: ["seq:step:2", "my_chain"],
metadata: {
foo: "bar",
},
data: {
chunk: "ROAR",
},
},
{
event: "on_chain_stream",
run_id: expect.any(String),
name: "my_chain",
tags: ["my_chain"],
metadata: {
foo: "bar",
},
data: {
chunk: "ROAR",
},
},
{
event: "on_chain_end",
data: {
output: "ROAR",
input: await template.invoke({ question: "hello" }),
},
run_id: expect.any(String),
name: "RunnableLambda",
tags: ["seq:step:2", "my_chain"],
metadata: {
foo: "bar",
},
},
{
event: "on_chain_end",
data: {
output: "ROAR",
},
run_id: expect.any(String),
name: "my_chain",
tags: ["my_chain"],
metadata: {
foo: "bar",
},
},
]);
});
test("LLM that doesn't support streaming, but is invoked, should emit one on_stream event", async () => {
const template = PromptTemplate.fromTemplate(`You are Godzilla\n{question}`).withConfig({
runName: "my_template",
tags: ["my_template"],
});
const model = new FakeLLM({}).withConfig({
metadata: { a: "b" },
tags: ["my_model"],
runName: "my_model",
});
const chain = template
.pipe(async (val, config) => {
const result = await model.invoke(val, config);
return result;
})
.withConfig({
metadata: { foo: "bar" },
tags: ["my_chain"],
runName: "my_chain",
});
const events = [];
const eventStream = await chain.streamEvents({ question: "hello" }, { version: "v2" });
for await (const event of eventStream) {
events.push(event);
}
expect(events).toEqual([
{
event: "on_chain_start",
data: {
input: {
question: "hello",
},
},
name: "my_chain",
tags: ["my_chain"],
run_id: expect.any(String),
metadata: {
foo: "bar",
},
},
{
event: "on_prompt_start",
data: {
input: {
question: "hello",
},
},
name: "my_template",
tags: ["seq:step:1", "my_template", "my_chain"],
run_id: expect.any(String),
metadata: {
foo: "bar",
},
},
{
event: "on_prompt_end",
data: {
output: await template.invoke({ question: "hello" }),
input: {
question: "hello",
},
},
run_id: expect.any(String),
name: "my_template",
tags: ["seq:step:1", "my_template", "my_chain"],
metadata: {
foo: "bar",
},
},
{
event: "on_chain_start",
data: {},
name: "RunnableLambda",
tags: ["seq:step:2", "my_chain"],
run_id: expect.any(String),
metadata: {
foo: "bar",
},
},
{
event: "on_llm_start",
data: {
input: {
prompts: ["You are Godzilla\nhello"],
},
},
name: "my_model",
tags: ["seq:step:2", "my_model", "my_chain"],
run_id: expect.any(String),
metadata: {
foo: "bar",
a: "b",
},
},
{
event: "on_llm_stream",
data: {
chunk: new GenerationChunk({
text: "You are Godzilla\nhello",
}),
},
run_id: expect.any(String),
name: "my_model",
tags: ["seq:step:2", "my_model", "my_chain"],
metadata: {
foo: "bar",
a: "b",
},
},
{
event: "on_llm_end",
data: {
output: {
generations: [
[
{
text: "You are Godzilla\nhello",
generationInfo: undefined,
},
],
],
llmOutput: {},
},
input: {
prompts: ["You are Godzilla\nhello"],
},
},
run_id: expect.any(String),
name: "my_model",
tags: ["seq:step:2", "my_model", "my_chain"],
metadata: {
foo: "bar",
a: "b",
},
},
{
event: "on_chain_stream",
run_id: expect.any(String),
name: "RunnableLambda",
tags: ["seq:step:2", "my_chain"],
metadata: {
foo: "bar",
},
data: {
chunk: "You are Godzilla\nhello",
},
},
{
event: "on_chain_stream",
run_id: expect.any(String),
name: "my_chain",
tags: ["my_chain"],
metadata: {
foo: "bar",
},
data: {
chunk: "You are Godzilla\nhello",
},
},
{
event: "on_chain_end",
data: {
output: "You are Godzilla\nhello",
input: await template.invoke({ question: "hello" }),
},
run_id: expect.any(String),
name: "RunnableLambda",
tags: ["seq:step:2", "my_chain"],
metadata: {
foo: "bar",
},
},
{
event: "on_chain_end",
data: {
output: "You are Godzilla\nhello",
},
run_id: expect.any(String),
name: "my_chain",
tags: ["my_chain"],
metadata: {
foo: "bar",
},
},
]);
});
test("Runnable streamEvents method with simple tools", async () => {

@@ -823,0 +1699,0 @@ const tool = new DynamicTool({
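
The four new tests added above pin down the user-visible behavior: calling .invoke() on a model inside a chain no longer hides token-level events from streamEvents(). A rough usage sketch (not taken from the diff), assuming the public "@langchain/core/utils/testing" and "@langchain/core/runnables" entrypoints:

import { FakeListChatModel } from "@langchain/core/utils/testing";
import { RunnableLambda } from "@langchain/core/runnables";

const model = new FakeListChatModel({ responses: ["ROAR"] });
// The model is invoked (not streamed) inside the lambda step.
const chain = RunnableLambda.from(async (input, config) => model.invoke(input, config));

for await (const event of chain.streamEvents("hello", { version: "v2" })) {
  if (event.event === "on_chat_model_stream") {
    console.log(event.data.chunk.content); // "R", "O", "A", "R"
  }
}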

@@ -7,6 +7,3 @@ /* eslint-disable no-promise-executor-return */

 import { RunnableSequence, RunnableMap } from "../base.js";
-import { FakeLLM, FakeStreamingLLM, FakeChatModel, FakeRetriever, } from "../../utils/testing/index.js";
-import { SystemMessage, HumanMessage } from "../../messages/index.js";
-import { CommaSeparatedListOutputParser } from "../../output_parsers/list.js";
-import { ChatPromptValue } from "../../prompt_values.js";
+import { FakeLLM, FakeChatModel, FakeRetriever, } from "../../utils/testing/index.js";
 test("Runnable streamLog method", async () => {

@@ -76,209 +73,1 @@ const promptTemplate = PromptTemplate.fromTemplate("{input}");

});
test("Test stream log aggregation", async () => {
const prompt = ChatPromptTemplate.fromMessages([
["system", "You are a nice assistant"],
["human", "{question}"],
]);
const llm = new FakeStreamingLLM({
responses: ["tomato, lettuce, onion"],
});
const parser = new CommaSeparatedListOutputParser({});
const chain = prompt.pipe(llm).pipe(parser);
const logStream = await chain.streamLog({
question: "what is up?",
});
const chunks = [];
for await (const chunk of logStream) {
chunks.push(chunk);
}
expect(chunks).toMatchObject([
{
ops: [
{
op: "replace",
path: "",
value: {
id: expect.any(String),
streamed_output: [],
logs: {},
},
},
],
},
{
ops: [
{
op: "add",
path: "/logs/ChatPromptTemplate",
value: {
id: expect.any(String),
name: "ChatPromptTemplate",
type: "prompt",
tags: ["seq:step:1"],
metadata: {},
start_time: expect.any(String),
streamed_output: [],
streamed_output_str: [],
},
},
],
},
{
ops: [
{
op: "add",
path: "/logs/ChatPromptTemplate/final_output",
value: new ChatPromptValue([
new SystemMessage("You are a nice assistant"),
new HumanMessage("what is up?"),
]),
},
{
op: "add",
path: "/logs/ChatPromptTemplate/end_time",
value: expect.any(String),
},
],
},
{
ops: [
{
op: "add",
path: "/logs/FakeStreamingLLM",
value: {
id: expect.any(String),
name: "FakeStreamingLLM",
type: "llm",
tags: ["seq:step:2"],
metadata: {},
start_time: expect.any(String),
streamed_output: [],
streamed_output_str: [],
},
},
],
},
{
ops: [
{
op: "add",
path: "/logs/CommaSeparatedListOutputParser",
value: {
id: expect.any(String),
name: "CommaSeparatedListOutputParser",
type: "parser",
tags: ["seq:step:3"],
metadata: {},
start_time: expect.any(String),
streamed_output: [],
streamed_output_str: [],
},
},
],
},
{
ops: [
{
op: "add",
path: "/logs/CommaSeparatedListOutputParser/streamed_output/-",
value: ["tomato"],
},
],
},
{
ops: [
{
op: "add",
path: "/streamed_output/-",
value: ["tomato"],
},
],
},
{
ops: [
{
op: "add",
path: "/logs/CommaSeparatedListOutputParser/streamed_output/-",
value: ["lettuce"],
},
],
},
{
ops: [
{
op: "add",
path: "/streamed_output/-",
value: ["lettuce"],
},
],
},
{
ops: [
{
op: "add",
path: "/logs/FakeStreamingLLM/final_output",
value: {
generations: [
[
{
text: "tomato, lettuce, onion",
generationInfo: {},
},
],
],
},
},
{
op: "add",
path: "/logs/FakeStreamingLLM/end_time",
value: expect.any(String),
},
],
},
{
ops: [
{
op: "add",
path: "/logs/CommaSeparatedListOutputParser/streamed_output/-",
value: ["onion"],
},
],
},
{
ops: [
{
op: "add",
path: "/streamed_output/-",
value: ["onion"],
},
],
},
{
ops: [
{
op: "add",
path: "/logs/CommaSeparatedListOutputParser/final_output",
value: {
output: ["tomato", "lettuce", "onion"],
},
},
{
op: "add",
path: "/logs/CommaSeparatedListOutputParser/end_time",
value: expect.any(String),
},
],
},
{
ops: [
{
op: "replace",
path: "/final_output",
value: {
output: ["tomato", "lettuce", "onion"],
},
},
],
},
]);
});

@@ -10,4 +10,2 @@ export interface AsyncLocalStorageInterface {

 declare class AsyncLocalStorageProvider {
-    private asyncLocalStorage;
-    private hasBeenInitialized;
     getInstance(): AsyncLocalStorageInterface;

@@ -14,0 +12,0 @@ initializeGlobalInstance(instance: AsyncLocalStorageInterface): void;

@@ -10,24 +10,11 @@ /* eslint-disable @typescript-eslint/no-explicit-any */

 }
 const mockAsyncLocalStorage = new MockAsyncLocalStorage();
 class AsyncLocalStorageProvider {
-    constructor() {
-        Object.defineProperty(this, "asyncLocalStorage", {
-            enumerable: true,
-            configurable: true,
-            writable: true,
-            value: new MockAsyncLocalStorage()
-        });
-        Object.defineProperty(this, "hasBeenInitialized", {
-            enumerable: true,
-            configurable: true,
-            writable: true,
-            value: false
-        });
-    }
     getInstance() {
-        return this.asyncLocalStorage;
+        return (globalThis.__lc_tracing_async_local_storage ??
+            mockAsyncLocalStorage);
     }
     initializeGlobalInstance(instance) {
-        if (!this.hasBeenInitialized) {
-            this.hasBeenInitialized = true;
-            this.asyncLocalStorage = instance;
+        if (globalThis.__lc_tracing_async_local_storage === undefined) {
+            globalThis.__lc_tracing_async_local_storage = instance;
         }

@@ -34,0 +21,0 @@ }
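
The singleton rewrite above moves the active AsyncLocalStorage instance off the class instance and onto globalThis, which means separately loaded copies of the module resolve to the same storage. A generic sketch of the pattern, with an illustrative key name (the package uses __lc_tracing_async_local_storage):

const GLOBAL_KEY = "__example_async_local_storage";

function initializeGlobalInstance(instance) {
  // First initializer wins; later calls are no-ops.
  if (globalThis[GLOBAL_KEY] === undefined) {
    globalThis[GLOBAL_KEY] = instance;
  }
}

function getInstance(fallback) {
  // Fall back to a mock/no-op implementation when nothing has registered one.
  return globalThis[GLOBAL_KEY] ?? fallback;
}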

 import { BaseTracer, type Run } from "./base.js";
-import { BaseCallbackHandlerInput } from "../callbacks/base.js";
+import { BaseCallbackHandler, BaseCallbackHandlerInput } from "../callbacks/base.js";
 import { IterableReadableStream } from "../utils/stream.js";

@@ -97,2 +97,3 @@ /**

 }
+export declare const isStreamEventsHandler: (handler: BaseCallbackHandler) => handler is EventStreamCallbackHandler;
 /**

@@ -99,0 +100,0 @@ * Class that extends the `BaseTracer` class from the

@@ -17,2 +17,3 @@ import { BaseTracer } from "./base.js";

 }
+export const isStreamEventsHandler = (handler) => handler.name === "event_stream_tracer";
 /**

@@ -19,0 +20,0 @@ * Class that extends the `BaseTracer` class from the

 import { type Operation as JSONPatchOperation } from "../utils/fast-json-patch/index.js";
 import { BaseTracer, type Run } from "./base.js";
-import { BaseCallbackHandlerInput, HandleLLMNewTokenCallbackFields } from "../callbacks/base.js";
+import { BaseCallbackHandler, BaseCallbackHandlerInput, HandleLLMNewTokenCallbackFields } from "../callbacks/base.js";
 import { IterableReadableStream } from "../utils/stream.js";

@@ -86,2 +86,3 @@ import type { StreamEvent, StreamEventData } from "./event_stream.js";

 }
+export declare const isLogStreamHandler: (handler: BaseCallbackHandler) => handler is LogStreamCallbackHandler;
 /**

@@ -88,0 +89,0 @@ * Class that extends the `BaseTracer` class from the

@@ -57,2 +57,3 @@ import { applyPatch, } from "../utils/fast-json-patch/index.js";

 }
+export const isLogStreamHandler = (handler) => handler.name === "log_stream_tracer";
 /**

@@ -59,0 +60,0 @@ * Extract standardized inputs from a run.

@@ -54,3 +54,3 @@ import { z } from "zod";

     _call(prompt: string): Promise<string>;
-    _streamResponseChunks(input: string): AsyncGenerator<GenerationChunk, void, unknown>;
+    _streamResponseChunks(input: string, _options?: this["ParsedCallOptions"], runManager?: CallbackManagerForLLMRun): AsyncGenerator<GenerationChunk, void, unknown>;
 }

@@ -57,0 +57,0 @@ export declare class FakeChatModel extends BaseChatModel {

@@ -127,3 +127,3 @@ /* eslint-disable no-promise-executor-return */

     }
-    async *_streamResponseChunks(input) {
+    async *_streamResponseChunks(input, _options, runManager) {
         if (this.thrownErrorString) {

@@ -137,2 +137,3 @@ throw new Error(this.thrownErrorString);

             yield { text: c, generationInfo: {} };
+            await runManager?.handleLLMNewToken(c);
         }

@@ -139,0 +140,0 @@ }
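
With the extra runManager?.handleLLMNewToken(c) call, FakeStreamingLLM now reports each streamed character to its callback manager, which appears to be what the updated on_llm_stream expectations earlier in this diff rely on. A quick way to observe it, assuming the public "@langchain/core/utils/testing" entrypoint:

import { FakeStreamingLLM } from "@langchain/core/utils/testing";

const llm = new FakeStreamingLLM({ responses: ["ROAR"] });
const tokens = [];
const stream = await llm.stream("hello", {
  callbacks: [{ handleLLMNewToken: (token) => tokens.push(token) }],
});
for await (const _chunk of stream) {
  // Drain the stream so every token callback fires.
}
console.log(tokens); // ["R", "O", "A", "R"]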

 {
   "name": "@langchain/core",
-  "version": "0.2.2",
+  "version": "0.2.3",
   "description": "Core LangChain.js abstractions and schemas",

@@ -5,0 +5,0 @@ "type": "module",

Diffs for the remaining files in this comparison are not displayed (unsupported file types, or too large to show).
