Socket
Socket
Sign in · Demo · Install

@langchain/openai

Package Overview
Dependencies
Maintainers
5
Versions
69
Alerts
File Explorer

Advanced tools

Socket logo

Install Socket

Detect and block malicious and high-risk dependencies

Install

@langchain/openai - npm Package Compare versions

Comparing version 0.0.14 to 0.0.15

21

dist/chat_models.d.ts

@@ -5,5 +5,7 @@ import { type ClientOptions, OpenAI as OpenAIClient } from "openai";

import { ChatGenerationChunk, type ChatResult } from "@langchain/core/outputs";
import type { StructuredToolInterface } from "@langchain/core/tools";
import { type StructuredToolInterface } from "@langchain/core/tools";
import { BaseChatModel, type BaseChatModelParams } from "@langchain/core/language_models/chat_models";
import type { BaseFunctionCallOptions } from "@langchain/core/language_models/base";
import type { BaseFunctionCallOptions, BaseLanguageModelInput } from "@langchain/core/language_models/base";
import { z } from "zod";
import { Runnable } from "@langchain/core/runnables";
import type { AzureOpenAIInput, OpenAICallOptions, OpenAIChatInput, OpenAICoreRequestOptions, LegacyOpenAIInput } from "./types.js";

@@ -144,2 +146,17 @@ export type { AzureOpenAIInput, OpenAICallOptions, OpenAIChatInput };

_combineLLMOutput(...llmOutputs: OpenAILLMOutput[]): OpenAILLMOutput;
withStructuredOutput<RunInput = BaseLanguageModelInput, RunOutput extends z.ZodObject<any, any, any, any> = z.ZodObject<any, any, any, any>>({ schema, name, method, includeRaw, }: {
schema: z.ZodEffects<RunOutput> | Record<string, any>;
name: string;
method?: "functionCalling" | "jsonMode";
includeRaw: true;
}): Runnable<RunInput, {
raw: BaseMessage;
parsed: RunOutput;
}>;
withStructuredOutput<RunInput = BaseLanguageModelInput, RunOutput extends z.ZodObject<any, any, any, any> = z.ZodObject<any, any, any, any>>({ schema, name, method, includeRaw, }: {
schema: z.ZodEffects<RunOutput> | Record<string, any>;
name: string;
method?: "functionCalling" | "jsonMode";
includeRaw?: false;
}): Runnable<RunInput, RunOutput>;
}

@@ -7,2 +7,6 @@ import { OpenAI as OpenAIClient } from "openai";

import { convertToOpenAITool } from "@langchain/core/utils/function_calling";
import { RunnablePassthrough, RunnableSequence, } from "@langchain/core/runnables";
import { JsonOutputParser } from "@langchain/core/output_parsers";
import { JsonOutputKeyToolsParser } from "@langchain/core/output_parsers/openai_tools";
import { zodToJsonSchema } from "zod-to-json-schema";
import { getEndpoint } from "./utils/azure.js";

@@ -714,2 +718,88 @@ import { wrapOpenAIClientError } from "./utils/openai.js";

}
/**
* Model wrapper that returns outputs formatted to match the given schema.
*
* @template {z.ZodObject<any, any, any, any>} RunOutput The output type for the Runnable, expected to be a Zod schema object for structured output validation.
*
* @param {z.ZodEffects<RunOutput>} schema The schema for the structured output. Either as a Zod schema or a valid JSON schema object.
* @param {string} name The name of the function to call.
* @param {"functionCalling" | "jsonMode"} method The method to use for getting the structured output. Defaults to "functionCalling".
* @param {boolean | undefined} includeRaw Whether to include the raw output in the result. Defaults to false.
* @returns {Runnable<RunInput, RunOutput> | Runnable<RunInput, { raw: BaseMessage; parsed: RunOutput }>} A new runnable that calls the LLM with structured output.
*/
withStructuredOutput({ schema, name, method = "functionCalling", includeRaw = false, }) {
let llm;
let outputParser;
if (method === "jsonMode") {
llm = this.bind({
response_format: { type: "json_object" },
});
outputParser = new JsonOutputParser();
}
else {
// Is function calling
if (isZodSchema(schema)) {
const asZodSchema = zodToJsonSchema(schema);
llm = this.bind({
tools: [
{
type: "function",
function: {
name,
description: asZodSchema.description,
parameters: asZodSchema,
},
},
],
tool_choice: "auto",
});
outputParser = new JsonOutputKeyToolsParser({
returnSingle: true,
keyName: name,
});
}
else {
llm = this.bind({
tools: [
{
type: "function",
function: {
name,
description: schema.description,
parameters: schema,
},
},
],
tool_choice: "auto",
});
outputParser = new JsonOutputKeyToolsParser({
returnSingle: true,
keyName: name,
});
}
}
if (!includeRaw) {
return llm.pipe(outputParser);
}
const parserAssign = RunnablePassthrough.assign({
// eslint-disable-next-line @typescript-eslint/no-explicit-any
parsed: (input, config) => outputParser.invoke(input.raw, config),
});
const parserNone = RunnablePassthrough.assign({
parsed: () => null,
});
const parsedWithFallback = parserAssign.withFallbacks({
fallbacks: [parserNone],
});
return RunnableSequence.from([
{
raw: llm,
},
parsedWithFallback,
]);
}
}
/**
 * Heuristic test for whether `input` is a Zod schema rather than a plain
 * JSON-schema object: Zod schemas expose a callable `parse` method, which
 * ordinary schema literals do not.
 */
function isZodSchema(input) {
    const parseCandidate = input?.parse;
    return typeof parseCandidate === "function";
}

@@ -1,2 +0,2 @@

import { test, jest, expect } from "@jest/globals";
import { test, jest, expect, describe } from "@jest/globals";
import { ChatMessage, HumanMessage, SystemMessage, } from "@langchain/core/messages";

@@ -7,2 +7,4 @@ import { ChatPromptValue } from "@langchain/core/prompt_values";

import { InMemoryCache } from "@langchain/core/caches";
import { z } from "zod";
import { zodToJsonSchema } from "zod-to-json-schema";
import { ChatOpenAI } from "../chat_models.js";

@@ -632,1 +634,171 @@ test("Test ChatOpenAI", async () => {

});
// Integration tests for ChatOpenAI.withStructuredOutput. These hit the live
// OpenAI API (gpt-4-turbo-preview) and require OPENAI_API_KEY; they exercise
// every combination of schema kind (Zod vs raw JSON schema) and extraction
// method ("functionCalling" vs "jsonMode"), plus the includeRaw option.
describe("ChatOpenAI withStructuredOutput", () => {
// Zod schema + function calling (the default method): the parsed result
// should expose the three calculator fields.
test("withStructuredOutput zod schema function calling", async () => {
const model = new ChatOpenAI({
temperature: 0,
modelName: "gpt-4-turbo-preview",
});
const calculatorSchema = z.object({
operation: z.enum(["add", "subtract", "multiply", "divide"]),
number1: z.number(),
number2: z.number(),
});
const modelWithStructuredOutput = model.withStructuredOutput({
schema: calculatorSchema,
name: "calculator",
});
const prompt = ChatPromptTemplate.fromMessages([
"system",
"You are VERY bad at math and must always use a calculator.",
"human",
"Please help me!! What is 2 + 2?",
]);
const chain = prompt.pipe(modelWithStructuredOutput);
const result = await chain.invoke({});
console.log(result);
// Only key presence is asserted; the model's numeric answer is not pinned.
expect("operation" in result).toBe(true);
expect("number1" in result).toBe(true);
expect("number2" in result).toBe(true);
});
// Zod schema + JSON mode: in JSON mode the schema is NOT sent to the model,
// so the system prompt must spell out the expected keys itself.
test("withStructuredOutput zod schema JSON mode", async () => {
const model = new ChatOpenAI({
temperature: 0,
modelName: "gpt-4-turbo-preview",
});
const calculatorSchema = z.object({
operation: z.enum(["add", "subtract", "multiply", "divide"]),
number1: z.number(),
number2: z.number(),
});
const modelWithStructuredOutput = model.withStructuredOutput({
schema: calculatorSchema,
name: "calculator",
method: "jsonMode",
});
const prompt = ChatPromptTemplate.fromMessages([
"system",
`You are VERY bad at math and must always use a calculator.
Respond with a JSON object containing three keys:
'operation': the type of operation to execute, either 'add', 'subtract', 'multiply' or 'divide',
'number1': the first number to operate on,
'number2': the second number to operate on.
`,
"human",
"Please help me!! What is 2 + 2?",
]);
const chain = prompt.pipe(modelWithStructuredOutput);
const result = await chain.invoke({});
console.log(result);
expect("operation" in result).toBe(true);
expect("number1" in result).toBe(true);
expect("number2" in result).toBe(true);
});
// Raw JSON schema (pre-converted with zodToJsonSchema) + function calling:
// verifies the non-Zod branch of withStructuredOutput.
test("withStructuredOutput JSON schema function calling", async () => {
const model = new ChatOpenAI({
temperature: 0,
modelName: "gpt-4-turbo-preview",
});
const calculatorSchema = z.object({
operation: z.enum(["add", "subtract", "multiply", "divide"]),
number1: z.number(),
number2: z.number(),
});
const modelWithStructuredOutput = model.withStructuredOutput({
schema: zodToJsonSchema(calculatorSchema),
name: "calculator",
});
const prompt = ChatPromptTemplate.fromMessages([
"system",
`You are VERY bad at math and must always use a calculator.`,
"human",
"Please help me!! What is 2 + 2?",
]);
const chain = prompt.pipe(modelWithStructuredOutput);
const result = await chain.invoke({});
console.log(result);
expect("operation" in result).toBe(true);
expect("number1" in result).toBe(true);
expect("number2" in result).toBe(true);
});
// Raw JSON schema + JSON mode: again the prompt must describe the keys.
test("withStructuredOutput JSON schema JSON mode", async () => {
const model = new ChatOpenAI({
temperature: 0,
modelName: "gpt-4-turbo-preview",
});
const calculatorSchema = z.object({
operation: z.enum(["add", "subtract", "multiply", "divide"]),
number1: z.number(),
number2: z.number(),
});
const modelWithStructuredOutput = model.withStructuredOutput({
schema: zodToJsonSchema(calculatorSchema),
name: "calculator",
method: "jsonMode",
});
const prompt = ChatPromptTemplate.fromMessages([
"system",
`You are VERY bad at math and must always use a calculator.
Respond with a JSON object containing three keys:
'operation': the type of operation to execute, either 'add', 'subtract', 'multiply' or 'divide',
'number1': the first number to operate on,
'number2': the second number to operate on.
`,
"human",
"Please help me!! What is 2 + 2?",
]);
const chain = prompt.pipe(modelWithStructuredOutput);
const result = await chain.invoke({});
console.log(result);
expect("operation" in result).toBe(true);
expect("number1" in result).toBe(true);
expect("number2" in result).toBe(true);
});
// includeRaw: true should wrap the output in { raw, parsed }, where `raw`
// is the original chat message carrying the tool_calls payload.
test("withStructuredOutput includeRaw true", async () => {
const model = new ChatOpenAI({
temperature: 0,
modelName: "gpt-4-turbo-preview",
});
const calculatorSchema = z.object({
operation: z.enum(["add", "subtract", "multiply", "divide"]),
number1: z.number(),
number2: z.number(),
});
const modelWithStructuredOutput = model.withStructuredOutput({
schema: calculatorSchema,
name: "calculator",
includeRaw: true,
});
const prompt = ChatPromptTemplate.fromMessages([
"system",
"You are VERY bad at math and must always use a calculator.",
"human",
"Please help me!! What is 2 + 2?",
]);
const chain = prompt.pipe(modelWithStructuredOutput);
const result = await chain.invoke({});
console.log(result);
expect("parsed" in result).toBe(true);
// Need to make TS happy :)
if (!("parsed" in result)) {
throw new Error("parsed not in result");
}
const { parsed } = result;
expect("operation" in parsed).toBe(true);
expect("number1" in parsed).toBe(true);
expect("number2" in parsed).toBe(true);
expect("raw" in result).toBe(true);
// Need to make TS happy :)
if (!("raw" in result)) {
throw new Error("raw not in result");
}
const { raw } = result;
// The raw message must contain a tool call named "calculator" whose
// stringified arguments decode to the calculator fields.
expect(raw.additional_kwargs.tool_calls?.length).toBeGreaterThan(0);
expect(raw.additional_kwargs.tool_calls?.[0].function.name).toBe("calculator");
expect("operation" in
JSON.parse(raw.additional_kwargs.tool_calls?.[0].function.arguments ?? "")).toBe(true);
expect("number1" in
JSON.parse(raw.additional_kwargs.tool_calls?.[0].function.arguments ?? "")).toBe(true);
expect("number2" in
JSON.parse(raw.additional_kwargs.tool_calls?.[0].function.arguments ?? "")).toBe(true);
});
});

7

package.json
{
"name": "@langchain/openai",
"version": "0.0.14",
"version": "0.0.15",
"description": "OpenAI integrations for LangChain.js",

@@ -15,2 +15,3 @@ "type": "module",

},
"homepage": "https://github.com/langchain-ai/langchainjs/tree/main/libs/langchain-openai/",
"scripts": {

@@ -27,3 +28,3 @@ "build": "yarn run build:deps && yarn clean && yarn build:esm && yarn build:cjs && yarn build:scripts",

"lint:fix": "yarn lint:eslint --fix && yarn lint:dpdm",
"clean": "rimraf .turbo/ dist/ && NODE_OPTIONS=--max-old-space-size=4096 yarn create-entrypoints -- --pre",
"clean": "rimraf .turbo/ dist/ && NODE_OPTIONS=--max-old-space-size=4096 yarn lc-build --config ./langchain.config.js --create-entrypoints --pre",
"prepack": "yarn build",

@@ -43,3 +44,3 @@ "test": "yarn run build:deps && NODE_OPTIONS=--experimental-vm-modules jest --testPathIgnorePatterns=\\.int\\.test.ts --testTimeout 30000 --maxWorkers=50%",

"dependencies": {
"@langchain/core": "~0.1.13",
"@langchain/core": "~0.1.39",
"js-tiktoken": "^1.0.7",

@@ -46,0 +47,0 @@ "openai": "^4.26.0",

Sorry, the diff of this file is not supported yet

Socket — SOC 2 logo

Product

  • Package Alerts
  • Integrations
  • Docs
  • Pricing
  • FAQ
  • Roadmap
  • Changelog

Packages

npm

Stay in touch

Get open source security insights delivered straight into your inbox.


  • Terms
  • Privacy
  • Security

Made with ⚡️ by Socket Inc