Huge News! Announcing our $40M Series B led by Abstract Ventures. Learn More
Socket
Sign in · Demo · Install
Socket

@callstack/byorg-core

Package Overview
Dependencies
Maintainers
0
Versions
14
Alerts
File Explorer

Advanced tools

Socket logo

Install Socket

Detect and block malicious and high-risk dependencies

Install

@callstack/byorg-core - npm Package Compare versions

Comparing version 0.6.0 to 0.7.0

14

CHANGELOG.md
# @callstack/byorg-core
## 0.7.0
### Minor Changes
- 3770dfb: core: chatModel is customizable using RequestContext, removed default maxTokens and maxSteps values
- 4463e11: core: pass delta as 2nd param to onPartialUpdate
### Patch Changes
- 6152fb3: core: fix for a case when user message isn't the newest one
- d6b3e3a: core: reduce some of the startup logs severity to debug
- Updated dependencies [d6b3e3a]
- @callstack/byorg-utils@0.7.0
## 0.6.0

@@ -4,0 +18,0 @@

14

dist/esm/index.d.ts

@@ -9,3 +9,3 @@ import { LanguageModel } from 'ai';

export declare type ApplicationConfig = {
chatModel: ChatModel | ((context: RequestContext) => ChatModel);
chatModel: ChatModel;
systemPrompt?: ((context: RequestContext) => string | null) | string;

@@ -62,3 +62,3 @@ plugins?: ApplicationPlugin[];

export declare function createMockChatModel(config?: MockModelConfig): ChatModel;
export declare function createMockChatModel(config?: MockModelConfig): MockChatModel;

@@ -101,2 +101,6 @@ export declare type DocumentReference = {

declare type MockChatModel = ChatModel & {
calls: Parameters<ChatModel['generateResponse']>[];
};
export declare type MockModelConfig = {

@@ -106,2 +110,3 @@ responses?: string[];

seed?: number;
processRequest?: (context: RequestContext) => string;
};

@@ -167,3 +172,3 @@

extras?: MessageRequestExtras;
onPartialResponse?: (partialText: string) => void;
onPartialResponse?: (partialText: string, delta: string) => void;
};

@@ -189,4 +194,5 @@

resolvedEntities: EntityInfo;
chatModel: ChatModel;
systemPrompt: () => string | null;
onPartialResponse?: (text: string) => void;
onPartialResponse?: (text: string, delta: string) => void;
extras: MessageRequestExtras;

@@ -193,0 +199,0 @@ performance: PerformanceTimeline;

@@ -178,5 +178,5 @@ import * as __WEBPACK_EXTERNAL_MODULE__callstack_byorg_utils__ from "@callstack/byorg-utils";

function createApp(config) {
const { plugins = [], chatModel, errorHandler = defaultErrorHandler } = config;
const { plugins = [], errorHandler = defaultErrorHandler } = config;
plugins.forEach((plugin)=>{
__WEBPACK_EXTERNAL_MODULE__callstack_byorg_utils__.logger.info(`Plugin "${plugin.name}" registered`);
__WEBPACK_EXTERNAL_MODULE__callstack_byorg_utils__.logger.debug(`Plugin "${plugin.name}" registered`);
});

@@ -195,3 +195,3 @@ const middlewares = [

tools.forEach((tool)=>{
__WEBPACK_EXTERNAL_MODULE__callstack_byorg_utils__.logger.info(`Tool "${tool.name}" registered`);
__WEBPACK_EXTERNAL_MODULE__callstack_byorg_utils__.logger.debug(`Tool "${tool.name}" registered`);
});

@@ -202,7 +202,5 @@ const middlewareExecutor = new MiddlewareHandler();

processMessages: async (messages, options)=>{
messages = normalizeMessages(messages);
const lastMessage = messages.at(-1);
if (lastMessage?.role !== 'user') {
throw new Error('Last message in the "messages" list should be a "UserMessage"');
}
__WEBPACK_EXTERNAL_MODULE__callstack_byorg_utils__.logger.debug(`Processing message for user: ${lastMessage.senderId}`);
__WEBPACK_EXTERNAL_MODULE__callstack_byorg_utils__.logger.debug(`Processing message for user: ${lastMessage?.senderId}`);
const performance = new PerformanceTimeline();

@@ -214,3 +212,3 @@ performance.markStart(PerformanceMarks.processMessages);

const lastMessage = this.messages.at(-1);
// This will normally be true, unless the middleware
// This will normally be true, unless the middleware changes the messages
if (lastMessage?.role !== 'user') {

@@ -221,2 +219,3 @@ throw new Error('Last message in the "messages" list should be a "UserMessage"');

},
chatModel: config.chatModel,
systemPrompt: ()=>typeof config.systemPrompt === 'function' ? config.systemPrompt(context) : config.systemPrompt ?? null,

@@ -234,4 +233,3 @@ onPartialResponse: options?.onPartialResponse,

performance.markStart(PerformanceMarks.chatModel);
const resolvedChatModel = typeof chatModel === 'function' ? chatModel(context) : chatModel;
const response = await resolvedChatModel.generateResponse(context);
const response = await context.chatModel.generateResponse(context);
performance.markEnd(PerformanceMarks.chatModel);

@@ -269,2 +267,12 @@ // Opens the 'middleware:afterHandler' mark that will be closed after middlewareExecutor has run

}
// Drops any trailing assistant messages so the final entry is a UserMessage.
// Returns the input list unchanged when it already ends with a user message.
function normalizeMessages(messages) {
    const lastUserIdx = messages.findLastIndex((message) => message.role === 'user');
    if (lastUserIdx === messages.length - 1) {
        // List already ends with a user message — nothing to trim.
        return messages;
    }
    // Keep everything up to (and including) the last user message.
    const trimmed = messages.slice(0, lastUserIdx + 1);
    const droppedCount = messages.length - trimmed.length;
    __WEBPACK_EXTERNAL_MODULE__callstack_byorg_utils__.logger.warn(`Ignored ${droppedCount} trailing assistant message(s).`);
    return trimmed;
}
function defaultErrorHandler(error, _context) {

@@ -284,4 +292,2 @@ __WEBPACK_EXTERNAL_MODULE__callstack_byorg_utils__.logger.error('Error while processing message:', error);

const DEFAULT_MAX_TOKENS = 1024;
const DEFAULT_MAX_STEPS = 5;
// Workaround for memory issue happening when sending image attachment. The attachments get inefficiently serialised causing a memory spike.

@@ -353,5 +359,5 @@ const VERCEL_AI_SHARED_OPTIONS = {

model: this._options.languageModel,
maxTokens: this._options.maxTokens ?? DEFAULT_MAX_TOKENS,
maxSteps: this._options.maxSteps ?? DEFAULT_MAX_STEPS,
messages: context.messages,
maxTokens: this._options.maxTokens,
maxSteps: this._options.maxSteps,
tools: context.tools

@@ -362,3 +368,3 @@ });

currentText += textDelta;
onPartialResponse(currentText);
onPartialResponse(currentText, textDelta);
}

@@ -384,5 +390,5 @@ const responseTime = performance.now() - startTime;

model: this._options.languageModel,
maxTokens: this._options.maxTokens ?? DEFAULT_MAX_TOKENS,
maxSteps: this._options.maxSteps ?? DEFAULT_MAX_STEPS,
messages: context.messages,
maxTokens: this._options.maxTokens,
maxSteps: this._options.maxSteps,
tools: context.tools

@@ -546,7 +552,13 @@ });

const delay = config?.delay ?? 100;
const processRequest = config?.processRequest;
const calls = [];
let lastRandom = config?.seed ?? Date.now();
return {
calls,
generateResponse: async (context)=>{
calls.push([
context
]);
lastRandom = random(lastRandom);
const response = responses[lastRandom % responses.length];
const response = processRequest ? processRequest(context) : responses[lastRandom % responses.length];
const tokens = response.split(/(\S+\s*)/).filter(Boolean);

@@ -558,3 +570,3 @@ if (context.onPartialResponse) {

accumulator += token;
context.onPartialResponse(accumulator);
context.onPartialResponse(accumulator, token);
}

@@ -561,0 +573,0 @@ }

{
"name": "@callstack/byorg-core",
"version": "0.6.0",
"version": "0.7.0",
"type": "module",

@@ -28,6 +28,6 @@ "license": "MIT",

"zod": "^3.23.8",
"@callstack/byorg-utils": "0.6.0"
"@callstack/byorg-utils": "0.7.0"
},
"peerDependencies": {
"ai": "^4.0.3"
"ai": "^4.0.18"
},

@@ -37,3 +37,3 @@ "devDependencies": {

"@rslib/core": "^0.1.3",
"ai": "^4.0.13",
"ai": "^4.0.18",
"vitest": "^2.1.8"

@@ -40,0 +40,0 @@ },

Sorry, the diff of this file is not supported yet

SocketSocket SOC 2 Logo

Product

  • Package Alerts
  • Integrations
  • Docs
  • Pricing
  • FAQ
  • Roadmap
  • Changelog

Packages

npm

Stay in touch

Get open source security insights delivered straight into your inbox.


  • Terms
  • Privacy
  • Security

Made with ⚡️ by Socket Inc