aiconfig npm package: comparing versions

Comparing version 1.0.1 to 1.0.2. In the hunks below, a leading "-" marks a line removed in 1.0.2, a leading "+" marks a line added, and a leading space marks unchanged context.

dist/__tests__/testProgramaticallyCreateConfig.d.ts

dist/__tests__/config.test.js

@@ -119,13 +119,7 @@ "use strict";
 const prompt1 = prompts[0];
-expect(prompt1.input).toEqual({
-content: "I need to create a JSON representation of a list of products for our e-commerce website. Please provide the JSON structure with placeholders for product details. Product names: MacBook, Apple Watch",
-role: "user",
-});
+expect(prompt1.input).toEqual("I need to create a JSON representation of a list of products for our e-commerce website. Please provide the JSON structure with placeholders for product details. Product names: MacBook, Apple Watch");
 expect(prompt1.metadata.model).toBe("gpt-3.5-turbo");
 expect(prompt1.metadata.parameters).toEqual({ products: "Thunderbolt" });
 const prompt2 = prompts[1];
-expect(prompt2.input).toEqual({
-content: "Now, fill in the placeholders with the details of three products, including their names, prices, and descriptions.",
-role: "user",
-});
+expect(prompt2.input).toEqual("Now, fill in the placeholders with the details of three products, including their names, prices, and descriptions.");
 expect(prompt2.metadata.model).toBe("gpt-3.5-turbo");

@@ -160,6 +154,3 @@ expect(prompt2.metadata.parameters).toEqual({ products: "Thunderbolt" });
 const prompt1 = prompts[0];
-expect(prompt1.input).toEqual({
-content: "I need to create a JSON representation of a list of products for our e-commerce website. Please provide the JSON structure with placeholders for product details. Product names: MacBook, Apple Watch",
-role: "user",
-});
+expect(prompt1.input).toEqual("I need to create a JSON representation of a list of products for our e-commerce website. Please provide the JSON structure with placeholders for product details. Product names: MacBook, Apple Watch");
 // Prompt Model metadata should override just the differences from the global model metadata

@@ -172,6 +163,3 @@ expect(prompt1.metadata.model).toEqual({
 const prompt2 = prompts[1];
-expect(prompt2.input).toEqual({
-content: "Now, fill in the placeholders with the details of three products, including their names, prices, and descriptions.",
-role: "user",
-});
+expect(prompt2.input).toEqual("Now, fill in the placeholders with the details of three products, including their names, prices, and descriptions.");
 expect(prompt1.metadata.model).toEqual({

@@ -178,0 +166,0 @@ name: "gpt-3.5-turbo",
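
These test updates capture the user-visible change: in 1.0.2 a prompt's input is asserted as a plain string rather than a { content, role } message object. A minimal sketch of the shape the updated tests expect (the prompt literal below is hypothetical, for illustration only; the model name and parameters mirror the tests above):

// Hypothetical prompt object matching the shape asserted in the 1.0.2 tests.
const prompt = {
  name: "prompt1",
  input: "List three products as JSON.", // plain string input, new in 1.0.2
  metadata: {
    model: "gpt-3.5-turbo",
    parameters: { products: "Thunderbolt" }, // parameter shape from the tests above
  },
};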


dist/demo/demo.js

@@ -120,7 +120,3 @@ #!/usr/bin/env -S npm run tsn -T
 // }
-// Streaming:
-let result2 = yield aiConfig.run("demoPrompt",
-/*params*/ {
-name: "Streaming Demo",
-}, {
+const inferenceOptions = {
 callbacks: {

@@ -131,3 +127,11 @@ streamCallback: (data, _accumulatedData, _index) => {
 },
-});
+};
+const streamCallback = (data, _accumulatedData, _index) => {
+process.stdout.write((data === null || data === void 0 ? void 0 : data.content) || "\n");
+};
+// Streaming:
+let result2 = yield aiConfig.run("demoPrompt",
+/*params*/ {
+name: "Streaming Demo",
+}, inferenceOptions);
 // This is a Streaming Demo test.

@@ -134,0 +138,0 @@ console.log(aiConfig.getOutputText("demoPrompt"));
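
The demo refactor above hoists the streaming options into a named inferenceOptions object and passes it to aiConfig.run. A minimal TypeScript sketch of the same pattern (assuming an already-loaded AIConfigRuntime instance; "demoPrompt", the params, and the callback shape come from the demo itself):

async function streamDemo(aiConfig: any) {
  // Reusable inference options, as in the refactored demo.
  const inferenceOptions = {
    callbacks: {
      // Write each streamed chunk to stdout as it arrives.
      streamCallback: (data: any, _accumulatedData: any, _index: any) => {
        process.stdout.write(data?.content || "\n");
      },
    },
  };
  await aiConfig.run("demoPrompt", /*params*/ { name: "Streaming Demo" }, inferenceOptions);
  console.log(aiConfig.getOutputText("demoPrompt"));
}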

dist/index.d.ts

@@ -0,1 +1,2 @@
+export * from "./common";
 export * from "./lib/config";

@@ -6,1 +7,2 @@ export * from "./types";
 export * from "./lib/parameterizedModelParser";
+export * from "./lib/parsers/openai";

dist/index.js

@@ -17,2 +17,3 @@ "use strict";
 Object.defineProperty(exports, "__esModule", { value: true });
+__exportStar(require("./common"), exports);
 __exportStar(require("./lib/config"), exports);

@@ -23,1 +24,2 @@ __exportStar(require("./types"), exports);
 __exportStar(require("./lib/parameterizedModelParser"), exports);
+__exportStar(require("./lib/parsers/openai"), exports);

dist/jest.config.js

@@ -10,4 +10,4 @@ "use strict";
 },
-transformIgnorePatterns: ["<rootDir>/node_modules/"],
+transformIgnorePatterns: ["<rootDir>/node_modules/", "dist"],
 };
 exports.default = config;

dist/lib/config.d.ts

 import { JSONObject, JSONValue } from "../common";
-import { AIConfig, ModelMetadata, Output, Prompt, SchemaVersion } from "../types";
+import { AIConfig, InferenceSettings, ModelMetadata, Output, Prompt, SchemaVersion } from "../types";
 import { InferenceOptions, ModelParser } from "./modelParser";

@@ -28,2 +28,3 @@ export type PromptWithOutputs = Prompt & {
 prompts: PromptWithOutputs[];
+filePath?: string;
 constructor(name: string, description?: string, schemaVersion?: SchemaVersion, metadata?: AIConfig["metadata"], prompts?: PromptWithOutputs[]);

@@ -62,3 +63,3 @@ /**
 */
-save(filePath: string, saveOptions?: SaveOptions): void;
+save(filePath?: string, saveOptions?: SaveOptions): void;
 /**

@@ -79,3 +80,3 @@ * Saves this AIConfig to an AI Workbook.
 */
-static registerModelParser(modelParser: ModelParser, ids?: string[]): void;
+static registerModelParser(modelParser: ModelParser<any, any>, ids?: string[]): void;
 /**

@@ -238,2 +239,14 @@ * Retrieves a model parser from the registry.
 getOutputText(prompt: string | Prompt, output?: Output): string;
+/**
+ * Returns the global settings for a given model.
+ */
+getGlobalSettings(modelName: string): InferenceSettings | undefined;
+/**
+ * Generates a ModelMetadata object from the inference settings by extracting the settings that override the global settings.
+ *
+ * @param inferenceSettings - The inference settings to be used for the model.
+ * @param modelName - The unique identifier for the model.
+ * @returns A ModelMetadata object that includes the model's name and optional settings.
+ */
+getModelMetadata(inferenceSettings: InferenceSettings, modelName: string): ModelMetadata;
 }
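
Taken together, these declaration changes add an optional filePath field on the runtime, make the save path optional, and expose getGlobalSettings and getModelMetadata. A minimal sketch of how the new surface composes (config is a hypothetical AIConfigRuntime instance; the settings values are illustrative):

// Global settings come from metadata.models[modelName], if present.
const globals = config.getGlobalSettings("gpt-3.5-turbo"); // e.g. { temperature: 0.7 }, if set

// Only settings that differ from the globals are kept as overrides.
const meta = config.getModelMetadata({ temperature: 0.7, top_p: 0.9 }, "gpt-3.5-turbo");
// => { name: "gpt-3.5-turbo", settings: { top_p: 0.9 } } given the globals above

// save() may now omit the path; per the compiled implementation below, it falls
// back to this.filePath (recorded at load time), then to "aiconfig.json".
config.save();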

dist/lib/config.js

@@ -46,2 +46,3 @@ "use strict";
 const openai_1 = require("./parsers/openai");
+const utils_2 = require("./utils");
 modelParserRegistry_1.ModelParserRegistry.registerModelParser(new openai_1.OpenAIModelParser(), [

@@ -93,3 +94,5 @@ "babbage-002",
 const aiConfigObj = JSON.parse(aiConfigString);
-return this.loadJSON(aiConfigObj);
+const config = this.loadJSON(aiConfigObj);
+config.filePath = aiConfigFilePath;
+return config;
 }

@@ -163,2 +166,3 @@ /**
 save(filePath, saveOptions) {
+var _a;
 try {

@@ -173,4 +177,9 @@ let aiConfigObj = lodash_1.default.cloneDeep(this);
 }
+// Remove the filePath property from the to-be-saved AIConfig
+aiConfigObj.filePath = undefined;
 // TODO: saqadri - make sure that the object satisfies the AIConfig schema
 const aiConfigString = JSON.stringify(aiConfigObj, null, 2);
+if (!filePath) {
+filePath = (_a = this.filePath) !== null && _a !== void 0 ? _a : "aiconfig.json";
+}
 fs.writeFileSync(filePath, aiConfigString);

@@ -407,3 +416,6 @@ }
 }
-this.metadata.models[modelMetadata.name] = modelMetadata;
+if (!modelMetadata.settings) {
+modelMetadata.settings = {};
+}
+this.metadata.models[modelMetadata.name] = modelMetadata.settings;
 }

@@ -613,3 +625,3 @@ }
 getModelName(prompt) {
-var _a;
+var _a, _b, _c;
 if (typeof prompt === "string") {

@@ -621,6 +633,6 @@ prompt = this.getPrompt(prompt);
 }
-if (typeof prompt.metadata.model === "string") {
+if (typeof ((_a = prompt === null || prompt === void 0 ? void 0 : prompt.metadata) === null || _a === void 0 ? void 0 : _a.model) === "string") {
 return prompt.metadata.model;
 }
-else if (prompt.metadata.model == null) {
+else if (((_b = prompt === null || prompt === void 0 ? void 0 : prompt.metadata) === null || _b === void 0 ? void 0 : _b.model) == null) {
 const defaultModel = this.metadata.default_model;

@@ -632,5 +644,3 @@ if (defaultModel == null) {
 }
-{
-return (_a = prompt.metadata.model) === null || _a === void 0 ? void 0 : _a.name;
-}
+return (_c = prompt.metadata.model) === null || _c === void 0 ? void 0 : _c.name;
 }

@@ -654,3 +664,26 @@ /**
 }
+/**
+ * Returns the global settings for a given model.
+ */
+getGlobalSettings(modelName) {
+var _a;
+return (_a = this.metadata.models) === null || _a === void 0 ? void 0 : _a[modelName];
+}
+/**
+ * Generates a ModelMetadata object from the inference settings by extracting the settings that override the global settings.
+ *
+ * @param inferenceSettings - The inference settings to be used for the model.
+ * @param modelName - The unique identifier for the model.
+ * @returns A ModelMetadata object that includes the model's name and optional settings.
+ */
+getModelMetadata(inferenceSettings, modelName) {
+const overrideSettings = (0, utils_2.extractOverrideSettings)(this, inferenceSettings, modelName);
+if (!overrideSettings || Object.keys(overrideSettings).length === 0) {
+return { name: modelName };
+}
+else {
+return { name: modelName, settings: overrideSettings };
+}
+}
 }
 exports.AIConfigRuntime = AIConfigRuntime;

@@ -22,9 +22,9 @@ "use strict";
 getModelSettings(prompt, aiConfig) {
-var _a, _b, _c, _d;
+var _a, _b, _c, _d, _e;
 if (prompt == null) {
 return (_a = aiConfig.metadata.models) === null || _a === void 0 ? void 0 : _a[this.id];
 }
-const modelMetadata = prompt.metadata.model;
+const modelMetadata = (_b = prompt.metadata) === null || _b === void 0 ? void 0 : _b.model;
 if (typeof modelMetadata === "string") {
-return (_b = aiConfig.metadata.models) === null || _b === void 0 ? void 0 : _b[modelMetadata];
+return (_c = aiConfig.metadata.models) === null || _c === void 0 ? void 0 : _c[modelMetadata];
 }

@@ -36,6 +36,6 @@ else if (modelMetadata == null) {
 }
-return (_c = aiConfig.metadata.models) === null || _c === void 0 ? void 0 : _c[defaultModel];
+return (_d = aiConfig.metadata.models) === null || _d === void 0 ? void 0 : _d[defaultModel];
 }
 else {
-const globalModelMetadata = (_d = aiConfig.metadata.models) === null || _d === void 0 ? void 0 : _d[modelMetadata.name];
+const globalModelMetadata = (_e = aiConfig.metadata.models) === null || _e === void 0 ? void 0 : _e[modelMetadata.name];
 return Object.assign(Object.assign({}, (globalModelMetadata || {})), (modelMetadata.settings || {}));

@@ -42,0 +42,0 @@ }
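
The null-safe rewrite above leaves getModelSettings' precedence unchanged: prompt-level model settings are spread over the global entry for the same model, so prompt values win. A small sketch of that merge in isolation (values assumed for illustration):

// Global entry for the model vs. prompt-level settings.
const globalModelMetadata = { temperature: 0.7, top_p: 1 };
const promptSettings = { top_p: 0.9 };
// Same precedence as the Object.assign call above: prompt settings win.
const effective = { ...globalModelMetadata, ...promptSettings };
// => { temperature: 0.7, top_p: 0.9 }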

dist/lib/modelParserRegistry.d.ts

@@ -16,4 +16,4 @@ import { Prompt } from "../types";
 */
-static getModelParser(id: string): ModelParser<import("../common").JSONObject, import("../common").JSONObject> | undefined;
-static getModelParserForPrompt(prompt: Prompt, aiConfig: AIConfigRuntime): ModelParser<import("../common").JSONObject, import("../common").JSONObject> | undefined;
+static getModelParser(id: string): ModelParser<import("..").JSONObject, import("..").JSONObject> | undefined;
+static getModelParserForPrompt(prompt: Prompt, aiConfig: AIConfigRuntime): ModelParser<import("..").JSONObject, import("..").JSONObject> | undefined;
 /**

@@ -20,0 +20,0 @@ * Removes a model parser from the registry.

dist/lib/modelParserRegistry.js

@@ -26,9 +26,9 @@ "use strict";
 static getModelParserForPrompt(prompt, aiConfig) {
-var _a;
+var _a, _b;
 let id;
-if (prompt.metadata.model != null) {
+if (((_a = prompt === null || prompt === void 0 ? void 0 : prompt.metadata) === null || _a === void 0 ? void 0 : _a.model) != null) {
 id =
 typeof prompt.metadata.model === "string"
 ? prompt.metadata.model
-: (_a = prompt.metadata.model) === null || _a === void 0 ? void 0 : _a.name;
+: (_b = prompt.metadata.model) === null || _b === void 0 ? void 0 : _b.name;
 }

@@ -35,0 +35,0 @@ else if (aiConfig.metadata.default_model != null) {

dist/lib/parsers/openai.js

@@ -307,3 +307,3 @@ "use strict";
 deserialize(prompt, aiConfig, params) {
-var _a, _b, _c;
+var _a, _b, _c, _d;
 // Build the completion params

@@ -324,3 +324,3 @@ const modelMetadata = (_a = this.getModelSettings(prompt, aiConfig)) !== null && _a !== void 0 ? _a : {};
 }
-if (prompt.metadata.remember_chat_context !== false) {
+if (((_c = prompt === null || prompt === void 0 ? void 0 : prompt.metadata) === null || _c === void 0 ? void 0 : _c.remember_chat_context) !== false) {
 // Loop through the prompts in the AIConfig and add the user messages to the messages array

@@ -348,3 +348,3 @@ for (let i = 0; i < aiConfig.prompts.length; i++) {
 for (let i = 0; i < completionParams.messages.length; i++) {
-completionParams.messages[i].content = this.resolvePromptTemplate((_c = completionParams.messages[i].content) !== null && _c !== void 0 ? _c : "", prompt, aiConfig, params);
+completionParams.messages[i].content = this.resolvePromptTemplate((_d = completionParams.messages[i].content) !== null && _d !== void 0 ? _d : "", prompt, aiConfig, params);
 }

@@ -351,0 +351,0 @@ // Add the latest message to the messages array

dist/lib/utils.d.ts

@@ -0,1 +1,17 @@
+import { AIConfigRuntime } from "./config";
+import { InferenceSettings } from "../types";
+import { JSONObject } from "../common";
 export declare function getAPIKeyFromEnv(apiKeyName: string): string;
+/**
+ * Extract inference settings with overrides based on inference settings.
+ *
+ * This function takes the inference settings and a model ID and returns a subset
+ * of inference settings that have been overridden by model-specific settings. It
+ * compares the provided settings with global settings, and returns only those that
+ * differ or have no corresponding global setting.
+ * @param configRuntime The AIConfigRuntime that the prompt belongs to.
+ * @param inferenceSettings The model settings from the input data.
+ * @param modelName The model ID of the prompt.
+ * @returns The model settings from the input data.
+ */
+export declare function extractOverrideSettings(configRuntime: AIConfigRuntime, inferenceSettings: InferenceSettings, modelName: string): JSONObject;
"use strict";
var __importDefault = (this && this.__importDefault) || function (mod) {
return (mod && mod.__esModule) ? mod : { "default": mod };
};
Object.defineProperty(exports, "__esModule", { value: true });
exports.getAPIKeyFromEnv = void 0;
exports.extractOverrideSettings = exports.getAPIKeyFromEnv = void 0;
const lodash_1 = __importDefault(require("lodash"));
function getAPIKeyFromEnv(apiKeyName) {

@@ -12,1 +16,34 @@ const apiKeyValue = process.env[apiKeyName];
 exports.getAPIKeyFromEnv = getAPIKeyFromEnv;
+/**
+ * Extract inference settings with overrides based on inference settings.
+ *
+ * This function takes the inference settings and a model ID and returns a subset
+ * of inference settings that have been overridden by model-specific settings. It
+ * compares the provided settings with global settings, and returns only those that
+ * differ or have no corresponding global setting.
+ * @param configRuntime The AIConfigRuntime that the prompt belongs to.
+ * @param inferenceSettings The model settings from the input data.
+ * @param modelName The model ID of the prompt.
+ * @returns The model settings from the input data.
+ */
+function extractOverrideSettings(configRuntime, inferenceSettings, modelName) {
+var _a, _b;
+let modelMetadata;
+const globalModelSettings = Object.assign({}, (_a = (configRuntime.getGlobalSettings(modelName))) !== null && _a !== void 0 ? _a : {});
+inferenceSettings = Object.assign({}, (_b = (inferenceSettings)) !== null && _b !== void 0 ? _b : {});
+if (globalModelSettings != null) {
+// Check if the model settings from the input data are the same as the global model settings
+// Compute the difference between the global model settings and the model settings from the input data
+// If there is a difference, then we need to add the different model settings as overrides on the prompt's metadata
+const keys = lodash_1.default.union(lodash_1.default.keys(globalModelSettings), lodash_1.default.keys(inferenceSettings));
+const overrides = lodash_1.default.reduce(keys, (result, key) => {
+if (!lodash_1.default.isEqual(globalModelSettings[key], inferenceSettings[key])) {
+result[key] = inferenceSettings[key];
+}
+return result;
+}, {});
+return overrides;
+}
+return inferenceSettings;
+}
+exports.extractOverrideSettings = extractOverrideSettings;
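
extractOverrideSettings is effectively a one-sided diff: it walks the union of keys from the global settings and the supplied inference settings, and keeps the inference value wherever the two differ. A self-contained TypeScript sketch of the same reduction (the settings values are assumed, for illustration):

import _ from "lodash";

const globalSettings: Record<string, unknown> = { temperature: 0.7, top_p: 1 };
const inferenceSettings: Record<string, unknown> = { temperature: 0.7, top_p: 0.9, max_tokens: 100 };

// Keep only keys whose values differ from the globals, as in the compiled code above.
const overrides = _.reduce(
  _.union(_.keys(globalSettings), _.keys(inferenceSettings)),
  (result: Record<string, unknown>, key: string) => {
    if (!_.isEqual(globalSettings[key], inferenceSettings[key])) {
      result[key] = inferenceSettings[key];
    }
    return result;
  },
  {} as Record<string, unknown>
);
// => { top_p: 0.9, max_tokens: 100 }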

package.json

 {
 "name": "aiconfig",
-"version": "1.0.1",
+"version": "1.0.2",
 "description": "Library to help manage AI configs (i.e. prompts, models and parameters) using the .aiconfig file format.",

@@ -25,6 +25,9 @@ "repository": {
 "build": "tsc",
 "clean": "rm -rf ./dist",
 "test": "jest --runInBand",
 "tsn": "ts-node -r tsconfig-paths/register",
 "fix": "eslint --fix --ext ts,js .",
-"genSchema": "ts-node ./scripts/genJSONSchema.ts"
+"genSchema": "ts-node ./scripts/genJSONSchema.ts",
+"lint": "eslint . --max-warnings=0",
+"compile": "tsc"
 },

@@ -34,3 +37,4 @@ "devDependencies": {
 "@types/lodash": "^4.14.197",
-"dotenv": "^16.0.3",
+"@typescript-eslint/eslint-plugin": "^6.7.2",
+"@typescript-eslint/parser": "^6.7.2",
 "jest": "^29.7.0",

@@ -40,6 +44,9 @@ "ts-jest": "^29.1.1",
 "typedoc": "^0.23.27",
-"typescript": "^4.9.5"
+"typescript": "^4.9.5",
+"dotenv": "^16.3.1",
+"eslint": "^8.50.0"
 },
 "dependencies": {
 "@google-ai/generativelanguage": "^1.1.0",
 "@huggingface/inference": "^2.6.4",
 "axios": "^1.5.1",

@@ -46,0 +53,0 @@ "google-auth-library": "^9.1.0",
