portkey-ai - npm package version comparison

Comparing version 1.3.1 to 1.3.2
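In short: 1.3.2 adds a `response_format` field and new `FunctionType`, `ToolCall`, and `FunctionCall` interfaces to the chat-completion typings, relaxes `Message.content` from `string` to `string | null`, changes the `Portkey` class fields `apiKey` and `baseURL` from `override` to `declare`, bumps the exported `VERSION` constant to "1.3.2", and adds several Anyscale and OpenAI model test cases.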


dist/package.json
 {
   "name": "portkey-ai",
-  "version": "1.3.1",
+  "version": "1.3.2",
   "description": "Node client library for the Portkey API",

@@ -5,0 +5,0 @@ "types": "./src/index.d.ts",

@@ -16,2 +16,3 @@ import { APIResponseType, ApiClientInterface } from "../_types/generalTypes";

     messages?: Array<Message>;
+    response_format?: object;
 }

@@ -30,5 +31,21 @@ export interface ChatCompletionsBodyStreaming extends ChatCompletionsBodyBase {

 }
+interface FunctionType {
+    arguments?: string;
+    name?: string;
+}
+interface ToolCall {
+    index?: number;
+    id?: string;
+    function?: FunctionType;
+    type?: 'function';
+}
+interface FunctionCall {
+    arguments?: string;
+    name?: string;
+}
 interface Message {
     role: string;
-    content: string;
+    content: string | null;
+    function_call?: FunctionCall;
+    tool_calls?: Array<ToolCall>;
 }

@@ -35,0 +52,0 @@ interface Choices {

@@ -52,2 +52,3 @@ import { APIResponseType, ApiClientInterface } from "../_types/generalTypes";

     messages?: Array<Message>;
+    response_format?: object;
 }

@@ -71,5 +72,24 @@

+interface FunctionType {
+    arguments?: string;
+    name?: string;
+}
+interface ToolCall {
+    index?: number;
+    id?: string;
+    function?: FunctionType;
+    type?: 'function';
+}
+interface FunctionCall {
+    arguments?: string;
+    name?: string;
+}
 interface Message {
-    role: string
-    content: string
+    role: string;
+    content: string | null;
+    function_call?: FunctionCall;
+    tool_calls?: Array<ToolCall>;
 }

@@ -80,3 +100,3 @@

     message?: Message;
-    delta?: Message
+    delta?: Message;
     finish_reason?: string;

@@ -83,0 +103,0 @@ }
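These typing changes mirror the OpenAI-style function-calling shape: an assistant message may now carry `tool_calls` (or the legacy `function_call`) instead of text, which is why `content` becomes nullable. A minimal sketch of a message exercising the new fields (the object literal is hypothetical; `Message` is an internal interface from the diff above):

// Hypothetical assistant message using the fields added in 1.3.2.
// `content` may be null when the model answers with a tool call instead of text.
const assistantMessage: Message = {
    role: 'assistant',
    content: null,
    tool_calls: [{
        index: 0,
        id: 'call_123',                      // placeholder id
        type: 'function',
        function: {
            name: 'get_weather',             // hypothetical function name
            arguments: '{"city": "Paris"}',  // JSON-encoded arguments string
        },
    }],
};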

@@ -9,4 +9,4 @@ import { ApiClientInterface } from "./_types/generalTypes";

 export class Portkey extends ApiClient {
-    override apiKey: string | null;
-    override baseURL: string;
+    declare apiKey: string | null;
+    declare baseURL: string;
     virtualKey: string | null;

@@ -13,0 +13,0 @@ config: Record<string, unknown> | string | null | undefined;
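The `override` to `declare` switch is a standard TypeScript adjustment: a `declare` field re-states the type without emitting a class field, so under `useDefineForClassFields` the subclass cannot re-initialize (and wipe out) a value the parent constructor already set. A minimal sketch of the pattern, with hypothetical class names:

class Base {
    apiKey: string | null = null;   // assigned by the parent
}
class Client extends Base {
    // `declare` only refines the type; no field definition is emitted,
    // so the parent's runtime assignment survives.
    declare apiKey: string | null;
}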

@@ -1,1 +0,1 @@

-export declare const VERSION = "1.2.1";
+export declare const VERSION = "1.3.2";
 "use strict";
 Object.defineProperty(exports, "__esModule", { value: true });
 exports.VERSION = void 0;
-exports.VERSION = "1.2.1";
+exports.VERSION = "1.3.2";
 //# sourceMappingURL=version.js.map

@@ -1,1 +0,1 @@

-export const VERSION = "1.2.1";
+export const VERSION = "1.3.2";

@@ -40,3 +40,7 @@ "use strict";

 test('model: codellama/CodeLlama-34b-Instruct-hf', () => __awaiter(void 0, void 0, void 0, function* () {
-    const completion = yield client.chat.completions.create({ model: 'codellama/CodeLlama-34b-Instruct-hf', messages: [{ "role": "user", "content": "Say this is a test" }] });
+    const completion = yield client.chat.completions.create({
+        model: 'codellama/CodeLlama-34b-Instruct-hf',
+        messages: [{ "role": "user", "content": "Say this is a test" }],
+        max_tokens: 30
+    });
     expect(completion).toBeDefined();

@@ -52,3 +56,63 @@ expect(completion.choices).toBeDefined();

 }));
+test('model: google/gemma-7b-it', () => __awaiter(void 0, void 0, void 0, function* () {
+    const completion = yield client.chat.completions.create({
+        model: 'google/gemma-7b-it',
+        messages: [{ "role": "user", "content": "Say this is a test" }],
+        max_tokens: 25
+    });
+    expect(completion).toBeDefined();
+    expect(completion.choices).toBeDefined();
+    expect(completion.choices.length).toBeGreaterThan(0);
+}));
+test('model: meta-llama/Meta-Llama-3-8B-Instruct', () => __awaiter(void 0, void 0, void 0, function* () {
+    const completion = yield client.chat.completions.create({
+        model: 'meta-llama/Meta-Llama-3-8B-Instruct',
+        messages: [{ "role": "user", "content": "Say this is a test" }],
+        max_tokens: 25
+    });
+    expect(completion).toBeDefined();
+    expect(completion.choices).toBeDefined();
+    expect(completion.choices.length).toBeGreaterThan(0);
+}));
+test('model: meta-llama/Meta-Llama-3-70B-Instruct', () => __awaiter(void 0, void 0, void 0, function* () {
+    const completion = yield client.chat.completions.create({
+        model: 'meta-llama/Meta-Llama-3-70B-Instruct',
+        messages: [{ role: 'user', content: 'Say this is a test' }],
+        max_tokens: 25,
+    });
+    expect(completion).toBeDefined();
+    expect(completion.choices).toBeDefined();
+    expect(completion.choices.length).toBeGreaterThan(0);
+}));
+test('model: mistralai/Mixtral-8x7B-Instruct-v0.1', () => __awaiter(void 0, void 0, void 0, function* () {
+    const completion = yield client.chat.completions.create({
+        model: 'mistralai/Mixtral-8x7B-Instruct-v0.1',
+        messages: [{ role: 'user', content: 'Say this is a test' }],
+        max_tokens: 25,
+    });
+    expect(completion).toBeDefined();
+    expect(completion.choices).toBeDefined();
+    expect(completion.choices.length).toBeGreaterThan(0);
+}));
+test('model: mistralai/Mixtral-8x22B-Instruct-v0.1', () => __awaiter(void 0, void 0, void 0, function* () {
+    const completion = yield client.chat.completions.create({
+        model: 'mistralai/Mixtral-8x22B-Instruct-v0.1',
+        messages: [{ role: 'user', content: 'Say this is a test' }],
+        max_tokens: 25,
+    });
+    expect(completion).toBeDefined();
+    expect(completion.choices).toBeDefined();
+    expect(completion.choices.length).toBeGreaterThan(0);
+}));
+test('model: mlabonne/NeuralHermes-2.5-Mistral-7B', () => __awaiter(void 0, void 0, void 0, function* () {
+    const completion = yield client.chat.completions.create({
+        model: 'mlabonne/NeuralHermes-2.5-Mistral-7B',
+        messages: [{ role: 'user', content: 'Say this is a test' }],
+        max_tokens: 25,
+    });
+    expect(completion).toBeDefined();
+    expect(completion.choices).toBeDefined();
+    expect(completion.choices.length).toBeGreaterThan(0);
+}));
 });
 //# sourceMappingURL=anyscale.test.js.map

@@ -75,3 +75,9 @@ "use strict";

 }));
+test('model: gpt-4-turbo-2024-04-09', () => __awaiter(void 0, void 0, void 0, function* () {
+    const completion = yield client.chat.completions.create({ model: 'gpt-4-turbo-2024-04-09', messages: [{ "role": "user", "content": "Say this is a test" }] });
+    expect(completion).toBeDefined();
+    expect(completion.choices).toBeDefined();
+    expect(completion.choices.length).toBeGreaterThan(0);
+}));
 });
 //# sourceMappingURL=openai.test.js.map

@@ -60,2 +60,5 @@ /**

 // Default timeout of a test in milliseconds
+// testTimeout: 20000,
 // Force coverage collection from ignored files using an array of glob patterns

@@ -62,0 +65,0 @@ // forceCoverageMatch: [],
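The Jest config change only adds a commented-out option; enabled, it would raise the per-test timeout like this (a sketch, not part of the diff):

// jest.config.js (hypothetical, with the new option enabled)
module.exports = {
    testTimeout: 20000, // per-test timeout in milliseconds
};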

 {
   "name": "portkey-ai",
-  "version": "1.3.1",
+  "version": "1.3.2",
   "description": "Node client library for the Portkey API",

@@ -5,0 +5,0 @@ "types": "dist/src/index.d.ts",

@@ -52,2 +52,3 @@ import { APIResponseType, ApiClientInterface } from "../_types/generalTypes";

     messages?: Array<Message>;
+    response_format?: object;
 }

@@ -71,5 +72,24 @@

+interface FunctionType {
+    arguments?: string;
+    name?: string;
+}
+interface ToolCall {
+    index?: number;
+    id?: string;
+    function?: FunctionType;
+    type?: 'function';
+}
+interface FunctionCall {
+    arguments?: string;
+    name?: string;
+}
 interface Message {
-    role: string
-    content: string
+    role: string;
+    content: string | null;
+    function_call?: FunctionCall;
+    tool_calls?: Array<ToolCall>;
 }

@@ -80,3 +100,3 @@

     message?: Message;
-    delta?: Message
+    delta?: Message;
     finish_reason?: string;

@@ -83,0 +103,0 @@ }

@@ -9,4 +9,4 @@ import { ApiClientInterface } from "./_types/generalTypes";

 export class Portkey extends ApiClient {
-    override apiKey: string | null;
-    override baseURL: string;
+    declare apiKey: string | null;
+    declare baseURL: string;
     virtualKey: string | null;

@@ -13,0 +13,0 @@ config: Record<string, unknown> | string | null | undefined;

@@ -1,1 +0,1 @@

-export const VERSION = "1.2.1";
+export const VERSION = "1.3.2";

@@ -33,3 +33,7 @@ import { config } from 'dotenv';

 test('model: codellama/CodeLlama-34b-Instruct-hf', async () => {
-    const completion = await client.chat.completions.create({ model: 'codellama/CodeLlama-34b-Instruct-hf', messages: [{ "role": "user", "content": "Say this is a test" }] });
+    const completion = await client.chat.completions.create({
+        model: 'codellama/CodeLlama-34b-Instruct-hf',
+        messages: [{ "role": "user", "content": "Say this is a test" }],
+        max_tokens: 30
+    });
     expect(completion).toBeDefined();

@@ -46,2 +50,68 @@ expect(completion.choices).toBeDefined();

 });
+test('model: google/gemma-7b-it', async () => {
+    const completion = await client.chat.completions.create({
+        model: 'google/gemma-7b-it',
+        messages: [{ "role": "user", "content": "Say this is a test" }],
+        max_tokens: 25
+    });
+    expect(completion).toBeDefined();
+    expect(completion.choices).toBeDefined();
+    expect(completion.choices.length).toBeGreaterThan(0);
+});
+test('model: meta-llama/Meta-Llama-3-8B-Instruct', async () => {
+    const completion = await client.chat.completions.create({
+        model: 'meta-llama/Meta-Llama-3-8B-Instruct',
+        messages: [{ "role": "user", "content": "Say this is a test" }],
+        max_tokens: 25
+    });
+    expect(completion).toBeDefined();
+    expect(completion.choices).toBeDefined();
+    expect(completion.choices.length).toBeGreaterThan(0);
+});
+test('model: meta-llama/Meta-Llama-3-70B-Instruct', async () => {
+    const completion = await client.chat.completions.create({
+        model: 'meta-llama/Meta-Llama-3-70B-Instruct',
+        messages: [{ role: 'user', content: 'Say this is a test' }],
+        max_tokens: 25,
+    });
+    expect(completion).toBeDefined();
+    expect(completion.choices).toBeDefined();
+    expect(completion.choices.length).toBeGreaterThan(0);
+});
+test('model: mistralai/Mixtral-8x7B-Instruct-v0.1', async () => {
+    const completion = await client.chat.completions.create({
+        model: 'mistralai/Mixtral-8x7B-Instruct-v0.1',
+        messages: [{ role: 'user', content: 'Say this is a test' }],
+        max_tokens: 25,
+    });
+    expect(completion).toBeDefined();
+    expect(completion.choices).toBeDefined();
+    expect(completion.choices.length).toBeGreaterThan(0);
+});
+test('model: mistralai/Mixtral-8x22B-Instruct-v0.1', async () => {
+    const completion = await client.chat.completions.create({
+        model: 'mistralai/Mixtral-8x22B-Instruct-v0.1',
+        messages: [{ role: 'user', content: 'Say this is a test' }],
+        max_tokens: 25,
+    });
+    expect(completion).toBeDefined();
+    expect(completion.choices).toBeDefined();
+    expect(completion.choices.length).toBeGreaterThan(0);
+});
+test('model: mlabonne/NeuralHermes-2.5-Mistral-7B', async () => {
+    const completion = await client.chat.completions.create({
+        model: 'mlabonne/NeuralHermes-2.5-Mistral-7B',
+        messages: [{ role: 'user', content: 'Say this is a test' }],
+        max_tokens: 25,
+    });
+    expect(completion).toBeDefined();
+    expect(completion.choices).toBeDefined();
+    expect(completion.choices.length).toBeGreaterThan(0);
+});
 });

@@ -73,2 +73,9 @@ import { config } from 'dotenv';

 });
+test('model: gpt-4-turbo-2024-04-09', async () => {
+    const completion = await client.chat.completions.create({ model: 'gpt-4-turbo-2024-04-09', messages: [{ "role": "user", "content": "Say this is a test" }] });
+    expect(completion).toBeDefined();
+    expect(completion.choices).toBeDefined();
+    expect(completion.choices.length).toBeGreaterThan(0);
+});
 });
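
For context, the `client` these tests rely on would be constructed roughly as below; the environment-variable names are assumptions, not taken from the diff:

import { Portkey } from 'portkey-ai';

// Hypothetical setup mirroring what the tests above rely on;
// PORTKEY_API_KEY and ANYSCALE_VIRTUAL_KEY are assumed names.
const client = new Portkey({
    apiKey: process.env.PORTKEY_API_KEY ?? null,
    virtualKey: process.env.ANYSCALE_VIRTUAL_KEY ?? null,
});

async function smokeTest() {
    const completion = await client.chat.completions.create({
        model: 'google/gemma-7b-it',
        messages: [{ role: 'user', content: 'Say this is a test' }],
        max_tokens: 25,
    });
    console.log(completion.choices[0]?.message?.content);
}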

