gpt-tokens - npm Package Compare versions

Comparing version 1.1.2 to 1.1.3


index.d.ts
import { Tiktoken } from 'js-tiktoken';
+ import OpenAI from 'openai';
export declare function getEncodingForModelCached(model: supportModelType): Tiktoken;

@@ -8,3 +9,3 @@ /**

*/
- export type supportModelType = 'gpt-3.5-turbo' | 'gpt-3.5-turbo-0301' | 'gpt-3.5-turbo-0613' | 'gpt-3.5-turbo-16k' | 'gpt-3.5-turbo-16k-0613' | 'gpt-4' | 'gpt-4-0314' | 'gpt-4-0613' | 'gpt-4-32k' | 'gpt-4-32k-0314' | 'gpt-4-32k-0613';
+ export type supportModelType = 'gpt-3.5-turbo' | 'gpt-3.5-turbo-0301' | 'gpt-3.5-turbo-0613' | 'gpt-3.5-turbo-1106' | 'gpt-3.5-turbo-16k' | 'gpt-3.5-turbo-16k-0613' | 'gpt-4' | 'gpt-4-0314' | 'gpt-4-0613' | 'gpt-4-32k' | 'gpt-4-32k-0314' | 'gpt-4-32k-0613' | 'gpt-4-1106-preview';
interface MessageItem {

@@ -27,2 +28,4 @@ name?: string;

readonly gpt3_5_turbo_16kCompletionTokenUnit: number;
+ readonly gpt3_5_turbo_1106PromptTokenUnit: number;
+ readonly gpt3_5_turbo_1106CompletionTokenUnit: number;
readonly gpt4_8kPromptTokenUnit: number;

@@ -32,2 +35,4 @@ readonly gpt4_8kCompletionTokenUnit: number;

readonly gpt4_32kCompletionTokenUnit: number;
+ readonly gpt4_turbo_previewPromptTokenUnit: number;
+ readonly gpt4_turbo_previewCompletionTokenUnit: number;
get usedUSD(): number;

@@ -56,3 +61,3 @@ get usedTokens(): number;

}
- export declare function testGPTTokens(apiKey: string): Promise<void>;
+ export declare function testGPTTokens(openai: OpenAI): Promise<void>;
export {};
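The new declaration makes the openai v4 client part of `testGPTTokens`'s public signature, a breaking change for callers that passed a raw API key. A minimal sketch of an updated caller, assuming the package is consumed as `gpt-tokens` (the env variable name is illustrative):

```typescript
import OpenAI from 'openai'
import { testGPTTokens } from 'gpt-tokens'

// 1.1.2 accepted a raw key string: testGPTTokens('sk-...').
// 1.1.3 expects a configured openai v4 client instead.
const openai = new OpenAI({ apiKey: process.env.OPENAI_API_KEY })

testGPTTokens(openai).catch(console.error)
```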
"use strict";
- var __createBinding = (this && this.__createBinding) || (Object.create ? (function(o, m, k, k2) {
- if (k2 === undefined) k2 = k;
- var desc = Object.getOwnPropertyDescriptor(m, k);
- if (!desc || ("get" in desc ? !m.__esModule : desc.writable || desc.configurable)) {
- desc = { enumerable: true, get: function() { return m[k]; } };
- }
- Object.defineProperty(o, k2, desc);
- }) : (function(o, m, k, k2) {
- if (k2 === undefined) k2 = k;
- o[k2] = m[k];
- }));
- var __setModuleDefault = (this && this.__setModuleDefault) || (Object.create ? (function(o, v) {
- Object.defineProperty(o, "default", { enumerable: true, value: v });
- }) : function(o, v) {
- o["default"] = v;
- });
- var __importStar = (this && this.__importStar) || function (mod) {
- if (mod && mod.__esModule) return mod;
- var result = {};
- if (mod != null) for (var k in mod) if (k !== "default" && Object.prototype.hasOwnProperty.call(mod, k)) __createBinding(result, mod, k);
- __setModuleDefault(result, mod);
- return result;
- };
var __awaiter = (this && this.__awaiter) || function (thisArg, _arguments, P, generator) {

@@ -45,6 +22,10 @@ function adopt(value) { return value instanceof P ? value : new P(function (resolve) { resolve(value); }); }

try {
+ if (['gpt-3.5-turbo-1106'].includes(model))
+ model = 'gpt-3.5-turbo';
+ if (['gpt-4-1106-preview'].includes(model))
+ model = 'gpt-4';
modelEncodingCache[model] = (0, js_tiktoken_1.encodingForModel)(model);
}
catch (e) {
- console.info('Model not found. Using cl100k_base encoding.');
+ console.error('Model not found. Using cl100k_base encoding.');
modelEncodingCache[model] = (0, js_tiktoken_1.getEncoding)('cl100k_base');

@@ -75,5 +56,12 @@ }

// https://openai.com/pricing/
+ // gpt-3.5-turbo-1106
+ // Prompt: $0.001 / 1K tokens
+ this.gpt3_5_turbo_1106PromptTokenUnit = new decimal_js_1.default(0.001).div(1000).toNumber();
+ // https://openai.com/pricing/
+ // gpt-3.5-turbo-1106
+ // Completion: $0.002 / 1K tokens
+ this.gpt3_5_turbo_1106CompletionTokenUnit = new decimal_js_1.default(0.002).div(1000).toNumber();
// https://openai.com/pricing/
// gpt-4-8k
// Prompt: $0.03 / 1K tokens
//
this.gpt4_8kPromptTokenUnit = new decimal_js_1.default(0.03).div(1000).toNumber();

@@ -92,2 +80,10 @@ // https://openai.com/pricing/

this.gpt4_32kCompletionTokenUnit = new decimal_js_1.default(0.12).div(1000).toNumber();
+ // https://openai.com/pricing/
+ // gpt-4-1106-preview
+ // Prompt: $0.01 / 1K tokens
+ this.gpt4_turbo_previewPromptTokenUnit = new decimal_js_1.default(0.01).div(1000).toNumber();
+ // https://openai.com/pricing/
+ // gpt-4-1106-preview
+ // Completion: $0.03 / 1K tokens
+ this.gpt4_turbo_previewCompletionTokenUnit = new decimal_js_1.default(0.03).div(1000).toNumber();
const { model, messages, } = options;

@@ -132,2 +128,11 @@ if (!GPTTokens.supportModels.includes(model))

+ if ([
+ 'gpt-3.5-turbo-1106',
+ ].includes(this.model)) {
+ const promptUSD = new decimal_js_1.default(this.promptUsedTokens)
+ .mul(this.gpt3_5_turbo_1106PromptTokenUnit);
+ const completionUSD = new decimal_js_1.default(this.completionUsedTokens)
+ .mul(this.gpt3_5_turbo_1106CompletionTokenUnit);
+ price = promptUSD.add(completionUSD).toNumber();
+ }
if ([
'gpt-4',

@@ -154,2 +159,9 @@ 'gpt-4-0314',

}
+ if (this.model === 'gpt-4-1106-preview') {
+ const promptUSD = new decimal_js_1.default(this.promptUsedTokens)
+ .mul(this.gpt4_turbo_previewPromptTokenUnit);
+ const completionUSD = new decimal_js_1.default(this.completionUsedTokens)
+ .mul(this.gpt4_turbo_previewCompletionTokenUnit);
+ price = promptUSD.add(completionUSD).toNumber();
+ }
return price;

@@ -216,2 +228,3 @@ }

'gpt-3.5-turbo-0613',
+ 'gpt-3.5-turbo-1106',
'gpt-3.5-turbo-16k',

@@ -225,2 +238,3 @@ 'gpt-3.5-turbo-16k-0613',

'gpt-4-32k-0613',
+ 'gpt-4-1106-preview',
].includes(model)) {

@@ -253,2 +267,3 @@ tokens_per_message = 3;

'gpt-3.5-turbo-16k-0613',
+ 'gpt-3.5-turbo-1106',
'gpt-4',

@@ -260,11 +275,10 @@ 'gpt-4-0314',

'gpt-4-32k-0613',
+ 'gpt-4-1106-preview',
];
exports.GPTTokens = GPTTokens;
- function testGPTTokens(apiKey) {
+ function testGPTTokens(openai) {
return __awaiter(this, void 0, void 0, function* () {
- const { Configuration, OpenAIApi } = yield Promise.resolve().then(() => __importStar(require('openai')));
- const configuration = new Configuration({ apiKey });
- const openai = new OpenAIApi(configuration);
- const prompt = `How are u`;
const messages = [
- { role: 'user', content: prompt },
+ { role: 'user', content: 'Hello, how are u' },
];

@@ -276,3 +290,3 @@ const { length: modelsNum } = GPTTokens.supportModels;

let ignoreModel = false;
- const chatCompletion = yield openai.createChatCompletion({
+ const chatCompletion = yield openai.chat.completions.create({
model,

@@ -285,6 +299,3 @@ messages,

});
- if (ignoreModel)
- continue;
- const responseMessage = chatCompletion.data.choices[0].message;
- const openaiUsage = chatCompletion.data.usage;
+ const openaiUsage = chatCompletion === null || chatCompletion === void 0 ? void 0 : chatCompletion.usage;
const gptTokens = new GPTTokens({

@@ -294,7 +305,11 @@ model,

...messages,
- ...[responseMessage],
+ ...[chatCompletion === null || chatCompletion === void 0 ? void 0 : chatCompletion.choices[0].message],
],
});
- if (gptTokens.usedTokens !== (openaiUsage === null || openaiUsage === void 0 ? void 0 : openaiUsage.total_tokens))
- throw new Error(`Test ${model} usedTokens failed (openai: ${openaiUsage.total_tokens}/ gpt-tokens: ${gptTokens.usedTokens})`);
+ if (ignoreModel)
+ continue;
+ if (!openaiUsage) {
+ console.error(`Test ${model} failed (openai return usage is null)`);
+ continue;
+ }
if (gptTokens.promptUsedTokens !== openaiUsage.prompt_tokens)

@@ -304,2 +319,4 @@ throw new Error(`Test ${model} promptUsedTokens failed (openai: ${openaiUsage.prompt_tokens}/ gpt-tokens: ${gptTokens.promptUsedTokens})`);

throw new Error(`Test ${model} completionUsedTokens failed (openai: ${openaiUsage.completion_tokens}/ gpt-tokens: ${gptTokens.completionUsedTokens})`);
+ if (gptTokens.usedTokens !== (openaiUsage === null || openaiUsage === void 0 ? void 0 : openaiUsage.total_tokens))
+ throw new Error(`Test ${model} usedTokens failed (openai: ${openaiUsage === null || openaiUsage === void 0 ? void 0 : openaiUsage.total_tokens}/ gpt-tokens: ${gptTokens.usedTokens})`);
console.info('Pass!');

@@ -306,0 +323,0 @@ }
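The compiled changes above show that the two new `-1106` model names are rewritten to their base models before js-tiktoken is asked for an encoder, with `cl100k_base` as the fallback when the lookup throws. A standalone sketch of that caching strategy (the helper name is illustrative, not part of the package's API):

```typescript
import { encodingForModel, getEncoding, Tiktoken } from 'js-tiktoken'

const cache: { [model: string]: Tiktoken } = {}

// Illustrative helper mirroring getEncodingForModelCached's strategy.
function encoderFor (model: string): Tiktoken {
    // Map model names js-tiktoken does not yet recognize to their base models.
    if (model === 'gpt-3.5-turbo-1106') model = 'gpt-3.5-turbo'
    if (model === 'gpt-4-1106-preview') model = 'gpt-4'
    if (!cache[model]) {
        try {
            cache[model] = encodingForModel(model as Parameters<typeof encodingForModel>[0])
        } catch (e) {
            // Unknown model: fall back to the encoding GPT-3.5/GPT-4 share.
            console.error('Model not found. Using cl100k_base encoding.')
            cache[model] = getEncoding('cl100k_base')
        }
    }
    return cache[model]
}
```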

index.ts

import { encodingForModel, getEncoding, Tiktoken } from 'js-tiktoken'
import Decimal from 'decimal.js'
+ import OpenAI from 'openai'

@@ -9,5 +10,8 @@ let modelEncodingCache: { [key in supportModelType]?: Tiktoken } = {}

try {
- modelEncodingCache[model] = encodingForModel(model)
+ if (['gpt-3.5-turbo-1106'].includes(model)) model = 'gpt-3.5-turbo'
+ if (['gpt-4-1106-preview'].includes(model)) model = 'gpt-4'
+ modelEncodingCache[model] = encodingForModel(model as Parameters<typeof encodingForModel>[0])
} catch (e) {
- console.info('Model not found. Using cl100k_base encoding.')
+ console.error('Model not found. Using cl100k_base encoding.')
modelEncodingCache[model] = getEncoding('cl100k_base')

@@ -30,2 +34,3 @@ }

| 'gpt-3.5-turbo-0613'
+ | 'gpt-3.5-turbo-1106'
| 'gpt-3.5-turbo-16k'

@@ -39,2 +44,3 @@ | 'gpt-3.5-turbo-16k-0613'

| 'gpt-4-32k-0613'
+ | 'gpt-4-1106-preview'

@@ -78,2 +84,3 @@ interface MessageItem {

'gpt-3.5-turbo-16k-0613',
+ 'gpt-3.5-turbo-1106',
'gpt-4',

@@ -85,2 +92,3 @@ 'gpt-4-0314',

'gpt-4-32k-0613',
+ 'gpt-4-1106-preview',
]

@@ -112,5 +120,14 @@

// https://openai.com/pricing/
+ // gpt-3.5-turbo-1106
+ // Prompt: $0.001 / 1K tokens
+ public readonly gpt3_5_turbo_1106PromptTokenUnit = new Decimal(0.001).div(1000).toNumber()
+ // https://openai.com/pricing/
+ // gpt-3.5-turbo-1106
+ // Completion: $0.002 / 1K tokens
+ public readonly gpt3_5_turbo_1106CompletionTokenUnit = new Decimal(0.002).div(1000).toNumber()
// https://openai.com/pricing/
// gpt-4-8k
// Prompt: $0.03 / 1K tokens
//
public readonly gpt4_8kPromptTokenUnit = new Decimal(0.03).div(1000).toNumber()

@@ -133,2 +150,12 @@

+ // https://openai.com/pricing/
+ // gpt-4-1106-preview
+ // Prompt: $0.01 / 1K tokens
+ public readonly gpt4_turbo_previewPromptTokenUnit = new Decimal(0.01).div(1000).toNumber()
+ // https://openai.com/pricing/
+ // gpt-4-1106-preview
+ // Completion: $0.03 / 1K tokens
+ public readonly gpt4_turbo_previewCompletionTokenUnit = new Decimal(0.03).div(1000).toNumber()
// Used USD

@@ -164,2 +191,13 @@ public get usedUSD (): number {

+ if ([
+ 'gpt-3.5-turbo-1106',
+ ].includes(this.model)) {
+ const promptUSD = new Decimal(this.promptUsedTokens)
+ .mul(this.gpt3_5_turbo_1106PromptTokenUnit)
+ const completionUSD = new Decimal(this.completionUsedTokens)
+ .mul(this.gpt3_5_turbo_1106CompletionTokenUnit)
+ price = promptUSD.add(completionUSD).toNumber()
+ }
if ([
'gpt-4',

@@ -190,2 +228,11 @@ 'gpt-4-0314',

+ if (this.model === 'gpt-4-1106-preview') {
+ const promptUSD = new Decimal(this.promptUsedTokens)
+ .mul(this.gpt4_turbo_previewPromptTokenUnit)
+ const completionUSD = new Decimal(this.completionUsedTokens)
+ .mul(this.gpt4_turbo_previewCompletionTokenUnit)
+ price = promptUSD.add(completionUSD).toNumber()
+ }
return price

@@ -220,7 +267,7 @@ }

private get lastMessage () {
- return this.messages[this.messages.length - 1]
+ return this.messages![this.messages!.length - 1]
}
private get promptMessages () {
- return this.lastMessage.role === 'assistant' ? this.messages.slice(0, -1) : this.messages
+ return this.lastMessage.role === 'assistant' ? this.messages!.slice(0, -1) : this.messages!
}

@@ -266,2 +313,3 @@

'gpt-3.5-turbo-0613',
+ 'gpt-3.5-turbo-1106',
'gpt-3.5-turbo-16k',

@@ -275,2 +323,3 @@ 'gpt-3.5-turbo-16k-0613',

'gpt-4-32k-0613',
+ 'gpt-4-1106-preview',
].includes(model)) {

@@ -289,3 +338,5 @@ tokens_per_message = 3

num_tokens += encoding.encode(value as string).length
- if (key === 'name') { num_tokens += tokens_per_name }
+ if (key === 'name') {
+ num_tokens += tokens_per_name
+ }
}

@@ -302,9 +353,6 @@ }

- export async function testGPTTokens (apiKey: string) {
- const { Configuration, OpenAIApi } = await import('openai')
- const configuration = new Configuration({ apiKey })
- const openai = new OpenAIApi(configuration)
+ export async function testGPTTokens (openai: OpenAI) {
- const prompt = `How are u`
const messages: MessageItem [] = [
- { role: 'user', content: prompt },
+ { role: 'user', content: 'Hello, how are u' },
]

@@ -320,3 +368,3 @@ const { length: modelsNum } = GPTTokens.supportModels

- const chatCompletion = await openai.createChatCompletion({
+ const chatCompletion = await openai.chat.completions.create({
model,

@@ -331,7 +379,4 @@ messages,

- if (ignoreModel) continue
- const responseMessage = chatCompletion!.data.choices[0].message!
- const openaiUsage = chatCompletion!.data.usage!
+ const openaiUsage = chatCompletion?.usage
const gptTokens = new GPTTokens({

@@ -341,8 +386,11 @@ model,

...messages,
- ...[responseMessage],
+ ...[chatCompletion?.choices[0].message],
] as MessageItem [],
})
- if (gptTokens.usedTokens !== openaiUsage?.total_tokens)
- throw new Error(`Test ${model} usedTokens failed (openai: ${openaiUsage.total_tokens}/ gpt-tokens: ${gptTokens.usedTokens})`)
+ if (ignoreModel) continue
+ if (!openaiUsage) {
+ console.error(`Test ${model} failed (openai return usage is null)`)
+ continue
+ }

@@ -355,2 +403,5 @@ if (gptTokens.promptUsedTokens !== openaiUsage.prompt_tokens)

+ if (gptTokens.usedTokens !== openaiUsage?.total_tokens)
+ throw new Error(`Test ${model} usedTokens failed (openai: ${openaiUsage?.total_tokens}/ gpt-tokens: ${gptTokens.usedTokens})`)
console.info('Pass!')

@@ -357,0 +408,0 @@ }
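The new unit prices follow the same pattern as the existing ones: decimal.js keeps the per-token rates exact rather than relying on binary floats, and `usedUSD` multiplies the prompt and completion token counts by their respective units. A worked sketch for gpt-4-1106-preview (the token counts are example values, not taken from the package):

```typescript
import Decimal from 'decimal.js'

// gpt-4-1106-preview: $0.01 / 1K prompt tokens, $0.03 / 1K completion tokens.
const promptTokenUnit     = new Decimal(0.01).div(1000)
const completionTokenUnit = new Decimal(0.03).div(1000)

// Example counts, as promptUsedTokens / completionUsedTokens would report.
const promptUsedTokens     = 1000
const completionUsedTokens = 200

const usedUSD = new Decimal(promptUsedTokens).mul(promptTokenUnit)
    .add(new Decimal(completionUsedTokens).mul(completionTokenUnit))
    .toNumber()

console.info(usedUSD) // 0.016 (= 1000 * 0.00001 + 200 * 0.00003)
```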

package.json

{
"name": "gpt-tokens",
"version": "1.1.2",
"version": "1.1.3",
"description": "Calculate the token consumption and amount of openai gpt message",

@@ -27,3 +27,3 @@ "keywords": [

"js-tiktoken": "^1.0.7",
"openai": "^3.3.0"
"openai": "^4.6.1"
},

@@ -30,0 +30,0 @@ "devDependencies": {
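The dependency bump from openai ^3.3.0 to ^4.6.1 is what drives the code changes above: v4 replaces the `Configuration`/`OpenAIApi` pair with a single client, and responses are returned directly instead of being wrapped in `.data`. A minimal before/after sketch of the call shape:

```typescript
import OpenAI from 'openai'

const openai = new OpenAI({ apiKey: process.env.OPENAI_API_KEY })

async function demo () {
    // v3: const { data } = await openai.createChatCompletion({ ... })
    // v4: the completion object is returned directly.
    const chatCompletion = await openai.chat.completions.create({
        model   : 'gpt-3.5-turbo',
        messages: [{ role: 'user', content: 'Hello, how are u' }],
    })

    console.info(chatCompletion.choices[0].message)
    console.info(chatCompletion.usage) // { prompt_tokens, completion_tokens, total_tokens }
}

demo().catch(console.error)
```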

README.md

@@ -91,2 +91,3 @@ # gpt-tokens

* gpt-3.5-turbo-0613
+ * gpt-3.5-turbo-1106
* gpt-3.5-turbo-16k

@@ -100,37 +101,52 @@ * gpt-3.5-turbo-16k-0613

* gpt-4-32k-0613
+ * gpt-4-1106-preview
Test in your project
- ```typescript
- import { testGPTTokens } from 'gpt-tokens'
- testGPTTokens('Your openai apiKey').then()
- // [1/11]: Testing gpt-3.5-turbo-0301...
- // Pass!
- // [2/11]: Testing gpt-3.5-turbo...
- // Warning: gpt-3.5-turbo may update over time. Returning num tokens assuming gpt-3.5-turbo-0613
- // Pass!
- // [3/11]: Testing gpt-3.5-turbo-0613...
- // Pass!
- // [4/11]: Testing gpt-3.5-turbo-16k...
- // Warning: gpt-3.5-turbo-16k may update over time. Returning num tokens assuming gpt-3.5-turbo-16k-0613
- // Pass!
- // [5/11]: Testing gpt-3.5-turbo-16k-0613...
- // Pass!
- // [6/11]: Testing gpt-4...
- // Warning: gpt-4 may update over time. Returning num tokens assuming gpt-4-0613
- // Pass!
- // [7/11]: Testing gpt-4-0314...
- // Pass!
- // [8/11]: Testing gpt-4-0613...
- // Pass!
- // [9/11]: Testing gpt-4-32k...
- // Ignore model gpt-4-32k: Request failed with status code 404
- // [10/11]: Testing gpt-4-32k-0314...
- // Ignore model gpt-4-32k-0314: Request failed with status code 404
- // [11/11]: Testing gpt-4-32k-0613...
- // Ignore model gpt-4-32k-0613: Request failed with status code 404
- // Test success!
- // ✨ Done in 27.13s.
+ ```bash
+ node test.js yourAPIKey
+ # Testing GPT...
+ # [1/13]: Testing gpt-3.5-turbo-0301...
+ # Pass!
+ # [2/13]: Testing gpt-3.5-turbo...
+ # Warning: gpt-3.5-turbo may update over time. Returning num tokens assuming gpt-3.5-turbo-0613
+ # Pass!
+ # [3/13]: Testing gpt-3.5-turbo-0613...
+ # Pass!
+ # [4/13]: Testing gpt-3.5-turbo-16k...
+ # Warning: gpt-3.5-turbo-16k may update over time. Returning num tokens assuming gpt-3.5-turbo-16k-0613
+ # Pass!
+ # [5/13]: Testing gpt-3.5-turbo-16k-0613...
+ # Pass!
+ # [6/13]: Testing gpt-3.5-turbo-1106...
+ # Pass!
+ # [7/13]: Testing gpt-4...
+ # Warning: gpt-4 may update over time. Returning num tokens assuming gpt-4-0613
+ # Pass!
+ # [8/13]: Testing gpt-4-0314...
+ # Pass!
+ # [9/13]: Testing gpt-4-0613...
+ # Pass!
+ # [10/13]: Testing gpt-4-32k...
+ # Ignore model gpt-4-32k: 404 The model `gpt-4-32k` does not exist or you do not have access to it. Learn more: https://help.openai.com/en/articles/7102672-how-can-i-access-gpt-4.
+ # Warning: gpt-4-32k may update over time. Returning num tokens assuming gpt-4-32k-0613
+ # [11/13]: Testing gpt-4-32k-0314...
+ # Ignore model gpt-4-32k-0314: 404 The model `gpt-4-32k-0314` does not exist or you do not have access to it. Learn more: https://help.openai.com/en/articles/7102672-how-can-i-access-gpt-4.
+ # [12/13]: Testing gpt-4-32k-0613...
+ # Ignore model gpt-4-32k-0613: 404 The model `gpt-4-32k-0613` does not exist or you do not have access to it. Learn more: https://help.openai.com/en/articles/7102672-how-can-i-access-gpt-4.
+ # [13/13]: Testing gpt-4-1106-preview...
+ # Pass!
+ # Test success!
+ # Testing performance...
+ # GPTTokens: 0.473ms
+ # GPTTokens: 0.097ms
+ # GPTTokens: 0.072ms
+ # GPTTokens: 0.079ms
+ # GPTTokens: 0.095ms
+ # GPTTokens: 0.066ms
+ # GPTTokens: 0.064ms
+ # GPTTokens: 0.068ms
+ # GPTTokens: 0.077ms
+ # GPTTokens: 0.08ms
```
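Basic usage is not shown in this diff, but for context, counting tokens and cost with one of the newly supported models looks roughly like this rough sketch (property names are taken from the type declarations above):

```typescript
import { GPTTokens } from 'gpt-tokens'

const usage = new GPTTokens({
    model   : 'gpt-3.5-turbo-1106',
    messages: [
        { role: 'user', content: 'Hello, how are u' },
    ],
})

console.info('Used tokens:', usage.usedTokens)
console.info('Used USD:', usage.usedUSD)
```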

@@ -137,0 +153,0 @@

test.js

@@ -1,8 +0,18 @@

- const { GPTTokens, testGPTTokens } = require('./index')
+ const OpenAI = require('openai')
+ const { GPTTokens, testGPTTokens } = require('./index');
- ;(async () => {
+ (async () => {
+ const [apiKey = process.env.OPENAI_API_KEY] = process.argv.slice(2)
+ if (!apiKey) {
+ console.error('No API key provided. Ignoring test.')
+ process.exit(0)
+ }
+ const openai = new OpenAI({ apiKey })
- console.log('Running GPT tests')
- await testGPTTokens(process.env.OPENAI_API_KEY)
+ console.info('Testing GPT...')
+ await testGPTTokens(openai)
- console.log('Testing performance')
+ console.info('Testing performance...')
for (let i = 0; i < 10; i++) {

@@ -9,0 +19,0 @@ console.time('GPTTokens')
