gpt-tokens
Advanced tools
Comparing version 1.1.3 to 1.2.0
@@ -9,3 +9,3 @@ import { Tiktoken } from 'js-tiktoken'; | ||
*/ | ||
export type supportModelType = 'gpt-3.5-turbo' | 'gpt-3.5-turbo-0301' | 'gpt-3.5-turbo-0613' | 'gpt-3.5-turbo-1106' | 'gpt-3.5-turbo-16k' | 'gpt-3.5-turbo-16k-0613' | 'gpt-4' | 'gpt-4-0314' | 'gpt-4-0613' | 'gpt-4-32k' | 'gpt-4-32k-0314' | 'gpt-4-32k-0613' | 'gpt-4-1106-preview'; | ||
export declare type supportModelType = 'gpt-3.5-turbo' | 'gpt-3.5-turbo-0301' | 'gpt-3.5-turbo-0613' | 'gpt-3.5-turbo-1106' | 'gpt-3.5-turbo-16k' | 'gpt-3.5-turbo-16k-0613' | 'gpt-4' | 'gpt-4-0314' | 'gpt-4-0613' | 'gpt-4-32k' | 'gpt-4-32k-0314' | 'gpt-4-32k-0613' | 'gpt-4-1106-preview'; | ||
interface MessageItem { | ||
@@ -18,8 +18,29 @@ name?: string; | ||
constructor(options: { | ||
model: supportModelType; | ||
messages: MessageItem[]; | ||
model?: supportModelType; | ||
fineTuneModel?: string; | ||
messages?: GPTTokens['messages']; | ||
training?: GPTTokens['training']; | ||
tools?: GPTTokens['tools']; | ||
debug?: boolean; | ||
}); | ||
private checkOptions; | ||
static readonly supportModels: supportModelType[]; | ||
readonly debug: boolean; | ||
readonly model: supportModelType; | ||
readonly messages: MessageItem[]; | ||
readonly fineTuneModel: string | undefined; | ||
readonly messages?: MessageItem[]; | ||
readonly training?: { | ||
data: { | ||
messages: MessageItem[]; | ||
}[]; | ||
epochs: number; | ||
}; | ||
readonly tools?: { | ||
type: 'function'; | ||
function: { | ||
name: string; | ||
description?: string; | ||
parameters: Record<string, unknown>; | ||
}; | ||
}[]; | ||
readonly gpt3_5_turboPromptTokenUnit: number; | ||
@@ -37,3 +58,10 @@ readonly gpt3_5_turboCompletionTokenUnit: number; | ||
readonly gpt4_turbo_previewCompletionTokenUnit: number; | ||
readonly gpt3_5_turbo_fine_tuneTrainingTokenUnit: number; | ||
readonly gpt3_5_turbo_fine_tunePromptTokenUnit: number; | ||
readonly gpt3_5_turbo_fine_tuneCompletionTokenUnit: number; | ||
get usedUSD(): number; | ||
private trainingUsedUSD; | ||
private functionUsedUSD; | ||
private fineTuneUsedUSD; | ||
private basicUsedTokens; | ||
get usedTokens(): number; | ||
@@ -61,3 +89,3 @@ get promptUsedTokens(): number; | ||
} | ||
export declare function testGPTTokens(openai: OpenAI): Promise<void>; | ||
export declare function testGPTTokens(openai: OpenAI, prompt: string): Promise<void>; | ||
export {}; |
160
index.js
@@ -18,2 +18,3 @@ "use strict"; | ||
const decimal_js_1 = __importDefault(require("decimal.js")); | ||
const openai_chat_tokens_1 = require("openai-chat-tokens"); | ||
let modelEncodingCache = {}; | ||
@@ -87,21 +88,115 @@ function getEncodingForModelCached(model) { | ||
this.gpt4_turbo_previewCompletionTokenUnit = new decimal_js_1.default(0.03).div(1000).toNumber(); | ||
const { model, messages, } = options; | ||
if (!GPTTokens.supportModels.includes(model)) | ||
throw new Error(`Model ${model} is not supported`); | ||
if (model === 'gpt-3.5-turbo') | ||
this.warning(`${model} may update over time. Returning num tokens assuming gpt-3.5-turbo-0613`); | ||
if (model === 'gpt-3.5-turbo-16k') | ||
this.warning(`${model} may update over time. Returning num tokens assuming gpt-3.5-turbo-16k-0613`); | ||
if (model === 'gpt-4') | ||
this.warning(`${model} may update over time. Returning num tokens assuming gpt-4-0613`); | ||
if (model === 'gpt-4-32k') | ||
this.warning(`${model} may update over time. Returning num tokens assuming gpt-4-32k-0613`); | ||
this.model = model; | ||
// https://openai.com/pricing/ | ||
// Fine-tuning models gpt-3.5-turbo | ||
// Training: $0.008 / 1K tokens | ||
this.gpt3_5_turbo_fine_tuneTrainingTokenUnit = new decimal_js_1.default(0.008).div(1000).toNumber(); | ||
// https://openai.com/pricing/ | ||
// Fine-tuning models gpt-3.5-turbo | ||
// Prompt: $0.003 / 1K tokens | ||
this.gpt3_5_turbo_fine_tunePromptTokenUnit = new decimal_js_1.default(0.003).div(1000).toNumber(); | ||
// https://openai.com/pricing/ | ||
// Fine-tuning models gpt-3.5-turbo | ||
// Completion: $0.006 / 1K tokens | ||
this.gpt3_5_turbo_fine_tuneCompletionTokenUnit = new decimal_js_1.default(0.006).div(1000).toNumber(); | ||
const { model, fineTuneModel, messages, training, tools, debug = false, } = options; | ||
this.model = model || (fineTuneModel === null || fineTuneModel === void 0 ? void 0 : fineTuneModel.split(':')[1]); | ||
this.debug = debug; | ||
this.fineTuneModel = fineTuneModel; | ||
this.messages = messages; | ||
this.training = training; | ||
this.tools = tools; | ||
this.checkOptions(); | ||
} | ||
checkOptions() { | ||
if (!GPTTokens.supportModels.includes(this.model)) | ||
throw new Error(`Model ${this.model} is not supported`); | ||
if (!this.messages && !this.training && !this.tools) | ||
throw new Error('Must set on of messages | training | function'); | ||
if (this.fineTuneModel && !this.fineTuneModel.startsWith('ft:gpt')) | ||
throw new Error(`Fine-tuning is not supported for ${this.fineTuneModel}`); | ||
// https://platform.openai.com/docs/guides/fine-tuning | ||
if (![ | ||
'gpt-3.5-turbo', | ||
'gpt-3.5-turbo-0613', | ||
'gpt-3.5-turbo-1106', | ||
'gpt-4-0613', | ||
].includes(this.model) && this.training) | ||
throw new Error(`Fine-tuning is not supported for model ${this.model}`); | ||
// https://platform.openai.com/docs/guides/function-calling | ||
if (![ | ||
'gpt-3.5-turbo', | ||
'gpt-3.5-turbo-0613', | ||
'gpt-3.5-turbo-1106', | ||
'gpt-4', | ||
'gpt-4-0613', | ||
'gpt-4-1106-preview', | ||
].includes(this.model) && this.tools) | ||
throw new Error(`Function is not supported for model ${this.model}`); | ||
if (this.tools && !this.messages) | ||
throw new Error('Function must set messages'); | ||
if (this.model === 'gpt-3.5-turbo') | ||
this.warning(`${this.model} may update over time. Returning num tokens assuming gpt-3.5-turbo-1106`); | ||
if (this.model === 'gpt-4') | ||
this.warning(`${this.model} may update over time. Returning num tokens assuming gpt-4-0613`); | ||
if (this.model === 'gpt-4-32k') | ||
this.warning(`${this.model} may update over time. Returning num tokens assuming gpt-4-32k-0613`); | ||
// old model | ||
if ([ | ||
'gpt-3.5-turbo-0301', | ||
'gpt-3.5-turbo-0613', | ||
'gpt-3.5-turbo-16k', | ||
'gpt-3.5-turbo-16k-0613', | ||
'gpt-4-0314', | ||
'gpt-4-32k-0314', | ||
].includes(this.model)) | ||
this.warning(`${this.model} is old model. Please migrating to replacements: https://platform.openai.com/docs/deprecations/`); | ||
} | ||
// Used USD | ||
get usedUSD() { | ||
let price = 0; | ||
if (this.training) | ||
return this.trainingUsedUSD(); | ||
if (this.tools) | ||
return this.functionUsedUSD(); | ||
if (this.fineTuneModel) | ||
return this.fineTuneUsedUSD(); | ||
return this.basicUsedTokens(); | ||
} | ||
trainingUsedUSD() { | ||
return new decimal_js_1.default(this.usedTokens).mul(this.gpt3_5_turbo_fine_tuneTrainingTokenUnit).toNumber(); | ||
} | ||
functionUsedUSD() { | ||
if ([ | ||
'gpt-3.5-turbo', | ||
'gpt-3.5-turbo-0613', | ||
].includes(this.model)) | ||
return new decimal_js_1.default(this.usedTokens) | ||
.mul(this.gpt3_5_turboPromptTokenUnit).toNumber(); | ||
if ([ | ||
'gpt-3.5-turbo-1106', | ||
].includes(this.model)) | ||
return new decimal_js_1.default(this.usedTokens) | ||
.mul(this.gpt3_5_turbo_1106PromptTokenUnit).toNumber(); | ||
if ([ | ||
'gpt-4', | ||
'gpt-4-0613', | ||
].includes(this.model)) | ||
return new decimal_js_1.default(this.usedTokens) | ||
.mul(this.gpt4_8kPromptTokenUnit).toNumber(); | ||
if ([ | ||
'gpt-4-1106-preview', | ||
].includes(this.model)) | ||
return new decimal_js_1.default(this.usedTokens) | ||
.mul(this.gpt4_turbo_previewPromptTokenUnit).toNumber(); | ||
throw new Error(`Model ${this.model} is not supported`); | ||
} | ||
fineTuneUsedUSD() { | ||
const promptUSD = new decimal_js_1.default(this.promptUsedTokens) | ||
.mul(this.gpt3_5_turbo_fine_tunePromptTokenUnit); | ||
const completionUSD = new decimal_js_1.default(this.completionUsedTokens) | ||
.mul(this.gpt3_5_turbo_fine_tuneCompletionTokenUnit); | ||
return promptUSD.add(completionUSD).toNumber(); | ||
} | ||
basicUsedTokens() { | ||
if ([ | ||
'gpt-3.5-turbo', | ||
'gpt-3.5-turbo-0301', | ||
@@ -114,3 +209,3 @@ 'gpt-3.5-turbo-0613', | ||
.mul(this.gpt3_5_turboCompletionTokenUnit); | ||
price = promptUSD.add(completionUSD).toNumber(); | ||
return promptUSD.add(completionUSD).toNumber(); | ||
} | ||
@@ -125,3 +220,3 @@ if ([ | ||
.mul(this.gpt3_5_turbo_16kCompletionTokenUnit); | ||
price = promptUSD.add(completionUSD).toNumber(); | ||
return promptUSD.add(completionUSD).toNumber(); | ||
} | ||
@@ -135,3 +230,3 @@ if ([ | ||
.mul(this.gpt3_5_turbo_1106CompletionTokenUnit); | ||
price = promptUSD.add(completionUSD).toNumber(); | ||
return promptUSD.add(completionUSD).toNumber(); | ||
} | ||
@@ -147,3 +242,3 @@ if ([ | ||
.mul(this.gpt4_8kCompletionTokenUnit); | ||
price = promptUSD.add(completionUSD).toNumber(); | ||
return promptUSD.add(completionUSD).toNumber(); | ||
} | ||
@@ -159,3 +254,3 @@ if ([ | ||
.mul(this.gpt4_32kCompletionTokenUnit); | ||
price = promptUSD.add(completionUSD).toNumber(); | ||
return promptUSD.add(completionUSD).toNumber(); | ||
} | ||
@@ -167,9 +262,23 @@ if (this.model === 'gpt-4-1106-preview') { | ||
.mul(this.gpt4_turbo_previewCompletionTokenUnit); | ||
price = promptUSD.add(completionUSD).toNumber(); | ||
return promptUSD.add(completionUSD).toNumber(); | ||
} | ||
return price; | ||
throw new Error(`Model ${this.model} is not supported`); | ||
} | ||
// Used Tokens (total) | ||
get usedTokens() { | ||
return this.promptUsedTokens + this.completionUsedTokens; | ||
if (this.training) | ||
return this.training.data | ||
.map(({ messages }) => new GPTTokens({ | ||
model: this.model, | ||
messages, | ||
}).usedTokens + 2) | ||
.reduce((a, b) => a + b, 0) * this.training.epochs; | ||
if (this.tools) | ||
return (0, openai_chat_tokens_1.promptTokensEstimate)({ | ||
messages: this.messages, | ||
functions: this.tools.map(item => item.function), | ||
}); | ||
if (this.messages) | ||
return this.promptUsedTokens + this.completionUsedTokens; | ||
return 0; | ||
} | ||
@@ -208,2 +317,4 @@ // Used Tokens (prompt) | ||
warning(message) { | ||
if (!this.debug) | ||
return; | ||
console.warn('Warning:', message); | ||
@@ -251,2 +362,4 @@ } | ||
for (const [key, value] of Object.entries(message)) { | ||
if (typeof value !== 'string') | ||
continue; | ||
num_tokens += encoding.encode(value).length; | ||
@@ -264,2 +377,3 @@ if (key === 'name') { | ||
} | ||
exports.GPTTokens = GPTTokens; | ||
GPTTokens.supportModels = [ | ||
@@ -280,6 +394,4 @@ 'gpt-3.5-turbo-0301', | ||
]; | ||
exports.GPTTokens = GPTTokens; | ||
function testGPTTokens(openai) { | ||
function testGPTTokens(openai, prompt) { | ||
return __awaiter(this, void 0, void 0, function* () { | ||
const prompt = `How are u`; | ||
const messages = [ | ||
@@ -286,0 +398,0 @@ { role: 'user', content: prompt }, |
207
index.ts
import { encodingForModel, getEncoding, Tiktoken } from 'js-tiktoken' | ||
import Decimal from 'decimal.js' | ||
import OpenAI from 'openai' | ||
import { promptTokensEstimate } from 'openai-chat-tokens' | ||
@@ -52,23 +53,80 @@ let modelEncodingCache: { [key in supportModelType]?: Tiktoken } = {} | ||
/**
 * @param options One of `messages`, `training` or `tools` must be set.
 *                `model` may be omitted when `fineTuneModel` is given;
 *                the base model is then derived from the fine-tune id
 *                ("ft:<base>:<org>…" → <base>).
 */
constructor (options: {
    model?: supportModelType
    fineTuneModel?: string
    messages?: GPTTokens['messages']
    training?: GPTTokens['training']
    tools?: GPTTokens['tools']
    debug?: boolean
}) {
    const {
        model,
        fineTuneModel,
        messages,
        training,
        tools,
        debug = false,
    } = options

    // Fall back to the base model encoded in the fine-tune model id
    this.model = model || fineTuneModel?.split(':')[1] as supportModelType
    this.debug = debug
    this.fineTuneModel = fineTuneModel
    this.messages = messages
    this.training = training
    this.tools = tools

    this.checkOptions()
}
/**
 * Validate constructor options.
 * Throws when the resolved model is unsupported, when none of
 * messages/training/tools is provided, or when training / function
 * calling is requested for a model that does not support it.
 * Emits debug-only warnings for aliased and deprecated models.
 */
private checkOptions () {
    if (!GPTTokens.supportModels.includes(this.model))
        throw new Error(`Model ${this.model} is not supported`)
    if (!this.messages && !this.training && !this.tools)
        throw new Error('Must set one of messages | training | tools')
    if (this.fineTuneModel && !this.fineTuneModel.startsWith('ft:gpt'))
        throw new Error(`Fine-tuning is not supported for ${this.fineTuneModel}`)
    // https://platform.openai.com/docs/guides/fine-tuning
    if (![
        'gpt-3.5-turbo',
        'gpt-3.5-turbo-0613',
        'gpt-3.5-turbo-1106',
        'gpt-4-0613',
    ].includes(this.model) && this.training)
        throw new Error(`Fine-tuning is not supported for model ${this.model}`)
    // https://platform.openai.com/docs/guides/function-calling
    if (![
        'gpt-3.5-turbo',
        'gpt-3.5-turbo-0613',
        'gpt-3.5-turbo-1106',
        'gpt-4',
        'gpt-4-0613',
        'gpt-4-1106-preview',
    ].includes(this.model) && this.tools)
        throw new Error(`Function is not supported for model ${this.model}`)
    if (this.tools && !this.messages)
        throw new Error('Function must set messages')
    if (this.model === 'gpt-3.5-turbo')
        this.warning(`${this.model} may update over time. Returning num tokens assuming gpt-3.5-turbo-1106`)
    if (this.model === 'gpt-4')
        this.warning(`${this.model} may update over time. Returning num tokens assuming gpt-4-0613`)
    if (this.model === 'gpt-4-32k')
        this.warning(`${this.model} may update over time. Returning num tokens assuming gpt-4-32k-0613`)
    // Deprecated snapshot models
    if ([
        'gpt-3.5-turbo-0301',
        'gpt-3.5-turbo-0613',
        'gpt-3.5-turbo-16k',
        'gpt-3.5-turbo-16k-0613',
        'gpt-4-0314',
        'gpt-4-32k-0314',
    ].includes(this.model)) this.warning(`${this.model} is an old model. Please migrate to a replacement: https://platform.openai.com/docs/deprecations/`)
}
@@ -92,4 +150,20 @@ | ||
public readonly debug | ||
public readonly model | ||
public readonly messages | ||
public readonly fineTuneModel | ||
public readonly messages?: MessageItem [] | ||
public readonly training?: { | ||
data: { | ||
messages: MessageItem [] | ||
} [] | ||
epochs: number | ||
} | ||
public readonly tools?: { | ||
type: 'function' | ||
function: { | ||
name: string | ||
description?: string | ||
parameters: Record<string, unknown> | ||
} | ||
} [] | ||
@@ -156,8 +230,70 @@ // https://openai.com/pricing/ | ||
// https://openai.com/pricing/ | ||
// Fine-tuning models gpt-3.5-turbo | ||
// Training: $0.008 / 1K tokens | ||
public readonly gpt3_5_turbo_fine_tuneTrainingTokenUnit = new Decimal(0.008).div(1000).toNumber() | ||
// https://openai.com/pricing/ | ||
// Fine-tuning models gpt-3.5-turbo | ||
// Prompt: $0.003 / 1K tokens | ||
public readonly gpt3_5_turbo_fine_tunePromptTokenUnit = new Decimal(0.003).div(1000).toNumber() | ||
// https://openai.com/pricing/ | ||
// Fine-tuning models gpt-3.5-turbo | ||
// Completion: $0.006 / 1K tokens | ||
public readonly gpt3_5_turbo_fine_tuneCompletionTokenUnit = new Decimal(0.006).div(1000).toNumber() | ||
// Used USD
// Dispatches to the pricing strategy for the configured usage mode:
// fine-tune training, function calling, fine-tuned chat, or basic chat.
public get usedUSD (): number {
    if (this.training) return this.trainingUsedUSD()
    if (this.tools) return this.functionUsedUSD()
    if (this.fineTuneModel) return this.fineTuneUsedUSD()
    // NOTE(review): basicUsedTokens() returns a USD amount despite its name.
    return this.basicUsedTokens()
}
/** USD cost of a fine-tune training job: total tokens × training rate. */
private trainingUsedUSD () {
    const totalTokens = this.usedTokens
    const perTokenUSD = this.gpt3_5_turbo_fine_tuneTrainingTokenUnit
    return new Decimal(totalTokens).mul(perTokenUSD).toNumber()
}
/**
 * USD cost of a function-calling prompt, billed at the model's
 * prompt-token rate. Throws for models without a rate listed here.
 */
private functionUsedUSD () {
    let promptUnit: number
    if (['gpt-3.5-turbo', 'gpt-3.5-turbo-0613'].includes(this.model)) {
        promptUnit = this.gpt3_5_turboPromptTokenUnit
    } else if (['gpt-3.5-turbo-1106'].includes(this.model)) {
        promptUnit = this.gpt3_5_turbo_1106PromptTokenUnit
    } else if (['gpt-4', 'gpt-4-0613'].includes(this.model)) {
        promptUnit = this.gpt4_8kPromptTokenUnit
    } else if (['gpt-4-1106-preview'].includes(this.model)) {
        promptUnit = this.gpt4_turbo_previewPromptTokenUnit
    } else {
        throw new Error(`Model ${this.model} is not supported`)
    }
    return new Decimal(this.usedTokens).mul(promptUnit).toNumber()
}
/**
 * USD cost of chatting with a fine-tuned model:
 * prompt and completion tokens billed at the fine-tune rates.
 */
private fineTuneUsedUSD () {
    return new Decimal(this.promptUsedTokens)
        .mul(this.gpt3_5_turbo_fine_tunePromptTokenUnit)
        .add(new Decimal(this.completionUsedTokens)
            .mul(this.gpt3_5_turbo_fine_tuneCompletionTokenUnit))
        .toNumber()
}
private basicUsedTokens () { | ||
if ([ | ||
'gpt-3.5-turbo', | ||
'gpt-3.5-turbo-0301', | ||
@@ -171,3 +307,3 @@ 'gpt-3.5-turbo-0613', | ||
price = promptUSD.add(completionUSD).toNumber() | ||
return promptUSD.add(completionUSD).toNumber() | ||
} | ||
@@ -184,3 +320,3 @@ | ||
price = promptUSD.add(completionUSD).toNumber() | ||
return promptUSD.add(completionUSD).toNumber() | ||
} | ||
@@ -196,3 +332,3 @@ | ||
price = promptUSD.add(completionUSD).toNumber() | ||
return promptUSD.add(completionUSD).toNumber() | ||
} | ||
@@ -210,3 +346,3 @@ | ||
price = promptUSD.add(completionUSD).toNumber() | ||
return promptUSD.add(completionUSD).toNumber() | ||
} | ||
@@ -224,3 +360,3 @@ | ||
price = promptUSD.add(completionUSD).toNumber() | ||
return promptUSD.add(completionUSD).toNumber() | ||
} | ||
@@ -234,11 +370,25 @@ | ||
price = promptUSD.add(completionUSD).toNumber() | ||
return promptUSD.add(completionUSD).toNumber() | ||
} | ||
return price | ||
throw new Error(`Model ${this.model} is not supported`) | ||
} | ||
// Used Tokens (total)
// Original had a stale unconditional return before the conditionals,
// making the training/tools branches unreachable; removed.
public get usedTokens (): number {
    // Fine-tune training: per-example tokens (+2 overhead), summed, × epochs
    if (this.training) return this.training.data
        .map(({ messages }) => new GPTTokens({
            model: this.model,
            messages,
        }).usedTokens + 2)
        .reduce((a, b) => a + b, 0) * this.training.epochs
    // Function calling: estimate includes serialized function definitions
    if (this.tools) return promptTokensEstimate({
        messages : this.messages!,
        functions: this.tools.map(item => item.function),
    })
    if (this.messages) return this.promptUsedTokens + this.completionUsedTokens
    return 0
}
@@ -286,2 +436,3 @@ | ||
private warning (message: string) { | ||
if (!this.debug) return | ||
console.warn('Warning:', message) | ||
@@ -335,2 +486,4 @@ } | ||
for (const [key, value] of Object.entries(message)) { | ||
if (typeof value !== 'string') continue | ||
num_tokens += encoding.encode(value as string).length | ||
@@ -351,4 +504,3 @@ if (key === 'name') { | ||
export async function testGPTTokens (openai: OpenAI) { | ||
const prompt = `How are u` | ||
export async function testGPTTokens (openai: OpenAI, prompt: string) { | ||
const messages: MessageItem [] = [ | ||
@@ -387,2 +539,3 @@ { role: 'user', content: prompt }, | ||
if (ignoreModel) continue | ||
if (!openaiUsage) { | ||
@@ -389,0 +542,0 @@ console.error(`Test ${model} failed (openai return usage is null)`) |
{ | ||
"name": "gpt-tokens", | ||
"version": "1.1.3", | ||
"version": "1.2.0", | ||
"description": "Calculate the token consumption and amount of openai gpt message", | ||
@@ -27,3 +27,4 @@ "keywords": [ | ||
"js-tiktoken": "^1.0.7", | ||
"openai": "^4.6.1" | ||
"openai": "^4.6.1", | ||
"openai-chat-tokens": "^0.2.8" | ||
}, | ||
@@ -30,0 +31,0 @@ "devDependencies": { |
230
README.md
@@ -16,4 +16,36 @@ # gpt-tokens | ||
### Usage | ||
## Support | ||
### Basic Models | ||
* gpt-3.5-turbo | ||
* gpt-3.5-turbo-0301 | ||
* gpt-3.5-turbo-0613 | ||
* gpt-3.5-turbo-1106 | ||
* gpt-3.5-turbo-16k | ||
* gpt-3.5-turbo-16k-0613 | ||
* gpt-4 | ||
* gpt-4-0314 | ||
* gpt-4-0613 | ||
* gpt-4-32k (Not tested) | ||
* gpt-4-32k-0314 (Not tested) | ||
* gpt-4-32k-0613 (Not tested) | ||
* gpt-4-1106-preview | ||
### Fine Tune Models | ||
* ft:gpt-3.5-turbo:xxx | ||
* ft:gpt-3.5-turbo-1106:xxx | ||
* ft:gpt-3.5-turbo-0613:xxx | ||
* ft:gpt-4:xxx (Not tested) | ||
### Others | ||
* Fine tune training (Not rigorously tested) | ||
* Function calling (Not rigorously tested) | ||
## Usage | ||
### Basic chat messages | ||
```typescript | ||
@@ -23,56 +55,93 @@ import { GPTTokens } from 'gpt-tokens' | ||
const usageInfo = new GPTTokens({ | ||
model : 'gpt-3.5-turbo-0613', | ||
model : 'gpt-3.5-turbo-1106', | ||
messages: [ | ||
{ | ||
'role' : 'system', | ||
'content': 'You are a helpful, pattern-following assistant that translates corporate jargon into plain English.', | ||
}, | ||
{ | ||
'role' : 'system', | ||
'name' : 'example_user', | ||
'content': 'New synergies will help drive top-line growth.', | ||
}, | ||
{ | ||
'role' : 'system', | ||
'name' : 'example_assistant', | ||
'content': 'Things working well together will increase revenue.', | ||
}, | ||
{ | ||
'role' : 'system', | ||
'name' : 'example_user', | ||
'content': 'Let\'s circle back when we have more bandwidth to touch base on opportunities for increased leverage.', | ||
}, | ||
{ | ||
'role' : 'system', | ||
'name' : 'example_assistant', | ||
'content': 'Let\'s talk later when we\'re less busy about how to do better.', | ||
}, | ||
{ | ||
'role' : 'user', | ||
'content': 'This late pivot means we don\'t have time to boil the ocean for the client deliverable.', | ||
}, | ||
{ | ||
'role' : 'assistant', | ||
'content': 'This last-minute change means we don\'t have enough time to complete the entire project for the client.', | ||
}, | ||
{ 'role' :'system', 'content': 'You are a helpful, pattern-following assistant that translates corporate jargon into plain English.' }, | ||
{ 'role' :'user', 'content': 'This late pivot means we don\'t have time to boil the ocean for the client deliverable.' }, | ||
] | ||
}) | ||
// ┌───────────────────┬────────┐ | ||
// │ (index) │ Values │ | ||
// ├───────────────────┼────────┤ | ||
// │ Tokens prompt │ 129 │ | ||
// │ Tokens completion │ 20 │ | ||
// │ Tokens total │ 149 │ | ||
// └───────────────────┴────────┘ | ||
console.table({ | ||
'Tokens prompt' : usageInfo.promptUsedTokens, | ||
'Tokens completion': usageInfo.completionUsedTokens, | ||
'Tokens total' : usageInfo.usedTokens, | ||
console.info('Used tokens: ', usageInfo.usedTokens) | ||
console.info('Used USD: ', usageInfo.usedUSD) | ||
``` | ||
### Fine tune training | ||
```typescript | ||
import { GPTTokens } from 'gpt-tokens' | ||
const usageInfo = new GPTTokens({ | ||
model : 'gpt-3.5-turbo-1106', | ||
training: { | ||
data : fs | ||
.readFileSync(filepath, 'utf-8') | ||
.split('\n') | ||
.filter(Boolean) | ||
.map(row => JSON.parse(row)), | ||
epochs: 7, | ||
}, | ||
}) | ||
// Price USD: 0.000298 | ||
console.log('Price USD: ', usageInfo.usedUSD) | ||
console.info('Used tokens: ', usageInfo.usedTokens) | ||
console.info('Used USD: ', usageInfo.usedUSD) | ||
``` | ||
### Fine tune chat messages | ||
```typescript | ||
import { GPTTokens } from 'gpt-tokens' | ||
const usageInfo = new GPTTokens({ | ||
fineTuneModel: 'ft:gpt-3.5-turbo-1106:opensftp::8IWeqPit', | ||
messages : [ | ||
{ role: 'system', content: 'You are a helpful assistant.' }, | ||
], | ||
}) | ||
console.info('Used tokens: ', usageInfo.usedTokens) | ||
console.info('Used USD: ', usageInfo.usedUSD) | ||
``` | ||
### Function calling | ||
```typescript | ||
import { GPTTokens } from 'gpt-tokens' | ||
const usageInfo = new GPTTokens({ | ||
model : 'gpt-3.5-turbo-1106', | ||
messages: [ | ||
{ role: 'user', content: 'What\'s the weather like in San Francisco and Paris?' }, | ||
], | ||
tools : [ | ||
{ | ||
type : 'function', | ||
function: { | ||
name : 'get_current_weather', | ||
description: 'Get the current weather in a given location', | ||
parameters : { | ||
type : 'object', | ||
properties: { | ||
location: { | ||
type : 'string', | ||
description: 'The city and state, e.g. San Francisco, CA', | ||
}, | ||
unit : { | ||
type: 'string', | ||
enum: ['celsius', 'fahrenheit'], | ||
}, | ||
}, | ||
required : ['location'], | ||
}, | ||
}, | ||
}, | ||
] | ||
}) | ||
console.info('Used tokens: ', usageInfo.usedTokens) | ||
console.info('Used USD: ', usageInfo.usedUSD) | ||
``` | ||
## Calculation method | ||
### Basic chat messages | ||
> Tokens calculation rules for prompt and completion: | ||
@@ -88,66 +157,12 @@ > | ||
## Support Models | ||
### Function calling | ||
* gpt-3.5-turbo | ||
* gpt-3.5-turbo-0301 | ||
* gpt-3.5-turbo-0613 | ||
* gpt-3.5-turbo-1106 | ||
* gpt-3.5-turbo-16k | ||
* gpt-3.5-turbo-16k-0613 | ||
* gpt-4 | ||
* gpt-4-0314 | ||
* gpt-4-0613 | ||
* gpt-4-32k | ||
* gpt-4-32k-0314 | ||
* gpt-4-32k-0613 | ||
* gpt-4-1106-preview | ||
Thanks to hmarr | ||
Test in your project | ||
https://hmarr.com/blog/counting-openai-tokens/ | ||
## Test in your project | ||
```bash | ||
node test.js yourAPIKey | ||
# Testing GPT... | ||
# [1/13]: Testing gpt-3.5-turbo-0301... | ||
# Pass! | ||
# [2/13]: Testing gpt-3.5-turbo... | ||
# Warning: gpt-3.5-turbo may update over time. Returning num tokens assuming gpt-3.5-turbo-0613 | ||
# Pass! | ||
# [3/13]: Testing gpt-3.5-turbo-0613... | ||
# Pass! | ||
# [4/13]: Testing gpt-3.5-turbo-16k... | ||
# Warning: gpt-3.5-turbo-16k may update over time. Returning num tokens assuming gpt-3.5-turbo-16k-0613 | ||
# Pass! | ||
# [5/13]: Testing gpt-3.5-turbo-16k-0613... | ||
# Pass! | ||
# [6/13]: Testing gpt-3.5-turbo-1106... | ||
# Pass! | ||
# [7/13]: Testing gpt-4... | ||
# Warning: gpt-4 may update over time. Returning num tokens assuming gpt-4-0613 | ||
# Pass! | ||
# [8/13]: Testing gpt-4-0314... | ||
# Pass! | ||
# [9/13]: Testing gpt-4-0613... | ||
# Pass! | ||
# [10/13]: Testing gpt-4-32k... | ||
# Ignore model gpt-4-32k: 404 The model `gpt-4-32k` does not exist or you do not have access to it. Learn more: https://help.openai.com/en/articles/7102672-how-can-i-access-gpt-4. | ||
# Warning: gpt-4-32k may update over time. Returning num tokens assuming gpt-4-32k-0613 | ||
# [11/13]: Testing gpt-4-32k-0314... | ||
# Ignore model gpt-4-32k-0314: 404 The model `gpt-4-32k-0314` does not exist or you do not have access to it. Learn more: https://help.openai.com/en/articles/7102672-how-can-i-access-gpt-4. | ||
# [12/13]: Testing gpt-4-32k-0613... | ||
# Ignore model gpt-4-32k-0613: 404 The model `gpt-4-32k-0613` does not exist or you do not have access to it. Learn more: https://help.openai.com/en/articles/7102672-how-can-i-access-gpt-4. | ||
# [13/13]: Testing gpt-4-1106-preview... | ||
# Pass! | ||
# Test success! | ||
# Testing performance... | ||
# GPTTokens: 0.473ms | ||
# GPTTokens: 0.097ms | ||
# GPTTokens: 0.072ms | ||
# GPTTokens: 0.079ms | ||
# GPTTokens: 0.095ms | ||
# GPTTokens: 0.066ms | ||
# GPTTokens: 0.064ms | ||
# GPTTokens: 0.068ms | ||
# GPTTokens: 0.077ms | ||
# GPTTokens: 0.08ms | ||
node test.js yourOpenAIAPIKey | ||
``` | ||
@@ -158,1 +173,2 @@ | ||
- [js-tiktoken](https://github.com/dqbd/tiktoken) | ||
- [openai-chat-tokens](https://github.com/hmarr/openai-chat-tokens#readme) |
334
test.js
@@ -0,29 +1,53 @@ | ||
const fs = require('fs') | ||
const OpenAI = require('openai') | ||
const { GPTTokens, testGPTTokens } = require('./index'); | ||
const { GPTTokens, testGPTTokens } = require('./index') | ||
(async () => { | ||
const [apiKey = process.env.OPENAI_API_KEY] = process.argv.slice(2) | ||
const [apiKey = process.env.OPENAI_API_KEY] = process.argv.slice(2) | ||
if (!apiKey) { | ||
console.error('No API key provided. Ignoring test.') | ||
process.exit(0) | ||
} | ||
if (!apiKey) { | ||
console.error('No API key provided. Ignoring test.') | ||
process.exit(0) | ||
} | ||
const openai = new OpenAI({ apiKey }) | ||
const openai = new OpenAI({ apiKey }) | ||
// Run the per-model token-count test suite against the live API.
// (Original contained a stale duplicate call without the prompt argument.)
async function testBasic(prompt) {
    console.info('Testing GPT...')
    await testGPTTokens(openai, prompt)
}
// Verify fine-tune training token counting against a known OpenAI total
// for the JSONL file at `filepath` (one training example per line).
function testTraining(filepath) {
    console.info('Testing Create a fine-tuned model...')
    const openaiUsedTokens = 4445
    const examples = fs
        .readFileSync(filepath, 'utf-8')
        .split('\n')
        .filter(Boolean)
        .map(line => JSON.parse(line))
    const gptTokens = new GPTTokens({
        model   : 'gpt-3.5-turbo-1106',
        training: {
            data  : examples,
            epochs: 7,
        },
    })
    if (gptTokens.usedTokens !== openaiUsedTokens)
        throw new Error(`Test training usedTokens failed (openai: ${openaiUsedTokens}/ gpt-tokens: ${gptTokens.usedTokens})`)
    console.info('Pass!')
}
function testPerformance(messages) { | ||
console.info('Testing performance...') | ||
console.info('Messages:', JSON.stringify(messages)) | ||
for (let i = 0; i < 10; i++) { | ||
console.time('GPTTokens') | ||
const usageInfo = new GPTTokens({ | ||
plus : false, | ||
model : 'gpt-3.5-turbo-0613', | ||
messages: [ | ||
{ | ||
role : 'user', | ||
content: 'Hello world', | ||
}, | ||
], | ||
plus : false, | ||
model: 'gpt-3.5-turbo-0613', | ||
messages, | ||
}) | ||
@@ -35,4 +59,278 @@ | ||
usageInfo.usedUSD | ||
console.timeEnd('GPTTokens') | ||
} | ||
})() | ||
} | ||
async function testFunctionCalling() { | ||
console.info('Testing function calling...') | ||
await Promise.all([ | ||
functionCalling1(), | ||
functionCalling2(), | ||
]) | ||
console.info('Pass!') | ||
async function functionCalling1() { | ||
// https://platform.openai.com/docs/guides/function-calling | ||
// Example dummy function hard coded to return the same weather | ||
// In production, this could be your backend API or an external API | ||
// Dummy weather lookup used by the function-calling example:
// returns a canned JSON payload for known cities, 'unknown' otherwise.
// (`unit` is accepted for API parity but the canned data fixes the unit.)
function getCurrentWeather(location, unit = 'fahrenheit') {
    const city = location.toLowerCase()
    if (city.includes('tokyo')) {
        return JSON.stringify({ location: 'Tokyo', temperature: '10', unit: 'celsius' })
    }
    if (city.includes('san francisco')) {
        return JSON.stringify({ location: 'San Francisco', temperature: '72', unit: 'fahrenheit' })
    }
    if (city.includes('paris')) {
        return JSON.stringify({ location: 'Paris', temperature: '22', unit: 'fahrenheit' })
    }
    return JSON.stringify({ location, temperature: 'unknown' })
}
// End-to-end function-calling round trip:
// 1) send the conversation plus the `tools` schema to the model,
// 2) check gpt-tokens' prompt estimate against OpenAI's reported usage,
// 3) execute any requested tool calls locally and feed the results back
//    for a final completion.
// Throws if the local token estimate disagrees with OpenAI's accounting.
async function runConversation() {
    // Step 1: send the conversation and available functions to the model
    const model = 'gpt-3.5-turbo-1106'
    const messages = [
        { role: 'user', content: 'What\'s the weather like in San Francisco and Paris?' },
    ]
    const tools = [
        {
            type    : 'function',
            function: {
                name       : 'get_current_weather',
                description: 'Get the current weather in a given location',
                parameters : {
                    type      : 'object',
                    properties: {
                        location: {
                            type       : 'string',
                            description: 'The city and state, e.g. San Francisco, CA',
                        },
                        unit    : {
                            type: 'string',
                            enum: ['celsius', 'fahrenheit'],
                        },
                    },
                    required  : ['location'],
                },
            },
        },
    ]
    const response = await openai.chat.completions.create({
        model,
        messages,
        tools,
        tool_choice: 'auto', // auto is default, but we'll be explicit
    })
    const { usage: openaiUsage } = response
    const gptTokens = new GPTTokens({
        model,
        messages,
        tools,
    })
    // gpt-tokens must match OpenAI's own prompt-token accounting exactly
    if (gptTokens.usedTokens !== openaiUsage.prompt_tokens)
        throw new Error(`Test function calling promptUsedTokens failed (openai: ${openaiUsage.prompt_tokens}/ gpt-tokens: ${gptTokens.usedTokens})`)
    const responseMessage = response.choices[0].message
    // Step 2: check if the model wanted to call a function
    const toolCalls = responseMessage.tool_calls
    if (toolCalls) { // fix: test the alias we iterate below, not the raw property
        // Step 3: call the function
        // Note: the JSON response may not always be valid; be sure to handle errors
        const availableFunctions = {
            get_current_weather: getCurrentWeather,
        } // only one function in this example, but you can have multiple
        messages.push(responseMessage) // extend conversation with assistant's reply
        for (const toolCall of toolCalls) {
            const functionName = toolCall.function.name
            const functionToCall = availableFunctions[functionName]
            const functionArgs = JSON.parse(toolCall.function.arguments)
            const functionResponse = functionToCall(
                functionArgs.location,
                functionArgs.unit,
            )
            messages.push({
                tool_call_id: toolCall.id,
                role        : 'tool',
                name        : functionName,
                content     : functionResponse,
            }) // extend conversation with function response
        }
        const secondResponse = await openai.chat.completions.create({
            model, // fix: reuse the `model` const instead of re-hardcoding the string
            messages,
        }) // get a new response from the model where it can see the function response
        return secondResponse.choices
    }
}
await runConversation() | ||
} | ||
async function functionCalling2() {
    // https://platform.openai.com/docs/guides/function-calling
    // Example dummy function hard coded to return the same weather
    // In production, this could be your backend API or an external API
    // Returns a JSON payload with a random price; nondeterminism is fine
    // here because only the *prompt* token count is asserted below.
    function getProductPrice(store, product) {
        return JSON.stringify({
            store,
            product,
            price: (Math.random() * 1000).toFixed(0),
            unit : '$',
        })
    }
    // Same round-trip shape as the first function-calling test, but with a
    // tool whose schema uses an enum-constrained parameter and a model that
    // may request multiple parallel tool calls (one per store).
    async function runConversation() {
        // Step 1: send the conversation and available functions to the model
        const model = 'gpt-3.5-turbo-1106'
        const messages = [
            { role: 'user', content: 'ps5 price in all stores' },
        ]
        const tools = [
            {
                type    : 'function',
                function: {
                    name       : 'get_product_price',
                    description: 'Get the price of an item in a specified store',
                    parameters : {
                        type      : 'object',
                        properties: {
                            store  : {
                                type       : 'string',
                                description: 'The store name',
                                enum       : ['Amazon', 'Ebay', 'TaoBao'],
                            },
                            product: {
                                type       : 'string',
                                description: 'The product name e.g. MacbookPro',
                            },
                        },
                        required  : ['product'],
                    },
                },
            },
        ]
        const response = await openai.chat.completions.create({
            model,
            messages,
            tools,
            tool_choice: 'auto', // auto is default, but we'll be explicit
        })
        const { usage: openaiUsage } = response
        const gptTokens = new GPTTokens({
            model,
            messages,
            tools,
        })
        // gpt-tokens must match OpenAI's own prompt-token accounting exactly
        if (gptTokens.usedTokens !== openaiUsage.prompt_tokens)
            throw new Error(`Test function calling promptUsedTokens failed (openai: ${openaiUsage.prompt_tokens}/ gpt-tokens: ${gptTokens.usedTokens})`)
        const responseMessage = response.choices[0].message
        // Step 2: check if the model wanted to call a function
        const toolCalls = responseMessage.tool_calls
        if (toolCalls) { // fix: test the alias we iterate below, not the raw property
            // Step 3: call the function
            // Note: the JSON response may not always be valid; be sure to handle errors
            const availableFunctions = {
                get_product_price: getProductPrice,
            } // only one function in this example, but you can have multiple
            messages.push(responseMessage) // extend conversation with assistant's reply
            for (const toolCall of toolCalls) {
                const functionName = toolCall.function.name
                const functionToCall = availableFunctions[functionName]
                const functionArgs = JSON.parse(toolCall.function.arguments)
                // fix: getProductPrice takes (store, product); the previous
                // third argument `functionArgs.unit` was silently ignored
                const functionResponse = functionToCall(
                    functionArgs.store,
                    functionArgs.product,
                )
                messages.push({
                    tool_call_id: toolCall.id,
                    role        : 'tool',
                    name        : functionName,
                    content     : functionResponse,
                }) // extend conversation with function response
            }
            const secondResponse = await openai.chat.completions.create({
                model, // fix: reuse the `model` const instead of re-hardcoding the string
                messages,
            }) // get a new response from the model where it can see the function response
            return secondResponse.choices
        }
    }
    await runConversation()
}
} | ||
// Verifies gpt-tokens' prompt-token estimate for a fine-tuned model
// against the usage figure reported by the OpenAI API itself.
// Throws when the two counts disagree.
async function testFineTune() {
    console.info('Testing fine-tune...')
    const fineTuneModel = 'ft:gpt-3.5-turbo-1106:opensftp::8IWeqPit'
    const messages      = [{ role: 'system', content: 'You are a helpful assistant.' }]
    const { usage: openaiUsage } = await openai.chat.completions.create({
        messages,
        model: fineTuneModel,
    })
    const gptTokens = new GPTTokens({
        fineTuneModel,
        messages,
    })
    if (gptTokens.usedTokens !== openaiUsage.prompt_tokens)
        throw new Error(`Test fine-tune promptUsedTokens failed (openai: ${openaiUsage.prompt_tokens}/ gpt-tokens: ${gptTokens.usedTokens})`)
    console.info('Pass!')
}
// Test entry point: runs each suite sequentially so their console output
// does not interleave. Any suite's throw rejects the returned promise.
async function start() {
    await testBasic('How are u')
    await testFunctionCalling()
    await testFineTune()
    // NOTE(review): these two are invoked without await — presumably
    // synchronous helpers; confirm they do not return promises.
    testTraining('./fine-tuning-data.jsonl')
    testPerformance([
        {
            role   : 'user',
            content: 'Hello world',
        },
    ])
}
// fix: `.then()` with no handlers left rejections unhandled; report test
// failures explicitly instead of relying on the unhandled-rejection crash.
start().catch(console.error)
License Policy Violation
License: This package is not allowed per your license policy. Review the package's license to ensure compliance.
Found 1 instance in 1 package
Filesystem access
Supply chain risk: Accesses the file system, and could potentially read sensitive data.
Found 1 instance in 1 package
License Policy Violation
License: This package is not allowed per your license policy. Review the package's license to ensure compliance.
Found 1 instance in 1 package
59609
1234
171
4
3
+ Addedopenai-chat-tokens@^0.2.8
+ Addedopenai-chat-tokens@0.2.8(transitive)