openai-streams
Comparing version 3.1.2 to 4.0.0
@@ -1,1 +0,1 @@
import{streamArray as n}from"yield-stream";import{ENCODER as p}from"../../globs/shared.js";import{EventStream as h,TokenStream as w}from"../streaming/index.js";const k=async(s,d,{mode:o="tokens",apiKey:a=process.env.OPENAI_API_KEY}={})=>{if(!a)throw new Error("No API key provided. Please set the OPENAI_API_KEY environment variable or pass the { apiKey } option.");const i=s==="completions",t=await fetch(`https://api.openai.com/v1/${s}`,{method:"POST",body:JSON.stringify({...d,stream:i?!0:void 0}),headers:{Authorization:`Bearer ${a}`,"Content-Type":"application/json",Accept:"application/json"}});if(!t.body)throw new Error("No response body");let e;if(i)switch(o){case"tokens":e=w(t.body);break;case"raw":e=h(t.body);break;default:throw new Error(`Invalid mode: ${o}`)}else{const r=await t.text();switch(o){case"tokens":const m=JSON.parse(r),{text:c}=m.choices?.[0]??{};if(typeof c!="string"){console.error("No text choices received from OpenAI: "+r),e=n([]);break}const l=p.encode(c);e=n([l]);break;case"raw":const f=p.encode(r);e=n([f]);break;default:throw new Error(`Invalid mode: ${o}`)}}return e};export{k as OpenAI};
import{streamArray as a}from"yield-stream";import{ENCODER as p}from"../../globs/shared.js";import{ChatStream as b,EventStream as w,getTokensFromResponse as A,TokenStream as k}from"../streaming/index.js";import{OpenAIAPIEndpoints as E}from"../types.js";const S=async(o,d,{mode:r="tokens",apiKey:s=process.env.OPENAI_API_KEY}={})=>{if(!s)throw new Error("No API key provided. Please set the OPENAI_API_KEY environment variable or pass the { apiKey } option.");const c=o==="completions"||o==="chat",m=E[o],t=await fetch(`https://api.openai.com/v1/${m}`,{method:"POST",body:JSON.stringify({...d,stream:c?!0:void 0}),headers:{Authorization:`Bearer ${s}`,"Content-Type":"application/json",Accept:"application/json"}});if(!t.body)throw new Error("No response body");let e;if(c)switch(r){case"raw":e=w(t.body);break;case"tokens":switch(o){case"chat":e=b(t.body);break;default:e=k(t.body);break}break;default:throw new Error(`Invalid mode: ${r}`)}else{const n=await t.text();switch(r){case"tokens":const h=JSON.parse(n),i=A(h);if(typeof i!="string"){console.error("No text choices received from OpenAI: "+n),e=a([]);break}const l=p.encode(i);e=a([l]);break;case"raw":const f=p.encode(n);e=a([f]);break;default:throw new Error(`Invalid mode: ${r}`)}}return e};export{S as OpenAI};
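De-minified for readability, the 4.0.0 dispatch above roughly does the following. This is a hedged sketch reconstructed from the minified diff, not the shipped source; the identifier names are invented, the import paths are assumed, and the non-streaming branch for `edits`, `embeddings`, etc. is omitted.

```ts
// Sketch only: names and paths are illustrative, reconstructed from the minified bundle above.
import { ChatStream, EventStream, TokenStream } from "../streaming/index.js";
import { OpenAIAPIEndpoints, type OpenAIAPIEndpoint } from "../types.js";

async function openAIStream(
  endpoint: OpenAIAPIEndpoint,
  args: Record<string, unknown>,
  mode: "tokens" | "raw",
  apiKey: string
): Promise<ReadableStream<Uint8Array>> {
  // New in 4.0.0: "chat" is streamed just like "completions".
  const streamed = endpoint === "completions" || endpoint === "chat";
  const response = await fetch(
    // Endpoint keys now map to URL paths, e.g. "chat" -> "chat/completions".
    `https://api.openai.com/v1/${OpenAIAPIEndpoints[endpoint]}`,
    {
      method: "POST",
      body: JSON.stringify({ ...args, stream: streamed ? true : undefined }),
      headers: {
        Authorization: `Bearer ${apiKey}`,
        "Content-Type": "application/json",
        Accept: "application/json",
      },
    }
  );
  if (!response.body) throw new Error("No response body");
  // "raw" still yields full server-sent events; in "tokens" mode, chat
  // responses get the new ChatStream parser instead of TokenStream.
  if (mode === "raw") return EventStream(response.body);
  return endpoint === "chat" ? ChatStream(response.body) : TokenStream(response.body);
}
```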
/**
 * @fileoverview
 *
 * These are pinned types literally copy-pasted from the OpenAI library in the
 * crudest way possible, as it is more stable to manually version the types
 * internally than try to re-export them.
 */
/**
 * OpenAI API
 * APIs for sampling from and fine-tuning language models
@@ -14,2 +21,205 @@ *
 * @export
 * @interface CreateChatCompletionRequest
 */
export interface CreateChatCompletionRequest {
    /**
     * ID of the model to use. Currently, only `gpt-3.5-turbo` and `gpt-3.5-turbo-0301` are supported.
     * @type {string}
     * @memberof CreateChatCompletionRequest
     */
    'model': string;
    /**
     * The messages to generate chat completions for, in the [chat format](/docs/guides/chat/introduction).
     * @type {Array<ChatCompletionRequestMessage>}
     * @memberof CreateChatCompletionRequest
     */
    'messages': Array<ChatCompletionRequestMessage>;
    /**
     * What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the output more random, while lower values like 0.2 will make it more focused and deterministic. We generally recommend altering this or `top_p` but not both.
     * @type {number}
     * @memberof CreateChatCompletionRequest
     */
    'temperature'?: number | null;
    /**
     * An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. We generally recommend altering this or `temperature` but not both.
     * @type {number}
     * @memberof CreateChatCompletionRequest
     */
    'top_p'?: number | null;
    /**
     * How many chat completion choices to generate for each input message.
     * @type {number}
     * @memberof CreateChatCompletionRequest
     */
    'n'?: number | null;
    /**
     * If set, partial message deltas will be sent, like in ChatGPT. Tokens will be sent as data-only [server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format) as they become available, with the stream terminated by a `data: [DONE]` message.
     * @type {boolean}
     * @memberof CreateChatCompletionRequest
     */
    'stream'?: boolean | null;
    /**
     *
     * @type {CreateChatCompletionRequestStop}
     * @memberof CreateChatCompletionRequest
     */
    'stop'?: CreateChatCompletionRequestStop;
    /**
     * Number between -2.0 and 2.0. Positive values penalize new tokens based on whether they appear in the text so far, increasing the model's likelihood to talk about new topics. [See more information about frequency and presence penalties.](/docs/api-reference/parameter-details)
     * @type {number}
     * @memberof CreateChatCompletionRequest
     */
    'presence_penalty'?: number | null;
    /**
     * Number between -2.0 and 2.0. Positive values penalize new tokens based on their existing frequency in the text so far, decreasing the model's likelihood to repeat the same line verbatim. [See more information about frequency and presence penalties.](/docs/api-reference/parameter-details)
     * @type {number}
     * @memberof CreateChatCompletionRequest
     */
    'frequency_penalty'?: number | null;
    /**
     * Modify the likelihood of specified tokens appearing in the completion. Accepts a json object that maps tokens (specified by their token ID in the tokenizer) to an associated bias value from -100 to 100. Mathematically, the bias is added to the logits generated by the model prior to sampling. The exact effect will vary per model, but values between -1 and 1 should decrease or increase likelihood of selection; values like -100 or 100 should result in a ban or exclusive selection of the relevant token.
     * @type {object}
     * @memberof CreateChatCompletionRequest
     */
    'logit_bias'?: object | null;
    /**
     * A unique identifier representing your end-user, which can help OpenAI to monitor and detect abuse. [Learn more](/docs/guides/safety-best-practices/end-user-ids).
     * @type {string}
     * @memberof CreateChatCompletionRequest
     */
    'user'?: string;
}
/**
 * @type CreateChatCompletionRequestStop
 * Up to 4 sequences where the API will stop generating further tokens.
 * @export
 */
export type CreateChatCompletionRequestStop = Array<string> | string;
/**
 *
 * @export
 * @interface CreateChatCompletionResponse
 */
export interface CreateChatCompletionResponse {
    /**
     *
     * @type {string}
     * @memberof CreateChatCompletionResponse
     */
    'id': string;
    /**
     *
     * @type {string}
     * @memberof CreateChatCompletionResponse
     */
    'object': string;
    /**
     *
     * @type {number}
     * @memberof CreateChatCompletionResponse
     */
    'created': number;
    /**
     *
     * @type {string}
     * @memberof CreateChatCompletionResponse
     */
    'model': string;
    /**
     *
     * @type {Array<CreateChatCompletionResponseChoicesInner>}
     * @memberof CreateChatCompletionResponse
     */
    'choices': Array<CreateChatCompletionResponseChoicesInner>;
    /**
     *
     * @type {CreateCompletionResponseUsage}
     * @memberof CreateChatCompletionResponse
     */
    'usage'?: CreateCompletionResponseUsage;
}
/**
 *
 * @export
 * @interface CreateChatCompletionResponseChoicesInner
 */
export interface CreateChatCompletionResponseChoicesInner {
    /**
     *
     * @type {number}
     * @memberof CreateChatCompletionResponseChoicesInner
     */
    'index'?: number;
    /**
     *
     * @type {ChatCompletionResponseMessage}
     * @memberof CreateChatCompletionResponseChoicesInner
     */
    'message'?: ChatCompletionResponseMessage;
    /**
     *
     * @type {string}
     * @memberof CreateChatCompletionResponseChoicesInner
     */
    'finish_reason'?: string;
}
/**
 *
 * @export
 * @interface ChatCompletionRequestMessage
 */
export interface ChatCompletionRequestMessage {
    /**
     * The role of the author of this message.
     * @type {string}
     * @memberof ChatCompletionRequestMessage
     */
    'role': ChatCompletionRequestMessageRoleEnum;
    /**
     * The contents of the message
     * @type {string}
     * @memberof ChatCompletionRequestMessage
     */
    'content': string;
    /**
     * The name of the user in a multi-user chat
     * @type {string}
     * @memberof ChatCompletionRequestMessage
     */
    'name'?: string;
}
export declare const ChatCompletionRequestMessageRoleEnum: {
    readonly System: "system";
    readonly User: "user";
    readonly Assistant: "assistant";
};
export type ChatCompletionRequestMessageRoleEnum = typeof ChatCompletionRequestMessageRoleEnum[keyof typeof ChatCompletionRequestMessageRoleEnum];
/**
 *
 * @export
 * @interface ChatCompletionResponseMessage
 */
export interface ChatCompletionResponseMessage {
    /**
     * The role of the author of this message.
     * @type {string}
     * @memberof ChatCompletionResponseMessage
     */
    'role': ChatCompletionResponseMessageRoleEnum;
    /**
     * The contents of the message
     * @type {string}
     * @memberof ChatCompletionResponseMessage
     */
    'content': string;
}
export declare const ChatCompletionResponseMessageRoleEnum: {
    readonly System: "system";
    readonly User: "user";
    readonly Assistant: "assistant";
};
export type ChatCompletionResponseMessageRoleEnum = typeof ChatCompletionResponseMessageRoleEnum[keyof typeof ChatCompletionResponseMessageRoleEnum];
/**
 *
 * @export
 * @interface CreateAnswerRequest
@@ -16,0 +226,0 @@ */
@@ -13,1 +13,5 @@ export type OpenAIStream = (stream: ReadableStream<Uint8Array>) => ReadableStream<Uint8Array>;
export declare const TokenStream: OpenAIStream;
/**
 * A `ReadableStream` of parsed deltas from the given ChatGPT stream.
 */
export declare const ChatStream: OpenAIStream;
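As a usage sketch: given the `OpenAIStream` signature above, `ChatStream` wraps a raw `fetch` body from the chat endpoint and yields encoded delta chunks. The import path is an assumption (the declaration lives in an internal streaming module; a root re-export is assumed), and the reader loop is just one way to consume it.

```ts
// Minimal sketch, assuming ChatStream is re-exported from the package root.
import { ChatStream } from "openai-streams";

const response = await fetch("https://api.openai.com/v1/chat/completions", {
  method: "POST",
  headers: {
    Authorization: `Bearer ${process.env.OPENAI_API_KEY}`,
    "Content-Type": "application/json",
  },
  body: JSON.stringify({
    model: "gpt-3.5-turbo",
    stream: true,
    messages: [{ role: "user", content: "Say hello." }],
  }),
});
if (!response.body) throw new Error("No response body");

// Each chunk is one serialized delta, e.g. {"role":"assistant"} or {"content":"Hi"}.
const reader = ChatStream(response.body).getReader();
const decoder = new TextDecoder();
while (true) {
  const { done, value } = await reader.read();
  if (done) break;
  console.log(decoder.decode(value));
}
```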
@@ -1,1 +0,1 @@
import{ENCODER as c,DECODER as m}from"../../globs/shared.js";import{TokenParser as p}from"./transforms.js";import{createParser as f}from"eventsource-parser";import{pipeline as S,yieldStream as d}from"yield-stream";import{OpenAIError as O}from"../errors.js";const h=t=>new ReadableStream({async start(a){const n=f(e=>{if(e.type==="event"){const{data:o}=e;if(o==="[DONE]"){a.close();return}try{a.enqueue(c.encode(o));const r=JSON.parse(o);if(r?.choices){const{choices:i}=r;for(const s of i)if(s?.finish_reason==="length")throw new O("MAX_TOKENS")}}catch(r){a.error(r)}}});for await(const e of d(t))n.feed(m.decode(e))}}),w=t=>S(h(t),p);export{h as EventStream,w as TokenStream};
import{ENCODER as p,DECODER as f}from"../../globs/shared.js";import{ChatParser as S,TokenParser as d}from"./transforms.js";import{createParser as h}from"eventsource-parser";import{pipeline as n,yieldStream as O}from"yield-stream";import{OpenAIError as A}from"../errors.js";const s=e=>new ReadableStream({async start(a){const m=h(r=>{if(r.type==="event"){const{data:o}=r;if(o==="[DONE]"){a.close();return}try{a.enqueue(p.encode(o));const t=JSON.parse(o);if(t?.choices){const{choices:c}=t;for(const i of c)if(i?.finish_reason==="length")throw new A("MAX_TOKENS")}}catch(t){a.error(t)}}});for await(const r of O(e))m.feed(f.decode(r))}}),w=e=>n(s(e),d),x=e=>n(s(e),S);export{x as ChatStream,s as EventStream,w as TokenStream};
import { Transform } from "yield-stream";
/**
 * Parse the first text choice from an OpenAI API response. It may be stored on
 * `result.choices[0].text` or, now, on `result.choices[0].message` for Chat.
 */
export declare const getTokensFromResponse: (response: any) => string;
/**
 * A transformer that receives chunks of parsed server sent events from OpenAI
 * and yields the delta of the first choice.
 */
export declare const ChatParser: Transform;
/**
 * A transformer that receives chunks of parsed server sent events from OpenAI
 * and yields the text of the first choice.
@@ -5,0 +15,0 @@ */
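To make the `getTokensFromResponse` comment concrete, a small hedged example; the import path is assumed, and the response literal mimics a minimal non-stream `completions` payload:

```ts
import { getTokensFromResponse } from "openai-streams"; // import path assumed

// Returns the first choice's text; throws if no choices are present.
const text = getTokensFromResponse({
  choices: [{ text: "Bonjour le monde !" }],
});
console.log(text); // "Bonjour le monde !"
```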
@@ -1,1 +0,1 @@
import{ENCODER as t,DECODER as d}from"../../globs/shared.js";const f=async function*(o){const n=d.decode(o),s=JSON.parse(n),{text:e}=s?.choices?.[0];!e||(yield t.encode(e))},m=async function*(o){const n=new TextEncoder,e=new TextDecoder().decode(o),c=JSON.parse(e),{logprobs:r}=c?.choices?.[0];!r||(yield n.encode(JSON.stringify(r)))};export{m as LogprobsParser,f as TokenParser};
import{ENCODER as c,DECODER as i}from"../../globs/shared.js";const d=o=>{const e=o?.choices?.[0];if(!e)throw new Error("No choices received from OpenAI");const s=e?.text??e?.message;if(typeof s!="string")throw new Error("No text response received from OpenAI");return s},m=async function*(o){const e=i.decode(o),n=JSON.parse(e)?.choices?.[0]?.delta;if(typeof n!="object")throw new Error("Received invalid delta from OpenAI in ChatParser.");const t=JSON.stringify(n);yield c.encode(t)},E=async function*(o){const e=i.decode(o),s=JSON.parse(e),r=d(s);yield c.encode(r)},O=async function*(o){const e=new TextEncoder,r=new TextDecoder().decode(o),n=JSON.parse(r),{logprobs:t}=n?.choices?.[0];!t||(yield e.encode(JSON.stringify(t)))};export{m as ChatParser,O as LogprobsParser,E as TokenParser,d as getTokensFromResponse};
/// <reference types="node" />
import type { CreateCompletionRequest, CreateEditRequest, CreateEmbeddingRequest, CreateFineTuneRequest, CreateImageRequest } from "./pinned";
import type { CreateChatCompletionRequest, CreateCompletionRequest, CreateEditRequest, CreateEmbeddingRequest, CreateFineTuneRequest, CreateImageRequest } from "./pinned";
export type StreamMode = "raw" | "tokens";
export type OpenAIAPIEndpoint = "completions" | "edits" | "embeddings" | "images" | "fine-tunes";
export type OpenAICreateArgs<T extends OpenAIAPIEndpoint> = T extends "completions" ? Omit<CreateCompletionRequest, "stream"> : T extends "edits" ? CreateEditRequest : T extends "embeddings" ? CreateEmbeddingRequest : T extends "images" ? CreateImageRequest : T extends "fine-tunes" ? CreateFineTuneRequest : never;
export declare const OpenAIAPIEndpoints: {
    readonly chat: "chat/completions";
    readonly completions: "completions";
    readonly edits: "edits";
    readonly embeddings: "embeddings";
    readonly images: "images";
    readonly "fine-tunes": "fine-tunes";
};
export type OpenAIAPIEndpoint = keyof typeof OpenAIAPIEndpoints;
export type OpenAICreateArgs<T extends OpenAIAPIEndpoint> = T extends "completions" ? Omit<CreateCompletionRequest, "stream"> : T extends "edits" ? CreateEditRequest : T extends "embeddings" ? CreateEmbeddingRequest : T extends "images" ? CreateImageRequest : T extends "fine-tunes" ? CreateFineTuneRequest : T extends "chat" ? CreateChatCompletionRequest : never;
export type OpenAIOptions = {
@@ -7,0 +15,0 @@ /**
@@ -1,1 +0,1 @@
export*from"./pinned.js";
const e={chat:"chat/completions",completions:"completions",edits:"edits",embeddings:"embeddings",images:"images","fine-tunes":"fine-tunes"};export*from"./pinned.js";export{e as OpenAIAPIEndpoints};
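The new `OpenAIAPIEndpoints` map is what lets the `chat` endpoint key resolve to the `/v1/chat/completions` URL path, as a quick sketch shows (import path assumed):

```ts
import { OpenAIAPIEndpoints, type OpenAIAPIEndpoint } from "openai-streams"; // path assumed

const endpoint: OpenAIAPIEndpoint = "chat";
const url = `https://api.openai.com/v1/${OpenAIAPIEndpoints[endpoint]}`;
// => "https://api.openai.com/v1/chat/completions"
```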
{
  "name": "openai-streams",
  "description": "Tools for working with OpenAI streams in Node.js and TypeScript.",
  "version": "3.1.2",
  "version": "4.0.0",
  "license": "MIT",
@@ -47,2 +47,3 @@ "type": "module",
    "eventsource-parser": "^0.1.0",
    "openai": "^3.2.0",
    "yield-stream": "^2.0.4"
@@ -49,0 +50,0 @@ },
@@ -7,2 +7,4 @@ # OpenAI Streams
> Now with ChatGPT API support (Whisper coming soon)! See [Use with ChatGPT API](#use-with-chatgpt-api) below.
This library returns OpenAI API responses as streams only. Non-stream endpoints
@@ -55,2 +57,5 @@ like `edits` etc. are simply a stream with only one chunk update.
This will also work in the browser, but you'll need users to paste their OpenAI
key and pass it in via the `{ apiKey }` option (see the sketch below).
```ts
@@ -101,3 +106,2 @@ import { OpenAI } from "openai-streams";
<sub>See the example in
@@ -109,2 +113,38 @@ [`example/src/pages/api/hello.ts`](https://github.com/gptlabs/openai-streams/blob/master/src/pages/api/hello.ts).</sub>
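A hedged sketch of the browser flow mentioned above, passing a user-supplied key via the `{ apiKey }` option; the model name and the key plumbing are illustrative only:

```ts
import { OpenAI } from "openai-streams";

// In a real app this would come from your own UI, e.g. a password input.
declare const userProvidedKey: string;

const stream = await OpenAI(
  "completions",
  { model: "text-davinci-003", prompt: "Write a haiku about streams." }, // model illustrative
  { apiKey: userProvidedKey }
);
```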
#### Use with ChatGPT API
By default, with `mode = "tokens"`, you will receive just the message deltas.
For full events, use `mode = "raw"`.
See: https://platform.openai.com/docs/guides/chat/introduction
```ts
const stream = await OpenAI(
  "chat",
  {
    model: "gpt-3.5-turbo",
    messages: [
      { "role": "system", "content": "You are a helpful assistant that translates English to French." },
      { "role": "user", "content": "Translate the following English text to French: \"Hello world!\"" }
    ],
  },
);
```
In both modes, for Chat, you will receive a stream of serialized JSON objects.
Even in `mode = "tokens"`, you will need to parse the deltas, because they
sometimes indicate a role and sometimes carry part of the message body. The
stream chunks look like:
```
{"role":"assistant"}
{"content":"\""}
{"content":"Bonjour"}
{"content":" le"}
{"content":" monde"}
{"content":" !\""}
{}
```
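Continuing from the `stream` created in the chat example above, here is a minimal sketch of that parsing step, assuming each chunk carries exactly one serialized delta as in the listing:

```ts
const decoder = new TextDecoder();
const reader = stream.getReader();

let role = "";
let message = "";
while (true) {
  const { done, value } = await reader.read();
  if (done) break;
  // Deltas are small JSON objects: {"role":"assistant"}, {"content":"..."}, or {}.
  const delta = JSON.parse(decoder.decode(value));
  if (delta.role) role = delta.role;
  if (delta.content) message += delta.content;
}
console.log(role, message); // assistant "Bonjour le monde !"
```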
### Notes
@@ -111,0 +151,0 @@
+ Added openai@^3.2.0
+ Added asynckit@0.4.0 (transitive)
+ Added axios@0.26.1 (transitive)
+ Added combined-stream@1.0.8 (transitive)
+ Added delayed-stream@1.0.0 (transitive)
+ Added follow-redirects@1.15.9 (transitive)
+ Added form-data@4.0.1 (transitive)
+ Added mime-db@1.52.0 (transitive)
+ Added mime-types@2.1.35 (transitive)
+ Added openai@3.3.0 (transitive)