Huge News! Announcing our $40M Series B led by Abstract Ventures. Learn More
Socket
Sign inDemoInstall
Socket

openai-streams

Package Overview
Dependencies
Maintainers
1
Versions
105
Alerts
File Explorer

Advanced tools

Socket logo

Install Socket

Detect and block malicious and high-risk dependencies

Install

openai-streams - npm Package Compare versions

Comparing version 5.2.0 to 5.2.1-canary.0

2

dist/lib/openai/edge.js

@@ -1,1 +0,1 @@

import { streamArray } from "yield-stream";
import { ENCODER } from "../../globs/shared.js";
import { OpenAIError } from "../errors.js";
import {
  ChatStream,
  EventStream,
  getTokensFromResponse,
  TokenStream,
} from "../streaming/index.js";
import { OpenAIAPIEndpoints } from "../types.js";

/**
 * Call an OpenAI API endpoint and return a stream of results.
 *
 * For the "completions" and "chat" endpoints the request is made with
 * `stream: true` and the SSE body is adapted by the streaming helpers;
 * for every other endpoint the full JSON response is fetched and then
 * re-emitted as a one-shot stream.
 *
 * @param endpoint   Key into OpenAIAPIEndpoints (e.g. "chat", "completions").
 * @param args       Request body for the endpoint (sans `stream`).
 * @param options    `mode` ("tokens" | "raw"), `apiKey` (defaults to
 *                   OPENAI_API_KEY), and an optional AbortController.
 * @throws OpenAIError on missing key, auth/model/rate-limit/server errors,
 *                     unknown mode, or a missing response body.
 */
const OpenAI = async (
  endpoint,
  args,
  {
    mode = "tokens",
    apiKey = process.env.OPENAI_API_KEY,
    controller,
  } = {}
) => {
  if (!apiKey) {
    throw new OpenAIError("NO_API_KEY");
  }

  // Only completions and chat support server-side streaming.
  const isStreaming = endpoint === "completions" || endpoint === "chat";
  const path = OpenAIAPIEndpoints[endpoint];

  const response = await fetch(`https://api.openai.com/v1/${path}`, {
    method: "POST",
    body: JSON.stringify({
      ...args,
      // Omit the key entirely for non-streaming endpoints.
      stream: isStreaming ? true : undefined,
    }),
    headers: {
      Authorization: `Bearer ${apiKey}`,
      "Content-Type": "application/json",
      Accept: "application/json",
    },
    signal: controller?.signal,
  });

  // Map well-known HTTP failures onto typed errors.
  switch (response.status) {
    case 401:
      throw new OpenAIError("INVALID_API_KEY");
    case 404:
      throw new OpenAIError("INVALID_MODEL");
    case 429:
      throw new OpenAIError("RATE_LIMIT_REACHED");
    case 500:
      throw new OpenAIError("SERVER_ERROR");
    default:
      if (!response.body) {
        throw new OpenAIError("UNKNOWN");
      }
  }

  let outputStream;
  const streamOptions = { mode };

  if (isStreaming) {
    switch (mode) {
      case "raw":
        outputStream = EventStream(response.body, streamOptions);
        break;
      case "tokens":
        // Chat completions use a different wire format than completions.
        switch (endpoint) {
          case "chat":
            outputStream = ChatStream(response.body, streamOptions);
            break;
          default:
            outputStream = TokenStream(response.body, streamOptions);
            break;
        }
        break;
      default:
        console.error(`Unknown mode: ${mode} for streaming response.`);
        throw new OpenAIError("UNKNOWN");
    }
  } else {
    // Non-streaming: buffer the whole response, then emit it as a stream.
    const text = await response.text();
    switch (mode) {
      case "tokens": {
        const parsed = JSON.parse(text);
        const tokens = getTokensFromResponse(parsed);
        if (typeof tokens !== "string") {
          console.error("No text choices received from OpenAI: " + text);
          outputStream = streamArray([]);
          break;
        }
        outputStream = streamArray([ENCODER.encode(tokens)]);
        break;
      }
      case "raw":
        outputStream = streamArray([ENCODER.encode(text)]);
        break;
      default:
        console.error(`Unknown mode: ${mode} for non-streaming response.`);
        throw new OpenAIError("UNKNOWN");
    }
  }

  return outputStream;
};

export { OpenAI };
import { streamArray } from "yield-stream";
import { ENCODER } from "../../globs/shared.js";
import { OpenAIError } from "../errors.js";
import {
  ChatStream,
  EventStream,
  getTokensFromResponse,
  TokenStream,
} from "../streaming/index.js";
import { OpenAIAPIEndpoints } from "../types.js";

/**
 * Call an OpenAI-compatible API endpoint and return a stream of results.
 *
 * For the "completions" and "chat" endpoints the request is made with
 * `stream: true` and the SSE body is adapted by the streaming helpers;
 * for every other endpoint the full JSON response is fetched and then
 * re-emitted as a one-shot stream.
 *
 * @param endpoint   Key into OpenAIAPIEndpoints (e.g. "chat", "completions").
 * @param args       Request body for the endpoint (sans `stream`).
 * @param options    `mode` ("tokens" | "raw"), `apiBase` (defaults to the
 *                   official OpenAI host, overridable for proxies), `apiKey`
 *                   (defaults to OPENAI_API_KEY), `apiHeaders` (extra headers
 *                   merged over the defaults), and an optional AbortController.
 * @throws OpenAIError on missing key, auth/model/rate-limit/server errors,
 *                     unknown mode, or a missing response body.
 */
const OpenAI = async (
  endpoint,
  args,
  {
    mode = "tokens",
    apiBase = "https://api.openai.com/v1",
    apiKey = process.env.OPENAI_API_KEY,
    apiHeaders = {},
    controller,
  } = {}
) => {
  if (!apiKey) {
    throw new OpenAIError("NO_API_KEY");
  }

  // Only completions and chat support server-side streaming.
  const isStreaming = endpoint === "completions" || endpoint === "chat";
  const path = OpenAIAPIEndpoints[endpoint];

  const response = await fetch(`${apiBase}/${path}`, {
    method: "POST",
    body: JSON.stringify({
      ...args,
      // Omit the key entirely for non-streaming endpoints.
      stream: isStreaming ? true : undefined,
    }),
    headers: {
      Authorization: `Bearer ${apiKey}`,
      "Content-Type": "application/json",
      Accept: "application/json",
      // Caller-supplied headers win over the defaults above.
      ...apiHeaders,
    },
    signal: controller?.signal,
  });

  // Map well-known HTTP failures onto typed errors.
  switch (response.status) {
    case 401:
      throw new OpenAIError("INVALID_API_KEY");
    case 404:
      throw new OpenAIError("INVALID_MODEL");
    case 429:
      throw new OpenAIError("RATE_LIMIT_REACHED");
    case 500:
      throw new OpenAIError("SERVER_ERROR");
    default:
      if (!response.body) {
        throw new OpenAIError("UNKNOWN");
      }
  }

  let outputStream;
  const streamOptions = { mode };

  if (isStreaming) {
    switch (mode) {
      case "raw":
        outputStream = EventStream(response.body, streamOptions);
        break;
      case "tokens":
        // Chat completions use a different wire format than completions.
        switch (endpoint) {
          case "chat":
            outputStream = ChatStream(response.body, streamOptions);
            break;
          default:
            outputStream = TokenStream(response.body, streamOptions);
            break;
        }
        break;
      default:
        console.error(`Unknown mode: ${mode} for streaming response.`);
        throw new OpenAIError("UNKNOWN");
    }
  } else {
    // Non-streaming: buffer the whole response, then emit it as a stream.
    const text = await response.text();
    switch (mode) {
      case "tokens": {
        const parsed = JSON.parse(text);
        const tokens = getTokensFromResponse(parsed);
        if (typeof tokens !== "string") {
          console.error("No text choices received from OpenAI: " + text);
          outputStream = streamArray([]);
          break;
        }
        outputStream = streamArray([ENCODER.encode(tokens)]);
        break;
      }
      case "raw":
        outputStream = streamArray([ENCODER.encode(text)]);
        break;
      default:
        console.error(`Unknown mode: ${mode} for non-streaming response.`);
        throw new OpenAIError("UNKNOWN");
    }
  }

  return outputStream;
};

export { OpenAI };

@@ -13,5 +13,10 @@ /// <reference types="node" />

export type OpenAIAPIEndpoint = keyof typeof OpenAIAPIEndpoints;
export type OpenAICreateArgs<T extends OpenAIAPIEndpoint> = (T extends "completions" ? Omit<CreateCompletionRequest, "stream"> : T extends "edits" ? CreateEditRequest : T extends "embeddings" ? CreateEmbeddingRequest : T extends "images" ? CreateImageRequest : T extends "fine-tunes" ? CreateFineTuneRequest : T extends "chat" ? CreateChatCompletionRequest : never);
export type OpenAICreateArgs<T extends OpenAIAPIEndpoint> = T extends "completions" ? Omit<CreateCompletionRequest, "stream"> : T extends "edits" ? CreateEditRequest : T extends "embeddings" ? CreateEmbeddingRequest : T extends "images" ? CreateImageRequest : T extends "fine-tunes" ? CreateFineTuneRequest : T extends "chat" ? CreateChatCompletionRequest : never;
export type OpenAIOptions = {
/**
 * By default, the API base is https://api.openai.com/v1, corresponding
 * to OpenAI's API. You can override this to use a different provider or proxy.
*/
apiBase?: string;
/**
* By default, the API key is read from the OPENAI_API_KEY environment

@@ -22,2 +27,7 @@ * variable. You can override this by passing a different key here.

/**
 * Additional headers to pass to the API. This is useful if you want to
 * pass additional parameters to a proxy service, for instance.
*/
apiHeaders?: Record<string, string>;
/**
* Whether to return tokens or raw events.

@@ -35,4 +45,4 @@ */

*/
export type OpenAIEdgeClient = (<T extends OpenAIAPIEndpoint>(endpoint: T, args: OpenAICreateArgs<T>, options?: OpenAIOptions) => Promise<ReadableStream<Uint8Array>>);
export type OpenAIEdgeClient = <T extends OpenAIAPIEndpoint>(endpoint: T, args: OpenAICreateArgs<T>, options?: OpenAIOptions) => Promise<ReadableStream<Uint8Array>>;
export type OpenAINodeClient = <T extends OpenAIAPIEndpoint>(endpoint: T, args: OpenAICreateArgs<T>, options?: OpenAIOptions) => Promise<NodeJS.ReadableStream>;
export * from "./pinned";
{
"author": "Spellcraft Inc. <lewis@spellcraft.org>",
"name": "openai-streams",
"description": "Tools for working with OpenAI streams in Node.js and TypeScript.",
"homepage": "https://github.com/SpellcraftAI/openai-streams",
"version": "5.2.0",
"version": "5.2.1-canary.0",
"license": "MIT",

@@ -54,3 +55,4 @@ "type": "module",

"streams",
"gpt-3"
"gpt-3",
"gpt-4"
],

@@ -57,0 +59,0 @@ "release-it": {

@@ -37,5 +37,5 @@ # OpenAI Streams

PARAMS,
/** apiKey, mode, controller, etc */
/** apiBase, apiKey, mode, controller, etc */
OPTIONS
)
);
```

@@ -54,6 +54,8 @@

await OpenAI(
"completions",
{/* endpoint params */},
"completions",
{
/* endpoint params */
},
{ apiKey: process.env.MY_SECRET_API_KEY }
)
);
```

@@ -71,8 +73,12 @@

await OpenAI(
"chat",
{ messages: [/* ... */] },
{ mode: "raw" }
)
"chat",
{
messages: [
/* ... */
],
},
{ mode: "raw" }
);
```
#### Edge/Browser: Consuming streams in Next.js Edge functions

@@ -87,10 +93,7 @@

export default async function handler() {
const stream = await OpenAI(
"completions",
{
model: "text-davinci-003",
prompt: "Write a happy sentence.\n\n",
max_tokens: 100
}
);
const stream = await OpenAI("completions", {
model: "text-davinci-003",
prompt: "Write a happy sentence.\n\n",
max_tokens: 100,
});

@@ -101,7 +104,6 @@ return new Response(stream);

export const config = {
runtime: "edge"
runtime: "edge",
};
```
#### Node: Consuming streams in Next.js API Route (Node)

@@ -116,11 +118,8 @@

export default async function test (_: NextApiRequest, res: NextApiResponse) {
const stream = await OpenAI(
"completions",
{
model: "text-davinci-003",
prompt: "Write a happy sentence.\n\n",
max_tokens: 25
}
);
export default async function test(_: NextApiRequest, res: NextApiResponse) {
const stream = await OpenAI("completions", {
model: "text-davinci-003",
prompt: "Write a happy sentence.\n\n",
max_tokens: 25,
});

@@ -143,15 +142,17 @@ stream.pipe(res);

```ts
const stream = await OpenAI(
"chat",
{
model: "gpt-3.5-turbo",
messages: [
{ "role": "system", "content": "You are a helpful assistant that translates English to French." },
{ "role": "user", "content": "Translate the following English text to French: \"Hello world!\"" }
],
},
);
const stream = await OpenAI("chat", {
model: "gpt-3.5-turbo",
messages: [
{
role: "system",
content: "You are a helpful assistant that translates English to French.",
},
{
role: "user",
content: 'Translate the following English text to French: "Hello world!"',
},
],
});
```
In `tokens` mode, you will just receive the response chunks, which look like this

@@ -177,3 +178,3 @@ (separated with newlines for illustration):

1. Internally, streams are often manipulated using generators via `for await
(const chunk of yieldStream(stream)) { ... }`. We recommend following this
(const chunk of yieldStream(stream)) { ... }`. We recommend following this
pattern if you find it intuitive.
SocketSocket SOC 2 Logo

Product

  • Package Alerts
  • Integrations
  • Docs
  • Pricing
  • FAQ
  • Roadmap
  • Changelog

Packages

npm

Stay in touch

Get open source security insights delivered straight into your inbox.


  • Terms
  • Privacy
  • Security

Made with ⚡️ by Socket Inc