llm-prompt-stream
Advanced tools
Comparing versions
@@ -1,2 +0,18 @@ | ||
export * from "./utils"; | ||
//# sourceMappingURL=index.d.ts.map | ||
import * as openai_streaming from 'openai/streaming'; | ||
import OpenAI from 'openai'; | ||
declare const streamPrompt: (completion: any) => ReadableStream<any> | undefined; | ||
declare const setUpCompletionForStream: (OPENAI_API_KEY: string, messages?: any) => Promise<openai_streaming.Stream<OpenAI.Chat.Completions.ChatCompletionChunk> & { | ||
_request_id?: string | null; | ||
}>; | ||
declare function parseMarkdownToCompletions(markdown: string): Generator<{ | ||
choices: { | ||
delta: { | ||
content: string; | ||
}; | ||
}[]; | ||
}, void, unknown>; | ||
declare function readStream(stream: ReadableStream, createFile?: boolean, outputFilename?: string): Promise<string>; | ||
declare const createCompletionAndStream: (openAIKey: string, messages: any) => Promise<ReadableStream<any> | undefined>; | ||
export { createCompletionAndStream, parseMarkdownToCompletions, readStream, setUpCompletionForStream, streamPrompt }; |
@@ -1,2 +0,102 @@ | ||
export * from "./utils"; | ||
//# sourceMappingURL=index.js.map | ||
// src/utils.ts | ||
import OpenAI from "openai"; | ||
import fs from "fs"; | ||
import path from "path"; | ||
// Wraps an async-iterable OpenAI completion in a web ReadableStream of
// UTF-8-encoded text. Incoming deltas are buffered and re-split on
// markdown-ish boundaries (newlines, #-headings, list markers) so consumers
// receive whole segments; a trailing segment without a newline is held back
// until more content arrives. A final "event: done\n" sentinel is enqueued
// before the stream closes.
//
// @param completion  Async iterable of chunks shaped like
//                    { choices: [{ delta: { content?: string } }] }.
// @returns           A ReadableStream<Uint8Array>, or undefined if stream
//                    construction itself throws.
var streamPrompt = (completion) => {
  let buffer = "";
  try {
    const stream = new ReadableStream({
      async start(controller) {
        const encoder = new TextEncoder();
        try {
          for await (const chunk of completion) {
            const content = chunk.choices[0]?.delta?.content;
            if (!content) continue;
            buffer += content;
            // Lookahead split keeps each delimiter attached to the
            // segment that follows it.
            const lines = buffer.split(/(?=\n|^#{1,4}|\s-\s|\n\s\*\s|\n\d+\.\s)/);
            buffer = "";
            lines.forEach((line, index) => {
              if (index === lines.length - 1 && !line.endsWith("\n")) {
                // Possibly incomplete segment — hold it for the next chunk.
                buffer = line;
              } else {
                controller.enqueue(encoder.encode(line));
              }
            });
          }
          if (buffer) {
            controller.enqueue(encoder.encode(buffer));
          }
          controller.enqueue(encoder.encode("event: done\n"));
          controller.close();
        } catch (err) {
          // BUG FIX: the original's outer try/catch could never observe
          // errors thrown while iterating `completion` — start() runs after
          // streamPrompt has already returned — so a failed upstream left
          // the stream open forever. Propagate the failure to consumers.
          controller.error(err);
        }
      }
    });
    return stream;
  } catch (e) {
    // Preserve original behavior: log construction failures and
    // return undefined.
    console.log(e);
  }
};
// Opens a streaming chat-completion request against the OpenAI API.
//
// Generalized (backward-compatibly): the model is now an optional third
// parameter instead of a hard-coded constant; omitting it keeps the
// original "gpt-4o-mini" behavior.
//
// @param OPENAI_API_KEY  API key used to construct the client.
// @param messages        Chat messages array (defaults to empty).
// @param model           Model identifier (defaults to "gpt-4o-mini").
// @returns               The streaming completion (async iterable of chunks).
var setUpCompletionForStream = async (OPENAI_API_KEY, messages = [], model = "gpt-4o-mini") => {
  const openai = new OpenAI({ apiKey: OPENAI_API_KEY });
  const completion = await openai.chat.completions.create({
    stream: true,
    messages,
    model
  });
  return completion;
};
// Replays a markdown document as synthetic OpenAI-style completion chunks:
// one chunk per non-blank line (newline re-appended), followed by a single
// empty-content chunk as an end marker. Useful for testing stream consumers
// without a live API.
function* parseMarkdownToCompletions(markdown) {
  for (const line of markdown.split("\n")) {
    if (!line.trim()) continue; // skip blank / whitespace-only lines
    yield { choices: [{ delta: { content: `${line}\n` } }] };
  }
  // Terminal empty delta, mirroring how a real stream winds down.
  yield { choices: [{ delta: { content: "" } }] };
}
// Drains a web ReadableStream of UTF-8 bytes into a single string,
// optionally mirroring the decoded text to a file on disk.
//
// @param stream          ReadableStream to consume (e.g. from streamPrompt).
// @param createFile      When true, also write the text to outputFilename.
// @param outputFilename  Destination path, resolved against the CWD.
// @returns               The full decoded text.
// @throws                Error when `stream` is not a ReadableStream.
async function readStream(stream, createFile = false, outputFilename = "response.md") {
  if (!stream || !(stream instanceof ReadableStream)) {
    throw new Error("\u274C Invalid stream provided to readStream.");
  }
  const reader = stream.getReader();
  const decoder = new TextDecoder();
  let fullResponse = "";
  let fileStream = null;
  if (createFile) {
    fileStream = fs.createWriteStream(path.resolve(outputFilename), { encoding: "utf-8" });
  }
  try {
    while (true) {
      const { value, done } = await reader.read();
      if (done) break;
      if (value) {
        const chunk = decoder.decode(value, { stream: true });
        fullResponse += chunk;
        if (fileStream) {
          fileStream.write(chunk);
        }
      }
    }
    // BUG FIX: flush the decoder — without a final decode() call, a
    // multi-byte character split across the last chunk boundary was
    // silently dropped.
    const tail = decoder.decode();
    if (tail) {
      fullResponse += tail;
      if (fileStream) {
        fileStream.write(tail);
      }
    }
  } finally {
    // BUG FIX: the original leaked the reader lock and left the file
    // handle open whenever reader.read() rejected mid-stream.
    reader.releaseLock();
    if (fileStream) {
      fileStream.end();
    }
  }
  return fullResponse;
}
var createCompletionAndStream = async (openAIKey, messages) => { | ||
const completion = await setUpCompletionForStream(openAIKey, messages); | ||
return streamPrompt(completion); | ||
}; | ||
export { | ||
createCompletionAndStream, | ||
parseMarkdownToCompletions, | ||
readStream, | ||
setUpCompletionForStream, | ||
streamPrompt | ||
}; |
{ | ||
"name": "llm-prompt-stream", | ||
"version": "1.0.21", | ||
"version": "1.0.22", | ||
"description": "", | ||
@@ -5,0 +5,0 @@ "scripts": { |
Filesystem access
Supply chain risk: Accesses the file system, and could potentially read sensitive data.
Found 1 instance in 1 package
31886
43.29%36
5.88%686
59.16%5
66.67%