@markprompt/core
Comparing version 0.7.0 to 0.8.0
index.d.ts
@@ -0,4 +1,5 @@
+export { submitFeedback, type SubmitFeedbackOptions, type SubmitFeedbackBody, DEFAULT_SUBMIT_FEEDBACK_OPTIONS, } from './feedback.js';
 export { submitPrompt, type SubmitPromptOptions, DEFAULT_SUBMIT_PROMPT_OPTIONS, STREAM_SEPARATOR, } from './prompt.js';
 export { submitSearchQuery, type SubmitSearchQueryOptions, DEFAULT_SUBMIT_SEARCH_QUERY_OPTIONS, } from './search.js';
-export { type OpenAIModelId, type OpenAIChatCompletionsModelId, type OpenAICompletionsModelId, type OpenAIEmbeddingsModelId, type SearchResult, type SearchResultSection, type SearchResultsResponse, type Source, type SourceType, } from './types.js';
+export { type FileSectionReference, type OpenAIModelId, type OpenAIChatCompletionsModelId, type OpenAICompletionsModelId, type OpenAIEmbeddingsModelId, type SearchResult, type SearchResultSection, type SearchResultsResponse, type Source, type SourceType, } from './types.js';
 //# sourceMappingURL=index.d.ts.map
index.js
@@ -0,1 +1,2 @@
+export { submitFeedback, DEFAULT_SUBMIT_FEEDBACK_OPTIONS, } from './feedback.js';
 export { submitPrompt, DEFAULT_SUBMIT_PROMPT_OPTIONS, STREAM_SEPARATOR, } from './prompt.js';
@@ -2,0 +3,0 @@ export { submitSearchQuery, DEFAULT_SUBMIT_SEARCH_QUERY_OPTIONS, } from './search.js';
prompt.test.ts
@@ -11,3 +11,3 @@ import { rest } from 'msw';
 let stream;
-const server = setupServer(rest.post(DEFAULT_SUBMIT_PROMPT_OPTIONS.completionsUrl, async (req, res, ctx) => {
+const server = setupServer(rest.post(DEFAULT_SUBMIT_PROMPT_OPTIONS.apiUrl, async (req, res, ctx) => {
     request = req;
@@ -66,5 +66,2 @@ stream = new ReadableStream({
     ]);
-    expect(onReferences.mock.calls).toStrictEqual([
-        [['https://calculator.example']],
-    ]);
     expect(onError).not.toHaveBeenCalled();
@@ -88,5 +85,2 @@ });
     ]);
-    expect(onReferences.mock.calls).toStrictEqual([
-        [['https://calculator.example']],
-    ]);
     expect(onError).not.toHaveBeenCalled();
@@ -93,0 +87,0 @@ });
prompt.d.ts
@@ -1,2 +0,2 @@
-import type { OpenAIModelId } from './types.js';
+import type { FileSectionReference, OpenAIModelId } from './types.js';
 export type SubmitPromptOptions = {
@@ -7,3 +7,3 @@ /**
  * */
-completionsUrl?: string;
+apiUrl?: string;
 /**
@@ -77,3 +77,3 @@ * Message returned when the model does not have an answer
  */
-export declare function submitPrompt(prompt: string, projectKey: string, onAnswerChunk: (answerChunk: string) => boolean | undefined | void, onReferences: (references: string[]) => void, onError: (error: Error) => void, options?: SubmitPromptOptions): Promise<void>;
+export declare function submitPrompt(prompt: string, projectKey: string, onAnswerChunk: (answerChunk: string) => boolean | undefined | void, onReferences: (references: FileSectionReference[]) => void, onError: (error: Error) => void, options?: SubmitPromptOptions, debug?: boolean): Promise<void>;
 //# sourceMappingURL=prompt.d.ts.map
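For orientation, here is a minimal usage sketch of the 0.8.0 `submitPrompt` signature shown above. The project key and prompt are placeholders, and the comment about the callback's return value is an assumption based on its declared `boolean | undefined | void` return type:

```ts
import { submitPrompt, type FileSectionReference } from '@markprompt/core';

let answer = '';

await submitPrompt(
  'How do I self-host the API?', // the user prompt
  'YOUR-PROJECT-KEY',            // placeholder project key
  (chunk: string) => {
    answer += chunk;             // answer chunks arrive via streaming
    return true;                 // assumption: a falsy return stops consumption
  },
  (references: FileSectionReference[]) => {
    // 0.8.0 delivers structured references instead of plain strings.
    console.log(references.map((ref) => ref.file.path));
  },
  (error: Error) => console.error(error),
  { apiUrl: 'https://api.markprompt.com/v1/completions' }, // renamed from completionsUrl
);
```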
prompt.js
@@ -0,4 +1,5 @@
+import { parseEncodedJSONHeader } from './utils.js';
 export const STREAM_SEPARATOR = '___START_RESPONSE_STREAM___';
 export const DEFAULT_SUBMIT_PROMPT_OPTIONS = {
-    completionsUrl: 'https://api.markprompt.com/v1/completions',
+    apiUrl: 'https://api.markprompt.com/v1/completions',
     iDontKnowMessage: 'Sorry, I am not sure how to answer that.',
@@ -25,3 +26,3 @@ model: 'gpt-3.5-turbo',
  */
-export async function submitPrompt(prompt, projectKey, onAnswerChunk, onReferences, onError, options = {}) {
+export async function submitPrompt(prompt, projectKey, onAnswerChunk, onReferences, onError, options = {}, debug) {
     if (!projectKey) {
@@ -34,7 +35,7 @@ throw new Error('A projectKey is required.');
     try {
-        const res = await fetch(options.completionsUrl ?? DEFAULT_SUBMIT_PROMPT_OPTIONS.completionsUrl, {
+        const res = await fetch(options.apiUrl ?? DEFAULT_SUBMIT_PROMPT_OPTIONS.apiUrl, {
             method: 'POST',
-            headers: {
+            headers: new Headers({
                 'Content-Type': 'application/json',
-            },
+            }),
             body: JSON.stringify({
@@ -67,2 +68,11 @@ prompt: prompt,
         }
+        const data = parseEncodedJSONHeader(res, 'x-markprompt-data');
+        const debugInfo = parseEncodedJSONHeader(res, 'x-markprompt-debug-info');
+        if (debug && debugInfo) {
+            // eslint-disable-next-line no-console
+            console.debug(JSON.stringify(debugInfo, null, 2));
+        }
+        if (data?.references) {
+            onReferences(data?.references);
+        }
         const reader = res.body.getReader();
@@ -72,3 +82,3 @@ const decoder = new TextDecoder();
         let startText = '';
-        let didHandleHeader = false;
+        let hasPassedStreamSeparator = false;
         while (!done) {
@@ -78,16 +88,13 @@ const { value, done: doneReading } = await reader.read();
             const chunkValue = decoder.decode(value);
-            if (!didHandleHeader) {
+            if (!hasPassedStreamSeparator) {
                 startText = startText + chunkValue;
+                // For backwards compatibility, we still stream the response
+                // with reference ids first followed by the response, the two
+                // parts being separated by `STREAM_SEPARATOR`.
                 if (startText.includes(STREAM_SEPARATOR)) {
                     const parts = startText.split(STREAM_SEPARATOR);
-                    try {
-                        onReferences(JSON.parse(parts[0]));
-                    }
-                    catch {
-                        // do nothing
-                    }
                     if (parts[1]) {
                         onAnswerChunk(parts[1]);
                     }
-                    didHandleHeader = true;
+                    hasPassedStreamSeparator = true;
                 }
@@ -94,0 +101,0 @@ }
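The new code imports `parseEncodedJSONHeader` from `./utils.js`, which this diff does not show. A plausible sketch of such a helper, under the assumption that the `x-markprompt-data` and `x-markprompt-debug-info` headers carry base64-encoded JSON, might look like this; the real utils.js may differ:

```ts
// Hypothetical sketch of the unshown helper; not the actual implementation.
export function parseEncodedJSONHeader(res: Response, name: string): unknown | undefined {
  try {
    const value = res.headers.get(name);
    if (!value) return undefined;
    // Assumption: the header value is base64-encoded JSON.
    return JSON.parse(atob(value));
  } catch {
    // Fail soft on a missing or malformed header; callers treat the data as optional.
    return undefined;
  }
}
```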
search.d.ts
@@ -12,3 +12,3 @@ import type { SearchResultsResponse } from './types.js';
  **/
-searchUrl?: string;
+apiUrl?: string;
 /**
@@ -15,0 +15,0 @@ * AbortController signal
search.js
@@ -0,4 +1,5 @@
+import { getErrorMessage } from './utils.js';
 export const DEFAULT_SUBMIT_SEARCH_QUERY_OPTIONS = {
     limit: 8,
-    searchUrl: 'https://api.markprompt.com/v1/search',
+    apiUrl: 'https://api.markprompt.com/v1/search',
 };
@@ -13,3 +14,3 @@ /**
 export async function submitSearchQuery(query, projectKey, options) {
-    const { limit = DEFAULT_SUBMIT_SEARCH_QUERY_OPTIONS.limit, searchUrl = DEFAULT_SUBMIT_SEARCH_QUERY_OPTIONS.searchUrl, } = options ?? {};
+    const { limit = DEFAULT_SUBMIT_SEARCH_QUERY_OPTIONS.limit, apiUrl = DEFAULT_SUBMIT_SEARCH_QUERY_OPTIONS.apiUrl, } = options ?? {};
     const params = new URLSearchParams({
@@ -21,11 +22,11 @@ query,
     try {
-        const response = await fetch(`${searchUrl}?${params.toString()}`, {
+        const res = await fetch(`${apiUrl}?${params.toString()}`, {
             method: 'GET',
             signal: options?.signal,
         });
-        if (!response.ok) {
-            const error = (await response.json())?.error;
-            throw new Error(`Failed to fetch search results: ${error || 'Unknown error'}`);
+        if (!res.ok) {
+            const message = await getErrorMessage(res);
+            throw new Error(`Failed to fetch search results: ${message || 'Unknown error'}`);
         }
-        return response.json();
+        return res.json();
     }
@@ -32,0 +33,0 @@ catch (error) {
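Like `parseEncodedJSONHeader`, the `getErrorMessage` helper is imported from `./utils.js` but not shown in this diff. Judging from the inline logic it replaces, `(await response.json())?.error`, a minimal sketch might be:

```ts
// Hypothetical sketch inferred from the removed inline code; the real helper may differ.
export async function getErrorMessage(res: Response): Promise<string | undefined> {
  try {
    // Assumption: error responses are JSON bodies of the shape { error: string }.
    const json = await res.json();
    return json?.error;
  } catch {
    return undefined;
  }
}
```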
types.d.ts
@@ -6,17 +6,6 @@ export type OpenAIChatCompletionsModelId = 'gpt-4' | 'gpt-3.5-turbo';
 export type RequiredKeys<T, K extends keyof T> = Required<Pick<T, K>> & Omit<T, K>;
-export type SearchResultSection = {
-    content?: string;
-    snippet: string;
-    meta?: {
-        leadHeading?: {
-            id?: string;
-            depth: number;
-            value: string;
-        };
-    };
-};
 export type SourceType = 'github' | 'motif' | 'website' | 'file-upload' | 'api-upload';
 export type Source = {
     type: SourceType;
-    data: {
+    data?: {
         url?: string;
@@ -27,9 +16,26 @@ domain?: string;
 export type SearchResult = {
-    file: SearchResultFileData;
+    file: FileReferenceFileData;
     matchType: 'title' | 'leadHeading' | 'content';
 } & SearchResultSection;
-export type SearchResultFileData = {
+export type SearchResultSection = {
+    content?: string;
+    snippet: string;
+} & FileSectionReferenceSectionData;
+export type FileSectionReference = {
+    file: FileReferenceFileData;
+} & FileSectionReferenceSectionData;
+export type FileSectionReferenceSectionData = {
+    meta?: {
+        leadHeading?: {
+            id?: string;
+            depth?: number;
+            value?: string;
+            slug?: string;
+        };
+    };
+};
+export type FileReferenceFileData = {
     title?: string;
     path: string;
-    meta: any;
+    meta?: any;
     source: Source;
@@ -36,0 +42,0 @@ };
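To make the new reference shape concrete, here is an example value conforming to the `FileSectionReference` type added above; the field values are invented for illustration, but the structure follows the 0.8.0 declarations:

```ts
import type { FileSectionReference } from '@markprompt/core';

// Illustrative values only; the shape matches the new type declarations.
const reference: FileSectionReference = {
  file: {
    title: 'Getting started',      // optional
    path: '/docs/getting-started', // required
    source: { type: 'github', data: { url: 'https://github.com/acme/docs' } },
  },
  meta: {
    leadHeading: { depth: 2, value: 'Installation', slug: 'installation' },
  },
};
```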
package.json
 {
     "name": "@markprompt/core",
-    "version": "0.7.0",
+    "version": "0.8.0",
     "repository": {
@@ -5,0 +5,0 @@ "type": "git",
README.md
@@ -61,3 +61,3 @@ # `@markprompt/core`
 iDontKnowMessage: 'Sorry, I am not sure how to answer that.',
-completionsUrl: 'https://api.markprompt.com/v1/completions', // or your own completions API endpoint,
+apiUrl: 'https://api.markprompt.com/v1/completions', // or your own completions API endpoint,
 };
@@ -79,3 +79,3 @@
 - `onAnswerChunk` (`function`): Answers come in via streaming. This function is called when a new chunk arrives
-- `onReferences` (`function`): This function is called when a chunk includes references.
+- `onReferences` (`function`): This function is called when receiving the list of references from which the response was created.
 - `onError` (`function`): called when an error occurs
@@ -86,3 +86,3 @@ - [`options`](#options) (`object`): Optional options object
-- `completionsUrl` (`string`): URL at which to fetch completions
+- `apiUrl` (`string`): URL at which to fetch completions
 - `iDontKnowMessage` (`string`): Message returned when the model does not have an answer
@@ -89,0 +89,0 @@ - `model` (`OpenAIModelId`): The OpenAI model to use
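Since `completionsUrl` and `searchUrl` are both renamed to `apiUrl` in 0.8.0, callers upgrading from 0.7.0 need a one-line change per call site; a minimal sketch, using the default endpoint and limit shown above and a placeholder project key:

```ts
import { submitSearchQuery } from '@markprompt/core';

// 0.7.0: { searchUrl: 'https://api.markprompt.com/v1/search' }
// 0.8.0: the option is now named apiUrl.
const results = await submitSearchQuery('hello', 'YOUR-PROJECT-KEY', {
  limit: 8,
  apiUrl: 'https://api.markprompt.com/v1/search',
});
```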