@promptbook/node
Comparing version 0.59.0-29 to 0.59.0-30
@@ -12,2 +12,2 @@ /**
  */
-export declare const PROMPTBOOK_MAKED_BASE_FILENAME = "promptbook-library";
+export declare const PROMPTBOOK_MAKED_BASE_FILENAME = "index";
@@ -17,2 +17,3 @@ import type { AvailableModel } from '../../execution/LlmExecutionTools';
 /**
+ * TODO: !!!! Add embedding models OR Anthropic has only chat+completion models?
  * TODO: [🧠] Some mechanism to propagate unsureness
@@ -19,0 +20,0 @@ * TODO: [🧠][👮‍♀️] Put here more info like description, isVision, trainingDateCutoff, languages, strengths (Top-level performance, intelligence, fluency, and understanding), contextWindow,...
@@ -18,2 +18,3 @@ import type { AvailableModel } from '../../execution/LlmExecutionTools';
 /**
+ * TODO: !!!! Add embedding models
  * TODO: [🧠] Some mechanism to propagate unsureness
@@ -20,0 +21,0 @@ * TODO: [🕚][👮‍♀️] Make this list dynamic - dynamically can be listed modelNames but not modelVariant, legacy status, context length and pricing
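Both model-list hunks above reference the AvailableModel type; once embedding models are added to those lists, an entry would presumably pair a model name with the new EMBEDDING variant. A minimal sketch in TypeScript, assuming AvailableModel carries modelVariant, modelTitle, and modelName fields (the field names and the sample model are assumptions, not confirmed by this diff):

// Assumed shape - the exact AvailableModel fields are not shown in this diff
type AvailableModelSketch = {
    modelVariant: 'COMPLETION' | 'CHAT' | 'EMBEDDING';
    modelTitle: string;
    modelName: string;
};

const exampleEmbeddingModel: AvailableModelSketch = {
    modelVariant: 'EMBEDDING',
    modelTitle: 'text-embedding-ada-002', // illustrative model name
    modelName: 'text-embedding-ada-002',
};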
@@ -31,2 +31,6 @@ import type { AvailableModel } from '../../execution/LlmExecutionTools';
 /**
+ * !!!!
+ */
+embed(prompt: Pick<Prompt, 'content' | 'modelRequirements'>): Promise<PromptCompletionResult>;
+/**
  * Default model for chat variant.
@@ -40,2 +44,6 @@ */
 /**
+ * Default model for completion variant.
+ */
+private getDefaultEmbeddingModel;
+/**
  * List all available OpenAI models that can be used
@@ -42,0 +50,0 @@ */
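For orientation, a minimal sketch of how the newly declared embed() might be called. Only the method signature comes from this diff; the package entry point, the constructor options, and the modelRequirements shape are assumptions:

// Sketch only - import path and constructor options are assumptions, not taken from this diff
import { OpenAiExecutionTools } from '@promptbook/openai';

async function embedExample() {
    const tools = new OpenAiExecutionTools({ apiKey: 'sk-...' /* assumed option name */ });

    // The signature above only asks for `content` and `modelRequirements` (via Pick<...>)
    const result = await tools.embed({
        content: 'Text to turn into an embedding vector',
        modelRequirements: { modelVariant: 'EMBEDDING' }, // the variant added in this release
    });

    return result; // typed as PromptCompletionResult in this prerelease declaration
}

Note that the declaration still types the result as PromptCompletionResult rather than a dedicated embedding result type.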
 import type { string_model_name } from './typeAliases';
-export declare const MODEL_VARIANTS: readonly ["COMPLETION", "CHAT"];
+export declare const MODEL_VARIANTS: readonly ["COMPLETION", "CHAT", "EMBEDDING"];
 /**
@@ -4,0 +4,0 @@ * Model variant describes the very general type of the model
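The constant above is a readonly tuple, so a consumer can derive a union type from it. A small sketch (the ModelVariant alias name and the helper are illustrative; the library may export its own alias):

// Mirrors the declaration above; the derived alias and helper are illustrative
const MODEL_VARIANTS = ['COMPLETION', 'CHAT', 'EMBEDDING'] as const;

type ModelVariant = (typeof MODEL_VARIANTS)[number]; // 'COMPLETION' | 'CHAT' | 'EMBEDDING'

function isEmbeddingVariant(variant: ModelVariant): boolean {
    return variant === 'EMBEDDING';
}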
@@ -1,2 +0,1 @@
-import type { IVectorData } from 'xyzt';
 import type { string_keyword } from '../../utils/normalization/IKeywords';
@@ -15,3 +14,3 @@ import type { string_href } from '../typeAliases';
  modelName: string_model_name;
-position: IVectorData;
+position: Array<number>;
 }>;
@@ -24,2 +23,3 @@ readonly sources: Array<{
 /**
  * TODO: !!! Use or uninstall xyzt
+ * !!!! Annotate
@@ -26,0 +26,0 @@ * TODO: [🧠][🦪] Maybe allow internal linkes between (Material)KnowledgePieces withing the KnowledgeJson and maybe require to explicitelly reference the source of the knowledge
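The practical effect of dropping xyzt's IVectorData is that an index entry now stores its embedding as a plain number array. A hypothetical entry (only the modelName and position field names come from the diff; the values are invented):

// Hypothetical sample - field names from the diff above, values invented for illustration
const indexEntry = {
    modelName: 'text-embedding-ada-002',
    position: [0.0123, -0.0456, 0.0789], // truncated; real embedding vectors have hundreds or thousands of dimensions
};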
 {
 "name": "@promptbook/node",
-"version": "0.59.0-29",
+"version": "0.59.0-30",
 "description": "Library to supercharge your use of large language models",
@@ -52,3 +52,3 @@ "private": false,
 "peerDependencies": {
-    "@promptbook/core": "0.59.0-29"
+    "@promptbook/core": "0.59.0-30"
 },
@@ -55,0 +55,0 @@ "main": "./umd/index.umd.js",
Sorry, the diff of this file is too big to display
Sorry, the diff of this file is not supported yet
License Policy Violation
License: This package is not allowed per your license policy. Review the package's license to ensure compliance.
Found 1 instance in 1 package