@promptbook/azure-openai - npm Package Compare versions

Comparing version 0.69.0-15 to 0.69.0-16


esm/index.es.js

@@ -9,4 +9,4 @@ import { OpenAIClient, AzureKeyCredential } from '@azure/openai';

*/
var PROMPTBOOK_VERSION = '0.69.0-14';
// TODO: !!!! List here all the versions and annotate + put into script
var PROMPTBOOK_VERSION = '0.69.0-15';
// TODO:[main] !!!! List here all the versions and annotate + put into script

@@ -696,3 +696,3 @@ /*! *****************************************************************************

* TODO: [🧠][🛣] More elegant way of tracking than passing `name`
* TODO: [🧠] !!! In-memory cache of same values to prevent multiple checks
* TODO: [🧠][main] !!! In-memory cache of same values to prevent multiple checks
* Note: [๐Ÿ ] This is how `checkSerializableAsJson` + `isSerializableAsJson` together can just return true/false or a rich error message

@@ -1196,2 +1196,3 @@ */

},
//TODO:[main] !!!!!! Add gpt-4o-mini-2024-07-18 and all others to be up to date
},

@@ -1213,2 +1214,47 @@ /**/

modelVariant: 'CHAT',
modelTitle: 'o1-preview',
modelName: 'o1-preview',
pricing: {
prompt: computeUsage("$15.00 / 1M tokens"),
output: computeUsage("$60.00 / 1M tokens"),
},
},
/**/
/**/
{
modelVariant: 'CHAT',
modelTitle: 'o1-preview-2024-09-12',
modelName: 'o1-preview-2024-09-12',
// <- TODO:[main] !!!!!! Some better system to organize these date suffixes and versions
pricing: {
prompt: computeUsage("$15.00 / 1M tokens"),
output: computeUsage("$60.00 / 1M tokens"),
},
},
/**/
/**/
{
modelVariant: 'CHAT',
modelTitle: 'o1-mini',
modelName: 'o1-mini',
pricing: {
prompt: computeUsage("$3.00 / 1M tokens"),
output: computeUsage("$12.00 / 1M tokens"),
},
},
/**/
/**/
{
modelVariant: 'CHAT',
modelTitle: 'o1-mini-2024-09-12',
modelName: 'o1-mini-2024-09-12',
pricing: {
prompt: computeUsage("$3.00 / 1M tokens"),
output: computeUsage("$12.00 / 1M tokens"),
},
},
/**/
/**/
{
modelVariant: 'CHAT',
modelTitle: 'gpt-3.5-turbo-16k-0613',
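The entries above register OpenAI's o1 family with per-token pricing parsed from human-readable strings. As a rough illustration only, a `computeUsage`-style helper could look like the following sketch; the parsing logic and error handling here are assumptions for orientation, not the package's actual implementation:

// Hypothetical sketch: parse a price string like "$15.00 / 1M tokens"
// into a price per single token, matching the "$X / 1M tokens" format used above.
function computeUsage(priceString: string): number {
    const match = /^\$(\d+(?:\.\d+)?) \/ 1M tokens$/.exec(priceString);
    if (match === null) {
        throw new Error(`Unexpected price format: ${priceString}`);
    }
    return Number(match[1]) / 1_000_000; // price of one token
}

// Example: computeUsage("$15.00 / 1M tokens") === 0.000015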

@@ -1300,3 +1346,3 @@ modelName: 'gpt-3.5-turbo-16k-0613',

return __generator(this, function (_a) {
// TODO: !!! Do here some filtering which models are really available as deployment
// TODO:[main] !!! Do here some filtering which models are really available as deployment
// @see https://management.azure.com/subscriptions/subscriptionId/resourceGroups/resourceGroupName/providers/Microsoft.CognitiveServices/accounts/accountName/deployments?api-version=2023-05-01

@@ -1303,0 +1349,0 @@ return [2 /*return*/, OPENAI_MODELS.map(function (_a) {
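The filtering TODO above references the Azure management endpoint for listing deployments. A hedged sketch of what such filtering might look like, assuming the documented response shape of that endpoint; the helper name and types are invented for illustration:

// Hypothetical sketch: keep only the models that actually exist as deployments
// on the given Azure OpenAI account. Endpoint per the @see link above;
// the response shape ({ value: [...] }) is assumed from the Azure REST docs.
async function filterDeployedModels(
    models: ReadonlyArray<{ modelName: string }>,
    subscriptionId: string,
    resourceGroupName: string,
    accountName: string,
    accessToken: string,
): Promise<Array<{ modelName: string }>> {
    const url = `https://management.azure.com/subscriptions/${subscriptionId}/resourceGroups/${resourceGroupName}/providers/Microsoft.CognitiveServices/accounts/${accountName}/deployments?api-version=2023-05-01`;
    const response = await fetch(url, { headers: { Authorization: `Bearer ${accessToken}` } });
    const { value } = (await response.json()) as {
        value: Array<{ properties: { model: { name: string } } }>;
    };
    const deployedNames = new Set(value.map((deployment) => deployment.properties.model.name));
    return models.filter(({ modelName }) => deployedNames.has(modelName));
}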


esm/typings/promptbook-collection/index.d.ts
declare const _default: ({
title: string;
pipelineUrl: string;
promptbookVersion: string;
parameters: {

@@ -27,3 +26,2 @@ name: string;

pipelineUrl: string;
promptbookVersion: string;
parameters: {

@@ -57,3 +55,2 @@ name: string;

pipelineUrl: string;
promptbookVersion: string;
parameters: {

@@ -60,0 +57,0 @@ name: string;

@@ -9,3 +9,3 @@ import type { Command as Program } from 'commander';

/**
* TODO: [🥃] !!! Allow `ptbk make` without configuring any llm tools
* TODO: [🥃][main] !!! Allow `ptbk make` without configuring any llm tools
* TODO: Maybe remove this command - "about" command should be enough?

@@ -12,0 +12,0 @@ * TODO: [0] DRY Javascript and typescript - Maybe make ONLY typescript and for javascript just remove types

@@ -28,3 +28,3 @@ import type { string_url } from '../../types/typeAliases';

/**
* TODO: !!!! [🧠] Library precompilation and do not mix markdown and json promptbooks
* TODO:[main] !!!! [🧠] Library precompilation and do not mix markdown and json promptbooks
*/

@@ -72,3 +72,3 @@ import type { CsvSettings } from './formats/csv/CsvSettings';

* @@@
* TODO: [๐Ÿ] !!! Use
* TODO: [๐Ÿ][main] !!! Use
*

@@ -80,3 +80,3 @@ * @public exported from `@promptbook/core`

* @@@
* TODO: [๐Ÿ] !!! Use
* TODO: [๐Ÿ][main] !!! Use
*

@@ -83,0 +83,0 @@ * @public exported from `@promptbook/core`

@@ -21,3 +21,3 @@ import type { PipelineJson } from '../types/PipelineJson/PipelineJson';

/**
* TODO: !!!! Warn if only the sync version is used
* TODO:[main] !!!! Warn if only the sync version is used
* TODO: [🚞] Report here line/column of error

@@ -24,0 +24,0 @@ * TODO: Use spaceTrim more effectively

@@ -35,9 +35,9 @@ import type { PipelineJson } from '../../types/PipelineJson/PipelineJson';

/**
* TODO: [๐Ÿฃ] !!!! Validate that all samples match expectations
* TODO: [๐Ÿฃ][๐Ÿ] !!!! Validate that knowledge is valid (non-void)
* TODO: [๐Ÿฃ] !!!! Validate that persona can be used only with CHAT variant
* TODO: [๐Ÿฃ] !!!! Validate that parameter with reserved name not used RESERVED_PARAMETER_NAMES
* TODO: [๐Ÿฃ] !!!! Validate that reserved parameter is not used as joker
* TODO: [๐Ÿฃ][main] !!!! Validate that all samples match expectations
* TODO: [๐Ÿฃ][๐Ÿ][main] !!!! Validate that knowledge is valid (non-void)
* TODO: [๐Ÿฃ][main] !!!! Validate that persona can be used only with CHAT variant
* TODO: [๐Ÿฃ][main] !!!! Validate that parameter with reserved name not used RESERVED_PARAMETER_NAMES
* TODO: [๐Ÿฃ][main] !!!! Validate that reserved parameter is not used as joker
* TODO: [๐Ÿง ] Validation not only logic itself but imports around - files and websites and rerefenced pipelines exists
* TODO: [๐Ÿ› ] Actions, instruments (and maybe knowledge) => Functions and tools
*/

@@ -11,5 +11,5 @@ import type { PrepareOptions } from '../../../prepare/PrepareOptions';

/**
* TODO: [๐Ÿ][๐Ÿ”ผ] !!! Export via `@promptbook/markdown`
* TODO: [๐Ÿ][๐Ÿ”ผ][main] !!! Export via `@promptbook/markdown`
* TODO: [๐Ÿช‚] Do it in parallel 11:11
* Note: No need to aggregate usage here, it is done by intercepting the llmTools
*/

@@ -11,3 +11,3 @@ import type { PrepareOptions } from '../../../prepare/PrepareOptions';

/**
* TODO: [๐Ÿ][๐Ÿ”ผ] !!! Export via `@promptbook/pdf`
* TODO: [๐Ÿ][๐Ÿ”ผ][main] !!! Export via `@promptbook/pdf`
* TODO: [๐Ÿงบ] In future, content can be alse File or Blob BUT for now for wider compatibility its only base64

@@ -14,0 +14,0 @@ * @see https://stackoverflow.com/questions/14653349/node-js-cant-create-blobs

@@ -16,3 +16,3 @@ import type { PromptResult } from '../../../../execution/PromptResult';

*/
promptbookVersion: string_promptbook_version;
promptbookVersion?: string_promptbook_version;
/**

@@ -19,0 +19,0 @@ * @@@
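This hunk (and the matching `PipelineJson` hunk further down) relaxes `promptbookVersion` from required to optional, so consumers now need a fallback when the field is absent. A minimal sketch, assuming the package's own `PROMPTBOOK_VERSION` constant is the sensible default:

declare const PROMPTBOOK_VERSION: string; // the constant bumped in esm/index.es.js above

// Hypothetical helper: fall back to the package's own version when the field is absent
function resolvePromptbookVersion(record: { promptbookVersion?: string }): string {
    return record.promptbookVersion ?? PROMPTBOOK_VERSION;
}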

@@ -19,3 +19,3 @@ import type { AvailableModel } from '../../execution/AvailableModel';

* Note: [🤖] Add models of new variant
* TODO: [🧠] !!! Add embedding models OR does Anthropic have only chat+completion models?
* TODO: [🧠][main] !!! Add embedding models OR does Anthropic have only chat+completion models?
* TODO: [🧠] Some mechanism to propagate uncertainty

@@ -22,0 +22,0 @@ * TODO: [🧠][👮‍♀️] Put here more info like description, isVision, trainingDateCutoff, languages, strengths (top-level performance, intelligence, fluency, and understanding), contextWindow,...

@@ -14,4 +14,4 @@ import { RemoteLlmExecutionTools } from '../remote/RemoteLlmExecutionTools';

/**
* TODO: [🧠] !!!! Make anonymous this with all LLM providers
* TODO: [🧠][🧱] !!!! Maybe change all `new AnthropicClaudeExecutionTools` -> `createAnthropicClaudeExecutionTools` in manual
* TODO: [🧠][main] !!!! Make anonymous this with all LLM providers
* TODO: [🧠][🧱][main] !!!! Maybe change all `new AnthropicClaudeExecutionTools` -> `createAnthropicClaudeExecutionTools` in manual
* TODO: [🧠] Maybe auto-detect usage in browser and determine default value of `isProxied`

@@ -18,0 +18,0 @@ * TODO: [🦺] Is there some way to put `packageName` and `className` on top and the function definition at the bottom?

#!/usr/bin/env ts-node
export {};
/**
* TODO: !!! Playground with WebGPT / Promptbook.studio anonymous server
* TODO: !!! Test here that `systemMessage`, `temperature` and `seed` are working correctly
* TODO:[main] !!! Playground with WebGPT / Promptbook.studio anonymous server
* TODO:[main] !!! Test here that `systemMessage`, `temperature` and `seed` are working correctly
*/
#!/usr/bin/env ts-node
export {};
/**
* TODO: !!! Test here that `systemMessage`, `temperature` and `seed` are working correctly
* TODO:[main] !!! Test here that `systemMessage`, `temperature` and `seed` are working correctly
*/

@@ -12,3 +12,3 @@ import type { PrepareOptions } from '../prepare/PrepareOptions';

/**
* TODO: [🔃] !!!!! If the persona was prepared with a different version or a different set of models, prepare it once again
* TODO: [🔃][main] !!!!! If the persona was prepared with a different version or a different set of models, prepare it once again
* TODO: [๐Ÿข] !! Check validity of `modelName` in pipeline

@@ -15,0 +15,0 @@ * TODO: [๐Ÿข] !! Check validity of `systemMessage` in pipeline

@@ -9,3 +9,3 @@ import type { PipelineJson } from '../types/PipelineJson/PipelineJson';

/**
* TODO: [🔃] !!!!! If the pipeline was prepared with a different version or a different set of models, prepare it once again
* TODO: [🔃][main] !!!!! If the pipeline was prepared with a different version or a different set of models, prepare it once again
* TODO: [๐Ÿ ] Maybe base this on `makeValidator`

@@ -12,0 +12,0 @@ * TODO: [🧊] Pipeline can be partially prepared; this should return true ONLY if fully prepared

@@ -26,3 +26,3 @@ import type { PipelineJson } from '../types/PipelineJson/PipelineJson';

* TODO: [🧠] What is the better name: `prepareTemplate` or `prepareTemplateAndParameters`?
* TODO: [♨] !!! Prepare and index the samples and maybe templates
* TODO: [♨][main] !!! Prepare and index the samples and maybe templates
* TODO: Write tests for `preparePipeline`

@@ -29,0 +29,0 @@ * TODO: [๐Ÿ] Leverage the batch API and build queues @see https://platform.openai.com/docs/guides/batch

@@ -45,3 +45,3 @@ import type { ModelRequirements } from '../ModelRequirements';

*/
readonly promptbookVersion: string_semantic_version;
readonly promptbookVersion?: string_semantic_version;
/**

@@ -48,0 +48,0 @@ * Description of the promptbook

@@ -595,5 +595,5 @@ import type { TupleToUnion } from 'type-fest';

/**
* TODO: !!! Change "For example" to @example
* TODO:[main] !!! Change "For example" to @example
* TODO: !! Change to branded types
* TODO: Delete type aliases that are not exported or used internally
*/

@@ -25,4 +25,4 @@ import type { string_name } from '../../types/typeAliases';

* TODO: [🧠][🛣] More elegant way of tracking than passing `name`
* TODO: [🧠] !!! In-memory cache of same values to prevent multiple checks
* TODO: [🧠][main] !!! In-memory cache of same values to prevent multiple checks
* Note: [๐Ÿ ] This is how `checkSerializableAsJson` + `isSerializableAsJson` together can just return true/false or a rich error message
*/
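The recurring in-memory-cache TODO above suggests memoizing `checkSerializableAsJson` so the same value is not validated twice. One possible shape, purely illustrative; the `WeakSet` approach, the wrapper name, and the assumed signature are not the package's code:

declare function checkSerializableAsJson(name: string, value: unknown): void; // assumed signature

// Hypothetical memoization: objects that already passed the check are skipped
const alreadyCheckedValues = new WeakSet<object>();

function checkSerializableAsJsonCached(name: string, value: unknown): void {
    if (typeof value === 'object' && value !== null) {
        if (alreadyCheckedValues.has(value)) {
            return; // this exact object was already validated
        }
        checkSerializableAsJson(name, value); // assumed to throw a rich error on failure
        alreadyCheckedValues.add(value);
        return;
    }
    checkSerializableAsJson(name, value); // primitives are cheap to re-check
}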

@@ -22,4 +22,4 @@ /**

/**
* TODO: [🧠] !!! In-memory cache of same values to prevent multiple checks
* TODO: [🧠][main] !!! In-memory cache of same values to prevent multiple checks
* TODO: [🧠][💺] Can this be done on the type level?
*/
{
"name": "@promptbook/azure-openai",
"version": "0.69.0-15",
"version": "0.69.0-16",
"description": "Supercharge your use of large language models",

@@ -50,3 +50,3 @@ "private": false,

"peerDependencies": {
"@promptbook/core": "0.69.0-15"
"@promptbook/core": "0.69.0-16"
},

@@ -53,0 +53,0 @@ "dependencies": {

@@ -165,2 +165,6 @@ <!-- ⚠️ WARNING: This code has been generated so that any manual changes will be overwritten -->

# ✨ New Features
- ✨ **Support [OpenAI o1 model](https://openai.com/o1/)**
## ๐Ÿค The Promptbook Whitepaper

@@ -167,0 +171,0 @@
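The headline change in this release is the o1 model support listed in the diff above. A hedged usage sketch follows; the `AzureOpenAiExecutionTools` option names (`resourceName`, `deploymentName`, `apiKey`) are assumptions based on common Azure OpenAI configuration rather than something this diff confirms:

import { AzureOpenAiExecutionTools } from '@promptbook/azure-openai';

// Hypothetical usage: point the tools at an Azure deployment of the newly listed o1-preview model.
// The option names below are assumptions, not confirmed by this diff.
const tools = new AzureOpenAiExecutionTools({
    resourceName: 'my-azure-resource', // placeholder
    deploymentName: 'o1-preview', // one of the models added in 0.69.0-16
    apiKey: process.env.AZURE_OPENAI_API_KEY!,
});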

