portkey-ai
Comparing version 0.1.15 to 0.1.16
generations.d.ts
@@ -1,2 +1,14 @@
-export declare class Generations {
+import { ModelParams } from "../_types/portkeyConstructs";
+import { ApiResource } from "../apiResource";
+import { APIPromise, RequestOptions } from "../baseClient";
+export declare class Generations extends ApiResource {
+    create(_body: GenerationsBody, opts?: RequestOptions): APIPromise<Generation>;
 }
+export interface GenerationsBody extends ModelParams {
+    promptId: string;
+    variables?: Record<string, any>;
+}
+export interface Generation {
+    success: boolean;
+    data: Record<string, any>;
+}
"use strict"; | ||
Object.defineProperty(exports, "__esModule", { value: true }); | ||
exports.Generations = void 0; | ||
class Generations { | ||
const apiResource_1 = require("../apiResource"); | ||
class Generations extends apiResource_1.ApiResource { | ||
create(_body, opts) { | ||
const config = this.client.config || { | ||
mode: this.client.mode, | ||
options: this.client.llms | ||
}; | ||
const body = { "variables": _body.variables }; | ||
return this.post(`/v1/prompts/${_body.promptId}/generate`, Object.assign({ body }, opts)); | ||
} | ||
} | ||
exports.Generations = Generations; | ||
//# sourceMappingURL=generations.js.map |
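For orientation (not part of the published diff): a minimal sketch of how the new resource could be called, inferred from the `GenerationsBody` and `Generation` shapes above and the `portkey.generations` wiring added in the index diff below. The prompt ID, variables, and virtual key are placeholder values, and `PORTKEY_API_KEY` is assumed to be exported in the environment as in the README.

```ts
import { Portkey } from "portkey-ai";

const portkey = new Portkey({
    mode: "single",
    llms: [{ provider: "openai", virtual_key: "open-ai-xxx" }] // placeholder virtual key
});

async function main() {
    // Resolves to a Generation: POSTs to /v1/prompts/{promptId}/generate,
    // substituting `variables` into the saved prompt template.
    const generation = await portkey.generations.create({
        promptId: "my-prompt-id",         // hypothetical prompt ID
        variables: { user_name: "Alice" } // hypothetical template variables
    });
    if (generation.success) {
        console.log(generation.data);
    }
}

main();
```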
index.d.ts
@@ -21,4 +21,5 @@ import * as Types from "./_types/portkeyConstructs";
     chatCompletions: API.ChatCompletions;
     generations: API.Generations;
 }
+export import LLMOptions = Types.LLMOptions;
 export {};
index.js
@@ -42,2 +42,3 @@ "use strict";
         this.chatCompletions = new API.ChatCompletions(this);
+        this.generations = new API.Generations(this);
         this.apiKey = apiKey;
@@ -47,6 +48,6 @@ if (!this.apiKey) {
         }
-        this.config = config || null;
         this.baseURL = baseURL || constants_1.PORTKEY_BASE_URL;
         this.mode = mode || null;
         this.llms = this.constructLlms(llms || null);
+        this.config = config || null;
     }
package.json
 {
   "name": "portkey-ai",
-  "version": "0.1.15",
+  "version": "0.1.16",
   "description": "Node client library for the Portkey API",
README.md
 <div align="center">
-<img src="docs/images/header.png" height=150><br />
+<img src="https://assets.portkey.ai/header.png" height=150><br />
-## Build reliable, secure, and production-ready AI apps easily.
+## Ship Ambitious Gen AI Apps with Portkey's full-stack LLMOps Platform
 ```bash
 npm install portkey-ai
 ```
 </div>
@@ -11,12 +15,12 @@
 **🚪 AI Gateway:**
-* Unified API Signature: If you've used OpenAI, you already know how to use Portkey with any other provider.
-* Interoperability: Write once, run with any provider. Switch between _any model_ from _any provider_ seamlessly.
-* Automated Fallbacks & Retries: Ensure your application remains functional even if a primary service fails.
-* Load Balancing: Efficiently distribute incoming requests among multiple models.
-* Semantic Caching: Reduce costs and latency by intelligently caching results.
+* **Unified API Signature**: If you've used OpenAI, you already know how to use Portkey with any other provider.
+* **Interoperability**: Write once, run with any provider. Switch between _any model_ from _any provider_ seamlessly.
+* **Automated Fallbacks & Retries**: Ensure your application remains functional even if a primary service fails.
+* **Load Balancing & A/B Testing**: Efficiently distribute incoming requests among multiple models and run A/B tests at scale.
+* **Semantic Caching**: Reduce costs and latency by intelligently caching results.
 **🔬 Observability:**
-* Logging: Keep track of all requests for monitoring and debugging.
-* Requests Tracing: Understand the journey of each request for optimization.
-* Custom Tags: Segment and categorize requests for better insights.
+* **Logging**: Keep track of all requests for monitoring and debugging.
+* **Requests Tracing**: Understand the journey of each request for optimization.
+* **Custom Tags**: Segment and categorize requests for better insights.
@@ -26,8 +30,42 @@
-**4️⃣ Steps to Integrate the SDK**
-1. Get your Portkey API key and your virtual key for AI providers.
-2. Construct your LLM, add Portkey features, provider features, and prompt.
-3. Construct the Portkey client and set your usage mode.
-4. Now call Portkey regularly like you would call your OpenAI constructor.
+#### First, install the SDK & export Portkey API Key
+[Get Portkey API key here.](https://app.portkey.ai/signup)
+```bash
+$ npm install portkey-ai
+$ export PORTKEY_API_KEY="PORTKEY_API_KEY"
+```
+#### Now, let's make a request with GPT-4
+```js
+import { Portkey } from "portkey-ai";
+
+const portkey = new Portkey({
+    mode: "single",
+    llms: [{ provider: "openai", virtual_key: "open-ai-xxx" }]
+});
+
+async function main() {
+    const chatCompletion = await portkey.chat.completions.create({
+        messages: [{ role: 'user', content: 'Say this is a test' }],
+        model: 'gpt-4'
+    });
+    console.log(chatCompletion.choices);
+};
+
+main();
+```
+Portkey fully adheres to the OpenAI SDK signature. This means that you can instantly switch to Portkey and start using Portkey's advanced production features right out of the box.
+
+## **🪜 Detailed Integration Guide**
+**There are 4️⃣ Steps to Integrate Portkey**
+1. Setting your Portkey API key and your virtual key for AI providers.
+2. Constructing your LLM with Portkey features, provider features (and prompt!).
+3. Constructing the Portkey client and setting usage mode.
+4. Making your request!
+
+Let's dive in! If you are an advanced user and want to directly jump to various full-fledged examples, [click here](https://github.com/Portkey-AI/portkey-node-sdk/blob/main/examples).
@@ -51,3 +89,3 @@
 **Provider Features**:
-Portkey is designed to be flexible. All the features you're familiar with from your LLM provider, like `top_p`, `top_k`, and `temperature`, can be used seamlessly. Check out the [complete list of provider features here](https://github.com/Portkey-AI/portkey-python-sdk/blob/af0814ebf4f1961b5dfed438918fe68b26ef5f1e/portkey/api_resources/utils.py#L137).
+Portkey is designed to be flexible. All the features you're familiar with from your LLM provider, like `top_p`, `top_k`, and `temperature`, can be used seamlessly. Check out the [complete list of provider features here](https://github.com/Portkey-AI/portkey-node-sdk/blob/539021dcae8fa0945cf7f0b8c27fc26a7dd56092/src/_types/portkeyConstructs.ts#L34).
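To make the pass-through concrete (an illustration, not part of the diff): assuming `LLMOptions` accepts provider params directly, as the feature table later in the README indicates, a config mixing Portkey keys with provider features might look like this:

```ts
import { LLMOptions } from "portkey-ai";

// Provider features ride along on the same options object as Portkey keys.
const llm: LLMOptions = {
    provider: "openai",
    virtual_key: "open-ai-xxx", // placeholder virtual key
    model: "gpt-3.5-turbo",
    top_p: 0.9,                 // provider feature
    temperature: 0.5            // provider feature
};
```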
@@ -59,25 +97,35 @@ **Setting the Prompt Input**:
-```javascript
-import { Portkey } from "portkey-ai";
-
-const portkey = new Portkey({
-    mode: "single",
-    llms: [{
-        provider: "openai",
-        virtual_key: "<>",
-        model: "gpt-3.5-turbo",
-        max_tokens: 2000,
-        temperature: 0,
-        // ** more params can be added here.
-    }]
-})
+```js
+import { LLMOptions } from "portkey-ai";
+
+// Portkey Config
+const provider = "openai";
+const virtual_key = "open-ai-xxx";
+const trace_id = "portkey_sdk_test";
+const cache_status = "semantic";
+
+// Model Params
+const model = "gpt-4";
+const temperature = 1;
+
+// Prompt
+const messages = [{"role": "user", "content": "Who are you?"}];
+
+const llm_a: LLMOptions = {
+    provider: provider,
+    virtual_key: virtual_key,
+    cache_status: cache_status,
+    trace_id: trace_id,
+    model: model,
+    temperature: temperature,
+    messages: messages
+};
 ```
-### **Steo 3️⃣ : Construct the Portkey Client**
+### **Step 3️⃣ : Construct the Portkey Client**
 Portkey client's config takes 3 params: `api_key`, `mode`, `llms`.
-* `api_key`: You can set your Portkey API key here or with `bash script` as done above.
+* `api_key`: You can set your Portkey API key here or with `$ EXPORT` as done above.
 * `mode`: There are **3** modes - Single, Fallback, Loadbalance.
@@ -89,23 +137,23 @@ * **Single** - This is the standard mode. Use it if you do not want Fallback OR Loadbalance features.
+```js
+import { Portkey } from "portkey-ai";
+
+const portkey = new Portkey({ mode: "single", llms: [llm_a] });
+```
-### **Step 4️⃣ : Let's Call the Portkey Client!**
+### **Step 4️⃣ : Call the Portkey Client!**
-The Portkey client can do `ChatCompletions` and `Completions`.
+The Portkey client can do `ChatCompletions` and `Completions` calls.
 Since our LLM is GPT4, we will use ChatCompletions:
-```javascript
+```js
 async function main() {
     const response = await portkey.chatCompletions.create({
-        messages: [{
-            "role": "user",
-            "content": "Who are you ?"
-        }]
-    })
-    console.log(response.choices[0].message)
-}
+        messages: [{ "role": "user", "content": "Who are you ?"}]
+    });
+    console.log(response.choices[0].message);
+};
-main().catch((err) => {
-    console.error(err);
-    process.exit(1);
-});
+main();
 ```
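The Fallback and Loadbalance modes from Step 3 are never exemplified in this diff. A hedged sketch, assuming lowercase mode strings, a hypothetical second config `llm_b`, and integer weights per the feature table below:

```ts
import { Portkey, LLMOptions } from "portkey-ai";

// Two minimal configs; virtual keys are placeholders.
const llm_a: LLMOptions = { provider: "openai", virtual_key: "open-ai-xxx" };
const llm_b: LLMOptions = { provider: "anthropic", virtual_key: "anthropic-xxx" };

// Fallback: llm_b is tried only if llm_a fails.
const fallbackClient = new Portkey({ mode: "fallback", llms: [llm_a, llm_b] });

// Loadbalance: requests are split across llms using the optional `weight` key.
const loadbalanceClient = new Portkey({
    mode: "loadbalance",
    llms: [
        { ...llm_a, weight: 80 }, // assumed integer weights
        { ...llm_b, weight: 20 }
    ]
});
```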
@@ -118,9 +166,13 @@
-## **📔 Full List of Portkey Config**
+## **📔 List of Portkey Features**
+
+You can set all of these features while constructing your LLMOptions object.
-| Feature | Config Key | Value(Type) | Required |
-|---------------------|-------------------------|--------------------------------------------------|-------------|
-| Provider Name | `provider` | `string` | ✅ Required |
-| Model Name | `model` | `string` | ✅ Required |
-| Virtual Key OR API Key | `virtual_key` or `api_key` | `string` | ✅ Required (can be set externally) |
+| Feature | Config Key | Value(Type) | Required |
+|--|--|--|--|
+| API Key OR Virtual Key | `api_key` OR `virtual_key` | `string` | ✅ Required |
+| Provider Name | `provider` | `openai`, `cohere`, `anthropic`, `azure-openai` | ✅ Required |
+| Model Name | `model` | The relevant model name from the provider. For example, `gpt-3.5-turbo` OR `claude-2` | ❔ Optional |
+| Weight (For Loadbalance) | `weight` | `integer` | ❔ Optional |
+| Cache Type | `cache_status` | `simple`, `semantic` | ❔ Optional |
@@ -132,2 +184,3 @@ | Force Cache Refresh | `cache_force_refresh` | `True`, `False` (Boolean) | ❔ Optional |
 | Metadata | `metadata` | `json object` [More info](https://docs.portkey.ai/key-features/custom-metadata) | ❔ Optional |
+| All Model Params | As per the model/provider | This is params like `top_p`, `temperature`, etc | ❔ Optional |
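Putting the table together, a sketch of one `LLMOptions` object exercising the optional keys above; values are illustrative, the trace ID echoes the Step 2 example, and the metadata contents are hypothetical:

```ts
import { LLMOptions } from "portkey-ai";

const llm: LLMOptions = {
    virtual_key: "open-ai-xxx",           // or api_key
    provider: "openai",
    model: "gpt-3.5-turbo",
    weight: 1,                            // only meaningful in loadbalance mode
    cache_status: "semantic",             // "simple" or "semantic"
    cache_force_refresh: false,
    trace_id: "portkey_sdk_test",
    metadata: { environment: "staging" }, // hypothetical metadata object
    temperature: 0.7                      // any model param passes through
};
```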
@@ -138,6 +191,6 @@ ## **🤝 Supported Providers**
 |---|---|---|---|
-| <img src="docs/images/openai.png" width=18 />| OpenAI | ✅ Supported | `/completion`, `/embed` |
-| <img src="docs/images/azure.png" width=18>| Azure OpenAI | ✅ Supported | `/completion`, `/embed` |
-| <img src="docs/images/anthropic.png" width=18>| Anthropic | ✅ Supported | `/complete` |
-| <img src="docs/images/cohere.png" width=18>| Cohere | 🚧 Coming Soon | `generate`, `embed` |
+| <img src="https://assets.portkey.ai/openai.png" width=18 />| OpenAI | ✅ Supported | `/completion`, `/chatcompletion` |
+| <img src="https://assets.portkey.ai/azure.png" width=18>| Azure OpenAI | ✅ Supported | `/completion`, `/chatcompletion` |
+| <img src="https://assets.portkey.ai/anthropic.png" width=18>| Anthropic | ✅ Supported | `/complete` |
+| <img src="https://assets.portkey.ai/cohere.png" width=18>| Cohere | ✅ Supported | `generate` |
Major refactor
Supply chain risk: Package has recently undergone a major refactor. It may be unstable or indicate significant internal changes. Use caution when updating to versions that include significant changes.
Found 1 instance in 1 package.