together-ai
Comparing version 0.5.2 to 0.6.0-alpha.1
package.json
 {
   "name": "together-ai",
-  "version": "0.5.2",
-  "description": "Node.js SDK for Together.ai.",
-  "author": "Hassan El Mghari (@nutlope)",
-  "repository": "Nutlope/together-js",
-  "license": "MIT",
-  "main": "dist/index.js",
-  "types": "dist/index.d.ts",
+  "version": "0.6.0-alpha.1",
+  "description": "The official TypeScript library for the Together API",
+  "author": "Together <dev-feedback@TogetherAI.com>",
+  "types": "./index.d.ts",
+  "main": "./index.js",
+  "type": "commonjs",
+  "repository": "github:togethercomputer/together-typescript",
+  "license": "Apache-2.0",
+  "packageManager": "yarn@1.22.22",
+  "files": [
+    "*"
+  ],
+  "private": false,
   "scripts": {
-    "build": "tsc"
+    "test": "./scripts/test",
+    "build": "./scripts/build",
+    "format": "prettier --write --cache --cache-strategy metadata . !dist",
+    "tsn": "ts-node -r tsconfig-paths/register",
+    "lint": "./scripts/lint",
+    "fix": "eslint --fix --ext ts,js ."
   },
-  "devDependencies": {
-    "@types/node": "^20.6.3",
-    "typescript": "^5.2.2"
-  },
-  "dependencies": {},
-  "keywords": [
-    "together ai",
-    "LLMs",
-    "open source",
-    "AI models",
-    "mistral",
-    "Llama"
-  ]
+  "dependencies": {
+    "@types/node": "^18.11.18",
+    "@types/node-fetch": "^2.6.4",
+    "abort-controller": "^3.0.0",
+    "agentkeepalive": "^4.2.1",
+    "form-data-encoder": "1.7.2",
+    "formdata-node": "^4.3.2",
+    "node-fetch": "^2.6.7",
+    "web-streams-polyfill": "^3.2.1"
+  },
+  "sideEffects": [
+    "./_shims/index.js",
+    "./_shims/index.mjs",
+    "./shims/node.js",
+    "./shims/node.mjs",
+    "./shims/web.js",
+    "./shims/web.mjs"
+  ],
+  "imports": {
+    "together-ai": ".",
+    "together-ai/*": "./src/*"
+  },
+  "exports": {
+    "./_shims/auto/*": {
+      "deno": {
+        "types": "./_shims/auto/*.d.ts",
+        "require": "./_shims/auto/*.js",
+        "default": "./_shims/auto/*.mjs"
+      },
+      "bun": {
+        "types": "./_shims/auto/*.d.ts",
+        "require": "./_shims/auto/*-bun.js",
+        "default": "./_shims/auto/*-bun.mjs"
+      },
+      "browser": {
+        "types": "./_shims/auto/*.d.ts",
+        "require": "./_shims/auto/*.js",
+        "default": "./_shims/auto/*.mjs"
+      },
+      "worker": {
+        "types": "./_shims/auto/*.d.ts",
+        "require": "./_shims/auto/*.js",
+        "default": "./_shims/auto/*.mjs"
+      },
+      "workerd": {
+        "types": "./_shims/auto/*.d.ts",
+        "require": "./_shims/auto/*.js",
+        "default": "./_shims/auto/*.mjs"
+      },
+      "node": {
+        "types": "./_shims/auto/*-node.d.ts",
+        "require": "./_shims/auto/*-node.js",
+        "default": "./_shims/auto/*-node.mjs"
+      },
+      "types": "./_shims/auto/*.d.ts",
+      "require": "./_shims/auto/*.js",
+      "default": "./_shims/auto/*.mjs"
+    },
+    ".": {
+      "require": {
+        "types": "./index.d.ts",
+        "default": "./index.js"
+      },
+      "types": "./index.d.mts",
+      "default": "./index.mjs"
+    },
+    "./*.mjs": {
+      "types": "./*.d.ts",
+      "default": "./*.mjs"
+    },
+    "./*.js": {
+      "types": "./*.d.ts",
+      "default": "./*.js"
+    },
+    "./*": {
+      "types": "./*.d.ts",
+      "require": "./*.js",
+      "default": "./*.mjs"
+    }
+  }
 }
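A practical consequence of the new `exports` map worth noting: v0.6 ships conditional CommonJS and ES module entry points (plus Deno, Bun, and worker shims), where v0.5.2 exposed only `dist/index.js`. A minimal consumer-side sketch, assuming the published files match the map above:

```ts
// ESM: "." resolves to ./index.mjs, with types from ./index.d.mts.
import Together from 'together-ai';

// CommonJS: "." resolves to ./index.js, with types from ./index.d.ts.
// const Together = require('together-ai');

const client = new Together({ accessToken: process.env['TOGETHER_API_KEY'] });
```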
README.md
@@ -1,21 +1,22 @@
-<div align="center">
-  <div>
-    <h1 align="center">Together.ai Node SDK</h1>
-  </div>
-  <p>An npm library to run open source LLMs through <a href="https://www.together.ai/">Together.ai</a>.
-  <a href="https://www.npmjs.com/package/together-ai"><img src="https://img.shields.io/npm/v/together-ai" alt="Current version"></a>
-</div>
-
----
+# Together Node API Library
+
+[![NPM version](https://img.shields.io/npm/v/together-ai.svg)](https://npmjs.org/package/together-ai)
+
+This library provides convenient access to the Together REST API from server-side TypeScript or JavaScript.
+
+The REST API documentation can be found [on docs.together.ai](https://docs.together.ai/). The full API of this library can be found in [api.md](api.md).
+
+It is generated with [Stainless](https://www.stainlessapi.com/).
 
 ## Installation
 
-`npm i together-ai`
+```sh
+npm install together-ai
+```
 
 ## Usage
 
-Create an account at [together.ai](https://www.together.ai/) and add the API key in. Then simply run the code snippet below with your preferred AI model and inputs to get back a reply.
+The full API of this library can be found in [api.md](api.md).
 
+<!-- prettier-ignore -->
 ```js
@@ -25,57 +26,318 @@ import Together from 'together-ai';
 
 const together = new Together({
-  auth: process.env.TOGETHER_API_KEY,
+  accessToken: process.env['TOGETHER_API_KEY'], // This is the default and can be omitted
 });
 
-const model = 'mistralai/Mixtral-8x7B-Instruct-v0.1';
-
-const result = await together.inference(model, {
-  prompt: 'Suggest some fun winter family activities',
-  max_tokens: 700,
-});
+async function main() {
+  const chatCompletion = await together.chat.completions.create({
+    messages: [{ role: 'user', content: 'Say this is a test!' }],
+    model: 'mistralai/Mixtral-8x7B-Instruct-v0.1',
+  });
+
+  console.log(chatCompletion.choices);
+}
+
+main();
 ```
 
+## Streaming responses
+
+We provide support for streaming responses using Server Sent Events (SSE).
+
+```ts
+import Together from 'together-ai';
+
+const together = new Together();
+
+const stream = await together.chat.completions.create({
+  messages: [{ role: 'user', content: 'Say this is a test' }],
+  model: 'mistralai/Mixtral-8x7B-Instruct-v0.1',
+  stream: true,
+});
+
+for await (const chatCompletionChunk of stream) {
+  console.log(chatCompletionChunk.choices);
+}
+```
+
+If you need to cancel a stream, you can `break` from the loop
+or call `stream.controller.abort()`.
+
-### Streaming with LLMs
-
-If you want to stream, simply specify `stream_tokens: true`.
-
-```js
-const result = await together.inference('togethercomputer/llama-2-70b-chat', {
-  prompt: 'Tell me about the history of the United States',
-  max_tokens: 1000,
-  stream_tokens: true,
-});
-```
-
+### Request & Response types
+
+This library includes TypeScript definitions for all request params and response fields. You may import and use them like so:
+
+<!-- prettier-ignore -->
+```ts
+import Together from 'together-ai';
+
+const together = new Together({
+  accessToken: process.env['TOGETHER_API_KEY'], // This is the default and can be omitted
+});
+
+async function main() {
+  const params: Together.Chat.CompletionCreateParams = {
+    messages: [{ role: 'user', content: 'Say this is a test' }],
+    model: 'mistralai/Mixtral-8x7B-Instruct-v0.1',
+  };
+  const chatCompletion: Together.Chat.ChatCompletion = await together.chat.completions.create(params);
+}
+
+main();
+```
 
+Documentation for each method, request param, and response field is available in docstrings and will appear on hover in most modern editors.
+
-### Next.js Chat App with streaming
-
-You can see an example of this library being used in a Next.js chat app here: https://simple-ai-chat.vercel.app.
-
-The code for the example is also available, including code on how to stream the results of the LLM directly to the frontend: https://github.com/Nutlope/chat.
-
-### Filtering responses with Llama Guard
-
-You can now use Llama Guard, an LLM-based input-output safeguard model, with models on the Together.ai platform. To do this, simply add `"safety_model": "Meta-Llama/Llama-Guard-7b"`.
-
-```js
-const result = await together.inference('togethercomputer/llama-2-13b-chat', {
-  prompt: 'Tell me about San Francisco',
-  max_tokens: 1000,
-  safety_model: 'Meta-Llama/Llama-Guard-7b',
-});
-```
-
+## Handling errors
+
+When the library is unable to connect to the API,
+or if the API returns a non-success status code (i.e., 4xx or 5xx response),
+a subclass of `APIError` will be thrown:
+
+<!-- prettier-ignore -->
+```ts
+async function main() {
+  const chatCompletion = await together.chat.completions
+    .create({
+      messages: [{ role: 'user', content: 'Say this is a test' }],
+      model: 'mistralai/Mixtral-8x7B-Instruct-v0.1',
+    })
+    .catch(async (err) => {
+      if (err instanceof Together.APIError) {
+        console.log(err.status); // 400
+        console.log(err.name); // BadRequestError
+        console.log(err.headers); // {server: 'nginx', ...}
+      } else {
+        throw err;
+      }
+    });
+}
+
+main();
+```
+
+Error codes are as follows:
+
+| Status Code | Error Type                 |
+| ----------- | -------------------------- |
+| 400         | `BadRequestError`          |
+| 401         | `AuthenticationError`      |
+| 403         | `PermissionDeniedError`    |
+| 404         | `NotFoundError`            |
+| 422         | `UnprocessableEntityError` |
+| 429         | `RateLimitError`           |
+| >=500       | `InternalServerError`      |
+| N/A         | `APIConnectionError`       |
+
+### Retries
+
+Certain errors will be automatically retried 5 times by default, with a short exponential backoff.
+Connection errors (for example, due to a network connectivity problem), 408 Request Timeout, 409 Conflict,
+429 Rate Limit, and >=500 Internal errors will all be retried by default.
+
+You can use the `maxRetries` option to configure or disable this:
+
+<!-- prettier-ignore -->
+```js
+// Configure the default for all requests:
+const together = new Together({
+  maxRetries: 0, // default is 5
+});
+
+// Or, configure per-request:
+await together.chat.completions.create({ messages: [{ role: 'user', content: 'Say this is a test' }], model: 'mistralai/Mixtral-8x7B-Instruct-v0.1' }, {
+  maxRetries: 5,
+});
+```
 
-## Popular Supported Models
-
-This is a non-exhaustive list of popular models that are supported.
-
-- Mixtral Instruct v0.1 (`mistralai/Mixtral-8x7B-Instruct-v0.1`)
-- Mistral-7B (`mistralai/Mistral-7B-Instruct-v0.1`)
-- Llama-2 70B (`togethercomputer/llama-2-70b-chat`)
-- Llama-2 13B (`togethercomputer/llama-2-13b-chat`)
-- RedPajama 7B (`togethercomputer/RedPajama-INCITE-7B-Chat`)
-- OpenOrca Mistral (`Open-Orca/Mistral-7B-OpenOrca`)
-- Alpaca 7B (`togethercomputer/alpaca-7b`)
-
-## How it works
-
-This library uses the [Together Inference Engine](https://www.together.ai/blog/together-inference-engine-v1), the world's fastest inference stack for open source LLMs. It calls the [Together.ai](https://www.together.ai/) Inference API, specifically their serverless endpoints product, to enable you to use OSS LLMs quickly and efficiently.
-
+### Timeouts
+
+Requests time out after 1 minute by default. You can configure this with a `timeout` option:
+
+<!-- prettier-ignore -->
+```ts
+// Configure the default for all requests:
+const together = new Together({
+  timeout: 20 * 1000, // 20 seconds (default is 1 minute)
+});
+
+// Override per-request:
+await together.chat.completions.create({ messages: [{ role: 'user', content: 'Say this is a test' }], model: 'mistralai/Mixtral-8x7B-Instruct-v0.1' }, {
+  timeout: 5 * 1000,
+});
+```
+
+On timeout, an `APIConnectionTimeoutError` is thrown.
+
+Note that requests which time out will be [retried by default](#retries).
 
+## Advanced Usage
+
+### Accessing raw Response data (e.g., headers)
+
+The "raw" `Response` returned by `fetch()` can be accessed through the `.asResponse()` method on the `APIPromise` type that all methods return.
+
+You can also use the `.withResponse()` method to get the raw `Response` along with the parsed data.
+
+<!-- prettier-ignore -->
+```ts
+const together = new Together();
+
+const response = await together.chat.completions
+  .create({
+    messages: [{ role: 'user', content: 'Say this is a test' }],
+    model: 'mistralai/Mixtral-8x7B-Instruct-v0.1',
+  })
+  .asResponse();
+console.log(response.headers.get('X-My-Header'));
+console.log(response.statusText); // access the underlying Response object
+
+const { data: chatCompletion, response: raw } = await together.chat.completions
+  .create({
+    messages: [{ role: 'user', content: 'Say this is a test' }],
+    model: 'mistralai/Mixtral-8x7B-Instruct-v0.1',
+  })
+  .withResponse();
+console.log(raw.headers.get('X-My-Header'));
+console.log(chatCompletion.choices);
+```
 
+### Making custom/undocumented requests
+
+This library is typed for convenient access to the documented API. If you need to access undocumented
+endpoints, params, or response properties, the library can still be used.
+
+#### Undocumented endpoints
+
+To make requests to undocumented endpoints, you can use `client.get`, `client.post`, and other HTTP verbs.
+Options on the client, such as retries, will be respected when making these requests.
+
+```ts
+await client.post('/some/path', {
+  body: { some_prop: 'foo' },
+  query: { some_query_arg: 'bar' },
+});
+```
+
+#### Undocumented request params
+
+To make requests using undocumented parameters, you may use `// @ts-expect-error` on the undocumented
+parameter. This library doesn't validate at runtime that the request matches the type, so any extra values you
+send will be sent as-is.
+
+```ts
+client.foo.create({
+  foo: 'my_param',
+  bar: 12,
+  // @ts-expect-error baz is not yet public
+  baz: 'undocumented option',
+});
+```
+
+For requests with the `GET` verb, any extra params will be in the query; all other requests will send the
+extra param in the body.
+
+If you want to explicitly send an extra argument, you can do so with the `query`, `body`, and `headers` request
+options.
+
+#### Undocumented response properties
+
+To access undocumented response properties, you may use `// @ts-expect-error` on the response object, or
+cast the response object to the requisite type. Like the request params, we do not validate or strip extra
+properties from the response from the API.
 
+### Customizing the fetch client
+
+By default, this library uses `node-fetch` in Node, and expects a global `fetch` function in other environments.
+
+If you would prefer to use a global, web-standards-compliant `fetch` function even in a Node environment
+(for example, if you are running Node with `--experimental-fetch` or using NextJS which polyfills with `undici`),
+add the following import before your first import `from "Together"`:
+
+```ts
+// Tell TypeScript and the package to use the global web fetch instead of node-fetch.
+// Note, despite the name, this does not add any polyfills, but expects them to be provided if needed.
+import 'together-ai/shims/web';
+import Together from 'together-ai';
+```
+
+To do the inverse, add `import "together-ai/shims/node"` (which does import polyfills).
+
+This can also be useful if you are getting the wrong TypeScript types for `Response` ([more details](https://github.com/togethercomputer/together-typescript/tree/main/src/_shims#readme)).
 
+### Logging and middleware
+
+You may also provide a custom `fetch` function when instantiating the client,
+which can be used to inspect or alter the `Request` or `Response` before/after each request:
+
+```ts
+import { fetch } from 'undici'; // as one example
+import Together from 'together-ai';
+
+const client = new Together({
+  fetch: async (url: RequestInfo, init?: RequestInit): Promise<Response> => {
+    console.log('About to make a request', url, init);
+    const response = await fetch(url, init);
+    console.log('Got response', response);
+    return response;
+  },
+});
+```
+
+Note that if given a `DEBUG=true` environment variable, this library will log all requests and responses automatically.
+This is intended for debugging purposes only and may change in the future without notice.
 
+### Configuring an HTTP(S) Agent (e.g., for proxies)
+
+By default, this library uses a stable agent for all http/https requests to reuse TCP connections, eliminating many TCP & TLS handshakes and shaving around 100ms off most requests.
+
+If you would like to disable or customize this behavior, for example to use the API behind a proxy, you can pass an `httpAgent` which is used for all requests (be they http or https), for example:
+
+<!-- prettier-ignore -->
+```ts
+import http from 'http';
+import { HttpsProxyAgent } from 'https-proxy-agent';
+
+// Configure the default for all requests:
+const together = new Together({
+  httpAgent: new HttpsProxyAgent(process.env.PROXY_URL),
+});
+
+// Override per-request:
+await together.chat.completions.create(
+  {
+    messages: [{ role: 'user', content: 'Say this is a test' }],
+    model: 'mistralai/Mixtral-8x7B-Instruct-v0.1',
+  },
+  {
+    httpAgent: new http.Agent({ keepAlive: false }),
+  },
+);
+```
 
+## Semantic versioning
+
+This package generally follows [SemVer](https://semver.org/spec/v2.0.0.html) conventions, though certain backwards-incompatible changes may be released as minor versions:
+
+1. Changes that only affect static types, without breaking runtime behavior.
+2. Changes to library internals which are technically public but not intended or documented for external use. _(Please open a GitHub issue to let us know if you are relying on such internals)_.
+3. Changes that we do not expect to impact the vast majority of users in practice.
+
+We take backwards-compatibility seriously and work hard to ensure you can rely on a smooth upgrade experience.
+
+We are keen for your feedback; please open an [issue](https://www.github.com/togethercomputer/together-typescript/issues) with questions, bugs, or suggestions.
+
+## Requirements
+
+TypeScript >= 4.5 is supported.
+
+The following runtimes are supported:
+
+- Node.js 18 LTS or later ([non-EOL](https://endoflife.date/nodejs)) versions.
+- Deno v1.28.0 or higher, using `import Together from "npm:together-ai"`.
+- Bun 1.0 or later.
+- Cloudflare Workers.
+- Vercel Edge Runtime.
+- Jest 28 or greater with the `"node"` environment (`"jsdom"` is not supported at this time).
+- Nitro v2.6 or greater.
+
+Note that React Native is not supported at this time.
+
+If you are interested in other runtime environments, please open or upvote an issue on GitHub.
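One gap in the new README: it says a stream can be cancelled by `break`ing from the loop or calling `stream.controller.abort()`, but never shows it. A minimal sketch built on the streaming example above; the 500-character cutoff and the per-chunk field access are illustrative assumptions, not taken from the package docs:

```ts
import Together from 'together-ai';

const together = new Together();

const stream = await together.chat.completions.create({
  messages: [{ role: 'user', content: 'Tell me a very long story' }],
  model: 'mistralai/Mixtral-8x7B-Instruct-v0.1',
  stream: true,
});

let received = 0;
for await (const chunk of stream) {
  // Assumed chunk shape, modeled on the chat-completion examples above.
  received += chunk.choices[0]?.delta?.content?.length ?? 0;
  if (received > 500) {
    break; // stop consuming; stream.controller.abort() also cancels the request
  }
}
```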
src/index.ts
@@ -1,59 +1,221 @@
-type ApiResponse = {
-  status: string;
-  prompt: string[];
-  model: string;
-  model_owner: string;
-  tags: Record<string, unknown>;
-  num_returns: number;
-  args: {
-    model: string;
-    prompt: string;
-    max_tokens: number;
-    stop: string;
-    temperature: number;
-    top_p: number;
-    top_k: number;
-    repetition_penalty: number;
-  };
-  subjobs: any[];
-  output: {
-    choices: {
-      text: string;
-    }[];
-    request_id: string;
-  };
-};
-
-class Together {
-  authApiKey: string | null = null;
-
-  constructor({ auth }: { auth: string }) {
-    this.authApiKey = auth;
-  }
-
-  async inference(model: string, { ...inputs }) {
-    if (!this.authApiKey) {
-      throw new Error('Auth key is not set!');
-    }
-
-    const res = await fetch('https://api.together.xyz/inference', {
-      method: 'POST',
-      headers: {
-        'Content-Type': 'application/json',
-        Authorization: 'Bearer ' + this.authApiKey,
-      },
-      body: JSON.stringify({
-        model,
-        ...inputs,
-      }),
-    });
-
-    if (inputs.stream_tokens === true) {
-      return res.body;
-    } else {
-      const data: ApiResponse = await res.json();
-      return data;
-    }
-  }
-}
-
+// File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+import * as Core from './core';
+import * as Errors from './error';
+import { type Agent } from './_shims/index';
+import * as Uploads from './uploads';
+import * as API from './resources/index';
+
+export interface ClientOptions {
+  /**
+   * Defaults to process.env['TOGETHER_API_KEY'].
+   */
+  accessToken?: string | undefined;
+
+  /**
+   * Override the default base URL for the API, e.g., "https://api.example.com/v2/"
+   *
+   * Defaults to process.env['TOGETHER_BASE_URL'].
+   */
+  baseURL?: string | null | undefined;
+
+  /**
+   * The maximum amount of time (in milliseconds) that the client should wait for a response
+   * from the server before timing out a single request.
+   *
+   * Note that request timeouts are retried by default, so in a worst-case scenario you may wait
+   * much longer than this timeout before the promise succeeds or fails.
+   */
+  timeout?: number;
+
+  /**
+   * An HTTP agent used to manage HTTP(S) connections.
+   *
+   * If not provided, an agent will be constructed by default in the Node.js environment,
+   * otherwise no agent is used.
+   */
+  httpAgent?: Agent;
+
+  /**
+   * Specify a custom `fetch` function implementation.
+   *
+   * If not provided, we use `node-fetch` on Node.js and otherwise expect that `fetch` is
+   * defined globally.
+   */
+  fetch?: Core.Fetch | undefined;
+
+  /**
+   * The maximum number of times that the client will retry a request in case of a
+   * temporary failure, like a network error or a 5XX error from the server.
+   *
+   * @default 5
+   */
+  maxRetries?: number;
+
+  /**
+   * Default headers to include with every request to the API.
+   *
+   * These can be removed in individual requests by explicitly setting the
+   * header to `undefined` or `null` in request options.
+   */
+  defaultHeaders?: Core.Headers;
+
+  /**
+   * Default query parameters to include with every request to the API.
+   *
+   * These can be removed in individual requests by explicitly setting the
+   * param to `undefined` in request options.
+   */
+  defaultQuery?: Core.DefaultQuery;
+}
+
+/** API Client for interfacing with the Together API. */
+export class Together extends Core.APIClient {
+  accessToken: string;
+
+  private _options: ClientOptions;
+
+  /**
+   * API Client for interfacing with the Together API.
+   *
+   * @param {string | undefined} [opts.accessToken=process.env['TOGETHER_API_KEY'] ?? undefined]
+   * @param {string} [opts.baseURL=process.env['TOGETHER_BASE_URL'] ?? https://api.together.xyz/v1] - Override the default base URL for the API.
+   * @param {number} [opts.timeout=1 minute] - The maximum amount of time (in milliseconds) the client will wait for a response before timing out.
+   * @param {number} [opts.httpAgent] - An HTTP agent used to manage HTTP(s) connections.
+   * @param {Core.Fetch} [opts.fetch] - Specify a custom `fetch` function implementation.
+   * @param {number} [opts.maxRetries=5] - The maximum number of times the client will retry a request.
+   * @param {Core.Headers} opts.defaultHeaders - Default headers to include with every request to the API.
+   * @param {Core.DefaultQuery} opts.defaultQuery - Default query parameters to include with every request to the API.
+   */
+  constructor({
+    baseURL = Core.readEnv('TOGETHER_BASE_URL'),
+    accessToken = Core.readEnv('TOGETHER_API_KEY'),
+    ...opts
+  }: ClientOptions = {}) {
+    if (accessToken === undefined) {
+      throw new Errors.TogetherError(
+        "The TOGETHER_API_KEY environment variable is missing or empty; either provide it, or instantiate the Together client with an accessToken option, like new Together({ accessToken: 'My Access Token' }).",
+      );
+    }
+
+    const options: ClientOptions = {
+      accessToken,
+      ...opts,
+      baseURL: baseURL || `https://api.together.xyz/v1`,
+    };
+
+    super({
+      baseURL: options.baseURL!,
+      timeout: options.timeout ?? 60000 /* 1 minute */,
+      httpAgent: options.httpAgent,
+      maxRetries: options.maxRetries,
+      fetch: options.fetch,
+    });
+    this._options = options;
+
+    this.accessToken = accessToken;
+  }
+
+  chat: API.Chat = new API.Chat(this);
+  completions: API.Completions = new API.Completions(this);
+  embeddings: API.Embeddings = new API.Embeddings(this);
+  files: API.Files = new API.Files(this);
+  fineTune: API.FineTuneResource = new API.FineTuneResource(this);
+  images: API.Images = new API.Images(this);
+  models: API.Models = new API.Models(this);
+
+  protected override defaultQuery(): Core.DefaultQuery | undefined {
+    return this._options.defaultQuery;
+  }
+
+  protected override defaultHeaders(opts: Core.FinalRequestOptions): Core.Headers {
+    return {
+      ...super.defaultHeaders(opts),
+      ...this._options.defaultHeaders,
+    };
+  }
+
+  protected override authHeaders(opts: Core.FinalRequestOptions): Core.Headers {
+    return { Authorization: `Bearer ${this.accessToken}` };
+  }
+
+  static Together = this;
+  static TogetherError = Errors.TogetherError;
+  static APIError = Errors.APIError;
+  static APIConnectionError = Errors.APIConnectionError;
+  static APIConnectionTimeoutError = Errors.APIConnectionTimeoutError;
+  static APIUserAbortError = Errors.APIUserAbortError;
+  static NotFoundError = Errors.NotFoundError;
+  static ConflictError = Errors.ConflictError;
+  static RateLimitError = Errors.RateLimitError;
+  static BadRequestError = Errors.BadRequestError;
+  static AuthenticationError = Errors.AuthenticationError;
+  static InternalServerError = Errors.InternalServerError;
+  static PermissionDeniedError = Errors.PermissionDeniedError;
+  static UnprocessableEntityError = Errors.UnprocessableEntityError;
+
+  static toFile = Uploads.toFile;
+  static fileFromPath = Uploads.fileFromPath;
+}
+
+export const {
+  TogetherError,
+  APIError,
+  APIConnectionError,
+  APIConnectionTimeoutError,
+  APIUserAbortError,
+  NotFoundError,
+  ConflictError,
+  RateLimitError,
+  BadRequestError,
+  AuthenticationError,
+  InternalServerError,
+  PermissionDeniedError,
+  UnprocessableEntityError,
+} = Errors;
+
+export import toFile = Uploads.toFile;
+export import fileFromPath = Uploads.fileFromPath;
+
+export namespace Together {
+  export import RequestOptions = Core.RequestOptions;
+
+  export import Chat = API.Chat;
+
+  export import Completions = API.Completions;
+  export import Completion = API.Completion;
+  export import LogProbs = API.LogProbs;
+  export import ToolChoice = API.ToolChoice;
+  export import Tools = API.Tools;
+  export import CompletionCreateParams = API.CompletionCreateParams;
+  export import CompletionCreateParamsNonStreaming = API.CompletionCreateParamsNonStreaming;
+  export import CompletionCreateParamsStreaming = API.CompletionCreateParamsStreaming;
+
+  export import Embeddings = API.Embeddings;
+  export import Embedding = API.Embedding;
+  export import EmbeddingCreateParams = API.EmbeddingCreateParams;
+
+  export import Files = API.Files;
+  export import FileObject = API.FileObject;
+  export import FileRetrieveResponse = API.FileRetrieveResponse;
+  export import FileListResponse = API.FileListResponse;
+  export import FileDeleteResponse = API.FileDeleteResponse;
+
+  export import FineTuneResource = API.FineTuneResource;
+  export import FineTune = API.FineTune;
+  export import FineTuneEvent = API.FineTuneEvent;
+  export import FineTuneListResponse = API.FineTuneListResponse;
+  export import FineTuneDownloadResponse = API.FineTuneDownloadResponse;
+  export import FineTuneCreateParams = API.FineTuneCreateParams;
+  export import FineTuneDownloadParams = API.FineTuneDownloadParams;
+
+  export import Images = API.Images;
+  export import ImageFile = API.ImageFile;
+  export import ImageCreateParams = API.ImageCreateParams;
+
+  export import Models = API.Models;
+  export import ModelListResponse = API.ModelListResponse;
+}
+
 export default Together;
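Because the hand-written `inference()` method is gone, v0.5 call sites need rewriting against the generated resources above. A hedged migration sketch: the old call is copied from the removed README, the replacement uses the new `completions` resource, and the response field access is an assumption based on the removed `ApiResponse` type rather than the new typings:

```ts
import Together from 'together-ai';

// v0.5.2 (removed):
//   const together = new Together({ auth: process.env.TOGETHER_API_KEY });
//   const result = await together.inference('mistralai/Mixtral-8x7B-Instruct-v0.1', {
//     prompt: 'Suggest some fun winter family activities',
//     max_tokens: 700,
//   });

// v0.6.0-alpha.1 equivalent:
const together = new Together({ accessToken: process.env['TOGETHER_API_KEY'] });

const completion = await together.completions.create({
  model: 'mistralai/Mixtral-8x7B-Instruct-v0.1',
  prompt: 'Suggest some fun winter family activities',
  max_tokens: 700,
});

console.log(completion.choices[0]?.text);
```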
License Policy Violation
License: This package is not allowed per your license policy. Review the package's license to ensure compliance.
Found 1 instance in 1 package

Major refactor
Supply chain risk: Package has recently undergone a major refactor. It may be unstable or indicate significant internal changes. Use caution when updating to versions that include significant changes.
Found 1 instance in 1 package

Environment variable access
Supply chain risk: Package accesses environment variables, which may be a sign of credential stuffing or data theft.
Found 2 instances in 1 package

No repository
Supply chain risk: Package does not have a linked source code repository. Without this field, a package will have no reference to the location of the source code used to generate the package.
Found 1 instance in 1 package
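The two "Environment variable access" instances appear to line up with the `Core.readEnv('TOGETHER_API_KEY')` and `Core.readEnv('TOGETHER_BASE_URL')` defaults in the constructor above. Callers who prefer not to depend on ambient environment variables can pass both values explicitly; a minimal sketch, where `loadSecret` is a hypothetical stand-in for your own config or vault layer:

```ts
import Together from 'together-ai';

// Hypothetical helper standing in for a real secret store or config layer.
async function loadSecret(name: string): Promise<string> {
  return `value-of-${name}`; // e.g., fetched from a vault in real code
}

// Passing both options explicitly means the client never falls back to
// its process.env reads for these values.
const together = new Together({
  accessToken: await loadSecret('together-api-key'),
  baseURL: 'https://api.together.xyz/v1', // the default shown in the diff above
});
```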
+ Added @types/node@^18.11.18
+ Added @types/node-fetch@^2.6.4
+ Added abort-controller@^3.0.0
+ Added agentkeepalive@^4.2.1
+ Added form-data-encoder@1.7.2
+ Added formdata-node@^4.3.2
+ Added node-fetch@^2.6.7
+ Added web-streams-polyfill@^3.2.1

+ Added @types/node@18.19.65 (transitive)
+ Added @types/node-fetch@2.6.12 (transitive)
+ Added abort-controller@3.0.0 (transitive)
+ Added agentkeepalive@4.5.0 (transitive)
+ Added asynckit@0.4.0 (transitive)
+ Added combined-stream@1.0.8 (transitive)
+ Added delayed-stream@1.0.0 (transitive)
+ Added event-target-shim@5.0.1 (transitive)
+ Added form-data@4.0.1 (transitive)
+ Added form-data-encoder@1.7.2 (transitive)
+ Added formdata-node@4.4.1 (transitive)
+ Added humanize-ms@1.2.1 (transitive)
+ Added mime-db@1.52.0 (transitive)
+ Added mime-types@2.1.35 (transitive)
+ Added ms@2.1.3 (transitive)
+ Added node-domexception@1.0.0 (transitive)
+ Added node-fetch@2.7.0 (transitive)
+ Added tr46@0.0.3 (transitive)
+ Added undici-types@5.26.5 (transitive)
+ Added web-streams-polyfill@3.3.3 (transitive)
+ Added web-streams-polyfill@4.0.0-beta.3 (transitive)
+ Added webidl-conversions@3.0.1 (transitive)
+ Added whatwg-url@5.0.0 (transitive)