@nlux/hf-react
Comparing version 0.4.7 to 0.4.8
@@ -1,1 +0,1 @@
"use strict";var e=require("@nlux/hf"),r=require("react"),t=require("@nlux/nlux");const o="hooks/initHfAdapter";Object.defineProperty(exports,"createAdapter",{enumerable:!0,get:function(){return e.createAdapter}}),exports.useAdapter=n=>{if(!n.model&&!n.endpoint)throw new Error("You must provide either a model or an endpoint to use Hugging Face Inference API.");const[a,s]=r.useState(!1),[u]=r.useState((r=>{const{model:n,endpoint:a,authToken:s,task:u,dataTransferMode:i}=r||{};if(i&&"stream"!==i&&"fetch"!==i)throw new t.NluxUsageError({source:o,message:'Data transfer mode for Hugging Face Inference API must be either "stream" or "fetch"'});if(n&&a)throw new t.NluxUsageError({source:o,message:"You must provide either a model or an endpoint to use Hugging Face Inference API, but not both."});const d=n||a;if(!d)throw new t.NluxUsageError({source:o,message:"You must provide either a model or an endpoint to use Hugging Face Inference API."});let c=e.createAdapter().withModel(d);return s&&(c=c.withAuthToken(s)),u&&(c=c.withTask(u)),i&&(c=c.withDataTransferMode(i)),c})(n)),{authToken:i,dataTransferMode:d,model:c,endpoint:f,task:h}=n||{};return r.useEffect((()=>{a||s(!0)}),[i,d,c,f,h]),u}; | ||
"use strict";var e=require("@nlux/hf"),r=require("react"),t=require("@nlux/nlux");const o="hooks/initHfAdapter";Object.defineProperty(exports,"createAdapter",{enumerable:!0,get:function(){return e.createAdapter}}),Object.defineProperty(exports,"llama2InputPreProcessor",{enumerable:!0,get:function(){return e.llama2InputPreProcessor}}),exports.useAdapter=s=>{if(!s.model)throw new Error("You must provide either a model or an endpoint to use Hugging Face Inference API.");const[n,a]=r.useState(!1),[u]=r.useState((r=>{const{model:s,authToken:n,dataTransferMode:a,inputPreProcessor:u,maxNewTokens:i,systemMessage:d}=r||{};if(a&&"stream"!==a&&"fetch"!==a)throw new t.NluxUsageError({source:o,message:'Data transfer mode for Hugging Face Inference API must be either "stream" or "fetch"'});if(void 0===s)throw new t.NluxUsageError({source:o,message:"You must provide either a model or an endpoint to use Hugging Face Inference API."});let c=e.createAdapter().withModel(s);return void 0!==n&&(c=c.withAuthToken(n)),void 0!==a&&(c=c.withDataTransferMode(a)),void 0!==u&&(c=c.withInputPreProcessor(u)),void 0!==d&&(c=c.withSystemMessage(d)),void 0!==i&&(c=c.withMaxNewTokens(i)),c})(s)),{authToken:i,dataTransferMode:d,model:c,systemMessage:m,inputPreProcessor:f,maxNewTokens:h}=s||{};return r.useEffect((()=>{n||a(!0)}),[i,d,c,m,f,h]),u}; |
@@ -1,1 +0,1 @@
import{createAdapter as e}from"@nlux/hf";export{createAdapter}from"@nlux/hf";import{useState as o,useEffect as t}from"react";import{NluxUsageError as r}from"@nlux/nlux";const n="hooks/initHfAdapter",a=a=>{if(!a.model&&!a.endpoint)throw new Error("You must provide either a model or an endpoint to use Hugging Face Inference API.");const[i,s]=o(!1),[u]=o((o=>{const{model:t,endpoint:a,authToken:i,task:s,dataTransferMode:u}=o||{};if(u&&"stream"!==u&&"fetch"!==u)throw new r({source:n,message:'Data transfer mode for Hugging Face Inference API must be either "stream" or "fetch"'});if(t&&a)throw new r({source:n,message:"You must provide either a model or an endpoint to use Hugging Face Inference API, but not both."});const d=t||a;if(!d)throw new r({source:n,message:"You must provide either a model or an endpoint to use Hugging Face Inference API."});let m=e().withModel(d);return i&&(m=m.withAuthToken(i)),s&&(m=m.withTask(s)),u&&(m=m.withDataTransferMode(u)),m})(a)),{authToken:d,dataTransferMode:m,model:f,endpoint:h,task:c}=a||{};return t((()=>{i||s(!0)}),[d,m,f,h,c]),u};export{a as useAdapter}; | ||
import{createAdapter as e}from"@nlux/hf";export{createAdapter,llama2InputPreProcessor}from"@nlux/hf";import{useState as o,useEffect as r}from"react";import{NluxUsageError as t}from"@nlux/nlux";const s="hooks/initHfAdapter",n=n=>{if(!n.model)throw new Error("You must provide either a model or an endpoint to use Hugging Face Inference API.");const[a,i]=o(!1),[m]=o((o=>{const{model:r,authToken:n,dataTransferMode:a,inputPreProcessor:i,maxNewTokens:m,systemMessage:u}=o||{};if(a&&"stream"!==a&&"fetch"!==a)throw new t({source:s,message:'Data transfer mode for Hugging Face Inference API must be either "stream" or "fetch"'});if(void 0===r)throw new t({source:s,message:"You must provide either a model or an endpoint to use Hugging Face Inference API."});let d=e().withModel(r);return void 0!==n&&(d=d.withAuthToken(n)),void 0!==a&&(d=d.withDataTransferMode(a)),void 0!==i&&(d=d.withInputPreProcessor(i)),void 0!==u&&(d=d.withSystemMessage(u)),void 0!==m&&(d=d.withMaxNewTokens(m)),d})(n)),{authToken:u,dataTransferMode:d,model:f,systemMessage:h,inputPreProcessor:c,maxNewTokens:p}=n||{};return r((()=>{a||i(!0)}),[u,d,f,h,c,p]),m};export{n as useAdapter}; |
@@ -1,62 +0,8 @@
import { DataTransferMode, InferenceTask } from '@nlux/hf';
export { createAdapter } from '@nlux/hf';
import { AdapterBuilder } from '@nlux/nlux';
export { Adapter, DataTransferMode, StandardAdapter } from '@nlux/nlux';
import { HfAdapterOptions } from '@nlux/hf';
export { HfAdapterBuilder, HfAdapterOptions, HfInputPreProcessor, createAdapter, llama2InputPreProcessor } from '@nlux/hf';
type UseAdapterOptions = {
    /**
     * The authorization token to use for Hugging Face Inference API.
     * This will be passed to the `Authorization` header of the HTTP request.
     * If no token is provided, the request will be sent without an `Authorization` header as in this example:
     * `"Authorization": f"Bearer {AUTH_TOKEN}"`.
     *
     * Public models do not require an authorization token, but if your model is private, you will need to provide one.
     *
     * @optional
     * @param {string} authToken
     */
    authToken?: string;
    /**
     * Instruct the adapter to connect to API and load data either in streaming mode or in pull mode.
     * The `stream` mode would use protocols such as websockets or server-side events, and Nlux will display data as
     * it's being generated by the server. The `pull` mode would use a single request to fetch data, and the response
     * would only be displayed once the entire message is loaded.
     *
     * @optional
     * @default 'stream'
     * @param {DataTransferMode} dataTransferMode
     */
    dataTransferMode?: DataTransferMode;
    /**
     * The endpoint to use for Hugging Face Inference API.
     * You should provide either a model or an endpoint, but not both.
     * For more information, please refer to the
     * [Nlux Hugging Face documentation](https://docs.nlux.ai/category/nlux-with-hugging-face).
     *
     * @optional
     * @param {string} endpoint
     */
    endpoint?: string;
    /**
     * The model or the endpoint to use for Hugging Face Inference API.
     * You should provide either a model or an endpoint, but not both.
     * For more information, please refer to the
     * [Nlux Hugging Face documentation](https://docs.nlux.ai/category/nlux-with-hugging-face).
     *
     * @param {string} model
     */
    model?: string;
    /**
     * The task to use for Hugging Face Inference API.
     * Each model exposed by Hugging Face Inference API supports a specific task, such a text generation, translation,
     * classification, image generation, etc.
     *
     * Nlux currently supports text-related tasks: `text-generation` and `text2text-generation`.
     *
     * @param {InferenceTask} task
     */
    task?: InferenceTask;
};
declare const useAdapter: (options: HfAdapterOptions) => AdapterBuilder<any, any>;
declare const useAdapter: (options: UseAdapterOptions) => AdapterBuilder<any, any>;
export { useAdapter };
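For orientation, here is a minimal usage sketch built from the 0.4.8 declarations above. It is illustrative only: the `<AiChat>` component and the `@nlux/nlux-react` import are assumptions (they are not part of this package), and the option values are placeholders.

```tsx
import {AiChat} from '@nlux/nlux-react'; // assumed UI package/component, not confirmed by this diff
import {llama2InputPreProcessor, useAdapter} from '@nlux/hf-react';

export const HfChat = () => {
    // Options correspond to the HfAdapterOptions fields handled by the 0.4.8 hook:
    // model, authToken, dataTransferMode, inputPreProcessor, systemMessage, maxNewTokens.
    const adapter = useAdapter({
        model: '<model-id-or-endpoint-url>',        // placeholder: Hugging Face model ID or inference endpoint URL
        authToken: '<hf-token>',                    // placeholder: only needed for private models/endpoints
        dataTransferMode: 'stream',                 // 'stream' or 'fetch'
        inputPreProcessor: llama2InputPreProcessor, // re-exported by this package as of 0.4.8
        systemMessage: 'You are a helpful assistant.',
        maxNewTokens: 500,
    });

    // The hook returns an adapter builder that is handed to the chat UI.
    return <AiChat adapter={adapter}/>;
};
```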
{
  "name": "@nlux/hf-react",
  "version": "0.4.7",
  "version": "0.4.8",
  "description": "The Hugging Face adapters for NLUX React, the React JS library for building conversational AI interfaces.",
@@ -55,4 +55,4 @@ "keywords": [
  "dependencies": {
    "@nlux/nlux": "0.4.7",
    "@nlux/openai": "0.4.7"
    "@nlux/nlux": "0.4.8",
    "@nlux/openai": "0.4.8"
  },
@@ -59,0 +59,0 @@ "peerDependencies": {
@@ -1,1 +0,1 @@
!function(e,t){"object"==typeof exports&&"undefined"!=typeof module?t(exports,require("@nlux/hf"),require("react"),require("@nlux/nlux")):"function"==typeof define&&define.amd?define(["exports","@nlux/hf","react","@nlux/nlux"],t):t((e="undefined"!=typeof globalThis?globalThis:e||self)["@nlux/hf-react"]={},e.hf,e.react,e.nlux)}(this,(function(e,t,r,n){"use strict";const o="hooks/initHfAdapter";Object.defineProperty(e,"createAdapter",{enumerable:!0,get:function(){return t.createAdapter}}),e.useAdapter=e=>{if(!e.model&&!e.endpoint)throw new Error("You must provide either a model or an endpoint to use Hugging Face Inference API.");const[a,u]=r.useState(!1),[s]=r.useState((e=>{const{model:r,endpoint:a,authToken:u,task:s,dataTransferMode:i}=e||{};if(i&&"stream"!==i&&"fetch"!==i)throw new n.NluxUsageError({source:o,message:'Data transfer mode for Hugging Face Inference API must be either "stream" or "fetch"'});if(r&&a)throw new n.NluxUsageError({source:o,message:"You must provide either a model or an endpoint to use Hugging Face Inference API, but not both."});const d=r||a;if(!d)throw new n.NluxUsageError({source:o,message:"You must provide either a model or an endpoint to use Hugging Face Inference API."});let f=t.createAdapter().withModel(d);return u&&(f=f.withAuthToken(u)),s&&(f=f.withTask(s)),i&&(f=f.withDataTransferMode(i)),f})(e)),{authToken:i,dataTransferMode:d,model:f,endpoint:c,task:h}=e||{};return r.useEffect((()=>{a||u(!0)}),[i,d,f,c,h]),s}})); | ||
!function(e,r){"object"==typeof exports&&"undefined"!=typeof module?r(exports,require("@nlux/hf"),require("react"),require("@nlux/nlux")):"function"==typeof define&&define.amd?define(["exports","@nlux/hf","react","@nlux/nlux"],r):r((e="undefined"!=typeof globalThis?globalThis:e||self)["@nlux/hf-react"]={},e.hf,e.react,e.nlux)}(this,(function(e,r,t,o){"use strict";const n="hooks/initHfAdapter";Object.defineProperty(e,"createAdapter",{enumerable:!0,get:function(){return r.createAdapter}}),Object.defineProperty(e,"llama2InputPreProcessor",{enumerable:!0,get:function(){return r.llama2InputPreProcessor}}),e.useAdapter=e=>{if(!e.model)throw new Error("You must provide either a model or an endpoint to use Hugging Face Inference API.");const[s,a]=t.useState(!1),[u]=t.useState((e=>{const{model:t,authToken:s,dataTransferMode:a,inputPreProcessor:u,maxNewTokens:i,systemMessage:d}=e||{};if(a&&"stream"!==a&&"fetch"!==a)throw new o.NluxUsageError({source:n,message:'Data transfer mode for Hugging Face Inference API must be either "stream" or "fetch"'});if(void 0===t)throw new o.NluxUsageError({source:n,message:"You must provide either a model or an endpoint to use Hugging Face Inference API."});let f=r.createAdapter().withModel(t);return void 0!==s&&(f=f.withAuthToken(s)),void 0!==a&&(f=f.withDataTransferMode(a)),void 0!==u&&(f=f.withInputPreProcessor(u)),void 0!==d&&(f=f.withSystemMessage(d)),void 0!==i&&(f=f.withMaxNewTokens(i)),f})(e)),{authToken:i,dataTransferMode:d,model:f,systemMessage:c,inputPreProcessor:l,maxNewTokens:h}=e||{};return t.useEffect((()=>{s||a(!0)}),[i,d,f,c,l,h]),u}})); |
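To make the change easier to read, here is a rough de-minified sketch of the 0.4.8 `useAdapter` hook that appears (in CJS, ESM, and UMD form) in the bundles above. This is a reconstruction, not the published source: local identifier names such as `initAdapter` and `isInitialized` are invented, and only the behavior visible in the minified output is reflected.

```ts
import {createAdapter, HfAdapterOptions} from '@nlux/hf';
import {useEffect, useState} from 'react';
import {NluxUsageError} from '@nlux/nlux';

const source = 'hooks/initHfAdapter';

// Builds the Hugging Face adapter from the options passed to the hook.
const initAdapter = (options: HfAdapterOptions) => {
    const {model, authToken, dataTransferMode, inputPreProcessor, maxNewTokens, systemMessage} = options || {};

    if (dataTransferMode && dataTransferMode !== 'stream' && dataTransferMode !== 'fetch') {
        throw new NluxUsageError({
            source,
            message: 'Data transfer mode for Hugging Face Inference API must be either "stream" or "fetch"',
        });
    }

    if (model === undefined) {
        throw new NluxUsageError({
            source,
            message: 'You must provide either a model or an endpoint to use Hugging Face Inference API.',
        });
    }

    let adapter = createAdapter().withModel(model);
    if (authToken !== undefined) adapter = adapter.withAuthToken(authToken);
    if (dataTransferMode !== undefined) adapter = adapter.withDataTransferMode(dataTransferMode);
    if (inputPreProcessor !== undefined) adapter = adapter.withInputPreProcessor(inputPreProcessor);
    if (systemMessage !== undefined) adapter = adapter.withSystemMessage(systemMessage);
    if (maxNewTokens !== undefined) adapter = adapter.withMaxNewTokens(maxNewTokens);
    return adapter;
};

export const useAdapter = (options: HfAdapterOptions) => {
    if (!options.model) {
        throw new Error('You must provide either a model or an endpoint to use Hugging Face Inference API.');
    }

    const [isInitialized, setIsInitialized] = useState(false);
    // The adapter builder is created from the options of the first render and kept for the component's lifetime.
    const [adapter] = useState(initAdapter(options));

    const {authToken, dataTransferMode, model, systemMessage, inputPreProcessor, maxNewTokens} = options || {};
    useEffect(() => {
        // Mirrors the initialization flag in the bundle: flipped once when any option changes (including on mount).
        if (!isInitialized) {
            setIsInitialized(true);
        }
    }, [authToken, dataTransferMode, model, systemMessage, inputPreProcessor, maxNewTokens]);

    return adapter;
};
```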
License Policy Violation
License: This package is not allowed per your license policy. Review the package's license to ensure compliance.
Found 1 instance in 1 package
+ Added @nlux/openai@0.4.8 (transitive)
- Removed @nlux/openai@0.4.7 (transitive)
Updated @nlux/nlux@0.4.8
Updated @nlux/openai@0.4.8