@xenova/transformers
Comparing version 2.15.0 to 2.15.1
package.json
 {
   "name": "@xenova/transformers",
-  "version": "2.15.0",
+  "version": "2.15.1",
   "description": "State-of-the-art Machine Learning for the web. Run 🤗 Transformers directly in your browser, with no need for a server!",
@@ -43,3 +43,3 @@ "main": "./src/transformers.js",
     "sharp": "^0.32.0",
-    "@huggingface/jinja": "^0.1.0"
+    "@huggingface/jinja": "^0.1.3"
   },
@@ -46,0 +46,0 @@ "optionalDependencies": {
README.md
@@ -104,3 +104,3 @@
 <script type="module">
-    import { pipeline } from 'https://cdn.jsdelivr.net/npm/@xenova/transformers@2.15.0';
+    import { pipeline } from 'https://cdn.jsdelivr.net/npm/@xenova/transformers@2.15.1';
 </script>
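For context, the import above is used with the package's `pipeline` API. A minimal sketch (when no model id is passed, the task's default hosted model is downloaded and cached on first use):

```js
// Allocate a pipeline for sentiment analysis using the default hosted model.
const classifier = await pipeline('sentiment-analysis');

// Run inference; the result is an array of { label, score } objects.
const output = await classifier('I love transformers!');
// e.g. [{ label: 'POSITIVE', score: 0.9998 }]
```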
@@ -138,3 +138,3 @@ ```
-By default, Transformers.js uses [hosted pretrained models](https://huggingface.co/models?library=transformers.js) and [precompiled WASM binaries](https://cdn.jsdelivr.net/npm/@xenova/transformers@2.15.0/dist/), which should work out-of-the-box. You can customize this as follows:
+By default, Transformers.js uses [hosted pretrained models](https://huggingface.co/models?library=transformers.js) and [precompiled WASM binaries](https://cdn.jsdelivr.net/npm/@xenova/transformers@2.15.1/dist/), which should work out-of-the-box. You can customize this as follows:
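The customization snippet the README refers to here is elided by this diff. A minimal sketch, assuming the documented settings `env.allowRemoteModels`, `env.localModelPath`, and `env.backends.onnx.wasm.wasmPaths`; both paths are placeholders:

```js
import { env } from '@xenova/transformers';

// Serve models from your own server instead of the Hugging Face Hub.
env.allowRemoteModels = false;
env.localModelPath = '/path/to/models/'; // placeholder path

// Point ONNX Runtime Web at self-hosted WASM binaries instead of the CDN.
env.backends.onnx.wasm.wasmPaths = '/path/to/dist/'; // placeholder path
```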
@@ -329,2 +329,3 @@
 1. **[OWL-ViT](https://huggingface.co/docs/transformers/model_doc/owlvit)** (from Google AI) released with the paper [Simple Open-Vocabulary Object Detection with Vision Transformers](https://arxiv.org/abs/2205.06230) by Matthias Minderer, Alexey Gritsenko, Austin Stone, Maxim Neumann, Dirk Weissenborn, Alexey Dosovitskiy, Aravindh Mahendran, Anurag Arnab, Mostafa Dehghani, Zhuoran Shen, Xiao Wang, Xiaohua Zhai, Thomas Kipf, and Neil Houlsby.
+1. **[OWLv2](https://huggingface.co/docs/transformers/model_doc/owlv2)** (from Google AI) released with the paper [Scaling Open-Vocabulary Object Detection](https://arxiv.org/abs/2306.09683) by Matthias Minderer, Alexey Gritsenko, Neil Houlsby.
 1. **[Phi](https://huggingface.co/docs/transformers/main/model_doc/phi)** (from Microsoft) released with the papers [Textbooks Are All You Need](https://arxiv.org/abs/2306.11644) by Suriya Gunasekar, Yi Zhang, Jyoti Aneja, Caio César Teodoro Mendes, Allie Del Giorno, Sivakanth Gopi, Mojan Javaheripi, Piero Kauffmann, Gustavo de Rosa, Olli Saarikivi, Adil Salim, Shital Shah, Harkirat Singh Behl, Xin Wang, Sébastien Bubeck, Ronen Eldan, Adam Tauman Kalai, Yin Tat Lee and Yuanzhi Li, and [Textbooks Are All You Need II: phi-1.5 technical report](https://arxiv.org/abs/2309.05463) by Yuanzhi Li, Sébastien Bubeck, Ronen Eldan, Allie Del Giorno, Suriya Gunasekar and Yin Tat Lee.
@@ -331,0 +332,0 @@ 1. **[Qwen2](https://huggingface.co/docs/transformers/model_doc/qwen2)** (from the Qwen team, Alibaba Group) released with the paper [Qwen Technical Report](https://arxiv.org/abs/2309.16609) by Jinze Bai, Shuai Bai, Yunfei Chu, Zeyu Cui, Kai Dang, Xiaodong Deng, Yang Fan, Wenbin Ge, Yu Han, Fei Huang, Binyuan Hui, Luo Ji, Mei Li, Junyang Lin, Runji Lin, Dayiheng Liu, Gao Liu, Chengqiang Lu, Keming Lu, Jianxin Ma, Rui Men, Xingzhang Ren, Xuancheng Ren, Chuanqi Tan, Sinan Tan, Jianhong Tu, Peng Wang, Shijie Wang, Wei Wang, Shengguang Wu, Benfeng Xu, Jin Xu, An Yang, Hao Yang, Jian Yang, Shusheng Yang, Yang Yao, Bowen Yu, Hongyi Yuan, Zheng Yuan, Jianwei Zhang, Xingxuan Zhang, Yichang Zhang, Zhenru Zhang, Chang Zhou, Jingren Zhou, Xiaohuan Zhou and Tianhang Zhu.
src/env.js
@@ -32,3 +32,3 @@ /**
-const VERSION = '2.15.0';
+const VERSION = '2.15.1';
@@ -35,0 +35,0 @@ // Check if various APIs are available (depends on environment)
src/utils/tensor.js
@@ -766,2 +766,38 @@ /**
+/**
+ * Apply Layer Normalization over the last dimension of the input.
+ * @param {Tensor} input The input tensor.
+ * @param {number[]} normalized_shape The shape of the normalized dimensions; currently must be `[input.dims[1]]`.
+ * @param {Object} options The options for the layer normalization.
+ * @param {number} [options.eps=1e-5] A value added to the denominator for numerical stability.
+ * @returns {Tensor} The normalized tensor.
+ */
+export function layer_norm(input, normalized_shape, {
+    eps = 1e-5,
+} = {}) {
+    if (input.dims.length !== 2) {
+        throw new Error('`layer_norm` currently only supports 2D input.');
+    }
+    const [batchSize, featureDim] = input.dims;
+    // Reject anything other than a 1D shape equal to the feature dimension.
+    if (normalized_shape.length !== 1 || normalized_shape[0] !== featureDim) {
+        throw new Error('`normalized_shape` must be a 1D array with shape `[input.dims[1]]`.');
+    }
+    // Per-row standard deviation and mean (correction 0, keepdim).
+    const [std, mean] = std_mean(input, 1, 0, true);
+    // @ts-ignore
+    const returnedData = new input.data.constructor(input.data.length);
+    for (let i = 0; i < batchSize; ++i) {
+        const offset = i * featureDim;
+        for (let j = 0; j < featureDim; ++j) {
+            const offset2 = offset + j;
+            // Note: `eps` is added to the standard deviation (the denominator).
+            returnedData[offset2] = (input.data[offset2] - mean.data[i]) / (std.data[i] + eps);
+        }
+    }
+    return new Tensor(input.type, returnedData, input.dims);
+}
 /**
  * Helper function to calculate new dimensions when performing a squeeze operation.
@@ -768,0 +804,0 @@ * @param {number[]} dims The dimensions of the tensor.
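A quick usage sketch of the new `layer_norm` helper (assuming it is re-exported from the package's main entry alongside `Tensor`; otherwise import it from the tensor utilities module). Each row of a 2D tensor is normalized to zero mean and unit variance over the last dimension:

```js
import { Tensor, layer_norm } from '@xenova/transformers';

// Two rows of three features each.
const input = new Tensor('float32', new Float32Array([1, 2, 3, 10, 20, 30]), [2, 3]);

// `normalized_shape` must equal `[input.dims[1]]`.
const output = layer_norm(input, [3]);

console.log(output.data);
// ≈ Float32Array [-1.2247, 0, 1.2247, -1.2247, 0, 1.2247]
```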
types/env.d.ts
@@ -22,3 +22,3 @@ export namespace env {
 declare const __dirname: any;
-declare const VERSION: "2.15.0";
+declare const VERSION: "2.15.1";
 declare const localModelPath: any;
@@ -25,0 +25,0 @@ declare const FS_AVAILABLE: boolean;
types/processors.d.ts
@@ -225,2 +225,4 @@ declare const FeatureExtractor_base: new () => {
 }
+export class Owlv2ImageProcessor extends OwlViTFeatureExtractor {
+}
 export class DeiTFeatureExtractor extends ImageFeatureExtractor {
@@ -672,2 +674,3 @@ }
     OwlViTFeatureExtractor: typeof OwlViTFeatureExtractor;
+    Owlv2ImageProcessor: typeof Owlv2ImageProcessor;
     CLIPFeatureExtractor: typeof CLIPFeatureExtractor;
@@ -674,0 +677,0 @@ ChineseCLIPFeatureExtractor: typeof ChineseCLIPFeatureExtractor;
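`Owlv2ImageProcessor` is new in this release and is normally reached indirectly: the zero-shot object detection pipeline resolves the processor class from the model's preprocessor config. A hedged sketch (the model id and image URL below are illustrative assumptions, not pinned by this diff):

```js
import { pipeline } from '@xenova/transformers';

// The pipeline instantiates Owlv2ImageProcessor automatically for OWLv2 checkpoints.
const detector = await pipeline('zero-shot-object-detection', 'Xenova/owlv2-base-patch16-ensemble');

const url = 'https://example.com/image.jpg'; // placeholder image
const output = await detector(url, ['a cat', 'a remote control']);
// Array of { score, label, box } detections.
```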
types/tokenizers.d.ts
@@ -382,2 +382,4 @@ declare const TokenizerModel_base: new () => {
 }
+export class GemmaTokenizer extends PreTrainedTokenizer {
+}
 /**
@@ -623,2 +625,3 @@ * The NllbTokenizer class is used to tokenize text for NLLB ("No Language Left Behind") models.
     Qwen2Tokenizer: typeof Qwen2Tokenizer;
+    GemmaTokenizer: typeof GemmaTokenizer;
     PreTrainedTokenizer: typeof PreTrainedTokenizer;
@@ -625,0 +628,0 @@ };
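Likewise, `GemmaTokenizer` is not usually constructed directly; `AutoTokenizer` selects it based on the tokenizer class declared in a repository's `tokenizer_config.json`. A minimal sketch (the repository id is a placeholder, not a specific hosted model):

```js
import { AutoTokenizer } from '@xenova/transformers';

// Resolves to GemmaTokenizer when the repo's tokenizer_config.json requests it.
const tokenizer = await AutoTokenizer.from_pretrained('your-org/your-gemma-model');

const { input_ids } = await tokenizer('Hello world');
```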
types/utils/tensor.d.ts
@@ -25,2 +25,13 @@ /**
+/**
+ * Apply Layer Normalization over the last dimension of the input.
+ * @param {Tensor} input The input tensor.
+ * @param {number[]} normalized_shape The shape of the normalized dimensions; currently must be `[input.dims[1]]`.
+ * @param {Object} options The options for the layer normalization.
+ * @param {number} [options.eps=1e-5] A value added to the denominator for numerical stability.
+ * @returns {Tensor} The normalized tensor.
+ */
+export function layer_norm(input: Tensor, normalized_shape: number[], { eps, }?: {
+    eps?: number;
+}): Tensor;
 /**
  * Concatenates an array of tensors along a specified dimension.
@@ -27,0 +38,0 @@ * @param {Tensor[]} tensors The array of tensors to concatenate.
(Diffs of the remaining changed files are too large or not supported and are not displayed.)
License Policy Violation: This package is not allowed per your license policy. Review the package's license to ensure compliance. Found 1 instance in 1 package.
Updated dependency: @huggingface/jinja@^0.1.3