nlp-tokenizer


Tokenizer utilities for NLP.

Install

$ npm install nlp-tokenizer

Basic Tokenizer

import { Tokenizer, NNTokenizer, WordPieceTokenizer } from 'nlp-tokenizer';
// or
const { Tokenizer, NNTokenizer, WordPieceTokenizer } = require('nlp-tokenizer');

let tokenizer = new Tokenizer();
let tokens = tokenizer.tokenize("one two three, four, five.");
// tokens == ['one', 'two', 'three', ',', 'four', ',', 'five', '.']

let result = tokenizer.lexical_diversity("aa aa bb bb cc cc 😀 πüé Grüße");
// 6 unique words, 9 words, lexical diversity is 6/9
// result == [6, 9, 6/9]
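
The numbers in the result can be checked by hand. The sketch below is illustrative only (it is not this package's implementation) and assumes the input splits into words on whitespace:

let words = "aa aa bb bb cc cc 😀 πüé Grüße".split(/\s+/);
let unique = new Set(words).size;       // 6 distinct words
let diversity = unique / words.length;  // 6 / 9
// [unique, words.length, diversity] matches result above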

Word Piece Tokenizer

import { Tokenizer, NNTokenizer, WordPieceTokenizer } from 'nlp-tokenizer';
// or
const { Tokenizer, NNTokenizer, WordPieceTokenizer } = require('nlp-tokenizer');

let token_to_id = {'one': 1, 'two': 2, 'un': 3, 'able': 4, 'aff': 5};
let unknown_token = '[UNK]';
let tokenizer = new WordPieceTokenizer(token_to_id, unknown_token);
let t = tokenizer.tokenize('unaffable');
// t == ['un', '##aff', '##able']
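
The split of 'unaffable' is what the standard greedy longest-match-first WordPiece procedure produces. The sketch below illustrates that procedure under the vocabulary above; it is not this package's source, and it looks pieces up without the '##' prefix, since token_to_id above stores bare pieces:

function wordPiece(word, tokenToId, unknownToken) {
  let pieces = [];
  let start = 0;
  while (start < word.length) {
    // Greedy: try the longest remaining substring, shrinking until a
    // piece is found in the vocabulary.
    let end = word.length;
    while (end > start && !(word.slice(start, end) in tokenToId)) {
      end -= 1;
    }
    if (end === start) return [unknownToken]; // no piece matches here
    // Non-initial pieces carry the '##' continuation marker in the output.
    pieces.push((start > 0 ? '##' : '') + word.slice(start, end));
    start = end;
  }
  return pieces;
}

wordPiece('unaffable', token_to_id, unknown_token);
// ['un', '##aff', '##able']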

NN Tokenizer

import { Tokenizer, NNTokenizer, WordPieceTokenizer } from 'nlp-tokenizer';
// or
const { Tokenizer, NNTokenizer, WordPieceTokenizer } = require('nlp-tokenizer');

let vocabulary = ['one', 'two', 'three', 'four', 'five', ',', '.'];
let extra_vocabulary = ['foo', 'bar'];
let tokenizer = new NNTokenizer(vocabulary);
tokenizer.add_tokens(extra_vocabulary);
let t = tokenizer.tokenize('one two three, four foo, five.');
// t == ['one', 'two', 'three', ',', 'four', 'foo', ',', 'five', '.']

let encoded = tokenizer.encode('one two three, four foo unknowned, five.');
// the base vocabulary takes ids 0 to 6
// the unknown token takes id 7
// the extra vocabulary takes ids 8 to 9
// encoded == [0, 1, 2, 5, 3, 8, 7, 5, 4, 6]
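
The id layout described in the comments can be reproduced by hand. This is a sketch of the bookkeeping, not the package's internals; it assumes tokenize splits the string as in the previous example:

let ids = {};
vocabulary.forEach((tok, i) => { ids[tok] = i; });  // base vocabulary: 0..6
let unknown_id = vocabulary.length;                 // unknown token: 7
extra_vocabulary.forEach((tok, i) => {
  ids[tok] = vocabulary.length + 1 + i;             // extra vocabulary: 8..9
});

let by_hand = tokenizer
  .tokenize('one two three, four foo unknowned, five.')
  .map(tok => (tok in ids ? ids[tok] : unknown_id));
// by_hand == [0, 1, 2, 5, 3, 8, 7, 5, 4, 6]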
