New Research: Supply Chain Attack on Axios Pulls Malicious Dependency from npm. Details →
Socket
Book a Demo · Sign in
Socket

text2vec

Package Overview
Dependencies
Maintainers
1
Versions
44
Alerts
File Explorer

Advanced tools

Socket logo

Install Socket

Detect and block malicious and high-risk dependencies

Install

text2vec — PyPI package: compare versions

Comparing version
1.3.3
to
1.3.4
+14
-2
PKG-INFO

@@ -1,4 +0,4 @@

Metadata-Version: 2.1
Metadata-Version: 2.2
Name: text2vec
Version: 1.3.3
Version: 1.3.4
Summary: Text to vector Tool, encode text

@@ -28,2 +28,14 @@ Home-page: https://github.com/shibing624/text2vec

Requires-Dist: pandas
Requires-Dist: gensim
Dynamic: author
Dynamic: author-email
Dynamic: classifier
Dynamic: description
Dynamic: description-content-type
Dynamic: home-page
Dynamic: keywords
Dynamic: license
Dynamic: requires-dist
Dynamic: requires-python
Dynamic: summary

@@ -30,0 +42,0 @@ [**🇨🇳中文**](https://github.com/shibing624/text2vec/blob/master/README.md) | [**🌐English**](https://github.com/shibing624/text2vec/blob/master/README_EN.md) | [**📖文档/Docs**](https://github.com/shibing624/text2vec/wiki) | [**🤖模型/Models**](https://huggingface.co/shibing624)

@@ -48,2 +48,3 @@ # -*- coding: utf-8 -*-

"pandas",
"gensim",
],

@@ -50,0 +51,0 @@ packages=find_packages(exclude=['tests']),

@@ -1,4 +0,4 @@

Metadata-Version: 2.1
Metadata-Version: 2.2
Name: text2vec
Version: 1.3.3
Version: 1.3.4
Summary: Text to vector Tool, encode text

@@ -28,2 +28,14 @@ Home-page: https://github.com/shibing624/text2vec

Requires-Dist: pandas
Requires-Dist: gensim
Dynamic: author
Dynamic: author-email
Dynamic: classifier
Dynamic: description
Dynamic: description-content-type
Dynamic: home-page
Dynamic: keywords
Dynamic: license
Dynamic: requires-dist
Dynamic: requires-python
Dynamic: summary

@@ -30,0 +42,0 @@ [**🇨🇳中文**](https://github.com/shibing624/text2vec/blob/master/README.md) | [**🌐English**](https://github.com/shibing624/text2vec/blob/master/README_EN.md) | [**📖文档/Docs**](https://github.com/shibing624/text2vec/wiki) | [**🤖模型/Models**](https://huggingface.co/shibing624)

@@ -8,1 +8,2 @@ jieba

pandas
gensim
+3
-2

@@ -14,6 +14,7 @@ # -*- coding: utf-8 -*-

from torch import nn
from torch.optim import AdamW
from torch.utils.data import DataLoader, Dataset, DistributedSampler
from tqdm import tqdm, trange
from transformers import BertForSequenceClassification, BertTokenizer
from transformers.optimization import AdamW, get_linear_schedule_with_warmup
from transformers.optimization import get_linear_schedule_with_warmup

@@ -29,3 +30,3 @@ from text2vec.bertmatching_dataset import (

device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
device = "cuda" if torch.cuda.is_available() else "mps" if torch.backends.mps.is_available() else "cpu"

@@ -32,0 +33,0 @@

@@ -16,5 +16,6 @@ # -*- coding: utf-8 -*-

from torch import nn
from torch.optim import AdamW
from torch.utils.data import DataLoader, Dataset, DistributedSampler
from tqdm import tqdm, trange
from transformers.optimization import AdamW, get_linear_schedule_with_warmup
from transformers.optimization import get_linear_schedule_with_warmup

@@ -21,0 +22,0 @@ from text2vec.bge_dataset import BgeTrainDataset

@@ -14,5 +14,6 @@ # -*- coding: utf-8 -*-

from torch import nn
from torch.optim import AdamW
from torch.utils.data import DataLoader, Dataset, DistributedSampler
from tqdm import tqdm, trange
from transformers.optimization import AdamW, get_linear_schedule_with_warmup
from transformers.optimization import get_linear_schedule_with_warmup

@@ -19,0 +20,0 @@ from text2vec.cosent_dataset import CosentTrainDataset, load_cosent_train_data, HFCosentTrainDataset

@@ -14,5 +14,6 @@ # -*- coding: utf-8 -*-

from torch import nn
from torch.optim import AdamW
from torch.utils.data import DataLoader, Dataset, DistributedSampler
from tqdm import tqdm, trange
from transformers.optimization import AdamW, get_linear_schedule_with_warmup
from transformers.optimization import get_linear_schedule_with_warmup

@@ -19,0 +20,0 @@ from text2vec.sentence_model import SentenceModel

@@ -20,3 +20,3 @@ # -*- coding: utf-8 -*-

device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
device = "cuda" if torch.cuda.is_available() else "mps" if torch.backends.mps.is_available() else "cpu"

@@ -177,4 +177,4 @@

:param query_embeddings: A 2 dimensional tensor with the query embeddings.
:param corpus_embeddings: A 2 dimensional tensor with the corpus embeddings.
:param query_embeddings: Two dim tensor with the query embeddings.
:param corpus_embeddings: Two dim tensor with the corpus embeddings.
:param query_chunk_size: Process 100 queries simultaneously. Increasing that value increases the speed, but requires more memory.

@@ -200,3 +200,2 @@ :param corpus_chunk_size: Scans the corpus 100k entries at a time. Increasing that value increases the speed, but requires more memory.

# Check that corpus and queries are on the same device
query_embeddings = query_embeddings.to(device)

@@ -203,0 +202,0 @@ corpus_embeddings = corpus_embeddings.to(device)

@@ -7,2 +7,2 @@ # -*- coding: utf-8 -*-

__version__ = '1.3.3'
__version__ = '1.3.4'