mlserver - PyPI Package Compare versions

Comparing version 1.6.2rc1 to 1.7.0rc1
mlserver/grpc/dataplane_pb2_grpc.py  (+46, -0)
# Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT!
"""Client and server classes corresponding to protobuf-defined services."""
import grpc
+import warnings

from . import dataplane_pb2 as dataplane__pb2

+GRPC_GENERATED_VERSION = "1.67.1"
+GRPC_VERSION = grpc.__version__
+_version_not_supported = False
+
+try:
+    from grpc._utilities import first_version_is_lower
+
+    _version_not_supported = first_version_is_lower(
+        GRPC_VERSION, GRPC_GENERATED_VERSION
+    )
+except ImportError:
+    _version_not_supported = True
+
+if _version_not_supported:
+    raise RuntimeError(
+        f"The grpc package installed is at version {GRPC_VERSION},"
+        + f" but the generated code in dataplane_pb2_grpc.py depends on"
+        + f" grpcio>={GRPC_GENERATED_VERSION}."
+        + f" Please upgrade your grpc module to grpcio>={GRPC_GENERATED_VERSION}"
+        + f" or downgrade your generated code using grpcio-tools<={GRPC_VERSION}."
+    )
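The block above is the standard compatibility guard emitted by grpcio-tools 1.67.1: it refuses to import stubs generated by a newer toolchain than the installed runtime. A minimal standalone sketch of the same check (assuming grpcio >= 1.63, which ships grpc._utilities.first_version_is_lower):

import grpc

try:
    from grpc._utilities import first_version_is_lower

    # True when the installed grpcio predates the generator that produced the stubs
    outdated = first_version_is_lower(grpc.__version__, "1.67.1")
except ImportError:
    # Very old grpcio builds lack the helper; treat them as unsupported
    outdated = True

print(f"grpcio {grpc.__version__} older than generated code: {outdated}")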
class GRPCInferenceServiceStub(object):

@@ -24,2 +47,3 @@ """
            response_deserializer=dataplane__pb2.ServerLiveResponse.FromString,
+            _registered_method=True,
        )
@@ -30,2 +54,3 @@ self.ServerReady = channel.unary_unary(
            response_deserializer=dataplane__pb2.ServerReadyResponse.FromString,
+            _registered_method=True,
        )
@@ -36,2 +61,3 @@ self.ModelReady = channel.unary_unary(
            response_deserializer=dataplane__pb2.ModelReadyResponse.FromString,
+            _registered_method=True,
        )
@@ -42,2 +68,3 @@ self.ServerMetadata = channel.unary_unary(
            response_deserializer=dataplane__pb2.ServerMetadataResponse.FromString,
+            _registered_method=True,
        )
@@ -48,2 +75,3 @@ self.ModelMetadata = channel.unary_unary(
            response_deserializer=dataplane__pb2.ModelMetadataResponse.FromString,
+            _registered_method=True,
        )
@@ -54,2 +82,3 @@ self.ModelInfer = channel.unary_unary(
            response_deserializer=dataplane__pb2.ModelInferResponse.FromString,
+            _registered_method=True,
        )
@@ -60,2 +89,3 @@ self.ModelStreamInfer = channel.stream_stream(
            response_deserializer=dataplane__pb2.ModelInferResponse.FromString,
+            _registered_method=True,
        )
@@ -66,2 +96,3 @@ self.RepositoryIndex = channel.unary_unary(
            response_deserializer=dataplane__pb2.RepositoryIndexResponse.FromString,
+            _registered_method=True,
        )
@@ -72,2 +103,3 @@ self.RepositoryModelLoad = channel.unary_unary(
            response_deserializer=dataplane__pb2.RepositoryModelLoadResponse.FromString,
+            _registered_method=True,
        )
@@ -78,2 +110,3 @@ self.RepositoryModelUnload = channel.unary_unary(
            response_deserializer=dataplane__pb2.RepositoryModelUnloadResponse.FromString,
+            _registered_method=True,
        )

@@ -206,2 +239,5 @@
    server.add_generic_rpc_handlers((generic_handler,))
+    server.add_registered_method_handlers(
+        "inference.GRPCInferenceService", rpc_method_handlers
+    )

@@ -243,2 +279,3 @@
            metadata,
+            _registered_method=True,
        )
@@ -273,2 +310,3 @@
            metadata,
+            _registered_method=True,
        )
@@ -303,2 +341,3 @@
            metadata,
+            _registered_method=True,
        )
@@ -333,2 +372,3 @@
            metadata,
+            _registered_method=True,
        )
@@ -363,2 +403,3 @@
            metadata,
+            _registered_method=True,
        )
@@ -393,2 +434,3 @@
            metadata,
+            _registered_method=True,
        )
@@ -423,2 +465,3 @@
            metadata,
+            _registered_method=True,
        )
@@ -453,2 +496,3 @@
            metadata,
+            _registered_method=True,
        )
@@ -483,2 +527,3 @@
            metadata,
+            _registered_method=True,
        )
@@ -513,2 +558,3 @@
            metadata,
+            _registered_method=True,
        )
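Every client-side method above now passes _registered_method=True, which lets recent grpcio versions use the pre-registered call path rather than per-call method lookup; the change is transparent to callers. A hedged usage sketch of the regenerated client (the localhost:8081 endpoint is an assumption for illustration, not part of this diff):

import grpc

from mlserver.grpc import dataplane_pb2, dataplane_pb2_grpc

with grpc.insecure_channel("localhost:8081") as channel:
    stub = dataplane_pb2_grpc.GRPCInferenceServiceStub(channel)
    # A plain unary-unary liveness probe; the registered-method change
    # requires no changes on the caller's side
    response = stub.ServerLive(dataplane_pb2.ServerLiveRequest())
    print(response.live)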
mlserver/grpc/dataplane_pb2.py  (+23, -12)
# -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
+# NO CHECKED-IN PROTOBUF GENCODE
# source: dataplane.proto
-# Protobuf Python Version: 4.25.1
+# Protobuf Python Version: 5.27.2
"""Generated protocol buffer code."""
from google.protobuf import descriptor as _descriptor
from google.protobuf import descriptor_pool as _descriptor_pool
+from google.protobuf import runtime_version as _runtime_version
from google.protobuf import symbol_database as _symbol_database
from google.protobuf.internal import builder as _builder
+
+_runtime_version.ValidateProtobufRuntimeVersion(
+    _runtime_version.Domain.PUBLIC, 5, 27, 2, "", "dataplane.proto"
+)
# @@protoc_insertion_point(imports)
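The regenerated pb2 modules now validate the installed protobuf runtime at import time. A short sketch of how that check behaves when the runtime is too old (ValidateProtobufRuntimeVersion and VersionError ship with protobuf >= 5.27; the arguments mirror the generated call above):

from google.protobuf import runtime_version

try:
    runtime_version.ValidateProtobufRuntimeVersion(
        runtime_version.Domain.PUBLIC, 5, 27, 2, "", "dataplane.proto"
    )
except runtime_version.VersionError as exc:
    # Raised when the installed runtime is older than the gencode requires
    print(f"protobuf runtime too old for this gencode: {exc}")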

@@ -23,12 +28,16 @@
_builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, "dataplane_pb2", _globals)
-if _descriptor._USE_C_DESCRIPTORS == False:
-    _globals["DESCRIPTOR"]._options = None
+if not _descriptor._USE_C_DESCRIPTORS:
+    _globals["DESCRIPTOR"]._loaded_options = None
    _globals["DESCRIPTOR"]._serialized_options = b"\n\007ex.grpc\242\002\003HSW"
-    _globals["_MODELMETADATARESPONSE_TENSORMETADATA_PARAMETERSENTRY"]._options = None
+    _globals[
+        "_MODELMETADATARESPONSE_TENSORMETADATA_PARAMETERSENTRY"
+    ]._loaded_options = None
    _globals[
        "_MODELMETADATARESPONSE_TENSORMETADATA_PARAMETERSENTRY"
    ]._serialized_options = b"8\001"
-    _globals["_MODELMETADATARESPONSE_PARAMETERSENTRY"]._options = None
+    _globals["_MODELMETADATARESPONSE_PARAMETERSENTRY"]._loaded_options = None
    _globals["_MODELMETADATARESPONSE_PARAMETERSENTRY"]._serialized_options = b"8\001"
-    _globals["_MODELINFERREQUEST_INFERINPUTTENSOR_PARAMETERSENTRY"]._options = None
+    _globals["_MODELINFERREQUEST_INFERINPUTTENSOR_PARAMETERSENTRY"]._loaded_options = (
+        None
+    )
    _globals[

@@ -39,19 +48,21 @@ "_MODELINFERREQUEST_INFERINPUTTENSOR_PARAMETERSENTRY"
        "_MODELINFERREQUEST_INFERREQUESTEDOUTPUTTENSOR_PARAMETERSENTRY"
-    ]._options = None
+    ]._loaded_options = None
    _globals[
        "_MODELINFERREQUEST_INFERREQUESTEDOUTPUTTENSOR_PARAMETERSENTRY"
    ]._serialized_options = b"8\001"
-    _globals["_MODELINFERREQUEST_PARAMETERSENTRY"]._options = None
+    _globals["_MODELINFERREQUEST_PARAMETERSENTRY"]._loaded_options = None
    _globals["_MODELINFERREQUEST_PARAMETERSENTRY"]._serialized_options = b"8\001"
-    _globals["_MODELINFERRESPONSE_INFEROUTPUTTENSOR_PARAMETERSENTRY"]._options = None
+    _globals[
+        "_MODELINFERRESPONSE_INFEROUTPUTTENSOR_PARAMETERSENTRY"
+    ]._loaded_options = None
    _globals[
        "_MODELINFERRESPONSE_INFEROUTPUTTENSOR_PARAMETERSENTRY"
    ]._serialized_options = b"8\001"
-    _globals["_MODELINFERRESPONSE_PARAMETERSENTRY"]._options = None
+    _globals["_MODELINFERRESPONSE_PARAMETERSENTRY"]._loaded_options = None
    _globals["_MODELINFERRESPONSE_PARAMETERSENTRY"]._serialized_options = b"8\001"
-    _globals["_REPOSITORYMODELLOADREQUEST_PARAMETERSENTRY"]._options = None
+    _globals["_REPOSITORYMODELLOADREQUEST_PARAMETERSENTRY"]._loaded_options = None
    _globals["_REPOSITORYMODELLOADREQUEST_PARAMETERSENTRY"]._serialized_options = (
        b"8\001"
    )
-    _globals["_REPOSITORYMODELUNLOADREQUEST_PARAMETERSENTRY"]._options = None
+    _globals["_REPOSITORYMODELUNLOADREQUEST_PARAMETERSENTRY"]._loaded_options = None
    _globals["_REPOSITORYMODELUNLOADREQUEST_PARAMETERSENTRY"]._serialized_options = (

@@ -58,0 +69,0 @@ b"8\001"

mlserver/grpc/model_repository_pb2_grpc.py

# Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT!
"""Client and server classes corresponding to protobuf-defined services."""
import grpc
+import warnings

from . import model_repository_pb2 as model__repository__pb2

+GRPC_GENERATED_VERSION = "1.67.1"
+GRPC_VERSION = grpc.__version__
+_version_not_supported = False
+
+try:
+    from grpc._utilities import first_version_is_lower
+
+    _version_not_supported = first_version_is_lower(
+        GRPC_VERSION, GRPC_GENERATED_VERSION
+    )
+except ImportError:
+    _version_not_supported = True
+
+if _version_not_supported:
+    raise RuntimeError(
+        f"The grpc package installed is at version {GRPC_VERSION},"
+        + f" but the generated code in model_repository_pb2_grpc.py depends on"
+        + f" grpcio>={GRPC_GENERATED_VERSION}."
+        + f" Please upgrade your grpc module to grpcio>={GRPC_GENERATED_VERSION}"
+        + f" or downgrade your generated code using grpcio-tools<={GRPC_VERSION}."
+    )
class ModelRepositoryServiceStub(object):

@@ -21,2 +44,3 @@ """Missing associated documentation comment in .proto file."""
            response_deserializer=model__repository__pb2.RepositoryIndexResponse.FromString,
+            _registered_method=True,
        )
@@ -27,2 +51,3 @@ self.RepositoryModelLoad = channel.unary_unary(
            response_deserializer=model__repository__pb2.RepositoryModelLoadResponse.FromString,
+            _registered_method=True,
        )
@@ -33,2 +58,3 @@ self.RepositoryModelUnload = channel.unary_unary(
            response_deserializer=model__repository__pb2.RepositoryModelUnloadResponse.FromString,
+            _registered_method=True,
        )

@@ -81,2 +107,5 @@
    server.add_generic_rpc_handlers((generic_handler,))
+    server.add_registered_method_handlers(
+        "inference.model_repository.ModelRepositoryService", rpc_method_handlers
+    )

@@ -115,2 +144,3 @@
            metadata,
+            _registered_method=True,
        )
@@ -145,2 +175,3 @@
            metadata,
+            _registered_method=True,
        )
@@ -175,2 +206,3 @@
            metadata,
+            _registered_method=True,
        )
mlserver/grpc/model_repository_pb2.py

# -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
+# NO CHECKED-IN PROTOBUF GENCODE
# source: model_repository.proto
-# Protobuf Python Version: 4.25.1
+# Protobuf Python Version: 5.27.2
"""Generated protocol buffer code."""
from google.protobuf import descriptor as _descriptor
from google.protobuf import descriptor_pool as _descriptor_pool
+from google.protobuf import runtime_version as _runtime_version
from google.protobuf import symbol_database as _symbol_database
from google.protobuf.internal import builder as _builder
+
+_runtime_version.ValidateProtobufRuntimeVersion(
+    _runtime_version.Domain.PUBLIC, 5, 27, 2, "", "model_repository.proto"
+)
# @@protoc_insertion_point(imports)

@@ -23,4 +28,4 @@
_builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, "model_repository_pb2", _globals)
-if _descriptor._USE_C_DESCRIPTORS == False:
-    DESCRIPTOR._options = None
+if not _descriptor._USE_C_DESCRIPTORS:
+    DESCRIPTOR._loaded_options = None
    _globals["_REPOSITORYINDEXREQUEST"]._serialized_start = 54

@@ -27,0 +32,0 @@ _globals["_REPOSITORYINDEXREQUEST"]._serialized_end = 118

mlserver/parallel/registry.py

@@ -41,2 +41,8 @@ import asyncio
+def _append_gid_environment_hash(
+    env_hash: str, inference_pool_gid: Optional[str] = None
+) -> str:
+    return f"{env_hash}-{inference_pool_gid}"
+
+
class InferencePoolRegistry:

@@ -84,3 +90,4 @@ """
            pool = await self._get_or_create_with_existing_env(
-                model.settings.parameters.environment_path
+                model.settings.parameters.environment_path,
+                model.settings.parameters.inference_pool_gid,
            )

@@ -92,3 +99,5 @@ else:
    async def _get_or_create_with_existing_env(
-        self, environment_path: str
+        self,
+        environment_path: str,
+        inference_pool_gid: Optional[str],
    ) -> InferencePool:

@@ -104,4 +113,9 @@ """
        env_hash = await compute_hash_of_string(expanded_environment_path)
+        if inference_pool_gid is not None:
+            env_hash = _append_gid_environment_hash(env_hash, inference_pool_gid)
+
        if env_hash in self._pools:
            return self._pools[env_hash]
+
        env = Environment(

@@ -121,9 +135,25 @@ env_path=expanded_environment_path,
        Creates or returns the InferencePool for a model that uses a
-        tarball as python environment.
+        tarball as a Python environment.
        """
        env_tarball = _get_env_tarball(model)
+        inference_pool_gid = (
+            model.settings.parameters.inference_pool_gid
+            if model.settings.parameters
+            else None
+        )
+
        if not env_tarball:
-            return self._default_pool
+            return (
+                self._pools.setdefault(
+                    inference_pool_gid,
+                    InferencePool(self._settings, on_worker_stop=self._on_worker_stop),
+                )
+                if inference_pool_gid
+                else self._default_pool
+            )
+
        env_hash = await compute_hash_of_file(env_tarball)
+        if inference_pool_gid is not None:
+            env_hash = _append_gid_environment_hash(env_hash, inference_pool_gid)
+
        if env_hash in self._pools:

@@ -133,8 +163,8 @@ return self._pools[env_hash]
        env = await self._extract_tarball(env_hash, env_tarball)
-        pool = InferencePool(
+        self._pools[env_hash] = InferencePool(
            self._settings, env=env, on_worker_stop=self._on_worker_stop
        )
-        self._pools[env_hash] = pool
-        return pool
+        return self._pools[env_hash]

    async def _extract_tarball(self, env_hash: str, env_tarball: str) -> Environment:

@@ -154,4 +184,13 @@ env_path = self._get_env_path(env_hash)
        env_hash = _get_environment_hash(model)
+        inference_pool_gid = (
+            model.settings.parameters.inference_pool_gid
+            if model.settings.parameters
+            else None
+        )
+
        if not env_hash:
-            return self._default_pool
+            if not inference_pool_gid:
+                return self._default_pool
+            else:
+                return self._pools[inference_pool_gid]

@@ -272,2 +311,4 @@ if env_hash not in self._pools:
+        if env_hash:
+            # force calling __del__ on `Environment` to clean up
+            self._pools[env_hash]._env = None  # pylint: disable=protected-access
        del self._pools[env_hash]
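Taken together, the registry changes key inference pools by environment hash plus an optional group id, so two models sharing the same environment tarball but carrying different gids land in separate pools. A minimal sketch of the keying (the helper is copied from the diff; hash values below are made up):

from typing import Optional

def _append_gid_environment_hash(
    env_hash: str, inference_pool_gid: Optional[str] = None
) -> str:
    return f"{env_hash}-{inference_pool_gid}"

env_hash = "0f3a9c"  # hypothetical tarball hash
print(_append_gid_environment_hash(env_hash, "team-a"))  # 0f3a9c-team-a
print(_append_gid_environment_hash(env_hash, "team-b"))  # 0f3a9c-team-b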
mlserver/settings.py

import sys
import os
+import uuid
import json

@@ -17,2 +18,3 @@ import importlib
)
+from typing_extensions import Self
from pydantic import (

@@ -23,2 +25,3 @@ ImportString,
)
+from pydantic import model_validator
from pydantic._internal._validators import import_string

@@ -318,2 +321,8 @@ import pydantic_settings
+    inference_pool_gid: Optional[str] = None
+    """Inference pool group id to be used to serve this model."""
+
+    autogenerate_inference_pool_gid: bool = False
+    """Flag to autogenerate the inference pool group id for this model."""
+
    format: Optional[str] = None

@@ -329,3 +338,9 @@ """Format of the model (only available on certain runtimes)."""
+    @model_validator(mode="after")
+    def set_inference_pool_gid(self) -> Self:
+        if self.autogenerate_inference_pool_gid and self.inference_pool_gid is None:
+            self.inference_pool_gid = str(uuid.uuid4())
+        return self
+

class ModelSettings(BaseSettings):

@@ -332,0 +347,0 @@ model_config = SettingsConfigDict(
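The validator above fills inference_pool_gid with a fresh UUID4 when autogeneration is requested and no explicit gid is set. A standalone pydantic v2 sketch of the same pattern (Params is a hypothetical stand-in for the model-parameters class in the diff):

import uuid
from typing import Optional

from pydantic import BaseModel, model_validator
from typing_extensions import Self

class Params(BaseModel):
    inference_pool_gid: Optional[str] = None
    autogenerate_inference_pool_gid: bool = False

    @model_validator(mode="after")
    def set_inference_pool_gid(self) -> Self:
        if self.autogenerate_inference_pool_gid and self.inference_pool_gid is None:
            self.inference_pool_gid = str(uuid.uuid4())
        return self

print(Params(autogenerate_inference_pool_gid=True).inference_pool_gid)  # random UUID4
print(Params().inference_pool_gid)  # None: nothing is autogenerated by default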

mlserver/version.py

@@ -1,1 +1,1 @@
-__version__ = "1.6.2.rc1"
+__version__ = "1.7.0.rc1"

PKG-INFO

@@ -1,4 +1,4 @@
-Metadata-Version: 2.1
+Metadata-Version: 2.3
Name: mlserver
-Version: 1.6.2rc1
+Version: 1.7.0rc1
Summary: MLServer

@@ -8,3 +8,3 @@ License: Apache-2.0
Author-email: hello@seldon.io
-Requires-Python: >=3.9,<3.12
+Requires-Python: >=3.9,<3.13
Classifier: License :: OSI Approved :: Apache Software License

@@ -17,2 +17,3 @@ Classifier: Operating System :: MacOS
Classifier: Programming Language :: Python :: 3.11
+Classifier: Programming Language :: Python :: 3.12
Requires-Dist: aiofiles

@@ -36,3 +37,3 @@ Requires-Dist: aiokafka
Requires-Dist: pydantic (>=2.7.1,<3.0.0)
-Requires-Dist: pydantic-settings (>=2.2.1,<3.0.0)
+Requires-Dist: pydantic-settings (>=2.3.0,<3.0.0)
Requires-Dist: python-dotenv

@@ -148,4 +149,5 @@ Requires-Dist: python-multipart
| 3.10 | 🟢 |
-| 3.11 | 🔵 |
-| 3.12 | 🔵 |
+| 3.11 | 🟢 |
+| 3.12 | 🟢 |
+| 3.13 | 🔴 |

@@ -152,0 +154,0 @@ ## Examples

pyproject.toml

[tool.poetry]
name = "mlserver"
-version = "1.6.2.rc1"
+version = "1.7.0.rc1"
description = "MLServer"

@@ -46,3 +46,3 @@ authors = ["Seldon Technologies Ltd. <hello@seldon.io>"]
[tool.poetry.dependencies]
-python = ">=3.9,<3.12"
+python = ">=3.9,<3.13"
click = "*"

@@ -67,3 +67,3 @@ fastapi = ">=0.88.0,!=0.89.0,<0.116.0"
pydantic = "^2.7.1"
-pydantic-settings = "^2.2.1"
+pydantic-settings = "^2.3.0"
python-multipart = "*"

@@ -97,3 +97,3 @@
httpx = "0.27.0"
-kafka-python = "2.0.2"
+kafka-python-ng = "2.2.3"
tenacity = "8.4.1"

@@ -117,3 +117,3 @@ pyyaml = "6.0.1"
[tool.poetry.group.docker.dependencies]
-tensorflow = "<2.15"
+tensorflow = "^2.16"

@@ -142,10 +142,12 @@ [tool.poetry.group.all-runtimes]
## Dev dependencies from MLflow
-torch = "2.2.1"
-pytorch-lightning = "2.4.0"
-torchmetrics = "1.5.0"
-torchvision = "0.17.1"
-mlflow = "2.17.0"
+torch = "^2.4"
+pytorch-lightning = "^2.4"
+torchmetrics = "1.6.0"
+torchvision = "0.19.1"
+mlflow = "2.19.0"
## Dev dependencies from HuggingFace
-transformers = ">=4.30,<5.0"
+# TODO: Relax when we deprecate Conversation pipeline
+# see: https://github.com/SeldonIO/MLServer/issues/1955
+transformers = "4.41.2"

@@ -152,0 +154,0 @@ [tool.poetry.group.docs]

README.md

@@ -103,4 +103,5 @@ # MLServer
| 3.10 | 🟢 |
-| 3.11 | 🔵 |
-| 3.12 | 🔵 |
+| 3.11 | 🟢 |
+| 3.12 | 🟢 |
+| 3.13 | 🔴 |

@@ -107,0 +108,0 @@ ## Examples