Latest Threat Research: SANDWORM_MODE: Shai-Hulud-Style npm Worm Hijacks CI Workflows and Poisons AI Toolchains. Details
Socket
Book a Demo · Install · Sign in
Socket

inferless-cli

Package Overview
Dependencies
Maintainers
3
Versions
60
Alerts
File Explorer

Advanced tools

Socket logo

Install Socket

Detect and block malicious and high-risk dependencies

Install

inferless-cli — npm Package — Compare versions

Comparing version
2.0.16
to
2.0.17
+1
-1
inferless_cli/__init__.py
# __init__.py
__version__ = "2.0.16"
__version__ = "2.0.17"

@@ -32,3 +32,2 @@ import os

read_yaml,
yaml,
)

@@ -337,3 +336,3 @@ from inferless_cli.utils.inferless_config_handler import InferlessConfigHandler

rich.print(
"if you want to redeploy the model please use this command [blue]`inferless redeploy`[/blue]\n"
"if you want to redeploy the model please use this command [blue]`inferless rebuild`[/blue]\n"
)

@@ -346,3 +345,7 @@ raise InferlessCLIError(

old_response = get_model_import_details(config.get_value("model_import_id"))
if old_response and old_response.get("model_import").get("status") == "FAILURE":
if old_response and (
old_response.get("model_import").get("status") == "FAILURE"
or old_response.get("model_import").get("status")
== "FILE_STRUCTURE_FAILURE"
):
is_failed = True

@@ -353,3 +356,3 @@

rich.print(
"if you want to redeploy the model please use this command [blue]`inferless redeploy`[/blue]\n"
"if you want to redeploy the model please use this command [blue]`inferless rebuild`[/blue]\n"
)

@@ -771,3 +774,3 @@ raise InferlessCLIError(

if status == "FAILURE":
if status == "FAILURE" or status == "FILE_STRUCTURE_FAILURE":
error_msg = res["model_import"]["import_error"]["message"]

@@ -787,6 +790,14 @@ rich.print(f"[red]{error_msg}[/red]")

if status in ["FILE_STRUCTURE_VALIDATED", "SUCCESS", "FAILURE"]:
if status in [
"FILE_STRUCTURE_VALIDATED",
"SUCCESS",
"FAILURE",
"FILE_STRUCTURE_FAILURE",
]:
return status, response
if status in ["FILE_STRUCTURE_VALIDATION_FAILED", "IMPORT_FAILED"]:
if status in [
"FILE_STRUCTURE_VALIDATION_FAILED",
"IMPORT_FAILED",
]:
raise InferlessCLIError(f"Status was {status}, response was: {response}")

@@ -793,0 +804,0 @@

from datetime import datetime, timedelta
import rich
import typer
import time
from rich.progress import Progress, SpinnerColumn, TextColumn
from inferless_cli.utils.exceptions import InferlessCLIError, ServerError

@@ -14,3 +16,8 @@ from inferless_cli.utils.helpers import (

def log_prompt(model_id: str, logs_type: str = "BUILD", import_logs: bool = False):
def log_prompt(
model_id: str,
logs_type: str = "BUILD",
import_logs: bool = False,
tail: bool = False,
):
try:

@@ -23,3 +30,3 @@ _, _, _, workspace_id, workspace_name = decrypt_tokens()

if logs_type == "BUILD":
handle_build_logs(import_logs, model_id)
handle_build_logs(import_logs, model_id, tail)
analytics_capture_event(

@@ -32,6 +39,7 @@ "cli_model_logs",

"logs_type": logs_type,
"tail": tail,
},
)
elif logs_type == "CALL":
handle_call_logs(model_id)
handle_call_logs(model_id, tail)
analytics_capture_event(

@@ -44,2 +52,3 @@ "cli_model_logs",

"logs_type": logs_type,
"tail": tail,
},

@@ -61,3 +70,3 @@ )

def handle_call_logs(model_id):
def handle_call_logs(model_id, tail=False):
try:

@@ -72,2 +81,5 @@ start_date = datetime.now() - timedelta(days=7)

token = None
last_log_time = None
# Initial log fetch
while True:

@@ -84,5 +96,18 @@ # Fetch logs based on the build_id and token

rich.print("\nNo Logs found\n")
if not tail:
return
# Reverse the logs before printing
logs["details"].reverse()
print_logs(logs)
# Update last log time if we have logs
if logs["details"]:
try:
last_log_time = dateutil.parser.isoparse(
logs["details"][-1]["time"]
)
except Exception:
pass
# Check if there is a next_token

@@ -95,7 +120,73 @@ next_token = logs.get("next_token")

token = next_token
# If tail flag is not set, we're done
if not tail:
return
# Continue streaming logs
with Progress(
SpinnerColumn(),
TextColumn("[yellow]Tailing logs... Press Ctrl+C to exit[/yellow]"),
transient=False,
refresh_per_second=4,
) as progress:
progress_task = progress.add_task("", total=None)
while True:
try:
time.sleep(10) # Wait for 10 seconds before fetching new logs
# Update time_from to the last log time + 1 microsecond
if last_log_time:
# Add 1 microsecond to last log time
last_log_time_plus = last_log_time + timedelta(milliseconds=1)
payload["time_from"] = last_log_time_plus.strftime(
"%Y-%m-%dT%H:%M:%S.%fZ"
)
# Update time_to to current time
payload["time_to"] = datetime.now().strftime(
"%Y-%m-%dT%H:%M:%S.%fZ"
)
# Reset token for new streaming request
if "next_token" in payload:
del payload["next_token"]
logs = get_call_logs(payload)
if logs["details"]:
# Temporarily stop the progress bar while printing logs
progress.remove_task(progress_task)
# Reverse the logs before printing
logs["details"].reverse()
print_logs(logs)
try:
last_log_time = dateutil.parser.isoparse(
logs["details"][-1]["time"]
)
except Exception:
# Silently continue if timestamp parsing fails
pass
# Resume the progress bar
progress_task = progress.add_task("", total=None)
except KeyboardInterrupt:
progress.remove_task(progress_task)
rich.print("[yellow]Stopping log stream.[/yellow]")
return
except Exception as e:
progress.remove_task(progress_task)
error_msg = str(e) if str(e) else "Unknown error occurred"
rich.print(f"[red]Error fetching logs: {error_msg}[/red]")
progress_task = progress.add_task("", total=None)
time.sleep(10) # Wait before retrying
except Exception as e:
raise InferlessCLIError(f"[red]Error while fetching call logs: {e}[/red]")
error_msg = str(e) if str(e) else "Unknown error occurred"
raise InferlessCLIError(
f"[red]Error while fetching call logs: {error_msg}[/red]"
)
def handle_build_logs(import_logs, model_id):
def handle_build_logs(import_logs, model_id, tail=False):
try:

@@ -112,2 +203,5 @@ start_date = datetime.now() - timedelta(days=7)

token = None
last_log_time = None
# Initial log fetch
while True:

@@ -124,5 +218,18 @@ # Fetch logs based on the build_id and token

rich.print("\nNo Logs found\n")
if not tail:
return
# Reverse the logs before printing
logs["details"].reverse()
print_logs(logs)
# Update last log time if we have logs
if logs["details"]:
try:
last_log_time = dateutil.parser.isoparse(
logs["details"][-1]["time"]
)
except Exception:
pass
# Check if there is a next_token

@@ -136,4 +243,70 @@ next_token = logs.get("next_token")

token = next_token
# If tail flag is not set, we're done
if not tail:
return
# Continue streaming logs
with Progress(
SpinnerColumn(),
TextColumn("[yellow]Tailing logs... Press Ctrl+C to exit[/yellow]"),
transient=False,
refresh_per_second=4,
) as progress:
progress_task = progress.add_task("", total=None)
while True:
try:
time.sleep(10) # Wait for 10 seconds before fetching new logs
# Update time_from to the last log time + 1 microsecond
if last_log_time:
# Add 1 microsecond to last log time
last_log_time_plus = last_log_time + timedelta(milliseconds=1)
payload["time_from"] = last_log_time_plus.strftime(
"%Y-%m-%dT%H:%M:%S.%fZ"
)
# Update time_to to current time
payload["time_to"] = datetime.now().strftime(
"%Y-%m-%dT%H:%M:%S.%fZ"
)
# Reset token for new streaming request
if "next_token" in payload:
del payload["next_token"]
logs = get_build_logs(payload)
if logs["details"]:
# Temporarily stop the progress bar while printing logs
progress.remove_task(progress_task)
# Reverse the logs before printing
logs["details"].reverse()
print_logs(logs)
try:
last_log_time = dateutil.parser.isoparse(
logs["details"][-1]["time"]
)
except Exception:
# Silently continue if timestamp parsing fails
pass
# Resume the progress bar
progress_task = progress.add_task("", total=None)
except KeyboardInterrupt:
progress.remove_task(progress_task)
rich.print("[yellow]Stopping log stream.[/yellow]")
return
except Exception as e:
progress.remove_task(progress_task)
error_msg = str(e) if str(e) else "Unknown error occurred"
rich.print(f"[red]Error fetching logs: {error_msg}[/red]")
progress_task = progress.add_task("", total=None)
time.sleep(10) # Wait before retrying
except Exception as e:
raise InferlessCLIError(f"[red]Error while fetching build logs: {e}[/red]")
error_msg = str(e) if str(e) else "Unknown error occurred"
raise InferlessCLIError(
f"[red]Error while fetching build logs: {error_msg}[/red]"
)

@@ -147,4 +320,7 @@

except Exception as e:
timestamp = "-"
log_exception(e)
rich.print(f"[green]{timestamp}[/green]: {log_entry['log']}")
log_line = log_entry["log"]
rich.print(f"[green]{timestamp}[/green]: {log_line}")

@@ -35,2 +35,3 @@ # EXTERNAL PACKAGES

from inferless_cli.utils.helpers import (
analytics_shutdown,
log_exception,

@@ -63,2 +64,3 @@ sentry_init,

callback=sentry_init,
result_callback=analytics_shutdown,
)

@@ -194,5 +196,6 @@

),
tail: bool = typer.Option(False, "--tail", help="Stream logs continuously"),
):
callback_with_auth_validation()
log_prompt(model_id, logs_type, import_logs)
log_prompt(model_id, logs_type, import_logs, tail)

@@ -199,0 +202,0 @@

@@ -452,5 +452,13 @@ import json

except Exception as e:
print("Error in analytics_capture_event:", e)
log_exception(e)
def analytics_shutdown(executed_command_result, **kwargs):
try:
ANALYTICS.shutdown()
except Exception as e:
log_exception(e)
def check_pydantic(model_path):

@@ -457,0 +465,0 @@ try:

Metadata-Version: 2.1
Name: inferless-cli
Version: 2.0.16
Version: 2.0.17
Summary: Inferless - Deploy Machine Learning Models in Minutes.

@@ -5,0 +5,0 @@ Author: Naveen

[tool.poetry]
name = "inferless-cli"
version = "2.0.16"
version = "2.0.17"
description = "Inferless - Deploy Machine Learning Models in Minutes."

@@ -5,0 +5,0 @@ authors = ["Naveen <naveen@inferless.com>"]