@vercel/python
Advanced tools
+1
-1
| { | ||
| "name": "@vercel/python", | ||
| "version": "5.0.8", | ||
| "version": "5.0.9", | ||
| "main": "./dist/index.js", | ||
@@ -5,0 +5,0 @@ "license": "Apache-2.0", |
+105
-231
@@ -1,2 +0,1 @@ | ||
| from __future__ import annotations | ||
| import sys | ||
@@ -9,15 +8,5 @@ import os | ||
| import inspect | ||
| import threading | ||
| import asyncio | ||
| import http | ||
| import time | ||
| from importlib import util | ||
| from http.server import BaseHTTPRequestHandler, ThreadingHTTPServer | ||
| from http.server import BaseHTTPRequestHandler | ||
| import socket | ||
| import functools | ||
| import logging | ||
| import builtins | ||
| from typing import Callable, Literal | ||
| import contextvars | ||
| import io | ||
@@ -66,65 +55,68 @@ _here = os.path.dirname(__file__) | ||
| # Custom logging handler so logs are properly categorized | ||
| class VCLogHandler(logging.Handler): | ||
| def __init__(self, send_message: Callable[[dict], None], context_getter: Callable[[], dict] | None = None): | ||
| super().__init__() | ||
| self._send_message = send_message | ||
| self._context_getter = context_getter | ||
| if 'VERCEL_IPC_PATH' in os.environ: | ||
| from http.server import ThreadingHTTPServer | ||
| import http | ||
| import time | ||
| import contextvars | ||
| import functools | ||
| import builtins | ||
| import logging | ||
| def emit(self, record): | ||
| try: | ||
| message = record.getMessage() | ||
| except Exception: | ||
| try: | ||
| message = f"{record.msg}" | ||
| except Exception: | ||
| message = "" | ||
| start_time = time.time() | ||
| sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM) | ||
| sock.connect(os.getenv("VERCEL_IPC_PATH", "")) | ||
| if record.levelno >= logging.CRITICAL: | ||
| level = "fatal" | ||
| elif record.levelno >= logging.ERROR: | ||
| level = "error" | ||
| elif record.levelno >= logging.WARNING: | ||
| level = "warn" | ||
| elif record.levelno >= logging.INFO: | ||
| level = "info" | ||
| else: | ||
| level = "debug" | ||
| send_message = lambda message: sock.sendall((json.dumps(message) + '\0').encode()) | ||
| storage = contextvars.ContextVar('storage', default=None) | ||
| ctx = None | ||
| try: | ||
| ctx = self._context_getter() if self._context_getter is not None else None | ||
| except Exception: | ||
| ctx = None | ||
| # Override urlopen from urllib3 (& requests) to send Request Metrics | ||
| try: | ||
| import urllib3 | ||
| from urllib.parse import urlparse | ||
| if ctx is not None: | ||
| try: | ||
| self._send_message({ | ||
| "type": "log", | ||
| "payload": { | ||
| "context": { | ||
| "invocationId": ctx['invocationId'], | ||
| "requestId": ctx['requestId'], | ||
| }, | ||
| "message": base64.b64encode(message.encode()).decode(), | ||
| "level": level, | ||
| } | ||
| }) | ||
| except Exception: | ||
| pass | ||
| else: | ||
| try: | ||
| sys.stdout.write(message + "\n") | ||
| except Exception: | ||
| pass | ||
| def timed_request(func): | ||
| fetchId = 0 | ||
| @functools.wraps(func) | ||
| def wrapper(self, method, url, *args, **kwargs): | ||
| nonlocal fetchId | ||
| fetchId += 1 | ||
| start_time = int(time.time() * 1000) | ||
| result = func(self, method, url, *args, **kwargs) | ||
| elapsed_time = int(time.time() * 1000) - start_time | ||
| parsed_url = urlparse(url) | ||
| context = storage.get() | ||
| if context is not None: | ||
| send_message({ | ||
| "type": "metric", | ||
| "payload": { | ||
| "context": { | ||
| "invocationId": context['invocationId'], | ||
| "requestId": context['requestId'], | ||
| }, | ||
| "type": "fetch-metric", | ||
| "payload": { | ||
| "pathname": parsed_url.path, | ||
| "search": parsed_url.query, | ||
| "start": start_time, | ||
| "duration": elapsed_time, | ||
| "host": parsed_url.hostname or self.host, | ||
| "statusCode": result.status, | ||
| "method": method, | ||
| "id": fetchId | ||
| } | ||
| } | ||
| }) | ||
| return result | ||
| return wrapper | ||
| urllib3.connectionpool.HTTPConnectionPool.urlopen = timed_request(urllib3.connectionpool.HTTPConnectionPool.urlopen) | ||
| except: | ||
| pass | ||
| def setup_logging(send_message: Callable[[dict], None], storage: contextvars.ContextVar[dict | None]): | ||
| # Override sys.stdout and sys.stderr to map logs to the correct request | ||
| class StreamWrapper: | ||
| def __init__(self, stream: io.TextIOBase, stream_name: Literal["stdout", "stderr"]): | ||
| def __init__(self, stream, stream_name): | ||
| self.stream = stream | ||
| self.stream_name = stream_name | ||
| def write(self, message: str): | ||
| def write(self, message): | ||
| context = storage.get() | ||
@@ -152,11 +144,15 @@ if context is not None: | ||
| # Wrap top-level logging helpers to emit structured logs when a request | ||
| # context is available; otherwise fall back to the original behavior. | ||
| def logging_wrapper(func: Callable[..., None], level: str = "info") -> Callable[..., None]: | ||
| # Override the global print to log to stdout | ||
| def print_wrapper(func): | ||
| @functools.wraps(func) | ||
| def wrapper(*args, **kwargs): | ||
| try: | ||
| context = storage.get() | ||
| except Exception: | ||
| context = None | ||
| sys.stdout.write(' '.join(map(str, args)) + '\n') | ||
| return wrapper | ||
| builtins.print = print_wrapper(builtins.print) | ||
| # Override logging to map logs to the correct request | ||
| def logging_wrapper(func, level="info"): | ||
| @functools.wraps(func) | ||
| def wrapper(*args, **kwargs): | ||
| context = storage.get() | ||
| if context is not None: | ||
@@ -178,74 +174,9 @@ send_message({ | ||
| logging.basicConfig(level=logging.INFO, handlers=[VCLogHandler(send_message, storage.get)], force=True) | ||
| logging.debug = logging_wrapper(logging.debug, "debug") | ||
| logging.info = logging_wrapper(logging.info, "info") | ||
| logging.basicConfig(level=logging.INFO) | ||
| logging.debug = logging_wrapper(logging.debug) | ||
| logging.info = logging_wrapper(logging.info) | ||
| logging.warning = logging_wrapper(logging.warning, "warn") | ||
| logging.error = logging_wrapper(logging.error, "error") | ||
| logging.fatal = logging_wrapper(logging.fatal, "fatal") | ||
| logging.critical = logging_wrapper(logging.critical, "fatal") | ||
| logging.critical = logging_wrapper(logging.critical, "error") | ||
| # Ensure built-in print funnels through stdout wrapper so prints are | ||
| # attributed to the current request context. | ||
| def print_wrapper(func: Callable[..., None]) -> Callable[..., None]: | ||
| @functools.wraps(func) | ||
| def wrapper(*args, **kwargs): | ||
| sys.stdout.write(' '.join(map(str, args)) + '\n') | ||
| return wrapper | ||
| builtins.print = print_wrapper(builtins.print) | ||
| if 'VERCEL_IPC_PATH' in os.environ: | ||
| start_time = time.time() | ||
| sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM) | ||
| sock.connect(os.getenv("VERCEL_IPC_PATH", "")) | ||
| send_message = lambda message: sock.sendall((json.dumps(message) + '\0').encode()) | ||
| storage = contextvars.ContextVar('storage', default=None) | ||
| # Override urlopen from urllib3 (& requests) to send Request Metrics | ||
| try: | ||
| import urllib3 | ||
| from urllib.parse import urlparse | ||
| def timed_request(func): | ||
| fetchId = 0 | ||
| @functools.wraps(func) | ||
| def wrapper(self, method, url, *args, **kwargs): | ||
| nonlocal fetchId | ||
| fetchId += 1 | ||
| start_time = int(time.time() * 1000) | ||
| result = func(self, method, url, *args, **kwargs) | ||
| elapsed_time = int(time.time() * 1000) - start_time | ||
| parsed_url = urlparse(url) | ||
| context = storage.get() | ||
| if context is not None: | ||
| send_message({ | ||
| "type": "metric", | ||
| "payload": { | ||
| "context": { | ||
| "invocationId": context['invocationId'], | ||
| "requestId": context['requestId'], | ||
| }, | ||
| "type": "fetch-metric", | ||
| "payload": { | ||
| "pathname": parsed_url.path, | ||
| "search": parsed_url.query, | ||
| "start": start_time, | ||
| "duration": elapsed_time, | ||
| "host": parsed_url.hostname or self.host, | ||
| "statusCode": result.status, | ||
| "method": method, | ||
| "id": fetchId | ||
| } | ||
| } | ||
| }) | ||
| return result | ||
| return wrapper | ||
| urllib3.connectionpool.HTTPConnectionPool.urlopen = timed_request(urllib3.connectionpool.HTTPConnectionPool.urlopen) | ||
| except: | ||
| pass | ||
| setup_logging(send_message, storage) | ||
| class BaseHandler(BaseHTTPRequestHandler): | ||
@@ -332,3 +263,2 @@ # Re-implementation of BaseHTTPRequestHandler's log_message method to | ||
| elif 'app' in __vc_variables: | ||
| # WSGI | ||
| if ( | ||
@@ -399,6 +329,6 @@ not inspect.iscoroutinefunction(__vc_module.app) and | ||
| response.close() | ||
| # ASGI | ||
| else: | ||
| from urllib.parse import urlparse | ||
| from io import BytesIO | ||
| import asyncio | ||
@@ -418,3 +348,2 @@ app = __vc_module.app | ||
| headers_encoded.append([k.lower().encode(), v.encode()]) | ||
| scope = { | ||
@@ -442,93 +371,38 @@ 'server': (self.headers.get('host', 'lambda'), self.headers.get('x-forwarded-port', 80)), | ||
| # Event to signal that the response has been fully sent | ||
| response_done = threading.Event() | ||
| # Event to signal the ASGI app has fully completed (incl. background tasks) | ||
| app_done = threading.Event() | ||
| if _use_legacy_asyncio: | ||
| loop = asyncio.new_event_loop() | ||
| app_queue = asyncio.Queue(loop=loop) | ||
| else: | ||
| app_queue = asyncio.Queue() | ||
| app_queue.put_nowait({'type': 'http.request', 'body': body, 'more_body': False}) | ||
| # Propagate request context to background thread for logging & metrics | ||
| request_context = storage.get() | ||
| # Prepare ASGI receive function | ||
| async def receive(): | ||
| message = await app_queue.get() | ||
| return message | ||
| def run_asgi(): | ||
| # Ensure request context is available in this thread | ||
| if request_context is not None: | ||
| token = storage.set(request_context) | ||
| else: | ||
| token = None | ||
| # Track if headers were sent, so we can synthesize a 500 on early failure | ||
| response_started = False | ||
| try: | ||
| async def runner(): | ||
| # Per-request app queue | ||
| if _use_legacy_asyncio: | ||
| loop = asyncio.get_running_loop() | ||
| app_queue = asyncio.Queue(loop=loop) | ||
| else: | ||
| app_queue = asyncio.Queue() | ||
| # Prepare ASGI send function | ||
| response_started = False | ||
| async def send(event): | ||
| nonlocal response_started | ||
| if event['type'] == 'http.response.start': | ||
| self.send_response(event['status']) | ||
| if 'headers' in event: | ||
| for name, value in event['headers']: | ||
| self.send_header(name.decode(), value.decode()) | ||
| self.end_headers() | ||
| response_started = True | ||
| elif event['type'] == 'http.response.body': | ||
| self.wfile.write(event['body']) | ||
| if not event.get('more_body', False): | ||
| self.wfile.flush() | ||
| await app_queue.put({'type': 'http.request', 'body': body, 'more_body': False}) | ||
| # Run the ASGI application | ||
| asgi_instance = app(scope, receive, send) | ||
| if _use_legacy_asyncio: | ||
| asgi_task = loop.create_task(asgi_instance) | ||
| loop.run_until_complete(asgi_task) | ||
| else: | ||
| asyncio.run(asgi_instance) | ||
| async def receive(): | ||
| message = await app_queue.get() | ||
| return message | ||
| async def send(event): | ||
| nonlocal response_started | ||
| if event['type'] == 'http.response.start': | ||
| self.send_response(event['status']) | ||
| if 'headers' in event: | ||
| for name, value in event['headers']: | ||
| self.send_header(name.decode(), value.decode()) | ||
| self.end_headers() | ||
| response_started = True | ||
| elif event['type'] == 'http.response.body': | ||
| # Stream body as it is produced; flush on completion | ||
| body_bytes = event.get('body', b'') or b'' | ||
| if body_bytes: | ||
| self.wfile.write(body_bytes) | ||
| if not event.get('more_body', False): | ||
| try: | ||
| self.wfile.flush() | ||
| finally: | ||
| response_done.set() | ||
| try: | ||
| app_queue.put_nowait({'type': 'http.disconnect'}) | ||
| except Exception: | ||
| pass | ||
| # Run ASGI app (includes background tasks) | ||
| asgi_instance = app(scope, receive, send) | ||
| await asgi_instance | ||
| # Mark app completion when the ASGI callable returns | ||
| app_done.set() | ||
| asyncio.run(runner()) | ||
| except Exception: | ||
| # If the app raised before starting the response, synthesize a 500 | ||
| try: | ||
| if not response_started: | ||
| self.send_response(500) | ||
| self.end_headers() | ||
| try: | ||
| self.wfile.flush() | ||
| except Exception: | ||
| pass | ||
| except Exception: | ||
| pass | ||
| finally: | ||
| # Always unblock the waiting thread to avoid hangs | ||
| response_done.set() | ||
| # Ensure app completion is always signaled | ||
| app_done.set() | ||
| if token is not None: | ||
| storage.reset(token) | ||
| # Run ASGI in background thread to allow returning after final flush | ||
| t = threading.Thread(target=run_asgi, daemon=True) | ||
| t.start() | ||
| # Wait until final body chunk has been flushed to client | ||
| response_done.wait() | ||
| # Also wait until the ASGI app finishes (includes background tasks) | ||
| app_done.wait() | ||
| if 'Handler' in locals(): | ||
@@ -535,0 +409,0 @@ server = ThreadingHTTPServer(('127.0.0.1', 0), Handler) |
Shell access
Supply chain risk: This module accesses the system shell. Accessing the system shell increases the risk of executing arbitrary code.
Found 1 instance in 1 package
Dynamic require
Supply chain risk: Dynamic require can indicate the package is performing dangerous or unsafe dynamic code execution.
Found 1 instance in 1 package
Environment variable access
Supply chain risk: Package accesses environment variables, which may be a sign of credential stuffing or data theft.
Found 9 instances in 1 package
Filesystem access
Supply chain risk: Accesses the file system, and could potentially read sensitive data.
Found 1 instance in 1 package
Long strings
Supply chain risk: Contains long string literals, which may be a sign of obfuscated or packed code.
Found 1 instance in 1 package
Shell access
Supply chain risk: This module accesses the system shell. Accessing the system shell increases the risk of executing arbitrary code.
Found 1 instance in 1 package
Dynamic require
Supply chain risk: Dynamic require can indicate the package is performing dangerous or unsafe dynamic code execution.
Found 1 instance in 1 package
Environment variable access
Supply chain risk: Package accesses environment variables, which may be a sign of credential stuffing or data theft.
Found 9 instances in 1 package
Filesystem access
Supply chain risk: Accesses the file system, and could potentially read sensitive data.
Found 1 instance in 1 package
AI-detected potential code anomaly
Supply chain risk: AI has identified unusual behaviors that may pose a security risk.
Found 1 instance in 1 package
Long strings
Supply chain risk: Contains long string literals, which may be a sign of obfuscated or packed code.
Found 1 instance in 1 package
113
-0.88%308341
-1.83%8604
-1.27%