New Research: Supply Chain Attack on Axios Pulls Malicious Dependency from npm. Details →
Socket
Book a DemoSign in
Socket

executorlib

Package Overview
Dependencies
Maintainers
2
Versions
45
Alerts
File Explorer

Advanced tools

Socket logo

Install Socket

Detect and block malicious and high-risk dependencies

Install

executorlib - pypi Package Compare versions

Comparing version
1.7.4
to
1.8.0
+3
-3
PKG-INFO
Metadata-Version: 2.4
Name: executorlib
Version: 1.7.4
Version: 1.8.0
Summary: Up-scale python functions for high performance computing (HPC) with executorlib.

@@ -58,3 +58,3 @@ Project-URL: Homepage, https://github.com/pyiron/executorlib

Requires-Dist: pygraphviz<=1.14,>=1.10; extra == 'all'
Requires-Dist: pysqa==0.3.3; extra == 'all'
Requires-Dist: pysqa==0.3.4; extra == 'all'
Provides-Extra: cache

@@ -64,3 +64,3 @@ Requires-Dist: h5py<=3.15.1,>=3.6.0; extra == 'cache'

Requires-Dist: h5py<=3.15.1,>=3.6.0; extra == 'cluster'
Requires-Dist: pysqa==0.3.3; extra == 'cluster'
Requires-Dist: pysqa==0.3.4; extra == 'cluster'
Provides-Extra: graph

@@ -67,0 +67,0 @@ Requires-Dist: networkx<=3.6.1,>=2.8.8; extra == 'graph'

@@ -55,3 +55,3 @@ [build-system]

cluster = [
"pysqa==0.3.3",
"pysqa==0.3.4",
"h5py>=3.6.0,<=3.15.1",

@@ -61,3 +61,3 @@ ]

"mpi4py>=3.1.4,<=4.1.1",
"pysqa==0.3.3",
"pysqa==0.3.4",
"h5py>=3.6.0,<=3.15.1",

@@ -64,0 +64,0 @@ "pygraphviz>=1.10,<=1.14",

@@ -31,5 +31,5 @@ # file generated by setuptools-scm

__version__ = version = '1.7.4'
__version_tuple__ = version_tuple = (1, 7, 4)
__version__ = version = '1.8.0'
__version_tuple__ = version_tuple = (1, 8, 0)
__commit_id__ = commit_id = None

@@ -12,2 +12,3 @@ from typing import Callable, Optional, Union

check_refresh_rate,
check_wait_on_shutdown,
validate_number_of_cores,

@@ -71,2 +72,3 @@ )

log_obj_size (bool): Enable debug mode which reports the size of the communicated objects.
wait (bool): Whether to wait for the completion of all tasks before shutting down the executor.

@@ -113,2 +115,3 @@ Examples:

log_obj_size: bool = False,
wait: bool = True,
):

@@ -162,2 +165,3 @@ """

log_obj_size (bool): Enable debug mode which reports the size of the communicated objects.
wait (bool): Whether to wait for the completion of all tasks before shutting down the executor.

@@ -194,2 +198,3 @@ """

log_obj_size=log_obj_size,
wait=wait,
),

@@ -220,2 +225,3 @@ max_cores=max_cores,

log_obj_size=log_obj_size,
wait=wait,
)

@@ -270,2 +276,3 @@ )

log_obj_size (bool): Enable debug mode which reports the size of the communicated objects.
wait (bool): Whether to wait for the completion of all tasks before shutting down the executor.

@@ -310,2 +317,3 @@ Examples:

log_obj_size: bool = False,
wait: bool = True,
):

@@ -357,2 +365,3 @@ """

log_obj_size (bool): Enable debug mode which reports the size of the communicated objects.
wait (bool): Whether to wait for the completion of all tasks before shutting down the executor.

@@ -417,2 +426,3 @@ """

disable_dependencies=disable_dependencies,
wait=wait,
)

@@ -458,2 +468,3 @@ )

log_obj_size: bool = False,
wait: bool = True,
) -> Union[OneProcessTaskScheduler, BlockAllocationTaskScheduler]:

@@ -497,2 +508,3 @@ """

log_obj_size (bool): Enable debug mode which reports the size of the communicated objects.
wait (bool): Whether to wait for the completion of all tasks before shutting down the executor.

@@ -519,2 +531,3 @@ Returns:

)
check_wait_on_shutdown(wait_on_shutdown=wait)
if "openmpi_oversubscribe" in resource_dict:

@@ -521,0 +534,0 @@ del resource_dict["openmpi_oversubscribe"]

@@ -10,2 +10,3 @@ from typing import Callable, Optional, Union

check_refresh_rate,
check_wait_on_shutdown,
validate_number_of_cores,

@@ -64,2 +65,3 @@ )

log_obj_size (bool): Enable debug mode which reports the size of the communicated objects.
wait (bool): Whether to wait for the completion of all tasks before shutting down the executor.

@@ -102,2 +104,3 @@ Examples:

log_obj_size: bool = False,
wait: bool = True,
):

@@ -148,2 +151,3 @@ """

log_obj_size (bool): Enable debug mode which reports the size of the communicated objects.
wait (bool): Whether to wait for the completion of all tasks before shutting down the executor.

@@ -176,2 +180,3 @@ """

log_obj_size=log_obj_size,
wait=wait,
),

@@ -198,2 +203,3 @@ max_cores=max_cores,

log_obj_size=log_obj_size,
wait=wait,
)

@@ -241,2 +247,3 @@ )

log_obj_size (bool): Enable debug mode which reports the size of the communicated objects.
wait (bool): Whether to wait for the completion of all tasks before shutting down the executor.

@@ -279,2 +286,3 @@ Examples:

log_obj_size: bool = False,
wait: bool = True,
):

@@ -318,2 +326,3 @@ """

log_obj_size (bool): Enable debug mode which reports the size of the communicated objects.
wait (bool): Whether to wait for the completion of all tasks before shutting down the executor.

@@ -358,2 +367,3 @@ """

execute_function=execute_in_subprocess,
wait=wait,
)

@@ -392,2 +402,3 @@ )

log_obj_size: bool = False,
wait: bool = True,
) -> Union[OneProcessTaskScheduler, BlockAllocationTaskScheduler]:

@@ -427,2 +438,3 @@ """

log_obj_size (bool): Enable debug mode which reports the size of the communicated objects.
wait (bool): Whether to wait for the completion of all tasks before shutting down the executor.

@@ -444,2 +456,3 @@ Returns:

)
check_wait_on_shutdown(wait_on_shutdown=wait)
if "threads_per_core" in resource_dict:

@@ -446,0 +459,0 @@ del resource_dict["threads_per_core"]

@@ -9,2 +9,3 @@ from typing import Callable, Optional, Union

check_refresh_rate,
check_wait_on_shutdown,
validate_number_of_cores,

@@ -69,2 +70,3 @@ )

log_obj_size (bool): Enable debug mode which reports the size of the communicated objects.
wait (bool): Whether to wait for the completion of all tasks before shutting down the executor.

@@ -109,2 +111,3 @@ Examples:

log_obj_size: bool = False,
wait: bool = True,
):

@@ -156,2 +159,3 @@ """

log_obj_size (bool): Enable debug mode which reports the size of the communicated objects.
wait (bool): Whether to wait for the completion of all tasks before shutting down the executor.

@@ -217,2 +221,3 @@ """

disable_dependencies=disable_dependencies,
wait=wait,
)

@@ -289,2 +294,3 @@ )

log_obj_size (bool): Enable debug mode which reports the size of the communicated objects.
wait (bool): Whether to wait for the completion of all tasks before shutting down the executor.

@@ -328,2 +334,3 @@ Examples:

log_obj_size: bool = False,
wait: bool = True,
):

@@ -378,2 +385,3 @@ """

log_obj_size (bool): Enable debug mode which reports the size of the communicated objects.
wait (bool): Whether to wait for the completion of all tasks before shutting down the executor.

@@ -407,2 +415,3 @@ """

log_obj_size=log_obj_size,
wait=wait,
),

@@ -430,2 +439,3 @@ max_cores=max_cores,

log_obj_size=log_obj_size,
wait=wait,
)

@@ -445,2 +455,3 @@ )

log_obj_size: bool = False,
wait: bool = True,
) -> Union[OneProcessTaskScheduler, BlockAllocationTaskScheduler]:

@@ -485,2 +496,3 @@ """

log_obj_size (bool): Enable debug mode which reports the size of the communicated objects.
wait (bool): Whether to wait for the completion of all tasks before shutting down the executor.

@@ -498,2 +510,3 @@ Returns:

check_init_function(block_allocation=block_allocation, init_function=init_function)
check_wait_on_shutdown(wait_on_shutdown=wait)
if block_allocation:

@@ -500,0 +513,0 @@ resource_dict["init_function"] = init_function

@@ -14,2 +14,3 @@ import os

"error": "error",
"resource_dict": "resource_dict",
"runtime": "runtime",

@@ -65,2 +66,8 @@ "queue_id": "queue_id",

data_dict["kwargs"] = {}
if "resource_dict" in hdf:
data_dict["resource_dict"] = cloudpickle.loads(
np.void(hdf["/resource_dict"])
)
else:
data_dict["resource_dict"] = {}
if "error_log_file" in hdf:

@@ -67,0 +74,0 @@ data_dict["error_log_file"] = cloudpickle.loads(

@@ -20,2 +20,14 @@ import inspect

def check_wait_on_shutdown(
    wait_on_shutdown: bool,
) -> None:
    """
    Validate the wait_on_shutdown flag.

    Raises:
        ValueError: If wait_on_shutdown is False, because disabling the wait
            on shutdown is only supported by the cluster executors.
    """
    if wait_on_shutdown:
        return
    raise ValueError(
        "The wait_on_shutdown parameter is only supported for the executorlib.FluxClusterExecutor and executorlib.SlurmClusterExecutor."
    )
def check_command_line_argument_lst(command_line_argument_lst: list[str]) -> None:

@@ -22,0 +34,0 @@ """

@@ -14,2 +14,3 @@ import os

cores: int = 1,
worker_id: int = 0,
openmpi_oversubscribe: bool = False,

@@ -24,5 +25,7 @@ ):

openmpi_oversubscribe (bool, optional): Whether to oversubscribe the cores. Defaults to False.
worker_id (int): The worker ID. Defaults to 0.
"""
self._cwd = cwd
self._cores = cores
self._worker_id = worker_id
self._openmpi_oversubscribe = openmpi_oversubscribe

@@ -74,2 +77,3 @@

cores: int = 1,
worker_id: int = 0,
openmpi_oversubscribe: bool = False,

@@ -85,2 +89,3 @@ threads_per_core: int = 1,

threads_per_core (int, optional): The number of threads per core. Defaults to 1.
worker_id (int): The worker ID. Defaults to 0.
openmpi_oversubscribe (bool, optional): Whether to oversubscribe the cores. Defaults to False.

@@ -91,2 +96,3 @@ """

cores=cores,
worker_id=worker_id,
openmpi_oversubscribe=openmpi_oversubscribe,

@@ -93,0 +99,0 @@ )

@@ -201,3 +201,5 @@ import contextlib

if self._process is not None and self._future_queue is not None:
self._future_queue.put({"shutdown": True, "wait": wait})
self._future_queue.put(
{"shutdown": True, "wait": wait, "cancel_futures": cancel_futures}
)
if wait and isinstance(self._process, Thread):

@@ -204,0 +206,0 @@ self._process.join()

@@ -60,2 +60,3 @@ import contextlib

pmi_mode: Optional[str] = None,
wait: bool = True,
) -> None:

@@ -76,2 +77,3 @@ """

pmi_mode (str): PMI interface to use (OpenMPI v5 requires pmix) default is None (Flux only)
wait (bool): Whether to wait for the completion of all tasks before shutting down the executor.

@@ -91,3 +93,3 @@ Returns:

if task_dict is not None and "shutdown" in task_dict and task_dict["shutdown"]:
if task_dict["wait"]:
if task_dict["wait"] and wait:
while len(memory_dict) > 0:

@@ -103,15 +105,29 @@ memory_dict = {

}
if (
terminate_function is not None
and terminate_function == terminate_subprocess
):
for task in process_dict.values():
terminate_function(task=task)
elif terminate_function is not None:
for queue_id in process_dict.values():
terminate_function(
queue_id=queue_id,
config_directory=pysqa_config_directory,
backend=backend,
if not task_dict["cancel_futures"] and wait:
if (
terminate_function is not None
and terminate_function == terminate_subprocess
):
for task in process_dict.values():
terminate_function(task=task)
elif terminate_function is not None:
for queue_id in process_dict.values():
terminate_function(
queue_id=queue_id,
config_directory=pysqa_config_directory,
backend=backend,
)
else:
memory_dict = {
key: _check_task_output(
task_key=key,
future_obj=value,
cache_directory=cache_dir_dict[key],
)
for key, value in memory_dict.items()
if not value.done()
}
for value in memory_dict.values():
if not value.done():
value.cancel()
future_queue.task_done()

@@ -118,0 +134,0 @@ future_queue.join()

@@ -38,2 +38,3 @@ from threading import Thread

pmi_mode: Optional[str] = None,
wait: bool = True,
):

@@ -54,2 +55,3 @@ """

pmi_mode (str): PMI interface to use (OpenMPI v5 requires pmix) default is None
wait (bool): Whether to wait for the completion of all tasks before shutting down the executor.
"""

@@ -78,2 +80,3 @@ super().__init__(max_cores=None)

"pmi_mode": pmi_mode,
"wait": wait,
}

@@ -104,2 +107,3 @@ self._set_process(

execute_function: Callable = execute_with_pysqa,
wait: bool = True,
):

@@ -135,2 +139,3 @@ if block_allocation:

pmi_mode=pmi_mode,
wait=wait,
)

@@ -211,3 +211,3 @@ import queue

error_log_file: Optional[str] = None,
worker_id: Optional[int] = None,
worker_id: int = 0,
stop_function: Optional[Callable] = None,

@@ -248,3 +248,3 @@ restart_limit: int = 0,

),
connections=spawner(cores=cores, **kwargs),
connections=spawner(cores=cores, worker_id=worker_id, **kwargs),
hostname_localhost=hostname_localhost,

@@ -251,0 +251,0 @@ log_obj_size=log_obj_size,

@@ -8,3 +8,2 @@ import inspect

import cloudpickle
import numpy as np

@@ -223,3 +222,7 @@ from executorlib.standalone.select import FutureSelector

if node["type"] == "input":
graph.add_node(node["id"], label=str(node["value"]), shape=node["shape"])
graph.add_node(
node["id"],
label=_short_object_name(node=node["value"]),
shape=node["shape"],
)
else:

@@ -250,2 +253,4 @@ graph.add_node(node["id"], label=str(node["name"]), shape=node["shape"])

"""
import numpy as np
pwd_nodes_lst = []

@@ -311,1 +316,29 @@ for n in node_lst:

json.dump(pwd_dict, f, indent=4)
def _short_object_name(node):
node_value_str = str(node)
if isinstance(node, tuple):
short_name = str(tuple(_short_object_name(node=el) for el in node))
elif isinstance(node, list):
short_name = str([_short_object_name(node=el) for el in node])
elif isinstance(node, dict):
short_name = str(
{
_short_object_name(node=key): _short_object_name(node=value)
for key, value in node.items()
}
)
elif "object at" in node_value_str:
short_name = node_value_str[1:-1].split()[0].split(".")[-1] + "()"
elif "<function" in node_value_str:
short_name = node_value_str.split()[1] + "()"
elif "\n" in node_value_str:
short_name = str(type(node)).split("'")[1].split(".")[-1] + "()"
elif "(" in node_value_str and ")" in node_value_str:
short_name = node_value_str.split("(")[0] + "()"
elif len(node_value_str) > 20:
short_name = node_value_str[:21] + "..."
else:
short_name = node_value_str
return short_name

@@ -34,2 +34,3 @@ import contextlib

num_nodes (int, optional): The number of compute nodes to use for executing the task. Defaults to None.
worker_id (int): The worker ID. Defaults to 0.
exclusive (bool): Whether to exclusively reserve the compute nodes, or allow sharing compute notes. Defaults to

@@ -53,2 +54,3 @@ False.

num_nodes: Optional[int] = None,
worker_id: int = 0,
exclusive: bool = False,

@@ -65,2 +67,3 @@ priority: Optional[int] = None,

cores=cores,
worker_id=worker_id,
openmpi_oversubscribe=openmpi_oversubscribe,

@@ -127,8 +130,9 @@ )

os.makedirs(self._cwd, exist_ok=True)
file_prefix = "flux_" + str(self._worker_id)
if self._flux_log_files and self._cwd is not None:
jobspec.stderr = os.path.join(self._cwd, "flux.err")
jobspec.stdout = os.path.join(self._cwd, "flux.out")
jobspec.stderr = os.path.join(self._cwd, file_prefix + ".err")
jobspec.stdout = os.path.join(self._cwd, file_prefix + ".out")
elif self._flux_log_files:
jobspec.stderr = os.path.abspath("flux.err")
jobspec.stdout = os.path.abspath("flux.out")
jobspec.stderr = os.path.abspath(file_prefix + ".err")
jobspec.stdout = os.path.abspath(file_prefix + ".out")
if self._priority is not None:

@@ -135,0 +139,0 @@ self._future = self._flux_executor.submit(

@@ -24,2 +24,3 @@ import hashlib

num_nodes: Optional[int] = None,
worker_id: int = 0,
exclusive: bool = False,

@@ -42,2 +43,3 @@ openmpi_oversubscribe: bool = False,

num_nodes (int, optional): The number of compute nodes to use for executing the task. Defaults to None.
worker_id (int): The worker ID. Defaults to 0.
exclusive (bool): Whether to exclusively reserve the compute nodes, or allow sharing compute notes. Defaults

@@ -54,2 +56,3 @@ to False.

cores=cores,
worker_id=worker_id,
openmpi_oversubscribe=openmpi_oversubscribe,

@@ -56,0 +59,0 @@ )

@@ -30,2 +30,3 @@ import os

num_nodes: Optional[int] = None,
worker_id: int = 0,
exclusive: bool = False,

@@ -45,2 +46,3 @@ openmpi_oversubscribe: bool = False,

num_nodes (int, optional): The number of compute nodes to use for executing the task. Defaults to None.
worker_id (int): The worker ID. Defaults to 0.
exclusive (bool): Whether to exclusively reserve the compute nodes, or allow sharing compute notes. Defaults to False.

@@ -54,2 +56,3 @@ openmpi_oversubscribe (bool, optional): Whether to oversubscribe the cores. Defaults to False.

cores=cores,
worker_id=worker_id,
openmpi_oversubscribe=openmpi_oversubscribe,

@@ -56,0 +59,0 @@ threads_per_core=threads_per_core,