Latest Threat Research: SANDWORM_MODE — Shai-Hulud-Style npm Worm Hijacks CI Workflows and Poisons AI Toolchains. Details
Socket
Book a DemoInstallSign in
Socket

fedops

Package Overview
Dependencies
Maintainers
1
Versions
56
Alerts
File Explorer

Advanced tools

Socket logo

Install Socket

Detect and block malicious and high-risk dependencies

Install

fedops - npm Package Compare versions

Comparing version
1.1.30.1
to
1.1.30.2
+1
-2
fedops.egg-info/PKG-INFO
Metadata-Version: 2.2
Name: fedops
Version: 1.1.30.1
Version: 1.1.30.2
Summary: FL Lifecycle Operations Management Platform

@@ -37,3 +37,2 @@ Home-page: https://github.com/gachon-CCLab/FedOps.git

Requires-Dist: grad-cam
Requires-Dist: optuna>=3.6
Dynamic: author

@@ -40,0 +39,0 @@ Dynamic: author-email

@@ -21,2 +21,1 @@ flwr>=1.0.0

grad-cam
optuna>=3.6

@@ -34,3 +34,2 @@ README.md

fedops/utils/fedco/best_keeper.py
fedops/utils/fedco/datasetting.py
fedops/utils/fedxai/__init__.py

@@ -37,0 +36,0 @@ fedops/utils/fedxai/gradcam.py

@@ -1,2 +0,1 @@

#client/app.py
import logging, json

@@ -3,0 +2,0 @@ import socket

@@ -1,2 +0,2 @@

#client/client_api.py
#client_api.py
import requests

@@ -3,0 +3,0 @@ import sys

@@ -1,2 +0,2 @@

#client/client_fl.py
#client_fl.py
from collections import OrderedDict

@@ -77,4 +77,5 @@ import json, logging

elif self.model_type in ["Pytorch"]:
keys = [k for k in self.model.state_dict().keys() if "bn" not in k] # Excluding parameters of BN layers
params_dict = zip(keys, parameters)
# keys = [k for k in self.model.state_dict().keys() if "bn" not in k] # Excluding parameters of BN layers
# params_dict = zip(keys, parameters)
params_dict = zip(self.model.state_dict().keys(), parameters)
state_dict = OrderedDict({k: torch.tensor(v) for k, v in params_dict})

@@ -94,3 +95,4 @@ # self.model.load_state_dict(state_dict, strict=True)

# Excluding parameters of BN layers
return [val.cpu().numpy() for name, val in self.model.state_dict().items() if "bn" not in name]
# return [val.cpu().numpy() for name, val in self.model.state_dict().items() if "bn" not in name]
return [val.cpu().numpy() for name, val in self.model.state_dict().items()]

@@ -97,0 +99,0 @@ elif self.model_type == "Huggingface":

@@ -1,2 +0,1 @@

# client/client_utils.py
import asyncio

@@ -3,0 +2,0 @@ import os

@@ -1,3 +0,1 @@

# client/client_wandb.py
import logging

@@ -4,0 +2,0 @@ import wandb

@@ -144,3 +144,4 @@ # server/app.py

best_nds = parameters_to_ndarrays(best_params)
keys = [k for k in model.state_dict().keys() if "bn" not in k]
# keys = [k for k in model.state_dict().keys() if "bn" not in k]
keys = model.state_dict().keys()
state_dict = OrderedDict({k: torch.tensor(v) for k, v in zip(keys, best_nds)})

@@ -201,4 +202,5 @@ model.load_state_dict(state_dict, strict=True)

import torch
keys = [k for k in model.state_dict().keys() if "bn" not in k]
params_dict = zip(keys, parameters_ndarrays)
# keys = [k for k in model.state_dict().keys() if "bn" not in k]
# params_dict = zip(keys, parameters_ndarrays)
params_dict = zip(model.state_dict().keys(), parameters_ndarrays)
state_dict = OrderedDict({k: torch.tensor(v) for k, v in params_dict})

@@ -338,3 +340,3 @@ model.load_state_dict(state_dict, strict=True)

except Exception as e:
logging.error('error: ', e)
logging.error("error: %s", e)
data_inform = {'FLSeReady': False}

@@ -341,0 +343,0 @@ server_api.ServerAPI(self.task_id).put_server_status(json.dumps(data_inform))

@@ -21,3 +21,2 @@ # Copyright 2020 Adap GmbH. All Rights Reserved.

"best_keeper"
"datasetting"
]

@@ -1,2 +0,2 @@

# utils/fedco/best_keeper.py
# server/best_keeper.py
import os, json, torch

@@ -3,0 +3,0 @@ from flwr.common import parameters_to_ndarrays, ndarrays_to_parameters

Metadata-Version: 2.2
Name: fedops
Version: 1.1.30.1
Version: 1.1.30.2
Summary: FL Lifecycle Operations Management Platform

@@ -37,3 +37,2 @@ Home-page: https://github.com/gachon-CCLab/FedOps.git

Requires-Dist: grad-cam
Requires-Dist: optuna>=3.6
Dynamic: author

@@ -40,0 +39,0 @@ Dynamic: author-email

@@ -7,3 +7,3 @@ from setuptools import setup, find_packages

name='fedops',
version='1.1.30.1',
version='1.1.30.2',
author='Semo Yang',

@@ -37,3 +37,2 @@ author_email='tpah20@gmail.com',

'grad-cam',
'optuna >= 3.6'
],

@@ -50,7 +49,2 @@ classifiers=[

],
)
)
# fedops/utils/fedco/datasetting.py
from __future__ import annotations
from typing import Dict, List, Tuple
import numpy as np
# -------- Public API --------
def build_parts(
    targets: np.ndarray,
    num_clients: int,
    mode_str: str,
    seed: int = 42,
) -> List[List[int]]:
    """Partition sample indices across clients according to *mode_str*.

    Args:
        targets: 1D numpy array of integer class labels (length = N samples).
        num_clients: how many clients to split into (>= 1).
        mode_str: partitioning spec — one of
            - "iid"
            - "dirichlet:<alpha>"   e.g., "dirichlet:0.3"
            - "label_skew:<k>"      e.g., "label_skew:2"
            - "qty_skew:beta<b>"    e.g., "qty_skew:beta0.5"
        seed: RNG seed so the split is reproducible.

    Returns:
        A list of length ``num_clients``; each entry is the list of sample
        indices assigned to that client.

    Raises:
        ValueError: if ``num_clients < 1``, ``targets`` is not 1D, or the
            parsed mode is not recognized.
    """
    if num_clients < 1:
        raise ValueError("num_clients must be >= 1")
    if targets.ndim != 1:
        raise ValueError("targets must be a 1D array of integer labels")

    # Parse first so malformed specs fail even for the trivial 1-client case.
    mode, params = _parse_mode(mode_str)

    # One client (or explicit "iid") degenerates to a shuffled even split.
    if num_clients == 1 or mode == "iid":
        gen = np.random.default_rng(seed)
        all_indices = np.arange(len(targets))
        gen.shuffle(all_indices)
        chunks = np.array_split(all_indices, num_clients)
        return [chunk.tolist() for chunk in chunks]

    if mode == "dirichlet":
        return _partition_dirichlet(targets, num_clients, float(params["alpha"]), seed)
    if mode == "label_skew":
        return _partition_label_skew(targets, num_clients, int(params["n_labels"]), seed)
    if mode == "qty_skew":
        return _partition_quantity_skew(targets, num_clients, float(params["beta"]), seed)

    raise ValueError(f"Unsupported mode: {mode_str}")
# -------- Mode parsing --------
def _parse_mode(s: str) -> Tuple[str, Dict]:
"""
Parse mode string into (mode, params) dict.
"""
if not s:
return "iid", {}
s = s.strip().lower()
if s == "iid":
return "iid", {}
if s.startswith("dirichlet:"):
try:
alpha = float(s.split(":", 1)[1])
except Exception as e:
raise ValueError(f"Invalid dirichlet spec: {s}") from e
return "dirichlet", {"alpha": alpha}
if s.startswith("label_skew:"):
try:
n = int(s.split(":", 1)[1])
except Exception as e:
raise ValueError(f"Invalid label_skew spec: {s}") from e
return "label_skew", {"n_labels": n}
if s.startswith("qty_skew:beta"):
try:
beta = float(s.split("beta", 1)[1])
except Exception as e:
raise ValueError(f"Invalid qty_skew spec: {s}") from e
return "qty_skew", {"beta": beta}
# Fallback
return "iid", {}
# -------- Partition strategies --------
def _partition_dirichlet(
targets: np.ndarray,
num_clients: int,
alpha: float,
seed: int,
) -> List[List[int]]:
"""
Class-wise Dirichlet sampling over clients; lower alpha => more skew.
"""
if alpha <= 0:
raise ValueError("alpha must be > 0 for dirichlet")
rng = np.random.default_rng(seed)
n_classes = int(targets.max()) + 1
idx_by_class = [np.where(targets == c)[0] for c in range(n_classes)]
for arr in idx_by_class:
rng.shuffle(arr)
parts: List[List[int]] = [[] for _ in range(num_clients)]
for idxs in idx_by_class:
if len(idxs) == 0:
continue
p = rng.dirichlet([alpha] * num_clients)
counts = (p * len(idxs)).astype(int)
# Adjust rounding so sum(counts) == len(idxs)
while counts.sum() < len(idxs):
counts[int(np.argmax(p))] += 1
while counts.sum() > len(idxs):
counts[int(np.argmax(counts))] -= 1
start = 0
for k in range(num_clients):
take = int(counts[k])
if take > 0:
parts[k].extend(idxs[start : start + take].tolist())
start += take
for k in range(num_clients):
rng.shuffle(parts[k])
return parts
def _partition_label_skew(
targets: np.ndarray,
num_clients: int,
n_labels: int,
seed: int,
) -> List[List[int]]:
"""
Each client gets samples from only n_labels distinct classes (hard label skew).
"""
if n_labels < 1:
raise ValueError("n_labels must be >= 1")
rng = np.random.default_rng(seed)
n_classes = int(targets.max()) + 1
idx_by_class = [np.where(targets == c)[0] for c in range(n_classes)]
for arr in idx_by_class:
rng.shuffle(arr)
parts: List[List[int]] = [[] for _ in range(num_clients)]
# Round-robin class assignment (with wrap-around)
perm = rng.permutation(n_classes)
assigned = []
for k in range(num_clients):
start = (k * n_labels) % n_classes
assigned.append(set(perm[start : start + n_labels]))
# Distribute each class to the clients that were assigned that class
for c in range(n_classes):
candidates = [k for k in range(num_clients) if c in assigned[k]]
if not candidates:
candidates = [int(rng.integers(0, num_clients))]
splits = np.array_split(idx_by_class[c], len(candidates))
for k, chunk in zip(candidates, splits):
parts[k].extend(chunk.tolist())
for k in range(num_clients):
rng.shuffle(parts[k])
return parts
def _partition_quantity_skew(
targets: np.ndarray,
num_clients: int,
beta: float,
seed: int,
) -> List[List[int]]:
"""
Vary only the number of samples per client (no class preference).
Lower beta => more variance in quantities.
"""
if beta <= 0:
raise ValueError("beta must be > 0 for qty_skew")
rng = np.random.default_rng(seed)
idxs = np.arange(len(targets))
rng.shuffle(idxs)
p = rng.dirichlet([beta] * num_clients)
counts = (p * len(idxs)).astype(int)
# Adjust rounding
while counts.sum() < len(idxs):
counts[int(np.argmax(p))] += 1
while counts.sum() > len(idxs):
counts[int(np.argmax(counts))] -= 1
parts: List[List[int]] = []
start = 0
for k in range(num_clients):
take = int(counts[k])
parts.append(idxs[start : start + take].tolist())
start += take
return parts