fedops - PyPI Package Compare versions

Comparing version 1.1.30.4 to 1.1.30.5 (+2 / -2)
fedops.egg-info/PKG-INFO

@@ -1,4 +1,4 @@

-Metadata-Version: 2.4
+Metadata-Version: 2.2
Name: fedops
-Version: 1.1.30.4
+Version: 1.1.30.5
Summary: FL Lifecycle Operations Management Platform

@@ -5,0 +5,0 @@ Home-page: https://github.com/gachon-CCLab/FedOps.git

@@ -124,5 +124,9 @@ #client/client_fl.py

-# Get hyperparameters for this round
-batch_size: int = config.get("batch_size", self.cfg.batch_size)
-epochs: int = config.get("local_epochs", self.cfg.num_epochs)
-num_rounds: int = config.get("num_rounds", self.cfg.num_rounds)
+# batch_size: int = config.get("batch_size", self.cfg.batch_size)
+# epochs: int = config.get("local_epochs", self.cfg.num_epochs)
+# num_rounds: int = config.get("num_rounds", self.cfg.num_rounds)
+# Get hyperparameters for this round
+batch_size: int = config["batch_size"]
+epochs: int = config["local_epochs"]
+num_rounds: int = config["num_rounds"]
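
This hunk drops the client-side defaults: config.get(...) tolerated a server that omitted a key, while config["batch_size"] now raises KeyError unless the server sends every key each round. A minimal sketch of the server side this contract assumes, using Flower's standard on_fit_config_fn hook (our illustration, not code from this package; the values are placeholders, not the package's defaults):

import flwr as fl

def fit_config(server_round: int) -> dict:
    # Keys must match what client_fl.py now indexes directly.
    return {
        "batch_size": 32,
        "local_epochs": 1,
        "num_rounds": 100,
    }

strategy = fl.server.strategy.FedAvg(on_fit_config_fn=fit_config)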

@@ -133,2 +137,4 @@ # HPO override (values pushed down from the server strategy; they change once hyperparameter tuning runs)

+hp_ep = config.get("hp_local_epochs", None)  # ✨
+if hp_bs is not None:  # ✨

@@ -289,3 +295,4 @@ batch_size = int(hp_bs)  # ✨

# Get config values
-batch_size: int = config.get("batch_size", self.cfg.batch_size)
+# batch_size: int = config.get("batch_size", self.cfg.batch_size)
+batch_size: int = config["batch_size"]

@@ -313,3 +320,3 @@ # Initialize test_loss, test_accuracy

test_loss, test_accuracy, metrics = self.test_torch(self.model, self.test_loader, self.cfg)
-num_examples_test = len(self.test_loader.dataset) if hasattr(self.test_loader, "dataset") else len(self.test_loader)
+num_examples_test = len(self.test_loader)
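
One behavioral note on this hunk: for a PyTorch DataLoader, len(loader) is the number of batches, while len(loader.dataset) is the number of examples, so the new line reports batch counts wherever num_examples_test is consumed. A standalone illustration (hypothetical snippet, not package code):

import torch
from torch.utils.data import DataLoader, TensorDataset

ds = TensorDataset(torch.zeros(100, 3))
loader = DataLoader(ds, batch_size=32)
print(len(loader))          # 4   (batches: ceil(100 / 32))
print(len(loader.dataset))  # 100 (examples)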

@@ -316,0 +323,0 @@ elif self.model_type == "Huggingface":

@@ -102,2 +102,16 @@ # fedmap/strategy.py

+# --------- small helpers to read env knobs (optional) --------- #
+def _get_int_env(name: str, default: int) -> int:
+    try:
+        return int(os.getenv(name, default))
+    except Exception:
+        return default
+
+def _get_bool_env(name: str, default: bool) -> bool:
+    v = os.getenv(name)
+    if v is None:
+        return default
+    return v.lower() in ("1", "true", "yes", "y")
+
class ModalityAwareAggregation(fl.server.strategy.Strategy):
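
These helpers make the gating behavior introduced below configurable at deployment time without code changes. A quick sketch of how they resolve values (helper repeated so the snippet stands alone; the FEDMAP_* names come from the next hunk):

import os

def _get_int_env(name: str, default: int) -> int:
    try:
        return int(os.getenv(name, default))
    except Exception:
        return default

os.environ["FEDMAP_STUDENT_WARMUP"] = "10"
print(_get_int_env("FEDMAP_STUDENT_WARMUP", 5))    # 10 (env var wins)
print(_get_int_env("FEDMAP_STUDENT_WIN_NEEDED", 3))  # 3 (unset -> default)
os.environ["FEDMAP_STUDENT_WARMUP"] = "not-a-number"
print(_get_int_env("FEDMAP_STUDENT_WARMUP", 5))    # 5 (parse failure -> default)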

@@ -162,2 +176,8 @@ """

+# --- student gating knobs (can be overridden by env) ---
+self.use_student = _get_bool_env("FEDMAP_USE_STUDENT", True)
+self.student_warmup = _get_int_env("FEDMAP_STUDENT_WARMUP", 5)  # rounds before student can dominate
+self.student_win_needed = _get_int_env("FEDMAP_STUDENT_WIN_NEEDED", 3)  # consecutive wins needed
+self._student_streak = 0  # internal counter

# ---------------- Flower Strategy API ----------------

@@ -345,6 +365,88 @@

-# Convert best NDArrays -> Parameters
-nds_final = best["nds"]
-if nds_final is None:
-    # Fallback to uniform averaging (or student attention if valid)
+# --- evaluate student attention directly (single eval) ---
+def _metric_score_from_nds(nds):
+    loss, metrics = self.evaluate_fn(server_round, nds, {})
+    return float(metrics.get("test_f1_macro", metrics.get("accuracy", 0.0)) or 0.0)
+
+score_T = float(best["score"])
+nds_S = aggregate_with_attention(new_attn)
+score_S = _metric_score_from_nds(nds_S) if nds_S is not None else -1.0
+print(f"📈 Teacher score={score_T:.6f} | Student score={score_S:.6f}")
+
+# --------- Decide student vs teacher (KL-bound + probe + blend + streak gate) ---------
+# Target max drop on server metric (e.g., macro-F1) compared to teacher
+delta = 0.002  # tighten/loosen as desired
+
+# Compute KL(teacher || student)
+def _kl_t_s(p, q):
+    p = _safe_nan_to_num(p.to(_DEVICE))
+    q = _safe_nan_to_num(q.to(_DEVICE))
+    p = torch.clamp(p, 1e-12, 1.0)
+    q = torch.clamp(q, 1e-12, 1.0)
+    return (p * (p.log() - q.log())).sum()
+
+kl_ts = _kl_t_s(_safe_nan_to_num(attn_teacher), _safe_nan_to_num(new_attn)).item()
+
+# Small Lipschitz probe toward student to estimate local L
+alpha = 0.1
+with torch.no_grad():
+    w_alpha = _safe_nan_to_num((1 - alpha) * attn_teacher + alpha * new_attn)
+    nds_alpha = aggregate_with_attention(w_alpha)
+    score_alpha = _metric_score_from_nds(nds_alpha) if nds_alpha is not None else score_T
+    l1_step = torch.sum(torch.abs(w_alpha - attn_teacher)).item()
+    Lhat = abs(score_alpha - score_T) / max(l1_step, 1e-8)
+
+import math
+tau = 0.5 * (delta / max(Lhat, 1e-8)) ** 2  # Pinsker + Lipschitz bound
+denom = math.sqrt(max(2.0 * kl_ts, 1e-16))
+eta = 0.2  # larger → switch faster to student
+beta = min(1.0, eta / denom) if denom > 0 else 1.0
+
+# --- streak gate: student must be consistently good ---
+student_ok = (score_S >= score_T - delta) and (kl_ts <= tau)
+if student_ok:
+    self._student_streak += 1
+else:
+    self._student_streak = 0
+print(f"🧪 student_ok={student_ok} | streak={self._student_streak}/{self.student_win_needed} "
+      f"| warmup={self.student_warmup}")
+
+if not self.use_student:
+    # hard-disable student: stick to teacher
+    w_final = attn_teacher
+    decision = "teacher-only (student disabled)"
+elif server_round < self.student_warmup:
+    # warmup: allow only cautious blend toward student
+    beta_guarded = min(beta, 0.25)
+    w_final = _safe_nan_to_num((1 - beta_guarded) * attn_teacher + beta_guarded * new_attn)
+    decision = "teacher-dominant (warmup)"
+elif self._student_streak >= self.student_win_needed:
+    # student has repeatedly matched/beaten teacher with small KL → let it dominate
+    beta_student = max(0.6, min(1.0, beta))  # strong lean to student
+    w_final = _safe_nan_to_num(beta_student * new_attn + (1 - beta_student) * attn_teacher)
+    decision = "student-dominant (passed gate)"
+else:
+    # default: trust-region blend using KL/Lipschitz logic
+    if kl_ts <= tau:
+        w_final = _safe_nan_to_num((1 - beta) * attn_teacher + beta * new_attn)
+        decision = "student/blend (KL small)"
+    else:
+        beta_guarded = min(beta, 0.25)
+        w_final = _safe_nan_to_num((1 - beta_guarded) * attn_teacher + beta_guarded * new_attn)
+        decision = "teacher-dominant (KL large)"
+print(f"🔎 KL(T||S)={kl_ts:.6f}, Lhat={Lhat:.6f}, tau={tau:.6f}, beta={beta:.3f} → {decision}")
+
+nds_blend = aggregate_with_attention(w_final)
+# Prefer blend → then teacher → then student/uniform fallback
+if nds_blend is not None:
+    nds_final = nds_blend
+elif best["nds"] is not None:
+    nds_final = best["nds"]
+else:
+    n = len(weights_per_client)
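
For readers decoding the tau and beta formulas in this hunk: the gate appears to combine Pinsker's inequality with a locally probed Lipschitz constant. A sketch of the reasoning as we reconstruct it (our interpretation, not documentation shipped with the package):

\|w_S - w_T\|_1 \le \sqrt{2\,\mathrm{KL}(w_T \| w_S)} \quad \text{(Pinsker)}
|\,\mathrm{score}(w) - \mathrm{score}(w_T)\,| \approx \hat{L}\,\|w - w_T\|_1 \quad \text{(local probe)}
\Rightarrow \text{metric drop} \le \hat{L}\sqrt{2\,\mathrm{KL}} \le \delta
\iff \mathrm{KL}(w_T \| w_S) \le \tfrac{1}{2}\left(\delta / \hat{L}\right)^2 = \tau

That is exactly the tau computed above. Similarly, since the blend w = (1 - beta) * w_T + beta * w_S moves ||w - w_T||_1 = beta * ||w_S - w_T||_1 <= beta * sqrt(2 * KL), choosing beta = min(1, eta / sqrt(2 * KL)) caps each step toward the student at an L1 distance of eta under the same bound.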

@@ -384,7 +486,5 @@ fallback_attn = new_attn if new_attn.numel() == n else torch.ones(n, device=_DEVICE) / n

def evaluate(self, server_round: int, parameters: fl.common.Parameters):
    if self.evaluate_fn is None:
        return None  # Flower will skip
    nds = parameters_to_ndarrays(parameters)

@@ -391,0 +491,0 @@ loss, metrics = self.evaluate_fn(server_round, nds, {})
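
The evaluate path above hands raw NDArrays plus an empty config to self.evaluate_fn and expects (loss, metrics) back, matching the server-side evaluation contract in Flower 1.x. A minimal sketch of a compatible function (our illustration; build_model, set_weights, and test_on_server_data are hypothetical helpers, not package APIs):

from typing import Dict, Optional, Tuple
from flwr.common import NDArrays, Scalar

def evaluate_fn(
    server_round: int,
    parameters: NDArrays,
    config: Dict[str, Scalar],
) -> Optional[Tuple[float, Dict[str, Scalar]]]:
    model = build_model()                  # hypothetical helper
    set_weights(model, parameters)         # hypothetical helper
    loss, f1 = test_on_server_data(model)  # hypothetical helper
    # "test_f1_macro" is the key _metric_score_from_nds reads first
    return loss, {"test_f1_macro": f1}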

@@ -1,4 +0,4 @@

Metadata-Version: 2.4
Metadata-Version: 2.2
Name: fedops
Version: 1.1.30.4
Version: 1.1.30.5
Summary: FL Lifecycle Operations Management Platform

@@ -5,0 +5,0 @@ Home-page: https://github.com/gachon-CCLab/FedOps.git

setup.py

@@ -7,3 +7,3 @@ from setuptools import setup, find_packages

name='fedops',
-version='1.1.30.4',
+version='1.1.30.5',
author='Semo Yang',

@@ -10,0 +10,0 @@ author_email='tpah20@gmail.com',