New Research: Supply Chain Attack on Axios Pulls Malicious Dependency from npm. Details →
Socket
Book a Demo · Sign in
Socket

cdopt

Package Overview
Dependencies
Maintainers
2
Versions
57
Alerts
File Explorer

Advanced tools

Socket logo

Install Socket

Detect and block malicious and high-risk dependencies

Install

cdopt - pypi Package Compare versions

Comparing version
0.4.10
to
0.5.0
+1
-1
cdopt.egg-info/PKG-INFO
Metadata-Version: 2.1
Name: cdopt
Version: 0.4.10
Version: 0.5.0
Summary: A Python package for optimization on closed Riemannian manifolds

@@ -5,0 +5,0 @@ Home-page: https://cdopt.github.io/

from .linear import Linear_cdopt, Bilinear_cdopt, LazyLinear_cdopt
from .conv import Conv1d_cdopt, Conv2d_cdopt, Conv3d_cdopt, ConvTranspose1d_cdopt, ConvTranspose2d_cdopt, ConvTranspose3d_cdopt
from .rnn import RNNBase_cdopt, RNN_cdopt, LSTM_cdopt, GRU_cdopt, RNNCell_cdopt, LSTMCell_cdopt, GRUCell_cdopt
from .utils import get_quad_penalty, get_constraint_violation, get_constraint_violation_vector, wvt_flatten2d, wvt_flatten2d_transp, wvt_identical, wvt_transp
from .utils import get_quad_penalty, get_constraint_violation, get_constraint_violation_vector, wvt_flatten2d, wvt_flatten2d_transp, wvt_identical, wvt_transp, get_Amapped_params, get_named_params_manifolds, get_named_Amapped_params, get_params_manifolds, parameters_to_vector

@@ -11,2 +11,3 @@

"RNNCell_cdopt", "LSTMCell_cdopt", "GRUCell_cdopt",
"get_quad_penalty", "get_constraint_violation", "get_constraint_violation_vector", "wvt_flatten2d", "wvt_flatten2d_transp", "wvt_identical", "wvt_transp"]
"get_quad_penalty", "get_constraint_violation", "get_constraint_violation_vector", "wvt_flatten2d", "wvt_flatten2d_transp", "wvt_identical", "wvt_transp",
"get_Amapped_params", "get_named_params_manifolds", "get_named_Amapped_params", "get_params_manifolds", "parameters_to_vector"]

@@ -20,3 +20,3 @@ import torch

if hasattr(local_module, 'quad_penalty'):
quad_all = [ local_module.quad_penalty(), ]
quad_all = [ torch.sqrt(local_module.quad_penalty()), ]

@@ -52,3 +52,20 @@ else:

def single_module_get_Amapped_params(module: nn.Module):
    """Return the A-mapped parameters of a single module.

    If *module* carries a ``manifold`` attribute (set when a manifold
    constraint is attached to the layer), every parameter is passed through
    the layer's recovery map ``A``; otherwise parameters pass through the
    identity map unchanged.

    Args:
        module (nn.Module): the module whose direct parameters are mapped.

    Returns:
        ItemsView of ``(name, A(param))`` pairs, in the same order as
        ``module._parameters``.
    """
    # Only constrained layers define 'manifold' together with the map 'A'.
    if hasattr(module, 'manifold'):
        A_map = getattr(module, 'A')
    else:
        A_map = lambda X: X  # unconstrained layer: identity map
    mapped = OrderedDict((k, A_map(v)) for k, v in module._parameters.items())
    return mapped.items()
def _named_members(module, get_members_fn, prefix='', recurse=True):

@@ -84,3 +101,26 @@ r"""Helper method for yielding various names + members of modules."""

def get_named_Amapped_params(module: nn.Module, prefix: str = '', recurse: bool = True):
    """Yield ``(name, A(param))`` pairs for every parameter of *module*.

    Built on ``torch.nn.Module._named_members``, so the iteration order is
    exactly the order of ``module.named_parameters()``.  Parameters of
    layers without an attached manifold pass through the identity map.

    Args:
        module: the root module to traverse.
        prefix: string prepended to every yielded name.
        recurse: if ``True``, include parameters of all submodules.
    """
    yield from module._named_members(
        single_module_get_Amapped_params, prefix=prefix, recurse=recurse)
def get_Amapped_params(module, prefix: str = '', recurse: bool = True):
    """Yield the A-mapped parameters of *module*, names discarded.

    Traversal order is identical to ``get_named_Amapped_params`` (and hence
    to ``module.parameters()``).
    """
    for _name, mapped in module._named_members(
            single_module_get_Amapped_params, prefix=prefix, recurse=recurse):
        yield mapped
def set_attributes(local_module: torch.nn.Module, attr_name:str, value):

@@ -97,6 +137,58 @@ if hasattr(local_module, 'attr_name'):

def _check_param_device(param: torch.Tensor, old_param_device) -> int:
r"""This helper function is to check if the parameters are located
in the same device. Currently, the conversion between model parameters
and single vector form is not supported for multiple allocations,
e.g. parameters in different GPUs, or mixture of CPU/GPU.
Args:
param ([Tensor]): a Tensor of a parameter of a model
old_param_device (int): the device where the first parameter of a
model is allocated.
Returns:
old_param_device (int): report device for the first time
"""
# Meet the first parameter
if old_param_device is None:
old_param_device = param.get_device() if param.is_cuda else -1
else:
warn = False
if param.is_cuda: # Check if in same GPU
warn = (param.get_device() != old_param_device)
else: # Check if in CPU
warn = (old_param_device != -1)
if warn:
raise TypeError('Found two parameters on different devices, '
'this is currently not supported.')
return old_param_device
def parameters_to_vector(parameters) -> torch.Tensor:
    r"""Flatten an iterable of parameters into one 1-D tensor.

    Args:
        parameters (Iterable[Tensor]): an iterator of Tensors that are the
            parameters of a model.

    Returns:
        The parameters represented by a single concatenated vector.
    """
    seen_device = None  # device index of the first parameter, once known
    pieces = []
    for p in parameters:
        # Concatenation requires every parameter to share one device.
        seen_device = _check_param_device(p, seen_device)
        pieces.append(p.flatten())
    return torch.cat(pieces)
# def set_forward_type(local_module: torch.nn.Module, forward_with_A: bool):

@@ -103,0 +195,0 @@ # if hasattr(local_module, 'forward_with_A'):

@@ -95,2 +95,3 @@ import torch

self.unsafe = unsafe
# self.forward_with_A = True

@@ -97,0 +98,0 @@ # In plain words:

@@ -56,21 +56,28 @@ import torch

if forward_with_A:
class manifold_module(nn.Module):
def __init__(self) -> None:
super().__init__()
self.forward_with_A = forward_with_A
# if forward_with_A:
class manifold_module(nn.Module):
def __init__(self) -> None:
super().__init__()
self.forward_with_A = forward_with_A
def forward(self, X):
def forward(self, X):
if self.forward_with_A:
return A(X)
def right_inverse(self, AX):
return weight_to_var(AX)
else:
class manifold_module(nn.Module):
def forward(self, X):
else:
return var_to_weight(X)
def right_inverse(self, AX):
return weight_to_var(AX)
def right_inverse(self, AX):
return weight_to_var(AX)
# else:
# class manifold_module(nn.Module):
# def __init__(self) -> None:
# super().__init__()
# self.forward_with_A = forward_with_A
# def forward(self, X):
# return var_to_weight(X)
# def right_inverse(self, AX):
# return weight_to_var(AX)
P.register_parametrization(module, attr_name, manifold_module())

@@ -77,0 +84,0 @@

Metadata-Version: 2.1
Name: cdopt
Version: 0.4.10
Version: 0.5.0
Summary: A Python package for optimization on closed Riemannian manifolds

@@ -5,0 +5,0 @@ Home-page: https://cdopt.github.io/

@@ -8,3 +8,3 @@ import setuptools

name="cdopt",
version="0.4.10",
version="0.5.0",
author="Nachuan Xiao, Xiaoyin Hu, Xin Liu, Kim-Chuan Toh",

@@ -11,0 +11,0 @@ author_email="xnc@lsec.cc.ac.cn",