Latest Threat Research:SANDWORM_MODE: Shai-Hulud-Style npm Worm Hijacks CI Workflows and Poisons AI Toolchains.Details
Socket
Book a DemoInstallSign in
Socket

qmb

Package Overview
Dependencies
Maintainers
1
Versions
9
Alerts
File Explorer

Advanced tools

Socket logo

Install Socket

Detect and block malicious and high-risk dependencies

Install

qmb - npm Package Compare versions

Comparing version
0.0.7
to
0.0.8
+1
-1
PKG-INFO
Metadata-Version: 2.1
Name: qmb
Version: 0.0.7
Version: 0.0.8
Summary: Quantum Manybody Problem

@@ -5,0 +5,0 @@ Author-email: Hao Zhang <hzhangxyz@outlook.com>

Metadata-Version: 2.1
Name: qmb
Version: 0.0.7
Version: 0.0.8
Summary: Quantum Manybody Problem

@@ -5,0 +5,0 @@ Author-email: Hao Zhang <hzhangxyz@outlook.com>

@@ -15,3 +15,3 @@ # file generated by setuptools_scm

__version__ = version = '0.0.7'
__version_tuple__ = version_tuple = (0, 0, 7)
__version__ = version = '0.0.8'
__version_tuple__ = version_tuple = (0, 0, 8)

@@ -81,7 +81,4 @@ import logging

optimizer = torch.optim.Adam(network.parameters(), lr=self.learning_rate)
loss = None
amplitudes = None
def closure():
nonlocal loss, amplitudes
optimizer.zero_grad()

@@ -92,2 +89,3 @@ amplitudes = network(configs)

loss.backward()
loss.amplitudes = amplitudes
return loss

@@ -97,3 +95,3 @@

for i in range(self.local_step):
optimizer.step(closure)
loss = optimizer.step(closure)
logging.info("local optimizing, step %d, loss %.10f", i, loss.item())

@@ -110,3 +108,3 @@ if loss < self.local_loss:

torch.enable_grad(closure)()
amplitudes = amplitudes.cpu().detach().numpy()
amplitudes = loss.amplitudes.cpu().detach().numpy()
final_energy = ((amplitudes.conj() @ (hamiltonian @ amplitudes)) / (amplitudes.conj() @ amplitudes)).real

@@ -113,0 +111,0 @@ logging.info(

@@ -28,4 +28,4 @@ import logging

use_lbfgs: typing.Annotated[bool, tyro.conf.arg(aliases=["-2"])] = False
# Do not calculate deviation or energy when optimizing energy or deviation
omit_another: typing.Annotated[bool, tyro.conf.arg(aliases=["-i"])] = False
# Do not calculate deviation when optimizing energy
omit_deviation: typing.Annotated[bool, tyro.conf.arg(aliases=["-i"])] = False

@@ -40,3 +40,3 @@ def __post_init__(self):

logging.info(
"sampling count: %d, learning rate: %f, local step: %d, include outside: %a, use deviation: %a, fix outside: %a, use lbfgs: %a, omit another: %a",
"sampling count: %d, learning rate: %f, local step: %d, include outside: %a, use deviation: %a, fix outside: %a, use lbfgs: %a, omit deviation: %a",
self.sampling_count,

@@ -49,3 +49,3 @@ self.learning_rate,

self.use_lbfgs,
self.omit_another,
self.omit_deviation,
)

@@ -89,3 +89,10 @@

def closure():
# Optimizing deviation
optimizer.zero_grad()
# Calculate amplitudes i and amplitudes j
# When including outside, amplitudes j should be calculated individually, otherwise, it equals amplitudes i
# It should be noted that sometimes we do not want to optimize small configurations
# So we calculate amplitudes j in no grad mode
# but the first several configurations in amplitudes j are duplicated with those in amplitudes i
# So cat them manually
amplitudes_i = network(configs_i)

@@ -101,10 +108,19 @@ if self.include_outside:

amplitudes_j = amplitudes_i
# <s|H|psi> will be used multiple times, calculate it first
# as we want to optimize deviation, every value should be calculated in grad mode, so we do not detach anything
hamiltonian_amplitudes_j = hamiltonian @ amplitudes_j
deviation = (hamiltonian_amplitudes_j / amplitudes_i).std()
# energy is just <psi|s> <s|H|psi> / <psi|s> <s|psi>
energy = (amplitudes_i.conj() @ hamiltonian_amplitudes_j) / (amplitudes_i.conj() @ amplitudes_i)
# we want to estimate variance of E_s - E with weight <psi|s><s|psi>
# where E_s = <s|H|psi>/<s|psi>
# the variance is (E_s - E).conj() @ (E_s - E) * <psi|s> <s|psi> / ... = (E_s <s|psi> - E <s|psi>).conj() @ (E_s <s|psi> - E <s|psi>) / ...
# so we calculate E_s <s|psi> - E <s|psi> first, which is just <s|H|psi> - <s|psi> E, we name it `difference`
difference = hamiltonian_amplitudes_j - amplitudes_i * energy
# the numerator calculated, the following is the variance
variance = (difference.conj() @ difference) / (amplitudes_i.conj() @ amplitudes_i)
# calculate the deviation
deviation = variance.real.sqrt()
deviation.backward()
if self.omit_another:
deviation.energy = torch.tensor(torch.nan)
else:
with torch.no_grad():
deviation.energy = ((amplitudes_i.conj() @ hamiltonian_amplitudes_j) / (amplitudes_i.conj() @ amplitudes_i)).real
# As we have already calculated energy, embed it in deviation for logging
deviation.energy = energy.real
return deviation

@@ -119,3 +135,7 @@

def closure():
# Optimizing energy
optimizer.zero_grad()
# Calculate amplitudes i and amplitudes j
# When including outside, amplitudes j should be calculated individually, otherwise, it equals amplitudes i
# Because of gradient formula, we always calculate amplitudes j in no grad mode
amplitudes_i = network(configs_i)

@@ -126,11 +146,24 @@ if self.include_outside:

else:
amplitudes_j = amplitudes_i
hamiltonian_amplitudes_j = hamiltonian @ amplitudes_j.detach()
energy = ((amplitudes_i.conj() @ hamiltonian_amplitudes_j) / (amplitudes_i.conj() @ amplitudes_i.detach())).real
energy.backward()
if self.omit_another:
energy.deviation = torch.tensor(torch.nan)
amplitudes_j = amplitudes_i.detach()
# <s|H|psi> will be used multiple times, calculate it first
# it should be noted that this <s|H|psi> is totally detached, since both hamiltonian and amplitudes j are detached
hamiltonian_amplitudes_j = hamiltonian @ amplitudes_j
# energy is just <psi|s> <s|H|psi> / <psi|s> <s|psi>
# we only calculate gradient on <psi|s>, both <s|H|psi> and <s|psi> should be detached
# since <s|H|psi> has been detached already, we detach <s|psi> here manually
energy = (amplitudes_i.conj() @ hamiltonian_amplitudes_j) / (amplitudes_i.conj() @ amplitudes_i.detach())
# Calculate deviation
# The variance is (E_s <s|psi> - E <s|psi>).conj() @ (E_s <s|psi> - E <s|psi>) / <psi|s> <s|psi>
# Calculate E_s <s|psi> - E <s|psi> first and name it as difference
if self.omit_deviation:
deviation = torch.tensor(torch.nan)
else:
with torch.no_grad():
energy.deviation = (hamiltonian_amplitudes_j / amplitudes_i).std()
difference = hamiltonian_amplitudes_j - amplitudes_i * energy
variance = (difference.conj() @ difference) / (amplitudes_i.conj() @ amplitudes_i)
deviation = variance.real.sqrt()
energy = energy.real
energy.backward()
# Embed the deviation which has been calculated in energy for logging
energy.deviation = deviation
return energy

@@ -137,0 +170,0 @@