Latest Threat Research:SANDWORM_MODE: Shai-Hulud-Style npm Worm Hijacks CI Workflows and Poisons AI Toolchains.Details
Socket
Book a DemoSign in
Socket

clease

Package Overview
Dependencies
Maintainers
2
Versions
38
Alerts
File Explorer

Advanced tools

Socket logo

Install Socket

Detect and block malicious and high-risk dependencies

Install

clease - pypi Package Compare versions

Comparing version
1.0.4
to
1.0.5
+1
-1
clease.egg-info/PKG-INFO
Metadata-Version: 2.1
Name: clease
Version: 1.0.4
Version: 1.0.5
Summary: CLuster Expansion in Atomistic Simulation Environment

@@ -5,0 +5,0 @@ Home-page: https://gitlab.com/computationalmaterials/clease/

@@ -16,21 +16,21 @@ ase>=3.22

[all]
build
pytest-mock
pre-commit
pytest
pytest-cov
twine
tox>=4
sphinx
pyclean>=2.0.0
sphinx
pylint
black>=22.1.0
ipython
cython
twine
pip
pytest-mock
build
clang-format>=14.0.3
pytest-cov
pre-commit
tox>=3.24.0
sphinx_rtd_theme
clease-gui
pytest-benchmark[histogram]>=3.4.1
ipython
pylint
mock
cython
clease-gui
pip

@@ -62,2 +62,2 @@ [dev]

pytest-benchmark[histogram]>=3.4.1
tox>=3.24.0
tox>=4

@@ -1,1 +0,1 @@

1.0.4
1.0.5

@@ -185,3 +185,3 @@ """Calculator for Cluster Expansion."""

if self.atoms is None:
# We havn't yet initialized, so initialize with the passed atoms object.
# We haven't yet initialized, so initialize with the passed atoms object.
self.set_atoms(atoms)

@@ -218,18 +218,5 @@ else:

def clear_history(self) -> None:
    """Forward directly to the updater's history-clearing routine."""
    self.updater.clear_history()
def restore(self) -> None:
    """Roll the Atoms object back to its oldest recorded configuration.

    The state restored is either (1) the configuration present when the
    calculator was first attached, or (2) the configuration at the most
    recent call to ``clear_history()``.

    NOTE: The history buffer holds at most 1000 steps.
    """
    updater = self.updater
    updater.undo_changes()
    self.energy = updater.get_energy()
def update_energy(self) -> None:

@@ -375,3 +362,2 @@ """Update correlation function and get new energy."""

"""Apply a set of changes to the calculator, and evaluate the energy"""
self.reset()
# Apply the updates

@@ -383,9 +369,22 @@ self.update_cf(system_changes)

"""Revert a set of changes. The changes passed in should be the original
sequence of system changes which were applied. Restores the original results."""
self.restore() # Also restores results
sequence of system changes which were applied. Restores the original results.
Restore the Atoms object to its original configuration and energy.
This method reverts the Atoms object to its oldest state stored in
memory. The state is restored to either
(1) an initial state when the calculator was attached, or
(2) the state at which the `clear_history()` method was invoked last
time.
NOTE: The maximum capacity for the history buffer is 1000 steps
"""
self.updater.undo_changes()
self.energy = self.updater.get_energy()
def keep_system_changes(self) -> None:
"""A set of system changes are to be kept. Perform necessary actions to prepare
for a new evaluation."""
self.clear_history()
# Call clear_history directly intentionally, rather than using self.clear_history()
self.updater.clear_history()

@@ -392,0 +391,0 @@ def get_num_threads(self) -> int:

@@ -5,2 +5,3 @@ from itertools import product

import numpy as np
from scipy.spatial.distance import cdist
from ase import Atoms

@@ -298,3 +299,3 @@ from clease import tools

# Euclidean distance between elements i and j.
arr = np.linalg.norm(x[:, np.newaxis] - x, axis=-1, ord=2)
arr = cdist(x, x, metric="euclid")
# Round to 6 digits for numerical stability.

@@ -301,0 +302,0 @@ arr = np.round(arr, 6)

@@ -125,3 +125,3 @@ from __future__ import annotations

return self._all_cf_name_cache[num_bf]
# We havn't cached this name list yet, so we need to build it
# We haven't cached this name list yet, so we need to build it
logger.debug("Building all CF names for %s num bf", num_bf)

@@ -143,2 +143,16 @@ all_cf = self._build_all_cf_names(num_bf)

@property
def assume_no_self_interactions(self) -> bool:
    """Whether every figure in every cluster contains its reference index exactly once.

    Raises:
        RuntimeError: If any cluster has not yet had its translated
            indices attached (i.e. not initialized to a template).
    """
    for cluster in self.clusters:
        if cluster.indices is None:
            # Translated indices are attached lazily; without them we cannot decide.
            raise RuntimeError("Cluster has not yet been initialized to a template.")
        ref = cluster.ref_indx
        if any(figure.count(ref) != 1 for figure in cluster.indices):
            return False
    return True
def __len__(self) -> int:

@@ -145,0 +159,0 @@ return len(self.clusters)

@@ -336,3 +336,3 @@ """Module that fits ECIs to energy data."""

if self.eci is None:
# getting ECI's was not allowed to fit, and we havn't run a fit yet.
# getting ECI's was not allowed to fit, and we haven't run a fit yet.
raise ValueError("ECI's has not been fit yet. Call the Evaluate.fit method first.")

@@ -339,0 +339,0 @@

"""Module for tools pertaining to geometry of atoms and cells."""
import itertools
import numpy as np
from ase import Atoms
__all__ = ("max_sphere_dia_in_cell",)
__all__ = ("max_sphere_dia_in_cell", "supercell_which_contains_sphere", "cell_wall_distances")
def max_sphere_dia_in_cell(cell: np.ndarray) -> float:
"""Find the diameter of the largest possible sphere which can be placed
inside a cell.
def supercell_which_contains_sphere(atoms: Atoms, diameter: float) -> Atoms:
    """Return the smallest diagonal (nx, ny, nz) supercell able to hold a sphere.

    Only plain repetitions of the cell are considered (i.e. a diagonal P matrix).
    For example, how large of a Death Star could be built inside a given atoms object?
    The chosen repetitions are recorded in the supercell's info dictionary under
    the "repeats" keyword, i.e. ``sc.info["repeats"]``.

    Args:
        atoms (Atoms): The atoms object to be enlarged.
        diameter (float): Diameter of the sphere which must fit inside the supercell.

    Returns:
        Atoms: The supercell which can contain a sphere of the given diameter.
    """
    wall_distances = cell_wall_distances(atoms.get_cell())
    # Repeat each lattice vector until its wall-to-wall distance is at least
    # the requested diameter, so the sphere fits along every direction.
    repetitions = np.ceil(diameter / wall_distances).astype(int)
    assert repetitions.shape == (3,)
    supercell: Atoms = atoms * repetitions
    assert cell_wall_distances(supercell.get_cell()).min() >= diameter
    supercell.info["repeats"] = repetitions
    return supercell
def cell_wall_distances(cell: np.ndarray) -> np.ndarray:
"""Get the distances from each cell wall to the opposite cell wall of the cell.
Returns the distances in the order of the distances between the (b, c), (a, c) and (a, b)
planes, such that the shortest vector corresponds to being limited by the a, b or c vector,
respectively.
Args:
cell (np.ndarray): A (3 x 3) matrix which defines the cell parameters.

@@ -19,3 +48,3 @@ Raises a ValueError if the cell shape is wrong.

Returns:
float: The diameter of the largest sphere which can fit within the cell.
np.ndarray: The distance to each of the three cell walls.
"""

@@ -34,6 +63,11 @@ if not cell.shape == (3, 3):

PQ = midpoint - [0, 0, 0]
a, b, c = cell
def _iter_distances() -> float:
# Iterate all distances from cell wall to the midpoint
for v1, v2 in itertools.combinations(cell, 2):
# The ordering here ensures that the distances corresponds to the length
# a, b, c in a cubic cell (in that order)
# Alternatively: The vector which limits the size of the sphere
# is determined by the argmin of this array.
for v1, v2 in [(b, c), (a, c), (a, b)]:
# Find the normal vector to the plane spanned by v1 and v2

@@ -49,3 +83,19 @@ # Note: The astype(float) is to ensure that a cell defined by

return np.array(list(_iter_distances()), dtype=float)
def max_sphere_dia_in_cell(cell: np.ndarray) -> float:
"""Find the diameter of the largest possible sphere which can be placed
inside a cell.
For example, how large of a Death Star could be built inside a given atoms object?
Args:
cell (np.ndarray): A (3 x 3) matrix which defines the cell parameters.
Raises a ValueError if the cell shape is wrong.
Returns:
float: The diameter of the largest sphere which can fit within the cell.
"""
# The smallest possible distance is the radius of the largest sphere we can have.
return min(_iter_distances())
return cell_wall_distances(cell).min()

@@ -38,3 +38,3 @@ class Averager:

def __add__(self, other):
"""Add to Averager objects.
"""Add two Averager objects.

@@ -78,5 +78,6 @@ Parameter:

@property
def mean(self):
def mean(self) -> float:
"""Calculate the mean value of the measurements"""
if self._n_samples == 0:
return self._mean * self._ref_value
return (self._mean / self._n_samples) * self._ref_value
from typing import Union
from abc import ABC
from ase import Atoms
from ase.units import kB
from .mc_evaluator import MCEvaluator, construct_evaluator

@@ -60,2 +61,3 @@

self._temperature = value # pylint: disable=attribute-defined-outside-init
self.kT = value * kB # pylint: disable=attribute-defined-outside-init
self._on_temp_change()

@@ -62,0 +64,0 @@

@@ -7,3 +7,2 @@ from typing import List, Tuple, Sequence, Union

from ase import Atoms
from ase.units import kB
import numpy as np

@@ -78,6 +77,2 @@ from clease.datastructures import SystemChange, MCStep, SystemChanges

@property
def kT(self) -> float:
    """Thermal energy k_B * T (kB presumably from ase.units, so in eV — confirm at import site)."""
    return kB * self.T
def _update_epr(self, current: int, choice: int, swaps: List[int], cum_rates: np.ndarray):

@@ -84,0 +79,0 @@ """

@@ -113,2 +113,4 @@ from typing import Union

super().__init__(atoms)
self.calc: Clease = self.atoms.calc # Keep a direct reference to the calculator
assert self.calc is self.atoms.calc

@@ -120,7 +122,2 @@ @property

@property
def calc(self) -> Clease:
    """The Clease calculator object attached to the wrapped Atoms instance."""
    return self.atoms.calc
def get_energy(self, applied_changes: SystemChanges = None) -> float:

@@ -127,0 +124,0 @@ return self.calc.get_energy()

"""Monte Carlo method for ase."""
from typing import Dict, Union, Iterator, Any
import sys
import datetime
from datetime import datetime
import time

@@ -12,3 +11,2 @@ import logging

from ase.units import kB
from clease.version import __version__
from clease.datastructures import SystemChanges, MCStep

@@ -261,12 +259,8 @@ from .mc_evaluator import CEMCEvaluator, MCEvaluator

@property
def meta_info(self):
def meta_info(self) -> Dict[str, str]:
"""Return dict with meta info."""
ts = time.time()
st = datetime.datetime.fromtimestamp(ts).strftime("%Y-%m-%d %H:%M:%S")
clease_version = str(__version__)
v_info = sys.version_info
# Get the timestamp with millisecond precision
timestamp = datetime.now().isoformat(timespec="milliseconds")
meta_info = {
"timestamp": st,
"python_version": f"{v_info.major}.{v_info.minor}.{v_info.micro}",
"clease_version": clease_version,
"timestamp": timestamp,
}

@@ -293,6 +287,8 @@ return meta_info

quantities["accept_rate"] = self.current_accept_rate
quantities["n_mc_steps"] = self.current_step
at_count = self.count_atoms()
for key, value in at_count.items():
name = f"{key}_conc"
conc = float(value) / len(self.atoms)
conc = value / len(self.atoms)
quantities[name] = conc

@@ -310,4 +306,4 @@

obs_avgs = {}
for obs in self.observers:
obs_avgs.update(obs[1].get_averages())
for obs in self.iter_observers():
obs_avgs.update(obs.get_averages())
return obs_avgs

@@ -325,3 +321,2 @@

"""
logger.debug("Applying changes to the system.")
self.evaluator.apply_system_changes(system_changes)

@@ -335,9 +330,3 @@ # Evaluate the energy quantity after applying the changes to the system.

self.new_energy += bias(system_changes)
logger.debug(
"Current energy: %.3f eV, new energy: %.3f eV",
self.current_energy,
self.new_energy,
)
accept = self._do_accept(self.current_energy, self.new_energy)
logger.debug("Change accepted? %s", accept)

@@ -366,14 +355,8 @@ if accept:

return True
kT = self.temperature * kB
energy_diff = new_energy - current_energy
probability = math.exp(-energy_diff / kT)
logger.debug(
"Energy difference: %.3e. Calculated probability: %.3f",
energy_diff,
probability,
)
probability = math.exp(-energy_diff / self.kT)
return random.random() <= probability
def _move_accepted(self, system_changes: SystemChanges) -> None:
logger.debug("Move accepted, updating things")
self.num_accepted += 1

@@ -384,3 +367,2 @@ self.generator.on_move_accepted(system_changes)

def _move_rejected(self, system_changes: SystemChanges) -> None:
    """Notify the trial-move generator that the proposed changes were rejected."""
    logger.debug("Move rejected, undoing system changes: %s", system_changes)
    self.generator.on_move_rejected(system_changes)

@@ -414,8 +396,2 @@

if self.current_step % interval == 0:
logger.debug(
"Executing observer %s at step %d with interval %d.",
obs.name,
self.current_step,
interval,
)
obs.observe_step(last_step)

@@ -422,0 +398,0 @@

import numpy as np
import attr
from clease.calculator import Clease
from clease.montecarlo.averager import Averager
from clease.datastructures.mc_step import MCStep
from .mc_observer import MCObserver
@attr.define(slots=True)
class SGCQuantities:
    """Container for observed quantities in the SGCObserver"""

    # Running average of the energy.
    energy: Averager = attr.field()
    # Running average of the squared energy (used for variance/heat-capacity estimates).
    energy_sq: Averager = attr.field()
    # Accumulated singlet correlation-function values.
    singlets: np.ndarray = attr.field()
    # Accumulated element-wise squares of the singlet values.
    singlets_sq: np.ndarray = attr.field()
    # Accumulated singlet * energy products.
    singl_eng: np.ndarray = attr.field()
    # Number of observations folded into the accumulators.
    counter: int = attr.field(default=0)

    def reset(self) -> None:
        """Zero every accumulator so a fresh set of observations can start."""
        self.counter = 0
        self.energy.clear()
        self.energy_sq.clear()
        self.singlets[:] = 0.0
        self.singlets_sq[:] = 0.0
        self.singl_eng[:] = 0.0
class SGCObserver(MCObserver):

@@ -15,2 +38,6 @@ """

Clease calculator
observe_singlets: bool
Whether the singlet values of the calculator are measured during each observation.
Measuring singlets is slightly more expensive, so this is disabled by default.
"""

@@ -20,66 +47,62 @@

def __init__(self, calc):
def __init__(self, calc: Clease, observe_singlets: bool = False):
self.observe_singlets = observe_singlets
super().__init__()
self.calc = calc
E = calc.get_potential_energy()
initial_energy = calc.get_potential_energy()
n_singlets = len(self.calc.get_singlets())
self.quantities = {
"singlets": np.zeros(n_singlets, dtype=np.float64),
"singlets_sq": np.zeros(n_singlets, dtype=np.float64),
"energy": Averager(ref_value=E),
"energy_sq": Averager(ref_value=E**2),
"singl_eng": np.zeros(n_singlets, dtype=np.float64),
"counter": 0,
}
self.quantities = SGCQuantities(
energy=Averager(ref_value=initial_energy),
energy_sq=Averager(ref_value=initial_energy**2),
singlets=np.zeros(n_singlets, dtype=np.float64),
singlets_sq=np.zeros(n_singlets, dtype=np.float64),
singl_eng=np.zeros(n_singlets, dtype=np.float64),
)
def reset(self):
"""Reset all variables to zero."""
self.quantities["singlets"][:] = 0.0
self.quantities["singlets_sq"][:] = 0.0
self.quantities["energy"].clear()
self.quantities["energy_sq"].clear()
self.quantities["singl_eng"][:] = 0.0
self.quantities["counter"] = 0
self.quantities.reset()
def __call__(self, system_changes):
"""Update all SGC parameters.
def get_current_energy(self) -> float:
    """Return the energy currently cached in the attached calculator's results."""
    results = self.calc.results
    return results["energy"]
Parameters:
def observe_step(self, mc_step: MCStep):
"""Update all SGC parameters."""
# Reference to avoid self. lookup multiple times
quantities = self.quantities
E = self.get_current_energy()
system_changes: list
System changes. See doc-string of
`clease.montecarlo.observers.MCObserver`
"""
self.quantities["counter"] += 1
new_singlets = self.calc.get_singlets()
quantities.counter += 1
quantities.energy += E
quantities.energy_sq += E * E
E = self.calc.results["energy"]
if self.observe_singlets:
# Only perform the singlet observations if requested.
new_singlets = self.calc.get_singlets()
quantities.singlets += new_singlets
quantities.singlets_sq += new_singlets * new_singlets
quantities.singl_eng += new_singlets * E
self.quantities["singlets"] += new_singlets
self.quantities["singlets_sq"] += new_singlets**2
self.quantities["energy"] += E
self.quantities["energy_sq"] += E**2
self.quantities["singl_eng"] += new_singlets * E
@property
def energy(self):
return self.quantities["energy"]
return self.quantities.energy
@property
def energy_sq(self):
return self.quantities["energy_sq"]
return self.quantities.energy_sq
@property
def singlets(self):
return self.quantities["singlets"]
return self.quantities.singlets
@property
def singl_eng(self):
return self.quantities["singl_eng"]
return self.quantities.singl_eng
@property
def counter(self):
return self.quantities["counter"]
return self.quantities.counter
def interval_ok(self, interval):
    """Only an interval of exactly 1 is supported: this observer must see every step."""
    return interval == 1

@@ -1,5 +0,7 @@

from typing import Sequence, Dict
from typing import Sequence, Dict, Any
import numpy as np
from ase import Atoms
from ase.units import kB
from clease.calculator import Clease
from clease.settings import ClusterExpansionSettings
from .observers import SGCObserver

@@ -14,3 +16,2 @@ from .montecarlo import Montecarlo

# pylint: disable=too-many-instance-attributes
class SGCMonteCarlo(Montecarlo):

@@ -35,9 +36,19 @@ """

generator: TrialMoveGenerator = None,
observe_singlets: bool = False,
):
if not isinstance(atoms, Atoms):
raise ValueError(f"atoms must be an Atoms object, got {atoms!r}")
if not isinstance(atoms.calc, Clease):
raise ValueError(
f"Atoms must have a Clease calculator object attached, got {atoms.calc!r}."
)
if generator is None:
if len(symbols) <= 1:
raise ValueError("At least 2 symbols have to be specified")
generator = RandomFlip(symbols, atoms)
# Only select indices which are not considered background.
non_bkg = atoms.calc.settings.non_background_indices
generator = RandomFlip(symbols, atoms, indices=non_bkg)
self.averager = SGCObserver(atoms.calc)
self.averager = SGCObserver(atoms.calc, observe_singlets=observe_singlets)
super().__init__(atoms, temp, generator=generator)

@@ -53,3 +64,2 @@

self.chem_pot_in_eci = False
self.current_singlets = None

@@ -65,2 +75,6 @@ has_attached_obs = False

@property
def observe_singlets(self) -> bool:
return self.averager.observe_singlets
def _check_symbols(self):

@@ -79,2 +93,10 @@ """

@property
def calc(self) -> Clease:
return self.atoms.calc
@property
def settings(self) -> ClusterExpansionSettings:
return self.calc.settings
@property
def chemical_potential(self):

@@ -85,3 +107,3 @@ return self._chemical_potential

def chemical_potential(self, chem_pot: Dict[str, float]):
eci = self.atoms.calc.eci
eci = self.calc.eci
if any(key not in eci for key in chem_pot):

@@ -98,4 +120,4 @@ msg = "A chemical potential not being trackted is added. Make "

if self.chem_pot_in_eci:
self._reset_eci_to_original(self.atoms.calc.eci)
self._include_chemical_potential_in_eci(chem_pot, self.atoms.calc.eci)
self._reset_eci_to_original(self.calc.eci)
self._include_chemical_potential_in_eci(chem_pot, self.calc.eci)

@@ -123,3 +145,3 @@ def _include_chemical_potential_in_eci(self, chem_pot: Dict[str, float], eci: Dict[str, float]):

eci[key] = current_eci - chem_pot[key]
calc = self.atoms.calc
calc = self.calc
calc.update_eci(eci)

@@ -138,3 +160,3 @@ self.chem_pot_in_eci = True

eci_with_chem_pot[name] += val
calc = self.atoms.calc
calc = self.calc
calc.update_eci(eci_with_chem_pot)

@@ -148,3 +170,3 @@ self.chem_pot_in_eci = False

if self.chem_pot_in_eci:
self._reset_eci_to_original(self.atoms.calc.eci)
self._reset_eci_to_original(self.calc.eci)

@@ -177,3 +199,3 @@ def run(self, steps: int = 10, call_observers: bool = True, chem_pot: Dict[str, float] = None):

"""Convert singlets to composition."""
bf = self.atoms.calc.settings.basis_functions
bf = self.settings.basis_functions
matrix = np.zeros((len(self.symbols), len(self.symbols)))

@@ -194,3 +216,3 @@

for s, i in index.items():
name = s + "_conc"
name = f"{s}_conc"
res[name] = x[i]

@@ -205,3 +227,3 @@ return res

def get_thermodynamic_quantities(self, reset_eci=True):
def get_thermodynamic_quantities(self, reset_eci: bool = False) -> Dict[str, Any]:
"""Compute thermodynamic quantities.

@@ -212,52 +234,60 @@

reset_eci: bool
If True, the chemical potential will be removed from the ECIs
If True, the chemical potential will be removed from the ECIs.
"""
# Note - in order to correctly measure averagers from the SGC observer,
# we need some information from the SGC MC object. So we directly extract the averages
# from that observer here.
avg_obs = self.averager # Alias
N = self.averager.counter
quantities = {}
singlets = self.averager.singlets / N
singlets_sq = self.averager.quantities["singlets_sq"] / N
averages = {}
averages["energy"] = avg_obs.energy.mean
averages["sgc_energy"] = avg_obs.energy.mean
averages["sgc_heat_capacity"] = avg_obs.energy_sq.mean - avg_obs.energy.mean**2
quantities["sgc_energy"] = self.averager.energy.mean
quantities["sgc_heat_capacity"] = (
self.averager.energy_sq.mean - self.averager.energy.mean**2
)
averages["sgc_heat_capacity"] /= kB * self.temperature**2
quantities["sgc_heat_capacity"] /= kB * self.temperature**2
averages["temperature"] = self.temperature
averages["n_mc_steps"] = self.averager.counter
averages["accept_rate"] = self.current_accept_rate
quantities["energy"] = self.averager.energy.mean
natoms = len(self.atoms)
for i, chem_pot in enumerate(self.chem_pots):
quantities["energy"] += chem_pot * singlets[i] * natoms
# Singlets are more expensive to measure than the other quantities,
# so only measure them upon request.
if self.observe_singlets:
# Add singlets and chemical potential to the dictionary
# pylint: disable=consider-using-enumerate
singlets = avg_obs.singlets / N
singlets_sq = avg_obs.quantities["singlets_sq"] / N
quantities["temperature"] = self.temperature
quantities["n_mc_steps"] = self.averager.counter
quantities["accept_rate"] = self.current_accept_rate
averages["singlet_energy"] = avg_obs.energy.mean
natoms = len(self.atoms)
for i, chem_pot in enumerate(self.chem_pots):
averages["singlet_energy"] += chem_pot * singlets[i] * natoms
for i in range(len(singlets)):
name = f"singlet_{self.chem_pot_names[i]}"
averages[name] = singlets[i]
# Add singlets and chemical potential to the dictionary
# pylint: disable=consider-using-enumerate
for i in range(len(singlets)):
name = f"singlet_{self.chem_pot_names[i]}"
quantities[name] = singlets[i]
name = f"var_singlet_{self.chem_pot_names[i]}"
averages[name] = singlets_sq[i] - singlets[i] ** 2
name = f"var_singlet_{self.chem_pot_names[i]}"
quantities[name] = singlets_sq[i] - singlets[i] ** 2
# Measure concentration from the singlets
try:
avg_conc = self.singlet2composition(singlets)
averages.update(avg_conc)
# pylint: disable=broad-except
except Exception as exc:
print("Could not find average singlets!")
print(exc)
name = f"mu_{self.chem_pot_names[i]}"
quantities[name] = self.chem_pots[i]
# Always measure the chemical potentials.
for chem_pot_name, chem_pot in zip(self.chem_pot_names, self.chem_pots):
key = f"mu_{chem_pot_name}"
averages[key] = chem_pot
quantities.update(self.meta_info)
averages.update(self.meta_info)
try:
avg_conc = self.singlet2composition(singlets)
quantities.update(avg_conc)
# pylint: disable=broad-except
except Exception as exc:
print("Could not find average singlets!")
print(exc)
# Add information from observers
quantities.update(self._get_obs_averages())
averages.update(self._get_obs_averages())
if reset_eci:
self._reset_eci_to_original(self.atoms.calc.eci)
return quantities
return averages

@@ -5,2 +5,3 @@ from typing import Sequence, List, Set, Tuple, Dict

from abc import abstractmethod, ABC
import numpy as np
from ase import Atoms

@@ -155,2 +156,6 @@ from ase.data import chemical_symbols

self.atoms = atoms
# Hold a direct reference to the numbers array
self.numbers: np.ndarray = self.atoms.numbers
# Verify it's the same array in memory
assert self.numbers is atoms.numbers

@@ -179,7 +184,6 @@ if indices is None:

# array in atoms.symbols
old_symb = chemical_symbols[self.atoms.numbers[pos]]
new_symb = choice(self.flip_map[old_symb])
return [
SystemChange(index=pos, old_symb=old_symb, new_symb=new_symb, name=self.CHANGE_NAME)
]
old_symb = chemical_symbols[self.numbers[pos]]
flips = self.flip_map[old_symb]
new_symb = choice(flips)
return [SystemChange(pos, old_symb, new_symb, self.CHANGE_NAME)]

@@ -218,14 +222,4 @@

return [
SystemChange(
index=rand_pos_a,
old_symb=symb_a,
new_symb=symb_b,
name=self.CHANGE_NAME,
),
SystemChange(
index=rand_pos_b,
old_symb=symb_b,
new_symb=symb_a,
name=self.CHANGE_NAME,
),
SystemChange(rand_pos_a, symb_a, symb_b, self.CHANGE_NAME),
SystemChange(rand_pos_b, symb_b, symb_a, self.CHANGE_NAME),
]

@@ -232,0 +226,0 @@

@@ -151,3 +151,3 @@ """Module for generating new structures."""

else:
self.atoms.calc.restore()
self.atoms.calc.undo_system_changes()

@@ -154,0 +154,0 @@ # Create a new calculator and attach it to the generated structure

from pathlib import Path
from packaging.version import parse
__all__ = ("__version__", "version_info")
with Path(__file__).with_name("_version.txt").open("r") as f:

@@ -9,3 +11,1 @@ __version__ = parse(f.readline().strip())

version_info = __version__.release
__all__ = ("__version__", "version_info")

@@ -20,6 +20,12 @@ #ifndef BASIS_FUNCTION_H

/** Return the basis function value for a given decoration number and symbol ID */
double get(unsigned int dec_num, unsigned int symb_id) const;
/* Return the basis function value for a given decoration number and symbol ID */
inline double get(unsigned int dec_num, unsigned int symb_id) const {
/* This access is used in the inner loop of the spin product calculation (i.e. very
frequently), so we access with no bounds checking for performance reasons.
Also inline this method, as it's called very frequently in the hot path.
*/
return bfs[get_index(dec_num, symb_id)];
};
/** Return the size (number of basis functions) */
/* Return the size (number of basis functions) */
unsigned int size() const {

@@ -29,12 +35,2 @@ return num_bfs;

/* For preparing a vector of basis functions that maps a decoration number
to the old and new basis functions simultaneously.
This vector contains copies of the underlying basis functions.
The pair<double, double> is constructed as follows:
first -> Basis function of the *new* symbol
second -> Basis function of the *old* symbol
*/
std::vector<std::pair<double, double>> prepare_bfs_new_old(unsigned int new_id,
unsigned int old_id) const;
/** Stream operator */

@@ -50,3 +46,6 @@ friend std::ostream &operator<<(std::ostream &out, const BasisFunction &bf);

/** Return the corresponding index into the flattened array */
unsigned int get_index(unsigned int dec_num, unsigned int symb_id) const;
inline unsigned int get_index(unsigned int dec_num, unsigned int symb_id) const {
// Inline this method, as it's called very frequently in the hot path.
return dec_num * this->num_bf_values + symb_id;
}
void swap(const BasisFunction &other);

@@ -53,0 +52,0 @@ };

@@ -43,3 +43,10 @@ #ifndef CE_UPDATER_H

typedef std::map<std::string, std::vector<int>> tracker_t;
/* Collection of pointers to a cluster and its equivalent-decoration vector.
   nullptr is used to indicate the Cluster is not used, generally for a given
   translational symmetry group. */
struct ClusterCache {
    // Cached pointer to the cluster (nullptr => unused in this symmetry group).
    // NOTE(review): appears non-owning (a cache) — confirm ownership at the fill site.
    Cluster *cluster_ptr{nullptr};
    // Cached pointer to the equivalent decorations for this cluster.
    equiv_deco_t *equiv_deco_ptr{nullptr};
    // Multiplicative normalization applied to this cluster's contribution.
    double normalization{1.0};
};

@@ -58,4 +65,4 @@ /** Help structure used when the correlation functions have different decoration number */

int *trans_matrix_row;
// The basis functions for the new and old site symbols
std::vector<std::pair<double, double>> bfs_new_old;
unsigned int new_symb_id;
unsigned int old_symb_id;
};

@@ -83,3 +90,3 @@

/** Computes the energy based on the ECIs and the correlations functions */
/** Compute the energy based on the ECIs and the correlations functions */
double get_energy();

@@ -95,12 +102,2 @@

/** Computes the spin product for one element */
double spin_product_one_atom(int ref_indx, const Cluster &indx_list,
const std::vector<int> &dec, int ref_id) const;
// Calculate the change in spin product going from old_symb_id to new_symb_id
double spin_product_one_atom_delta(const SpinProductCache &sp_cache, const Cluster &indx_list,
const equiv_deco_t &equiv_deco) const;
SpinProductCache build_sp_cache(const SymbolChange &symb_change, unsigned int old_symb_id,
unsigned int new_symb_id) const;
/* Locate the original index for the "untranslated" site, given a

@@ -171,7 +168,2 @@ reference index, i.e. the inverse operation of the translation matrix.*/

/** CE updater should keep track of where the atoms are */
void set_atom_position_tracker(tracker_t *new_tracker) {
tracker = new_tracker;
};
/** Set the number of threads to use during CF updating */

@@ -217,3 +209,3 @@ void set_num_threads(unsigned int num) {

// bf_list basis_functions;
BasisFunction basis_functions;
BasisFunction *basis_functions{nullptr};

@@ -228,9 +220,33 @@ Status_t status{Status_t::NOT_INITIALIZED};

bool ignore_background_indices{true};
bool assume_no_self_interactions{false};
CFHistoryTracker *history{nullptr};
PyObject *atoms{nullptr};
tracker_t *tracker{nullptr}; // Do not own this pointer
std::vector<std::string> singlets;
// Pre-parsed names of the ECI values.
std::vector<ParsedName> m_parsed_names;
std::map<int, std::vector<ClusterCache>> m_cluster_by_symm;
/* Prepare the next CF array */
cf &get_next_cf(SymbolChange &symb_change);
/** Compute the spin product for one element */
double spin_product_one_atom(int ref_indx, const Cluster &cluster, const std::vector<int> &dec,
int ref_id) const;
// Calculate the change in spin product going from old_symb_id to new_symb_id
double spin_product_one_atom_delta(const SpinProductCache &sp_cache, const Cluster &cluster,
const deco_t &deco) const;
/* Calculate the change in spin product under the assumption that no self-interaction
is present, i.e. no site is present more than once in a figure within a cluster.*/
double spin_product_one_atom_delta_no_si(const SpinProductCache &sp_cache,
const Cluster &cluster, const deco_t &deco) const;
/* Construct a map from the symmetry group with pointers to the relevant clusters.
The pointer clusters are ordered in the same order as the ECI values.
A nullpointer is inserted when a cluster is either a 0- or 1-body cluster, or not in the
symmetry group.*/
void cluster_by_symm_group();
SpinProductCache build_sp_cache(const SymbolChange &symb_change) const;
// Cached number of non background sites

@@ -240,5 +256,2 @@ void count_non_bkg_sites();

/** Undoes the latest changes, keeping the CE tracker updated */
void undo_changes_tracker(int num_steps);
/** Returns true if all decoration numbers are equal */

@@ -245,0 +258,0 @@ bool all_decoration_nums_equal(const std::vector<int> &dec_num) const;

@@ -30,3 +30,3 @@ #ifndef CF_HISTORY_TRACKER_H

/** Gets the system change and previus */
/** Gets the system change and previous */
void pop(SymbolChange **change);

@@ -33,0 +33,0 @@

@@ -22,3 +22,4 @@ #ifndef CLUSTER_LIST_H

/** Get a given cluster with name and symmetry group */
const Cluster &get(const std::string &name, unsigned int symm_group) const;
const Cluster &get(const std::string &name, unsigned int symm_group);
Cluster *get_ptr(const std::string &name, unsigned int symm_group);

@@ -33,4 +34,6 @@ /** Return the maximum index present in any of the clusters */

std::unordered_map<std::string, std::vector<Cluster>> clusters;
Cluster &find_cluster(const std::string &name, unsigned int symm_group);
};
#endif

@@ -12,6 +12,14 @@ #ifndef CLUSTER_H

typedef std::vector<int> deco_t;
typedef std::vector<std::vector<int>> cluster_t;
typedef std::vector<std::vector<int>> equiv_deco_t;
typedef std::vector<deco_t> equiv_deco_t;
typedef std::unordered_map<std::string, equiv_deco_t> all_equiv_deco_t;
/* A single site belonging to a cluster figure: pairs the site's index within
   the cluster with its index in the lattice. */
struct ClusterSite {
   public:
    ClusterSite(unsigned int cluster_index, unsigned int lattice_index)
        : cluster_index(cluster_index), lattice_index(lattice_index){};
    // Position of this site within the cluster figure.
    unsigned int cluster_index;
    // Index of the corresponding site in the lattice.
    unsigned int lattice_index;
};
class Cluster {

@@ -38,7 +46,10 @@ public:

};
unsigned int num_subclusters() const {
return figures.size();
inline unsigned int get_num_figures() const {
return this->num_figures;
};
const equiv_deco_t &get_equiv_deco(const std::string &dec_string) const;
const equiv_deco_t &get_equiv_deco(const std::vector<int> &deco) const;
equiv_deco_t *get_equiv_deco_ptr(const std::string &dec_str);
void unique_indices(std::set<int> &indices) const;

@@ -60,3 +71,11 @@

double max_cluster_dia;
// std::string descriptor;
const std::vector<int> &get_ref_cluster_sites() const {
return this->ref_cluster_site;
};
const std::vector<ClusterSite> &get_non_ref_sites() const {
return this->non_ref_sites;
};
private:

@@ -69,2 +88,6 @@ cluster_t figures;

std::vector<int> ref_cluster_site;
std::vector<ClusterSite> non_ref_sites;
unsigned int num_figures;
void all_deco(int n_bfs, std::vector<std::vector<int>> &all_deco) const;

@@ -71,0 +94,0 @@

@@ -50,6 +50,10 @@ #ifndef ROW_SPARSE_STRUCT_MATRIX_H

int *get_row(unsigned int row_index) const;
/** Look up a column in a pre-cached row, e.g. from the get_row method.
* Use together with get_row() to split the operation of the () operator.
* NOTE: For max performance this function does not perform any validity checks */
const int lookup_in_row(int *row, unsigned int index) const;
* NOTE: For max performance this function does not perform any validity checks
* Method is inlined, as it's called very frequently in the hot path.*/
inline const int lookup_in_row(int *row, unsigned int index) const {
return row[lookup[index]];
};

@@ -56,0 +60,0 @@ /** Matrix-like access operator. NOTE: For max performance this function does not perform any

@@ -18,3 +18,3 @@ #ifndef SYMBOLS_H

/** Return the symbol ID of the atom at site indx */
unsigned int id(unsigned int indx) const {
inline unsigned int id(unsigned int indx) const {
return symb_ids[indx];

@@ -21,0 +21,0 @@ };

@@ -42,25 +42,2 @@ #include "basis_function.hpp"

unsigned int BasisFunction::get_index(unsigned int dec_num, unsigned int symb_id) const {
return dec_num * this->num_bf_values + symb_id;
}
double BasisFunction::get(unsigned int dec_num, unsigned int symb_id) const {
/* This access is used in the inner loop of the spin product calculation (i.e. very frequently),
so we access with no bounds checking for performance reasons. */
return bfs[get_index(dec_num, symb_id)];
}
std::vector<std::pair<double, double>> BasisFunction::prepare_bfs_new_old(
unsigned int new_symb_id, unsigned int old_symb_id) const {
std::vector<std::pair<double, double>> prepped;
prepped.reserve(this->num_bf_values);
for (unsigned int dec_num = 0; dec_num < this->num_bf_values; dec_num++) {
double new_bf = this->get(dec_num, new_symb_id);
double old_bf = this->get(dec_num, old_symb_id);
prepped.emplace_back(std::make_pair(new_bf, old_bf));
}
return prepped;
}
void BasisFunction::swap(const BasisFunction &other) {

@@ -67,0 +44,0 @@ this->symb_ptr = other.symb_ptr;

@@ -42,4 +42,3 @@ #include "cf_history_tracker.hpp"

*next_change = changes[current];
current += 1;
current = current % max_history;
current = (current + 1) % max_history;
if (buffer_size < max_history) {

@@ -101,4 +100,3 @@ buffer_size += 1;

current++;
current = current % max_history;
current = (current + 1) % max_history;

@@ -105,0 +103,0 @@ if (buffer_size < max_history) {

@@ -14,8 +14,16 @@ #include "cluster_list.hpp"

bool ClusterList::is_in_symm_group(const std::string &name, unsigned int symm_group) const {
auto group = symm_group_by_name.at(name);
auto &group = symm_group_by_name.at(name);
return group.find(symm_group) != group.end();
}
const Cluster &ClusterList::get(const std::string &name, unsigned int symm_group) const {
for (const Cluster &cluster : clusters.at(name)) {
const Cluster &ClusterList::get(const std::string &name, unsigned int symm_group) {
return find_cluster(name, symm_group);
}
Cluster *ClusterList::get_ptr(const std::string &name, unsigned int symm_group) {
return &find_cluster(name, symm_group);
}
Cluster &ClusterList::find_cluster(const std::string &name, unsigned int symm_group) {
for (Cluster &cluster : clusters.at(name)) {
if (cluster.symm_group == symm_group) {

@@ -22,0 +30,0 @@ return cluster;

@@ -17,2 +17,19 @@ #include "cluster.hpp"

parse_info_dict(info_dict);
this->num_figures = figures.size();
ref_cluster_site.clear();
non_ref_sites.clear();
/* The non-ref sites are placed continuously and contiguously in a single array
for better cache locality.*/
for (const auto &fig : figures) {
for (unsigned int j = 0; j < fig.size(); j++) {
unsigned int site = (unsigned int)fig[j];
if (site == ref_indx) {
ref_cluster_site.push_back(j);
} else {
non_ref_sites.emplace_back(j, site);
}
}
}
}

@@ -198,2 +215,5 @@

}
equiv_deco_t *Cluster::get_equiv_deco_ptr(const std::string &dec_str) {
return &equiv_deco.at(dec_str);
}

@@ -200,0 +220,0 @@ const equiv_deco_t &Cluster::get_equiv_deco(const std::vector<int> &deco) const {

@@ -123,6 +123,2 @@ #include "row_sparse_struct_matrix.hpp"

const int RowSparseStructMatrix::lookup_in_row(int *row, unsigned int index) const {
return row[lookup[index]];
}
int RowSparseStructMatrix::get_with_validity_check(unsigned int row, unsigned int col) const {

@@ -129,0 +125,0 @@ if (row >= num_rows) {

@@ -23,3 +23,5 @@ #include "ce_updater.hpp"

delete symbols_with_id;
delete basis_functions;
symbols_with_id = nullptr;
basis_functions = nullptr;
}

@@ -110,2 +112,10 @@

PyObject *py_no_si = get_attr(cluster_list, "assume_no_self_interactions");
assume_no_self_interactions = PyObject_IsTrue(py_no_si);
Py_DECREF(py_no_si);
#ifdef PRINT_DEBUG
std::cout << "Assuming no self-interaction?: " << assume_no_self_interactions << std::endl;
#endif
for (unsigned int i = 0; i < num_clusters; i++) {

@@ -159,3 +169,3 @@ PyObject *py_cluster = PySequence_GetItem(cluster_list, i);

this->basis_functions = BasisFunction(basis_func_raw, *symbols_with_id);
this->basis_functions = new BasisFunction(basis_func_raw, *symbols_with_id);

@@ -252,3 +262,2 @@ #ifdef PRINT_DEBUG

// Use pointer arithmetics in the inner most loop
// const int *indx_list_ptr = &indx_list[i][0];
const int *indices = &indx_list[i][0];

@@ -259,3 +268,3 @@

int id = (trans_index == ref_indx) ? ref_id : symbols_with_id->id(trans_index);
sp_temp *= basis_functions.get(dec[j], id);
sp_temp *= basis_functions->get(dec[j], id);
}

@@ -282,11 +291,56 @@ sp += sp_temp;

double CEUpdater::spin_product_one_atom_delta_no_si(const SpinProductCache &sp_cache,
const Cluster &cluster,
const deco_t &deco) const {
/* Note: This function assumes no self-interaction within a cluster. */
// Figure out how many times we need to iterate
unsigned int num_indx = cluster.get_num_figures(); // Outer loop count
// Assume 1 ref site in a figure, so we iterate 1 less
unsigned int n_non_ref = cluster.get_size() - 1; // Inner loop count
int *tm_row = sp_cache.trans_matrix_row;
/* There are n_non_ref sites per each ref site, so the non_ref_site_ptr
iterates faster than the ref_site_ptr. Figures are placed contiguously
in a 1D array.*/
const ClusterSite *non_ref_site_ptr = &cluster.get_non_ref_sites()[0];
const int *ref_site_ptr = &cluster.get_ref_cluster_sites()[0];
// Keep track of the change in spin-product
double sp_delta = 0.0;
// Iterate each figure in the cluster. 1 reference site is assumed per cluster
for (unsigned int i = 0; i < num_indx; i++, ++ref_site_ptr) {
/* Calculate the spin product for both new and the old (ref)
The constant term to the spin product from the sites which didn't change.*/
const int dec_ref = deco[*ref_site_ptr];
double new_bf = basis_functions->get(dec_ref, sp_cache.new_symb_id);
double old_bf = basis_functions->get(dec_ref, sp_cache.old_symb_id);
double sp_change = new_bf - old_bf;
/* Iterate the remaining non-reference sites, as we already took care of the reference
site (assuming no self-interaction)*/
for (unsigned int j = 0; j < n_non_ref; j++, ++non_ref_site_ptr) {
const ClusterSite &site = *non_ref_site_ptr;
const int dec_j = deco[site.cluster_index];
const int trans_index = trans_matrix.lookup_in_row(tm_row, site.lattice_index);
sp_change *= basis_functions->get(dec_j, symbols_with_id->id(trans_index));
}
sp_delta += sp_change;
}
return sp_delta;
}
double CEUpdater::spin_product_one_atom_delta(const SpinProductCache &sp_cache,
const Cluster &cluster,
const equiv_deco_t &equiv_deco) const {
const Cluster &cluster, const deco_t &deco) const {
// Keep track of the change in spin-product
double sp_delta = 0.0;
// List of figures in the cluster
const vector<vector<int>> &indx_list = cluster.get();
// Account for the self-interaction, in case we updated 2 sites with 1 change
const vector<double> &dup_factors = cluster.get_duplication_factors();
const std::vector<double> &dup_factors = cluster.get_duplication_factors();
unsigned int num_indx = indx_list.size();

@@ -297,34 +351,30 @@ unsigned int n_memb = indx_list[0].size();

for (const vector<int> &dec : equiv_deco) {
// Iterate each site in the cluster
for (unsigned int i = 0; i < num_indx; i++) {
// Calculate the spin product for both new and the old (ref)
double sp_temp_new = 1.0;
double sp_temp_ref = 1.0;
// The constant term to the spin product from the sites which didn't change.
double sp_const = 1.0;
// Iterate each site in the cluster
for (unsigned int i = 0; i < num_indx; i++) {
// Use pointer arithmetics in the inner most loop
const int *indices = &indx_list[i][0];
// Use pointer arithmetics in the inner most loop
const int *indices = &indx_list[i][0];
// Calculate the spin product for both new and the old (ref)
double sp_temp_new = 1.0, sp_temp_ref = 1.0;
// The constant term to the spin product from the sites which didn't change.
double sp_const = dup_factors[i];
for (unsigned int j = 0; j < n_memb; j++) {
const int site_index = indices[j];
const int dec_j = dec[j];
if (site_index == sp_cache.original_index) {
// This site is the reference index.
// Look up the BF values directly for the new and old symbols
const std::pair<double, double> &ref_bf = sp_cache.bfs_new_old[dec_j];
sp_temp_new *= ref_bf.first;
sp_temp_ref *= ref_bf.second;
} else {
// Look up the symbol of the non-reference site, which hasn't changed.
const int trans_index = trans_matrix.lookup_in_row(tm_row, site_index);
sp_const *= basis_functions.get(dec_j, symbols_with_id->id(trans_index));
}
for (unsigned int j = 0; j < n_memb; j++) {
const int site_index = indices[j];
const int dec_j = deco[j];
if (site_index == sp_cache.original_index) {
// This site is the reference index.
// Look up the BF values directly for the new and old symbols
sp_temp_new *= basis_functions->get(dec_j, sp_cache.new_symb_id);
sp_temp_ref *= basis_functions->get(dec_j, sp_cache.old_symb_id);
} else {
// Look up the symbol of the non-reference site, which hasn't changed.
const int trans_index = trans_matrix.lookup_in_row(tm_row, site_index);
sp_const *= basis_functions->get(dec_j, symbols_with_id->id(trans_index));
}
// The change in spin-product is the difference in SP between the site(s) which changed,
// multiplied by the constant SP from the other un-changed sites (since these contribute
// equally before and after the change).
sp_delta += (sp_temp_new - sp_temp_ref) * sp_const * dup_factors[i];
}
// The change in spin-product is the difference in SP between the site(s) which
// changed, multiplied by the constant SP from the other un-changed sites (since
// these contribute equally before and after the change).
sp_delta += (sp_temp_new - sp_temp_ref) * sp_const;
}

@@ -348,5 +398,3 @@ return sp_delta;

SpinProductCache CEUpdater::build_sp_cache(const SymbolChange &symb_change,
unsigned int old_symb_id,
unsigned int new_symb_id) const {
SpinProductCache CEUpdater::build_sp_cache(const SymbolChange &symb_change) const {
int ref_indx = symb_change.indx;

@@ -358,17 +406,11 @@ // Look up the untranslated index of the reference index.

// Cache the basis functions for each decoration number specifically
// for the old and new symbols for faster lookup.
auto bfs_new_old = this->basis_functions.prepare_bfs_new_old(new_symb_id, old_symb_id);
unsigned int old_symb_id = symbols_with_id->get_symbol_id(symb_change.old_symb);
unsigned int new_symb_id = symbols_with_id->get_symbol_id(symb_change.new_symb);
SpinProductCache sp_cache = {ref_indx, orig_indx, trans_matrix_row, bfs_new_old};
SpinProductCache sp_cache = {ref_indx, orig_indx, trans_matrix_row, new_symb_id, old_symb_id};
return sp_cache;
}
void CEUpdater::update_cf(SymbolChange &symb_change) {
if (symb_change.old_symb == symb_change.new_symb) {
return;
}
cf &CEUpdater::get_next_cf(SymbolChange &symb_change) {
SymbolChange *symb_change_track;
cf &current_cf = history->get_current();
cf *next_cf_ptr = nullptr;

@@ -382,3 +424,10 @@ history->get_next(&next_cf_ptr, &symb_change_track);

symb_change_track->track_indx = symb_change.track_indx;
return next_cf;
}
void CEUpdater::update_cf(SymbolChange &symb_change) {
if (symb_change.old_symb == symb_change.new_symb) {
return;
}
if (is_background_index[symb_change.indx]) {

@@ -388,13 +437,12 @@ throw runtime_error("Attempting to move a background atom!");

unsigned int old_symb_id = symbols_with_id->id(symb_change.indx);
cf &current_cf = history->get_current();
cf &next_cf = get_next_cf(symb_change);
symbols_with_id->set_symbol(symb_change.indx, symb_change.new_symb);
unsigned int new_symb_id = symbols_with_id->id(symb_change.indx);
/* The following prepares a range of properties which will be
useful for all of the clusters, so we don't compute more
than we have to inside the main ECI loop (and especially inside the
spin-product loop). */
than we have to inside the main ECI loop */
SpinProductCache sp_cache = this->build_sp_cache(symb_change);
SpinProductCache sp_cache = this->build_sp_cache(symb_change, old_symb_id, new_symb_id);
if (atoms != nullptr) {

@@ -404,2 +452,5 @@ set_symbol_in_atoms(atoms, symb_change.indx, symb_change.new_symb);

int symm = trans_symm_group[symb_change.indx];
const std::vector<ClusterCache> &clusters_cache = m_cluster_by_symm[symm];
// Loop over all ECIs

@@ -409,7 +460,8 @@ // As work load for different clusters are different due to a different

#ifdef HAS_OMP
#pragma omp parallel for num_threads(this->cf_update_num_threads) schedule(dynamic)
bool is_par = this->cf_update_num_threads > 1;
#pragma omp parallel for if (is_par) num_threads(this->cf_update_num_threads) schedule(dynamic)
#endif
for (unsigned int i = 0; i < eci.size(); i++) {
// The pre-parsed version of the cluster name.
const ParsedName parsed = this->m_parsed_names[i];
const ParsedName &parsed = this->m_parsed_names[i];

@@ -425,4 +477,5 @@ // 0-body

unsigned int dec = parsed.dec_num;
const std::pair<double, double> &bf_ref = sp_cache.bfs_new_old[dec];
next_cf[i] = current_cf[i] + (bf_ref.first - bf_ref.second) / num_non_bkg_sites;
double new_bf = basis_functions->get(dec, sp_cache.new_symb_id);
double old_bf = basis_functions->get(dec, sp_cache.old_symb_id);
next_cf[i] = current_cf[i] + (new_bf - old_bf) / num_non_bkg_sites;
continue;

@@ -432,19 +485,28 @@ }

// n-body
int symm = trans_symm_group[symb_change.indx];
const ClusterCache &cluster_cache = clusters_cache[i];
const Cluster *cluster_ptr = cluster_cache.cluster_ptr;
if (!clusters.is_in_symm_group(parsed.prefix, symm)) {
if (cluster_ptr == nullptr) {
// This cluster was not present in the symmetry group.
next_cf[i] = current_cf[i];
continue;
}
// The cluster is in the symmetry group, so calculate the spin product
// change for this cluster.
const Cluster &cluster = *cluster_ptr;
const equiv_deco_t &equiv_deco = *cluster_cache.equiv_deco_ptr;
const Cluster &cluster = clusters.get(parsed.prefix, symm);
unsigned int size = cluster.size;
const equiv_deco_t &equiv_deco = cluster.get_equiv_deco(parsed.dec_str);
double delta_sp = 0.0;
// Calculate the change (delta) in spin product
double delta_sp = spin_product_one_atom_delta(sp_cache, cluster, equiv_deco);
for (const deco_t &deco : equiv_deco) {
if (this->assume_no_self_interactions) {
// Faster version for large cells with no self interaction
delta_sp += spin_product_one_atom_delta_no_si(sp_cache, cluster, deco);
} else {
// Safe fall-back version
delta_sp += spin_product_one_atom_delta(sp_cache, cluster, deco);
}
}
delta_sp *= (static_cast<double>(size) / equiv_deco.size());
delta_sp /= normalisation_factor.at(parsed.prefix);
delta_sp *= cluster_cache.normalization;
next_cf[i] = current_cf[i] + delta_sp;

@@ -460,7 +522,2 @@ }

void CEUpdater::undo_changes(int num_steps) {
if (tracker != nullptr) {
undo_changes_tracker(num_steps);
return;
}
int buf_size = history->history_size();

@@ -483,18 +540,2 @@

void CEUpdater::undo_changes_tracker(int num_steps) {
SymbolChange *last_change;
SymbolChange *first_change;
tracker_t &trk = *tracker;
for (int i = 0; i < num_steps; i++) {
history->pop(&last_change);
history->pop(&first_change);
symbols_with_id->set_symbol(last_change->indx, last_change->old_symb);
symbols_with_id->set_symbol(first_change->indx, first_change->old_symb);
trk[first_change->old_symb][first_change->track_indx] = first_change->indx;
trk[last_change->old_symb][last_change->track_indx] = last_change->indx;
}
symbols_with_id->set_symbol(first_change->indx, first_change->old_symb);
symbols_with_id->set_symbol(last_change->indx, last_change->old_symb);
}
double CEUpdater::calculate(PyObject *system_changes) {

@@ -558,7 +599,2 @@ unsigned int size = list_size(system_changes);

update_cf(system_changes[1]);
if (tracker != nullptr) {
tracker_t &trk = *tracker;
trk[system_changes[0].old_symb][system_changes[0].track_indx] = system_changes[1].indx;
trk[system_changes[1].old_symb][system_changes[1].track_indx] = system_changes[0].indx;
}

@@ -598,3 +634,3 @@ return get_energy();

obj->normalisation_factor = normalisation_factor;
obj->basis_functions = BasisFunction(basis_functions);
obj->basis_functions = new BasisFunction(*basis_functions);
obj->status = status;

@@ -609,3 +645,2 @@ obj->trans_matrix = trans_matrix;

obj->atoms = nullptr; // Left as nullptr by intention
obj->tracker = tracker;
return obj;

@@ -622,2 +657,43 @@ }

void CEUpdater::cluster_by_symm_group() {
m_cluster_by_symm.clear();
// Find unique symmetry groups
std::set<int> unique;
insert_in_set(this->trans_symm_group, unique);
for (const int symm : unique) {
if (symm == -1) {
// Background symmetry group
continue;
}
// 1 ClusterCache per ECI value
std::vector<ClusterCache> cluster_cache;
cluster_cache.reserve(this->m_parsed_names.size());
for (const ParsedName &parsed : this->m_parsed_names) {
ClusterCache cache;
if (parsed.size == 0 || parsed.size == 1 ||
!clusters.is_in_symm_group(parsed.prefix, symm)) {
/* Either 0- or 1-body cluster, or cluster is not in this
symmetry group. */
cluster_cache.push_back(cache);
continue;
}
Cluster *cluster_ptr = clusters.get_ptr(parsed.prefix, symm);
equiv_deco_t *equiv_ptr = cluster_ptr->get_equiv_deco_ptr(parsed.dec_str);
// Calculate the normalization of the resulting cluster functions
double normalization = static_cast<double>(cluster_ptr->size);
normalization /= equiv_ptr->size();
normalization /= normalisation_factor.at(parsed.prefix);
// Populate the new cache object
cache.cluster_ptr = cluster_ptr;
cache.equiv_deco_ptr = equiv_ptr;
cache.normalization = normalization;
cluster_cache.push_back(cache);
}
m_cluster_by_symm.insert({symm, cluster_cache});
}
}
void CEUpdater::set_eci(PyObject *pyeci) {

@@ -642,2 +718,4 @@ PyObject *key;

}
// Update the cluster pointers to match the order with the ECI's.
cluster_by_symm_group();
}

@@ -717,3 +795,4 @@

throw runtime_error(
"One site appears to be present in more than one translation symmetry group!");
"One site appears to be present in more than one translation symmetry "
"group!");
}

@@ -751,3 +830,4 @@ trans_symm_group[indx] = i;

throw invalid_argument(
"The length of sequence of swap move exceeds the buffer size for the history tracker");
"The length of sequence of swap move exceeds the buffer size for the history "
"tracker");
}

@@ -932,3 +1012,3 @@

if (!is_background_index[atom_no] || !ignore_background_indices) {
new_value += basis_functions.get(dec, symbols_with_id->id(atom_no));
new_value += basis_functions->get(dec, symbols_with_id->id(atom_no));
}

@@ -935,0 +1015,0 @@ }

@@ -7,2 +7,13 @@ .. _releasenotes:

1.0.5
======
* Added :py:func:`~clease.geometry.supercell_which_contains_sphere` and
:py:func:`~clease.geometry.cell_wall_distances`.
* :class:`~clease.montecarlo.observers.sgc_observer.SGCObserver` now only tracks the singlet averages
if ``observe_singlets=True``. The default observer which is created by the
:class:`~clease.montecarlo.sgc_montecarlo.SGCMonteCarlo` can be controlled by the
:class:`~clease.montecarlo.sgc_montecarlo.SGCMonteCarlo` ``observe_singlets`` keyword.
* ``reset_eci`` in :meth:`~clease.montecarlo.sgc_montecarlo.SGCMonteCarlo.get_thermodynamic_quantities`
now defaults to ``False``. This keyword is likely to be removed in the future, see #321.
1.0.4

@@ -9,0 +20,0 @@ ======

Metadata-Version: 2.1
Name: clease
Version: 1.0.4
Version: 1.0.5
Summary: CLuster Expansion in Atomistic Simulation Environment

@@ -5,0 +5,0 @@ Home-page: https://gitlab.com/computationalmaterials/clease/

@@ -82,3 +82,3 @@ import os

# Uncomment for profiling of the C++ code
# extra_comp_args.append("-pg")
# extra_comp_args.append("-g")

@@ -117,3 +117,3 @@ # Replaces the deprecated "distutils.sysconfig.get_python_inc()"

"pytest-benchmark[histogram]>=3.4.1",
"tox>=3.24.0",
"tox>=4",
),

@@ -120,0 +120,0 @@ # Extra nice-to-haves when developing CLEASE

Sorry, the diff of this file is too big to display