Latest Threat Research:SANDWORM_MODE: Shai-Hulud-Style npm Worm Hijacks CI Workflows and Poisons AI Toolchains.Details
Socket
Book a DemoInstallSign in
Socket

clease

Package Overview
Dependencies
Maintainers
2
Versions
38
Alerts
File Explorer

Advanced tools

Socket logo

Install Socket

Detect and block malicious and high-risk dependencies

Install

clease - npm Package Compare versions

Comparing version
1.0.6
to
1.1.0
+98
tests/test_atoms_manager.py
import pytest
from ase.build import bulk
from clease.settings.atoms_manager import AtomsManager
def test_binary():
    """Indices grouped by tag alternate between even and odd positions."""
    atoms = bulk("Au") * (3, 3, 3)
    # Even-indexed sites get tag 0, odd-indexed sites get tag 1.
    for site in atoms:
        site.tag = site.index % 2
    manager = AtomsManager(atoms)
    groups = manager.index_by_tag()
    assert all(idx % 2 == 0 for idx in groups[0])
    assert all(idx % 2 == 1 for idx in groups[1])
def test_group_by_symbol_single():
    """index_by_symbol groups site indices by their chemical symbol."""
    atoms = bulk("Au") * (3, 3, 3)
    # Decorate by index residue mod 3: 1 -> Cu, 2 -> X, 0 stays Au.
    replacements = {1: "Cu", 2: "X"}
    for site in atoms:
        new_symbol = replacements.get(site.index % 3)
        if new_symbol is not None:
            site.symbol = new_symbol
    manager = AtomsManager(atoms)
    grouped = manager.index_by_symbol(["Au", "Cu", "X"])
    for residue, indices in enumerate(grouped):
        assert all(idx % 3 == residue for idx in indices)
def test_group_by_symbol_grouped():
    """Symbols passed as a nested list are merged into one index group."""
    atoms = bulk("Au") * (3, 4, 5)
    # Decorate by index residue mod 4: 1 -> Cu, 2 -> X, 3 -> Ag.
    replacements = {1: "Cu", 2: "X", 3: "Ag"}
    for site in atoms:
        new_symbol = replacements.get(site.index % 4)
        if new_symbol is not None:
            site.symbol = new_symbol
    manager = AtomsManager(atoms)
    grouped = manager.index_by_symbol(["Au", ["Cu", "X"], "Ag"])
    assert all(idx % 4 == 0 for idx in grouped[0])
    # Cu and X were requested as one group, so both residues land here.
    assert all(idx % 4 in (1, 2) for idx in grouped[1])
    assert all(idx % 4 == 3 for idx in grouped[2])
    assert sorted(manager.unique_elements()) == ["Ag", "Au", "Cu", "X"]
def test_background_indices():
    """Sites whose basis contains a single element are background sites."""
    atoms = bulk("NaCl", crystalstructure="rocksalt", a=4.0) * (5, 5, 5)
    # The Cl sublattice only ever hosts Cl, so it is pure background.
    basis_elements = [["Na", "X"], ["Cl"]]
    manager = AtomsManager(atoms)
    bkg_indices = manager.single_element_sites(basis_elements)
    cl_sites = [site.index for site in atoms if site.symbol == "Cl"]
    assert sorted(bkg_indices) == sorted(cl_sites)
    # All elements are reported by default...
    assert sorted(manager.unique_elements()) == ["Cl", "Na"]
    # ...but the background species can be ignored explicitly.
    assert sorted(manager.unique_elements(ignore=["Cl"])) == ["Na"]
@pytest.mark.parametrize(
    "atoms1,atoms2",
    [
        (bulk("Au") * (3, 3, 3), bulk("Au") * (4, 4, 4)),
        (bulk("Au") * (3, 3, 3), bulk("NaCl", crystalstructure="rocksalt", a=4.0)),
        (
            bulk("NaCl", crystalstructure="rocksalt", a=4.0),
            bulk("NaCl", crystalstructure="rocksalt", a=4.0) * (2, 2, 2),
        ),
    ],
)
def test_equality(atoms1, atoms2):
    """AtomsManager equality follows the wrapped atoms, not object identity."""
    manager_a = AtomsManager(atoms1)
    manager_b = AtomsManager(atoms2)
    # Different wrapped structures never compare equal, and a manager is
    # never equal to a non-manager object.
    assert manager_a != manager_b
    assert manager_a != atoms1
    assert manager_a == AtomsManager(atoms1)
    assert manager_b == AtomsManager(atoms2)
    assert manager_b != "some_string"
    # Re-pointing manager_b at the same atoms makes the managers equal.
    manager_b.atoms = atoms1
    assert manager_a == manager_b
import pytest
from clease.basis_function import Polynomial, Trigonometric, BinaryLinear
# All concrete basis-function classes exercised by the parametrized fixture.
all_bfs = (Polynomial, Trigonometric, BinaryLinear)
@pytest.fixture(params=all_bfs)
def bf_fun(request):
    """Fixture that runs a test once per available basis-function class."""
    return request.param
@pytest.mark.parametrize(
    "test",
    [
        # Polynomial and Trigonometric serialize their name and element list.
        {
            "bf": Polynomial(["Au", "Cu", "X"]),
            "expect": {"name": "polynomial", "unique_elements": ["Au", "Cu", "X"]},
        },
        {
            "bf": Trigonometric(["Au", "Cu", "X"]),
            "expect": {"name": "trigonometric", "unique_elements": ["Au", "Cu", "X"]},
        },
        # BinaryLinear additionally records its redundant element; the first
        # element appears to be the default — see the explicit case below.
        {
            "bf": BinaryLinear(["Au", "Cu", "X"]),
            "expect": {
                "name": "binary_linear",
                "unique_elements": ["Au", "Cu", "X"],
                "redundant_element": "Au",
            },
        },
        {
            "bf": BinaryLinear(["Au", "Cu", "X"], redundant_element="X"),
            "expect": {
                "name": "binary_linear",
                "unique_elements": ["Au", "Cu", "X"],
                "redundant_element": "X",
            },
        },
    ],
)
def test_todict(test):
    """Each basis function serializes to exactly the expected dict."""
    dct_rep = test["bf"].todict()
    assert dct_rep == test["expect"]
@pytest.mark.parametrize(
    "test",
    [
        {
            "bf": BinaryLinear(["Au", "Cu", "X"], redundant_element="X"),
            "full_name": ("c4_d0012_1_1000", "c3_d0001_4_111"),
            "ans": ("c4_d0012_1_CuAuAuAu", "c3_d0001_4_CuCuCu"),
        },
        {
            "bf": BinaryLinear(["Au", "Cu", "Zn", "Ag"], redundant_element="Cu"),
            "full_name": ("c4_d0001_10_1210", "c3_d0991_10_010"),
            "ans": ("c4_d0001_10_AuZnAuAg", "c3_d0991_10_AgAuAg"),
        },
        {
            "bf": Polynomial(["Au", "Cu", "X"]),
            "full_name": ("c2_d0001_99_01", "c4_d0991_10_0122"),
            "ans": ("c2_d0001_99_01", "c4_d0991_10_0122"),
        },
    ],
)
def test_customize_full_cluster_name(test):
    """Decoration digits in a cluster name are rewritten per basis function.

    BinaryLinear replaces the numeric decoration suffix with element symbols;
    Polynomial leaves names unchanged (see the last parametrized case).
    """
    bf = test["bf"]
    # Iterate input/expected pairs in lockstep instead of indexing by range.
    for full_name, expected in zip(test["full_name"], test["ans"]):
        assert bf.customize_full_cluster_name(full_name) == expected
def test_num_unique_elements(bf_fun):
    """num_unique_elements counts distinct symbols and rejects bad input."""
    # Fewer than two *distinct* symbols is invalid at construction time.
    with pytest.raises(ValueError):
        bf_fun(["Au", "Au"])
    with pytest.raises(ValueError):
        bf_fun(["Au"])
    # Duplicates collapse when counting.
    assert bf_fun(["Au", "Ag"]).num_unique_elements == 2
    assert bf_fun(["Au", "Au", "Ag"]).num_unique_elements == 2
    # The property tracks reassignment of unique_elements.
    bf = bf_fun(["Au", "Ag"])
    assert bf.num_unique_elements == 2
    bf.unique_elements = ["Au", "Au", "Ag"]
    assert bf.num_unique_elements == 2
    bf.unique_elements = ["Au", "Ag", "Cu"]
    assert bf.num_unique_elements == 3
@pytest.mark.parametrize("bf_fun2", all_bfs)
def test_equality(bf_fun, bf_fun2):
    """Basis functions compare equal iff both class and elements match."""
    elements = ["Au", "Ag"]
    bf1 = bf_fun(elements)
    bf2 = bf_fun2(elements)
    if isinstance(bf1, type(bf2)):
        # Same basis-function class with identical elements.
        assert bf1 == bf2
    else:
        # Different basis-function classes never compare equal.
        assert bf1 != bf2
    # Changing the element set always breaks equality.
    bf2.unique_elements = ["Au", "Zn"]
    assert bf1 != bf2
    # Comparisons against unrelated objects must be False; the `!= None`
    # form is deliberate — it exercises __ne__ rather than identity.
    assert bf1 != "some_string"
    assert bf1 != []
    assert bf1 != None  # noqa: E711
    assert bf1 not in [True, False]
@pytest.mark.parametrize("must_implement", ["get_spin_dict", "get_basis_functions"])
def test_get_must_implements(bf_fun, must_implement):
    """Every concrete basis function provides the required accessors."""
    bf = bf_fun(["Au", "Ag", "Cu", "C"])
    # Fails with AttributeError (or similar) if the subclass forgot one.
    method = getattr(bf, must_implement)
    method()
def test_save_load_roundtrip(bf_fun, make_tempfile, compare_dict):
    """Saving then loading a basis function preserves its type and contents."""
    path = make_tempfile("bf.json")
    bf = bf_fun(["Au", "Ag", "Cu"])
    with open(path, "w") as handle:
        bf.save(handle)
    with open(path) as handle:
        restored = bf_fun.load(handle)
    assert type(bf) is type(restored)
    compare_dict(bf.todict(), restored.todict())
import pytest
from clease import ConvexHull
from ase.calculators.singlepoint import SinglePointCalculator
from ase.db import connect
from ase.build import bulk
import numpy as np
import os
@pytest.fixture
def rng():
    """Seeded NumPy random generator so the tests are reproducible."""
    return np.random.default_rng(28)
def test_binary(db_name):
    """Distance-to-hull evaluation on a binary Au/Cu toy data set."""
    db = connect(db_name)
    # Energies constructed so the first structure at each composition lies
    # on the convex hull; a duplicate written 0.5 eV higher never does.
    hull_energies = [-x * (8 - x) - x + 0.2 for x in range(9)]
    for n_cu in range(9):
        atoms = bulk("Au") * (2, 2, 2)
        for i in range(n_cu):
            atoms[i].symbol = "Cu"
        atoms.calc = SinglePointCalculator(atoms, energy=hull_energies[n_cu])
        db.write(atoms, converged=True)
        # Same composition, strictly higher energy.
        atoms.calc = SinglePointCalculator(atoms, energy=hull_energies[n_cu] + 0.5)
        db.write(atoms, converged=True)
    cnv_hull = ConvexHull(db_name, conc_ranges={"Au": (0, 1)})
    energies = []
    comp = []
    for row in db.select():
        energies.append(row.energy / row.natoms)
        count = row.count_atoms()
        # Convert per-element counts to concentrations, filling absent
        # elements with zero.
        for symbol in ("Au", "Cu"):
            if symbol in count:
                count[symbol] /= row.natoms
            else:
                count[symbol] = 0.0
        comp.append(count)
    os.remove(db_name)
    # The similarity measure must be computable for every entry.
    for concentration, energy in zip(comp, energies):
        cnv_hull.cosine_similarity_convex_hull(concentration, energy)
def test_syst_with_one_fixed_comp(db_name):
    """ConvexHull reports all elements when one composition is held fixed."""
    db = connect(db_name)
    for n_cu in range(6):
        atoms = bulk("Au") * (2, 2, 2)
        # Two Zn atoms in every structure keep the Zn fraction constant.
        atoms[0].symbol = "Zn"
        atoms[1].symbol = "Zn"
        for i in range(n_cu):
            atoms[i + 2].symbol = "Cu"
        atoms.calc = SinglePointCalculator(atoms, energy=np.random.rand())
        db.write(atoms, converged=True)
    cnv_hull = ConvexHull(db_name, conc_ranges={"Au": (0, 1)})
    os.remove(db_name)
    assert cnv_hull._unique_elem == ["Au", "Cu", "Zn"]
def test_system_multiple_endpoints(db_name, rng):
    """End-point energies equal the minimum-energy structure per element."""
    db = connect(db_name)
    template = bulk("Au") * (1, 1, 2)
    # Several structures at each pure-element extremum with widely
    # different (negative) energies.
    energies = -350 * rng.random(25)
    mult = {"Au": 1, "Zn": 2}
    for energy in energies:
        for symbol, factor in mult.items():
            atoms = template.copy()
            atoms.symbols = symbol
            atoms.calc = SinglePointCalculator(atoms, energy=energy * factor)
            db.write(atoms, converged=True)
    cnv_hull = ConvexHull(db_name, conc_ranges={"Au": (0, 1)})
    end_points = cnv_hull.end_points
    # Each end point must pick the lowest energy per atom for its element.
    for symbol, factor in mult.items():
        expected = energies.min() * factor / len(template)
        assert end_points[symbol]["energy"] == pytest.approx(expected)
"""Unit tests for the corr function class."""
import pytest
import numpy as np
from ase.calculators.singlepoint import SinglePointCalculator
from clease.settings import CEBulk, Concentration
from clease.corr_func import CorrFunction, ClusterNotTrackedError
from clease import NewStructures
from clease.tools import wrap_and_sort_by_position
@pytest.fixture
def bc_settings(db_name):
    """CEBulk settings for a ternary fcc Au/Cu/Si 4x4x4 cell used by several tests."""
    basis_elements = [["Au", "Cu", "Si"]]
    concentration = Concentration(basis_elements=basis_elements)
    return CEBulk(
        crystalstructure="fcc",
        a=4.05,
        size=[4, 4, 4],
        concentration=concentration,
        db_name=db_name,
        max_cluster_dia=[5.73, 5.73],
    )
def get_mic_dists(atoms, cluster):
    """Return MIC distances from each cluster site to all cluster sites.

    :param atoms: ASE Atoms object the indices refer to.
    :param cluster: sequence of atom indices forming the cluster.
    :return: list with one distance array per index in ``cluster``.
    """
    # Comprehension instead of the manual append loop (same call per index).
    return [atoms.get_distances(indx, cluster, mic=True) for indx in cluster]
def test_trans_matrix(bc_settings):
    """The translation matrix preserves MIC distances between site pairs."""
    atoms = bc_settings.atoms
    tm = bc_settings.trans_matrix
    # Distance from site 0 to site 1 is the reference for every row.
    ref_dist = atoms.get_distance(0, 1, mic=True)
    for indx in range(len(atoms)):
        translated = tm[indx][1]
        assert atoms.get_distance(indx, translated, mic=True) == pytest.approx(ref_dist)
def test_supercell_consistency(db_name):
    """Correlation functions of a primitive cell carry over to its supercell."""
    concentration = Concentration(basis_elements=[["Li", "X"], ["O", "X"]])
    settings = CEBulk(
        crystalstructure="rocksalt",
        a=4.05,
        size=[1, 1, 1],
        concentration=concentration,
        db_name=db_name,
        max_cluster_dia=[7.0, 4.0],
    )
    cf = CorrFunction(settings)
    primitive = settings.atoms.copy()
    cf_primitive = cf.get_cf(primitive)
    # The same CFs must come out of an equivalent (wrapped/sorted) supercell.
    supercell = wrap_and_sort_by_position(primitive * (4, 3, 2))
    cf_supercell = cf.get_cf(supercell)
    assert pytest.approx(cf_primitive) == cf_supercell
def test_error_message_for_non_existent_cluster(db_name):
    """Requesting a CF for an untracked cluster raises ClusterNotTrackedError."""
    concentration = Concentration(basis_elements=[["Li", "X"], ["O", "X"]])
    settings = CEBulk(
        crystalstructure="rocksalt",
        a=4.05,
        size=[1, 1, 1],
        concentration=concentration,
        db_name=db_name,
        max_cluster_dia=[7.0, 4.0],
    )
    corr = CorrFunction(settings)
    atoms = settings.atoms
    # A tracked triplet: no error should occur.
    corr.get_cf_by_names(atoms, ["c3_d0000_0_000"])
    # Quadruplets are not tracked by these settings, so they must raise.
    with pytest.raises(ClusterNotTrackedError):
        corr.get_cf_by_names(atoms, ["c4_d0001_0_0000"])
def test_reconfigure(bc_settings):
    """Reconfiguring DB entries keeps final_struct_id links and CF tables consistent."""
    newStruct = NewStructures(bc_settings)
    for i in range(10):
        atoms = bc_settings.atoms.copy()
        # NOTE(review): bc_settings declares basis elements Au/Cu/Si, but the
        # random symbols drawn here are Al/Mg/Si — confirm this mismatch is
        # intentional (it presumably relies on insert_structure not
        # validating symbols against the basis).
        atoms.symbols = np.random.choice(["Al", "Mg", "Si"], size=len(atoms))
        final = atoms.copy()
        calc = SinglePointCalculator(final, energy=-0.2)
        final.calc = calc
        newStruct.insert_structure(init_struct=atoms, final_struct=final)
    # Collect final_struct_ids before reconfiguring.
    db = bc_settings.connect()
    query = [("struct_type", "=", "initial")]
    final_str_ids = [row.final_struct_id for row in db.select(query)]
    cf = CorrFunction(bc_settings)
    cf.reconfigure_db_entries()
    # Confirm that the final_str_ids stay the same after reconfiguration.
    final_str_ids_rec = [row.final_struct_id for row in db.select(query)]
    assert final_str_ids == final_str_ids_rec
    # No inconsistent rows may remain in the CF table.
    ids = cf.check_consistency_of_cf_table_entries()
    assert len(ids) == 0
@pytest.mark.parametrize("value", [None, [1], 0])
def test_bad_settings(value):
    """CorrFunction rejects anything that is not a settings object."""
    with pytest.raises(TypeError):
        CorrFunction(value)
import pytest
import numpy as np
from clease.data_normalizer import DataNormalizer
def test_normalizer():
    """Normalization statistics and coefficient back-conversion are exact."""
    X = np.array([[1.0, -4.0], [-1.0, 2.0], [1.0, 2.0]])
    true_coeff = [1.0, -2.0]
    y = X.dot(true_coeff)
    # Hand-computed column statistics of X and the mean of y.
    expected_meanX = [1.0 / 3, 0.0]
    expected_stdX = [2.0 / np.sqrt(3.0), np.sqrt(12.0)]
    expected_meanY = np.mean(y)
    normalizer = DataNormalizer()
    X_norm, y_norm = normalizer.normalize(X, y)
    assert np.allclose(normalizer.meanX, expected_meanX)
    assert np.allclose(normalizer.stdX, expected_stdX)
    assert normalizer.meanY == pytest.approx(expected_meanY)
    # Fitting normalized data and converting back recovers the raw-data fit.
    coeff_orig, _, _, _ = np.linalg.lstsq(X, y, rcond=-1)
    coeff_norm, _, _, _ = np.linalg.lstsq(X_norm, y_norm, rcond=-1)
    converted = normalizer.convert(coeff_norm)
    assert np.allclose(coeff_orig, converted, rtol=0.2)
    assert normalizer.bias(coeff_norm) == pytest.approx(0.0)
import subprocess
import pytest
from ase.db import connect
from ase.build import bulk
import clease
from clease import db_util, NewStructures
from clease.settings import CEBulk, Concentration
from clease.db_util import get_all_cf_names, get_all_cf, get_cf_tables
def test_get_all_cf_names(db_name):
    """All CF column names stored in an external table are returned."""
    cf = {"c0": 1.0, "c1_1": 0.5, "c2_d0000_0_00": 1.0}
    db = connect(db_name)
    # Ten identical rows; each name must still be reported.
    for _ in range(10):
        db.write(bulk("Cu"), external_tables={"polynomial_cf": cf})
    names = get_all_cf_names(db_name, "polynomial_cf")
    assert sorted(names) == sorted(cf)
def test_get_cf(db_name):
    """get_all_cf returns the CF dict stored for a given row id."""
    cf = {"c0": 1.0, "c1_1": 0.5, "c2_d0000_0_00": 1.0}
    db = connect(db_name)
    for _ in range(10):
        db.write(bulk("Cu"), external_tables={"polynomial_cf": cf})
    # Row id 2 exists and must round-trip the exact dict.
    assert get_all_cf(db_name, "polynomial_cf", 2) == cf
def test_get_cf_tables(db_name):
    """Every external CF table present in the database is discovered."""
    cf = {"c0": 1.0, "c1_1": 0.5, "c2_d0000_0_00": 1.0}
    cf2 = {"c0": 0.1, "c1_1": 0.5, "c2_d0000_0_00": -0.2}
    tables = {"polynomial_cf": cf, "trigonometric_cf": cf2}
    db = connect(db_name)
    for _ in range(10):
        db.write(bulk("Cu"), external_tables=tables)
    assert sorted(get_cf_tables(db_name)) == ["polynomial_cf", "trigonometric_cf"]
def test_cli_runs(db_name):
    """The `clease db` command-line entry points all exit successfully."""
    cf = {"c0": 1.0, "c1_1": 0.5, "c2_d0000_0_00": 1.0}
    cf2 = {"c0": 0.1, "c1_1": 0.5, "c2_d0000_0_00": -0.2}
    db = connect(db_name)
    for _ in range(10):
        db.write(bulk("Cu"), external_tables={"polynomial_cf": cf, "trigonometric_cf": cf2})
    subcommands = (
        ["--help"],
        ["tab", db_name],
        ["names", db_name],
        ["cf", db_name, "1"],
    )
    for extra in subcommands:
        # A non-zero return code means the CLI crashed.
        assert subprocess.call(["clease", "db"] + extra) == 0
@pytest.mark.parametrize(
    "meta",
    [
        {"mytable": {"test_A": 1, "test_B": 2}},
        {
            "table1": {"key1": "foo", "key2_": "bar"},
            "table2": {"key3": "fiz", "key4": "buz"},
        },
        # An empty meta dict must also survive the round trip.
        dict(),
    ],
)
def test_encode_decode_meta(meta):
    """encode_meta followed by decode_meta is the identity."""
    encoded = db_util.encode_meta(meta)
    assert db_util.decode_meta(encoded) == meta
@pytest.mark.parametrize(
    "meta",
    [
        # The table name contains the reserved delimiter.
        {"mytable__": {"foo": "bar"}},
        # A table key contains the reserved delimiter.
        {"mytable": {"foo__": "bar"}},
    ],
)
def test_encode_bad_key(meta):
    """Names containing the reserved delimiter are rejected on encode."""
    with pytest.raises(db_util.InvalidMetaKey):
        db_util.encode_meta(meta)
@pytest.mark.parametrize(
    "table_name, key, expected",
    [
        ("poly_cf", "time", "poly_cf__time"),
        ("mytable", "whatever", "mytable__whatever"),
    ],
)
def test_encode_meta_key(table_name, key, expected):
    """An encoded meta key is table name and key joined by the delimiter."""
    encoded = db_util.encode_meta_key(table_name, key)
    assert encoded == expected
@pytest.mark.parametrize(
    "key",
    [
        # Plain names without the delimiter cannot be decoded.
        "poly_cf",
        "time",
        db_util.MetaTableKeys.CLEASE_CONFIGURE_VERSION,
    ],
)
def test_decode_bad_key(key):
    """Decoding a key that lacks the delimiter raises InvalidMetaKey."""
    with pytest.raises(db_util.InvalidMetaKey):
        db_util.decode_meta_key(key)
@pytest.mark.parametrize(
    "key, expected",
    [
        ("poly_cf__time", ("poly_cf", "time")),
        ("mytable__anything", ("mytable", "anything")),
    ],
)
def test_decode_meta_key(key, expected):
    """decode_meta_key splits an encoded key back into (table, key)."""
    decoded = db_util.decode_meta_key(key)
    assert decoded == expected
@pytest.fixture
def fast_settings(db_name):
    """Small binary Au/Cu CE settings with an initial pool of 3 structures."""
    conc = Concentration([["Au", "Cu"]])
    defaults = dict(crystalstructure="fcc", a=4.05, db_name=db_name, max_cluster_dia=[3.0])
    settings = CEBulk(conc, **defaults)
    newstruct = NewStructures(settings, struct_per_gen=3)
    newstruct.generate_initial_pool()
    return settings
def test_check_version_ok(fast_settings):
    """A freshly written CF table does not require reconfiguration."""
    tab_name = f"{fast_settings.basis_func_type.name}_cf"
    with connect(fast_settings.db_name) as con:
        assert db_util.require_reconfigure_table(con, tab_name, 2, 3, 4) is False
@pytest.mark.parametrize("old_version", ["0.9", "0.10.0", "0.10"])
def test_check_outdated_version(old_version, fast_settings):
    """Rows tagged with an outdated clease version trigger reconfiguration."""
    tab_name = f"{fast_settings.basis_func_type.name}_cf"
    con = connect(fast_settings.db_name)
    # Overwrite the stored configure-version of a single row (id=2).
    version_key = db_util.encode_meta_key(
        tab_name, db_util.MetaTableKeys.CLEASE_CONFIGURE_VERSION
    )
    ext_tab = {db_util.MetaTableKeys.CLEASE_META_TABLE: {version_key: old_version}}
    con.update(2, external_tables=ext_tab)
    assert db_util.require_reconfigure_table(con, tab_name, 2, 3, 4)
def test_missing_meta_ext_table(fast_settings):
    """A database without the meta table must be flagged for reconfiguration."""
    tab_name = f"{fast_settings.basis_func_type.name}_cf"
    con = connect(fast_settings.db_name)
    # Remove the meta bookkeeping entirely.
    con.delete_external_table(db_util.MetaTableKeys.CLEASE_META_TABLE)
    assert db_util.require_reconfigure_table(con, tab_name, 2, 3, 4)
def test_new_row_with_single_table(db_name):
    """Writing a row with one CF table also attaches the meta table."""
    con = connect(db_name)
    tab_name = "poly_cf"
    atoms = bulk("Au", crystalstructure="fcc", a=4.0)
    uid = db_util.new_row_with_single_table(con, atoms, tab_name, {"c0": 0.0})
    row = con.get(id=uid)
    assert db_util.MetaTableKeys.CLEASE_META_TABLE in row
    assert tab_name in row
def test_new_row_with_many_tables(db_name):
    """Writing a row with several CF tables stores every table plus meta."""
    con = connect(db_name)
    tables = ("poly_cf", "some_other_cf")
    cfs = ({"c0": 0.0}, {"c0": 1.0})
    atoms = bulk("Au", crystalstructure="fcc", a=4.0)
    uid = db_util.new_row_with_many_tables(con, atoms, tables, cfs)
    row = con.get(id=uid)
    assert db_util.MetaTableKeys.CLEASE_META_TABLE in row
    for tab_name in tables:
        assert tab_name in row
def test_update_table(fast_settings):
    """update_table writes the CF values and registers the table in meta."""
    con = connect(fast_settings.db_name)
    row_id = 2
    table_name = "dummy_cf"
    cf = {"c0": 0.0, "c1": -1.5}
    db_util.update_table(con, row_id, table_name, cf)
    row = con.get(id=row_id)
    assert table_name in row
    # The meta table must now know about the new CF table...
    meta = db_util.decode_meta(row[db_util.MetaTableKeys.CLEASE_META_TABLE])
    assert table_name in meta
    # ...and the stored values must match what was written.
    assert cf == pytest.approx(row[table_name])
def test_meta(fast_settings):
    """Meta entries carry a timestamp and the current clease version."""
    con = connect(fast_settings.db_name)
    row = con.get(id=2)
    assert db_util.MetaTableKeys.CLEASE_META_TABLE in row
    meta = db_util.decode_meta(row[db_util.MetaTableKeys.CLEASE_META_TABLE])
    for tab_name, table in meta.items():
        # Only CF tables are registered in meta.
        assert tab_name.endswith("_cf")
        assert db_util.MetaTableKeys.TIME in table
        assert db_util.MetaTableKeys.CLEASE_CONFIGURE_VERSION in table
        version = table[db_util.MetaTableKeys.CLEASE_CONFIGURE_VERSION]
        assert version == str(clease.__version__)
import os
import pytest
from ase.build import bulk
from clease.settings import CEBulk, Concentration
from clease import NewStructures
from clease.tools import wrap_and_sort_by_position
def test_formula_unit(db_name):
    """_get_formula_unit reduces per-sublattice compositions correctly."""
    basis_elements = [["Li", "Ru", "X"], ["O", "X"]]
    # Linear constraints on the concentration vector, identical to the
    # original fixture definition.
    conc = Concentration(
        basis_elements=basis_elements,
        A_eq=[[0, 3, 0, 0, 0]],
        b_eq=[1],
        A_lb=[[0, 0, 0, 3, 0]],
        b_lb=[2],
    )
    settings = CEBulk(
        crystalstructure="rocksalt",
        a=4.0,
        size=[2, 2, 3],
        concentration=conc,
        db_name=db_name,
        max_cluster_dia=[4.0],
    )
    newstruct = NewStructures(settings=settings)
    template = bulk(name="LiO", crystalstructure="rocksalt", a=4.0) * (2, 2, 3)
    # A few substitutions spread over both sublattices.
    atoms = wrap_and_sort_by_position(template.copy())
    atoms[0].symbol = "Ru"
    atoms[1].symbol = "X"
    atoms[4].symbol = "X"
    assert newstruct._get_formula_unit(atoms) == "Li10Ru1X1_O11X1"
    # The pristine structure reduces to the smallest formula unit.
    atoms = wrap_and_sort_by_position(template.copy())
    assert newstruct._get_formula_unit(atoms) == "Li1_O1"
    # Equal vacancy counts on both sublattices also reduce.
    atoms = wrap_and_sort_by_position(template.copy())
    for idx in (0, 1, 2, 5, 6, 14):
        atoms[idx].symbol = "X"
    for idx in (4, 7, 9, 13, 15, 20):
        atoms[idx].symbol = "X"
    assert newstruct._get_formula_unit(atoms) == "Li1X1_O1X1"
import pytest
import numpy as np
from ase.build import bulk
from clease.geometry import max_sphere_dia_in_cell, supercell_which_contains_sphere
@pytest.mark.parametrize(
    "cell, expect",
    [
        # Integer and float unit cells must give identical results.
        (np.diag([1, 1, 1]).astype(int), 1.0),
        (np.diag([1, 1, 1]).astype(float), 1.0),
        (np.array([[0.0, 1.9, 1.9], [1.9, 0.0, 1.9], [1.9, 1.9, 0.0]]), 2.1939310229205775),
    ],
)
def test_sphere_in_cell(cell, expect):
    """Largest inscribed-sphere diameter matches the reference values."""
    assert pytest.approx(max_sphere_dia_in_cell(cell)) == expect
def test_sphere_in_cube():
    """Rotating the y-vector of a unit cube about z gives diameter |cos(theta)|."""
    angles = np.linspace(0.0, 2 * np.pi, 200, endpoint=True)

    def tilted_cube(theta):
        """Unit cell whose y-axis is rotated by theta about the z-axis."""
        rot = np.array(
            [
                [np.cos(theta), -np.sin(theta), 0],
                [np.sin(theta), np.cos(theta), 0],
                [0, 0, 1],
            ]
        )
        x = np.array([1, 0, 0])
        y = rot.dot([0, 1, 0])
        z = np.array([0, 0, 1])
        return np.vstack([x, y, z])

    diameters = [max_sphere_dia_in_cell(tilted_cube(theta)) for theta in angles]
    assert pytest.approx(diameters) == np.abs(np.cos(angles))
@pytest.mark.parametrize("dia", [15, 30, 35, 40, 41.5])
def test_sphere_in_sc(dia):
    """The generated supercell fits the sphere without being oversized."""
    # Cubic cell: expansion along each axis is easy to reason about.
    atoms = bulk("NaCl", crystalstructure="rocksalt", a=3.3, cubic=True)
    lengths = np.linalg.norm(atoms.get_cell(), axis=1)
    # Sanity check: the starting cell is smaller than the requested sphere.
    assert (lengths < dia).all()
    sc = supercell_which_contains_sphere(atoms, dia)
    sc_lengths = np.linalg.norm(sc.get_cell(), axis=1)
    assert (sc_lengths >= dia).all()
    # No axis should overshoot by more than 30%.
    assert (sc_lengths < 1.3 * dia).all()
@pytest.mark.parametrize(
    "atoms, expect",
    [
        (bulk("NaCl", crystalstructure="rocksalt", a=3.8, cubic=True), (11, 11, 11)),
        (bulk("NaCl", crystalstructure="rocksalt", a=3.8, cubic=False), (19, 19, 19)),
        (bulk("Au", crystalstructure="fcc", a=3.6, cubic=False), (20, 20, 20)),
    ],
)
def test_sphere_repeats(atoms, expect):
    """The repeat counts used to build the supercell are recorded in info."""
    dia = 40
    sc = supercell_which_contains_sphere(atoms, dia)
    assert "repeats" in sc.info
    assert (sc.info["repeats"] == expect).all()
import unittest
import numpy as np
from clease.gramSchmidthMonomials import GramSchmidtMonimial
def bf_list_equal(d1, d2):
    """Return True when two lists of basis-function dicts match.

    Dicts are compared key-by-key with a 1e-10 absolute tolerance.
    Bug fix: the original zip-based loop silently ignored extra entries
    when the lists had different lengths, and never looked at keys present
    only in ``d2`` — both cases now compare unequal.

    :param d1: list of {symbol: value} dicts.
    :param d2: list of {symbol: value} dicts to compare against.
    """
    if len(d1) != len(d2):
        return False
    for i1, i2 in zip(d1, d2):
        if i1.keys() != i2.keys():
            return False
        for k in i1.keys():
            if abs(i1[k] - i2[k]) > 1e-10:
                return False
    return True
class TestGramSchmidtMonimial(unittest.TestCase):
    """Tests for the Gram-Schmidt monomial basis-function construction."""

    def test_spin_dicts(self):
        """Spin dicts for 2 and 3 symbols match the analytic values."""
        tests = [
            {"symbols": ["Au", "Cu"], "expect": [{"Au": 1.0, "Cu": -1.0}]},
            {
                "symbols": ["Au", "Cu", "X"],
                "expect": [
                    {"Au": np.sqrt(3.0 / 2.0), "Cu": -np.sqrt(3.0 / 2.0), "X": 0.0},
                    {
                        "Au": 1.0 / np.sqrt(2),
                        "Cu": 1.0 / np.sqrt(2.0),
                        "X": -np.sqrt(2.0),
                    },
                ],
            },
        ]
        for test in tests:
            gram_schmidt = GramSchmidtMonimial(len(test["symbols"]))
            gram_schmidt.build()
            sp_dict = gram_schmidt.basis_functions(test["symbols"])
            msg = "Expected\n{}\nGot\n{}".format(test["expect"], sp_dict)
            self.assertTrue(bf_list_equal(test["expect"], sp_dict), msg=msg)

    def test_orthonormality(self):
        """Basis vectors are orthonormal for 2 through 9 symbols."""
        for num_symbs in range(2, 10):
            gram_schmidt = GramSchmidtMonimial(num_symbs)
            gram_schmidt.build()
            bf_array = np.array(gram_schmidt.bf_values)
            identity = np.eye(num_symbs)
            # Gram matrix of the basis must be the identity (up to 1e-6).
            dotProd = bf_array.T.dot(bf_array) / num_symbs
            self.assertTrue(np.allclose(dotProd, identity, atol=1e-6))
if __name__ == "__main__":
    # Allow running this test file directly via the unittest CLI.
    unittest.main()
import unittest
from ase.build import bulk
from clease.mc_trajectory_extractor import MCTrajectoryExtractor
class TestMCTrajectoryExtractor(unittest.TestCase):
    """Tests for detecting single-swap relations between MC structures."""

    def test_is_related_by_swap(self):
        """Two structures are swap-related iff one pair exchange maps one to the other."""
        # Single X at site 0 vs site 4: related by one swap.
        atAl = bulk("Al") * (2, 2, 2)
        atAl[0].symbol = "X"
        atAl2 = bulk("Al") * (2, 2, 2)
        atAl2[4].symbol = "X"
        # Two X sites: differs from atAl by composition, not a swap.
        atAl3 = bulk("Al") * (2, 2, 2)
        atAl3[0].symbol = "X"
        atAl3[4].symbol = "X"
        # Extra Cu: again a composition change, not a swap.
        atAl4 = bulk("Al") * (2, 2, 2)
        atAl4[0].symbol = "X"
        atAl4[1].symbol = "Cu"
        tests = [
            {"atoms1": bulk("Al") * (2, 2, 2), "atoms2": bulk("Al"), "expect": False},
            {"atoms1": atAl, "atoms2": atAl2, "expect": True},
            {"atoms1": atAl, "atoms2": atAl3, "expect": False},
            {"atoms1": atAl, "atoms2": atAl4, "expect": False},
        ]
        extractor = MCTrajectoryExtractor()
        for t in tests:
            res = extractor.is_related_by_swap(t["atoms1"], t["atoms2"])
            self.assertEqual(res, t["expect"])

    def test_find_swaps(self):
        """find_swaps returns all index pairs of swap-related structures."""
        atoms = bulk("Al") * (4, 4, 4)
        atoms[0].symbol = "X"
        atoms1 = atoms.copy()
        atoms1[0].symbol = "Al"
        atoms1[10].symbol = "X"
        atoms2 = atoms.copy()
        atoms2[0].symbol = "Al"
        atoms2[3].symbol = "X"
        # Structures with two X
        atoms3 = atoms.copy()
        atoms3[10].symbol = "X"
        atoms4 = atoms.copy()
        atoms4[32].symbol = "X"
        all_atoms = [atoms, atoms1, atoms2, atoms3, atoms4]
        # Single-X structures (0, 1, 2) pair among themselves; the two
        # double-X structures (3, 4) pair with each other only.
        expect = [(0, 1), (0, 2), (1, 2), (3, 4)]
        extractor = MCTrajectoryExtractor()
        swaps = extractor.find_swaps(all_atoms)
        self.assertEqual(swaps, expect)
if __name__ == "__main__":
    # Allow running this test file directly via the unittest CLI.
    unittest.main()
"""Test case for the multiprocessing logger."""
import pytest
import multiprocessing as mp
from clease.mp_logger import MultiprocessHandler
import logging as lg
import os
# The worker function must be picklable, which requires this module to be
# the top-level script; skip the whole module when imported by pytest.
if __name__ != "__main__":
    msg = "Pickle error when file is not top level module"
    pytest.skip(msg, allow_module_level=True)
log_file = "mp_logger_test.txt"
logger = lg.getLogger(__name__)
logger.setLevel(lg.INFO)
handler = MultiprocessHandler(log_file)
logger.addHandler(handler)


def function_logging(msg):
    """Log message via logger."""
    logger.info(msg)


args = ["Indx: {}".format(i) for i in range(32)]
# Context manager closes/terminates the pool (the original leaked it), so
# all workers are done before the log file is read back.
with mp.Pool(2) as workers:
    workers.map(function_logging, args)
# Read the logging file back and ensure every message was recorded.
record_found = [False for _ in range(len(args))]
with open(log_file, "r") as infile:
    lines = infile.readlines()
for line in lines:
    # This will raise a ValueError if it does not find the line in args.
    # In that case something went wrong with the logger.
    indx = args.index(line[:-1])  # Skip the trailing '\n'
    # Bug fix: the original computed indx but never recorded it, so the
    # completeness of the log file was never actually verified.
    record_found[indx] = True
assert all(record_found)
os.remove(log_file)
import sys
import pytest
import numpy as np
import clease.plot_post_process as pp
from clease import Evaluate
try:
    import _tkinter
except ImportError:
    # Plot tests need a Tk-capable matplotlib backend; skip the module without it.
    pytest.skip("Skipping: No tkinter", allow_module_level=True)
def test_plot_fit(bc_setting):
    """plot_fit puts DFT vs CE energies on the axes with the given labels."""
    expected = {
        "title": "plot_FIT_TEST",
        "xlabel": r"E$_{DFT}$ (eV/atom)",
        "ylabel": r"E$_{CE}$ (eV/atom)",
    }
    plot_args = {"title": expected["title"], "ylabel": expected["ylabel"]}
    evaluator = Evaluate(bc_setting, fitting_scheme="l2", alpha=1e-6)
    evaluator.fit()
    # Capture the data before plotting: x is DFT energy, y the CE prediction.
    x_expected = evaluator.e_dft
    y_expected = evaluator.get_energy_predict()
    fig = pp.plot_fit(evaluator, plot_args)
    ax = fig.get_axes()[0]
    # The LOOCV score is annotated on the plot.
    assert "loocv" in ax.texts[0].get_text()
    assert expected["title"] == ax.get_title()
    assert expected["xlabel"] == ax.get_xlabel()
    assert expected["ylabel"] == ax.get_ylabel()
    # The second line holds the scatter data.
    assert x_expected == pytest.approx(fig.gca().lines[1].get_xdata())
    assert y_expected == pytest.approx(fig.gca().lines[1].get_ydata())
def test_plot_fit_residual(bc_setting):
    """plot_fit_residual shows the fit error per structure in meV/atom."""
    expected = {"title": "plot_FIT_TEST", "xlabel": "#OCC", "ylabel": "DFT_FIT"}
    plot_args = {"title": expected["title"], "ylabel": expected["ylabel"]}
    evaluator = Evaluate(bc_setting, fitting_scheme="l2", alpha=1e-6)
    evaluator.fit()
    # Residual converted from eV/atom to meV/atom, as the plot uses meV.
    delta_e = (evaluator.get_energy_predict() - evaluator.e_dft) * 1000
    fig = pp.plot_fit_residual(evaluator, plot_args)
    assert expected["title"] == fig.get_axes()[0].get_title()
    assert expected["xlabel"] == fig.get_axes()[1].get_xlabel()
    assert expected["ylabel"] == fig.get_axes()[0].get_ylabel()
    assert np.allclose(delta_e, fig.get_children()[1].get_lines()[1].get_ydata())
@pytest.mark.parametrize("ignore_sizes", [(), [0], [0, 1], [3, 4]])
def test_plot_eci(bc_setting, ignore_sizes):
    """plot_eci draws one ECI line per cluster size, minus ignored sizes."""
    expected = {
        "title": "plot_FIT_TEST",
        "xlabel": "Cluster diameter ($n^{th}$ nearest neighbor)",
        "ylabel": "DFT_FIT",
    }
    plot_args = {"title": expected["title"], "ylabel": expected["ylabel"]}
    evaluator = Evaluate(bc_setting, fitting_scheme="l2", alpha=1e-6)
    evaluator.fit()
    sizes = evaluator.get_eci_by_size()
    kept = [v for s, v in sizes.items() if s not in ignore_sizes]
    # Sizes 0..4 exist; only the non-ignored ones should remain.
    assert len(kept) == 5 - len(ignore_sizes), len(kept)
    fig = pp.plot_eci(evaluator, plot_args, ignore_sizes=ignore_sizes)
    assert expected["title"] == fig.get_axes()[0].get_title()
    assert expected["xlabel"] == fig.get_axes()[0].get_xlabel()
    assert expected["ylabel"] == fig.get_axes()[0].get_ylabel()
    ax = fig.gca()
    # Line 0 is the horizontal zero line; the rest are the ECI lines.
    for entry, line in zip(kept, ax.lines[1:]):
        assert np.allclose(entry["eci"], line.get_ydata())
    # The axhline spans x = 0..1 at y = 0.
    assert np.allclose([[0.0, 0.0], [1.0, 0.0]], ax.axhline().get_xydata())
def test_plot_cv(bc_setting):
    """plot_cv plots the CV score against the scanned alpha values.

    Bug fix: the final loop previously rebound ``predict`` (the expectation
    dict) as its own loop variable while iterating ``predict["alpha_cv"]``,
    clobbering the dict mid-test and working only because ``zip`` evaluates
    its arguments up front. Distinct names are used now.
    """
    expected = {"title": "plot_FIT_TEST", "xlabel": "alpha", "ylabel": "DFT_FIT"}
    plot_args = {"title": expected["title"], "ylabel": expected["ylabel"]}
    evaluator = Evaluate(bc_setting, fitting_scheme="l2", alpha=1e-6)
    alphas = [0.1, 0.2, 0.3, 0.4, 0.5]
    evaluator.cv_for_alpha(alphas)
    # Capture the expected scores before plotting.
    alpha_cv = evaluator.cv_scores
    fig = pp.plot_cv(evaluator, plot_args)
    assert expected["title"] == fig.get_axes()[0].get_title()
    assert expected["xlabel"] == fig.get_axes()[0].get_xlabel()
    assert expected["ylabel"] == fig.get_axes()[0].get_ylabel()
    plotted_alphas = fig.gca().get_lines()[0].get_xdata().tolist()
    for score, plotted in zip(alpha_cv, plotted_alphas):
        assert score["alpha"] == plotted
@pytest.mark.parametrize("interactive", [True, False])
def test_plot_ch(interactive, bc_setting):
    """Smoke test: the convex-hull plot runs in both interactive modes."""
    evaluator = Evaluate(bc_setting, fitting_scheme="l2", alpha=1e-6)
    evaluator.fit()
    pp.plot_convex_hull(evaluator, interactive=interactive)
@pytest.mark.parametrize(
    "plot_name", ["plot_convex_hull", "plot_fit", "plot_fit_residual", "plot_eci"]
)
def test_plot_interactive_events(bc_setting, plot_name):
    """Interactive plots register one extra mouse-event callback each."""
    evaluator = Evaluate(bc_setting, fitting_scheme="l2", alpha=1e-6)
    evaluator.fit()
    # Build the same figure twice: once static, once interactive.
    fnc = getattr(pp, plot_name)
    fig1 = fnc(evaluator, interactive=False)
    fig2 = fnc(evaluator, interactive=True)
    # Ensure there are more events in the interactive one (the ones we added)
    def get_events(fig, event_name):
        # Matplotlib keys callbacks by event name; missing -> empty dict.
        return fig.canvas.callbacks.callbacks.get(event_name, {})
    for event in ["button_press_event", "motion_notify_event"]:
        events1 = get_events(fig1, event)
        events2 = get_events(fig2, event)
        # We should have 1 more event
        # Since the object fell out of scope, and the event is only weak-ref'd,
        # it is normally garbage collected, unless we wrap it.
        # If more events are added in the future, the number of expected extra events should be
        # adjusted accordingly.
        if plot_name in {"plot_eci"} and event == "button_press_event":
            # Doesn't add any extra click events
            assert len(events1) == len(events2)
        else:
            assert len(events2) == len(events1) + 1
import numpy as np
import matplotlib
from clease.sparsifier import Sparsifier
from clease.regression import LinearRegression
def test_sparsifier():
    """The sparsifier recovers the two active features and their weights."""
    matplotlib.use("Agg")  # headless backend so plot() cannot open a window
    num_feat = 12
    num_data = 100
    # Design matrix of monomials x**0 .. x**11 on a uniform grid.
    grid = np.linspace(0.0, 10.0, num_data)
    X = np.zeros((num_data, num_feat))
    for power in range(num_feat):
        X[:, power] = grid**power
    # Target depends on exactly two columns.
    y = 2.0 * X[:, 5] - 7.0 * X[:, 9]
    sparsifier = Sparsifier()
    selection, coeff = sparsifier.sparsify(LinearRegression(), X, y)
    sparsifier.plot()
    assert selection == [5, 9]
    assert np.allclose(coeff, [2.0, -7.0])
from collections import Counter
import pytest
import numpy as np
from ase.build import bulk
from ase.neighborlist import neighbor_list
from ase import Atoms
from clease import StructureMapper
def rattled_gold_vac():
    """Rattled 3x3x3 Au cell with four sites deleted (vacancies)."""
    atoms = bulk("Au", a=3.9) * (3, 3, 3)
    # Delete in descending index order so earlier deletions do not
    # shift the remaining target indices.
    for index in (26, 8, 4, 0):
        del atoms[index]
    atoms.rattle(stdev=0.5)
    return atoms
def mg2sn16x6_initial():
    """Ideal 24-site template cell for the Mg2Sn16X6 mapping test.

    Atomic numbers: 50 = Sn, 12 = Mg, 0 = vacancy (X).
    """
    numbers = [
        50, 50, 12, 50, 12, 50, 50, 50, 0, 50, 0, 50,
        50, 50, 0, 50, 0, 50, 50, 50, 0, 50, 0, 50,
    ]
    # Ideal lattice positions (Angstrom) on a 13.5 x 6.75 x 6.75 orthorhombic cell
    positions = [
        [0.0, 0.0, 0.0],
        [0.0, 3.375, 3.375],
        [1.6875, 1.6875, 1.6875],
        [1.6875, 1.6875, 5.0625],
        [1.6875, 5.0625, 1.6875],
        [1.6875, 5.0625, 5.0625],
        [3.375, 0.0, 3.375],
        [3.375, 3.375, 0.0],
        [5.0625, 1.6875, 1.6875],
        [5.0625, 1.6875, 5.0625],
        [5.0625, 5.0625, 1.6875],
        [5.0625, 5.0625, 5.0625],
        [6.75, 0.0, 0.0],
        [6.75, 3.375, 3.375],
        [8.4375, 1.6875, 1.6875],
        [8.4375, 1.6875, 5.0625],
        [8.4375, 5.0625, 1.6875],
        [8.4375, 5.0625, 5.0625],
        [10.125, 0.0, 3.375],
        [10.125, 3.375, 0.0],
        [11.8125, 1.6875, 1.6875],
        [11.8125, 1.6875, 5.0625],
        [11.8125, 5.0625, 1.6875],
        [11.8125, 5.0625, 5.0625],
    ]
    cell = [[13.5, 0.0, 0.0], [0.0, 6.75, 0.0], [0.0, 0.0, 6.75]]
    return Atoms(numbers=numbers, positions=positions, cell=cell, pbc=[1, 1, 1])
def mg2sn16x6_final():
    """Relaxed counterpart of mg2sn16x6_initial: 18 atoms, vacancies removed.

    Positions and cell are taken from a relaxation, so they deviate slightly
    from the ideal lattice.
    """
    numbers = [50, 50, 12, 50, 12, 50, 50, 50, 50, 50, 50, 50, 50, 50, 50, 50, 50, 50]
    positions = [
        [-0.73778024, 2.43e-06, 0.87741687],
        [-0.73778051, 3.33346417, 2.01317287],
        [1.68888309, 1.66673321, 1.44529559],
        [1.68888312, 1.66673289, 4.33588704],
        [1.68887889, 5.00020049, 1.44529525],
        [1.68887853, 5.00019987, 4.3358867],
        [4.11554227, -2.91e-06, 2.01317455],
        [4.11554247, 3.33346954, 0.87741795],
        [5.18664035, 1.666733, 4.33588744],
        [5.18664335, 5.00019985, 4.33588717],
        [6.95991542, 2.46e-06, 0.78537893],
        [6.9599157, 3.33346432, 2.10521272],
        [8.4444051, 1.66673259, 4.33588702],
        [8.44440496, 5.00020033, 4.3358867],
        [9.92889492, -2.76e-06, 2.10521253],
        [9.92889457, 3.33346957, 0.78537809],
        [11.70216672, 1.66673306, 4.33588657],
        [11.70216972, 5.0001999, 4.33588625],
    ]
    # Nearly orthorhombic relaxed cell
    cell = [
        [13.51104832407987, 1.175195482e-07, 1.1506351e-09],
        [-4.9789844e-08, 6.666934187420602, -6.340775195e-07],
        [-1.843332915e-07, -9.583360596e-07, 5.781182902099645],
    ]
    return Atoms(numbers=numbers, positions=positions, cell=cell, pbc=[1, 1, 1])
def mg5sn14x5_initial():
    """Ideal 24-site template cell for the Mg5Sn14X5 mapping test.

    Atomic numbers: 50 = Sn, 12 = Mg, 0 = vacancy (X).
    """
    numbers = [
        50, 50, 12, 50, 50, 12, 50, 50, 0, 12, 12, 50,
        50, 50, 0, 50, 0, 50, 50, 50, 0, 12, 0, 50,
    ]
    # Same ideal lattice positions as mg2sn16x6_initial
    positions = [
        [0.0, 0.0, 0.0],
        [0.0, 3.375, 3.375],
        [1.6875, 1.6875, 1.6875],
        [1.6875, 1.6875, 5.0625],
        [1.6875, 5.0625, 1.6875],
        [1.6875, 5.0625, 5.0625],
        [3.375, 0.0, 3.375],
        [3.375, 3.375, 0.0],
        [5.0625, 1.6875, 1.6875],
        [5.0625, 1.6875, 5.0625],
        [5.0625, 5.0625, 1.6875],
        [5.0625, 5.0625, 5.0625],
        [6.75, 0.0, 0.0],
        [6.75, 3.375, 3.375],
        [8.4375, 1.6875, 1.6875],
        [8.4375, 1.6875, 5.0625],
        [8.4375, 5.0625, 1.6875],
        [8.4375, 5.0625, 5.0625],
        [10.125, 0.0, 3.375],
        [10.125, 3.375, 0.0],
        [11.8125, 1.6875, 1.6875],
        [11.8125, 1.6875, 5.0625],
        [11.8125, 5.0625, 1.6875],
        [11.8125, 5.0625, 5.0625],
    ]
    # Orthorhombic cell given as lengths only
    cell = [13.5, 6.75, 6.75]
    return Atoms(numbers=numbers, positions=positions, cell=cell, pbc=[1, 1, 1])
def mg5sn14x5_final():
    """Relaxed counterpart of mg5sn14x5_initial: 19 atoms, vacancies removed.

    Positions and cell come from a relaxation and are visibly distorted
    relative to the ideal lattice.
    """
    numbers = [
        50, 50, 12, 50, 50, 12, 50, 50, 12, 12,
        50, 50, 50, 50, 50, 50, 50, 12, 50,
    ]
    positions = [
        [-0.48140494, 0.0144833, 1.28457989],
        [-0.48135256, 3.18073832, 1.3010022],
        [2.68308299, 1.5976977, 1.29285962],
        [0.74928077, 1.58449916, 3.86166103],
        [2.50362203, 4.80619196, 1.30956503],
        [0.79185251, 4.79302319, 3.87840805],
        [3.85080856, -0.01562723, 3.85371676],
        [3.85078189, 3.21122682, -1.26805259],
        [6.68490289, 1.58458868, 3.86167725],
        [5.40350685, 4.80638298, 1.30957084],
        [6.7994324, 4.79310118, 3.87843199],
        [8.25224158, 0.03646678, 1.28419469],
        [8.25223131, 3.15901842, 1.30147444],
        [9.87350442, 1.58468593, 3.86177105],
        [9.95438636, 4.79320132, 3.87847087],
        [11.77915648, 0.00864269, 1.28287849],
        [11.77918874, 3.1869771, 1.30286801],
        [13.12273331, 1.58469092, 3.861781],
        [13.16078143, 4.7931881, 3.87847482],
    ]
    cell = [
        [15.54465893532526, 0.0001710673150968, 8.08448379135e-05],
        [6.84993031655e-05, 6.417021143586033, 0.0334966864736102],
        [2.0772942625e-05, -0.0262903932634979, 5.137727732454051],
    ]
    return Atoms(numbers=numbers, cell=cell, positions=positions, pbc=[1, 1, 1])
def test_rattled_structures():
    """Slightly rattled supercells should refine back to the ideal lattice."""
    cases = [
        (bulk("Au", a=3.9) * (4, 4, 4), bulk("Au", cubic=True, a=3.9)),
        (
            bulk("MgO", "rocksalt", a=5.0) * (2, 2, 2),
            bulk("MgO", "rocksalt", a=5.0, cubic=True),
        ),
    ]
    mapper = StructureMapper()
    for structure, expected in cases:
        rattled = structure.copy()
        rattled.rattle(stdev=0.01, seed=0)
        refined = mapper.refine(rattled)
        # Compare sorted, flattened positions: atom ordering may differ
        # between the refined cell and the reference.
        assert np.allclose(
            np.sort(refined.get_positions().ravel()),
            np.sort(expected.get_positions().ravel()),
        )
def test_vacancies_no_distortion():
    """Refining an ideal lattice containing vacancy markers keeps them intact."""
    atoms = bulk("Au", a=3.9) * (3, 3, 3)
    atoms[0].symbol = "X"
    atoms[10].symbol = "X"
    no_vac = atoms.copy()
    # Remove the marked sites (highest index first so index 0 stays valid)
    del atoms[10]
    del atoms[0]
    mapper = StructureMapper(symprec=0.1)
    recovered = mapper.refine(no_vac)
    # Two vacancies (Z=0) per 25 gold atoms (Z=79)
    counts = Counter(recovered.numbers)
    assert counts[0] / counts[79] == pytest.approx(2.0 / 25.0)
    # Every site in the refined fcc lattice should have 12 neighbours at a/sqrt(2)
    indices, dists = neighbor_list("id", recovered, 3.0)
    assert np.all(np.bincount(indices) == 12)
    assert np.allclose(dists, 3.9 / np.sqrt(2.0))
def test_vacancies_with_distortion():
    """Snapping distorted cells onto templates must mark the correct vacancy sites."""
    cases = [
        (rattled_gold_vac(), bulk("Au", a=3.7) * (3, 3, 3), [0, 4, 8, 26]),
        (mg2sn16x6_final(), mg2sn16x6_initial(), [8, 10, 14, 16, 20, 22]),
        (mg5sn14x5_final(), mg5sn14x5_initial(), [8, 14, 16, 20, 22]),
    ]
    mapper = StructureMapper()
    for structure, template, expected_vac in cases:
        snapped, _ = mapper.snap_to_lattice(structure, template)
        # Every expected vacancy site must carry the vacancy symbol
        assert all(snapped[index].symbol == "X" for index in expected_vac)
import numpy as np
from clease.svd import SVD
def test_svd():
    """SVD helper exposes U, S and Vh only after calculate() has run."""
    svd = SVD()
    assert not svd.has_matrices()
    svd.calculate([[2, 2, 2], [1, -1, -1]])
    assert svd.has_matrices()
    # Pre-computed reference decomposition
    expected_U = [[-0.978216, 0.207591], [0.207591, 0.978216]]
    expected_V = [[-0.496149, 0.868238], [-0.613937, -0.35083], [-0.613937, -0.35083]]
    expected_S = [3.52483, 1.60486]
    assert np.allclose(svd.U, expected_U)
    assert np.allclose(svd.Vh, np.transpose(expected_V))
    assert np.allclose(svd.S, expected_S)
"""Test suite for TemplateAtoms."""
import pytest
import numpy as np
import ase
from ase.build import bulk
from ase.spacegroup import crystal
from ase.build import niggli_reduce
from ase.db import connect
from clease.settings import CEBulk, Concentration
from clease.settings import template_atoms
from clease.settings.template_atoms import TemplateAtoms
from clease.settings.template_filters import (
AtomsFilter,
CellFilter,
SkewnessFilter,
DistanceBetweenFacetsFilter,
CellVectorDirectionFilter,
ValidConcentrationFilter,
)
from clease.tools import wrap_and_sort_by_position
class SettingsPlaceHolder:
    """Dummy stand-in for the real settings object.

    Holds only the few attributes the template tests read; its only purpose
    is to keep the tests fast by avoiding a full settings setup.
    """

    # Primitive cell (ase.Atoms); assigned by the helper that builds the dummy.
    atoms = None
    # One list of site indices per sublattice.
    index_by_basis = []
    # Concentration object. All users read/write `settings.concentration`
    # (lowercase); the original capitalized `Concentration` attribute was dead
    # and shadowed the imported Concentration class name.
    concentration = None
class NumAtomsFilter(AtomsFilter):
    """Atoms filter that accepts only cells with more than min_num_atoms atoms."""

    def __init__(self, min_num_atoms):
        # Strict lower bound on the number of atoms a template must exceed
        self.min_num_atoms = min_num_atoms

    def __call__(self, atoms):
        return len(atoms) > self.min_num_atoms
class DummyCellFilter(CellFilter):
    """Cell filter that accepts every cell; used to test add/remove bookkeeping."""

    def __call__(self, cell):
        return True
def get_settings_placeholder_valid_conc_filter(system):
    """
    Helper functions that initialises various dummy settings classes to be
    used together with the test_valid_conc_filter_class
    """
    settings = SettingsPlaceHolder()

    if system == "NaCl":
        prim_cell = bulk("NaCl", crystalstructure="rocksalt", a=4.0)
        settings.atoms = prim_cell
        settings.index_by_basis = [[0], [1]]

        # Force vacancy concentration to be exactly 2/3 of the Cl
        # concentration
        A_eq = [[0, 1, -2.0]]
        b_eq = [0.0]
        settings.concentration = Concentration(
            basis_elements=[["Na"], ["Cl", "X"]], A_eq=A_eq, b_eq=b_eq
        )
    elif system == "LiNiMnCoO":
        # Layered-oxide lattice parameters (spacegroup 166, R-3m setting)
        a = 2.825
        b = 2.825
        c = 13.840
        alpha = 90
        beta = 90
        gamma = 120
        spacegroup = 166
        basis_elements = [["Li"], ["Ni", "Mn", "Co"], ["O"]]
        basis = [(0.0, 0.0, 0.0), (0.0, 0.0, 0.5), (0.0, 0.0, 0.259)]
        # No linear constraints on the concentrations for this system
        A_eq = None
        b_eq = None
        conc = Concentration(basis_elements=basis_elements, A_eq=A_eq, b_eq=b_eq)
        prim_cell = crystal(
            symbols=["Li", "Ni", "O"],
            basis=basis,
            spacegroup=spacegroup,
            cellpar=[a, b, c, alpha, beta, gamma],
            size=[1, 1, 1],
            primitive_cell=True,
        )
        prim_cell = wrap_and_sort_by_position(prim_cell)
        settings.concentration = conc
        # NOTE(review): three basis groups for four sites — presumably sites
        # 1 and 3 share one sublattice after sorting; verify against the crystal.
        settings.index_by_basis = [[0], [2], [1, 3]]
        settings.atoms = prim_cell
    return settings
def check_NaCl_conc(templates):
    """Check every template satisfies the vacancy constraint (2*N_Cl/3 integer)."""
    for atoms in templates:
        num_cl = len([atom for atom in atoms if atom.symbol == "Cl"])
        ratio = 2.0 * num_cl / 3.0
        assert ratio == pytest.approx(np.round(ratio))
    return True
@pytest.fixture
def prim_cell():
    """Primitive fcc Cu cell used by the template-atoms fixtures below."""
    return bulk("Cu", a=4.05, crystalstructure="fcc")
@pytest.fixture
def template_atoms_factory(prim_cell):
    """Factory fixture producing TemplateAtoms with overridable default settings."""

    def _make(**overrides):
        params = dict(supercell_factor=27, size=None, skew_threshold=4)
        params.update(**overrides)
        return TemplateAtoms(prim_cell, **params)

    return _make
def test_fcc(template_atoms_factory):
    """Scaled templates of the fcc primitive must hit the expected diagonal sizes."""
    templates = template_atoms_factory().get_all_scaled_templates()
    expected_diagonals = [
        (1, 1, 1),
        (1, 1, 2),
        (2, 2, 2),
        (2, 2, 3),
        (2, 2, 4),
        (2, 2, 5),
        (2, 3, 3),
        (2, 3, 4),
        (3, 3, 3),
    ]
    expected = [np.diag(diag).tolist() for diag in expected_diagonals]
    assert expected == [template.info["size"] for template in templates]
@pytest.mark.parametrize(
    "test",
    [
        {"system": "NaCl", "func": check_NaCl_conc},
        {"system": "LiNiMnCoO", "func": lambda templ: len(templ) >= 1},
    ],
)
def test_valid_concentration_filter(test):
    """ValidConcentrationFilter acts as an atoms filter, never a cell filter."""
    settings = get_settings_placeholder_valid_conc_filter(test["system"])
    generator = TemplateAtoms(
        settings.atoms, supercell_factor=20, skew_threshold=1000000000
    )
    conc_filter = ValidConcentrationFilter(settings.concentration, settings.index_by_basis)
    # An AtomsFilter must be rejected when attached as a cell filter
    with pytest.raises(TypeError):
        generator.add_cell_filter(conc_filter)
    generator.clear_filters()
    generator.add_atoms_filter(conc_filter)
    assert test["func"](generator.get_all_scaled_templates())
def test_dist_filter():
    """DistanceBetweenFacetsFilter rejects cells whose facets are too close."""
    facet_filter = DistanceBetweenFacetsFilter(4.0)
    cell = np.diag([0.1, 1.0, 1.0])
    assert not facet_filter(cell)
    # Thickening the first facet spacing brings the cell within the 4.0 ratio
    cell[0, 0] = 0.3
    assert facet_filter(cell)
def test_fixed_vol(template_atoms_factory):
    """The conventional fcc cell must appear among the fixed-volume templates."""
    templates = template_atoms_factory().get_fixed_volume_templates(
        num_prim_cells=4, num_templates=100
    )
    # Confirm that the conventional (cubic) cell is present
    conventional = [4.05, 4.05, 4.05, 90, 90, 90]

    def is_conventional(atoms):
        niggli_reduce(atoms)
        return np.allclose(atoms.cell.cellpar(), conventional)

    assert any(is_conventional(atoms) for atoms in templates)
def test_fixed_vol_with_conc_constraint(mocker, db_name):
    """Only cell sizes compatible with the concentration constraint yield templates."""
    # Cluster generation is irrelevant here and slow, so stub it out
    mocker.patch("clease.settings.ClusterExpansionSettings.create_cluster_list_and_trans_matrix")
    # Constrain the Au/Cu ratio: 3*x_Au - 2*x_Cu = 0
    conc = Concentration(basis_elements=[["Au", "Cu"]], A_eq=[[3, -2]], b_eq=[0])
    settings = CEBulk(
        crystalstructure="fcc",
        a=3.8,
        size=[1, 1, 5],
        db_name=db_name,
        max_cluster_dia=[3.0],
        concentration=conc,
        supercell_factor=40,
    )
    settings.skew_threshold = 100
    template_gen = settings.template_atoms
    valid_sizes = {5, 10}
    for num_cells in (4, 5, 7, 10):
        templates = template_gen.get_fixed_volume_templates(num_prim_cells=num_cells)
        if num_cells in valid_sizes:
            assert len(templates) > 0
        else:
            assert len(templates) == 0
def test_remove_atoms_filter(template_atoms_factory):
    """Adding then removing an atoms filter restores an empty filter list."""
    templates = template_atoms_factory(supercell_factor=3)
    atoms_filter = NumAtomsFilter(16)
    templates.add_atoms_filter(atoms_filter)
    assert len(templates.atoms_filters) == 1
    templates.remove_filter(atoms_filter)
    assert len(templates.atoms_filters) == 0
def test_remove_cell_filter(template_atoms_factory):
    """remove_filter must exactly undo add_cell_filter."""
    templates = template_atoms_factory(supercell_factor=3)
    count_before = len(templates.cell_filters)
    cell_filter = DummyCellFilter()
    templates.add_cell_filter(cell_filter)
    assert len(templates.cell_filters) == count_before + 1
    templates.remove_filter(cell_filter)
    assert len(templates.cell_filters) == count_before
def test_set_skewness_threshold(template_atoms_factory):
    """Setting skew_threshold must propagate to every SkewnessFilter."""
    templates = template_atoms_factory()
    templates.skew_threshold = 100
    skewness_filters = [
        cell_filter
        for cell_filter in templates.cell_filters
        if isinstance(cell_filter, SkewnessFilter)
    ]
    # Every skewness filter must now carry the new ratio
    assert all(cell_filter.ratio == 100 for cell_filter in skewness_filters)
def test_size_and_supercell(template_atoms_factory):
    """Setting `size` and `supercell_factor` are mutually exclusive modes."""
    template_atoms = template_atoms_factory()
    assert template_atoms.size is None
    assert template_atoms.supercell_factor is not None

    # Fixing the size clears the supercell factor
    template_atoms.size = [3, 3, 3]
    assert np.allclose(template_atoms.size, np.diag([3, 3, 3]))
    assert template_atoms.supercell_factor is None
    for _ in range(5):
        t = template_atoms.weighted_random_template()
        assert (t.cell == (template_atoms.prim_cell * (3, 3, 3)).get_cell()).all()
        assert t.info["size"] == [[3, 0, 0], [0, 3, 0], [0, 0, 3]]
        assert t == t
    t_size = t

    # Fixing the supercell factor clears the size again
    template_atoms.supercell_factor = 27
    assert template_atoms.size is None
    assert template_atoms.supercell_factor == 27
    sizes = []
    for _ in range(5):
        t = template_atoms.weighted_random_template()
        assert t != t_size
        assert t == t
        size = t.info["size"]
        # Random templates never exceed the requested supercell factor
        assert round(np.linalg.det(size)) <= template_atoms.supercell_factor
        sizes.append(size)
    for s0 in sizes:
        # At least 1 size should be different for each size.
        assert any(s0 != s for s in sizes)
def test_cell_direction_filter(db_name):
    """CellVectorDirectionFilter pins the third cell vector along z."""
    cubic_cell = bulk("Cu", a=4.05, crystalstructure="fcc", cubic=True)
    database = connect(db_name)
    database.write(cubic_cell, name="primitive_cell")
    template_atoms = TemplateAtoms(
        cubic_cell, supercell_factor=1, size=None, skew_threshold=40000
    )
    template_atoms.add_cell_filter(
        CellVectorDirectionFilter(cell_vector=2, direction=[0, 0, 1])
    )
    templates = template_atoms.get_fixed_volume_templates(num_prim_cells=5, num_templates=20)
    assert len(templates) > 1
    for template in templates:
        third_vector = template.get_cell()[2]
        # x and y components of the filtered vector must vanish
        assert np.allclose(third_vector[:2], [0.0, 0.0])
def test_iterate_one_template(template_atoms_factory):
    """With max_per_size=1 each yielded template is strictly larger than the last."""
    template_atoms = template_atoms_factory(supercell_factor=9)
    templates = list(template_atoms.iterate_all_templates(max_per_size=1))
    # Strictly increasing atom count between consecutive templates
    for previous, current in zip(templates, templates[1:]):
        assert len(current) > len(previous)
    count = len(templates)
    # We won't necessarily get 1 template per size
    assert count > 1
    # This comes from checking the output.
    # if the algorithm changes in the future, this _may_ change
    # or if the settings in the test change
    assert count == 4
def test_iterate_all_templates(template_atoms_factory):
    """Iterator and explicit getter must agree on the number of templates."""
    template_atoms = template_atoms_factory(supercell_factor=6)
    templates = list(template_atoms.iterate_all_templates())
    assert all(isinstance(atoms, ase.Atoms) for atoms in templates)
    count = len(templates)
    assert count > 1
    # Check the explicit method
    assert count == len(template_atoms.get_all_templates())
    # This comes from checking the output.
    # if the algorithm changes in the future, this _may_ change
    # or if the settings in the test change
    assert count == 3
from itertools import product
import random
import pytest
import numpy as np
from ase.build import bulk
from ase.spacegroup import crystal
from ase.db import connect
from ase.geometry import wrap_positions
from clease.tools import (
min_distance_from_facet,
factorize,
all_integer_transform_matrices,
species_chempot2eci,
bf2matrix,
rate_bf_subsets,
select_bf_subsets,
cname_lt,
singlets2conc,
aic,
aicc,
bic,
get_extension,
add_file_extension,
equivalent_deco,
sort_cf_names,
split_dataset,
common_cf_names,
constraint_is_redundant,
remove_redundant_constraints,
remove_redundant_equations,
)
from clease import tools
from clease.basis_function import Polynomial
@pytest.fixture
def atoms():
    """Simple-cubic Al cell used by the tools tests."""
    return bulk("Al", crystalstructure="sc", a=4.0)
@pytest.mark.parametrize(
    "test",
    [
        # All three of the first sites are equivalent: 3! orderings of (1,2,3)
        {
            "deco": [1, 2, 3, 4],
            "equiv_site": [[0, 1, 2]],
            "result": [
                [1, 2, 3, 4],
                [1, 3, 2, 4],
                [2, 1, 3, 4],
                [2, 3, 1, 4],
                [3, 1, 2, 4],
                [3, 2, 1, 4],
            ],
        },
        # Sites 0 and 3 equivalent: swap of (1, 4)
        {
            "deco": [1, 2, 3, 4],
            "equiv_site": [[0, 3]],
            "result": [[1, 2, 3, 4], [4, 2, 3, 1]],
        },
        # No equivalent sites: the decoration maps to itself only
        {"deco": [1, 2, 3, 4], "equiv_site": [], "result": [[1, 2, 3, 4]]},
    ],
)
def test_equivalent_deco(test):
    """equivalent_deco expands a decoration over symmetry-equivalent sites."""
    deco = test["deco"]
    equiv_site = test["equiv_site"]
    assert test["result"] == equivalent_deco(deco, equiv_site)
def test_min_distance_from_facet():
    """A point 0.1 away from the nearest cell facet reports that distance."""
    lattice_constant = 4.0
    cell = bulk("Al", crystalstructure="sc", a=lattice_constant).get_cell()
    point = [3.0, 3.5, 3.9]
    assert min_distance_from_facet(point, cell) == pytest.approx(0.1)
def test_factorize():
    """factorize yields the prime factors of its argument, with multiplicity."""
    cases = {
        10: [2, 5],
        16: [2, 2, 2, 2],
        24: [2, 2, 2, 3],
    }
    for value, prime_factors in cases.items():
        assert sorted(factorize(value)) == prime_factors
def test_all_int_matrices():
    """The generator yields a fixed number of integer transform matrices."""
    matrices = all_integer_transform_matrices(10)
    assert len(list(matrices)) == 582
@pytest.mark.parametrize(
    "array,expect",
    [
        # Nested list
        ([[1, 2, 3], [4, 5, 6]], [[1, 2, 3], [4, 5, 6]]),
        # Nested ndarray
        (np.array([[1, 2, 3], [4, 5, 6]]), [[1, 2, 3], [4, 5, 6]]),
        # Flat ndarray
        (np.array([1, 2, 3]), [1, 2, 3]),
        # Partial list/ndarray, nested
        ([[1, 2, 3], np.array([4, 5, 6])], [[1, 2, 3], [4, 5, 6]]),
    ],
)
def test_nested_array2list(array, expect):
    """nested_array2list converts mixed list/ndarray nests to plain lists."""
    assert tools.nested_array2list(array) == expect
def test_species_chempot2eci():
    """Per-species chemical potentials convert to the expected singlet ECIs."""
    tests = [
        # Single basis function, binary system
        {
            "species": {"Au": 1.5},
            "bf_list": [{"Au": 1.0, "Cu": -1.0}],
            "expect": {"c1_0": 1.5},
        },
        # Two basis functions, ternary system
        {
            "species": {"Au": 1.5, "Cu": 0.5},
            "bf_list": [
                {"Au": 0.3, "Cu": 1.2, "X": 3.0},
                {"Au": -0.3, "Cu": 1.2, "X": -3.0},
            ],
            "expect": {"c1_0": 65 / 24, "c1_1": -55 / 24},
        },
    ]
    for i, test in enumerate(tests):
        eci = species_chempot2eci(test["bf_list"], test["species"])
        msg = "Test #{} failed ".format(i) + "Setup: {}".format(test)
        for name, value in eci.items():
            assert value == pytest.approx(test["expect"][name]), msg
def test_bf2matrix():
    """Basis-function dicts convert to a (n_bf, n_species) value matrix."""
    cases = [
        ([{"Al": 1.0, "Mg": -1.0}], [[1.0, -1.0]]),
        (
            [{"Li": 1.0, "O": 0.0, "X": 0.0}, {"Li": 0.0, "O": 1.0, "X": 0.0}],
            [[1.0, 0.0, 0.0], [0.0, 1.0, 0.0]],
        ),
    ]
    for basis_functions, expected in cases:
        assert np.allclose(bf2matrix(basis_functions), expected)
def test_subset_selection():
    """rate_bf_subsets ranks subsets; the top one distinguishes the elements."""
    tests = [
        {
            "bfs": [
                {"Al": 1.0, "Mg": 1.0, "Si": 0.0},
                {"Al": 1.0, "Mg": 0.0, "Si": 1.0},
            ],
            # Only bf 1 separates Al from Mg
            "elems": ["Al", "Mg"],
            "expect": [1],
        },
        {
            "bfs": [
                {"Al": 1.0, "Mg": 1.0, "Si": 0.0},
                {"Al": 1.0, "Mg": 0.0, "Si": 1.0},
            ],
            # Only bf 0 separates Al from Si
            "elems": ["Al", "Si"],
            "expect": [0],
        },
        {
            "bfs": [
                {"Al": 1.0, "Mg": 1.0, "Si": 0.0, "X": 0.0},
                {"Al": 1.0, "Mg": 0.0, "Si": 1.0, "X": 1.0},
                {"Al": 0.0, "Mg": 1.0, "Si": 0.0, "X": 1.0},
            ],
            # Three elements need two basis functions
            "elems": ["Al", "Si", "X"],
            "expect": [0, 2],
        },
    ]
    for test in tests:
        # rate_bf_subsets returns rated subsets; [0][1] is the best selection
        selection = rate_bf_subsets(test["elems"], test["bfs"])[0][1]
        assert selection == test["expect"]
@pytest.mark.parametrize(
    "test",
    [
        {
            "bfs": Polynomial(["Li", "O", "X", "V"]).get_basis_functions(),
            "basis_elems": [["Li", "O"], ["X", "V"]],
        },
        {
            "bfs": Polynomial(["Li", "O", "V"]).get_basis_functions(),
            "basis_elems": [["Li", "O"], ["V", "Li"]],
        },
        {
            "bfs": Polynomial(["Li", "O", "V", "X"]).get_basis_functions(),
            "basis_elems": [["Li", "O", "X"], ["V", "Li"]],
        },
        {
            "bfs": Polynomial(["Li", "O", "V", "X"]).get_basis_functions(),
            "basis_elems": [["Li", "O", "X"], ["V", "Li"], ["O", "X"]],
        },
        {
            "bfs": Polynomial(["Li", "O", "V", "X"]).get_basis_functions(),
            "basis_elems": [["Li", "O", "X"], ["V", "Li"], ["O", "X"], ["V", "O", "X"]],
        },
        {
            "bfs": Polynomial(["Li", "O", "X"]).get_basis_functions(),
            "basis_elems": [["Li", "O", "X"], ["O", "Li"]],
        },
    ],
)
def test_sublattice_bf_selection(test):
    """Selected basis-function subsets must distinguish all element pairs per sublattice."""
    selection = select_bf_subsets(test["basis_elems"], test["bfs"])
    # Confirm that all elements on each sublattice is distinguished
    bfs = test["bfs"]
    for s, elems in zip(selection, test["basis_elems"]):
        # n elements require exactly n-1 basis functions
        assert len(s) == len(elems) - 1
        distinguished = {}
        for bf_indx in s:
            for symb in product(elems, repeat=2):
                if symb[0] == symb[1]:
                    continue
                key = "-".join(sorted(symb))
                # A pair is distinguished if some selected bf assigns the two
                # symbols clearly different values
                diff = bfs[bf_indx][symb[0]] - bfs[bf_indx][symb[1]]
                disting = distinguished.get(key, False)
                distinguished[key] = disting or abs(diff) > 1e-4
        for v in distinguished.values():
            assert v, f"{distinguished}"
@pytest.mark.parametrize(
    "test",
    [
        {"name1": "c0", "name2": "c1", "expect": True},
        {"name1": "c1", "name2": "c1", "expect": False},
        {"name1": "c2_d0000_0", "name2": "c1_0", "expect": False},
        {"name1": "c0", "name2": "c0", "expect": False},
        {"name1": "c1_0", "name2": "c1_1", "expect": True},
        {"name1": "c4_d0000_10", "name2": "c3_d9999_9", "expect": False},
        {"name1": "c4_d0000_10", "name2": "c4_d0000_9", "expect": False},
        {"name1": "c2_d0200_9", "name2": "c2_d0200_29", "expect": True},
    ],
)
def test_cname_lt(test):
    """cname_lt orders correlation-function names by size, diameter, then index."""
    assert cname_lt(test["name1"], test["name2"]) == test["expect"]
@pytest.mark.parametrize(
    "test",
    [
        # Binary system: singlet values +-1 map to the pure phases, 0 to 50/50
        {
            "bf": [{"Au": 1.0, "Cu": -1.0}],
            "cf": np.array([[1.0], [-1.0], [0.0]]),
            "expect": [
                {"Au": 1.0, "Cu": 0.0},
                {"Au": 0.0, "Cu": 1.0},
                {"Au": 0.5, "Cu": 0.5},
            ],
        },
        # Ternary system with two singlet basis functions
        {
            "bf": [{"Li": 1.0, "O": 0.0, "X": -1.0}, {"Li": 1.0, "O": -1.0, "X": 0.0}],
            "cf": np.array([[1.0, 1.0], [-0.5, -0.5]]),
            "expect": [
                {"Li": 1.0, "O": 0.0, "X": 0.0},
                {"Li": 0.0, "O": 0.5, "X": 0.5},
            ],
        },
    ],
)
def test_singlet2conc(test):
    """singlets2conc inverts singlet correlation functions to concentrations."""
    conc = singlets2conc(test["bf"], test["cf"])
    assert len(conc) == len(test["expect"])
    for item1, item2 in zip(conc, test["expect"]):
        assert item1 == item2
def test_aic():
    """AIC matches a pre-computed scalar value and the vectorised formula."""
    # Scalar example: aic = 2*n_feat + n_data*log(mse)
    assert aic(2.0, 3, 5) == pytest.approx(9.465735902799727)

    # Vectorised check against the closed-form expression with random data
    size = 20
    n_feat = np.random.choice(np.arange(1, 1000), size=size)
    n_data = np.random.choice(np.arange(1, 1000), size=size)
    mse = np.random.random(size) + 1e-6  # keep strictly positive for the log
    assert np.allclose(aic(mse, n_feat, n_data), 2 * n_feat + n_data * np.log(mse))
def test_aicc():
    """Corrected AIC for a small pre-computed example."""
    mse, n_feat, n_data = 2.0, 3, 5
    # aicc = aic + correction = (6 + 5*log(2)) + 24
    expected = 6.0 + n_data * np.log(mse) + 24.0
    assert aicc(mse, n_feat, n_data) == pytest.approx(expected)
def test_bic():
    """BIC matches a pre-computed scalar value and the vectorised formula."""
    # Scalar example: bic = n_feat*log(n_data) + n_data*log(mse)
    assert bic(2.0, 3, 5) == pytest.approx(8.294049640102028)

    # Vectorised check against the closed-form expression with random data
    size = 20
    n_feat = np.random.choice(np.arange(1, 1000), size=size)
    n_data = np.random.choice(np.arange(1, 1000), size=size)
    mse = np.random.random(size) + 1e-6  # keep strictly positive for the log
    assert np.allclose(
        bic(mse, n_feat, n_data), np.log(n_data) * n_feat + n_data * np.log(mse)
    )
@pytest.mark.parametrize(
    "fname,expect",
    [
        ("data.csv", ".csv"),
        ("file", ""),
        # Only the last extension counts
        ("double_ext.csv.json", ".json"),
    ],
)
def test_get_file_extension(fname, expect):
    """get_extension returns the trailing extension (or empty string)."""
    assert get_extension(fname) == expect
@pytest.mark.parametrize(
    "test",
    [
        {"fname": "data.csv", "ext": ".csv", "expect": "data.csv"},
        {"fname": "data", "ext": ".json", "expect": "data.json"},
        # Some cases where we expect to fail
        {"fname": "data.json", "ext": ".csv", "expect": None},
        {"fname": "data.json", "ext": "", "expect": None},
        {"fname": "", "ext": ".csv", "expect": None},
    ],
)
def test_add_proper_file_extension(test):
    """add_file_extension appends the extension or raises on a mismatch."""
    expect = test["expect"]
    if expect is None:
        # We should raise
        # NOTE(review): this branch always calls with ("data.json", ".csv")
        # instead of test["fname"]/test["ext"], so the last two parametrized
        # failure cases are never actually exercised. TODO: confirm those
        # inputs raise ValueError and switch to the parametrized values.
        with pytest.raises(ValueError):
            add_file_extension("data.json", ".csv")
    else:
        fname = add_file_extension(test["fname"], test["ext"])
        assert fname == expect
@pytest.mark.parametrize(
    "names,expect",
    [
        (["c1_8", "c0_0", "c1_1"], ["c0_0", "c1_1", "c1_8"]),
        (
            ["c2_d0010_0_00", "c2_d0009_0_00", "c0_0", "c1_1"],
            ["c0_0", "c1_1", "c2_d0009_0_00", "c2_d0010_0_00"],
        ),
        (
            ["c3_d0008_0_00", "c2_d0009_0_00", "c0_0", "c1_1"],
            ["c0_0", "c1_1", "c2_d0009_0_00", "c3_d0008_0_00"],
        ),
    ],
)
def test_sort_cf_names(names, expect):
    """sort_cf_names returns a new, sorted list and accepts any iterable."""
    sorted_names = sort_cf_names(names)
    assert isinstance(sorted_names, list)
    assert sorted_names == expect
    # Assert it's a new list
    assert sorted_names is not names
    assert sorted_names != names

    # Should work on an iterable as well.
    as_iter = iter(names)
    assert not isinstance(as_iter, list)
    sorted_names = sort_cf_names(as_iter)
    assert isinstance(sorted_names, list)
    assert sorted_names == expect
def test_split_dataset():
    """split_dataset partitions data 80/20 and never splits a group."""
    X = np.zeros((100, 10))
    y = np.zeros(100)
    # Case 1: Split without specifying groups
    partitions = split_dataset(X, y, nsplits=5)
    for p in partitions:
        assert p["train_X"].shape == (80, 10)
        assert len(p["train_y"]) == 80
        assert p["validate_X"].shape == (20, 10)
        assert len(p["validate_y"]) == 20

    # Case 2: Specify groups and check that entries that belong
    # to the same groups are not split across different partitions.
    # Every entry is filled with its own group id so set-membership of the
    # raveled data reveals which groups landed in which partition.
    groups = []
    for i in range(len(X)):
        y[i] = i % 20
        X[i, :] = i % 20
        X = X.astype(int)
        y = y.astype(int)
        groups.append(i % 20)
    partitions = split_dataset(X, y, nsplits=5, groups=groups)
    for p in partitions:
        groups_train = set()
        groups_validate = set()
        flatX = p["train_X"].ravel().tolist()
        groups_train = groups_train.union(set(flatX))
        groups_train = groups_train.union(set(p["train_y"]))
        flatX = p["validate_X"].ravel().tolist()
        groups_validate = groups_validate.union(set(flatX))
        groups_validate = groups_validate.union(set(p["validate_y"]))
        # Make sure that the intersection between groups_train and groups
        # validate is an empty set
        assert not groups_train.intersection(groups_validate)
def test_common_cf_names(db_name):
    """Only CF names present in every database row are reported as common."""
    table = "polynomial_cf"
    atoms = bulk("Au")
    cf_rows = [
        {
            "c1_1": 0.0,
            "c2_d0000_0_00": 1.0,
            "c3_d0000_0_000": -1.0,
        },
        {
            "c1_1": 0.0,
            "c2_d0000_0_00": 1.0,
            "c3_d0000_0_000": -1.0,
        },
        # Third row lacks the c2 entry, so it must not be "common"
        {"c1_1": 0.2, "c3_d0000_0_000": 2.0},
    ]
    database = connect(db_name)
    for cf in cf_rows:
        database.write(atoms, external_tables={table: cf})
    ids = {1, 2, 3}
    with connect(db_name) as db:
        cursor = db.connection.cursor()
        common = common_cf_names(ids, cursor, table)
        assert common == {"c1_1", "c3_d0000_0_000"}
@pytest.mark.parametrize(
    "test",
    [
        # Candidate x1 + x2 >= -0.1 is implied by x1 + x2 >= 0
        {
            "A_lb": np.array([[1.0, 1.0]]),
            "b_lb": np.array([0.0]),
            "A_eq": None,
            "b_eq": None,
            "c_lb": np.array([1.0, 1.0]),
            "d": -0.1,
            "expect": True,
        },
        # Candidate x1 + x2 >= 0.1 is stricter than the existing bound
        {
            "A_lb": np.array([[1.0, 1.0]]),
            "b_lb": np.array([0.0]),
            "A_eq": None,
            "b_eq": None,
            "c_lb": np.array([1.0, 1.0]),
            "d": 0.1,
            "expect": False,
        },
        # With an equality constraint added the candidate is still not implied
        {
            "A_lb": np.array([[1.0, 1.0, 0.0]]),
            "b_lb": np.array([0.0]),
            "A_eq": np.array([[1.0, 0.0, -1.0]]),
            "b_eq": np.array([0.0]),
            "c_lb": np.array([1.0, 1.0, 0.0]),
            "d": 0.1,
            "expect": False,
        },
    ],
)
def test_constraint_is_redundant(test):
    """A candidate constraint is redundant iff the existing system implies it."""
    assert (
        constraint_is_redundant(
            test["A_lb"],
            test["b_lb"],
            test["c_lb"],
            test["d"],
            test["A_eq"],
            test["b_eq"],
        )
        == test["expect"]
    )
@pytest.mark.parametrize(
    "test",
    [
        # Paulraj et al.: Example 2.2
        {
            "A_lb": np.array(
                [
                    [-2.0, -1.0],
                    [-4.0, 0.0],
                    [-1.0, -3.0],
                    [-1.0, -2.0],
                    [0.0, -1.0],
                    [1.0, 1.0],
                ]
            ),
            "b_lb": np.array([-8.0, -15.0, -9.0, -14.0, -4.0, -5.0]),
            "A_lb_expect": np.array([[-2.0, -1.0], [-4.0, 0.0], [-1.0, -3.0]]),
            "b_lb_expect": np.array([-8.0, -15.0, -9.0]),
        },
        # Telgen
        {
            "A_lb": np.array(
                [
                    [-1.0, 1.0],
                    [-2.0, -1.0],
                    [-1.0, 0.0],
                    [1.0, -2.0],
                    [0.0, -2.0],
                    [-1.0, -1.0],
                ]
            ),
            "b_lb": np.array([-2.0, -7.0, -2.0, -4.0, -5.0, -4.0]),
            "A_lb_expect": np.array([[-1.0, 0.0], [1.0, -2.0], [0.0, -2.0], [-1.0, -1.0]]),
            "b_lb_expect": np.array([-2.0, -4.0, -5.0, -4.0]),
        },
    ],
    ids=["Paulraj", "Telgen"],
)
# Silence warnings from solving the Telgen test
@pytest.mark.filterwarnings("ignore:Solving system with option", "ignore:Ill-conditioned matrix")
def test_remove_redundant_constraints(test):
    """Redundant rows of A_lb x >= b_lb are removed, matching literature examples."""
    # References:
    #
    # Paulraj et al.
    # Paulraj, S., and P. Sumathi. "A comparative study of redundant constraints
    # identification methods in linear programming problems." Mathematical Problems
    # in Engineering 2010 (2010).
    #
    # Telgen
    # Telgen, Jan. "Identifying redundant constraints and implicit equalities in
    # systems of linear constraints." Management Science 29.10 (1983): 1209-1222.
    A, b = remove_redundant_constraints(test["A_lb"], test["b_lb"])
    assert np.allclose(A, test["A_lb_expect"])
    assert np.allclose(b, test["b_lb_expect"])
@pytest.mark.parametrize(
    "test",
    [
        # Second row is 2x the first: linearly dependent
        {
            "A": np.array([[1.0, 1.0], [2.0, 2.0]]),
            "b": np.array([1.0, 2.0]),
            "A_expect": [[1.0, 1.0]],
            "b_expect": [1.0],
        },
        # Dependent row plus an extra right-hand-side entry
        {
            "A": np.array([[1.0, 1.0, 0.3], [2.0, 2.0, 0.6]]),
            "b": np.array([1.0, 2.0, 0.0]),
            "A_expect": [[1.0, 1.0, 0.3]],
            "b_expect": [1.0],
        },
        # Third row equals 2*row2 - row1
        {
            "A": np.array([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0], [7.0, 8.0, 9.0]]),
            "b": np.array([150.0, 300.0, 450.0]),
            "A_expect": [[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]],
            "b_expect": [150.0, 300.0],
        },
        # Third row is a combination of the first two
        {
            "A": np.array([[1.0, 1.0, 1.0], [2.0, 3.0, 4.0], [4.0, 3.0, 2.0]]),
            "b": np.array([50.0, 158.0, 142.0]),
            "A_expect": [[1.0, 1.0, 1.0], [2.0, 3.0, 4.0]],
            "b_expect": [50.0, 158.0],
        },
    ],
)
def test_remove_redundant_equations(test):
    """Linearly dependent rows of the equation system A x = b are removed."""
    A, b = remove_redundant_equations(test["A"], test["b"])
    assert np.allclose(A, test["A_expect"])
    assert np.allclose(b, test["b_expect"])
def test_cubicness():
    """Cubic cell scores 0; the primitive (non-cubic) cell scores higher."""
    noncubic_score = tools.get_cubicness(bulk("Na", cubic=False))
    cubic_score = tools.get_cubicness(bulk("Na", cubic=True))
    # NOTE(review): the metric appears to measure deviation from cubic
    # (0 for the cubic cell) — confirm against get_cubicness docs.
    assert cubic_score == pytest.approx(0)
    assert cubic_score < noncubic_score
def make_TaOX():
    """Build a Ta-O crystal (spacegroup 55) with an explicit vacancy ("X") site."""
    return crystal(
        symbols=["O", "X", "O", "Ta"],
        basis=[
            (0.0, 0.0, 0.0),
            (0.3894, 0.1405, 0.0),
            (0.201, 0.3461, 0.5),
            (0.2244, 0.3821, 0.0),
        ],
        spacegroup=55,
        cell=None,
        cellpar=[6.25, 7.4, 3.83, 90, 90, 90],
        ab_normal=(0, 0, 1),
        primitive_cell=True,
        size=[1, 1, 1],
    )
@pytest.mark.parametrize(
    "P, expect",
    [
        # Diagonal transform matrices are trivial repetitions
        (np.array([[2, 0, 0], [0, 2, 0], [0, 0, 2]]), True),
        # Off-diagonal entries make the supercell non-trivial
        (np.array([[2, 0, 1], [0, 2, 0], [0, 0, 2]]), False),
        (np.array([[2, -1, 0], [0, 2, 0], [0, 0, 2]]), False),
        (np.array([[1, 0, 0], [0, 1, 0], [0, 0, 3]]), True),
    ],
)
@pytest.mark.parametrize(
    "prim",
    [
        bulk("NaCl", crystalstructure="rocksalt", a=3.0),
        bulk("Au", crystalstructure="fcc", a=3.8),
        make_TaOX(),
    ],
)
def test_is_trivial_supercell(prim, P, expect):
    """is_trivial_supercell detects plain (diagonal) repetitions of the primitive."""
    atoms = tools.make_supercell(prim, P)
    assert tools.is_trivial_supercell(prim, atoms) == expect
@pytest.mark.parametrize(
    "rep",
    [
        (1, 1, 1),
        (3, 2, 1),
        (1, 2, 3),
        (5, 5, 5),
        (8, 1, 1),
    ],
)
@pytest.mark.parametrize(
    "prim",
    [
        bulk("NaCl", crystalstructure="rocksalt", a=3.0),
        bulk("Au", crystalstructure="fcc", a=3.8),
        make_TaOX(),
    ],
)
def test_get_repetition(prim, rep):
    """get_repetition recovers the repetition vector of prim * rep."""
    atoms = prim * rep
    rep_tool = tools.get_repetition(prim, atoms)
    assert all(rep_tool == rep)
@pytest.mark.parametrize(
    "prim",
    [
        bulk("NaCl", crystalstructure="rocksalt", a=3.0),
        bulk("Au", crystalstructure="fcc", a=3.8),
        make_TaOX(),
    ],
)
@pytest.mark.parametrize("center", [(0.5, 0.5, 0.5), (0.2, 0.2, 0.2), (0.8, 0.5, 0.5)])
def test_wrap_3d(prim, center):
    """wrap_positions_3d agrees with ASE's wrap_positions for periodic systems."""
    # Verify wrap_positions_3d yields the same wrapped
    # positions as the ASE version. (only for fully periodic systems)

    # Pre-computed inverse cell.
    cell_T_inv = np.linalg.inv(prim.get_cell().T)

    # Try rotating and translating some amount, and wrapping back into the primitive cell
    # Repeat this process a few times
    for _ in range(5):
        atoms = prim * (5, 5, 5)
        atoms.rotate(random.randint(0, 90), "z")
        atoms.rotate(random.randint(0, 180), "x")
        atoms.translate([random.uniform(-30, 30) for _ in range(3)])

        exp = wrap_positions(atoms.get_positions(), prim.get_cell(), center=center)
        res = tools.wrap_positions_3d(atoms.get_positions(), prim.get_cell(), center=center)
        assert np.allclose(res, exp)

        # Also with the pre-computed inverse cell
        res = tools.wrap_positions_3d(
            atoms.get_positions(),
            prim.get_cell(),
            cell_T_inv=cell_T_inv,
            center=center,
        )
        assert np.allclose(res, exp)
@pytest.mark.parametrize(
    "cname, exp",
    [
        ("c0", 0),
        ("c0_blabla", 0),
        ("c1_0", 1),
        ("c10_d000_000_000", 10),
        ("c33_d000_000_000", 33),
    ],
)
def test_get_size_from_cname(cname, exp):
    """The cluster size is the integer directly after the leading 'c'."""
    assert tools.get_size_from_cf_name(cname) == exp
@pytest.mark.parametrize("cname", ["0", "foobar", "d1"])
def test_get_size_from_cname_bad_name(cname):
    """Malformed correlation-function names must raise ValueError."""
    with pytest.raises(ValueError):
        tools.get_size_from_cf_name(cname)
@pytest.mark.parametrize(
    "cname, exp",
    [
        # Size-0 and size-1 clusters have no diameter field -> 0
        ("c0", 0),
        ("c0_blabla", 0),
        ("c1_0", 0),
        ("c10_d0004_000_000", 4),
        ("c33_d0303_000_000", 303),
    ],
)
def test_get_dia_from_cname(cname, exp):
    """The diameter is the integer encoded after the 'd' marker."""
    assert tools.get_diameter_from_cf_name(cname) == exp
@pytest.mark.parametrize("cname", ["0", "d1", "c2_e0000_3", "g2_d0000_3", "c2_dd0000"])
def test_get_dia_bad_name(cname):
with pytest.raises(ValueError):
tools.get_diameter_from_cf_name(cname)
+53
-2
Metadata-Version: 2.1
Name: clease
Version: 1.0.6
Version: 1.1.0
Summary: CLuster Expansion in Atomistic Simulation Environment

@@ -22,8 +22,59 @@ Home-page: https://gitlab.com/computationalmaterials/clease/

Description-Content-Type: text/markdown
License-File: LICENSE.md
Requires-Dist: ase>=3.22
Requires-Dist: numpy<2
Requires-Dist: cython
Requires-Dist: matplotlib
Requires-Dist: spglib
Requires-Dist: scikit-learn
Requires-Dist: typing_extensions
Requires-Dist: Deprecated
Requires-Dist: click>=8.0.0
Requires-Dist: attrs>=21.4.0
Requires-Dist: scipy>=1.7.0
Requires-Dist: packaging
Requires-Dist: threadpoolctl
Provides-Extra: doc
Requires-Dist: sphinx; extra == "doc"
Requires-Dist: sphinx_rtd_theme; extra == "doc"
Provides-Extra: test
Requires-Dist: pytest; extra == "test"
Requires-Dist: pytest-mock; extra == "test"
Requires-Dist: mock; extra == "test"
Requires-Dist: pytest-benchmark[histogram]>=3.4.1; extra == "test"
Requires-Dist: tox>=4; extra == "test"
Provides-Extra: dev
Requires-Dist: pip; extra == "dev"
Requires-Dist: cython; extra == "dev"
Requires-Dist: pre-commit; extra == "dev"
Requires-Dist: ipython; extra == "dev"
Requires-Dist: twine; extra == "dev"
Requires-Dist: black>=22.1.0; extra == "dev"
Requires-Dist: clang-format>=14.0.3; extra == "dev"
Requires-Dist: ruff; extra == "dev"
Requires-Dist: pyclean>=2.0.0; extra == "dev"
Requires-Dist: pytest-cov; extra == "dev"
Requires-Dist: build; extra == "dev"
Provides-Extra: gui
Requires-Dist: clease-gui; extra == "gui"
Provides-Extra: all
License-File: LICENSE.md
Requires-Dist: black>=22.1.0; extra == "all"
Requires-Dist: mock; extra == "all"
Requires-Dist: twine; extra == "all"
Requires-Dist: ruff; extra == "all"
Requires-Dist: sphinx_rtd_theme; extra == "all"
Requires-Dist: pre-commit; extra == "all"
Requires-Dist: pyclean>=2.0.0; extra == "all"
Requires-Dist: pip; extra == "all"
Requires-Dist: pytest-cov; extra == "all"
Requires-Dist: clease-gui; extra == "all"
Requires-Dist: build; extra == "all"
Requires-Dist: sphinx; extra == "all"
Requires-Dist: pytest-mock; extra == "all"
Requires-Dist: pytest; extra == "all"
Requires-Dist: cython; extra == "all"
Requires-Dist: ipython; extra == "all"
Requires-Dist: clang-format>=14.0.3; extra == "all"
Requires-Dist: tox>=4; extra == "all"
Requires-Dist: pytest-benchmark[histogram]>=3.4.1; extra == "all"

@@ -30,0 +81,0 @@ # CLEASE

+16
-16
ase>=3.22
numpy
numpy<2
cython

@@ -16,21 +16,21 @@ matplotlib

[all]
clang-format>=14.0.3
ipython
black>=22.1.0
mock
twine
ruff
sphinx_rtd_theme
cython
black>=22.1.0
pre-commit
pytest
pytest-mock
mock
pyclean>=2.0.0
pytest-benchmark[histogram]>=3.4.1
tox>=4
pip
pytest-cov
clease-gui
build
pip
sphinx
clease-gui
pylint
twine
pytest-cov
pytest-mock
pytest
cython
ipython
clang-format>=14.0.3
tox>=4
pytest-benchmark[histogram]>=3.4.1

@@ -45,3 +45,3 @@ [dev]

clang-format>=14.0.3
pylint
ruff
pyclean>=2.0.0

@@ -48,0 +48,0 @@ pytest-cov

@@ -198,2 +198,19 @@ LICENSE.md

doc/source/resources/parallel_timing.png
doc/source/resources/view_clusters_example.png
doc/source/resources/view_clusters_example.png
tests/test_atoms_manager.py
tests/test_basis_functions.py
tests/test_convex_hull.py
tests/test_corr_func.py
tests/test_datanormalizer.py
tests/test_db_util.py
tests/test_formula_unit.py
tests/test_geometry.py
tests/test_gramSchmidtMonimial.py
tests/test_mc_trajectory_extractor.py
tests/test_mp_logger.py
tests/test_plotpostprocess.py
tests/test_sparsifier.py
tests/test_structure_mapper.py
tests/test_svd.py
tests/test_templates.py
tests/test_tools.py

@@ -1,2 +0,1 @@

# pylint: disable=undefined-variable
import logging

@@ -3,0 +2,0 @@ from .version import *

@@ -1,1 +0,1 @@

1.0.6
1.1.0
"""Module for setting up pseudospins and basis functions."""
from abc import ABC, abstractmethod
import math
from typing import List, Dict, Optional, Sequence
from typing import Dict, List, Optional, Sequence
import numpy as np
from clease.gramSchmidthMonomials import GramSchmidtMonimial
from clease.jsonio import jsonable
from clease.gramSchmidthMonomials import GramSchmidtMonimial

@@ -64,3 +66,2 @@ __all__ = (

# pylint: disable=no-self-use
def customize_full_cluster_name(self, full_cluster_name: str) -> str:

@@ -67,0 +68,0 @@ """Customize the full cluster names. Default is to do nothing."""

@@ -0,3 +1,3 @@

from ase.calculators.calculator import Calculator, PropertyNotImplementedError
import numpy as np
from ase.calculators.calculator import Calculator, PropertyNotImplementedError

@@ -38,3 +38,2 @@

"""Will never fail the check_state check"""
# pylint: disable=unused-argument
return []

@@ -1,5 +0,8 @@

from typing import Dict, Optional, Iterable, Set, List, Union, Sequence
from typing import Dict, Iterable, List, Optional, Sequence, Set, Union
from ase import Atoms
from clease.datastructures import SystemChange
from clease.settings import ClusterExpansionSettings
from .clease import Clease

@@ -84,3 +87,2 @@

):
if not eci_format_ok(eci.keys()):

@@ -200,3 +202,3 @@ raise ValueError(f"Invalid format of ECI names. Got\n{eci.keys()}")

def get_dBdP(self, cf: Dict[str, float] = None) -> float:
def get_dBdP(self, cf: Optional[Dict[str, float]] = None) -> float:
"""

@@ -235,4 +237,4 @@ Return the pressure derivative of the bulk modulus of the

self,
atoms: Atoms = None,
properties: List[str] = None,
atoms: Optional[Atoms] = None,
properties: Optional[List[str]] = None,
system_changes: Union[Sequence[SystemChange], None] = None,

@@ -239,0 +241,0 @@ ) -> float:

"""Calculator for Cluster Expansion."""
import contextlib
import sys
import contextlib
from typing import Dict, Optional, TextIO, Union, List, Sequence, Any
import numpy as np
from typing import Any, Dict, List, Optional, Sequence, TextIO, Union
from ase import Atoms
from ase.calculators.calculator import PropertyNotImplementedError
from clease_cxx import PyCEUpdater, has_parallel
import numpy as np
from clease.corr_func import CorrFunction
from clease.datastructures import SystemChange, SystemChanges
from clease.corr_func import CorrFunction
from clease.settings import ClusterExpansionSettings

@@ -27,3 +29,2 @@

# pylint: disable=too-many-instance-attributes, too-many-public-methods
class Clease:

@@ -56,3 +57,2 @@ """Class for calculating energy using CLEASE.

) -> None:
if not isinstance(settings, ClusterExpansionSettings):

@@ -88,3 +88,2 @@ msg = "settings must be CEBulk or CECrystal object."

else:
# pylint: disable=consider-using-with
logfile = open(logfile, "a")

@@ -165,4 +164,4 @@ self.logfile = logfile

atoms: Optional[Atoms] = None,
properties: List[str] = None,
system_changes: SystemChanges = None,
properties: Optional[List[str]] = None,
system_changes: Optional[SystemChanges] = None,
) -> float:

@@ -204,3 +203,5 @@ """Calculate the energy of the passed Atoms object.

def get_property(self, name: str, atoms: Atoms = None, allow_calculation: bool = True):
def get_property(
self, name: str, atoms: Optional[Atoms] = None, allow_calculation: bool = True
):
"""Get a property from the calculator.

@@ -216,3 +217,3 @@

def get_potential_energy(self, atoms: Atoms = None) -> float:
def get_potential_energy(self, atoms: Optional[Atoms] = None) -> float:
"""Calculate the energy from scratch with an atoms object"""

@@ -231,3 +232,5 @@ # self.set_atoms(atoms)

def calculation_required(self, atoms: Atoms, properties: Sequence[str] = None) -> bool:
def calculation_required(
self, atoms: Atoms, properties: Optional[Sequence[str]] = None
) -> bool:
"""Check whether a calculation is required for a given atoms object.

@@ -282,3 +285,3 @@ The ``properties`` argument only exists for compatibility reasons, and has no effect.

def update_cf(self, system_changes: SystemChanges = None) -> None:
def update_cf(self, system_changes: Optional[SystemChanges] = None) -> None:
"""Update correlation function based on the reference value.

@@ -285,0 +288,0 @@

from typing import Dict, Optional
from ase import Atoms
from clease.settings import ClusterExpansionSettings
from clease.tools import wrap_and_sort_by_position
from .clease import Clease

@@ -11,3 +14,3 @@

atoms: Atoms,
eci: Dict[str, float] = None,
eci: Optional[Dict[str, float]] = None,
num_threads: Optional[int] = None,

@@ -14,0 +17,0 @@ ) -> Atoms:

import click
from clease.settings import settings_from_json
from . import main

@@ -33,3 +35,2 @@

# method.
# pylint: disable=import-outside-toplevel
from ase.visualize import view

@@ -36,0 +37,0 @@

import click
from clease.db_util import get_all_cf_names, get_all_cf, get_cf_tables
from clease.db_util import get_all_cf, get_all_cf_names, get_cf_tables
from . import main

@@ -29,3 +31,3 @@

show_cf(db_name, db_id)
except Exception as exc: # pylint: disable=broad-except
except Exception as exc:
click.echo(f"An error occurred: {exc}")

@@ -32,0 +34,0 @@

#!/usr/bin/env python
from pathlib import Path, PurePath
from clease_cxx import has_parallel
import click
from clease_cxx import has_parallel
import clease

@@ -6,0 +8,0 @@ from clease.version import __version__

import click
from clease import tools
from clease.settings import settings_from_json
from clease import tools
from . import main

@@ -5,0 +7,0 @@

@@ -0,7 +1,9 @@

from copy import deepcopy
import sys
from typing import List, Sequence, Tuple, Dict
from copy import deepcopy
import numpy as np
from typing import Dict, List, Optional, Sequence, Tuple
from ase import Atoms
from ase.db import connect
import numpy as np
from clease.settings import ClusterExpansionSettings

@@ -54,3 +56,3 @@

settings: ClusterExpansionSettings,
select_cond: Sequence[Tuple[str, str, str]] = None,
select_cond: Optional[Sequence[Tuple[str, str, str]]] = None,
):

@@ -110,3 +112,3 @@ # Make copy such that we don't alter the settings object

def print_report(self, coverage: Dict[str, float] = None, file=sys.stdout) -> None:
def print_report(self, coverage: Optional[Dict[str, float]] = None, file=sys.stdout) -> None:
"""

@@ -113,0 +115,0 @@ Prints a nicely formatted report of coverage.

@@ -1,2 +0,1 @@

# pylint: disable=undefined-variable
from .cluster_fingerprint import *

@@ -3,0 +2,0 @@ from .cluster import *

@@ -0,7 +1,9 @@

from functools import total_ordering
from typing import Any, Iterable
from functools import total_ordering
import attr
import numpy as np
import attr
from clease.jsonio import jsonable, AttrSavable
from clease.jsonio import AttrSavable, jsonable
__all__ = ("ClusterFingerprint",)

@@ -70,4 +72,3 @@

"""The Fingerprint must be a 1d array."""
# pylint: disable=unused-argument, no-self-use
if not value.ndim == 1:
raise ValueError(f"Fingerprint must be a 1d array, got {value.ndim}")
from itertools import product
from typing import List, Tuple, Dict, Set, Iterator, Iterable, Union, Sequence
from math import sqrt
from typing import Dict, Iterable, Iterator, List, Optional, Sequence, Set, Tuple, Union
from ase import Atoms
import numpy as np
from scipy.spatial.distance import cdist
from ase import Atoms
from clease import tools
from clease.datastructures import FourVector, Figure
from clease.datastructures import Figure, FourVector
from .cluster import Cluster

@@ -85,3 +88,3 @@ from .cluster_fingerprint import ClusterFingerprint

def to_four_vector(self, cartesian: np.ndarray, sublattice: int = None) -> FourVector:
def to_four_vector(self, cartesian: np.ndarray, sublattice: Optional[int] = None) -> FourVector:
"""Translate a position in Cartesian coordinates to its FourVector"""

@@ -328,3 +331,2 @@ if cartesian.ndim != 1:

"""
# pylint: disable=no-self-use
return [[lut[fv] for fv in fig.components] for fig in cluster.figures]

@@ -408,3 +410,3 @@

# This is the full Figure object
components = [x0] + current
components = [x0, *current]
fig = Figure(components)

@@ -418,3 +420,3 @@ yield fig

rem = set.intersection(remaining, within_cutoff[next_item])
yield from recursive_yield(rem, current + [next_item])
yield from recursive_yield(rem, [*current, next_item])

@@ -421,0 +423,0 @@ for v in within_cutoff[x0]:

from copy import deepcopy
from ase.geometry import wrap_positions
import numpy as np
from scipy.spatial import cKDTree as KDTree
from ase.geometry import wrap_positions
from .cluster_list import ClusterList

@@ -6,0 +8,0 @@

from __future__ import annotations
import copy
from itertools import product
import copy
from typing import List, Dict, Any
import logging
from typing import Any, Dict, List
from ase import Atoms
from clease.tools import flatten, dec_string, list2str
from clease.jsonio import jsonable
from clease.tools import dec_string, flatten, list2str
from .cluster import Cluster
from .cluster_generator import ClusterGenerator
from .cluster import Cluster

@@ -24,3 +26,2 @@ logger = logging.getLogger(__name__)

class ClusterList:
# pylint: disable=too-many-public-methods
def __init__(self):

@@ -328,3 +329,3 @@ self._clusters = []

pref = k.rpartition("_")[0]
prefix_map[pref] = prefix_map.get(pref, []) + [k]
prefix_map[pref] = [*prefix_map.get(pref, []), k]

@@ -331,0 +332,0 @@ for k, v in prefix_map.items():

@@ -1,16 +0,17 @@

from typing import Sequence, Set, Dict, List, Iterator, Tuple, Callable
from copy import deepcopy
import functools
from itertools import product
import logging
from itertools import product
import functools
from copy import deepcopy
from typing import Callable, Dict, Iterator, List, Optional, Sequence, Set, Tuple
import ase
import numpy as np
import ase
from clease import tools
from clease.datastructures import FourVector, Figure, TransMatrix
from clease.datastructures import Figure, FourVector, TransMatrix
from .cluster import Cluster
from .cluster_fingerprint import ClusterFingerprint
from .cluster import Cluster
from .cluster_generator import ClusterGenerator
from .cluster_list import ClusterList
from .cluster_generator import ClusterGenerator
from .utils import name_clusters, size

@@ -33,3 +34,3 @@

def __init__(self, prim_cell: ase.Atoms, background_syms: Set[str] = None):
def __init__(self, prim_cell: ase.Atoms, background_syms: Optional[Set[str]] = None):
self._background_syms = background_syms or set()

@@ -392,3 +393,2 @@

"""Wrap FourVectors using the trivial shift+modulo operation"""
# pylint: disable=no-self-use
# We use the .tolist() method, faster to iterate the Python list than the NumPy array

@@ -405,3 +405,3 @@ # for building the subsequent FourVectors.

cell: np.ndarray,
cell_T_inv: np.ndarray = None,
cell_T_inv: Optional[np.ndarray] = None,
) -> Iterator[FourVector]:

@@ -408,0 +408,0 @@ """Generalized FourVector wrapping function."""

@@ -1,9 +0,12 @@

from typing import Sequence, Dict, Any, List
from functools import total_ordering
import numpy as np
from typing import Any, Dict, List, Sequence
from ase import Atoms
import attr
import numpy as np
from clease.datastructures import Figure
from clease.jsonio import jsonable, AttrSavable
from clease.jsonio import AttrSavable, jsonable
from clease.tools import equivalent_deco, list2str
from .cluster_fingerprint import ClusterFingerprint

@@ -41,3 +44,2 @@

"""Verify that we have the correct type in the "figures" field."""
# pylint: disable=unused-argument, no-self-use
for ii, v in enumerate(value):

@@ -44,0 +46,0 @@ if not isinstance(v, Figure):

@@ -0,5 +1,8 @@

from typing import Optional
from ase.db import connect
import matplotlib.pyplot as plt
import numpy as np
from scipy.spatial import ConvexHull as SciConvexHull
import matplotlib.pyplot as plt
from ase.db import connect
from clease.tools import invert_matrix

@@ -39,4 +42,2 @@

# pylint: disable=too-many-instance-attributes
def __init__(

@@ -48,3 +49,3 @@ self,

conc_scale=1.0,
conc_ranges: dict = None,
conc_ranges: Optional[dict] = None,
):

@@ -257,3 +258,2 @@ if conc_ranges is None:

"""
# pylint: disable=too-many-branches
# We only add the Convex Hull for the DFT

@@ -306,4 +306,2 @@ # data

c_hull = self.get_convex_hull(conc_var=elems[i])
# pylint cannot inspect scipy C things, false positive
# pylint: disable=no-member
for simpl in c_hull.simplices:

@@ -325,3 +323,2 @@ if self._is_lower_conv_hull(simpl):

"""Show all entries on the convex hull."""
# pylint: disable=import-outside-toplevel
from ase.gui.gui import GUI

@@ -332,4 +329,2 @@ from ase.gui.images import Images

indices = set()
# pylint cannot inspect scipy C things, false positive
# pylint: disable=no-member
for simplex in c_hull.simplices:

@@ -336,0 +331,0 @@ if self._is_lower_conv_hull(simplex):

"""Module for calculating correlation functions."""
from typing import Iterator, Tuple, Dict, Any
import logging
from typing import Any, Dict, Iterator, Tuple
from ase.atoms import Atoms
from clease_cxx import PyCEUpdater
from clease_cxx import PyCEUpdater
from . import db_util
from .settings import ClusterExpansionSettings
from .tools import wrap_and_sort_by_position
from . import db_util

@@ -11,0 +12,0 @@ logger = logging.getLogger(__name__)

@@ -0,11 +1,13 @@

from abc import ABC, abstractmethod
from collections import defaultdict
from itertools import combinations_with_replacement as cwr
import logging
from typing import Tuple, List, Dict, Set, Optional, Sequence
from collections import defaultdict
import sqlite3
from abc import ABC, abstractmethod
from itertools import combinations_with_replacement as cwr
from typing import Dict, List, Optional, Sequence, Set, Tuple
from ase.db import connect
import numpy as np
from ase.db import connect
from clease import db_util
from clease.tools import add_file_extension, sort_cf_names, get_ids
from clease.tools import add_file_extension, get_ids, sort_cf_names

@@ -745,3 +747,2 @@ logger = logging.getLogger(__name__)

"""
# pylint: disable=too-many-statements, too-many-locals
cf_getter = CorrelationFunctionGetter(

@@ -875,3 +876,2 @@ self.db_name, self.tab_name, self.cf_names, order=self.cf_order

for value, db_id in cur.fetchall():
# Extract only the ones that are present in the set of ids

@@ -878,0 +878,0 @@ if db_id in ids:

from typing import Tuple
import numpy as np

@@ -62,3 +63,2 @@

"""
# pylint: disable=no-self-use
std = np.std(X, axis=0, ddof=1)

@@ -80,3 +80,2 @@ return np.argwhere(std < tol)[:, 0]

"""
# pylint: disable=no-self-use
std = np.std(X, axis=0, ddof=1)

@@ -83,0 +82,0 @@ return np.argwhere(std >= tol)[:, 0]

@@ -1,3 +0,1 @@

# pylint: disable=undefined-variable
from .system_changes import *

@@ -4,0 +2,0 @@ from .four_vector import *

"""This module defines the "Figure" class, which is a collection of FourVector objects."""
from typing import Iterable, Any, Tuple
from typing import Any, Iterable, Optional, Tuple
import ase
import attr
import ase
import numpy as np
from clease.jsonio import jsonable, AttrSavable
from clease.jsonio import AttrSavable, jsonable
from .four_vector import FourVector

@@ -49,3 +52,2 @@

"""Perform a check that all elements in the components sequence are FourVector objects"""
# pylint: disable=unused-argument, no-self-use
# The signature of this function is dictated by attrs.

@@ -59,3 +61,5 @@ for ii, v in enumerate(value):

def to_cartesian(self, prim: ase.Atoms, transposed_cell: np.ndarray = None) -> np.ndarray:
def to_cartesian(
self, prim: ase.Atoms, transposed_cell: Optional[np.ndarray] = None
) -> np.ndarray:
"""Get the Figure in terms of the cartesian coordinates, as defined

@@ -80,3 +84,3 @@ by the primitive lattice.

def get_diameter(self, prim: ase.Atoms, transposed_cell: np.ndarray = None) -> float:
def get_diameter(self, prim: ase.Atoms, transposed_cell: Optional[np.ndarray] = None) -> float:
"""Calculate the diameter of the figure, as the maximum distance to the

@@ -83,0 +87,0 @@ geometric center of the figure in cartesian coordinates.

from __future__ import annotations
from collections import Counter
import copy
from collections import Counter
from itertools import product
from typing import NamedTuple, Tuple, List
from typing import List, NamedTuple, Optional, Tuple
from ase import Atoms
from ase.atoms import Cell
import attr
import numpy as np
from scipy.optimize import linear_sum_assignment
from scipy.spatial.distance import cdist
from scipy.optimize import linear_sum_assignment
from ase.atoms import Cell
from ase import Atoms
import attr
from clease.jsonio import jsonable, AttrSavable
from clease.jsonio import AttrSavable, jsonable
__all__ = ("FourVector", "construct_four_vectors")

@@ -30,3 +33,3 @@

def to_cartesian(self, prim: Atoms, transposed_cell: np.ndarray = None) -> np.ndarray:
def to_cartesian(self, prim: Atoms, transposed_cell: Optional[np.ndarray] = None) -> np.ndarray:
"""Convert the four vector into cartesian coordinates

@@ -148,4 +151,4 @@

ranges = [range(bbox.lower[i], bbox.upper[i] + 1) for i in range(3)] + [range(len(prim))]
for ix, iy, iz, l in product(*ranges):
fv = FourVector(ix, iy, iz, l)
for ix, iy, iz, lattice in product(*ranges):
fv = FourVector(ix, iy, iz, lattice)
four_vecs.append(fv)

@@ -152,0 +155,0 @@ scaled_pos.append(fv.to_scaled(prim))

@@ -1,4 +0,7 @@

from typing import Dict, Any
from typing import Any, Dict
import attr
from clease.jsonio import AttrSavable, jsonable
from .system_changes import SystemChanges

@@ -5,0 +8,0 @@

@@ -0,6 +1,8 @@

import typing
from typing import Sequence, Tuple
import typing
import ase
from ase.data import atomic_numbers
import attr
from clease.jsonio import AttrSavable, jsonable

@@ -7,0 +9,0 @@

from __future__ import annotations
from typing import List, Dict
from typing import Dict, List
import attr

@@ -62,3 +64,2 @@ import numpy as np

def _validate_trans_matrix(self, attribute, value):
# pylint: disable=unused-argument, no-self-use
if not isinstance(value, list):

@@ -65,0 +66,0 @@ raise TypeError(f"Expected the trans matrix as a list, got {value!r}")

@@ -1,9 +0,11 @@

import logging
from collections import defaultdict
from typing import List, Dict, Set, Sequence, Union
from datetime import datetime
from packaging.version import parse, Version
import numpy as np
import logging
from typing import Dict, List, Sequence, Set, Union
import ase
from ase.db import connect
import numpy as np
from packaging.version import Version, parse
from .version import __version__ # Avoid triggering other imports

@@ -19,3 +21,3 @@

"""Toggle to disable/enable the "require_reconfigure_table" function."""
global REQUIRE_COMPATIBLE_TABLE_VERISON # pylint: disable=global-statement
global REQUIRE_COMPATIBLE_TABLE_VERISON
new = not REQUIRE_COMPATIBLE_TABLE_VERISON

@@ -68,3 +70,2 @@ logger.info(

table_name = MetaTableKeys.CLEASE_META_TABLE
# pylint: disable=protected-access
if not ase_connection._external_table_exists(table_name):

@@ -339,4 +340,4 @@ logger.debug("No metadata table was found")

db = connect(db_name)
ext_tab = db._get_external_table_names() # pylint: disable=protected-access
ext_tab = db._get_external_table_names()
cf_tables = [n for n in ext_tab if n.endswith("_cf")]
return cf_tables
"""Module that fits ECIs to energy data."""
# pylint: skip-file
import os
import sys
from collections import Counter, defaultdict
import json
import logging as lg
import multiprocessing as mp
from typing import Dict, List, Sequence, Optional
from collections import defaultdict, Counter
import os
import sys
from typing import Dict, List, Optional, Sequence
from ase.db import connect
from deprecated import deprecated
import numpy as np
from ase.db import connect
import threadpoolctl
from clease.cluster_coverage import ClusterCoverageChecker
from clease.data_manager import make_corr_func_data_manager
from clease.mp_logger import MultiprocessHandler
from clease.regression import LinearRegression
from clease.settings import ClusterExpansionSettings
from clease.regression import LinearRegression
from clease.mp_logger import MultiprocessHandler
from clease.tools import singlets2conc, get_ids, get_attribute
from clease.data_manager import make_corr_func_data_manager
from clease.cluster_coverage import ClusterCoverageChecker
from clease.tools import (
add_file_extension,
get_attribute,
get_diameter_from_cf_name,
get_ids,
get_size_from_cf_name,
singlets2conc,
sort_cf_names,
get_size_from_cf_name,
get_diameter_from_cf_name,
)

@@ -426,3 +427,4 @@

import matplotlib.pyplot as plt
from clease.interactive_plot import ShowStructureOnClick, AnnotatedAx
from clease.interactive_plot import AnnotatedAx, ShowStructureOnClick
import clease.plot_post_process as pp

@@ -689,3 +691,4 @@

# We need to limit NumPy's parallelization (and any other BLAS/OpenMP threading)
# as it'll spawn num_score * NUM_THREADS threads, which ultimately hurts the performance.
# as it'll spawn num_score * NUM_THREADS threads,
# which ultimately hurts the performance.
# We un-limit the threading again after the work is done.

@@ -909,4 +912,5 @@ with threadpoolctl.threadpool_limits(limits=1):

import matplotlib.pyplot as plt
from clease.interactive_plot import InteractivePlot, AnnotatedAx
from clease.interactive_plot import AnnotatedAx, InteractivePlot
if self.eci is None:

@@ -913,0 +917,0 @@ self.fit()

"""Module for tools pertaining to geometry of atoms and cells."""
from ase import Atoms
import numpy as np
from ase import Atoms

@@ -5,0 +5,0 @@ __all__ = ("max_sphere_dia_in_cell", "supercell_which_contains_sphere", "cell_wall_distances")

from typing import Dict, List
import numpy as np

@@ -177,3 +178,2 @@

"""
# pylint: disable=no-self-use
return value**power

@@ -1,4 +0,4 @@

from typing import Union, Sequence, Callable, List
from tkinter import TclError
import numpy as np
from typing import Callable, List, Sequence, Union
from ase.db import connect

@@ -8,2 +8,3 @@ from ase.gui.gui import GUI

import attr
import numpy as np

@@ -209,3 +210,2 @@

def on_click(self, event) -> None:
if not self.event_in_ax(event):

@@ -212,0 +212,0 @@ return

import json
from typing import Any, Dict
import attr
from ase.io import jsonio as aseio
from ase.utils import reader, writer
import attr

@@ -33,3 +34,2 @@ __all__ = ("encode", "decode", "read_json", "write_json", "jsonable")

def default(self, obj):
# pylint: disable=arguments-renamed
for encoder in self.encoders:

@@ -60,3 +60,2 @@ try:

# so it's not actually an issue.
# pylint: disable=import-outside-toplevel,cyclic-import
def create_clease_object(objtype, dct):

@@ -150,3 +149,3 @@ if objtype == "concentration":

obj = read_json(fd, **kwargs)
assert isinstance(obj, cls) # pylint: disable=isinstance-second-argument-not-valid-type
assert isinstance(obj, cls)
return obj

@@ -153,0 +152,0 @@

@@ -1,5 +0,5 @@

from typing import Optional, TextIO, Iterator
from contextlib import contextmanager
import logging
import sys
from contextlib import contextmanager
from typing import Iterator, Optional, TextIO

@@ -6,0 +6,0 @@ __all__ = ("log_stream", "log_stream_context", "get_root_clease_logger")

import logging
from matplotlib import pyplot as plt
import numpy as np
from matplotlib import pyplot as plt

@@ -19,3 +20,2 @@ logger = logging.getLogger(__name__)

"""
# pylint: disable=no-self-use
if len(atoms1) != len(atoms2):

@@ -66,3 +66,2 @@ return False

"""
# pylint: disable=no-self-use
dev = []

@@ -90,3 +89,2 @@ for s in swaps:

"""
# pylint: disable=no-self-use
return np.exp(-((x - mu) ** 2) / (2 * var)) / np.sqrt(2 * np.pi * var)

@@ -93,0 +91,0 @@

@@ -1,2 +0,1 @@

# pylint: disable=undefined-variable
from . import swap_move_index_tracker

@@ -3,0 +2,0 @@ from .mc_evaluator import *

@@ -0,4 +1,6 @@

from abc import ABC, abstractmethod
from typing import Dict, Union
from abc import ABC, abstractmethod
from ase import Atoms
from clease.datastructures import SystemChanges

@@ -5,0 +7,0 @@ from clease.montecarlo.mc_evaluator import MCEvaluator, construct_evaluator

@@ -0,5 +1,7 @@

from abc import ABC
from typing import Union
from abc import ABC
from ase import Atoms
from ase.units import kB
from .mc_evaluator import MCEvaluator, construct_evaluator

@@ -60,4 +62,4 @@

"""Set the internal temperature."""
self._temperature = value # pylint: disable=attribute-defined-outside-init
self.kT = value * kB # pylint: disable=attribute-defined-outside-init
self._temperature = value
self.kT = value * kB
self._on_temp_change()

@@ -64,0 +66,0 @@

@@ -1,6 +0,7 @@

# pylint: skip-file
# XXX: Some funny imports here. This file needs to be cleaned up some
from typing import Sequence
import numpy as np
from scipy.interpolate import interp1d
from clease.datastructures import SystemChange

@@ -33,3 +34,2 @@

# pylint: disable=no-self-use,unused-argument
def calculate_from_scratch(self, atoms):

@@ -36,0 +36,0 @@ """Calculate the bias potential from scratch.

from typing import Sequence
import numpy as np
from clease.datastructures import SystemChange
from .bias_potential import BiasPotential

@@ -5,0 +8,0 @@

import numpy as np
from clease.datastructures import SystemChanges
from .mc_constraint import MCConstraint

@@ -4,0 +6,0 @@

from typing import Sequence
from ase import Atoms
import numpy as np
from ase import Atoms
from clease.datastructures.system_changes import SystemChanges
from .mc_constraint import MCConstraint

@@ -6,0 +9,0 @@

from clease.datastructures.system_changes import SystemChanges
from .mc_constraint import MCConstraint

@@ -3,0 +4,0 @@

from typing import Sequence
from clease.datastructures import SystemChanges
from .mc_constraint import MCConstraint

@@ -4,0 +6,0 @@

from clease.datastructures import SystemChanges
from .mc_constraint import MCConstraint

@@ -3,0 +4,0 @@

from typing import Sequence
import numpy as np
from clease.datastructures import SystemChange
from .bias_potential import BiasPotential
# pylint: disable=too-many-instance-attributes
class GaussianKernelBiasPotential(BiasPotential):

@@ -39,3 +41,2 @@ """

def __init__(self, xmin=0.0, xmax=1.0, num_kernels=10, width=0.1, getter=None):
self.xmin = xmin

@@ -42,0 +43,0 @@ self.xmax = xmax

@@ -1,13 +0,16 @@

from typing import List, Tuple, Sequence, Union
import warnings
import logging
import random
import time
import random
from typing import List, Sequence, Tuple, Union
import warnings
from ase import Atoms
import numpy as np
from clease.datastructures import SystemChange, MCStep, SystemChanges
from .mc_evaluator import MCEvaluator
from clease.datastructures import MCStep, SystemChange, SystemChanges
from .barrier_models import BarrierModel
from .base import BaseMC
from .barrier_models import BarrierModel
from .kmc_events import KMCEventType
from .mc_evaluator import MCEvaluator
from .observers import MCObserver

@@ -133,3 +136,2 @@

"""
# pylint: disable=unused-argument
return self.attempt_freq

@@ -136,0 +138,0 @@

@@ -0,3 +1,4 @@

from abc import ABCMeta
from typing import List
from abc import ABCMeta
from ase import Atoms

@@ -4,0 +5,0 @@ from ase.neighborlist import neighbor_list

@@ -1,6 +0,8 @@

from typing import Union
import logging
from typing import Optional, Union
from ase import Atoms
from clease.calculator import Clease
from clease.datastructures import SystemChanges
from clease.calculator import Clease
from clease.settings.settings import ClusterExpansionSettings

@@ -29,3 +31,3 @@

def get_energy(self, applied_changes: SystemChanges = None) -> float:
def get_energy(self, applied_changes: Optional[SystemChanges] = None) -> float:
"""Evaluate the energy of a system.

@@ -47,3 +49,2 @@ If a change is sufficiently local/small, it there, in some situations,

"""
# pylint: disable=unused-argument
# system_changes are passed in (optionally) in order to allow for localized evaluations

@@ -81,3 +82,3 @@ # See discussion:

def keep_system_changes(self, system_changes: SystemChanges = None) -> None:
def keep_system_changes(self, system_changes: Optional[SystemChanges] = None) -> None:
"""A set of system changes are to be kept. Perform necessary actions to prepare

@@ -124,3 +125,3 @@ for a new evaluation."""

def get_energy(self, applied_changes: SystemChanges = None) -> float:
def get_energy(self, applied_changes: Optional[SystemChanges] = None) -> float:
return self.calc.get_energy()

@@ -156,3 +157,3 @@

def keep_system_changes(self, system_changes: SystemChanges = None) -> None:
def keep_system_changes(self, system_changes: Optional[SystemChanges] = None) -> None:
"""A set of system changes are to be kept. Perform necessary actions to prepare

@@ -159,0 +160,0 @@ for a new evaluation."""

@@ -0,7 +1,9 @@

from copy import deepcopy
import json
import logging
import time
import logging
import json
from copy import deepcopy
from ase.units import kB
import numpy as np
from ase.units import kB
from .constraints import CollectiveVariableConstraint

@@ -70,3 +72,2 @@

x = float(x)
# pylint: disable=broad-except
except Exception:

@@ -82,3 +83,2 @@ return False

x = float(x)
# pylint: disable=broad-except
except Exception:

@@ -184,3 +184,2 @@ return False

# pylint: disable=protected-access
self.mc._mc_step()

@@ -187,0 +186,0 @@ self.update()

"""Monte Carlo method for ase."""
from typing import Dict, Union, Iterator, Any
from collections import Counter
from datetime import datetime
import time
import logging
import math
import random
import math
from collections import Counter
import time
from typing import Any, Dict, Iterator, Optional, Union
from ase import Atoms
from ase.units import kB
from clease.datastructures import SystemChanges, MCStep
from .mc_evaluator import CEMCEvaluator, MCEvaluator
from clease.datastructures import MCStep, SystemChanges
from .averager import Averager
from .base import BaseMC
from .averager import Averager
from .bias_potential import BiasPotential
from .mc_evaluator import CEMCEvaluator, MCEvaluator
from .observers import MCObserver
from .trial_move_generator import TrialMoveGenerator, RandomSwap
from .trial_move_generator import RandomSwap, TrialMoveGenerator

@@ -22,3 +25,2 @@ logger = logging.getLogger(__name__)

# pylint: disable=too-many-instance-attributes
class Montecarlo(BaseMC):

@@ -46,3 +48,3 @@ """Class for running Monte Carlo at a fixed composition (canonical).

temp: float,
generator: TrialMoveGenerator = None,
generator: Optional[TrialMoveGenerator] = None,
):

@@ -49,0 +51,0 @@ # We cannot cause an energy calculation trigger in init,

@@ -1,2 +0,1 @@

# pylint: disable=undefined-variable
from .mc_observer import MCObserver

@@ -3,0 +2,0 @@ from .sgc_observer import SGCObserver

from typing import Dict
from clease.datastructures import SystemChanges

@@ -3,0 +4,0 @@ from clease.montecarlo.observers.mc_observer import MCObserver

@@ -1,3 +0,4 @@

from typing import Union
from pathlib import Path
from typing import Optional, Union
import numpy as np

@@ -18,3 +19,3 @@

def __init__(self, size: int = 1000, fname: Union[str, Path] = None):
def __init__(self, size: int = 1000, fname: Optional[Union[str, Path]] = None):
self._buffer = np.zeros(size)

@@ -21,0 +22,0 @@ self._next = 0

from typing import Dict
import ase
from clease.datastructures import MCStep, SystemChanges
from clease.montecarlo.averager import Averager
from .mc_observer import MCObserver

@@ -6,0 +9,0 @@

import numpy as np
from clease.datastructures import SystemChanges
from .mc_observer import MCObserver

@@ -4,0 +6,0 @@

import logging
import numpy as np
from clease.tools import add_file_extension
from .mc_observer import MCObserver

@@ -5,0 +8,0 @@

import time
import numpy as np
from .mc_observer import MCObserver

@@ -7,3 +9,2 @@

class EnergyPlotUpdater(MCObserver):
name = "EnergyPlotUpdater"

@@ -10,0 +11,0 @@

@@ -0,4 +1,6 @@

from pathlib import Path
from typing import List, Union
from pathlib import Path
import numpy as np
from .buffered_array import BufferedArray

@@ -5,0 +7,0 @@

import logging
import ase
import numpy as np
import ase
from clease.calculator import CleaseCacheCalculator
from clease.datastructures import MCStep
from clease.calculator import CleaseCacheCalculator
from .mc_observer import MCObserver

@@ -33,3 +36,2 @@

self.verbose = verbose
# Silence pylint
self.lowest_energy_cf = None

@@ -36,0 +38,0 @@ self.reset()

from abc import ABC
from ase import Atoms
from clease.datastructures import SystemChanges, MCStep
from clease.datastructures import MCStep, SystemChanges
class MCObserver(ABC):

@@ -38,3 +40,2 @@ """Base class for all MC observers.

"""Return averages in the form of a dictionary."""
# pylint: disable=no-self-use
return {}

@@ -56,3 +57,2 @@

"""
# pylint: disable=no-self-use, unused-argument
return True
from __future__ import annotations
from typing import Iterator, List
import ase
from clease.calculator import CleaseCacheCalculator
from clease.datastructures import MCStep
from clease.calculator import CleaseCacheCalculator
from .mc_observer import MCObserver

@@ -7,0 +11,0 @@

@@ -1,5 +0,8 @@

from typing import List, Dict, Tuple
from typing import Dict, List, Tuple
from ase.units import kB
import numpy as np
from clease.calculator import Clease
from .mc_observer import MCObserver

@@ -6,0 +9,0 @@

@@ -0,6 +1,8 @@

import attr
import numpy as np
import attr
from clease.calculator import Clease
from clease.datastructures.mc_step import MCStep
from clease.montecarlo.averager import Averager
from clease.datastructures.mc_step import MCStep
from .mc_observer import MCObserver

@@ -7,0 +9,0 @@

import numpy as np
from clease.datastructures import SystemChanges
from .mc_observer import MCObserver

@@ -4,0 +6,0 @@

from typing import Sequence
from ase.atoms import Atoms
from ase.io.trajectory import TrajectoryWriter
from clease.datastructures import SystemChange
from clease.tools import add_file_extension
from .mc_observer import MCObserver

@@ -7,0 +10,0 @@

import numpy as np
from .bias_potential import BiasPotential

@@ -3,0 +4,0 @@

@@ -1,10 +0,13 @@

from typing import Sequence, Dict, Any
import numpy as np
from typing import Any, Dict, Optional, Sequence
from ase import Atoms
from ase.units import kB
import numpy as np
from clease.calculator import Clease
from clease.settings import ClusterExpansionSettings
from .montecarlo import Montecarlo
from .observers import SGCObserver
from .montecarlo import Montecarlo
from .trial_move_generator import TrialMoveGenerator, RandomFlip
from .trial_move_generator import RandomFlip, TrialMoveGenerator

@@ -34,3 +37,3 @@

symbols: Sequence[str] = (),
generator: TrialMoveGenerator = None,
generator: Optional[TrialMoveGenerator] = None,
observe_singlets: bool = False,

@@ -164,3 +167,8 @@ ):

def run(self, steps: int = 10, call_observers: bool = True, chem_pot: Dict[str, float] = None):
def run(
self,
steps: int = 10,
call_observers: bool = True,
chem_pot: Optional[Dict[str, float]] = None,
):
"""

@@ -245,3 +253,2 @@ Run Monte Carlo simulation.

# Add singlets and chemical potential to the dictionary
# pylint: disable=consider-using-enumerate
singlets = avg_obs.singlets / N

@@ -265,3 +272,2 @@ singlets_sq = avg_obs.quantities["singlets_sq"] / N

averages.update(avg_conc)
# pylint: disable=broad-except
except Exception as exc:

@@ -268,0 +274,0 @@ print("Could not find average singlets!")

@@ -1,5 +0,6 @@

from typing import Sequence, List, Dict
import random
from typing import Dict, List, Optional, Sequence
import ase
import numpy as np
import ase

@@ -16,3 +17,3 @@

def __init__(self, atoms, indices: Sequence[int] = None):
def __init__(self, atoms, indices: Optional[Sequence[int]] = None):
self.tracker: Dict[str, List[int]] = {}

@@ -44,3 +45,3 @@ self.index_loc = None

def _init_tracker(self, atoms: ase.Atoms, indices: Sequence[int] = None) -> None:
def _init_tracker(self, atoms: ase.Atoms, indices: Optional[Sequence[int]] = None) -> None:
"""Initialize the tracker with the numbers."""

@@ -47,0 +48,0 @@ # Cache the unique symbols for faster access

@@ -1,10 +0,13 @@

from typing import Sequence, List, Set, Tuple, Dict
from abc import ABC, abstractmethod
import random
from random import choice
from abc import abstractmethod, ABC
import numpy as np
from typing import Dict, List, Optional, Sequence, Set, Tuple
from ase import Atoms
from ase.data import chemical_symbols
import numpy as np
from clease.datastructures import SystemChange
from clease.tools import flatten
from .constraints import MCConstraint

@@ -151,3 +154,5 @@ from .swap_move_index_tracker import SwapMoveIndexTracker

def __init__(self, symbols: Set[str], atoms: Atoms, indices: List[int] = None, **kwargs):
def __init__(
self, symbols: Set[str], atoms: Atoms, indices: Optional[List[int]] = None, **kwargs
):
super().__init__(**kwargs)

@@ -200,3 +205,3 @@ self.symbols = symbols

def __init__(self, atoms: Atoms, indices: List[int] = None, **kwargs):
def __init__(self, atoms: Atoms, indices: Optional[List[int]] = None, **kwargs):
super().__init__(**kwargs)

@@ -341,3 +346,3 @@ self.indices = indices

atoms: Atoms,
indices: Sequence[Sequence[int]] = None,
indices: Optional[Sequence[Sequence[int]]] = None,
**kwargs,

@@ -344,0 +349,0 @@ ):

"""Logger that can be used together with multiprocessing funcions."""
import logging as lg
import multiprocessing as mp
import threading
import multiprocessing as mp

@@ -38,3 +38,3 @@

break
except Exception as exc: # pylint: disable=broad-except
except Exception as exc:
print("An unexpected exception occured during logging.")

@@ -51,3 +51,3 @@ print(str(exc))

self.send(record)
except Exception: # pylint: disable=broad-except
except Exception:
self.handleError(record)

@@ -54,0 +54,0 @@

@@ -1,10 +0,14 @@

from typing import List, Tuple
from typing import List, Optional, Tuple
from matplotlib.figure import Figure
import matplotlib.pyplot as plt
import numpy as np
from matplotlib.figure import Figure
from clease import Evaluate, ConvexHull
from clease import ConvexHull, Evaluate
from clease.evaluate import supports_alpha_cv
def plot_fit(evaluate: Evaluate, plot_args: dict = None, interactive: bool = False) -> Figure:
def plot_fit(
evaluate: Evaluate, plot_args: Optional[dict] = None, interactive: bool = False
) -> Figure:
"""

@@ -72,4 +76,3 @@ Figure object calculated (DFT) and predicted energies.

if interactive:
# pylint: disable=import-outside-toplevel
from clease.interactive_plot import ShowStructureOnClick, AnnotatedAx
from clease.interactive_plot import AnnotatedAx, ShowStructureOnClick

@@ -92,3 +95,3 @@ annotations = _make_annotations_plot_fit(evaluate)

def plot_fit_residual(
evaluate: Evaluate, plot_args: dict = None, interactive: bool = False
evaluate: Evaluate, plot_args: Optional[dict] = None, interactive: bool = False
) -> Figure:

@@ -136,4 +139,3 @@ """

if interactive:
# pylint: disable=import-outside-toplevel
from clease.interactive_plot import ShowStructureOnClick, AnnotatedAx
from clease.interactive_plot import AnnotatedAx, ShowStructureOnClick

@@ -157,3 +159,3 @@ annotations = _make_annotations_plot_fit(evaluate)

evaluate: Evaluate,
plot_args: dict = None,
plot_args: Optional[dict] = None,
ignore_sizes=(),

@@ -228,4 +230,3 @@ interactive: bool = False,

if interactive:
# pylint: disable=import-outside-toplevel
from clease.interactive_plot import InteractivePlot, AnnotatedAx
from clease.interactive_plot import AnnotatedAx, InteractivePlot

@@ -245,3 +246,3 @@ # Construct the annotated axis objects.

def plot_cv(evaluate: Evaluate, plot_args: dict = None) -> Figure:
def plot_cv(evaluate: Evaluate, plot_args: Optional[dict] = None) -> Figure:
"""

@@ -325,3 +326,2 @@ Figure object of CV values according to alpha values

# concentrations among the frames
# pylint: disable=protected-access
concs = {key: [] for key in cnv_hull._unique_elem}

@@ -345,4 +345,3 @@ for frame_conc in conc_per_frame:

if interactive:
# pylint: disable=import-outside-toplevel
from clease.interactive_plot import ShowStructureOnClick, AnnotatedAx
from clease.interactive_plot import AnnotatedAx, ShowStructureOnClick

@@ -349,0 +348,0 @@ ax_list = fig.get_axes()

@@ -1,2 +0,1 @@

# pylint: disable=undefined-variable
from .regression import *

@@ -3,0 +2,0 @@ from .generalized_ridge_regression import *

@@ -1,13 +0,15 @@

import time
from itertools import product
import json
import logging
from itertools import product
import time
from matplotlib import pyplot as plt
import numpy as np
from scipy.optimize import brentq
from scipy.special import polygamma
from scipy.optimize import brentq
from matplotlib import pyplot as plt
from clease.tools import invert_matrix
from .regression import LinearRegression
logger = logging.getLogger(__name__)

@@ -77,4 +79,2 @@

# pylint: disable=too-many-instance-attributes
def __init__(

@@ -95,3 +95,2 @@ self,

):
# pylint: disable=too-many-arguments
super().__init__()

@@ -426,3 +425,2 @@

"""
# pylint: disable=too-many-branches, too-many-statements
# XXX: Needs some cleaning

@@ -429,0 +427,0 @@ allowed_strategies = ["random", "max_increase"]

from typing import Tuple
import numpy as np
from .regression import LinearRegression

@@ -4,0 +6,0 @@

@@ -0,6 +1,9 @@

import logging
import os
import logging
from matplotlib import pyplot as plt
import numpy as np
from matplotlib import pyplot as plt
from clease.tools import aic, aicc, bic
from .regression import LinearRegression

@@ -19,3 +22,2 @@

# pylint: disable=too-many-instance-attributes
class GAFit:

@@ -195,3 +197,2 @@ """

"""Create a new generation."""
# pylint: disable=too-many-statements
new_generation = []

@@ -296,3 +297,3 @@ srt_indx = np.argsort(self.fitness)[::-1]

best_indx = np.argmax(self.fitness)
for i in range(len(self.individuals)): # pylint: disable=consider-using-enumerate
for i in range(len(self.individuals)):
if i == best_indx:

@@ -299,0 +300,0 @@ # Do not mutate the best individual

import logging
from typing import Optional
import numpy as np

@@ -43,3 +45,3 @@ from scipy.optimize import minimize

def __init__(self, alpha: np.ndarray = None) -> None:
def __init__(self, alpha: Optional[np.ndarray] = None) -> None:
super().__init__()

@@ -46,0 +48,0 @@ self.alpha = alpha

@@ -1,4 +0,4 @@

from typing import List, Sequence, Dict, Union, Callable, Tuple
import logging
import time
import logging
from typing import Callable, Dict, List, Optional, Sequence, Tuple, Union

@@ -10,5 +10,6 @@ import numpy as np

from clease.data_normalizer import DataNormalizer
from clease.tools import get_diameter_from_cf_name, split_dataset, get_size_from_cf_name
from clease.tools import get_diameter_from_cf_name, get_size_from_cf_name, split_dataset
from .constrained_ridge import ConstrainedRidge
from .regression import LinearRegression
from .constrained_ridge import ConstrainedRidge

@@ -81,3 +82,3 @@ logger = logging.getLogger(__name__)

normalize: bool = True,
cf_names: List[str] = None,
cf_names: Optional[List[str]] = None,
) -> None:

@@ -298,3 +299,2 @@ super().__init__()

"""
# pylint: disable=too-many-locals
best_param = None

@@ -301,0 +301,0 @@ best_cv = 0.0

"""Collection of classess to perform regression."""
from typing import Union, Optional, List
from typing import List, Optional, Union
import numpy as np
from numpy.linalg import pinv
from scipy.linalg import lstsq
from sklearn.linear_model import Lasso as skLasso
from scipy.linalg import lstsq
from clease.data_normalizer import DataNormalizer

@@ -56,11 +58,8 @@

def get_instance_array(self) -> List[object]:
# pylint: disable=no-self-use
return [LinearRegression()]
def is_scalar(self):
# pylint: disable=no-self-use
return False
def get_scalar_parameter(self): # pragma: no cover
# pylint: disable=no-self-use
raise ValueError("Fitting scheme is not described by a scalar parameter!")

@@ -173,3 +172,2 @@

) -> List[object]:
# pylint: disable=arguments-differ
if scale == "log":

@@ -227,3 +225,2 @@ alpha = np.logspace(

) -> List[object]:
# pylint: disable=arguments-differ
if scale == "log":

@@ -230,0 +227,0 @@ alpha = np.logspace(

import logging
import numpy as np
from .regression import LinearRegression, Tikhonov

@@ -4,0 +6,0 @@

@@ -1,3 +0,1 @@

# pylint: disable=undefined-variable
# For some reason, pylint detects a lot of cyclic imports here.
from .concentration import *

@@ -4,0 +2,0 @@ from . import template_filters

from __future__ import annotations
from typing import List, Sequence, Optional
from typing import List, Optional, Sequence
from ase import Atoms

@@ -17,3 +19,3 @@

def __init__(self, atoms: Atoms = None) -> None:
def __init__(self, atoms: Optional[Atoms] = None) -> None:
self.atoms = atoms

@@ -20,0 +22,0 @@

"""Class containing a manager for setting up concentrations of species."""
from collections import OrderedDict
from random import choice
import numpy as np
from scipy.optimize import minimize
from clease.jsonio import jsonable
from clease.tools import remove_redundant_constraints, remove_redundant_equations, invert_matrix
from clease.tools import remove_redundant_constraints, remove_redundant_equations

@@ -27,3 +29,2 @@ __all__ = ("Concentration",)

# pylint: disable=too-many-instance-attributes
@jsonable("concentration")

@@ -108,3 +109,2 @@ class Concentration:

self.num_concs = len([x for sub in self.basis_elements for x in sub])
self.fixed_element_constraint_added = False

@@ -320,3 +320,2 @@ if A_lb is not None:

"""Extract all the integers from a string."""
# pylint: disable=no-self-use,too-many-branches
if variable_range is None:

@@ -401,3 +400,2 @@ variable_symbols = []

"""
# pylint: disable=too-many-branches
if formulas is None or variable_range is None:

@@ -482,3 +480,2 @@ raise InvalidConstraintError("formula and variable range has to be provided!")

the passed variable."""
# pylint: disable=no-self-use
num_elements_with_variable = {k: 0 for k in variable_range.keys()}

@@ -524,3 +521,2 @@ for var in variable_range.keys():

"""Return the reference element for each variable."""
# pylint: disable=no-self-use
# reference element is the one that has its concentration specified

@@ -540,3 +536,2 @@ # with a clean representation (e.g., <x> or <y>)

"""Return index of the basis containing the passed varyable symbol."""
# pylint: disable=no-self-use
for basis, formula in enumerate(formulas):

@@ -573,3 +568,2 @@ if variable_symbol in formula:

"""
# pylint: disable=no-self-use
if not string[0].isdigit():

@@ -625,55 +619,2 @@ return 1

def _add_fixed_element_in_each_basis(self, nib=None):
"""Add constraints corresponding to the fixing elements in basis."""
if self.fixed_element_constraint_added:
return
indices = []
basis_of_index = []
start = 0
ranges = self.get_individual_comp_range()
min_range = 0.01
maxiter = 1000
self.orig_num_equality = self.A_eq.shape[0]
for i, basis in enumerate(self.basis_elements):
if self._is_linked_to_other_basis(i):
start += len(basis)
continue
iteration = 0
rng = 0.5 * min_range
while rng < min_range and iteration < maxiter:
iteration += 1
indx = choice(range(start, start + len(basis)))
rng = ranges[indx][1] - ranges[indx][0]
if iteration >= maxiter:
self.fixed_element_constraint_added = False
start += len(basis)
continue
indices.append(indx)
basis_of_index.append(i)
start += len(basis)
A = np.zeros((len(indices), self.num_concs))
b = np.zeros(len(indices))
for i, indx in enumerate(indices):
A[i, indx] = 1
rng = ranges[indx][1] - ranges[indx][0]
b[i] = np.random.rand() * rng + ranges[indx][0]
if nib is not None:
# Convert to closest rational number
n = nib[basis_of_index[i]]
b[i] = np.round((b[i] * n)) / n
# Add constraints
self.A_eq = np.vstack((self.A_eq, A))
self.b_eq = np.append(self.b_eq, b)
self.fixed_element_constraint_added = True
def _remove_fixed_element_in_each_basis_constraint(self):
"""Remove the last rows."""
if not self.fixed_element_constraint_added:
return
self.A_eq = self.A_eq[: self.orig_num_equality, :]
self.b_eq = self.b_eq[: self.orig_num_equality]
self.fixed_element_constraint_added = False
def get_individual_comp_range(self):

@@ -688,31 +629,44 @@ """Return the concentration range of each component."""

def get_random_concentration(self, nib=None) -> np.ndarray:
"""Generate a valid random concentration."""
def get_random_concentration(self, nib=None, sampling_attempts=200) -> np.ndarray:
"""Generate a valid random concentration.
Parameters:
sampling_attempts: int
Maximum number of attempts allowed to retrieve integer concentrations for
each element in the structure.
"""
assert self.A_eq.shape[0] == len(self.b_eq)
self._add_fixed_element_in_each_basis(nib=nib)
# Setup the constraints
constraints = self._get_constraints()
x0 = np.random.rand(self.num_concs)
pinv = invert_matrix(self.A_eq)
x_init = pinv.dot(self.b_eq)
# Find the closest vector to x0 that satisfies all constraints
opt_res = minimize(
objective_random,
x_init,
args=(x0,),
method="SLSQP",
jac=obj_jac_random,
constraints=constraints,
bounds=self.trivial_bounds,
)
self._remove_fixed_element_in_each_basis_constraint()
x = opt_res["x"]
for _ in range(sampling_attempts):
x_init = np.random.rand(self.num_concs)
x0 = np.random.rand(self.num_concs)
if not self.is_valid_conc(x):
msg = "Could not find valid concentration. Revise the constraints."
raise InvalidConcentrationError(msg)
return x
opt_res = minimize(
objective_random,
x_init,
args=(x0,),
method="SLSQP",
jac=obj_jac_random,
constraints=constraints,
bounds=self.trivial_bounds,
)
x = opt_res["x"]
if nib is None:
if self.is_valid_conc(x):
return x
else:
x_in_int = self.conc_in_int(nib, x)
x_from_int = self.to_float_conc(nib, x_in_int)
if self.is_valid_conc(x_from_int):
return x
msg = "Could not find valid concentration. Revise the constraints."
raise InvalidConcentrationError(msg)
def _get_interbasis_relations(self):

@@ -966,3 +920,2 @@ self._linked_basis = list(range(len(self.basis_elements)))

"""Jacobian of the equalitu constraint equation."""
# pylint: disable=unused-argument
return vec

@@ -979,3 +932,2 @@

"""Jacobian of the inequality constraint equations."""
# pylint: disable=unused-argument
return vec

@@ -10,3 +10,5 @@ """Definitions of Cluster Expansion settings for bulk.

from ase.spacegroup import crystal
from clease.tools import wrap_and_sort_by_position
from .concentration import Concentration

@@ -13,0 +15,0 @@ from .settings import ClusterExpansionSettings

@@ -1,6 +0,7 @@

from typing import Union, Tuple, Optional, Sequence
import numpy as np
from typing import Optional, Sequence, Tuple, Union
from ase import Atoms
from ase.build import surface
from ase.geometry import get_layers
import numpy as np

@@ -7,0 +8,0 @@ from clease.tools import make_supercell

@@ -7,23 +7,24 @@ """Definition of ClusterExpansionSettings Class.

from __future__ import annotations
from copy import deepcopy
import logging
from copy import deepcopy
from typing import List, Dict, Optional, Union, Sequence, Set, Any
from typing import Any, Dict, List, Optional, Sequence, Set, Union
import numpy as np
from ase import Atoms
from ase.db import connect
from ase.db.core import Database
import numpy as np
from clease.version import __version__
from clease.basis_function import BasisFunction, BinaryLinear, Polynomial, Trigonometric
from clease.cluster import Cluster, ClusterList, ClusterManager
from clease.datastructures import TransMatrix
from clease.jsonio import jsonable
from clease.tools import get_size_from_cf_name, wrap_and_sort_by_position
from clease.cluster import ClusterManager, ClusterList, Cluster
from clease.basis_function import Polynomial, Trigonometric, BinaryLinear, BasisFunction
from clease.datastructures import TransMatrix
from clease.version import __version__
from .atoms_manager import AtomsManager
from .concentration import Concentration
from .template_atoms import TemplateAtoms
from .template_filters import ValidConcentrationFilter
from .template_atoms import TemplateAtoms
from .atoms_manager import AtomsManager
__all__ = ("ClusterExpansionSettings",)

@@ -73,4 +74,2 @@

# pylint: disable=too-many-instance-attributes, too-many-public-methods
# Keys which are important for saving/loading

@@ -277,3 +276,2 @@ ARG_KEYS = ("prim_cell", "concentration")

def include_background_atoms(self, value: bool) -> None:
# pylint: disable=no-self-use
msg = "The include_background_atoms setter has been removed in version 0.11.3.\n"

@@ -587,3 +585,2 @@ msg += f"Please set 'include_background_atoms={value}' in the settings constructor, "

"""Display all clusters along with their names."""
# pylint: disable=import-outside-toplevel
from ase.gui.gui import GUI

@@ -633,3 +630,2 @@ from ase.gui.images import Images

"""
# pylint: disable=import-outside-toplevel
from ase.visualize import view

@@ -636,0 +632,0 @@

"""Class containing a manager for creating template atoms."""
from contextlib import contextmanager
from itertools import product
from typing import Iterator, List, Union, Optional
from contextlib import contextmanager
from typing import Iterator, List, Optional, Union
import ase
from ase.build.tools import niggli_reduce_cell
import numpy as np
from numpy.random import shuffle
import ase
from ase.build.tools import niggli_reduce_cell
from clease.tools import all_integer_transform_matrices, make_supercell
from .template_filters import (
AtomsFilter,
CellFilter,
AtomsFilter,
EquivalentCellsFilter,
SkewnessFilter,
EquivalentCellsFilter,
)

@@ -243,3 +246,3 @@

def iterate_all_templates(self, max_per_size: int = None) -> Iterator[ase.Atoms]:
def iterate_all_templates(self, max_per_size: Optional[int] = None) -> Iterator[ase.Atoms]:
"""Get all possible templates in an iterator.

@@ -246,0 +249,0 @@

from abc import ABC, abstractmethod
from itertools import product, permutations, combinations
from itertools import combinations, permutations, product
from ase import Atoms
import numpy as np
from ase import Atoms
from .concentration import (

@@ -64,3 +66,2 @@ Concentration,

"""Return the ratio between the maximum and the minimum diagonal."""
# pylint: disable=no-self-use
diag_lengths = []

@@ -92,3 +93,2 @@ for w in product([-1, 0, 1], repeat=3):

def _is_unitary(self, matrix):
# pylint: disable=no-self-use
return np.allclose(matrix.T.dot(matrix), np.identity(matrix.shape[0]))

@@ -141,7 +141,3 @@

try:
x = conc.get_random_concentration(nib=nib)
x_int = conc.conc_in_int(nib, x)
x_from_int = conc.to_float_conc(nib, x_int)
if not conc.is_valid_conc(x_from_int):
return False
conc.get_random_concentration(nib=nib)
except (

@@ -161,3 +157,2 @@ InvalidConcentrationError,

def _distance_between_facets(self, cell, span):
# pylint: disable=no-self-use
v1 = cell[span[0], :]

@@ -164,0 +159,0 @@ v2 = cell[span[1], :]

@@ -17,2 +17,2 @@ from .settings import ClusterExpansionSettings

"""
return ClusterExpansionSettings.load(fname) # pylint: disable=no-member
return ClusterExpansionSettings.load(fname)
from typing import List, Tuple
from matplotlib import pyplot as plt
import numpy as np
from matplotlib import pyplot as plt

@@ -6,0 +6,0 @@ from clease.regression import LinearRegression

@@ -1,2 +0,1 @@

# pylint: disable=undefined-variable
from .new_struct import *

@@ -3,0 +2,0 @@ from .structure_generator import *

"""Module for generating new structures for training."""
import os
from copy import deepcopy
from functools import reduce
from typing import List, Dict, Optional, Union, Any, Tuple
from itertools import product
import logging
from itertools import product
import os
from typing import Any, Dict, List, Optional, Tuple, Union
import numpy as np
from numpy.random import shuffle
from ase import Atoms

@@ -16,12 +13,13 @@ from ase.io import read

from ase.utils.structure_comparator import SymmetryEquivalenceCheck
import numpy as np
from numpy.random import shuffle
from clease import db_util
from clease.settings import ClusterExpansionSettings
from clease.corr_func import CorrFunction
from clease.montecarlo import TooFewElementsError
from clease.tools import wrap_and_sort_by_position, nested_list2str
from clease.tools import count_atoms
from .structure_generator import ProbeStructure, GSStructure, MetropolisTrajectory
from clease.settings import ClusterExpansionSettings
from clease.tools import count_atoms, nested_list2str, wrap_and_sort_by_position
from .structure_generator import GSStructure, MetropolisTrajectory, ProbeStructure
try:

@@ -67,3 +65,3 @@ from math import gcd

settings: ClusterExpansionSettings,
generation_number: int = None,
generation_number: Optional[int] = None,
struct_per_gen: int = 5,

@@ -488,3 +486,2 @@ check_db: bool = True,

# pylint: disable=too-many-branches
def _set_initial_structures(

@@ -639,2 +636,4 @@ self, atoms: Union[Atoms, List[Atoms]], random_composition: bool = False

conc = self.settings.concentration
nib = [len(indices) for indices in self.settings.index_by_basis]
if conc_type == "min":

@@ -645,8 +644,6 @@ x = conc.get_conc_min_component(index)

else:
nib = [len(x) for x in self.settings.index_by_basis]
x = conc.get_random_concentration(nib=nib)
num_atoms_in_basis = [len(indices) for indices in self.settings.index_by_basis]
num_to_insert = conc.conc_in_int(num_atoms_in_basis, x)
atoms = self._random_struct_at_conc(num_to_insert)
x_in_int = conc.conc_in_int(nib, x)
atoms = self._random_struct_at_conc(x_in_int)

@@ -838,3 +835,3 @@ if conc_type in ["min", "max"]:

def _get_kvp(self, formula_unit: str = None) -> Dict:
def _get_kvp(self, formula_unit: Optional[str] = None) -> Dict:
"""

@@ -909,3 +906,3 @@ Create a dictionary of key-value pairs and return it.

all_nums += [v for k, v in new_count.items()]
# pylint: disable=unnecessary-lambda
gcdp = reduce(lambda x, y: gcd(x, y), all_nums)

@@ -912,0 +909,0 @@ fu = ""

"""Module for generating new structures."""
from abc import ABC
from copy import deepcopy
import logging
import math
import os
import math
import time
import logging
from copy import deepcopy
from abc import ABC
from ase.db import connect
from ase.io.trajectory import TrajectoryReader
import numpy as np
from numpy.linalg import inv, pinv
from numpy.random import choice
from numpy.linalg import inv, pinv
from ase.db import connect
from ase.io.trajectory import TrajectoryReader
from clease.settings import ClusterExpansionSettings
from clease.calculator import Clease
from clease.corr_func import CorrFunction
from clease.tools import wrap_and_sort_by_position
from clease.calculator import Clease
from clease.montecarlo import Montecarlo, TooFewElementsError
from clease.montecarlo.constraints import ConstrainSwapByBasis
from clease.montecarlo.observers import LowestEnergyStructure, Snapshot
from clease.montecarlo.constraints import ConstrainSwapByBasis
from clease.settings import ClusterExpansionSettings
from clease.tools import wrap_and_sort_by_position

@@ -34,3 +33,2 @@ logger = logging.getLogger(__name__)

# pylint: disable=too-many-instance-attributes
class StructureGenerator(ABC):

@@ -173,7 +171,5 @@ """Base class for generating new strctures."""

"""Determine if we accept the last change."""
# pylint: disable=no-self-use
return True
def _estimate_temp_range(self):
# pylint: disable=no-self-use
return 1.0, 1e5

@@ -362,3 +358,2 @@

):
StructureGenerator.__init__(

@@ -365,0 +360,0 @@ self, settings, atoms, init_temp, final_temp, num_temp, num_steps_per_temp

from typing import Tuple
import spglib
from ase.atoms import Atoms, Cell
from ase.geometry import find_mic
import numpy as np
from scipy.optimize import linear_sum_assignment
from ase.atoms import Atoms, Cell
from ase.geometry import find_mic
import spglib

@@ -74,3 +75,2 @@ __all__ = ("TransformInfo", "StructureMapper")

"""
# pylint: disable=no-self-use
P = cell2.dot(np.linalg.inv(cell1))

@@ -77,0 +77,0 @@ return 0.5 * (P.T.dot(P) - np.eye(3))

@@ -1,14 +0,10 @@

# pylint: disable=too-many-lines
"""A collection of miscellaneous functions used for Cluster Expansion."""
import re
from collections.abc import Iterable
from itertools import chain, combinations, filterfalse, permutations, product
import logging
from pathlib import Path
from itertools import permutations, combinations, product, filterfalse, chain
from collections.abc import Iterable
from typing import List, Optional, Tuple, Dict, Set, Sequence, Union
import re
from typing import Dict, List, Optional, Sequence, Set, Tuple, Union
from typing import Iterable as tIterable
from typing_extensions import Protocol
from packaging.version import Version, parse
import numpy as np
from numpy.random import sample, shuffle
import ase

@@ -18,4 +14,8 @@ import ase.build.supercells as ase_sc

from ase.db.core import parse_selection
import numpy as np
from numpy.random import sample, shuffle
from packaging.version import Version, parse
from scipy.optimize import linprog
from scipy.spatial import cKDTree as KDTree
from scipy.optimize import linprog
from typing_extensions import Protocol

@@ -177,4 +177,4 @@ ASE_VERSION = parse(ase.__version__)

db_name=None,
custom_kvp_init: dict = None,
custom_kvp_final: dict = None,
custom_kvp_init: Optional[dict] = None,
custom_kvp_final: Optional[dict] = None,
):

@@ -275,3 +275,2 @@ """Update the database.

"""
# pylint: disable=import-outside-toplevel, cyclic-import
from clease.corr_func import CorrFunction

@@ -937,2 +936,3 @@ from clease.settings import ClusterExpansionSettings

"""
# Helper sorting function to define the order of the sorting.

@@ -1037,4 +1037,4 @@ def _sort_ordering(name: str):

d: float,
A_eq: np.ndarray = None,
b_eq: np.ndarray = None,
A_eq: Optional[np.ndarray] = None,
b_eq: Optional[np.ndarray] = None,
) -> bool:

@@ -1073,3 +1073,6 @@ """

def remove_redundant_constraints(
A_lb: np.ndarray, b_lb: np.ndarray, A_eq: np.ndarray = None, b_eq: np.ndarray = None
A_lb: np.ndarray,
b_lb: np.ndarray,
A_eq: Optional[np.ndarray] = None,
b_eq: Optional[np.ndarray] = None,
) -> Tuple[np.ndarray, np.ndarray]:

@@ -1284,3 +1287,3 @@ """

center=(0.5, 0.5, 0.5),
cell_T_inv: np.ndarray = None,
cell_T_inv: Optional[np.ndarray] = None,
eps: float = 1e-7,

@@ -1287,0 +1290,0 @@ ) -> np.ndarray:

from pathlib import Path
from packaging.version import parse

@@ -3,0 +4,0 @@

@@ -7,2 +7,7 @@ .. _releasenotes:

1.1.0
======
* Pinned `numpy < 2`
* Fixed an issue with sampling concentrations.
1.0.6

@@ -9,0 +14,0 @@ ======

Metadata-Version: 2.1
Name: clease
Version: 1.0.6
Version: 1.1.0
Summary: CLuster Expansion in Atomistic Simulation Environment

@@ -22,8 +22,59 @@ Home-page: https://gitlab.com/computationalmaterials/clease/

Description-Content-Type: text/markdown
License-File: LICENSE.md
Requires-Dist: ase>=3.22
Requires-Dist: numpy<2
Requires-Dist: cython
Requires-Dist: matplotlib
Requires-Dist: spglib
Requires-Dist: scikit-learn
Requires-Dist: typing_extensions
Requires-Dist: Deprecated
Requires-Dist: click>=8.0.0
Requires-Dist: attrs>=21.4.0
Requires-Dist: scipy>=1.7.0
Requires-Dist: packaging
Requires-Dist: threadpoolctl
Provides-Extra: doc
Requires-Dist: sphinx; extra == "doc"
Requires-Dist: sphinx_rtd_theme; extra == "doc"
Provides-Extra: test
Requires-Dist: pytest; extra == "test"
Requires-Dist: pytest-mock; extra == "test"
Requires-Dist: mock; extra == "test"
Requires-Dist: pytest-benchmark[histogram]>=3.4.1; extra == "test"
Requires-Dist: tox>=4; extra == "test"
Provides-Extra: dev
Requires-Dist: pip; extra == "dev"
Requires-Dist: cython; extra == "dev"
Requires-Dist: pre-commit; extra == "dev"
Requires-Dist: ipython; extra == "dev"
Requires-Dist: twine; extra == "dev"
Requires-Dist: black>=22.1.0; extra == "dev"
Requires-Dist: clang-format>=14.0.3; extra == "dev"
Requires-Dist: ruff; extra == "dev"
Requires-Dist: pyclean>=2.0.0; extra == "dev"
Requires-Dist: pytest-cov; extra == "dev"
Requires-Dist: build; extra == "dev"
Provides-Extra: gui
Requires-Dist: clease-gui; extra == "gui"
Provides-Extra: all
License-File: LICENSE.md
Requires-Dist: black>=22.1.0; extra == "all"
Requires-Dist: mock; extra == "all"
Requires-Dist: twine; extra == "all"
Requires-Dist: ruff; extra == "all"
Requires-Dist: sphinx_rtd_theme; extra == "all"
Requires-Dist: pre-commit; extra == "all"
Requires-Dist: pyclean>=2.0.0; extra == "all"
Requires-Dist: pip; extra == "all"
Requires-Dist: pytest-cov; extra == "all"
Requires-Dist: clease-gui; extra == "all"
Requires-Dist: build; extra == "all"
Requires-Dist: sphinx; extra == "all"
Requires-Dist: pytest-mock; extra == "all"
Requires-Dist: pytest; extra == "all"
Requires-Dist: cython; extra == "all"
Requires-Dist: ipython; extra == "all"
Requires-Dist: clang-format>=14.0.3; extra == "all"
Requires-Dist: tox>=4; extra == "all"
Requires-Dist: pytest-benchmark[histogram]>=3.4.1; extra == "all"

@@ -30,0 +81,0 @@ # CLEASE

[build-system]
# setuptools 61.0 broke, see !488
requires = ["setuptools >=58, !=61.0", "wheel", "Cython", "numpy"]
requires = ["setuptools >=58, !=61.0", "wheel", "Cython", "numpy<2"]
build-backend = "setuptools.build_meta"

@@ -9,1 +9,12 @@

target-version = ["py37"]
[tool.ruff]
select = ["E", "F", "NPY", "RUF", "I"]
exclude = ["tests", "__init__.py"]
ignore = ["NPY002", "RUF012"]
# Same as Black.
line-length = 100
target-version = "py37"
[tool.ruff.isort]
force-sort-within-sections = true

@@ -35,3 +35,3 @@ [metadata]

ase>=3.22
numpy
numpy<2
cython

@@ -38,0 +38,0 @@ matplotlib

import os
from pathlib import Path
import sysconfig
from setuptools import setup, Extension
from setuptools import Extension, setup
def src_folder():

@@ -127,3 +128,3 @@ candidates = ["cxx/src/", "clease/", "./"]

"clang-format>=14.0.3", # c++ style formatting
"pylint",
"ruff",
"pyclean>=2.0.0", # For removing __pycache__ and .pyc files

@@ -130,0 +131,0 @@ "pytest-cov",

Sorry, the diff of this file is too big to display