asdf (Python package) - Compare versions

Comparing version 4.2.0 to 4.3.0

+219
asdf/_dump.py
from io import BytesIO

from asdf._asdf import AsdfFile, open_asdf
from asdf.util import NotSet

__all__ = ["dump", "dumps", "load", "loads"]


def dump(
    tree,
    fp,
    *,
    version=None,
    extensions=None,
    all_array_storage=NotSet,
    all_array_compression=NotSet,
    compression_kwargs=NotSet,
    pad_blocks=False,
    custom_schema=None,
):
    """
    Write a tree to an ASDF file.

    Parameters
    ----------
    tree : object
        The tree to dump.
    fp : str or file-like object
        The path or file-like object to write the ASDF data to.
    version : str, optional
        Version of the ASDF Standard to use. If not specified, the default
        version will be used.
    extensions : object, optional
        Additional extensions to use when reading and writing the file.
        May be an `asdf.extension.Extension` or a `list` of extensions.
    all_array_storage : string, optional
        If provided, override the array storage type of all blocks.
    all_array_compression : string, optional
        If provided, override the array compression type of all blocks.
    compression_kwargs : dict, optional
        If provided, override the compression parameters of all blocks.
    pad_blocks : bool, optional
        If `True`, pad all blocks to the nearest multiple of the block size.
    custom_schema : str, optional
        Path to a custom schema file that will be used for a secondary
        validation pass. This can be used to ensure that particular ASDF
        files follow custom conventions beyond those enforced by the
        standard.
    """
    AsdfFile(tree, custom_schema=custom_schema, extensions=extensions).write_to(
        fp,
        version=version,
        all_array_storage=all_array_storage,
        all_array_compression=all_array_compression,
        compression_kwargs=compression_kwargs,
        pad_blocks=pad_blocks,
    )


def dumps(
    tree,
    *,
    version=None,
    extensions=None,
    all_array_storage=NotSet,
    all_array_compression=NotSet,
    compression_kwargs=NotSet,
    pad_blocks=False,
    custom_schema=None,
):
    """
    Serialize a tree to a bytestring.

    Parameters
    ----------
    tree : object
        The tree to dump.
    version : str, optional
        Version of the ASDF Standard to use. If not specified, the default
        version will be used.
    extensions : object, optional
        Additional extensions to use when reading and writing the file.
        May be an `asdf.extension.Extension` or a `list` of extensions.
    all_array_storage : string, optional
        If provided, override the array storage type of all blocks.
    all_array_compression : string, optional
        If provided, override the array compression type of all blocks.
    compression_kwargs : dict, optional
        If provided, override the compression parameters of all blocks.
    pad_blocks : bool, optional
        If `True`, pad all blocks to the nearest multiple of the block size.
    custom_schema : str, optional
        Path to a custom schema file that will be used for a secondary
        validation pass. This can be used to ensure that particular ASDF
        files follow custom conventions beyond those enforced by the
        standard.

    Returns
    -------
    bytes
        The ASDF file contents (ASDF is a binary format, so `bytes` is
        returned rather than `str`).
    """
    buff = BytesIO()
    dump(
        tree,
        buff,
        version=version,
        extensions=extensions,
        all_array_storage=all_array_storage,
        all_array_compression=all_array_compression,
        compression_kwargs=compression_kwargs,
        pad_blocks=pad_blocks,
        custom_schema=custom_schema,
    )
    return buff.getvalue()


def load(fp, *, uri=None, validate_checksums=False, extensions=None, custom_schema=None):
    """
    Load an ASDF tree from a file path or file-like object.

    Parameters
    ----------
    fp : str or file-like object
        The path or file-like object to read the ASDF data from.
    uri : str, optional
        The URI for this ASDF file. Used to resolve relative
        references against. If not provided, will be
        automatically determined from the associated file object,
        if possible and if created from `asdf.open`.
    validate_checksums : bool, optional
        If `True`, validate the blocks against their checksums.
    extensions : object, optional
        Additional extensions to use when reading and writing the file.
        May be an `asdf.extension.Extension` or a `list` of extensions.
    custom_schema : str, optional
        Path to a custom schema file that will be used for a secondary
        validation pass. This can be used to ensure that particular ASDF
        files follow custom conventions beyond those enforced by the
        standard.

    Returns
    -------
    object
        The ASDF tree.
    """
    with open_asdf(
        fp,
        lazy_load=False,
        memmap=False,
        lazy_tree=False,
        uri=uri,
        validate_checksums=validate_checksums,
        extensions=extensions,
        custom_schema=custom_schema,
    ) as af:
        return af.tree


def loads(asdf_string, *, uri=None, validate_checksums=False, extensions=None, custom_schema=None):
    """
    Load an ASDF tree from a bytestring.

    Parameters
    ----------
    asdf_string : bytes
        A bytestring containing ASDF data.
    uri : str, optional
        The URI for this ASDF file. Used to resolve relative
        references against. If not provided, will be
        automatically determined from the associated file object,
        if possible and if created from `asdf.open`.
    validate_checksums : bool, optional
        If `True`, validate the blocks against their checksums.
    extensions : object, optional
        Additional extensions to use when reading and writing the file.
        May be an `asdf.extension.Extension` or a `list` of extensions.
    custom_schema : str, optional
        Path to a custom schema file that will be used for a secondary
        validation pass. This can be used to ensure that particular ASDF
        files follow custom conventions beyond those enforced by the
        standard.

    Returns
    -------
    object
        The ASDF tree.
    """
    return load(
        BytesIO(asdf_string),
        uri=uri,
        validate_checksums=validate_checksums,
        extensions=extensions,
        custom_schema=custom_schema,
    )
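The new helpers mirror Python's json/pickle conventions. A minimal usage sketch (the file name is illustrative; note that dumps returns bytes, since ASDF is a binary format):

import numpy as np

import asdf

tree = {"data": np.arange(8)}

# Round-trip through bytes; loads accepts the bytestring produced by dumps.
blob = asdf.dumps(tree)
roundtripped = asdf.loads(blob)
assert (roundtripped["data"] == tree["data"]).all()

# dump/load accept a path or a file-like object.
asdf.dump(tree, "example.asdf")
loaded = asdf.load("example.asdf")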
from contextlib import nullcontext

import pytest

import asdf
from asdf.exceptions import ValidationError

# test cases with:
# - custom schema path
# - tree
# - expected error (or None)
TEST_CASES = [
    (
        "custom_schema.yaml",
        {"stuff": 42, "other_stuff": "hello"},
        ".* is a required property",
    ),
    (
        "custom_schema.yaml",
        {"foo": {"x": 42, "y": 10}, "bar": {"a": "hello", "b": "banjo"}},
        None,
    ),
    (
        "custom_schema_definitions.yaml",
        {"forb": {"biz": "hello", "baz": "world"}},
        ".* is a required property",
    ),
    (
        "custom_schema_definitions.yaml",
        {"thing": {"biz": "hello", "baz": "world"}},
        None,
    ),
    (
        "custom_schema_external_ref.yaml",
        {"foo": asdf.tags.core.Software(name="Microsoft Windows", version="95")},
        None,
    ),
    (
        "custom_schema_external_ref.yaml",
        {"foo": False},
        "False is not valid under any of the given schemas",
    ),
]


@pytest.fixture(params=[lambda x: x, str])
def as_pathlib(request):
    """
    Fixture to test both pathlib.Path and str.
    """
    return request.param


@pytest.fixture
def schema_name(request, test_data_path, as_pathlib):
    """
    Fixture to convert the provided schema name to a path.
    """
    return as_pathlib(test_data_path / request.param)


@pytest.mark.parametrize(
    "schema_name, tree, expected_error",
    TEST_CASES,
    indirect=["schema_name"],
)
def test_custom_validation_write_to(tmp_path, schema_name, tree, expected_error):
    asdf_file = tmp_path / "out.asdf"
    ctx = pytest.raises(ValidationError, match=expected_error) if expected_error else nullcontext()
    with ctx:
        asdf.AsdfFile(tree, custom_schema=schema_name).write_to(asdf_file)


@pytest.mark.parametrize(
    "schema_name, tree, expected_error",
    TEST_CASES,
    indirect=["schema_name"],
)
def test_custom_validation_open(tmp_path, schema_name, tree, expected_error):
    asdf_file = tmp_path / "out.asdf"
    asdf.AsdfFile(tree).write_to(asdf_file)
    ctx = pytest.raises(ValidationError, match=expected_error) if expected_error else nullcontext()
    with ctx, asdf.open(asdf_file, custom_schema=schema_name):
        pass


@pytest.mark.parametrize(
    "schema_name, tree, expected_error",
    TEST_CASES,
    indirect=["schema_name"],
)
def test_custom_validation_validate(schema_name, tree, expected_error):
    ctx = pytest.raises(ValidationError, match=expected_error) if expected_error else nullcontext()
    with ctx:
        asdf.AsdfFile(tree, custom_schema=schema_name).validate()


@pytest.mark.parametrize(
    "schema_name, tree, expected_error",
    TEST_CASES,
    indirect=["schema_name"],
)
def test_custom_validation_dumps(schema_name, tree, expected_error):
    ctx = pytest.raises(ValidationError, match=expected_error) if expected_error else nullcontext()
    with ctx:
        asdf.dumps(tree, custom_schema=schema_name)


@pytest.mark.parametrize(
    "schema_name, tree, expected_error",
    TEST_CASES,
    indirect=["schema_name"],
)
def test_custom_validation_loads(schema_name, tree, expected_error):
    contents = asdf.dumps(tree)
    ctx = pytest.raises(ValidationError, match=expected_error) if expected_error else nullcontext()
    with ctx:
        asdf.loads(contents, custom_schema=schema_name)


@pytest.mark.parametrize(
    "schema_name, tree, expected_error",
    TEST_CASES,
    indirect=["schema_name"],
)
def test_custom_validation_dump(tmp_path, schema_name, tree, expected_error):
    asdf_file = tmp_path / "out.asdf"
    ctx = pytest.raises(ValidationError, match=expected_error) if expected_error else nullcontext()
    with ctx:
        asdf.dump(tree, asdf_file, custom_schema=schema_name)


@pytest.mark.parametrize(
    "schema_name, tree, expected_error",
    TEST_CASES,
    indirect=["schema_name"],
)
def test_custom_validation_load(tmp_path, schema_name, tree, expected_error):
    asdf_file = tmp_path / "out.asdf"
    asdf.AsdfFile(tree).write_to(asdf_file)
    ctx = pytest.raises(ValidationError, match=expected_error) if expected_error else nullcontext()
    with ctx:
        asdf.load(asdf_file, custom_schema=schema_name)
"""
Test dump, dumps, load and loads
These are thin wrappers around AsdfFile
so the tests here will be relatively simple.
"""
from asdf import dump, dumps, load, loads
from asdf._tests._helpers import assert_tree_match
def test_roundtrip(tmp_path, tree):
fn = tmp_path / "test.asdf"
dump(tree, fn)
rt = load(fn)
assert_tree_match(tree, rt)
def test_str_roundtrip(tree):
rt = loads(dumps(tree))
assert_tree_match(tree, rt)
+1
-1

@@ -11,3 +11,3 @@ name: build

   build:
-    uses: OpenAstronomy/github-actions-workflows/.github/workflows/publish_pure_python.yml@v1
+    uses: OpenAstronomy/github-actions-workflows/.github/workflows/publish_pure_python.yml@v2
     with:

@@ -14,0 +14,0 @@ upload_to_pypi: ${{ (github.event_name == 'release') && (github.event.action == 'released') }}

@@ -39,3 +39,3 @@ name: CI

     needs: [pre-commit]
-    uses: OpenAstronomy/github-actions-workflows/.github/workflows/tox.yml@v1
+    uses: OpenAstronomy/github-actions-workflows/.github/workflows/tox.yml@v2
     with:

@@ -64,3 +64,3 @@ submodules: false

   jsonschema:
-    uses: OpenAstronomy/github-actions-workflows/.github/workflows/tox.yml@v1
+    uses: OpenAstronomy/github-actions-workflows/.github/workflows/tox.yml@v2
     if: (github.repository == 'asdf-format/asdf' && (github.event_name == 'schedule' || github.event_name == 'push' || github.event_name == 'workflow_dispatch' || contains(github.event.pull_request.labels.*.name, 'jsonschema')))

@@ -76,3 +76,3 @@ with:

     needs: [core]
-    uses: OpenAstronomy/github-actions-workflows/.github/workflows/tox.yml@v1
+    uses: OpenAstronomy/github-actions-workflows/.github/workflows/tox.yml@v2
     with:

@@ -88,3 +88,3 @@ submodules: false

     needs: [core]
-    uses: OpenAstronomy/github-actions-workflows/.github/workflows/tox.yml@v1
+    uses: OpenAstronomy/github-actions-workflows/.github/workflows/tox.yml@v2
     with:

@@ -100,3 +100,3 @@ submodules: false

     needs: [core]
-    uses: OpenAstronomy/github-actions-workflows/.github/workflows/tox.yml@v1
+    uses: OpenAstronomy/github-actions-workflows/.github/workflows/tox.yml@v2
     with:

@@ -107,12 +107,15 @@ submodules: false

       envs: |
         - linux: py310-devdeps-parallel
         - linux: py311-devdeps-parallel
         - linux: py312-devdeps-parallel
         - linux: py313-devdeps-parallel
         - linux: py310-coverage-devdeps-parallel
         - linux: py311-coverage-devdeps-parallel
         - linux: py312-coverage-devdeps-parallel
         - linux: py313-coverage-devdeps-parallel
+        - linux: py314-coverage-devdeps-parallel
+      python-version: '3.14-dev'
         # separate pytest so a failure here doesn't cause the whole suite to fail
         - linux: py311-pytestdev-parallel
+        - linux: py311-coverage-pytestdev-parallel
       coverage: codecov
   oldest:
     needs: [core]
-    uses: OpenAstronomy/github-actions-workflows/.github/workflows/tox.yml@v1
+    uses: OpenAstronomy/github-actions-workflows/.github/workflows/tox.yml@v2
     with:

@@ -127,3 +130,3 @@ submodules: false

     needs: [core]
-    uses: OpenAstronomy/github-actions-workflows/.github/workflows/tox.yml@v1
+    uses: OpenAstronomy/github-actions-workflows/.github/workflows/tox.yml@v2
     with:

@@ -138,3 +141,3 @@ submodules: false

     needs: [core]
-    uses: OpenAstronomy/github-actions-workflows/.github/workflows/tox.yml@v1
+    uses: OpenAstronomy/github-actions-workflows/.github/workflows/tox.yml@v2
     with:

@@ -141,0 +144,0 @@ submodules: false

@@ -27,3 +27,3 @@ name: Downstream

   asdf:
-    uses: OpenAstronomy/github-actions-workflows/.github/workflows/tox.yml@v1
+    uses: OpenAstronomy/github-actions-workflows/.github/workflows/tox.yml@v2
     if: (github.repository == 'asdf-format/asdf' && (github.event_name == 'schedule' || github.event_name == 'push' || github.event_name == 'workflow_dispatch' || contains(github.event.pull_request.labels.*.name, 'Downstream CI')))

@@ -41,3 +41,3 @@ with:

   astropy:
-    uses: OpenAstronomy/github-actions-workflows/.github/workflows/tox.yml@v1
+    uses: OpenAstronomy/github-actions-workflows/.github/workflows/tox.yml@v2
     if: (github.repository == 'asdf-format/asdf' && (github.event_name == 'schedule' || github.event_name == 'push' || github.event_name == 'workflow_dispatch' || contains(github.event.pull_request.labels.*.name, 'Downstream CI')))

@@ -53,3 +53,3 @@ with:

   stsci:
-    uses: OpenAstronomy/github-actions-workflows/.github/workflows/tox.yml@v1
+    uses: OpenAstronomy/github-actions-workflows/.github/workflows/tox.yml@v2
     if: (github.repository == 'asdf-format/asdf' && (github.event_name == 'schedule' || github.event_name == 'push' || github.event_name == 'workflow_dispatch' || contains(github.event.pull_request.labels.*.name, 'Downstream CI')))

@@ -69,3 +69,3 @@ with:

   third-party:
-    uses: OpenAstronomy/github-actions-workflows/.github/workflows/tox.yml@v1
+    uses: OpenAstronomy/github-actions-workflows/.github/workflows/tox.yml@v2
     if: (github.repository == 'asdf-format/asdf' && (github.event_name == 'schedule' || github.event_name == 'push' || github.event_name == 'workflow_dispatch' || contains(github.event.pull_request.labels.*.name, 'Downstream CI')))

@@ -72,0 +72,0 @@ with:

 Metadata-Version: 2.4
 Name: asdf
-Version: 4.2.0
+Version: 4.3.0
 Summary: Python implementation of the ASDF Standard

@@ -5,0 +5,0 @@ Author-email: The ASDF Developers <help@stsci.edu>

@@ -25,2 +25,3 @@ CHANGES.rst

 asdf/_display.py
+asdf/_dump.py
 asdf/_entry_points.py

@@ -709,3 +710,5 @@ asdf/_helpers.py

 asdf/_tests/test_config.py
+asdf/_tests/test_custom_schema.py
 asdf/_tests/test_deprecated.py
+asdf/_tests/test_dump.py
 asdf/_tests/test_entry_points.py

@@ -712,0 +715,0 @@ asdf/_tests/test_extension.py

@@ -14,4 +14,8 @@ """

     "config_context",
+    "dump",
+    "dumps",
     "get_config",
     "info",
+    "load",
+    "loads",
     "open",

@@ -24,2 +28,3 @@ ]

 from ._convenience import info
+from ._dump import dump, dumps, load, loads
 from ._version import version as __version__

@@ -26,0 +31,0 @@ from .config import config_context, get_config

@@ -578,3 +578,3 @@ import copy

-    def _validate(self, tree, custom=True, reading=False):
+    def _validate(self, tree, reading=False):
         with self._blocks.options_context():

@@ -586,5 +586,2 @@ # If we're validating on read then the tree

             schema.validate(tagged_tree, self, reading=reading)
-            # Perform secondary validation pass if requested
-            if custom and self._custom_schema:
-                schema.validate(tagged_tree, self, self._custom_schema, reading=reading)

@@ -1374,3 +1371,3 @@ def validate(self):

-    def schema_info(self, key="description", path=None, preserve_list=True, refresh_extension_manager=False):
+    def schema_info(self, key="description", path=None, preserve_list=True, refresh_extension_manager=NotSet):
         """

@@ -1399,2 +1396,4 @@ Get a nested dictionary of the schema information for a given key, relative to the path.

         """
+        if refresh_extension_manager is not NotSet:
+            warnings.warn("refresh_extension_manager is deprecated", DeprecationWarning)

@@ -1422,3 +1421,3 @@ if isinstance(path, AsdfSearchResult):

         show_values=display.DEFAULT_SHOW_VALUES,
-        refresh_extension_manager=False,
+        refresh_extension_manager=NotSet,
     ):

@@ -1448,2 +1447,4 @@ """

         """
+        if refresh_extension_manager is not NotSet:
+            warnings.warn("refresh_extension_manager is deprecated", DeprecationWarning)

@@ -1450,0 +1451,0 @@ lines = display.render_tree(

@@ -34,2 +34,7 @@ """

     resolved_uri = generic_io.resolve_uri(base_uri, uri)
+    # if the uri only has a trailing "#" fragment, strip it
+    # this deals with python 3.14 changes where in prior versions
+    # urljoin removed this type of fragment
+    if resolved_uri.endswith("#"):
+        resolved_uri = resolved_uri[:-1]
     if resolved_uri == "" or resolved_uri == base_uri:

@@ -36,0 +41,0 @@ return UseInternal
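For reference, a quick sketch of the urljoin difference these added lines normalize (behavior as described in the comment above; the URL is illustrative):

from urllib.parse import urljoin

# Python 3.14 preserves an empty trailing fragment; 3.13 and earlier drop it.
resolved = urljoin("http://example.org/base.asdf", "#")
if resolved.endswith("#"):
    resolved = resolved[:-1]
assert resolved == "http://example.org/base.asdf"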

@@ -146,2 +146,4 @@ import numpy as np

         ctx._blocks._set_array_storage(instance, "inline")
+        if not ctx._blocks._lazy_load:
+            return instance._make_array()
         return instance

@@ -189,3 +191,3 @@

         if not ctx._blocks._lazy_load:
-            instance._make_array()
+            return instance._make_array()
         return instance

@@ -192,0 +194,0 @@

@@ -0,1 +1,3 @@

+import warnings
+
 from asdf.extension import Validator

@@ -5,9 +7,19 @@ from asdf.tags.core.ndarray import validate_datatype, validate_max_ndim, validate_ndim

+def _warn_if_not_array(node, schema_property):
+    # warn here for non-ndarray tags, in a major version bump we can
+    # remove this and update the tags below to only match ndarrays
+    if not getattr(node, "_tag", "").startswith("tag:stsci.edu:asdf/core/ndarray-"):
+        warnings.warn(
+            f"Use of the {schema_property} validator with non-ndarray tags is deprecated. "
+            "Please define a custom validator for your tag",
+            DeprecationWarning,
+        )
+
 class NdimValidator(Validator):
     schema_property = "ndim"
     # The validators in this module should really only be applied
     # to ndarray-* tags, but that will have to be a 3.0 change.
     tags = ["**"]

     def validate(self, expected_ndim, node, schema):
+        _warn_if_not_array(node, self.schema_property)
         yield from validate_ndim(None, expected_ndim, node, schema)

@@ -21,2 +33,3 @@

     def validate(self, max_ndim, node, schema):
+        _warn_if_not_array(node, self.schema_property)
         yield from validate_max_ndim(None, max_ndim, node, schema)

@@ -30,2 +43,3 @@

     def validate(self, expected_datatype, node, schema):
+        _warn_if_not_array(node, self.schema_property)
         yield from validate_datatype(None, expected_datatype, node, schema)
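For tag authors affected by this deprecation, a minimal sketch of a replacement custom validator (the grid tag URI and dimension check are hypothetical, not part of asdf):

from asdf.exceptions import ValidationError
from asdf.extension import Validator


class GridNdimValidator(Validator):
    # Scope "ndim" to a specific custom tag instead of the "**" wildcard.
    schema_property = "ndim"
    tags = ["asdf://example.com/tags/grid-1.*"]

    def validate(self, expected_ndim, node, schema):
        # Yield (rather than raise) errors, matching the validators above.
        ndim = getattr(node, "ndim", None)
        if ndim != expected_ndim:
            yield ValidationError(f"Wrong number of dimensions: expected {expected_ndim}, got {ndim}")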

@@ -14,2 +14,3 @@ """

 from ._node_info import create_tree
+from .util import NotSet

@@ -36,3 +37,3 @@ __all__ = [

     identifier="root",
-    refresh_extension_manager=False,
+    refresh_extension_manager=NotSet,
     extension_manager=None,

@@ -39,0 +40,0 @@ ):

@@ -6,2 +6,3 @@ import re

 from .treeutil import get_children, is_container
+from .util import NotSet

@@ -142,3 +143,3 @@

-def create_tree(key, node, identifier="root", filters=None, refresh_extension_manager=False, extension_manager=None):
+def create_tree(key, node, identifier="root", filters=None, refresh_extension_manager=NotSet, extension_manager=None):
     """

@@ -158,2 +159,3 @@ Create a `NodeSchemaInfo` tree which can be filtered from a base node.

     refresh_extension_manager : bool
+        DEPRECATED
         If `True`, refresh the extension manager before looking up the

@@ -186,3 +188,3 @@ key. This is useful if you want to make sure that the schema

         preserve_list=True,
-        refresh_extension_manager=False,
+        refresh_extension_manager=NotSet,
         extension_manager=None,

@@ -207,2 +209,3 @@ ):

     refresh_extension_manager : bool
+        DEPRECATED
         If `True`, refresh the extension manager before looking up the

@@ -238,2 +241,5 @@ key. This is useful if you want to make sure that the schema

+    if refresh_extension_manager is NotSet:
+        refresh_extension_manager = False
+
     af = AsdfFile()

@@ -368,3 +374,3 @@ if refresh_extension_manager:

     def from_root_node(
-        cls, key, root_identifier, root_node, schema=None, refresh_extension_manager=False, extension_manager=None
+        cls, key, root_identifier, root_node, schema=None, refresh_extension_manager=NotSet, extension_manager=None
     ):

@@ -371,0 +377,0 @@ """

 import importlib.resources
+import numpy as np
 import pytest

@@ -7,3 +8,2 @@

-from . import create_large_tree, create_small_tree
 from .httpserver import HTTPServer

@@ -14,3 +14,9 @@

 def small_tree():
-    return create_small_tree()
+    x = np.arange(0, 10, dtype=float)
+    return {
+        "science_data": x,
+        "subset": x[3:-3],
+        "skipping": x[::2],
+        "not_shared": np.arange(10, 0, -1, dtype=np.uint8),
+    }

@@ -20,5 +26,34 @@

 def large_tree():
-    return create_large_tree()
+    # These are designed to be big enough so they don't fit in a
+    # single block, but not so big that RAM/disk space for the tests
+    # is enormous.
+    x = np.zeros((256, 256))
+    y = np.ones((16, 16, 16))
+    return {
+        "science_data": x,
+        "more": y,
+    }
+
+
+@pytest.fixture
+def recursive_tree(small_tree):
+    a = small_tree.copy()
+    a["a"] = a
+    return a
+
+
+@pytest.fixture(
+    params=[
+        "small_tree",
+        "large_tree",
+        "recursive_tree",
+    ]
+)
+def tree(request):
+    """
+    Metafixture for all tree fixtures.
+    """
+    return request.getfixturevalue(request.param)
+
+
 @pytest.fixture(autouse=True)

@@ -25,0 +60,0 @@ def _restore_default_config():

@@ -1108,1 +1108,17 @@ import contextlib

         assert af["arr"]._array is None
+
+
+@pytest.mark.parametrize(
+    "lazy_load, array_class",
+    (
+        (True, ndarray.NDArrayType),
+        (False, np.ndarray),
+    ),
+)
+@pytest.mark.parametrize("lazy_tree", [True, False])
+def test_lazy_load_array_class(tmp_path, lazy_load, lazy_tree, array_class):
+    file_path = tmp_path / "test.asdf"
+    asdf.AsdfFile({"arr": np.arange(100)}).write_to(file_path)
+    with asdf.open(file_path, lazy_load=lazy_load, lazy_tree=lazy_tree) as af:
+        assert type(af["arr"]) is array_class

@@ -18,10 +18,4 @@ import io

 from . import _helpers as helpers
-from . import create_large_tree, create_small_tree
-
-
-@pytest.fixture(params=[create_small_tree, create_large_tree])
-def tree(request):
-    return request.param()
 @pytest.fixture(params=[True, False])

@@ -28,0 +22,0 @@ def has_fsspec(request, monkeypatch):

@@ -992,160 +992,2 @@ import contextlib

-def test_custom_validation_bad(tmp_path, test_data_path):
-    custom_schema_path = test_data_path / "custom_schema.yaml"
-    asdf_file = str(tmp_path / "out.asdf")
-
-    # This tree does not conform to the custom schema
-    tree = {"stuff": 42, "other_stuff": "hello"}
-
-    # Creating file without custom schema should pass
-    with asdf.AsdfFile(tree) as ff:
-        ff.write_to(asdf_file)
-
-    # Creating file using custom schema should fail
-    af = asdf.AsdfFile(custom_schema=custom_schema_path)
-    af._tree = asdf.tags.core.AsdfObject(tree)
-    with pytest.raises(ValidationError, match=r".* is a required property"):
-        af.validate()
-
-    # Opening file without custom schema should pass
-    with asdf.open(asdf_file):
-        pass
-
-    # Opening file with custom schema should fail
-    with (
-        pytest.raises(ValidationError, match=r".* is a required property"),
-        asdf.open(
-            asdf_file,
-            custom_schema=custom_schema_path,
-        ),
-    ):
-        pass
-
-
-def test_custom_validation_good(tmp_path, test_data_path):
-    custom_schema_path = test_data_path / "custom_schema.yaml"
-    asdf_file = str(tmp_path / "out.asdf")
-
-    # This tree conforms to the custom schema
-    tree = {"foo": {"x": 42, "y": 10}, "bar": {"a": "hello", "b": "banjo"}}
-
-    with asdf.AsdfFile(tree, custom_schema=custom_schema_path) as ff:
-        ff.write_to(asdf_file)
-
-    with asdf.open(asdf_file, custom_schema=custom_schema_path):
-        pass
-
-
-def test_custom_validation_pathlib(tmp_path, test_data_path):
-    """
-    Make sure custom schema paths can be pathlib.Path objects
-
-    See https://github.com/asdf-format/asdf/issues/653 for discussion.
-    """
-    custom_schema_path = test_data_path / "custom_schema.yaml"
-    asdf_file = str(tmp_path / "out.asdf")
-
-    # This tree conforms to the custom schema
-    tree = {"foo": {"x": 42, "y": 10}, "bar": {"a": "hello", "b": "banjo"}}
-
-    with asdf.AsdfFile(tree, custom_schema=custom_schema_path) as ff:
-        ff.write_to(asdf_file)
-
-    with asdf.open(asdf_file, custom_schema=custom_schema_path):
-        pass
-
-
-def test_custom_validation_with_definitions_good(tmp_path, test_data_path):
-    custom_schema_path = test_data_path / "custom_schema_definitions.yaml"
-    asdf_file = str(tmp_path / "out.asdf")
-
-    # This tree conforms to the custom schema
-    tree = {"thing": {"biz": "hello", "baz": "world"}}
-
-    with asdf.AsdfFile(tree, custom_schema=custom_schema_path) as ff:
-        ff.write_to(asdf_file)
-
-    with asdf.open(asdf_file, custom_schema=custom_schema_path):
-        pass
-
-
-def test_custom_validation_with_definitions_bad(tmp_path, test_data_path):
-    custom_schema_path = test_data_path / "custom_schema_definitions.yaml"
-    asdf_file = str(tmp_path / "out.asdf")
-
-    # This tree does NOT conform to the custom schema
-    tree = {"forb": {"biz": "hello", "baz": "world"}}
-
-    # Creating file without custom schema should pass
-    with asdf.AsdfFile(tree) as ff:
-        ff.write_to(asdf_file)
-
-    # Creating file with custom schema should fail
-    af = asdf.AsdfFile(custom_schema=custom_schema_path)
-    af._tree = asdf.tags.core.AsdfObject(tree)
-    with pytest.raises(ValidationError, match=r".* is a required property"):
-        af.validate()
-
-    # Opening file without custom schema should pass
-    with asdf.open(asdf_file):
-        pass
-
-    # Opening file with custom schema should fail
-    with (
-        pytest.raises(ValidationError, match=r".* is a required property"),
-        asdf.open(
-            asdf_file,
-            custom_schema=custom_schema_path,
-        ),
-    ):
-        pass
-
-
-def test_custom_validation_with_external_ref_good(tmp_path, test_data_path):
-    custom_schema_path = test_data_path / "custom_schema_external_ref.yaml"
-    asdf_file = str(tmp_path / "out.asdf")
-
-    # This tree conforms to the custom schema
-    tree = {"foo": asdf.tags.core.Software(name="Microsoft Windows", version="95")}
-
-    with asdf.AsdfFile(tree, custom_schema=custom_schema_path) as ff:
-        ff.write_to(asdf_file)
-
-    with asdf.open(asdf_file, custom_schema=custom_schema_path):
-        pass
-
-
-def test_custom_validation_with_external_ref_bad(tmp_path, test_data_path):
-    custom_schema_path = test_data_path / "custom_schema_external_ref.yaml"
-    asdf_file = str(tmp_path / "out.asdf")
-
-    # This tree does not conform to the custom schema
-    tree = {"foo": False}
-
-    # Creating file without custom schema should pass
-    with asdf.AsdfFile(tree) as ff:
-        ff.write_to(asdf_file)
-
-    # Creating file with custom schema should fail
-    af = asdf.AsdfFile(custom_schema=custom_schema_path)
-    af["foo"] = False
-    with pytest.raises(ValidationError, match=r"False is not valid under any of the given schemas"):
-        af.validate()
-
-    # Opening file without custom schema should pass
-    with asdf.open(asdf_file):
-        pass
-
-    # Opening file with custom schema should fail
-    with (
-        pytest.raises(ValidationError, match=r"False is not valid under any of the given schemas"),
-        asdf.open(
-            asdf_file,
-            custom_schema=custom_schema_path,
-        ),
-    ):
-        pass
-
-
 @pytest.mark.parametrize(

@@ -1152,0 +994,0 @@ ("numpy_value", "valid_types"),

@@ -20,3 +20,3 @@ # file generated by setuptools-scm

-__version__ = version = '4.2.0'
-__version_tuple__ = version_tuple = (4, 2, 0)
+__version__ = version = '4.3.0'
+__version_tuple__ = version_tuple = (4, 3, 0)

 import os
+import warnings

@@ -12,2 +13,6 @@ import pytest

 def pytest_collection_modifyitems(items):
+    # first check if warnings are already turned into errors
+    for wf in warnings.filters:
+        if wf == ("error", None, Warning, None, 0):
+            return
     # Turn warnings into errors for all tests, this is needed

@@ -14,0 +19,0 @@ # as running tests through pyargs will not use settings

@@ -336,2 +336,5 @@ import copy

 def _make_schema_loader(resolver):
+    if resolver is None:
+        resolver = _default_resolver
+
     def load_schema(url):

@@ -401,2 +404,4 @@ # Check if this is a URI provided by the new

     resolver : callable, optional
+        DEPRECATED arbitrary mapping of uris is no longer supported
+        Please register all required resources with the resource manager.
         A callback function used to map URIs to other URIs. The

@@ -411,2 +416,4 @@ callable must take a string and return a string or `None`.

     """
+    if resolver is not None:
+        warnings.warn("resolver is deprecated, arbitrary mapping of uris is no longer supported", DeprecationWarning)
     # We want to cache the work that went into constructing the schema, but returning

@@ -432,2 +439,4 @@ # the same object is treacherous, because users who mutate the result will not

     """
+    if resolver is None:
+        resolver = _default_resolver
     # We can't use urllib.parse here because tag: URIs don't

@@ -459,4 +468,2 @@ # parse correctly.

 def _load_schema_cached(url, resolver, resolve_references):
-    if resolver is None:
-        resolver = _default_resolver
     loader = _make_schema_loader(resolver)

@@ -519,2 +526,3 @@ schema, url = loader(url)

     url_mapping : callable, optional
+        DEPRECATED
         A callable that takes one string argument and returns a string

@@ -534,2 +542,5 @@ to convert remote URLs into local ones.

     """
+    if url_mapping is not None:
+        warnings.warn("url_mapping is deprecated, arbitrary mapping of uris is no longer supported", DeprecationWarning)
     if ctx is None:

@@ -627,3 +638,4 @@ from ._asdf import AsdfFile

         Explicit schema to use. If not provided, the schema to use
-        is determined by the tag on instance (or subinstance).
+        is determined by the tag on instance (or subinstance) and
+        any custom schema provided to ``ctx``.

@@ -644,2 +656,4 @@ validators : dict, optional

+    if schema is None and ctx._custom_schema:
+        schema = ctx._custom_schema
+
     validator = get_validator({} if schema is None else schema, ctx, validators, None, *args, **kwargs)

@@ -646,0 +660,0 @@ validator.validate(instance)

@@ -9,2 +9,3 @@ """

 import typing
+import warnings

@@ -330,3 +331,3 @@ from ._display import DEFAULT_MAX_COLS, DEFAULT_MAX_ROWS, DEFAULT_SHOW_VALUES, render_tree

-    def schema_info(self, key="description", preserve_list=True, refresh_extension_manager=False):
+    def schema_info(self, key="description", preserve_list=True, refresh_extension_manager=NotSet):
         """

@@ -343,2 +344,3 @@ Get a nested dictionary of the schema information for a given key, relative to this search result.

         refresh_extension_manager : bool
+            DEPRECATED
             If `True`, refresh the extension manager before looking up the

@@ -348,2 +350,4 @@ key. This is useful if you want to make sure that the schema

         """
+        if refresh_extension_manager is not NotSet:
+            warnings.warn("refresh_extension_manager is deprecated", DeprecationWarning)

@@ -350,0 +354,0 @@ return collect_schema_info(

@@ -512,3 +512,3 @@ import mmap

-    if in_ndim > max_ndim:
+    if in_ndim is None or in_ndim > max_ndim:
         yield ValidationError(

@@ -515,0 +515,0 @@ f"Wrong number of dimensions: Expected max of {max_ndim}, got {in_ndim}",

@@ -0,1 +1,47 @@

4.3.0 (2025-07-16)
==================

Bugfix
------

- When ``lazy_load=False`` use ``ndarray`` instances for arrays (instead of
  ``NDArrayType``). (`#1929 <https://github.com/asdf-format/asdf/pull/1929>`_)
- Fix issue where custom schema provided to ``AsdfFile`` was ignored on
  ``write_to``. (`#1931 <https://github.com/asdf-format/asdf/pull/1931>`_)

Doc
---

- Expand extension documentation to cover tag vs ref, converter tag wildcards,
  and versioning, and user documentation to cover get/set_array_compression. (`#1938
  <https://github.com/asdf-format/asdf/pull/1938>`_)

Feature
-------

- Add ``dump``, ``dumps``, ``load`` and ``loads`` functions. (`#1930
  <https://github.com/asdf-format/asdf/pull/1930>`_)

Removal
-------

- Deprecate the ``resolver`` argument to ``asdf.schema.load_schema``. Arbitrary
  mapping of URIs is no longer supported; instead, register all required
  resources with the resource manager. (`#1934
  <https://github.com/asdf-format/asdf/pull/1934>`_)
- Deprecate the ``refresh_extension_manager`` argument to ``info``, ``schema_info``
  and ``SearchResult.schema_info``. (`#1935
  <https://github.com/asdf-format/asdf/pull/1935>`_)
- Deprecate the ``url_mapping`` argument to ``get_validator``. Arbitrary mapping of
  URLs is no longer supported. (`#1936
  <https://github.com/asdf-format/asdf/pull/1936>`_)
- Deprecate use of the ``ndim``, ``max_ndim`` and ``datatype`` validators for
  non-ndarray objects. Please define a custom validator if this is needed for a
  non-ndarray object. (`#1937
  <https://github.com/asdf-format/asdf/pull/1937>`_)

4.2.0 (2025-05-30)

@@ -2,0 +48,0 @@ ==================

@@ -243,4 +243,6 @@ .. currentmodule:: asdf

-You can easily `zlib <http://www.zlib.net/>`__ or `bzip2
-<http://www.bzip.org>`__ compress all blocks:
+`zlib <http://www.zlib.net/>`__ and `bzip2 <http://www.bzip.org>`__
+are included in every asdf install. Passing one of these 4-character
+codes as ``all_array_compression`` to `asdf.AsdfFile.write_to` will
+compress all blocks with the corresponding algorithm:

@@ -267,2 +269,18 @@ .. runcode::

Similarly, `asdf.config` can be used to configure compression of all
blocks by setting `asdf.config.AsdfConfig.all_array_compression`.
`asdf.AsdfFile.set_array_compression` can be used to set the compression
for a specific block, and `asdf.AsdfFile.get_array_compression` can be
used to read it back:

.. code:: python

    import asdf
    import numpy as np

    af = asdf.AsdfFile({"arr": np.arange(42)})
    af.set_array_compression(af["arr"], "lz4")
    assert af.get_array_compression(af["arr"]) == "lz4"
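A sketch of the config-based route, assuming ``all_array_compression`` on `asdf.config.AsdfConfig` behaves as documented above (the file name is illustrative):

.. code:: python

    import asdf
    import numpy as np

    # Compress every block written while this config context is active.
    with asdf.config_context() as config:
        config.all_array_compression = "bzp2"
        asdf.AsdfFile({"arr": np.ones((64, 64))}).write_to("example.asdf")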
When reading a file with compressed blocks, the blocks will be automatically

@@ -269,0 +287,0 @@ decompressed when accessed. If a file with compressed blocks is read and then

@@ -189,2 +189,19 @@ .. currentmodule:: asdf.extension

Tag wildcards to support multiple versions
==========================================

As noted above, `Converter.tags` can contain wildcard patterns
(``asdf://example.com/shapes/tags/rectangle-1.*`` matches all ``1.x.x`` versions
of the rectangle tag). When a wildcard is used, the specific tag
versions should be defined in the manifest (or extension) that uses
the `Converter`. If a `Converter` with a tag wildcard is provided to an
extension whose manifest contains no tags matching the pattern,
the `Converter` will be ignored. No errors or warnings are produced
when such an extension is registered with asdf (as this can be a useful pattern).
However, attempts to read or write objects that depend on the ignored
`Converter` will produce errors. Developers are encouraged to write
unit tests that check reading and writing with any custom `Converter`
instances. A minimal sketch of such a converter follows.
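The sketch assumes a hypothetical ``example_package.shapes.Rectangle`` class
and tag URI, and a manifest listing a single matching ``1.x`` tag (with
several matching tags, ``select_tag`` would also be needed):

.. code:: python

    from asdf.extension import Converter

    class RectangleConverter(Converter):
        # The wildcard matches every 1.x rectangle tag listed in the manifest.
        tags = ["asdf://example.com/shapes/tags/rectangle-1.*"]
        types = ["example_package.shapes.Rectangle"]

        def to_yaml_tree(self, obj, tag, ctx):
            return {"width": obj.width, "height": obj.height}

        def from_yaml_tree(self, node, tag, ctx):
            from example_package.shapes import Rectangle

            return Rectangle(node["width"], node["height"])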
.. _extending_converters_deferral:

@@ -191,0 +208,0 @@

@@ -315,2 +315,7 @@ .. currentmodule:: asdf.extension

It is important to consider the order of extensions registered via the entry point,
as asdf will prefer extensions earlier in the list. Put another way, when multiple
versions of an extension are registered, the newer versions should come earlier in
the list of extensions.
Entry point performance considerations

@@ -440,5 +445,4 @@ --------------------------------------

 schemas should work to ensure that ASDF files created with older extensions can
-continue to be processed. This means that every time a schema version is increased
-(with the possible exception of patch updates), a **new** schema file should be
-created.
+continue to be processed. This means that every time a schema version is increased,
+a **new** schema file should be created.

@@ -451,4 +455,55 @@ For example, if we currently have a schema for ``xyz-1.0.0``, and we wish to

To expand on this example, let's assume the ``xyz-1.0.0`` schema was linked
to tag ``tag/xyz-1.0.0``. The new ``xyz-1.1.0`` schema would often require:

- a new ``tag/xyz-1.1.0``
- an update to the corresponding `Converter` to support the new (and old)
  tags. This might not be needed if the `Converter` uses a tag wildcard
  that matches both tag versions and they can be treated the same way.
- a **new** manifest that lists the new tag and schema. Since manifests
  are also versioned, this update would trigger a new manifest version. As
  with schemas, the old manifest should be kept unmodified and a **new**
  manifest made with the new tag and schema.
- a new `Extension` using the new manifest. The new `Extension` should
  occur earlier in the list of registered extensions than the old version.

After this update is made, asdf will be able to open files with both the
old and new tags and write out files with the new tag. To expand on this,
when a file with an old tag is opened, asdf will look for an extension
that supports that tag. The new extension will be checked first (since
it occurs earlier in the list), but since the new manifest does not contain
the old tag, the new extension will be skipped. Next the old extension
will be checked, support for the tag will be confirmed, and the converter
included in that old extension will be used to handle the tag. On write,
asdf will again check the list of extensions, but this time it will see
that the new extension supports the type and will select the new tag
when writing the file.

For more details on the behavior of schema and tag versioning from a user
perspective, see :ref:`version_and_compat`, and also
:ref:`custom_type_versions`.
Versioning during development
-----------------------------

As described above, every schema change can trigger tag, manifest and
extension version changes. This is critically important, as it allows
asdf to open old files. However, the above considerations largely apply
only to released versions of schemas and manifests. During development
of a package it is likely that several schemas will change, and it
is not necessary to increase the manifest version for each of these updates.

Let's say we have a package ``libfoo`` that is currently released as version 1.2.3
and has a manifest ``manifest/foo-1.0.0`` listing tags ``tag/bar-1.0.0``
and ``tag/bam-1.0.0``. We make a change to ``schema/bar-1.0.0``, increasing
its version to ``schema/bar-1.1.0`` (which triggers a new manifest
``manifest/foo-1.1.0``). Importantly, however, we don't yet release these
changes. If we make a second change, this time creating ``schema/bam-1.1.0``,
it's likely that no increase in manifest version is required (as no users
of ``libfoo`` have yet had the opportunity to create files with
``manifest/foo-1.1.0``). ``schema/bam-1.1.0`` can be added to
``manifest/foo-1.1.0``, and not until the next release of ``libfoo`` do
schema updates again need to trigger manifest version increases.

This is general guidance. If it is likely that users are creating files
with a development version of ``libfoo``, then it may be worth increasing the
manifest version for every schema change.

@@ -45,6 +45,6 @@ .. _extending_schemas:

       - type: number
-      - tag: tag:stsci.edu:asdf/core/ndarray-1.0.0
+      - tag: tag:stsci.edu:asdf/core/ndarray-1.*
   unit:
     description: The unit corresponding to the values
-    tag: tag:stsci.edu:asdf/unit/unit-1.0.0
+    tag: tag:stsci.edu:asdf/unit/unit-1.*
 required: [value, unit]

@@ -162,8 +162,12 @@ ...

-      - tag: tag:stsci.edu:asdf/core/ndarray-1.0.0
+      - tag: tag:stsci.edu:asdf/core/ndarray-1.*

The second subschema contains a ``tag`` validator, which makes an
assertion regarding the YAML tag URI of the object assigned to ``value``.
In this subschema we're requiring an ndarray-1.* tag,
which is how n-dimensional arrays are represented in an ASDF tree. The
``*`` is a wildcard allowing this ``tag`` validator to succeed for any
minor or bugfix version of ndarray that has a major version of ``1``.
This means an ndarray-1.0.0 tag will succeed, as will ndarray-1.1.0, but
not ndarray-2.0.0.

@@ -179,6 +183,6 @@ The net effect of the ``anyOf`` combiner and its two subschemas is:

     description: The unit corresponding to the values
-    tag: tag:stsci.edu:asdf/unit/unit-1.0.0
+    tag: tag:stsci.edu:asdf/unit/unit-1.*

The ``unit`` property has another bit of documentation and a
-``tag`` validator that requires it to be a unit-1.0.0 object.
+``tag`` validator that requires it to be any unit-1.* tagged object.

@@ -202,2 +206,43 @@ .. code-block:: yaml

Composing schemas with references and tags
==========================================

When checking complex and/or nested structures it can often be useful to reference
other schemas. In the above example the ``tag`` keyword was used to check
that ``value`` has the ``ndarray`` tag (and consequently is validated against
the ``ndarray`` schema). This is often the most useful way of referencing
other schemas, for a few reasons:

- The wildcard allows flexible matching, letting minor and bugfix versions
  of the referenced schema be released without requiring an update
  of the referring schema.
- Since the ``tag`` validator only checks the tag of the object, the
  schema associated with the tag is not reused during validation
  of the referring schema (more on this below). In other words, use of
  ``tag`` avoids a duplicate validation of the tagged object.

In some cases schema authors may choose to use an even more flexible
wildcard allowing major version changes (for example ``ndarray-*``).
This is not recommended, as a major version change of a tag signifies
a breaking change and increases the likelihood that the tagged object will
no longer behave like the old version.

``tag`` does have a few downsides:

- It is a custom validator added by asdf and not part of JSON Schema. If
  the schemas are to be processed by non-asdf tools this might pose a challenge.
- It requires that the tagged object have a particular tag (more on this below).

An alternative that doesn't have these downsides is to reference another
schema using a ``$ref``. This is a standard feature of JSON Schema and doesn't
consider the tag of the object. However, ``$ref`` has downsides of its own:

- When a tagged object is checked with a ``$ref``, the object will be validated
  against the referenced schema twice: once due to the tag triggering
  validation against the corresponding schema, and a second time due to the
  ``$ref``.
- ``$ref`` does not support wildcards and must refer to a specific (down to
  the bugfix) version of a schema. This means that any update to the
  referenced schema will require an update to the referring schema.
Checking schema syntax

@@ -204,0 +249,0 @@ ======================

 Metadata-Version: 2.4
 Name: asdf
-Version: 4.2.0
+Version: 4.3.0
 Summary: Python implementation of the ASDF Standard

@@ -5,0 +5,0 @@ Author-email: The ASDF Developers <help@stsci.edu>

@@ -5,3 +5,4 @@ git+https://github.com/asdf-format/asdf-standard

 git+https://github.com/python-attrs/attrs
+fsspec[http] @ git+https://github.com/fsspec/filesystem_spec
 numpy>=0.0.dev0

@@ -215,4 +215,2 @@ [tox]

 [testenv:jwst]
-deps=
-    pytest-xdist
 change_dir = {env_tmp_dir}

@@ -235,3 +233,3 @@ allowlist_externals =

 commands =
-    pytest --numprocesses auto jwst
+    pytest jwst

@@ -238,0 +236,0 @@ [testenv:stdatamodels]