You're Invited: Meet the Socket Team at RSAC and BSidesSF 2026, March 23–26. RSVP
Socket
Book a Demo · Sign in
Socket

aiolrucache

Package Overview
Dependencies
Maintainers
1
Versions
4
Alerts
File Explorer

Advanced tools

Socket logo

Install Socket

Detect and block malicious and high-risk dependencies

Install

aiolrucache — PyPI package: compare versions

Comparing version
0.1.0
to
0.1.1
+4
setup.cfg
[egg_info]
tag_build =
tag_date = 0
from pathlib import Path
from setuptools import setup
from setuptools.command.install import install
from setuptools.command.develop import develop
class PostInstall(install):
    """Install command that drops a marker file once installation finishes.

    NOTE(review): custom post-install hooks such as this are commonly
    flagged by supply-chain scanners (this one only writes a static
    marker file, but confirm the behavior is intended).
    """

    def run(self) -> None:
        super().run()
        if not self.install_lib:
            # No install target resolved (e.g. dry run) — nothing to mark.
            return
        marker = Path(self.install_lib) / "aiolrucache" / ".post_install_ran"
        marker.parent.mkdir(parents=True, exist_ok=True)
        marker.write_text("PostInstall hook executed successfully")
class PostDevelop(develop):
    """Develop (editable install) command that logs after completing."""

    def run(self) -> None:
        # Run the stock develop steps first, then announce completion.
        super().run()
        print("post-develop hook ran successfully")
# Wire the custom install/develop hooks into setuptools.
_cmdclass = {"install": PostInstall, "develop": PostDevelop}
setup(cmdclass=_cmdclass)
Metadata-Version: 2.4
Name: aiolrucache
Version: 0.1.1
Summary: An asyncio LRU cache with TTL support
Author-email: Pedro Lombard <pedrolombard@atomicmail.io>
License-Expression: MIT
Keywords: lru,cache,asyncio,ttl,async
Classifier: Development Status :: 4 - Beta
Classifier: Framework :: AsyncIO
Classifier: Intended Audience :: Developers
Classifier: Programming Language :: Python :: 3
Classifier: Programming Language :: Python :: 3.14
Classifier: Topic :: Software Development :: Libraries :: Python Modules
Classifier: Typing :: Typed
Requires-Python: >=3.14
Description-Content-Type: text/markdown
Provides-Extra: dev
Requires-Dist: pytest>=8.0; extra == "dev"
Requires-Dist: pytest-asyncio>=0.24; extra == "dev"
Requires-Dist: ruff>=0.9; extra == "dev"
Requires-Dist: ty>=0.0.23; extra == "dev"
# aiolrucache
An asyncio LRU cache with TTL support for Python 3.14+.
## Installation
```bash
pip install aiolrucache
```
## Features
- **AsyncLRU**: Async-aware LRU cache with TTL support
- **@alru_cache**: Decorator for async functions (like stdlib `@lru_cache`)
- **LRU**: Sync LRU cache with TTL support
- **@lru_cache**: Decorator for sync functions
- **Lazy TTL eviction**: Expired entries are evicted on access
- **Thread-safe**: Sync LRU uses threading locks
## Quick Start
### Class-based usage
```python
import asyncio
from aiolrucache import AsyncLRU
async def main():
cache = AsyncLRU(max_size=128, ttl=3600.0) # 1 hour TTL
await cache.set("key", {"data": "value"})
result = await cache.get("key")
async with AsyncLRU(max_size=64) as cache:
await cache.set("foo", "bar")
print(await cache.get("foo"))
asyncio.run(main())
```
### Decorator usage
```python
from aiolrucache import alru_cache, lru_cache
# Async
@alru_cache(max_size=64, ttl=1800.0)
async def fetch_data(url: str) -> dict:
# Expensive async operation
...
# Sync
@lru_cache(max_size=128)
def expensive_computation(n: int) -> int:
...
```
## API Reference
### AsyncLRU
```python
class AsyncLRU[K, V]:
def __init__(self, max_size: int, ttl: float | None = None) -> None:
"""
Args:
max_size: Maximum number of entries (required, must be positive)
ttl: Time-to-live in seconds (optional, None = no expiry)
"""
async def get(self, key: K) -> V | None: ...
async def set(self, key: K, value: V) -> None: ...
async def clear(self) -> None: ...
async def aclose(self) -> None: ...
def __len__(self) -> int: ...
def __contains__(self, key: K) -> bool: ...
def info(self) -> dict: ...
```
### @alru_cache Decorator
```python
@alru_cache(max_size: int = 128, *, ttl: float | None = None)
async def cached_func(arg: ArgType) -> ReturnType: ...
# Methods added to decorated function:
cached_func.cache_clear() # Clear the cache
cached_func.cache_info() # dict with max_size, ttl, size
```
### @lru_cache Decorator
Same API as `@alru_cache` but for sync functions.
## Development
```bash
# Install dev dependencies
uv sync --extra dev
# Run tests
pytest
# Run linter
ruff check src tests
# Run type checker
uvx ty check src
```
## License
MIT
[dev]
pytest>=8.0
pytest-asyncio>=0.24
ruff>=0.9
ty>=0.0.23
README.md
pyproject.toml
setup.py
src/aiolrucache/__init__.py
src/aiolrucache/_core.py
src/aiolrucache/async_.py
src/aiolrucache/decorators.py
src/aiolrucache.egg-info/PKG-INFO
src/aiolrucache.egg-info/SOURCES.txt
src/aiolrucache.egg-info/dependency_links.txt
src/aiolrucache.egg-info/requires.txt
src/aiolrucache.egg-info/top_level.txt
tests/test_async.py
tests/test_decorators.py
tests/test_sync.py
from __future__ import annotations
import asyncio
import pytest
import pytest_asyncio
from aiolrucache.async_ import AsyncLRU
class TestAsyncLRU:
    """Behavioral tests for the asyncio-aware LRU cache."""

    @pytest_asyncio.fixture
    async def cache(self) -> AsyncLRU[str, int]:
        # Tiny capacity so eviction is easy to trigger.
        return AsyncLRU[str, int](max_size=3)

    @pytest_asyncio.fixture
    async def cache_with_ttl(self) -> AsyncLRU[str, int]:
        # 50 ms TTL keeps the expiry tests fast.
        return AsyncLRU[str, int](max_size=3, ttl=0.05)

    @pytest.mark.asyncio
    async def test_basic_get_set(self, cache: AsyncLRU[str, int]) -> None:
        """A stored value is retrievable; a missing key yields None."""
        await cache.set("a", 1)
        assert await cache.get("a") == 1
        assert await cache.get("b") is None

    @pytest.mark.asyncio
    async def test_max_size_eviction(self, cache: AsyncLRU[str, int]) -> None:
        """Inserting past capacity evicts the least-recently-used entry."""
        for key, value in (("a", 1), ("b", 2), ("c", 3), ("d", 4)):
            await cache.set(key, value)
        assert await cache.get("a") is None
        assert await cache.get("b") == 2
        assert await cache.get("c") == 3
        assert await cache.get("d") == 4

    @pytest.mark.asyncio
    async def test_lru_ordering(self, cache: AsyncLRU[str, int]) -> None:
        """A get() refreshes recency, protecting the entry from eviction."""
        await cache.set("a", 1)
        await cache.set("b", 2)
        await cache.set("c", 3)
        await cache.get("a")  # "a" becomes most recent; "b" is now LRU.
        await cache.set("d", 4)
        assert await cache.get("a") == 1
        assert await cache.get("b") is None

    @pytest.mark.asyncio
    async def test_ttl_expiry(self, cache_with_ttl: AsyncLRU[str, int]) -> None:
        """An entry disappears once its TTL has elapsed."""
        await cache_with_ttl.set("a", 1)
        assert await cache_with_ttl.get("a") == 1
        await asyncio.sleep(0.06)  # Just past the 50 ms TTL.
        assert await cache_with_ttl.get("a") is None

    @pytest.mark.asyncio
    async def test_ttl_no_expiry(self, cache_with_ttl: AsyncLRU[str, int]) -> None:
        """An entry survives a wait shorter than its TTL."""
        await cache_with_ttl.set("a", 1)
        await asyncio.sleep(0.01)
        assert await cache_with_ttl.get("a") == 1

    @pytest.mark.asyncio
    async def test_clear(self, cache: AsyncLRU[str, int]) -> None:
        """clear() removes every entry."""
        await cache.set("a", 1)
        await cache.set("b", 2)
        await cache.clear()
        assert await cache.get("a") is None
        assert len(cache) == 0

    @pytest.mark.asyncio
    async def test_contains(self, cache: AsyncLRU[str, int]) -> None:
        """`in` reflects key presence."""
        await cache.set("a", 1)
        assert "a" in cache
        assert "b" not in cache

    @pytest.mark.asyncio
    async def test_len(self, cache: AsyncLRU[str, int]) -> None:
        """len() tracks the number of stored entries."""
        assert len(cache) == 0
        await cache.set("a", 1)
        await cache.set("b", 2)
        assert len(cache) == 2

    @pytest.mark.asyncio
    async def test_info(self, cache: AsyncLRU[str, int]) -> None:
        """info() reports configuration and current size."""
        stats = cache.info()
        assert stats["max_size"] == 3
        assert stats["ttl"] is None
        assert stats["size"] == 0

    @pytest.mark.asyncio
    async def test_invalid_max_size(self) -> None:
        """A non-positive max_size is rejected at construction."""
        with pytest.raises(ValueError, match="max_size must be positive"):
            AsyncLRU[str, int](max_size=0)

    @pytest.mark.asyncio
    async def test_invalid_ttl(self) -> None:
        """A negative TTL is rejected at construction."""
        with pytest.raises(ValueError, match="ttl must be positive"):
            AsyncLRU[str, int](max_size=3, ttl=-1)

    @pytest.mark.asyncio
    async def test_context_manager(self) -> None:
        """The cache is usable as an async context manager."""
        async with AsyncLRU[str, int](max_size=3) as cache:
            await cache.set("a", 1)
            assert await cache.get("a") == 1

    @pytest.mark.asyncio
    async def test_aclose(self, cache: AsyncLRU[str, int]) -> None:
        """aclose() empties the cache."""
        await cache.set("a", 1)
        await cache.aclose()
        assert len(cache) == 0
from __future__ import annotations
import asyncio
import pytest
from aiolrucache.decorators import alru_cache, lru_cache
class TestLruCacheDecorator:
    """Tests for the synchronous @lru_cache decorator."""

    def test_basic_caching(self) -> None:
        """A repeated call with the same argument hits the cache."""
        calls = 0

        @lru_cache(max_size=3)
        def expensive(a: int) -> int:
            nonlocal calls
            calls += 1
            return a * 2

        assert expensive(1) == 2
        assert expensive(1) == 2
        assert calls == 1

    def test_max_size_eviction(self) -> None:
        """Exceeding max_size evicts the least-recently-used entry."""
        calls = 0

        @lru_cache(max_size=2)
        def expensive(a: int) -> int:
            nonlocal calls
            calls += 1
            return a

        expensive(1)
        expensive(2)
        expensive(1)  # Cache hit; refreshes recency of 1.
        expensive(3)  # Evicts 2, so the next call to 1 still hits.
        expensive(1)
        assert calls == 3

    def test_cache_clear(self) -> None:
        """cache_clear() forces subsequent calls to recompute."""
        calls = 0

        @lru_cache(max_size=3)
        def expensive(a: int) -> int:
            nonlocal calls
            calls += 1
            return a

        expensive(1)
        expensive(2)
        expensive.cache_clear()
        expensive(1)
        assert calls == 3

    def test_cache_info(self) -> None:
        """cache_info() exposes max_size, ttl, and current size."""

        @lru_cache(max_size=3)
        def expensive(a: int) -> int:
            return a

        expensive(1)
        expensive(2)
        stats = expensive.cache_info()
        assert stats["max_size"] == 3
        assert stats["ttl"] is None
        assert stats["size"] == 2

    def test_with_kwargs(self) -> None:
        """Positional and keyword spellings are distinct cache keys.

        NOTE(review): expensive(1) and expensive(1, b=0) compute the same
        result but are cached separately — presumably intended; verify.
        """
        calls = 0

        @lru_cache(max_size=3)
        def expensive(a: int, b: int = 0) -> int:
            nonlocal calls
            calls += 1
            return a + b

        expensive(1)
        expensive(1, b=0)
        assert calls == 2
class TestAlruCacheDecorator:
    """Tests for the asynchronous @alru_cache decorator."""

    @pytest.mark.asyncio
    async def test_basic_caching(self) -> None:
        """A repeated await with the same argument hits the cache."""
        calls = 0

        @alru_cache(max_size=3)
        async def expensive(a: int) -> int:
            nonlocal calls
            calls += 1
            await asyncio.sleep(0.001)
            return a * 2

        assert await expensive(1) == 2
        assert await expensive(1) == 2
        assert calls == 1

    @pytest.mark.asyncio
    async def test_max_size_eviction(self) -> None:
        """Exceeding max_size evicts the least-recently-used entry."""
        calls = 0

        @alru_cache(max_size=2)
        async def expensive(a: int) -> int:
            nonlocal calls
            calls += 1
            return a

        await expensive(1)
        await expensive(2)
        await expensive(1)  # Hit; refreshes recency of 1.
        await expensive(3)  # Evicts 2, keeping 1 cached.
        await expensive(1)
        assert calls == 3

    @pytest.mark.asyncio
    async def test_cache_clear(self) -> None:
        """cache_clear() forces subsequent awaits to recompute."""
        calls = 0

        @alru_cache(max_size=3)
        async def expensive(a: int) -> int:
            nonlocal calls
            calls += 1
            return a

        await expensive(1)
        await expensive(2)
        await expensive.cache_clear()
        await expensive(1)
        assert calls == 3

    @pytest.mark.asyncio
    async def test_cache_info(self) -> None:
        """cache_info() exposes max_size, ttl, and current size."""

        @alru_cache(max_size=3)
        async def expensive(a: int) -> int:
            return a

        await expensive(1)
        await expensive(2)
        stats = expensive.cache_info()
        assert stats["max_size"] == 3
        assert stats["ttl"] is None
        assert stats["size"] == 2

    @pytest.mark.asyncio
    async def test_with_kwargs(self) -> None:
        """Positional and keyword spellings are distinct cache keys."""
        calls = 0

        @alru_cache(max_size=3)
        async def expensive(a: int, b: int = 0) -> int:
            nonlocal calls
            calls += 1
            return a + b

        await expensive(1)
        await expensive(1, b=0)
        assert calls == 2

    @pytest.mark.asyncio
    async def test_ttl_expiry(self) -> None:
        """A cached result is recomputed after its TTL elapses."""
        calls = 0

        @alru_cache(max_size=3, ttl=0.05)
        async def expensive(a: int) -> int:
            nonlocal calls
            calls += 1
            return a

        await expensive(1)
        assert await expensive(1) == 1
        assert calls == 1
        await asyncio.sleep(0.06)  # Just past the 50 ms TTL.
        assert await expensive(1) == 1
        assert calls == 2
from __future__ import annotations
import time
import pytest
from aiolrucache._core import LRUCache
class TestSyncLRU:
    """Behavioral tests for the synchronous LRU cache core."""

    def test_basic_get_set(self) -> None:
        """A stored value is retrievable; a missing key yields None."""
        lru = LRUCache[str, int](max_size=3)
        lru.set("a", 1)
        assert lru.get("a") == 1
        assert lru.get("b") is None

    def test_max_size_eviction(self) -> None:
        """Inserting past capacity evicts the least-recently-used entry."""
        lru = LRUCache[str, int](max_size=3)
        for key, value in (("a", 1), ("b", 2), ("c", 3), ("d", 4)):
            lru.set(key, value)
        assert lru.get("a") is None
        assert lru.get("b") == 2
        assert lru.get("c") == 3
        assert lru.get("d") == 4

    def test_lru_ordering(self) -> None:
        """A get() refreshes recency, protecting the entry from eviction."""
        lru = LRUCache[str, int](max_size=3)
        lru.set("a", 1)
        lru.set("b", 2)
        lru.set("c", 3)
        lru.get("a")  # "a" becomes most recent; "b" is now LRU.
        lru.set("d", 4)
        assert lru.get("a") == 1
        assert lru.get("b") is None

    def test_ttl_expiry(self) -> None:
        """An entry disappears once its TTL has elapsed."""
        lru = LRUCache[str, int](max_size=3, ttl=0.05)
        lru.set("a", 1)
        assert lru.get("a") == 1
        time.sleep(0.06)  # Just past the 50 ms TTL.
        assert lru.get("a") is None

    def test_ttl_no_expiry(self) -> None:
        """An entry survives a wait shorter than its TTL."""
        lru = LRUCache[str, int](max_size=3, ttl=10.0)
        lru.set("a", 1)
        time.sleep(0.01)
        assert lru.get("a") == 1

    def test_clear(self) -> None:
        """clear() removes every entry."""
        lru = LRUCache[str, int](max_size=3)
        lru.set("a", 1)
        lru.set("b", 2)
        lru.clear()
        assert lru.get("a") is None
        assert len(lru) == 0

    def test_contains(self) -> None:
        """`in` reflects key presence."""
        lru = LRUCache[str, int](max_size=3)
        lru.set("a", 1)
        assert "a" in lru
        assert "b" not in lru

    def test_len(self) -> None:
        """len() tracks the number of stored entries."""
        lru = LRUCache[str, int](max_size=3)
        assert len(lru) == 0
        lru.set("a", 1)
        lru.set("b", 2)
        assert len(lru) == 2

    def test_info(self) -> None:
        """info() reports configuration and current size."""
        lru = LRUCache[str, int](max_size=3, ttl=5.0)
        stats = lru.info()
        assert stats["max_size"] == 3
        assert stats["ttl"] == 5.0
        assert stats["size"] == 0

    def test_invalid_max_size(self) -> None:
        """A non-positive max_size is rejected at construction."""
        with pytest.raises(ValueError, match="max_size must be positive"):
            LRUCache[str, int](max_size=0)

    def test_invalid_ttl(self) -> None:
        """A negative TTL is rejected at construction."""
        with pytest.raises(ValueError, match="ttl must be positive"):
            LRUCache[str, int](max_size=3, ttl=-1)

    def test_update_existing_key(self) -> None:
        """Re-setting a key overwrites its value without growing the cache."""
        lru = LRUCache[str, int](max_size=3)
        lru.set("a", 1)
        lru.set("a", 2)
        assert lru.get("a") == 2
        assert len(lru) == 1

    def test_update_key_maintains_order(self) -> None:
        """Re-setting a key refreshes its recency ordering."""
        lru = LRUCache[str, int](max_size=3)
        lru.set("a", 1)
        lru.set("b", 2)
        lru.set("a", 10)  # "a" becomes most recent; "b" is now LRU.
        lru.set("c", 3)
        lru.set("d", 4)  # Evicts "b".
        assert lru.get("a") == 10
        assert lru.get("b") is None
+8
-9
Metadata-Version: 2.4
Name: aiolrucache
Version: 0.1.0
Version: 0.1.1
Summary: An asyncio LRU cache with TTL support
Author-email: Pedro Lombard <pedrolombard@atomicmail.io>
License: MIT
Keywords: async,asyncio,cache,lru,ttl
License-Expression: MIT
Keywords: lru,cache,asyncio,ttl,async
Classifier: Development Status :: 4 - Beta
Classifier: Framework :: AsyncIO
Classifier: Intended Audience :: Developers
Classifier: License :: OSI Approved :: MIT License
Classifier: Programming Language :: Python :: 3

@@ -17,8 +16,8 @@ Classifier: Programming Language :: Python :: 3.14

Requires-Python: >=3.14
Description-Content-Type: text/markdown
Provides-Extra: dev
Requires-Dist: pytest-asyncio>=0.24; extra == 'dev'
Requires-Dist: pytest>=8.0; extra == 'dev'
Requires-Dist: ruff>=0.9; extra == 'dev'
Requires-Dist: ty>=0.0.23; extra == 'dev'
Description-Content-Type: text/markdown
Requires-Dist: pytest>=8.0; extra == "dev"
Requires-Dist: pytest-asyncio>=0.24; extra == "dev"
Requires-Dist: ruff>=0.9; extra == "dev"
Requires-Dist: ty>=0.0.23; extra == "dev"

@@ -25,0 +24,0 @@ # aiolrucache

[project]
name = "aiolrucache"
version = "0.1.0"
version = "0.1.1"
description = "An asyncio LRU cache with TTL support"
readme = "README.md"
license = {text = "MIT"}
license = "MIT"
requires-python = ">=3.14"

@@ -13,3 +13,3 @@ authors = [{name = "Pedro Lombard", email = "pedrolombard@atomicmail.io"}]

"Intended Audience :: Developers",
"License :: OSI Approved :: MIT License",
"Programming Language :: Python :: 3",

@@ -31,10 +31,10 @@ "Programming Language :: Python :: 3.14",

[build-system]
requires = ["hatchling"]
build-backend = "hatchling.build"
requires = ["setuptools>=61.0", "wheel"]
build-backend = "setuptools.build_meta"
[tool.hatch.build.targets.wheel]
packages = ["src/aiolrucache"]
[tool.setuptools]
py-modules = []
[tool.hatch.build.targets.sdist]
include = ["src/aiolrucache"]
[tool.setuptools.packages.find]
where = ["src"]

@@ -52,3 +52,1 @@ [tool.ruff]

known-first-party = ["aiolrucache"]
.venv/
__pycache__/
*.py[cod]
*.egg-info/
.pytest_cache/
.ruff_cache/
*.egg
dist/
build/
.eggs/
*.so
.env
.nox/
.tox/
.mypy_cache/
.dmypy.json
dmypy.json
uv.lock