diff --git a/pyjelly/integrations/generic/generic_sink.py b/pyjelly/integrations/generic/generic_sink.py index 083163f6..1a858063 100644 --- a/pyjelly/integrations/generic/generic_sink.py +++ b/pyjelly/integrations/generic/generic_sink.py @@ -185,7 +185,9 @@ def is_triples_sink(self) -> bool: return bool(self._store) and len(self._store[0]) == TRIPLE_ARITY def parse(self, input_file: IO[bytes]) -> None: - from pyjelly.integrations.generic.parse import parse_jelly_to_graph + from pyjelly.integrations.generic.parse import ( + parse_jelly_to_graph, + ) parsed_result = parse_jelly_to_graph(input_file) self._store = parsed_result._store @@ -193,6 +195,8 @@ def parse(self, input_file: IO[bytes]) -> None: self._identifier = parsed_result._identifier def serialize(self, output_file: IO[bytes]) -> None: - from pyjelly.integrations.generic.serialize import grouped_stream_to_file + from pyjelly.integrations.generic.serialize import ( + grouped_stream_to_file, + ) grouped_stream_to_file((sink for sink in [self]), output_file) diff --git a/pyjelly/serialize/streams.py b/pyjelly/serialize/streams.py index 8de332ed..e6fccf6c 100644 --- a/pyjelly/serialize/streams.py +++ b/pyjelly/serialize/streams.py @@ -146,7 +146,9 @@ def for_rdflib(cls, options: SerializerOptions | None = None) -> Stream: if cls is Stream: msg = "Stream is an abstract base class, use a subclass instead" raise TypeError(msg) - from pyjelly.integrations.rdflib.serialize import RDFLibTermEncoder + from pyjelly.integrations.rdflib.serialize import ( + RDFLibTermEncoder, + ) lookup_preset: LookupPreset | None = None if options is not None: diff --git a/pyproject.toml b/pyproject.toml index 2cfff228..fb137f28 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -1,8 +1,8 @@ [build-system] requires = [ - "hatchling>=1.24", - "hatch-mypyc; platform_python_implementation == 'CPython'", - "mypy>=1.8; platform_python_implementation == 'CPython'", + "hatchling>=1.24", + "hatch-mypyc; platform_python_implementation == 'CPython'", + "mypy>=1.8; platform_python_implementation == 'CPython'", ] build-backend = "hatchling.build" @@ -10,11 +10,11 @@ build-backend = "hatchling.build" only-include = ["pyjelly"] artifacts = [ - "*__mypyc*.*", - "**/*__mypyc*.*", - "**/*.so", - "**/*.pyd", - "**/*.dylib" + "*__mypyc*.*", + "**/*__mypyc*.*", + "**/*.so", + "**/*.pyd", + "**/*.dylib", ] @@ -27,29 +27,31 @@ require-cpython = true modules = ["pyjelly"] include = [ - "pyjelly/serialize/ioutils.py", - "pyjelly/integrations/generic/serialize.py", - "pyjelly/parse/lookup.py", - "pyjelly/parse/ioutils.py", - "pyjelly/serialize/streams.py", - "pyjelly/parse/decode.py", - "pyjelly/serialize/encode.py", - "pyjelly/serialize/lookup.py", + "pyjelly/serialize/ioutils.py", + "pyjelly/integrations/generic/serialize.py", + "pyjelly/parse/lookup.py", + "pyjelly/parse/ioutils.py", + "pyjelly/serialize/streams.py", + "pyjelly/parse/decode.py", + "pyjelly/serialize/encode.py", + "pyjelly/serialize/lookup.py", ] -mypy-args = [ - "--ignore-missing-imports", - "--no-warn-no-return" -] +mypy-args = ["--ignore-missing-imports", "--no-warn-no-return"] [tool.cibuildwheel] build = ["cp310-*", "cp311-*", "cp312-*", "cp313-*", "cp314-*"] -skip = ["*-win32"] +skip = ["*-win32"] test-extras = ["rdflib"] test-requires = [ - "mypy-extensions", - "pytest", "hypothesis", "inline-snapshot", "pytest-mock", - "pytest-subtests", "pytest-accept", "detect-test-pollution" + "mypy-extensions", + "pytest", + "hypothesis", + "inline-snapshot", + "pytest-mock", + "pytest-subtests", + "pytest-accept", + 
"detect-test-pollution", ] test-command = "cp -r {project}/pyjelly /tmp/pyjelly && cp -r {project}/tests /tmp/tests && cd /tmp && PYTHONPATH=/tmp python -m pytest -q --import-mode=importlib tests" @@ -67,9 +69,7 @@ name = "pyjelly" version = "0.0.0" readme = "README.md" description = "Jelly-RDF implementation for Python" -authors = [ - { name = "NeverBlink et al.", email = "contact@neverblink.eu" } -] +authors = [{ name = "NeverBlink et al.", email = "contact@neverblink.eu" }] requires-python = ">=3.10, <3.15" classifiers = [ "Development Status :: 4 - Beta", @@ -146,9 +146,9 @@ docs = [ "markupsafe<4.0.0", "linkchecker~=10.6.0", ] -# version 3.1 required for python 3.14 support -ci = ["cibuildwheel>=3.1.0,<4 ; python_version >= '3.11'"] - +# version 3.11 required for python 3.14 support +ci = ['cibuildwheel>=3.1.0,<4 ; python_version >= "3.11"'] +bench = ["pytest-benchmark>=5.2.1", "rdflib>=7.1.4"] [tool.uv] required-version = "~=0.9.0" @@ -157,12 +157,7 @@ default-groups = ["dev", "extras", "test", "types"] [tool.mypy] strict = true explicit_package_bases = true -exclude = [ - "build/", - "docs/", - ".*_pb2(_grpc)?\\.pyi?", - "examples/" -] +exclude = ["build/", "docs/", ".*_pb2(_grpc)?\\.pyi?", "examples/"] [[tool.mypy.overrides]] module = "google.protobuf.proto" @@ -174,7 +169,16 @@ extra-standard-library = ["typing_extensions"] [tool.pytest.ini_options] pythonpath = "." -addopts = ["--import-mode=importlib", "--doctest-modules", "--ignore-glob=docs/examples/**", "--ignore-glob=examples/**"] +addopts = [ + "--import-mode=importlib", + "--doctest-modules", + "--ignore-glob=docs/examples/**", + "--ignore-glob=examples/**", + "--ignore-glob=tests/utils/benchmark_*", + "--ignore=tests/benchmark_tests", + "-m", + "not benchmark", +] [tool.ruff] extend-exclude = ["*{_pb2,_pb2_grpc}.{py,pyi}"] @@ -184,9 +188,7 @@ packages = ["pyjelly"] [tool.ruff.lint] extend-per-file-ignores = { "tests/unit_tests/**" = ["PLR2004"] } -exclude = [ - "**examples/**", -] +exclude = ["**examples/**"] select = ["ALL"] ignore = [ # Description: Dynamically typed expressions (typing.Any) are disallowed diff --git a/tests/benchmark_tests/__init__.py b/tests/benchmark_tests/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/tests/benchmark_tests/conftest.py b/tests/benchmark_tests/conftest.py new file mode 100644 index 00000000..0c2330c6 --- /dev/null +++ b/tests/benchmark_tests/conftest.py @@ -0,0 +1,170 @@ +from __future__ import annotations + +import io +from itertools import islice +from pathlib import Path + +import pytest +from rdflib import Dataset, Graph + +pytest.importorskip( + "pytest_benchmark", + reason="Install bench dependency group and run with -m benchmark", +) + + +def pytest_addoption(parser: pytest.Parser) -> None: + g = parser.getgroup("benchmark") + g.addoption("--in-nt", type=str, help="path to N-Triples file.") + g.addoption("--in-nq", type=str, help="path to N-Quads file.") + g.addoption( + "--in-jelly-triples", + type=str, + default=None, + help="optional Jelly triples file; if none, generated in-memory from nt file.", + ) + g.addoption( + "--in-jelly-quads", + type=str, + default=None, + help="optional Jelly quads file; if none, generated in-memory from nq slice.", + ) + + g.addoption( + "--limit-statements", + type=int, + default=5_000_000, + help="first N statements from input.", + ) + g.addoption( + "--warmup-rounds", + type=int, + default=5, + help="warmup rounds, not counted to evaluation.", + ) + g.addoption("--rounds", type=int, default=10, help="measured rounds.") 
+ g.addoption("--iterations", type=int, default=1, help="iterations per round.") + + +def _slice_lines_to_bytes(path: Path, limit: int) -> bytes: + buf = io.BytesIO() + with path.open("rb") as f: + buf.writelines(islice(f, limit)) + return buf.getvalue() + + +@pytest.fixture(scope="session") +def limit_statements(request: pytest.FixtureRequest) -> int: + return int(request.config.getoption("--limit-statements")) + + +@pytest.fixture(scope="session") +def pedantic_cfg(request: pytest.FixtureRequest) -> dict[str, int]: + return { + "warmup_rounds": int(request.config.getoption("--warmup-rounds")), + "rounds": int(request.config.getoption("--rounds")), + "iterations": int(request.config.getoption("--iterations")), + } + + +@pytest.fixture(scope="session") +def nt_path(request: pytest.FixtureRequest) -> Path: + opt = request.config.getoption("--in-nt") + assert opt, "--in-nt is required" + p = Path(opt) + assert p.exists(), f"--in-nt not found: {p}" + return p + + +@pytest.fixture(scope="session") +def nq_path(request: pytest.FixtureRequest) -> Path: + opt = request.config.getoption("--in-nq") + assert opt, "--in-nq is required" + p = Path(opt) + assert p.exists(), f"--in-nq not found: {p}" + return p + + +@pytest.fixture(scope="session") +def jelly_triples_path(request: pytest.FixtureRequest) -> Path | None: + opt = request.config.getoption("--in-jelly-triples") + return Path(opt) if opt else None + + +@pytest.fixture(scope="session") +def jelly_quads_path(request: pytest.FixtureRequest) -> Path | None: + opt = request.config.getoption("--in-jelly-quads") + return Path(opt) if opt else None + + +@pytest.fixture(scope="session") +def nt_bytes_sliced(nt_path: Path, limit_statements: int) -> bytes: + return _slice_lines_to_bytes(nt_path, limit_statements) + + +@pytest.fixture(scope="session") +def nq_bytes_sliced(nq_path: Path, limit_statements: int) -> bytes: + return _slice_lines_to_bytes(nq_path, limit_statements) + + +@pytest.fixture(scope="session") +def nt_graph(nt_bytes_sliced: bytes) -> Graph: + g = Graph() + g.parse(data=nt_bytes_sliced, format="nt") + return g + + +@pytest.fixture(scope="session") +def nq_dataset(nq_bytes_sliced: bytes) -> Dataset: + ds = Dataset() + ds.parse(data=nq_bytes_sliced, format="nquads") + return ds + + +@pytest.fixture(scope="session") +def jelly_triples_bytes(jelly_triples_path: Path | None, nt_graph: Graph) -> bytes: + if jelly_triples_path and jelly_triples_path.exists(): + return jelly_triples_path.read_bytes() + return nt_graph.serialize(destination=None, format="jelly", encoding="utf-8") + + +@pytest.fixture(scope="session") +def jelly_quads_bytes(jelly_quads_path: Path | None, nq_dataset: Dataset) -> bytes: + if jelly_quads_path and jelly_quads_path.exists(): + return jelly_quads_path.read_bytes() + return nq_dataset.serialize(destination=None, format="jelly", encoding="utf-8") + + +def pytest_configure(config: pytest.Config) -> None: + config.addinivalue_line("markers", "benchmark: flat ser/des benchmarks") + config.addinivalue_line( + "markers", "triples: triples-only benchmarks (NT/Jelly-triples)" + ) + config.addinivalue_line("markers", "quads: quads-only benchmarks (NQ/Jelly-quads)") + + +def pytest_collection_modifyitems( + config: pytest.Config, items: list[pytest.Item] +) -> None: + has_nt = bool(config.getoption("--in-nt")) + has_nq = bool(config.getoption("--in-nq")) + + deselected: list[pytest.Item] = [] + selected: list[pytest.Item] = [] + + for it in items: + is_triples = it.get_closest_marker("triples") is not None + is_quads = 
it.get_closest_marker("quads") is not None + + if is_triples and not has_nt: + deselected.append(it) + continue + if is_quads and not has_nq: + deselected.append(it) + continue + + selected.append(it) + + if deselected: + config.hook.pytest_deselected(items=deselected) + items[:] = selected diff --git a/tests/benchmark_tests/jelly_rdflib.py b/tests/benchmark_tests/jelly_rdflib.py new file mode 100644 index 00000000..6d1929dc --- /dev/null +++ b/tests/benchmark_tests/jelly_rdflib.py @@ -0,0 +1,72 @@ +from __future__ import annotations + +import io +from contextlib import suppress + +from rdflib import Dataset, Graph + +from tests.utils.benchmark_io_utils import NullCounter + + +def parse_nt_bytes(nt_bytes: bytes) -> Graph: + g = Graph() + g.parse(data=nt_bytes, format="nt") + return g + + +def parse_jelly_triples_bytes(jelly_bytes: bytes) -> Graph: + g = Graph() + g.parse(data=jelly_bytes, format="jelly") + return g + + +def parse_nq_bytes(nq_bytes: bytes) -> Dataset: + ds = Dataset() + ds.parse(data=nq_bytes, format="nquads") + return ds + + +def parse_jelly_quads_bytes(jelly_bytes: bytes) -> Dataset: + ds = Dataset() + ds.parse(data=jelly_bytes, format="jelly") + return ds + + +def serialize_nt_stream(g: Graph) -> int: + sink = NullCounter() + buf = io.BufferedWriter(sink) + g.serialize(destination=buf, format="nt", encoding="utf-8") + buf.flush() + with suppress(io.UnsupportedOperation, ValueError): + buf.detach() + return sink.n + + +def serialize_jelly_triples_stream(g: Graph) -> int: + sink = NullCounter() + buf = io.BufferedWriter(sink) + g.serialize(destination=buf, format="jelly", encoding="utf-8") + buf.flush() + with suppress(io.UnsupportedOperation, ValueError): + buf.detach() + return sink.n + + +def serialize_nq_stream(ds: Dataset) -> int: + sink = NullCounter() + buf = io.BufferedWriter(sink) + ds.serialize(destination=buf, format="nquads", encoding="utf-8") + buf.flush() + with suppress(io.UnsupportedOperation, ValueError): + buf.detach() + return sink.n + + +def serialize_jelly_quads_stream(ds: Dataset) -> int: + sink = NullCounter() + buf = io.BufferedWriter(sink) + ds.serialize(destination=buf, format="jelly", encoding="utf-8") + buf.flush() + with suppress(io.UnsupportedOperation, ValueError): + buf.detach() + return sink.n diff --git a/tests/benchmark_tests/test_flat_deserialize.py b/tests/benchmark_tests/test_flat_deserialize.py new file mode 100644 index 00000000..faa311f2 --- /dev/null +++ b/tests/benchmark_tests/test_flat_deserialize.py @@ -0,0 +1,67 @@ +from __future__ import annotations + +import pytest +from pytest_benchmark.fixture import BenchmarkFixture # type: ignore[import-not-found] + +from tests.benchmark_tests.jelly_rdflib import ( + parse_jelly_quads_bytes, + parse_jelly_triples_bytes, + parse_nq_bytes, + parse_nt_bytes, +) +from tests.utils.benchmark_throughput import print_throughput + +pytest.importorskip( + "pytest_benchmark", + reason="Install bench dependency group and run with -m benchmark", +) + +pytestmark = pytest.mark.benchmark + + +@pytest.mark.triples +def test_flat_triples_deserialize_nt( + benchmark: BenchmarkFixture, + nt_bytes_sliced: bytes, + pedantic_cfg: dict[str, int], + limit_statements: int, +) -> None: + benchmark.pedantic(parse_nt_bytes, args=(nt_bytes_sliced,), **pedantic_cfg) + print_throughput(benchmark, limit_statements, "triples: parse NT") + + +@pytest.mark.triples +def test_flat_triples_deserialize_jelly( + benchmark: BenchmarkFixture, + jelly_triples_bytes: bytes, + pedantic_cfg: dict[str, int], + limit_statements: int, 
+) -> None: + benchmark.pedantic( + parse_jelly_triples_bytes, args=(jelly_triples_bytes,), **pedantic_cfg + ) + print_throughput(benchmark, limit_statements, "triples: parse Jelly") + + +@pytest.mark.quads +def test_flat_quads_deserialize_nq( + benchmark: BenchmarkFixture, + nq_bytes_sliced: bytes, + pedantic_cfg: dict[str, int], + limit_statements: int, +) -> None: + benchmark.pedantic(parse_nq_bytes, args=(nq_bytes_sliced,), **pedantic_cfg) + print_throughput(benchmark, limit_statements, "quads: parse NQ") + + +@pytest.mark.quads +def test_flat_quads_deserialize_jelly( + benchmark: BenchmarkFixture, + jelly_quads_bytes: bytes, + pedantic_cfg: dict[str, int], + limit_statements: int, +) -> None: + benchmark.pedantic( + parse_jelly_quads_bytes, args=(jelly_quads_bytes,), **pedantic_cfg + ) + print_throughput(benchmark, limit_statements, "quads: parse Jelly") diff --git a/tests/benchmark_tests/test_flat_serialize.py b/tests/benchmark_tests/test_flat_serialize.py new file mode 100644 index 00000000..ae28f9cc --- /dev/null +++ b/tests/benchmark_tests/test_flat_serialize.py @@ -0,0 +1,64 @@ +from __future__ import annotations + +import pytest +from pytest_benchmark.fixture import BenchmarkFixture # type: ignore[import-not-found] +from rdflib import Dataset, Graph + +from tests.benchmark_tests.jelly_rdflib import ( + serialize_jelly_quads_stream, + serialize_jelly_triples_stream, + serialize_nq_stream, + serialize_nt_stream, +) +from tests.utils.benchmark_throughput import print_throughput + +pytest.importorskip( + "pytest_benchmark", + reason="Install bench dependency group and run with -m benchmark", +) + +pytestmark = pytest.mark.benchmark + + +@pytest.mark.triples +def test_flat_triples_serialize_nt( + benchmark: BenchmarkFixture, + nt_graph: Graph, + pedantic_cfg: dict[str, int], + limit_statements: int, +) -> None: + benchmark.pedantic(serialize_nt_stream, args=(nt_graph,), **pedantic_cfg) + print_throughput(benchmark, limit_statements, "triples: serialize NT") + + +@pytest.mark.triples +def test_flat_triples_serialize_jelly( + benchmark: BenchmarkFixture, + nt_graph: Graph, + pedantic_cfg: dict[str, int], + limit_statements: int, +) -> None: + benchmark.pedantic(serialize_jelly_triples_stream, args=(nt_graph,), **pedantic_cfg) + print_throughput(benchmark, limit_statements, "triples: serialize Jelly") + + +@pytest.mark.quads +def test_flat_quads_serialize_nq( + benchmark: BenchmarkFixture, + nq_dataset: Dataset, + pedantic_cfg: dict[str, int], + limit_statements: int, +) -> None: + benchmark.pedantic(serialize_nq_stream, args=(nq_dataset,), **pedantic_cfg) + print_throughput(benchmark, limit_statements, "quads: serialize NQ") + + +@pytest.mark.quads +def test_flat_quads_serialize_jelly( + benchmark: BenchmarkFixture, + nq_dataset: Dataset, + pedantic_cfg: dict[str, int], + limit_statements: int, +) -> None: + benchmark.pedantic(serialize_jelly_quads_stream, args=(nq_dataset,), **pedantic_cfg) + print_throughput(benchmark, limit_statements, "quads: serialize Jelly") diff --git a/tests/utils/__init__.py b/tests/utils/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/tests/utils/benchmark_io_utils.py b/tests/utils/benchmark_io_utils.py new file mode 100644 index 00000000..c09a40ae --- /dev/null +++ b/tests/utils/benchmark_io_utils.py @@ -0,0 +1,16 @@ +from __future__ import annotations + +import io + + +class NullCounter(io.RawIOBase): + def __init__(self) -> None: + self.n = 0 + + def writable(self) -> bool: + return True + + def write(self, b: bytes) -> int: # type: 
ignore[override] + ln = len(b) + self.n += ln + return ln diff --git a/tests/utils/benchmark_throughput.py b/tests/utils/benchmark_throughput.py new file mode 100644 index 00000000..5f586d51 --- /dev/null +++ b/tests/utils/benchmark_throughput.py @@ -0,0 +1,31 @@ +from __future__ import annotations + +import logging + +from pytest_benchmark.fixture import BenchmarkFixture # type: ignore[import-not-found] + +log = logging.getLogger(__name__) + + +def print_throughput(bench: BenchmarkFixture, n_statements: int, label: str) -> None: + mean_s = None + st = getattr(bench, "stats", None) + if isinstance(st, dict): + mean_s = st.get("stats", {}).get("mean", None) + else: + inner = getattr(st, "stats", None) + if isinstance(inner, dict): + mean_s = inner.get("mean", None) + + if mean_s and mean_s > 0: + tps = n_statements / mean_s + bench.extra_info["statements"] = n_statements + bench.extra_info["mean_seconds"] = mean_s + bench.extra_info["throughput_statements_per_sec"] = tps + log.info( + "[%s] N=%s mean=%.6fs throughput=%.2f stmts/s", + label, + f"{n_statements:,}", + mean_s, + tps, + ) diff --git a/uv.lock b/uv.lock index dfac5600..86747c31 100644 --- a/uv.lock +++ b/uv.lock @@ -1212,6 +1212,15 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/50/1b/6921afe68c74868b4c9fa424dad3be35b095e16687989ebbb50ce4fceb7c/psutil-7.0.0-cp37-abi3-win_amd64.whl", hash = "sha256:4cf3d4eb1aa9b348dec30105c55cd9b7d4629285735a102beb4441e38db90553", size = 244885, upload-time = "2025-02-13T21:54:37.486Z" }, ] +[[package]] +name = "py-cpuinfo" +version = "9.0.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/37/a8/d832f7293ebb21690860d2e01d8115e5ff6f2ae8bbdc953f0eb0fa4bd2c7/py-cpuinfo-9.0.0.tar.gz", hash = "sha256:3cdbbf3fac90dc6f118bfd64384f309edeadd902d7c8fb17f02ffa1fc3f49690", size = 104716, upload-time = "2022-10-25T20:38:06.303Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/e0/a9/023730ba63db1e494a271cb018dcd361bd2c917ba7004c3e49d5daf795a2/py_cpuinfo-9.0.0-py3-none-any.whl", hash = "sha256:859625bc251f64e21f077d099d4162689c762b5d6a4c3c97553d56241c9674d5", size = 22335, upload-time = "2022-10-25T20:38:27.636Z" }, +] + [[package]] name = "pydantic" version = "2.11.4" @@ -1348,6 +1357,10 @@ rdflib = [ ] [package.dev-dependencies] +bench = [ + { name = "pytest-benchmark" }, + { name = "rdflib" }, +] ci = [ { name = "cibuildwheel", marker = "python_full_version >= '3.11'" }, ] @@ -1404,6 +1417,10 @@ requires-dist = [ provides-extras = ["rdflib"] [package.metadata.requires-dev] +bench = [ + { name = "pytest-benchmark", specifier = ">=5.2.1" }, + { name = "rdflib", specifier = ">=7.1.4" }, +] ci = [{ name = "cibuildwheel", marker = "python_full_version >= '3.11'", specifier = ">=3.1.0,<4" }] dev = [ { name = "mypy", specifier = ">=1.13.0" }, @@ -1502,6 +1519,19 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/23/4e/7677b01b773f9aa1b77ad1aa55c601cc8763966d691e7a341eaca1ffc09e/pytest_accept-0.1.12-py3-none-any.whl", hash = "sha256:189ccac80113920966c98ce4d6d1bfbe1b22f39b7e97a5c3a46c68db2ca84415", size = 15511, upload-time = "2024-12-08T22:57:01.785Z" }, ] +[[package]] +name = "pytest-benchmark" +version = "5.2.3" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "py-cpuinfo" }, + { name = "pytest" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/24/34/9f732b76456d64faffbef6232f1f9dbec7a7c4999ff46282fa418bd1af66/pytest_benchmark-5.2.3.tar.gz", hash = 
"sha256:deb7317998a23c650fd4ff76e1230066a76cb45dcece0aca5607143c619e7779", size = 341340, upload-time = "2025-11-09T18:48:43.215Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/33/29/e756e715a48959f1c0045342088d7ca9762a2f509b945f362a316e9412b7/pytest_benchmark-5.2.3-py3-none-any.whl", hash = "sha256:bc839726ad20e99aaa0d11a127445457b4219bdb9e80a1afc4b51da7f96b0803", size = 45255, upload-time = "2025-11-09T18:48:39.765Z" }, +] + [[package]] name = "pytest-cov" version = "6.2.1"