From 33b4be3fd8447e9674ad47b595e2c99a2120143d Mon Sep 17 00:00:00 2001 From: Mariusz Gumowski Date: Tue, 9 Sep 2025 08:43:21 +0200 Subject: [PATCH] Performance metrics --- .github/workflows/pre_commit.yml | 2 +- docs/source/guides/index.md | 1 + docs/source/guides/performance_metrics.md | 269 ++++++++++++++++++ examples/metrics/README.md | 99 +++++++ examples/metrics/benchmark.py | 98 +++++++ pyproject.toml | 6 + src/model_api/metrics/__init__.py | 12 + src/model_api/metrics/performance.py | 156 ++++++++++ src/model_api/metrics/time_stat.py | 82 ++++++ src/model_api/models/model.py | 37 ++- src/model_api/performance_metrics.py | 144 ---------- src/model_api/pipelines/async_pipeline.py | 29 +- tests/unit/metrics/test_performancemetrics.py | 252 ++++++++++++++++ tests/unit/metrics/test_timestat.py | 62 ++++ uv.lock | 108 ++++++- 15 files changed, 1189 insertions(+), 168 deletions(-) create mode 100644 docs/source/guides/performance_metrics.md create mode 100644 examples/metrics/README.md create mode 100644 examples/metrics/benchmark.py create mode 100644 src/model_api/metrics/__init__.py create mode 100644 src/model_api/metrics/performance.py create mode 100644 src/model_api/metrics/time_stat.py delete mode 100644 src/model_api/performance_metrics.py create mode 100644 tests/unit/metrics/test_performancemetrics.py create mode 100644 tests/unit/metrics/test_timestat.py diff --git a/.github/workflows/pre_commit.yml b/.github/workflows/pre_commit.yml index fe42ade5..66ede247 100644 --- a/.github/workflows/pre_commit.yml +++ b/.github/workflows/pre_commit.yml @@ -55,4 +55,4 @@ jobs: uv sync --locked --extra tests --extra ovms - name: Run python unit tests run: | - uv run pytest tests/unit + uv run pytest tests/unit --cov diff --git a/docs/source/guides/index.md b/docs/source/guides/index.md index dbeb801b..80081099 100644 --- a/docs/source/guides/index.md +++ b/docs/source/guides/index.md @@ -5,4 +5,5 @@ :hidden: ./model-configuration +./performance_metrics ``` diff 
--git a/docs/source/guides/performance_metrics.md b/docs/source/guides/performance_metrics.md new file mode 100644 index 00000000..6280028d --- /dev/null +++ b/docs/source/guides/performance_metrics.md @@ -0,0 +1,269 @@ +# Performance Metrics + +The Model API provides comprehensive performance monitoring capabilities through the `PerformanceMetrics` class. This allows you to measure and analyze the performance of the model inference pipeline, including detailed timing information for each stage of the inference process. + +## Overview + +Performance metrics are automatically collected during model inference and include information for: + +- **Model loading time**: Time spent loading the model to the inference device +- **Preprocessing time**: Time spent on input data preprocessing +- **Inference time**: Time spent on actual model inference on the device +- **Postprocessing time**: Time spent on output data postprocessing +- **Total time**: Overall time for the complete inference pipeline +- **Total minimum time**: Overall minimum time for the complete inference pipeline +- **Total maximum time**: Overall maximum time for the complete inference pipeline +- **Total frames**: Total number of inferences +- **FPS**: Frames Per Second + +Each metric provides statistical information including mean, standard deviation, and individual measurements. + +## Basic Usage + +### Accessing Performance Metrics + +Every model instance automatically collects performance metrics. 
You can access them using the `get_performance_metrics()` method: + +```python +from model_api.models import Model +import cv2 + +# Create a model +model = Model.create_model("path/to/your/model.xml") + +# Perform inference +image = cv2.imread("path/to/image.jpg") +result = model(image) + +# Get performance metrics +metrics = model.get_performance_metrics() +``` + +### Logging Performance Metrics + +The simplest way to view performance metrics is to use the built-in logging method: + +```python +# Configure logging +logging.basicConfig(level=logging.INFO, format='%(message)s') + +# Log all performance metrics to console +metrics.log_metrics() +``` + +This will output detailed performance information: + +```bash +============================================================ + 🚀 PERFORMANCE METRICS REPORT 🚀 +============================================================ + +📊 Model Loading: + Load Time: 2.497s + +⚙️ Processing Times (mean ± std): + Preprocess: 0.001s ± 0.000s + Inference: 0.570s ± 0.020s + Postprocess: 0.001s ± 0.000s + +📈 Total Time Statistics: + Mean: 0.572s ± 0.020s + Min: 0.556s + Max: 0.642s + +🎯 Performance Summary: + Total Frames: 100 + FPS: 1.75 +============================================================ +``` + +## Detailed Metrics Access + +### Individual Timing Statistics + +You can access individual timing statistics for more detailed analysis: + +```python +# Get specific timing statistics +load_time = metrics.get_load_time() +preprocess_time = metrics.get_preprocess_time() +inference_time = metrics.get_inference_time() +postprocess_time = metrics.get_postprocess_time() +total_time = metrics.get_total_time() +total_min_time = metrics.get_total_time_min() +total_max_time = metrics.get_total_time_max() + +# Access statistical information +print(f"Mean inference time: {inference_time.mean():.3f} seconds") +print(f"Standard deviation: {inference_time.stddev():.3f} seconds") +print(f"Total inference time: {inference_time.time:.3f} seconds") 
+print(f"Number of inferences: {inference_time.count}") +``` + +### Frame Rate and Throughput + +```python +# Get frames per second and total frame count +fps = metrics.get_fps() +total_frames = metrics.get_total_frames() + +print(f"Processed {total_frames} frames at {fps:.2f} FPS") +``` + +## Advanced Usage + +### Batch Processing Performance + +When processing multiple inputs, performance metrics accumulate across all inferences: + +```python +import cv2 +from model_api.models import DetectionModel + +model = DetectionModel.create_model("path/to/detection/model.xml") + +# Process multiple images +images = ["image1.jpg", "image2.jpg", "image3.jpg"] +for image_path in images: + image = cv2.imread(image_path) + result = model(image) + +# Get accumulated metrics for all inferences +metrics = model.get_performance_metrics() +metrics.log_metrics() +``` + +### Performance Monitoring During Inference + +```python +import cv2 +from model_api.models import ClassificationModel + +model = ClassificationModel.create_model("efficientnet-b0-pytorch") +image = cv2.imread("test_image.jpg") + +# Run multiple inferences and monitor performance +for i in range(100): + result = model(image) + + # Check performance every 10 inferences + if (i + 1) % 10 == 0: + metrics = model.get_performance_metrics() + print(f"After {i + 1} inferences:") + print(f" Mean inference time: {metrics.get_inference_time().mean():.3f}s") + print(f" Current FPS: {metrics.get_fps():.2f}") +``` + +## Performance Optimization Tips + +### Analyzing Bottlenecks + +Use performance metrics to identify bottlenecks in inference pipeline: + +```python +metrics = model.get_performance_metrics() + +preprocess_time = metrics.get_preprocess_time().mean() +inference_time = metrics.get_inference_time().mean() +postprocess_time = metrics.get_postprocess_time().mean() + +print("Time breakdown:") +print(f" Preprocessing: {preprocess_time:.3f}s ({preprocess_time/total:.1%})") +print(f" Inference: {inference_time:.3f}s 
({inference_time/total:.1%})") +print(f" Postprocessing: {postprocess_time:.3f}s ({postprocess_time/total:.1%})") + +total = preprocess_time + inference_time + postprocess_time +``` + +### Warm-up Considerations + +The first few inferences may be slower due to system warm-up. Consider excluding them from performance analysis: + +```python +# Warm-up inferences +for _ in range(5): + model(image) + +# Reset metrics after warm-up +model.get_performance_metrics().reset() + +# Now measure actual performance +for _ in range(100): + model(image) + +metrics = model.get_performance_metrics() +metrics.log_metrics() +``` + +## Best Practices + +1. **Warm-up Period**: Always include a warm-up period before measuring performance for production benchmarks. + +2. **Multiple Runs**: Collect metrics over multiple inference runs to get statistically significant results. + +3. **Reset Between Tests**: Reset metrics when comparing different configurations or models. + +4. **Monitor All Stages**: Pay attention to all pipeline stages (preprocessing, inference, postprocessing) to identify bottlenecks. + +5. **Environment Consistency**: Ensure consistent testing conditions (device state, background processes, etc.) when comparing performance. 
+ +## Example: Complete Performance Analysis + +```python +import cv2 +from model_api.models import DetectionModel + +def analyze_model_performance(model_path, test_images, warmup_runs=5, test_runs=100): + """Complete performance analysis example.""" + + # Load model + model = DetectionModel.create_model(model_path) + + # Load test image + image = cv2.imread(test_images[0]) + + print("Starting warm-up...") + # Warm-up runs + for _ in range(warmup_runs): + model(image) + + # Reset metrics after warm-up + model.get_performance_metrics().reset() + + print(f"Running {test_runs} test inferences...") + # Performance measurement runs + for i, image_path in enumerate(test_images[:test_runs]): + image = cv2.imread(image_path) + result = model(image) + + # Log progress + if (i + 1) % 10 == 0: + print(f" Completed {i + 1}/{test_runs}") + + # Analyze results + metrics = model.get_performance_metrics() + + print("\n" + "="*50) + print("PERFORMANCE ANALYSIS RESULTS") + print("="*50) + + metrics.log_metrics() + + # Additional analysis + inference_time = metrics.get_inference_time() + print(f"\nInference time analysis:") + print(f" Minimum: {min(inference_time.durations):.3f}s") + print(f" Maximum: {max(inference_time.durations):.3f}s") + print(f" Median: {sorted(inference_time.durations)[len(inference_time.durations)//2]:.3f}s") + + return metrics + +# Usage +if __name__ == "__main__": + model_path = "path/to/your/model.xml" + test_images = ["image1.jpg", "image2.jpg", "image3.jpg"] # Add more images + + metrics = analyze_model_performance(model_path, test_images) +``` + +This comprehensive performance monitoring system helps optimize model inference pipeline and ensure optimal performance in production deployments. 
diff --git a/examples/metrics/README.md b/examples/metrics/README.md new file mode 100644 index 00000000..3a8532f9 --- /dev/null +++ b/examples/metrics/README.md @@ -0,0 +1,99 @@ +# Benchmark - a metrics API example + +This example demonstrates how to use the Python API of OpenVINO Model API for performance analysis and metrics collection during model inference. This tutorial includes the following features: + +- Model performance measurement +- Configurable device selection (CPU, GPU, etc.) +- Automatic image dataset discovery +- Warm-up and test runs with customizable parameters +- Detailed inference time analysis +- Metrics logging and reporting +- Performance statistics calculation + +## Prerequisites + +Install Model API from source. Please refer to the main [README](../../../README.md) for details. + +## Run example + +To run the example, please execute the following command: + +```bash +python benchmark.py [options] +``` + +### Required Arguments + +- `model_path` - Path to the model file (.xml) +- `dataset_path` - Path to the dataset directory containing test images + +### Optional Arguments + +- `--device` - Device to run the model on (default: CPU) +- `--warmup-runs` - Number of warmup runs (default: 5) +- `--test-runs` - Number of test runs (default: 100) + +### Examples + +```bash +# Basic usage with CPU +python benchmark.py /path/to/model.xml /path/to/images + +# Use GPU with custom parameters +python benchmark.py /path/to/model.xml /path/to/images --device GPU --warmup-runs 10 --test-runs 50 + +# Show help +python benchmark.py --help +``` + +## Expected Output + +The example will display: + +- Number of images found in the dataset directory +- Progress updates during warm-up and test phases +- Comprehensive performance analysis results including timing statistics +- Detailed metrics about the model's inference performance on the specified device + +Example output + +```bash +OpenVINO Runtime + build: 2025.2.0-19140-c01cd93e24d-releases/2025/2 +Reading 
model model.xml +The model model.xml is loaded to CPU + Number of model infer requests: 2 +Starting warm-up... +Running 100 test inferences... + Completed 10/100 + Completed 20/100 + Completed 30/100 + Completed 40/100 + Completed 50/100 + Completed 60/100 + Completed 70/100 + Completed 80/100 + Completed 90/100 + Completed 100/100 +============================================================ + 🚀 PERFORMANCE METRICS REPORT 🚀 +============================================================ + +📊 Model Loading: + Load Time: 2.497s + +⚙️ Processing Times (mean ± std): + Preprocess: 0.001s ± 0.000s + Inference: 0.570s ± 0.020s + Postprocess: 0.001s ± 0.000s + +📈 Total Time Statistics: + Mean: 0.572s ± 0.020s + Min: 0.556s + Max: 0.642s + +🎯 Performance Summary: + Total Frames: 100 + FPS: 1.75 +============================================================ +``` diff --git a/examples/metrics/benchmark.py b/examples/metrics/benchmark.py new file mode 100644 index 00000000..6c896527 --- /dev/null +++ b/examples/metrics/benchmark.py @@ -0,0 +1,98 @@ +#!/usr/bin/env python3 +# +# Copyright (C) 2020-2024 Intel Corporation +# SPDX-License-Identifier: Apache-2.0 +# +import argparse +import logging +import sys +from pathlib import Path + +import cv2 + +from model_api.models import Model + +logging.basicConfig( + level=logging.INFO, + format="%(message)s", +) + + +def get_image_files(dataset_path: str) -> list[str]: + """Get list of image files from the dataset directory.""" + image_extensions = ["*.jpg", "*.jpeg", "*.png", "*.bmp", "*.tiff", "*.tif"] + test_images: list[str] = [] + test_path = Path(dataset_path) + + for ext in image_extensions: + test_images.extend(str(p) for p in test_path.glob(ext)) + test_images.extend(str(p) for p in test_path.glob(ext.upper())) + + return test_images + + +def analyze_model_performance(model_path, test_images, device, warmup_runs, test_runs): + """Complete performance analysis example.""" + + # Load model + model = Model.create_model(model_path, 
device=device) + + # Load test image + image = cv2.imread(test_images[0]) + + print("Starting warm-up...") + # Warm-up runs + for _ in range(warmup_runs): + model(image) + + # Reset metrics after warm-up + model.get_performance_metrics().reset() + + print(f"Running {test_runs} test inferences...") + # Performance measurement runs + for i, image_path in enumerate(test_images[:test_runs]): + image = cv2.imread(image_path) + model(image) + # Log progress + if (i + 1) % 10 == 0: + print(f" Completed {i + 1}/{test_runs}") + + # Analyze results + metrics = model.get_performance_metrics() + metrics.log_metrics() + + return metrics + + +def main(): + parser = argparse.ArgumentParser(description="Benchmark - a model performance analysis with metrics collection") + parser.add_argument("model_path", help="Path to the model file (.xml)") + parser.add_argument("dataset_path", help="Path to the dataset directory containing test images") + parser.add_argument("--device", type=str, default="CPU", help="OpenVINO device to run the model on (default: CPU)") + parser.add_argument("--warmup-runs", type=int, default=5, help="Number of warmup runs (default: 5)") + parser.add_argument("--test-runs", type=int, default=100, help="Number of test runs (default: 100)") + + # Show help if no arguments are provided + if len(sys.argv) == 1: + parser.print_help() + return + + args = parser.parse_args() + + model_path = args.model_path + dataset_path = args.dataset_path + + # Get list of image files from the directory + test_images = get_image_files(dataset_path) + + print(f"Found {len(test_images)} images in {dataset_path}") + + if not test_images: + print("Error: No images found in the dataset directory!") + exit(1) + + analyze_model_performance(model_path, test_images, args.device, args.warmup_runs, args.test_runs) + + +if __name__ == "__main__": + main() diff --git a/pyproject.toml b/pyproject.toml index 2cd594e6..501d7ef6 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -39,6 +39,7 @@ tests 
= [ "pre-commit", "httpx", "pytest", + "pytest-cov", "pytest-mock", "ultralytics>=8.0.114,<=8.0.205", "onnx", @@ -246,3 +247,8 @@ notice-rgx = """ [tool.bandit] skips = ["B101", "B310"] + +[tool.coverage.report] +# Fail if total coverage is below 40% +fail_under = 40 +show_missing = true diff --git a/src/model_api/metrics/__init__.py b/src/model_api/metrics/__init__.py new file mode 100644 index 00000000..48200f42 --- /dev/null +++ b/src/model_api/metrics/__init__.py @@ -0,0 +1,12 @@ +# +# Copyright (C) 2025 Intel Corporation +# SPDX-License-Identifier: Apache-2.0 +# + +from .performance import PerformanceMetrics +from .time_stat import TimeStat + +__all__ = [ + "PerformanceMetrics", + "TimeStat", +] diff --git a/src/model_api/metrics/performance.py b/src/model_api/metrics/performance.py new file mode 100644 index 00000000..3504ee17 --- /dev/null +++ b/src/model_api/metrics/performance.py @@ -0,0 +1,156 @@ +# +# Copyright (C) 2025 Intel Corporation +# SPDX-License-Identifier: Apache-2.0 +# + +import logging + +from .time_stat import TimeStat + +logger = logging.getLogger(__name__) + + +class PerformanceMetrics: + """ + A class to represent performance metrics for a model. + """ + + def __init__(self): + """ + Initializes performance metrics. + """ + self.load_time = TimeStat() + self.preprocess_time = TimeStat() + self.inference_time = TimeStat() + self.postprocess_time = TimeStat() + self.total_time = TimeStat() + + def __add__(self, other): + """ + Adds two PerformanceMetrics objects. 
+ """ + if not isinstance(other, PerformanceMetrics): + return NotImplemented + + new_metrics = PerformanceMetrics() + new_metrics.load_time = self.load_time + other.load_time + new_metrics.preprocess_time = self.preprocess_time + other.preprocess_time + new_metrics.inference_time = self.inference_time + other.inference_time + new_metrics.postprocess_time = self.postprocess_time + other.postprocess_time + return new_metrics + + def reset(self) -> None: + """ + Resets performance metrics to the initial state. + """ + self.preprocess_time.reset() + self.inference_time.reset() + self.postprocess_time.reset() + self.total_time.reset() + + def get_load_time(self) -> TimeStat: + """ + Returns the load time statistics. + + Returns: + TimeStat: Load time statistics object. + """ + return self.load_time + + def get_preprocess_time(self) -> TimeStat: + """ + Returns the preprocessing time statistics. + + Returns: + TimeStat: Preprocessing time statistics object. + """ + return self.preprocess_time + + def get_inference_time(self) -> TimeStat: + """ + Returns the inference time statistics. + + Returns: + TimeStat: Inference time statistics object. + """ + return self.inference_time + + def get_postprocess_time(self) -> TimeStat: + """ + Returns the postprocessing time statistics. + + Returns: + TimeStat: Postprocessing time statistics object. + """ + return self.postprocess_time + + def get_total_frames(self) -> int: + """ + Returns the total number of frames processed. + + Returns: + int: Total number of frames processed. + """ + return len(self.total_time.durations) + + def get_fps(self) -> float: + """ + Returns the Frames Per Second (FPS) statistics. + + Returns: + float: Frames Per Second. + """ + return self.get_total_frames() / sum(self.total_time.durations) if sum(self.total_time.durations) > 0 else 0.0 + + def get_total_time_min(self) -> float: + """ + Returns the minimum total time for processing a frame. + + Returns: + float: Minimum total time in seconds. 
+ """ + return min(self.total_time.durations) if self.total_time.durations else 0.0 + + def get_total_time_max(self) -> float: + """ + Returns the maximum total time for processing a frame. + + Returns: + float: Maximum total time in seconds. + """ + return max(self.total_time.durations) if self.total_time.durations else 0.0 + + def log_metrics(self) -> None: + """ + Logs all performance metrics using the logging module. + """ + # Create the metrics report as a multi-line string + report_lines = [ + "", + "=" * 60, + "🚀 PERFORMANCE METRICS REPORT 🚀".center(60), + "=" * 60, + "", + "📊 Model Loading:", + f" Load Time: {self.load_time.mean():.3f}s", + "", + "⚙️ Processing Times (mean ± std):", + f" Preprocess: {self.preprocess_time.mean():.3f}s ± {self.preprocess_time.stddev():.3f}s", + f" Inference: {self.inference_time.mean():.3f}s ± {self.inference_time.stddev():.3f}s", + f" Postprocess: {self.postprocess_time.mean():.3f}s ± {self.postprocess_time.stddev():.3f}s", + "", + "📈 Total Time Statistics:", + f" Mean: {self.total_time.mean():.3f}s ± {self.total_time.stddev():.3f}s", + f" Min: {self.get_total_time_min():.3f}s", + f" Max: {self.get_total_time_max():.3f}s", + "", + "🎯 Performance Summary:", + f" Total Frames: {self.get_total_frames():,}", + f" FPS: {self.get_fps():.2f}", + "", + "=" * 60, + "", + ] + + # Log the entire report as a single info message + logger.info("\n".join(report_lines)) diff --git a/src/model_api/metrics/time_stat.py b/src/model_api/metrics/time_stat.py new file mode 100644 index 00000000..02657118 --- /dev/null +++ b/src/model_api/metrics/time_stat.py @@ -0,0 +1,82 @@ +# +# Copyright (C) 2025 Intel Corporation +# SPDX-License-Identifier: Apache-2.0 +# + +from time import perf_counter + + +class TimeStat: + """ + A class to represent a statistical time metric. + """ + + def __init__(self): + """ + Initializes the TimeStat object. 
+ """ + self.time = 0.0 + self.durations = [] + self.count = 0 + self.last_update_time = None + + def __add__(self, other): + """ + Adds two TimeStat objects. + + Returns: + TimeStat: A new TimeStat object representing the sum of the two. + """ + if not isinstance(other, TimeStat): + return NotImplemented + + new_stat = TimeStat() + new_stat.time = self.time + other.time + new_stat.durations = self.durations + other.durations + new_stat.count = self.count + other.count + return new_stat + + def update(self) -> None: + """ + Updates the statistics with the latest duration. + """ + time = perf_counter() + if self.last_update_time: + diff = time - self.last_update_time + self.time += diff + self.durations.append(diff) + self.count += 1 + self.last_update_time = None + else: + self.last_update_time = time + + def reset(self) -> None: + """ + Resets the statistics to their initial state. + """ + self.time = 0.0 + self.durations = [] + self.count = 0 + self.last_update_time = None + + def mean(self) -> float: + """ + Calculates the mean of the recorded durations. + + Returns: + float: The mean of the recorded durations. + """ + return self.time / self.count if self.count != 0 else 0.0 + + def stddev(self) -> float: + """ + Calculates the standard deviation of the recorded durations. + + Returns: + float: The standard deviation of the recorded durations. 
+ """ + if self.count == 0: + return 0.0 + mean = self.mean() + variance = sum((x - mean) ** 2 for x in self.durations) / self.count + return variance**0.5 diff --git a/src/model_api/models/model.py b/src/model_api/models/model.py index 47168dac..2007720a 100644 --- a/src/model_api/models/model.py +++ b/src/model_api/models/model.py @@ -1,5 +1,5 @@ # -# Copyright (C) 2020-2024 Intel Corporation +# Copyright (C) 2020-2025 Intel Corporation # SPDX-License-Identifier: Apache-2.0 # @@ -17,6 +17,7 @@ get_user_config, ) from model_api.adapters.ovms_adapter import OVMSAdapter +from model_api.metrics import PerformanceMetrics if TYPE_CHECKING: from os import PathLike @@ -73,6 +74,7 @@ def __init__(self, inference_adapter: InferenceAdapter, configuration: dict = {} WrapperError: if the wrapper configuration is incorrect """ self.logger = log.getLogger() + self.perf = PerformanceMetrics() self.inference_adapter = inference_adapter if isinstance( self.inference_adapter, @@ -106,6 +108,15 @@ def get_model(self) -> Any: """ return self.inference_adapter.get_model() + def get_performance_metrics(self) -> PerformanceMetrics: + """ + Returns performance metrics of the model. + + Returns: + PerformanceMetrics: Performance metrics object. 
+ """ + return self.perf + @classmethod def get_model_class(cls, name: str) -> Type: """ @@ -397,9 +408,18 @@ def __call__(self, inputs: ndarray): Returns: - postprocessed data in the format defined by wrapper """ + self.perf.total_time.update() + self.perf.preprocess_time.update() dict_data, input_meta = self.preprocess(inputs) + self.perf.preprocess_time.update() + self.perf.inference_time.update() raw_result = self.infer_sync(dict_data) - return self.postprocess(raw_result, input_meta) + self.perf.inference_time.update() + self.perf.postprocess_time.update() + result = self.postprocess(raw_result, input_meta) + self.perf.postprocess_time.update() + self.perf.total_time.update() + return result def infer_batch(self, inputs: list) -> list[Any]: """Applies preprocessing, asynchronous inference, postprocessing routines to a collection of inputs. @@ -443,7 +463,9 @@ def load(self, force: bool = False) -> None: """ if not self.model_loaded or force: self.model_loaded = True + self.perf.load_time.update() self.inference_adapter.load_model() + self.perf.load_time.update() def reshape(self, new_shape: dict): """ @@ -504,10 +526,15 @@ def infer_async(self, input_data: dict, user_data: Any): "The model is not loaded to the device. Please, create the wrapper " "with preload=True option or call load() method before infer_async()", ) + self.perf.total_time.update() + self.perf.preprocess_time.update() dict_data, meta = self.preprocess(input_data) + self.perf.preprocess_time.update() + self.perf.inference_time.update() self.inference_adapter.infer_async( dict_data, ( + self, meta, self.inference_adapter.get_raw_result, self.postprocess, @@ -521,9 +548,13 @@ def _process_callback(request, callback_data: Any): """ A wrapper for async inference callback. 
""" - meta, get_result_fn, postprocess_fn, callback_fn, user_data = callback_data + model, meta, get_result_fn, postprocess_fn, callback_fn, user_data = callback_data raw_result = get_result_fn(request) + model.perf.inference_time.update() + model.perf.postprocess_time.update() result = postprocess_fn(raw_result, meta) + model.perf.postprocess_time.update() + model.perf.total_time.update() callback_fn(result, user_data) def set_callback(self, callback_fn: Callable): diff --git a/src/model_api/performance_metrics.py b/src/model_api/performance_metrics.py deleted file mode 100644 index e3c52a42..00000000 --- a/src/model_api/performance_metrics.py +++ /dev/null @@ -1,144 +0,0 @@ -# -# Copyright (C) 2020-2024 Intel Corporation -# SPDX-License-Identifier: Apache-2.0 -# - -import logging as log -from time import perf_counter - -import cv2 - - -def put_highlighted_text( - frame, - message, - position, - font_face, - font_scale, - color, - thickness, -): - cv2.putText( - frame, - message, - position, - font_face, - font_scale, - (255, 255, 255), - thickness + 1, - ) # white border - cv2.putText(frame, message, position, font_face, font_scale, color, thickness) - - -class Statistic: - def __init__(self): - self.latency = 0.0 - self.period = 0.0 - self.frame_count = 0 - - def combine(self, other): - self.latency += other.latency - self.period += other.period - self.frame_count += other.frame_count - - -class PerformanceMetrics: - def __init__(self, time_window=1.0): - # 'time_window' defines the length of the timespan over which the 'current fps' value is calculated - self.time_window_size = time_window - self.last_moving_statistic = Statistic() - self.current_moving_statistic = Statistic() - self.total_statistic = Statistic() - self.last_update_time = None - - def update(self, last_request_start_time, frame=None): - current_time = perf_counter() - - if self.last_update_time is None: - self.last_update_time = last_request_start_time - - self.current_moving_statistic.latency 
+= current_time - last_request_start_time - self.current_moving_statistic.period = current_time - self.last_update_time - self.current_moving_statistic.frame_count += 1 - - if current_time - self.last_update_time > self.time_window_size: - self.last_moving_statistic = self.current_moving_statistic - self.total_statistic.combine(self.last_moving_statistic) - self.current_moving_statistic = Statistic() - self.last_update_time = current_time - - if frame is not None: - self.paint_metrics(frame) - - def paint_metrics( - self, - frame, - position=(15, 30), - font_scale=0.75, - color=(200, 10, 10), - thickness=2, - ): - # Draw performance stats over frame - current_latency, current_fps = self.get_last() - if current_latency is not None: - put_highlighted_text( - frame, - f"Latency: {current_latency * 1e3:.1f} ms", - position, - cv2.FONT_HERSHEY_COMPLEX, - font_scale, - color, - thickness, - ) - if current_fps is not None: - put_highlighted_text( - frame, - f"FPS: {current_fps:.1f}", - (position[0], position[1] + 30), - cv2.FONT_HERSHEY_COMPLEX, - font_scale, - color, - thickness, - ) - - def get_last(self): - return ( - ( - self.last_moving_statistic.latency / self.last_moving_statistic.frame_count - if self.last_moving_statistic.frame_count != 0 - else None - ), - ( - self.last_moving_statistic.frame_count / self.last_moving_statistic.period - if self.last_moving_statistic.period != 0.0 - else None - ), - ) - - def get_total(self): - frame_count = self.total_statistic.frame_count + self.current_moving_statistic.frame_count - return ( - ( - ((self.total_statistic.latency + self.current_moving_statistic.latency) / frame_count) - if frame_count != 0 - else None - ), - ( - (frame_count / (self.total_statistic.period + self.current_moving_statistic.period)) - if frame_count != 0 - else None - ), - ) - - def get_latency(self): - return self.get_total()[0] * 1e3 - - def log_total(self): - total_latency, total_fps = self.get_total() - log.info("Metrics report:") - log.info( - 
f"\tLatency: {total_latency * 1e3:.1f} ms" if total_latency is not None else "\tLatency: N/A", - ) - log.info( - f"\tFPS: {total_fps:.1f}" if total_fps is not None else "\tFPS: N/A", - ) diff --git a/src/model_api/pipelines/async_pipeline.py b/src/model_api/pipelines/async_pipeline.py index 83dc974c..3bbd0651 100644 --- a/src/model_api/pipelines/async_pipeline.py +++ b/src/model_api/pipelines/async_pipeline.py @@ -1,12 +1,8 @@ # -# Copyright (C) 2020-2024 Intel Corporation +# Copyright (C) 2020-2025 Intel Corporation # SPDX-License-Identifier: Apache-2.0 # -from time import perf_counter - -from model_api.performance_metrics import PerformanceMetrics - class AsyncPipeline: def __init__(self, model): @@ -17,29 +13,24 @@ def __init__(self, model): self.callback_exceptions = [] self.model.inference_adapter.set_callback(self.callback) - self.preprocess_metrics = PerformanceMetrics() - self.inference_metrics = PerformanceMetrics() - self.postprocess_metrics = PerformanceMetrics() - def callback(self, request, callback_args): try: - id, meta, preprocessing_meta, start_time = callback_args + id, meta, preprocessing_meta = callback_args self.completed_results[id] = ( self.model.inference_adapter.copy_raw_result(request), meta, preprocessing_meta, - start_time, ) except Exception as e: # noqa: BLE001 TODO: Figure out the exact exception that might be raised self.callback_exceptions.append(e) def submit_data(self, inputs, id, meta={}): - preprocessing_start_time = perf_counter() + self.model.perf.preprocess_time.update() inputs, preprocessing_meta = self.model.preprocess(inputs) - self.preprocess_metrics.update(preprocessing_start_time) + self.model.perf.preprocess_time.update() - infer_start_time = perf_counter() - callback_data = id, meta, preprocessing_meta, infer_start_time + self.model.perf.inference_time.update() + callback_data = id, meta, preprocessing_meta self.model.infer_async_raw(inputs, callback_data) def get_raw_result(self, id): @@ -50,10 +41,10 @@ def 
get_raw_result(self, id): def get_result(self, id): result = self.get_raw_result(id) if result: - raw_result, meta, preprocess_meta, infer_start_time = result - self.inference_metrics.update(infer_start_time) + raw_result, meta, preprocess_meta = result + self.model.perf.inference_time.update() - postprocessing_start_time = perf_counter() + self.model.perf.postprocess_time.update() result = ( self.model.postprocess(raw_result, preprocess_meta), { @@ -61,7 +52,7 @@ def get_result(self, id): **preprocess_meta, }, ) - self.postprocess_metrics.update(postprocessing_start_time) + self.model.perf.postprocess_time.update() return result return None diff --git a/tests/unit/metrics/test_performancemetrics.py b/tests/unit/metrics/test_performancemetrics.py new file mode 100644 index 00000000..b95c6bf7 --- /dev/null +++ b/tests/unit/metrics/test_performancemetrics.py @@ -0,0 +1,252 @@ +# +# Copyright (C) 2025 Intel Corporation +# SPDX-License-Identifier: Apache-2.0 +# + +import unittest +from unittest.mock import MagicMock, patch + +from model_api.metrics import PerformanceMetrics, TimeStat + + +class TestPerformanceMetrics(unittest.TestCase): + def setUp(self): + """Set up test fixtures before each test method.""" + self.metrics = PerformanceMetrics() + + def test_initial_state(self): + """Test that PerformanceMetrics initializes with correct default state.""" + assert isinstance(self.metrics.load_time, TimeStat) + assert isinstance(self.metrics.preprocess_time, TimeStat) + assert isinstance(self.metrics.inference_time, TimeStat) + assert isinstance(self.metrics.postprocess_time, TimeStat) + assert isinstance(self.metrics.total_time, TimeStat) + + assert self.metrics.load_time.time == 0.0 + assert self.metrics.preprocess_time.time == 0.0 + assert self.metrics.inference_time.time == 0.0 + assert self.metrics.postprocess_time.time == 0.0 + assert self.metrics.total_time.time == 0.0 + assert self.metrics.get_total_frames() == 0 + assert self.metrics.get_fps() == 0.0 + + def 
test_reset(self): + """Test that reset resets all time statistics except load time.""" + # Simulate some time measurements by directly setting values + self.metrics.load_time.time = 1.0 + self.metrics.preprocess_time.time = 2.0 + self.metrics.inference_time.time = 3.0 + self.metrics.postprocess_time.time = 4.0 + self.metrics.total_time.time = 5.0 + + self.metrics.load_time.durations = [1.0] + self.metrics.preprocess_time.durations = [2.0] + self.metrics.inference_time.durations = [3.0] + self.metrics.postprocess_time.durations = [4.0] + self.metrics.total_time.durations = [5.0] + + # Reset and verify all except load time are back to initial state + self.metrics.reset() + + assert self.metrics.load_time.time == 1.0 + assert self.metrics.preprocess_time.time == 0.0 + assert self.metrics.inference_time.time == 0.0 + assert self.metrics.postprocess_time.time == 0.0 + assert self.metrics.total_time.time == 0.0 + assert self.metrics.get_total_frames() == 0 + assert self.metrics.get_fps() == 0.0 + assert self.metrics.get_total_time_min() == 0.0 + assert self.metrics.get_total_time_max() == 0.0 + + assert self.metrics.load_time.durations == [1.0] + assert self.metrics.preprocess_time.durations == [] + assert self.metrics.inference_time.durations == [] + assert self.metrics.postprocess_time.durations == [] + assert self.metrics.total_time.durations == [] + + def test_get_load_time(self): + """Test get_load_time method returns the correct TimeStat object.""" + self.metrics.load_time.time = 1.23 + load_time = self.metrics.get_load_time() + assert load_time is self.metrics.load_time + assert isinstance(load_time, TimeStat) + assert load_time.time == 1.23 + + def test_get_preprocess_time(self): + """Test get_preprocess_time method returns the correct TimeStat object.""" + self.metrics.preprocess_time.time = 2.34 + preprocess_time = self.metrics.get_preprocess_time() + assert preprocess_time is self.metrics.preprocess_time + assert isinstance(preprocess_time, TimeStat) + assert preprocess_time.time == 
2.34 + + def test_get_inference_time(self): + """Test get_inference_time method returns the correct TimeStat object.""" + self.metrics.inference_time.time = 3.45 + inference_time = self.metrics.get_inference_time() + assert inference_time is self.metrics.inference_time + assert isinstance(inference_time, TimeStat) + assert inference_time.time == 3.45 + + def test_get_postprocess_time(self): + """Test get_postprocess_time method returns the correct TimeStat object.""" + self.metrics.postprocess_time.time = 4.56 + postprocess_time = self.metrics.get_postprocess_time() + assert postprocess_time is self.metrics.postprocess_time + assert isinstance(postprocess_time, TimeStat) + assert postprocess_time.time == 4.56 + + def test_get_total_frames_empty(self): + """Test get_total_frames returns 0 when no frames processed.""" + assert self.metrics.get_total_frames() == 0 + + def test_get_total_frames_with_data(self): + """Test get_total_frames returns correct count when frames are processed.""" + self.metrics.total_time.durations = [1.0, 2.0, 3.0] + assert self.metrics.get_total_frames() == 3 + + def test_get_fps_no_data(self): + """Test get_fps returns 0.0 when no frames processed.""" + assert self.metrics.get_fps() == 0.0 + + def test_get_fps_with_data(self): + """Test get_fps calculates correctly when frames are processed.""" + self.metrics.total_time.durations = [1.0, 2.0, 3.0] + expected_fps = 3 / 6.0 + assert abs(self.metrics.get_fps() - expected_fps) < 1e-7 + + def test_get_fps_zero_total_time(self): + """Test get_fps returns 0.0 when total time is zero.""" + self.metrics.total_time.durations = [0.0, 0.0] + assert self.metrics.get_fps() == 0.0 + + def test_add_valid_metrics(self): + metrics1 = PerformanceMetrics() + metrics2 = PerformanceMetrics() + + # Set up some mock data + metrics1.load_time.time = 1.0 + metrics1.load_time.durations = [1.0] + metrics1.preprocess_time.time = 2.0 + metrics1.preprocess_time.durations = [2.0] + metrics1.inference_time.time = 3.0 + 
metrics1.inference_time.durations = [3.0] + metrics1.postprocess_time.time = 4.0 + metrics1.postprocess_time.durations = [4.0] + + metrics2.load_time.time = 0.5 + metrics2.load_time.durations = [0.5] + metrics2.preprocess_time.time = 1.5 + metrics2.preprocess_time.durations = [1.5] + metrics2.inference_time.time = 2.5 + metrics2.inference_time.durations = [2.5] + metrics2.postprocess_time.time = 3.5 + metrics2.postprocess_time.durations = [3.5] + + result = metrics1 + metrics2 + + assert isinstance(result, PerformanceMetrics) + assert result.load_time.time == 1.5 + assert result.preprocess_time.time == 3.5 + assert result.inference_time.time == 5.5 + assert result.postprocess_time.time == 7.5 + assert result.load_time.durations == [1.0, 0.5] + assert result.preprocess_time.durations == [2.0, 1.5] + assert result.inference_time.durations == [3.0, 2.5] + assert result.postprocess_time.durations == [4.0, 3.5] + + def test_add_invalid_type(self): + """Test adding PerformanceMetrics with invalid type returns NotImplemented.""" + result = self.metrics.__add__(42) + assert result == NotImplemented + + result = self.metrics.__add__("invalid") + assert result == NotImplemented + + result = self.metrics.__add__(None) + assert result == NotImplemented + + def test_add_missing_total_time_in_result(self): + """Test that addition doesn't include total_time in the result.""" + metrics1 = PerformanceMetrics() + metrics2 = PerformanceMetrics() + + metrics1.total_time.time = 10.0 + metrics1.total_time.durations = [10.0] + metrics2.total_time.time = 5.0 + metrics2.total_time.durations = [5.0] + + result = metrics1 + metrics2 + + assert result.total_time.time == 0.0 + assert result.total_time.durations == [] + + @patch("model_api.metrics.performance.logger") + def test_log_metrics_empty(self, mock_logger): + """Test log_metrics with empty metrics.""" + self.metrics.log_metrics() + + # Verify logger.info was called once + mock_logger.info.assert_called_once() + + # Get the logged 
content and verify it contains expected metrics + logged_content = mock_logger.info.call_args[0][0] + + assert "🚀 PERFORMANCE METRICS REPORT 🚀" in logged_content + assert "Load Time: 0.000s" in logged_content + assert "Preprocess: 0.000s ± 0.000s" in logged_content + assert "Inference: 0.000s ± 0.000s" in logged_content + assert "Postprocess: 0.000s ± 0.000s" in logged_content + assert "Mean: 0.000s ± 0.000s" in logged_content + assert "Min: 0.000s" in logged_content + assert "Max: 0.000s" in logged_content + assert "Total Frames: 0" in logged_content + assert "FPS: 0.00" in logged_content + + @patch("model_api.metrics.performance.logger") + def test_log_metrics_with_data(self, mock_logger): + """Test log_metrics with actual data.""" + self.metrics.load_time.mean = MagicMock(return_value=1.234) + self.metrics.preprocess_time.mean = MagicMock(return_value=2.345) + self.metrics.preprocess_time.stddev = MagicMock(return_value=0.123) + self.metrics.inference_time.mean = MagicMock(return_value=3.456) + self.metrics.inference_time.stddev = MagicMock(return_value=0.234) + self.metrics.postprocess_time.mean = MagicMock(return_value=4.567) + self.metrics.postprocess_time.stddev = MagicMock(return_value=0.345) + self.metrics.total_time.mean = MagicMock(return_value=10.123) + self.metrics.total_time.stddev = MagicMock(return_value=0.456) + self.metrics.total_time.durations = [1.0, 2.0, 3.0] # 3 frames + + with patch.object(self.metrics, "get_fps", return_value=12.34): + self.metrics.log_metrics() + + # Verify logger.info was called once + mock_logger.info.assert_called_once() + + # Get the logged content and verify it contains expected metrics + logged_content = mock_logger.info.call_args[0][0] + + assert "🚀 PERFORMANCE METRICS REPORT 🚀" in logged_content + assert "Load Time: 1.234s" in logged_content + assert "Preprocess: 2.345s ± 0.123s" in logged_content + assert "Inference: 3.456s ± 0.234s" in logged_content + assert "Postprocess: 4.567s ± 0.345s" in logged_content + 
assert "Mean: 10.123s ± 0.456s" in logged_content + assert "Min: 1.000s" in logged_content + assert "Max: 3.000s" in logged_content + assert "Total Frames: 3" in logged_content + assert "FPS: 12.34" in logged_content + + def test_integration_with_timestat(self): + """Test integration with actual TimeStat operations.""" + metrics = PerformanceMetrics() + + metrics.load_time.update() + metrics.load_time.update() + metrics.preprocess_time.update() + metrics.preprocess_time.update() + + assert len(metrics.load_time.durations) == 1 + assert len(metrics.preprocess_time.durations) == 1 + assert metrics.load_time.time > 0 + assert metrics.preprocess_time.time > 0 diff --git a/tests/unit/metrics/test_timestat.py b/tests/unit/metrics/test_timestat.py new file mode 100644 index 00000000..32289ba4 --- /dev/null +++ b/tests/unit/metrics/test_timestat.py @@ -0,0 +1,62 @@ +# +# Copyright (C) 2025 Intel Corporation +# SPDX-License-Identifier: Apache-2.0 +# + +import unittest + +from model_api.metrics import TimeStat + + +class TestTimeStat(unittest.TestCase): + def test_initial_state(self): + stat = TimeStat() + assert stat.time == 0.0 + assert stat.durations == [] + assert stat.count == 0 + assert stat.mean() == 0.0 + assert stat.stddev() == 0.0 + + def test_update_increments(self): + stat = TimeStat() + stat.update() + assert len(stat.durations) == 0 + stat.update() + assert len(stat.durations) == 1 + assert abs(stat.time - stat.durations[0]) < 1e-7 + + def test_reset(self): + stat = TimeStat() + stat.update() + stat.reset() + assert stat.time == 0.0 + assert stat.durations == [] + assert stat.count == 0 + + def test_mean(self): + stat = TimeStat() + for _ in range(3): + stat.update() + expected_mean = stat.time / stat.count + assert abs(stat.mean() - expected_mean) < 1e-7 + + def test_stddev(self): + stat = TimeStat() + for _ in range(5): + stat.update() + assert stat.stddev() >= 0.0 + + def test_add(self): + stat1 = TimeStat() + stat2 = TimeStat() + for _ in range(2): + 
stat1.update() + for _ in range(3): + stat2.update() + stat3 = stat1 + stat2 + assert stat3.time == stat1.time + stat2.time + assert stat3.count == stat1.count + stat2.count + + def test_add_invalid(self): + stat = TimeStat() + assert stat.__add__(42) == NotImplemented diff --git a/uv.lock b/uv.lock index 834483f9..0965ac86 100644 --- a/uv.lock +++ b/uv.lock @@ -1,5 +1,5 @@ version = 1 -revision = 3 +revision = 2 requires-python = ">=3.10" resolution-markers = [ "python_full_version >= '3.13' and sys_platform == 'darwin'", @@ -620,6 +620,96 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/0c/58/bd257695f39d05594ca4ad60df5bcb7e32247f9951fd09a9b8edb82d1daa/contourpy-1.3.3-pp311-pypy311_pp73-win_amd64.whl", hash = "sha256:3d1a3799d62d45c18bafd41c5fa05120b96a28079f2393af559b843d1a966a77", size = 225315, upload-time = "2025-07-26T12:02:58.801Z" }, ] +[[package]] +name = "coverage" +version = "7.10.6" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/14/70/025b179c993f019105b79575ac6edb5e084fb0f0e63f15cdebef4e454fb5/coverage-7.10.6.tar.gz", hash = "sha256:f644a3ae5933a552a29dbb9aa2f90c677a875f80ebea028e5a52a4f429044b90", size = 823736, upload-time = "2025-08-29T15:35:16.668Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/a8/1d/2e64b43d978b5bd184e0756a41415597dfef30fcbd90b747474bd749d45f/coverage-7.10.6-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:70e7bfbd57126b5554aa482691145f798d7df77489a177a6bef80de78860a356", size = 217025, upload-time = "2025-08-29T15:32:57.169Z" }, + { url = "https://files.pythonhosted.org/packages/23/62/b1e0f513417c02cc10ef735c3ee5186df55f190f70498b3702d516aad06f/coverage-7.10.6-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:e41be6f0f19da64af13403e52f2dec38bbc2937af54df8ecef10850ff8d35301", size = 217419, upload-time = "2025-08-29T15:32:59.908Z" }, + { url = 
"https://files.pythonhosted.org/packages/e7/16/b800640b7a43e7c538429e4d7223e0a94fd72453a1a048f70bf766f12e96/coverage-7.10.6-cp310-cp310-manylinux1_i686.manylinux_2_28_i686.manylinux_2_5_i686.whl", hash = "sha256:c61fc91ab80b23f5fddbee342d19662f3d3328173229caded831aa0bd7595460", size = 244180, upload-time = "2025-08-29T15:33:01.608Z" }, + { url = "https://files.pythonhosted.org/packages/fb/6f/5e03631c3305cad187eaf76af0b559fff88af9a0b0c180d006fb02413d7a/coverage-7.10.6-cp310-cp310-manylinux1_x86_64.manylinux_2_28_x86_64.manylinux_2_5_x86_64.whl", hash = "sha256:10356fdd33a7cc06e8051413140bbdc6f972137508a3572e3f59f805cd2832fd", size = 245992, upload-time = "2025-08-29T15:33:03.239Z" }, + { url = "https://files.pythonhosted.org/packages/eb/a1/f30ea0fb400b080730125b490771ec62b3375789f90af0bb68bfb8a921d7/coverage-7.10.6-cp310-cp310-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:80b1695cf7c5ebe7b44bf2521221b9bb8cdf69b1f24231149a7e3eb1ae5fa2fb", size = 247851, upload-time = "2025-08-29T15:33:04.603Z" }, + { url = "https://files.pythonhosted.org/packages/02/8e/cfa8fee8e8ef9a6bb76c7bef039f3302f44e615d2194161a21d3d83ac2e9/coverage-7.10.6-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:2e4c33e6378b9d52d3454bd08847a8651f4ed23ddbb4a0520227bd346382bbc6", size = 245891, upload-time = "2025-08-29T15:33:06.176Z" }, + { url = "https://files.pythonhosted.org/packages/93/a9/51be09b75c55c4f6c16d8d73a6a1d46ad764acca0eab48fa2ffaef5958fe/coverage-7.10.6-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:c8a3ec16e34ef980a46f60dc6ad86ec60f763c3f2fa0db6d261e6e754f72e945", size = 243909, upload-time = "2025-08-29T15:33:07.74Z" }, + { url = "https://files.pythonhosted.org/packages/e9/a6/ba188b376529ce36483b2d585ca7bdac64aacbe5aa10da5978029a9c94db/coverage-7.10.6-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:7d79dabc0a56f5af990cc6da9ad1e40766e82773c075f09cc571e2076fef882e", size = 244786, upload-time = "2025-08-29T15:33:08.965Z" }, + { 
url = "https://files.pythonhosted.org/packages/d0/4c/37ed872374a21813e0d3215256180c9a382c3f5ced6f2e5da0102fc2fd3e/coverage-7.10.6-cp310-cp310-win32.whl", hash = "sha256:86b9b59f2b16e981906e9d6383eb6446d5b46c278460ae2c36487667717eccf1", size = 219521, upload-time = "2025-08-29T15:33:10.599Z" }, + { url = "https://files.pythonhosted.org/packages/8e/36/9311352fdc551dec5b973b61f4e453227ce482985a9368305880af4f85dd/coverage-7.10.6-cp310-cp310-win_amd64.whl", hash = "sha256:e132b9152749bd33534e5bd8565c7576f135f157b4029b975e15ee184325f528", size = 220417, upload-time = "2025-08-29T15:33:11.907Z" }, + { url = "https://files.pythonhosted.org/packages/d4/16/2bea27e212c4980753d6d563a0803c150edeaaddb0771a50d2afc410a261/coverage-7.10.6-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:c706db3cabb7ceef779de68270150665e710b46d56372455cd741184f3868d8f", size = 217129, upload-time = "2025-08-29T15:33:13.575Z" }, + { url = "https://files.pythonhosted.org/packages/2a/51/e7159e068831ab37e31aac0969d47b8c5ee25b7d307b51e310ec34869315/coverage-7.10.6-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:8e0c38dc289e0508ef68ec95834cb5d2e96fdbe792eaccaa1bccac3966bbadcc", size = 217532, upload-time = "2025-08-29T15:33:14.872Z" }, + { url = "https://files.pythonhosted.org/packages/e7/c0/246ccbea53d6099325d25cd208df94ea435cd55f0db38099dd721efc7a1f/coverage-7.10.6-cp311-cp311-manylinux1_i686.manylinux_2_28_i686.manylinux_2_5_i686.whl", hash = "sha256:752a3005a1ded28f2f3a6e8787e24f28d6abe176ca64677bcd8d53d6fe2ec08a", size = 247931, upload-time = "2025-08-29T15:33:16.142Z" }, + { url = "https://files.pythonhosted.org/packages/7d/fb/7435ef8ab9b2594a6e3f58505cc30e98ae8b33265d844007737946c59389/coverage-7.10.6-cp311-cp311-manylinux1_x86_64.manylinux_2_28_x86_64.manylinux_2_5_x86_64.whl", hash = "sha256:689920ecfd60f992cafca4f5477d55720466ad2c7fa29bb56ac8d44a1ac2b47a", size = 249864, upload-time = "2025-08-29T15:33:17.434Z" }, + { url = 
"https://files.pythonhosted.org/packages/51/f8/d9d64e8da7bcddb094d511154824038833c81e3a039020a9d6539bf303e9/coverage-7.10.6-cp311-cp311-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:ec98435796d2624d6905820a42f82149ee9fc4f2d45c2c5bc5a44481cc50db62", size = 251969, upload-time = "2025-08-29T15:33:18.822Z" }, + { url = "https://files.pythonhosted.org/packages/43/28/c43ba0ef19f446d6463c751315140d8f2a521e04c3e79e5c5fe211bfa430/coverage-7.10.6-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:b37201ce4a458c7a758ecc4efa92fa8ed783c66e0fa3c42ae19fc454a0792153", size = 249659, upload-time = "2025-08-29T15:33:20.407Z" }, + { url = "https://files.pythonhosted.org/packages/79/3e/53635bd0b72beaacf265784508a0b386defc9ab7fad99ff95f79ce9db555/coverage-7.10.6-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:2904271c80898663c810a6b067920a61dd8d38341244a3605bd31ab55250dad5", size = 247714, upload-time = "2025-08-29T15:33:21.751Z" }, + { url = "https://files.pythonhosted.org/packages/4c/55/0964aa87126624e8c159e32b0bc4e84edef78c89a1a4b924d28dd8265625/coverage-7.10.6-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:5aea98383463d6e1fa4e95416d8de66f2d0cb588774ee20ae1b28df826bcb619", size = 248351, upload-time = "2025-08-29T15:33:23.105Z" }, + { url = "https://files.pythonhosted.org/packages/eb/ab/6cfa9dc518c6c8e14a691c54e53a9433ba67336c760607e299bfcf520cb1/coverage-7.10.6-cp311-cp311-win32.whl", hash = "sha256:e3fb1fa01d3598002777dd259c0c2e6d9d5e10e7222976fc8e03992f972a2cba", size = 219562, upload-time = "2025-08-29T15:33:24.717Z" }, + { url = "https://files.pythonhosted.org/packages/5b/18/99b25346690cbc55922e7cfef06d755d4abee803ef335baff0014268eff4/coverage-7.10.6-cp311-cp311-win_amd64.whl", hash = "sha256:f35ed9d945bece26553d5b4c8630453169672bea0050a564456eb88bdffd927e", size = 220453, upload-time = "2025-08-29T15:33:26.482Z" }, + { url = 
"https://files.pythonhosted.org/packages/d8/ed/81d86648a07ccb124a5cf1f1a7788712b8d7216b593562683cd5c9b0d2c1/coverage-7.10.6-cp311-cp311-win_arm64.whl", hash = "sha256:99e1a305c7765631d74b98bf7dbf54eeea931f975e80f115437d23848ee8c27c", size = 219127, upload-time = "2025-08-29T15:33:27.777Z" }, + { url = "https://files.pythonhosted.org/packages/26/06/263f3305c97ad78aab066d116b52250dd316e74fcc20c197b61e07eb391a/coverage-7.10.6-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:5b2dd6059938063a2c9fee1af729d4f2af28fd1a545e9b7652861f0d752ebcea", size = 217324, upload-time = "2025-08-29T15:33:29.06Z" }, + { url = "https://files.pythonhosted.org/packages/e9/60/1e1ded9a4fe80d843d7d53b3e395c1db3ff32d6c301e501f393b2e6c1c1f/coverage-7.10.6-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:388d80e56191bf846c485c14ae2bc8898aa3124d9d35903fef7d907780477634", size = 217560, upload-time = "2025-08-29T15:33:30.748Z" }, + { url = "https://files.pythonhosted.org/packages/b8/25/52136173c14e26dfed8b106ed725811bb53c30b896d04d28d74cb64318b3/coverage-7.10.6-cp312-cp312-manylinux1_i686.manylinux_2_28_i686.manylinux_2_5_i686.whl", hash = "sha256:90cb5b1a4670662719591aa92d0095bb41714970c0b065b02a2610172dbf0af6", size = 249053, upload-time = "2025-08-29T15:33:32.041Z" }, + { url = "https://files.pythonhosted.org/packages/cb/1d/ae25a7dc58fcce8b172d42ffe5313fc267afe61c97fa872b80ee72d9515a/coverage-7.10.6-cp312-cp312-manylinux1_x86_64.manylinux_2_28_x86_64.manylinux_2_5_x86_64.whl", hash = "sha256:961834e2f2b863a0e14260a9a273aff07ff7818ab6e66d2addf5628590c628f9", size = 251802, upload-time = "2025-08-29T15:33:33.625Z" }, + { url = "https://files.pythonhosted.org/packages/f5/7a/1f561d47743710fe996957ed7c124b421320f150f1d38523d8d9102d3e2a/coverage-7.10.6-cp312-cp312-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:bf9a19f5012dab774628491659646335b1928cfc931bf8d97b0d5918dd58033c", size = 252935, upload-time = "2025-08-29T15:33:34.909Z" }, + { url = 
"https://files.pythonhosted.org/packages/6c/ad/8b97cd5d28aecdfde792dcbf646bac141167a5cacae2cd775998b45fabb5/coverage-7.10.6-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:99c4283e2a0e147b9c9cc6bc9c96124de9419d6044837e9799763a0e29a7321a", size = 250855, upload-time = "2025-08-29T15:33:36.922Z" }, + { url = "https://files.pythonhosted.org/packages/33/6a/95c32b558d9a61858ff9d79580d3877df3eb5bc9eed0941b1f187c89e143/coverage-7.10.6-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:282b1b20f45df57cc508c1e033403f02283adfb67d4c9c35a90281d81e5c52c5", size = 248974, upload-time = "2025-08-29T15:33:38.175Z" }, + { url = "https://files.pythonhosted.org/packages/0d/9c/8ce95dee640a38e760d5b747c10913e7a06554704d60b41e73fdea6a1ffd/coverage-7.10.6-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:8cdbe264f11afd69841bd8c0d83ca10b5b32853263ee62e6ac6a0ab63895f972", size = 250409, upload-time = "2025-08-29T15:33:39.447Z" }, + { url = "https://files.pythonhosted.org/packages/04/12/7a55b0bdde78a98e2eb2356771fd2dcddb96579e8342bb52aa5bc52e96f0/coverage-7.10.6-cp312-cp312-win32.whl", hash = "sha256:a517feaf3a0a3eca1ee985d8373135cfdedfbba3882a5eab4362bda7c7cf518d", size = 219724, upload-time = "2025-08-29T15:33:41.172Z" }, + { url = "https://files.pythonhosted.org/packages/36/4a/32b185b8b8e327802c9efce3d3108d2fe2d9d31f153a0f7ecfd59c773705/coverage-7.10.6-cp312-cp312-win_amd64.whl", hash = "sha256:856986eadf41f52b214176d894a7de05331117f6035a28ac0016c0f63d887629", size = 220536, upload-time = "2025-08-29T15:33:42.524Z" }, + { url = "https://files.pythonhosted.org/packages/08/3a/d5d8dc703e4998038c3099eaf77adddb00536a3cec08c8dcd556a36a3eb4/coverage-7.10.6-cp312-cp312-win_arm64.whl", hash = "sha256:acf36b8268785aad739443fa2780c16260ee3fa09d12b3a70f772ef100939d80", size = 219171, upload-time = "2025-08-29T15:33:43.974Z" }, + { url = 
"https://files.pythonhosted.org/packages/bd/e7/917e5953ea29a28c1057729c1d5af9084ab6d9c66217523fd0e10f14d8f6/coverage-7.10.6-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:ffea0575345e9ee0144dfe5701aa17f3ba546f8c3bb48db62ae101afb740e7d6", size = 217351, upload-time = "2025-08-29T15:33:45.438Z" }, + { url = "https://files.pythonhosted.org/packages/eb/86/2e161b93a4f11d0ea93f9bebb6a53f113d5d6e416d7561ca41bb0a29996b/coverage-7.10.6-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:95d91d7317cde40a1c249d6b7382750b7e6d86fad9d8eaf4fa3f8f44cf171e80", size = 217600, upload-time = "2025-08-29T15:33:47.269Z" }, + { url = "https://files.pythonhosted.org/packages/0e/66/d03348fdd8df262b3a7fb4ee5727e6e4936e39e2f3a842e803196946f200/coverage-7.10.6-cp313-cp313-manylinux1_i686.manylinux_2_28_i686.manylinux_2_5_i686.whl", hash = "sha256:3e23dd5408fe71a356b41baa82892772a4cefcf758f2ca3383d2aa39e1b7a003", size = 248600, upload-time = "2025-08-29T15:33:48.953Z" }, + { url = "https://files.pythonhosted.org/packages/73/dd/508420fb47d09d904d962f123221bc249f64b5e56aa93d5f5f7603be475f/coverage-7.10.6-cp313-cp313-manylinux1_x86_64.manylinux_2_28_x86_64.manylinux_2_5_x86_64.whl", hash = "sha256:0f3f56e4cb573755e96a16501a98bf211f100463d70275759e73f3cbc00d4f27", size = 251206, upload-time = "2025-08-29T15:33:50.697Z" }, + { url = "https://files.pythonhosted.org/packages/e9/1f/9020135734184f439da85c70ea78194c2730e56c2d18aee6e8ff1719d50d/coverage-7.10.6-cp313-cp313-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:db4a1d897bbbe7339946ffa2fe60c10cc81c43fab8b062d3fcb84188688174a4", size = 252478, upload-time = "2025-08-29T15:33:52.303Z" }, + { url = "https://files.pythonhosted.org/packages/a4/a4/3d228f3942bb5a2051fde28c136eea23a761177dc4ff4ef54533164ce255/coverage-7.10.6-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:d8fd7879082953c156d5b13c74aa6cca37f6a6f4747b39538504c3f9c63d043d", size = 250637, upload-time = "2025-08-29T15:33:53.67Z" }, + { url 
= "https://files.pythonhosted.org/packages/36/e3/293dce8cdb9a83de971637afc59b7190faad60603b40e32635cbd15fbf61/coverage-7.10.6-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:28395ca3f71cd103b8c116333fa9db867f3a3e1ad6a084aa3725ae002b6583bc", size = 248529, upload-time = "2025-08-29T15:33:55.022Z" }, + { url = "https://files.pythonhosted.org/packages/90/26/64eecfa214e80dd1d101e420cab2901827de0e49631d666543d0e53cf597/coverage-7.10.6-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:61c950fc33d29c91b9e18540e1aed7d9f6787cc870a3e4032493bbbe641d12fc", size = 250143, upload-time = "2025-08-29T15:33:56.386Z" }, + { url = "https://files.pythonhosted.org/packages/3e/70/bd80588338f65ea5b0d97e424b820fb4068b9cfb9597fbd91963086e004b/coverage-7.10.6-cp313-cp313-win32.whl", hash = "sha256:160c00a5e6b6bdf4e5984b0ef21fc860bc94416c41b7df4d63f536d17c38902e", size = 219770, upload-time = "2025-08-29T15:33:58.063Z" }, + { url = "https://files.pythonhosted.org/packages/a7/14/0b831122305abcc1060c008f6c97bbdc0a913ab47d65070a01dc50293c2b/coverage-7.10.6-cp313-cp313-win_amd64.whl", hash = "sha256:628055297f3e2aa181464c3808402887643405573eb3d9de060d81531fa79d32", size = 220566, upload-time = "2025-08-29T15:33:59.766Z" }, + { url = "https://files.pythonhosted.org/packages/83/c6/81a83778c1f83f1a4a168ed6673eeedc205afb562d8500175292ca64b94e/coverage-7.10.6-cp313-cp313-win_arm64.whl", hash = "sha256:df4ec1f8540b0bcbe26ca7dd0f541847cc8a108b35596f9f91f59f0c060bfdd2", size = 219195, upload-time = "2025-08-29T15:34:01.191Z" }, + { url = "https://files.pythonhosted.org/packages/d7/1c/ccccf4bf116f9517275fa85047495515add43e41dfe8e0bef6e333c6b344/coverage-7.10.6-cp313-cp313t-macosx_10_13_x86_64.whl", hash = "sha256:c9a8b7a34a4de3ed987f636f71881cd3b8339f61118b1aa311fbda12741bff0b", size = 218059, upload-time = "2025-08-29T15:34:02.91Z" }, + { url = 
"https://files.pythonhosted.org/packages/92/97/8a3ceff833d27c7492af4f39d5da6761e9ff624831db9e9f25b3886ddbca/coverage-7.10.6-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:8dd5af36092430c2b075cee966719898f2ae87b636cefb85a653f1d0ba5d5393", size = 218287, upload-time = "2025-08-29T15:34:05.106Z" }, + { url = "https://files.pythonhosted.org/packages/92/d8/50b4a32580cf41ff0423777a2791aaf3269ab60c840b62009aec12d3970d/coverage-7.10.6-cp313-cp313t-manylinux1_i686.manylinux_2_28_i686.manylinux_2_5_i686.whl", hash = "sha256:b0353b0f0850d49ada66fdd7d0c7cdb0f86b900bb9e367024fd14a60cecc1e27", size = 259625, upload-time = "2025-08-29T15:34:06.575Z" }, + { url = "https://files.pythonhosted.org/packages/7e/7e/6a7df5a6fb440a0179d94a348eb6616ed4745e7df26bf2a02bc4db72c421/coverage-7.10.6-cp313-cp313t-manylinux1_x86_64.manylinux_2_28_x86_64.manylinux_2_5_x86_64.whl", hash = "sha256:d6b9ae13d5d3e8aeca9ca94198aa7b3ebbc5acfada557d724f2a1f03d2c0b0df", size = 261801, upload-time = "2025-08-29T15:34:08.006Z" }, + { url = "https://files.pythonhosted.org/packages/3a/4c/a270a414f4ed5d196b9d3d67922968e768cd971d1b251e1b4f75e9362f75/coverage-7.10.6-cp313-cp313t-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:675824a363cc05781b1527b39dc2587b8984965834a748177ee3c37b64ffeafb", size = 264027, upload-time = "2025-08-29T15:34:09.806Z" }, + { url = "https://files.pythonhosted.org/packages/9c/8b/3210d663d594926c12f373c5370bf1e7c5c3a427519a8afa65b561b9a55c/coverage-7.10.6-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:692d70ea725f471a547c305f0d0fc6a73480c62fb0da726370c088ab21aed282", size = 261576, upload-time = "2025-08-29T15:34:11.585Z" }, + { url = "https://files.pythonhosted.org/packages/72/d0/e1961eff67e9e1dba3fc5eb7a4caf726b35a5b03776892da8d79ec895775/coverage-7.10.6-cp313-cp313t-musllinux_1_2_i686.whl", hash = "sha256:851430a9a361c7a8484a36126d1d0ff8d529d97385eacc8dfdc9bfc8c2d2cbe4", size = 259341, upload-time = "2025-08-29T15:34:13.159Z" }, + 
{ url = "https://files.pythonhosted.org/packages/3a/06/d6478d152cd189b33eac691cba27a40704990ba95de49771285f34a5861e/coverage-7.10.6-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:d9369a23186d189b2fc95cc08b8160ba242057e887d766864f7adf3c46b2df21", size = 260468, upload-time = "2025-08-29T15:34:14.571Z" }, + { url = "https://files.pythonhosted.org/packages/ed/73/737440247c914a332f0b47f7598535b29965bf305e19bbc22d4c39615d2b/coverage-7.10.6-cp313-cp313t-win32.whl", hash = "sha256:92be86fcb125e9bda0da7806afd29a3fd33fdf58fba5d60318399adf40bf37d0", size = 220429, upload-time = "2025-08-29T15:34:16.394Z" }, + { url = "https://files.pythonhosted.org/packages/bd/76/b92d3214740f2357ef4a27c75a526eb6c28f79c402e9f20a922c295c05e2/coverage-7.10.6-cp313-cp313t-win_amd64.whl", hash = "sha256:6b3039e2ca459a70c79523d39347d83b73f2f06af5624905eba7ec34d64d80b5", size = 221493, upload-time = "2025-08-29T15:34:17.835Z" }, + { url = "https://files.pythonhosted.org/packages/fc/8e/6dcb29c599c8a1f654ec6cb68d76644fe635513af16e932d2d4ad1e5ac6e/coverage-7.10.6-cp313-cp313t-win_arm64.whl", hash = "sha256:3fb99d0786fe17b228eab663d16bee2288e8724d26a199c29325aac4b0319b9b", size = 219757, upload-time = "2025-08-29T15:34:19.248Z" }, + { url = "https://files.pythonhosted.org/packages/d3/aa/76cf0b5ec00619ef208da4689281d48b57f2c7fde883d14bf9441b74d59f/coverage-7.10.6-cp314-cp314-macosx_10_13_x86_64.whl", hash = "sha256:6008a021907be8c4c02f37cdc3ffb258493bdebfeaf9a839f9e71dfdc47b018e", size = 217331, upload-time = "2025-08-29T15:34:20.846Z" }, + { url = "https://files.pythonhosted.org/packages/65/91/8e41b8c7c505d398d7730206f3cbb4a875a35ca1041efc518051bfce0f6b/coverage-7.10.6-cp314-cp314-macosx_11_0_arm64.whl", hash = "sha256:5e75e37f23eb144e78940b40395b42f2321951206a4f50e23cfd6e8a198d3ceb", size = 217607, upload-time = "2025-08-29T15:34:22.433Z" }, + { url = 
"https://files.pythonhosted.org/packages/87/7f/f718e732a423d442e6616580a951b8d1ec3575ea48bcd0e2228386805e79/coverage-7.10.6-cp314-cp314-manylinux1_i686.manylinux_2_28_i686.manylinux_2_5_i686.whl", hash = "sha256:0f7cb359a448e043c576f0da00aa8bfd796a01b06aa610ca453d4dde09cc1034", size = 248663, upload-time = "2025-08-29T15:34:24.425Z" }, + { url = "https://files.pythonhosted.org/packages/e6/52/c1106120e6d801ac03e12b5285e971e758e925b6f82ee9b86db3aa10045d/coverage-7.10.6-cp314-cp314-manylinux1_x86_64.manylinux_2_28_x86_64.manylinux_2_5_x86_64.whl", hash = "sha256:c68018e4fc4e14b5668f1353b41ccf4bc83ba355f0e1b3836861c6f042d89ac1", size = 251197, upload-time = "2025-08-29T15:34:25.906Z" }, + { url = "https://files.pythonhosted.org/packages/3d/ec/3a8645b1bb40e36acde9c0609f08942852a4af91a937fe2c129a38f2d3f5/coverage-7.10.6-cp314-cp314-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:cd4b2b0707fc55afa160cd5fc33b27ccbf75ca11d81f4ec9863d5793fc6df56a", size = 252551, upload-time = "2025-08-29T15:34:27.337Z" }, + { url = "https://files.pythonhosted.org/packages/a1/70/09ecb68eeb1155b28a1d16525fd3a9b65fbe75337311a99830df935d62b6/coverage-7.10.6-cp314-cp314-musllinux_1_2_aarch64.whl", hash = "sha256:4cec13817a651f8804a86e4f79d815b3b28472c910e099e4d5a0e8a3b6a1d4cb", size = 250553, upload-time = "2025-08-29T15:34:29.065Z" }, + { url = "https://files.pythonhosted.org/packages/c6/80/47df374b893fa812e953b5bc93dcb1427a7b3d7a1a7d2db33043d17f74b9/coverage-7.10.6-cp314-cp314-musllinux_1_2_i686.whl", hash = "sha256:f2a6a8e06bbda06f78739f40bfb56c45d14eb8249d0f0ea6d4b3d48e1f7c695d", size = 248486, upload-time = "2025-08-29T15:34:30.897Z" }, + { url = "https://files.pythonhosted.org/packages/4a/65/9f98640979ecee1b0d1a7164b589de720ddf8100d1747d9bbdb84be0c0fb/coverage-7.10.6-cp314-cp314-musllinux_1_2_x86_64.whl", hash = "sha256:081b98395ced0d9bcf60ada7661a0b75f36b78b9d7e39ea0790bb4ed8da14747", size = 249981, upload-time = "2025-08-29T15:34:32.365Z" }, + { 
url = "https://files.pythonhosted.org/packages/1f/55/eeb6603371e6629037f47bd25bef300387257ed53a3c5fdb159b7ac8c651/coverage-7.10.6-cp314-cp314-win32.whl", hash = "sha256:6937347c5d7d069ee776b2bf4e1212f912a9f1f141a429c475e6089462fcecc5", size = 220054, upload-time = "2025-08-29T15:34:34.124Z" }, + { url = "https://files.pythonhosted.org/packages/15/d1/a0912b7611bc35412e919a2cd59ae98e7ea3b475e562668040a43fb27897/coverage-7.10.6-cp314-cp314-win_amd64.whl", hash = "sha256:adec1d980fa07e60b6ef865f9e5410ba760e4e1d26f60f7e5772c73b9a5b0713", size = 220851, upload-time = "2025-08-29T15:34:35.651Z" }, + { url = "https://files.pythonhosted.org/packages/ef/2d/11880bb8ef80a45338e0b3e0725e4c2d73ffbb4822c29d987078224fd6a5/coverage-7.10.6-cp314-cp314-win_arm64.whl", hash = "sha256:a80f7aef9535442bdcf562e5a0d5a5538ce8abe6bb209cfbf170c462ac2c2a32", size = 219429, upload-time = "2025-08-29T15:34:37.16Z" }, + { url = "https://files.pythonhosted.org/packages/83/c0/1f00caad775c03a700146f55536ecd097a881ff08d310a58b353a1421be0/coverage-7.10.6-cp314-cp314t-macosx_10_13_x86_64.whl", hash = "sha256:0de434f4fbbe5af4fa7989521c655c8c779afb61c53ab561b64dcee6149e4c65", size = 218080, upload-time = "2025-08-29T15:34:38.919Z" }, + { url = "https://files.pythonhosted.org/packages/a9/c4/b1c5d2bd7cc412cbeb035e257fd06ed4e3e139ac871d16a07434e145d18d/coverage-7.10.6-cp314-cp314t-macosx_11_0_arm64.whl", hash = "sha256:6e31b8155150c57e5ac43ccd289d079eb3f825187d7c66e755a055d2c85794c6", size = 218293, upload-time = "2025-08-29T15:34:40.425Z" }, + { url = "https://files.pythonhosted.org/packages/3f/07/4468d37c94724bf6ec354e4ec2f205fda194343e3e85fd2e59cec57e6a54/coverage-7.10.6-cp314-cp314t-manylinux1_i686.manylinux_2_28_i686.manylinux_2_5_i686.whl", hash = "sha256:98cede73eb83c31e2118ae8d379c12e3e42736903a8afcca92a7218e1f2903b0", size = 259800, upload-time = "2025-08-29T15:34:41.996Z" }, + { url = 
"https://files.pythonhosted.org/packages/82/d8/f8fb351be5fee31690cd8da768fd62f1cfab33c31d9f7baba6cd8960f6b8/coverage-7.10.6-cp314-cp314t-manylinux1_x86_64.manylinux_2_28_x86_64.manylinux_2_5_x86_64.whl", hash = "sha256:f863c08f4ff6b64fa8045b1e3da480f5374779ef187f07b82e0538c68cb4ff8e", size = 261965, upload-time = "2025-08-29T15:34:43.61Z" }, + { url = "https://files.pythonhosted.org/packages/e8/70/65d4d7cfc75c5c6eb2fed3ee5cdf420fd8ae09c4808723a89a81d5b1b9c3/coverage-7.10.6-cp314-cp314t-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:2b38261034fda87be356f2c3f42221fdb4171c3ce7658066ae449241485390d5", size = 264220, upload-time = "2025-08-29T15:34:45.387Z" }, + { url = "https://files.pythonhosted.org/packages/98/3c/069df106d19024324cde10e4ec379fe2fb978017d25e97ebee23002fbadf/coverage-7.10.6-cp314-cp314t-musllinux_1_2_aarch64.whl", hash = "sha256:0e93b1476b79eae849dc3872faeb0bf7948fd9ea34869590bc16a2a00b9c82a7", size = 261660, upload-time = "2025-08-29T15:34:47.288Z" }, + { url = "https://files.pythonhosted.org/packages/fc/8a/2974d53904080c5dc91af798b3a54a4ccb99a45595cc0dcec6eb9616a57d/coverage-7.10.6-cp314-cp314t-musllinux_1_2_i686.whl", hash = "sha256:ff8a991f70f4c0cf53088abf1e3886edcc87d53004c7bb94e78650b4d3dac3b5", size = 259417, upload-time = "2025-08-29T15:34:48.779Z" }, + { url = "https://files.pythonhosted.org/packages/30/38/9616a6b49c686394b318974d7f6e08f38b8af2270ce7488e879888d1e5db/coverage-7.10.6-cp314-cp314t-musllinux_1_2_x86_64.whl", hash = "sha256:ac765b026c9f33044419cbba1da913cfb82cca1b60598ac1c7a5ed6aac4621a0", size = 260567, upload-time = "2025-08-29T15:34:50.718Z" }, + { url = "https://files.pythonhosted.org/packages/76/16/3ed2d6312b371a8cf804abf4e14895b70e4c3491c6e53536d63fd0958a8d/coverage-7.10.6-cp314-cp314t-win32.whl", hash = "sha256:441c357d55f4936875636ef2cfb3bee36e466dcf50df9afbd398ce79dba1ebb7", size = 220831, upload-time = "2025-08-29T15:34:52.653Z" }, + { url = 
"https://files.pythonhosted.org/packages/d5/e5/d38d0cb830abede2adb8b147770d2a3d0e7fecc7228245b9b1ae6c24930a/coverage-7.10.6-cp314-cp314t-win_amd64.whl", hash = "sha256:073711de3181b2e204e4870ac83a7c4853115b42e9cd4d145f2231e12d670930", size = 221950, upload-time = "2025-08-29T15:34:54.212Z" }, + { url = "https://files.pythonhosted.org/packages/f4/51/e48e550f6279349895b0ffcd6d2a690e3131ba3a7f4eafccc141966d4dea/coverage-7.10.6-cp314-cp314t-win_arm64.whl", hash = "sha256:137921f2bac5559334ba66122b753db6dc5d1cf01eb7b64eb412bb0d064ef35b", size = 219969, upload-time = "2025-08-29T15:34:55.83Z" }, + { url = "https://files.pythonhosted.org/packages/44/0c/50db5379b615854b5cf89146f8f5bd1d5a9693d7f3a987e269693521c404/coverage-7.10.6-py3-none-any.whl", hash = "sha256:92c4ecf6bf11b2e85fd4d8204814dc26e6a19f0c9d938c207c5cb0eadfcabbe3", size = 208986, upload-time = "2025-08-29T15:35:14.506Z" }, +] + +[package.optional-dependencies] +toml = [ + { name = "tomli", marker = "python_full_version <= '3.11'" }, +] + [[package]] name = "cycler" version = "0.12.1" @@ -2170,6 +2260,7 @@ tests = [ { name = "onnxruntime" }, { name = "pre-commit" }, { name = "pytest" }, + { name = "pytest-cov" }, { name = "pytest-mock" }, { name = "ultralytics" }, ] @@ -2191,6 +2282,7 @@ requires-dist = [ { name = "pre-commit", marker = "extra == 'tests'" }, { name = "pydata-sphinx-theme", marker = "extra == 'docs'" }, { name = "pytest", marker = "extra == 'tests'" }, + { name = "pytest-cov", marker = "extra == 'tests'" }, { name = "pytest-mock", marker = "extra == 'tests'" }, { name = "scipy", specifier = ">=1.5.4" }, { name = "sphinx", marker = "extra == 'docs'" }, @@ -2643,6 +2735,20 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/a8/a4/20da314d277121d6534b3a980b29035dcd51e6744bd79075a6ce8fa4eb8d/pytest-8.4.2-py3-none-any.whl", hash = "sha256:872f880de3fc3a5bdc88a11b39c9710c3497a547cfa9320bc3c5e62fbf272e79", size = 365750, upload-time = "2025-09-04T14:34:20.226Z" }, ] +[[package]] +name = 
"pytest-cov" +version = "6.2.1" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "coverage", extra = ["toml"] }, + { name = "pluggy" }, + { name = "pytest" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/18/99/668cade231f434aaa59bbfbf49469068d2ddd945000621d3d165d2e7dd7b/pytest_cov-6.2.1.tar.gz", hash = "sha256:25cc6cc0a5358204b8108ecedc51a9b57b34cc6b8c967cc2c01a4e00d8a67da2", size = 69432, upload-time = "2025-06-12T10:47:47.684Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/bc/16/4ea354101abb1287856baa4af2732be351c7bee728065aed451b678153fd/pytest_cov-6.2.1-py3-none-any.whl", hash = "sha256:f5bc4c23f42f1cdd23c70b1dab1bbaef4fc505ba950d53e0081d0730dd7e86d5", size = 24644, upload-time = "2025-06-12T10:47:45.932Z" }, +] + [[package]] name = "pytest-mock" version = "3.15.0"