From 13ba9e8081ab75e33a27fcfaa26316d8507a25e8 Mon Sep 17 00:00:00 2001
From: agentson
Date: Fri, 27 Feb 2026 08:41:56 +0900
Subject: [PATCH] fix: validate execution assumption ranges in backtest model

---
 src/analysis/backtest_execution_model.py | 10 ++++++++
 tests/test_backtest_execution_model.py   | 32 ++++++++++++++++++++++++
 2 files changed, 42 insertions(+)

diff --git a/src/analysis/backtest_execution_model.py b/src/analysis/backtest_execution_model.py
index f4911f1..24798dc 100644
--- a/src/analysis/backtest_execution_model.py
+++ b/src/analysis/backtest_execution_model.py
@@ -3,6 +3,7 @@
 from __future__ import annotations
 
 from dataclasses import dataclass
+import math
 from random import Random
 from typing import Literal
 
@@ -47,6 +48,15 @@ class BacktestExecutionModel:
             raise ValueError("partial fill ratios must be within (0,1]")
         if assumptions.partial_fill_min_ratio > assumptions.partial_fill_max_ratio:
             raise ValueError("partial_fill_min_ratio must be <= partial_fill_max_ratio")
+        for sess, bps in assumptions.slippage_bps_by_session.items():
+            if not math.isfinite(bps) or bps < 0:
+                raise ValueError(f"slippage_bps must be finite and >= 0 for session={sess}")
+        for sess, rate in assumptions.failure_rate_by_session.items():
+            if not math.isfinite(rate) or rate < 0 or rate > 1:
+                raise ValueError(f"failure_rate must be in [0,1] for session={sess}")
+        for sess, rate in assumptions.partial_fill_rate_by_session.items():
+            if not math.isfinite(rate) or rate < 0 or rate > 1:
+                raise ValueError(f"partial_fill_rate must be in [0,1] for session={sess}")
 
     def simulate(self, request: ExecutionRequest) -> ExecutionResult:
         if request.qty <= 0:
diff --git a/tests/test_backtest_execution_model.py b/tests/test_backtest_execution_model.py
index aa0f41f..fb2fa58 100644
--- a/tests/test_backtest_execution_model.py
+++ b/tests/test_backtest_execution_model.py
@@ -74,3 +74,35 @@ def test_partial_fill_applies_when_rate_is_one() -> None:
     assert out.status == "PARTIAL"
     assert out.filled_qty == 4
     assert out.avg_price == 100.0
+
+
+@pytest.mark.parametrize("bad_slip", [-1.0, float("nan"), float("inf")])
+def test_invalid_slippage_is_rejected(bad_slip: float) -> None:
+    with pytest.raises(ValueError, match="slippage_bps"):
+        BacktestExecutionModel(
+            ExecutionAssumptions(
+                slippage_bps_by_session={"US_PRE": bad_slip},
+                failure_rate_by_session={"US_PRE": 0.0},
+                partial_fill_rate_by_session={"US_PRE": 0.0},
+            )
+        )
+
+
+@pytest.mark.parametrize("bad_rate", [-0.1, 1.1, float("nan")])
+def test_invalid_failure_or_partial_rates_are_rejected(bad_rate: float) -> None:
+    with pytest.raises(ValueError, match="failure_rate"):
+        BacktestExecutionModel(
+            ExecutionAssumptions(
+                slippage_bps_by_session={"US_PRE": 10.0},
+                failure_rate_by_session={"US_PRE": bad_rate},
+                partial_fill_rate_by_session={"US_PRE": 0.0},
+            )
+        )
+    with pytest.raises(ValueError, match="partial_fill_rate"):
+        BacktestExecutionModel(
+            ExecutionAssumptions(
+                slippage_bps_by_session={"US_PRE": 10.0},
+                failure_rate_by_session={"US_PRE": 0.0},
+                partial_fill_rate_by_session={"US_PRE": bad_rate},
+            )
+        )