Compare commits
9 Commits
feature/is
...
feature/is
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
13ba9e8081 | ||
|
|
5b52f593a8 | ||
| 2798558bf3 | |||
|
|
2331d80915 | ||
|
|
7d72669cb8 | ||
| 74a4784b7a | |||
|
|
dc70311aed | ||
|
|
e56819e9e2 | ||
| cfd5351b58 |
@@ -149,6 +149,7 @@ TPM 티켓 운영 규칙:
|
||||
- TPM은 합의된 변경을 이슈로 등록하고 우선순위(`P0/P1/P2`)를 지정한다.
|
||||
- PR 본문에는 TPM이 지정한 우선순위와 범위가 그대로 반영되어야 한다.
|
||||
- 우선순위 변경은 TPM 제안 + Main Agent 승인으로만 가능하다.
|
||||
- PM/TPM/Dev/Reviewer/Verifier/Runtime Verifier는 주요 의사결정 시점마다 PR 코멘트를 남겨 결정 근거를 추적 가능 상태로 유지한다.
|
||||
|
||||
브랜치 운영 규칙:
|
||||
- TPM은 각 티켓에 대해 `ticket temp branch -> program feature branch` PR 경로를 지정한다.
|
||||
|
||||
@@ -50,6 +50,7 @@ Updated: 2026-02-26
|
||||
- PR 본문에 `REQ-*`, `TASK-*`, `TEST-*` 매핑 표 존재
|
||||
- `src/core/risk_manager.py` 변경 없음
|
||||
- 주요 의사결정 체크포인트(DCP-01~04) 중 해당 단계 Main Agent 확인 기록 존재
|
||||
- 주요 의사결정(리뷰 지적/수정 합의/검증 승인)에 대한 에이전트 PR 코멘트 존재
|
||||
- 티켓 PR의 base가 `main`이 아닌 program feature branch인지 확인
|
||||
|
||||
자동 점검:
|
||||
|
||||
@@ -22,6 +22,7 @@
|
||||
- Ticket-level development happens only on **ticket temp branches** cut from the program feature branch.
|
||||
- Ticket PR merges into program feature branch are allowed after verifier approval.
|
||||
- Until final user sign-off, `main` merge is prohibited.
|
||||
- 각 에이전트는 주요 의사결정(리뷰 지적, 수정 방향, 검증 승인)마다 PR 코멘트를 적극 작성해 의사결정 과정을 남긴다.
|
||||
|
||||
## Gitea CLI Formatting Troubleshooting
|
||||
|
||||
|
||||
52
src/analysis/backtest_cost_guard.py
Normal file
52
src/analysis/backtest_cost_guard.py
Normal file
@@ -0,0 +1,52 @@
|
||||
"""Backtest cost/slippage/failure validation guard."""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
from dataclasses import dataclass
|
||||
import math
|
||||
|
||||
|
||||
@dataclass(frozen=True)
class BacktestCostModel:
    """Declared cost/slippage/failure assumptions for a backtest run.

    Every field defaults to ``None`` (or the mandatory flag) so that an
    unspecified assumption is distinguishable from an explicit zero;
    :func:`validate_backtest_cost_model` rejects missing values.
    """

    # Flat commission in basis points; must be provided, finite, and >= 0.
    commission_bps: float | None = None
    # Per-session slippage assumption in basis points.
    slippage_bps_by_session: dict[str, float] | None = None
    # Per-session order-failure probability in [0, 1].
    failure_rate_by_session: dict[str, float] | None = None
    # Conservative-fill policy flag; validation requires it to remain True.
    unfavorable_fill_required: bool = True


def validate_backtest_cost_model(
    *,
    model: BacktestCostModel,
    required_sessions: list[str],
) -> None:
    """Raise ValueError when required cost assumptions are missing/invalid.

    Args:
        model: Declared cost assumptions to check.
        required_sessions: Session ids that must appear in both the
            slippage and failure-rate maps.

    Raises:
        ValueError: on a missing/negative/non-finite commission, a disabled
            unfavorable-fill flag, a required session absent from either
            per-session map, or an out-of-range per-session value.
    """
    commission = model.commission_bps
    if commission is None or not math.isfinite(commission) or commission < 0:
        raise ValueError("commission_bps must be provided and >= 0")
    if not model.unfavorable_fill_required:
        raise ValueError("unfavorable_fill_required must be True")

    session_maps = (
        ("slippage_bps_by_session", model.slippage_bps_by_session or {}),
        ("failure_rate_by_session", model.failure_rate_by_session or {}),
    )

    # Report missing sessions first (slippage before failure, matching the
    # declaration order above), then range-check the provided values.
    for label, mapping in session_maps:
        absent = [sess for sess in required_sessions if sess not in mapping]
        if absent:
            raise ValueError(f"missing {label} for sessions: {', '.join(absent)}")

    for sess, bps in session_maps[0][1].items():
        if bps < 0 or not math.isfinite(bps):
            raise ValueError(f"slippage bps must be >= 0 for session={sess}")
    for sess, rate in session_maps[1][1].items():
        if rate < 0 or rate > 1 or not math.isfinite(rate):
            raise ValueError(f"failure rate must be within [0,1] for session={sess}")
|
||||
103
src/analysis/backtest_execution_model.py
Normal file
103
src/analysis/backtest_execution_model.py
Normal file
@@ -0,0 +1,103 @@
|
||||
"""Conservative backtest execution model."""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
from dataclasses import dataclass
|
||||
import math
|
||||
from random import Random
|
||||
from typing import Literal
|
||||
|
||||
|
||||
OrderSide = Literal["BUY", "SELL"]


@dataclass(frozen=True)
class ExecutionRequest:
    """A single simulated order."""

    side: OrderSide
    session_id: str
    # Order quantity in units; must be positive.
    qty: int
    # Pre-slippage reference price; must be positive.
    reference_price: float


@dataclass(frozen=True)
class ExecutionAssumptions:
    """Per-session execution assumptions driving the simulator."""

    # Slippage in basis points per session; finite and >= 0.
    slippage_bps_by_session: dict[str, float]
    # Order rejection probability per session; in [0, 1].
    failure_rate_by_session: dict[str, float]
    # Partial-fill probability per session; in [0, 1].
    partial_fill_rate_by_session: dict[str, float]
    # Fill-ratio bounds used when a partial fill is drawn; within (0, 1].
    partial_fill_min_ratio: float = 0.3
    partial_fill_max_ratio: float = 0.8
    # RNG seed for deterministic simulations.
    seed: int = 0


@dataclass(frozen=True)
class ExecutionResult:
    """Outcome of one simulated execution."""

    status: Literal["FILLED", "PARTIAL", "REJECTED"]
    filled_qty: int
    avg_price: float
    slippage_bps: float
    reason: str


class BacktestExecutionModel:
    """Execution simulator with conservative unfavorable fill assumptions."""

    def __init__(self, assumptions: ExecutionAssumptions) -> None:
        """Validate the assumptions and seed the deterministic RNG.

        Raises:
            ValueError: if any ratio, rate, or slippage assumption is
                out of range or non-finite.
        """
        self.assumptions = assumptions
        self._rng = Random(assumptions.seed)
        min_ratio = assumptions.partial_fill_min_ratio
        max_ratio = assumptions.partial_fill_max_ratio
        # Reject NaN/inf explicitly: NaN compares False against both range
        # bounds, so the range checks alone would silently accept it.
        if not (math.isfinite(min_ratio) and math.isfinite(max_ratio)):
            raise ValueError("partial fill ratios must be within (0,1]")
        if min_ratio <= 0 or max_ratio > 1:
            raise ValueError("partial fill ratios must be within (0,1]")
        if min_ratio > max_ratio:
            raise ValueError("partial_fill_min_ratio must be <= partial_fill_max_ratio")
        for sess, bps in assumptions.slippage_bps_by_session.items():
            if not math.isfinite(bps) or bps < 0:
                raise ValueError(f"slippage_bps must be finite and >= 0 for session={sess}")
        for sess, rate in assumptions.failure_rate_by_session.items():
            if not math.isfinite(rate) or rate < 0 or rate > 1:
                raise ValueError(f"failure_rate must be in [0,1] for session={sess}")
        for sess, rate in assumptions.partial_fill_rate_by_session.items():
            if not math.isfinite(rate) or rate < 0 or rate > 1:
                raise ValueError(f"partial_fill_rate must be in [0,1] for session={sess}")

    def simulate(self, request: ExecutionRequest) -> ExecutionResult:
        """Simulate one order against the session's assumptions.

        Draws a failure check and then a partial-fill check from the seeded
        RNG, and always applies slippage in the direction unfavorable to the
        order side (BUY fills higher, SELL fills lower).

        Raises:
            ValueError: if qty or reference_price is not positive.
        """
        if request.qty <= 0:
            raise ValueError("qty must be positive")
        if request.reference_price <= 0:
            raise ValueError("reference_price must be positive")

        # Unknown sessions default to zero cost/failure; this is only safe
        # when the assumptions were validated to cover every traded session.
        slippage_bps = self.assumptions.slippage_bps_by_session.get(request.session_id, 0.0)
        failure_rate = self.assumptions.failure_rate_by_session.get(request.session_id, 0.0)
        partial_rate = self.assumptions.partial_fill_rate_by_session.get(request.session_id, 0.0)

        if self._rng.random() < failure_rate:
            return ExecutionResult(
                status="REJECTED",
                filled_qty=0,
                avg_price=0.0,
                slippage_bps=slippage_bps,
                reason="execution_failure",
            )

        slip_mult = 1.0 + (slippage_bps / 10000.0 if request.side == "BUY" else -slippage_bps / 10000.0)
        exec_price = request.reference_price * slip_mult

        # BUGFIX: a partial fill must leave at least one unit unfilled, so a
        # single-unit order can only be rejected or fully filled. Previously
        # qty == 1 returned status="PARTIAL" with filled_qty == qty. The RNG
        # is still drawn first so the random stream is unchanged for qty > 1.
        if self._rng.random() < partial_rate and request.qty > 1:
            ratio = self._rng.uniform(
                self.assumptions.partial_fill_min_ratio,
                self.assumptions.partial_fill_max_ratio,
            )
            # Clamp to [1, qty - 1]: a partial fill is never empty nor full.
            filled = max(1, min(request.qty - 1, int(request.qty * ratio)))
            return ExecutionResult(
                status="PARTIAL",
                filled_qty=filled,
                avg_price=exec_price,
                slippage_bps=slippage_bps,
                reason="partial_fill",
            )

        return ExecutionResult(
            status="FILLED",
            filled_qty=request.qty,
            avg_price=exec_price,
            slippage_bps=slippage_bps,
            reason="filled",
        )
|
||||
74
src/analysis/walk_forward_split.py
Normal file
74
src/analysis/walk_forward_split.py
Normal file
@@ -0,0 +1,74 @@
|
||||
"""Walk-forward splitter with purge/embargo controls."""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
from dataclasses import dataclass
|
||||
|
||||
|
||||
@dataclass(frozen=True)
class WalkForwardFold:
    """One chronological train/test split."""

    # Training indices; strictly precede the test window after purge and
    # embargo filtering (may contain a gap where an embargo was applied).
    train_indices: list[int]
    # Contiguous evaluation window.
    test_indices: list[int]

    @property
    def train_size(self) -> int:
        """Number of training samples in this fold."""
        return len(self.train_indices)

    @property
    def test_size(self) -> int:
        """Number of test samples in this fold."""
        return len(self.test_indices)


def generate_walk_forward_splits(
    *,
    n_samples: int,
    train_size: int,
    test_size: int,
    step_size: int | None = None,
    purge_size: int = 0,
    embargo_size: int = 0,
    min_train_size: int = 1,
) -> list[WalkForwardFold]:
    """Generate chronological folds with purge/embargo leakage controls.

    Args:
        n_samples: Total number of chronologically ordered samples.
        train_size: Maximum length of each training window.
        test_size: Length of each test window.
        step_size: Offset between consecutive test windows; defaults to
            ``test_size`` (non-overlapping test windows).
        purge_size: Samples dropped between train end and test start so
            boundary labels cannot leak into training.
        embargo_size: Samples immediately after an accepted test window
            that are excluded from later training windows.
        min_train_size: Folds whose filtered training window is smaller
            than this are skipped entirely.

    Returns:
        Accepted folds in chronological order (possibly empty).

    Raises:
        ValueError: on non-positive sizes or negative purge/embargo.
    """
    if n_samples <= 0:
        raise ValueError("n_samples must be positive")
    if train_size <= 0 or test_size <= 0:
        raise ValueError("train_size and test_size must be positive")
    if purge_size < 0 or embargo_size < 0:
        raise ValueError("purge_size and embargo_size must be >= 0")
    if min_train_size <= 0:
        raise ValueError("min_train_size must be positive")

    step = step_size if step_size is not None else test_size
    if step <= 0:
        raise ValueError("step_size must be positive")

    folds: list[WalkForwardFold] = []
    # End index of the last *accepted* fold's test window (embargo anchor).
    prev_test_end: int | None = None
    test_start = train_size + purge_size

    while test_start + test_size <= n_samples:
        test_end = test_start + test_size - 1
        train_end = test_start - purge_size - 1
        if train_end < 0:
            break

        train_start = max(0, train_end - train_size + 1)
        train_indices = list(range(train_start, train_end + 1))

        # Embargo: drop samples right after the last accepted test window so
        # information evaluated there cannot leak into later training.
        if prev_test_end is not None and embargo_size > 0:
            emb_from = prev_test_end + 1
            emb_to = prev_test_end + embargo_size
            train_indices = [i for i in train_indices if i < emb_from or i > emb_to]

        if len(train_indices) >= min_train_size:
            folds.append(
                WalkForwardFold(
                    train_indices=train_indices,
                    test_indices=list(range(test_start, test_end + 1)),
                )
            )
            # BUGFIX: advance the embargo anchor only for accepted folds.
            # A fold skipped by min_train_size never evaluates its test
            # window, so embargoing behind it removes no leakage; anchoring
            # on skipped candidates could starve every subsequent fold.
            prev_test_end = test_end
        test_start += step

    return folds
|
||||
83
tests/test_backtest_cost_guard.py
Normal file
83
tests/test_backtest_cost_guard.py
Normal file
@@ -0,0 +1,83 @@
|
||||
from __future__ import annotations

import pytest

from src.analysis.backtest_cost_guard import BacktestCostModel, validate_backtest_cost_model


def test_valid_backtest_cost_model_passes() -> None:
    """A fully specified model covering all required sessions validates cleanly."""
    model = BacktestCostModel(
        commission_bps=5.0,
        slippage_bps_by_session={"KRX_REG": 10.0, "US_PRE": 50.0},
        failure_rate_by_session={"KRX_REG": 0.01, "US_PRE": 0.08},
        unfavorable_fill_required=True,
    )
    validate_backtest_cost_model(model=model, required_sessions=["KRX_REG", "US_PRE"])


def test_missing_required_slippage_session_raises() -> None:
    """A required session absent from the slippage map is rejected by name."""
    model = BacktestCostModel(
        commission_bps=5.0,
        slippage_bps_by_session={"KRX_REG": 10.0},
        failure_rate_by_session={"KRX_REG": 0.01, "US_PRE": 0.08},
        unfavorable_fill_required=True,
    )
    with pytest.raises(ValueError, match="missing slippage_bps_by_session.*US_PRE"):
        validate_backtest_cost_model(model=model, required_sessions=["KRX_REG", "US_PRE"])


def test_missing_required_failure_rate_session_raises() -> None:
    """A required session absent from the failure-rate map is rejected by name."""
    model = BacktestCostModel(
        commission_bps=5.0,
        slippage_bps_by_session={"KRX_REG": 10.0, "US_PRE": 50.0},
        failure_rate_by_session={"KRX_REG": 0.01},
        unfavorable_fill_required=True,
    )
    with pytest.raises(ValueError, match="missing failure_rate_by_session.*US_PRE"):
        validate_backtest_cost_model(model=model, required_sessions=["KRX_REG", "US_PRE"])


def test_invalid_failure_rate_range_raises() -> None:
    """Failure rates above 1.0 are outside [0, 1] and rejected."""
    model = BacktestCostModel(
        commission_bps=5.0,
        slippage_bps_by_session={"KRX_REG": 10.0},
        failure_rate_by_session={"KRX_REG": 1.2},
        unfavorable_fill_required=True,
    )
    with pytest.raises(ValueError, match="failure rate must be within"):
        validate_backtest_cost_model(model=model, required_sessions=["KRX_REG"])


def test_unfavorable_fill_requirement_cannot_be_disabled() -> None:
    """The conservative-fill flag is mandatory; disabling it is an error."""
    model = BacktestCostModel(
        commission_bps=5.0,
        slippage_bps_by_session={"KRX_REG": 10.0},
        failure_rate_by_session={"KRX_REG": 0.02},
        unfavorable_fill_required=False,
    )
    with pytest.raises(ValueError, match="unfavorable_fill_required must be True"):
        validate_backtest_cost_model(model=model, required_sessions=["KRX_REG"])


@pytest.mark.parametrize("bad_commission", [float("nan"), float("inf"), float("-inf")])
def test_non_finite_commission_rejected(bad_commission: float) -> None:
    """NaN and infinite commissions fail the isfinite guard."""
    model = BacktestCostModel(
        commission_bps=bad_commission,
        slippage_bps_by_session={"KRX_REG": 10.0},
        failure_rate_by_session={"KRX_REG": 0.02},
        unfavorable_fill_required=True,
    )
    with pytest.raises(ValueError, match="commission_bps"):
        validate_backtest_cost_model(model=model, required_sessions=["KRX_REG"])


@pytest.mark.parametrize("bad_slippage", [float("nan"), float("inf"), float("-inf")])
def test_non_finite_slippage_rejected(bad_slippage: float) -> None:
    """NaN and infinite per-session slippage values fail the isfinite guard."""
    model = BacktestCostModel(
        commission_bps=5.0,
        slippage_bps_by_session={"KRX_REG": bad_slippage},
        failure_rate_by_session={"KRX_REG": 0.02},
        unfavorable_fill_required=True,
    )
    with pytest.raises(ValueError, match="slippage bps"):
        validate_backtest_cost_model(model=model, required_sessions=["KRX_REG"])
|
||||
108
tests/test_backtest_execution_model.py
Normal file
108
tests/test_backtest_execution_model.py
Normal file
@@ -0,0 +1,108 @@
|
||||
from __future__ import annotations

import pytest

from src.analysis.backtest_execution_model import (
    BacktestExecutionModel,
    ExecutionAssumptions,
    ExecutionRequest,
)


def test_buy_uses_unfavorable_slippage_direction() -> None:
    """BUY orders fill above the reference price (50 bps on 100.0 -> 100.5)."""
    model = BacktestExecutionModel(
        ExecutionAssumptions(
            slippage_bps_by_session={"US_PRE": 50.0},
            failure_rate_by_session={"US_PRE": 0.0},
            partial_fill_rate_by_session={"US_PRE": 0.0},
            seed=1,
        )
    )
    out = model.simulate(
        ExecutionRequest(side="BUY", session_id="US_PRE", qty=10, reference_price=100.0)
    )
    assert out.status == "FILLED"
    assert out.avg_price == pytest.approx(100.5)


def test_sell_uses_unfavorable_slippage_direction() -> None:
    """SELL orders fill below the reference price (50 bps on 100.0 -> 99.5)."""
    model = BacktestExecutionModel(
        ExecutionAssumptions(
            slippage_bps_by_session={"US_PRE": 50.0},
            failure_rate_by_session={"US_PRE": 0.0},
            partial_fill_rate_by_session={"US_PRE": 0.0},
            seed=1,
        )
    )
    out = model.simulate(
        ExecutionRequest(side="SELL", session_id="US_PRE", qty=10, reference_price=100.0)
    )
    assert out.status == "FILLED"
    assert out.avg_price == pytest.approx(99.5)


def test_failure_rate_can_reject_order() -> None:
    """A failure rate of 1.0 guarantees rejection with zero filled quantity."""
    model = BacktestExecutionModel(
        ExecutionAssumptions(
            slippage_bps_by_session={"KRX_REG": 10.0},
            failure_rate_by_session={"KRX_REG": 1.0},
            partial_fill_rate_by_session={"KRX_REG": 0.0},
            seed=42,
        )
    )
    out = model.simulate(
        ExecutionRequest(side="BUY", session_id="KRX_REG", qty=10, reference_price=100.0)
    )
    assert out.status == "REJECTED"
    assert out.filled_qty == 0


def test_partial_fill_applies_when_rate_is_one() -> None:
    """With rate 1.0 and pinned ratio 0.4, a 10-unit order fills exactly 4."""
    model = BacktestExecutionModel(
        ExecutionAssumptions(
            slippage_bps_by_session={"KRX_REG": 0.0},
            failure_rate_by_session={"KRX_REG": 0.0},
            partial_fill_rate_by_session={"KRX_REG": 1.0},
            # min == max pins the uniform draw so the fill is deterministic.
            partial_fill_min_ratio=0.4,
            partial_fill_max_ratio=0.4,
            seed=0,
        )
    )
    out = model.simulate(
        ExecutionRequest(side="BUY", session_id="KRX_REG", qty=10, reference_price=100.0)
    )
    assert out.status == "PARTIAL"
    assert out.filled_qty == 4
    assert out.avg_price == 100.0


@pytest.mark.parametrize("bad_slip", [-1.0, float("nan"), float("inf")])
def test_invalid_slippage_is_rejected(bad_slip: float) -> None:
    """Negative or non-finite slippage assumptions fail construction."""
    with pytest.raises(ValueError, match="slippage_bps"):
        BacktestExecutionModel(
            ExecutionAssumptions(
                slippage_bps_by_session={"US_PRE": bad_slip},
                failure_rate_by_session={"US_PRE": 0.0},
                partial_fill_rate_by_session={"US_PRE": 0.0},
            )
        )


@pytest.mark.parametrize("bad_rate", [-0.1, 1.1, float("nan")])
def test_invalid_failure_or_partial_rates_are_rejected(bad_rate: float) -> None:
    """Rates outside [0, 1] (or NaN) fail construction for both rate maps."""
    with pytest.raises(ValueError, match="failure_rate"):
        BacktestExecutionModel(
            ExecutionAssumptions(
                slippage_bps_by_session={"US_PRE": 10.0},
                failure_rate_by_session={"US_PRE": bad_rate},
                partial_fill_rate_by_session={"US_PRE": 0.0},
            )
        )
    with pytest.raises(ValueError, match="partial_fill_rate"):
        BacktestExecutionModel(
            ExecutionAssumptions(
                slippage_bps_by_session={"US_PRE": 10.0},
                failure_rate_by_session={"US_PRE": 0.0},
                partial_fill_rate_by_session={"US_PRE": bad_rate},
            )
        )
|
||||
92
tests/test_walk_forward_split.py
Normal file
92
tests/test_walk_forward_split.py
Normal file
@@ -0,0 +1,92 @@
|
||||
from __future__ import annotations

import pytest

from src.analysis.walk_forward_split import generate_walk_forward_splits


def test_generates_sequential_folds() -> None:
    """Default stepping yields non-overlapping test windows with rolling trains."""
    folds = generate_walk_forward_splits(
        n_samples=30,
        train_size=10,
        test_size=5,
    )
    assert len(folds) == 4
    assert folds[0].train_indices == list(range(0, 10))
    assert folds[0].test_indices == list(range(10, 15))
    assert folds[1].train_indices == list(range(5, 15))
    assert folds[1].test_indices == list(range(15, 20))


def test_purge_removes_boundary_samples_before_test() -> None:
    """Purge inserts a gap between the train end and the test start."""
    folds = generate_walk_forward_splits(
        n_samples=25,
        train_size=8,
        test_size=4,
        purge_size=2,
    )
    first = folds[0]
    # test starts at 10, purge=2 => train end must be 7
    assert first.train_indices == list(range(0, 8))
    assert first.test_indices == list(range(10, 14))


def test_embargo_excludes_post_test_samples_from_next_train() -> None:
    """Samples just after a prior test window are dropped from the next train."""
    folds = generate_walk_forward_splits(
        n_samples=45,
        train_size=15,
        test_size=5,
        step_size=10,
        embargo_size=3,
    )
    assert len(folds) >= 2
    # Fold1 test: 15..19, next fold train window: 10..24.
    # embargo_size=3 should remove 20,21,22 from fold2 train.
    second_train = folds[1].train_indices
    assert 20 not in second_train
    assert 21 not in second_train
    assert 22 not in second_train
    assert 23 in second_train


def test_respects_min_train_size_and_returns_empty_when_impossible() -> None:
    """When every candidate train window is too small, no folds are produced."""
    folds = generate_walk_forward_splits(
        n_samples=15,
        train_size=5,
        test_size=5,
        min_train_size=6,
    )
    assert folds == []


def test_embargo_uses_last_accepted_fold_when_intermediate_fold_skips() -> None:
    """The embargo anchor must track accepted folds, not skipped candidates."""
    folds = generate_walk_forward_splits(
        n_samples=30,
        train_size=5,
        test_size=3,
        step_size=5,
        embargo_size=1,
        min_train_size=5,
    )
    # 1st fold accepted, 2nd skipped by min_train_size, subsequent folds still generated.
    assert len(folds) == 3
    assert folds[0].test_indices == [5, 6, 7]
    assert folds[1].test_indices == [15, 16, 17]
    assert folds[2].test_indices == [25, 26, 27]


@pytest.mark.parametrize(
    ("n_samples", "train_size", "test_size"),
    [
        (0, 10, 2),
        (10, 0, 2),
        (10, 5, 0),
    ],
)
def test_invalid_args_raise(n_samples: int, train_size: int, test_size: int) -> None:
    """Non-positive sample/window sizes are rejected with ValueError."""
    with pytest.raises(ValueError):
        generate_walk_forward_splits(
            n_samples=n_samples,
            train_size=train_size,
            test_size=test_size,
        )
|
||||
Reference in New Issue
Block a user