From c31a6a569d9480f932880a1846803410439b35ca Mon Sep 17 00:00:00 2001 From: agentson Date: Fri, 27 Feb 2026 00:05:01 +0900 Subject: [PATCH 001/109] docs: enforce feature-branch team flow and mandatory runtime monitoring validation (#279) --- docs/ouroboros/40_acceptance_and_test_plan.md | 6 ++++++ docs/ouroboros/50_tpm_control_protocol.md | 8 ++++++++ .../60_repo_enforcement_checklist.md | 8 ++++++++ docs/workflow.md | 19 ++++++++++++++----- 4 files changed, 36 insertions(+), 5 deletions(-) diff --git a/docs/ouroboros/40_acceptance_and_test_plan.md b/docs/ouroboros/40_acceptance_and_test_plan.md index 182d344..c5e2c0d 100644 --- a/docs/ouroboros/40_acceptance_and_test_plan.md +++ b/docs/ouroboros/40_acceptance_and_test_plan.md @@ -43,6 +43,11 @@ Updated: 2026-02-26 - 기존 `tests/` 스위트 전량 실행 - 신규 기능 플래그 ON/OFF 비교 +4. 구동/모니터링 검증 (필수) +- 개발 완료 후 시스템을 실제 구동해 핵심 경로를 관찰 +- 필수 관찰 항목: 주문 차단 정책, Kill Switch 동작, 경보/예외 로그, 세션 전환 로그 +- Runtime Verifier 코멘트로 증적(실행 명령/요약 로그) 첨부 + ## 실행 명령 ```bash @@ -55,3 +60,4 @@ python3 scripts/validate_ouroboros_docs.py - 문서 검증 실패 시 구현 PR 병합 금지 - `REQ-*` 변경 후 테스트 매핑 누락 시 병합 금지 - 회귀 실패 시 원인 모듈 분리 후 재검증 +- 구동/모니터링 증적 누락 시 검증 승인 금지 diff --git a/docs/ouroboros/50_tpm_control_protocol.md b/docs/ouroboros/50_tpm_control_protocol.md index 406972f..b179790 100644 --- a/docs/ouroboros/50_tpm_control_protocol.md +++ b/docs/ouroboros/50_tpm_control_protocol.md @@ -150,6 +150,10 @@ TPM 티켓 운영 규칙: - PR 본문에는 TPM이 지정한 우선순위와 범위가 그대로 반영되어야 한다. - 우선순위 변경은 TPM 제안 + Main Agent 승인으로만 가능하다. +브랜치 운영 규칙: +- TPM은 각 티켓에 대해 `ticket temp branch -> program feature branch` PR 경로를 지정한다. +- 티켓 머지 대상은 항상 program feature branch이며, `main`은 최종 통합 단계에서만 사용한다. + ## Runtime Verification Protocol - Runtime Verifier는 테스트 통과 이후 실제 동작(스테이징/실운영)을 모니터링한다. 
@@ -159,6 +163,10 @@ TPM 티켓 운영 규칙: - 이슈 클로즈 규칙: - Dev 수정 완료 + Verifier 재검증 통과 + Runtime Verifier 재관측 정상 - 최종 클로즈 승인자는 Main Agent +- 개발 완료 필수 절차: + - 시스템 실제 구동(스테이징/로컬 실운영 모드) 실행 + - 모니터링 체크리스트(핵심 경보/주문 경로/예외 로그) 수행 + - 결과를 티켓/PR 코멘트에 증적으로 첨부하지 않으면 완료로 간주하지 않음 ## Server Reflection Rule (No-Merge by Default) diff --git a/docs/ouroboros/60_repo_enforcement_checklist.md b/docs/ouroboros/60_repo_enforcement_checklist.md index 2273ce5..58bbeb5 100644 --- a/docs/ouroboros/60_repo_enforcement_checklist.md +++ b/docs/ouroboros/60_repo_enforcement_checklist.md @@ -50,10 +50,12 @@ Updated: 2026-02-26 - PR 본문에 `REQ-*`, `TASK-*`, `TEST-*` 매핑 표 존재 - `src/core/risk_manager.py` 변경 없음 - 주요 의사결정 체크포인트(DCP-01~04) 중 해당 단계 Main Agent 확인 기록 존재 +- 티켓 PR의 base가 `main`이 아닌 program feature branch인지 확인 자동 점검: - 문서 검증 스크립트 통과 - 테스트 통과 +- 개발 완료 시 시스템 구동/모니터링 증적 코멘트 존재 ## 5) 감사 추적 @@ -92,3 +94,9 @@ Updated: 2026-02-26 - 서버 반영은 `브랜치 푸시 + PR 코멘트(리뷰/논의/검증승인)`까지를 기본으로 한다. - 기본 규칙에서 `tea pulls merge` 실행은 금지한다. - 사용자 명시 승인 시에만 예외적으로 머지를 허용한다(예외 근거를 PR 코멘트에 기록). + +## 10) 최종 main 병합 조건 + +- 모든 티켓이 program feature branch로 병합 완료 +- Runtime Verifier의 구동/모니터링 검증 완료 +- 사용자 최종 승인 코멘트 확인 후에만 `feature -> main` PR 머지 허용 diff --git a/docs/workflow.md b/docs/workflow.md index 7d446ee..6e76b81 100644 --- a/docs/workflow.md +++ b/docs/workflow.md @@ -5,14 +5,23 @@ **CRITICAL: All code changes MUST follow this workflow. Direct pushes to `main` are ABSOLUTELY PROHIBITED.** 1. **Create Gitea Issue First** — All features, bug fixes, and policy changes require a Gitea issue before any code is written -2. **Create Feature Branch** — Branch from `main` using format `feature/issue-{N}-{short-description}` - - After creating the branch, run `git pull origin main` and rebase to ensure the branch is up to date -3. **Implement Changes** — Write code, tests, and documentation on the feature branch -4. **Create Pull Request** — Submit PR to `main` branch referencing the issue number -5. 
**Review & Merge** — After approval, merge via PR (squash or merge commit) +2. **Create Program Feature Branch** — Branch from `main` for the whole development stream + - Format: `feature/{epic-or-stream-name}` +3. **Create Ticket Temp Branch** — Branch from the program feature branch per ticket + - Format: `feature/issue-{N}-{short-description}` +4. **Implement Per Ticket** — Write code, tests, and documentation on the ticket temp branch +5. **Create Pull Request to Program Feature Branch** — `feature/issue-N-* -> feature/{stream}` +6. **Review/Verify and Merge into Program Feature Branch** +7. **Final Integration PR to main** — Only after all ticket stages complete and explicit user approval **Never commit directly to `main`.** This policy applies to all changes, no exceptions. +## Branch Strategy (Mandatory) + +- Team operation default branch is the **program feature branch**, not `main`. +- Ticket-level development happens only on **ticket temp branches** cut from the program feature branch. +- Until final user sign-off, `main` merge is prohibited. + ## Gitea CLI Formatting Troubleshooting Issue/PR 본문 작성 시 줄바꿈(`\n`)이 문자열 그대로 저장되는 문제가 반복될 수 있다. 원인은 `-d "...\n..."` 형태에서 쉘/CLI가 이스케이프를 실제 개행으로 해석하지 않기 때문이다. 
From df6baee7f13e653b7ad699cdd00268175d506462 Mon Sep 17 00:00:00 2001 From: agentson Date: Fri, 27 Feb 2026 00:13:47 +0900 Subject: [PATCH 002/109] feat: add session-aware order policy guard for low-liquidity market-order rejection (#279) --- src/core/order_policy.py | 93 ++++++++++++++++++++++++++++++++++++++ src/main.py | 83 ++++++++++++++++++++++++++++++++++ tests/test_main.py | 73 ++++++++++++++++++++++++++++++ tests/test_order_policy.py | 40 ++++++++++++++++ 4 files changed, 289 insertions(+) create mode 100644 src/core/order_policy.py create mode 100644 tests/test_order_policy.py diff --git a/src/core/order_policy.py b/src/core/order_policy.py new file mode 100644 index 0000000..5fbb43a --- /dev/null +++ b/src/core/order_policy.py @@ -0,0 +1,93 @@ +"""Session-aware order policy guards. + +Default policy: +- Low-liquidity sessions must reject market orders (price <= 0). +""" + +from __future__ import annotations + +from dataclasses import dataclass +from datetime import UTC, datetime, time +from zoneinfo import ZoneInfo + +from src.markets.schedule import MarketInfo + +_LOW_LIQUIDITY_SESSIONS = {"NXT_AFTER", "US_PRE", "US_DAY", "US_AFTER"} + + +class OrderPolicyRejected(Exception): + """Raised when an order violates session policy.""" + + def __init__(self, message: str, *, session_id: str, market_code: str) -> None: + super().__init__(message) + self.session_id = session_id + self.market_code = market_code + + +@dataclass(frozen=True) +class SessionInfo: + session_id: str + is_low_liquidity: bool + + +def classify_session_id(market: MarketInfo, now: datetime | None = None) -> str: + """Classify current session by KST schedule used in v3 docs.""" + now = now or datetime.now(UTC) + # v3 session tables are explicitly defined in KST perspective. 
+ kst_time = now.astimezone(ZoneInfo("Asia/Seoul")).timetz().replace(tzinfo=None) + + if market.code == "KR": + if time(8, 0) <= kst_time < time(8, 50): + return "NXT_PRE" + if time(9, 0) <= kst_time < time(15, 30): + return "KRX_REG" + if time(15, 30) <= kst_time < time(20, 0): + return "NXT_AFTER" + return "KR_OFF" + + if market.code.startswith("US"): + if time(10, 0) <= kst_time < time(18, 0): + return "US_DAY" + if time(18, 0) <= kst_time < time(23, 30): + return "US_PRE" + if time(23, 30) <= kst_time or kst_time < time(6, 0): + return "US_REG" + if time(6, 0) <= kst_time < time(7, 0): + return "US_AFTER" + return "US_OFF" + + return "GENERIC_REG" + + +def get_session_info(market: MarketInfo, now: datetime | None = None) -> SessionInfo: + session_id = classify_session_id(market, now) + return SessionInfo(session_id=session_id, is_low_liquidity=session_id in _LOW_LIQUIDITY_SESSIONS) + + +def validate_order_policy( + *, + market: MarketInfo, + order_type: str, + price: float, + now: datetime | None = None, +) -> SessionInfo: + """Validate order against session policy and return resolved session info.""" + info = get_session_info(market, now) + + is_market_order = price <= 0 + if info.is_low_liquidity and is_market_order: + raise OrderPolicyRejected( + f"Market order is forbidden in low-liquidity session ({info.session_id})", + session_id=info.session_id, + market_code=market.code, + ) + + # Guard against accidental unsupported actions. 
+ if order_type not in {"BUY", "SELL"}: + raise OrderPolicyRejected( + f"Unsupported order_type={order_type}", + session_id=info.session_id, + market_code=market.code, + ) + + return info diff --git a/src/main.py b/src/main.py index f1679a4..c9f36b9 100644 --- a/src/main.py +++ b/src/main.py @@ -28,6 +28,7 @@ from src.context.scheduler import ContextScheduler from src.context.store import ContextStore from src.core.criticality import CriticalityAssessor from src.core.kill_switch import KillSwitchOrchestrator +from src.core.order_policy import OrderPolicyRejected, validate_order_policy from src.core.priority_queue import PriorityTaskQueue from src.core.risk_manager import CircuitBreakerTripped, FatFingerRejected, RiskManager from src.db import ( @@ -1005,6 +1006,22 @@ async def trading_cycle( order_price = kr_round_down(current_price * 1.002) else: order_price = kr_round_down(current_price * 0.998) + try: + validate_order_policy( + market=market, + order_type=decision.action, + price=float(order_price), + ) + except OrderPolicyRejected as exc: + logger.warning( + "Order policy rejected %s %s (%s): %s [session=%s]", + decision.action, + stock_code, + market.name, + exc, + exc.session_id, + ) + return result = await broker.send_order( stock_code=stock_code, order_type=decision.action, @@ -1027,6 +1044,22 @@ async def trading_cycle( overseas_price = round(current_price * 1.002, _price_decimals) else: overseas_price = round(current_price * 0.998, _price_decimals) + try: + validate_order_policy( + market=market, + order_type=decision.action, + price=float(overseas_price), + ) + except OrderPolicyRejected as exc: + logger.warning( + "Order policy rejected %s %s (%s): %s [session=%s]", + decision.action, + stock_code, + market.name, + exc, + exc.session_id, + ) + return result = await overseas_broker.send_overseas_order( exchange_code=market.exchange_code, stock_code=stock_code, @@ -1271,6 +1304,11 @@ async def handle_domestic_pending_orders( f"Invalid price ({last_price}) 
for {stock_code}" ) new_price = kr_round_down(last_price * 0.996) + validate_order_policy( + market=MARKETS["KR"], + order_type="SELL", + price=float(new_price), + ) await broker.send_order( stock_code=stock_code, order_type="SELL", @@ -1444,6 +1482,19 @@ async def handle_overseas_pending_orders( f"Invalid price ({last_price}) for {stock_code}" ) new_price = round(last_price * 0.996, 4) + market_info = next( + ( + m for m in MARKETS.values() + if m.exchange_code == order_exchange and not m.is_domestic + ), + None, + ) + if market_info is not None: + validate_order_policy( + market=market_info, + order_type="SELL", + price=float(new_price), + ) await overseas_broker.send_overseas_order( exchange_code=order_exchange, stock_code=stock_code, @@ -2012,6 +2063,22 @@ async def run_daily_session( order_price = kr_round_down( stock_data["current_price"] * 0.998 ) + try: + validate_order_policy( + market=market, + order_type=decision.action, + price=float(order_price), + ) + except OrderPolicyRejected as exc: + logger.warning( + "Order policy rejected %s %s (%s): %s [session=%s]", + decision.action, + stock_code, + market.name, + exc, + exc.session_id, + ) + continue result = await broker.send_order( stock_code=stock_code, order_type=decision.action, @@ -2024,6 +2091,22 @@ async def run_daily_session( order_price = round(stock_data["current_price"] * 1.005, 4) else: order_price = stock_data["current_price"] + try: + validate_order_policy( + market=market, + order_type=decision.action, + price=float(order_price), + ) + except OrderPolicyRejected as exc: + logger.warning( + "Order policy rejected %s %s (%s): %s [session=%s]", + decision.action, + stock_code, + market.name, + exc, + exc.session_id, + ) + continue result = await overseas_broker.send_overseas_order( exchange_code=market.exchange_code, stock_code=stock_code, diff --git a/tests/test_main.py b/tests/test_main.py index 8d7cb33..8c540ca 100644 --- a/tests/test_main.py +++ b/tests/test_main.py @@ -8,6 +8,7 @@ import 
pytest from src.config import Settings from src.context.layer import ContextLayer from src.context.scheduler import ScheduleResult +from src.core.order_policy import OrderPolicyRejected from src.core.risk_manager import CircuitBreakerTripped, FatFingerRejected from src.db import init_db, log_trade from src.evolution.scorecard import DailyScorecard @@ -5116,3 +5117,75 @@ async def test_kill_switch_block_skips_actionable_order_execution() -> None: KILL_SWITCH.clear_block() broker.send_order.assert_not_called() + + +@pytest.mark.asyncio +async def test_order_policy_rejection_skips_order_execution() -> None: + """Order policy rejection must prevent order submission.""" + db_conn = init_db(":memory:") + decision_logger = DecisionLogger(db_conn) + + broker = MagicMock() + broker.get_current_price = AsyncMock(return_value=(100.0, 0.5, 0.0)) + broker.get_balance = AsyncMock( + return_value={ + "output1": [], + "output2": [ + { + "tot_evlu_amt": "100000", + "dnca_tot_amt": "50000", + "pchs_amt_smtl_amt": "50000", + } + ], + } + ) + broker.send_order = AsyncMock(return_value={"msg1": "OK"}) + + market = MagicMock() + market.name = "Korea" + market.code = "KR" + market.exchange_code = "KRX" + market.is_domestic = True + + telegram = MagicMock() + telegram.notify_trade_execution = AsyncMock() + telegram.notify_fat_finger = AsyncMock() + telegram.notify_circuit_breaker = AsyncMock() + telegram.notify_scenario_matched = AsyncMock() + + settings = MagicMock() + settings.POSITION_SIZING_ENABLED = False + settings.CONFIDENCE_THRESHOLD = 80 + + with patch( + "src.main.validate_order_policy", + side_effect=OrderPolicyRejected( + "rejected", + session_id="NXT_AFTER", + market_code="KR", + ), + ): + await trading_cycle( + broker=broker, + overseas_broker=MagicMock(), + scenario_engine=MagicMock(evaluate=MagicMock(return_value=_make_buy_match())), + playbook=_make_playbook(), + risk=MagicMock(), + db_conn=db_conn, + decision_logger=decision_logger, + context_store=MagicMock( + 
get_latest_timeframe=MagicMock(return_value=None), + set_context=MagicMock(), + ), + criticality_assessor=MagicMock( + assess_market_conditions=MagicMock(return_value=MagicMock(value="NORMAL")), + get_timeout=MagicMock(return_value=5.0), + ), + telegram=telegram, + market=market, + stock_code="005930", + scan_candidates={}, + settings=settings, + ) + + broker.send_order.assert_not_called() diff --git a/tests/test_order_policy.py b/tests/test_order_policy.py new file mode 100644 index 0000000..0f25aba --- /dev/null +++ b/tests/test_order_policy.py @@ -0,0 +1,40 @@ +from datetime import UTC, datetime + +import pytest + +from src.core.order_policy import OrderPolicyRejected, classify_session_id, validate_order_policy +from src.markets.schedule import MARKETS + + +def test_classify_kr_nxt_after() -> None: + # 2026-02-26 16:00 KST == 07:00 UTC + now = datetime(2026, 2, 26, 7, 0, tzinfo=UTC) + assert classify_session_id(MARKETS["KR"], now) == "NXT_AFTER" + + +def test_classify_us_pre() -> None: + # 2026-02-26 19:00 KST == 10:00 UTC + now = datetime(2026, 2, 26, 10, 0, tzinfo=UTC) + assert classify_session_id(MARKETS["US_NASDAQ"], now) == "US_PRE" + + +def test_reject_market_order_in_low_liquidity_session() -> None: + now = datetime(2026, 2, 26, 10, 0, tzinfo=UTC) # 19:00 KST -> US_PRE + with pytest.raises(OrderPolicyRejected): + validate_order_policy( + market=MARKETS["US_NASDAQ"], + order_type="BUY", + price=0.0, + now=now, + ) + + +def test_allow_limit_order_in_low_liquidity_session() -> None: + now = datetime(2026, 2, 26, 10, 0, tzinfo=UTC) # 19:00 KST -> US_PRE + info = validate_order_policy( + market=MARKETS["US_NASDAQ"], + order_type="BUY", + price=100.0, + now=now, + ) + assert info.session_id == "US_PRE" From 3ffad58d57da06d6eb6ba980430be8a0a0d838ac Mon Sep 17 00:00:00 2001 From: agentson Date: Fri, 27 Feb 2026 00:19:51 +0900 Subject: [PATCH 003/109] docs: allow ticket->feature merges without user approval; keep main gated (#279) --- 
docs/ouroboros/50_tpm_control_protocol.md | 8 ++++---- docs/ouroboros/60_repo_enforcement_checklist.md | 8 ++++---- docs/workflow.md | 3 ++- 3 files changed, 10 insertions(+), 9 deletions(-) diff --git a/docs/ouroboros/50_tpm_control_protocol.md b/docs/ouroboros/50_tpm_control_protocol.md index b179790..619f6a8 100644 --- a/docs/ouroboros/50_tpm_control_protocol.md +++ b/docs/ouroboros/50_tpm_control_protocol.md @@ -168,11 +168,11 @@ TPM 티켓 운영 규칙: - 모니터링 체크리스트(핵심 경보/주문 경로/예외 로그) 수행 - 결과를 티켓/PR 코멘트에 증적으로 첨부하지 않으면 완료로 간주하지 않음 -## Server Reflection Rule (No-Merge by Default) +## Server Reflection Rule -- 서버 반영 기본 규칙은 `브랜치 푸시 + PR 생성/코멘트`까지로 제한한다. -- 기본 흐름에서 검증 승인 후 자동/수동 머지 실행은 금지한다. -- 예외는 사용자 명시 승인 시에만 허용되며, Main Agent가 예외 근거를 PR에 기록한다. +- `ticket temp branch -> program feature branch` 머지는 검증 승인 후 자동/수동 진행 가능하다. +- `program feature branch -> main` 머지는 사용자 명시 승인 시에만 허용한다. +- Main 병합 시 Main Agent가 승인 근거를 PR 코멘트에 기록한다. ## Acceptance Matrix (PM Scenario -> Dev Tasks -> Verifier Checks) diff --git a/docs/ouroboros/60_repo_enforcement_checklist.md b/docs/ouroboros/60_repo_enforcement_checklist.md index 58bbeb5..02f40e9 100644 --- a/docs/ouroboros/60_repo_enforcement_checklist.md +++ b/docs/ouroboros/60_repo_enforcement_checklist.md @@ -89,11 +89,11 @@ Updated: 2026-02-26 - `REPLAN-REQUEST`는 Main Agent 승인 전 \"제안\" 상태로 유지 - 승인된 재계획은 `REQ/TASK/TEST` 문서를 동시 갱신해야 유효 -## 9) 서버 반영 규칙 (No-Merge by Default) +## 9) 서버 반영 규칙 -- 서버 반영은 `브랜치 푸시 + PR 코멘트(리뷰/논의/검증승인)`까지를 기본으로 한다. -- 기본 규칙에서 `tea pulls merge` 실행은 금지한다. -- 사용자 명시 승인 시에만 예외적으로 머지를 허용한다(예외 근거를 PR 코멘트에 기록). +- 티켓 PR(`feature/issue-* -> feature/{stream}`)은 검증 승인 후 머지 가능하다. +- 최종 통합 PR(`feature/{stream} -> main`)은 사용자 명시 승인 전 `tea pulls merge` 실행 금지. +- Main 병합 시 승인 근거 코멘트 필수. ## 10) 최종 main 병합 조건 diff --git a/docs/workflow.md b/docs/workflow.md index 6e76b81..f3bb3b0 100644 --- a/docs/workflow.md +++ b/docs/workflow.md @@ -11,7 +11,7 @@ - Format: `feature/issue-{N}-{short-description}` 4. 
**Implement Per Ticket** — Write code, tests, and documentation on the ticket temp branch 5. **Create Pull Request to Program Feature Branch** — `feature/issue-N-* -> feature/{stream}` -6. **Review/Verify and Merge into Program Feature Branch** +6. **Review/Verify and Merge into Program Feature Branch** — user approval not required 7. **Final Integration PR to main** — Only after all ticket stages complete and explicit user approval **Never commit directly to `main`.** This policy applies to all changes, no exceptions. @@ -20,6 +20,7 @@ - Team operation default branch is the **program feature branch**, not `main`. - Ticket-level development happens only on **ticket temp branches** cut from the program feature branch. +- Ticket PR merges into program feature branch are allowed after verifier approval. - Until final user sign-off, `main` merge is prohibited. ## Gitea CLI Formatting Troubleshooting From 356d085ab070c0187b9cda5945751af0c54a3eaf Mon Sep 17 00:00:00 2001 From: agentson Date: Fri, 27 Feb 2026 00:31:29 +0900 Subject: [PATCH 004/109] feat: implement blackout queue and recovery revalidation (TASK-CODE-008) --- src/config.py | 3 + src/core/blackout_manager.py | 100 ++++++++++++++ src/main.py | 235 +++++++++++++++++++++++++++++++++ tests/test_blackout_manager.py | 81 ++++++++++++ tests/test_main.py | 160 ++++++++++++++++++++++ 5 files changed, 579 insertions(+) create mode 100644 src/core/blackout_manager.py create mode 100644 tests/test_blackout_manager.py diff --git a/src/config.py b/src/config.py index 0a10619..7ff1c4d 100644 --- a/src/config.py +++ b/src/config.py @@ -64,6 +64,9 @@ class Settings(BaseSettings): TRADE_MODE: str = Field(default="daily", pattern="^(daily|realtime)$") DAILY_SESSIONS: int = Field(default=4, ge=1, le=10) SESSION_INTERVAL_HOURS: int = Field(default=6, ge=1, le=24) + ORDER_BLACKOUT_ENABLED: bool = True + ORDER_BLACKOUT_WINDOWS_KST: str = "23:30-00:10" + ORDER_BLACKOUT_QUEUE_MAX: int = Field(default=500, ge=10, le=5000) # 
Pre-Market Planner PRE_MARKET_MINUTES: int = Field(default=30, ge=10, le=120) diff --git a/src/core/blackout_manager.py b/src/core/blackout_manager.py new file mode 100644 index 0000000..9078735 --- /dev/null +++ b/src/core/blackout_manager.py @@ -0,0 +1,100 @@ +"""Blackout policy and queued order-intent manager.""" + +from __future__ import annotations + +from collections import deque +from dataclasses import dataclass +from datetime import UTC, datetime, time +from zoneinfo import ZoneInfo + + +@dataclass(frozen=True) +class BlackoutWindow: + start: time + end: time + + def contains(self, kst_time: time) -> bool: + if self.start <= self.end: + return self.start <= kst_time < self.end + return kst_time >= self.start or kst_time < self.end + + +@dataclass +class QueuedOrderIntent: + market_code: str + exchange_code: str + stock_code: str + order_type: str + quantity: int + price: float + source: str + queued_at: datetime + attempts: int = 0 + + +def parse_blackout_windows_kst(raw: str) -> list[BlackoutWindow]: + """Parse comma-separated KST windows like '23:30-00:10,11:20-11:30'.""" + windows: list[BlackoutWindow] = [] + for token in raw.split(","): + span = token.strip() + if not span or "-" not in span: + continue + start_raw, end_raw = [part.strip() for part in span.split("-", 1)] + try: + start_h, start_m = [int(v) for v in start_raw.split(":", 1)] + end_h, end_m = [int(v) for v in end_raw.split(":", 1)] + except (ValueError, TypeError): + continue + if not (0 <= start_h <= 23 and 0 <= end_h <= 23): + continue + if not (0 <= start_m <= 59 and 0 <= end_m <= 59): + continue + windows.append(BlackoutWindow(start=time(start_h, start_m), end=time(end_h, end_m))) + return windows + + +class BlackoutOrderManager: + """Tracks blackout mode and queues order intents until recovery.""" + + def __init__( + self, + *, + enabled: bool, + windows: list[BlackoutWindow], + max_queue_size: int = 500, + ) -> None: + self.enabled = enabled + self._windows = windows + self._queue: 
deque[QueuedOrderIntent] = deque() + self._was_blackout = False + self._max_queue_size = max_queue_size + + @property + def pending_count(self) -> int: + return len(self._queue) + + def in_blackout(self, now: datetime | None = None) -> bool: + if not self.enabled or not self._windows: + return False + now = now or datetime.now(UTC) + kst_now = now.astimezone(ZoneInfo("Asia/Seoul")).timetz().replace(tzinfo=None) + return any(window.contains(kst_now) for window in self._windows) + + def enqueue(self, intent: QueuedOrderIntent) -> bool: + if len(self._queue) >= self._max_queue_size: + return False + self._queue.append(intent) + return True + + def pop_recovery_batch(self, now: datetime | None = None) -> list[QueuedOrderIntent]: + in_blackout_now = self.in_blackout(now) + batch: list[QueuedOrderIntent] = [] + if not in_blackout_now and self._queue: + while self._queue: + batch.append(self._queue.popleft()) + self._was_blackout = in_blackout_now + return batch + + def requeue(self, intent: QueuedOrderIntent) -> None: + if len(self._queue) < self._max_queue_size: + self._queue.append(intent) diff --git a/src/main.py b/src/main.py index c9f36b9..61aee8a 100644 --- a/src/main.py +++ b/src/main.py @@ -27,6 +27,11 @@ from src.context.layer import ContextLayer from src.context.scheduler import ContextScheduler from src.context.store import ContextStore from src.core.criticality import CriticalityAssessor +from src.core.blackout_manager import ( + BlackoutOrderManager, + QueuedOrderIntent, + parse_blackout_windows_kst, +) from src.core.kill_switch import KillSwitchOrchestrator from src.core.order_policy import OrderPolicyRejected, validate_order_policy from src.core.priority_queue import PriorityTaskQueue @@ -53,6 +58,11 @@ from src.strategy.scenario_engine import ScenarioEngine logger = logging.getLogger(__name__) KILL_SWITCH = KillSwitchOrchestrator() +BLACKOUT_ORDER_MANAGER = BlackoutOrderManager( + enabled=False, + windows=[], + max_queue_size=500, +) def safe_float(value: 
str | float | None, default: float = 0.0) -> float: @@ -461,6 +471,171 @@ async def build_overseas_symbol_universe( return ordered_unique +def _build_queued_order_intent( + *, + market: MarketInfo, + stock_code: str, + order_type: str, + quantity: int, + price: float, + source: str, +) -> QueuedOrderIntent: + return QueuedOrderIntent( + market_code=market.code, + exchange_code=market.exchange_code, + stock_code=stock_code, + order_type=order_type, + quantity=quantity, + price=price, + source=source, + queued_at=datetime.now(UTC), + ) + + +def _maybe_queue_order_intent( + *, + market: MarketInfo, + stock_code: str, + order_type: str, + quantity: int, + price: float, + source: str, +) -> bool: + if not BLACKOUT_ORDER_MANAGER.in_blackout(): + return False + + queued = BLACKOUT_ORDER_MANAGER.enqueue( + _build_queued_order_intent( + market=market, + stock_code=stock_code, + order_type=order_type, + quantity=quantity, + price=price, + source=source, + ) + ) + if queued: + logger.warning( + "Blackout active: queued order intent %s %s (%s) qty=%d price=%.4f source=%s pending=%d", + order_type, + stock_code, + market.code, + quantity, + price, + source, + BLACKOUT_ORDER_MANAGER.pending_count, + ) + else: + logger.error( + "Blackout queue full: dropped order intent %s %s (%s) qty=%d source=%s", + order_type, + stock_code, + market.code, + quantity, + source, + ) + return True + + +async def process_blackout_recovery_orders( + *, + broker: KISBroker, + overseas_broker: OverseasBroker, + db_conn: Any, +) -> None: + intents = BLACKOUT_ORDER_MANAGER.pop_recovery_batch() + if not intents: + return + + logger.info( + "Blackout recovery started: processing %d queued intents", + len(intents), + ) + for intent in intents: + market = MARKETS.get(intent.market_code) + if market is None: + continue + + open_position = get_open_position(db_conn, intent.stock_code, market.code) + if intent.order_type == "BUY" and open_position is not None: + logger.info( + "Drop stale queued BUY %s (%s): 
position already open", + intent.stock_code, + market.code, + ) + continue + if intent.order_type == "SELL" and open_position is None: + logger.info( + "Drop stale queued SELL %s (%s): no open position", + intent.stock_code, + market.code, + ) + continue + + try: + validate_order_policy( + market=market, + order_type=intent.order_type, + price=float(intent.price), + ) + if market.is_domestic: + result = await broker.send_order( + stock_code=intent.stock_code, + order_type=intent.order_type, + quantity=intent.quantity, + price=intent.price, + ) + else: + result = await overseas_broker.send_overseas_order( + exchange_code=market.exchange_code, + stock_code=intent.stock_code, + order_type=intent.order_type, + quantity=intent.quantity, + price=intent.price, + ) + + accepted = result.get("rt_cd", "0") == "0" + if accepted: + logger.info( + "Recovered queued order executed: %s %s (%s) qty=%d price=%.4f source=%s", + intent.order_type, + intent.stock_code, + market.code, + intent.quantity, + intent.price, + intent.source, + ) + continue + logger.warning( + "Recovered queued order rejected: %s %s (%s) qty=%d msg=%s", + intent.order_type, + intent.stock_code, + market.code, + intent.quantity, + result.get("msg1"), + ) + except Exception as exc: + if isinstance(exc, OrderPolicyRejected): + logger.info( + "Drop queued intent by policy: %s %s (%s): %s", + intent.order_type, + intent.stock_code, + market.code, + exc, + ) + continue + logger.warning( + "Recovered queued order failed: %s %s (%s): %s", + intent.order_type, + intent.stock_code, + market.code, + exc, + ) + if intent.attempts < 2: + intent.attempts += 1 + BLACKOUT_ORDER_MANAGER.requeue(intent) + + async def trading_cycle( broker: KISBroker, overseas_broker: OverseasBroker, @@ -1022,6 +1197,15 @@ async def trading_cycle( exc.session_id, ) return + if _maybe_queue_order_intent( + market=market, + stock_code=stock_code, + order_type=decision.action, + quantity=quantity, + price=float(order_price), + 
source="trading_cycle", + ): + return result = await broker.send_order( stock_code=stock_code, order_type=decision.action, @@ -1060,6 +1244,15 @@ async def trading_cycle( exc.session_id, ) return + if _maybe_queue_order_intent( + market=market, + stock_code=stock_code, + order_type=decision.action, + quantity=quantity, + price=float(overseas_price), + source="trading_cycle", + ): + return result = await overseas_broker.send_overseas_order( exchange_code=market.exchange_code, stock_code=stock_code, @@ -1583,6 +1776,11 @@ async def run_daily_session( # Process each open market for market in open_markets: + await process_blackout_recovery_orders( + broker=broker, + overseas_broker=overseas_broker, + db_conn=db_conn, + ) # Use market-local date for playbook keying market_today = datetime.now(market.timezone).date() @@ -2079,6 +2277,15 @@ async def run_daily_session( exc.session_id, ) continue + if _maybe_queue_order_intent( + market=market, + stock_code=stock_code, + order_type=decision.action, + quantity=quantity, + price=float(order_price), + source="run_daily_session", + ): + continue result = await broker.send_order( stock_code=stock_code, order_type=decision.action, @@ -2107,6 +2314,15 @@ async def run_daily_session( exc.session_id, ) continue + if _maybe_queue_order_intent( + market=market, + stock_code=stock_code, + order_type=decision.action, + quantity=quantity, + price=float(order_price), + source="run_daily_session", + ): + continue result = await overseas_broker.send_overseas_order( exchange_code=market.exchange_code, stock_code=stock_code, @@ -2345,6 +2561,19 @@ def _apply_dashboard_flag(settings: Settings, dashboard_flag: bool) -> Settings: async def run(settings: Settings) -> None: """Main async loop — iterate over open markets on a timer.""" + global BLACKOUT_ORDER_MANAGER + BLACKOUT_ORDER_MANAGER = BlackoutOrderManager( + enabled=settings.ORDER_BLACKOUT_ENABLED, + windows=parse_blackout_windows_kst(settings.ORDER_BLACKOUT_WINDOWS_KST), + 
max_queue_size=settings.ORDER_BLACKOUT_QUEUE_MAX, + ) + logger.info( + "Blackout manager initialized: enabled=%s windows=%s queue_max=%d", + settings.ORDER_BLACKOUT_ENABLED, + settings.ORDER_BLACKOUT_WINDOWS_KST, + settings.ORDER_BLACKOUT_QUEUE_MAX, + ) + broker = KISBroker(settings) overseas_broker = OverseasBroker(broker) brain = GeminiClient(settings) @@ -2944,6 +3173,12 @@ async def run(settings: Settings) -> None: if shutdown.is_set(): break + await process_blackout_recovery_orders( + broker=broker, + overseas_broker=overseas_broker, + db_conn=db_conn, + ) + # Notify market open if it just opened if not _market_states.get(market.code, False): try: diff --git a/tests/test_blackout_manager.py b/tests/test_blackout_manager.py new file mode 100644 index 0000000..0a1bd5e --- /dev/null +++ b/tests/test_blackout_manager.py @@ -0,0 +1,81 @@ +from __future__ import annotations + +from datetime import UTC, datetime + +from src.core.blackout_manager import ( + BlackoutOrderManager, + QueuedOrderIntent, + parse_blackout_windows_kst, +) + + +def test_parse_blackout_windows_kst() -> None: + windows = parse_blackout_windows_kst("23:30-00:10,11:20-11:30,invalid") + assert len(windows) == 2 + + +def test_blackout_manager_handles_cross_midnight_window() -> None: + manager = BlackoutOrderManager( + enabled=True, + windows=parse_blackout_windows_kst("23:30-00:10"), + max_queue_size=10, + ) + # 2026-01-01 23:40 KST = 2026-01-01 14:40 UTC + assert manager.in_blackout(datetime(2026, 1, 1, 14, 40, tzinfo=UTC)) + # 2026-01-02 00:20 KST = 2026-01-01 15:20 UTC + assert not manager.in_blackout(datetime(2026, 1, 1, 15, 20, tzinfo=UTC)) + + +def test_recovery_batch_only_after_blackout_exit() -> None: + manager = BlackoutOrderManager( + enabled=True, + windows=parse_blackout_windows_kst("23:30-00:10"), + max_queue_size=10, + ) + intent = QueuedOrderIntent( + market_code="KR", + exchange_code="KRX", + stock_code="005930", + order_type="BUY", + quantity=1, + price=100.0, + source="test", + 
queued_at=datetime.now(UTC), + ) + assert manager.enqueue(intent) + + # Inside blackout: no pop yet + inside_blackout = datetime(2026, 1, 1, 14, 40, tzinfo=UTC) + assert manager.pop_recovery_batch(inside_blackout) == [] + + # Outside blackout: pop full batch once + outside_blackout = datetime(2026, 1, 1, 15, 20, tzinfo=UTC) + batch = manager.pop_recovery_batch(outside_blackout) + assert len(batch) == 1 + assert manager.pending_count == 0 + + +def test_requeued_intent_is_processed_next_non_blackout_cycle() -> None: + manager = BlackoutOrderManager( + enabled=True, + windows=parse_blackout_windows_kst("23:30-00:10"), + max_queue_size=10, + ) + intent = QueuedOrderIntent( + market_code="KR", + exchange_code="KRX", + stock_code="005930", + order_type="BUY", + quantity=1, + price=100.0, + source="test", + queued_at=datetime.now(UTC), + ) + manager.enqueue(intent) + outside_blackout = datetime(2026, 1, 1, 15, 20, tzinfo=UTC) + first_batch = manager.pop_recovery_batch(outside_blackout) + assert len(first_batch) == 1 + + manager.requeue(first_batch[0]) + second_batch = manager.pop_recovery_batch(outside_blackout) + assert len(second_batch) == 1 diff --git a/tests/test_main.py b/tests/test_main.py index 8c540ca..934b113 100644 --- a/tests/test_main.py +++ b/tests/test_main.py @@ -27,6 +27,7 @@ from src.main import ( _start_dashboard_server, handle_domestic_pending_orders, handle_overseas_pending_orders, + process_blackout_recovery_orders, run_daily_session, safe_float, sync_positions_from_broker, @@ -5189,3 +5190,162 @@ async def test_order_policy_rejection_skips_order_execution() -> None: ) broker.send_order.assert_not_called() + + +@pytest.mark.asyncio +async def test_blackout_queues_order_and_skips_submission() -> None: + """When blackout is active, order submission is replaced by queueing.""" + db_conn = init_db(":memory:") + decision_logger = DecisionLogger(db_conn) + + broker = MagicMock() + broker.get_current_price = AsyncMock(return_value=(100.0, 0.5, 0.0)) + 
broker.get_balance = AsyncMock( + return_value={ + "output1": [], + "output2": [ + { + "tot_evlu_amt": "100000", + "dnca_tot_amt": "50000", + "pchs_amt_smtl_amt": "50000", + } + ], + } + ) + broker.send_order = AsyncMock(return_value={"msg1": "OK"}) + + market = MagicMock() + market.name = "Korea" + market.code = "KR" + market.exchange_code = "KRX" + market.is_domestic = True + + settings = MagicMock() + settings.POSITION_SIZING_ENABLED = False + settings.CONFIDENCE_THRESHOLD = 80 + + telegram = MagicMock() + telegram.notify_trade_execution = AsyncMock() + telegram.notify_fat_finger = AsyncMock() + telegram.notify_circuit_breaker = AsyncMock() + telegram.notify_scenario_matched = AsyncMock() + + blackout_manager = MagicMock() + blackout_manager.in_blackout.return_value = True + blackout_manager.enqueue.return_value = True + blackout_manager.pending_count = 1 + + with patch("src.main.BLACKOUT_ORDER_MANAGER", blackout_manager): + await trading_cycle( + broker=broker, + overseas_broker=MagicMock(), + scenario_engine=MagicMock(evaluate=MagicMock(return_value=_make_buy_match())), + playbook=_make_playbook(), + risk=MagicMock(), + db_conn=db_conn, + decision_logger=decision_logger, + context_store=MagicMock( + get_latest_timeframe=MagicMock(return_value=None), + set_context=MagicMock(), + ), + criticality_assessor=MagicMock( + assess_market_conditions=MagicMock(return_value=MagicMock(value="NORMAL")), + get_timeout=MagicMock(return_value=5.0), + ), + telegram=telegram, + market=market, + stock_code="005930", + scan_candidates={}, + settings=settings, + ) + + broker.send_order.assert_not_called() + blackout_manager.enqueue.assert_called_once() + + +@pytest.mark.asyncio +async def test_process_blackout_recovery_executes_valid_intents() -> None: + """Recovery must execute queued intents that pass revalidation.""" + db_conn = init_db(":memory:") + broker = MagicMock() + broker.send_order = AsyncMock(return_value={"rt_cd": "0", "msg1": "OK"}) + overseas_broker = MagicMock() + 
+ market = MagicMock() + market.code = "KR" + market.exchange_code = "KRX" + market.is_domestic = True + + intent = MagicMock() + intent.market_code = "KR" + intent.stock_code = "005930" + intent.order_type = "BUY" + intent.quantity = 1 + intent.price = 100.0 + intent.source = "test" + intent.attempts = 0 + + blackout_manager = MagicMock() + blackout_manager.pop_recovery_batch.return_value = [intent] + + with ( + patch("src.main.BLACKOUT_ORDER_MANAGER", blackout_manager), + patch("src.main.MARKETS", {"KR": market}), + patch("src.main.get_open_position", return_value=None), + patch("src.main.validate_order_policy"), + ): + await process_blackout_recovery_orders( + broker=broker, + overseas_broker=overseas_broker, + db_conn=db_conn, + ) + + broker.send_order.assert_called_once() + + +@pytest.mark.asyncio +async def test_process_blackout_recovery_drops_policy_rejected_intent() -> None: + """Policy-rejected queued intents must not be requeued.""" + db_conn = init_db(":memory:") + broker = MagicMock() + broker.send_order = AsyncMock(return_value={"rt_cd": "0", "msg1": "OK"}) + overseas_broker = MagicMock() + + market = MagicMock() + market.code = "KR" + market.exchange_code = "KRX" + market.is_domestic = True + + intent = MagicMock() + intent.market_code = "KR" + intent.stock_code = "005930" + intent.order_type = "BUY" + intent.quantity = 1 + intent.price = 100.0 + intent.source = "test" + intent.attempts = 0 + + blackout_manager = MagicMock() + blackout_manager.pop_recovery_batch.return_value = [intent] + + with ( + patch("src.main.BLACKOUT_ORDER_MANAGER", blackout_manager), + patch("src.main.MARKETS", {"KR": market}), + patch("src.main.get_open_position", return_value=None), + patch( + "src.main.validate_order_policy", + side_effect=OrderPolicyRejected( + "blocked", + session_id="NXT_AFTER", + market_code="KR", + ), + ), + ): + await process_blackout_recovery_orders( + broker=broker, + overseas_broker=overseas_broker, + db_conn=db_conn, + ) + + 
broker.send_order.assert_not_called() + blackout_manager.requeue.assert_not_called() From 25401ac132f0ce18f5a45f6d40e8e20c77562b6a Mon Sep 17 00:00:00 2001 From: agentson Date: Fri, 27 Feb 2026 00:38:26 +0900 Subject: [PATCH 005/109] feat: enforce operational kill switch callbacks in runtime flow (TASK-CODE-003) --- src/core/blackout_manager.py | 5 + src/main.py | 204 ++++++++++++++++++++++++++++++----- tests/test_main.py | 65 +++++++++++ 3 files changed, 249 insertions(+), 25 deletions(-) diff --git a/src/core/blackout_manager.py b/src/core/blackout_manager.py index 9078735..b0bc68b 100644 --- a/src/core/blackout_manager.py +++ b/src/core/blackout_manager.py @@ -98,3 +98,8 @@ class BlackoutOrderManager: def requeue(self, intent: QueuedOrderIntent) -> None: if len(self._queue) < self._max_queue_size: self._queue.append(intent) + + def clear(self) -> int: + count = len(self._queue) + self._queue.clear() + return count diff --git a/src/main.py b/src/main.py index 61aee8a..3230116 100644 --- a/src/main.py +++ b/src/main.py @@ -636,6 +636,167 @@ async def process_blackout_recovery_orders( BLACKOUT_ORDER_MANAGER.requeue(intent) +def _resolve_kill_switch_markets( + *, + settings: Settings | None, + current_market: MarketInfo | None, +) -> list[MarketInfo]: + if settings is not None: + markets: list[MarketInfo] = [] + seen: set[str] = set() + for market_code in settings.enabled_market_list: + market = MARKETS.get(market_code) + if market is None or market.code in seen: + continue + markets.append(market) + seen.add(market.code) + if markets: + return markets + if current_market is not None: + return [current_market] + return [] + + +async def _cancel_pending_orders_for_kill_switch( + *, + broker: KISBroker, + overseas_broker: OverseasBroker, + markets: list[MarketInfo], +) -> None: + domestic = [m for m in markets if m.is_domestic] + overseas = [m for m in markets if not m.is_domestic] + + if domestic: + try: + orders = await broker.get_domestic_pending_orders() + except 
Exception as exc: + logger.warning("KillSwitch: failed to fetch domestic pending orders: %s", exc) + orders = [] + for order in orders: + try: + stock_code = order.get("pdno", "") + orgn_odno = order.get("orgn_odno", "") + krx_fwdg_ord_orgno = order.get("ord_gno_brno", "") + psbl_qty = int(order.get("psbl_qty", "0") or "0") + if not stock_code or not orgn_odno or psbl_qty <= 0: + continue + await broker.cancel_domestic_order( + stock_code=stock_code, + orgn_odno=orgn_odno, + krx_fwdg_ord_orgno=krx_fwdg_ord_orgno, + qty=psbl_qty, + ) + except Exception as exc: + logger.warning("KillSwitch: domestic cancel failed: %s", exc) + + us_exchanges = frozenset({"NASD", "NYSE", "AMEX"}) + exchange_codes: list[str] = [] + seen_us = False + for market in overseas: + exc_code = market.exchange_code + if exc_code in us_exchanges: + if not seen_us: + exchange_codes.append("NASD") + seen_us = True + elif exc_code not in exchange_codes: + exchange_codes.append(exc_code) + + for exchange_code in exchange_codes: + try: + orders = await overseas_broker.get_overseas_pending_orders(exchange_code) + except Exception as exc: + logger.warning( + "KillSwitch: failed to fetch overseas pending orders for %s: %s", + exchange_code, + exc, + ) + continue + for order in orders: + try: + stock_code = order.get("pdno", "") + odno = order.get("odno", "") + nccs_qty = int(order.get("nccs_qty", "0") or "0") + order_exchange = order.get("ovrs_excg_cd") or exchange_code + if not stock_code or not odno or nccs_qty <= 0: + continue + await overseas_broker.cancel_overseas_order( + exchange_code=order_exchange, + stock_code=stock_code, + odno=odno, + qty=nccs_qty, + ) + except Exception as exc: + logger.warning("KillSwitch: overseas cancel failed: %s", exc) + + +async def _refresh_order_state_for_kill_switch( + *, + broker: KISBroker, + overseas_broker: OverseasBroker, + markets: list[MarketInfo], +) -> None: + seen_overseas: set[str] = set() + for market in markets: + try: + if market.is_domestic: + await 
broker.get_balance() + elif market.exchange_code not in seen_overseas: + seen_overseas.add(market.exchange_code) + await overseas_broker.get_overseas_balance(market.exchange_code) + except Exception as exc: + logger.warning( + "KillSwitch: refresh state failed for %s/%s: %s", + market.code, + market.exchange_code, + exc, + ) + + +def _reduce_risk_for_kill_switch() -> None: + dropped = BLACKOUT_ORDER_MANAGER.clear() + logger.critical("KillSwitch: reduced queued order risk by clearing %d queued intents", dropped) + + +async def _trigger_emergency_kill_switch( + *, + reason: str, + broker: KISBroker, + overseas_broker: OverseasBroker, + telegram: TelegramClient, + settings: Settings | None, + current_market: MarketInfo | None, + stock_code: str, + pnl_pct: float, + threshold: float, +) -> Any: + markets = _resolve_kill_switch_markets(settings=settings, current_market=current_market) + return await KILL_SWITCH.trigger( + reason=reason, + cancel_pending_orders=lambda: _cancel_pending_orders_for_kill_switch( + broker=broker, + overseas_broker=overseas_broker, + markets=markets, + ), + refresh_order_state=lambda: _refresh_order_state_for_kill_switch( + broker=broker, + overseas_broker=overseas_broker, + markets=markets, + ), + reduce_risk=_reduce_risk_for_kill_switch, + snapshot_state=lambda: logger.critical( + "KillSwitch snapshot %s/%s pnl=%.2f threshold=%.2f", + current_market.code if current_market else "UNKNOWN", + stock_code, + pnl_pct, + threshold, + ), + notify=lambda: telegram.notify_circuit_breaker( + pnl_pct=pnl_pct, + threshold=threshold, + ), + ) + + async def trading_cycle( broker: KISBroker, overseas_broker: OverseasBroker, @@ -1151,15 +1312,16 @@ async def trading_cycle( logger.warning("Fat finger notification failed: %s", notify_exc) raise # Re-raise to prevent trade except CircuitBreakerTripped as exc: - ks_report = await KILL_SWITCH.trigger( + ks_report = await _trigger_emergency_kill_switch( 
reason=f"circuit_breaker:{market.code}:{stock_code}:{exc.pnl_pct:.2f}", - snapshot_state=lambda: logger.critical( - "KillSwitch snapshot %s/%s pnl=%.2f threshold=%.2f", - market.code, - stock_code, - exc.pnl_pct, - exc.threshold, - ), + broker=broker, + overseas_broker=overseas_broker, + telegram=telegram, + settings=settings, + current_market=market, + stock_code=stock_code, + pnl_pct=exc.pnl_pct, + threshold=exc.threshold, ) if ks_report.errors: logger.critical( @@ -2218,26 +2380,18 @@ async def run_daily_session( logger.warning("Fat finger notification failed: %s", notify_exc) continue # Skip this order except CircuitBreakerTripped as exc: - ks_report = await KILL_SWITCH.trigger( + ks_report = await _trigger_emergency_kill_switch( reason=f"daily_circuit_breaker:{market.code}:{stock_code}:{exc.pnl_pct:.2f}", - snapshot_state=lambda: logger.critical( - "Daily KillSwitch snapshot %s/%s pnl=%.2f threshold=%.2f", - market.code, - stock_code, - exc.pnl_pct, - exc.threshold, - ), + broker=broker, + overseas_broker=overseas_broker, + telegram=telegram, + settings=settings, + current_market=market, + stock_code=stock_code, + pnl_pct=exc.pnl_pct, + threshold=exc.threshold, ) logger.critical("Circuit breaker tripped — stopping session") - try: - await telegram.notify_circuit_breaker( - pnl_pct=exc.pnl_pct, - threshold=exc.threshold, - ) - except Exception as notify_exc: - logger.warning( - "Circuit breaker notification failed: %s", notify_exc - ) if ks_report.errors: logger.critical( "Daily KillSwitch step errors for %s/%s: %s", diff --git a/tests/test_main.py b/tests/test_main.py index 934b113..719063d 100644 --- a/tests/test_main.py +++ b/tests/test_main.py @@ -15,6 +15,7 @@ from src.evolution.scorecard import DailyScorecard from src.logging.decision_logger import DecisionLogger from src.main import ( KILL_SWITCH, + _trigger_emergency_kill_switch, _apply_dashboard_flag, _determine_order_quantity, _extract_avg_price_from_balance, @@ -5349,3 +5350,67 @@ async def 
test_process_blackout_recovery_drops_policy_rejected_intent() -> None: broker.send_order.assert_not_called() blackout_manager.requeue.assert_not_called() + + +@pytest.mark.asyncio +async def test_trigger_emergency_kill_switch_executes_operational_steps() -> None: + """Emergency kill switch should execute cancel/refresh/reduce/notify callbacks.""" + broker = MagicMock() + broker.get_domestic_pending_orders = AsyncMock( + return_value=[ + { + "pdno": "005930", + "orgn_odno": "1", + "ord_gno_brno": "01", + "psbl_qty": "3", + } + ] + ) + broker.cancel_domestic_order = AsyncMock(return_value={"rt_cd": "0"}) + broker.get_balance = AsyncMock(return_value={"output1": [], "output2": []}) + + overseas_broker = MagicMock() + overseas_broker.get_overseas_pending_orders = AsyncMock(return_value=[]) + overseas_broker.get_overseas_balance = AsyncMock(return_value={"output1": [], "output2": []}) + + telegram = MagicMock() + telegram.notify_circuit_breaker = AsyncMock() + + settings = MagicMock() + settings.enabled_market_list = ["KR"] + + market = MagicMock() + market.code = "KR" + market.exchange_code = "KRX" + market.is_domestic = True + + with ( + patch("src.main.MARKETS", {"KR": market}), + patch("src.main.BLACKOUT_ORDER_MANAGER.clear", return_value=2), + ): + report = await _trigger_emergency_kill_switch( + reason="test", + broker=broker, + overseas_broker=overseas_broker, + telegram=telegram, + settings=settings, + current_market=market, + stock_code="005930", + pnl_pct=-3.2, + threshold=-3.0, + ) + + assert report.steps == [ + "block_new_orders", + "cancel_pending_orders", + "refresh_order_state", + "reduce_risk", + "snapshot_state", + "notify", + ] + broker.cancel_domestic_order.assert_called_once() + broker.get_balance.assert_called_once() + telegram.notify_circuit_breaker.assert_called_once_with( + pnl_pct=-3.2, + threshold=-3.0, + ) From 0a4e69d40c926b78058cd8162f64dcf5ebd75629 Mon Sep 17 00:00:00 2001 From: agentson Date: Fri, 27 Feb 2026 00:41:13 +0900 Subject: [PATCH 
006/109] fix: record kill switch cancel failures and add failure-path tests --- src/main.py | 30 ++++++++++++++++++++++----- tests/test_main.py | 51 ++++++++++++++++++++++++++++++++++++++++++++++ 2 files changed, 76 insertions(+), 5 deletions(-) diff --git a/src/main.py b/src/main.py index 3230116..2fe9ef4 100644 --- a/src/main.py +++ b/src/main.py @@ -663,6 +663,7 @@ async def _cancel_pending_orders_for_kill_switch( overseas_broker: OverseasBroker, markets: list[MarketInfo], ) -> None: + failures: list[str] = [] domestic = [m for m in markets if m.is_domestic] overseas = [m for m in markets if not m.is_domestic] @@ -673,21 +674,28 @@ async def _cancel_pending_orders_for_kill_switch( logger.warning("KillSwitch: failed to fetch domestic pending orders: %s", exc) orders = [] for order in orders: + stock_code = str(order.get("pdno", "")) try: - stock_code = order.get("pdno", "") orgn_odno = order.get("orgn_odno", "") krx_fwdg_ord_orgno = order.get("ord_gno_brno", "") psbl_qty = int(order.get("psbl_qty", "0") or "0") if not stock_code or not orgn_odno or psbl_qty <= 0: continue - await broker.cancel_domestic_order( + cancel_result = await broker.cancel_domestic_order( stock_code=stock_code, orgn_odno=orgn_odno, krx_fwdg_ord_orgno=krx_fwdg_ord_orgno, qty=psbl_qty, ) + if cancel_result.get("rt_cd") != "0": + failures.append( + "domestic cancel failed for" + f" {stock_code}: rt_cd={cancel_result.get('rt_cd')}" + f" msg={cancel_result.get('msg1')}" + ) except Exception as exc: logger.warning("KillSwitch: domestic cancel failed: %s", exc) + failures.append(f"domestic cancel exception for {stock_code}: {exc}") us_exchanges = frozenset({"NASD", "NYSE", "AMEX"}) exchange_codes: list[str] = [] @@ -712,21 +720,33 @@ async def _cancel_pending_orders_for_kill_switch( ) continue for order in orders: + stock_code = str(order.get("pdno", "")) + order_exchange = str(order.get("ovrs_excg_cd") or exchange_code) try: - stock_code = order.get("pdno", "") odno = order.get("odno", "") 
nccs_qty = int(order.get("nccs_qty", "0") or "0") - order_exchange = order.get("ovrs_excg_cd") or exchange_code if not stock_code or not odno or nccs_qty <= 0: continue - await overseas_broker.cancel_overseas_order( + cancel_result = await overseas_broker.cancel_overseas_order( exchange_code=order_exchange, stock_code=stock_code, odno=odno, qty=nccs_qty, ) + if cancel_result.get("rt_cd") != "0": + failures.append( + "overseas cancel failed for" + f" {order_exchange}/{stock_code}: rt_cd={cancel_result.get('rt_cd')}" + f" msg={cancel_result.get('msg1')}" + ) except Exception as exc: logger.warning("KillSwitch: overseas cancel failed: %s", exc) + failures.append( + f"overseas cancel exception for {order_exchange}/{stock_code}: {exc}" + ) + + if failures: + raise RuntimeError("; ".join(failures[:3])) async def _refresh_order_state_for_kill_switch( diff --git a/tests/test_main.py b/tests/test_main.py index 719063d..3dee447 100644 --- a/tests/test_main.py +++ b/tests/test_main.py @@ -5414,3 +5414,54 @@ async def test_trigger_emergency_kill_switch_executes_operational_steps() -> Non pnl_pct=-3.2, threshold=-3.0, ) + + +@pytest.mark.asyncio +async def test_trigger_emergency_kill_switch_records_cancel_failure() -> None: + """Cancel API rejection should be captured in kill switch errors.""" + broker = MagicMock() + broker.get_domestic_pending_orders = AsyncMock( + return_value=[ + { + "pdno": "005930", + "orgn_odno": "1", + "ord_gno_brno": "01", + "psbl_qty": "3", + } + ] + ) + broker.cancel_domestic_order = AsyncMock(return_value={"rt_cd": "1", "msg1": "fail"}) + broker.get_balance = AsyncMock(return_value={"output1": [], "output2": []}) + + overseas_broker = MagicMock() + overseas_broker.get_overseas_pending_orders = AsyncMock(return_value=[]) + overseas_broker.get_overseas_balance = AsyncMock(return_value={"output1": [], "output2": []}) + + telegram = MagicMock() + telegram.notify_circuit_breaker = AsyncMock() + + settings = MagicMock() + settings.enabled_market_list = 
["KR"] + + market = MagicMock() + market.code = "KR" + market.exchange_code = "KRX" + market.is_domestic = True + + with ( + patch("src.main.MARKETS", {"KR": market}), + patch("src.main.BLACKOUT_ORDER_MANAGER.clear", return_value=0), + ): + report = await _trigger_emergency_kill_switch( + reason="test-fail", + broker=broker, + overseas_broker=overseas_broker, + telegram=telegram, + settings=settings, + current_market=market, + stock_code="005930", + pnl_pct=-3.2, + threshold=-3.0, + ) + + assert any(err.startswith("cancel_pending_orders:") for err in report.errors) From bb391d502cc0f12572a680de5fd8841df9f96a35 Mon Sep 17 00:00:00 2001 From: agentson Date: Fri, 27 Feb 2026 00:45:18 +0900 Subject: [PATCH 007/109] feat: add triple barrier labeler with first-touch logic (TASK-CODE-004) --- src/analysis/triple_barrier.py | 111 +++++++++++++++++++++++++++++++++ tests/test_triple_barrier.py | 91 +++++++++++++++++++++++++++ 2 files changed, 202 insertions(+) create mode 100644 src/analysis/triple_barrier.py create mode 100644 tests/test_triple_barrier.py diff --git a/src/analysis/triple_barrier.py b/src/analysis/triple_barrier.py new file mode 100644 index 0000000..cf7a961 --- /dev/null +++ b/src/analysis/triple_barrier.py @@ -0,0 +1,111 @@ +"""Triple barrier labeler utilities. + +Implements first-touch labeling with upper/lower/time barriers. 
+""" + +from __future__ import annotations + +from dataclasses import dataclass +from typing import Literal, Sequence + + +TieBreakMode = Literal["stop_first", "take_first"] + + +@dataclass(frozen=True) +class TripleBarrierSpec: + take_profit_pct: float + stop_loss_pct: float + max_holding_bars: int + tie_break: TieBreakMode = "stop_first" + + +@dataclass(frozen=True) +class TripleBarrierLabel: + label: int # +1 take-profit first, -1 stop-loss first, 0 timeout + touched: Literal["take_profit", "stop_loss", "time"] + touch_bar: int + entry_price: float + upper_barrier: float + lower_barrier: float + + +def label_with_triple_barrier( + *, + highs: Sequence[float], + lows: Sequence[float], + closes: Sequence[float], + entry_index: int, + side: int, + spec: TripleBarrierSpec, +) -> TripleBarrierLabel: + """Label one entry using triple-barrier first-touch rule. + + Args: + highs/lows/closes: OHLC components with identical length. + entry_index: Entry bar index in the sequences. + side: +1 for long, -1 for short. + spec: Barrier specification. + """ + if side not in {1, -1}: + raise ValueError("side must be +1 or -1") + if len(highs) != len(lows) or len(highs) != len(closes): + raise ValueError("highs, lows, closes lengths must match") + if entry_index < 0 or entry_index >= len(closes): + raise IndexError("entry_index out of range") + if spec.max_holding_bars <= 0: + raise ValueError("max_holding_bars must be positive") + + entry_price = float(closes[entry_index]) + if entry_price <= 0: + raise ValueError("entry price must be positive") + + if side == 1: + upper = entry_price * (1.0 + spec.take_profit_pct) + lower = entry_price * (1.0 - spec.stop_loss_pct) + else: + # For short side, favorable move is down. 
+ upper = entry_price * (1.0 + spec.stop_loss_pct) + lower = entry_price * (1.0 - spec.take_profit_pct) + + last_index = min(len(closes) - 1, entry_index + spec.max_holding_bars) + for idx in range(entry_index + 1, last_index + 1): + h = float(highs[idx]) + l = float(lows[idx]) + + up_touch = h >= upper + down_touch = l <= lower + if not up_touch and not down_touch: + continue + + if up_touch and down_touch: + if spec.tie_break == "stop_first": + touched = "stop_loss" if side == 1 else "take_profit" + label = -1 if side == 1 else 1 + else: + touched = "take_profit" if side == 1 else "stop_loss" + label = 1 if side == 1 else -1 + elif up_touch: + touched = "take_profit" if side == 1 else "stop_loss" + label = 1 if side == 1 else -1 + else: + touched = "stop_loss" if side == 1 else "take_profit" + label = -1 if side == 1 else 1 + + return TripleBarrierLabel( + label=label, + touched=touched, + touch_bar=idx, + entry_price=entry_price, + upper_barrier=upper, + lower_barrier=lower, + ) + + return TripleBarrierLabel( + label=0, + touched="time", + touch_bar=last_index, + entry_price=entry_price, + upper_barrier=upper, + lower_barrier=lower, + ) diff --git a/tests/test_triple_barrier.py b/tests/test_triple_barrier.py new file mode 100644 index 0000000..73efc47 --- /dev/null +++ b/tests/test_triple_barrier.py @@ -0,0 +1,91 @@ +from __future__ import annotations + +from src.analysis.triple_barrier import TripleBarrierSpec, label_with_triple_barrier + + +def test_long_take_profit_first() -> None: + highs = [100, 101, 103] + lows = [100, 99.6, 100] + closes = [100, 100, 102] + spec = TripleBarrierSpec(take_profit_pct=0.02, stop_loss_pct=0.01, max_holding_bars=3) + out = label_with_triple_barrier( + highs=highs, + lows=lows, + closes=closes, + entry_index=0, + side=1, + spec=spec, + ) + assert out.label == 1 + assert out.touched == "take_profit" + assert out.touch_bar == 2 + + +def test_long_stop_loss_first() -> None: + highs = [100, 100.5, 101] + lows = [100, 98.8, 99] + 
closes = [100, 99.5, 100] + spec = TripleBarrierSpec(take_profit_pct=0.02, stop_loss_pct=0.01, max_holding_bars=3) + out = label_with_triple_barrier( + highs=highs, + lows=lows, + closes=closes, + entry_index=0, + side=1, + spec=spec, + ) + assert out.label == -1 + assert out.touched == "stop_loss" + assert out.touch_bar == 1 + + +def test_time_barrier_timeout() -> None: + highs = [100, 100.8, 100.7] + lows = [100, 99.3, 99.4] + closes = [100, 100, 100] + spec = TripleBarrierSpec(take_profit_pct=0.02, stop_loss_pct=0.02, max_holding_bars=2) + out = label_with_triple_barrier( + highs=highs, + lows=lows, + closes=closes, + entry_index=0, + side=1, + spec=spec, + ) + assert out.label == 0 + assert out.touched == "time" + assert out.touch_bar == 2 + + +def test_tie_break_stop_first_default() -> None: + highs = [100, 102.1] + lows = [100, 98.9] + closes = [100, 100] + spec = TripleBarrierSpec(take_profit_pct=0.02, stop_loss_pct=0.01, max_holding_bars=1) + out = label_with_triple_barrier( + highs=highs, + lows=lows, + closes=closes, + entry_index=0, + side=1, + spec=spec, + ) + assert out.label == -1 + assert out.touched == "stop_loss" + + +def test_short_side_inverts_barrier_semantics() -> None: + highs = [100, 100.5, 101.2] + lows = [100, 97.8, 98.0] + closes = [100, 99, 99] + spec = TripleBarrierSpec(take_profit_pct=0.02, stop_loss_pct=0.01, max_holding_bars=3) + out = label_with_triple_barrier( + highs=highs, + lows=lows, + closes=closes, + entry_index=0, + side=-1, + spec=spec, + ) + assert out.label == 1 + assert out.touched == "take_profit" From 9f64c9944aee684ff1df3a598eb2af2b16916af5 Mon Sep 17 00:00:00 2001 From: agentson Date: Fri, 27 Feb 2026 00:47:09 +0900 Subject: [PATCH 008/109] fix: correct short-side tie-break semantics in triple barrier --- src/analysis/triple_barrier.py | 8 +++---- tests/test_triple_barrier.py | 40 ++++++++++++++++++++++++++++++++++ 2 files changed, 44 insertions(+), 4 deletions(-) diff --git a/src/analysis/triple_barrier.py 
b/src/analysis/triple_barrier.py index cf7a961..f609496 100644 --- a/src/analysis/triple_barrier.py +++ b/src/analysis/triple_barrier.py @@ -80,11 +80,11 @@ def label_with_triple_barrier( if up_touch and down_touch: if spec.tie_break == "stop_first": - touched = "stop_loss" if side == 1 else "take_profit" - label = -1 if side == 1 else 1 + touched = "stop_loss" + label = -1 else: - touched = "take_profit" if side == 1 else "stop_loss" - label = 1 if side == 1 else -1 + touched = "take_profit" + label = 1 elif up_touch: touched = "take_profit" if side == 1 else "stop_loss" label = 1 if side == 1 else -1 diff --git a/tests/test_triple_barrier.py b/tests/test_triple_barrier.py index 73efc47..1fff8e3 100644 --- a/tests/test_triple_barrier.py +++ b/tests/test_triple_barrier.py @@ -89,3 +89,43 @@ def test_short_side_inverts_barrier_semantics() -> None: ) assert out.label == 1 assert out.touched == "take_profit" + + +def test_short_tie_break_modes() -> None: + highs = [100, 101.1] + lows = [100, 97.9] + closes = [100, 100] + + stop_first = TripleBarrierSpec( + take_profit_pct=0.02, + stop_loss_pct=0.01, + max_holding_bars=1, + tie_break="stop_first", + ) + out_stop = label_with_triple_barrier( + highs=highs, + lows=lows, + closes=closes, + entry_index=0, + side=-1, + spec=stop_first, + ) + assert out_stop.label == -1 + assert out_stop.touched == "stop_loss" + + take_first = TripleBarrierSpec( + take_profit_pct=0.02, + stop_loss_pct=0.01, + max_holding_bars=1, + tie_break="take_first", + ) + out_take = label_with_triple_barrier( + highs=highs, + lows=lows, + closes=closes, + entry_index=0, + side=-1, + spec=take_first, + ) + assert out_take.label == 1 + assert out_take.touched == "take_profit" From 4d9f3e2cfc3b9277dd2db21879c66a794e157f5f Mon Sep 17 00:00:00 2001 From: agentson Date: Fri, 27 Feb 2026 00:50:12 +0900 Subject: [PATCH 009/109] feat: enforce overseas buy guard with USD buffer threshold (TASK-V3-014) --- src/config.py | 1 + src/main.py | 51 
+++++++++++++++++++++++++++++++ tests/test_main.py | 75 ++++++++++++++++++++++++++++++++++++++++++++++ 3 files changed, 127 insertions(+) diff --git a/src/config.py b/src/config.py index 7ff1c4d..34a886a 100644 --- a/src/config.py +++ b/src/config.py @@ -59,6 +59,7 @@ class Settings(BaseSettings): # KIS VTS overseas balance API returns errors for most accounts. # This value is used as a fallback when the balance API returns 0 in paper mode. PAPER_OVERSEAS_CASH: float = Field(default=50000.0, ge=0.0) + USD_BUFFER_MIN: float = Field(default=1000.0, ge=0.0) # Trading frequency mode (daily = batch API calls, realtime = per-stock calls) TRADE_MODE: str = Field(default="daily", pattern="^(daily|realtime)$") diff --git a/src/main.py b/src/main.py index 2fe9ef4..548ece0 100644 --- a/src/main.py +++ b/src/main.py @@ -429,6 +429,21 @@ def _determine_order_quantity( return quantity +def _should_block_overseas_buy_for_fx_buffer( + *, + market: MarketInfo, + action: str, + total_cash: float, + order_amount: float, + settings: Settings | None, +) -> tuple[bool, float, float]: + if market.is_domestic or action != "BUY" or settings is None: + return False, total_cash - order_amount, 0.0 + remaining = total_cash - order_amount + required = settings.USD_BUFFER_MIN + return remaining < required, remaining, required + + async def build_overseas_symbol_universe( db_conn: Any, overseas_broker: OverseasBroker, @@ -1292,6 +1307,24 @@ async def trading_cycle( ) return order_amount = current_price * quantity + fx_blocked, remaining_cash, required_buffer = _should_block_overseas_buy_for_fx_buffer( + market=market, + action=decision.action, + total_cash=total_cash, + order_amount=order_amount, + settings=settings, + ) + if fx_blocked: + logger.warning( + "Skip BUY %s (%s): FX buffer guard (remaining=%.2f, required=%.2f, cash=%.2f, order=%.2f)", + stock_code, + market.name, + remaining_cash, + required_buffer, + total_cash, + order_amount, + ) + return # 4. 
Check BUY cooldown (set when a prior BUY failed due to insufficient balance) if decision.action == "BUY" and buy_cooldown is not None: @@ -2360,6 +2393,24 @@ async def run_daily_session( ) continue order_amount = stock_data["current_price"] * quantity + fx_blocked, remaining_cash, required_buffer = _should_block_overseas_buy_for_fx_buffer( + market=market, + action=decision.action, + total_cash=total_cash, + order_amount=order_amount, + settings=settings, + ) + if fx_blocked: + logger.warning( + "Skip BUY %s (%s): FX buffer guard (remaining=%.2f, required=%.2f, cash=%.2f, order=%.2f)", + stock_code, + market.name, + remaining_cash, + required_buffer, + total_cash, + order_amount, + ) + continue # Check BUY cooldown (insufficient balance) if decision.action == "BUY": diff --git a/tests/test_main.py b/tests/test_main.py index 3dee447..9c465fb 100644 --- a/tests/test_main.py +++ b/tests/test_main.py @@ -3690,6 +3690,81 @@ class TestOverseasBrokerIntegration: # DB도 브로커도 보유 없음 → BUY 주문이 실행되어야 함 (회귀 테스트) overseas_broker.send_overseas_order.assert_called_once() + @pytest.mark.asyncio + async def test_overseas_buy_blocked_by_usd_buffer_guard(self) -> None: + """Overseas BUY must be blocked when USD buffer would be breached.""" + db_conn = init_db(":memory:") + + overseas_broker = MagicMock() + overseas_broker.get_overseas_price = AsyncMock( + return_value={"output": {"last": "182.50"}} + ) + overseas_broker.get_overseas_balance = AsyncMock( + return_value={ + "output1": [], + "output2": [ + { + "frcr_evlu_tota": "50000.00", + "frcr_buy_amt_smtl": "0.00", + } + ], + } + ) + overseas_broker.get_overseas_buying_power = AsyncMock( + return_value={"output": {"ovrs_ord_psbl_amt": "50000.00"}} + ) + overseas_broker.send_overseas_order = AsyncMock(return_value={"msg1": "주문접수"}) + + engine = MagicMock(spec=ScenarioEngine) + engine.evaluate = MagicMock(return_value=_make_buy_match("AAPL")) + + market = MagicMock() + market.name = "NASDAQ" + market.code = "US_NASDAQ" + 
market.exchange_code = "NASD" + market.is_domestic = False + + telegram = MagicMock() + telegram.notify_trade_execution = AsyncMock() + telegram.notify_fat_finger = AsyncMock() + telegram.notify_circuit_breaker = AsyncMock() + telegram.notify_scenario_matched = AsyncMock() + + decision_logger = MagicMock() + decision_logger.log_decision = MagicMock(return_value="decision-id") + + settings = MagicMock() + settings.POSITION_SIZING_ENABLED = False + settings.CONFIDENCE_THRESHOLD = 80 + settings.USD_BUFFER_MIN = 49900.0 + settings.MODE = "paper" + settings.PAPER_OVERSEAS_CASH = 50000.0 + + await trading_cycle( + broker=MagicMock(), + overseas_broker=overseas_broker, + scenario_engine=engine, + playbook=_make_playbook(market="US"), + risk=MagicMock(), + db_conn=db_conn, + decision_logger=decision_logger, + context_store=MagicMock( + get_latest_timeframe=MagicMock(return_value=None), + set_context=MagicMock(), + ), + criticality_assessor=MagicMock( + assess_market_conditions=MagicMock(return_value=MagicMock(value="NORMAL")), + get_timeout=MagicMock(return_value=5.0), + ), + telegram=telegram, + market=market, + stock_code="AAPL", + scan_candidates={}, + settings=settings, + ) + + overseas_broker.send_overseas_order.assert_not_called() + # --------------------------------------------------------------------------- # _retry_connection — unit tests (issue #209) From b206c23fc90d56bff7fd9030b3b6bd254db139c4 Mon Sep 17 00:00:00 2001 From: agentson Date: Fri, 27 Feb 2026 00:52:44 +0900 Subject: [PATCH 010/109] fix: scope USD buffer guard to US markets and add boundary tests --- src/main.py | 7 ++++++- tests/test_main.py | 44 +++++++++++++++++++++++++++++++++++++++++++- 2 files changed, 49 insertions(+), 2 deletions(-) diff --git a/src/main.py b/src/main.py index 548ece0..6d8e8bb 100644 --- a/src/main.py +++ b/src/main.py @@ -437,7 +437,12 @@ def _should_block_overseas_buy_for_fx_buffer( order_amount: float, settings: Settings | None, ) -> tuple[bool, float, float]: - if 
market.is_domestic or action != "BUY" or settings is None: + if ( + market.is_domestic + or not market.code.startswith("US") + or action != "BUY" + or settings is None + ): return False, total_cash - order_amount, 0.0 remaining = total_cash - order_amount required = settings.USD_BUFFER_MIN diff --git a/tests/test_main.py b/tests/test_main.py index 9c465fb..61887a0 100644 --- a/tests/test_main.py +++ b/tests/test_main.py @@ -15,6 +15,7 @@ from src.evolution.scorecard import DailyScorecard from src.logging.decision_logger import DecisionLogger from src.main import ( KILL_SWITCH, + _should_block_overseas_buy_for_fx_buffer, _trigger_emergency_kill_switch, _apply_dashboard_flag, _determine_order_quantity, @@ -3798,7 +3799,6 @@ class TestRetryConnection: with patch("src.main.asyncio.sleep") as mock_sleep: mock_sleep.return_value = None result = await _retry_connection(flaky, label="flaky") - assert result == "ok" assert call_count == 2 mock_sleep.assert_called_once() @@ -3853,6 +3853,48 @@ class TestRetryConnection: assert call_count == 1 # No retry for non-ConnectionError +def test_fx_buffer_guard_applies_only_to_us_and_respects_boundary() -> None: + settings = MagicMock() + settings.USD_BUFFER_MIN = 1000.0 + + us_market = MagicMock() + us_market.is_domestic = False + us_market.code = "US_NASDAQ" + + blocked, remaining, required = _should_block_overseas_buy_for_fx_buffer( + market=us_market, + action="BUY", + total_cash=5000.0, + order_amount=4001.0, + settings=settings, + ) + assert blocked + assert remaining == 999.0 + assert required == 1000.0 + + blocked_eq, _, _ = _should_block_overseas_buy_for_fx_buffer( + market=us_market, + action="BUY", + total_cash=5000.0, + order_amount=4000.0, + settings=settings, + ) + assert not blocked_eq + + jp_market = MagicMock() + jp_market.is_domestic = False + jp_market.code = "JP" + blocked_jp, _, required_jp = _should_block_overseas_buy_for_fx_buffer( + market=jp_market, + action="BUY", + total_cash=5000.0, + order_amount=4500.0, 
+ settings=settings, + ) + assert not blocked_jp + assert required_jp == 0.0 + + # run_daily_session — daily CB baseline (daily_start_eval) tests (issue #207) # --------------------------------------------------------------------------- From e56819e9e238b996dbaca0f42405604804952438 Mon Sep 17 00:00:00 2001 From: agentson Date: Fri, 27 Feb 2026 08:28:11 +0900 Subject: [PATCH 011/109] feat: add walk-forward splitter with purge and embargo controls (TASK-CODE-005) --- src/analysis/walk_forward_split.py | 75 +++++++++++++++++++++++++++++ tests/test_walk_forward_split.py | 76 ++++++++++++++++++++++++++++++ 2 files changed, 151 insertions(+) create mode 100644 src/analysis/walk_forward_split.py create mode 100644 tests/test_walk_forward_split.py diff --git a/src/analysis/walk_forward_split.py b/src/analysis/walk_forward_split.py new file mode 100644 index 0000000..2ff7837 --- /dev/null +++ b/src/analysis/walk_forward_split.py @@ -0,0 +1,75 @@ +"""Walk-forward splitter with purge/embargo controls.""" + +from __future__ import annotations + +from dataclasses import dataclass + + +@dataclass(frozen=True) +class WalkForwardFold: + train_indices: list[int] + test_indices: list[int] + + @property + def train_size(self) -> int: + return len(self.train_indices) + + @property + def test_size(self) -> int: + return len(self.test_indices) + + +def generate_walk_forward_splits( + *, + n_samples: int, + train_size: int, + test_size: int, + step_size: int | None = None, + purge_size: int = 0, + embargo_size: int = 0, + min_train_size: int = 1, +) -> list[WalkForwardFold]: + """Generate chronological folds with purge/embargo leakage controls.""" + if n_samples <= 0: + raise ValueError("n_samples must be positive") + if train_size <= 0 or test_size <= 0: + raise ValueError("train_size and test_size must be positive") + if purge_size < 0 or embargo_size < 0: + raise ValueError("purge_size and embargo_size must be >= 0") + if min_train_size <= 0: + raise ValueError("min_train_size must be 
positive") + + step = step_size if step_size is not None else test_size + if step <= 0: + raise ValueError("step_size must be positive") + + folds: list[WalkForwardFold] = [] + prev_test_end: int | None = None + test_start = train_size + purge_size + + while test_start + test_size <= n_samples: + test_end = test_start + test_size - 1 + train_end = test_start - purge_size - 1 + if train_end < 0: + break + + train_start = max(0, train_end - train_size + 1) + train_indices = list(range(train_start, train_end + 1)) + + if prev_test_end is not None and embargo_size > 0: + emb_from = prev_test_end + 1 + emb_to = prev_test_end + embargo_size + train_indices = [i for i in train_indices if i < emb_from or i > emb_to] + + if len(train_indices) >= min_train_size: + folds.append( + WalkForwardFold( + train_indices=train_indices, + test_indices=list(range(test_start, test_end + 1)), + ) + ) + + prev_test_end = test_end + test_start += step + + return folds diff --git a/tests/test_walk_forward_split.py b/tests/test_walk_forward_split.py new file mode 100644 index 0000000..c5003b8 --- /dev/null +++ b/tests/test_walk_forward_split.py @@ -0,0 +1,76 @@ +from __future__ import annotations + +import pytest + +from src.analysis.walk_forward_split import generate_walk_forward_splits + + +def test_generates_sequential_folds() -> None: + folds = generate_walk_forward_splits( + n_samples=30, + train_size=10, + test_size=5, + ) + assert len(folds) == 4 + assert folds[0].train_indices == list(range(0, 10)) + assert folds[0].test_indices == list(range(10, 15)) + assert folds[1].train_indices == list(range(5, 15)) + assert folds[1].test_indices == list(range(15, 20)) + + +def test_purge_removes_boundary_samples_before_test() -> None: + folds = generate_walk_forward_splits( + n_samples=25, + train_size=8, + test_size=4, + purge_size=2, + ) + first = folds[0] + # test starts at 10, purge=2 => train end must be 7 + assert first.train_indices == list(range(0, 8)) + assert first.test_indices == 
list(range(10, 14)) + + +def test_embargo_excludes_post_test_samples_from_next_train() -> None: + folds = generate_walk_forward_splits( + n_samples=45, + train_size=15, + test_size=5, + step_size=10, + embargo_size=3, + ) + assert len(folds) >= 2 + # Fold1 test: 15..19, next fold train window: 10..24. + # embargo_size=3 should remove 20,21,22 from fold2 train. + second_train = folds[1].train_indices + assert 20 not in second_train + assert 21 not in second_train + assert 22 not in second_train + assert 23 in second_train + + +def test_respects_min_train_size_and_returns_empty_when_impossible() -> None: + folds = generate_walk_forward_splits( + n_samples=15, + train_size=5, + test_size=5, + min_train_size=6, + ) + assert folds == [] + + +@pytest.mark.parametrize( + ("n_samples", "train_size", "test_size"), + [ + (0, 10, 2), + (10, 0, 2), + (10, 5, 0), + ], +) +def test_invalid_args_raise(n_samples: int, train_size: int, test_size: int) -> None: + with pytest.raises(ValueError): + generate_walk_forward_splits( + n_samples=n_samples, + train_size=train_size, + test_size=test_size, + ) From dc70311aed1300cd551aee939375df43af8bf454 Mon Sep 17 00:00:00 2001 From: agentson Date: Fri, 27 Feb 2026 08:32:09 +0900 Subject: [PATCH 012/109] fix: keep embargo tied to accepted folds and enforce PR-comment decision logs --- docs/ouroboros/50_tpm_control_protocol.md | 1 + docs/ouroboros/60_repo_enforcement_checklist.md | 1 + docs/workflow.md | 1 + src/analysis/walk_forward_split.py | 3 +-- tests/test_walk_forward_split.py | 16 ++++++++++++++++ 5 files changed, 20 insertions(+), 2 deletions(-) diff --git a/docs/ouroboros/50_tpm_control_protocol.md b/docs/ouroboros/50_tpm_control_protocol.md index 619f6a8..ec52e6d 100644 --- a/docs/ouroboros/50_tpm_control_protocol.md +++ b/docs/ouroboros/50_tpm_control_protocol.md @@ -149,6 +149,7 @@ TPM 티켓 운영 규칙: - TPM은 합의된 변경을 이슈로 등록하고 우선순위(`P0/P1/P2`)를 지정한다. - PR 본문에는 TPM이 지정한 우선순위와 범위가 그대로 반영되어야 한다. - 우선순위 변경은 TPM 제안 + Main Agent 승인으로만 가능하다. 
+- PM/TPM/Dev/Reviewer/Verifier/Runtime Verifier는 주요 의사결정 시점마다 PR 코멘트를 남겨 결정 근거를 추적 가능 상태로 유지한다. 브랜치 운영 규칙: - TPM은 각 티켓에 대해 `ticket temp branch -> program feature branch` PR 경로를 지정한다. diff --git a/docs/ouroboros/60_repo_enforcement_checklist.md b/docs/ouroboros/60_repo_enforcement_checklist.md index 02f40e9..2573aea 100644 --- a/docs/ouroboros/60_repo_enforcement_checklist.md +++ b/docs/ouroboros/60_repo_enforcement_checklist.md @@ -50,6 +50,7 @@ Updated: 2026-02-26 - PR 본문에 `REQ-*`, `TASK-*`, `TEST-*` 매핑 표 존재 - `src/core/risk_manager.py` 변경 없음 - 주요 의사결정 체크포인트(DCP-01~04) 중 해당 단계 Main Agent 확인 기록 존재 +- 주요 의사결정(리뷰 지적/수정 합의/검증 승인)에 대한 에이전트 PR 코멘트 존재 - 티켓 PR의 base가 `main`이 아닌 program feature branch인지 확인 자동 점검: diff --git a/docs/workflow.md b/docs/workflow.md index f3bb3b0..814fe8c 100644 --- a/docs/workflow.md +++ b/docs/workflow.md @@ -22,6 +22,7 @@ - Ticket-level development happens only on **ticket temp branches** cut from the program feature branch. - Ticket PR merges into program feature branch are allowed after verifier approval. - Until final user sign-off, `main` merge is prohibited. +- 각 에이전트는 주요 의사결정(리뷰 지적, 수정 방향, 검증 승인)마다 PR 코멘트를 적극 작성해 의사결정 과정을 남긴다. 
## Gitea CLI Formatting Troubleshooting diff --git a/src/analysis/walk_forward_split.py b/src/analysis/walk_forward_split.py index 2ff7837..2d2a3f4 100644 --- a/src/analysis/walk_forward_split.py +++ b/src/analysis/walk_forward_split.py @@ -68,8 +68,7 @@ def generate_walk_forward_splits( test_indices=list(range(test_start, test_end + 1)), ) ) - - prev_test_end = test_end + prev_test_end = test_end test_start += step return folds diff --git a/tests/test_walk_forward_split.py b/tests/test_walk_forward_split.py index c5003b8..5375798 100644 --- a/tests/test_walk_forward_split.py +++ b/tests/test_walk_forward_split.py @@ -59,6 +59,22 @@ def test_respects_min_train_size_and_returns_empty_when_impossible() -> None: assert folds == [] +def test_embargo_uses_last_accepted_fold_when_intermediate_fold_skips() -> None: + folds = generate_walk_forward_splits( + n_samples=30, + train_size=5, + test_size=3, + step_size=5, + embargo_size=1, + min_train_size=5, + ) + # 1st fold accepted, 2nd skipped by min_train_size, subsequent folds still generated. 
+ assert len(folds) == 3 + assert folds[0].test_indices == [5, 6, 7] + assert folds[1].test_indices == [15, 16, 17] + assert folds[2].test_indices == [25, 26, 27] + + @pytest.mark.parametrize( ("n_samples", "train_size", "test_size"), [ From 7d72669cb838f9657e652b34176ff6dbeef991b3 Mon Sep 17 00:00:00 2001 From: agentson Date: Fri, 27 Feb 2026 08:34:44 +0900 Subject: [PATCH 013/109] feat: enforce mandatory backtest cost assumptions (TASK-CODE-006) --- src/analysis/backtest_cost_guard.py | 47 +++++++++++++++++++++++ tests/test_backtest_cost_guard.py | 59 +++++++++++++++++++++++++++++ 2 files changed, 106 insertions(+) create mode 100644 src/analysis/backtest_cost_guard.py create mode 100644 tests/test_backtest_cost_guard.py diff --git a/src/analysis/backtest_cost_guard.py b/src/analysis/backtest_cost_guard.py new file mode 100644 index 0000000..2f4a5bb --- /dev/null +++ b/src/analysis/backtest_cost_guard.py @@ -0,0 +1,47 @@ +"""Backtest cost/slippage/failure validation guard.""" + +from __future__ import annotations + +from dataclasses import dataclass + + +@dataclass(frozen=True) +class BacktestCostModel: + commission_bps: float | None = None + slippage_bps_by_session: dict[str, float] | None = None + failure_rate_by_session: dict[str, float] | None = None + unfavorable_fill_required: bool = True + + +def validate_backtest_cost_model( + *, + model: BacktestCostModel, + required_sessions: list[str], +) -> None: + """Raise ValueError when required cost assumptions are missing/invalid.""" + if model.commission_bps is None or model.commission_bps < 0: + raise ValueError("commission_bps must be provided and >= 0") + if not model.unfavorable_fill_required: + raise ValueError("unfavorable_fill_required must be True") + + slippage = model.slippage_bps_by_session or {} + failure = model.failure_rate_by_session or {} + + missing_slippage = [s for s in required_sessions if s not in slippage] + if missing_slippage: + raise ValueError( + f"missing slippage_bps_by_session for 
sessions: {', '.join(missing_slippage)}" + ) + + missing_failure = [s for s in required_sessions if s not in failure] + if missing_failure: + raise ValueError( + f"missing failure_rate_by_session for sessions: {', '.join(missing_failure)}" + ) + + for sess, bps in slippage.items(): + if bps < 0: + raise ValueError(f"slippage bps must be >= 0 for session={sess}") + for sess, rate in failure.items(): + if rate < 0 or rate > 1: + raise ValueError(f"failure rate must be within [0,1] for session={sess}") diff --git a/tests/test_backtest_cost_guard.py b/tests/test_backtest_cost_guard.py new file mode 100644 index 0000000..417925f --- /dev/null +++ b/tests/test_backtest_cost_guard.py @@ -0,0 +1,59 @@ +from __future__ import annotations + +import pytest + +from src.analysis.backtest_cost_guard import BacktestCostModel, validate_backtest_cost_model + + +def test_valid_backtest_cost_model_passes() -> None: + model = BacktestCostModel( + commission_bps=5.0, + slippage_bps_by_session={"KRX_REG": 10.0, "US_PRE": 50.0}, + failure_rate_by_session={"KRX_REG": 0.01, "US_PRE": 0.08}, + unfavorable_fill_required=True, + ) + validate_backtest_cost_model(model=model, required_sessions=["KRX_REG", "US_PRE"]) + + +def test_missing_required_slippage_session_raises() -> None: + model = BacktestCostModel( + commission_bps=5.0, + slippage_bps_by_session={"KRX_REG": 10.0}, + failure_rate_by_session={"KRX_REG": 0.01, "US_PRE": 0.08}, + unfavorable_fill_required=True, + ) + with pytest.raises(ValueError, match="missing slippage_bps_by_session.*US_PRE"): + validate_backtest_cost_model(model=model, required_sessions=["KRX_REG", "US_PRE"]) + + +def test_missing_required_failure_rate_session_raises() -> None: + model = BacktestCostModel( + commission_bps=5.0, + slippage_bps_by_session={"KRX_REG": 10.0, "US_PRE": 50.0}, + failure_rate_by_session={"KRX_REG": 0.01}, + unfavorable_fill_required=True, + ) + with pytest.raises(ValueError, match="missing failure_rate_by_session.*US_PRE"): + 
validate_backtest_cost_model(model=model, required_sessions=["KRX_REG", "US_PRE"]) + + +def test_invalid_failure_rate_range_raises() -> None: + model = BacktestCostModel( + commission_bps=5.0, + slippage_bps_by_session={"KRX_REG": 10.0}, + failure_rate_by_session={"KRX_REG": 1.2}, + unfavorable_fill_required=True, + ) + with pytest.raises(ValueError, match="failure rate must be within"): + validate_backtest_cost_model(model=model, required_sessions=["KRX_REG"]) + + +def test_unfavorable_fill_requirement_cannot_be_disabled() -> None: + model = BacktestCostModel( + commission_bps=5.0, + slippage_bps_by_session={"KRX_REG": 10.0}, + failure_rate_by_session={"KRX_REG": 0.02}, + unfavorable_fill_required=False, + ) + with pytest.raises(ValueError, match="unfavorable_fill_required must be True"): + validate_backtest_cost_model(model=model, required_sessions=["KRX_REG"]) From 2331d80915d7ad4cd100ccd501fd524c387249e0 Mon Sep 17 00:00:00 2001 From: agentson Date: Fri, 27 Feb 2026 08:36:38 +0900 Subject: [PATCH 014/109] fix: reject non-finite backtest cost assumptions --- src/analysis/backtest_cost_guard.py | 11 ++++++++--- tests/test_backtest_cost_guard.py | 24 ++++++++++++++++++++++++ 2 files changed, 32 insertions(+), 3 deletions(-) diff --git a/src/analysis/backtest_cost_guard.py b/src/analysis/backtest_cost_guard.py index 2f4a5bb..8f2cf98 100644 --- a/src/analysis/backtest_cost_guard.py +++ b/src/analysis/backtest_cost_guard.py @@ -3,6 +3,7 @@ from __future__ import annotations from dataclasses import dataclass +import math @dataclass(frozen=True) @@ -19,7 +20,11 @@ def validate_backtest_cost_model( required_sessions: list[str], ) -> None: """Raise ValueError when required cost assumptions are missing/invalid.""" - if model.commission_bps is None or model.commission_bps < 0: + if ( + model.commission_bps is None + or not math.isfinite(model.commission_bps) + or model.commission_bps < 0 + ): raise ValueError("commission_bps must be provided and >= 0") if not 
model.unfavorable_fill_required: raise ValueError("unfavorable_fill_required must be True") @@ -40,8 +45,8 @@ def validate_backtest_cost_model( ) for sess, bps in slippage.items(): - if bps < 0: + if not math.isfinite(bps) or bps < 0: raise ValueError(f"slippage bps must be >= 0 for session={sess}") for sess, rate in failure.items(): - if rate < 0 or rate > 1: + if not math.isfinite(rate) or rate < 0 or rate > 1: raise ValueError(f"failure rate must be within [0,1] for session={sess}") diff --git a/tests/test_backtest_cost_guard.py b/tests/test_backtest_cost_guard.py index 417925f..6c73a30 100644 --- a/tests/test_backtest_cost_guard.py +++ b/tests/test_backtest_cost_guard.py @@ -57,3 +57,27 @@ def test_unfavorable_fill_requirement_cannot_be_disabled() -> None: ) with pytest.raises(ValueError, match="unfavorable_fill_required must be True"): validate_backtest_cost_model(model=model, required_sessions=["KRX_REG"]) + + +@pytest.mark.parametrize("bad_commission", [float("nan"), float("inf"), float("-inf")]) +def test_non_finite_commission_rejected(bad_commission: float) -> None: + model = BacktestCostModel( + commission_bps=bad_commission, + slippage_bps_by_session={"KRX_REG": 10.0}, + failure_rate_by_session={"KRX_REG": 0.02}, + unfavorable_fill_required=True, + ) + with pytest.raises(ValueError, match="commission_bps"): + validate_backtest_cost_model(model=model, required_sessions=["KRX_REG"]) + + +@pytest.mark.parametrize("bad_slippage", [float("nan"), float("inf"), float("-inf")]) +def test_non_finite_slippage_rejected(bad_slippage: float) -> None: + model = BacktestCostModel( + commission_bps=5.0, + slippage_bps_by_session={"KRX_REG": bad_slippage}, + failure_rate_by_session={"KRX_REG": 0.02}, + unfavorable_fill_required=True, + ) + with pytest.raises(ValueError, match="slippage bps"): + validate_backtest_cost_model(model=model, required_sessions=["KRX_REG"]) From 5b52f593a805c8c09ab49e7ff643fef360945399 Mon Sep 17 00:00:00 2001 From: agentson Date: Fri, 27 Feb 
2026 08:40:23 +0900 Subject: [PATCH 015/109] feat: add conservative backtest execution simulator (TASK-CODE-010) --- src/analysis/backtest_execution_model.py | 93 ++++++++++++++++++++++++ tests/test_backtest_execution_model.py | 76 +++++++++++++++++++ 2 files changed, 169 insertions(+) create mode 100644 src/analysis/backtest_execution_model.py create mode 100644 tests/test_backtest_execution_model.py diff --git a/src/analysis/backtest_execution_model.py b/src/analysis/backtest_execution_model.py new file mode 100644 index 0000000..f4911f1 --- /dev/null +++ b/src/analysis/backtest_execution_model.py @@ -0,0 +1,93 @@ +"""Conservative backtest execution model.""" + +from __future__ import annotations + +from dataclasses import dataclass +from random import Random +from typing import Literal + + +OrderSide = Literal["BUY", "SELL"] + + +@dataclass(frozen=True) +class ExecutionRequest: + side: OrderSide + session_id: str + qty: int + reference_price: float + + +@dataclass(frozen=True) +class ExecutionAssumptions: + slippage_bps_by_session: dict[str, float] + failure_rate_by_session: dict[str, float] + partial_fill_rate_by_session: dict[str, float] + partial_fill_min_ratio: float = 0.3 + partial_fill_max_ratio: float = 0.8 + seed: int = 0 + + +@dataclass(frozen=True) +class ExecutionResult: + status: Literal["FILLED", "PARTIAL", "REJECTED"] + filled_qty: int + avg_price: float + slippage_bps: float + reason: str + + +class BacktestExecutionModel: + """Execution simulator with conservative unfavorable fill assumptions.""" + + def __init__(self, assumptions: ExecutionAssumptions) -> None: + self.assumptions = assumptions + self._rng = Random(assumptions.seed) + if assumptions.partial_fill_min_ratio <= 0 or assumptions.partial_fill_max_ratio > 1: + raise ValueError("partial fill ratios must be within (0,1]") + if assumptions.partial_fill_min_ratio > assumptions.partial_fill_max_ratio: + raise ValueError("partial_fill_min_ratio must be <= partial_fill_max_ratio") + + def 
simulate(self, request: ExecutionRequest) -> ExecutionResult: + if request.qty <= 0: + raise ValueError("qty must be positive") + if request.reference_price <= 0: + raise ValueError("reference_price must be positive") + + slippage_bps = self.assumptions.slippage_bps_by_session.get(request.session_id, 0.0) + failure_rate = self.assumptions.failure_rate_by_session.get(request.session_id, 0.0) + partial_rate = self.assumptions.partial_fill_rate_by_session.get(request.session_id, 0.0) + + if self._rng.random() < failure_rate: + return ExecutionResult( + status="REJECTED", + filled_qty=0, + avg_price=0.0, + slippage_bps=slippage_bps, + reason="execution_failure", + ) + + slip_mult = 1.0 + (slippage_bps / 10000.0 if request.side == "BUY" else -slippage_bps / 10000.0) + exec_price = request.reference_price * slip_mult + + if self._rng.random() < partial_rate: + ratio = self._rng.uniform( + self.assumptions.partial_fill_min_ratio, + self.assumptions.partial_fill_max_ratio, + ) + filled = max(1, min(request.qty - 1, int(request.qty * ratio))) + return ExecutionResult( + status="PARTIAL", + filled_qty=filled, + avg_price=exec_price, + slippage_bps=slippage_bps, + reason="partial_fill", + ) + + return ExecutionResult( + status="FILLED", + filled_qty=request.qty, + avg_price=exec_price, + slippage_bps=slippage_bps, + reason="filled", + ) diff --git a/tests/test_backtest_execution_model.py b/tests/test_backtest_execution_model.py new file mode 100644 index 0000000..aa0f41f --- /dev/null +++ b/tests/test_backtest_execution_model.py @@ -0,0 +1,76 @@ +from __future__ import annotations + +import pytest + +from src.analysis.backtest_execution_model import ( + BacktestExecutionModel, + ExecutionAssumptions, + ExecutionRequest, +) + + +def test_buy_uses_unfavorable_slippage_direction() -> None: + model = BacktestExecutionModel( + ExecutionAssumptions( + slippage_bps_by_session={"US_PRE": 50.0}, + failure_rate_by_session={"US_PRE": 0.0}, + partial_fill_rate_by_session={"US_PRE": 0.0}, 
+ seed=1, + ) + ) + out = model.simulate( + ExecutionRequest(side="BUY", session_id="US_PRE", qty=10, reference_price=100.0) + ) + assert out.status == "FILLED" + assert out.avg_price == pytest.approx(100.5) + + +def test_sell_uses_unfavorable_slippage_direction() -> None: + model = BacktestExecutionModel( + ExecutionAssumptions( + slippage_bps_by_session={"US_PRE": 50.0}, + failure_rate_by_session={"US_PRE": 0.0}, + partial_fill_rate_by_session={"US_PRE": 0.0}, + seed=1, + ) + ) + out = model.simulate( + ExecutionRequest(side="SELL", session_id="US_PRE", qty=10, reference_price=100.0) + ) + assert out.status == "FILLED" + assert out.avg_price == pytest.approx(99.5) + + +def test_failure_rate_can_reject_order() -> None: + model = BacktestExecutionModel( + ExecutionAssumptions( + slippage_bps_by_session={"KRX_REG": 10.0}, + failure_rate_by_session={"KRX_REG": 1.0}, + partial_fill_rate_by_session={"KRX_REG": 0.0}, + seed=42, + ) + ) + out = model.simulate( + ExecutionRequest(side="BUY", session_id="KRX_REG", qty=10, reference_price=100.0) + ) + assert out.status == "REJECTED" + assert out.filled_qty == 0 + + +def test_partial_fill_applies_when_rate_is_one() -> None: + model = BacktestExecutionModel( + ExecutionAssumptions( + slippage_bps_by_session={"KRX_REG": 0.0}, + failure_rate_by_session={"KRX_REG": 0.0}, + partial_fill_rate_by_session={"KRX_REG": 1.0}, + partial_fill_min_ratio=0.4, + partial_fill_max_ratio=0.4, + seed=0, + ) + ) + out = model.simulate( + ExecutionRequest(side="BUY", session_id="KRX_REG", qty=10, reference_price=100.0) + ) + assert out.status == "PARTIAL" + assert out.filled_qty == 4 + assert out.avg_price == 100.0 From 13ba9e8081ab75e33a27fcfaa26316d8507a25e8 Mon Sep 17 00:00:00 2001 From: agentson Date: Fri, 27 Feb 2026 08:41:56 +0900 Subject: [PATCH 016/109] fix: validate execution assumption ranges in backtest model --- src/analysis/backtest_execution_model.py | 10 ++++++++ tests/test_backtest_execution_model.py | 32 ++++++++++++++++++++++++ 
2 files changed, 42 insertions(+) diff --git a/src/analysis/backtest_execution_model.py b/src/analysis/backtest_execution_model.py index f4911f1..24798dc 100644 --- a/src/analysis/backtest_execution_model.py +++ b/src/analysis/backtest_execution_model.py @@ -3,6 +3,7 @@ from __future__ import annotations from dataclasses import dataclass +import math from random import Random from typing import Literal @@ -47,6 +48,15 @@ class BacktestExecutionModel: raise ValueError("partial fill ratios must be within (0,1]") if assumptions.partial_fill_min_ratio > assumptions.partial_fill_max_ratio: raise ValueError("partial_fill_min_ratio must be <= partial_fill_max_ratio") + for sess, bps in assumptions.slippage_bps_by_session.items(): + if not math.isfinite(bps) or bps < 0: + raise ValueError(f"slippage_bps must be finite and >= 0 for session={sess}") + for sess, rate in assumptions.failure_rate_by_session.items(): + if not math.isfinite(rate) or rate < 0 or rate > 1: + raise ValueError(f"failure_rate must be in [0,1] for session={sess}") + for sess, rate in assumptions.partial_fill_rate_by_session.items(): + if not math.isfinite(rate) or rate < 0 or rate > 1: + raise ValueError(f"partial_fill_rate must be in [0,1] for session={sess}") def simulate(self, request: ExecutionRequest) -> ExecutionResult: if request.qty <= 0: diff --git a/tests/test_backtest_execution_model.py b/tests/test_backtest_execution_model.py index aa0f41f..fb2fa58 100644 --- a/tests/test_backtest_execution_model.py +++ b/tests/test_backtest_execution_model.py @@ -74,3 +74,35 @@ def test_partial_fill_applies_when_rate_is_one() -> None: assert out.status == "PARTIAL" assert out.filled_qty == 4 assert out.avg_price == 100.0 + + +@pytest.mark.parametrize("bad_slip", [-1.0, float("nan"), float("inf")]) +def test_invalid_slippage_is_rejected(bad_slip: float) -> None: + with pytest.raises(ValueError, match="slippage_bps"): + BacktestExecutionModel( + ExecutionAssumptions( + slippage_bps_by_session={"US_PRE": 
bad_slip}, + failure_rate_by_session={"US_PRE": 0.0}, + partial_fill_rate_by_session={"US_PRE": 0.0}, + ) + ) + + +@pytest.mark.parametrize("bad_rate", [-0.1, 1.1, float("nan")]) +def test_invalid_failure_or_partial_rates_are_rejected(bad_rate: float) -> None: + with pytest.raises(ValueError, match="failure_rate"): + BacktestExecutionModel( + ExecutionAssumptions( + slippage_bps_by_session={"US_PRE": 10.0}, + failure_rate_by_session={"US_PRE": bad_rate}, + partial_fill_rate_by_session={"US_PRE": 0.0}, + ) + ) + with pytest.raises(ValueError, match="partial_fill_rate"): + BacktestExecutionModel( + ExecutionAssumptions( + slippage_bps_by_session={"US_PRE": 10.0}, + failure_rate_by_session={"US_PRE": 0.0}, + partial_fill_rate_by_session={"US_PRE": bad_rate}, + ) + ) From 7bc4e8833500dbd840d48b50e460d3c4664a2e2f Mon Sep 17 00:00:00 2001 From: agentson Date: Fri, 27 Feb 2026 08:44:05 +0900 Subject: [PATCH 017/109] feat: separate strategy and fx pnl fields in trade logs (TASK-CODE-011) --- src/db.py | 30 ++++++++++++++++++++++++++---- tests/test_db.py | 48 ++++++++++++++++++++++++++++++++++++++++++++++++ 2 files changed, 74 insertions(+), 4 deletions(-) diff --git a/src/db.py b/src/db.py index 9c24584..c8638c4 100644 --- a/src/db.py +++ b/src/db.py @@ -31,8 +31,11 @@ def init_db(db_path: str) -> sqlite3.Connection: quantity INTEGER, price REAL, pnl REAL DEFAULT 0.0, + strategy_pnl REAL DEFAULT 0.0, + fx_pnl REAL DEFAULT 0.0, market TEXT DEFAULT 'KR', exchange_code TEXT DEFAULT 'KRX', + selection_context TEXT, decision_id TEXT, mode TEXT DEFAULT 'paper' ) @@ -53,6 +56,10 @@ def init_db(db_path: str) -> sqlite3.Connection: conn.execute("ALTER TABLE trades ADD COLUMN decision_id TEXT") if "mode" not in columns: conn.execute("ALTER TABLE trades ADD COLUMN mode TEXT DEFAULT 'paper'") + if "strategy_pnl" not in columns: + conn.execute("ALTER TABLE trades ADD COLUMN strategy_pnl REAL DEFAULT 0.0") + if "fx_pnl" not in columns: + conn.execute("ALTER TABLE trades ADD COLUMN 
fx_pnl REAL DEFAULT 0.0") # Context tree tables for multi-layered memory management conn.execute( @@ -171,6 +178,8 @@ def log_trade( quantity: int = 0, price: float = 0.0, pnl: float = 0.0, + strategy_pnl: float | None = None, + fx_pnl: float | None = None, market: str = "KR", exchange_code: str = "KRX", selection_context: dict[str, any] | None = None, @@ -187,7 +196,9 @@ def log_trade( rationale: AI decision rationale quantity: Number of shares price: Trade price - pnl: Profit/loss + pnl: Total profit/loss (backward compatibility) + strategy_pnl: Strategy PnL component + fx_pnl: FX PnL component market: Market code exchange_code: Exchange code selection_context: Scanner selection data (RSI, volume_ratio, signal, score) @@ -196,15 +207,24 @@ def log_trade( """ # Serialize selection context to JSON context_json = json.dumps(selection_context) if selection_context else None + if strategy_pnl is None and fx_pnl is None: + strategy_pnl = pnl + fx_pnl = 0.0 + elif strategy_pnl is None: + strategy_pnl = pnl - float(fx_pnl or 0.0) + elif fx_pnl is None: + fx_pnl = pnl - float(strategy_pnl) + if pnl == 0.0 and (strategy_pnl or fx_pnl): + pnl = float(strategy_pnl) + float(fx_pnl) conn.execute( """ INSERT INTO trades ( timestamp, stock_code, action, confidence, rationale, - quantity, price, pnl, market, exchange_code, selection_context, decision_id, - mode + quantity, price, pnl, strategy_pnl, fx_pnl, + market, exchange_code, selection_context, decision_id, mode ) - VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?) + VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?) 
""", ( datetime.now(UTC).isoformat(), @@ -215,6 +235,8 @@ def log_trade( quantity, price, pnl, + strategy_pnl, + fx_pnl, market, exchange_code, context_json, diff --git a/tests/test_db.py b/tests/test_db.py index ead224a..9705ca2 100644 --- a/tests/test_db.py +++ b/tests/test_db.py @@ -155,6 +155,8 @@ def test_mode_column_exists_in_schema() -> None: cursor = conn.execute("PRAGMA table_info(trades)") columns = {row[1] for row in cursor.fetchall()} assert "mode" in columns + assert "strategy_pnl" in columns + assert "fx_pnl" in columns def test_mode_migration_adds_column_to_existing_db() -> None: @@ -190,6 +192,52 @@ def test_mode_migration_adds_column_to_existing_db() -> None: cursor = conn.execute("PRAGMA table_info(trades)") columns = {row[1] for row in cursor.fetchall()} assert "mode" in columns + assert "strategy_pnl" in columns + assert "fx_pnl" in columns conn.close() finally: os.unlink(db_path) + + +def test_log_trade_stores_strategy_and_fx_pnl_separately() -> None: + conn = init_db(":memory:") + log_trade( + conn=conn, + stock_code="AAPL", + action="SELL", + confidence=90, + rationale="fx split", + pnl=120.0, + strategy_pnl=100.0, + fx_pnl=20.0, + market="US_NASDAQ", + exchange_code="NASD", + ) + row = conn.execute( + "SELECT pnl, strategy_pnl, fx_pnl FROM trades ORDER BY id DESC LIMIT 1" + ).fetchone() + assert row is not None + assert row[0] == 120.0 + assert row[1] == 100.0 + assert row[2] == 20.0 + + +def test_log_trade_backward_compat_sets_strategy_pnl_from_pnl() -> None: + conn = init_db(":memory:") + log_trade( + conn=conn, + stock_code="005930", + action="SELL", + confidence=80, + rationale="legacy", + pnl=50.0, + market="KR", + exchange_code="KRX", + ) + row = conn.execute( + "SELECT pnl, strategy_pnl, fx_pnl FROM trades ORDER BY id DESC LIMIT 1" + ).fetchone() + assert row is not None + assert row[0] == 50.0 + assert row[1] == 50.0 + assert row[2] == 0.0 From 34cf081c961883a224377313b39a2ecaad1a35c7 Mon Sep 17 00:00:00 2001 From: agentson Date: 
Fri, 27 Feb 2026 08:46:22 +0900 Subject: [PATCH 018/109] fix: backfill split pnl migration and harden partial pnl inputs --- src/db.py | 14 ++++++++++++-- tests/test_db.py | 36 ++++++++++++++++++++++++++++++++++++ 2 files changed, 48 insertions(+), 2 deletions(-) diff --git a/src/db.py b/src/db.py index c8638c4..96fb1c7 100644 --- a/src/db.py +++ b/src/db.py @@ -60,6 +60,16 @@ def init_db(db_path: str) -> sqlite3.Connection: conn.execute("ALTER TABLE trades ADD COLUMN strategy_pnl REAL DEFAULT 0.0") if "fx_pnl" not in columns: conn.execute("ALTER TABLE trades ADD COLUMN fx_pnl REAL DEFAULT 0.0") + # Backfill legacy rows where only pnl existed before split accounting columns. + conn.execute( + """ + UPDATE trades + SET strategy_pnl = pnl, fx_pnl = 0.0 + WHERE pnl != 0.0 + AND strategy_pnl = 0.0 + AND fx_pnl = 0.0 + """ + ) # Context tree tables for multi-layered memory management conn.execute( @@ -211,9 +221,9 @@ def log_trade( strategy_pnl = pnl fx_pnl = 0.0 elif strategy_pnl is None: - strategy_pnl = pnl - float(fx_pnl or 0.0) + strategy_pnl = pnl - float(fx_pnl or 0.0) if pnl != 0.0 else 0.0 elif fx_pnl is None: - fx_pnl = pnl - float(strategy_pnl) + fx_pnl = pnl - float(strategy_pnl) if pnl != 0.0 else 0.0 if pnl == 0.0 and (strategy_pnl or fx_pnl): pnl = float(strategy_pnl) + float(fx_pnl) diff --git a/tests/test_db.py b/tests/test_db.py index 9705ca2..49b822c 100644 --- a/tests/test_db.py +++ b/tests/test_db.py @@ -184,6 +184,13 @@ def test_mode_migration_adds_column_to_existing_db() -> None: decision_id TEXT )""" ) + old_conn.execute( + """ + INSERT INTO trades ( + timestamp, stock_code, action, confidence, rationale, quantity, price, pnl + ) VALUES ('2026-01-01T00:00:00+00:00', 'AAPL', 'SELL', 90, 'legacy', 1, 100.0, 123.45) + """ + ) old_conn.commit() old_conn.close() @@ -194,6 +201,13 @@ def test_mode_migration_adds_column_to_existing_db() -> None: assert "mode" in columns assert "strategy_pnl" in columns assert "fx_pnl" in columns + migrated = 
conn.execute( + "SELECT pnl, strategy_pnl, fx_pnl FROM trades WHERE stock_code='AAPL' LIMIT 1" + ).fetchone() + assert migrated is not None + assert migrated[0] == 123.45 + assert migrated[1] == 123.45 + assert migrated[2] == 0.0 conn.close() finally: os.unlink(db_path) @@ -241,3 +255,25 @@ def test_log_trade_backward_compat_sets_strategy_pnl_from_pnl() -> None: assert row[0] == 50.0 assert row[1] == 50.0 assert row[2] == 0.0 + + +def test_log_trade_partial_fx_input_does_not_infer_negative_strategy_pnl() -> None: + conn = init_db(":memory:") + log_trade( + conn=conn, + stock_code="AAPL", + action="SELL", + confidence=70, + rationale="fx only", + pnl=0.0, + fx_pnl=10.0, + market="US_NASDAQ", + exchange_code="NASD", + ) + row = conn.execute( + "SELECT pnl, strategy_pnl, fx_pnl FROM trades ORDER BY id DESC LIMIT 1" + ).fetchone() + assert row is not None + assert row[0] == 10.0 + assert row[1] == 0.0 + assert row[2] == 10.0 From b2b02b6f570f297079e5a7f14462c51b38617e96 Mon Sep 17 00:00:00 2001 From: agentson Date: Fri, 27 Feb 2026 08:49:04 +0900 Subject: [PATCH 019/109] feat: enforce session_id persistence in trade ledger (TASK-CODE-007) --- src/db.py | 24 ++++++++++++++++++++++-- tests/test_db.py | 22 +++++++++++++++++++++- 2 files changed, 43 insertions(+), 3 deletions(-) diff --git a/src/db.py b/src/db.py index 96fb1c7..9925883 100644 --- a/src/db.py +++ b/src/db.py @@ -8,6 +8,9 @@ from datetime import UTC, datetime from pathlib import Path from typing import Any +from src.core.order_policy import classify_session_id +from src.markets.schedule import MARKETS + def init_db(db_path: str) -> sqlite3.Connection: """Initialize the trade logs database and return a connection.""" @@ -35,6 +38,7 @@ def init_db(db_path: str) -> sqlite3.Connection: fx_pnl REAL DEFAULT 0.0, market TEXT DEFAULT 'KR', exchange_code TEXT DEFAULT 'KRX', + session_id TEXT DEFAULT 'UNKNOWN', selection_context TEXT, decision_id TEXT, mode TEXT DEFAULT 'paper' @@ -56,6 +60,8 @@ def init_db(db_path: 
str) -> sqlite3.Connection: conn.execute("ALTER TABLE trades ADD COLUMN decision_id TEXT") if "mode" not in columns: conn.execute("ALTER TABLE trades ADD COLUMN mode TEXT DEFAULT 'paper'") + if "session_id" not in columns: + conn.execute("ALTER TABLE trades ADD COLUMN session_id TEXT DEFAULT 'UNKNOWN'") if "strategy_pnl" not in columns: conn.execute("ALTER TABLE trades ADD COLUMN strategy_pnl REAL DEFAULT 0.0") if "fx_pnl" not in columns: @@ -70,6 +76,13 @@ def init_db(db_path: str) -> sqlite3.Connection: AND fx_pnl = 0.0 """ ) + conn.execute( + """ + UPDATE trades + SET session_id = 'UNKNOWN' + WHERE session_id IS NULL OR session_id = '' + """ + ) # Context tree tables for multi-layered memory management conn.execute( @@ -192,6 +205,7 @@ def log_trade( fx_pnl: float | None = None, market: str = "KR", exchange_code: str = "KRX", + session_id: str | None = None, selection_context: dict[str, any] | None = None, decision_id: str | None = None, mode: str = "paper", @@ -211,12 +225,17 @@ def log_trade( fx_pnl: FX PnL component market: Market code exchange_code: Exchange code + session_id: Session identifier (if omitted, auto-derived from market) selection_context: Scanner selection data (RSI, volume_ratio, signal, score) decision_id: Unique decision identifier for audit linking mode: Trading mode ('paper' or 'live') for data separation """ # Serialize selection context to JSON context_json = json.dumps(selection_context) if selection_context else None + resolved_session_id = session_id or "UNKNOWN" + market_info = MARKETS.get(market) + if session_id is None and market_info is not None: + resolved_session_id = classify_session_id(market_info) if strategy_pnl is None and fx_pnl is None: strategy_pnl = pnl fx_pnl = 0.0 @@ -232,9 +251,9 @@ def log_trade( INSERT INTO trades ( timestamp, stock_code, action, confidence, rationale, quantity, price, pnl, strategy_pnl, fx_pnl, - market, exchange_code, selection_context, decision_id, mode + market, exchange_code, session_id, 
selection_context, decision_id, mode ) - VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?) + VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?) """, ( datetime.now(UTC).isoformat(), @@ -249,6 +268,7 @@ def log_trade( fx_pnl, market, exchange_code, + resolved_session_id, context_json, decision_id, mode, diff --git a/tests/test_db.py b/tests/test_db.py index 49b822c..aa1b938 100644 --- a/tests/test_db.py +++ b/tests/test_db.py @@ -155,6 +155,7 @@ def test_mode_column_exists_in_schema() -> None: cursor = conn.execute("PRAGMA table_info(trades)") columns = {row[1] for row in cursor.fetchall()} assert "mode" in columns + assert "session_id" in columns assert "strategy_pnl" in columns assert "fx_pnl" in columns @@ -199,15 +200,17 @@ def test_mode_migration_adds_column_to_existing_db() -> None: cursor = conn.execute("PRAGMA table_info(trades)") columns = {row[1] for row in cursor.fetchall()} assert "mode" in columns + assert "session_id" in columns assert "strategy_pnl" in columns assert "fx_pnl" in columns migrated = conn.execute( - "SELECT pnl, strategy_pnl, fx_pnl FROM trades WHERE stock_code='AAPL' LIMIT 1" + "SELECT pnl, strategy_pnl, fx_pnl, session_id FROM trades WHERE stock_code='AAPL' LIMIT 1" ).fetchone() assert migrated is not None assert migrated[0] == 123.45 assert migrated[1] == 123.45 assert migrated[2] == 0.0 + assert migrated[3] == "UNKNOWN" conn.close() finally: os.unlink(db_path) @@ -277,3 +280,20 @@ def test_log_trade_partial_fx_input_does_not_infer_negative_strategy_pnl() -> No assert row[0] == 10.0 assert row[1] == 0.0 assert row[2] == 10.0 + + +def test_log_trade_persists_explicit_session_id() -> None: + conn = init_db(":memory:") + log_trade( + conn=conn, + stock_code="AAPL", + action="BUY", + confidence=70, + rationale="session test", + market="US_NASDAQ", + exchange_code="NASD", + session_id="US_PRE", + ) + row = conn.execute("SELECT session_id FROM trades ORDER BY id DESC LIMIT 1").fetchone() + assert row is not None + assert row[0] == 
"US_PRE" From 694d73b212b75c02f49d7f501b240cf5ea46fc46 Mon Sep 17 00:00:00 2001 From: agentson Date: Fri, 27 Feb 2026 08:51:00 +0900 Subject: [PATCH 020/109] fix: lazy session resolver and one-time session_id backfill --- src/db.py | 40 ++++++++++++++++++++++++++-------------- tests/test_db.py | 32 ++++++++++++++++++++++++++++++++ 2 files changed, 58 insertions(+), 14 deletions(-) diff --git a/src/db.py b/src/db.py index 9925883..4a0c9f0 100644 --- a/src/db.py +++ b/src/db.py @@ -8,9 +8,6 @@ from datetime import UTC, datetime from pathlib import Path from typing import Any -from src.core.order_policy import classify_session_id -from src.markets.schedule import MARKETS - def init_db(db_path: str) -> sqlite3.Connection: """Initialize the trade logs database and return a connection.""" @@ -60,8 +57,10 @@ def init_db(db_path: str) -> sqlite3.Connection: conn.execute("ALTER TABLE trades ADD COLUMN decision_id TEXT") if "mode" not in columns: conn.execute("ALTER TABLE trades ADD COLUMN mode TEXT DEFAULT 'paper'") + session_id_added = False if "session_id" not in columns: conn.execute("ALTER TABLE trades ADD COLUMN session_id TEXT DEFAULT 'UNKNOWN'") + session_id_added = True if "strategy_pnl" not in columns: conn.execute("ALTER TABLE trades ADD COLUMN strategy_pnl REAL DEFAULT 0.0") if "fx_pnl" not in columns: @@ -76,13 +75,14 @@ def init_db(db_path: str) -> sqlite3.Connection: AND fx_pnl = 0.0 """ ) - conn.execute( - """ - UPDATE trades - SET session_id = 'UNKNOWN' - WHERE session_id IS NULL OR session_id = '' - """ - ) + if session_id_added: + conn.execute( + """ + UPDATE trades + SET session_id = 'UNKNOWN' + WHERE session_id IS NULL OR session_id = '' + """ + ) # Context tree tables for multi-layered memory management conn.execute( @@ -232,10 +232,7 @@ def log_trade( """ # Serialize selection context to JSON context_json = json.dumps(selection_context) if selection_context else None - resolved_session_id = session_id or "UNKNOWN" - market_info = MARKETS.get(market) - 
if session_id is None and market_info is not None: - resolved_session_id = classify_session_id(market_info) + resolved_session_id = _resolve_session_id(market=market, session_id=session_id) if strategy_pnl is None and fx_pnl is None: strategy_pnl = pnl fx_pnl = 0.0 @@ -277,6 +274,21 @@ def log_trade( conn.commit() +def _resolve_session_id(*, market: str, session_id: str | None) -> str: + if session_id: + return session_id + try: + from src.core.order_policy import classify_session_id + from src.markets.schedule import MARKETS + + market_info = MARKETS.get(market) + if market_info is not None: + return classify_session_id(market_info) + except Exception: + pass + return "UNKNOWN" + + def get_latest_buy_trade( conn: sqlite3.Connection, stock_code: str, market: str ) -> dict[str, Any] | None: diff --git a/tests/test_db.py b/tests/test_db.py index aa1b938..bbd600e 100644 --- a/tests/test_db.py +++ b/tests/test_db.py @@ -297,3 +297,35 @@ def test_log_trade_persists_explicit_session_id() -> None: row = conn.execute("SELECT session_id FROM trades ORDER BY id DESC LIMIT 1").fetchone() assert row is not None assert row[0] == "US_PRE" + + +def test_log_trade_auto_derives_session_id_when_not_provided() -> None: + conn = init_db(":memory:") + log_trade( + conn=conn, + stock_code="005930", + action="BUY", + confidence=70, + rationale="auto session", + market="KR", + exchange_code="KRX", + ) + row = conn.execute("SELECT session_id FROM trades ORDER BY id DESC LIMIT 1").fetchone() + assert row is not None + assert row[0] != "UNKNOWN" + + +def test_log_trade_unknown_market_falls_back_to_unknown_session() -> None: + conn = init_db(":memory:") + log_trade( + conn=conn, + stock_code="X", + action="BUY", + confidence=70, + rationale="unknown market", + market="MARS", + exchange_code="MARS", + ) + row = conn.execute("SELECT session_id FROM trades ORDER BY id DESC LIMIT 1").fetchone() + assert row is not None + assert row[0] == "UNKNOWN" From 2742628b784fb8958861855afcbc204d06b697a8 Mon 
Sep 17 00:00:00 2001 From: agentson Date: Fri, 27 Feb 2026 08:55:24 +0900 Subject: [PATCH 021/109] feat: prioritize kill-switch over overnight exception policy (TASK-CODE-012) --- src/config.py | 1 + src/main.py | 62 ++++++++++++++++++++++++++++++++-- tests/test_main.py | 83 ++++++++++++++++++++++++++++++++++++++++++++++ 3 files changed, 143 insertions(+), 3 deletions(-) diff --git a/src/config.py b/src/config.py index 34a886a..0e60e32 100644 --- a/src/config.py +++ b/src/config.py @@ -60,6 +60,7 @@ class Settings(BaseSettings): # This value is used as a fallback when the balance API returns 0 in paper mode. PAPER_OVERSEAS_CASH: float = Field(default=50000.0, ge=0.0) USD_BUFFER_MIN: float = Field(default=1000.0, ge=0.0) + OVERNIGHT_EXCEPTION_ENABLED: bool = True # Trading frequency mode (daily = batch API calls, realtime = per-stock calls) TRADE_MODE: str = Field(default="daily", pattern="^(daily|realtime)$") diff --git a/src/main.py b/src/main.py index 6d8e8bb..fd80768 100644 --- a/src/main.py +++ b/src/main.py @@ -33,7 +33,11 @@ from src.core.blackout_manager import ( parse_blackout_windows_kst, ) from src.core.kill_switch import KillSwitchOrchestrator -from src.core.order_policy import OrderPolicyRejected, validate_order_policy +from src.core.order_policy import ( + OrderPolicyRejected, + get_session_info, + validate_order_policy, +) from src.core.priority_queue import PriorityTaskQueue from src.core.risk_manager import CircuitBreakerTripped, FatFingerRejected, RiskManager from src.db import ( @@ -63,6 +67,7 @@ BLACKOUT_ORDER_MANAGER = BlackoutOrderManager( windows=[], max_queue_size=500, ) +_SESSION_CLOSE_WINDOWS = {"NXT_AFTER", "US_AFTER"} def safe_float(value: str | float | None, default: float = 0.0) -> float: @@ -449,6 +454,21 @@ def _should_block_overseas_buy_for_fx_buffer( return remaining < required, remaining, required +def _should_force_exit_for_overnight( + *, + market: MarketInfo, + settings: Settings | None, +) -> bool: + session_id = 
get_session_info(market).session_id + if session_id not in _SESSION_CLOSE_WINDOWS: + return False + if KILL_SWITCH.new_orders_blocked: + return True + if settings is None: + return False + return not settings.OVERNIGHT_EXCEPTION_ENABLED + + async def build_overseas_symbol_universe( db_conn: Any, overseas_broker: OverseasBroker, @@ -1214,6 +1234,23 @@ async def trading_cycle( loss_pct, take_profit_threshold, ) + if decision.action == "HOLD" and _should_force_exit_for_overnight( + market=market, + settings=settings, + ): + decision = TradeDecision( + action="SELL", + confidence=max(decision.confidence, 85), + rationale=( + "Forced exit by overnight policy" + " (session close window / kill switch priority)" + ), + ) + logger.info( + "Overnight policy override for %s (%s): HOLD -> SELL", + stock_code, + market.name, + ) logger.info( "Decision for %s (%s): %s (confidence=%d)", stock_code, @@ -1274,7 +1311,7 @@ async def trading_cycle( trade_price = current_price trade_pnl = 0.0 if decision.action in ("BUY", "SELL"): - if KILL_SWITCH.new_orders_blocked: + if KILL_SWITCH.new_orders_blocked and decision.action == "BUY": logger.critical( "KillSwitch block active: skip %s order for %s (%s)", decision.action, @@ -2323,6 +2360,25 @@ async def run_daily_session( stock_code, market.name, ) + if decision.action == "HOLD": + daily_open = get_open_position(db_conn, stock_code, market.code) + if daily_open and _should_force_exit_for_overnight( + market=market, + settings=settings, + ): + decision = TradeDecision( + action="SELL", + confidence=max(decision.confidence, 85), + rationale=( + "Forced exit by overnight policy" + " (session close window / kill switch priority)" + ), + ) + logger.info( + "Daily overnight policy override for %s (%s): HOLD -> SELL", + stock_code, + market.name, + ) # Log decision context_snapshot = { @@ -2363,7 +2419,7 @@ async def run_daily_session( trade_pnl = 0.0 order_succeeded = True if decision.action in ("BUY", "SELL"): - if 
KILL_SWITCH.new_orders_blocked: + if KILL_SWITCH.new_orders_blocked and decision.action == "BUY": logger.critical( "KillSwitch block active: skip %s order for %s (%s)", decision.action, diff --git a/tests/test_main.py b/tests/test_main.py index 61887a0..f7a7213 100644 --- a/tests/test_main.py +++ b/tests/test_main.py @@ -15,6 +15,7 @@ from src.evolution.scorecard import DailyScorecard from src.logging.decision_logger import DecisionLogger from src.main import ( KILL_SWITCH, + _should_force_exit_for_overnight, _should_block_overseas_buy_for_fx_buffer, _trigger_emergency_kill_switch, _apply_dashboard_flag, @@ -5310,6 +5311,88 @@ async def test_order_policy_rejection_skips_order_execution() -> None: broker.send_order.assert_not_called() +def test_overnight_policy_prioritizes_killswitch_over_exception() -> None: + market = MagicMock() + with patch("src.main.get_session_info", return_value=MagicMock(session_id="US_AFTER")): + settings = MagicMock() + settings.OVERNIGHT_EXCEPTION_ENABLED = True + try: + KILL_SWITCH.new_orders_blocked = True + assert _should_force_exit_for_overnight(market=market, settings=settings) + finally: + KILL_SWITCH.clear_block() + + +@pytest.mark.asyncio +async def test_kill_switch_block_does_not_block_sell_reduction() -> None: + """KillSwitch should block BUY entries, but allow SELL risk reduction orders.""" + db_conn = init_db(":memory:") + decision_logger = DecisionLogger(db_conn) + + broker = MagicMock() + broker.get_current_price = AsyncMock(return_value=(100.0, 0.5, 0.0)) + broker.get_balance = AsyncMock( + return_value={ + "output1": [{"pdno": "005930", "ord_psbl_qty": "3"}], + "output2": [ + { + "tot_evlu_amt": "100000", + "dnca_tot_amt": "50000", + "pchs_amt_smtl_amt": "50000", + } + ], + } + ) + broker.send_order = AsyncMock(return_value={"msg1": "OK"}) + + market = MagicMock() + market.name = "Korea" + market.code = "KR" + market.exchange_code = "KRX" + market.is_domestic = True + + telegram = MagicMock() + 
telegram.notify_trade_execution = AsyncMock() + telegram.notify_fat_finger = AsyncMock() + telegram.notify_circuit_breaker = AsyncMock() + telegram.notify_scenario_matched = AsyncMock() + + settings = MagicMock() + settings.POSITION_SIZING_ENABLED = False + settings.CONFIDENCE_THRESHOLD = 80 + settings.OVERNIGHT_EXCEPTION_ENABLED = True + settings.MODE = "paper" + + try: + KILL_SWITCH.new_orders_blocked = True + await trading_cycle( + broker=broker, + overseas_broker=MagicMock(), + scenario_engine=MagicMock(evaluate=MagicMock(return_value=_make_sell_match())), + playbook=_make_playbook(), + risk=MagicMock(), + db_conn=db_conn, + decision_logger=decision_logger, + context_store=MagicMock( + get_latest_timeframe=MagicMock(return_value=None), + set_context=MagicMock(), + ), + criticality_assessor=MagicMock( + assess_market_conditions=MagicMock(return_value=MagicMock(value="NORMAL")), + get_timeout=MagicMock(return_value=5.0), + ), + telegram=telegram, + market=market, + stock_code="005930", + scan_candidates={}, + settings=settings, + ) + finally: + KILL_SWITCH.clear_block() + + broker.send_order.assert_called_once() + + @pytest.mark.asyncio async def test_blackout_queues_order_and_skips_submission() -> None: """When blackout is active, order submission is replaced by queueing.""" From dbf57b50682e1f55a3f5eb23b89b0d4b74bac95d Mon Sep 17 00:00:00 2001 From: agentson Date: Fri, 27 Feb 2026 22:13:11 +0900 Subject: [PATCH 022/109] governance: enforce runtime verification coverage gates (#301) --- .gitea/ISSUE_TEMPLATE/runtime_verification.md | 41 ++++++++++ .gitea/PULL_REQUEST_TEMPLATE.md | 38 +++++++++ .github/workflows/ci.yml | 6 ++ docs/agent-constraints.md | 7 ++ docs/commands.md | 13 ++++ docs/ouroboros/50_tpm_control_protocol.md | 12 +++ .../60_repo_enforcement_checklist.md | 5 ++ docs/workflow.md | 34 ++++++++ scripts/runtime_verify_monitor.sh | 78 +++++++++++++++++++ scripts/validate_governance_assets.py | 61 +++++++++++++++ 10 files changed, 295 insertions(+) 
create mode 100644 .gitea/ISSUE_TEMPLATE/runtime_verification.md create mode 100644 .gitea/PULL_REQUEST_TEMPLATE.md create mode 100755 scripts/runtime_verify_monitor.sh create mode 100644 scripts/validate_governance_assets.py diff --git a/.gitea/ISSUE_TEMPLATE/runtime_verification.md b/.gitea/ISSUE_TEMPLATE/runtime_verification.md new file mode 100644 index 0000000..3e27fda --- /dev/null +++ b/.gitea/ISSUE_TEMPLATE/runtime_verification.md @@ -0,0 +1,41 @@ +--- +name: Runtime Verification Incident +about: 실운영/스테이징 동작 검증 중 발견된 이상 징후 등록 +title: "[RUNTIME-VERIFY][SCN-XXX] " +labels: runtime, verification +--- + +## Summary + +- 현상: +- 최초 관측 시각(UTC): + +## Reproduction / Observation + +- 실행 모드(`live`/`paper`): +- 세션(`NXT`, `US_PRE`, `US_DAY`, `US_AFTER`, ...): +- 실행 커맨드: +- 로그 경로: + +## Expected vs Actual + +- Expected: +- Actual: + +## Requirement Mapping + +- REQ: +- TASK: +- TEST: + +## Temporary Mitigation + +- 즉시 완화책: + +## Close Criteria + +- [ ] Dev 수정 반영 +- [ ] Verifier 재검증 PASS +- [ ] Runtime Verifier 재관측 PASS +- [ ] `NOT_OBSERVED = 0` + diff --git a/.gitea/PULL_REQUEST_TEMPLATE.md b/.gitea/PULL_REQUEST_TEMPLATE.md new file mode 100644 index 0000000..99f5345 --- /dev/null +++ b/.gitea/PULL_REQUEST_TEMPLATE.md @@ -0,0 +1,38 @@ +## Linked Issue + +- Closes #N + +## Scope + +- REQ: `REQ-...` +- TASK: `TASK-...` +- TEST: `TEST-...` + +## Main -> Verifier Directive Contract + +- Scope: 대상 요구사항/코드/로그 경로 +- Method: 실행 커맨드 + 관측 포인트 +- PASS criteria: +- FAIL criteria: +- NOT_OBSERVED criteria: +- Evidence format: PR 코멘트 `Coverage Matrix` + +## Verifier Coverage Matrix (Required) + +| Item | Evidence | Status (PASS/FAIL/NOT_OBSERVED) | +|---|---|---| +| REQ-... | 링크/로그 | PASS | + +`NOT_OBSERVED`가 1개라도 있으면 승인/머지 금지. 
+ +## Gitea Preflight + +- [ ] `docs/commands.md`와 `docs/workflow.md` 트러블슈팅 선확인 +- [ ] `tea` 사용 (`gh` 미사용) + +## Runtime Evidence + +- 시스템 실제 구동 커맨드: +- 모니터링 로그 경로: +- 이상 징후/이슈 링크: + diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 6fcd55a..756de37 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -21,6 +21,12 @@ jobs: - name: Install dependencies run: pip install ".[dev]" + - name: Validate governance assets + run: python3 scripts/validate_governance_assets.py + + - name: Validate Ouroboros docs + run: python3 scripts/validate_ouroboros_docs.py + - name: Lint run: ruff check src/ tests/ diff --git a/docs/agent-constraints.md b/docs/agent-constraints.md index 6d50b67..0e955fb 100644 --- a/docs/agent-constraints.md +++ b/docs/agent-constraints.md @@ -12,6 +12,8 @@ It is distinct from `docs/requirements-log.md`, which records **project/product 1. **Workflow enforcement** - Follow `docs/workflow.md` for all changes. + - Before any Gitea issue/PR/comment operation, read `docs/commands.md` and `docs/workflow.md` troubleshooting section. + - Use `tea` for Gitea operations; do not use GitHub CLI (`gh`) in this repository workflow. - Create a Gitea issue before any code or documentation change. - Work on a feature branch `feature/issue-{N}-{short-description}` and open a PR. - Never commit directly to `main`. @@ -43,3 +45,8 @@ It is distinct from `docs/requirements-log.md`, which records **project/product - When work requires guidance, consult the relevant `docs/` policies first. - Any code change must be accompanied by relevant documentation updates. - Persist user constraints across sessions by recording them in this document. + +### 2026-02-27 + +- All agents must pre-read `docs/commands.md` and `docs/workflow.md` troubleshooting before running Gitea issue/PR/comment commands. +- `gh` CLI is prohibited for repository ticket/PR operations; use `tea` (or documented Gitea API fallback only). 
diff --git a/docs/commands.md b/docs/commands.md index b72df71..666c0b3 100644 --- a/docs/commands.md +++ b/docs/commands.md @@ -4,6 +4,13 @@ **Critical: Learn from failures. Never repeat the same failed command without modification.** +## Repository VCS Rule (Mandatory) + +- 이 저장소의 티켓/PR/코멘트 작업은 Gitea 기준으로 수행한다. +- `gh`(GitHub CLI) 명령 사용은 금지한다. +- 기본 도구는 `tea`이며, `tea` 미지원 케이스만 Gitea API를 fallback으로 사용한다. +- 실행 전 `docs/workflow.md`의 `Gitea CLI Formatting Troubleshooting`을 반드시 확인한다. + ### tea CLI (Gitea Command Line Tool) #### ❌ TTY Error - Interactive Confirmation Fails @@ -140,6 +147,12 @@ python -m src.main --mode=paper # Run with dashboard enabled python -m src.main --mode=paper --dashboard +# Runtime verification monitor (NOT_OBSERVED detection) +bash scripts/runtime_verify_monitor.sh + +# Follow runtime verification log +tail -f data/overnight/runtime_verify_*.log + # Docker docker compose up -d ouroboros # Run agent docker compose --profile test up test # Run tests in container diff --git a/docs/ouroboros/50_tpm_control_protocol.md b/docs/ouroboros/50_tpm_control_protocol.md index ec52e6d..56de1f3 100644 --- a/docs/ouroboros/50_tpm_control_protocol.md +++ b/docs/ouroboros/50_tpm_control_protocol.md @@ -34,6 +34,12 @@ Main Agent 아이디에이션 책임: - DCP-03 구현 착수: Phase 2 종료 전 Main Agent 승인 필수 - DCP-04 배포 승인: Phase 4 종료 후 Main Agent 최종 승인 필수 +Main/Verifier 사고 재발 방지 규칙: +- Main Agent는 검증 위임 시 `Directive Contract`를 충족하지 않으면 검증 착수 금지 +- Verifier Agent는 지시 누락/모호성 발견 시 즉시 `BLOCKED`를 선언하고 보완 요청 +- Verifier Agent는 `미관측(NOT_OBSERVED)` 항목을 PASS로 보고할 수 없다 +- Runtime 검증에서 요구 세션 증적이 없으면 "정상"이 아니라 `미검증 이상`으로 이슈화한다 + ## Phase Control Gates ### Phase 0: Scenario Intake and Scope Lock @@ -112,6 +118,8 @@ Exit criteria: Control checks: - Verifier가 테스트 증적(로그/리포트/실행 커맨드) 첨부 +- Verifier가 `Coverage Matrix`(`REQ/TASK/TEST` x `PASS/FAIL/NOT_OBSERVED`) 첨부 +- `NOT_OBSERVED` 항목 수가 0인지 확인(0이 아니면 Gate 실패) - Runtime Verifier가 스테이징/실운영 모니터링 계획 승인 - 산출물: 수용 승인 레코드 @@ -150,6 +158,8 @@ TPM 티켓 운영 규칙: 
- PR 본문에는 TPM이 지정한 우선순위와 범위가 그대로 반영되어야 한다. - 우선순위 변경은 TPM 제안 + Main Agent 승인으로만 가능하다. - PM/TPM/Dev/Reviewer/Verifier/Runtime Verifier는 주요 의사결정 시점마다 PR 코멘트를 남겨 결정 근거를 추적 가능 상태로 유지한다. +- PM/TPM/Dev/Reviewer/Verifier/Runtime Verifier는 이슈/PR/코멘트 조작 전에 `docs/commands.md`와 `docs/workflow.md`의 Gitea 트러블슈팅 섹션을 선참조해야 한다. +- 저장소 협업에서 GitHub CLI(`gh`) 사용은 금지하며, Gitea 작업은 `tea`(필요 시 문서화된 API fallback)만 허용한다. 브랜치 운영 규칙: - TPM은 각 티켓에 대해 `ticket temp branch -> program feature branch` PR 경로를 지정한다. @@ -168,6 +178,8 @@ TPM 티켓 운영 규칙: - 시스템 실제 구동(스테이징/로컬 실운영 모드) 실행 - 모니터링 체크리스트(핵심 경보/주문 경로/예외 로그) 수행 - 결과를 티켓/PR 코멘트에 증적으로 첨부하지 않으면 완료로 간주하지 않음 + - 세션별 필수 관측 포인트(`NXT`, `US_PRE`, `US_DAY`, `US_AFTER` 등) 중 미관측 항목은 `NOT_OBSERVED`로 기록 + - `NOT_OBSERVED` 존재 시 승인 금지 + Runtime 이슈 발행 ## Server Reflection Rule diff --git a/docs/ouroboros/60_repo_enforcement_checklist.md b/docs/ouroboros/60_repo_enforcement_checklist.md index 2573aea..b1100f3 100644 --- a/docs/ouroboros/60_repo_enforcement_checklist.md +++ b/docs/ouroboros/60_repo_enforcement_checklist.md @@ -48,6 +48,7 @@ Updated: 2026-02-26 병합 전 체크리스트: - 이슈 연결(`Closes #N`) 존재 - PR 본문에 `REQ-*`, `TASK-*`, `TEST-*` 매핑 표 존재 +- Main -> Verifier Directive Contract(범위/방법/합격/실패/미관측/증적 형식) 기재 - `src/core/risk_manager.py` 변경 없음 - 주요 의사결정 체크포인트(DCP-01~04) 중 해당 단계 Main Agent 확인 기록 존재 - 주요 의사결정(리뷰 지적/수정 합의/검증 승인)에 대한 에이전트 PR 코멘트 존재 @@ -57,6 +58,10 @@ Updated: 2026-02-26 - 문서 검증 스크립트 통과 - 테스트 통과 - 개발 완료 시 시스템 구동/모니터링 증적 코멘트 존재 +- 이슈/PR 조작 전에 `docs/commands.md` 및 `docs/workflow.md` 트러블슈팅 확인 코멘트 존재 +- `gh` CLI 미사용, `tea` 사용 증적 존재 +- Verifier `Coverage Matrix` 첨부(PASS/FAIL/NOT_OBSERVED) +- `NOT_OBSERVED` 항목 0 확인(0이 아니면 머지 금지) ## 5) 감사 추적 diff --git a/docs/workflow.md b/docs/workflow.md index 814fe8c..0a24ac9 100644 --- a/docs/workflow.md +++ b/docs/workflow.md @@ -16,6 +16,20 @@ **Never commit directly to `main`.** This policy applies to all changes, no exceptions. +## Agent Gitea Preflight (Mandatory) + +Gitea 이슈/PR/코멘트 작업 전에 모든 에이전트는 아래를 먼저 확인해야 한다. + +1. 
`docs/commands.md`의 `tea CLI` 실패 사례/해결 패턴 확인 +2. 본 문서의 `Gitea CLI Formatting Troubleshooting` 확인 +3. 명령 실행 전 `gh`(GitHub CLI) 사용 금지 확인 + +강제 규칙: +- 이 저장소 협업 명령은 `tea`를 기본으로 사용한다. +- `gh issue`, `gh pr` 등 GitHub CLI 명령은 사용 금지다. +- `tea` 실패 시 동일 명령 재시도 전에 원인/수정사항을 PR 코멘트에 남긴다. +- 필요한 경우에만 Gitea API(`localhost:3000`)를 fallback으로 사용한다. + ## Branch Strategy (Mandatory) - Team operation default branch is the **program feature branch**, not `main`. @@ -137,6 +151,22 @@ task_tool( Use `run_in_background=True` for independent tasks that don't block subsequent work. +### Main -> Verifier Directive Contract (Mandatory) + +메인 에이전트가 검증 에이전트에 작업을 위임할 때, 아래 6개를 누락하면 지시가 무효다. + +1. 검증 대상 범위: `REQ-*`, `TASK-*`, 코드/로그 경로 +2. 검증 방법: 실행 커맨드와 관측 포인트(예: 세션별 로그 키워드) +3. 합격 기준: PASS 조건을 수치/문구로 명시 +4. 실패 기준: FAIL 조건을 수치/문구로 명시 +5. 미관측 기준: `NOT_OBSERVED` 조건과 즉시 에스컬레이션 규칙 +6. 증적 형식: PR 코멘트에 `Coverage Matrix` 표로 제출 + +`NOT_OBSERVED` 처리 규칙: +- 요구사항 항목이 관측되지 않았으면 PASS로 간주 금지 +- `NOT_OBSERVED`는 운영상 `FAIL`과 동일하게 처리 +- `NOT_OBSERVED`가 하나라도 있으면 승인/머지 금지 + ## Code Review Checklist **CRITICAL: Every PR review MUST verify plan-implementation consistency.** @@ -170,3 +200,7 @@ Before approving any PR, the reviewer (human or agent) must check ALL of the fol - [ ] PR references the Gitea issue number - [ ] Feature branch follows naming convention (`feature/issue-N-description`) - [ ] Commit messages are clear and descriptive +- [ ] 이슈/PR 작업 전에 `docs/commands.md`와 본 문서 트러블슈팅 섹션을 확인했다 +- [ ] `gh` 명령을 사용하지 않고 `tea`(또는 허용된 Gitea API fallback)만 사용했다 +- [ ] Main -> Verifier 지시가 Directive Contract 6개 항목을 모두 포함한다 +- [ ] Verifier 결과에 `Coverage Matrix`(PASS/FAIL/NOT_OBSERVED)가 있고, `NOT_OBSERVED=0`이다 diff --git a/scripts/runtime_verify_monitor.sh b/scripts/runtime_verify_monitor.sh new file mode 100755 index 0000000..6c878a4 --- /dev/null +++ b/scripts/runtime_verify_monitor.sh @@ -0,0 +1,78 @@ +#!/usr/bin/env bash +# Runtime verification monitor with NOT_OBSERVED detection. 
+ +set -euo pipefail + +ROOT_DIR="${ROOT_DIR:-/home/agentson/repos/The-Ouroboros}" +LOG_DIR="${LOG_DIR:-$ROOT_DIR/data/overnight}" +INTERVAL_SEC="${INTERVAL_SEC:-60}" +MAX_HOURS="${MAX_HOURS:-24}" + +cd "$ROOT_DIR" + +OUT_LOG="$LOG_DIR/runtime_verify_$(date +%Y%m%d_%H%M%S).log" +END_TS=$(( $(date +%s) + MAX_HOURS*3600 )) + +log() { + printf '%s %s\n' "$(date -u +%Y-%m-%dT%H:%M:%SZ)" "$1" | tee -a "$OUT_LOG" >/dev/null +} + +check_signal() { + local name="$1" + local pattern="$2" + local run_log="$3" + + if rg -q "$pattern" "$run_log"; then + log "[COVERAGE] ${name}=PASS pattern=${pattern}" + return 0 + fi + log "[COVERAGE] ${name}=NOT_OBSERVED pattern=${pattern}" + return 1 +} + +log "[INFO] runtime verify monitor started interval=${INTERVAL_SEC}s max_hours=${MAX_HOURS}" + +while true; do + now=$(date +%s) + if [ "$now" -ge "$END_TS" ]; then + log "[INFO] monitor completed (time window reached)" + exit 0 + fi + + latest_run="$(ls -t "$LOG_DIR"/run_*.log 2>/dev/null | head -n1 || true)" + if [ -z "$latest_run" ]; then + log "[ANOMALY] no run log found" + sleep "$INTERVAL_SEC" + continue + fi + + # Basic liveness hints. + app_pid="$(cat "$LOG_DIR/app.pid" 2>/dev/null || true)" + wd_pid="$(cat "$LOG_DIR/watchdog.pid" 2>/dev/null || true)" + app_alive=0 + wd_alive=0 + port_alive=0 + [ -n "$app_pid" ] && kill -0 "$app_pid" 2>/dev/null && app_alive=1 + [ -n "$wd_pid" ] && kill -0 "$wd_pid" 2>/dev/null && wd_alive=1 + ss -ltnp 2>/dev/null | rg -q ':8080' && port_alive=1 + log "[HEARTBEAT] run_log=$latest_run app_alive=$app_alive watchdog_alive=$wd_alive port8080=$port_alive" + + # Coverage matrix rows (session paths and policy gate evidence). 
+ not_observed=0 + check_signal "LIVE_MODE" "Mode: live" "$latest_run" || not_observed=$((not_observed+1)) + check_signal "KR_LOOP" "Processing market: Korea Exchange" "$latest_run" || not_observed=$((not_observed+1)) + check_signal "NXT_PATH" "NXT_PRE|NXT_AFTER|session=NXT_" "$latest_run" || not_observed=$((not_observed+1)) + check_signal "US_PRE_PATH" "US_PRE|session=US_PRE" "$latest_run" || not_observed=$((not_observed+1)) + check_signal "US_DAY_PATH" "US_DAY|session=US_DAY|Processing market: .*NASDAQ|Processing market: .*NYSE|Processing market: .*AMEX" "$latest_run" || not_observed=$((not_observed+1)) + check_signal "US_AFTER_PATH" "US_AFTER|session=US_AFTER" "$latest_run" || not_observed=$((not_observed+1)) + check_signal "ORDER_POLICY_SESSION" "Order policy rejected .*\\[session=" "$latest_run" || not_observed=$((not_observed+1)) + + if [ "$not_observed" -gt 0 ]; then + log "[ANOMALY] coverage_not_observed=$not_observed (treat as FAIL)" + else + log "[OK] coverage complete (NOT_OBSERVED=0)" + fi + + sleep "$INTERVAL_SEC" +done + diff --git a/scripts/validate_governance_assets.py b/scripts/validate_governance_assets.py new file mode 100644 index 0000000..e7058a2 --- /dev/null +++ b/scripts/validate_governance_assets.py @@ -0,0 +1,61 @@ +#!/usr/bin/env python3 +"""Validate persistent governance assets for agent workflow safety.""" + +from __future__ import annotations + +import sys +from pathlib import Path + + +def must_contain(path: Path, required: list[str], errors: list[str]) -> None: + if not path.exists(): + errors.append(f"missing file: {path}") + return + text = path.read_text(encoding="utf-8") + for token in required: + if token not in text: + errors.append(f"{path}: missing required token -> {token}") + + +def main() -> int: + errors: list[str] = [] + + pr_template = Path(".gitea/PULL_REQUEST_TEMPLATE.md") + issue_template = Path(".gitea/ISSUE_TEMPLATE/runtime_verification.md") + + must_contain( + pr_template, + [ + "Closes #N", + "Main -> Verifier 
Directive Contract", + "Coverage Matrix", + "NOT_OBSERVED", + "tea", + "gh", + ], + errors, + ) + must_contain( + issue_template, + [ + "[RUNTIME-VERIFY][SCN-XXX]", + "Requirement Mapping", + "Close Criteria", + "NOT_OBSERVED = 0", + ], + errors, + ) + + if errors: + print("[FAIL] governance asset validation failed") + for err in errors: + print(f"- {err}") + return 1 + + print("[OK] governance assets validated") + return 0 + + +if __name__ == "__main__": + sys.exit(main()) + From 4a404875a929500bbf4aa2e692ba060c272fb11e Mon Sep 17 00:00:00 2001 From: agentson Date: Fri, 27 Feb 2026 22:30:13 +0900 Subject: [PATCH 023/109] fix: include extended KR/US sessions in realtime market scheduling (#301) --- src/main.py | 16 ++++++++++-- src/markets/schedule.py | 47 ++++++++++++++++++++++++++++++++--- tests/test_market_schedule.py | 32 ++++++++++++++++++++++++ 3 files changed, 89 insertions(+), 6 deletions(-) diff --git a/src/main.py b/src/main.py index fd80768..cf168e7 100644 --- a/src/main.py +++ b/src/main.py @@ -3408,7 +3408,10 @@ async def run(settings: Settings) -> None: _run_context_scheduler(context_scheduler, now=datetime.now(UTC)) # Get currently open markets - open_markets = get_open_markets(settings.enabled_market_list) + open_markets = get_open_markets( + settings.enabled_market_list, + include_extended_sessions=True, + ) if not open_markets: # Notify market close for any markets that were open @@ -3437,7 +3440,8 @@ async def run(settings: Settings) -> None: # No markets open — wait until next market opens try: next_market, next_open_time = get_next_market_open( - settings.enabled_market_list + settings.enabled_market_list, + include_extended_sessions=True, ) now = datetime.now(UTC) wait_seconds = (next_open_time - now).total_seconds() @@ -3459,6 +3463,14 @@ async def run(settings: Settings) -> None: if shutdown.is_set(): break + session_info = get_session_info(market) + logger.info( + "Market session active: %s (%s) session=%s", + market.code, + market.name, + 
session_info.session_id, + ) + await process_blackout_recovery_orders( broker=broker, overseas_broker=overseas_broker, diff --git a/src/markets/schedule.py b/src/markets/schedule.py index b7daf22..9d142d9 100644 --- a/src/markets/schedule.py +++ b/src/markets/schedule.py @@ -1,7 +1,7 @@ """Market schedule management with timezone support.""" from dataclasses import dataclass -from datetime import datetime, time, timedelta +from datetime import UTC, datetime, time, timedelta from zoneinfo import ZoneInfo @@ -181,7 +181,10 @@ def is_market_open(market: MarketInfo, now: datetime | None = None) -> bool: def get_open_markets( - enabled_markets: list[str] | None = None, now: datetime | None = None + enabled_markets: list[str] | None = None, + now: datetime | None = None, + *, + include_extended_sessions: bool = False, ) -> list[MarketInfo]: """ Get list of currently open markets. @@ -196,17 +199,31 @@ def get_open_markets( if enabled_markets is None: enabled_markets = list(MARKETS.keys()) + def is_available(market: MarketInfo) -> bool: + if not include_extended_sessions: + return is_market_open(market, now) + if market.code == "KR" or market.code.startswith("US"): + # Import lazily to avoid module cycle at import-time. + from src.core.order_policy import classify_session_id + + session_id = classify_session_id(market, now) + return session_id not in {"KR_OFF", "US_OFF"} + return is_market_open(market, now) + open_markets = [ MARKETS[code] for code in enabled_markets - if code in MARKETS and is_market_open(MARKETS[code], now) + if code in MARKETS and is_available(MARKETS[code]) ] return sorted(open_markets, key=lambda m: m.code) def get_next_market_open( - enabled_markets: list[str] | None = None, now: datetime | None = None + enabled_markets: list[str] | None = None, + now: datetime | None = None, + *, + include_extended_sessions: bool = False, ) -> tuple[MarketInfo, datetime]: """ Find the next market that will open and when. 
@@ -233,6 +250,21 @@ def get_next_market_open( next_open_time: datetime | None = None next_market: MarketInfo | None = None + def first_extended_open_after(market: MarketInfo, start_utc: datetime) -> datetime | None: + # Search minute-by-minute for KR/US session transition into active window. + # Bounded to 7 days to match existing behavior. + from src.core.order_policy import classify_session_id + + ts = start_utc.astimezone(ZoneInfo("UTC")).replace(second=0, microsecond=0) + prev_active = classify_session_id(market, ts) not in {"KR_OFF", "US_OFF"} + for _ in range(7 * 24 * 60): + ts = ts + timedelta(minutes=1) + active = classify_session_id(market, ts) not in {"KR_OFF", "US_OFF"} + if active and not prev_active: + return ts + prev_active = active + return None + for code in enabled_markets: if code not in MARKETS: continue @@ -240,6 +272,13 @@ def get_next_market_open( market = MARKETS[code] market_now = now.astimezone(market.timezone) + if include_extended_sessions and (market.code == "KR" or market.code.startswith("US")): + ext_open = first_extended_open_after(market, now.astimezone(UTC)) + if ext_open and (next_open_time is None or ext_open < next_open_time): + next_open_time = ext_open + next_market = market + continue + # Calculate next open time for this market for days_ahead in range(7): # Check next 7 days check_date = market_now.date() + timedelta(days=days_ahead) diff --git a/tests/test_market_schedule.py b/tests/test_market_schedule.py index f3a9de7..49110bc 100644 --- a/tests/test_market_schedule.py +++ b/tests/test_market_schedule.py @@ -147,6 +147,24 @@ class TestGetOpenMarkets: codes = [m.code for m in open_markets] assert codes == sorted(codes) + def test_get_open_markets_us_pre_extended_session(self) -> None: + """US premarket should be considered open when extended sessions enabled.""" + # Monday 2026-02-02 08:30 EST = 13:30 UTC (premarket window) + test_time = datetime(2026, 2, 2, 13, 30, tzinfo=ZoneInfo("UTC")) + + regular = get_open_markets( 
+ enabled_markets=["US_NASDAQ", "US_NYSE", "US_AMEX"], + now=test_time, + ) + assert regular == [] + + extended = get_open_markets( + enabled_markets=["US_NASDAQ", "US_NYSE", "US_AMEX"], + now=test_time, + include_extended_sessions=True, + ) + assert {m.code for m in extended} == {"US_NASDAQ", "US_NYSE", "US_AMEX"} + class TestGetNextMarketOpen: """Test get_next_market_open function.""" @@ -201,6 +219,20 @@ class TestGetNextMarketOpen: ) assert market.code == "KR" + def test_get_next_market_open_prefers_extended_session(self) -> None: + """Extended lookup should return premarket open time before regular open.""" + # Monday 2026-02-02 07:00 EST = 12:00 UTC + # By v3 KST session rules, US is OFF only in KST 07:00-10:00 (UTC 22:00-01:00). + # At 12:00 UTC market is active, so next OFF->ON transition is 01:00 UTC next day. + test_time = datetime(2026, 2, 2, 12, 0, tzinfo=ZoneInfo("UTC")) + market, next_open = get_next_market_open( + enabled_markets=["US_NASDAQ"], + now=test_time, + include_extended_sessions=True, + ) + assert market.code == "US_NASDAQ" + assert next_open == datetime(2026, 2, 3, 1, 0, tzinfo=ZoneInfo("UTC")) + class TestExpandMarketCodes: """Test shorthand market expansion.""" From d912471d0e5274b458771377f114008c6781ee86 Mon Sep 17 00:00:00 2001 From: agentson Date: Fri, 27 Feb 2026 22:46:18 +0900 Subject: [PATCH 024/109] process: enforce process-change-first and staged ticket maturity (#306) --- .gitea/PULL_REQUEST_TEMPLATE.md | 9 +++++++ docs/ouroboros/50_tpm_control_protocol.md | 10 ++++++++ .../60_repo_enforcement_checklist.md | 3 +++ docs/workflow.md | 25 +++++++++++++++++++ 4 files changed, 47 insertions(+) diff --git a/.gitea/PULL_REQUEST_TEMPLATE.md b/.gitea/PULL_REQUEST_TEMPLATE.md index 99f5345..4bda55f 100644 --- a/.gitea/PULL_REQUEST_TEMPLATE.md +++ b/.gitea/PULL_REQUEST_TEMPLATE.md @@ -8,6 +8,11 @@ - TASK: `TASK-...` - TEST: `TEST-...` +## Ticket Stage + +- Current stage: `Implemented` / `Integrated` / `Observed` / `Accepted` +- Previous 
stage evidence link: + ## Main -> Verifier Directive Contract - Scope: 대상 요구사항/코드/로그 경로 @@ -36,3 +41,7 @@ - 모니터링 로그 경로: - 이상 징후/이슈 링크: +## Approval Gate + +- [ ] Static Verifier approval comment linked +- [ ] Runtime Verifier approval comment linked diff --git a/docs/ouroboros/50_tpm_control_protocol.md b/docs/ouroboros/50_tpm_control_protocol.md index 56de1f3..3921a64 100644 --- a/docs/ouroboros/50_tpm_control_protocol.md +++ b/docs/ouroboros/50_tpm_control_protocol.md @@ -121,6 +121,7 @@ Control checks: - Verifier가 `Coverage Matrix`(`REQ/TASK/TEST` x `PASS/FAIL/NOT_OBSERVED`) 첨부 - `NOT_OBSERVED` 항목 수가 0인지 확인(0이 아니면 Gate 실패) - Runtime Verifier가 스테이징/실운영 모니터링 계획 승인 +- 정적 Verifier 승인 + Runtime Verifier 승인 2개 모두 확인 - 산출물: 수용 승인 레코드 ### Phase 5: Release and Post-Release Control @@ -160,6 +161,15 @@ TPM 티켓 운영 규칙: - PM/TPM/Dev/Reviewer/Verifier/Runtime Verifier는 주요 의사결정 시점마다 PR 코멘트를 남겨 결정 근거를 추적 가능 상태로 유지한다. - PM/TPM/Dev/Reviewer/Verifier/Runtime Verifier는 이슈/PR/코멘트 조작 전에 `docs/commands.md`와 `docs/workflow.md`의 Gitea 트러블슈팅 섹션을 선참조해야 한다. - 저장소 협업에서 GitHub CLI(`gh`) 사용은 금지하며, Gitea 작업은 `tea`(필요 시 문서화된 API fallback)만 허용한다. +- 재발 방지/운영 규칙 변경이 합의되면, 기능 구현 이전에 process 티켓을 먼저 생성/머지해야 한다. +- process 티켓 미반영 상태에서 구현 티켓 진행 시 TPM이 즉시 `BLOCKED` 처리한다. + +티켓 성숙도 단계 (Mandatory): +- `Implemented`: 코드/문서 변경 완료 +- `Integrated`: 호출 경로/파이프라인 연결 확인 +- `Observed`: 런타임/실행 증적 확보 +- `Accepted`: Verifier + Runtime Verifier 승인 완료 +- 단계는 순차 전진만 허용되며, 단계 점프는 허용되지 않는다. 브랜치 운영 규칙: - TPM은 각 티켓에 대해 `ticket temp branch -> program feature branch` PR 경로를 지정한다. 
diff --git a/docs/ouroboros/60_repo_enforcement_checklist.md b/docs/ouroboros/60_repo_enforcement_checklist.md index b1100f3..0b5f809 100644 --- a/docs/ouroboros/60_repo_enforcement_checklist.md +++ b/docs/ouroboros/60_repo_enforcement_checklist.md @@ -49,6 +49,7 @@ Updated: 2026-02-26 - 이슈 연결(`Closes #N`) 존재 - PR 본문에 `REQ-*`, `TASK-*`, `TEST-*` 매핑 표 존재 - Main -> Verifier Directive Contract(범위/방법/합격/실패/미관측/증적 형식) 기재 +- process-change-first 대상이면 process 티켓 PR이 선머지됨 - `src/core/risk_manager.py` 변경 없음 - 주요 의사결정 체크포인트(DCP-01~04) 중 해당 단계 Main Agent 확인 기록 존재 - 주요 의사결정(리뷰 지적/수정 합의/검증 승인)에 대한 에이전트 PR 코멘트 존재 @@ -62,6 +63,8 @@ Updated: 2026-02-26 - `gh` CLI 미사용, `tea` 사용 증적 존재 - Verifier `Coverage Matrix` 첨부(PASS/FAIL/NOT_OBSERVED) - `NOT_OBSERVED` 항목 0 확인(0이 아니면 머지 금지) +- 티켓 단계 기록(`Implemented` -> `Integrated` -> `Observed` -> `Accepted`) 존재 +- 정적 Verifier 승인 + Runtime Verifier 승인 2개 확인 ## 5) 감사 추적 diff --git a/docs/workflow.md b/docs/workflow.md index 0a24ac9..7e39d72 100644 --- a/docs/workflow.md +++ b/docs/workflow.md @@ -167,6 +167,28 @@ Use `run_in_background=True` for independent tasks that don't block subsequent w - `NOT_OBSERVED`는 운영상 `FAIL`과 동일하게 처리 - `NOT_OBSERVED`가 하나라도 있으면 승인/머지 금지 +### Process-Change-First Rule (Mandatory) + +재발 방지/운영 규칙 변경이 결정되면, 기능 구현 티켓보다 먼저 서버(feature branch)에 반영해야 한다. + +- 순서: `process ticket merge` -> `implementation ticket start` +- process ticket 미반영 상태에서 기능 티켓 코딩/머지 금지 +- 세션 전환 시에도 동일 규칙 유지 + +### Ticket Maturity Stages (Mandatory) + +모든 티켓은 아래 4단계를 순서대로 통과해야 한다. + +1. `Implemented`: 코드/문서 변경 완료 +2. `Integrated`: 호출 경로/파이프라인 연결 완료 +3. `Observed`: 런타임/실행 증적 확보 완료 +4. 
`Accepted`: 정적 Verifier + Runtime Verifier 승인 완료 + +강제 규칙: +- 단계 점프 금지 (예: Implemented -> Accepted 금지) +- `Observed` 전에는 완료 선언 금지 +- `Accepted` 전에는 머지 금지 + ## Code Review Checklist **CRITICAL: Every PR review MUST verify plan-implementation consistency.** @@ -204,3 +226,6 @@ Before approving any PR, the reviewer (human or agent) must check ALL of the fol - [ ] `gh` 명령을 사용하지 않고 `tea`(또는 허용된 Gitea API fallback)만 사용했다 - [ ] Main -> Verifier 지시가 Directive Contract 6개 항목을 모두 포함한다 - [ ] Verifier 결과에 `Coverage Matrix`(PASS/FAIL/NOT_OBSERVED)가 있고, `NOT_OBSERVED=0`이다 +- [ ] Process-change-first 대상이면 해당 process PR이 먼저 머지되었다 +- [ ] 티켓 단계가 `Implemented -> Integrated -> Observed -> Accepted` 순서로 기록되었다 +- [ ] 정적 Verifier와 Runtime Verifier 승인 코멘트가 모두 존재한다 From b1610f14c594fcf3c0ddfb4efb73494134d0a2b7 Mon Sep 17 00:00:00 2001 From: agentson Date: Fri, 27 Feb 2026 23:08:29 +0900 Subject: [PATCH 025/109] process: enforce session handover gate across sessions (#308) --- .gitea/PULL_REQUEST_TEMPLATE.md | 6 + .gitea/workflows/ci.yml | 38 +++++ .github/workflows/ci.yml | 3 + docs/agent-constraints.md | 6 + docs/commands.md | 13 ++ .../60_repo_enforcement_checklist.md | 3 +- docs/workflow.md | 13 ++ scripts/session_handover_check.py | 138 ++++++++++++++++++ scripts/validate_governance_assets.py | 36 ++++- workflow/session-handover.md | 26 ++++ 10 files changed, 280 insertions(+), 2 deletions(-) create mode 100644 .gitea/workflows/ci.yml create mode 100755 scripts/session_handover_check.py create mode 100644 workflow/session-handover.md diff --git a/.gitea/PULL_REQUEST_TEMPLATE.md b/.gitea/PULL_REQUEST_TEMPLATE.md index 4bda55f..90edcf4 100644 --- a/.gitea/PULL_REQUEST_TEMPLATE.md +++ b/.gitea/PULL_REQUEST_TEMPLATE.md @@ -35,6 +35,12 @@ - [ ] `docs/commands.md`와 `docs/workflow.md` 트러블슈팅 선확인 - [ ] `tea` 사용 (`gh` 미사용) +## Session Handover Gate + +- [ ] `python3 scripts/session_handover_check.py --strict` 통과 +- [ ] `workflow/session-handover.md` 최신 엔트리가 현재 브랜치/당일(UTC) 기준으로 갱신됨 +- 최신 handover 
엔트리 heading: + ## Runtime Evidence - 시스템 실제 구동 커맨드: diff --git a/.gitea/workflows/ci.yml b/.gitea/workflows/ci.yml new file mode 100644 index 0000000..f73be37 --- /dev/null +++ b/.gitea/workflows/ci.yml @@ -0,0 +1,38 @@ +name: Gitea CI + +on: + pull_request: + push: + branches: + - main + - feature/** + +jobs: + test: + runs-on: ubuntu-latest + steps: + - name: Checkout + uses: actions/checkout@v4 + + - name: Set up Python + uses: actions/setup-python@v5 + with: + python-version: "3.11" + + - name: Install dependencies + run: pip install ".[dev]" + + - name: Session handover gate + run: python3 scripts/session_handover_check.py --strict + + - name: Validate governance assets + run: python3 scripts/validate_governance_assets.py + + - name: Validate Ouroboros docs + run: python3 scripts/validate_ouroboros_docs.py + + - name: Lint + run: ruff check src/ tests/ + + - name: Run tests with coverage + run: pytest -v --cov=src --cov-report=term-missing --cov-fail-under=80 diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 756de37..67cf621 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -21,6 +21,9 @@ jobs: - name: Install dependencies run: pip install ".[dev]" + - name: Session handover gate + run: python3 scripts/session_handover_check.py --strict + - name: Validate governance assets run: python3 scripts/validate_governance_assets.py diff --git a/docs/agent-constraints.md b/docs/agent-constraints.md index 0e955fb..1e6754b 100644 --- a/docs/agent-constraints.md +++ b/docs/agent-constraints.md @@ -32,6 +32,11 @@ It is distinct from `docs/requirements-log.md`, which records **project/product (or in a dedicated policy doc) and reference it when working. - Keep entries short and concrete, with dates. +5. **Session start handover gate** + - Before implementation/verification work, run `python3 scripts/session_handover_check.py --strict`. + - Keep `workflow/session-handover.md` updated with a same-day entry for the active branch. 
+ - If the check fails, stop and fix handover artifacts first. + ## Change Control - Changes to this file follow the same workflow as code changes. @@ -50,3 +55,4 @@ It is distinct from `docs/requirements-log.md`, which records **project/product - All agents must pre-read `docs/commands.md` and `docs/workflow.md` troubleshooting before running Gitea issue/PR/comment commands. - `gh` CLI is prohibited for repository ticket/PR operations; use `tea` (or documented Gitea API fallback only). +- Session start must pass `python3 scripts/session_handover_check.py --strict`, with branch-matched entry in `workflow/session-handover.md`. diff --git a/docs/commands.md b/docs/commands.md index 666c0b3..aeb0210 100644 --- a/docs/commands.md +++ b/docs/commands.md @@ -11,6 +11,16 @@ - 기본 도구는 `tea`이며, `tea` 미지원 케이스만 Gitea API를 fallback으로 사용한다. - 실행 전 `docs/workflow.md`의 `Gitea CLI Formatting Troubleshooting`을 반드시 확인한다. +## Session Handover Preflight (Mandatory) + +- 세션 시작 직후(코드 변경 전) 아래 명령을 먼저 실행한다. + +```bash +python3 scripts/session_handover_check.py --strict +``` + +- 실패 시 `workflow/session-handover.md` 최신 엔트리를 보강한 뒤 재실행한다. 
+ ### tea CLI (Gitea Command Line Tool) #### ❌ TTY Error - Interactive Confirmation Fails @@ -150,6 +160,9 @@ python -m src.main --mode=paper --dashboard # Runtime verification monitor (NOT_OBSERVED detection) bash scripts/runtime_verify_monitor.sh +# Session handover gate (must pass before implementation) +python3 scripts/session_handover_check.py --strict + # Follow runtime verification log tail -f data/overnight/runtime_verify_*.log diff --git a/docs/ouroboros/60_repo_enforcement_checklist.md b/docs/ouroboros/60_repo_enforcement_checklist.md index 0b5f809..989248c 100644 --- a/docs/ouroboros/60_repo_enforcement_checklist.md +++ b/docs/ouroboros/60_repo_enforcement_checklist.md @@ -3,7 +3,7 @@ Doc-ID: DOC-OPS-002 Version: 1.0.0 Status: active Owner: tpm -Updated: 2026-02-26 +Updated: 2026-02-27 --> # 저장소 강제 설정 체크리스트 @@ -58,6 +58,7 @@ Updated: 2026-02-26 자동 점검: - 문서 검증 스크립트 통과 - 테스트 통과 +- `python3 scripts/session_handover_check.py --strict` 통과 - 개발 완료 시 시스템 구동/모니터링 증적 코멘트 존재 - 이슈/PR 조작 전에 `docs/commands.md` 및 `docs/workflow.md` 트러블슈팅 확인 코멘트 존재 - `gh` CLI 미사용, `tea` 사용 증적 존재 diff --git a/docs/workflow.md b/docs/workflow.md index 7e39d72..d8ba052 100644 --- a/docs/workflow.md +++ b/docs/workflow.md @@ -30,6 +30,19 @@ Gitea 이슈/PR/코멘트 작업 전에 모든 에이전트는 아래를 먼저 - `tea` 실패 시 동일 명령 재시도 전에 원인/수정사항을 PR 코멘트에 남긴다. - 필요한 경우에만 Gitea API(`localhost:3000`)를 fallback으로 사용한다. +## Session Handover Gate (Mandatory) + +새 세션에서 구현/검증을 시작하기 전에 아래를 선행해야 한다. + +1. `docs/workflow.md`, `docs/commands.md`, `docs/agent-constraints.md` 재확인 +2. `workflow/session-handover.md`에 최신 세션 엔트리 추가 +3. `python3 scripts/session_handover_check.py --strict` 통과 확인 + +강제 규칙: +- handover check 실패 상태에서 코드 수정/이슈 상태 전이/PR 생성 금지 +- 최신 handover 엔트리는 현재 작업 브랜치를 명시해야 한다 +- 최신 handover 엔트리는 당일(UTC) 날짜를 포함해야 한다 + ## Branch Strategy (Mandatory) - Team operation default branch is the **program feature branch**, not `main`. 
diff --git a/scripts/session_handover_check.py b/scripts/session_handover_check.py new file mode 100755 index 0000000..229301e --- /dev/null +++ b/scripts/session_handover_check.py @@ -0,0 +1,138 @@ +#!/usr/bin/env python3 +"""Session handover preflight gate. + +This script enforces a minimal handover record per working branch so that +new sessions cannot start implementation without reading the required docs +and recording current intent. +""" + +from __future__ import annotations + +import argparse +import subprocess +import sys +from datetime import UTC, datetime +from pathlib import Path + +REQUIRED_DOCS = ( + Path("docs/workflow.md"), + Path("docs/commands.md"), + Path("docs/agent-constraints.md"), +) +HANDOVER_LOG = Path("workflow/session-handover.md") + + +def _run_git(*args: str) -> str: + try: + return ( + subprocess.check_output(["git", *args], stderr=subprocess.DEVNULL) + .decode("utf-8") + .strip() + ) + except Exception: + return "" + + +def _current_branch() -> str: + branch = _run_git("branch", "--show-current") + if branch: + return branch + return _run_git("rev-parse", "--abbrev-ref", "HEAD") + + +def _latest_entry(text: str) -> str: + chunks = text.split("\n### ") + if not chunks: + return "" + if chunks[0].startswith("### "): + chunks[0] = chunks[0][4:] + latest = chunks[-1].strip() + if not latest: + return "" + if not latest.startswith("### "): + latest = f"### {latest}" + return latest + + +def _check_required_files(errors: list[str]) -> None: + for path in REQUIRED_DOCS: + if not path.exists(): + errors.append(f"missing required document: {path}") + if not HANDOVER_LOG.exists(): + errors.append(f"missing handover log: {HANDOVER_LOG}") + + +def _check_handover_entry( + *, + branch: str, + strict: bool, + errors: list[str], +) -> None: + if not HANDOVER_LOG.exists(): + return + text = HANDOVER_LOG.read_text(encoding="utf-8") + latest = _latest_entry(text) + if not latest: + errors.append("handover log has no session entry") + return + + 
required_tokens = ( + "- branch:", + "- docs_checked:", + "- open_issues_reviewed:", + "- next_ticket:", + ) + for token in required_tokens: + if token not in latest: + errors.append(f"latest handover entry missing token: {token}") + + if strict: + today_utc = datetime.now(UTC).date().isoformat() + if today_utc not in latest: + errors.append( + f"latest handover entry must contain today's UTC date ({today_utc})" + ) + branch_token = f"- branch: {branch}" + if branch_token not in latest: + errors.append( + "latest handover entry must target current branch " + f"({branch_token})" + ) + + +def main() -> int: + parser = argparse.ArgumentParser( + description="Validate session handover gate requirements." + ) + parser.add_argument( + "--strict", + action="store_true", + help="Enforce today-date and current-branch match on latest handover entry.", + ) + args = parser.parse_args() + + errors: list[str] = [] + _check_required_files(errors) + + branch = _current_branch() + if not branch: + errors.append("cannot resolve current git branch") + elif branch in {"main", "master"}: + errors.append(f"working branch must not be {branch}") + + _check_handover_entry(branch=branch, strict=args.strict, errors=errors) + + if errors: + print("[FAIL] session handover check failed") + for err in errors: + print(f"- {err}") + return 1 + + print("[OK] session handover check passed") + print(f"[OK] branch={branch}") + print(f"[OK] handover_log={HANDOVER_LOG}") + return 0 + + +if __name__ == "__main__": + sys.exit(main()) diff --git a/scripts/validate_governance_assets.py b/scripts/validate_governance_assets.py index e7058a2..80680d7 100644 --- a/scripts/validate_governance_assets.py +++ b/scripts/validate_governance_assets.py @@ -22,6 +22,10 @@ def main() -> int: pr_template = Path(".gitea/PULL_REQUEST_TEMPLATE.md") issue_template = Path(".gitea/ISSUE_TEMPLATE/runtime_verification.md") + workflow_doc = Path("docs/workflow.md") + commands_doc = Path("docs/commands.md") + handover_script = 
Path("scripts/session_handover_check.py") + handover_log = Path("workflow/session-handover.md") must_contain( pr_template, @@ -32,6 +36,8 @@ def main() -> int: "NOT_OBSERVED", "tea", "gh", + "Session Handover Gate", + "session_handover_check.py --strict", ], errors, ) @@ -45,6 +51,35 @@ def main() -> int: ], errors, ) + must_contain( + workflow_doc, + [ + "Session Handover Gate (Mandatory)", + "session_handover_check.py --strict", + ], + errors, + ) + must_contain( + commands_doc, + [ + "Session Handover Preflight (Mandatory)", + "session_handover_check.py --strict", + ], + errors, + ) + must_contain( + handover_log, + [ + "Session Handover Log", + "- branch:", + "- docs_checked:", + "- open_issues_reviewed:", + "- next_ticket:", + ], + errors, + ) + if not handover_script.exists(): + errors.append(f"missing file: {handover_script}") if errors: print("[FAIL] governance asset validation failed") @@ -58,4 +93,3 @@ def main() -> int: if __name__ == "__main__": sys.exit(main()) - diff --git a/workflow/session-handover.md b/workflow/session-handover.md new file mode 100644 index 0000000..4233291 --- /dev/null +++ b/workflow/session-handover.md @@ -0,0 +1,26 @@ +# Session Handover Log + +목적: 세션 시작 시 인수인계 확인을 기록하고, 구현/검증 작업 시작 전에 공통 컨텍스트를 강제한다. + +작성 규칙: +- 세션 시작마다 최신 엔트리를 맨 아래에 추가한다. +- `docs/workflow.md`, `docs/commands.md`, `docs/agent-constraints.md`를 먼저 확인한 뒤 기록한다. +- 각 엔트리는 현재 작업 브랜치 기준으로 작성한다. + +템플릿: + +```md +### YYYY-MM-DD | session= +- branch: +- docs_checked: docs/workflow.md, docs/commands.md, docs/agent-constraints.md +- open_issues_reviewed: #... +- next_ticket: #... +- risks_or_notes: ... 
+``` + +### 2026-02-27 | session=handover-gate-bootstrap +- branch: feature/v3-session-policy-stream +- docs_checked: docs/workflow.md, docs/commands.md, docs/agent-constraints.md +- open_issues_reviewed: #304, #305, #306 +- next_ticket: #304 +- risks_or_notes: 세션 시작 게이트를 문서/스크립트/CI로 강제 적용 From 85a59542f81d26bd36ca9ed90a1bd90f2a922886 Mon Sep 17 00:00:00 2001 From: agentson Date: Fri, 27 Feb 2026 23:21:54 +0900 Subject: [PATCH 026/109] process: harden implementation-start gate before coding --- docs/agent-constraints.md | 11 +++++++++++ docs/workflow.md | 13 +++++++++++++ scripts/session_handover_check.py | 8 ++++++++ workflow/session-handover.md | 17 +++++++++++++++++ 4 files changed, 49 insertions(+) diff --git a/docs/agent-constraints.md b/docs/agent-constraints.md index 1e6754b..7a9f9c5 100644 --- a/docs/agent-constraints.md +++ b/docs/agent-constraints.md @@ -37,6 +37,11 @@ It is distinct from `docs/requirements-log.md`, which records **project/product - Keep `workflow/session-handover.md` updated with a same-day entry for the active branch. - If the check fails, stop and fix handover artifacts first. +6. **Process-change-first execution gate** + - If process/governance change is required, merge the process ticket to the feature branch first. + - Do not start code/test edits for implementation tickets until process merge evidence is confirmed. + - Subagents must be constrained to read-only exploration until the process gate is satisfied. + ## Change Control - Changes to this file follow the same workflow as code changes. @@ -56,3 +61,9 @@ It is distinct from `docs/requirements-log.md`, which records **project/product - All agents must pre-read `docs/commands.md` and `docs/workflow.md` troubleshooting before running Gitea issue/PR/comment commands. - `gh` CLI is prohibited for repository ticket/PR operations; use `tea` (or documented Gitea API fallback only). 
- Session start must pass `python3 scripts/session_handover_check.py --strict`, with branch-matched entry in `workflow/session-handover.md`.
+
+### 2026-02-27
+
+- Apply process-change-first as an execution gate: process ticket must be merged before implementation ticket coding.
+- Handover entry must record concrete `next_ticket` and `process_gate_checked`; placeholders are not allowed in strict gate.
+- Before process merge confirmation, all subagent tasks must remain read-only (analysis only).
diff --git a/docs/workflow.md b/docs/workflow.md
index d8ba052..1b07639 100644
--- a/docs/workflow.md
+++ b/docs/workflow.md
@@ -188,6 +188,19 @@ Use `run_in_background=True` for independent tasks that don't block subsequent w
 - process ticket 미반영 상태에서 기능 티켓 코딩/머지 금지
 - 세션 전환 시에도 동일 규칙 유지
 
+### Implementation Start Gate (Mandatory)
+
+구현 티켓을 시작하기 전에 아래 3개를 모두 만족해야 한다.
+
+1. `process ticket merge` 증적 확인 (feature branch 반영 커밋/PR)
+2. `workflow/session-handover.md` 최신 엔트리에 `next_ticket`과 `process_gate_checked` 기록
+3. `python3 scripts/session_handover_check.py --strict` 통과
+
+강제 규칙:
+- 위 3개 중 하나라도 불충족이면 코드/테스트 수정 금지
+- 서브에이전트 지시도 동일하게 제한한다 (`process merged 확인 전 read-only 탐색만 허용`)
+- 성급 착수 발견 시 구현 작업을 즉시 중단하고 handover/process gate부터 복구한다
+
 ### Ticket Maturity Stages (Mandatory)
 
 모든 티켓은 아래 4단계를 순서대로 통과해야 한다.
diff --git a/scripts/session_handover_check.py b/scripts/session_handover_check.py index 229301e..b2ded16 100755 --- a/scripts/session_handover_check.py +++ b/scripts/session_handover_check.py @@ -81,6 +81,7 @@ def _check_handover_entry( "- docs_checked:", "- open_issues_reviewed:", "- next_ticket:", + "- process_gate_checked:", ) for token in required_tokens: if token not in latest: @@ -98,6 +99,13 @@ def _check_handover_entry( "latest handover entry must target current branch " f"({branch_token})" ) + if "- next_ticket: #TBD" in latest: + errors.append("latest handover entry must not use placeholder next_ticket (#TBD)") + if "merged_to_feature_branch=no" in latest: + errors.append( + "process gate indicates not merged; implementation must stay blocked " + "(merged_to_feature_branch=no)" + ) def main() -> int: diff --git a/workflow/session-handover.md b/workflow/session-handover.md index 4233291..68085f6 100644 --- a/workflow/session-handover.md +++ b/workflow/session-handover.md @@ -15,6 +15,7 @@ - docs_checked: docs/workflow.md, docs/commands.md, docs/agent-constraints.md - open_issues_reviewed: #... - next_ticket: #... +- process_gate_checked: process_ticket=#..., merged_to_feature_branch=yes|no|n/a - risks_or_notes: ... 
``` @@ -24,3 +25,19 @@ - open_issues_reviewed: #304, #305, #306 - next_ticket: #304 - risks_or_notes: 세션 시작 게이트를 문서/스크립트/CI로 강제 적용 + +### 2026-02-27 | session=codex-handover-start +- branch: feature/v3-session-policy-stream +- docs_checked: docs/workflow.md, docs/commands.md, docs/agent-constraints.md +- open_issues_reviewed: #306, #308, #309 +- next_ticket: #304 +- process_gate_checked: process_ticket=#306,#308 merged_to_feature_branch=yes +- risks_or_notes: 미추적 로컬 파일 존재(문서/DB/lock)로 커밋 범위 분리 필요 + +### 2026-02-27 | session=codex-process-gate-hardening +- branch: feature/issue-304-runtime-staged-exit-semantics +- docs_checked: docs/workflow.md, docs/commands.md, docs/agent-constraints.md +- open_issues_reviewed: #304, #305 +- next_ticket: #304 +- process_gate_checked: process_ticket=#306,#308 merged_to_feature_branch=yes +- risks_or_notes: process-change-first 실행 게이트를 문서+스크립트로 강화 From 98dab2e06eb44b67629c54a87af0c252bab9839a Mon Sep 17 00:00:00 2001 From: agentson Date: Fri, 27 Feb 2026 23:48:52 +0900 Subject: [PATCH 027/109] fix: apply staged exit semantics in runtime paths (#304) --- src/main.py | 245 +++++++++++++++++-------- tests/test_main.py | 343 +++++++++++++++++++++++++++++++++++ workflow/session-handover.md | 8 + 3 files changed, 521 insertions(+), 75 deletions(-) diff --git a/src/main.py b/src/main.py index cf168e7..cc158a2 100644 --- a/src/main.py +++ b/src/main.py @@ -68,6 +68,8 @@ BLACKOUT_ORDER_MANAGER = BlackoutOrderManager( max_queue_size=500, ) _SESSION_CLOSE_WINDOWS = {"NXT_AFTER", "US_AFTER"} +_RUNTIME_EXIT_STATES: dict[str, PositionState] = {} +_RUNTIME_EXIT_PEAKS: dict[str, float] = {} def safe_float(value: str | float | None, default: float = 0.0) -> float: @@ -469,6 +471,118 @@ def _should_force_exit_for_overnight( return not settings.OVERNIGHT_EXCEPTION_ENABLED +def _build_runtime_position_key( + *, + market_code: str, + stock_code: str, + open_position: dict[str, Any], +) -> str: + decision_id = str(open_position.get("decision_id") or "") 
+ timestamp = str(open_position.get("timestamp") or "") + return f"{market_code}:{stock_code}:{decision_id}:{timestamp}" + + +def _clear_runtime_exit_cache_for_symbol(*, market_code: str, stock_code: str) -> None: + prefix = f"{market_code}:{stock_code}:" + stale_keys = [key for key in _RUNTIME_EXIT_STATES if key.startswith(prefix)] + for key in stale_keys: + _RUNTIME_EXIT_STATES.pop(key, None) + _RUNTIME_EXIT_PEAKS.pop(key, None) + + +def _apply_staged_exit_override_for_hold( + *, + decision: TradeDecision, + market: MarketInfo, + stock_code: str, + open_position: dict[str, Any] | None, + market_data: dict[str, Any], + stock_playbook: Any | None, +) -> TradeDecision: + """Apply v2 staged exit semantics for HOLD positions using runtime state.""" + if decision.action != "HOLD" or not open_position: + return decision + + entry_price = safe_float(open_position.get("price"), 0.0) + current_price = safe_float(market_data.get("current_price"), 0.0) + if entry_price <= 0 or current_price <= 0: + return decision + + stop_loss_threshold = -2.0 + take_profit_threshold = 3.0 + if stock_playbook and stock_playbook.scenarios: + stop_loss_threshold = stock_playbook.scenarios[0].stop_loss_pct + take_profit_threshold = stock_playbook.scenarios[0].take_profit_pct + + runtime_key = _build_runtime_position_key( + market_code=market.code, + stock_code=stock_code, + open_position=open_position, + ) + current_state = _RUNTIME_EXIT_STATES.get(runtime_key, PositionState.HOLDING) + prev_peak = _RUNTIME_EXIT_PEAKS.get(runtime_key, 0.0) + peak_hint = max( + safe_float(market_data.get("peak_price"), 0.0), + safe_float(market_data.get("session_high_price"), 0.0), + ) + peak_price = max(entry_price, current_price, prev_peak, peak_hint) + + exit_eval = evaluate_exit( + current_state=current_state, + config=ExitRuleConfig( + hard_stop_pct=stop_loss_threshold, + be_arm_pct=max(0.5, take_profit_threshold * 0.4), + arm_pct=take_profit_threshold, + ), + inp=ExitRuleInput( + 
current_price=current_price, + entry_price=entry_price, + peak_price=peak_price, + atr_value=safe_float(market_data.get("atr_value"), 0.0), + pred_down_prob=safe_float(market_data.get("pred_down_prob"), 0.0), + liquidity_weak=safe_float(market_data.get("volume_ratio"), 1.0) < 1.0, + ), + ) + _RUNTIME_EXIT_STATES[runtime_key] = exit_eval.state + _RUNTIME_EXIT_PEAKS[runtime_key] = peak_price + + if not exit_eval.should_exit: + return decision + + pnl_pct = (current_price - entry_price) / entry_price * 100.0 + if exit_eval.reason == "hard_stop": + rationale = ( + f"Stop-loss triggered ({pnl_pct:.2f}% <= " + f"{stop_loss_threshold:.2f}%)" + ) + elif exit_eval.reason == "arm_take_profit": + rationale = ( + f"Take-profit triggered ({pnl_pct:.2f}% >= " + f"{take_profit_threshold:.2f}%)" + ) + elif exit_eval.reason == "atr_trailing_stop": + rationale = "ATR trailing-stop triggered" + elif exit_eval.reason == "be_lock_threat": + rationale = "Break-even lock threat detected" + elif exit_eval.reason == "model_liquidity_exit": + rationale = "Model/liquidity exit triggered" + else: + rationale = f"Exit rule triggered ({exit_eval.reason})" + + logger.info( + "Staged exit override for %s (%s): HOLD -> SELL (reason=%s, state=%s)", + stock_code, + market.name, + exit_eval.reason, + exit_eval.state.value, + ) + return TradeDecision( + action="SELL", + confidence=max(decision.confidence, 90), + rationale=rationale, + ) + + async def build_overseas_symbol_universe( db_conn: Any, overseas_broker: OverseasBroker, @@ -977,6 +1091,11 @@ async def trading_cycle( "foreigner_net": foreigner_net, "price_change_pct": price_change_pct, } + session_high_price = safe_float( + price_output.get("high") or price_output.get("ovrs_hgpr") or price_output.get("stck_hgpr") + ) + if session_high_price > 0: + market_data["session_high_price"] = session_high_price # Enrich market_data with scanner metrics for scenario engine market_candidates = scan_candidates.get(market.code, {}) @@ -1175,82 +1294,36 @@ 
async def trading_cycle( if decision.action == "HOLD": open_position = get_open_position(db_conn, stock_code, market.code) - if open_position: - entry_price = safe_float(open_position.get("price"), 0.0) - if entry_price > 0 and current_price > 0: - loss_pct = (current_price - entry_price) / entry_price * 100 - stop_loss_threshold = -2.0 - take_profit_threshold = 3.0 - if stock_playbook and stock_playbook.scenarios: - stop_loss_threshold = stock_playbook.scenarios[0].stop_loss_pct - take_profit_threshold = stock_playbook.scenarios[0].take_profit_pct - - exit_eval = evaluate_exit( - current_state=PositionState.HOLDING, - config=ExitRuleConfig( - hard_stop_pct=stop_loss_threshold, - be_arm_pct=max(0.5, take_profit_threshold * 0.4), - arm_pct=take_profit_threshold, - ), - inp=ExitRuleInput( - current_price=current_price, - entry_price=entry_price, - peak_price=max(entry_price, current_price), - atr_value=0.0, - pred_down_prob=0.0, - liquidity_weak=market_data.get("volume_ratio", 1.0) < 1.0, - ), - ) - - if exit_eval.reason == "hard_stop": - decision = TradeDecision( - action="SELL", - confidence=95, - rationale=( - f"Stop-loss triggered ({loss_pct:.2f}% <= " - f"{stop_loss_threshold:.2f}%)" - ), - ) - logger.info( - "Stop-loss override for %s (%s): %.2f%% <= %.2f%%", - stock_code, - market.name, - loss_pct, - stop_loss_threshold, - ) - elif exit_eval.reason == "arm_take_profit": - decision = TradeDecision( - action="SELL", - confidence=90, - rationale=( - f"Take-profit triggered ({loss_pct:.2f}% >= " - f"{take_profit_threshold:.2f}%)" - ), - ) - logger.info( - "Take-profit override for %s (%s): %.2f%% >= %.2f%%", - stock_code, - market.name, - loss_pct, - take_profit_threshold, - ) - if decision.action == "HOLD" and _should_force_exit_for_overnight( + if not open_position: + _clear_runtime_exit_cache_for_symbol( + market_code=market.code, + stock_code=stock_code, + ) + decision = _apply_staged_exit_override_for_hold( + decision=decision, + market=market, + 
stock_code=stock_code, + open_position=open_position, + market_data=market_data, + stock_playbook=stock_playbook, + ) + if open_position and decision.action == "HOLD" and _should_force_exit_for_overnight( market=market, settings=settings, - ): - decision = TradeDecision( - action="SELL", - confidence=max(decision.confidence, 85), - rationale=( - "Forced exit by overnight policy" - " (session close window / kill switch priority)" - ), - ) - logger.info( - "Overnight policy override for %s (%s): HOLD -> SELL", - stock_code, - market.name, - ) + ): + decision = TradeDecision( + action="SELL", + confidence=max(decision.confidence, 85), + rationale=( + "Forced exit by overnight policy" + " (session close window / kill switch priority)" + ), + ) + logger.info( + "Overnight policy override for %s (%s): HOLD -> SELL", + stock_code, + market.name, + ) logger.info( "Decision for %s (%s): %s (confidence=%d)", stock_code, @@ -2190,6 +2263,14 @@ async def run_daily_session( "foreigner_net": foreigner_net, "price_change_pct": price_change_pct, } + if not market.is_domestic: + session_high_price = safe_float( + price_data.get("output", {}).get("high") + or price_data.get("output", {}).get("ovrs_hgpr") + or price_data.get("output", {}).get("stck_hgpr") + ) + if session_high_price > 0: + stock_data["session_high_price"] = session_high_price # Enrich with scanner metrics cand = candidate_map.get(stock_code) if cand: @@ -2317,6 +2398,7 @@ async def run_daily_session( ) for stock_data in stocks_data: stock_code = stock_data["stock_code"] + stock_playbook = playbook.get_stock_playbook(stock_code) match = scenario_engine.evaluate( playbook, stock_code, stock_data, portfolio_data, ) @@ -2362,7 +2444,20 @@ async def run_daily_session( ) if decision.action == "HOLD": daily_open = get_open_position(db_conn, stock_code, market.code) - if daily_open and _should_force_exit_for_overnight( + if not daily_open: + _clear_runtime_exit_cache_for_symbol( + market_code=market.code, + 
stock_code=stock_code, + ) + decision = _apply_staged_exit_override_for_hold( + decision=decision, + market=market, + stock_code=stock_code, + open_position=daily_open, + market_data=stock_data, + stock_playbook=stock_playbook, + ) + if daily_open and decision.action == "HOLD" and _should_force_exit_for_overnight( market=market, settings=settings, ): diff --git a/tests/test_main.py b/tests/test_main.py index f7a7213..63ee0da 100644 --- a/tests/test_main.py +++ b/tests/test_main.py @@ -15,6 +15,8 @@ from src.evolution.scorecard import DailyScorecard from src.logging.decision_logger import DecisionLogger from src.main import ( KILL_SWITCH, + _RUNTIME_EXIT_PEAKS, + _RUNTIME_EXIT_STATES, _should_force_exit_for_overnight, _should_block_overseas_buy_for_fx_buffer, _trigger_emergency_kill_switch, @@ -42,6 +44,7 @@ from src.strategy.models import ( StockCondition, StockScenario, ) +from src.strategy.position_state_machine import PositionState from src.strategy.scenario_engine import ScenarioEngine, ScenarioMatch @@ -87,8 +90,12 @@ def _make_sell_match(stock_code: str = "005930") -> ScenarioMatch: def _reset_kill_switch_state() -> None: """Prevent cross-test leakage from global kill-switch state.""" KILL_SWITCH.clear_block() + _RUNTIME_EXIT_STATES.clear() + _RUNTIME_EXIT_PEAKS.clear() yield KILL_SWITCH.clear_block() + _RUNTIME_EXIT_STATES.clear() + _RUNTIME_EXIT_PEAKS.clear() class TestExtractAvgPriceFromBalance: @@ -2337,6 +2344,218 @@ async def test_hold_not_overridden_when_between_stop_loss_and_take_profit() -> N broker.send_order.assert_not_called() +@pytest.mark.asyncio +async def test_hold_overridden_to_sell_on_be_lock_threat_after_state_arms() -> None: + """Staged exit must use runtime state (BE_LOCK -> be_lock_threat -> SELL).""" + db_conn = init_db(":memory:") + decision_logger = DecisionLogger(db_conn) + + buy_decision_id = decision_logger.log_decision( + stock_code="005930", + market="KR", + exchange_code="KRX", + action="BUY", + confidence=90, + 
rationale="entry", + context_snapshot={}, + input_data={}, + ) + log_trade( + conn=db_conn, + stock_code="005930", + action="BUY", + confidence=90, + rationale="entry", + quantity=1, + price=100.0, + market="KR", + exchange_code="KRX", + decision_id=buy_decision_id, + ) + + broker = MagicMock() + broker.get_current_price = AsyncMock(side_effect=[(102.0, 2.0, 0.0), (99.0, -1.0, 0.0)]) + broker.get_balance = AsyncMock( + return_value={ + "output1": [{"pdno": "005930", "ord_psbl_qty": "1"}], + "output2": [ + { + "tot_evlu_amt": "100000", + "dnca_tot_amt": "10000", + "pchs_amt_smtl_amt": "90000", + } + ], + } + ) + broker.send_order = AsyncMock(return_value={"msg1": "OK"}) + + scenario = StockScenario( + condition=StockCondition(rsi_below=30), + action=ScenarioAction.BUY, + confidence=88, + stop_loss_pct=-5.0, + take_profit_pct=3.0, + rationale="staged exit policy", + ) + playbook = DayPlaybook( + date=date(2026, 2, 8), + market="KR", + stock_playbooks=[ + {"stock_code": "005930", "stock_name": "Samsung", "scenarios": [scenario]} + ], + ) + engine = MagicMock(spec=ScenarioEngine) + engine.evaluate = MagicMock(return_value=_make_hold_match()) + + market = MagicMock() + market.name = "Korea" + market.code = "KR" + market.exchange_code = "KRX" + market.is_domestic = True + + telegram = MagicMock() + telegram.notify_trade_execution = AsyncMock() + telegram.notify_fat_finger = AsyncMock() + telegram.notify_circuit_breaker = AsyncMock() + telegram.notify_scenario_matched = AsyncMock() + + for _ in range(2): + await trading_cycle( + broker=broker, + overseas_broker=MagicMock(), + scenario_engine=engine, + playbook=playbook, + risk=MagicMock(), + db_conn=db_conn, + decision_logger=decision_logger, + context_store=MagicMock( + get_latest_timeframe=MagicMock(return_value=None), + set_context=MagicMock(), + ), + criticality_assessor=MagicMock( + assess_market_conditions=MagicMock(return_value=MagicMock(value="NORMAL")), + get_timeout=MagicMock(return_value=5.0), + ), + 
telegram=telegram, + market=market, + stock_code="005930", + scan_candidates={}, + ) + + broker.send_order.assert_called_once() + assert broker.send_order.call_args.kwargs["order_type"] == "SELL" + + +@pytest.mark.asyncio +async def test_runtime_exit_cache_cleared_when_position_closed() -> None: + """Runtime staged-exit cache must be cleared when no open position exists.""" + db_conn = init_db(":memory:") + decision_logger = DecisionLogger(db_conn) + + buy_decision_id = decision_logger.log_decision( + stock_code="005930", + market="KR", + exchange_code="KRX", + action="BUY", + confidence=90, + rationale="entry", + context_snapshot={}, + input_data={}, + ) + log_trade( + conn=db_conn, + stock_code="005930", + action="BUY", + confidence=90, + rationale="entry", + quantity=1, + price=100.0, + market="KR", + exchange_code="KRX", + decision_id=buy_decision_id, + ) + + broker = MagicMock() + broker.get_current_price = AsyncMock(return_value=(100.0, 0.0, 0.0)) + broker.get_balance = AsyncMock( + return_value={ + "output1": [{"pdno": "005930", "ord_psbl_qty": "1"}], + "output2": [ + { + "tot_evlu_amt": "100000", + "dnca_tot_amt": "10000", + "pchs_amt_smtl_amt": "90000", + } + ], + } + ) + broker.send_order = AsyncMock(return_value={"msg1": "OK"}) + + market = MagicMock() + market.name = "Korea" + market.code = "KR" + market.exchange_code = "KRX" + market.is_domestic = True + + telegram = MagicMock() + telegram.notify_trade_execution = AsyncMock() + telegram.notify_fat_finger = AsyncMock() + telegram.notify_circuit_breaker = AsyncMock() + telegram.notify_scenario_matched = AsyncMock() + + _RUNTIME_EXIT_STATES[f"{market.code}:005930:{buy_decision_id}:dummy-ts"] = PositionState.BE_LOCK + _RUNTIME_EXIT_PEAKS[f"{market.code}:005930:{buy_decision_id}:dummy-ts"] = 120.0 + + # Close position first so trading_cycle observes no open position. 
+ sell_decision_id = decision_logger.log_decision( + stock_code="005930", + market="KR", + exchange_code="KRX", + action="SELL", + confidence=90, + rationale="manual close", + context_snapshot={}, + input_data={}, + ) + log_trade( + conn=db_conn, + stock_code="005930", + action="SELL", + confidence=90, + rationale="manual close", + quantity=1, + price=100.0, + market="KR", + exchange_code="KRX", + decision_id=sell_decision_id, + ) + + await trading_cycle( + broker=broker, + overseas_broker=MagicMock(), + scenario_engine=MagicMock(evaluate=MagicMock(return_value=_make_hold_match())), + playbook=_make_playbook(), + risk=MagicMock(), + db_conn=db_conn, + decision_logger=decision_logger, + context_store=MagicMock( + get_latest_timeframe=MagicMock(return_value=None), + set_context=MagicMock(), + ), + criticality_assessor=MagicMock( + assess_market_conditions=MagicMock(return_value=MagicMock(value="NORMAL")), + get_timeout=MagicMock(return_value=5.0), + ), + telegram=telegram, + market=market, + stock_code="005930", + scan_candidates={}, + ) + + assert not [k for k in _RUNTIME_EXIT_STATES if k.startswith("KR:005930:")] + assert not [k for k in _RUNTIME_EXIT_PEAKS if k.startswith("KR:005930:")] + + @pytest.mark.asyncio async def test_stop_loss_not_triggered_when_current_price_is_zero() -> None: """HOLD must stay HOLD when current_price=0 even if entry_price is set (issue #251). 
@@ -4135,6 +4354,130 @@ class TestDailyCBBaseline: assert result == 55000.0 +@pytest.mark.asyncio +async def test_run_daily_session_applies_staged_exit_override_on_hold() -> None: + """run_daily_session must apply HOLD staged exit semantics (issue #304).""" + from src.analysis.smart_scanner import ScanCandidate + + db_conn = init_db(":memory:") + log_trade( + conn=db_conn, + stock_code="005930", + action="BUY", + confidence=90, + rationale="entry", + quantity=1, + price=100.0, + market="KR", + exchange_code="KRX", + decision_id="buy-d1", + ) + + settings = Settings( + KIS_APP_KEY="k", + KIS_APP_SECRET="s", + KIS_ACCOUNT_NO="12345678-01", + GEMINI_API_KEY="g", + MODE="paper", + ) + + broker = MagicMock() + broker.get_balance = AsyncMock( + return_value={ + "output1": [{"pdno": "005930", "ord_psbl_qty": "1"}], + "output2": [ + { + "tot_evlu_amt": "100000", + "dnca_tot_amt": "10000", + "pchs_amt_smtl_amt": "90000", + } + ], + } + ) + broker.get_current_price = AsyncMock(return_value=(95.0, -5.0, 0.0)) + broker.send_order = AsyncMock(return_value={"msg1": "OK"}) + + market = MagicMock() + market.name = "Korea" + market.code = "KR" + market.exchange_code = "KRX" + market.is_domestic = True + market.timezone = __import__("zoneinfo").ZoneInfo("Asia/Seoul") + + scenario = StockScenario( + condition=StockCondition(rsi_below=30), + action=ScenarioAction.BUY, + confidence=88, + stop_loss_pct=-2.0, + take_profit_pct=3.0, + rationale="stop loss policy", + ) + playbook = DayPlaybook( + date=date(2026, 2, 8), + market="KR", + stock_playbooks=[ + {"stock_code": "005930", "stock_name": "Samsung", "scenarios": [scenario]} + ], + ) + playbook_store = MagicMock() + playbook_store.load = MagicMock(return_value=playbook) + + smart_scanner = MagicMock() + smart_scanner.scan = AsyncMock( + return_value=[ + ScanCandidate( + stock_code="005930", + name="Samsung", + price=95.0, + volume=1_000_000.0, + volume_ratio=2.0, + rsi=42.0, + signal="momentum", + score=80.0, + ) + ] + ) + + 
scenario_engine = MagicMock(spec=ScenarioEngine) + scenario_engine.evaluate = MagicMock(return_value=_make_hold_match("005930")) + + risk = MagicMock() + risk.check_circuit_breaker = MagicMock() + risk.validate_order = MagicMock() + + decision_logger = MagicMock() + decision_logger.log_decision = MagicMock(return_value="d1") + + telegram = MagicMock() + telegram.notify_trade_execution = AsyncMock() + telegram.notify_scenario_matched = AsyncMock() + + async def _passthrough(fn, *a, label: str = "", **kw): # type: ignore[override] + return await fn(*a, **kw) + + with patch("src.main.get_open_markets", return_value=[market]), \ + patch("src.main._retry_connection", new=_passthrough): + await run_daily_session( + broker=broker, + overseas_broker=MagicMock(), + scenario_engine=scenario_engine, + playbook_store=playbook_store, + pre_market_planner=MagicMock(), + risk=risk, + db_conn=db_conn, + decision_logger=decision_logger, + context_store=MagicMock(), + criticality_assessor=MagicMock(), + telegram=telegram, + settings=settings, + smart_scanner=smart_scanner, + daily_start_eval=0.0, + ) + + broker.send_order.assert_called_once() + assert broker.send_order.call_args.kwargs["order_type"] == "SELL" + + # --------------------------------------------------------------------------- # sync_positions_from_broker — startup DB sync tests (issue #206) # --------------------------------------------------------------------------- diff --git a/workflow/session-handover.md b/workflow/session-handover.md index 68085f6..ea9f9cd 100644 --- a/workflow/session-handover.md +++ b/workflow/session-handover.md @@ -41,3 +41,11 @@ - next_ticket: #304 - process_gate_checked: process_ticket=#306,#308 merged_to_feature_branch=yes - risks_or_notes: process-change-first 실행 게이트를 문서+스크립트로 강화 + +### 2026-02-27 | session=codex-handover-start-2 +- branch: feature/issue-304-runtime-staged-exit-semantics +- docs_checked: docs/workflow.md, docs/commands.md, docs/agent-constraints.md +- open_issues_reviewed: 
#304, #305 +- next_ticket: #304 +- process_gate_checked: process_ticket=#306,#308 merged_to_feature_branch=yes +- risks_or_notes: handover 재시작 요청으로 세션 엔트리 추가, 미추적 산출물(AMS/NAS/NYS, DB, lock, xlsx) 커밋 분리 필요 From c00525eb4daac2bed597e200d98b42c11033baf8 Mon Sep 17 00:00:00 2001 From: agentson Date: Fri, 27 Feb 2026 23:58:52 +0900 Subject: [PATCH 028/109] feat: integrate v2 backtest pipeline for triple barrier and walk-forward (#305) --- docs/requirements-log.md | 33 ++++ src/analysis/backtest_pipeline.py | 187 ++++++++++++++++++++ tests/test_backtest_pipeline_integration.py | 136 ++++++++++++++ workflow/session-handover.md | 16 ++ 4 files changed, 372 insertions(+) create mode 100644 src/analysis/backtest_pipeline.py create mode 100644 tests/test_backtest_pipeline_integration.py diff --git a/docs/requirements-log.md b/docs/requirements-log.md index cd629a9..bd18e04 100644 --- a/docs/requirements-log.md +++ b/docs/requirements-log.md @@ -355,3 +355,36 @@ Order result: 모의투자 매수주문이 완료 되었습니다. ✓ - `TestOverseasGhostPositionClose` 2개: ghost-close 로그 확인, 일반 오류 무시 **이슈/PR:** #235, PR #236 + +--- + +## 2026-02-27 + +### v2 백테스트 파이프라인 통합 (#305) + +**배경:** +- `TripleBarrier`, `WalkForward`, `BacktestCostGuard`는 개별 모듈로 존재했으나, + 하나의 실행 경로로 연결된 파이프라인이 없어 통합 검증이 불가능했다. + +**구현 내용:** + +1. `src/analysis/backtest_pipeline.py` + - `run_v2_backtest_pipeline()` 추가: + - `validate_backtest_cost_model()` 선검증(fail-fast) + - `label_with_triple_barrier()`로 entry 라벨 생성 + - `generate_walk_forward_splits()`로 fold 생성 + - fold별 baseline(`B0`, `B1`, `M1`) score 산출 + - 결과 아티팩트 계약 구조(`BacktestPipelineResult`) 정의 + - leakage 검사 유틸 `fold_has_leakage()` 제공 + +2. 
`tests/test_backtest_pipeline_integration.py` 신규 + - happy path 통합 검증 + - cost guard 실패 fail-fast 검증 + - purge/embargo 기반 누수 방지 검증 + - 동일 입력 재실행 결정성 검증 + +**검증:** +- `pytest -q tests/test_backtest_pipeline_integration.py tests/test_triple_barrier.py tests/test_walk_forward_split.py tests/test_backtest_cost_guard.py tests/test_backtest_execution_model.py` +- `ruff check src/analysis/backtest_pipeline.py tests/test_backtest_pipeline_integration.py` + +**이슈/PR:** #305 diff --git a/src/analysis/backtest_pipeline.py b/src/analysis/backtest_pipeline.py new file mode 100644 index 0000000..4b0701f --- /dev/null +++ b/src/analysis/backtest_pipeline.py @@ -0,0 +1,187 @@ +"""Integrated v2 backtest pipeline. + +Wires TripleBarrier labeling + WalkForward split + CostGuard validation +into a single deterministic orchestration path. +""" + +from __future__ import annotations + +from collections.abc import Sequence +from dataclasses import dataclass +from statistics import mean +from typing import Literal + +from src.analysis.backtest_cost_guard import BacktestCostModel, validate_backtest_cost_model +from src.analysis.triple_barrier import TripleBarrierSpec, label_with_triple_barrier +from src.analysis.walk_forward_split import WalkForwardFold, generate_walk_forward_splits + + +@dataclass(frozen=True) +class BacktestBar: + high: float + low: float + close: float + session_id: str + + +@dataclass(frozen=True) +class WalkForwardConfig: + train_size: int + test_size: int + step_size: int | None = None + purge_size: int = 0 + embargo_size: int = 0 + min_train_size: int = 1 + + +@dataclass(frozen=True) +class BaselineScore: + name: Literal["B0", "B1", "M1"] + accuracy: float + + +@dataclass(frozen=True) +class BacktestFoldResult: + fold_index: int + train_indices: list[int] + test_indices: list[int] + train_label_distribution: dict[int, int] + test_label_distribution: dict[int, int] + baseline_scores: list[BaselineScore] + + +@dataclass(frozen=True) +class BacktestPipelineResult: + 
run_id: str + n_bars: int + n_entries: int + required_sessions: list[str] + label_distribution: dict[int, int] + folds: list[BacktestFoldResult] + + +def run_v2_backtest_pipeline( + *, + bars: Sequence[BacktestBar], + entry_indices: Sequence[int], + side: int, + triple_barrier_spec: TripleBarrierSpec, + walk_forward: WalkForwardConfig, + cost_model: BacktestCostModel, + required_sessions: list[str] | None = None, +) -> BacktestPipelineResult: + """Run v2 integrated pipeline (cost guard -> labels -> walk-forward baselines).""" + if not bars: + raise ValueError("bars must not be empty") + if not entry_indices: + raise ValueError("entry_indices must not be empty") + + resolved_sessions = ( + sorted(set(required_sessions)) + if required_sessions is not None + else sorted({bar.session_id for bar in bars}) + ) + validate_backtest_cost_model(model=cost_model, required_sessions=resolved_sessions) + + highs = [float(bar.high) for bar in bars] + lows = [float(bar.low) for bar in bars] + closes = [float(bar.close) for bar in bars] + normalized_entries = sorted(set(int(i) for i in entry_indices)) + if normalized_entries[0] < 0 or normalized_entries[-1] >= len(bars): + raise IndexError("entry index out of range") + + labels_by_bar_index: dict[int, int] = {} + for idx in normalized_entries: + labels_by_bar_index[idx] = label_with_triple_barrier( + highs=highs, + lows=lows, + closes=closes, + entry_index=idx, + side=side, + spec=triple_barrier_spec, + ).label + + ordered_labels = [labels_by_bar_index[idx] for idx in normalized_entries] + folds = generate_walk_forward_splits( + n_samples=len(normalized_entries), + train_size=walk_forward.train_size, + test_size=walk_forward.test_size, + step_size=walk_forward.step_size, + purge_size=walk_forward.purge_size, + embargo_size=walk_forward.embargo_size, + min_train_size=walk_forward.min_train_size, + ) + + fold_results: list[BacktestFoldResult] = [] + for fold_idx, fold in enumerate(folds): + train_labels = [ordered_labels[i] for i in 
fold.train_indices] + test_labels = [ordered_labels[i] for i in fold.test_indices] + if not test_labels: + continue + fold_results.append( + BacktestFoldResult( + fold_index=fold_idx, + train_indices=fold.train_indices, + test_indices=fold.test_indices, + train_label_distribution=_label_dist(train_labels), + test_label_distribution=_label_dist(test_labels), + baseline_scores=[ + BaselineScore(name="B0", accuracy=_baseline_b0(train_labels, test_labels)), + BaselineScore(name="B1", accuracy=_score_constant(1, test_labels)), + BaselineScore( + name="M1", + accuracy=_score_constant(_m1_pred(train_labels), test_labels), + ), + ], + ) + ) + + return BacktestPipelineResult( + run_id=_build_run_id( + n_entries=len(normalized_entries), + n_folds=len(fold_results), + sessions=resolved_sessions, + ), + n_bars=len(bars), + n_entries=len(normalized_entries), + required_sessions=resolved_sessions, + label_distribution=_label_dist(ordered_labels), + folds=fold_results, + ) + + +def _label_dist(labels: Sequence[int]) -> dict[int, int]: + dist: dict[int, int] = {-1: 0, 0: 0, 1: 0} + for val in labels: + if val in dist: + dist[val] += 1 + return dist + + +def _score_constant(pred: int, actual: Sequence[int]) -> float: + return mean(1.0 if pred == label else 0.0 for label in actual) + + +def _baseline_b0(train_labels: Sequence[int], test_labels: Sequence[int]) -> float: + if not train_labels: + return _score_constant(0, test_labels) + # Majority-class baseline from training fold. 
+ choices = (-1, 0, 1) + pred = max(choices, key=lambda c: train_labels.count(c)) + return _score_constant(pred, test_labels) + + +def _m1_pred(train_labels: Sequence[int]) -> int: + if not train_labels: + return 0 + return train_labels[-1] + + +def _build_run_id(*, n_entries: int, n_folds: int, sessions: Sequence[str]) -> str: + sess_key = "_".join(sessions) + return f"v2p-e{n_entries}-f{n_folds}-s{sess_key}" + + +def fold_has_leakage(fold: WalkForwardFold) -> bool: + """Utility for tests/verification: True when train/test overlap exists.""" + return bool(set(fold.train_indices).intersection(fold.test_indices)) diff --git a/tests/test_backtest_pipeline_integration.py b/tests/test_backtest_pipeline_integration.py new file mode 100644 index 0000000..60dca91 --- /dev/null +++ b/tests/test_backtest_pipeline_integration.py @@ -0,0 +1,136 @@ +from __future__ import annotations + +from src.analysis.backtest_cost_guard import BacktestCostModel +from src.analysis.backtest_pipeline import ( + BacktestBar, + WalkForwardConfig, + fold_has_leakage, + run_v2_backtest_pipeline, +) +from src.analysis.triple_barrier import TripleBarrierSpec +from src.analysis.walk_forward_split import generate_walk_forward_splits + + +def _bars() -> list[BacktestBar]: + closes = [100.0, 101.0, 102.0, 101.5, 103.0, 102.5, 104.0, 103.5, 105.0, 104.5, 106.0, 105.5] + bars: list[BacktestBar] = [] + for i, close in enumerate(closes): + bars.append( + BacktestBar( + high=close + 1.0, + low=close - 1.0, + close=close, + session_id="KRX_REG" if i % 2 == 0 else "US_PRE", + ) + ) + return bars + + +def _cost_model() -> BacktestCostModel: + return BacktestCostModel( + commission_bps=3.0, + slippage_bps_by_session={"KRX_REG": 10.0, "US_PRE": 50.0}, + failure_rate_by_session={"KRX_REG": 0.01, "US_PRE": 0.08}, + unfavorable_fill_required=True, + ) + + +def test_pipeline_happy_path_returns_fold_and_artifact_contract() -> None: + out = run_v2_backtest_pipeline( + bars=_bars(), + entry_indices=[0, 1, 2, 3, 4, 5, 
6, 7], + side=1, + triple_barrier_spec=TripleBarrierSpec( + take_profit_pct=0.02, + stop_loss_pct=0.01, + max_holding_bars=3, + ), + walk_forward=WalkForwardConfig( + train_size=4, + test_size=2, + step_size=2, + purge_size=1, + embargo_size=1, + min_train_size=3, + ), + cost_model=_cost_model(), + ) + + assert out.run_id.startswith("v2p-e8-f") + assert out.n_bars == 12 + assert out.n_entries == 8 + assert out.required_sessions == ["KRX_REG", "US_PRE"] + assert len(out.folds) > 0 + assert set(out.label_distribution) == {-1, 0, 1} + for fold in out.folds: + names = {score.name for score in fold.baseline_scores} + assert names == {"B0", "B1", "M1"} + for score in fold.baseline_scores: + assert 0.0 <= score.accuracy <= 1.0 + + +def test_pipeline_cost_guard_fail_fast() -> None: + bad = BacktestCostModel( + commission_bps=3.0, + slippage_bps_by_session={"KRX_REG": 10.0}, + failure_rate_by_session={"KRX_REG": 0.01}, + unfavorable_fill_required=True, + ) + try: + run_v2_backtest_pipeline( + bars=_bars(), + entry_indices=[0, 1, 2, 3], + side=1, + triple_barrier_spec=TripleBarrierSpec( + take_profit_pct=0.02, + stop_loss_pct=0.01, + max_holding_bars=3, + ), + walk_forward=WalkForwardConfig(train_size=2, test_size=1), + cost_model=bad, + required_sessions=["KRX_REG", "US_PRE"], + ) + except ValueError as exc: + assert "missing slippage_bps_by_session" in str(exc) + else: + raise AssertionError("expected cost guard validation error") + + +def test_pipeline_fold_leakage_guard() -> None: + folds = generate_walk_forward_splits( + n_samples=12, + train_size=6, + test_size=2, + step_size=2, + purge_size=1, + embargo_size=1, + min_train_size=5, + ) + assert folds + for fold in folds: + assert not fold_has_leakage(fold) + + +def test_pipeline_deterministic_seed_free_deterministic_result() -> None: + cfg = dict( + bars=_bars(), + entry_indices=[0, 1, 2, 3, 4, 5, 6, 7], + side=1, + triple_barrier_spec=TripleBarrierSpec( + take_profit_pct=0.02, + stop_loss_pct=0.01, + 
max_holding_bars=3, + ), + walk_forward=WalkForwardConfig( + train_size=4, + test_size=2, + step_size=2, + purge_size=1, + embargo_size=1, + min_train_size=3, + ), + cost_model=_cost_model(), + ) + out1 = run_v2_backtest_pipeline(**cfg) + out2 = run_v2_backtest_pipeline(**cfg) + assert out1 == out2 diff --git a/workflow/session-handover.md b/workflow/session-handover.md index ea9f9cd..8f2b222 100644 --- a/workflow/session-handover.md +++ b/workflow/session-handover.md @@ -49,3 +49,19 @@ - next_ticket: #304 - process_gate_checked: process_ticket=#306,#308 merged_to_feature_branch=yes - risks_or_notes: handover 재시작 요청으로 세션 엔트리 추가, 미추적 산출물(AMS/NAS/NYS, DB, lock, xlsx) 커밋 분리 필요 + +### 2026-02-27 | session=codex-issue305-start +- branch: feature/v3-session-policy-stream +- docs_checked: docs/workflow.md, docs/commands.md, docs/agent-constraints.md +- open_issues_reviewed: #305 +- next_ticket: #305 +- process_gate_checked: process_ticket=#306,#308 merged_to_feature_branch=yes +- risks_or_notes: #305 구현을 위해 분석/백테스트 모듈 통합 경로 점검 시작 + +### 2026-02-27 | session=codex-issue305-ticket-branch +- branch: feature/issue-305-backtest-pipeline-integration +- docs_checked: docs/workflow.md, docs/commands.md, docs/agent-constraints.md +- open_issues_reviewed: #305 +- next_ticket: #305 +- process_gate_checked: process_ticket=#306,#308 merged_to_feature_branch=yes +- risks_or_notes: 티켓 브랜치 분기 후 strict gate 재통과를 위한 엔트리 추가 From 8396dc1606572cdf633667e78d28f6d37b49735b Mon Sep 17 00:00:00 2001 From: agentson Date: Sat, 28 Feb 2026 03:25:00 +0900 Subject: [PATCH 029/109] process: automate backtest gate for PR/push/schedule (#314) --- .github/workflows/backtest-gate.yml | 66 +++++++++++++++++ docs/testing.md | 23 ++++++ docs/workflow.md | 15 ++++ scripts/backtest_gate.sh | 106 ++++++++++++++++++++++++++++ workflow/session-handover.md | 16 +++++ 5 files changed, 226 insertions(+) create mode 100644 .github/workflows/backtest-gate.yml create mode 100755 scripts/backtest_gate.sh diff --git 
a/.github/workflows/backtest-gate.yml b/.github/workflows/backtest-gate.yml new file mode 100644 index 0000000..21bd8bf --- /dev/null +++ b/.github/workflows/backtest-gate.yml @@ -0,0 +1,66 @@ +name: Backtest Gate + +on: + pull_request: + branches: ["**"] + push: + branches: + - "feature/**" + schedule: + # Daily scheduled gate (KST 01:20) + - cron: "20 16 * * *" + workflow_dispatch: + inputs: + mode: + description: "backtest mode (auto|smoke|full)" + required: false + default: "auto" + base_ref: + description: "git base ref for changed-file diff" + required: false + default: "origin/main" + +jobs: + backtest-gate: + runs-on: ubuntu-latest + concurrency: + group: backtest-gate-${{ github.ref }} + cancel-in-progress: true + + steps: + - uses: actions/checkout@v4 + with: + fetch-depth: 0 + + - name: Set up Python 3.11 + uses: actions/setup-python@v5 + with: + python-version: "3.11" + + - name: Install dependencies + run: pip install ".[dev]" + + - name: Resolve base ref + id: base + run: | + if [ "${{ github.event_name }}" = "pull_request" ]; then + echo "ref=origin/${{ github.base_ref }}" >> "$GITHUB_OUTPUT" + elif [ "${{ github.event_name }}" = "workflow_dispatch" ] && [ -n "${{ github.event.inputs.base_ref }}" ]; then + echo "ref=${{ github.event.inputs.base_ref }}" >> "$GITHUB_OUTPUT" + else + echo "ref=origin/main" >> "$GITHUB_OUTPUT" + fi + + - name: Run backtest gate + env: + BASE_REF: ${{ steps.base.outputs.ref }} + BACKTEST_MODE: ${{ github.event_name == 'workflow_dispatch' && github.event.inputs.mode || 'auto' }} + FORCE_FULL_BACKTEST: ${{ github.event_name == 'schedule' && 'true' || 'false' }} + run: bash scripts/backtest_gate.sh + + - name: Upload backtest logs + if: always() + uses: actions/upload-artifact@v4 + with: + name: backtest-gate-logs + path: data/backtest-gate/*.log diff --git a/docs/testing.md b/docs/testing.md index 3ab75a5..83aa6af 100644 --- a/docs/testing.md +++ b/docs/testing.md @@ -181,6 +181,29 @@ pytest -v --cov=src 
--cov-report=term-missing **Note:** `main.py` has lower coverage as it contains the main loop which is tested via integration/manual testing. +## Backtest Automation Gate + +백테스트 관련 검증은 `scripts/backtest_gate.sh`와 `.github/workflows/backtest-gate.yml`로 자동 실행된다. + +- PR: 변경 파일 기준 `auto` 모드 +- `feature/**` push: 변경 파일 기준 `auto` 모드 +- Daily schedule: `full` 강제 실행 +- Manual dispatch: `mode`(`auto|smoke|full`) 지정 가능 + +실행 기준: +- `src/analysis/`, `src/strategy/`, `src/strategies/`, `src/main.py`, `src/markets/`, `src/broker/` +- 백테스트 핵심 테스트 파일 변경 +- `docs/ouroboros/` 변경 + +`auto` 모드에서 백테스트 민감 영역 변경이 없으면 게이트는 `skip` 처리되며 실패로 간주하지 않는다. + +로컬 수동 실행: +```bash +bash scripts/backtest_gate.sh +BACKTEST_MODE=full bash scripts/backtest_gate.sh +BASE_REF=origin/feature/v3-session-policy-stream BACKTEST_MODE=auto bash scripts/backtest_gate.sh +``` + ## Test Configuration ### `pyproject.toml` diff --git a/docs/workflow.md b/docs/workflow.md index 1b07639..9fb3c24 100644 --- a/docs/workflow.md +++ b/docs/workflow.md @@ -51,6 +51,21 @@ Gitea 이슈/PR/코멘트 작업 전에 모든 에이전트는 아래를 먼저 - Until final user sign-off, `main` merge is prohibited. - 각 에이전트는 주요 의사결정(리뷰 지적, 수정 방향, 검증 승인)마다 PR 코멘트를 적극 작성해 의사결정 과정을 남긴다. +## Backtest Gate Policy (Mandatory) + +사람 의존도를 줄이기 위해 백테스트 검증은 자동 게이트를 기본으로 한다. + +- 워크플로우: `.github/workflows/backtest-gate.yml` +- 실행 스크립트: `scripts/backtest_gate.sh` +- 기본 모드: `auto` (변경 파일 기반 실행/skip 판정) +- 정기 스케줄: daily `full` 강제 실행 +- 수동 재실행: workflow dispatch + `mode` 지정 + +강제 규칙: +- 백테스트 민감 변경(PR/feature push)에서 게이트 실패 시 머지 금지 +- 스케줄 게이트 실패 시 이슈 등록 후 원인/복구 계획 기록 +- `python` 대신 `python3` 기준으로 실행한다 + ## Gitea CLI Formatting Troubleshooting Issue/PR 본문 작성 시 줄바꿈(`\n`)이 문자열 그대로 저장되는 문제가 반복될 수 있다. 원인은 `-d "...\n..."` 형태에서 쉘/CLI가 이스케이프를 실제 개행으로 해석하지 않기 때문이다. 
diff --git a/scripts/backtest_gate.sh b/scripts/backtest_gate.sh new file mode 100755 index 0000000..ef413b6 --- /dev/null +++ b/scripts/backtest_gate.sh @@ -0,0 +1,106 @@ +#!/usr/bin/env bash +# Backtest gate for PR/push/scheduled verification. + +set -euo pipefail + +MODE="${BACKTEST_MODE:-auto}" # auto | smoke | full +BASE_REF="${BASE_REF:-origin/main}" # used when MODE=auto +FORCE_FULL="${FORCE_FULL_BACKTEST:-false}" +LOG_DIR="${LOG_DIR:-data/backtest-gate}" + +mkdir -p "$LOG_DIR" +STAMP="$(date -u +%Y%m%d_%H%M%S)" +LOG_FILE="$LOG_DIR/backtest_gate_${STAMP}.log" + +log() { + printf '%s %s\n' "$(date -u +%Y-%m-%dT%H:%M:%SZ)" "$1" | tee -a "$LOG_FILE" +} + +run_cmd() { + log "[RUN] $*" + "$@" 2>&1 | tee -a "$LOG_FILE" +} + +resolve_mode_from_changes() { + if [ "$FORCE_FULL" = "true" ]; then + echo "full" + return + fi + + if ! git rev-parse --verify "$BASE_REF" >/dev/null 2>&1; then + log "[WARN] BASE_REF not found: $BASE_REF; fallback to full" + echo "full" + return + fi + + changed_files="$(git diff --name-only "$BASE_REF"...HEAD || true)" + if [ -z "$changed_files" ]; then + log "[INFO] no changed files between $BASE_REF...HEAD; skip backtest gate" + echo "skip" + return + fi + + log "[INFO] changed files from $BASE_REF...HEAD:" + while IFS= read -r line; do + [ -n "$line" ] && log " - $line" + done <<< "$changed_files" + + # Backtest-sensitive areas: analysis/strategy/runtime execution semantics. 
+ if printf '%s\n' "$changed_files" | rg -q \ + '^(src/analysis/|src/strategy/|src/strategies/|src/main.py|src/markets/|src/broker/|tests/test_backtest_|tests/test_triple_barrier.py|tests/test_walk_forward_split.py|tests/test_main.py|docs/ouroboros/)' + then + echo "full" + else + echo "skip" + fi +} + +SMOKE_TESTS=( + tests/test_backtest_pipeline_integration.py + tests/test_triple_barrier.py + tests/test_walk_forward_split.py + tests/test_backtest_cost_guard.py + tests/test_backtest_execution_model.py +) + +FULL_TESTS=( + tests/test_backtest_pipeline_integration.py + tests/test_triple_barrier.py + tests/test_walk_forward_split.py + tests/test_backtest_cost_guard.py + tests/test_backtest_execution_model.py + tests/test_main.py +) + +main() { + log "[INFO] backtest gate started mode=$MODE base_ref=$BASE_REF force_full=$FORCE_FULL" + + selected_mode="$MODE" + if [ "$MODE" = "auto" ]; then + selected_mode="$(resolve_mode_from_changes)" + fi + + case "$selected_mode" in + skip) + log "[PASS] backtest gate skipped (no backtest-sensitive changes)" + exit 0 + ;; + smoke) + run_cmd python3 -m pytest -q "${SMOKE_TESTS[@]}" + log "[PASS] smoke backtest gate passed" + ;; + full) + run_cmd python3 -m pytest -q "${SMOKE_TESTS[@]}" + # Runtime semantics tied to v2 staged-exit must remain covered in full gate. 
+ run_cmd python3 -m pytest -q tests/test_main.py -k \ + "staged_exit_override or runtime_exit_cache_cleared or run_daily_session_applies_staged_exit_override_on_hold" + log "[PASS] full backtest gate passed" + ;; + *) + log "[FAIL] invalid BACKTEST_MODE=$selected_mode (expected auto|smoke|full)" + exit 2 + ;; + esac +} + +main "$@" diff --git a/workflow/session-handover.md b/workflow/session-handover.md index 8f2b222..89d09c8 100644 --- a/workflow/session-handover.md +++ b/workflow/session-handover.md @@ -65,3 +65,19 @@ - next_ticket: #305 - process_gate_checked: process_ticket=#306,#308 merged_to_feature_branch=yes - risks_or_notes: 티켓 브랜치 분기 후 strict gate 재통과를 위한 엔트리 추가 + +### 2026-02-27 | session=codex-backtest-gate-automation +- branch: feature/v3-session-policy-stream +- docs_checked: docs/workflow.md, docs/commands.md, docs/agent-constraints.md +- open_issues_reviewed: #304, #305 +- next_ticket: (create) backtest automation gate +- process_gate_checked: process_ticket=#306,#308 merged_to_feature_branch=yes +- risks_or_notes: 백테스트 자동화 누락 재발 방지 위해 이슈/티켓 브랜치/PR 절차로 즉시 정규화 + +### 2026-02-27 | session=codex-issue314-ticket-branch +- branch: feature/issue-314-backtest-gate-automation +- docs_checked: docs/workflow.md, docs/commands.md, docs/agent-constraints.md +- open_issues_reviewed: #314 +- next_ticket: #314 +- process_gate_checked: process_ticket=#306,#308 merged_to_feature_branch=yes +- risks_or_notes: 백테스트 자동 게이트 도입 티켓 브랜치 strict gate 통과용 엔트리 From dd51ffb6ace4f38c14a278e584455929dbd81596 Mon Sep 17 00:00:00 2001 From: agentson Date: Sat, 28 Feb 2026 09:37:16 +0900 Subject: [PATCH 030/109] process: enforce forbidden runtime invariants in monitor (#316) --- docs/commands.md | 5 +++- docs/workflow.md | 5 ++++ scripts/runtime_verify_monitor.sh | 42 ++++++++++++++++++++++++++++--- workflow/session-handover.md | 8 ++++++ 4 files changed, 56 insertions(+), 4 deletions(-) diff --git a/docs/commands.md b/docs/commands.md index aeb0210..f70a844 100644 --- 
a/docs/commands.md +++ b/docs/commands.md @@ -157,9 +157,12 @@ python -m src.main --mode=paper # Run with dashboard enabled python -m src.main --mode=paper --dashboard -# Runtime verification monitor (NOT_OBSERVED detection) +# Runtime verification monitor (coverage + forbidden invariants) bash scripts/runtime_verify_monitor.sh +# Runtime monitor with explicit policy timezone (example: KST) +POLICY_TZ=Asia/Seoul bash scripts/runtime_verify_monitor.sh + # Session handover gate (must pass before implementation) python3 scripts/session_handover_check.py --strict diff --git a/docs/workflow.md b/docs/workflow.md index 9fb3c24..288fe70 100644 --- a/docs/workflow.md +++ b/docs/workflow.md @@ -195,6 +195,11 @@ Use `run_in_background=True` for independent tasks that don't block subsequent w - `NOT_OBSERVED`는 운영상 `FAIL`과 동일하게 처리 - `NOT_OBSERVED`가 하나라도 있으면 승인/머지 금지 +`FORBIDDEN` 처리 규칙: +- 정책 위반 신호(예: 주말 `session=KRX_REG`)는 `FORBIDDEN=HIT`으로 별도 기록한다 +- `FORBIDDEN=HIT`은 즉시 `P0 FAIL`로 간주하고 모니터링 승인 불가 +- 실시간 모니터는 `alive`만으로 정상 판정하지 않는다(정책 불변식 통과가 필수) + ### Process-Change-First Rule (Mandatory) 재발 방지/운영 규칙 변경이 결정되면, 기능 구현 티켓보다 먼저 서버(feature branch)에 반영해야 한다. diff --git a/scripts/runtime_verify_monitor.sh b/scripts/runtime_verify_monitor.sh index 6c878a4..c2fbd58 100755 --- a/scripts/runtime_verify_monitor.sh +++ b/scripts/runtime_verify_monitor.sh @@ -1,5 +1,5 @@ #!/usr/bin/env bash -# Runtime verification monitor with NOT_OBSERVED detection. +# Runtime verification monitor with coverage + forbidden invariant checks. 
set -euo pipefail @@ -7,6 +7,7 @@ ROOT_DIR="${ROOT_DIR:-/home/agentson/repos/The-Ouroboros}" LOG_DIR="${LOG_DIR:-$ROOT_DIR/data/overnight}" INTERVAL_SEC="${INTERVAL_SEC:-60}" MAX_HOURS="${MAX_HOURS:-24}" +POLICY_TZ="${POLICY_TZ:-Asia/Seoul}" cd "$ROOT_DIR" @@ -30,7 +31,20 @@ check_signal() { return 1 } -log "[INFO] runtime verify monitor started interval=${INTERVAL_SEC}s max_hours=${MAX_HOURS}" +check_forbidden() { + local name="$1" + local pattern="$2" + local run_log="$3" + + if rg -q "$pattern" "$run_log"; then + log "[FORBIDDEN] ${name}=HIT pattern=${pattern}" + return 1 + fi + log "[FORBIDDEN] ${name}=CLEAR pattern=${pattern}" + return 0 +} + +log "[INFO] runtime verify monitor started interval=${INTERVAL_SEC}s max_hours=${MAX_HOURS} policy_tz=${POLICY_TZ}" while true; do now=$(date +%s) @@ -73,6 +87,28 @@ while true; do log "[OK] coverage complete (NOT_OBSERVED=0)" fi + # Forbidden invariants: must never happen under given policy context. + forbidden_hits=0 + policy_dow="$(TZ="$POLICY_TZ" date +%u)" # 1..7 (Mon..Sun) + is_weekend=0 + if [ "$policy_dow" -ge 6 ]; then + is_weekend=1 + fi + + if [ "$is_weekend" -eq 1 ]; then + # Weekend policy: KR regular session loop must never appear. 
+ check_forbidden "WEEKEND_KR_SESSION_ACTIVE" \ + "Market session active: KR|session=KRX_REG|Processing market: Korea Exchange" \ + "$latest_run" || forbidden_hits=$((forbidden_hits+1)) + else + log "[FORBIDDEN] WEEKEND_KR_SESSION_ACTIVE=SKIP reason=weekday" + fi + + if [ "$forbidden_hits" -gt 0 ]; then + log "[P0] forbidden_invariant_hits=$forbidden_hits (treat as immediate FAIL)" + else + log "[OK] forbidden invariants clear" + fi + sleep "$INTERVAL_SEC" done - diff --git a/workflow/session-handover.md b/workflow/session-handover.md index 89d09c8..a3fd61b 100644 --- a/workflow/session-handover.md +++ b/workflow/session-handover.md @@ -81,3 +81,11 @@ - next_ticket: #314 - process_gate_checked: process_ticket=#306,#308 merged_to_feature_branch=yes - risks_or_notes: 백테스트 자동 게이트 도입 티켓 브랜치 strict gate 통과용 엔트리 + +### 2026-02-28 | session=codex-issue316-forbidden-monitor +- branch: feature/issue-316-weekend-forbidden-monitor +- docs_checked: docs/workflow.md, docs/commands.md, docs/agent-constraints.md +- open_issues_reviewed: #316 +- next_ticket: #316 +- process_gate_checked: process_ticket=#306,#308 merged_to_feature_branch=yes +- risks_or_notes: 모니터 판정을 liveness 중심에서 policy invariant(FORBIDDEN) 중심으로 전환 From ab9ea56efaa9e25e4c0a134f0cc83e4b9a5f67a4 Mon Sep 17 00:00:00 2001 From: agentson Date: Sat, 28 Feb 2026 13:04:15 +0900 Subject: [PATCH 031/109] docs: consolidate implementation audit updates and add restructure plan --- docs/ouroboros/80_implementation_audit.md | 564 ++++++++++++++++++++++ docs/ouroboros/82_doc_restructure_plan.md | 96 ++++ docs/ouroboros/README.md | 1 + 3 files changed, 661 insertions(+) create mode 100644 docs/ouroboros/80_implementation_audit.md create mode 100644 docs/ouroboros/82_doc_restructure_plan.md diff --git a/docs/ouroboros/80_implementation_audit.md b/docs/ouroboros/80_implementation_audit.md new file mode 100644 index 0000000..0ffefcd --- /dev/null +++ b/docs/ouroboros/80_implementation_audit.md @@ -0,0 +1,564 @@ + + +# v2/v3 구현 감사 및 
수익률 분석 보고서 + +작성일: 2026-02-28 +대상 기간: 2026-02-25 ~ 2026-02-28 (실거래) +분석 브랜치: `feature/v3-session-policy-stream` + +--- + +## 1. 계획 대비 구현 감사 + +### 1.1 v2 구현 상태: 100% 완료 + +| REQ-ID | 요구사항 | 구현 파일 | 상태 | +|--------|----------|-----------|------| +| REQ-V2-001 | 4-상태 매도 상태기계 (HOLDING→BE_LOCK→ARMED→EXITED) | `src/strategy/position_state_machine.py` | ✅ 완료 | +| REQ-V2-002 | 즉시 최상위 상태 승격 (갭 대응) | `position_state_machine.py:51-70` | ✅ 완료 | +| REQ-V2-003 | EXITED 우선 평가 | `position_state_machine.py:38-48` | ✅ 완료 | +| REQ-V2-004 | 4중 청산 로직 (Hard/BE/ATR Trailing/Model) | `src/strategy/exit_rules.py` | ✅ 완료 | +| REQ-V2-005 | Triple Barrier 라벨링 | `src/analysis/triple_barrier.py` | ✅ 완료 | +| REQ-V2-006 | Walk-Forward + Purge/Embargo 검증 | `src/analysis/walk_forward_split.py` | ✅ 완료 | +| REQ-V2-007 | 비용/슬리피지/체결실패 모델 필수 | `src/analysis/backtest_cost_guard.py` | ✅ 완료 | +| REQ-V2-008 | Kill Switch 실행 순서 (Block→Cancel→Refresh→Reduce→Snapshot) | `src/core/kill_switch.py` | ✅ 완료 | + +### 1.2 v3 구현 상태: ~75% 완료 + +| REQ-ID | 요구사항 | 상태 | 갭 설명 | +|--------|----------|------|---------| +| REQ-V3-001 | 모든 신호/주문/로그에 session_id 포함 | ⚠️ 부분 | 아래 GAP-1, GAP-2 참조 | +| REQ-V3-002 | 세션 전환 훅 + 리스크 파라미터 재로딩 | ⚠️ 부분 | 아래 GAP-3 참조 | +| REQ-V3-003 | 블랙아웃 윈도우 정책 | ✅ 완료 | `src/core/blackout_manager.py` | +| REQ-V3-004 | 블랙아웃 큐 + 복구 시 재검증 | ⚠️ 부분 | 아래 GAP-4 참조 (부분 해소) | +| REQ-V3-005 | 저유동 세션 시장가 금지 | ✅ 완료 | `src/core/order_policy.py` | +| REQ-V3-006 | 보수적 백테스트 체결 (불리 방향) | ✅ 완료 | `src/analysis/backtest_execution_model.py` | +| REQ-V3-007 | FX 손익 분리 (전략 PnL vs 환율 PnL) | ⚠️ 코드 완료 / 운영 미반영 | `src/db.py` 스키마·함수 완료, 운영 데이터 `fx_pnl` 전부 0 | +| REQ-V3-008 | 오버나잇 예외 vs Kill Switch 우선순위 | ✅ 완료 | `src/main.py:459-471` | + +### 1.3 운영 거버넌스: ~20% 완료 + +| REQ-ID | 요구사항 | 상태 | 갭 설명 | +|--------|----------|------|---------| +| REQ-OPS-001 | 타임존 명시 (KST/UTC) | ⚠️ 부분 | DB 기록은 UTC, 세션은 KST. 
일부 로그에서 타임존 미표기 | +| REQ-OPS-002 | 정책 변경 시 레지스트리 업데이트 강제 | ❌ 미구현 | CI 자동 검증 없음 | +| REQ-OPS-003 | TASK-REQ 매핑 강제 | ❌ 미구현 | PR 단위 자동 검증 없음 | + +--- + +## 2. 구현 갭 상세 + +### GAP-1: DecisionLogger에 session_id 미포함 (CRITICAL) + +- **위치**: `src/logging/decision_logger.py:40` +- **문제**: `log_decision()` 함수에 `session_id` 파라미터가 없음 +- **영향**: 어떤 세션에서 전략적 의사결정이 내려졌는지 추적 불가 +- **요구사항**: REQ-V3-001 + +### GAP-2: src/main.py 거래 로그에 session_id 미전달 (CRITICAL) + +- **위치**: `src/main.py` line 1625, 1682, 2769 +- **문제**: `log_trade()` 호출 시 `session_id` 파라미터를 전달하지 않음 +- **현상**: 시장 코드 기반 자동 추론에 의존 → 실제 런타임 세션과 불일치 가능 +- **요구사항**: REQ-V3-001 + +### GAP-3: 세션 전환 시 리스크 파라미터 재로딩 없음 (HIGH) + +- **위치**: `src/main.py` 전체 +- **문제**: 리스크 파라미터가 시작 시 한 번만 로딩되고, 세션 경계 변경 시 재로딩 메커니즘 없음 +- **영향**: NXT_AFTER(저유동) → KRX_REG(정규장) 전환 시에도 동일 파라미터 사용 +- **요구사항**: REQ-V3-002 + +### GAP-4: 블랙아웃 복구 시 재검증 부분 해소, DB 기록 미구현 (HIGH) + +- **위치**: `src/core/blackout_manager.py:89-96`, `src/main.py:694-791` +- **상태**: `pop_recovery_batch()` 자체는 단순 dequeue이나, 실행 경로에서 부분 재검증 수행: + - stale BUY 드롭 (포지션 이미 존재 시) — `src/main.py:713-720` + - stale SELL 드롭 (포지션 부재 시) — `src/main.py:721-727` + - `validate_order_policy()` 호출 — `src/main.py:729-734` +- **잔여 갭**: 가격 유효성(시세 변동), 세션 변경에 따른 파라미터 재적용은 미구현 +- **신규 발견**: 블랙아웃 복구 주문이 `log_trade()` 없이 실행되어 거래 DB에 기록되지 않음 → 성과 리포트 불일치 유발 +- **요구사항**: REQ-V3-004 + +### GAP-5: 시간장벽이 봉 개수 고정 (MEDIUM) + +- **위치**: `src/analysis/triple_barrier.py:19` +- **문제**: `max_holding_bars` (고정 봉 수) 사용, v3 계획의 `max_holding_minutes` (캘린더 시간) 미반영 +- **요구사항**: REQ-V2-005 / v3 확장 + +--- + +## 3. 
실거래 수익률 분석 + +### 3.1 종합 성적 + +| 지표 | 값 | +|------|-----| +| 총 실현 손익 | **-52,481** (KRW + USD 혼합, 통화 분리 집계는 3.4 참조) | +| 총 거래 기록 | 19,130건 (BUY 121, SELL 46, HOLD 18,963) | +| 집계 기준 | UTC `2026-02-25T00:00:00` ~ `2026-02-28T00:00:00`, SELL 45건 (기간 외 1건 제외) | +| 승률 | **39.1%** (18승 / 46매도, 0손익 포함 기준) | +| 평균 수익 거래 | +6,107 | +| 평균 손실 거래 | -7,382 | +| 최대 수익 거래 | +46,350 KRW (452260 KR) | +| 최대 손실 거래 | -26,400 KRW (000370 KR) | +| 운영 모드 | LIVE (실계좌) | + +### 3.2 일별 손익 + +> 주의: 아래 초기 집계 건수는 DB 실측치와 일부 불일치 (02-26: 문서 10건 vs DB 14건, 02-27: 문서 21건 vs DB 22건). +> 정확한 재현은 3.8 표준 집계 SQL 참조. + +| 날짜 | 매도 수 (초기 집계) | 승 | 패 | 일간 손익 | +|------|---------------------|----|----|-----------| +| 02-25 | 9 | 8 | 1 | +63.21 (USD, 미세 수익) | +| 02-26 | 10 | 5 | 5 | **-32,083.40** (KR 대량 손실) | +| 02-27 | 21 | 5 | 16 | **-20,461.11** (고빈도 매매, 대부분 손실) | + +### 3.3 시장별 손익 + +| 시장 | 매도 수 | 승률 | 총 손익 | +|------|---------|------|---------| +| **KR** | 17 | 38.5% (0손익 제외, 5/13) | **-56,735 KRW** | +| US_AMEX | 12 | 75% | +4,476 USD | +| US_NASDAQ | 4 | 0% | -177 USD | +| US_NYSE | 13 | 30.8% | -45 USD | + +**KR 시장이 손실의 주 원인.** US는 AMEX 제외 시 대체로 손실 또는 보합. + +### 3.4 재계산 주석 반영 (통화 분리) + +> 산식 주석: 기존 표의 `총 실현 손익 -52,481`은 KRW/USD를 단순 합산한 값으로, 회계적으로 해석 불가. +> 아래는 같은 기간(2026-02-25~2026-02-27, SELL 45건)을 통화별로 분리한 결과. + +| 통화 | 매도 수 | 승/패 | 실현 손익 | +|------|---------|-------|-----------| +| KRW | 17 | 5승 / 8패 (4건 0손익) | **-56,735 KRW** | +| USD | 28 | 13승 / 14패 (1건 0손익) | **+4,253.70 USD** | + +### 3.5 재계산 주석 반영 (기존 보유 청산 성과 분리) + +> 분리 기준: 각 SELL의 직전 BUY가 `rationale LIKE '[startup-sync]%'` 인 경우를 +> `기존 보유(시작 시점 동기화 포지션) 청산`으로 분류. 
+ +| 구분 | 통화 | 매도 수 | 손익 | +|------|------|---------|------| +| 기존 보유 청산분 | KRW | 10 | **+12,230 KRW** | +| 기존 보유 청산분 | USD | 2 | **+21.03 USD** | +| 신규/전략 진입분만 | KRW | 7 | **-68,965 KRW** | +| 신규/전략 진입분만 | USD | 26 | **+4,232.67 USD** | + +추가로, 요청 취지(“기존 보유 수익 종목 정리 수익 제외”)에 맞춰 **기존 보유 청산 중 수익(+PnL)만 제외**하면: + +- KRW: `-56,735` → **-113,885 KRW** (기존 보유 수익 +57,150 KRW 제거) +- USD: `+4,253.70` → **+4,232.67 USD** (기존 보유 수익 +21.03 USD 제거) + +즉, 기존 성과표는 기보유 청산 이익(특히 KR 452260 +46,350 KRW)을 전략 성과에 포함해 +전략 자체 손익을 과대평가한 상태다. + +### 3.6 데이터 무결성 점검 (모의투자 혼합 여부 + USD 과대수익 원인) + +- `mode` 점검 결과: `live` 19,130건, `paper` 0건 + → **모의투자 혼합은 확인되지 않음**. +- 다만 USD 손익에는 **체결 매칭 이상치 1건**이 존재: + - `CRCA` SELL(15주, $35.14, +4,612.15 USD) vs 직전 BUY(146주, $3.5499) + - BUY/SELL 수량 불일치(146→15) 상태에서 PnL이 계산되어, 역분할/동기화 이슈 가능성이 큼. + +보수적 재집계(2026-02-25~2026-02-27, USD SELL 28건): + +| 집계 기준 | USD 손익 | 환산 KRW (참고) | KRW 합산 참고값 | +|-----------|----------|-----------------|-----------------| +| 원집계 | **+4,253.70 USD** | +6,167,865 | -56,735 + 6,167,865 = **+6,111,130** | +| 기존보유(startup-sync) 제외 | **+4,232.67 USD** | +6,137,372 | -68,965 + 6,137,372 = **+6,068,407** | +| 수량 일치 체결만 포함 | **-358.45 USD** | -519,753 | -56,735 + (-519,753) = **-576,488** | +| 기존보유 제외 + 수량 일치 체결만 포함 | **-379.48 USD** | -550,246 | -68,965 + (-550,246) = **-619,211** | + +> 가정 환율: **1 USD = 1,450 KRW** (2026-02-28 기준 참고 환율). +> 환산 KRW 및 합산값은 비교용 보조지표이며, 회계/정산 기준값과는 분리해 해석해야 한다. + +결론적으로 USD 구간의 플러스 성과는 실질적으로 `CRCA` 이상치 1건 영향이 지배적이며, +해당 거래를 무결성 필터로 제외하면 USD 성과는 손실 구간으로 전환된다. + +### 3.7 데이터 품질 이슈 및 집계 정의 + +#### 3.7.1 기간/건수 표기 (반영 완료) + +- 3.1에 UTC 기준 기간 명시 + SELL 45건(기간 외 1건 제외) 주석 추가. +- 3.2 일별 표에 DB 실측치 불일치 주의 문구 추가. 정확한 재현은 3.8 SQL 참조. + +#### 3.7.2 승률 정의 (반영 완료) + +- 종합 승률 39.1%(18/46): 0손익 포함 기준 — 3.1에 명시. +- KR 시장 승률 38.5%(5/13): 0손익 제외 기준 — 3.3에 명시. + +#### 3.7.3 startup-sync 중복 기록 + +- `BUY + [startup-sync]`가 76건 기록됨(동일 종목 반복 동기화 다수). +- `BUY price=0`도 38건 존재해, PnL 매칭 시 원가 기준이 흔들릴 여지가 큼. 
+- 성과 집계 시 `startup-sync`는 별도 레이어(초기 포지션 인식 이벤트)로 분리 저장 권장. +- 3.5에서 startup-sync 분리 집계 제공. + +#### 3.7.4 티커-거래소 드리프트 (ROOT-7 반영 완료) + +- 예: `CCUP/CRCA/FIGS/LLY` 등 동일 티커가 `US_AMEX/US_NASDAQ/US_NYSE`에 혼재 기록. +- 포지션 키를 `(ticker)`로만 쓰면 오매칭 위험 → ROOT-7으로 등재. + +#### 3.7.5 FX PnL 분리 항목 미활성 (1.2 반영 완료) + +- 스키마상 `strategy_pnl`, `fx_pnl` 컬럼이 있으나 SELL 전체 기준 `fx_pnl`은 전부 0. +- 1.2에서 REQ-V3-007 상태를 "⚠️ 코드 완료 / 운영 미반영"으로 변경. + +### 3.8 표준 집계 SQL (재현용) + +아래 SQL을 기준 쿼리로 고정하면, 성과표를 항상 같은 규칙으로 재생산할 수 있다. + +```sql +-- Base: 기간 + LIVE + SELL + 직전 BUY 메타 매칭 +WITH base AS ( + SELECT * + FROM trades + WHERE mode='live' + AND action='SELL' + AND timestamp >= '2026-02-25T00:00:00+00:00' + AND timestamp < '2026-02-28T00:00:00+00:00' +), +labeled AS ( + SELECT + s.id, + s.timestamp, + s.stock_code, + s.market, + s.exchange_code, + s.quantity AS sell_qty, + s.price AS sell_price, + s.pnl, + COALESCE(( + SELECT b.rationale + FROM trades b + WHERE b.mode='live' + AND b.action='BUY' + AND b.stock_code=s.stock_code + AND b.market=s.market + AND b.timestamp < s.timestamp + ORDER BY b.timestamp DESC, b.id DESC + LIMIT 1 + ), '') AS prev_buy_rationale, + ( + SELECT b.quantity + FROM trades b + WHERE b.mode='live' + AND b.action='BUY' + AND b.stock_code=s.stock_code + AND b.market=s.market + AND b.timestamp < s.timestamp + ORDER BY b.timestamp DESC, b.id DESC + LIMIT 1 + ) AS prev_buy_qty + FROM base s +) +SELECT * FROM labeled; +``` + +```sql +-- Q1) 통화 분리 손익 (혼합 금지) +WITH base AS ( + SELECT * FROM trades + WHERE mode='live' AND action='SELL' + AND timestamp >= '2026-02-25T00:00:00+00:00' + AND timestamp < '2026-02-28T00:00:00+00:00' +), +labeled AS ( + SELECT s.*, + s.quantity AS sell_qty, + COALESCE((SELECT b.rationale FROM trades b + WHERE b.mode='live' AND b.action='BUY' + AND b.stock_code=s.stock_code AND b.market=s.market + AND b.timestamp < s.timestamp + ORDER BY b.timestamp DESC, b.id DESC LIMIT 1), '') AS prev_buy_rationale, + (SELECT b.quantity FROM trades b + WHERE b.mode='live' AND 
b.action='BUY' + AND b.stock_code=s.stock_code AND b.market=s.market + AND b.timestamp < s.timestamp + ORDER BY b.timestamp DESC, b.id DESC LIMIT 1) AS prev_buy_qty + FROM base s +) +SELECT + CASE WHEN market='KR' THEN 'KRW' ELSE 'USD' END AS ccy, + COUNT(*) AS sells, + ROUND(SUM(pnl),2) AS pnl_sum +FROM labeled +GROUP BY ccy +ORDER BY ccy; +``` + +```sql +-- Q2) 기존 보유(startup-sync) 제외 성과 +WITH base AS ( + SELECT * FROM trades + WHERE mode='live' AND action='SELL' + AND timestamp >= '2026-02-25T00:00:00+00:00' + AND timestamp < '2026-02-28T00:00:00+00:00' +), +labeled AS ( + SELECT s.*, + s.quantity AS sell_qty, + COALESCE((SELECT b.rationale FROM trades b + WHERE b.mode='live' AND b.action='BUY' + AND b.stock_code=s.stock_code AND b.market=s.market + AND b.timestamp < s.timestamp + ORDER BY b.timestamp DESC, b.id DESC LIMIT 1), '') AS prev_buy_rationale, + (SELECT b.quantity FROM trades b + WHERE b.mode='live' AND b.action='BUY' + AND b.stock_code=s.stock_code AND b.market=s.market + AND b.timestamp < s.timestamp + ORDER BY b.timestamp DESC, b.id DESC LIMIT 1) AS prev_buy_qty + FROM base s +) +SELECT + CASE WHEN market='KR' THEN 'KRW' ELSE 'USD' END AS ccy, + COUNT(*) AS sells, + ROUND(SUM(pnl),2) AS pnl_sum +FROM labeled +WHERE prev_buy_rationale NOT LIKE '[startup-sync]%' +GROUP BY ccy +ORDER BY ccy; +``` + +```sql +-- Q3) 수량 일치 체결만 포함(무결성 필터) +WITH base AS ( + SELECT * FROM trades + WHERE mode='live' AND action='SELL' + AND timestamp >= '2026-02-25T00:00:00+00:00' + AND timestamp < '2026-02-28T00:00:00+00:00' +), +labeled AS ( + SELECT s.*, + s.quantity AS sell_qty, + COALESCE((SELECT b.rationale FROM trades b + WHERE b.mode='live' AND b.action='BUY' + AND b.stock_code=s.stock_code AND b.market=s.market + AND b.timestamp < s.timestamp + ORDER BY b.timestamp DESC, b.id DESC LIMIT 1), '') AS prev_buy_rationale, + (SELECT b.quantity FROM trades b + WHERE b.mode='live' AND b.action='BUY' + AND b.stock_code=s.stock_code AND b.market=s.market + AND b.timestamp < 
s.timestamp + ORDER BY b.timestamp DESC, b.id DESC LIMIT 1) AS prev_buy_qty + FROM base s +) +SELECT + CASE WHEN market='KR' THEN 'KRW' ELSE 'USD' END AS ccy, + COUNT(*) AS sells, + ROUND(SUM(pnl),2) AS pnl_sum +FROM labeled +WHERE prev_buy_qty = sell_qty +GROUP BY ccy +ORDER BY ccy; +``` + +```sql +-- Q4) 이상치 목록 (수량 불일치) +WITH base AS ( + SELECT * FROM trades + WHERE mode='live' AND action='SELL' + AND timestamp >= '2026-02-25T00:00:00+00:00' + AND timestamp < '2026-02-28T00:00:00+00:00' +), +labeled AS ( + SELECT s.id, s.timestamp, s.stock_code, s.market, s.quantity AS sell_qty, s.pnl, + (SELECT b.quantity FROM trades b + WHERE b.mode='live' AND b.action='BUY' + AND b.stock_code=s.stock_code AND b.market=s.market + AND b.timestamp < s.timestamp + ORDER BY b.timestamp DESC, b.id DESC LIMIT 1) AS prev_buy_qty + FROM base s +) +SELECT + id, timestamp, stock_code, market, sell_qty, prev_buy_qty, ROUND(pnl,2) AS pnl +FROM labeled +WHERE prev_buy_qty IS NOT NULL + AND prev_buy_qty != sell_qty +ORDER BY ABS(pnl) DESC; +``` + +--- + +## 4. 
수익률 저조 근본 원인 분석 + +### ROOT-1: hard_stop_pct 기본값(-2%)이 KR 소형주 변동성 대비 과소 + +- **현재 설정**: `stop_loss_threshold = -2.0` (`src/main.py:511`), staged exit의 `hard_stop_pct`로 전달 +- **v2 계획**: ATR 기반 동적 trailing stop (ExitPrice = PeakPrice - k × ATR) +- **실제 동작**: staged exit는 호출되나, `atr_value`/`pred_down_prob` 등 피처가 0.0으로 공급되어 hard_stop 편향 발동 (ROOT-5 참조) +- **증거**: + - 000370: 매수 8,040 → 24분 후 -2.74% 손절 + - 033340: 매수 2,080 → 18분 후 -3.13% 손절 + - 229000: -3.7%, -3.25%, -3.2% 반복 손절 + +### ROOT-2: 동일 종목 반복 매매 (재진입 쿨다운 미구현) + +- **문제**: 손절 후 동일 종목 즉시 재매수 → 고가 재진입 → 재손절 반복 +- **최악 사례**: 종목 229000 + | 매수가 | 매도가 | 손익 | 보유 시간 | + |--------|--------|------|-----------| + | 5,670 | 5,460 | -24,780 | 0.5h | + | 5,540 | 5,360 | -21,780 | 0.7h | + | 5,310 | 5,580 | +34,020 (승) | 0.8h | + | 5,620 | 5,440 | -21,420 | 1.5h | +- **순손실**: 하루 한 종목에서 **-33,960 KRW** + +### ROOT-3: 미국 페니스탁/마이크로캡 무분별 진입 + +- **문제**: $2 이하 종목에 confidence 85~90으로 진입, 오버나잇 대폭락 +- **사례**: + | 종목 | 손실률 | 보유시간 | + |------|--------|----------| + | ALBT | -27.7% | ~23h | + | SMJF | -15.9% | ~23h | + | KAPA | -18.2% | ~23h | + | CURX | -10.6% | ~23h | + | CELT | -8.3% | ~23h | + +### ROOT-4: 진화 전략 코드 생성기 문법 오류 + +- **위치**: `src/strategies/v20260227_*_evolved.py` +- **문제**: 중첩 `def evaluate` 정의 (들여쓰기 오류) +- **영향**: 런타임 실패 → 기본 전략으로 폴백 → 진화 시스템 사실상 무효 + +### ROOT-5: v2 청산 로직이 부분 통합되었으나 실효성 부족 (HIGH) + +- **현재 상태**: `src/main.py:500-583`에서 `evaluate_exit()` 기반 staged exit override가 동작함 + - 상태기계(HOLDING→BE_LOCK→ARMED→EXITED) 전이 구현 + - 4중 청산(hard stop, BE lock threat, ATR trailing, model/liquidity exit) 평가 +- **실효성 문제**: + - `hard_stop_pct`에 고정 `-2.0`이 기본값으로 들어가 v2 계획의 ATR 적응형 의도와 괴리 + - `be_arm_pct`/`arm_pct`가 playbook의 `take_profit_pct`에서 기계적 파생(`* 0.4`)되어 v2 계획의 독립 파라미터 튜닝 불가 + - `atr_value`, `pred_down_prob` 등 런타임 피처가 대부분 0.0으로 들어와 사실상 hard stop만 발동 +- **결론**: 코드 통합은 되었으나, 피처 공급과 파라미터 설정이 미비하여 v2 설계 가치가 실현되지 않는 상태 + +### ROOT-6: SELL 손익 계산이 부분청산/수량 불일치에 취약 (CRITICAL) + +- **위치**: `src/main.py:1658-1663`, 
`src/main.py:2755-2760` +- **문제**: PnL 계산이 실제 매도 수량(`sell_qty`)이 아닌 직전 BUY의 `buy_qty`를 사용 + - `trade_pnl = (trade_price - buy_price) * buy_qty` +- **영향**: 부분청산, 역분할/액분할, startup-sync 후 수량 드리프트 시 손익 과대/과소 계상 +- **실증**: CRCA 이상치(BUY 146주 → SELL 15주에서 PnL +4,612 USD) 가 이 버그와 정합 + +### ROOT-7: BUY 매칭 키에 exchange_code 미포함 — 잠재 오매칭 리스크 (HIGH) + +- **위치**: `src/db.py:292-313` +- **문제**: `get_latest_buy_trade()`가 `(stock_code, market)`만으로 매칭, `exchange_code` 미사용 +- **성격**: 현재 즉시 발생하는 확정 버그가 아닌, 동일 티커가 다중 거래소에 혼재 기록될 때 증폭되는 구조 리스크 +- **영향**: 데이터 드리프트 조건(예: CCUP/CRCA 등 다중 exchange 기록)에서 오매칭 → 손익 왜곡 가능 + +--- + +## 5. 수익률 개선 방안 + +### 5.1 즉시 적용 가능 (파라미터/로직 수정) + +| 우선순위 | 방안 | 예상 효과 | 난이도 | +|----------|------|-----------|--------| +| P0 | KR 손절선 확대: -2% → -4~5% 또는 ATR 기반 | 노이즈 손절 대폭 감소 | 낮음 | +| P0 | 재진입 쿨다운: 손절 후 동일 종목 1~2시간 매수 차단 | churn & burn 패턴 제거 | 낮음 | +| P1 | US 최소 가격 필터: $5 이하 종목 진입 차단 | 페니스탁 대폭락 방지 | 낮음 | +| P1 | 진화 전략 코드 생성 시 syntax 검증 추가 | 진화 시스템 정상화 | 낮음 | + +### 5.2 구조적 개선 (아키텍처 변경) + +| 우선순위 | 방안 | 예상 효과 | 난이도 | +|----------|------|-----------|--------| +| **P0** | **SELL PnL 계산을 sell_qty 기준으로 수정 (ROOT-6)** | 손익 계상 정확도 확보, 이상치 제거 | 낮음 | +| **P0** | **v2 staged exit에 실제 피처 공급 (atr_value, pred_down_prob) + 독립 파라미터 설정 (ROOT-5)** | v2 설계 가치 실현, 수익 보호 | 중간 | +| P0 | BUY 매칭 키에 exchange_code 추가 (ROOT-7) | 오매칭 방지 | 낮음 | +| P0 | 블랙아웃 복구 주문에 `log_trade()` 추가 (GAP-4) | DB/성과 리포트 정합성 | 낮음 | +| P1 | 세션 전환 시 리스크 파라미터 동적 재로딩 (GAP-3 해소) | 세션별 최적 파라미터 적용 | 중간 | +| P1 | session_id를 거래 로그/의사결정 로그에 명시적 전달 (GAP-1,2 해소) | 세션별 성과 분석 가능 | 낮음 | +| P2 | 블랙아웃 복구 시 가격/세션 재검증 강화 (GAP-4 잔여) | 세션 변경 후 무효 주문 방지 | 중간 | + +### 5.3 권장 실행 순서 + +``` +Phase 1 (즉시): 파라미터 조정 + → KR 손절 확대 + 재진입 쿨다운 + US 가격 필터 + → 예상: 가장 큰 손실 패턴 2개(노이즈 손절, 반복 매매) 즉시 제거 + +Phase 2 (단기): 데이터 정합성 + v2 실효화 + → SELL PnL을 sell_qty 기준으로 수정 + → BUY 매칭 키에 exchange_code 추가 + → 블랙아웃 복구 주문 DB 기록 추가 + → v2 staged exit에 실제 피처(ATR, pred_down_prob) 공급 + 독립 파라미터 설정 + → session_id 명시적 전달 + → 예상: 손익 정확도 확보 + 수익 구간 보호 메커니즘 실효화 + +Phase 3 
(중기): v3 세션 최적화 + → 세션 전환 훅 + 파라미터 재로딩 + → 블랙아웃 재검증 + → 운영 거버넌스 CI 자동화 +``` + +--- + +## 6. 테스트 커버리지 현황 + +### 테스트 존재 (통과) + +- ✅ 상태기계 승격 (`test_strategy_state_machine.py`) +- ✅ 4중 청산 규칙 (`test_strategy_exit_rules.py`) +- ✅ Triple Barrier 라벨링 (`test_triple_barrier.py`) +- ✅ Walk-Forward + Purge/Embargo (`test_walk_forward_split.py`) +- ✅ 백테스트 비용 검증 (`test_backtest_cost_guard.py`) +- ✅ Kill Switch 순서 (`test_kill_switch.py`) +- ✅ 블랙아웃 관리 (`test_blackout_manager.py`) +- ✅ 주문 정책 저유동 거부 (`test_order_policy.py`) +- ✅ FX 손익 분리 (`test_db.py`) + +### 테스트 미존재 + +- ❌ 세션 전환 훅 콜백 +- ❌ 세션 경계 리스크 파라미터 재로딩 +- ❌ DecisionLogger session_id 캡처 +- ❌ 실거래 경로 ↔ v2 상태기계 통합 테스트 (피처 공급 포함) +- ❌ 블랙아웃 복구 주문의 DB 기록 검증 +- ❌ SELL PnL 계산 시 수량 불일치 케이스 + +### 테스트 존재 (재점검으로 확인) + +- ✅ 블랙아웃 복구 후 유효 intent 실행 (`tests/test_main.py:5811`) +- ✅ 블랙아웃 복구 후 정책 거부 intent 드롭 (`tests/test_main.py:5851`) + +### 6.1 재점검 반영 이력 (2026-02-28) + +아래 코멘트들은 코드 대조 검증 후 본문에 반영 완료됨: + +1. ROOT-5: “완전 미통합” → “부분 통합 + 실효성 부족”으로 정정 (본문 반영) +2. GAP-4: “재검증 없음” → “부분 해소 + DB 기록 미구현”으로 정정 (본문 반영) +3. 블랙아웃 복구 DB 미기록: GAP-4에 통합 + 개선 방안 5.2에 P0 추가 +4. SELL PnL buy_qty 버그: ROOT-6으로 신규 등재 (CRITICAL) +5. BUY 매칭 exchange_code 누락: ROOT-7로 신규 등재 (HIGH) +6. 경로 표기: `main.py` → `src/main.py`로 정규화 완료 +7. 테스트 섹션: 블랙아웃 복구 테스트 존재 확인, “테스트 존재 (재점검)” 항목으로 이동 + +### 6.2 정밀 검토 반영 이력 (2026-02-28) + +아래 코멘트들은 검증 후 본문에 반영 완료됨: + +1. 기간 기준 통일: 3.1에 UTC 기준 명시 + SELL 45건(기간 외 1건 제외) 주석 추가 +2. ROOT-1 ↔ ROOT-5 정합성: ROOT-1 문구를 “staged exit 호출되나 hard_stop 편향”으로 정정 +3. REQ-V3-007 2단 표기: “⚠️ 코드 완료 / 운영 미반영”으로 상태 변경 +4. ROOT-7 톤 조정: “잠재 오매칭 리스크”로 표현 변경, 확정 버그 → 구조 리스크로 재분류 +5. 3.6 USD 손익 표에 환산 KRW(가정 환율 1,450) + KRW 합산 참고값 병기 +6. 3.2 일별 표에 DB 실측치 불일치 주의 문구 추가 +7. 3.3 KR 승률에 “0손익 제외” 기준 명시 +8. 
3.7 코멘트들을 세부 항목(3.7.1~3.7.5)으로 정리, 각 항목에 반영 상태 표기 + +--- + +*끝.* diff --git a/docs/ouroboros/82_doc_restructure_plan.md b/docs/ouroboros/82_doc_restructure_plan.md new file mode 100644 index 0000000..8bf0c84 --- /dev/null +++ b/docs/ouroboros/82_doc_restructure_plan.md @@ -0,0 +1,96 @@ + + +# 문서 재구조화 계획: 감사 → 실행 파이프라인 + +## Context + +80_implementation_audit.md는 v2/v3 구현 감사와 수익률 분석을 수행했으나, 여러 차례 리뷰를 거치면서 리뷰 이력/데이터 품질 논의/SQL 쿼리 등이 혼재되어 **실행 문서로 사용하기 어려운 상태**다. + +목표: 이 감사 결과를 바탕으로 **티켓 생성 → 개발 설계 → 구현/리뷰 → 검증 → 실환경 테스트**까지 일관되게 진행할 수 있는 문서 체계를 만든다. + +## 변경 사항 + +### 1. 80_implementation_audit.md 정리 (감사 기록 문서) + +**역할**: 현재 상태의 팩트 기록. "무엇이 문제인가"에만 집중. + +정리 내용: +- Section 3: P&L 분석을 핵심 수치만 남기고 간결화 + - 3.1(종합), 3.3(시장별), 3.4(통화 분리), 3.5(전략 진입분 분리), 3.6(무결성 결론) 유지 + - 3.2 일별 손익: 주의 문구 제거, 본문으로 통합 + - 3.7 데이터 품질: 핵심 결론만 남기고 세부 항목 제거 + - 3.8 SQL: 별도 파일(`scripts/audit_queries.sql`)로 분리, 본문에서 참조만 +- Section 6.1, 6.2 리뷰 반영 이력: 전부 제거 (git history로 추적 가능) +- Section 6 테스트: "재점검으로 확인" 항목을 "테스트 존재" 항목에 통합 +- 신규 Section 7: 후속 문서 링크 (85_ 참조) + +### 2. 85_loss_recovery_action_plan.md 신규 작성 (실행 계획 문서) + +**역할**: "어떻게 고칠 것인가". 티켓 생성부터 실환경 검증까지의 실행 청사진. + +구조: +``` +## 1. 요약 +- 목표: 손실 구간 탈출을 위한 7개 ROOT/5개 GAP 해소 +- 성공 기준 (정량) + +## 2. Phase별 작업 분해 +### Phase 1: 즉시 파라미터/로직 수정 (손실 출혈 차단) + 각 항목마다: + - ROOT/GAP 참조 + - Gitea 이슈 제목/설명 템플릿 + - 변경 대상 파일 + 현재 동작 + 목표 동작 + - 수용 기준 (acceptance criteria) + - 테스트 계획 + - 의존성/차단 관계 + +### Phase 2: 데이터 정합성 + v2 실효화 + (동일 형식) + +### Phase 3: v3 세션 최적화 + (동일 형식) + +## 3. 검증 계획 +- 단위 테스트 기준 +- 통합 테스트 시나리오 (백테스트 파이프라인 활용) +- 실환경 검증: 소액 live 운용으로 직접 검증 + (paper trading 제외 — 실환경과 괴리가 커 검증 신뢰도 부족) +- Phase별 실환경 투입 기준: + 단위/통합 테스트 통과 → 소액 live → 모니터링 → 정상 확인 후 본운용 + +## 4. 의존성 그래프 +- Phase 간 blocking 관계 +- Phase 내 작업 순서 + +## 5. 롤백 계획 +- 각 Phase 실패 시 롤백 절차 +``` + +### 3. README.md 업데이트 + +- 85_ 문서 링크 추가 + +## 작업 순서 + +1. 80_ 정리 (노이즈 제거, SQL 분리, 리뷰 이력 삭제) +2. `scripts/audit_queries.sql` 작성 (80_에서 분리한 SQL) +3. 85_ 신규 작성 (실행 계획) +4. 
README.md 업데이트 + +## 작성하지 않는 것 + +- 30_code_level_work_orders.md, 40_acceptance_and_test_plan.md 업데이트: 85_를 기반으로 실제 구현 시점에 업데이트 (지금은 실행 계획 수립까지만) +- 01_requirements_registry.md: ROOT/GAP에서 파생되는 신규 REQ는 구현 착수 시 등록 +- Gitea 이슈 생성: 85_ 문서 확정 후 별도 진행 + +## 검증 + +- 80_: 감사 팩트만 남았는지, 리뷰 이력이 제거되었는지 확인 +- 85_: Phase별 작업이 Gitea 이슈로 바로 전환 가능한 수준인지 확인 +- 85_ 각 항목에 수용 기준과 테스트 계획이 포함되었는지 확인 diff --git a/docs/ouroboros/README.md b/docs/ouroboros/README.md index f3a2c60..4605605 100644 --- a/docs/ouroboros/README.md +++ b/docs/ouroboros/README.md @@ -22,6 +22,7 @@ Updated: 2026-02-26 8. TPM 제어 프로토콜/수용 매트릭스: [50_tpm_control_protocol.md](./50_tpm_control_protocol.md) 9. 저장소 강제 설정 체크리스트: [60_repo_enforcement_checklist.md](./60_repo_enforcement_checklist.md) 10. 메인 에이전트 아이디에이션 백로그: [70_main_agent_ideation.md](./70_main_agent_ideation.md) +11. v2/v3 구현 감사 및 수익률 분석: [80_implementation_audit.md](./80_implementation_audit.md) ## 운영 규칙 From ca5fa7376947f33b33045e8d3dd8ebc8bab76b70 Mon Sep 17 00:00:00 2001 From: agentson Date: Sat, 28 Feb 2026 13:21:15 +0900 Subject: [PATCH 032/109] docs: restructure audit docs and create loss recovery action plan (#331) - Clean up 80_implementation_audit.md: remove review history (6.1/6.2), extract SQL queries, condense data quality section - Create 85_loss_recovery_action_plan.md with 13 action items across 3 phases (Phase 1: stop bleeding, Phase 2: data integrity + v2, Phase 3: v3 session optimization) - Extract standard audit SQL queries to scripts/audit_queries.sql - Update docs/ouroboros/README.md with 85_ link - Create Gitea issues #318-#330 for all 13 action items Co-Authored-By: Claude Opus 4.6 --- docs/ouroboros/80_implementation_audit.md | 253 +---------- .../ouroboros/85_loss_recovery_action_plan.md | 392 ++++++++++++++++++ docs/ouroboros/README.md | 1 + scripts/audit_queries.sql | 184 ++++++++ 4 files changed, 599 insertions(+), 231 deletions(-) create mode 100644 docs/ouroboros/85_loss_recovery_action_plan.md create mode 100644 
scripts/audit_queries.sql diff --git a/docs/ouroboros/80_implementation_audit.md b/docs/ouroboros/80_implementation_audit.md index 0ffefcd..cfe6e14 100644 --- a/docs/ouroboros/80_implementation_audit.md +++ b/docs/ouroboros/80_implementation_audit.md @@ -112,14 +112,13 @@ Updated: 2026-02-28 ### 3.2 일별 손익 -> 주의: 아래 초기 집계 건수는 DB 실측치와 일부 불일치 (02-26: 문서 10건 vs DB 14건, 02-27: 문서 21건 vs DB 22건). -> 정확한 재현은 3.8 표준 집계 SQL 참조. - -| 날짜 | 매도 수 (초기 집계) | 승 | 패 | 일간 손익 | -|------|---------------------|----|----|-----------| +| 날짜 | 매도 수 | 승 | 패 | 일간 손익 | +|------|---------|----|----|-----------| | 02-25 | 9 | 8 | 1 | +63.21 (USD, 미세 수익) | -| 02-26 | 10 | 5 | 5 | **-32,083.40** (KR 대량 손실) | -| 02-27 | 21 | 5 | 16 | **-20,461.11** (고빈도 매매, 대부분 손실) | +| 02-26 | 14 | 5 | 5 | **-32,083.40** (KR 대량 손실) | +| 02-27 | 22 | 5 | 16 | **-20,461.11** (고빈도 매매, 대부분 손실) | + +> 정확한 재현: `scripts/audit_queries.sql` 참조. ### 3.3 시장별 손익 @@ -185,208 +184,21 @@ Updated: 2026-02-28 결론적으로 USD 구간의 플러스 성과는 실질적으로 `CRCA` 이상치 1건 영향이 지배적이며, 해당 거래를 무결성 필터로 제외하면 USD 성과는 손실 구간으로 전환된다. -### 3.7 데이터 품질 이슈 및 집계 정의 +### 3.7 데이터 품질 이슈 요약 -#### 3.7.1 기간/건수 표기 (반영 완료) - -- 3.1에 UTC 기준 기간 명시 + SELL 45건(기간 외 1건 제외) 주석 추가. -- 3.2 일별 표에 DB 실측치 불일치 주의 문구 추가. 정확한 재현은 3.8 SQL 참조. - -#### 3.7.2 승률 정의 (반영 완료) - -- 종합 승률 39.1%(18/46): 0손익 포함 기준 — 3.1에 명시. -- KR 시장 승률 38.5%(5/13): 0손익 제외 기준 — 3.3에 명시. - -#### 3.7.3 startup-sync 중복 기록 - -- `BUY + [startup-sync]`가 76건 기록됨(동일 종목 반복 동기화 다수). -- `BUY price=0`도 38건 존재해, PnL 매칭 시 원가 기준이 흔들릴 여지가 큼. -- 성과 집계 시 `startup-sync`는 별도 레이어(초기 포지션 인식 이벤트)로 분리 저장 권장. -- 3.5에서 startup-sync 분리 집계 제공. - -#### 3.7.4 티커-거래소 드리프트 (ROOT-7 반영 완료) - -- 예: `CCUP/CRCA/FIGS/LLY` 등 동일 티커가 `US_AMEX/US_NASDAQ/US_NYSE`에 혼재 기록. -- 포지션 키를 `(ticker)`로만 쓰면 오매칭 위험 → ROOT-7으로 등재. - -#### 3.7.5 FX PnL 분리 항목 미활성 (1.2 반영 완료) - -- 스키마상 `strategy_pnl`, `fx_pnl` 컬럼이 있으나 SELL 전체 기준 `fx_pnl`은 전부 0. -- 1.2에서 REQ-V3-007 상태를 "⚠️ 코드 완료 / 운영 미반영"으로 변경. +- **startup-sync 중복**: BUY 76건 반복 동기화, price=0 38건 → PnL 매칭 왜곡 가능. 
분리 집계는 3.5 참조. +- **티커-거래소 드리프트**: 동일 티커가 다중 거래소에 혼재 기록 → ROOT-7 참조. +- **FX PnL 미활성**: 스키마 존재, 운영 데이터 전부 0 → REQ-V3-007 참조. ### 3.8 표준 집계 SQL (재현용) -아래 SQL을 기준 쿼리로 고정하면, 성과표를 항상 같은 규칙으로 재생산할 수 있다. +성과표 재현을 위한 기준 쿼리는 [`scripts/audit_queries.sql`](../../scripts/audit_queries.sql)에 분리되어 있다. -```sql --- Base: 기간 + LIVE + SELL + 직전 BUY 메타 매칭 -WITH base AS ( - SELECT * - FROM trades - WHERE mode='live' - AND action='SELL' - AND timestamp >= '2026-02-25T00:00:00+00:00' - AND timestamp < '2026-02-28T00:00:00+00:00' -), -labeled AS ( - SELECT - s.id, - s.timestamp, - s.stock_code, - s.market, - s.exchange_code, - s.quantity AS sell_qty, - s.price AS sell_price, - s.pnl, - COALESCE(( - SELECT b.rationale - FROM trades b - WHERE b.mode='live' - AND b.action='BUY' - AND b.stock_code=s.stock_code - AND b.market=s.market - AND b.timestamp < s.timestamp - ORDER BY b.timestamp DESC, b.id DESC - LIMIT 1 - ), '') AS prev_buy_rationale, - ( - SELECT b.quantity - FROM trades b - WHERE b.mode='live' - AND b.action='BUY' - AND b.stock_code=s.stock_code - AND b.market=s.market - AND b.timestamp < s.timestamp - ORDER BY b.timestamp DESC, b.id DESC - LIMIT 1 - ) AS prev_buy_qty - FROM base s -) -SELECT * FROM labeled; -``` - -```sql --- Q1) 통화 분리 손익 (혼합 금지) -WITH base AS ( - SELECT * FROM trades - WHERE mode='live' AND action='SELL' - AND timestamp >= '2026-02-25T00:00:00+00:00' - AND timestamp < '2026-02-28T00:00:00+00:00' -), -labeled AS ( - SELECT s.*, - s.quantity AS sell_qty, - COALESCE((SELECT b.rationale FROM trades b - WHERE b.mode='live' AND b.action='BUY' - AND b.stock_code=s.stock_code AND b.market=s.market - AND b.timestamp < s.timestamp - ORDER BY b.timestamp DESC, b.id DESC LIMIT 1), '') AS prev_buy_rationale, - (SELECT b.quantity FROM trades b - WHERE b.mode='live' AND b.action='BUY' - AND b.stock_code=s.stock_code AND b.market=s.market - AND b.timestamp < s.timestamp - ORDER BY b.timestamp DESC, b.id DESC LIMIT 1) AS prev_buy_qty - FROM base s -) -SELECT - CASE WHEN 
market='KR' THEN 'KRW' ELSE 'USD' END AS ccy, - COUNT(*) AS sells, - ROUND(SUM(pnl),2) AS pnl_sum -FROM labeled -GROUP BY ccy -ORDER BY ccy; -``` - -```sql --- Q2) 기존 보유(startup-sync) 제외 성과 -WITH base AS ( - SELECT * FROM trades - WHERE mode='live' AND action='SELL' - AND timestamp >= '2026-02-25T00:00:00+00:00' - AND timestamp < '2026-02-28T00:00:00+00:00' -), -labeled AS ( - SELECT s.*, - s.quantity AS sell_qty, - COALESCE((SELECT b.rationale FROM trades b - WHERE b.mode='live' AND b.action='BUY' - AND b.stock_code=s.stock_code AND b.market=s.market - AND b.timestamp < s.timestamp - ORDER BY b.timestamp DESC, b.id DESC LIMIT 1), '') AS prev_buy_rationale, - (SELECT b.quantity FROM trades b - WHERE b.mode='live' AND b.action='BUY' - AND b.stock_code=s.stock_code AND b.market=s.market - AND b.timestamp < s.timestamp - ORDER BY b.timestamp DESC, b.id DESC LIMIT 1) AS prev_buy_qty - FROM base s -) -SELECT - CASE WHEN market='KR' THEN 'KRW' ELSE 'USD' END AS ccy, - COUNT(*) AS sells, - ROUND(SUM(pnl),2) AS pnl_sum -FROM labeled -WHERE prev_buy_rationale NOT LIKE '[startup-sync]%' -GROUP BY ccy -ORDER BY ccy; -``` - -```sql --- Q3) 수량 일치 체결만 포함(무결성 필터) -WITH base AS ( - SELECT * FROM trades - WHERE mode='live' AND action='SELL' - AND timestamp >= '2026-02-25T00:00:00+00:00' - AND timestamp < '2026-02-28T00:00:00+00:00' -), -labeled AS ( - SELECT s.*, - s.quantity AS sell_qty, - COALESCE((SELECT b.rationale FROM trades b - WHERE b.mode='live' AND b.action='BUY' - AND b.stock_code=s.stock_code AND b.market=s.market - AND b.timestamp < s.timestamp - ORDER BY b.timestamp DESC, b.id DESC LIMIT 1), '') AS prev_buy_rationale, - (SELECT b.quantity FROM trades b - WHERE b.mode='live' AND b.action='BUY' - AND b.stock_code=s.stock_code AND b.market=s.market - AND b.timestamp < s.timestamp - ORDER BY b.timestamp DESC, b.id DESC LIMIT 1) AS prev_buy_qty - FROM base s -) -SELECT - CASE WHEN market='KR' THEN 'KRW' ELSE 'USD' END AS ccy, - COUNT(*) AS sells, - ROUND(SUM(pnl),2) AS 
pnl_sum -FROM labeled -WHERE prev_buy_qty = sell_qty -GROUP BY ccy -ORDER BY ccy; -``` - -```sql --- Q4) 이상치 목록 (수량 불일치) -WITH base AS ( - SELECT * FROM trades - WHERE mode='live' AND action='SELL' - AND timestamp >= '2026-02-25T00:00:00+00:00' - AND timestamp < '2026-02-28T00:00:00+00:00' -), -labeled AS ( - SELECT s.id, s.timestamp, s.stock_code, s.market, s.quantity AS sell_qty, s.pnl, - (SELECT b.quantity FROM trades b - WHERE b.mode='live' AND b.action='BUY' - AND b.stock_code=s.stock_code AND b.market=s.market - AND b.timestamp < s.timestamp - ORDER BY b.timestamp DESC, b.id DESC LIMIT 1) AS prev_buy_qty - FROM base s -) -SELECT - id, timestamp, stock_code, market, sell_qty, prev_buy_qty, ROUND(pnl,2) AS pnl -FROM labeled -WHERE prev_buy_qty IS NOT NULL - AND prev_buy_qty != sell_qty -ORDER BY ABS(pnl) DESC; -``` +- **Base**: 기간 + LIVE + SELL + 직전 BUY 메타 매칭 +- **Q1**: 통화 분리 손익 (KRW/USD 혼합 금지) +- **Q2**: 기존 보유(startup-sync) 제외 성과 +- **Q3**: 수량 일치 체결만 포함 (무결성 필터) +- **Q4**: 이상치 목록 (수량 불일치) --- @@ -519,6 +331,8 @@ Phase 3 (중기): v3 세션 최적화 - ✅ 블랙아웃 관리 (`test_blackout_manager.py`) - ✅ 주문 정책 저유동 거부 (`test_order_policy.py`) - ✅ FX 손익 분리 (`test_db.py`) +- ✅ 블랙아웃 복구 후 유효 intent 실행 (`tests/test_main.py:5811`) +- ✅ 블랙아웃 복구 후 정책 거부 intent 드롭 (`tests/test_main.py:5851`) ### 테스트 미존재 @@ -529,35 +343,12 @@ Phase 3 (중기): v3 세션 최적화 - ❌ 블랙아웃 복구 주문의 DB 기록 검증 - ❌ SELL PnL 계산 시 수량 불일치 케이스 -### 테스트 존재 (재점검으로 확인) +--- -- ✅ 블랙아웃 복구 후 유효 intent 실행 (`tests/test_main.py:5811`) -- ✅ 블랙아웃 복구 후 정책 거부 intent 드롭 (`tests/test_main.py:5851`) +## 7. 후속 문서 -### 6.1 재점검 반영 이력 (2026-02-28) - -아래 코멘트들은 코드 대조 검증 후 본문에 반영 완료됨: - -1. ROOT-5: “완전 미통합” → “부분 통합 + 실효성 부족”으로 정정 (본문 반영) -2. GAP-4: “재검증 없음” → “부분 해소 + DB 기록 미구현”으로 정정 (본문 반영) -3. 블랙아웃 복구 DB 미기록: GAP-4에 통합 + 개선 방안 5.2에 P0 추가 -4. SELL PnL buy_qty 버그: ROOT-6으로 신규 등재 (CRITICAL) -5. BUY 매칭 exchange_code 누락: ROOT-7로 신규 등재 (HIGH) -6. 경로 표기: `main.py` → `src/main.py`로 정규화 완료 -7. 
테스트 섹션: 블랙아웃 복구 테스트 존재 확인, “테스트 존재 (재점검)” 항목으로 이동 - -### 6.2 정밀 검토 반영 이력 (2026-02-28) - -아래 코멘트들은 검증 후 본문에 반영 완료됨: - -1. 기간 기준 통일: 3.1에 UTC 기준 명시 + SELL 45건(기간 외 1건 제외) 주석 추가 -2. ROOT-1 ↔ ROOT-5 정합성: ROOT-1 문구를 “staged exit 호출되나 hard_stop 편향”으로 정정 -3. REQ-V3-007 2단 표기: “⚠️ 코드 완료 / 운영 미반영”으로 상태 변경 -4. ROOT-7 톤 조정: “잠재 오매칭 리스크”로 표현 변경, 확정 버그 → 구조 리스크로 재분류 -5. 3.6 USD 손익 표에 환산 KRW(가정 환율 1,450) + KRW 합산 참고값 병기 -6. 3.2 일별 표에 DB 실측치 불일치 주의 문구 추가 -7. 3.3 KR 승률에 “0손익 제외” 기준 명시 -8. 3.7 코멘트들을 세부 항목(3.7.1~3.7.5)으로 정리, 각 항목에 반영 상태 표기 +- **실행 계획**: [85_loss_recovery_action_plan.md](./85_loss_recovery_action_plan.md) — ROOT/GAP 해소를 위한 Phase별 작업 분해 및 Gitea 이슈 연결 +- **표준 집계 SQL**: [scripts/audit_queries.sql](../../scripts/audit_queries.sql) --- diff --git a/docs/ouroboros/85_loss_recovery_action_plan.md b/docs/ouroboros/85_loss_recovery_action_plan.md new file mode 100644 index 0000000..6004955 --- /dev/null +++ b/docs/ouroboros/85_loss_recovery_action_plan.md @@ -0,0 +1,392 @@ + + +# 손실 복구 실행 계획 + +작성일: 2026-02-28 +기반 문서: [80_implementation_audit.md](./80_implementation_audit.md) (ROOT 7개 + GAP 5개) + +--- + +## 1. 요약 + +### 1.1 목표 + +80_implementation_audit.md에서 식별된 7개 근본 원인(ROOT-1~7)과 5개 구현 갭(GAP-1~5)을 해소하여 실거래 손실 구간에서 탈출한다. + +### 1.2 성공 기준 (정량) + +| 지표 | 현재 | 목표 | +|------|------|------| +| KR 시장 승률 | 38.5% | >= 50% | +| 동일 종목 반복 매매 (일간) | 최대 4회 | <= 2회 | +| US 페니스탁($5 이하) 진입 | 무제한 | 0건 | +| SELL PnL 수량 불일치 건 | 존재 | 0건 | +| 블랙아웃 복구 주문 DB 누락 | 존재 | 0건 | +| session_id 누락 거래 로그 | 다수 | 0건 | +| 진화 전략 syntax 오류율 | 100% (확인된 3건 모두) | 0% | + +--- + +## 2. Phase별 작업 분해 + +### Phase 1: 즉시 — 손실 출혈 차단 + +가장 큰 손실 패턴(노이즈 손절, 반복 매매, 페니스탁)을 즉시 제거한다. 
+ +--- + +#### ACT-01: KR 손절선 ATR 기반 동적 확대 + +- **ROOT 참조**: ROOT-1 (hard_stop_pct -2%가 KR 소형주 변동성 대비 과소) +- **Gitea 이슈**: feat: KR 손절선 ATR 기반 동적 확대 (-2% → ATR 적응형) +- **Gitea 이슈 번호**: #318 +- **변경 대상 파일**: `src/main.py`, `src/strategy/exit_rules.py`, `src/config.py` +- **현재 동작**: `hard_stop_pct = -2.0` 고정값으로 모든 시장에 동일 적용 +- **목표 동작**: KR 시장은 ATR(14) 기반 동적 손절선 적용. 최소 -2%, 최대 -7%, 기본값은 `k * ATR / entry_price * 100` (k=2.0) +- **수용 기준**: + - ATR 값이 존재할 때 동적 손절선이 계산됨 + - ATR 미제공 시 기존 -2% 폴백 + - KR 이외 시장은 기존 동작 유지 +- **테스트 계획**: + - 단위: ATR 기반 손절선 계산 로직 테스트 (경계값: ATR=0, ATR=극단값) + - 통합: 백테스트 파이프라인에서 KR 종목 손절 빈도 비교 +- **의존성**: 없음 + +--- + +#### ACT-02: 손절 후 동일 종목 재진입 쿨다운 + +- **ROOT 참조**: ROOT-2 (동일 종목 반복 매매) +- **Gitea 이슈**: feat: 손절 후 동일 종목 재진입 쿨다운 (1~2시간) +- **Gitea 이슈 번호**: #319 +- **변경 대상 파일**: `src/main.py`, `src/config.py` +- **현재 동작**: 손절 후 동일 종목 즉시 재매수 가능 +- **목표 동작**: 손절(SELL with pnl < 0) 후 동일 종목은 `COOLDOWN_MINUTES` (기본 120분) 동안 매수 차단 +- **수용 기준**: + - 손절 기록이 있는 종목에 대해 쿨다운 시간 내 BUY 시도 시 거부 + - 쿨다운 경과 후 정상 진입 허용 + - 익절(pnl >= 0)에는 쿨다운 미적용 +- **테스트 계획**: + - 단위: 쿨다운 시간 내/외 매수 시도 테스트 + - 통합: 229000 유사 패턴 백테스트 시나리오 +- **의존성**: 없음 + +--- + +#### ACT-03: US $5 이하 종목 진입 차단 필터 + +- **ROOT 참조**: ROOT-3 (미국 페니스탁 무분별 진입) +- **Gitea 이슈**: feat: US $5 이하 종목 진입 차단 필터 +- **Gitea 이슈 번호**: #320 +- **변경 대상 파일**: `src/main.py`, `src/config.py` +- **현재 동작**: 가격 제한 없이 모든 US 종목 진입 가능 +- **목표 동작**: US 시장 BUY 시 현재가 $5 이하이면 진입 차단. 임계값은 `US_MIN_PRICE` 환경변수로 설정 가능 +- **수용 기준**: + - $5 이하 종목 BUY 시도 시 거부 + 로그 기록 + - $5 초과 종목은 기존 동작 유지 + - KR 등 다른 시장에는 미적용 +- **테스트 계획**: + - 단위: 가격별 필터 동작 테스트 (경계값: $4.99, $5.00, $5.01) +- **의존성**: 없음 + +--- + +#### ACT-04: 진화 전략 코드 생성 시 syntax 검증 추가 + +- **ROOT 참조**: ROOT-4 (진화 전략 문법 오류) +- **Gitea 이슈**: fix: 진화 전략 코드 생성 시 syntax 검증 추가 +- **Gitea 이슈 번호**: #321 +- **변경 대상 파일**: `src/evolution/optimizer.py` +- **현재 동작**: 생성된 Python 코드를 검증 없이 파일로 저장 +- **목표 동작**: `ast.parse()` + `compile()` 로 syntax 검증 후 통과한 코드만 저장. 
실패 시 로그 경고 + 기존 전략 유지 +- **수용 기준**: + - syntax 오류가 있는 코드는 저장되지 않음 + - 검증 실패 시 기존 전략으로 폴백 + - 검증 실패 로그가 기록됨 +- **테스트 계획**: + - 단위: 정상 코드/오류 코드 검증 테스트 + - 기존 `v20260227_*_evolved.py` 파일로 회귀 테스트 +- **의존성**: 없음 + +--- + +### Phase 2: 단기 — 데이터 정합성 + v2 실효화 + +손익 계산 정확도를 확보하고, v2 청산 로직을 실효화한다. + +--- + +#### ACT-05: SELL PnL 계산을 sell_qty 기준으로 수정 + +- **ROOT 참조**: ROOT-6 (CRITICAL — PnL 계산이 buy_qty 사용) +- **Gitea 이슈**: fix(critical): SELL PnL 계산을 sell_qty 기준으로 수정 +- **Gitea 이슈 번호**: #322 +- **변경 대상 파일**: `src/main.py` (line 1658-1663, 2755-2760) +- **현재 동작**: `trade_pnl = (trade_price - buy_price) * buy_qty` — 직전 BUY 수량 사용 +- **목표 동작**: `trade_pnl = (trade_price - buy_price) * sell_qty` — 실제 매도 수량 사용 +- **수용 기준**: + - 부분청산 시 매도 수량 기준 PnL 계산 + - 기존 전량 매도(buy_qty == sell_qty) 케이스는 동일 결과 + - CRCA 유사 이상치 재발 불가 +- **테스트 계획**: + - 단위: 전량 매도, 부분 매도, 수량 불일치 케이스별 PnL 검증 + - DB: Q4 쿼리(`scripts/audit_queries.sql`)로 이상치 0건 확인 +- **의존성**: 없음 + +--- + +#### ACT-06: BUY 매칭 키에 exchange_code 추가 + +- **ROOT 참조**: ROOT-7 (BUY 매칭 키에 exchange_code 미포함) +- **Gitea 이슈**: fix: BUY 매칭 키에 exchange_code 추가 +- **Gitea 이슈 번호**: #323 +- **변경 대상 파일**: `src/db.py` (line 292-313) +- **현재 동작**: `get_latest_buy_trade()`가 `(stock_code, market)`만으로 매칭 +- **목표 동작**: `exchange_code`가 존재할 때 매칭 키에 포함. NULL인 경우 기존 동작 유지 (하위 호환) +- **수용 기준**: + - 동일 티커 다중 거래소 기록 시 정확한 BUY 매칭 + - exchange_code가 NULL인 레거시 데이터에서도 정상 동작 +- **테스트 계획**: + - 단위: 동일 티커 다중 exchange 매칭 테스트 + - 단위: exchange_code NULL 하위 호환 테스트 +- **의존성**: 없음 + +--- + +#### ACT-07: 블랙아웃 복구 주문에 log_trade() 추가 + +- **ROOT 참조**: GAP-4 (블랙아웃 복구 주문 DB 미기록) +- **Gitea 이슈**: fix: 블랙아웃 복구 주문에 log_trade() 추가 +- **Gitea 이슈 번호**: #324 +- **변경 대상 파일**: `src/main.py` (line 694-791, 블랙아웃 복구 실행 경로) +- **현재 동작**: 블랙아웃 복구 주문이 실행되나 `log_trade()` 호출 없음 → DB에 기록 안 됨 +- **목표 동작**: 복구 주문 실행 후 `log_trade()` 호출하여 DB에 기록. 
rationale에 `[blackout-recovery]` prefix 추가 +- **수용 기준**: + - 블랙아웃 복구 주문이 trades 테이블에 기록됨 + - rationale로 복구 주문 식별 가능 + - 성과 리포트에 복구 주문 포함 +- **테스트 계획**: + - 단위: 복구 주문 실행 후 DB 기록 존재 확인 + - 통합: 블랙아웃 시나리오 end-to-end 테스트 +- **의존성**: 없음 + +--- + +#### ACT-08: v2 staged exit에 실제 피처 공급 + +- **ROOT 참조**: ROOT-5 (v2 청산 로직 실효성 부족) +- **Gitea 이슈**: feat: v2 staged exit에 실제 피처(ATR, pred_down_prob) 공급 +- **Gitea 이슈 번호**: #325 +- **변경 대상 파일**: `src/main.py` (line 500-583), `src/strategy/exit_rules.py`, `src/analysis/technical.py` +- **현재 동작**: `atr_value=0.0`, `pred_down_prob=0.0`으로 공급 → hard stop만 발동 +- **목표 동작**: + - `atr_value`: 보유 종목의 ATR(14) 실시간 계산하여 공급 + - `pred_down_prob`: 최소한 RSI 기반 하락 확률 추정값 공급 (추후 ML 모델로 대체 가능) + - `be_arm_pct`/`arm_pct`: 독립 파라미터로 설정 가능 (take_profit_pct * 0.4 기계적 파생 제거) +- **수용 기준**: + - `evaluate_exit()` 호출 시 atr_value > 0 (ATR 계산 가능한 종목) + - ATR trailing stop이 실제 발동 가능 + - be_arm_pct/arm_pct 독립 설정 가능 +- **테스트 계획**: + - 단위: 피처 공급 경로별 값 검증 + - 통합: 상태기계 전이 시나리오 (HOLDING→BE_LOCK→ARMED→EXITED) +- **의존성**: ACT-01 (ATR 계산 인프라 공유) + +--- + +#### ACT-09: session_id를 거래/의사결정 로그에 명시적 전달 + +- **ROOT 참조**: GAP-1 (DecisionLogger session_id 미포함), GAP-2 (log_trade session_id 미전달) +- **Gitea 이슈**: feat: session_id를 거래/의사결정 로그에 명시적 전달 +- **Gitea 이슈 번호**: #326 +- **변경 대상 파일**: `src/logging/decision_logger.py`, `src/main.py` (line 1625, 1682, 2769), `src/db.py` +- **현재 동작**: + - `log_decision()`: session_id 파라미터 없음 + - `log_trade()`: session_id 미전달, 시장 코드 기반 자동 추론에 의존 +- **목표 동작**: + - `log_decision()`: session_id 파라미터 추가, 로그에 기록 + - `log_trade()` 호출 시 런타임 session_id 명시적 전달 +- **수용 기준**: + - 모든 SELL/BUY 로그에 session_id 필드 존재 + - 의사결정 로그에 session_id 필드 존재 + - session_id가 실제 런타임 세션과 일치 +- **테스트 계획**: + - 단위: log_decision() session_id 캡처 테스트 + - 단위: log_trade() session_id 전달 테스트 +- **의존성**: 없음 + +--- + +### Phase 3: 중기 — v3 세션 최적화 + +세션 경계 처리와 운영 거버넌스를 강화한다. 
+ +--- + +#### ACT-10: 세션 전환 시 리스크 파라미터 동적 재로딩 + +- **ROOT 참조**: GAP-3 (세션 전환 시 리스크 파라미터 재로딩 없음) +- **Gitea 이슈**: feat: 세션 전환 시 리스크 파라미터 동적 재로딩 +- **Gitea 이슈 번호**: #327 +- **변경 대상 파일**: `src/main.py`, `src/config.py` +- **현재 동작**: 리스크 파라미터가 시작 시 한 번만 로딩 +- **목표 동작**: 세션 경계 변경 이벤트 시 해당 세션의 리스크 파라미터를 재로딩. 세션별 프로파일 지원 +- **수용 기준**: + - NXT_AFTER → KRX_REG 전환 시 파라미터 재로딩 확인 + - 재로딩 이벤트 로그 기록 + - 재로딩 실패 시 기존 파라미터 유지 (안전 폴백) +- **테스트 계획**: + - 단위: 세션 전환 훅 콜백 테스트 + - 단위: 재로딩 실패 시 폴백 테스트 +- **의존성**: ACT-09 (session_id 인프라) + +--- + +#### ACT-11: 블랙아웃 복구 시 가격/세션 재검증 강화 + +- **ROOT 참조**: GAP-4 잔여 (가격 유효성, 세션 변경 재적용 미구현) +- **Gitea 이슈**: feat: 블랙아웃 복구 시 가격/세션 재검증 강화 +- **Gitea 이슈 번호**: #328 +- **변경 대상 파일**: `src/main.py` (line 694-791), `src/core/blackout_manager.py` +- **현재 동작**: stale BUY/SELL 드롭 + order_policy 검증만 수행 +- **목표 동작**: + - 복구 시 현재 시세 조회하여 가격 유효성 검증 (진입가 대비 급등/급락 시 드롭) + - 세션 변경 시 새 세션의 파라미터로 재검증 +- **수용 기준**: + - 블랙아웃 전후 가격 변동 > 임계값(예: 5%) 시 주문 드롭 + - 세션 변경 시 새 세션 파라미터로 재평가 +- **테스트 계획**: + - 단위: 가격 변동 시나리오별 드롭/실행 테스트 + - 통합: 블랙아웃 + 세션 전환 복합 시나리오 +- **의존성**: ACT-07 (복구 주문 DB 기록), ACT-10 (세션 파라미터 재로딩) + +--- + +#### ACT-12: Triple Barrier 시간장벽을 캘린더 시간(분) 기반으로 전환 + +- **ROOT 참조**: GAP-5 (시간장벽이 봉 개수 고정) +- **Gitea 이슈**: feat: Triple Barrier 시간장벽을 캘린더 시간(분) 기반으로 전환 +- **Gitea 이슈 번호**: #329 +- **변경 대상 파일**: `src/analysis/triple_barrier.py` +- **현재 동작**: `max_holding_bars` (고정 봉 수) 사용 +- **목표 동작**: `max_holding_minutes` (캘린더 시간) 기반으로 전환. 
봉 주기와 무관하게 일정 시간 경과 시 장벽 도달 +- **수용 기준**: + - 분 단위 시간장벽이 봉 주기 변경에도 일관 동작 + - 기존 max_holding_bars 하위 호환 (deprecated 경고) +- **테스트 계획**: + - 단위: 다양한 봉 주기(1분, 5분, 15분)에서 시간장벽 일관성 테스트 + - 기존 triple_barrier 테스트 회귀 확인 +- **의존성**: 없음 + +--- + +#### ACT-13: CI 자동 검증 (정책 레지스트리 + TASK-REQ 매핑) + +- **ROOT 참조**: REQ-OPS-002 (정책 변경 시 레지스트리 업데이트 강제), REQ-OPS-003 (TASK-REQ 매핑 강제) +- **Gitea 이슈**: infra: CI 자동 검증 (정책 레지스트리 + TASK-REQ 매핑) +- **Gitea 이슈 번호**: #330 +- **변경 대상 파일**: `.gitea/workflows/`, `scripts/validate_governance_assets.py` +- **현재 동작**: CI 자동 검증 없음. 문서 검증은 수동 실행 +- **목표 동작**: + - PR 시 정책 레지스트리(`01_requirements_registry.md`) 변경 여부 자동 검증 + - TASK/이슈가 REQ-ID를 참조하는지 자동 검증 +- **수용 기준**: + - 정책 파일 변경 시 레지스트리 미업데이트면 CI 실패 + - 새 이슈/PR에 REQ-ID 미참조 시 경고 +- **테스트 계획**: + - CI 파이프라인 자체 테스트 (정상/실패 케이스) +- **의존성**: 없음 + +--- + +## 3. 검증 계획 + +### 3.1 단위 테스트 + +- 모든 ACT 항목에 대해 개별 테스트 작성 +- 커버리지 >= 80% 유지 +- 기존 551개 테스트 전체 통과 확인 + +### 3.2 통합 테스트 + +- 백테스트 파이프라인: Phase 1 적용 전후 KR 시장 손절 빈도, 반복 매매 횟수, 승률 비교 +- 상태기계 통합: Phase 2 피처 공급 후 4중 청산 로직 end-to-end 시나리오 +- 블랙아웃 복합: Phase 3 세션 전환 + 블랙아웃 복구 시나리오 + +### 3.3 실환경 검증 + +- Paper trading은 실환경과 괴리가 커 검증 신뢰도 부족 → **소액 live 운용**으로 검증 +- Phase별 투입 기준: 단위/통합 테스트 통과 → 소액 live (1~2일) → 모니터링 → 정상 확인 후 본운용 + +--- + +## 4. 의존성 그래프 + +``` +Phase 1 (병렬 실행 가능) + ACT-01 #318 ─┐ + ACT-02 #319 │ (모두 독립) + ACT-03 #320 │ + ACT-04 #321 ─┘ + +Phase 2 + ACT-05 #322 ─┐ + ACT-06 #323 │ (대부분 독립) + ACT-07 #324 │ + ACT-09 #326 ─┘ + ACT-08 #325 ←── ACT-01 #318 (ATR 인프라 공유) + +Phase 3 + ACT-10 #327 ←── ACT-09 #326 (session_id 인프라) + ACT-11 #328 ←── ACT-07 #324, ACT-10 #327 + ACT-12 #329 (독립) + ACT-13 #330 (독립) +``` + +### Phase 간 관계 + +- Phase 1 → Phase 2: Phase 1 완료가 Phase 2의 전제 조건은 아니나, Phase 1로 출혈 차단 후 Phase 2 진행 권장 +- Phase 2 → Phase 3: ACT-09(session_id)가 ACT-10(세션 재로딩)의 전제, ACT-07+ACT-10이 ACT-11의 전제 + +--- + +## 5. 
롤백 계획 + +### Phase 1 롤백 + +- 각 ACT는 독립적이므로 개별 revert 가능 +- 손절선(ACT-01): 기존 -2% 고정값으로 복원 +- 쿨다운(ACT-02): 쿨다운 체크 제거 +- 가격 필터(ACT-03): 필터 조건 제거 +- syntax 검증(ACT-04): 검증 스킵, 기존 저장 로직 복원 + +### Phase 2 롤백 + +- PnL 수정(ACT-05): buy_qty 기준으로 복원 (단, 데이터 정합성 후퇴 감수) +- exchange_code(ACT-06): 매칭 키에서 제거 +- 블랙아웃 DB(ACT-07): log_trade() 호출 제거 +- 피처 공급(ACT-08): 0.0 공급으로 복원 +- session_id(ACT-09): 파라미터 제거, 자동 추론 복원 + +### Phase 3 롤백 + +- 세션 재로딩(ACT-10): 시작 시 1회 로딩으로 복원 +- 블랙아웃 재검증(ACT-11): 기존 stale 드롭만 유지 +- 시간장벽(ACT-12): max_holding_bars로 복원 +- CI(ACT-13): CI 워크플로우 제거 + +### 롤백 절차 + +1. 해당 ACT의 PR branch에서 `git revert` 수행 +2. 기존 테스트 전체 통과 확인 +3. 실환경 투입 전 소액 live 검증 + +--- + +*끝.* diff --git a/docs/ouroboros/README.md b/docs/ouroboros/README.md index 4605605..6e53e6c 100644 --- a/docs/ouroboros/README.md +++ b/docs/ouroboros/README.md @@ -23,6 +23,7 @@ Updated: 2026-02-26 9. 저장소 강제 설정 체크리스트: [60_repo_enforcement_checklist.md](./60_repo_enforcement_checklist.md) 10. 메인 에이전트 아이디에이션 백로그: [70_main_agent_ideation.md](./70_main_agent_ideation.md) 11. v2/v3 구현 감사 및 수익률 분석: [80_implementation_audit.md](./80_implementation_audit.md) +12. 손실 복구 실행 계획: [85_loss_recovery_action_plan.md](./85_loss_recovery_action_plan.md) ## 운영 규칙 diff --git a/scripts/audit_queries.sql b/scripts/audit_queries.sql new file mode 100644 index 0000000..15bc8eb --- /dev/null +++ b/scripts/audit_queries.sql @@ -0,0 +1,184 @@ +-- audit_queries.sql +-- 용도: 80_implementation_audit.md 성과표 재현을 위한 표준 집계 SQL +-- 대상 DB: trading.db (SQLite) +-- 기간: 2026-02-25 ~ 2026-02-28 (UTC) +-- 참조: docs/ouroboros/80_implementation_audit.md Section 3 + +------------------------------------------------------------------------ +-- Base: 기간 + LIVE + SELL + 직전 BUY 메타 매칭 +------------------------------------------------------------------------ +-- 모든 후속 쿼리의 기반이 되는 CTE. 
+-- prev_buy_rationale: 직전 BUY의 rationale (startup-sync 분류용) +-- prev_buy_qty: 직전 BUY 수량 (수량 일치 무결성 필터용) +------------------------------------------------------------------------ + +WITH base AS ( + SELECT * + FROM trades + WHERE mode='live' + AND action='SELL' + AND timestamp >= '2026-02-25T00:00:00+00:00' + AND timestamp < '2026-02-28T00:00:00+00:00' +), +labeled AS ( + SELECT + s.id, + s.timestamp, + s.stock_code, + s.market, + s.exchange_code, + s.quantity AS sell_qty, + s.price AS sell_price, + s.pnl, + COALESCE(( + SELECT b.rationale + FROM trades b + WHERE b.mode='live' + AND b.action='BUY' + AND b.stock_code=s.stock_code + AND b.market=s.market + AND b.timestamp < s.timestamp + ORDER BY b.timestamp DESC, b.id DESC + LIMIT 1 + ), '') AS prev_buy_rationale, + ( + SELECT b.quantity + FROM trades b + WHERE b.mode='live' + AND b.action='BUY' + AND b.stock_code=s.stock_code + AND b.market=s.market + AND b.timestamp < s.timestamp + ORDER BY b.timestamp DESC, b.id DESC + LIMIT 1 + ) AS prev_buy_qty + FROM base s +) +SELECT * FROM labeled; + +------------------------------------------------------------------------ +-- Q1) 통화 분리 손익 (KRW/USD 혼합 금지) +------------------------------------------------------------------------ + +WITH base AS ( + SELECT * FROM trades + WHERE mode='live' AND action='SELL' + AND timestamp >= '2026-02-25T00:00:00+00:00' + AND timestamp < '2026-02-28T00:00:00+00:00' +), +labeled AS ( + SELECT s.*, + s.quantity AS sell_qty, + COALESCE((SELECT b.rationale FROM trades b + WHERE b.mode='live' AND b.action='BUY' + AND b.stock_code=s.stock_code AND b.market=s.market + AND b.timestamp < s.timestamp + ORDER BY b.timestamp DESC, b.id DESC LIMIT 1), '') AS prev_buy_rationale, + (SELECT b.quantity FROM trades b + WHERE b.mode='live' AND b.action='BUY' + AND b.stock_code=s.stock_code AND b.market=s.market + AND b.timestamp < s.timestamp + ORDER BY b.timestamp DESC, b.id DESC LIMIT 1) AS prev_buy_qty + FROM base s +) +SELECT + CASE WHEN market='KR' THEN 
'KRW' ELSE 'USD' END AS ccy, + COUNT(*) AS sells, + ROUND(SUM(pnl),2) AS pnl_sum +FROM labeled +GROUP BY ccy +ORDER BY ccy; + +------------------------------------------------------------------------ +-- Q2) 기존 보유(startup-sync) 제외 성과 +------------------------------------------------------------------------ + +WITH base AS ( + SELECT * FROM trades + WHERE mode='live' AND action='SELL' + AND timestamp >= '2026-02-25T00:00:00+00:00' + AND timestamp < '2026-02-28T00:00:00+00:00' +), +labeled AS ( + SELECT s.*, + s.quantity AS sell_qty, + COALESCE((SELECT b.rationale FROM trades b + WHERE b.mode='live' AND b.action='BUY' + AND b.stock_code=s.stock_code AND b.market=s.market + AND b.timestamp < s.timestamp + ORDER BY b.timestamp DESC, b.id DESC LIMIT 1), '') AS prev_buy_rationale, + (SELECT b.quantity FROM trades b + WHERE b.mode='live' AND b.action='BUY' + AND b.stock_code=s.stock_code AND b.market=s.market + AND b.timestamp < s.timestamp + ORDER BY b.timestamp DESC, b.id DESC LIMIT 1) AS prev_buy_qty + FROM base s +) +SELECT + CASE WHEN market='KR' THEN 'KRW' ELSE 'USD' END AS ccy, + COUNT(*) AS sells, + ROUND(SUM(pnl),2) AS pnl_sum +FROM labeled +WHERE prev_buy_rationale NOT LIKE '[startup-sync]%' +GROUP BY ccy +ORDER BY ccy; + +------------------------------------------------------------------------ +-- Q3) 수량 일치 체결만 포함 (무결성 필터) +------------------------------------------------------------------------ + +WITH base AS ( + SELECT * FROM trades + WHERE mode='live' AND action='SELL' + AND timestamp >= '2026-02-25T00:00:00+00:00' + AND timestamp < '2026-02-28T00:00:00+00:00' +), +labeled AS ( + SELECT s.*, + s.quantity AS sell_qty, + COALESCE((SELECT b.rationale FROM trades b + WHERE b.mode='live' AND b.action='BUY' + AND b.stock_code=s.stock_code AND b.market=s.market + AND b.timestamp < s.timestamp + ORDER BY b.timestamp DESC, b.id DESC LIMIT 1), '') AS prev_buy_rationale, + (SELECT b.quantity FROM trades b + WHERE b.mode='live' AND b.action='BUY' + AND 
b.stock_code=s.stock_code AND b.market=s.market + AND b.timestamp < s.timestamp + ORDER BY b.timestamp DESC, b.id DESC LIMIT 1) AS prev_buy_qty + FROM base s +) +SELECT + CASE WHEN market='KR' THEN 'KRW' ELSE 'USD' END AS ccy, + COUNT(*) AS sells, + ROUND(SUM(pnl),2) AS pnl_sum +FROM labeled +WHERE prev_buy_qty = sell_qty +GROUP BY ccy +ORDER BY ccy; + +------------------------------------------------------------------------ +-- Q4) 이상치 목록 (수량 불일치) +------------------------------------------------------------------------ + +WITH base AS ( + SELECT * FROM trades + WHERE mode='live' AND action='SELL' + AND timestamp >= '2026-02-25T00:00:00+00:00' + AND timestamp < '2026-02-28T00:00:00+00:00' +), +labeled AS ( + SELECT s.id, s.timestamp, s.stock_code, s.market, s.quantity AS sell_qty, s.pnl, + (SELECT b.quantity FROM trades b + WHERE b.mode='live' AND b.action='BUY' + AND b.stock_code=s.stock_code AND b.market=s.market + AND b.timestamp < s.timestamp + ORDER BY b.timestamp DESC, b.id DESC LIMIT 1) AS prev_buy_qty + FROM base s +) +SELECT + id, timestamp, stock_code, market, sell_qty, prev_buy_qty, ROUND(pnl,2) AS pnl +FROM labeled +WHERE prev_buy_qty IS NOT NULL + AND prev_buy_qty != sell_qty +ORDER BY ABS(pnl) DESC; From 2f3b2149d5f6c3a86e4b25d097060179b772e38d Mon Sep 17 00:00:00 2001 From: agentson Date: Sat, 28 Feb 2026 14:35:35 +0900 Subject: [PATCH 033/109] fix: add syntax guard for evolved strategy generation (#321) --- src/evolution/optimizer.py | 39 +++++++++++++++++++------------- tests/test_evolution.py | 46 ++++++++++++++++++++++++++++++++++++++ 2 files changed, 70 insertions(+), 15 deletions(-) diff --git a/src/evolution/optimizer.py b/src/evolution/optimizer.py index bd4a99b..c9ef719 100644 --- a/src/evolution/optimizer.py +++ b/src/evolution/optimizer.py @@ -9,6 +9,7 @@ This module: from __future__ import annotations +import ast import json import logging import sqlite3 @@ -28,24 +29,24 @@ from src.logging.decision_logger import DecisionLogger logger = 
logging.getLogger(__name__) STRATEGIES_DIR = Path("src/strategies") -STRATEGY_TEMPLATE = textwrap.dedent("""\ - \"\"\"Auto-generated strategy: {name} +STRATEGY_TEMPLATE = """\ +\"\"\"Auto-generated strategy: {name} - Generated at: {timestamp} - Rationale: {rationale} - \"\"\" +Generated at: {timestamp} +Rationale: {rationale} +\"\"\" - from __future__ import annotations - from typing import Any - from src.strategies.base import BaseStrategy +from __future__ import annotations +from typing import Any +from src.strategies.base import BaseStrategy - class {class_name}(BaseStrategy): - \"\"\"Strategy: {name}\"\"\" +class {class_name}(BaseStrategy): + \"\"\"Strategy: {name}\"\"\" - def evaluate(self, market_data: dict[str, Any]) -> dict[str, Any]: - {body} -""") + def evaluate(self, market_data: dict[str, Any]) -> dict[str, Any]: +{body} +""" class EvolutionOptimizer: @@ -235,7 +236,8 @@ class EvolutionOptimizer: file_path = STRATEGIES_DIR / file_name # Indent the body for the class method - indented_body = textwrap.indent(body, " ") + normalized_body = textwrap.dedent(body).strip() + indented_body = textwrap.indent(normalized_body, " ") # Generate rationale from patterns rationale = f"Auto-evolved from {len(failures)} failures. 
" @@ -247,9 +249,16 @@ class EvolutionOptimizer: timestamp=datetime.now(UTC).isoformat(), rationale=rationale, class_name=class_name, - body=indented_body.strip(), + body=indented_body.rstrip(), ) + try: + parsed = ast.parse(content, filename=str(file_path)) + compile(parsed, filename=str(file_path), mode="exec") + except SyntaxError as exc: + logger.warning("Generated strategy failed syntax validation: %s", exc) + return None + file_path.write_text(content) logger.info("Generated strategy file: %s", file_path) return file_path diff --git a/tests/test_evolution.py b/tests/test_evolution.py index 3b10ef1..d5ad349 100644 --- a/tests/test_evolution.py +++ b/tests/test_evolution.py @@ -245,6 +245,52 @@ async def test_generate_strategy_creates_file(optimizer: EvolutionOptimizer, tmp assert "def evaluate" in strategy_path.read_text() +@pytest.mark.asyncio +async def test_generate_strategy_saves_valid_python_code( + optimizer: EvolutionOptimizer, tmp_path: Path, +) -> None: + """Test that syntactically valid generated code is saved.""" + failures = [{"decision_id": "1", "timestamp": "2024-01-15T09:30:00+00:00"}] + + mock_response = Mock() + mock_response.text = ( + 'price = market_data.get("current_price", 0)\n' + 'if price > 0:\n' + ' return {"action": "BUY", "confidence": 80, "rationale": "Positive price"}\n' + 'return {"action": "HOLD", "confidence": 50, "rationale": "No signal"}\n' + ) + + with patch.object(optimizer._client.aio.models, "generate_content", new=AsyncMock(return_value=mock_response)): + with patch("src.evolution.optimizer.STRATEGIES_DIR", tmp_path): + strategy_path = await optimizer.generate_strategy(failures) + + assert strategy_path is not None + assert strategy_path.exists() + + +@pytest.mark.asyncio +async def test_generate_strategy_blocks_invalid_python_code( + optimizer: EvolutionOptimizer, tmp_path: Path, caplog: pytest.LogCaptureFixture, +) -> None: + """Test that syntactically invalid generated code is not saved.""" + failures = 
[{"decision_id": "1", "timestamp": "2024-01-15T09:30:00+00:00"}] + + mock_response = Mock() + mock_response.text = ( + 'if market_data.get("current_price", 0) > 0\n' + ' return {"action": "BUY", "confidence": 80, "rationale": "broken"}\n' + ) + + with patch.object(optimizer._client.aio.models, "generate_content", new=AsyncMock(return_value=mock_response)): + with patch("src.evolution.optimizer.STRATEGIES_DIR", tmp_path): + with caplog.at_level("WARNING"): + strategy_path = await optimizer.generate_strategy(failures) + + assert strategy_path is None + assert list(tmp_path.glob("*.py")) == [] + assert "failed syntax validation" in caplog.text + + @pytest.mark.asyncio async def test_generate_strategy_handles_api_error(optimizer: EvolutionOptimizer) -> None: """Test that generate_strategy handles Gemini API errors gracefully.""" From c641097fe7718b280c5742ef6d45bb7249ea2751 Mon Sep 17 00:00:00 2001 From: agentson Date: Sat, 28 Feb 2026 14:35:55 +0900 Subject: [PATCH 034/109] feat: support minute-based triple barrier horizon (#329) --- src/analysis/triple_barrier.py | 44 +++++++++++++++++++++++----- tests/test_triple_barrier.py | 53 ++++++++++++++++++++++++++++++++++ 2 files changed, 89 insertions(+), 8 deletions(-) diff --git a/src/analysis/triple_barrier.py b/src/analysis/triple_barrier.py index f609496..793250d 100644 --- a/src/analysis/triple_barrier.py +++ b/src/analysis/triple_barrier.py @@ -5,7 +5,9 @@ Implements first-touch labeling with upper/lower/time barriers. 
from __future__ import annotations
 
+import warnings
 from dataclasses import dataclass
+from datetime import datetime, timedelta
 from typing import Literal, Sequence
 
 
@@ -16,9 +18,18 @@ TieBreakMode = Literal["stop_first", "take_first"]
 class TripleBarrierSpec:
     take_profit_pct: float
     stop_loss_pct: float
-    max_holding_bars: int
+    max_holding_bars: int | None = None
+    max_holding_minutes: int | None = None
     tie_break: TieBreakMode = "stop_first"
 
+    def __post_init__(self) -> None:
+        if self.max_holding_minutes is None and self.max_holding_bars is None:
+            raise ValueError("one of max_holding_minutes or max_holding_bars must be set")
+        if self.max_holding_minutes is not None and self.max_holding_minutes <= 0:
+            raise ValueError("max_holding_minutes must be positive")
+        if self.max_holding_bars is not None and self.max_holding_bars <= 0:
+            raise ValueError("max_holding_bars must be positive")
+
 
 @dataclass(frozen=True)
 class TripleBarrierLabel:
@@ -35,6 +46,7 @@ def label_with_triple_barrier(
     highs: Sequence[float],
     lows: Sequence[float],
     closes: Sequence[float],
     entry_index: int,
     side: int,
     spec: TripleBarrierSpec,
+    timestamps: Sequence[datetime] | None = None,
@@ -53,8 +65,6 @@
         raise ValueError("highs, lows, closes lengths must match")
     if entry_index < 0 or entry_index >= len(closes):
         raise IndexError("entry_index out of range")
-    if spec.max_holding_bars <= 0:
-        raise ValueError("max_holding_bars must be positive")
 
     entry_price = float(closes[entry_index])
     if entry_price <= 0:
@@ -68,13 +78,31 @@
     upper = entry_price * (1.0 + spec.stop_loss_pct)
     lower = entry_price * (1.0 - spec.take_profit_pct)
 
-    last_index = min(len(closes) - 1, entry_index + spec.max_holding_bars)
+    if spec.max_holding_minutes is not None:
+        if timestamps is None:
+            raise ValueError("timestamps are required when max_holding_minutes is set")
+        if len(timestamps) != len(closes):
+            raise ValueError("timestamps length must match OHLC lengths")
+        
expiry_timestamp = timestamps[entry_index] + timedelta(minutes=spec.max_holding_minutes) + last_index = entry_index + for idx in range(entry_index + 1, len(closes)): + if timestamps[idx] > expiry_timestamp: + break + last_index = idx + else: + assert spec.max_holding_bars is not None + warnings.warn( + "TripleBarrierSpec.max_holding_bars is deprecated; use max_holding_minutes with timestamps instead.", + DeprecationWarning, + stacklevel=2, + ) + last_index = min(len(closes) - 1, entry_index + spec.max_holding_bars) for idx in range(entry_index + 1, last_index + 1): - h = float(highs[idx]) - l = float(lows[idx]) + high_price = float(highs[idx]) + low_price = float(lows[idx]) - up_touch = h >= upper - down_touch = l <= lower + up_touch = high_price >= upper + down_touch = low_price <= lower if not up_touch and not down_touch: continue diff --git a/tests/test_triple_barrier.py b/tests/test_triple_barrier.py index 1fff8e3..ba82a5e 100644 --- a/tests/test_triple_barrier.py +++ b/tests/test_triple_barrier.py @@ -1,5 +1,9 @@ from __future__ import annotations +from datetime import UTC, datetime, timedelta + +import pytest + from src.analysis.triple_barrier import TripleBarrierSpec, label_with_triple_barrier @@ -129,3 +133,52 @@ def test_short_tie_break_modes() -> None: ) assert out_take.label == 1 assert out_take.touched == "take_profit" + + +def test_minutes_time_barrier_consistent_across_sampling() -> None: + base = datetime(2026, 2, 28, 9, 0, tzinfo=UTC) + highs = [100.0, 100.5, 100.6, 100.4] + lows = [100.0, 99.6, 99.4, 99.5] + closes = [100.0, 100.1, 100.0, 100.0] + spec = TripleBarrierSpec( + take_profit_pct=0.02, + stop_loss_pct=0.02, + max_holding_minutes=5, + ) + + out_1m = label_with_triple_barrier( + highs=highs, + lows=lows, + closes=closes, + timestamps=[base + timedelta(minutes=i) for i in range(4)], + entry_index=0, + side=1, + spec=spec, + ) + out_5m = label_with_triple_barrier( + highs=highs, + lows=lows, + closes=closes, + timestamps=[base + 
timedelta(minutes=5 * i) for i in range(4)], + entry_index=0, + side=1, + spec=spec, + ) + assert out_1m.touch_bar == 3 + assert out_5m.touch_bar == 1 + + +def test_bars_mode_emits_deprecation_warning() -> None: + highs = [100, 101, 103] + lows = [100, 99.6, 100] + closes = [100, 100, 102] + spec = TripleBarrierSpec(take_profit_pct=0.02, stop_loss_pct=0.01, max_holding_bars=3) + with pytest.deprecated_call(match="max_holding_bars is deprecated"): + label_with_triple_barrier( + highs=highs, + lows=lows, + closes=closes, + entry_index=0, + side=1, + spec=spec, + ) From 2e394cd17c52fae3d86798da4951d1dff147d8b0 Mon Sep 17 00:00:00 2001 From: agentson Date: Sat, 28 Feb 2026 14:36:05 +0900 Subject: [PATCH 035/109] infra: enforce governance registry sync checks in CI (#330) --- .gitea/workflows/ci.yml | 15 ++++++- scripts/validate_governance_assets.py | 61 +++++++++++++++++++++++++++ 2 files changed, 75 insertions(+), 1 deletion(-) diff --git a/.gitea/workflows/ci.yml b/.gitea/workflows/ci.yml index f73be37..39fb10d 100644 --- a/.gitea/workflows/ci.yml +++ b/.gitea/workflows/ci.yml @@ -13,6 +13,8 @@ jobs: steps: - name: Checkout uses: actions/checkout@v4 + with: + fetch-depth: 0 - name: Set up Python uses: actions/setup-python@v5 @@ -26,7 +28,18 @@ jobs: run: python3 scripts/session_handover_check.py --strict - name: Validate governance assets - run: python3 scripts/validate_governance_assets.py + run: | + RANGE="" + if [ "${{ github.event_name }}" = "pull_request" ] && [ -n "${{ github.event.pull_request.base.sha }}" ]; then + RANGE="${{ github.event.pull_request.base.sha }}...${{ github.sha }}" + elif [ -n "${{ github.event.before }}" ] && [ "${{ github.event.before }}" != "0000000000000000000000000000000000000000" ]; then + RANGE="${{ github.event.before }}...${{ github.sha }}" + fi + if [ -n "$RANGE" ]; then + python3 scripts/validate_governance_assets.py "$RANGE" + else + python3 scripts/validate_governance_assets.py + fi - name: Validate Ouroboros docs run: python3 
scripts/validate_ouroboros_docs.py diff --git a/scripts/validate_governance_assets.py b/scripts/validate_governance_assets.py index 80680d7..82e94d4 100644 --- a/scripts/validate_governance_assets.py +++ b/scripts/validate_governance_assets.py @@ -3,9 +3,12 @@ from __future__ import annotations +import subprocess import sys from pathlib import Path +REQUIREMENTS_REGISTRY = "docs/ouroboros/01_requirements_registry.md" + def must_contain(path: Path, required: list[str], errors: list[str]) -> None: if not path.exists(): @@ -17,8 +20,64 @@ def must_contain(path: Path, required: list[str], errors: list[str]) -> None: errors.append(f"{path}: missing required token -> {token}") +def normalize_changed_path(path: str) -> str: + normalized = path.strip().replace("\\", "/") + if normalized.startswith("./"): + normalized = normalized[2:] + return normalized + + +def is_policy_file(path: str) -> bool: + normalized = normalize_changed_path(path) + if not normalized.endswith(".md"): + return False + if not normalized.startswith("docs/ouroboros/"): + return False + return normalized != REQUIREMENTS_REGISTRY + + +def load_changed_files(args: list[str], errors: list[str]) -> list[str]: + if not args: + return [] + + # Single range input (e.g. BASE..HEAD or BASE...HEAD) + if len(args) == 1 and ".." 
in args[0]: + range_spec = args[0] + try: + completed = subprocess.run( + ["git", "diff", "--name-only", range_spec], + check=True, + capture_output=True, + text=True, + ) + except (subprocess.CalledProcessError, FileNotFoundError) as exc: + errors.append(f"failed to load changed files from range '{range_spec}': {exc}") + return [] + return [ + normalize_changed_path(line) + for line in completed.stdout.splitlines() + if line.strip() + ] + + return [normalize_changed_path(path) for path in args if path.strip()] + + +def validate_registry_sync(changed_files: list[str], errors: list[str]) -> None: + if not changed_files: + return + + changed_set = set(changed_files) + policy_changed = any(is_policy_file(path) for path in changed_set) + registry_changed = REQUIREMENTS_REGISTRY in changed_set + if policy_changed and not registry_changed: + errors.append( + "policy file changed without updating docs/ouroboros/01_requirements_registry.md" + ) + + def main() -> int: errors: list[str] = [] + changed_files = load_changed_files(sys.argv[1:], errors) pr_template = Path(".gitea/PULL_REQUEST_TEMPLATE.md") issue_template = Path(".gitea/ISSUE_TEMPLATE/runtime_verification.md") @@ -81,6 +140,8 @@ def main() -> int: if not handover_script.exists(): errors.append(f"missing file: {handover_script}") + validate_registry_sync(changed_files, errors) + if errors: print("[FAIL] governance asset validation failed") for err in errors: From 11b9ad126f8988603ff46c7cda3973f2872c5f2a Mon Sep 17 00:00:00 2001 From: agentson Date: Sat, 28 Feb 2026 14:37:32 +0900 Subject: [PATCH 036/109] feat: propagate runtime session_id across decision and trade logs (#326) --- src/db.py | 22 +++++++++++++++ src/logging/decision_logger.py | 34 +++++++++++++---------- src/main.py | 8 ++++++ tests/test_db.py | 50 ++++++++++++++++++++++++++++++++++ tests/test_decision_logger.py | 23 +++++++++++++++- 5 files changed, 122 insertions(+), 15 deletions(-) diff --git a/src/db.py b/src/db.py index 4a0c9f0..0839521 100644 
--- a/src/db.py +++ b/src/db.py @@ -109,6 +109,7 @@ def init_db(db_path: str) -> sqlite3.Connection: stock_code TEXT NOT NULL, market TEXT NOT NULL, exchange_code TEXT NOT NULL, + session_id TEXT DEFAULT 'UNKNOWN', action TEXT NOT NULL, confidence INTEGER NOT NULL, rationale TEXT NOT NULL, @@ -121,6 +122,27 @@ def init_db(db_path: str) -> sqlite3.Connection: ) """ ) + decision_columns = { + row[1] + for row in conn.execute("PRAGMA table_info(decision_logs)").fetchall() + } + if "session_id" not in decision_columns: + conn.execute("ALTER TABLE decision_logs ADD COLUMN session_id TEXT DEFAULT 'UNKNOWN'") + conn.execute( + """ + UPDATE decision_logs + SET session_id = 'UNKNOWN' + WHERE session_id IS NULL OR session_id = '' + """ + ) + if "outcome_pnl" not in decision_columns: + conn.execute("ALTER TABLE decision_logs ADD COLUMN outcome_pnl REAL") + if "outcome_accuracy" not in decision_columns: + conn.execute("ALTER TABLE decision_logs ADD COLUMN outcome_accuracy INTEGER") + if "reviewed" not in decision_columns: + conn.execute("ALTER TABLE decision_logs ADD COLUMN reviewed INTEGER DEFAULT 0") + if "review_notes" not in decision_columns: + conn.execute("ALTER TABLE decision_logs ADD COLUMN review_notes TEXT") conn.execute( """ diff --git a/src/logging/decision_logger.py b/src/logging/decision_logger.py index b2f52a5..cd19b28 100644 --- a/src/logging/decision_logger.py +++ b/src/logging/decision_logger.py @@ -19,6 +19,7 @@ class DecisionLog: stock_code: str market: str exchange_code: str + session_id: str action: str confidence: int rationale: str @@ -47,6 +48,7 @@ class DecisionLogger: rationale: str, context_snapshot: dict[str, Any], input_data: dict[str, Any], + session_id: str | None = None, ) -> str: """Log a trading decision with full context. @@ -59,20 +61,22 @@ class DecisionLogger: rationale: Reasoning for the decision context_snapshot: L1-L7 context snapshot at decision time input_data: Market data inputs (price, volume, orderbook, etc.) 
+ session_id: Runtime session identifier Returns: decision_id: Unique identifier for this decision """ decision_id = str(uuid.uuid4()) timestamp = datetime.now(UTC).isoformat() + resolved_session = session_id or "UNKNOWN" self.conn.execute( """ INSERT INTO decision_logs ( decision_id, timestamp, stock_code, market, exchange_code, - action, confidence, rationale, context_snapshot, input_data + session_id, action, confidence, rationale, context_snapshot, input_data ) - VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?) + VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?) """, ( decision_id, @@ -80,6 +84,7 @@ class DecisionLogger: stock_code, market, exchange_code, + resolved_session, action, confidence, rationale, @@ -106,7 +111,7 @@ class DecisionLogger: query = """ SELECT decision_id, timestamp, stock_code, market, exchange_code, - action, confidence, rationale, context_snapshot, input_data, + session_id, action, confidence, rationale, context_snapshot, input_data, outcome_pnl, outcome_accuracy, reviewed, review_notes FROM decision_logs WHERE reviewed = 0 AND confidence >= ? @@ -168,7 +173,7 @@ class DecisionLogger: """ SELECT decision_id, timestamp, stock_code, market, exchange_code, - action, confidence, rationale, context_snapshot, input_data, + session_id, action, confidence, rationale, context_snapshot, input_data, outcome_pnl, outcome_accuracy, reviewed, review_notes FROM decision_logs WHERE decision_id = ? @@ -196,7 +201,7 @@ class DecisionLogger: """ SELECT decision_id, timestamp, stock_code, market, exchange_code, - action, confidence, rationale, context_snapshot, input_data, + session_id, action, confidence, rationale, context_snapshot, input_data, outcome_pnl, outcome_accuracy, reviewed, review_notes FROM decision_logs WHERE confidence >= ? 
@@ -223,13 +228,14 @@ class DecisionLogger: stock_code=row[2], market=row[3], exchange_code=row[4], - action=row[5], - confidence=row[6], - rationale=row[7], - context_snapshot=json.loads(row[8]), - input_data=json.loads(row[9]), - outcome_pnl=row[10], - outcome_accuracy=row[11], - reviewed=bool(row[12]), - review_notes=row[13], + session_id=row[5] or "UNKNOWN", + action=row[6], + confidence=row[7], + rationale=row[8], + context_snapshot=json.loads(row[9]), + input_data=json.loads(row[10]), + outcome_pnl=row[11], + outcome_accuracy=row[12], + reviewed=bool(row[13]), + review_notes=row[14], ) diff --git a/src/main.py b/src/main.py index cc158a2..97f9fd8 100644 --- a/src/main.py +++ b/src/main.py @@ -217,6 +217,7 @@ async def sync_positions_from_broker( price=avg_price, market=log_market, exchange_code=market.exchange_code, + session_id=get_session_info(market).session_id, mode=settings.MODE, ) logger.info( @@ -1368,10 +1369,12 @@ async def trading_cycle( "pnl_pct": pnl_pct, } + runtime_session_id = get_session_info(market).session_id decision_id = decision_logger.log_decision( stock_code=stock_code, market=market.code, exchange_code=market.exchange_code, + session_id=runtime_session_id, action=decision.action, confidence=decision.confidence, rationale=decision.rationale, @@ -1636,6 +1639,7 @@ async def trading_cycle( pnl=0.0, market=market.code, exchange_code=market.exchange_code, + session_id=runtime_session_id, mode=settings.MODE if settings else "paper", ) logger.info("Order result: %s", result.get("msg1", "OK")) @@ -1690,6 +1694,7 @@ async def trading_cycle( pnl=trade_pnl, market=market.code, exchange_code=market.exchange_code, + session_id=runtime_session_id, selection_context=selection_context, decision_id=decision_id, mode=settings.MODE if settings else "paper", @@ -2497,10 +2502,12 @@ async def run_daily_session( "pnl_pct": pnl_pct, } + runtime_session_id = get_session_info(market).session_id decision_id = decision_logger.log_decision( stock_code=stock_code, 
market=market.code, exchange_code=market.exchange_code, + session_id=runtime_session_id, action=decision.action, confidence=decision.confidence, rationale=decision.rationale, @@ -2777,6 +2784,7 @@ async def run_daily_session( pnl=trade_pnl, market=market.code, exchange_code=market.exchange_code, + session_id=runtime_session_id, decision_id=decision_id, mode=settings.MODE, ) diff --git a/tests/test_db.py b/tests/test_db.py index bbd600e..9bd190d 100644 --- a/tests/test_db.py +++ b/tests/test_db.py @@ -329,3 +329,53 @@ def test_log_trade_unknown_market_falls_back_to_unknown_session() -> None: row = conn.execute("SELECT session_id FROM trades ORDER BY id DESC LIMIT 1").fetchone() assert row is not None assert row[0] == "UNKNOWN" + + +def test_decision_logs_session_id_migration_backfills_unknown() -> None: + import sqlite3 + + with tempfile.NamedTemporaryFile(suffix=".db", delete=False) as f: + db_path = f.name + try: + old_conn = sqlite3.connect(db_path) + old_conn.execute( + """ + CREATE TABLE decision_logs ( + decision_id TEXT PRIMARY KEY, + timestamp TEXT NOT NULL, + stock_code TEXT NOT NULL, + market TEXT NOT NULL, + exchange_code TEXT NOT NULL, + action TEXT NOT NULL, + confidence INTEGER NOT NULL, + rationale TEXT NOT NULL, + context_snapshot TEXT NOT NULL, + input_data TEXT NOT NULL + ) + """ + ) + old_conn.execute( + """ + INSERT INTO decision_logs ( + decision_id, timestamp, stock_code, market, exchange_code, + action, confidence, rationale, context_snapshot, input_data + ) VALUES ( + 'd1', '2026-01-01T00:00:00+00:00', 'AAPL', 'US_NASDAQ', 'NASD', + 'BUY', 80, 'legacy row', '{}', '{}' + ) + """ + ) + old_conn.commit() + old_conn.close() + + conn = init_db(db_path) + columns = {row[1] for row in conn.execute("PRAGMA table_info(decision_logs)").fetchall()} + assert "session_id" in columns + row = conn.execute( + "SELECT session_id FROM decision_logs WHERE decision_id='d1'" + ).fetchone() + assert row is not None + assert row[0] == "UNKNOWN" + conn.close() + 
finally: + os.unlink(db_path) diff --git a/tests/test_decision_logger.py b/tests/test_decision_logger.py index 652d3c3..dec3a64 100644 --- a/tests/test_decision_logger.py +++ b/tests/test_decision_logger.py @@ -49,7 +49,7 @@ def test_log_decision_creates_record(logger: DecisionLogger, db_conn: sqlite3.Co # Verify record exists in database cursor = db_conn.execute( - "SELECT decision_id, action, confidence FROM decision_logs WHERE decision_id = ?", + "SELECT decision_id, action, confidence, session_id FROM decision_logs WHERE decision_id = ?", (decision_id,), ) row = cursor.fetchone() @@ -57,6 +57,7 @@ def test_log_decision_creates_record(logger: DecisionLogger, db_conn: sqlite3.Co assert row[0] == decision_id assert row[1] == "BUY" assert row[2] == 85 + assert row[3] == "UNKNOWN" def test_log_decision_stores_context_snapshot(logger: DecisionLogger) -> None: @@ -84,6 +85,24 @@ def test_log_decision_stores_context_snapshot(logger: DecisionLogger) -> None: assert decision is not None assert decision.context_snapshot == context_snapshot assert decision.input_data == input_data + assert decision.session_id == "UNKNOWN" + + +def test_log_decision_stores_explicit_session_id(logger: DecisionLogger) -> None: + decision_id = logger.log_decision( + stock_code="AAPL", + market="US_NASDAQ", + exchange_code="NASD", + action="BUY", + confidence=88, + rationale="session check", + context_snapshot={}, + input_data={}, + session_id="US_PRE", + ) + decision = logger.get_decision_by_id(decision_id) + assert decision is not None + assert decision.session_id == "US_PRE" def test_get_unreviewed_decisions(logger: DecisionLogger) -> None: @@ -278,6 +297,7 @@ def test_decision_log_dataclass() -> None: stock_code="005930", market="KR", exchange_code="KRX", + session_id="KRX_REG", action="BUY", confidence=85, rationale="Test", @@ -286,6 +306,7 @@ def test_decision_log_dataclass() -> None: ) assert log.decision_id == "test-uuid" + assert log.session_id == "KRX_REG" assert log.action == "BUY" 
assert log.confidence == 85 assert log.reviewed is False From 6d7e6557d239f15fa99cb95a5df618e40e595fd6 Mon Sep 17 00:00:00 2001 From: agentson Date: Sat, 28 Feb 2026 14:38:10 +0900 Subject: [PATCH 037/109] fix: compute SELL decision outcome using sell quantity (#322) --- src/main.py | 8 ++++---- tests/test_main.py | 3 +++ 2 files changed, 7 insertions(+), 4 deletions(-) diff --git a/src/main.py b/src/main.py index cc158a2..6f8272c 100644 --- a/src/main.py +++ b/src/main.py @@ -1658,8 +1658,8 @@ async def trading_cycle( buy_trade = get_latest_buy_trade(db_conn, stock_code, market.code) if buy_trade and buy_trade.get("price") is not None: buy_price = float(buy_trade["price"]) - buy_qty = int(buy_trade.get("quantity") or 1) - trade_pnl = (trade_price - buy_price) * buy_qty + sell_qty = int(quantity or 0) + trade_pnl = (trade_price - buy_price) * sell_qty decision_logger.update_outcome( decision_id=buy_trade["decision_id"], pnl=trade_pnl, @@ -2755,8 +2755,8 @@ async def run_daily_session( buy_trade = get_latest_buy_trade(db_conn, stock_code, market.code) if buy_trade and buy_trade.get("price") is not None: buy_price = float(buy_trade["price"]) - buy_qty = int(buy_trade.get("quantity") or 1) - trade_pnl = (trade_price - buy_price) * buy_qty + sell_qty = int(quantity or 0) + trade_pnl = (trade_price - buy_price) * sell_qty decision_logger.update_outcome( decision_id=buy_trade["decision_id"], pnl=trade_pnl, diff --git a/tests/test_main.py b/tests/test_main.py index 63ee0da..cdb2651 100644 --- a/tests/test_main.py +++ b/tests/test_main.py @@ -2750,6 +2750,9 @@ async def test_sell_order_uses_broker_balance_qty_not_db() -> None: assert call_kwargs["order_type"] == "SELL" # Must use broker-confirmed qty (5), NOT DB-recorded ordered qty (10) assert call_kwargs["quantity"] == 5 + updated_buy = decision_logger.get_decision_by_id(buy_decision_id) + assert updated_buy is not None + assert updated_buy.outcome_pnl == -25.0 @pytest.mark.asyncio From 
5c107d243500649a15996878629f34008eea87ba Mon Sep 17 00:00:00 2001 From: agentson Date: Sat, 28 Feb 2026 14:39:30 +0900 Subject: [PATCH 038/109] fix: persist blackout recovery executions to trades log (#324) --- src/main.py | 14 ++++++++++++++ tests/test_main.py | 14 ++++++++++++++ 2 files changed, 28 insertions(+) diff --git a/src/main.py b/src/main.py index cc158a2..fd8cccc 100644 --- a/src/main.py +++ b/src/main.py @@ -750,6 +750,20 @@ async def process_blackout_recovery_orders( accepted = result.get("rt_cd", "0") == "0" if accepted: + runtime_session_id = get_session_info(market).session_id + log_trade( + conn=db_conn, + stock_code=intent.stock_code, + action=intent.order_type, + confidence=0, + rationale=f"[blackout-recovery] {intent.source}", + quantity=intent.quantity, + price=float(intent.price), + pnl=0.0, + market=market.code, + exchange_code=market.exchange_code, + session_id=runtime_session_id, + ) logger.info( "Recovered queued order executed: %s %s (%s) qty=%d price=%.4f source=%s", intent.order_type, diff --git a/tests/test_main.py b/tests/test_main.py index 63ee0da..74263f2 100644 --- a/tests/test_main.py +++ b/tests/test_main.py @@ -5837,6 +5837,7 @@ async def test_process_blackout_recovery_executes_valid_intents() -> None: patch("src.main.MARKETS", {"KR": market}), patch("src.main.get_open_position", return_value=None), patch("src.main.validate_order_policy"), + patch("src.main.get_session_info", return_value=MagicMock(session_id="KRX_REG")), ): await process_blackout_recovery_orders( broker=broker, @@ -5845,6 +5846,19 @@ async def test_process_blackout_recovery_executes_valid_intents() -> None: ) broker.send_order.assert_called_once() + row = db_conn.execute( + """ + SELECT action, quantity, session_id, rationale + FROM trades + WHERE stock_code = '005930' + ORDER BY id DESC LIMIT 1 + """ + ).fetchone() + assert row is not None + assert row[0] == "BUY" + assert row[1] == 1 + assert row[2] == "KRX_REG" + assert 
row[3].startswith("[blackout-recovery]") @pytest.mark.asyncio From 08607eaa567b0f9f6785ba1ada5e62a4c5bee290 Mon Sep 17 00:00:00 2001 From: agentson Date: Sat, 28 Feb 2026 14:40:19 +0900 Subject: [PATCH 039/109] feat: block US BUY entries below minimum price threshold (#320) --- src/config.py | 1 + src/main.py | 36 ++++++++++++++++++++++++++++++++++++ 2 files changed, 37 insertions(+) diff --git a/src/config.py b/src/config.py index 0e60e32..7f27aeb 100644 --- a/src/config.py +++ b/src/config.py @@ -60,6 +60,7 @@ class Settings(BaseSettings): # This value is used as a fallback when the balance API returns 0 in paper mode. PAPER_OVERSEAS_CASH: float = Field(default=50000.0, ge=0.0) USD_BUFFER_MIN: float = Field(default=1000.0, ge=0.0) + US_MIN_PRICE: float = Field(default=5.0, ge=0.0) OVERNIGHT_EXCEPTION_ENABLED: bool = True # Trading frequency mode (daily = batch API calls, realtime = per-stock calls) diff --git a/src/main.py b/src/main.py index cc158a2..a0c716e 100644 --- a/src/main.py +++ b/src/main.py @@ -1291,6 +1291,24 @@ async def trading_cycle( stock_code, market.name, ) + elif market.code.startswith("US"): + min_price = float(getattr(settings, "US_MIN_PRICE", 5.0) if settings else 5.0) + if current_price <= min_price: + decision = TradeDecision( + action="HOLD", + confidence=decision.confidence, + rationale=( + f"US minimum price filter blocked BUY " + f"(price={current_price:.4f} <= {min_price:.4f})" + ), + ) + logger.info( + "BUY suppressed for %s (%s): US min price filter %.4f <= %.4f", + stock_code, + market.name, + current_price, + min_price, + ) if decision.action == "HOLD": open_position = get_open_position(db_conn, stock_code, market.code) @@ -2442,6 +2460,24 @@ async def run_daily_session( stock_code, market.name, ) + elif market.code.startswith("US"): + min_price = float(getattr(settings, "US_MIN_PRICE", 5.0)) + if stock_data["current_price"] <= min_price: + decision = TradeDecision( + action="HOLD", + confidence=decision.confidence, + rationale=( 
+ f"US minimum price filter blocked BUY " + f"(price={stock_data['current_price']:.4f} <= {min_price:.4f})" + ), + ) + logger.info( + "BUY suppressed for %s (%s): US min price filter %.4f <= %.4f", + stock_code, + market.name, + stock_data["current_price"], + min_price, + ) if decision.action == "HOLD": daily_open = get_open_position(db_conn, stock_code, market.code) if not daily_open: From fd0246769a26af30380e79e80794800bda237c0c Mon Sep 17 00:00:00 2001 From: agentson Date: Sat, 28 Feb 2026 17:13:56 +0900 Subject: [PATCH 040/109] test: add sell qty fallback guard and quantity-basis coverage (#322) --- src/main.py | 17 +++++++++++++++-- tests/test_main.py | 13 +++++++++++++ 2 files changed, 28 insertions(+), 2 deletions(-) diff --git a/src/main.py b/src/main.py index 6f8272c..17fcbbd 100644 --- a/src/main.py +++ b/src/main.py @@ -110,6 +110,14 @@ DAILY_TRADE_SESSIONS = 4 # Number of trading sessions per day TRADE_SESSION_INTERVAL_HOURS = 6 # Hours between sessions +def _resolve_sell_qty_for_pnl(*, sell_qty: int | None, buy_qty: int | None) -> int: + """Choose quantity basis for SELL outcome PnL with safe fallback.""" + resolved_sell = int(sell_qty or 0) + if resolved_sell > 0: + return resolved_sell + return max(0, int(buy_qty or 0)) + + async def _retry_connection(coro_factory: Any, *args: Any, label: str = "", **kwargs: Any) -> Any: """Call an async function retrying on ConnectionError with exponential backoff. 
@@ -1658,7 +1666,8 @@ async def trading_cycle( buy_trade = get_latest_buy_trade(db_conn, stock_code, market.code) if buy_trade and buy_trade.get("price") is not None: buy_price = float(buy_trade["price"]) - sell_qty = int(quantity or 0) + buy_qty = int(buy_trade.get("quantity") or 0) + sell_qty = _resolve_sell_qty_for_pnl(sell_qty=quantity, buy_qty=buy_qty) trade_pnl = (trade_price - buy_price) * sell_qty decision_logger.update_outcome( decision_id=buy_trade["decision_id"], @@ -2755,7 +2764,11 @@ async def run_daily_session( buy_trade = get_latest_buy_trade(db_conn, stock_code, market.code) if buy_trade and buy_trade.get("price") is not None: buy_price = float(buy_trade["price"]) - sell_qty = int(quantity or 0) + buy_qty = int(buy_trade.get("quantity") or 0) + sell_qty = _resolve_sell_qty_for_pnl( + sell_qty=quantity, + buy_qty=buy_qty, + ) trade_pnl = (trade_price - buy_price) * sell_qty decision_logger.update_outcome( decision_id=buy_trade["decision_id"], diff --git a/tests/test_main.py b/tests/test_main.py index cdb2651..d5ff5c3 100644 --- a/tests/test_main.py +++ b/tests/test_main.py @@ -27,6 +27,7 @@ from src.main import ( _extract_held_qty_from_balance, _handle_market_close, _retry_connection, + _resolve_sell_qty_for_pnl, _run_context_scheduler, _run_evolution_loop, _start_dashboard_server, @@ -119,6 +120,18 @@ class TestExtractAvgPriceFromBalance: result = _extract_avg_price_from_balance(balance, "005930", is_domestic=True) assert result == 0.0 + +def test_resolve_sell_qty_for_pnl_prefers_sell_qty() -> None: + assert _resolve_sell_qty_for_pnl(sell_qty=30, buy_qty=100) == 30 + + +def test_resolve_sell_qty_for_pnl_uses_buy_qty_fallback_when_sell_qty_missing() -> None: + assert _resolve_sell_qty_for_pnl(sell_qty=None, buy_qty=12) == 12 + + +def test_resolve_sell_qty_for_pnl_returns_zero_when_both_missing() -> None: + assert _resolve_sell_qty_for_pnl(sell_qty=None, buy_qty=None) == 0 + def test_returns_zero_when_field_empty_string(self) -> None: """Returns 0.0 
when pchs_avg_pric is an empty string.""" balance = {"output1": [{"pdno": "005930", "pchs_avg_pric": ""}]} From 9267f1fb778553c512cf0e1c2d9f7178ac95ef1f Mon Sep 17 00:00:00 2001 From: agentson Date: Sat, 28 Feb 2026 17:15:10 +0900 Subject: [PATCH 041/109] test: add US minimum price boundary and KR-scope coverage (#320) --- tests/test_main.py | 143 +++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 143 insertions(+) diff --git a/tests/test_main.py b/tests/test_main.py index 63ee0da..a016634 100644 --- a/tests/test_main.py +++ b/tests/test_main.py @@ -5654,6 +5654,149 @@ async def test_order_policy_rejection_skips_order_execution() -> None: broker.send_order.assert_not_called() +@pytest.mark.asyncio +@pytest.mark.parametrize( + ("price", "should_block"), + [ + (4.99, True), + (5.00, True), + (5.01, False), + ], +) +async def test_us_min_price_filter_boundary(price: float, should_block: bool) -> None: + db_conn = init_db(":memory:") + decision_logger = DecisionLogger(db_conn) + + broker = MagicMock() + broker.get_balance = AsyncMock(return_value={"output1": [], "output2": [{}]}) + + overseas_broker = MagicMock() + overseas_broker.get_overseas_price = AsyncMock( + return_value={"output": {"last": str(price), "rate": "0.0"}} + ) + overseas_broker.get_overseas_balance = AsyncMock( + return_value={"output1": [], "output2": [{"frcr_evlu_tota": "10000", "frcr_buy_amt_smtl": "0"}]} + ) + overseas_broker.get_overseas_buying_power = AsyncMock( + return_value={"output": {"ovrs_ord_psbl_amt": "10000"}} + ) + overseas_broker.send_overseas_order = AsyncMock(return_value={"rt_cd": "0", "msg1": "OK"}) + + market = MagicMock() + market.name = "NASDAQ" + market.code = "US_NASDAQ" + market.exchange_code = "NASD" + market.is_domestic = False + + telegram = MagicMock() + telegram.notify_trade_execution = AsyncMock() + telegram.notify_fat_finger = AsyncMock() + telegram.notify_circuit_breaker = AsyncMock() + telegram.notify_scenario_matched = AsyncMock() + + settings = 
MagicMock() + settings.POSITION_SIZING_ENABLED = False + settings.CONFIDENCE_THRESHOLD = 80 + settings.MODE = "paper" + settings.PAPER_OVERSEAS_CASH = 50000 + settings.US_MIN_PRICE = 5.0 + settings.USD_BUFFER_MIN = 1000.0 + + await trading_cycle( + broker=broker, + overseas_broker=overseas_broker, + scenario_engine=MagicMock(evaluate=MagicMock(return_value=_make_buy_match("AAPL"))), + playbook=_make_playbook("US_NASDAQ"), + risk=MagicMock(validate_order=MagicMock(), check_circuit_breaker=MagicMock()), + db_conn=db_conn, + decision_logger=decision_logger, + context_store=MagicMock( + get_latest_timeframe=MagicMock(return_value=None), + set_context=MagicMock(), + ), + criticality_assessor=MagicMock( + assess_market_conditions=MagicMock(return_value=MagicMock(value="NORMAL")), + get_timeout=MagicMock(return_value=5.0), + ), + telegram=telegram, + market=market, + stock_code="AAPL", + scan_candidates={}, + settings=settings, + ) + + if should_block: + overseas_broker.send_overseas_order.assert_not_called() + else: + overseas_broker.send_overseas_order.assert_called_once() + + +@pytest.mark.asyncio +async def test_us_min_price_filter_not_applied_to_kr_market() -> None: + db_conn = init_db(":memory:") + decision_logger = DecisionLogger(db_conn) + + broker = MagicMock() + broker.get_current_price = AsyncMock(return_value=(4.0, 0.0, 0.0)) + broker.get_balance = AsyncMock( + return_value={ + "output1": [], + "output2": [ + { + "tot_evlu_amt": "100000", + "dnca_tot_amt": "50000", + "pchs_amt_smtl_amt": "50000", + } + ], + } + ) + broker.send_order = AsyncMock(return_value={"msg1": "OK"}) + + market = MagicMock() + market.name = "Korea" + market.code = "KR" + market.exchange_code = "KRX" + market.is_domestic = True + + telegram = MagicMock() + telegram.notify_trade_execution = AsyncMock() + telegram.notify_fat_finger = AsyncMock() + telegram.notify_circuit_breaker = AsyncMock() + telegram.notify_scenario_matched = AsyncMock() + + settings = MagicMock() + 
settings.POSITION_SIZING_ENABLED = False + settings.CONFIDENCE_THRESHOLD = 80 + settings.MODE = "paper" + settings.US_MIN_PRICE = 5.0 + settings.USD_BUFFER_MIN = 1000.0 + + await trading_cycle( + broker=broker, + overseas_broker=MagicMock(), + scenario_engine=MagicMock(evaluate=MagicMock(return_value=_make_buy_match("005930"))), + playbook=_make_playbook(), + risk=MagicMock(validate_order=MagicMock(), check_circuit_breaker=MagicMock()), + db_conn=db_conn, + decision_logger=decision_logger, + context_store=MagicMock( + get_latest_timeframe=MagicMock(return_value=None), + set_context=MagicMock(), + ), + criticality_assessor=MagicMock( + assess_market_conditions=MagicMock(return_value=MagicMock(value="NORMAL")), + get_timeout=MagicMock(return_value=5.0), + ), + telegram=telegram, + market=market, + stock_code="005930", + scan_candidates={}, + settings=settings, + ) + + broker.send_order.assert_called_once() + + def test_overnight_policy_prioritizes_killswitch_over_exception() -> None: market = MagicMock() with patch("src.main.get_session_info", return_value=MagicMock(session_id="US_AFTER")): From 92261da414684e28099940583a0b7e8037426247 Mon Sep 17 00:00:00 2001 From: agentson Date: Sat, 28 Feb 2026 14:38:53 +0900 Subject: [PATCH 042/109] fix: include exchange_code in latest BUY matching key (#323) --- src/db.py | 53 +++++++++++++++++++++++++++++++++++------------- src/main.py | 14 +++++++++++-- tests/test_db.py | 38 +++++++++++++++++++++++++++++++++- 3 files changed, 88 insertions(+), 17 deletions(-) diff --git a/src/db.py b/src/db.py index 0839521..e161de3 100644 --- a/src/db.py +++ b/src/db.py @@ -312,22 +312,47 @@ def _resolve_session_id(*, market: str, session_id: str | None) -> str: def get_latest_buy_trade( - conn: sqlite3.Connection, stock_code: str, market: str + conn: sqlite3.Connection, + stock_code: str, + market: str, + exchange_code: str | None = None, ) -> dict[str, Any] | None: """Fetch the most recent BUY trade for a stock and market.""" - cursor = 
conn.execute( - """ - SELECT decision_id, price, quantity - FROM trades - WHERE stock_code = ? - AND market = ? - AND action = 'BUY' - AND decision_id IS NOT NULL - ORDER BY timestamp DESC - LIMIT 1 - """, - (stock_code, market), - ) + if exchange_code: + cursor = conn.execute( + """ + SELECT decision_id, price, quantity + FROM trades + WHERE stock_code = ? + AND market = ? + AND action = 'BUY' + AND decision_id IS NOT NULL + AND ( + exchange_code = ? + OR exchange_code IS NULL + OR exchange_code = '' + ) + ORDER BY + CASE WHEN exchange_code = ? THEN 0 ELSE 1 END, + timestamp DESC + LIMIT 1 + """, + (stock_code, market, exchange_code, exchange_code), + ) + else: + cursor = conn.execute( + """ + SELECT decision_id, price, quantity + FROM trades + WHERE stock_code = ? + AND market = ? + AND action = 'BUY' + AND decision_id IS NOT NULL + ORDER BY timestamp DESC + LIMIT 1 + """, + (stock_code, market), + ) row = cursor.fetchone() if not row: return None diff --git a/src/main.py b/src/main.py index 97f9fd8..4bb7b60 100644 --- a/src/main.py +++ b/src/main.py @@ -1659,7 +1659,12 @@ async def trading_cycle( logger.warning("Telegram notification failed: %s", exc) if decision.action == "SELL" and order_succeeded: - buy_trade = get_latest_buy_trade(db_conn, stock_code, market.code) + buy_trade = get_latest_buy_trade( + db_conn, + stock_code, + market.code, + exchange_code=market.exchange_code, + ) if buy_trade and buy_trade.get("price") is not None: buy_price = float(buy_trade["price"]) buy_qty = int(buy_trade.get("quantity") or 1) @@ -2759,7 +2764,12 @@ async def run_daily_session( continue if decision.action == "SELL" and order_succeeded: - buy_trade = get_latest_buy_trade(db_conn, stock_code, market.code) + buy_trade = get_latest_buy_trade( + db_conn, + stock_code, + market.code, + exchange_code=market.exchange_code, + ) if buy_trade and buy_trade.get("price") is not None: buy_price = float(buy_trade["price"]) buy_qty = int(buy_trade.get("quantity") or 1) diff --git 
a/tests/test_db.py b/tests/test_db.py index 9bd190d..fb2feb9 100644 --- a/tests/test_db.py +++ b/tests/test_db.py @@ -3,7 +3,7 @@ import tempfile import os -from src.db import get_open_position, init_db, log_trade +from src.db import get_latest_buy_trade, get_open_position, init_db, log_trade def test_get_open_position_returns_latest_buy() -> None: @@ -331,6 +331,42 @@ def test_log_trade_unknown_market_falls_back_to_unknown_session() -> None: assert row[0] == "UNKNOWN" +def test_get_latest_buy_trade_prefers_exchange_code_match() -> None: + conn = init_db(":memory:") + log_trade( + conn=conn, + stock_code="AAPL", + action="BUY", + confidence=80, + rationale="legacy", + quantity=10, + price=120.0, + market="US_NASDAQ", + exchange_code="", + decision_id="legacy-buy", + ) + log_trade( + conn=conn, + stock_code="AAPL", + action="BUY", + confidence=85, + rationale="matched", + quantity=5, + price=125.0, + market="US_NASDAQ", + exchange_code="NASD", + decision_id="matched-buy", + ) + matched = get_latest_buy_trade( + conn, + stock_code="AAPL", + market="US_NASDAQ", + exchange_code="NASD", + ) + assert matched is not None + assert matched["decision_id"] == "matched-buy" + + def test_decision_logs_session_id_migration_backfills_unknown() -> None: import sqlite3 From 2406a80782d99104f3342a63f61c47e1379347be Mon Sep 17 00:00:00 2001 From: agentson Date: Sat, 28 Feb 2026 17:40:51 +0900 Subject: [PATCH 043/109] test: add governance validator unit coverage (#330) --- tests/test_validate_governance_assets.py | 81 ++++++++++++++++++++++++ 1 file changed, 81 insertions(+) create mode 100644 tests/test_validate_governance_assets.py diff --git a/tests/test_validate_governance_assets.py b/tests/test_validate_governance_assets.py new file mode 100644 index 0000000..a3a8519 --- /dev/null +++ b/tests/test_validate_governance_assets.py @@ -0,0 +1,81 @@ +from __future__ import annotations + +import importlib.util +from pathlib import Path +from types import SimpleNamespace + + +def 
_load_module(): + script_path = Path(__file__).resolve().parents[1] / "scripts" / "validate_governance_assets.py" + spec = importlib.util.spec_from_file_location("validate_governance_assets", script_path) + assert spec is not None + assert spec.loader is not None + module = importlib.util.module_from_spec(spec) + spec.loader.exec_module(module) + return module + + +def test_is_policy_file_detects_ouroboros_policy_docs() -> None: + module = _load_module() + assert module.is_policy_file("docs/ouroboros/85_loss_recovery_action_plan.md") + assert not module.is_policy_file("docs/ouroboros/01_requirements_registry.md") + assert not module.is_policy_file("docs/workflow.md") + assert not module.is_policy_file("docs/ouroboros/notes.txt") + + +def test_validate_registry_sync_requires_registry_update_when_policy_changes() -> None: + module = _load_module() + errors: list[str] = [] + module.validate_registry_sync( + ["docs/ouroboros/85_loss_recovery_action_plan.md"], + errors, + ) + assert errors + assert "policy file changed without updating" in errors[0] + + +def test_validate_registry_sync_passes_when_registry_included() -> None: + module = _load_module() + errors: list[str] = [] + module.validate_registry_sync( + [ + "docs/ouroboros/85_loss_recovery_action_plan.md", + "docs/ouroboros/01_requirements_registry.md", + ], + errors, + ) + assert errors == [] + + +def test_load_changed_files_supports_explicit_paths() -> None: + module = _load_module() + errors: list[str] = [] + changed = module.load_changed_files( + ["./docs/ouroboros/85_loss_recovery_action_plan.md", " src/main.py "], + errors, + ) + assert errors == [] + assert changed == [ + "docs/ouroboros/85_loss_recovery_action_plan.md", + "src/main.py", + ] + + +def test_load_changed_files_with_range_uses_git_diff(monkeypatch) -> None: + module = _load_module() + errors: list[str] = [] + + def fake_run(cmd, check, capture_output, text): # noqa: ANN001 + assert cmd[:3] == ["git", "diff", "--name-only"] + assert check is 
True + assert capture_output is True + assert text is True + return SimpleNamespace(stdout="docs/ouroboros/85_loss_recovery_action_plan.md\nsrc/main.py\n") + + monkeypatch.setattr(module.subprocess, "run", fake_run) + changed = module.load_changed_files(["abc...def"], errors) + assert errors == [] + assert changed == [ + "docs/ouroboros/85_loss_recovery_action_plan.md", + "src/main.py", + ] From 82808a8493e8f22d49cef9d6ea74266e1e5f557d Mon Sep 17 00:00:00 2001 From: agentson Date: Sat, 28 Feb 2026 14:41:14 +0900 Subject: [PATCH 044/109] feat: enforce stop-loss reentry cooldown window (#319) --- src/config.py | 1 + src/main.py | 69 +++++++++++++++++++++++++++++++++++++++++++++++++++ 2 files changed, 70 insertions(+) diff --git a/src/config.py b/src/config.py index 7f27aeb..656044c 100644 --- a/src/config.py +++ b/src/config.py @@ -61,6 +61,7 @@ class Settings(BaseSettings): PAPER_OVERSEAS_CASH: float = Field(default=50000.0, ge=0.0) USD_BUFFER_MIN: float = Field(default=1000.0, ge=0.0) US_MIN_PRICE: float = Field(default=5.0, ge=0.0) + STOPLOSS_REENTRY_COOLDOWN_MINUTES: int = Field(default=120, ge=1, le=1440) OVERNIGHT_EXCEPTION_ENABLED: bool = True # Trading frequency mode (daily = batch API calls, realtime = per-stock calls) diff --git a/src/main.py b/src/main.py index 1172b86..2499550 100644 --- a/src/main.py +++ b/src/main.py @@ -70,6 +70,7 @@ BLACKOUT_ORDER_MANAGER = BlackoutOrderManager( _SESSION_CLOSE_WINDOWS = {"NXT_AFTER", "US_AFTER"} _RUNTIME_EXIT_STATES: dict[str, PositionState] = {} _RUNTIME_EXIT_PEAKS: dict[str, float] = {} +_STOPLOSS_REENTRY_COOLDOWN_UNTIL: dict[str, float] = {} def safe_float(value: str | float | None, default: float = 0.0) -> float: @@ -118,6 +119,16 @@ def _resolve_sell_qty_for_pnl(*, sell_qty: int | None, buy_qty: int | None) -> i return max(0, int(buy_qty or 0)) +def _stoploss_cooldown_key(*, market: MarketInfo, stock_code: str) -> str: + return f"{market.code}:{stock_code}" + + +def _stoploss_cooldown_minutes(settings: Settings | 
None) -> int: + if settings is None: + return 120 + return max(1, int(getattr(settings, "STOPLOSS_REENTRY_COOLDOWN_MINUTES", 120))) + + async def _retry_connection(coro_factory: Any, *args: Any, label: str = "", **kwargs: Any) -> Any: """Call an async function retrying on ConnectionError with exponential backoff. @@ -1332,6 +1343,23 @@ async def trading_cycle( current_price, min_price, ) + if decision.action == "BUY": + cooldown_key = _stoploss_cooldown_key(market=market, stock_code=stock_code) + now_epoch = datetime.now(UTC).timestamp() + cooldown_until = _STOPLOSS_REENTRY_COOLDOWN_UNTIL.get(cooldown_key, 0.0) + if now_epoch < cooldown_until: + remaining = int(cooldown_until - now_epoch) + decision = TradeDecision( + action="HOLD", + confidence=decision.confidence, + rationale=f"Stop-loss reentry cooldown active ({remaining}s remaining)", + ) + logger.info( + "BUY suppressed for %s (%s): stop-loss cooldown active (%ds remaining)", + stock_code, + market.name, + remaining, + ) if decision.action == "HOLD": open_position = get_open_position(db_conn, stock_code, market.code) @@ -1715,6 +1743,18 @@ async def trading_cycle( pnl=trade_pnl, accuracy=1 if trade_pnl > 0 else 0, ) + if trade_pnl < 0: + cooldown_key = _stoploss_cooldown_key(market=market, stock_code=stock_code) + cooldown_minutes = _stoploss_cooldown_minutes(settings) + _STOPLOSS_REENTRY_COOLDOWN_UNTIL[cooldown_key] = ( + datetime.now(UTC).timestamp() + cooldown_minutes * 60 + ) + logger.info( + "Stop-loss cooldown set for %s (%s): %d minutes", + stock_code, + market.name, + cooldown_minutes, + ) # 6. 
Log trade with selection context (skip if order was rejected) if decision.action in ("BUY", "SELL") and not order_succeeded: @@ -2511,6 +2551,23 @@ async def run_daily_session( stock_data["current_price"], min_price, ) + if decision.action == "BUY": + cooldown_key = _stoploss_cooldown_key(market=market, stock_code=stock_code) + now_epoch = datetime.now(UTC).timestamp() + cooldown_until = _STOPLOSS_REENTRY_COOLDOWN_UNTIL.get(cooldown_key, 0.0) + if now_epoch < cooldown_until: + remaining = int(cooldown_until - now_epoch) + decision = TradeDecision( + action="HOLD", + confidence=decision.confidence, + rationale=f"Stop-loss reentry cooldown active ({remaining}s remaining)", + ) + logger.info( + "BUY suppressed for %s (%s): stop-loss cooldown active (%ds remaining)", + stock_code, + market.name, + remaining, + ) if decision.action == "HOLD": daily_open = get_open_position(db_conn, stock_code, market.code) if not daily_open: @@ -2842,6 +2899,18 @@ async def run_daily_session( pnl=trade_pnl, accuracy=1 if trade_pnl > 0 else 0, ) + if trade_pnl < 0: + cooldown_key = _stoploss_cooldown_key(market=market, stock_code=stock_code) + cooldown_minutes = _stoploss_cooldown_minutes(settings) + _STOPLOSS_REENTRY_COOLDOWN_UNTIL[cooldown_key] = ( + datetime.now(UTC).timestamp() + cooldown_minutes * 60 + ) + logger.info( + "Stop-loss cooldown set for %s (%s): %d minutes", + stock_code, + market.name, + cooldown_minutes, + ) # Log trade (skip if order was rejected by API) if decision.action in ("BUY", "SELL") and not order_succeeded: From 5f53b02da81fd5146b2b63d6aa82a3753f122ee9 Mon Sep 17 00:00:00 2001 From: agentson Date: Sat, 28 Feb 2026 17:16:26 +0900 Subject: [PATCH 045/109] test: add stop-loss reentry cooldown behavioral coverage (#319) --- tests/test_main.py | 102 +++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 102 insertions(+) diff --git a/tests/test_main.py b/tests/test_main.py index ff7f4ed..4de28aa 100644 --- a/tests/test_main.py +++ b/tests/test_main.py @@ 
-15,6 +15,7 @@ from src.evolution.scorecard import DailyScorecard from src.logging.decision_logger import DecisionLogger from src.main import ( KILL_SWITCH, + _STOPLOSS_REENTRY_COOLDOWN_UNTIL, _RUNTIME_EXIT_PEAKS, _RUNTIME_EXIT_STATES, _should_force_exit_for_overnight, @@ -93,10 +94,12 @@ def _reset_kill_switch_state() -> None: KILL_SWITCH.clear_block() _RUNTIME_EXIT_STATES.clear() _RUNTIME_EXIT_PEAKS.clear() + _STOPLOSS_REENTRY_COOLDOWN_UNTIL.clear() yield KILL_SWITCH.clear_block() _RUNTIME_EXIT_STATES.clear() _RUNTIME_EXIT_PEAKS.clear() + _STOPLOSS_REENTRY_COOLDOWN_UNTIL.clear() class TestExtractAvgPriceFromBalance: @@ -2053,6 +2056,105 @@ async def test_sell_updates_original_buy_decision_outcome() -> None: assert updated_buy is not None assert updated_buy.outcome_pnl == 20.0 assert updated_buy.outcome_accuracy == 1 + assert "KR:005930" not in _STOPLOSS_REENTRY_COOLDOWN_UNTIL + + +@pytest.mark.asyncio +async def test_stoploss_reentry_cooldown_blocks_buy_when_active() -> None: + _STOPLOSS_REENTRY_COOLDOWN_UNTIL["KR:005930"] = datetime.now(UTC).timestamp() + 300 + db_conn = init_db(":memory:") + + broker = MagicMock() + broker.get_current_price = AsyncMock(return_value=(100.0, 0.0, 0.0)) + broker.get_balance = AsyncMock( + return_value={ + "output1": [], + "output2": [{"tot_evlu_amt": "100000", "dnca_tot_amt": "50000", "pchs_amt_smtl_amt": "50000"}], + } + ) + broker.send_order = AsyncMock(return_value={"msg1": "OK"}) + + market = MagicMock() + market.name = "Korea" + market.code = "KR" + market.exchange_code = "KRX" + market.is_domestic = True + + await trading_cycle( + broker=broker, + overseas_broker=MagicMock(), + scenario_engine=MagicMock(evaluate=MagicMock(return_value=_make_buy_match("005930"))), + playbook=_make_playbook(), + risk=MagicMock(validate_order=MagicMock(), check_circuit_breaker=MagicMock()), + db_conn=db_conn, + decision_logger=DecisionLogger(db_conn), + context_store=MagicMock(get_latest_timeframe=MagicMock(return_value=None), 
set_context=MagicMock()), + criticality_assessor=MagicMock( + assess_market_conditions=MagicMock(return_value=MagicMock(value="NORMAL")), + get_timeout=MagicMock(return_value=5.0), + ), + telegram=MagicMock( + notify_trade_execution=AsyncMock(), + notify_fat_finger=AsyncMock(), + notify_circuit_breaker=AsyncMock(), + notify_scenario_matched=AsyncMock(), + ), + market=market, + stock_code="005930", + scan_candidates={}, + settings=MagicMock(POSITION_SIZING_ENABLED=False, CONFIDENCE_THRESHOLD=80, MODE="paper"), + ) + + broker.send_order.assert_not_called() + + +@pytest.mark.asyncio +async def test_stoploss_reentry_cooldown_allows_buy_after_expiry() -> None: + _STOPLOSS_REENTRY_COOLDOWN_UNTIL["KR:005930"] = datetime.now(UTC).timestamp() - 10 + db_conn = init_db(":memory:") + + broker = MagicMock() + broker.get_current_price = AsyncMock(return_value=(100.0, 0.0, 0.0)) + broker.get_balance = AsyncMock( + return_value={ + "output1": [], + "output2": [{"tot_evlu_amt": "100000", "dnca_tot_amt": "50000", "pchs_amt_smtl_amt": "50000"}], + } + ) + broker.send_order = AsyncMock(return_value={"msg1": "OK"}) + + market = MagicMock() + market.name = "Korea" + market.code = "KR" + market.exchange_code = "KRX" + market.is_domestic = True + + await trading_cycle( + broker=broker, + overseas_broker=MagicMock(), + scenario_engine=MagicMock(evaluate=MagicMock(return_value=_make_buy_match("005930"))), + playbook=_make_playbook(), + risk=MagicMock(validate_order=MagicMock(), check_circuit_breaker=MagicMock()), + db_conn=db_conn, + decision_logger=DecisionLogger(db_conn), + context_store=MagicMock(get_latest_timeframe=MagicMock(return_value=None), set_context=MagicMock()), + criticality_assessor=MagicMock( + assess_market_conditions=MagicMock(return_value=MagicMock(value="NORMAL")), + get_timeout=MagicMock(return_value=5.0), + ), + telegram=MagicMock( + notify_trade_execution=AsyncMock(), + notify_fat_finger=AsyncMock(), + notify_circuit_breaker=AsyncMock(), + 
notify_scenario_matched=AsyncMock(), + ), + market=market, + stock_code="005930", + scan_candidates={}, + settings=MagicMock(POSITION_SIZING_ENABLED=False, CONFIDENCE_THRESHOLD=80, MODE="paper"), + ) + + broker.send_order.assert_called_once() @pytest.mark.asyncio From 8bba85da1e7e32172427180a02e0b984ead59e9d Mon Sep 17 00:00:00 2001 From: agentson Date: Sat, 28 Feb 2026 18:30:52 +0900 Subject: [PATCH 046/109] feat: add KR ATR-based dynamic hard-stop threshold (#318) --- src/config.py | 3 +++ src/main.py | 34 +++++++++++++++++++++++++++++++++- tests/test_main.py | 46 ++++++++++++++++++++++++++++++++++++++++++++++ 3 files changed, 82 insertions(+), 1 deletion(-) diff --git a/src/config.py b/src/config.py index 656044c..7f0a367 100644 --- a/src/config.py +++ b/src/config.py @@ -62,6 +62,9 @@ class Settings(BaseSettings): USD_BUFFER_MIN: float = Field(default=1000.0, ge=0.0) US_MIN_PRICE: float = Field(default=5.0, ge=0.0) STOPLOSS_REENTRY_COOLDOWN_MINUTES: int = Field(default=120, ge=1, le=1440) + KR_ATR_STOP_MULTIPLIER_K: float = Field(default=2.0, ge=0.1, le=10.0) + KR_ATR_STOP_MIN_PCT: float = Field(default=-2.0, le=0.0) + KR_ATR_STOP_MAX_PCT: float = Field(default=-7.0, le=0.0) OVERNIGHT_EXCEPTION_ENABLED: bool = True # Trading frequency mode (daily = batch API calls, realtime = per-stock calls) diff --git a/src/main.py b/src/main.py index 2499550..bc9a926 100644 --- a/src/main.py +++ b/src/main.py @@ -119,6 +119,27 @@ def _resolve_sell_qty_for_pnl(*, sell_qty: int | None, buy_qty: int | None) -> i return max(0, int(buy_qty or 0)) +def _compute_kr_dynamic_stop_loss_pct( + *, + entry_price: float, + atr_value: float, + fallback_stop_loss_pct: float, + settings: Settings | None, +) -> float: + """Compute KR dynamic hard-stop threshold in percent.""" + if entry_price <= 0 or atr_value <= 0: + return fallback_stop_loss_pct + + k = float(getattr(settings, "KR_ATR_STOP_MULTIPLIER_K", 2.0) if settings else 2.0) + min_pct = float(getattr(settings, "KR_ATR_STOP_MIN_PCT", 
-2.0) if settings else -2.0) + max_pct = float(getattr(settings, "KR_ATR_STOP_MAX_PCT", -7.0) if settings else -7.0) + if max_pct > min_pct: + min_pct, max_pct = max_pct, min_pct + + dynamic_stop_pct = -((k * atr_value) / entry_price) * 100.0 + return max(max_pct, min(min_pct, dynamic_stop_pct)) + + def _stoploss_cooldown_key(*, market: MarketInfo, stock_code: str) -> str: return f"{market.code}:{stock_code}" @@ -518,6 +539,7 @@ def _apply_staged_exit_override_for_hold( open_position: dict[str, Any] | None, market_data: dict[str, Any], stock_playbook: Any | None, + settings: Settings | None = None, ) -> TradeDecision: """Apply v2 staged exit semantics for HOLD positions using runtime state.""" if decision.action != "HOLD" or not open_position: @@ -533,6 +555,14 @@ def _apply_staged_exit_override_for_hold( if stock_playbook and stock_playbook.scenarios: stop_loss_threshold = stock_playbook.scenarios[0].stop_loss_pct take_profit_threshold = stock_playbook.scenarios[0].take_profit_pct + atr_value = safe_float(market_data.get("atr_value"), 0.0) + if market.code == "KR": + stop_loss_threshold = _compute_kr_dynamic_stop_loss_pct( + entry_price=entry_price, + atr_value=atr_value, + fallback_stop_loss_pct=stop_loss_threshold, + settings=settings, + ) runtime_key = _build_runtime_position_key( market_code=market.code, @@ -558,7 +588,7 @@ def _apply_staged_exit_override_for_hold( current_price=current_price, entry_price=entry_price, peak_price=peak_price, - atr_value=safe_float(market_data.get("atr_value"), 0.0), + atr_value=atr_value, pred_down_prob=safe_float(market_data.get("pred_down_prob"), 0.0), liquidity_weak=safe_float(market_data.get("volume_ratio"), 1.0) < 1.0, ), @@ -1375,6 +1405,7 @@ async def trading_cycle( open_position=open_position, market_data=market_data, stock_playbook=stock_playbook, + settings=settings, ) if open_position and decision.action == "HOLD" and _should_force_exit_for_overnight( market=market, @@ -2582,6 +2613,7 @@ async def run_daily_session( 
open_position=daily_open, market_data=stock_data, stock_playbook=stock_playbook, + settings=settings, ) if daily_open and decision.action == "HOLD" and _should_force_exit_for_overnight( market=market, diff --git a/tests/test_main.py b/tests/test_main.py index 4de28aa..e98a659 100644 --- a/tests/test_main.py +++ b/tests/test_main.py @@ -32,6 +32,7 @@ from src.main import ( _run_context_scheduler, _run_evolution_loop, _start_dashboard_server, + _compute_kr_dynamic_stop_loss_pct, handle_domestic_pending_orders, handle_overseas_pending_orders, process_blackout_recovery_orders, @@ -135,6 +136,51 @@ def test_resolve_sell_qty_for_pnl_uses_buy_qty_fallback_when_sell_qty_missing() def test_resolve_sell_qty_for_pnl_returns_zero_when_both_missing() -> None: assert _resolve_sell_qty_for_pnl(sell_qty=None, buy_qty=None) == 0 + +def test_compute_kr_dynamic_stop_loss_pct_falls_back_without_atr() -> None: + out = _compute_kr_dynamic_stop_loss_pct( + entry_price=100.0, + atr_value=0.0, + fallback_stop_loss_pct=-2.0, + settings=None, + ) + assert out == -2.0 + + +def test_compute_kr_dynamic_stop_loss_pct_clamps_to_min_and_max() -> None: + # Small ATR -> clamp to min (-2%) + out_small = _compute_kr_dynamic_stop_loss_pct( + entry_price=100.0, + atr_value=0.2, + fallback_stop_loss_pct=-2.0, + settings=None, + ) + assert out_small == -2.0 + + # Large ATR -> clamp to max (-7%) + out_large = _compute_kr_dynamic_stop_loss_pct( + entry_price=100.0, + atr_value=10.0, + fallback_stop_loss_pct=-2.0, + settings=None, + ) + assert out_large == -7.0 + + +def test_compute_kr_dynamic_stop_loss_pct_uses_settings_values() -> None: + settings = MagicMock( + KR_ATR_STOP_MULTIPLIER_K=3.0, + KR_ATR_STOP_MIN_PCT=-1.5, + KR_ATR_STOP_MAX_PCT=-6.0, + ) + out = _compute_kr_dynamic_stop_loss_pct( + entry_price=100.0, + atr_value=1.0, + fallback_stop_loss_pct=-2.0, + settings=settings, + ) + assert out == -3.0 + def test_returns_zero_when_field_empty_string(self) -> None: """Returns 0.0 when pchs_avg_pric is an 
empty string.""" balance = {"output1": [{"pdno": "005930", "pchs_avg_pric": ""}]} From 62cd8a81a4faf8930a5f09e21869eaa32a1dcb7c Mon Sep 17 00:00:00 2001 From: agentson Date: Sat, 28 Feb 2026 18:35:32 +0900 Subject: [PATCH 047/109] feat: feed staged-exit with ATR/RSI runtime features (#325) --- src/config.py | 2 + src/main.py | 114 +++++++++++++++++++++++++++++++++++++++++++-- tests/test_main.py | 97 ++++++++++++++++++++++++++++++++++++++ 3 files changed, 210 insertions(+), 3 deletions(-) diff --git a/src/config.py b/src/config.py index 7f0a367..eeb4f1f 100644 --- a/src/config.py +++ b/src/config.py @@ -61,6 +61,8 @@ class Settings(BaseSettings): PAPER_OVERSEAS_CASH: float = Field(default=50000.0, ge=0.0) USD_BUFFER_MIN: float = Field(default=1000.0, ge=0.0) US_MIN_PRICE: float = Field(default=5.0, ge=0.0) + STAGED_EXIT_BE_ARM_PCT: float = Field(default=1.2, gt=0.0, le=30.0) + STAGED_EXIT_ARM_PCT: float = Field(default=3.0, gt=0.0, le=100.0) STOPLOSS_REENTRY_COOLDOWN_MINUTES: int = Field(default=120, ge=1, le=1440) KR_ATR_STOP_MULTIPLIER_K: float = Field(default=2.0, ge=0.1, le=10.0) KR_ATR_STOP_MIN_PCT: float = Field(default=-2.0, le=0.0) diff --git a/src/main.py b/src/main.py index bc9a926..1349e5c 100644 --- a/src/main.py +++ b/src/main.py @@ -71,6 +71,7 @@ _SESSION_CLOSE_WINDOWS = {"NXT_AFTER", "US_AFTER"} _RUNTIME_EXIT_STATES: dict[str, PositionState] = {} _RUNTIME_EXIT_PEAKS: dict[str, float] = {} _STOPLOSS_REENTRY_COOLDOWN_UNTIL: dict[str, float] = {} +_VOLATILITY_ANALYZER = VolatilityAnalyzer() def safe_float(value: str | float | None, default: float = 0.0) -> float: @@ -150,6 +151,90 @@ def _stoploss_cooldown_minutes(settings: Settings | None) -> int: return max(1, int(getattr(settings, "STOPLOSS_REENTRY_COOLDOWN_MINUTES", 120))) +def _estimate_pred_down_prob_from_rsi(rsi: float | str | None) -> float: + """Estimate downside probability from RSI using a simple linear mapping.""" + if rsi is None: + return 0.5 + rsi_value = max(0.0, min(100.0, 
safe_float(rsi, 50.0))) + return rsi_value / 100.0 + + +async def _compute_kr_atr_value( + *, + broker: KISBroker, + stock_code: str, + period: int = 14, +) -> float: + """Compute ATR(period) for KR stocks using daily OHLC.""" + days = max(period + 1, 30) + try: + daily_prices = await _retry_connection( + broker.get_daily_prices, + stock_code, + days=days, + label=f"daily_prices:{stock_code}", + ) + except ConnectionError as exc: + logger.warning("ATR source unavailable for %s: %s", stock_code, exc) + return 0.0 + except Exception as exc: + logger.warning("Unexpected ATR fetch failure for %s: %s", stock_code, exc) + return 0.0 + + if not isinstance(daily_prices, list): + return 0.0 + + highs: list[float] = [] + lows: list[float] = [] + closes: list[float] = [] + for row in daily_prices: + if not isinstance(row, dict): + continue + high = safe_float(row.get("high"), 0.0) + low = safe_float(row.get("low"), 0.0) + close = safe_float(row.get("close"), 0.0) + if high <= 0 or low <= 0 or close <= 0: + continue + highs.append(high) + lows.append(low) + closes.append(close) + + if len(highs) < period + 1 or len(lows) < period + 1 or len(closes) < period + 1: + return 0.0 + return max(0.0, _VOLATILITY_ANALYZER.calculate_atr(highs, lows, closes, period=period)) + + +async def _inject_staged_exit_features( + *, + market: MarketInfo, + stock_code: str, + open_position: dict[str, Any] | None, + market_data: dict[str, Any], + broker: KISBroker | None, +) -> None: + """Inject ATR/pred_down_prob used by staged exit evaluation.""" + if not open_position: + return + + if "pred_down_prob" not in market_data: + market_data["pred_down_prob"] = _estimate_pred_down_prob_from_rsi( + market_data.get("rsi") + ) + + existing_atr = safe_float(market_data.get("atr_value"), 0.0) + if existing_atr > 0: + return + + if market.is_domestic and broker is not None: + market_data["atr_value"] = await _compute_kr_atr_value( + broker=broker, + stock_code=stock_code, + ) + return + + 
market_data["atr_value"] = 0.0 + + async def _retry_connection(coro_factory: Any, *args: Any, label: str = "", **kwargs: Any) -> Any: """Call an async function retrying on ConnectionError with exponential backoff. @@ -563,6 +648,15 @@ def _apply_staged_exit_override_for_hold( fallback_stop_loss_pct=stop_loss_threshold, settings=settings, ) + if settings is None: + be_arm_pct = max(0.5, take_profit_threshold * 0.4) + arm_pct = take_profit_threshold + else: + be_arm_pct = max(0.1, float(getattr(settings, "STAGED_EXIT_BE_ARM_PCT", 1.2))) + arm_pct = max( + be_arm_pct, + float(getattr(settings, "STAGED_EXIT_ARM_PCT", 3.0)), + ) runtime_key = _build_runtime_position_key( market_code=market.code, @@ -581,8 +675,8 @@ def _apply_staged_exit_override_for_hold( current_state=current_state, config=ExitRuleConfig( hard_stop_pct=stop_loss_threshold, - be_arm_pct=max(0.5, take_profit_threshold * 0.4), - arm_pct=take_profit_threshold, + be_arm_pct=be_arm_pct, + arm_pct=arm_pct, ), inp=ExitRuleInput( current_price=current_price, @@ -608,7 +702,7 @@ def _apply_staged_exit_override_for_hold( elif exit_eval.reason == "arm_take_profit": rationale = ( f"Take-profit triggered ({pnl_pct:.2f}% >= " - f"{take_profit_threshold:.2f}%)" + f"{arm_pct:.2f}%)" ) elif exit_eval.reason == "atr_trailing_stop": rationale = "ATR trailing-stop triggered" @@ -1398,6 +1492,13 @@ async def trading_cycle( market_code=market.code, stock_code=stock_code, ) + await _inject_staged_exit_features( + market=market, + stock_code=stock_code, + open_position=open_position, + market_data=market_data, + broker=broker, + ) decision = _apply_staged_exit_override_for_hold( decision=decision, market=market, @@ -2606,6 +2707,13 @@ async def run_daily_session( market_code=market.code, stock_code=stock_code, ) + await _inject_staged_exit_features( + market=market, + stock_code=stock_code, + open_position=daily_open, + market_data=stock_data, + broker=broker, + ) decision = _apply_staged_exit_override_for_hold( 
decision=decision, market=market, diff --git a/tests/test_main.py b/tests/test_main.py index e98a659..bd2ea2b 100644 --- a/tests/test_main.py +++ b/tests/test_main.py @@ -16,6 +16,10 @@ from src.logging.decision_logger import DecisionLogger from src.main import ( KILL_SWITCH, _STOPLOSS_REENTRY_COOLDOWN_UNTIL, + _apply_staged_exit_override_for_hold, + _compute_kr_atr_value, + _estimate_pred_down_prob_from_rsi, + _inject_staged_exit_features, _RUNTIME_EXIT_PEAKS, _RUNTIME_EXIT_STATES, _should_force_exit_for_overnight, @@ -181,6 +185,99 @@ def test_compute_kr_dynamic_stop_loss_pct_uses_settings_values() -> None: ) assert out == -3.0 + +def test_estimate_pred_down_prob_from_rsi_uses_linear_mapping() -> None: + assert _estimate_pred_down_prob_from_rsi(None) == 0.5 + assert _estimate_pred_down_prob_from_rsi(0.0) == 0.0 + assert _estimate_pred_down_prob_from_rsi(50.0) == 0.5 + assert _estimate_pred_down_prob_from_rsi(100.0) == 1.0 + + +@pytest.mark.asyncio +async def test_compute_kr_atr_value_returns_zero_on_short_series() -> None: + broker = MagicMock() + broker.get_daily_prices = AsyncMock( + return_value=[{"high": 101.0, "low": 99.0, "close": 100.0}] * 10 + ) + + atr = await _compute_kr_atr_value(broker=broker, stock_code="005930") + assert atr == 0.0 + + +@pytest.mark.asyncio +async def test_inject_staged_exit_features_sets_pred_down_prob_and_atr_for_kr() -> None: + market = MagicMock() + market.is_domestic = True + stock_data: dict[str, float] = {"rsi": 65.0} + + broker = MagicMock() + broker.get_daily_prices = AsyncMock( + return_value=[ + {"high": 102.0 + i, "low": 98.0 + i, "close": 100.0 + i} + for i in range(40) + ] + ) + + await _inject_staged_exit_features( + market=market, + stock_code="005930", + open_position={"price": 100.0, "quantity": 1}, + market_data=stock_data, + broker=broker, + ) + + assert stock_data["pred_down_prob"] == pytest.approx(0.65) + assert stock_data["atr_value"] > 0.0 + + +def 
test_apply_staged_exit_uses_independent_arm_threshold_settings() -> None: + market = MagicMock() + market.code = "KR" + market.name = "Korea" + + decision = MagicMock() + decision.action = "HOLD" + decision.confidence = 70 + decision.rationale = "hold" + + settings = Settings( + KIS_APP_KEY="k", + KIS_APP_SECRET="s", + KIS_ACCOUNT_NO="12345678-01", + GEMINI_API_KEY="g", + STAGED_EXIT_BE_ARM_PCT=2.2, + STAGED_EXIT_ARM_PCT=5.4, + ) + + captured: dict[str, float] = {} + + def _fake_eval(**kwargs): # type: ignore[no-untyped-def] + cfg = kwargs["config"] + captured["be_arm_pct"] = cfg.be_arm_pct + captured["arm_pct"] = cfg.arm_pct + + class _Out: + should_exit = False + reason = "none" + state = PositionState.HOLDING + + return _Out() + + with patch("src.main.evaluate_exit", side_effect=_fake_eval): + out = _apply_staged_exit_override_for_hold( + decision=decision, + market=market, + stock_code="005930", + open_position={"price": 100.0, "quantity": 1, "decision_id": "d1", "timestamp": "t1"}, + market_data={"current_price": 101.0, "rsi": 60.0, "pred_down_prob": 0.6}, + stock_playbook=None, + settings=settings, + ) + + assert out is decision + assert captured["be_arm_pct"] == pytest.approx(2.2) + assert captured["arm_pct"] == pytest.approx(5.4) + def test_returns_zero_when_field_empty_string(self) -> None: """Returns 0.0 when pchs_avg_pric is an empty string.""" balance = {"output1": [{"pdno": "005930", "pchs_avg_pric": ""}]} From 5facd22ef9a6dee4342ad7d15e3d728d1d0e93d7 Mon Sep 17 00:00:00 2001 From: agentson Date: Sat, 28 Feb 2026 21:04:06 +0900 Subject: [PATCH 048/109] feat: reload session risk profile on session transitions (#327) --- src/config.py | 2 + src/main.py | 221 ++++++++++++++++++++++++++++++++++++++++++--- tests/test_main.py | 48 ++++++++++ 3 files changed, 256 insertions(+), 15 deletions(-) diff --git a/src/config.py b/src/config.py index eeb4f1f..d12a346 100644 --- a/src/config.py +++ b/src/config.py @@ -68,6 +68,8 @@ class Settings(BaseSettings): 
KR_ATR_STOP_MIN_PCT: float = Field(default=-2.0, le=0.0) KR_ATR_STOP_MAX_PCT: float = Field(default=-7.0, le=0.0) OVERNIGHT_EXCEPTION_ENABLED: bool = True + SESSION_RISK_RELOAD_ENABLED: bool = True + SESSION_RISK_PROFILES_JSON: str = "{}" # Trading frequency mode (daily = batch API calls, realtime = per-stock calls) TRADE_MODE: str = Field(default="daily", pattern="^(daily|realtime)$") diff --git a/src/main.py b/src/main.py index 1349e5c..6248817 100644 --- a/src/main.py +++ b/src/main.py @@ -72,6 +72,10 @@ _RUNTIME_EXIT_STATES: dict[str, PositionState] = {} _RUNTIME_EXIT_PEAKS: dict[str, float] = {} _STOPLOSS_REENTRY_COOLDOWN_UNTIL: dict[str, float] = {} _VOLATILITY_ANALYZER = VolatilityAnalyzer() +_SESSION_RISK_PROFILES_RAW = "{}" +_SESSION_RISK_PROFILES_MAP: dict[str, dict[str, Any]] = {} +_SESSION_RISK_LAST_BY_MARKET: dict[str, str] = {} +_SESSION_RISK_OVERRIDES_BY_MARKET: dict[str, dict[str, Any]] = {} def safe_float(value: str | float | None, default: float = 0.0) -> float: @@ -122,6 +126,7 @@ def _resolve_sell_qty_for_pnl(*, sell_qty: int | None, buy_qty: int | None) -> i def _compute_kr_dynamic_stop_loss_pct( *, + market: MarketInfo | None = None, entry_price: float, atr_value: float, fallback_stop_loss_pct: float, @@ -131,9 +136,24 @@ def _compute_kr_dynamic_stop_loss_pct( if entry_price <= 0 or atr_value <= 0: return fallback_stop_loss_pct - k = float(getattr(settings, "KR_ATR_STOP_MULTIPLIER_K", 2.0) if settings else 2.0) - min_pct = float(getattr(settings, "KR_ATR_STOP_MIN_PCT", -2.0) if settings else -2.0) - max_pct = float(getattr(settings, "KR_ATR_STOP_MAX_PCT", -7.0) if settings else -7.0) + k = _resolve_market_setting( + market=market, + settings=settings, + key="KR_ATR_STOP_MULTIPLIER_K", + default=2.0, + ) + min_pct = _resolve_market_setting( + market=market, + settings=settings, + key="KR_ATR_STOP_MIN_PCT", + default=-2.0, + ) + max_pct = _resolve_market_setting( + market=market, + settings=settings, + key="KR_ATR_STOP_MAX_PCT", + default=-7.0, 
+ ) if max_pct > min_pct: min_pct, max_pct = max_pct, min_pct @@ -145,10 +165,123 @@ def _stoploss_cooldown_key(*, market: MarketInfo, stock_code: str) -> str: return f"{market.code}:{stock_code}" -def _stoploss_cooldown_minutes(settings: Settings | None) -> int: +def _parse_session_risk_profiles(settings: Settings | None) -> dict[str, dict[str, Any]]: if settings is None: - return 120 - return max(1, int(getattr(settings, "STOPLOSS_REENTRY_COOLDOWN_MINUTES", 120))) + return {} + global _SESSION_RISK_PROFILES_RAW, _SESSION_RISK_PROFILES_MAP + raw = str(getattr(settings, "SESSION_RISK_PROFILES_JSON", "{}") or "{}") + if raw == _SESSION_RISK_PROFILES_RAW: + return _SESSION_RISK_PROFILES_MAP + + parsed_map: dict[str, dict[str, Any]] = {} + try: + decoded = json.loads(raw) + if isinstance(decoded, dict): + for session_id, session_values in decoded.items(): + if isinstance(session_id, str) and isinstance(session_values, dict): + parsed_map[session_id] = session_values + except (ValueError, TypeError) as exc: + logger.warning("Invalid SESSION_RISK_PROFILES_JSON; using defaults: %s", exc) + parsed_map = {} + + _SESSION_RISK_PROFILES_RAW = raw + _SESSION_RISK_PROFILES_MAP = parsed_map + return _SESSION_RISK_PROFILES_MAP + + +def _coerce_setting_value(*, value: Any, default: Any) -> Any: + if isinstance(default, bool): + if isinstance(value, bool): + return value + if isinstance(value, str): + return value.strip().lower() in {"1", "true", "yes", "on"} + if isinstance(value, (int, float)): + return value != 0 + return default + if isinstance(default, int) and not isinstance(default, bool): + try: + return int(value) + except (ValueError, TypeError): + return default + if isinstance(default, float): + return safe_float(value, float(default)) + if isinstance(default, str): + return str(value) + return value + + +def _session_risk_overrides( + *, + market: MarketInfo | None, + settings: Settings | None, +) -> dict[str, Any]: + if market is None or settings is None: + return {} 
+ if not bool(getattr(settings, "SESSION_RISK_RELOAD_ENABLED", True)): + return {} + + session_id = get_session_info(market).session_id + previous_session = _SESSION_RISK_LAST_BY_MARKET.get(market.code) + if previous_session == session_id: + return _SESSION_RISK_OVERRIDES_BY_MARKET.get(market.code, {}) + + profile_map = _parse_session_risk_profiles(settings) + merged: dict[str, Any] = {} + default_profile = profile_map.get("default") + if isinstance(default_profile, dict): + merged.update(default_profile) + session_profile = profile_map.get(session_id) + if isinstance(session_profile, dict): + merged.update(session_profile) + + _SESSION_RISK_LAST_BY_MARKET[market.code] = session_id + _SESSION_RISK_OVERRIDES_BY_MARKET[market.code] = merged + if previous_session is None: + logger.info( + "Session risk profile initialized for %s: %s (overrides=%s)", + market.code, + session_id, + ",".join(sorted(merged.keys())) if merged else "none", + ) + else: + logger.info( + "Session risk profile reloaded for %s: %s -> %s (overrides=%s)", + market.code, + previous_session, + session_id, + ",".join(sorted(merged.keys())) if merged else "none", + ) + return merged + + +def _resolve_market_setting( + *, + market: MarketInfo | None, + settings: Settings | None, + key: str, + default: Any, +) -> Any: + if settings is None: + return default + + fallback = getattr(settings, key, default) + overrides = _session_risk_overrides(market=market, settings=settings) + if key not in overrides: + return fallback + return _coerce_setting_value(value=overrides[key], default=fallback) + + +def _stoploss_cooldown_minutes( + settings: Settings | None, + market: MarketInfo | None = None, +) -> int: + minutes = _resolve_market_setting( + market=market, + settings=settings, + key="STOPLOSS_REENTRY_COOLDOWN_MINUTES", + default=120, + ) + return max(1, int(minutes)) def _estimate_pred_down_prob_from_rsi(rsi: float | str | None) -> float: @@ -578,7 +711,14 @@ def _should_block_overseas_buy_for_fx_buffer( ): 
return False, total_cash - order_amount, 0.0 remaining = total_cash - order_amount - required = settings.USD_BUFFER_MIN + required = float( + _resolve_market_setting( + market=market, + settings=settings, + key="USD_BUFFER_MIN", + default=1000.0, + ) + ) return remaining < required, remaining, required @@ -594,7 +734,13 @@ def _should_force_exit_for_overnight( return True if settings is None: return False - return not settings.OVERNIGHT_EXCEPTION_ENABLED + overnight_enabled = _resolve_market_setting( + market=market, + settings=settings, + key="OVERNIGHT_EXCEPTION_ENABLED", + default=True, + ) + return not bool(overnight_enabled) def _build_runtime_position_key( @@ -643,6 +789,7 @@ def _apply_staged_exit_override_for_hold( atr_value = safe_float(market_data.get("atr_value"), 0.0) if market.code == "KR": stop_loss_threshold = _compute_kr_dynamic_stop_loss_pct( + market=market, entry_price=entry_price, atr_value=atr_value, fallback_stop_loss_pct=stop_loss_threshold, @@ -652,10 +799,27 @@ def _apply_staged_exit_override_for_hold( be_arm_pct = max(0.5, take_profit_threshold * 0.4) arm_pct = take_profit_threshold else: - be_arm_pct = max(0.1, float(getattr(settings, "STAGED_EXIT_BE_ARM_PCT", 1.2))) + be_arm_pct = max( + 0.1, + float( + _resolve_market_setting( + market=market, + settings=settings, + key="STAGED_EXIT_BE_ARM_PCT", + default=1.2, + ) + ), + ) arm_pct = max( be_arm_pct, - float(getattr(settings, "STAGED_EXIT_ARM_PCT", 3.0)), + float( + _resolve_market_setting( + market=market, + settings=settings, + key="STAGED_EXIT_ARM_PCT", + default=3.0, + ) + ), ) runtime_key = _build_runtime_position_key( @@ -1148,6 +1312,7 @@ async def trading_cycle( ) -> None: """Execute one trading cycle for a single stock.""" cycle_start_time = asyncio.get_event_loop().time() + _session_risk_overrides(market=market, settings=settings) # 1. 
Fetch market data price_output: dict[str, Any] = {} # Populated for overseas markets; used for fallback metrics @@ -1397,7 +1562,14 @@ async def trading_cycle( # 2.1. Apply market_outlook-based BUY confidence threshold if decision.action == "BUY": - base_threshold = (settings.CONFIDENCE_THRESHOLD if settings else 80) + base_threshold = int( + _resolve_market_setting( + market=market, + settings=settings, + key="CONFIDENCE_THRESHOLD", + default=80, + ) + ) outlook = playbook.market_outlook if outlook == MarketOutlook.BEARISH: min_confidence = 90 @@ -1450,7 +1622,14 @@ async def trading_cycle( market.name, ) elif market.code.startswith("US"): - min_price = float(getattr(settings, "US_MIN_PRICE", 5.0) if settings else 5.0) + min_price = float( + _resolve_market_setting( + market=market, + settings=settings, + key="US_MIN_PRICE", + default=5.0, + ) + ) if current_price <= min_price: decision = TradeDecision( action="HOLD", @@ -1877,7 +2056,7 @@ async def trading_cycle( ) if trade_pnl < 0: cooldown_key = _stoploss_cooldown_key(market=market, stock_code=stock_code) - cooldown_minutes = _stoploss_cooldown_minutes(settings) + cooldown_minutes = _stoploss_cooldown_minutes(settings, market=market) _STOPLOSS_REENTRY_COOLDOWN_UNTIL[cooldown_key] = ( datetime.now(UTC).timestamp() + cooldown_minutes * 60 ) @@ -2329,6 +2508,7 @@ async def run_daily_session( # Process each open market for market in open_markets: + _session_risk_overrides(market=market, settings=settings) await process_blackout_recovery_orders( broker=broker, overseas_broker=overseas_broker, @@ -2666,7 +2846,14 @@ async def run_daily_session( market.name, ) elif market.code.startswith("US"): - min_price = float(getattr(settings, "US_MIN_PRICE", 5.0)) + min_price = float( + _resolve_market_setting( + market=market, + settings=settings, + key="US_MIN_PRICE", + default=5.0, + ) + ) if stock_data["current_price"] <= min_price: decision = TradeDecision( action="HOLD", @@ -3041,7 +3228,10 @@ async def run_daily_session( 
) if trade_pnl < 0: cooldown_key = _stoploss_cooldown_key(market=market, stock_code=stock_code) - cooldown_minutes = _stoploss_cooldown_minutes(settings) + cooldown_minutes = _stoploss_cooldown_minutes( + settings, + market=market, + ) _STOPLOSS_REENTRY_COOLDOWN_UNTIL[cooldown_key] = ( datetime.now(UTC).timestamp() + cooldown_minutes * 60 ) @@ -3849,6 +4039,7 @@ async def run(settings: Settings) -> None: break session_info = get_session_info(market) + _session_risk_overrides(market=market, settings=settings) logger.info( "Market session active: %s (%s) session=%s", market.code, diff --git a/tests/test_main.py b/tests/test_main.py index bd2ea2b..c2dbc6b 100644 --- a/tests/test_main.py +++ b/tests/test_main.py @@ -15,6 +15,8 @@ from src.evolution.scorecard import DailyScorecard from src.logging.decision_logger import DecisionLogger from src.main import ( KILL_SWITCH, + _SESSION_RISK_LAST_BY_MARKET, + _SESSION_RISK_OVERRIDES_BY_MARKET, _STOPLOSS_REENTRY_COOLDOWN_UNTIL, _apply_staged_exit_override_for_hold, _compute_kr_atr_value, @@ -32,10 +34,12 @@ from src.main import ( _extract_held_qty_from_balance, _handle_market_close, _retry_connection, + _resolve_market_setting, _resolve_sell_qty_for_pnl, _run_context_scheduler, _run_evolution_loop, _start_dashboard_server, + _stoploss_cooldown_minutes, _compute_kr_dynamic_stop_loss_pct, handle_domestic_pending_orders, handle_overseas_pending_orders, @@ -99,11 +103,15 @@ def _reset_kill_switch_state() -> None: KILL_SWITCH.clear_block() _RUNTIME_EXIT_STATES.clear() _RUNTIME_EXIT_PEAKS.clear() + _SESSION_RISK_LAST_BY_MARKET.clear() + _SESSION_RISK_OVERRIDES_BY_MARKET.clear() _STOPLOSS_REENTRY_COOLDOWN_UNTIL.clear() yield KILL_SWITCH.clear_block() _RUNTIME_EXIT_STATES.clear() _RUNTIME_EXIT_PEAKS.clear() + _SESSION_RISK_LAST_BY_MARKET.clear() + _SESSION_RISK_OVERRIDES_BY_MARKET.clear() _STOPLOSS_REENTRY_COOLDOWN_UNTIL.clear() @@ -186,6 +194,46 @@ def test_compute_kr_dynamic_stop_loss_pct_uses_settings_values() -> None: assert out 
== -3.0 +def test_resolve_market_setting_uses_session_profile_override() -> None: + settings = Settings( + KIS_APP_KEY="k", + KIS_APP_SECRET="s", + KIS_ACCOUNT_NO="12345678-01", + GEMINI_API_KEY="g", + SESSION_RISK_PROFILES_JSON='{"US_PRE": {"US_MIN_PRICE": 7.5}}', + ) + market = MagicMock() + market.code = "US_NASDAQ" + + with patch("src.main.get_session_info", return_value=MagicMock(session_id="US_PRE")): + value = _resolve_market_setting( + market=market, + settings=settings, + key="US_MIN_PRICE", + default=5.0, + ) + + assert value == pytest.approx(7.5) + + +def test_stoploss_cooldown_minutes_uses_session_override() -> None: + settings = Settings( + KIS_APP_KEY="k", + KIS_APP_SECRET="s", + KIS_ACCOUNT_NO="12345678-01", + GEMINI_API_KEY="g", + STOPLOSS_REENTRY_COOLDOWN_MINUTES=120, + SESSION_RISK_PROFILES_JSON='{"NXT_AFTER": {"STOPLOSS_REENTRY_COOLDOWN_MINUTES": 45}}', + ) + market = MagicMock() + market.code = "KR" + + with patch("src.main.get_session_info", return_value=MagicMock(session_id="NXT_AFTER")): + value = _stoploss_cooldown_minutes(settings, market=market) + + assert value == 45 + + def test_estimate_pred_down_prob_from_rsi_uses_linear_mapping() -> None: assert _estimate_pred_down_prob_from_rsi(None) == 0.5 assert _estimate_pred_down_prob_from_rsi(0.0) == 0.0 From 42c06929eae722ea816eb58141361ed1dd50f7cd Mon Sep 17 00:00:00 2001 From: agentson Date: Sat, 28 Feb 2026 22:20:59 +0900 Subject: [PATCH 049/109] test: add session-risk reload edge-case coverage (#327) --- tests/test_main.py | 76 ++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 76 insertions(+) diff --git a/tests/test_main.py b/tests/test_main.py index c2dbc6b..86e67d1 100644 --- a/tests/test_main.py +++ b/tests/test_main.py @@ -4,6 +4,7 @@ from datetime import UTC, date, datetime from unittest.mock import ANY, AsyncMock, MagicMock, patch import pytest +import src.main as main_module from src.config import Settings from src.context.layer import ContextLayer @@ -17,6 +18,7 @@ 
from src.main import ( KILL_SWITCH, _SESSION_RISK_LAST_BY_MARKET, _SESSION_RISK_OVERRIDES_BY_MARKET, + _SESSION_RISK_PROFILES_MAP, _STOPLOSS_REENTRY_COOLDOWN_UNTIL, _apply_staged_exit_override_for_hold, _compute_kr_atr_value, @@ -105,6 +107,8 @@ def _reset_kill_switch_state() -> None: _RUNTIME_EXIT_PEAKS.clear() _SESSION_RISK_LAST_BY_MARKET.clear() _SESSION_RISK_OVERRIDES_BY_MARKET.clear() + _SESSION_RISK_PROFILES_MAP.clear() + main_module._SESSION_RISK_PROFILES_RAW = "__reset__" _STOPLOSS_REENTRY_COOLDOWN_UNTIL.clear() yield KILL_SWITCH.clear_block() @@ -112,6 +116,8 @@ def _reset_kill_switch_state() -> None: _RUNTIME_EXIT_PEAKS.clear() _SESSION_RISK_LAST_BY_MARKET.clear() _SESSION_RISK_OVERRIDES_BY_MARKET.clear() + _SESSION_RISK_PROFILES_MAP.clear() + main_module._SESSION_RISK_PROFILES_RAW = "__reset__" _STOPLOSS_REENTRY_COOLDOWN_UNTIL.clear() @@ -234,6 +240,76 @@ def test_stoploss_cooldown_minutes_uses_session_override() -> None: assert value == 45 +def test_resolve_market_setting_ignores_profile_when_reload_disabled() -> None: + settings = Settings( + KIS_APP_KEY="k", + KIS_APP_SECRET="s", + KIS_ACCOUNT_NO="12345678-01", + GEMINI_API_KEY="g", + US_MIN_PRICE=5.0, + SESSION_RISK_RELOAD_ENABLED=False, + SESSION_RISK_PROFILES_JSON='{"US_PRE": {"US_MIN_PRICE": 9.5}}', + ) + market = MagicMock() + market.code = "US_NASDAQ" + + with patch("src.main.get_session_info", return_value=MagicMock(session_id="US_PRE")): + value = _resolve_market_setting( + market=market, + settings=settings, + key="US_MIN_PRICE", + default=5.0, + ) + + assert value == pytest.approx(5.0) + + +def test_resolve_market_setting_falls_back_on_invalid_profile_json() -> None: + settings = Settings( + KIS_APP_KEY="k", + KIS_APP_SECRET="s", + KIS_ACCOUNT_NO="12345678-01", + GEMINI_API_KEY="g", + US_MIN_PRICE=5.0, + SESSION_RISK_PROFILES_JSON="{invalid-json", + ) + market = MagicMock() + market.code = "US_NASDAQ" + + with patch("src.main.get_session_info", return_value=MagicMock(session_id="US_PRE")): + 
value = _resolve_market_setting( + market=market, + settings=settings, + key="US_MIN_PRICE", + default=5.0, + ) + + assert value == pytest.approx(5.0) + + +def test_resolve_market_setting_coerces_bool_string_override() -> None: + settings = Settings( + KIS_APP_KEY="k", + KIS_APP_SECRET="s", + KIS_ACCOUNT_NO="12345678-01", + GEMINI_API_KEY="g", + OVERNIGHT_EXCEPTION_ENABLED=True, + SESSION_RISK_PROFILES_JSON='{"US_AFTER": {"OVERNIGHT_EXCEPTION_ENABLED": "false"}}', + ) + market = MagicMock() + market.code = "US_NASDAQ" + + with patch("src.main.get_session_info", return_value=MagicMock(session_id="US_AFTER")): + value = _resolve_market_setting( + market=market, + settings=settings, + key="OVERNIGHT_EXCEPTION_ENABLED", + default=True, + ) + + assert value is False + + def test_estimate_pred_down_prob_from_rsi_uses_linear_mapping() -> None: assert _estimate_pred_down_prob_from_rsi(None) == 0.5 assert _estimate_pred_down_prob_from_rsi(0.0) == 0.0 From 0ceb2dfdc9e7879fd9cb4a9a15b1c6f8203d3cae Mon Sep 17 00:00:00 2001 From: agentson Date: Sun, 1 Mar 2026 09:33:28 +0900 Subject: [PATCH 050/109] feat: revalidate blackout recovery orders by price/session context (#328) --- src/config.py | 2 ++ src/main.py | 60 ++++++++++++++++++++++++++++++++++++++++++++++ tests/test_main.py | 51 +++++++++++++++++++++++++++++++++++++++ 3 files changed, 113 insertions(+) diff --git a/src/config.py b/src/config.py index d12a346..671b95b 100644 --- a/src/config.py +++ b/src/config.py @@ -78,6 +78,8 @@ class Settings(BaseSettings): ORDER_BLACKOUT_ENABLED: bool = True ORDER_BLACKOUT_WINDOWS_KST: str = "23:30-00:10" ORDER_BLACKOUT_QUEUE_MAX: int = Field(default=500, ge=10, le=5000) + BLACKOUT_RECOVERY_PRICE_REVALIDATION_ENABLED: bool = True + BLACKOUT_RECOVERY_MAX_PRICE_DRIFT_PCT: float = Field(default=5.0, ge=0.0, le=100.0) # Pre-Market Planner PRE_MARKET_MINUTES: int = Field(default=30, ge=10, le=120) diff --git a/src/main.py b/src/main.py index 6248817..da6f3e9 100644 --- a/src/main.py +++ 
b/src/main.py @@ -1004,6 +1004,7 @@ async def process_blackout_recovery_orders( broker: KISBroker, overseas_broker: OverseasBroker, db_conn: Any, + settings: Settings | None = None, ) -> None: intents = BLACKOUT_ORDER_MANAGER.pop_recovery_batch() if not intents: @@ -1035,6 +1036,63 @@ async def process_blackout_recovery_orders( continue try: + revalidation_enabled = bool( + _resolve_market_setting( + market=market, + settings=settings, + key="BLACKOUT_RECOVERY_PRICE_REVALIDATION_ENABLED", + default=True, + ) + ) + if revalidation_enabled: + if market.is_domestic: + current_price, _, _ = await _retry_connection( + broker.get_current_price, + intent.stock_code, + label=f"recovery_price:{market.code}:{intent.stock_code}", + ) + else: + price_data = await _retry_connection( + overseas_broker.get_overseas_price, + market.exchange_code, + intent.stock_code, + label=f"recovery_price:{market.code}:{intent.stock_code}", + ) + current_price = safe_float(price_data.get("output", {}).get("last"), 0.0) + + queued_price = float(intent.price) + max_drift_pct = float( + _resolve_market_setting( + market=market, + settings=settings, + key="BLACKOUT_RECOVERY_MAX_PRICE_DRIFT_PCT", + default=5.0, + ) + ) + if queued_price <= 0 or current_price <= 0: + logger.info( + "Drop queued intent by price revalidation (invalid price): %s %s (%s) queued=%.4f current=%.4f", + intent.order_type, + intent.stock_code, + market.code, + queued_price, + current_price, + ) + continue + drift_pct = abs(current_price - queued_price) / queued_price * 100.0 + if drift_pct > max_drift_pct: + logger.info( + "Drop queued intent by price revalidation: %s %s (%s) queued=%.4f current=%.4f drift=%.2f%% max=%.2f%%", + intent.order_type, + intent.stock_code, + market.code, + queued_price, + current_price, + drift_pct, + max_drift_pct, + ) + continue + validate_order_policy( market=market, order_type=intent.order_type, @@ -2513,6 +2571,7 @@ async def run_daily_session( broker=broker, overseas_broker=overseas_broker, 
db_conn=db_conn, + settings=settings, ) # Use market-local date for playbook keying market_today = datetime.now(market.timezone).date() @@ -4051,6 +4110,7 @@ async def run(settings: Settings) -> None: broker=broker, overseas_broker=overseas_broker, db_conn=db_conn, + settings=settings, ) # Notify market open if it just opened diff --git a/tests/test_main.py b/tests/test_main.py index 86e67d1..c958354 100644 --- a/tests/test_main.py +++ b/tests/test_main.py @@ -6340,6 +6340,7 @@ async def test_process_blackout_recovery_executes_valid_intents() -> None: """Recovery must execute queued intents that pass revalidation.""" db_conn = init_db(":memory:") broker = MagicMock() + broker.get_current_price = AsyncMock(return_value=(100.0, 0.0, 0.0)) broker.send_order = AsyncMock(return_value={"rt_cd": "0", "msg1": "OK"}) overseas_broker = MagicMock() @@ -6394,6 +6395,7 @@ async def test_process_blackout_recovery_drops_policy_rejected_intent() -> None: """Policy-rejected queued intents must not be requeued.""" db_conn = init_db(":memory:") broker = MagicMock() + broker.get_current_price = AsyncMock(return_value=(100.0, 0.0, 0.0)) broker.send_order = AsyncMock(return_value={"rt_cd": "0", "msg1": "OK"}) overseas_broker = MagicMock() @@ -6437,6 +6439,55 @@ async def test_process_blackout_recovery_drops_policy_rejected_intent() -> None: blackout_manager.requeue.assert_not_called() +@pytest.mark.asyncio +async def test_process_blackout_recovery_drops_intent_on_excessive_price_drift() -> None: + """Queued intent is dropped when current market price drift exceeds threshold.""" + db_conn = init_db(":memory:") + broker = MagicMock() + broker.get_current_price = AsyncMock(return_value=(106.0, 0.0, 0.0)) + broker.send_order = AsyncMock(return_value={"rt_cd": "0", "msg1": "OK"}) + overseas_broker = MagicMock() + + market = MagicMock() + market.code = "KR" + market.exchange_code = "KRX" + market.is_domestic = True + + intent = MagicMock() + intent.market_code = "KR" + intent.stock_code = 
"005930" + intent.order_type = "BUY" + intent.quantity = 1 + intent.price = 100.0 + intent.source = "test" + intent.attempts = 0 + + blackout_manager = MagicMock() + blackout_manager.pop_recovery_batch.return_value = [intent] + + with ( + patch("src.main.BLACKOUT_ORDER_MANAGER", blackout_manager), + patch("src.main.MARKETS", {"KR": market}), + patch("src.main.get_open_position", return_value=None), + patch("src.main.validate_order_policy") as validate_policy, + ): + await process_blackout_recovery_orders( + broker=broker, + overseas_broker=overseas_broker, + db_conn=db_conn, + settings=Settings( + KIS_APP_KEY="k", + KIS_APP_SECRET="s", + KIS_ACCOUNT_NO="12345678-01", + GEMINI_API_KEY="g", + BLACKOUT_RECOVERY_MAX_PRICE_DRIFT_PCT=5.0, + ), + ) + + broker.send_order.assert_not_called() + validate_policy.assert_not_called() + + @pytest.mark.asyncio async def test_trigger_emergency_kill_switch_executes_operational_steps() -> None: """Emergency kill switch should execute cancel/refresh/reduce/notify callbacks.""" From 5fae9765e791b3f3556d1a02babcf0e4a1566337 Mon Sep 17 00:00:00 2001 From: agentson Date: Sun, 1 Mar 2026 09:40:00 +0900 Subject: [PATCH 051/109] test: add blackout recovery overseas/failure revalidation coverage (#328) --- tests/test_main.py | 94 ++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 94 insertions(+) diff --git a/tests/test_main.py b/tests/test_main.py index c958354..6bba964 100644 --- a/tests/test_main.py +++ b/tests/test_main.py @@ -6488,6 +6488,100 @@ async def test_process_blackout_recovery_drops_intent_on_excessive_price_drift() validate_policy.assert_not_called() +@pytest.mark.asyncio +async def test_process_blackout_recovery_drops_overseas_intent_on_excessive_price_drift() -> None: + """Overseas queued intent is dropped when price drift exceeds threshold.""" + db_conn = init_db(":memory:") + broker = MagicMock() + broker.send_order = AsyncMock(return_value={"rt_cd": "0", "msg1": "OK"}) + overseas_broker = MagicMock() + 
overseas_broker.get_overseas_price = AsyncMock(return_value={"output": {"last": "106.0"}}) + overseas_broker.send_overseas_order = AsyncMock(return_value={"rt_cd": "0", "msg1": "OK"}) + + market = MagicMock() + market.code = "US_NASDAQ" + market.exchange_code = "NASD" + market.is_domestic = False + + intent = MagicMock() + intent.market_code = "US_NASDAQ" + intent.stock_code = "AAPL" + intent.order_type = "BUY" + intent.quantity = 1 + intent.price = 100.0 + intent.source = "test" + intent.attempts = 0 + + blackout_manager = MagicMock() + blackout_manager.pop_recovery_batch.return_value = [intent] + + with ( + patch("src.main.BLACKOUT_ORDER_MANAGER", blackout_manager), + patch("src.main.MARKETS", {"US_NASDAQ": market}), + patch("src.main.get_open_position", return_value=None), + patch("src.main.validate_order_policy") as validate_policy, + ): + await process_blackout_recovery_orders( + broker=broker, + overseas_broker=overseas_broker, + db_conn=db_conn, + settings=Settings( + KIS_APP_KEY="k", + KIS_APP_SECRET="s", + KIS_ACCOUNT_NO="12345678-01", + GEMINI_API_KEY="g", + BLACKOUT_RECOVERY_MAX_PRICE_DRIFT_PCT=5.0, + ), + ) + + overseas_broker.send_overseas_order.assert_not_called() + validate_policy.assert_not_called() + + +@pytest.mark.asyncio +async def test_process_blackout_recovery_requeues_intent_when_price_lookup_fails() -> None: + """Price lookup failure must requeue intent for a later retry.""" + db_conn = init_db(":memory:") + broker = MagicMock() + broker.get_current_price = AsyncMock(side_effect=ConnectionError("price API down")) + broker.send_order = AsyncMock(return_value={"rt_cd": "0", "msg1": "OK"}) + overseas_broker = MagicMock() + + market = MagicMock() + market.code = "KR" + market.exchange_code = "KRX" + market.is_domestic = True + + intent = MagicMock() + intent.market_code = "KR" + intent.stock_code = "005930" + intent.order_type = "BUY" + intent.quantity = 1 + intent.price = 100.0 + intent.source = "test" + intent.attempts = 0 + + blackout_manager 
= MagicMock() + blackout_manager.pop_recovery_batch.return_value = [intent] + + with ( + patch("src.main.BLACKOUT_ORDER_MANAGER", blackout_manager), + patch("src.main.MARKETS", {"KR": market}), + patch("src.main.get_open_position", return_value=None), + patch("src.main.validate_order_policy") as validate_policy, + ): + await process_blackout_recovery_orders( + broker=broker, + overseas_broker=overseas_broker, + db_conn=db_conn, + ) + + broker.send_order.assert_not_called() + validate_policy.assert_not_called() + blackout_manager.requeue.assert_called_once_with(intent) + assert intent.attempts == 1 + + @pytest.mark.asyncio async def test_trigger_emergency_kill_switch_executes_operational_steps() -> None: """Emergency kill switch should execute cancel/refresh/reduce/notify callbacks.""" From 701350fb65cf08a3188fdf8720f9199f2043165c Mon Sep 17 00:00:00 2001 From: agentson Date: Sun, 1 Mar 2026 09:44:24 +0900 Subject: [PATCH 052/109] feat: switch backtest triple barrier to calendar-minute horizon (#329) --- src/analysis/backtest_pipeline.py | 13 +++++++ tests/test_backtest_pipeline_integration.py | 38 +++++++++++++++++++-- 2 files changed, 48 insertions(+), 3 deletions(-) diff --git a/src/analysis/backtest_pipeline.py b/src/analysis/backtest_pipeline.py index 4b0701f..a41001c 100644 --- a/src/analysis/backtest_pipeline.py +++ b/src/analysis/backtest_pipeline.py @@ -8,6 +8,7 @@ from __future__ import annotations from collections.abc import Sequence from dataclasses import dataclass +from datetime import datetime from statistics import mean from typing import Literal @@ -22,6 +23,7 @@ class BacktestBar: low: float close: float session_id: str + timestamp: datetime | None = None @dataclass(frozen=True) @@ -86,16 +88,27 @@ def run_v2_backtest_pipeline( highs = [float(bar.high) for bar in bars] lows = [float(bar.low) for bar in bars] closes = [float(bar.close) for bar in bars] + timestamps = [bar.timestamp for bar in bars] normalized_entries = sorted(set(int(i) for i in 
entry_indices)) if normalized_entries[0] < 0 or normalized_entries[-1] >= len(bars): raise IndexError("entry index out of range") + resolved_timestamps: list[datetime] | None = None + if triple_barrier_spec.max_holding_minutes is not None: + if any(ts is None for ts in timestamps): + raise ValueError( + "BacktestBar.timestamp is required for all bars when " + "triple_barrier_spec.max_holding_minutes is set" + ) + resolved_timestamps = [ts for ts in timestamps if ts is not None] + labels_by_bar_index: dict[int, int] = {} for idx in normalized_entries: labels_by_bar_index[idx] = label_with_triple_barrier( highs=highs, lows=lows, closes=closes, + timestamps=resolved_timestamps, entry_index=idx, side=side, spec=triple_barrier_spec, diff --git a/tests/test_backtest_pipeline_integration.py b/tests/test_backtest_pipeline_integration.py index 60dca91..c0ad496 100644 --- a/tests/test_backtest_pipeline_integration.py +++ b/tests/test_backtest_pipeline_integration.py @@ -1,5 +1,7 @@ from __future__ import annotations +from datetime import UTC, datetime, timedelta + from src.analysis.backtest_cost_guard import BacktestCostModel from src.analysis.backtest_pipeline import ( BacktestBar, @@ -12,6 +14,7 @@ from src.analysis.walk_forward_split import generate_walk_forward_splits def _bars() -> list[BacktestBar]: + base_ts = datetime(2026, 2, 28, 0, 0, tzinfo=UTC) closes = [100.0, 101.0, 102.0, 101.5, 103.0, 102.5, 104.0, 103.5, 105.0, 104.5, 106.0, 105.5] bars: list[BacktestBar] = [] for i, close in enumerate(closes): @@ -21,6 +24,7 @@ def _bars() -> list[BacktestBar]: low=close - 1.0, close=close, session_id="KRX_REG" if i % 2 == 0 else "US_PRE", + timestamp=base_ts + timedelta(minutes=i), ) ) return bars @@ -43,7 +47,7 @@ def test_pipeline_happy_path_returns_fold_and_artifact_contract() -> None: triple_barrier_spec=TripleBarrierSpec( take_profit_pct=0.02, stop_loss_pct=0.01, - max_holding_bars=3, + max_holding_minutes=3, ), walk_forward=WalkForwardConfig( train_size=4, @@ -84,7 
+88,7 @@ def test_pipeline_cost_guard_fail_fast() -> None: triple_barrier_spec=TripleBarrierSpec( take_profit_pct=0.02, stop_loss_pct=0.01, - max_holding_bars=3, + max_holding_minutes=3, ), walk_forward=WalkForwardConfig(train_size=2, test_size=1), cost_model=bad, @@ -119,7 +123,7 @@ def test_pipeline_deterministic_seed_free_deterministic_result() -> None: triple_barrier_spec=TripleBarrierSpec( take_profit_pct=0.02, stop_loss_pct=0.01, - max_holding_bars=3, + max_holding_minutes=3, ), walk_forward=WalkForwardConfig( train_size=4, @@ -134,3 +138,31 @@ def test_pipeline_deterministic_seed_free_deterministic_result() -> None: out1 = run_v2_backtest_pipeline(**cfg) out2 = run_v2_backtest_pipeline(**cfg) assert out1 == out2 + + +def test_pipeline_rejects_minutes_spec_when_timestamp_missing() -> None: + bars = _bars() + bars[2] = BacktestBar( + high=bars[2].high, + low=bars[2].low, + close=bars[2].close, + session_id=bars[2].session_id, + timestamp=None, + ) + try: + run_v2_backtest_pipeline( + bars=bars, + entry_indices=[0, 1, 2, 3], + side=1, + triple_barrier_spec=TripleBarrierSpec( + take_profit_pct=0.02, + stop_loss_pct=0.01, + max_holding_minutes=3, + ), + walk_forward=WalkForwardConfig(train_size=2, test_size=1), + cost_model=_cost_model(), + ) + except ValueError as exc: + assert "BacktestBar.timestamp is required" in str(exc) + else: + raise AssertionError("expected timestamp validation error") From 273a3c182a51b3ccc2fee0d2ac1c67a79c8c1ea6 Mon Sep 17 00:00:00 2001 From: agentson Date: Sun, 1 Mar 2026 09:50:45 +0900 Subject: [PATCH 053/109] refactor: simplify timestamp normalization after non-null validation (#329) --- src/analysis/backtest_pipeline.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/src/analysis/backtest_pipeline.py b/src/analysis/backtest_pipeline.py index a41001c..ba49289 100644 --- a/src/analysis/backtest_pipeline.py +++ b/src/analysis/backtest_pipeline.py @@ -11,6 +11,7 @@ from dataclasses import dataclass from datetime 
import datetime from statistics import mean from typing import Literal +from typing import cast from src.analysis.backtest_cost_guard import BacktestCostModel, validate_backtest_cost_model from src.analysis.triple_barrier import TripleBarrierSpec, label_with_triple_barrier @@ -100,7 +101,7 @@ def run_v2_backtest_pipeline( "BacktestBar.timestamp is required for all bars when " "triple_barrier_spec.max_holding_minutes is set" ) - resolved_timestamps = [ts for ts in timestamps if ts is not None] + resolved_timestamps = cast(list[datetime], timestamps) labels_by_bar_index: dict[int, int] = {} for idx in normalized_entries: From c31ee37f13aada5b4f6f1530ef61939670d3c48c Mon Sep 17 00:00:00 2001 From: agentson Date: Sun, 1 Mar 2026 09:58:45 +0900 Subject: [PATCH 054/109] infra: enforce governance sync and TASK-REQ mapping in CI (#330) --- .github/workflows/ci.yml | 14 ++++++- scripts/validate_governance_assets.py | 50 ++++++++++++++++++++++++ tests/test_validate_governance_assets.py | 35 +++++++++++++++++ 3 files changed, 98 insertions(+), 1 deletion(-) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 67cf621..6560a1c 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -25,7 +25,19 @@ jobs: run: python3 scripts/session_handover_check.py --strict - name: Validate governance assets - run: python3 scripts/validate_governance_assets.py + env: + GOVERNANCE_PR_TITLE: ${{ github.event.pull_request.title }} + GOVERNANCE_PR_BODY: ${{ github.event.pull_request.body }} + run: | + if [ "${{ github.event_name }}" = "pull_request" ]; then + RANGE="${{ github.event.pull_request.base.sha }}...${{ github.sha }}" + python3 scripts/validate_governance_assets.py "$RANGE" + elif [ "${{ github.event_name }}" = "push" ]; then + RANGE="${{ github.event.before }}...${{ github.sha }}" + python3 scripts/validate_governance_assets.py "$RANGE" + else + python3 scripts/validate_governance_assets.py + fi - name: Validate Ouroboros docs run: python3 
scripts/validate_ouroboros_docs.py diff --git a/scripts/validate_governance_assets.py b/scripts/validate_governance_assets.py index 82e94d4..5872de1 100644 --- a/scripts/validate_governance_assets.py +++ b/scripts/validate_governance_assets.py @@ -5,9 +5,16 @@ from __future__ import annotations import subprocess import sys +import os +import re from pathlib import Path REQUIREMENTS_REGISTRY = "docs/ouroboros/01_requirements_registry.md" +TASK_WORK_ORDERS_DOC = "docs/ouroboros/30_code_level_work_orders.md" +TASK_DEF_LINE = re.compile(r"^-\s+`(?P<task_id>TASK-[A-Z0-9-]+-\d{3})`(?P<body>.*)$") +REQ_ID_IN_LINE = re.compile(r"\bREQ-[A-Z0-9-]+-\d{3}\b") +TASK_ID_IN_TEXT = re.compile(r"\bTASK-[A-Z0-9-]+-\d{3}\b") +TEST_ID_IN_TEXT = re.compile(r"\bTEST-[A-Z0-9-]+-\d{3}\b") def must_contain(path: Path, required: list[str], errors: list[str]) -> None: @@ -75,8 +82,45 @@ def validate_registry_sync(changed_files: list[str], errors: list[str]) -> None: ) +def validate_task_req_mapping(errors: list[str], *, task_doc: Path | None = None) -> None: + path = task_doc or Path(TASK_WORK_ORDERS_DOC) + if not path.exists(): + errors.append(f"missing file: {path}") + return + + text = path.read_text(encoding="utf-8") + found_task = False + for line in text.splitlines(): + m = TASK_DEF_LINE.match(line.strip()) + if not m: + continue + found_task = True + if not REQ_ID_IN_LINE.search(m.group("body")): + errors.append( + f"{path}: TASK without REQ mapping -> {m.group('task_id')}" + ) + if not found_task: + errors.append(f"{path}: no TASK definitions found") + + +def validate_pr_traceability(warnings: list[str]) -> None: + title = os.getenv("GOVERNANCE_PR_TITLE", "").strip() + body = os.getenv("GOVERNANCE_PR_BODY", "").strip() + if not title and not body: + return + + text = f"{title}\n{body}" + if not REQ_ID_IN_LINE.search(text): + warnings.append("PR text missing REQ-ID reference") + if not TASK_ID_IN_TEXT.search(text): + warnings.append("PR text missing TASK-ID reference") + if not
TEST_ID_IN_TEXT.search(text): + warnings.append("PR text missing TEST-ID reference") + + def main() -> int: errors: list[str] = [] + warnings: list[str] = [] changed_files = load_changed_files(sys.argv[1:], errors) pr_template = Path(".gitea/PULL_REQUEST_TEMPLATE.md") @@ -141,6 +185,8 @@ def main() -> int: errors.append(f"missing file: {handover_script}") validate_registry_sync(changed_files, errors) + validate_task_req_mapping(errors) + validate_pr_traceability(warnings) if errors: print("[FAIL] governance asset validation failed") @@ -149,6 +195,10 @@ def main() -> int: return 1 print("[OK] governance assets validated") + if warnings: + print(f"[WARN] governance advisory: {len(warnings)}") + for warn in warnings: + print(f"- {warn}") return 0 diff --git a/tests/test_validate_governance_assets.py b/tests/test_validate_governance_assets.py index a3a8519..3a0bc0b 100644 --- a/tests/test_validate_governance_assets.py +++ b/tests/test_validate_governance_assets.py @@ -79,3 +79,38 @@ def test_load_changed_files_with_range_uses_git_diff(monkeypatch) -> None: "docs/ouroboros/85_loss_recovery_action_plan.md", "src/main.py", ] + + +def test_validate_task_req_mapping_reports_missing_req_reference(tmp_path) -> None: + module = _load_module() + doc = tmp_path / "work_orders.md" + doc.write_text( + "- `TASK-OPS-999` no req mapping line\n", + encoding="utf-8", + ) + errors: list[str] = [] + module.validate_task_req_mapping(errors, task_doc=doc) + assert errors + assert "TASK without REQ mapping" in errors[0] + + +def test_validate_task_req_mapping_passes_when_req_present(tmp_path) -> None: + module = _load_module() + doc = tmp_path / "work_orders.md" + doc.write_text( + "- `TASK-OPS-999` (`REQ-OPS-001`): enforce timezone labels\n", + encoding="utf-8", + ) + errors: list[str] = [] + module.validate_task_req_mapping(errors, task_doc=doc) + assert errors == [] + + +def test_validate_pr_traceability_warns_when_req_missing(monkeypatch) -> None: + module = _load_module() + 
monkeypatch.setenv("GOVERNANCE_PR_TITLE", "feat: update policy checker") + monkeypatch.setenv("GOVERNANCE_PR_BODY", "Refs: TASK-OPS-001 TEST-ACC-007") + warnings: list[str] = [] + module.validate_pr_traceability(warnings) + assert warnings + assert "PR text missing REQ-ID reference" in warnings From e9de950becd9d08e211391505b5ed2515ac71c53 Mon Sep 17 00:00:00 2001 From: agentson Date: Sun, 1 Mar 2026 10:06:25 +0900 Subject: [PATCH 055/109] ci: wire governance traceability env in gitea workflow and guard zero SHA (#330) --- .gitea/workflows/ci.yml | 3 +++ .github/workflows/ci.yml | 6 ++++-- 2 files changed, 7 insertions(+), 2 deletions(-) diff --git a/.gitea/workflows/ci.yml b/.gitea/workflows/ci.yml index 39fb10d..9fa9522 100644 --- a/.gitea/workflows/ci.yml +++ b/.gitea/workflows/ci.yml @@ -28,6 +28,9 @@ jobs: run: python3 scripts/session_handover_check.py --strict - name: Validate governance assets + env: + GOVERNANCE_PR_TITLE: ${{ github.event.pull_request.title }} + GOVERNANCE_PR_BODY: ${{ github.event.pull_request.body }} run: | RANGE="" if [ "${{ github.event_name }}" = "pull_request" ] && [ -n "${{ github.event.pull_request.base.sha }}" ]; then diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 6560a1c..da84fc7 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -29,11 +29,13 @@ jobs: GOVERNANCE_PR_TITLE: ${{ github.event.pull_request.title }} GOVERNANCE_PR_BODY: ${{ github.event.pull_request.body }} run: | + RANGE="" if [ "${{ github.event_name }}" = "pull_request" ]; then RANGE="${{ github.event.pull_request.base.sha }}...${{ github.sha }}" - python3 scripts/validate_governance_assets.py "$RANGE" - elif [ "${{ github.event_name }}" = "push" ]; then + elif [ "${{ github.event_name }}" = "push" ] && [ "${{ github.event.before }}" != "0000000000000000000000000000000000000000" ]; then RANGE="${{ github.event.before }}...${{ github.sha }}" + fi + if [ -n "$RANGE" ]; then python3 scripts/validate_governance_assets.py 
"$RANGE" else python3 scripts/validate_governance_assets.py From 2df787757afee9daf409cce624533b9c73955450 Mon Sep 17 00:00:00 2001 From: agentson Date: Sun, 1 Mar 2026 10:14:59 +0900 Subject: [PATCH 056/109] test: enforce explicit runtime session_id propagation in realtime/daily logs (#326) --- tests/test_main.py | 110 ++++++++++++++++++++++++++++++++++++++++++++- 1 file changed, 109 insertions(+), 1 deletion(-) diff --git a/tests/test_main.py b/tests/test_main.py index 6bba964..a597d23 100644 --- a/tests/test_main.py +++ b/tests/test_main.py @@ -1836,7 +1836,10 @@ class TestScenarioEngineIntegration: signal="oversold", score=85.0, ) - with patch("src.main.log_trade"): + with ( + patch("src.main.log_trade"), + patch("src.main.get_session_info", return_value=MagicMock(session_id="KRX_REG")), + ): await trading_cycle( broker=mock_broker, overseas_broker=MagicMock(), @@ -2190,6 +2193,7 @@ class TestScenarioEngineIntegration: decision_logger.log_decision.assert_called_once() call_kwargs = decision_logger.log_decision.call_args.kwargs + assert call_kwargs["session_id"] == "KRX_REG" assert "scenario_match" in call_kwargs["context_snapshot"] assert call_kwargs["context_snapshot"]["scenario_match"]["rsi"] == 45.0 @@ -4863,6 +4867,110 @@ async def test_run_daily_session_applies_staged_exit_override_on_hold() -> None: assert broker.send_order.call_args.kwargs["order_type"] == "SELL" +@pytest.mark.asyncio +async def test_run_daily_session_passes_runtime_session_id_to_decision_and_trade_logs() -> None: + """Daily session must explicitly forward runtime session_id to decision/trade logs.""" + from src.analysis.smart_scanner import ScanCandidate + + db_conn = init_db(":memory:") + settings = Settings( + KIS_APP_KEY="k", + KIS_APP_SECRET="s", + KIS_ACCOUNT_NO="12345678-01", + GEMINI_API_KEY="g", + MODE="paper", + ) + + broker = MagicMock() + broker.get_balance = AsyncMock( + return_value={ + "output1": [], + "output2": [ + { + "tot_evlu_amt": "100000", + "dnca_tot_amt": "50000", 
+ "pchs_amt_smtl_amt": "50000", + } + ], + } + ) + broker.get_current_price = AsyncMock(return_value=(100.0, 1.0, 0.0)) + broker.send_order = AsyncMock(return_value={"msg1": "OK"}) + + market = MagicMock() + market.name = "Korea" + market.code = "KR" + market.exchange_code = "KRX" + market.is_domestic = True + market.timezone = __import__("zoneinfo").ZoneInfo("Asia/Seoul") + + smart_scanner = MagicMock() + smart_scanner.scan = AsyncMock( + return_value=[ + ScanCandidate( + stock_code="005930", + name="Samsung", + price=100.0, + volume=1_000_000.0, + volume_ratio=2.0, + rsi=45.0, + signal="momentum", + score=80.0, + ) + ] + ) + + playbook_store = MagicMock() + playbook_store.load = MagicMock(return_value=_make_playbook("KR")) + + scenario_engine = MagicMock(spec=ScenarioEngine) + scenario_engine.evaluate = MagicMock(return_value=_make_buy_match("005930")) + + risk = MagicMock() + risk.check_circuit_breaker = MagicMock() + risk.validate_order = MagicMock() + + decision_logger = MagicMock() + decision_logger.log_decision = MagicMock(return_value="d1") + + telegram = MagicMock() + telegram.notify_trade_execution = AsyncMock() + telegram.notify_scenario_matched = AsyncMock() + + async def _passthrough(fn, *a, label: str = "", **kw): # type: ignore[override] + return await fn(*a, **kw) + + with ( + patch("src.main.get_open_position", return_value=None), + patch("src.main.get_open_markets", return_value=[market]), + patch("src.main.get_session_info", return_value=MagicMock(session_id="KRX_REG")), + patch("src.main._retry_connection", new=_passthrough), + patch("src.main.log_trade") as mock_log_trade, + ): + await run_daily_session( + broker=broker, + overseas_broker=MagicMock(), + scenario_engine=scenario_engine, + playbook_store=playbook_store, + pre_market_planner=MagicMock(), + risk=risk, + db_conn=db_conn, + decision_logger=decision_logger, + context_store=MagicMock(), + criticality_assessor=MagicMock(), + telegram=telegram, + settings=settings, + 
smart_scanner=smart_scanner, + daily_start_eval=0.0, + ) + + decision_logger.log_decision.assert_called_once() + assert decision_logger.log_decision.call_args.kwargs["session_id"] == "KRX_REG" + assert mock_log_trade.call_count >= 1 + for call in mock_log_trade.call_args_list: + assert call.kwargs.get("session_id") == "KRX_REG" + + # --------------------------------------------------------------------------- # sync_positions_from_broker — startup DB sync tests (issue #206) # --------------------------------------------------------------------------- From db316c539ba2cc358488787a6ff06ce638f06e75 Mon Sep 17 00:00:00 2001 From: agentson Date: Sun, 1 Mar 2026 13:37:04 +0900 Subject: [PATCH 057/109] test: align daily session mock order response with rt_cd success path (#326) --- tests/test_main.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/test_main.py b/tests/test_main.py index a597d23..309a8f7 100644 --- a/tests/test_main.py +++ b/tests/test_main.py @@ -2281,7 +2281,7 @@ async def test_sell_updates_original_buy_decision_outcome() -> None: ], } ) - broker.send_order = AsyncMock(return_value={"msg1": "OK"}) + broker.send_order = AsyncMock(return_value={"rt_cd": "0", "msg1": "OK"}) overseas_broker = MagicMock() engine = MagicMock(spec=ScenarioEngine) From e3a3aada831cbce475dd53562b1cef48fe5cc749 Mon Sep 17 00:00:00 2001 From: agentson Date: Sun, 1 Mar 2026 13:38:57 +0900 Subject: [PATCH 058/109] test: set rt_cd success response in session-id daily regression test (#326) --- tests/test_main.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/test_main.py b/tests/test_main.py index 309a8f7..bacedc1 100644 --- a/tests/test_main.py +++ b/tests/test_main.py @@ -4895,7 +4895,7 @@ async def test_run_daily_session_passes_runtime_session_id_to_decision_and_trade } ) broker.get_current_price = AsyncMock(return_value=(100.0, 1.0, 0.0)) - broker.send_order = AsyncMock(return_value={"msg1": "OK"}) + broker.send_order = 
AsyncMock(return_value={"rt_cd": "0", "msg1": "OK"}) market = MagicMock() market.name = "Korea" From 6b34367656ce48a7b4a6198b7db33433f9ead1f9 Mon Sep 17 00:00:00 2001 From: agentson Date: Sun, 1 Mar 2026 17:06:56 +0900 Subject: [PATCH 059/109] =?UTF-8?q?docs:=20v2/v3=20=EA=B5=AC=ED=98=84=20?= =?UTF-8?q?=EA=B0=90=EC=82=AC=20=EB=AC=B8=EC=84=9C=20=ED=94=BC=EB=93=9C?= =?UTF-8?q?=EB=B0=B1=20=EC=A0=84=EC=B2=B4=20=EB=B0=98=EC=98=81=20(#349)?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit 11회 리뷰 사이클에서 남긴 [코멘트]를 모두 본문에 반영하고 블록을 제거한다. 변경 문서: - docs/architecture.md: SmartScanner 동작 모드(both), 대시보드 10 API, DB 스키마(session_id/fx_pnl/mode), config 변수 갱신 - docs/commands.md: /api/pnl/history, /api/positions 엔드포인트 추가 - docs/testing.md: 테스트 수 고정값 제거, SmartScanner fallback 최신화, Dashboard 10 API routes 반영 - README.md: 고정 수치 제거, Gitea CI 명시, 파일별 수치 'CI 기준 변동' 표기 - CLAUDE.md: SmartScanner 섹션명 변경, 고정 수치 제거 - docs/requirements-log.md: #318~#331 구현 항목 추가 - docs/ouroboros/80_implementation_audit.md: ROOT-5/6/7 분리, REQ-V3-008 함수명 병기, v3 ~85% / 거버넌스 ~60%로 갱신 - docs/ouroboros/85_loss_recovery_action_plan.md: ACT-07 함수명 병기, 테스트 수 갱신, 6.1/6.2 정확도 개선 - docs/ouroboros/60_repo_enforcement_checklist.md: CI job/step 구분 표 추가 - docs/ouroboros/README.md: 50_* 문서 (A)/(B) 보조 표기 Closes #349 --- CLAUDE.md | 8 +- README.md | 27 ++- docs/architecture.md | 126 ++++++++++++- docs/commands.md | 4 +- .../60_repo_enforcement_checklist.md | 12 +- docs/ouroboros/80_implementation_audit.md | 172 +++++++++++------- .../ouroboros/85_loss_recovery_action_plan.md | 91 +++++---- docs/ouroboros/README.md | 6 +- docs/requirements-log.md | 125 ++++++++++++- docs/testing.md | 10 +- 10 files changed, 447 insertions(+), 134 deletions(-) diff --git a/CLAUDE.md b/CLAUDE.md index 6c15dca..9387fa9 100644 --- a/CLAUDE.md +++ b/CLAUDE.md @@ -81,9 +81,9 @@ SCANNER_TOP_N=3 # Max candidates per scan - **Evolution-ready** — Selection context logged for strategy optimization - **Fault-tolerant** — 
Falls back to static watchlist on API failure -### Realtime Mode Only +### Trading Mode Integration -Smart Scanner runs in `TRADE_MODE=realtime` only. Daily mode uses static watchlists for batch efficiency. +Smart Scanner runs in both `TRADE_MODE=realtime` and `daily` paths. On API failure, domestic stocks fall back to a static watchlist; overseas stocks fall back to a dynamic universe (active positions, recent holdings). ## Documentation @@ -122,7 +122,7 @@ src/ ├── broker/ # KIS API client (domestic + overseas) ├── context/ # L1-L7 hierarchical memory system ├── core/ # Risk manager (READ-ONLY) -├── dashboard/ # FastAPI read-only monitoring (8 API endpoints) +├── dashboard/ # FastAPI read-only monitoring (10 API endpoints) ├── data/ # External data integration (news, market data, calendar) ├── evolution/ # Self-improvement (optimizer, daily review, scorecard) ├── logging/ # Decision logger (audit trail) @@ -133,7 +133,7 @@ src/ ├── main.py # Trading loop orchestrator └── config.py # Settings (from .env) -tests/ # 551 tests across 25 files +tests/ # 998 tests across 41 files docs/ # Extended documentation ``` diff --git a/README.md b/README.md index eba289e..587ac0c 100644 --- a/README.md +++ b/README.md @@ -39,7 +39,7 @@ KIS(한국투자증권) API로 매매하고, Google Gemini로 판단하며, 자 | 컨텍스트 | `src/context/` | L1-L7 계층형 메모리 시스템 | | 분석 | `src/analysis/` | RSI, ATR, Smart Volatility Scanner | | 알림 | `src/notifications/` | 텔레그램 양방향 (알림 + 9개 명령어) | -| 대시보드 | `src/dashboard/` | FastAPI 읽기 전용 모니터링 (8개 API) | +| 대시보드 | `src/dashboard/` | FastAPI 읽기 전용 모니터링 (10개 API) | | 진화 | `src/evolution/` | 전략 진화 + Daily Review + Scorecard | | 의사결정 로그 | `src/logging/` | 전체 거래 결정 감사 추적 | | 데이터 | `src/data/` | 뉴스, 시장 데이터, 경제 캘린더 연동 | @@ -153,19 +153,16 @@ docker compose up -d ouroboros ## 테스트 -551개 테스트가 25개 파일에 걸쳐 구현되어 있습니다. 최소 커버리지 80%. +998개 테스트가 41개 파일에 걸쳐 구현되어 있습니다. 최소 커버리지 80%. 
``` -tests/test_scenario_engine.py — 시나리오 매칭 (44개) -tests/test_data_integration.py — 외부 데이터 연동 (38개) -tests/test_pre_market_planner.py — 플레이북 생성 (37개) -tests/test_main.py — 거래 루프 통합 (37개) -tests/test_token_efficiency.py — 토큰 최적화 (34개) -tests/test_strategy_models.py — 전략 모델 검증 (33개) -tests/test_telegram_commands.py — 텔레그램 명령어 (31개) -tests/test_latency_control.py — 지연시간 제어 (30개) -tests/test_telegram.py — 텔레그램 알림 (25개) -... 외 16개 파일 +tests/test_main.py — 거래 루프 통합 +tests/test_scenario_engine.py — 시나리오 매칭 +tests/test_pre_market_planner.py — 플레이북 생성 +tests/test_overseas_broker.py — 해외 브로커 +tests/test_telegram_commands.py — 텔레그램 명령어 +tests/test_telegram.py — 텔레그램 알림 +... 외 35개 파일 ※ 파일별 수치는 CI 기준으로 변동 가능 ``` **상세**: [docs/testing.md](docs/testing.md) @@ -177,8 +174,8 @@ tests/test_telegram.py — 텔레그램 알림 (25개) - **AI**: Google Gemini Pro - **DB**: SQLite (5개 테이블: trades, contexts, decision_logs, playbooks, context_metadata) - **대시보드**: FastAPI + uvicorn -- **검증**: pytest + coverage (551 tests) -- **CI/CD**: GitHub Actions +- **검증**: pytest + coverage (998 tests) +- **CI/CD**: Gitea CI (`.gitea/workflows/ci.yml`) - **배포**: Docker + Docker Compose ## 프로젝트 구조 @@ -212,7 +209,7 @@ The-Ouroboros/ │ ├── config.py # Pydantic 설정 │ ├── db.py # SQLite 데이터베이스 │ └── main.py # 비동기 거래 루프 -├── tests/ # 551개 테스트 (25개 파일) +├── tests/ # 998개 테스트 (41개 파일) ├── Dockerfile # 멀티스테이지 빌드 ├── docker-compose.yml # 서비스 오케스트레이션 └── pyproject.toml # 의존성 및 도구 설정 diff --git a/docs/architecture.md b/docs/architecture.md index a334e2d..f0b42d6 100644 --- a/docs/architecture.md +++ b/docs/architecture.md @@ -84,6 +84,37 @@ High-frequency trading with individual stock analysis: - Momentum scoring (0-100 scale) - Breakout/breakdown pattern detection +**TripleBarrierLabeler** (`triple_barrier.py`) — Financial time-series labeling (v2) + +- Triple Barrier method: upper (take-profit), lower (stop-loss), time barrier +- First-touch labeling: labels confirmed by whichever barrier is breached first +- 
`max_holding_minutes` (calendar-minute) time barrier — session-aware, bar-period independent +- Tie-break mode: `"stop_first"` (conservative) or `"take_first"` +- Feature-label strict separation to prevent look-ahead bias + +**BacktestPipeline** (`backtest_pipeline.py`) — End-to-end validation pipeline (v2) + +- `run_v2_backtest_pipeline()`: cost guard → triple barrier labeling → walk-forward splits → fold scoring +- `BacktestPipelineResult`: artifact contract for reproducible output +- `fold_has_leakage()`: leakage detection utility + +**WalkForwardSplit** (`walk_forward_split.py`) — Time-series validation (v2) + +- Fold-based walk-forward splits (no random shuffling) +- Purge/Embargo: excludes N bars before/after fold boundaries to prevent data leakage + +**BacktestExecutionModel** (`backtest_execution_model.py`) — Conservative fill simulation (v2/v3) + +- Session-aware slippage: KRX_REG 5bps, NXT_AFTER 15bps, US_REG 3bps, US_PRE/DAY 30-50bps +- Order failure rate simulation per session +- Partial fill rate simulation with min/max ratio bounds +- Unfavorable-direction fill assumption (no simple close-price fill) + +**BacktestCostGuard** (`backtest_cost_guard.py`) — Cost model validator (v2) + +- `validate_backtest_cost_model()`: fail-fast check that session cost assumptions are present +- Enforces realistic cost assumptions before any backtest run proceeds + **SmartVolatilityScanner** (`smart_scanner.py`) — Python-first filtering pipeline - **Domestic (KR)**: @@ -98,7 +129,7 @@ High-frequency trading with individual stock analysis: - **Step 4**: Return top N candidates (default 3) - **Fallback (overseas only)**: If ranking API is unavailable, uses dynamic universe from runtime active symbols + recent traded symbols + current holdings (no static watchlist) -- **Realtime mode only**: Daily mode uses batch processing for API efficiency +- **Both modes**: Realtime 중심이지만 Daily 경로(`run_daily_session()`)에서도 후보 선별에 사용 **Benefits:** - Reduces Gemini API calls from 20-30 
stocks to 1-3 qualified candidates @@ -124,9 +155,9 @@ High-frequency trading with individual stock analysis: - Selects appropriate context layers for current market conditions -### 4. Risk Manager (`src/core/risk_manager.py`) +### 4. Risk Manager & Session Policy (`src/core/`) -**RiskManager** — Safety circuit breaker and order validation +**RiskManager** (`risk_manager.py`) — Safety circuit breaker and order validation > **READ-ONLY by policy** (see [`docs/agents.md`](./agents.md)) @@ -136,8 +167,59 @@ High-frequency trading with individual stock analysis: - **Fat-Finger Protection**: Rejects orders exceeding 30% of available cash - Must always be enforced, cannot be disabled +**OrderPolicy** (`order_policy.py`) — Session classification and order type enforcement (v3) + +- `classify_session_id()`: Classifies current KR/US session from KST clock + - KR: `NXT_PRE` (08:00-08:50), `KRX_REG` (09:00-15:30), `NXT_AFTER` (15:30-20:00) + - US: `US_DAY` (10:00-18:00), `US_PRE` (18:00-23:30), `US_REG` (23:30-06:00), `US_AFTER` (06:00-07:00) +- Low-liquidity session detection: `NXT_AFTER`, `US_PRE`, `US_DAY`, `US_AFTER` +- Market order forbidden in low-liquidity sessions (`OrderPolicyRejected` raised) +- Limit/IOC/FOK orders always allowed + +**KillSwitch** (`kill_switch.py`) — Emergency trading halt orchestration (v2) + +- Fixed 5-step atomic sequence: + 1. Block new orders (`new_orders_blocked = True`) + 2. Cancel all unfilled orders + 3. Refresh order state (query final status) + 4. Reduce risk (force-close or reduce positions) + 5. 
Snapshot state + send Telegram alert +- Async, injectable step callables — each step individually testable +- Highest priority: overrides overnight exception and all other rules + +**BlackoutManager** (`blackout_manager.py`) — KIS maintenance window handling (v3) + +- Configurable blackout windows (e.g., `23:30-00:10 KST`) +- `queue_order()`: Queues order intent during blackout, enforces max queue size +- `pop_recovery_batch()`: Returns queued intents after recovery +- Recovery revalidation path (in `src/main.py`): + - Stale BUY drop (position already exists) + - Stale SELL drop (position absent) + - `validate_order_policy()` rechecked + - Price drift check (>5% → drop, configurable via `BLACKOUT_RECOVERY_MAX_PRICE_DRIFT_PCT`) + ### 5. Strategy (`src/strategy/`) +**PositionStateMachine** (`position_state_machine.py`) — 4-state sell state machine (v2) + +- States: `HOLDING` → `BE_LOCK` → `ARMED` → `EXITED` + - `HOLDING`: Normal holding + - `BE_LOCK`: Profit ≥ `be_arm_pct` — stop-loss elevated to break-even + - `ARMED`: Profit ≥ `arm_pct` — peak-tracking trailing stop active + - `EXITED`: Position closed +- `promote_state()`: Immediately elevates to highest admissible state (handles gaps/skips) +- `evaluate_exit_first()`: EXITED conditions checked before state promotion +- Monotonic: states only move up, never down + +**ExitRules** (`exit_rules.py`) — 4-layer composite exit logic (v2) + +- **Hard Stop**: `unrealized <= hard_stop_pct` (always enforced, ATR-adaptive for KR) +- **Break-Even Lock**: Once in BE_LOCK/ARMED, exit if price falls to entry price +- **ATR Trailing Stop**: `trailing_stop_price = peak_price - (atr_multiplier_k × ATR)` +- **Model Signal**: Exit if `pred_down_prob >= model_prob_threshold AND liquidity_weak` +- `evaluate_exit()`: Returns `ExitEvaluation` with next state, exit flag, reason, trailing price +- `ExitRuleConfig`: Frozen dataclass with all tunable parameters + **Pre-Market Planner** (`pre_market_planner.py`) — AI playbook generation - 
Runs before market open (configurable `PRE_MARKET_MINUTES`, default 30) @@ -195,7 +277,7 @@ High-frequency trading with individual stock analysis: - Configurable host/port (`DASHBOARD_HOST`, `DASHBOARD_PORT`, default `127.0.0.1:8080`) - Serves static HTML frontend -**8 API Endpoints:** +**10 API Endpoints:** | Endpoint | Method | Description | |----------|--------|-------------| @@ -207,6 +289,8 @@ High-frequency trading with individual stock analysis: | `/api/context/{layer}` | GET | Query context by layer (L1-L7) | | `/api/decisions` | GET | Decision log entries with outcomes | | `/api/scenarios/active` | GET | Today's matched scenarios | +| `/api/pnl/history` | GET | P&L history time series | +| `/api/positions` | GET | Current open positions | ### 8. Notifications (`src/notifications/telegram_client.py`) @@ -448,8 +532,12 @@ CREATE TABLE trades ( pnl REAL DEFAULT 0.0, market TEXT DEFAULT 'KR', exchange_code TEXT DEFAULT 'KRX', + session_id TEXT DEFAULT 'UNKNOWN', -- v3: KRX_REG | NXT_AFTER | US_REG | US_PRE | ... 
selection_context TEXT, -- JSON: {rsi, volume_ratio, signal, score} - decision_id TEXT -- Links to decision_logs + decision_id TEXT, -- Links to decision_logs + strategy_pnl REAL, -- v3: Core strategy P&L (separated from FX) + fx_pnl REAL DEFAULT 0.0, -- v3: FX gain/loss for USD trades (schema ready, activation pending) + mode TEXT -- paper | live ); ``` @@ -475,13 +563,14 @@ CREATE TABLE decision_logs ( stock_code TEXT, market TEXT, exchange_code TEXT, + session_id TEXT DEFAULT 'UNKNOWN', -- v3: session when decision was made action TEXT, confidence INTEGER, rationale TEXT, context_snapshot TEXT, -- JSON: full context at decision time input_data TEXT, -- JSON: market data used outcome_pnl REAL, - outcome_accuracy REAL, + outcome_accuracy INTEGER, reviewed INTEGER DEFAULT 0, review_notes TEXT ); @@ -494,7 +583,7 @@ CREATE TABLE playbooks ( id INTEGER PRIMARY KEY AUTOINCREMENT, date TEXT NOT NULL, market TEXT NOT NULL, - status TEXT DEFAULT 'generated', + status TEXT NOT NULL DEFAULT 'pending', -- pending → generated → active → expired playbook_json TEXT NOT NULL, -- Full playbook with scenarios generated_at TEXT NOT NULL, token_count INTEGER, @@ -552,6 +641,29 @@ PLANNER_TIMEOUT_SECONDS=60 # Timeout for playbook generation DEFENSIVE_PLAYBOOK_ON_FAILURE=true # Fallback on AI failure RESCAN_INTERVAL_SECONDS=300 # Scenario rescan interval during trading +# Optional — v2 Exit Rules (State Machine) +STAGED_EXIT_BE_ARM_PCT=1.2 # Break-even lock threshold (%) +STAGED_EXIT_ARM_PCT=3.0 # Armed state threshold (%) +KR_ATR_STOP_MULTIPLIER_K=2.0 # ATR multiplier for KR dynamic hard stop +KR_ATR_STOP_MIN_PCT=-2.0 # KR hard stop floor (must tighten, negative) +KR_ATR_STOP_MAX_PCT=-7.0 # KR hard stop ceiling (loosest, negative) + +# Optional — v2 Trade Filters +STOP_LOSS_COOLDOWN_MINUTES=120 # Cooldown after stop-loss before re-entry (same ticker) +US_MIN_PRICE=5.0 # Minimum US stock price for BUY ($) + +# Optional — v3 Session Risk Management +SESSION_RISK_RELOAD_ENABLED=true # 
Reload risk params at session boundaries +SESSION_RISK_PROFILES_JSON="{}" # Per-session overrides JSON: {"KRX_REG": {"be_arm_pct": 1.0}} +OVERNIGHT_EXCEPTION_ENABLED=true # Allow holding through session close (conditions apply) + +# Optional — v3 Blackout (KIS maintenance windows) +ORDER_BLACKOUT_ENABLED=true +ORDER_BLACKOUT_WINDOWS_KST=23:30-00:10 # Comma-separated: "HH:MM-HH:MM" +ORDER_BLACKOUT_QUEUE_MAX=500 # Max queued orders during blackout +BLACKOUT_RECOVERY_PRICE_REVALIDATION_ENABLED=true +BLACKOUT_RECOVERY_MAX_PRICE_DRIFT_PCT=5.0 # Drop recovery order if price drifted >5% + # Optional — Smart Scanner (realtime mode only) RSI_OVERSOLD_THRESHOLD=30 # 0-50, oversold threshold RSI_MOMENTUM_THRESHOLD=70 # 50-100, momentum threshold diff --git a/docs/commands.md b/docs/commands.md index f70a844..a667230 100644 --- a/docs/commands.md +++ b/docs/commands.md @@ -136,7 +136,7 @@ No decorator needed for async tests. # Install all dependencies (production + dev) pip install -e ".[dev]" -# Run full test suite with coverage (551 tests across 25 files) +# Run full test suite with coverage (998 tests across 41 files) pytest -v --cov=src --cov-report=term-missing # Run a single test file @@ -202,6 +202,8 @@ Dashboard runs as a daemon thread on `DASHBOARD_HOST:DASHBOARD_PORT` (default: ` | `GET /api/context/{layer}` | Context data by layer L1-L7 (query: `timeframe`) | | `GET /api/decisions` | Decision log entries (query: `limit`, `market`) | | `GET /api/scenarios/active` | Today's matched scenarios | +| `GET /api/pnl/history` | P&L history over time | +| `GET /api/positions` | Current open positions | ## Telegram Commands diff --git a/docs/ouroboros/60_repo_enforcement_checklist.md b/docs/ouroboros/60_repo_enforcement_checklist.md index 989248c..71bae69 100644 --- a/docs/ouroboros/60_repo_enforcement_checklist.md +++ b/docs/ouroboros/60_repo_enforcement_checklist.md @@ -24,11 +24,17 @@ Updated: 2026-02-27 ## 2) 필수 상태 체크 (필수) 필수 CI 항목: -- `validate_ouroboros_docs` (명령: 
`python3 scripts/validate_ouroboros_docs.py`) -- `test` (명령: `pytest -q`) + +| 참조 기준 | 이름 | 설명 | +|-----------|------|------| +| **job 단위** (브랜치 보호 설정 시 사용) | `test` | 전체 CI job (문서 검증 + 테스트 포함) | +| **step 단위** (로그 확인 시 참조) | `validate_ouroboros_docs` | `python3 scripts/validate_ouroboros_docs.py` 실행 step | +| **step 단위** | `run_tests` | `pytest -q` 실행 step | + +> **주의**: Gitea 브랜치 보호의 Required Status Checks는 **job 이름** 기준으로 설정한다 (`test`). step 이름은 UI 로그 탐색용이며 보호 규칙에 직접 입력하지 않는다. 설정 기준: -- 위 2개 체크가 `success` 아니면 머지 금지 +- `test` job이 `success` 아니면 머지 금지 - 체크 스킵/중립 상태 허용 금지 ## 3) 필수 리뷰어 규칙 (권장 -> 필수) diff --git a/docs/ouroboros/80_implementation_audit.md b/docs/ouroboros/80_implementation_audit.md index cfe6e14..fcf0160 100644 --- a/docs/ouroboros/80_implementation_audit.md +++ b/docs/ouroboros/80_implementation_audit.md @@ -1,14 +1,15 @@ # v2/v3 구현 감사 및 수익률 분석 보고서 작성일: 2026-02-28 +최종 업데이트: 2026-03-01 (Phase 2 완료 + Phase 3 부분 완료 반영) 대상 기간: 2026-02-25 ~ 2026-02-28 (실거래) 분석 브랜치: `feature/v3-session-policy-stream` @@ -29,69 +30,80 @@ Updated: 2026-02-28 | REQ-V2-007 | 비용/슬리피지/체결실패 모델 필수 | `src/analysis/backtest_cost_guard.py` | ✅ 완료 | | REQ-V2-008 | Kill Switch 실행 순서 (Block→Cancel→Refresh→Reduce→Snapshot) | `src/core/kill_switch.py` | ✅ 완료 | -### 1.2 v3 구현 상태: ~75% 완료 +### 1.2 v3 구현 상태: ~85% 완료 (2026-03-01 기준) -| REQ-ID | 요구사항 | 상태 | 갭 설명 | -|--------|----------|------|---------| -| REQ-V3-001 | 모든 신호/주문/로그에 session_id 포함 | ⚠️ 부분 | 아래 GAP-1, GAP-2 참조 | -| REQ-V3-002 | 세션 전환 훅 + 리스크 파라미터 재로딩 | ⚠️ 부분 | 아래 GAP-3 참조 | +| REQ-ID | 요구사항 | 상태 | 비고 | +|--------|----------|------|------| +| REQ-V3-001 | 모든 신호/주문/로그에 session_id 포함 | ✅ 완료 | #326 머지 — `log_decision()` 파라미터 추가, `log_trade()` 명시적 전달 | +| REQ-V3-002 | 세션 전환 훅 + 리스크 파라미터 재로딩 | ⚠️ 부분 | #327 머지 — 재로딩 메커니즘 구현, 세션 훅 테스트 미작성 | | REQ-V3-003 | 블랙아웃 윈도우 정책 | ✅ 완료 | `src/core/blackout_manager.py` | -| REQ-V3-004 | 블랙아웃 큐 + 복구 시 재검증 | ⚠️ 부분 | 아래 GAP-4 참조 (부분 해소) | +| REQ-V3-004 | 블랙아웃 큐 + 복구 시 재검증 | ✅ 완료 | #324(DB 기록) + 
#328(가격/세션 재검증) 머지 | | REQ-V3-005 | 저유동 세션 시장가 금지 | ✅ 완료 | `src/core/order_policy.py` | | REQ-V3-006 | 보수적 백테스트 체결 (불리 방향) | ✅ 완료 | `src/analysis/backtest_execution_model.py` | | REQ-V3-007 | FX 손익 분리 (전략 PnL vs 환율 PnL) | ⚠️ 코드 완료 / 운영 미반영 | `src/db.py` 스키마·함수 완료, 운영 데이터 `fx_pnl` 전부 0 | -| REQ-V3-008 | 오버나잇 예외 vs Kill Switch 우선순위 | ✅ 완료 | `src/main.py:459-471` | +| REQ-V3-008 | 오버나잇 예외 vs Kill Switch 우선순위 | ✅ 완료 | `src/main.py` — `_should_force_exit_for_overnight()`, `_apply_staged_exit_override_for_hold()` | -### 1.3 운영 거버넌스: ~20% 완료 +### 1.3 운영 거버넌스: ~60% 완료 (2026-03-01 재평가) -| REQ-ID | 요구사항 | 상태 | 갭 설명 | -|--------|----------|------|---------| +| REQ-ID | 요구사항 | 상태 | 비고 | +|--------|----------|------|------| | REQ-OPS-001 | 타임존 명시 (KST/UTC) | ⚠️ 부분 | DB 기록은 UTC, 세션은 KST. 일부 로그에서 타임존 미표기 | -| REQ-OPS-002 | 정책 변경 시 레지스트리 업데이트 강제 | ❌ 미구현 | CI 자동 검증 없음 | -| REQ-OPS-003 | TASK-REQ 매핑 강제 | ❌ 미구현 | PR 단위 자동 검증 없음 | +| REQ-OPS-002 | 정책 변경 시 레지스트리 업데이트 강제 | ⚠️ 기본 구현 완료 | `scripts/validate_governance_assets.py` CI 연동 완료; 규칙 고도화 잔여 | +| REQ-OPS-003 | TASK-REQ 매핑 강제 | ⚠️ 기본 구현 완료 | `scripts/validate_ouroboros_docs.py` CI 연동 완료; PR 강제 검증 강화 잔여 | --- ## 2. 구현 갭 상세 -### GAP-1: DecisionLogger에 session_id 미포함 (CRITICAL) +> **2026-03-01 업데이트**: GAP-1~5 모두 해소되었거나 이슈 머지로 부분 해소됨. 
-- **위치**: `src/logging/decision_logger.py:40` -- **문제**: `log_decision()` 함수에 `session_id` 파라미터가 없음 -- **영향**: 어떤 세션에서 전략적 의사결정이 내려졌는지 추적 불가 +### GAP-1: DecisionLogger에 session_id 미포함 → ✅ 해소 (#326) + +- **위치**: `src/logging/decision_logger.py` +- ~~문제: `log_decision()` 함수에 `session_id` 파라미터가 없음~~ +- **해소**: #326 머지 — `log_decision()` 파라미터에 `session_id` 추가, DB 기록 포함 - **요구사항**: REQ-V3-001 -### GAP-2: src/main.py 거래 로그에 session_id 미전달 (CRITICAL) +### GAP-2: src/main.py 거래 로그에 session_id 미전달 → ✅ 해소 (#326) -- **위치**: `src/main.py` line 1625, 1682, 2769 -- **문제**: `log_trade()` 호출 시 `session_id` 파라미터를 전달하지 않음 -- **현상**: 시장 코드 기반 자동 추론에 의존 → 실제 런타임 세션과 불일치 가능 +- **위치**: `src/main.py` +- ~~문제: `log_trade()` 호출 시 `session_id` 파라미터를 전달하지 않음~~ +- **해소**: #326 머지 — `log_trade()` 호출 시 런타임 `session_id` 명시적 전달 - **요구사항**: REQ-V3-001 -### GAP-3: 세션 전환 시 리스크 파라미터 재로딩 없음 (HIGH) +### GAP-3: 세션 전환 시 리스크 파라미터 재로딩 없음 → ⚠️ 부분 해소 (#327) -- **위치**: `src/main.py` 전체 -- **문제**: 리스크 파라미터가 시작 시 한 번만 로딩되고, 세션 경계 변경 시 재로딩 메커니즘 없음 -- **영향**: NXT_AFTER(저유동) → KRX_REG(정규장) 전환 시에도 동일 파라미터 사용 +- **위치**: `src/main.py`, `src/config.py` +- **해소 내용**: #327 머지 — `SESSION_RISK_PROFILES_JSON` 기반 세션별 파라미터 재로딩 메커니즘 구현 + - `SESSION_RISK_RELOAD_ENABLED=true` 시 세션 경계에서 파라미터 재로딩 + - 재로딩 실패 시 기존 파라미터 유지 (안전 폴백) +- **잔여 갭**: 세션 경계 실시간 전환 E2E 통합 테스트 보강 필요 (`test_main.py`에 설정 오버라이드/폴백 단위 테스트는 존재) - **요구사항**: REQ-V3-002 -### GAP-4: 블랙아웃 복구 시 재검증 부분 해소, DB 기록 미구현 (HIGH) +### GAP-4: 블랙아웃 복구 DB 기록 + 재검증 → ✅ 해소 (#324, #328) -- **위치**: `src/core/blackout_manager.py:89-96`, `src/main.py:694-791` -- **상태**: `pop_recovery_batch()` 자체는 단순 dequeue이나, 실행 경로에서 부분 재검증 수행: - - stale BUY 드롭 (포지션 이미 존재 시) — `src/main.py:713-720` - - stale SELL 드롭 (포지션 부재 시) — `src/main.py:721-727` - - `validate_order_policy()` 호출 — `src/main.py:729-734` -- **잔여 갭**: 가격 유효성(시세 변동), 세션 변경에 따른 파라미터 재적용은 미구현 -- **신규 발견**: 블랙아웃 복구 주문이 `log_trade()` 없이 실행되어 거래 DB에 기록되지 않음 → 성과 리포트 불일치 유발 +- **위치**: `src/core/blackout_manager.py`, `src/main.py` +- **해소 
내용**: + - #324 머지 — 복구 주문 실행 후 `log_trade()` 호출, rationale에 `[blackout-recovery]` prefix + - #328 머지 — 가격 유효성 검증 (진입가 대비 급변 시 드롭), 세션 변경 시 새 파라미터로 재검증 - **요구사항**: REQ-V3-004 -### GAP-5: 시간장벽이 봉 개수 고정 (MEDIUM) +### GAP-5: 시간장벽이 봉 개수 고정 → ✅ 해소 (#329) -- **위치**: `src/analysis/triple_barrier.py:19` -- **문제**: `max_holding_bars` (고정 봉 수) 사용, v3 계획의 `max_holding_minutes` (캘린더 시간) 미반영 +- **위치**: `src/analysis/triple_barrier.py` +- ~~문제: `max_holding_bars` (고정 봉 수) 사용~~ +- **해소**: #329 머지 — `max_holding_minutes` (캘린더 분) 기반 시간장벽 전환 + - 봉 주기 무관하게 일정 시간 경과 시 장벽 도달 + - `max_holding_bars` deprecated 경고 유지 (하위 호환) - **요구사항**: REQ-V2-005 / v3 확장 +### GAP-6 (신규): FX PnL 운영 미활성 (LOW — 코드 완료) + +- **위치**: `src/db.py` (`fx_pnl`, `strategy_pnl` 컬럼 존재) +- **문제**: 스키마와 함수는 완료되었으나 운영 데이터에서 `fx_pnl` 전부 0 +- **영향**: USD 거래에서 환율 손익과 전략 손익이 분리되지 않아 성과 분석 부정확 +- **요구사항**: REQ-V3-007 + --- ## 3. 실거래 수익률 분석 @@ -244,18 +256,25 @@ Updated: 2026-02-28 - **문제**: 중첩 `def evaluate` 정의 (들여쓰기 오류) - **영향**: 런타임 실패 → 기본 전략으로 폴백 → 진화 시스템 사실상 무효 -### ROOT-5: v2 청산 로직이 부분 통합되었으나 실효성 부족 (HIGH) +### ROOT-5: v2 청산 로직이 부분 통합되었으나 실효성 부족 → ⚠️ 부분 해소 (#325) -- **현재 상태**: `src/main.py:500-583`에서 `evaluate_exit()` 기반 staged exit override가 동작함 - - 상태기계(HOLDING→BE_LOCK→ARMED→EXITED) 전이 구현 - - 4중 청산(hard stop, BE lock threat, ATR trailing, model/liquidity exit) 평가 -- **실효성 문제**: - - `hard_stop_pct`에 고정 `-2.0`이 기본값으로 들어가 v2 계획의 ATR 적응형 의도와 괴리 - - `be_arm_pct`/`arm_pct`가 playbook의 `take_profit_pct`에서 기계적 파생(`* 0.4`)되어 v2 계획의 독립 파라미터 튜닝 불가 - - `atr_value`, `pred_down_prob` 등 런타임 피처가 대부분 0.0으로 들어와 사실상 hard stop만 발동 -- **결론**: 코드 통합은 되었으나, 피처 공급과 파라미터 설정이 미비하여 v2 설계 가치가 실현되지 않는 상태 +**초기 진단 (2026-02-28 감사 기준):** +- `hard_stop_pct`에 고정 `-2.0`이 기본값으로 들어가 v2 계획의 ATR 적응형 의도와 괴리 +- `be_arm_pct`/`arm_pct`가 playbook의 `take_profit_pct`에서 기계적 파생(`* 0.4`)되어 v2 계획의 독립 파라미터 튜닝 불가 +- `atr_value`, `pred_down_prob` 등 런타임 피처가 0.0으로 공급되어 사실상 hard stop만 발동 -### ROOT-6: SELL 손익 계산이 부분청산/수량 불일치에 취약 (CRITICAL) +**현재 상태 (#325 머지 후):** +- 
`STAGED_EXIT_BE_ARM_PCT`, `STAGED_EXIT_ARM_PCT` 환경변수로 독립 파라미터 설정 가능 +- `_inject_staged_exit_features()`: KR 시장 ATR 실시간 계산 주입, RSI 기반 `pred_down_prob` 공급 +- KR ATR dynamic hard stop (#318)으로 `-2.0` 고정값 문제 해소 + +**잔여 리스크:** +- KR 외 시장(US 등)에서 `atr_value` 공급 경로 불완전 — hard stop 편향 잔존 가능 +- `pred_down_prob`가 RSI 프록시 수준 — 추후 실제 ML 모델 대체 권장 + +### ROOT-6: SELL 손익 계산이 부분청산/수량 불일치에 취약 (CRITICAL) → ✅ 해소 (#322) + +> **현재 상태**: #322 머지로 해소됨. 아래는 원인 발견 시점(2026-02-28) 진단 기록. - **위치**: `src/main.py:1658-1663`, `src/main.py:2755-2760` - **문제**: PnL 계산이 실제 매도 수량(`sell_qty`)이 아닌 직전 BUY의 `buy_qty`를 사용 @@ -263,7 +282,9 @@ Updated: 2026-02-28 - **영향**: 부분청산, 역분할/액분할, startup-sync 후 수량 드리프트 시 손익 과대/과소 계상 - **실증**: CRCA 이상치(BUY 146주 → SELL 15주에서 PnL +4,612 USD) 가 이 버그와 정합 -### ROOT-7: BUY 매칭 키에 exchange_code 미포함 — 잠재 오매칭 리스크 (HIGH) +### ROOT-7: BUY 매칭 키에 exchange_code 미포함 — 잠재 오매칭 리스크 (HIGH) → ✅ 해소 (#323) + +> **현재 상태**: #323 머지로 해소됨. 아래는 원인 발견 시점(2026-02-28) 진단 기록. - **위치**: `src/db.py:292-313` - **문제**: `get_latest_buy_trade()`가 `(stock_code, market)`만으로 매칭, `exchange_code` 미사용 @@ -283,17 +304,28 @@ Updated: 2026-02-28 | P1 | US 최소 가격 필터: $5 이하 종목 진입 차단 | 페니스탁 대폭락 방지 | 낮음 | | P1 | 진화 전략 코드 생성 시 syntax 검증 추가 | 진화 시스템 정상화 | 낮음 | -### 5.2 구조적 개선 (아키텍처 변경) +### 5.2 구조적 개선 현황 (2026-03-01 기준) -| 우선순위 | 방안 | 예상 효과 | 난이도 | -|----------|------|-----------|--------| -| **P0** | **SELL PnL 계산을 sell_qty 기준으로 수정 (ROOT-6)** | 손익 계상 정확도 확보, 이상치 제거 | 낮음 | -| **P0** | **v2 staged exit에 실제 피처 공급 (atr_value, pred_down_prob) + 독립 파라미터 설정 (ROOT-5)** | v2 설계 가치 실현, 수익 보호 | 중간 | -| P0 | BUY 매칭 키에 exchange_code 추가 (ROOT-7) | 오매칭 방지 | 낮음 | -| P0 | 블랙아웃 복구 주문에 `log_trade()` 추가 (GAP-4) | DB/성과 리포트 정합성 | 낮음 | -| P1 | 세션 전환 시 리스크 파라미터 동적 재로딩 (GAP-3 해소) | 세션별 최적 파라미터 적용 | 중간 | -| P1 | session_id를 거래 로그/의사결정 로그에 명시적 전달 (GAP-1,2 해소) | 세션별 성과 분석 가능 | 낮음 | -| P2 | 블랙아웃 복구 시 가격/세션 재검증 강화 (GAP-4 잔여) | 세션 변경 후 무효 주문 방지 | 중간 | +**완료 항목 (모니터링 단계):** + +| 항목 | 이슈 | 상태 | +|------|------|------| +| SELL PnL 계산을 sell_qty 기준으로 
수정 (ROOT-6) | #322 | ✅ 머지 | +| v2 staged exit 피처 공급 + 독립 파라미터 설정 (ROOT-5) | #325 | ✅ 머지 | +| BUY 매칭 키에 exchange_code 추가 (ROOT-7) | #323 | ✅ 머지 | +| 블랙아웃 복구 주문 `log_trade()` 추가 (GAP-4) | #324 | ✅ 머지 | +| 세션 전환 리스크 파라미터 동적 재로딩 (GAP-3) | #327 | ✅ 머지 | +| session_id 거래/의사결정 로그 명시 전달 (GAP-1, GAP-2) | #326 | ✅ 머지 | +| 블랙아웃 복구 가격/세션 재검증 강화 (GAP-4 잔여) | #328 | ✅ 머지 | + +**잔여 개선 항목:** + +| 우선순위 | 방안 | 난이도 | +|----------|------|--------| +| P1 | US 시장 ATR 공급 경로 완성 (ROOT-5 잔여) | 중간 | +| P1 | FX PnL 운영 활성화 (REQ-V3-007) | 낮음 | +| P2 | pred_down_prob ML 모델 대체 (ROOT-5 잔여) | 높음 | +| P2 | 세션 경계 E2E 통합 테스트 보강 (GAP-3 잔여) | 낮음 | ### 5.3 권장 실행 순서 @@ -334,14 +366,26 @@ Phase 3 (중기): v3 세션 최적화 - ✅ 블랙아웃 복구 후 유효 intent 실행 (`tests/test_main.py:5811`) - ✅ 블랙아웃 복구 후 정책 거부 intent 드롭 (`tests/test_main.py:5851`) -### 테스트 미존재 +### 테스트 추가됨 (Phase 1~3, 2026-03-01) -- ❌ 세션 전환 훅 콜백 -- ❌ 세션 경계 리스크 파라미터 재로딩 -- ❌ DecisionLogger session_id 캡처 +- ✅ KR ATR 기반 동적 hard stop (`test_main.py` — #318) +- ✅ 재진입 쿨다운 (손절 후 동일 종목 매수 차단) (`test_main.py` — #319) +- ✅ US 최소 가격 필터 ($5 이하 차단) (`test_main.py` — #320) +- ✅ 진화 전략 syntax 검증 (`test_evolution.py` — #321) +- ✅ SELL PnL sell_qty 기준 계산 (`test_main.py` — #322) +- ✅ BUY 매칭 키 exchange_code 포함 (`test_db.py` — #323) +- ✅ 블랙아웃 복구 주문 DB 기록 (`test_main.py` — #324) +- ✅ staged exit에 실제 ATR/RSI 피처 공급 (`test_main.py` — #325) +- ✅ session_id 거래/의사결정 로그 명시적 전달 (`test_main.py`, `test_decision_logger.py` — #326) +- ✅ 블랙아웃 복구 후 유효 intent 실행 (`tests/test_main.py:5811`) +- ✅ 블랙아웃 복구 후 정책 거부 intent 드롭 (`tests/test_main.py:5851`) + +### 테스트 미존재 (잔여) + +- ❌ 세션 전환 훅 콜백 (GAP-3 잔여) +- ❌ 세션 경계 리스크 파라미터 재로딩 단위 테스트 (GAP-3 잔여) - ❌ 실거래 경로 ↔ v2 상태기계 통합 테스트 (피처 공급 포함) -- ❌ 블랙아웃 복구 주문의 DB 기록 검증 -- ❌ SELL PnL 계산 시 수량 불일치 케이스 +- ❌ FX PnL 운영 활성화 검증 (GAP-6) --- diff --git a/docs/ouroboros/85_loss_recovery_action_plan.md b/docs/ouroboros/85_loss_recovery_action_plan.md index 6004955..f2d82ac 100644 --- a/docs/ouroboros/85_loss_recovery_action_plan.md +++ 
b/docs/ouroboros/85_loss_recovery_action_plan.md @@ -1,16 +1,19 @@ # 손실 복구 실행 계획 작성일: 2026-02-28 +최종 업데이트: 2026-03-01 (Phase 1~3 완료 상태 반영) 기반 문서: [80_implementation_audit.md](./80_implementation_audit.md) (ROOT 7개 + GAP 5개) +> **2026-03-01 현황**: Phase 1 ✅ 완료, Phase 2 ✅ 완료, Phase 3 ✅ 기본 완료 (ACT-13 고도화 잔여) + --- ## 1. 요약 @@ -35,13 +38,13 @@ Updated: 2026-02-28 ## 2. Phase별 작업 분해 -### Phase 1: 즉시 — 손실 출혈 차단 +### Phase 1: 즉시 — 손실 출혈 차단 ✅ 완료 가장 큰 손실 패턴(노이즈 손절, 반복 매매, 페니스탁)을 즉시 제거한다. --- -#### ACT-01: KR 손절선 ATR 기반 동적 확대 +#### ACT-01: KR 손절선 ATR 기반 동적 확대 ✅ 머지 - **ROOT 참조**: ROOT-1 (hard_stop_pct -2%가 KR 소형주 변동성 대비 과소) - **Gitea 이슈**: feat: KR 손절선 ATR 기반 동적 확대 (-2% → ATR 적응형) @@ -60,7 +63,7 @@ Updated: 2026-02-28 --- -#### ACT-02: 손절 후 동일 종목 재진입 쿨다운 +#### ACT-02: 손절 후 동일 종목 재진입 쿨다운 ✅ 머지 - **ROOT 참조**: ROOT-2 (동일 종목 반복 매매) - **Gitea 이슈**: feat: 손절 후 동일 종목 재진입 쿨다운 (1~2시간) @@ -79,7 +82,7 @@ Updated: 2026-02-28 --- -#### ACT-03: US $5 이하 종목 진입 차단 필터 +#### ACT-03: US $5 이하 종목 진입 차단 필터 ✅ 머지 - **ROOT 참조**: ROOT-3 (미국 페니스탁 무분별 진입) - **Gitea 이슈**: feat: US $5 이하 종목 진입 차단 필터 @@ -97,7 +100,7 @@ Updated: 2026-02-28 --- -#### ACT-04: 진화 전략 코드 생성 시 syntax 검증 추가 +#### ACT-04: 진화 전략 코드 생성 시 syntax 검증 추가 ✅ 머지 - **ROOT 참조**: ROOT-4 (진화 전략 문법 오류) - **Gitea 이슈**: fix: 진화 전략 코드 생성 시 syntax 검증 추가 @@ -116,13 +119,13 @@ Updated: 2026-02-28 --- -### Phase 2: 단기 — 데이터 정합성 + v2 실효화 +### Phase 2: 단기 — 데이터 정합성 + v2 실효화 ✅ 완료 손익 계산 정확도를 확보하고, v2 청산 로직을 실효화한다. 
--- -#### ACT-05: SELL PnL 계산을 sell_qty 기준으로 수정 +#### ACT-05: SELL PnL 계산을 sell_qty 기준으로 수정 ✅ 머지 - **ROOT 참조**: ROOT-6 (CRITICAL — PnL 계산이 buy_qty 사용) - **Gitea 이슈**: fix(critical): SELL PnL 계산을 sell_qty 기준으로 수정 @@ -141,7 +144,7 @@ Updated: 2026-02-28 --- -#### ACT-06: BUY 매칭 키에 exchange_code 추가 +#### ACT-06: BUY 매칭 키에 exchange_code 추가 ✅ 머지 - **ROOT 참조**: ROOT-7 (BUY 매칭 키에 exchange_code 미포함) - **Gitea 이슈**: fix: BUY 매칭 키에 exchange_code 추가 @@ -159,12 +162,12 @@ Updated: 2026-02-28 --- -#### ACT-07: 블랙아웃 복구 주문에 log_trade() 추가 +#### ACT-07: 블랙아웃 복구 주문에 log_trade() 추가 ✅ 머지 - **ROOT 참조**: GAP-4 (블랙아웃 복구 주문 DB 미기록) - **Gitea 이슈**: fix: 블랙아웃 복구 주문에 log_trade() 추가 - **Gitea 이슈 번호**: #324 -- **변경 대상 파일**: `src/main.py` (line 694-791, 블랙아웃 복구 실행 경로) +- **변경 대상 파일**: `src/main.py` — `process_blackout_recovery_orders()` 함수 내 복구 주문 실행 경로 - **현재 동작**: 블랙아웃 복구 주문이 실행되나 `log_trade()` 호출 없음 → DB에 기록 안 됨 - **목표 동작**: 복구 주문 실행 후 `log_trade()` 호출하여 DB에 기록. rationale에 `[blackout-recovery]` prefix 추가 - **수용 기준**: @@ -178,7 +181,7 @@ Updated: 2026-02-28 --- -#### ACT-08: v2 staged exit에 실제 피처 공급 +#### ACT-08: v2 staged exit에 실제 피처 공급 ✅ 머지 - **ROOT 참조**: ROOT-5 (v2 청산 로직 실효성 부족) - **Gitea 이슈**: feat: v2 staged exit에 실제 피처(ATR, pred_down_prob) 공급 @@ -200,7 +203,7 @@ Updated: 2026-02-28 --- -#### ACT-09: session_id를 거래/의사결정 로그에 명시적 전달 +#### ACT-09: session_id를 거래/의사결정 로그에 명시적 전달 ✅ 머지 - **ROOT 참조**: GAP-1 (DecisionLogger session_id 미포함), GAP-2 (log_trade session_id 미전달) - **Gitea 이슈**: feat: session_id를 거래/의사결정 로그에 명시적 전달 @@ -223,13 +226,13 @@ Updated: 2026-02-28 --- -### Phase 3: 중기 — v3 세션 최적화 +### Phase 3: 중기 — v3 세션 최적화 ✅ 기본 완료 (ACT-13 고도화 잔여) 세션 경계 처리와 운영 거버넌스를 강화한다. 
--- -#### ACT-10: 세션 전환 시 리스크 파라미터 동적 재로딩 +#### ACT-10: 세션 전환 시 리스크 파라미터 동적 재로딩 ✅ 머지 - **ROOT 참조**: GAP-3 (세션 전환 시 리스크 파라미터 재로딩 없음) - **Gitea 이슈**: feat: 세션 전환 시 리스크 파라미터 동적 재로딩 @@ -241,14 +244,12 @@ Updated: 2026-02-28 - NXT_AFTER → KRX_REG 전환 시 파라미터 재로딩 확인 - 재로딩 이벤트 로그 기록 - 재로딩 실패 시 기존 파라미터 유지 (안전 폴백) -- **테스트 계획**: - - 단위: 세션 전환 훅 콜백 테스트 - - 단위: 재로딩 실패 시 폴백 테스트 +- **테스트**: `test_main.py`에 설정 오버라이드/리로드/폴백 단위 테스트 포함. **잔여**: 세션 경계 실시간 전환 E2E 보강 - **의존성**: ACT-09 (session_id 인프라) --- -#### ACT-11: 블랙아웃 복구 시 가격/세션 재검증 강화 +#### ACT-11: 블랙아웃 복구 시 가격/세션 재검증 강화 ✅ 머지 - **ROOT 참조**: GAP-4 잔여 (가격 유효성, 세션 변경 재적용 미구현) - **Gitea 이슈**: feat: 블랙아웃 복구 시 가격/세션 재검증 강화 @@ -268,7 +269,7 @@ Updated: 2026-02-28 --- -#### ACT-12: Triple Barrier 시간장벽을 캘린더 시간(분) 기반으로 전환 +#### ACT-12: Triple Barrier 시간장벽을 캘린더 시간(분) 기반으로 전환 ✅ 머지 - **ROOT 참조**: GAP-5 (시간장벽이 봉 개수 고정) - **Gitea 이슈**: feat: Triple Barrier 시간장벽을 캘린더 시간(분) 기반으로 전환 @@ -286,21 +287,13 @@ Updated: 2026-02-28 --- -#### ACT-13: CI 자동 검증 (정책 레지스트리 + TASK-REQ 매핑) +#### ACT-13: CI 자동 검증 (정책 레지스트리 + TASK-REQ 매핑) ✅ 기본 구현 완료, 고도화 잔여 - **ROOT 참조**: REQ-OPS-002 (정책 변경 시 레지스트리 업데이트 강제), REQ-OPS-003 (TASK-REQ 매핑 강제) - **Gitea 이슈**: infra: CI 자동 검증 (정책 레지스트리 + TASK-REQ 매핑) - **Gitea 이슈 번호**: #330 -- **변경 대상 파일**: `.gitea/workflows/`, `scripts/validate_governance_assets.py` -- **현재 동작**: CI 자동 검증 없음. 
문서 검증은 수동 실행 -- **목표 동작**: - - PR 시 정책 레지스트리(`01_requirements_registry.md`) 변경 여부 자동 검증 - - TASK/이슈가 REQ-ID를 참조하는지 자동 검증 -- **수용 기준**: - - 정책 파일 변경 시 레지스트리 미업데이트면 CI 실패 - - 새 이슈/PR에 REQ-ID 미참조 시 경고 -- **테스트 계획**: - - CI 파이프라인 자체 테스트 (정상/실패 케이스) +- **현재 동작**: `.gitea/workflows/ci.yml`에서 `scripts/validate_governance_assets.py` + `scripts/validate_ouroboros_docs.py` 자동 실행 +- **잔여 고도화**: PR 본문 REQ/TASK/TEST 강제 레벨 상향, 정책 파일 미업데이트 시 CI 실패 기준 강화 - **의존성**: 없음 --- @@ -311,7 +304,7 @@ Updated: 2026-02-28 - 모든 ACT 항목에 대해 개별 테스트 작성 - 커버리지 >= 80% 유지 -- 기존 551개 테스트 전체 통과 확인 +- 현재 CI 기준 전체 테스트 통과 확인 (2026-03-01 기준 998 tests collected) ### 3.2 통합 테스트 @@ -389,4 +382,36 @@ Phase 3 --- +## 6. 미진 사항 (2026-03-01 기준) + +Phase 1~3 구현 완료 후에도 다음 항목이 운영상 미완료 상태이다. + +### 6.1 운영 검증 필요 + +| 항목 | 설명 | 우선순위 | +|------|------|----------| +| FX PnL 운영 활성화 | `fx_pnl`/`strategy_pnl` 컬럼 존재하나 모든 운영 데이터 값이 0 | P1 | +| 세션 경계 E2E 통합 테스트 보강 | `test_main.py`에 단위 테스트 존재; 세션 경계 실시간 전환 E2E 미작성 | P2 | +| v2 상태기계 통합 end-to-end | 실거래 경로에서 HOLDING→BE_LOCK→ARMED→EXITED 전체 시나리오 테스트 미작성 | P2 | + +### 6.2 아키텍처 수준 잔여 갭 + +| 항목 | 설명 | 배경 문서 | +|------|------|-----------| +| CI 자동 검증 고도화 (#330) | 기본 구현 완료(`validate_governance_assets.py` CI 연동); 규칙/강제수준 고도화 필요 | REQ-OPS-002, REQ-OPS-003 | +| pred_down_prob ML 모델 대체 | 현재 RSI 프록시 사용 — 추후 실제 GBDT/ML 모델로 대체 권장 | ROOT-5, ouroboros_plan_v2.txt §3.D | +| KR/US 파라미터 민감도 분석 | v2 계획의 be_arm_pct/arm_pct/atr_k 최적값 탐색 미수행 | ouroboros_plan_v2.txt §8 | + +### 6.3 v3 실험 매트릭스 미착수 + +ouroboros_plan_v3.txt §9에 정의된 3개 실험이 아직 시작되지 않았다. + +| 실험 ID | 시장 | 포커스 | 상태 | +|---------|------|--------|------| +| EXP-KR-01 | KR | NXT 야간 특화 (p_thresh 0.65) | ❌ 미착수 | +| EXP-US-01 | US | 21h 준연속 운용 (atr_k 2.5) | ❌ 미착수 | +| EXP-HYB-01 | Global | KR 낮 + US 밤 연계 레짐 자산배분 | ❌ 미착수 | + +--- + *끝.* diff --git a/docs/ouroboros/README.md b/docs/ouroboros/README.md index 6e53e6c..13a8292 100644 --- a/docs/ouroboros/README.md +++ b/docs/ouroboros/README.md @@ -18,13 +18,15 @@ Updated: 2026-02-26 4. 
v3 실행 지시서: [20_phase_v3_execution.md](./20_phase_v3_execution.md) 5. 코드 레벨 작업 지시: [30_code_level_work_orders.md](./30_code_level_work_orders.md) 6. 수용 기준/테스트 계획: [40_acceptance_and_test_plan.md](./40_acceptance_and_test_plan.md) -7. PM 시나리오/이슈 분류: [50_scenario_matrix_and_issue_taxonomy.md](./50_scenario_matrix_and_issue_taxonomy.md) -8. TPM 제어 프로토콜/수용 매트릭스: [50_tpm_control_protocol.md](./50_tpm_control_protocol.md) +7. PM 시나리오/이슈 분류 **(A)**: [50_scenario_matrix_and_issue_taxonomy.md](./50_scenario_matrix_and_issue_taxonomy.md) +8. TPM 제어 프로토콜/수용 매트릭스 **(B)**: [50_tpm_control_protocol.md](./50_tpm_control_protocol.md) 9. 저장소 강제 설정 체크리스트: [60_repo_enforcement_checklist.md](./60_repo_enforcement_checklist.md) 10. 메인 에이전트 아이디에이션 백로그: [70_main_agent_ideation.md](./70_main_agent_ideation.md) 11. v2/v3 구현 감사 및 수익률 분석: [80_implementation_audit.md](./80_implementation_audit.md) 12. 손실 복구 실행 계획: [85_loss_recovery_action_plan.md](./85_loss_recovery_action_plan.md) +> **참고**: 7번·8번은 `50_` 프리픽스를 공유합니다. (A) = 시나리오/이슈 분류, (B) = TPM 제어 프로토콜. + ## 운영 규칙 - 계획 변경은 반드시 `01_requirements_registry.md`의 ID 정의부터 수정한다. diff --git a/docs/requirements-log.md b/docs/requirements-log.md index bd18e04..19daeee 100644 --- a/docs/requirements-log.md +++ b/docs/requirements-log.md @@ -87,7 +87,7 @@ - 선정 기준 추적 → Evolution 시스템 최적화 가능 - API 장애 시 정적 watchlist로 자동 전환 -**참고:** Realtime 모드 전용. Daily 모드는 배치 효율성을 위해 정적 watchlist 사용. +**참고 (당시 구현 기준):** Realtime 모드 전용으로 설계되었으나, 이후 Daily 경로에서도 스캐너를 사용하도록 변경됨. 해외 fallback도 정적 watchlist → 동적 유니버스(active/recent/holdings)로 전환 (2026-02-16 참조). **이슈/PR:** #76, #77 @@ -388,3 +388,126 @@ Order result: 모의투자 매수주문이 완료 되었습니다. ✓ - `ruff check src/analysis/backtest_pipeline.py tests/test_backtest_pipeline_integration.py` **이슈/PR:** #305 + +--- + +## 2026-02-28 ~ 2026-03-01 + +### v2/v3 손실 복구 실행 계획 — Phase 1 완료 (#318~#321) + +**배경:** +- `docs/ouroboros/80_implementation_audit.md` 감사 결과 식별된 7개 근본 원인(ROOT) 및 5개 구현 갭(GAP) 중 + 가장 큰 손실 패턴 4개를 Phase 1로 즉시 제거. + +**구현 내용:** + +1. 
**ACT-01: KR 손절선 ATR 기반 동적 확대** (#318) + - `src/main.py`, `src/config.py` + - KR 시장: ATR(14) 기반 동적 hard stop (`k=2.0`, 범위 -2%~-7%) + - ATR 미제공 시 기존 -2% 폴백 + - ROOT-1 (hard_stop_pct 고정값 과소) 해소 + +2. **ACT-02: 손절 후 동일 종목 재진입 쿨다운** (#319) + - `src/main.py`, `src/config.py` + - 손절(pnl<0) 후 동일 종목 `COOLDOWN_MINUTES`(기본 120분) 동안 BUY 차단 + - 익절에는 미적용 + - ROOT-2 (동일 종목 반복 매매) 해소 + +3. **ACT-03: US $5 이하 종목 진입 차단 필터** (#320) + - `src/main.py`, `src/config.py` + - US 시장 BUY 시 현재가 `US_MIN_PRICE`(기본 $5) 이하 차단 + - ROOT-3 (미국 페니스탁 무분별 진입) 해소 + +4. **ACT-04: 진화 전략 코드 syntax 검증** (#321) + - `src/evolution/optimizer.py` + - `ast.parse()` + `compile()` 선검증 후 통과한 코드만 저장 + - ROOT-4 (진화 전략 문법 오류) 해소 + +**이슈/PR:** #318, #319, #320, #321 + +--- + +### v2/v3 손실 복구 실행 계획 — Phase 2 완료 (#322~#326) + +**배경:** +- 손익 계산 정확도 확보 및 v2 청산 로직 실효화. + +**구현 내용:** + +1. **ACT-05: SELL PnL 계산을 sell_qty 기준으로 수정** (#322) + - `src/main.py` (line 1658-1663, 2755-2760) + - `trade_pnl = (trade_price - buy_price) * sell_qty`로 변경 + - ROOT-6 (PnL 계산 buy_qty 사용 CRITICAL) 해소 + +2. **ACT-06: BUY 매칭 키에 exchange_code 추가** (#323) + - `src/db.py` + - `get_latest_buy_trade()`가 `(stock_code, market, exchange_code)` 기준 매칭 + - exchange_code NULL인 레거시 데이터 하위 호환 유지 + - ROOT-7 (오매칭 리스크) 해소 + +3. **ACT-07: 블랙아웃 복구 주문에 log_trade() 추가** (#324) + - `src/main.py` (블랙아웃 복구 실행 경로) + - 복구 주문 실행 후 `log_trade()` 호출, rationale에 `[blackout-recovery]` prefix + - GAP-4 (블랙아웃 복구 주문 DB 미기록) 해소 + +4. **ACT-08: v2 staged exit에 실제 피처 공급** (#325) + - `src/main.py`, `src/strategy/exit_rules.py` + - `atr_value`: ATR(14) 실시간 계산 공급 + - `pred_down_prob`: RSI 기반 하락 확률 추정값 공급 (ML 모델 대체 가능) + - `be_arm_pct`/`arm_pct` 독립 파라미터 설정 가능 (take_profit_pct * 0.4 파생 제거) + - ROOT-5 (v2 청산 로직 실효성 부족) 해소 + +5. 
**ACT-09: session_id를 거래/의사결정 로그에 명시적 전달** (#326) + - `src/logging/decision_logger.py`, `src/main.py`, `src/db.py` + - `log_decision()`: session_id 파라미터 추가 + - `log_trade()`: 런타임 session_id 명시적 전달 + - GAP-1, GAP-2 (session_id 미포함) 부분 해소 + +**이슈/PR:** #322, #323, #324, #325, #326 + +--- + +### v2/v3 손실 복구 실행 계획 — Phase 3 부분 완료 (#327~#329) + +**배경:** +- 세션 경계 처리 및 시간장벽 캘린더 기반 전환. + +**구현 내용:** + +1. **ACT-10: 세션 전환 시 리스크 파라미터 동적 재로딩** (#327) + - `src/main.py`, `src/config.py` + - 세션 경계 변경 이벤트 시 `SESSION_RISK_PROFILES_JSON` 기반 재로딩 + - 재로딩 실패 시 기존 파라미터 유지 (안전 폴백) + - GAP-3 (세션 전환 시 파라미터 재로딩 없음) 부분 해소 + +2. **ACT-11: 블랙아웃 복구 시 가격/세션 재검증 강화** (#328) + - `src/main.py`, `src/core/blackout_manager.py` + - 복구 시 현재 시세 조회하여 가격 유효성 검증 (진입가 대비 급등/급락 시 드롭) + - 세션 변경 시 새 세션의 파라미터로 재검증 + - GAP-4 잔여 (가격/세션 재검증) 부분 해소 + +3. **ACT-12: Triple Barrier 시간장벽을 캘린더 시간(분) 기반으로 전환** (#329) + - `src/analysis/triple_barrier.py` + - `max_holding_minutes` (캘린더 분) 기반 전환, 봉 주기 무관 일관 동작 + - 기존 `max_holding_bars` deprecated 경고 유지 (하위 호환) + - GAP-5 (시간장벽 봉 개수 고정) 해소 + +**미완료 (ACT-13):** +- **#330: CI 자동 검증 (정책 레지스트리 + TASK-REQ 매핑)** — 문서 구조화 작업으로 대체 진행 중 + +**이슈/PR:** #327, #328, #329 + +--- + +### v2/v3 문서 구조화 및 감사 문서 작성 (#331) + +**배경:** +- Phase 1~3 구현 완료 후 감사 결과와 실행 계획을 문서화 +- 기존 감사 문서가 산발적으로 관리되어 통합 정리 필요 + +**구현 내용:** +- `docs/ouroboros/80_implementation_audit.md` 신규 작성: v2/v3 구현 감사 + 실거래 수익률 분석 +- `docs/ouroboros/85_loss_recovery_action_plan.md` 신규 작성: ROOT/GAP 해소 Phase별 실행 계획 +- `scripts/audit_queries.sql` 신규 작성: 성과 재현용 표준 집계 SQL + +**이슈/PR:** #331 diff --git a/docs/testing.md b/docs/testing.md index 83aa6af..e385697 100644 --- a/docs/testing.md +++ b/docs/testing.md @@ -2,7 +2,7 @@ ## Test Structure -**551 tests** across **25 files**. `asyncio_mode = "auto"` in pyproject.toml — async tests need no special decorator. +**998 tests** across **41 files**. `asyncio_mode = "auto"` in pyproject.toml — async tests need no special decorator. 
The `settings` fixture in `conftest.py` provides safe defaults with test credentials and in-memory DB. @@ -23,6 +23,8 @@ The `settings` fixture in `conftest.py` provides safe defaults with test credent - Network error handling - SSL context configuration +> **Note**: 아래 파일별 테스트 수는 릴리즈 시점 스냅샷이며 실제 수치와 다를 수 있습니다. 현재 정확한 수치는 `pytest --collect-only -q`로 확인하세요. + ##### `tests/test_brain.py` (24 tests) - Valid JSON parsing and markdown-wrapped JSON handling - Malformed JSON fallback @@ -90,7 +92,7 @@ The `settings` fixture in `conftest.py` provides safe defaults with test credent - Python-first filtering pipeline - RSI and volume ratio filter logic - Candidate scoring and ranking -- Fallback to static watchlist +- Fallback to static watchlist (domestic) or dynamic universe (overseas) #### Context & Memory @@ -138,8 +140,8 @@ The `settings` fixture in `conftest.py` provides safe defaults with test credent #### Dashboard ##### `tests/test_dashboard.py` (14 tests) -- FastAPI endpoint responses (8 API routes) -- Status, playbook, scorecard, performance, context, decisions, scenarios +- FastAPI endpoint responses (10 API routes) +- Status, playbook, scorecard, performance, context, decisions, scenarios, pnl/history, positions - Query parameter handling (market, date, limit) #### Performance & Quality From 6f047a6daf79355b12b3247080a9c0e549505c74 Mon Sep 17 00:00:00 2001 From: agentson Date: Sun, 1 Mar 2026 20:02:48 +0900 Subject: [PATCH 060/109] ci: add --ci mode for session handover gate in workflows (#353) --- .gitea/workflows/ci.yml | 2 +- .github/workflows/ci.yml | 2 +- scripts/session_handover_check.py | 18 ++++++++++++++++-- 3 files changed, 18 insertions(+), 4 deletions(-) diff --git a/.gitea/workflows/ci.yml b/.gitea/workflows/ci.yml index 9fa9522..9ee06db 100644 --- a/.gitea/workflows/ci.yml +++ b/.gitea/workflows/ci.yml @@ -25,7 +25,7 @@ jobs: run: pip install ".[dev]" - name: Session handover gate - run: python3 scripts/session_handover_check.py --strict + run: 
python3 scripts/session_handover_check.py --strict --ci - name: Validate governance assets env: diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index da84fc7..40f340d 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -22,7 +22,7 @@ jobs: run: pip install ".[dev]" - name: Session handover gate - run: python3 scripts/session_handover_check.py --strict + run: python3 scripts/session_handover_check.py --strict --ci - name: Validate governance assets env: diff --git a/scripts/session_handover_check.py b/scripts/session_handover_check.py index b2ded16..7b354be 100755 --- a/scripts/session_handover_check.py +++ b/scripts/session_handover_check.py @@ -66,6 +66,7 @@ def _check_handover_entry( *, branch: str, strict: bool, + ci_mode: bool, errors: list[str], ) -> None: if not HANDOVER_LOG.exists(): @@ -87,7 +88,7 @@ def _check_handover_entry( if token not in latest: errors.append(f"latest handover entry missing token: {token}") - if strict: + if strict and not ci_mode: today_utc = datetime.now(UTC).date().isoformat() if today_utc not in latest: errors.append( @@ -117,6 +118,14 @@ def main() -> int: action="store_true", help="Enforce today-date and current-branch match on latest handover entry.", ) + parser.add_argument( + "--ci", + action="store_true", + help=( + "CI mode: keep structural/token checks but skip strict " + "today-date/current-branch matching." 
+ ), + ) args = parser.parse_args() errors: list[str] = [] @@ -128,7 +137,12 @@ def main() -> int: elif branch in {"main", "master"}: errors.append(f"working branch must not be {branch}") - _check_handover_entry(branch=branch, strict=args.strict, errors=errors) + _check_handover_entry( + branch=branch, + strict=args.strict, + ci_mode=args.ci, + errors=errors, + ) if errors: print("[FAIL] session handover check failed") From 5730f0db2acc37cffbe08ef7fd85a40230e97476 Mon Sep 17 00:00:00 2001 From: agentson Date: Sun, 1 Mar 2026 20:17:13 +0900 Subject: [PATCH 061/109] ci: fix lint baseline and stabilize failing main tests --- src/analysis/backtest_cost_guard.py | 2 +- src/analysis/backtest_execution_model.py | 7 +- src/analysis/backtest_pipeline.py | 3 +- src/analysis/scanner.py | 16 +- src/analysis/smart_scanner.py | 25 +- src/analysis/triple_barrier.py | 9 +- src/analysis/volatility.py | 26 +- src/backup/__init__.py | 4 +- src/backup/cloud_storage.py | 4 +- src/backup/exporter.py | 19 +- src/backup/health_monitor.py | 10 +- src/backup/scheduler.py | 12 +- src/brain/cache.py | 10 +- src/brain/context_selector.py | 8 +- src/brain/gemini_client.py | 54 +-- src/brain/prompt_optimizer.py | 3 +- src/broker/kis_api.py | 86 ++-- src/broker/overseas.py | 90 ++-- src/config.py | 20 +- src/context/aggregator.py | 12 +- src/context/layer.py | 4 +- src/context/summarizer.py | 2 +- src/core/kill_switch.py | 3 +- src/core/order_policy.py | 14 +- src/core/priority_queue.py | 4 +- src/core/risk_manager.py | 10 +- src/dashboard/app.py | 9 +- src/data/economic_calendar.py | 1 - src/db.py | 11 +- src/evolution/ab_test.py | 7 +- src/evolution/daily_review.py | 4 +- src/evolution/optimizer.py | 52 ++- src/evolution/performance_tracker.py | 12 +- src/logging/decision_logger.py | 4 +- src/main.py | 332 +++++++-------- src/markets/schedule.py | 8 +- src/notifications/telegram_client.py | 62 +-- src/strategy/models.py | 8 +- src/strategy/playbook_store.py | 7 +- 
src/strategy/position_state_machine.py | 11 +- src/strategy/pre_market_planner.py | 39 +- src/strategy/scenario_engine.py | 46 +- tests/test_backup.py | 108 ++--- tests/test_brain.py | 9 +- tests/test_broker.py | 89 ++-- tests/test_context.py | 96 ++--- tests/test_daily_review.py | 33 +- tests/test_dashboard.py | 1 + tests/test_data_integration.py | 14 +- tests/test_db.py | 9 +- tests/test_decision_logger.py | 5 +- tests/test_evolution.py | 34 +- tests/test_logging_config.py | 4 +- tests/test_main.py | 509 +++++++++++++---------- tests/test_market_schedule.py | 16 +- tests/test_overseas_broker.py | 127 ++---- tests/test_pre_market_planner.py | 35 +- tests/test_scenario_engine.py | 86 ++-- tests/test_smart_scanner.py | 7 +- tests/test_strategy_models.py | 1 - tests/test_telegram.py | 126 +++--- tests/test_telegram_commands.py | 16 +- tests/test_validate_governance_assets.py | 4 +- tests/test_volatility.py | 22 +- 64 files changed, 1041 insertions(+), 1380 deletions(-) diff --git a/src/analysis/backtest_cost_guard.py b/src/analysis/backtest_cost_guard.py index 8f2cf98..97e1cd3 100644 --- a/src/analysis/backtest_cost_guard.py +++ b/src/analysis/backtest_cost_guard.py @@ -2,8 +2,8 @@ from __future__ import annotations -from dataclasses import dataclass import math +from dataclasses import dataclass @dataclass(frozen=True) diff --git a/src/analysis/backtest_execution_model.py b/src/analysis/backtest_execution_model.py index 24798dc..704b804 100644 --- a/src/analysis/backtest_execution_model.py +++ b/src/analysis/backtest_execution_model.py @@ -2,12 +2,11 @@ from __future__ import annotations -from dataclasses import dataclass import math +from dataclasses import dataclass from random import Random from typing import Literal - OrderSide = Literal["BUY", "SELL"] @@ -77,7 +76,9 @@ class BacktestExecutionModel: reason="execution_failure", ) - slip_mult = 1.0 + (slippage_bps / 10000.0 if request.side == "BUY" else -slippage_bps / 10000.0) + slip_mult = 1.0 + ( + slippage_bps 
/ 10000.0 if request.side == "BUY" else -slippage_bps / 10000.0 + ) exec_price = request.reference_price * slip_mult if self._rng.random() < partial_rate: diff --git a/src/analysis/backtest_pipeline.py b/src/analysis/backtest_pipeline.py index ba49289..985e0e0 100644 --- a/src/analysis/backtest_pipeline.py +++ b/src/analysis/backtest_pipeline.py @@ -10,8 +10,7 @@ from collections.abc import Sequence from dataclasses import dataclass from datetime import datetime from statistics import mean -from typing import Literal -from typing import cast +from typing import Literal, cast from src.analysis.backtest_cost_guard import BacktestCostModel, validate_backtest_cost_model from src.analysis.triple_barrier import TripleBarrierSpec, label_with_triple_barrier diff --git a/src/analysis/scanner.py b/src/analysis/scanner.py index 50d34ba..8b9d379 100644 --- a/src/analysis/scanner.py +++ b/src/analysis/scanner.py @@ -104,6 +104,7 @@ class MarketScanner: # Store in L7 real-time layer from datetime import UTC, datetime + timeframe = datetime.now(UTC).isoformat() self.context_store.set_context( ContextLayer.L7_REALTIME, @@ -158,12 +159,8 @@ class MarketScanner: top_movers = valid_metrics[: self.top_n] # Detect breakouts and breakdowns - breakouts = [ - m.stock_code for m in valid_metrics if self.analyzer.is_breakout(m) - ] - breakdowns = [ - m.stock_code for m in valid_metrics if self.analyzer.is_breakdown(m) - ] + breakouts = [m.stock_code for m in valid_metrics if self.analyzer.is_breakout(m)] + breakdowns = [m.stock_code for m in valid_metrics if self.analyzer.is_breakdown(m)] logger.info( "%s scan complete: %d scanned, top momentum=%.1f, %d breakouts, %d breakdowns", @@ -228,10 +225,9 @@ class MarketScanner: # If we removed too many, backfill from current watchlist if len(updated) < len(current_watchlist): - backfill = [ - code for code in current_watchlist - if code not in updated - ][: len(current_watchlist) - len(updated)] + backfill = [code for code in current_watchlist if 
code not in updated][ + : len(current_watchlist) - len(updated) + ] updated.extend(backfill) logger.info( diff --git a/src/analysis/smart_scanner.py b/src/analysis/smart_scanner.py index 7717166..63d3fe1 100644 --- a/src/analysis/smart_scanner.py +++ b/src/analysis/smart_scanner.py @@ -158,7 +158,12 @@ class SmartVolatilityScanner: price = latest_close latest_high = _safe_float(latest.get("high")) latest_low = _safe_float(latest.get("low")) - if latest_close > 0 and latest_high > 0 and latest_low > 0 and latest_high >= latest_low: + if ( + latest_close > 0 + and latest_high > 0 + and latest_low > 0 + and latest_high >= latest_low + ): intraday_range_pct = (latest_high - latest_low) / latest_close * 100.0 if volume <= 0: volume = _safe_float(latest.get("volume")) @@ -234,9 +239,7 @@ class SmartVolatilityScanner: limit=50, ) except Exception as exc: - logger.warning( - "Overseas fluctuation ranking failed for %s: %s", market.code, exc - ) + logger.warning("Overseas fluctuation ranking failed for %s: %s", market.code, exc) fluct_rows = [] if not fluct_rows: @@ -250,9 +253,7 @@ class SmartVolatilityScanner: limit=50, ) except Exception as exc: - logger.warning( - "Overseas volume ranking failed for %s: %s", market.code, exc - ) + logger.warning("Overseas volume ranking failed for %s: %s", market.code, exc) volume_rows = [] for idx, row in enumerate(volume_rows): @@ -433,16 +434,10 @@ def _extract_intraday_range_pct(row: dict[str, Any], price: float) -> float: if price <= 0: return 0.0 high = _safe_float( - row.get("high") - or row.get("ovrs_hgpr") - or row.get("stck_hgpr") - or row.get("day_hgpr") + row.get("high") or row.get("ovrs_hgpr") or row.get("stck_hgpr") or row.get("day_hgpr") ) low = _safe_float( - row.get("low") - or row.get("ovrs_lwpr") - or row.get("stck_lwpr") - or row.get("day_lwpr") + row.get("low") or row.get("ovrs_lwpr") or row.get("stck_lwpr") or row.get("day_lwpr") ) if high <= 0 or low <= 0 or high < low: return 0.0 diff --git 
a/src/analysis/triple_barrier.py b/src/analysis/triple_barrier.py index 793250d..11c7018 100644 --- a/src/analysis/triple_barrier.py +++ b/src/analysis/triple_barrier.py @@ -6,10 +6,10 @@ Implements first-touch labeling with upper/lower/time barriers. from __future__ import annotations import warnings +from collections.abc import Sequence from dataclasses import dataclass from datetime import datetime, timedelta -from typing import Literal, Sequence - +from typing import Literal TieBreakMode = Literal["stop_first", "take_first"] @@ -92,7 +92,10 @@ def label_with_triple_barrier( else: assert spec.max_holding_bars is not None warnings.warn( - "TripleBarrierSpec.max_holding_bars is deprecated; use max_holding_minutes with timestamps instead.", + ( + "TripleBarrierSpec.max_holding_bars is deprecated; " + "use max_holding_minutes with timestamps instead." + ), DeprecationWarning, stacklevel=2, ) diff --git a/src/analysis/volatility.py b/src/analysis/volatility.py index 0794220..a974e0d 100644 --- a/src/analysis/volatility.py +++ b/src/analysis/volatility.py @@ -92,9 +92,7 @@ class VolatilityAnalyzer: recent_tr = true_ranges[-period:] return sum(recent_tr) / len(recent_tr) - def calculate_price_change( - self, current_price: float, past_price: float - ) -> float: + def calculate_price_change(self, current_price: float, past_price: float) -> float: """Calculate price change percentage. Args: @@ -108,9 +106,7 @@ class VolatilityAnalyzer: return 0.0 return ((current_price - past_price) / past_price) * 100 - def calculate_volume_surge( - self, current_volume: float, avg_volume: float - ) -> float: + def calculate_volume_surge(self, current_volume: float, avg_volume: float) -> float: """Calculate volume surge ratio. 
Args: @@ -240,11 +236,7 @@ class VolatilityAnalyzer: Momentum score (0-100) """ # Weight recent changes more heavily - weighted_change = ( - price_change_1m * 0.4 + - price_change_5m * 0.3 + - price_change_15m * 0.2 - ) + weighted_change = price_change_1m * 0.4 + price_change_5m * 0.3 + price_change_15m * 0.2 # Volume contribution (normalized to 0-10 scale) volume_contribution = min(10.0, (volume_surge - 1.0) * 5.0) @@ -301,17 +293,11 @@ class VolatilityAnalyzer: if len(close_prices) > 0: if len(close_prices) >= 1: - price_change_1m = self.calculate_price_change( - current_price, close_prices[-1] - ) + price_change_1m = self.calculate_price_change(current_price, close_prices[-1]) if len(close_prices) >= 5: - price_change_5m = self.calculate_price_change( - current_price, close_prices[-5] - ) + price_change_5m = self.calculate_price_change(current_price, close_prices[-5]) if len(close_prices) >= 15: - price_change_15m = self.calculate_price_change( - current_price, close_prices[-15] - ) + price_change_15m = self.calculate_price_change(current_price, close_prices[-15]) # Calculate volume surge avg_volume = sum(volumes) / len(volumes) if volumes else current_volume diff --git a/src/backup/__init__.py b/src/backup/__init__.py index a58e700..069fdd6 100644 --- a/src/backup/__init__.py +++ b/src/backup/__init__.py @@ -7,9 +7,9 @@ This module provides: - Health monitoring and alerts """ -from src.backup.exporter import BackupExporter, ExportFormat -from src.backup.scheduler import BackupScheduler, BackupPolicy from src.backup.cloud_storage import CloudStorage, S3Config +from src.backup.exporter import BackupExporter, ExportFormat +from src.backup.scheduler import BackupPolicy, BackupScheduler __all__ = [ "BackupExporter", diff --git a/src/backup/cloud_storage.py b/src/backup/cloud_storage.py index 4850e8d..ba62f4c 100644 --- a/src/backup/cloud_storage.py +++ b/src/backup/cloud_storage.py @@ -94,7 +94,9 @@ class CloudStorage: if metadata: extra_args["Metadata"] = metadata 
- logger.info("Uploading %s to s3://%s/%s", file_path.name, self.config.bucket_name, object_key) + logger.info( + "Uploading %s to s3://%s/%s", file_path.name, self.config.bucket_name, object_key + ) try: self.client.upload_file( diff --git a/src/backup/exporter.py b/src/backup/exporter.py index f5b3cd6..979982d 100644 --- a/src/backup/exporter.py +++ b/src/backup/exporter.py @@ -14,14 +14,14 @@ import json import logging import sqlite3 from datetime import UTC, datetime -from enum import Enum +from enum import StrEnum from pathlib import Path from typing import Any logger = logging.getLogger(__name__) -class ExportFormat(str, Enum): +class ExportFormat(StrEnum): """Supported export formats.""" JSON = "json" @@ -103,15 +103,11 @@ class BackupExporter: elif fmt == ExportFormat.CSV: return self._export_csv(output_dir, timestamp, compress, incremental_since) elif fmt == ExportFormat.PARQUET: - return self._export_parquet( - output_dir, timestamp, compress, incremental_since - ) + return self._export_parquet(output_dir, timestamp, compress, incremental_since) else: raise ValueError(f"Unsupported format: {fmt}") - def _get_trades( - self, incremental_since: datetime | None = None - ) -> list[dict[str, Any]]: + def _get_trades(self, incremental_since: datetime | None = None) -> list[dict[str, Any]]: """Fetch trades from database. Args: @@ -164,9 +160,7 @@ class BackupExporter: data = { "export_timestamp": datetime.now(UTC).isoformat(), - "incremental_since": ( - incremental_since.isoformat() if incremental_since else None - ), + "incremental_since": (incremental_since.isoformat() if incremental_since else None), "record_count": len(trades), "trades": trades, } @@ -284,8 +278,7 @@ class BackupExporter: import pyarrow.parquet as pq except ImportError: raise ImportError( - "pyarrow is required for Parquet export. " - "Install with: pip install pyarrow" + "pyarrow is required for Parquet export. 
Install with: pip install pyarrow" ) # Convert to pyarrow table diff --git a/src/backup/health_monitor.py b/src/backup/health_monitor.py index 4ec8406..a2c6fc9 100644 --- a/src/backup/health_monitor.py +++ b/src/backup/health_monitor.py @@ -14,14 +14,14 @@ import shutil import sqlite3 from dataclasses import dataclass from datetime import UTC, datetime, timedelta -from enum import Enum +from enum import StrEnum from pathlib import Path from typing import Any logger = logging.getLogger(__name__) -class HealthStatus(str, Enum): +class HealthStatus(StrEnum): """Health check status.""" HEALTHY = "healthy" @@ -137,9 +137,13 @@ class HealthMonitor: used_percent = (stat.used / stat.total) * 100 if stat.free < self.min_disk_space_bytes: + min_disk_gb = self.min_disk_space_bytes / 1024 / 1024 / 1024 return HealthCheckResult( status=HealthStatus.UNHEALTHY, - message=f"Low disk space: {free_gb:.2f} GB free (minimum: {self.min_disk_space_bytes / 1024 / 1024 / 1024:.2f} GB)", + message=( + f"Low disk space: {free_gb:.2f} GB free " + f"(minimum: {min_disk_gb:.2f} GB)" + ), details={ "free_gb": free_gb, "total_gb": total_gb, diff --git a/src/backup/scheduler.py b/src/backup/scheduler.py index c9f16d6..3b9f633 100644 --- a/src/backup/scheduler.py +++ b/src/backup/scheduler.py @@ -12,14 +12,14 @@ import logging import shutil from dataclasses import dataclass from datetime import UTC, datetime, timedelta -from enum import Enum +from enum import StrEnum from pathlib import Path from typing import Any logger = logging.getLogger(__name__) -class BackupPolicy(str, Enum): +class BackupPolicy(StrEnum): """Backup retention policies.""" DAILY = "daily" @@ -69,9 +69,7 @@ class BackupScheduler: for d in [self.daily_dir, self.weekly_dir, self.monthly_dir]: d.mkdir(parents=True, exist_ok=True) - def create_backup( - self, policy: BackupPolicy, verify: bool = True - ) -> BackupMetadata: + def create_backup(self, policy: BackupPolicy, verify: bool = True) -> BackupMetadata: """Create a database 
backup. Args: @@ -229,9 +227,7 @@ class BackupScheduler: return removed - def list_backups( - self, policy: BackupPolicy | None = None - ) -> list[BackupMetadata]: + def list_backups(self, policy: BackupPolicy | None = None) -> list[BackupMetadata]: """List available backups. Args: diff --git a/src/brain/cache.py b/src/brain/cache.py index cf9190b..cf5f540 100644 --- a/src/brain/cache.py +++ b/src/brain/cache.py @@ -13,8 +13,8 @@ import hashlib import json import logging import time -from dataclasses import dataclass, field -from typing import Any, TYPE_CHECKING +from dataclasses import dataclass +from typing import TYPE_CHECKING, Any if TYPE_CHECKING: from src.brain.gemini_client import TradeDecision @@ -26,7 +26,7 @@ logger = logging.getLogger(__name__) class CacheEntry: """Cached decision with metadata.""" - decision: "TradeDecision" + decision: TradeDecision cached_at: float # Unix timestamp hit_count: int = 0 market_data_hash: str = "" @@ -239,9 +239,7 @@ class DecisionCache: """ current_time = time.time() expired_keys = [ - k - for k, v in self._cache.items() - if current_time - v.cached_at > self.ttl_seconds + k for k, v in self._cache.items() if current_time - v.cached_at > self.ttl_seconds ] count = len(expired_keys) diff --git a/src/brain/context_selector.py b/src/brain/context_selector.py index 47620e4..119eb78 100644 --- a/src/brain/context_selector.py +++ b/src/brain/context_selector.py @@ -11,14 +11,14 @@ from __future__ import annotations from dataclasses import dataclass from datetime import UTC, datetime -from enum import Enum +from enum import StrEnum from typing import Any from src.context.layer import ContextLayer from src.context.store import ContextStore -class DecisionType(str, Enum): +class DecisionType(StrEnum): """Type of trading decision being made.""" NORMAL = "normal" # Regular trade decision @@ -183,9 +183,7 @@ class ContextSelector: ContextLayer.L1_LEGACY, ] - scores = { - layer: self.score_layer_relevance(layer, decision_type) for 
layer in all_layers - } + scores = {layer: self.score_layer_relevance(layer, decision_type) for layer in all_layers} # Filter by minimum score selected_layers = [layer for layer, score in scores.items() if score >= min_score] diff --git a/src/brain/gemini_client.py b/src/brain/gemini_client.py index c664eb2..6e61c40 100644 --- a/src/brain/gemini_client.py +++ b/src/brain/gemini_client.py @@ -25,12 +25,12 @@ from typing import Any from google import genai -from src.config import Settings -from src.data.news_api import NewsAPI, NewsSentiment -from src.data.economic_calendar import EconomicCalendar -from src.data.market_data import MarketData from src.brain.cache import DecisionCache from src.brain.prompt_optimizer import PromptOptimizer +from src.config import Settings +from src.data.economic_calendar import EconomicCalendar +from src.data.market_data import MarketData +from src.data.news_api import NewsAPI, NewsSentiment logger = logging.getLogger(__name__) @@ -159,16 +159,12 @@ class GeminiClient: return "" # Check for upcoming high-impact events - upcoming = self._economic_calendar.get_upcoming_events( - days_ahead=7, min_impact="HIGH" - ) + upcoming = self._economic_calendar.get_upcoming_events(days_ahead=7, min_impact="HIGH") if upcoming.high_impact_count == 0: return "" - lines = [ - f"Upcoming High-Impact Events: {upcoming.high_impact_count} in next 7 days" - ] + lines = [f"Upcoming High-Impact Events: {upcoming.high_impact_count} in next 7 days"] if upcoming.next_major_event is not None: event = upcoming.next_major_event @@ -180,9 +176,7 @@ class GeminiClient: # Check for earnings earnings_date = self._economic_calendar.get_earnings_date(stock_code) if earnings_date is not None: - lines.append( - f" Earnings: {stock_code} on {earnings_date.strftime('%Y-%m-%d')}" - ) + lines.append(f" Earnings: {stock_code} on {earnings_date.strftime('%Y-%m-%d')}") return "\n".join(lines) @@ -235,9 +229,7 @@ class GeminiClient: # Add foreigner net if non-zero if 
market_data.get("foreigner_net", 0) != 0: - market_info_lines.append( - f"Foreigner Net Buy/Sell: {market_data['foreigner_net']}" - ) + market_info_lines.append(f"Foreigner Net Buy/Sell: {market_data['foreigner_net']}") market_info = "\n".join(market_info_lines) @@ -249,8 +241,7 @@ class GeminiClient: market_info += f"\n\n{external_context}" json_format = ( - '{"action": "BUY"|"SELL"|"HOLD", ' - '"confidence": , "rationale": ""}' + '{"action": "BUY"|"SELL"|"HOLD", "confidence": , "rationale": ""}' ) return ( f"You are a professional {market_name} trading analyst.\n" @@ -289,15 +280,12 @@ class GeminiClient: # Add foreigner net if non-zero if market_data.get("foreigner_net", 0) != 0: - market_info_lines.append( - f"Foreigner Net Buy/Sell: {market_data['foreigner_net']}" - ) + market_info_lines.append(f"Foreigner Net Buy/Sell: {market_data['foreigner_net']}") market_info = "\n".join(market_info_lines) json_format = ( - '{"action": "BUY"|"SELL"|"HOLD", ' - '"confidence": , "rationale": ""}' + '{"action": "BUY"|"SELL"|"HOLD", "confidence": , "rationale": ""}' ) return ( f"You are a professional {market_name} trading analyst.\n" @@ -339,25 +327,19 @@ class GeminiClient: data = json.loads(cleaned) except json.JSONDecodeError: logger.warning("Malformed JSON from Gemini — defaulting to HOLD") - return TradeDecision( - action="HOLD", confidence=0, rationale="Malformed JSON response" - ) + return TradeDecision(action="HOLD", confidence=0, rationale="Malformed JSON response") # Validate required fields if not all(k in data for k in ("action", "confidence", "rationale")): logger.warning("Missing fields in Gemini response — defaulting to HOLD") # Preserve raw text in rationale so prompt_override callers (e.g. 
pre_market_planner) # can extract their own JSON format from decision.rationale (#245) - return TradeDecision( - action="HOLD", confidence=0, rationale=raw - ) + return TradeDecision(action="HOLD", confidence=0, rationale=raw) action = str(data["action"]).upper() if action not in VALID_ACTIONS: logger.warning("Invalid action '%s' from Gemini — defaulting to HOLD", action) - return TradeDecision( - action="HOLD", confidence=0, rationale=f"Invalid action: {action}" - ) + return TradeDecision(action="HOLD", confidence=0, rationale=f"Invalid action: {action}") confidence = int(data["confidence"]) rationale = str(data["rationale"]) @@ -445,9 +427,7 @@ class GeminiClient: # not a parsed TradeDecision. Skip parse_response to avoid spurious # "Missing fields" warnings and return the raw response directly. (#247) if "prompt_override" in market_data: - logger.info( - "Gemini raw response received (prompt_override, tokens=%d)", token_count - ) + logger.info("Gemini raw response received (prompt_override, tokens=%d)", token_count) # Not a trade decision — don't inflate _total_decisions metrics return TradeDecision( action="HOLD", confidence=0, rationale=raw, token_count=token_count @@ -546,9 +526,7 @@ class GeminiClient: # Batch Decision Making (for daily trading mode) # ------------------------------------------------------------------ - async def decide_batch( - self, stocks_data: list[dict[str, Any]] - ) -> dict[str, TradeDecision]: + async def decide_batch(self, stocks_data: list[dict[str, Any]]) -> dict[str, TradeDecision]: """Make decisions for multiple stocks in a single API call. This is designed for daily trading mode to minimize API usage diff --git a/src/brain/prompt_optimizer.py b/src/brain/prompt_optimizer.py index fdc0d99..c85edc8 100644 --- a/src/brain/prompt_optimizer.py +++ b/src/brain/prompt_optimizer.py @@ -179,7 +179,8 @@ class PromptOptimizer: # Minimal instructions prompt = ( f"{market_name} trader. 
Analyze:\n{data_str}\n\n" - 'Return JSON: {"action":"BUY"|"SELL"|"HOLD","confidence":<0-100>,"rationale":""}\n' + "Return JSON: " + '{"action":"BUY"|"SELL"|"HOLD","confidence":<0-100>,"rationale":""}\n' "Rules: action=BUY/SELL/HOLD, confidence=0-100, rationale=concise. No markdown." ) else: diff --git a/src/broker/kis_api.py b/src/broker/kis_api.py index 953a604..269463b 100644 --- a/src/broker/kis_api.py +++ b/src/broker/kis_api.py @@ -58,7 +58,7 @@ class LeakyBucket: def __init__(self, rate: float) -> None: """Args: - rate: Maximum requests per second. + rate: Maximum requests per second. """ self._rate = rate self._interval = 1.0 / rate @@ -103,7 +103,8 @@ class KISBroker: ssl_ctx.verify_mode = ssl.CERT_NONE connector = aiohttp.TCPConnector(ssl=ssl_ctx) self._session = aiohttp.ClientSession( - timeout=timeout, connector=connector, + timeout=timeout, + connector=connector, ) return self._session @@ -224,16 +225,12 @@ class KISBroker: async with session.get(url, headers=headers, params=params) as resp: if resp.status != 200: text = await resp.text() - raise ConnectionError( - f"get_orderbook failed ({resp.status}): {text}" - ) + raise ConnectionError(f"get_orderbook failed ({resp.status}): {text}") return await resp.json() except (TimeoutError, aiohttp.ClientError) as exc: raise ConnectionError(f"Network error fetching orderbook: {exc}") from exc - async def get_current_price( - self, stock_code: str - ) -> tuple[float, float, float]: + async def get_current_price(self, stock_code: str) -> tuple[float, float, float]: """Fetch current price data for a domestic stock. 
Uses the ``inquire-price`` API (FHKST01010100), which works in both @@ -265,9 +262,7 @@ class KISBroker: async with session.get(url, headers=headers, params=params) as resp: if resp.status != 200: text = await resp.text() - raise ConnectionError( - f"get_current_price failed ({resp.status}): {text}" - ) + raise ConnectionError(f"get_current_price failed ({resp.status}): {text}") data = await resp.json() out = data.get("output", {}) return ( @@ -276,9 +271,7 @@ class KISBroker: _f(out.get("frgn_ntby_qty")), ) except (TimeoutError, aiohttp.ClientError) as exc: - raise ConnectionError( - f"Network error fetching current price: {exc}" - ) from exc + raise ConnectionError(f"Network error fetching current price: {exc}") from exc async def get_balance(self) -> dict[str, Any]: """Fetch current account balance and holdings.""" @@ -308,9 +301,7 @@ class KISBroker: async with session.get(url, headers=headers, params=params) as resp: if resp.status != 200: text = await resp.text() - raise ConnectionError( - f"get_balance failed ({resp.status}): {text}" - ) + raise ConnectionError(f"get_balance failed ({resp.status}): {text}") return await resp.json() except (TimeoutError, aiohttp.ClientError) as exc: raise ConnectionError(f"Network error fetching balance: {exc}") from exc @@ -369,9 +360,7 @@ class KISBroker: async with session.post(url, headers=headers, json=body) as resp: if resp.status != 200: text = await resp.text() - raise ConnectionError( - f"send_order failed ({resp.status}): {text}" - ) + raise ConnectionError(f"send_order failed ({resp.status}): {text}") data = await resp.json() logger.info( "Order submitted", @@ -449,9 +438,7 @@ class KISBroker: async with session.get(url, headers=headers, params=params) as resp: if resp.status != 200: text = await resp.text() - raise ConnectionError( - f"fetch_market_rankings failed ({resp.status}): {text}" - ) + raise ConnectionError(f"fetch_market_rankings failed ({resp.status}): {text}") data = await resp.json() # Parse response 
- output is a list of ranked stocks @@ -465,14 +452,16 @@ class KISBroker: rankings = [] for item in data.get("output", [])[:limit]: - rankings.append({ - "stock_code": item.get("stck_shrn_iscd") or item.get("mksc_shrn_iscd", ""), - "name": item.get("hts_kor_isnm", ""), - "price": _safe_float(item.get("stck_prpr", "0")), - "volume": _safe_float(item.get("acml_vol", "0")), - "change_rate": _safe_float(item.get("prdy_ctrt", "0")), - "volume_increase_rate": _safe_float(item.get("vol_inrt", "0")), - }) + rankings.append( + { + "stock_code": item.get("stck_shrn_iscd") or item.get("mksc_shrn_iscd", ""), + "name": item.get("hts_kor_isnm", ""), + "price": _safe_float(item.get("stck_prpr", "0")), + "volume": _safe_float(item.get("acml_vol", "0")), + "change_rate": _safe_float(item.get("prdy_ctrt", "0")), + "volume_increase_rate": _safe_float(item.get("vol_inrt", "0")), + } + ) return rankings except (TimeoutError, aiohttp.ClientError) as exc: @@ -522,9 +511,7 @@ class KISBroker: data = await resp.json() return data.get("output", []) or [] except (TimeoutError, aiohttp.ClientError) as exc: - raise ConnectionError( - f"Network error fetching domestic pending orders: {exc}" - ) from exc + raise ConnectionError(f"Network error fetching domestic pending orders: {exc}") from exc async def cancel_domestic_order( self, @@ -575,14 +562,10 @@ class KISBroker: async with session.post(url, headers=headers, json=body) as resp: if resp.status != 200: text = await resp.text() - raise ConnectionError( - f"cancel_domestic_order failed ({resp.status}): {text}" - ) + raise ConnectionError(f"cancel_domestic_order failed ({resp.status}): {text}") return cast(dict[str, Any], await resp.json()) except (TimeoutError, aiohttp.ClientError) as exc: - raise ConnectionError( - f"Network error cancelling domestic order: {exc}" - ) from exc + raise ConnectionError(f"Network error cancelling domestic order: {exc}") from exc async def get_daily_prices( self, @@ -609,6 +592,7 @@ class KISBroker: # Calculate 
date range (today and N days ago) from datetime import datetime, timedelta + end_date = datetime.now().strftime("%Y%m%d") start_date = (datetime.now() - timedelta(days=days + 10)).strftime("%Y%m%d") @@ -627,9 +611,7 @@ class KISBroker: async with session.get(url, headers=headers, params=params) as resp: if resp.status != 200: text = await resp.text() - raise ConnectionError( - f"get_daily_prices failed ({resp.status}): {text}" - ) + raise ConnectionError(f"get_daily_prices failed ({resp.status}): {text}") data = await resp.json() # Parse response @@ -643,14 +625,16 @@ class KISBroker: prices = [] for item in data.get("output2", []): - prices.append({ - "date": item.get("stck_bsop_date", ""), - "open": _safe_float(item.get("stck_oprc", "0")), - "high": _safe_float(item.get("stck_hgpr", "0")), - "low": _safe_float(item.get("stck_lwpr", "0")), - "close": _safe_float(item.get("stck_clpr", "0")), - "volume": _safe_float(item.get("acml_vol", "0")), - }) + prices.append( + { + "date": item.get("stck_bsop_date", ""), + "open": _safe_float(item.get("stck_oprc", "0")), + "high": _safe_float(item.get("stck_hgpr", "0")), + "low": _safe_float(item.get("stck_lwpr", "0")), + "close": _safe_float(item.get("stck_clpr", "0")), + "volume": _safe_float(item.get("acml_vol", "0")), + } + ) # Sort oldest to newest (KIS returns newest first) prices.reverse() diff --git a/src/broker/overseas.py b/src/broker/overseas.py index d98ea67..5120ed6 100644 --- a/src/broker/overseas.py +++ b/src/broker/overseas.py @@ -36,11 +36,11 @@ _CANCEL_TR_ID_MAP: dict[str, tuple[str, str]] = { "NYSE": ("TTTT1004U", "VTTT1004U"), "AMEX": ("TTTT1004U", "VTTT1004U"), "SEHK": ("TTTS1003U", "VTTS1003U"), - "TSE": ("TTTS0309U", "VTTS0309U"), + "TSE": ("TTTS0309U", "VTTS0309U"), "SHAA": ("TTTS0302U", "VTTS0302U"), "SZAA": ("TTTS0306U", "VTTS0306U"), - "HNX": ("TTTS0312U", "VTTS0312U"), - "HSX": ("TTTS0312U", "VTTS0312U"), + "HNX": ("TTTS0312U", "VTTS0312U"), + "HSX": ("TTTS0312U", "VTTS0312U"), } @@ -56,9 +56,7 @@ 
class OverseasBroker: """ self._broker = kis_broker - async def get_overseas_price( - self, exchange_code: str, stock_code: str - ) -> dict[str, Any]: + async def get_overseas_price(self, exchange_code: str, stock_code: str) -> dict[str, Any]: """ Fetch overseas stock price. @@ -89,14 +87,10 @@ class OverseasBroker: async with session.get(url, headers=headers, params=params) as resp: if resp.status != 200: text = await resp.text() - raise ConnectionError( - f"get_overseas_price failed ({resp.status}): {text}" - ) + raise ConnectionError(f"get_overseas_price failed ({resp.status}): {text}") return await resp.json() except (TimeoutError, aiohttp.ClientError) as exc: - raise ConnectionError( - f"Network error fetching overseas price: {exc}" - ) from exc + raise ConnectionError(f"Network error fetching overseas price: {exc}") from exc async def fetch_overseas_rankings( self, @@ -154,9 +148,7 @@ class OverseasBroker: ranking_type, ) return [] - raise ConnectionError( - f"fetch_overseas_rankings failed ({resp.status}): {text}" - ) + raise ConnectionError(f"fetch_overseas_rankings failed ({resp.status}): {text}") data = await resp.json() rows = self._extract_ranking_rows(data) @@ -171,9 +163,7 @@ class OverseasBroker: ) return [] except (TimeoutError, aiohttp.ClientError) as exc: - raise ConnectionError( - f"Network error fetching overseas rankings: {exc}" - ) from exc + raise ConnectionError(f"Network error fetching overseas rankings: {exc}") from exc async def get_overseas_balance(self, exchange_code: str) -> dict[str, Any]: """ @@ -193,9 +183,7 @@ class OverseasBroker: # TR_ID: 실전 TTTS3012R, 모의 VTTS3012R # Source: 한국투자증권 오픈API 전체문서 (20260221) — '해외주식 잔고조회' 시트 - balance_tr_id = ( - "TTTS3012R" if self._broker._settings.MODE == "live" else "VTTS3012R" - ) + balance_tr_id = "TTTS3012R" if self._broker._settings.MODE == "live" else "VTTS3012R" headers = await self._broker._auth_headers(balance_tr_id) params = { "CANO": self._broker._account_no, @@ -205,22 +193,16 @@ class 
OverseasBroker: "CTX_AREA_FK200": "", "CTX_AREA_NK200": "", } - url = ( - f"{self._broker._base_url}/uapi/overseas-stock/v1/trading/inquire-balance" - ) + url = f"{self._broker._base_url}/uapi/overseas-stock/v1/trading/inquire-balance" try: async with session.get(url, headers=headers, params=params) as resp: if resp.status != 200: text = await resp.text() - raise ConnectionError( - f"get_overseas_balance failed ({resp.status}): {text}" - ) + raise ConnectionError(f"get_overseas_balance failed ({resp.status}): {text}") return await resp.json() except (TimeoutError, aiohttp.ClientError) as exc: - raise ConnectionError( - f"Network error fetching overseas balance: {exc}" - ) from exc + raise ConnectionError(f"Network error fetching overseas balance: {exc}") from exc async def get_overseas_buying_power( self, @@ -247,9 +229,7 @@ class OverseasBroker: # TR_ID: 실전 TTTS3007R, 모의 VTTS3007R # Source: 한국투자증권 오픈API 전체문서 (20260221) — '해외주식 매수가능금액조회' 시트 - ps_tr_id = ( - "TTTS3007R" if self._broker._settings.MODE == "live" else "VTTS3007R" - ) + ps_tr_id = "TTTS3007R" if self._broker._settings.MODE == "live" else "VTTS3007R" headers = await self._broker._auth_headers(ps_tr_id) params = { "CANO": self._broker._account_no, @@ -258,9 +238,7 @@ class OverseasBroker: "OVRS_ORD_UNPR": f"{price:.2f}", "ITEM_CD": stock_code, } - url = ( - f"{self._broker._base_url}/uapi/overseas-stock/v1/trading/inquire-psamount" - ) + url = f"{self._broker._base_url}/uapi/overseas-stock/v1/trading/inquire-psamount" try: async with session.get(url, headers=headers, params=params) as resp: @@ -271,9 +249,7 @@ class OverseasBroker: ) return await resp.json() except (TimeoutError, aiohttp.ClientError) as exc: - raise ConnectionError( - f"Network error fetching overseas buying power: {exc}" - ) from exc + raise ConnectionError(f"Network error fetching overseas buying power: {exc}") from exc async def send_overseas_order( self, @@ -330,9 +306,7 @@ class OverseasBroker: async with session.post(url, 
headers=headers, json=body) as resp: if resp.status != 200: text = await resp.text() - raise ConnectionError( - f"send_overseas_order failed ({resp.status}): {text}" - ) + raise ConnectionError(f"send_overseas_order failed ({resp.status}): {text}") data = await resp.json() rt_cd = data.get("rt_cd", "") msg1 = data.get("msg1", "") @@ -357,13 +331,9 @@ class OverseasBroker: ) return data except (TimeoutError, aiohttp.ClientError) as exc: - raise ConnectionError( - f"Network error sending overseas order: {exc}" - ) from exc + raise ConnectionError(f"Network error sending overseas order: {exc}") from exc - async def get_overseas_pending_orders( - self, exchange_code: str - ) -> list[dict[str, Any]]: + async def get_overseas_pending_orders(self, exchange_code: str) -> list[dict[str, Any]]: """Fetch unfilled (pending) overseas orders for a given exchange. Args: @@ -379,9 +349,7 @@ class OverseasBroker: ConnectionError: On network or API errors (live mode only). """ if self._broker._settings.MODE != "live": - logger.debug( - "Pending orders API (TTTS3018R) not supported in paper mode; returning []" - ) + logger.debug("Pending orders API (TTTS3018R) not supported in paper mode; returning []") return [] await self._broker._rate_limiter.acquire() @@ -398,9 +366,7 @@ class OverseasBroker: "CTX_AREA_FK200": "", "CTX_AREA_NK200": "", } - url = ( - f"{self._broker._base_url}/uapi/overseas-stock/v1/trading/inquire-nccs" - ) + url = f"{self._broker._base_url}/uapi/overseas-stock/v1/trading/inquire-nccs" try: async with session.get(url, headers=headers, params=params) as resp: @@ -415,9 +381,7 @@ class OverseasBroker: return output return [] except (TimeoutError, aiohttp.ClientError) as exc: - raise ConnectionError( - f"Network error fetching pending orders: {exc}" - ) from exc + raise ConnectionError(f"Network error fetching pending orders: {exc}") from exc async def cancel_overseas_order( self, @@ -469,22 +433,16 @@ class OverseasBroker: headers = await 
self._broker._auth_headers(tr_id) headers["hashkey"] = hash_key - url = ( - f"{self._broker._base_url}/uapi/overseas-stock/v1/trading/order-rvsecncl" - ) + url = f"{self._broker._base_url}/uapi/overseas-stock/v1/trading/order-rvsecncl" try: async with session.post(url, headers=headers, json=body) as resp: if resp.status != 200: text = await resp.text() - raise ConnectionError( - f"cancel_overseas_order failed ({resp.status}): {text}" - ) + raise ConnectionError(f"cancel_overseas_order failed ({resp.status}): {text}") return await resp.json() except (TimeoutError, aiohttp.ClientError) as exc: - raise ConnectionError( - f"Network error cancelling overseas order: {exc}" - ) from exc + raise ConnectionError(f"Network error cancelling overseas order: {exc}") from exc def _get_currency_code(self, exchange_code: str) -> str: """ diff --git a/src/config.py b/src/config.py index 671b95b..81290e3 100644 --- a/src/config.py +++ b/src/config.py @@ -111,25 +111,21 @@ class Settings(BaseSettings): # Telegram notification type filters (granular control) # circuit_breaker is always sent regardless — safety-critical - TELEGRAM_NOTIFY_TRADES: bool = True # BUY/SELL execution alerts + TELEGRAM_NOTIFY_TRADES: bool = True # BUY/SELL execution alerts TELEGRAM_NOTIFY_MARKET_OPEN_CLOSE: bool = True # Market open/close alerts - TELEGRAM_NOTIFY_FAT_FINGER: bool = True # Fat-finger rejection alerts - TELEGRAM_NOTIFY_SYSTEM_EVENTS: bool = True # System start/shutdown alerts - TELEGRAM_NOTIFY_PLAYBOOK: bool = True # Playbook generated/failed alerts - TELEGRAM_NOTIFY_SCENARIO_MATCH: bool = True # Scenario matched alerts (most frequent) - TELEGRAM_NOTIFY_ERRORS: bool = True # Error alerts + TELEGRAM_NOTIFY_FAT_FINGER: bool = True # Fat-finger rejection alerts + TELEGRAM_NOTIFY_SYSTEM_EVENTS: bool = True # System start/shutdown alerts + TELEGRAM_NOTIFY_PLAYBOOK: bool = True # Playbook generated/failed alerts + TELEGRAM_NOTIFY_SCENARIO_MATCH: bool = True # Scenario matched alerts (most frequent) + 
TELEGRAM_NOTIFY_ERRORS: bool = True # Error alerts # Overseas ranking API (KIS endpoint/TR_ID may vary by account/product) # Override these from .env if your account uses different specs. OVERSEAS_RANKING_ENABLED: bool = True OVERSEAS_RANKING_FLUCT_TR_ID: str = "HHDFS76290000" OVERSEAS_RANKING_VOLUME_TR_ID: str = "HHDFS76270000" - OVERSEAS_RANKING_FLUCT_PATH: str = ( - "/uapi/overseas-stock/v1/ranking/updown-rate" - ) - OVERSEAS_RANKING_VOLUME_PATH: str = ( - "/uapi/overseas-stock/v1/ranking/volume-surge" - ) + OVERSEAS_RANKING_FLUCT_PATH: str = "/uapi/overseas-stock/v1/ranking/updown-rate" + OVERSEAS_RANKING_VOLUME_PATH: str = "/uapi/overseas-stock/v1/ranking/volume-surge" # Dashboard (optional) DASHBOARD_ENABLED: bool = False diff --git a/src/context/aggregator.py b/src/context/aggregator.py index 8eaecab..36e3982 100644 --- a/src/context/aggregator.py +++ b/src/context/aggregator.py @@ -222,9 +222,7 @@ class ContextAggregator: total_pnl = 0.0 for month in months: - monthly_pnl = self.store.get_context( - ContextLayer.L4_MONTHLY, month, "monthly_pnl" - ) + monthly_pnl = self.store.get_context(ContextLayer.L4_MONTHLY, month, "monthly_pnl") if monthly_pnl is not None: total_pnl += monthly_pnl @@ -251,9 +249,7 @@ class ContextAggregator: if quarterly_pnl is not None: total_pnl += quarterly_pnl - self.store.set_context( - ContextLayer.L2_ANNUAL, year, "annual_pnl", round(total_pnl, 2) - ) + self.store.set_context(ContextLayer.L2_ANNUAL, year, "annual_pnl", round(total_pnl, 2)) def aggregate_legacy_from_annual(self) -> None: """Aggregate L1 (legacy) context from all L2 (annual) data.""" @@ -280,9 +276,7 @@ class ContextAggregator: self.store.set_context( ContextLayer.L1_LEGACY, "LEGACY", "total_pnl", round(total_pnl, 2) ) - self.store.set_context( - ContextLayer.L1_LEGACY, "LEGACY", "years_traded", years_traded - ) + self.store.set_context(ContextLayer.L1_LEGACY, "LEGACY", "years_traded", years_traded) self.store.set_context( ContextLayer.L1_LEGACY, "LEGACY", diff 
--git a/src/context/layer.py b/src/context/layer.py index fdad474..7c40d34 100644 --- a/src/context/layer.py +++ b/src/context/layer.py @@ -3,10 +3,10 @@ from __future__ import annotations from dataclasses import dataclass -from enum import Enum +from enum import StrEnum -class ContextLayer(str, Enum): +class ContextLayer(StrEnum): """7-tier context hierarchy from real-time to generational.""" L1_LEGACY = "L1_LEGACY" # Cumulative/generational wisdom diff --git a/src/context/summarizer.py b/src/context/summarizer.py index c154ff7..8bc024d 100644 --- a/src/context/summarizer.py +++ b/src/context/summarizer.py @@ -9,7 +9,7 @@ This module summarizes old context data instead of including raw details: from __future__ import annotations from dataclasses import dataclass -from datetime import UTC, datetime, timedelta +from datetime import UTC, datetime from typing import Any from src.context.layer import ContextLayer diff --git a/src/core/kill_switch.py b/src/core/kill_switch.py index 9f2231b..71a3cdf 100644 --- a/src/core/kill_switch.py +++ b/src/core/kill_switch.py @@ -11,8 +11,9 @@ Order is fixed: from __future__ import annotations import inspect +from collections.abc import Awaitable, Callable from dataclasses import dataclass, field -from typing import Any, Awaitable, Callable +from typing import Any StepCallable = Callable[[], Any | Awaitable[Any]] diff --git a/src/core/order_policy.py b/src/core/order_policy.py index 5fbb43a..a347996 100644 --- a/src/core/order_policy.py +++ b/src/core/order_policy.py @@ -15,7 +15,7 @@ from src.markets.schedule import MarketInfo _LOW_LIQUIDITY_SESSIONS = {"NXT_AFTER", "US_PRE", "US_DAY", "US_AFTER"} -class OrderPolicyRejected(Exception): +class OrderPolicyRejectedError(Exception): """Raised when an order violates session policy.""" def __init__(self, message: str, *, session_id: str, market_code: str) -> None: @@ -61,7 +61,9 @@ def classify_session_id(market: MarketInfo, now: datetime | None = None) -> str: def 
get_session_info(market: MarketInfo, now: datetime | None = None) -> SessionInfo: session_id = classify_session_id(market, now) - return SessionInfo(session_id=session_id, is_low_liquidity=session_id in _LOW_LIQUIDITY_SESSIONS) + return SessionInfo( + session_id=session_id, is_low_liquidity=session_id in _LOW_LIQUIDITY_SESSIONS + ) def validate_order_policy( @@ -76,7 +78,7 @@ def validate_order_policy( is_market_order = price <= 0 if info.is_low_liquidity and is_market_order: - raise OrderPolicyRejected( + raise OrderPolicyRejectedError( f"Market order is forbidden in low-liquidity session ({info.session_id})", session_id=info.session_id, market_code=market.code, @@ -84,10 +86,14 @@ def validate_order_policy( # Guard against accidental unsupported actions. if order_type not in {"BUY", "SELL"}: - raise OrderPolicyRejected( + raise OrderPolicyRejectedError( f"Unsupported order_type={order_type}", session_id=info.session_id, market_code=market.code, ) return info + + +# Backward compatibility alias +OrderPolicyRejected = OrderPolicyRejectedError diff --git a/src/core/priority_queue.py b/src/core/priority_queue.py index 92f9ace..1010491 100644 --- a/src/core/priority_queue.py +++ b/src/core/priority_queue.py @@ -28,9 +28,7 @@ class PriorityTask: # Task data not used in comparison task_id: str = field(compare=False) task_data: dict[str, Any] = field(compare=False, default_factory=dict) - callback: Callable[[], Coroutine[Any, Any, Any]] | None = field( - compare=False, default=None - ) + callback: Callable[[], Coroutine[Any, Any, Any]] | None = field(compare=False, default=None) @dataclass diff --git a/src/core/risk_manager.py b/src/core/risk_manager.py index 7fd559b..8ce405b 100644 --- a/src/core/risk_manager.py +++ b/src/core/risk_manager.py @@ -25,7 +25,7 @@ class CircuitBreakerTripped(SystemExit): ) -class FatFingerRejected(Exception): +class FatFingerRejectedError(Exception): """Raised when an order exceeds the maximum allowed proportion of cash.""" def 
__init__(self, order_amount: float, total_cash: float, max_pct: float) -> None: @@ -61,7 +61,7 @@ class RiskManager: def check_fat_finger(self, order_amount: float, total_cash: float) -> None: """Reject orders that exceed the maximum proportion of available cash.""" if total_cash <= 0: - raise FatFingerRejected(order_amount, total_cash, self._ff_max_pct) + raise FatFingerRejectedError(order_amount, total_cash, self._ff_max_pct) ratio_pct = (order_amount / total_cash) * 100 if ratio_pct > self._ff_max_pct: @@ -69,7 +69,7 @@ class RiskManager: "Fat finger check failed", extra={"order_amount": order_amount}, ) - raise FatFingerRejected(order_amount, total_cash, self._ff_max_pct) + raise FatFingerRejectedError(order_amount, total_cash, self._ff_max_pct) def validate_order( self, @@ -81,3 +81,7 @@ class RiskManager: self.check_circuit_breaker(current_pnl_pct) self.check_fat_finger(order_amount, total_cash) logger.info("Order passed risk validation") + + +# Backward compatibility alias +FatFingerRejected = FatFingerRejectedError diff --git a/src/dashboard/app.py b/src/dashboard/app.py index e9d8e26..2c42676 100644 --- a/src/dashboard/app.py +++ b/src/dashboard/app.py @@ -5,7 +5,7 @@ from __future__ import annotations import json import os import sqlite3 -from datetime import UTC, datetime, timezone +from datetime import UTC, datetime from pathlib import Path from typing import Any @@ -188,10 +188,7 @@ def create_dashboard_app(db_path: str, mode: str = "paper") -> FastAPI: return { "market": "all", "combined": combined, - "by_market": [ - _row_to_performance(row) - for row in by_market_rows - ], + "by_market": [_row_to_performance(row) for row in by_market_rows], } row = conn.execute( @@ -401,7 +398,7 @@ def create_dashboard_app(db_path: str, mode: str = "paper") -> FastAPI: """ ).fetchall() - now = datetime.now(timezone.utc) + now = datetime.now(UTC) positions = [] for row in rows: entry_time_str = row["entry_time"] diff --git a/src/data/economic_calendar.py 
b/src/data/economic_calendar.py index 9f662b6..3057ebe 100644 --- a/src/data/economic_calendar.py +++ b/src/data/economic_calendar.py @@ -9,7 +9,6 @@ from __future__ import annotations import logging from dataclasses import dataclass from datetime import datetime, timedelta -from typing import Any logger = logging.getLogger(__name__) diff --git a/src/db.py b/src/db.py index e161de3..d7cbeb5 100644 --- a/src/db.py +++ b/src/db.py @@ -123,8 +123,7 @@ def init_db(db_path: str) -> sqlite3.Connection: """ ) decision_columns = { - row[1] - for row in conn.execute("PRAGMA table_info(decision_logs)").fetchall() + row[1] for row in conn.execute("PRAGMA table_info(decision_logs)").fetchall() } if "session_id" not in decision_columns: conn.execute("ALTER TABLE decision_logs ADD COLUMN session_id TEXT DEFAULT 'UNKNOWN'") @@ -185,9 +184,7 @@ def init_db(db_path: str) -> sqlite3.Connection: conn.execute( "CREATE INDEX IF NOT EXISTS idx_decision_logs_timestamp ON decision_logs(timestamp)" ) - conn.execute( - "CREATE INDEX IF NOT EXISTS idx_decision_logs_reviewed ON decision_logs(reviewed)" - ) + conn.execute("CREATE INDEX IF NOT EXISTS idx_decision_logs_reviewed ON decision_logs(reviewed)") conn.execute( "CREATE INDEX IF NOT EXISTS idx_decision_logs_confidence ON decision_logs(confidence)" ) @@ -381,9 +378,7 @@ def get_open_position( return {"decision_id": row[1], "price": row[2], "quantity": row[3], "timestamp": row[4]} -def get_recent_symbols( - conn: sqlite3.Connection, market: str, limit: int = 30 -) -> list[str]: +def get_recent_symbols(conn: sqlite3.Connection, market: str, limit: int = 30) -> list[str]: """Return recent unique symbols for a market, newest first.""" cursor = conn.execute( """ diff --git a/src/evolution/ab_test.py b/src/evolution/ab_test.py index e9ed3df..daf8854 100644 --- a/src/evolution/ab_test.py +++ b/src/evolution/ab_test.py @@ -90,9 +90,7 @@ class ABTester: sharpe_ratio = None if len(pnls) > 1: mean_return = avg_pnl - std_return = ( - sum((p - 
mean_return) ** 2 for p in pnls) / (len(pnls) - 1) - ) ** 0.5 + std_return = (sum((p - mean_return) ** 2 for p in pnls) / (len(pnls) - 1)) ** 0.5 if std_return > 0: sharpe_ratio = mean_return / std_return @@ -198,8 +196,7 @@ class ABTester: if meets_criteria: logger.info( - "Strategy '%s' meets deployment criteria: " - "win_rate=%.2f%%, trades=%d, avg_pnl=%.2f", + "Strategy '%s' meets deployment criteria: win_rate=%.2f%%, trades=%d, avg_pnl=%.2f", result.winner, winning_perf.win_rate, winning_perf.total_trades, diff --git a/src/evolution/daily_review.py b/src/evolution/daily_review.py index fd4eb0c..eb37100 100644 --- a/src/evolution/daily_review.py +++ b/src/evolution/daily_review.py @@ -60,9 +60,7 @@ class DailyReviewer: if isinstance(scenario_match, dict) and scenario_match: matched += 1 scenario_match_rate = ( - round((matched / total_decisions) * 100, 2) - if total_decisions - else 0.0 + round((matched / total_decisions) * 100, 2) if total_decisions else 0.0 ) trade_stats = self._conn.execute( diff --git a/src/evolution/optimizer.py b/src/evolution/optimizer.py index c9ef719..4369c54 100644 --- a/src/evolution/optimizer.py +++ b/src/evolution/optimizer.py @@ -80,26 +80,26 @@ class EvolutionOptimizer: # Convert to dict format for analysis failures = [] for decision in losing_decisions: - failures.append({ - "decision_id": decision.decision_id, - "timestamp": decision.timestamp, - "stock_code": decision.stock_code, - "market": decision.market, - "exchange_code": decision.exchange_code, - "action": decision.action, - "confidence": decision.confidence, - "rationale": decision.rationale, - "outcome_pnl": decision.outcome_pnl, - "outcome_accuracy": decision.outcome_accuracy, - "context_snapshot": decision.context_snapshot, - "input_data": decision.input_data, - }) + failures.append( + { + "decision_id": decision.decision_id, + "timestamp": decision.timestamp, + "stock_code": decision.stock_code, + "market": decision.market, + "exchange_code": decision.exchange_code, 
+ "action": decision.action, + "confidence": decision.confidence, + "rationale": decision.rationale, + "outcome_pnl": decision.outcome_pnl, + "outcome_accuracy": decision.outcome_accuracy, + "context_snapshot": decision.context_snapshot, + "input_data": decision.input_data, + } + ) return failures - def identify_failure_patterns( - self, failures: list[dict[str, Any]] - ) -> dict[str, Any]: + def identify_failure_patterns(self, failures: list[dict[str, Any]]) -> dict[str, Any]: """Identify patterns in losing decisions. Analyzes: @@ -143,12 +143,8 @@ class EvolutionOptimizer: total_confidence += failure.get("confidence", 0) total_loss += failure.get("outcome_pnl", 0.0) - patterns["avg_confidence"] = ( - round(total_confidence / len(failures), 2) if failures else 0.0 - ) - patterns["avg_loss"] = ( - round(total_loss / len(failures), 2) if failures else 0.0 - ) + patterns["avg_confidence"] = round(total_confidence / len(failures), 2) if failures else 0.0 + patterns["avg_loss"] = round(total_loss / len(failures), 2) if failures else 0.0 # Convert Counters to regular dicts for JSON serialization patterns["markets"] = dict(patterns["markets"]) @@ -197,7 +193,8 @@ class EvolutionOptimizer: prompt = ( "You are a quantitative trading strategy developer.\n" - "Analyze these failed trades and their patterns, then generate an improved strategy.\n\n" + "Analyze these failed trades and their patterns, " + "then generate an improved strategy.\n\n" f"Failure Patterns:\n{json.dumps(patterns, indent=2)}\n\n" f"Sample Failed Trades (first 5):\n" f"{json.dumps(failures[:5], indent=2, default=str)}\n\n" @@ -214,7 +211,8 @@ class EvolutionOptimizer: try: response = await self._client.aio.models.generate_content( - model=self._model_name, contents=prompt, + model=self._model_name, + contents=prompt, ) body = response.text.strip() except Exception as exc: @@ -280,9 +278,7 @@ class EvolutionOptimizer: logger.info("Strategy validation PASSED") return True else: - logger.warning( - "Strategy 
validation FAILED:\n%s", result.stdout + result.stderr - ) + logger.warning("Strategy validation FAILED:\n%s", result.stdout + result.stderr) # Clean up failing strategy strategy_path.unlink(missing_ok=True) return False diff --git a/src/evolution/performance_tracker.py b/src/evolution/performance_tracker.py index fd3476c..c7bc7e1 100644 --- a/src/evolution/performance_tracker.py +++ b/src/evolution/performance_tracker.py @@ -187,9 +187,7 @@ class PerformanceTracker: return metrics - def calculate_improvement_trend( - self, metrics_history: list[StrategyMetrics] - ) -> dict[str, Any]: + def calculate_improvement_trend(self, metrics_history: list[StrategyMetrics]) -> dict[str, Any]: """Calculate improvement trend from historical metrics. Args: @@ -229,9 +227,7 @@ class PerformanceTracker: "period_count": len(metrics_history), } - def generate_dashboard( - self, strategy_name: str | None = None - ) -> PerformanceDashboard: + def generate_dashboard(self, strategy_name: str | None = None) -> PerformanceDashboard: """Generate a comprehensive performance dashboard. Args: @@ -260,9 +256,7 @@ class PerformanceTracker: improvement_trend=improvement_trend, ) - def export_dashboard_json( - self, dashboard: PerformanceDashboard - ) -> str: + def export_dashboard_json(self, dashboard: PerformanceDashboard) -> str: """Export dashboard as JSON string. Args: diff --git a/src/logging/decision_logger.py b/src/logging/decision_logger.py index cd19b28..5a05d84 100644 --- a/src/logging/decision_logger.py +++ b/src/logging/decision_logger.py @@ -140,9 +140,7 @@ class DecisionLogger: ) self.conn.commit() - def update_outcome( - self, decision_id: str, pnl: float, accuracy: int - ) -> None: + def update_outcome(self, decision_id: str, pnl: float, accuracy: int) -> None: """Update the outcome of a decision after trade execution. 
Args: diff --git a/src/main.py b/src/main.py index da6f3e9..512f4f2 100644 --- a/src/main.py +++ b/src/main.py @@ -26,12 +26,12 @@ from src.context.aggregator import ContextAggregator from src.context.layer import ContextLayer from src.context.scheduler import ContextScheduler from src.context.store import ContextStore -from src.core.criticality import CriticalityAssessor from src.core.blackout_manager import ( BlackoutOrderManager, QueuedOrderIntent, parse_blackout_windows_kst, ) +from src.core.criticality import CriticalityAssessor from src.core.kill_switch import KillSwitchOrchestrator from src.core.order_policy import ( OrderPolicyRejected, @@ -52,12 +52,16 @@ from src.evolution.optimizer import EvolutionOptimizer from src.logging.decision_logger import DecisionLogger from src.logging_config import setup_logging from src.markets.schedule import MARKETS, MarketInfo, get_next_market_open, get_open_markets -from src.notifications.telegram_client import NotificationFilter, TelegramClient, TelegramCommandHandler -from src.strategy.models import DayPlaybook, MarketOutlook +from src.notifications.telegram_client import ( + NotificationFilter, + TelegramClient, + TelegramCommandHandler, +) from src.strategy.exit_rules import ExitRuleConfig, ExitRuleInput, evaluate_exit +from src.strategy.models import DayPlaybook, MarketOutlook from src.strategy.playbook_store import PlaybookStore -from src.strategy.pre_market_planner import PreMarketPlanner from src.strategy.position_state_machine import PositionState +from src.strategy.pre_market_planner import PreMarketPlanner from src.strategy.scenario_engine import ScenarioEngine logger = logging.getLogger(__name__) @@ -350,9 +354,7 @@ async def _inject_staged_exit_features( return if "pred_down_prob" not in market_data: - market_data["pred_down_prob"] = _estimate_pred_down_prob_from_rsi( - market_data.get("rsi") - ) + market_data["pred_down_prob"] = _estimate_pred_down_prob_from_rsi(market_data.get("rsi")) existing_atr = 
safe_float(market_data.get("atr_value"), 0.0) if existing_atr > 0: @@ -389,7 +391,7 @@ async def _retry_connection(coro_factory: Any, *args: Any, label: str = "", **kw return await coro_factory(*args, **kwargs) except ConnectionError as exc: if attempt < MAX_CONNECTION_RETRIES: - wait_secs = 2 ** attempt + wait_secs = 2**attempt logger.warning( "Connection error %s (attempt %d/%d), retrying in %ds: %s", label, @@ -413,7 +415,7 @@ async def sync_positions_from_broker( broker: Any, overseas_broker: Any, db_conn: Any, - settings: "Settings", + settings: Settings, ) -> int: """Sync open positions from the live broker into the local DB at startup. @@ -441,9 +443,7 @@ async def sync_positions_from_broker( if market.exchange_code in seen_exchange_codes: continue seen_exchange_codes.add(market.exchange_code) - balance_data = await overseas_broker.get_overseas_balance( - market.exchange_code - ) + balance_data = await overseas_broker.get_overseas_balance(market.exchange_code) log_market = market_code # e.g. 
"US_NASDAQ" except ConnectionError as exc: logger.warning( @@ -453,9 +453,7 @@ async def sync_positions_from_broker( ) continue - held_codes = _extract_held_codes_from_balance( - balance_data, is_domestic=market.is_domestic - ) + held_codes = _extract_held_codes_from_balance(balance_data, is_domestic=market.is_domestic) for stock_code in held_codes: if get_open_position(db_conn, stock_code, log_market): continue # already tracked @@ -487,9 +485,7 @@ async def sync_positions_from_broker( synced += 1 if synced: - logger.info( - "Startup sync complete: %d position(s) synced from broker", synced - ) + logger.info("Startup sync complete: %d position(s) synced from broker", synced) else: logger.info("Startup sync: no new positions to sync from broker") return synced @@ -859,15 +855,9 @@ def _apply_staged_exit_override_for_hold( pnl_pct = (current_price - entry_price) / entry_price * 100.0 if exit_eval.reason == "hard_stop": - rationale = ( - f"Stop-loss triggered ({pnl_pct:.2f}% <= " - f"{stop_loss_threshold:.2f}%)" - ) + rationale = f"Stop-loss triggered ({pnl_pct:.2f}% <= {stop_loss_threshold:.2f}%)" elif exit_eval.reason == "arm_take_profit": - rationale = ( - f"Take-profit triggered ({pnl_pct:.2f}% >= " - f"{arm_pct:.2f}%)" - ) + rationale = f"Take-profit triggered ({pnl_pct:.2f}% >= {arm_pct:.2f}%)" elif exit_eval.reason == "atr_trailing_stop": rationale = "ATR trailing-stop triggered" elif exit_eval.reason == "be_lock_threat": @@ -978,7 +968,10 @@ def _maybe_queue_order_intent( ) if queued: logger.warning( - "Blackout active: queued order intent %s %s (%s) qty=%d price=%.4f source=%s pending=%d", + ( + "Blackout active: queued order intent %s %s (%s) " + "qty=%d price=%.4f source=%s pending=%d" + ), order_type, stock_code, market.code, @@ -1071,7 +1064,10 @@ async def process_blackout_recovery_orders( ) if queued_price <= 0 or current_price <= 0: logger.info( - "Drop queued intent by price revalidation (invalid price): %s %s (%s) queued=%.4f current=%.4f", + ( + 
"Drop queued intent by price revalidation (invalid price): " + "%s %s (%s) queued=%.4f current=%.4f" + ), intent.order_type, intent.stock_code, market.code, @@ -1082,7 +1078,10 @@ async def process_blackout_recovery_orders( drift_pct = abs(current_price - queued_price) / queued_price * 100.0 if drift_pct > max_drift_pct: logger.info( - "Drop queued intent by price revalidation: %s %s (%s) queued=%.4f current=%.4f drift=%.2f%% max=%.2f%%", + ( + "Drop queued intent by price revalidation: %s %s (%s) " + "queued=%.4f current=%.4f drift=%.2f%% max=%.2f%%" + ), intent.order_type, intent.stock_code, market.code, @@ -1375,24 +1374,18 @@ async def trading_cycle( # 1. Fetch market data price_output: dict[str, Any] = {} # Populated for overseas markets; used for fallback metrics if market.is_domestic: - current_price, price_change_pct, foreigner_net = await broker.get_current_price( - stock_code - ) + current_price, price_change_pct, foreigner_net = await broker.get_current_price(stock_code) balance_data = await broker.get_balance() output2 = balance_data.get("output2", [{}]) total_eval = safe_float(output2[0].get("tot_evlu_amt", "0")) if output2 else 0 total_cash = safe_float( - balance_data.get("output2", [{}])[0].get("dnca_tot_amt", "0") - if output2 - else "0" + balance_data.get("output2", [{}])[0].get("dnca_tot_amt", "0") if output2 else "0" ) purchase_total = safe_float(output2[0].get("pchs_amt_smtl_amt", "0")) if output2 else 0 else: # Overseas market - price_data = await overseas_broker.get_overseas_price( - market.exchange_code, stock_code - ) + price_data = await overseas_broker.get_overseas_price(market.exchange_code, stock_code) balance_data = await overseas_broker.get_overseas_balance(market.exchange_code) output2 = balance_data.get("output2", [{}]) @@ -1459,11 +1452,7 @@ async def trading_cycle( total_cash = settings.PAPER_OVERSEAS_CASH # Calculate daily P&L % - pnl_pct = ( - ((total_eval - purchase_total) / purchase_total * 100) - if purchase_total > 0 - else 
0.0 - ) + pnl_pct = ((total_eval - purchase_total) / purchase_total * 100) if purchase_total > 0 else 0.0 market_data: dict[str, Any] = { "stock_code": stock_code, @@ -1491,11 +1480,13 @@ async def trading_cycle( market_data["rsi"] = max(0.0, min(100.0, 50.0 + price_change_pct * 2.0)) if price_output and current_price > 0: pr_high = safe_float( - price_output.get("high") or price_output.get("ovrs_hgpr") + price_output.get("high") + or price_output.get("ovrs_hgpr") or price_output.get("stck_hgpr") ) pr_low = safe_float( - price_output.get("low") or price_output.get("ovrs_lwpr") + price_output.get("low") + or price_output.get("ovrs_lwpr") or price_output.get("stck_lwpr") ) if pr_high > 0 and pr_low > 0 and pr_high >= pr_low: @@ -1512,9 +1503,7 @@ async def trading_cycle( if open_pos and current_price > 0: entry_price = safe_float(open_pos.get("price"), 0.0) if entry_price > 0: - market_data["unrealized_pnl_pct"] = ( - (current_price - entry_price) / entry_price * 100 - ) + market_data["unrealized_pnl_pct"] = (current_price - entry_price) / entry_price * 100 entry_ts = open_pos.get("timestamp") if entry_ts: try: @@ -1745,16 +1734,19 @@ async def trading_cycle( stock_playbook=stock_playbook, settings=settings, ) - if open_position and decision.action == "HOLD" and _should_force_exit_for_overnight( + if ( + open_position + and decision.action == "HOLD" + and _should_force_exit_for_overnight( market=market, settings=settings, + ) ): decision = TradeDecision( action="SELL", confidence=max(decision.confidence, 85), rationale=( - "Forced exit by overnight policy" - " (session close window / kill switch priority)" + "Forced exit by overnight policy (session close window / kill switch priority)" ), ) logger.info( @@ -1834,9 +1826,7 @@ async def trading_cycle( return broker_held_qty = ( - _extract_held_qty_from_balance( - balance_data, stock_code, is_domestic=market.is_domestic - ) + _extract_held_qty_from_balance(balance_data, stock_code, is_domestic=market.is_domestic) if 
decision.action == "SELL" else 0 ) @@ -1871,7 +1861,10 @@ async def trading_cycle( ) if fx_blocked: logger.warning( - "Skip BUY %s (%s): FX buffer guard (remaining=%.2f, required=%.2f, cash=%.2f, order=%.2f)", + ( + "Skip BUY %s (%s): FX buffer guard " + "(remaining=%.2f, required=%.2f, cash=%.2f, order=%.2f)" + ), stock_code, market.name, remaining_cash, @@ -2068,8 +2061,7 @@ async def trading_cycle( action="SELL", confidence=0, rationale=( - "[ghost-close] Broker reported no balance;" - " position closed without fill" + "[ghost-close] Broker reported no balance; position closed without fill" ), quantity=0, price=0.0, @@ -2275,17 +2267,13 @@ async def handle_domestic_pending_orders( outcome="cancelled", ) except Exception as notify_exc: - logger.warning( - "notify_unfilled_order failed: %s", notify_exc - ) + logger.warning("notify_unfilled_order failed: %s", notify_exc) else: # First unfilled SELL → resubmit at last * 0.996 (-0.4%). try: last_price, _, _ = await broker.get_current_price(stock_code) if last_price <= 0: - raise ValueError( - f"Invalid price ({last_price}) for {stock_code}" - ) + raise ValueError(f"Invalid price ({last_price}) for {stock_code}") new_price = kr_round_down(last_price * 0.996) validate_order_policy( market=MARKETS["KR"], @@ -2298,9 +2286,7 @@ async def handle_domestic_pending_orders( quantity=psbl_qty, price=new_price, ) - sell_resubmit_counts[key] = ( - sell_resubmit_counts.get(key, 0) + 1 - ) + sell_resubmit_counts[key] = sell_resubmit_counts.get(key, 0) + 1 try: await telegram.notify_unfilled_order( stock_code=stock_code, @@ -2311,9 +2297,7 @@ async def handle_domestic_pending_orders( new_price=float(new_price), ) except Exception as notify_exc: - logger.warning( - "notify_unfilled_order failed: %s", notify_exc - ) + logger.warning("notify_unfilled_order failed: %s", notify_exc) except Exception as exc: logger.error( "SELL resubmit failed for KR %s: %s", @@ -2381,9 +2365,7 @@ async def handle_overseas_pending_orders( try: orders = 
await overseas_broker.get_overseas_pending_orders(exchange_code) except Exception as exc: - logger.warning( - "Failed to fetch pending orders for %s: %s", exchange_code, exc - ) + logger.warning("Failed to fetch pending orders for %s: %s", exchange_code, exc) continue for order in orders: @@ -2448,26 +2430,21 @@ async def handle_overseas_pending_orders( outcome="cancelled", ) except Exception as notify_exc: - logger.warning( - "notify_unfilled_order failed: %s", notify_exc - ) + logger.warning("notify_unfilled_order failed: %s", notify_exc) else: # First unfilled SELL → resubmit at last * 0.996 (-0.4%). try: price_data = await overseas_broker.get_overseas_price( order_exchange, stock_code ) - last_price = float( - price_data.get("output", {}).get("last", "0") or "0" - ) + last_price = float(price_data.get("output", {}).get("last", "0") or "0") if last_price <= 0: - raise ValueError( - f"Invalid price ({last_price}) for {stock_code}" - ) + raise ValueError(f"Invalid price ({last_price}) for {stock_code}") new_price = round(last_price * 0.996, 4) market_info = next( ( - m for m in MARKETS.values() + m + for m in MARKETS.values() if m.exchange_code == order_exchange and not m.is_domestic ), None, @@ -2485,9 +2462,7 @@ async def handle_overseas_pending_orders( quantity=nccs_qty, price=new_price, ) - sell_resubmit_counts[key] = ( - sell_resubmit_counts.get(key, 0) + 1 - ) + sell_resubmit_counts[key] = sell_resubmit_counts.get(key, 0) + 1 try: await telegram.notify_unfilled_order( stock_code=stock_code, @@ -2498,9 +2473,7 @@ async def handle_overseas_pending_orders( new_price=new_price, ) except Exception as notify_exc: - logger.warning( - "notify_unfilled_order failed: %s", notify_exc - ) + logger.warning("notify_unfilled_order failed: %s", notify_exc) except Exception as exc: logger.error( "SELL resubmit failed for %s %s: %s", @@ -2659,13 +2632,16 @@ async def run_daily_session( logger.warning("Playbook notification failed: %s", exc) logger.info( "Generated playbook 
for %s: %d stocks, %d scenarios", - market.code, playbook.stock_count, playbook.scenario_count, + market.code, + playbook.stock_count, + playbook.scenario_count, ) except Exception as exc: logger.error("Playbook generation failed for %s: %s", market.code, exc) try: await telegram.notify_playbook_failed( - market=market.code, reason=str(exc)[:200], + market=market.code, + reason=str(exc)[:200], ) except Exception as notify_exc: logger.warning("Playbook failed notification error: %s", notify_exc) @@ -2676,12 +2652,10 @@ async def run_daily_session( for stock_code in watchlist: try: if market.is_domestic: - current_price, price_change_pct, foreigner_net = ( - await _retry_connection( - broker.get_current_price, - stock_code, - label=stock_code, - ) + current_price, price_change_pct, foreigner_net = await _retry_connection( + broker.get_current_price, + stock_code, + label=stock_code, ) else: price_data = await _retry_connection( @@ -2690,9 +2664,7 @@ async def run_daily_session( stock_code, label=f"{stock_code}@{market.exchange_code}", ) - current_price = safe_float( - price_data.get("output", {}).get("last", "0") - ) + current_price = safe_float(price_data.get("output", {}).get("last", "0")) # Fallback: if price API returns 0, use scanner candidate price if current_price <= 0: cand_lookup = candidate_map.get(stock_code) @@ -2704,9 +2676,7 @@ async def run_daily_session( ) current_price = cand_lookup.price foreigner_net = 0.0 - price_change_pct = safe_float( - price_data.get("output", {}).get("rate", "0") - ) + price_change_pct = safe_float(price_data.get("output", {}).get("rate", "0")) # Fall back to scanner candidate price if API returns 0. 
if current_price <= 0: cand_lookup = candidate_map.get(stock_code) @@ -2769,15 +2739,9 @@ async def run_daily_session( if market.is_domestic: output2 = balance_data.get("output2", [{}]) - total_eval = safe_float( - output2[0].get("tot_evlu_amt", "0") - ) if output2 else 0 - total_cash = safe_float( - output2[0].get("dnca_tot_amt", "0") - ) if output2 else 0 - purchase_total = safe_float( - output2[0].get("pchs_amt_smtl_amt", "0") - ) if output2 else 0 + total_eval = safe_float(output2[0].get("tot_evlu_amt", "0")) if output2 else 0 + total_cash = safe_float(output2[0].get("dnca_tot_amt", "0")) if output2 else 0 + purchase_total = safe_float(output2[0].get("pchs_amt_smtl_amt", "0")) if output2 else 0 else: output2 = balance_data.get("output2", [{}]) if isinstance(output2, list) and output2: @@ -2788,18 +2752,15 @@ async def run_daily_session( balance_info = {} total_eval = safe_float(balance_info.get("frcr_evlu_tota", "0") or "0") - purchase_total = safe_float( - balance_info.get("frcr_buy_amt_smtl", "0") or "0" - ) + purchase_total = safe_float(balance_info.get("frcr_buy_amt_smtl", "0") or "0") # Fetch available foreign currency cash via inquire-psamount (TTTS3007R/VTTS3007R). - # TTTS3012R output2 does not include a cash/deposit field — frcr_dncl_amt_2 does not exist. + # TTTS3012R output2 does not include a cash/deposit field. + # frcr_dncl_amt_2 does not exist. # Use the first stock with a valid price as the reference for the buying power query. # Source: 한국투자증권 오픈API 전체문서 (20260221) — '해외주식 매수가능금액조회' 시트 total_cash = 0.0 - ref_stock = next( - (s for s in stocks_data if s.get("current_price", 0) > 0), None - ) + ref_stock = next((s for s in stocks_data if s.get("current_price", 0) > 0), None) if ref_stock: try: ps_data = await overseas_broker.get_overseas_buying_power( @@ -2819,11 +2780,7 @@ async def run_daily_session( # Paper mode fallback: VTS overseas balance API often fails for many accounts. 
# Only activate in paper mode — live mode must use real balance from KIS. - if ( - total_cash <= 0 - and settings.MODE == "paper" - and settings.PAPER_OVERSEAS_CASH > 0 - ): + if total_cash <= 0 and settings.MODE == "paper" and settings.PAPER_OVERSEAS_CASH > 0: total_cash = settings.PAPER_OVERSEAS_CASH # Capture the day's opening portfolio value on the first market processed @@ -2856,13 +2813,17 @@ async def run_daily_session( # Evaluate scenarios for each stock (local, no API calls) logger.info( "Evaluating %d stocks against playbook for %s", - len(stocks_data), market.name, + len(stocks_data), + market.name, ) for stock_data in stocks_data: stock_code = stock_data["stock_code"] stock_playbook = playbook.get_stock_playbook(stock_code) match = scenario_engine.evaluate( - playbook, stock_code, stock_data, portfolio_data, + playbook, + stock_code, + stock_data, + portfolio_data, ) decision = TradeDecision( action=match.action.value, @@ -2969,9 +2930,13 @@ async def run_daily_session( stock_playbook=stock_playbook, settings=settings, ) - if daily_open and decision.action == "HOLD" and _should_force_exit_for_overnight( - market=market, - settings=settings, + if ( + daily_open + and decision.action == "HOLD" + and _should_force_exit_for_overnight( + market=market, + settings=settings, + ) ): decision = TradeDecision( action="SELL", @@ -3063,16 +3028,21 @@ async def run_daily_session( ) continue order_amount = stock_data["current_price"] * quantity - fx_blocked, remaining_cash, required_buffer = _should_block_overseas_buy_for_fx_buffer( - market=market, - action=decision.action, - total_cash=total_cash, - order_amount=order_amount, - settings=settings, + fx_blocked, remaining_cash, required_buffer = ( + _should_block_overseas_buy_for_fx_buffer( + market=market, + action=decision.action, + total_cash=total_cash, + order_amount=order_amount, + settings=settings, + ) ) if fx_blocked: logger.warning( - "Skip BUY %s (%s): FX buffer guard (remaining=%.2f, required=%.2f, 
cash=%.2f, order=%.2f)", + ( + "Skip BUY %s (%s): FX buffer guard " + "(remaining=%.2f, required=%.2f, cash=%.2f, order=%.2f)" + ), stock_code, market.name, remaining_cash, @@ -3090,7 +3060,10 @@ async def run_daily_session( if now < daily_cooldown_until: remaining = int(daily_cooldown_until - now) logger.info( - "Skip BUY %s (%s): insufficient-balance cooldown active (%ds remaining)", + ( + "Skip BUY %s (%s): insufficient-balance cooldown active " + "(%ds remaining)" + ), stock_code, market.name, remaining, @@ -3149,13 +3122,9 @@ async def run_daily_session( # Use limit orders (지정가) for domestic stocks. # KRX tick rounding applied via kr_round_down. if decision.action == "BUY": - order_price = kr_round_down( - stock_data["current_price"] * 1.002 - ) + order_price = kr_round_down(stock_data["current_price"] * 1.002) else: - order_price = kr_round_down( - stock_data["current_price"] * 0.998 - ) + order_price = kr_round_down(stock_data["current_price"] * 0.998) try: validate_order_policy( market=market, @@ -3260,9 +3229,7 @@ async def run_daily_session( except Exception as exc: logger.warning("Telegram notification failed: %s", exc) except Exception as exc: - logger.error( - "Order execution failed for %s: %s", stock_code, exc - ) + logger.error("Order execution failed for %s: %s", stock_code, exc) continue if decision.action == "SELL" and order_succeeded: @@ -3286,7 +3253,9 @@ async def run_daily_session( accuracy=1 if trade_pnl > 0 else 0, ) if trade_pnl < 0: - cooldown_key = _stoploss_cooldown_key(market=market, stock_code=stock_code) + cooldown_key = _stoploss_cooldown_key( + market=market, stock_code=stock_code + ) cooldown_minutes = _stoploss_cooldown_minutes( settings, market=market, @@ -3369,7 +3338,8 @@ async def _handle_market_close( def _run_context_scheduler( - scheduler: ContextScheduler, now: datetime | None = None, + scheduler: ContextScheduler, + now: datetime | None = None, ) -> None: """Run periodic context scheduler tasks and log when anything 
executes.""" result = scheduler.run_if_due(now=now) @@ -3438,6 +3408,7 @@ def _start_dashboard_server(settings: Settings) -> threading.Thread | None: # reported synchronously (avoids the misleading "started" → "failed" log pair). try: import uvicorn # noqa: F401 + from src.dashboard import create_dashboard_app # noqa: F401 except ImportError as exc: logger.warning("Dashboard server unavailable (missing dependency): %s", exc) @@ -3446,6 +3417,7 @@ def _start_dashboard_server(settings: Settings) -> threading.Thread | None: def _serve() -> None: try: import uvicorn + from src.dashboard import create_dashboard_app app = create_dashboard_app(settings.DB_PATH, mode=settings.MODE) @@ -3586,8 +3558,7 @@ async def run(settings: Settings) -> None: pause_trading.set() logger.info("Trading resumed via Telegram command") await telegram.send_message( - "▶️ Trading Resumed\n\n" - "Trading operations have been restarted." + "▶️ Trading Resumed\n\nTrading operations have been restarted." ) async def handle_status() -> None: @@ -3630,9 +3601,7 @@ async def run(settings: Settings) -> None: except Exception as exc: logger.error("Error in /status handler: %s", exc) - await telegram.send_message( - "⚠️ Error\n\nFailed to retrieve trading status." - ) + await telegram.send_message("⚠️ Error\n\nFailed to retrieve trading status.") async def handle_positions() -> None: """Handle /positions command - show account summary.""" @@ -3643,8 +3612,7 @@ async def run(settings: Settings) -> None: if not output2: await telegram.send_message( - "💼 Account Summary\n\n" - "No balance information available." + "💼 Account Summary\n\nNo balance information available." ) return @@ -3673,9 +3641,7 @@ async def run(settings: Settings) -> None: except Exception as exc: logger.error("Error in /positions handler: %s", exc) - await telegram.send_message( - "⚠️ Error\n\nFailed to retrieve positions." 
- ) + await telegram.send_message("⚠️ Error\n\nFailed to retrieve positions.") async def handle_report() -> None: """Handle /report command - show daily summary metrics.""" @@ -3719,9 +3685,7 @@ async def run(settings: Settings) -> None: ) except Exception as exc: logger.error("Error in /report handler: %s", exc) - await telegram.send_message( - "⚠️ Error\n\nFailed to generate daily report." - ) + await telegram.send_message("⚠️ Error\n\nFailed to generate daily report.") async def handle_scenarios() -> None: """Handle /scenarios command - show today's playbook scenarios.""" @@ -3770,9 +3734,7 @@ async def run(settings: Settings) -> None: await telegram.send_message("\n".join(lines).strip()) except Exception as exc: logger.error("Error in /scenarios handler: %s", exc) - await telegram.send_message( - "⚠️ Error\n\nFailed to retrieve scenarios." - ) + await telegram.send_message("⚠️ Error\n\nFailed to retrieve scenarios.") async def handle_review() -> None: """Handle /review command - show recent scorecards.""" @@ -3788,9 +3750,7 @@ async def run(settings: Settings) -> None: ).fetchall() if not rows: - await telegram.send_message( - "📝 Recent Reviews\n\nNo scorecards available." - ) + await telegram.send_message("📝 Recent Reviews\n\nNo scorecards available.") return lines = ["📝 Recent Reviews", ""] @@ -3808,9 +3768,7 @@ async def run(settings: Settings) -> None: await telegram.send_message("\n".join(lines)) except Exception as exc: logger.error("Error in /review handler: %s", exc) - await telegram.send_message( - "⚠️ Error\n\nFailed to retrieve reviews." 
- ) + await telegram.send_message("⚠️ Error\n\nFailed to retrieve reviews.") async def handle_notify(args: list[str]) -> None: """Handle /notify [key] [on|off] — query or change notification filters.""" @@ -3845,8 +3803,7 @@ async def run(settings: Settings) -> None: else: valid = ", ".join(list(status.keys()) + ["all"]) await telegram.send_message( - f"❌ 알 수 없는 키: {key}\n" - f"유효한 키: {valid}" + f"❌ 알 수 없는 키: {key}\n유효한 키: {valid}" ) return @@ -3858,30 +3815,22 @@ async def run(settings: Settings) -> None: value = toggle == "on" if telegram.set_notification(key, value): icon = "✅" if value else "❌" - label = f"전체 알림" if key == "all" else f"{key} 알림" + label = "전체 알림" if key == "all" else f"{key} 알림" state = "켜짐" if value else "꺼짐" await telegram.send_message(f"{icon} {label} → {state}") logger.info("Notification filter changed via Telegram: %s=%s", key, value) else: valid = ", ".join(list(telegram.filter_status().keys()) + ["all"]) - await telegram.send_message( - f"❌ 알 수 없는 키: {key}\n" - f"유효한 키: {valid}" - ) + await telegram.send_message(f"❌ 알 수 없는 키: {key}\n유효한 키: {valid}") async def handle_dashboard() -> None: """Handle /dashboard command - show dashboard URL if enabled.""" if not settings.DASHBOARD_ENABLED: - await telegram.send_message( - "🖥️ Dashboard\n\nDashboard is not enabled." 
- ) + await telegram.send_message("🖥️ Dashboard\n\nDashboard is not enabled.") return url = f"http://{settings.DASHBOARD_HOST}:{settings.DASHBOARD_PORT}" - await telegram.send_message( - "🖥️ Dashboard\n\n" - f"URL: {url}" - ) + await telegram.send_message(f"🖥️ Dashboard\n\nURL: {url}") command_handler.register_command("help", handle_help) command_handler.register_command("stop", handle_stop) @@ -4182,9 +4131,7 @@ async def run(settings: Settings) -> None: ) # Store candidates per market for selection context logging - scan_candidates[market.code] = { - c.stock_code: c for c in candidates - } + scan_candidates[market.code] = {c.stock_code: c for c in candidates} logger.info( "Smart Scanner: Found %d candidates for %s: %s", @@ -4194,9 +4141,7 @@ async def run(settings: Settings) -> None: ) # Get market-local date for playbook keying - market_today = datetime.now( - market.timezone - ).date() + market_today = datetime.now(market.timezone).date() # Load or generate playbook (1 Gemini call per market per day) if market.code not in playbooks: @@ -4234,7 +4179,8 @@ async def run(settings: Settings) -> None: except Exception as exc: logger.error( "Playbook generation failed for %s: %s", - market.code, exc, + market.code, + exc, ) try: await telegram.notify_playbook_failed( @@ -4279,7 +4225,8 @@ async def run(settings: Settings) -> None: except Exception as exc: logger.warning( "Failed to fetch holdings for %s: %s — skipping holdings merge", - market.name, exc, + market.name, + exc, ) held_codes = [] @@ -4288,7 +4235,8 @@ async def run(settings: Settings) -> None: if extra_held: logger.info( "Holdings added to loop for %s (not in scanner): %s", - market.name, extra_held, + market.name, + extra_held, ) if not stock_codes: diff --git a/src/markets/schedule.py b/src/markets/schedule.py index 9d142d9..a87408e 100644 --- a/src/markets/schedule.py +++ b/src/markets/schedule.py @@ -211,9 +211,7 @@ def get_open_markets( return is_market_open(market, now) open_markets = [ - 
MARKETS[code] - for code in enabled_markets - if code in MARKETS and is_available(MARKETS[code]) + MARKETS[code] for code in enabled_markets if code in MARKETS and is_available(MARKETS[code]) ] return sorted(open_markets, key=lambda m: m.code) @@ -282,9 +280,7 @@ def get_next_market_open( # Calculate next open time for this market for days_ahead in range(7): # Check next 7 days check_date = market_now.date() + timedelta(days=days_ahead) - check_datetime = datetime.combine( - check_date, market.open_time, tzinfo=market.timezone - ) + check_datetime = datetime.combine(check_date, market.open_time, tzinfo=market.timezone) # Skip weekends if check_datetime.weekday() >= 5: diff --git a/src/notifications/telegram_client.py b/src/notifications/telegram_client.py index 0030645..381c5dd 100644 --- a/src/notifications/telegram_client.py +++ b/src/notifications/telegram_client.py @@ -4,7 +4,7 @@ import asyncio import logging import time from collections.abc import Awaitable, Callable -from dataclasses import dataclass, fields +from dataclasses import dataclass from enum import Enum from typing import ClassVar @@ -136,14 +136,14 @@ class TelegramClient: self._enabled = enabled self._rate_limiter = LeakyBucket(rate=rate_limit) self._session: aiohttp.ClientSession | None = None - self._filter = notification_filter if notification_filter is not None else NotificationFilter() + self._filter = ( + notification_filter if notification_filter is not None else NotificationFilter() + ) if not enabled: logger.info("Telegram notifications disabled via configuration") elif bot_token is None or chat_id is None: - logger.warning( - "Telegram notifications disabled (missing bot_token or chat_id)" - ) + logger.warning("Telegram notifications disabled (missing bot_token or chat_id)") self._enabled = False else: logger.info("Telegram notifications enabled for chat_id=%s", chat_id) @@ -209,14 +209,12 @@ class TelegramClient: async with session.post(url, json=payload) as resp: if resp.status != 
200: error_text = await resp.text() - logger.error( - "Telegram API error (status=%d): %s", resp.status, error_text - ) + logger.error("Telegram API error (status=%d): %s", resp.status, error_text) return False logger.debug("Telegram message sent: %s", text[:50]) return True - except asyncio.TimeoutError: + except TimeoutError: logger.error("Telegram message timeout") return False except aiohttp.ClientError as exc: @@ -305,9 +303,7 @@ class TelegramClient: NotificationMessage(priority=NotificationPriority.LOW, message=message) ) - async def notify_circuit_breaker( - self, pnl_pct: float, threshold: float - ) -> None: + async def notify_circuit_breaker(self, pnl_pct: float, threshold: float) -> None: """ Notify circuit breaker activation. @@ -354,9 +350,7 @@ class TelegramClient: NotificationMessage(priority=NotificationPriority.HIGH, message=message) ) - async def notify_system_start( - self, mode: str, enabled_markets: list[str] - ) -> None: + async def notify_system_start(self, mode: str, enabled_markets: list[str]) -> None: """ Notify system startup. 
@@ -369,9 +363,7 @@ class TelegramClient: mode_emoji = "📝" if mode == "paper" else "💰" markets_str = ", ".join(enabled_markets) message = ( - f"{mode_emoji} System Started\n" - f"Mode: {mode.upper()}\n" - f"Markets: {markets_str}" + f"{mode_emoji} System Started\nMode: {mode.upper()}\nMarkets: {markets_str}" ) await self._send_notification( NotificationMessage(priority=NotificationPriority.MEDIUM, message=message) @@ -445,11 +437,7 @@ class TelegramClient: """ if not self._filter.playbook: return - message = ( - f"Playbook Failed\n" - f"Market: {market}\n" - f"Reason: {reason[:200]}" - ) + message = f"Playbook Failed\nMarket: {market}\nReason: {reason[:200]}" await self._send_notification( NotificationMessage(priority=NotificationPriority.HIGH, message=message) ) @@ -469,9 +457,7 @@ class TelegramClient: if "circuit breaker" in reason.lower() else NotificationPriority.MEDIUM ) - await self._send_notification( - NotificationMessage(priority=priority, message=message) - ) + await self._send_notification(NotificationMessage(priority=priority, message=message)) async def notify_unfilled_order( self, @@ -496,11 +482,7 @@ class TelegramClient: return # SELL resubmit is high priority — position liquidation at risk. # BUY cancel is medium priority — only cash is freed. 
- priority = ( - NotificationPriority.HIGH - if action == "SELL" - else NotificationPriority.MEDIUM - ) + priority = NotificationPriority.HIGH if action == "SELL" else NotificationPriority.MEDIUM outcome_emoji = "🔄" if outcome == "resubmitted" else "❌" outcome_label = "재주문" if outcome == "resubmitted" else "취소됨" action_emoji = "🔴" if action == "SELL" else "🟢" @@ -515,9 +497,7 @@ class TelegramClient: message = "\n".join(lines) await self._send_notification(NotificationMessage(priority=priority, message=message)) - async def notify_error( - self, error_type: str, error_msg: str, context: str - ) -> None: + async def notify_error(self, error_type: str, error_msg: str, context: str) -> None: """ Notify system error. @@ -541,9 +521,7 @@ class TelegramClient: class TelegramCommandHandler: """Handles incoming Telegram commands via long polling.""" - def __init__( - self, client: TelegramClient, polling_interval: float = 1.0 - ) -> None: + def __init__(self, client: TelegramClient, polling_interval: float = 1.0) -> None: """ Initialize command handler. @@ -559,9 +537,7 @@ class TelegramCommandHandler: self._polling_task: asyncio.Task[None] | None = None self._running = False - def register_command( - self, command: str, handler: Callable[[], Awaitable[None]] - ) -> None: + def register_command(self, command: str, handler: Callable[[], Awaitable[None]]) -> None: """ Register a command handler (no arguments). 
@@ -672,7 +648,7 @@ class TelegramCommandHandler: return updates - except asyncio.TimeoutError: + except TimeoutError: logger.debug("getUpdates timeout (normal)") return [] except aiohttp.ClientError as exc: @@ -697,9 +673,7 @@ class TelegramCommandHandler: # Verify chat_id matches configured chat chat_id = str(message.get("chat", {}).get("id", "")) if chat_id != self._client._chat_id: - logger.warning( - "Ignoring command from unauthorized chat_id: %s", chat_id - ) + logger.warning("Ignoring command from unauthorized chat_id: %s", chat_id) return # Extract command text diff --git a/src/strategy/models.py b/src/strategy/models.py index f7090f7..68375da 100644 --- a/src/strategy/models.py +++ b/src/strategy/models.py @@ -8,12 +8,12 @@ Defines the data contracts for the proactive strategy system: from __future__ import annotations from datetime import UTC, date, datetime -from enum import Enum +from enum import StrEnum from pydantic import BaseModel, Field, field_validator -class ScenarioAction(str, Enum): +class ScenarioAction(StrEnum): """Actions that can be taken by scenarios.""" BUY = "BUY" @@ -22,7 +22,7 @@ class ScenarioAction(str, Enum): REDUCE_ALL = "REDUCE_ALL" -class MarketOutlook(str, Enum): +class MarketOutlook(StrEnum): """AI's assessment of market direction.""" BULLISH = "bullish" @@ -32,7 +32,7 @@ class MarketOutlook(str, Enum): BEARISH = "bearish" -class PlaybookStatus(str, Enum): +class PlaybookStatus(StrEnum): """Lifecycle status of a playbook.""" PENDING = "pending" diff --git a/src/strategy/playbook_store.py b/src/strategy/playbook_store.py index 4b47356..95f2a2f 100644 --- a/src/strategy/playbook_store.py +++ b/src/strategy/playbook_store.py @@ -6,7 +6,6 @@ Designed for the pre-market strategy system (one playbook per market per day). 
from __future__ import annotations -import json import logging import sqlite3 from datetime import date @@ -53,8 +52,10 @@ class PlaybookStore: row_id = cursor.lastrowid or 0 logger.info( "Saved playbook for %s/%s (%d stocks, %d scenarios)", - playbook.date, playbook.market, - playbook.stock_count, playbook.scenario_count, + playbook.date, + playbook.market, + playbook.stock_count, + playbook.scenario_count, ) return row_id diff --git a/src/strategy/position_state_machine.py b/src/strategy/position_state_machine.py index 6a9e3a6..79f993f 100644 --- a/src/strategy/position_state_machine.py +++ b/src/strategy/position_state_machine.py @@ -6,10 +6,10 @@ State progression is monotonic (promotion-only) except terminal EXITED. from __future__ import annotations from dataclasses import dataclass -from enum import Enum +from enum import StrEnum -class PositionState(str, Enum): +class PositionState(StrEnum): HOLDING = "HOLDING" BE_LOCK = "BE_LOCK" ARMED = "ARMED" @@ -40,12 +40,7 @@ def evaluate_exit_first(inp: StateTransitionInput) -> bool: EXITED must be evaluated before any promotion. """ - return ( - inp.hard_stop_hit - or inp.trailing_stop_hit - or inp.model_exit_signal - or inp.be_lock_threat - ) + return inp.hard_stop_hit or inp.trailing_stop_hit or inp.model_exit_signal or inp.be_lock_threat def promote_state(current: PositionState, inp: StateTransitionInput) -> PositionState: diff --git a/src/strategy/pre_market_planner.py b/src/strategy/pre_market_planner.py index 1f30b11..7370a16 100644 --- a/src/strategy/pre_market_planner.py +++ b/src/strategy/pre_market_planner.py @@ -124,12 +124,14 @@ class PreMarketPlanner: # 4. 
Parse response playbook = self._parse_response( - decision.rationale, today, market, candidates, cross_market, + decision.rationale, + today, + market, + candidates, + cross_market, current_holdings=current_holdings, ) - playbook_with_tokens = playbook.model_copy( - update={"token_count": decision.token_count} - ) + playbook_with_tokens = playbook.model_copy(update={"token_count": decision.token_count}) logger.info( "Generated playbook for %s: %d stocks, %d scenarios, %d tokens", market, @@ -146,7 +148,9 @@ class PreMarketPlanner: return self._empty_playbook(today, market) def build_cross_market_context( - self, target_market: str, today: date | None = None, + self, + target_market: str, + today: date | None = None, ) -> CrossMarketContext | None: """Build cross-market context from the other market's L6 data. @@ -192,7 +196,9 @@ class PreMarketPlanner: ) def build_self_market_scorecard( - self, market: str, today: date | None = None, + self, + market: str, + today: date | None = None, ) -> dict[str, Any] | None: """Build previous-day scorecard for the same market.""" if today is None: @@ -320,18 +326,18 @@ class PreMarketPlanner: f"{context_text}\n" f"## Instructions\n" f"Return a JSON object with this exact structure:\n" - f'{{\n' + f"{{\n" f' "market_outlook": "bullish|neutral_to_bullish|neutral' f'|neutral_to_bearish|bearish",\n' f' "global_rules": [\n' f' {{"condition": "portfolio_pnl_pct < -2.0",' f' "action": "REDUCE_ALL", "rationale": "..."}}\n' - f' ],\n' + f" ],\n" f' "stocks": [\n' - f' {{\n' + f" {{\n" f' "stock_code": "...",\n' f' "scenarios": [\n' - f' {{\n' + f" {{\n" f' "condition": {{"rsi_below": 30, "volume_ratio_above": 2.0,' f' "unrealized_pnl_pct_above": 3.0, "holding_days_above": 5}},\n' f' "action": "BUY|SELL|HOLD",\n' @@ -340,11 +346,11 @@ class PreMarketPlanner: f' "stop_loss_pct": -2.0,\n' f' "take_profit_pct": 3.0,\n' f' "rationale": "..."\n' - f' }}\n' - f' ]\n' - f' }}\n' - f' ]\n' - f'}}\n\n' + f" }}\n" + f" ]\n" + f" }}\n" + f" ]\n" + 
f"}}\n\n" f"Rules:\n" f"- Max {max_scenarios} scenarios per stock\n" f"- Candidates list is the primary source for BUY candidates\n" @@ -575,8 +581,7 @@ class PreMarketPlanner: stop_loss_pct=-3.0, take_profit_pct=5.0, rationale=( - f"Rule-based BUY: oversold signal, " - f"RSI={c.rsi:.0f} (fallback planner)" + f"Rule-based BUY: oversold signal, RSI={c.rsi:.0f} (fallback planner)" ), ) ) diff --git a/src/strategy/scenario_engine.py b/src/strategy/scenario_engine.py index f1cd530..bf8f217 100644 --- a/src/strategy/scenario_engine.py +++ b/src/strategy/scenario_engine.py @@ -107,7 +107,9 @@ class ScenarioEngine: # 2. Find stock playbook stock_pb = playbook.get_stock_playbook(stock_code) if stock_pb is None: - logger.debug("No playbook for %s — defaulting to %s", stock_code, playbook.default_action) + logger.debug( + "No playbook for %s — defaulting to %s", stock_code, playbook.default_action + ) return ScenarioMatch( stock_code=stock_code, matched_scenario=None, @@ -135,7 +137,9 @@ class ScenarioEngine: ) # 4. 
No match — default action - logger.debug("No scenario matched for %s — defaulting to %s", stock_code, playbook.default_action) + logger.debug( + "No scenario matched for %s — defaulting to %s", stock_code, playbook.default_action + ) return ScenarioMatch( stock_code=stock_code, matched_scenario=None, @@ -198,17 +202,27 @@ class ScenarioEngine: checks.append(price is not None and price < condition.price_below) price_change_pct = self._safe_float(market_data.get("price_change_pct")) - if condition.price_change_pct_above is not None or condition.price_change_pct_below is not None: + if ( + condition.price_change_pct_above is not None + or condition.price_change_pct_below is not None + ): if "price_change_pct" not in market_data: self._warn_missing_key("price_change_pct") if condition.price_change_pct_above is not None: - checks.append(price_change_pct is not None and price_change_pct > condition.price_change_pct_above) + checks.append( + price_change_pct is not None and price_change_pct > condition.price_change_pct_above + ) if condition.price_change_pct_below is not None: - checks.append(price_change_pct is not None and price_change_pct < condition.price_change_pct_below) + checks.append( + price_change_pct is not None and price_change_pct < condition.price_change_pct_below + ) # Position-aware conditions unrealized_pnl_pct = self._safe_float(market_data.get("unrealized_pnl_pct")) - if condition.unrealized_pnl_pct_above is not None or condition.unrealized_pnl_pct_below is not None: + if ( + condition.unrealized_pnl_pct_above is not None + or condition.unrealized_pnl_pct_below is not None + ): if "unrealized_pnl_pct" not in market_data: self._warn_missing_key("unrealized_pnl_pct") if condition.unrealized_pnl_pct_above is not None: @@ -227,15 +241,9 @@ class ScenarioEngine: if "holding_days" not in market_data: self._warn_missing_key("holding_days") if condition.holding_days_above is not None: - checks.append( - holding_days is not None - and holding_days > 
condition.holding_days_above - ) + checks.append(holding_days is not None and holding_days > condition.holding_days_above) if condition.holding_days_below is not None: - checks.append( - holding_days is not None - and holding_days < condition.holding_days_below - ) + checks.append(holding_days is not None and holding_days < condition.holding_days_below) return len(checks) > 0 and all(checks) @@ -295,9 +303,15 @@ class ScenarioEngine: details["volume_ratio"] = self._safe_float(market_data.get("volume_ratio")) if condition.price_above is not None or condition.price_below is not None: details["current_price"] = self._safe_float(market_data.get("current_price")) - if condition.price_change_pct_above is not None or condition.price_change_pct_below is not None: + if ( + condition.price_change_pct_above is not None + or condition.price_change_pct_below is not None + ): details["price_change_pct"] = self._safe_float(market_data.get("price_change_pct")) - if condition.unrealized_pnl_pct_above is not None or condition.unrealized_pnl_pct_below is not None: + if ( + condition.unrealized_pnl_pct_above is not None + or condition.unrealized_pnl_pct_below is not None + ): details["unrealized_pnl_pct"] = self._safe_float(market_data.get("unrealized_pnl_pct")) if condition.holding_days_above is not None or condition.holding_days_below is not None: details["holding_days"] = self._safe_float(market_data.get("holding_days")) diff --git a/tests/test_backup.py b/tests/test_backup.py index 0ecfa3e..3e82e39 100644 --- a/tests/test_backup.py +++ b/tests/test_backup.py @@ -4,8 +4,7 @@ from __future__ import annotations import sqlite3 import sys -import tempfile -from datetime import UTC, datetime, timedelta +from datetime import UTC, datetime from pathlib import Path from unittest.mock import MagicMock, patch @@ -48,7 +47,9 @@ def temp_db(tmp_path: Path) -> Path: cursor.executemany( """ - INSERT INTO trades (timestamp, stock_code, action, quantity, price, confidence, rationale, pnl) + INSERT 
INTO trades ( + timestamp, stock_code, action, quantity, price, confidence, rationale, pnl + ) VALUES (?, ?, ?, ?, ?, ?, ?, ?) """, test_trades, @@ -73,9 +74,7 @@ class TestBackupExporter: exporter = BackupExporter(str(temp_db)) output_dir = tmp_path / "exports" - results = exporter.export_all( - output_dir, formats=[ExportFormat.JSON], compress=False - ) + results = exporter.export_all(output_dir, formats=[ExportFormat.JSON], compress=False) assert ExportFormat.JSON in results assert results[ExportFormat.JSON].exists() @@ -86,9 +85,7 @@ class TestBackupExporter: exporter = BackupExporter(str(temp_db)) output_dir = tmp_path / "exports" - results = exporter.export_all( - output_dir, formats=[ExportFormat.JSON], compress=True - ) + results = exporter.export_all(output_dir, formats=[ExportFormat.JSON], compress=True) assert ExportFormat.JSON in results assert results[ExportFormat.JSON].suffix == ".gz" @@ -98,15 +95,13 @@ class TestBackupExporter: exporter = BackupExporter(str(temp_db)) output_dir = tmp_path / "exports" - results = exporter.export_all( - output_dir, formats=[ExportFormat.CSV], compress=False - ) + results = exporter.export_all(output_dir, formats=[ExportFormat.CSV], compress=False) assert ExportFormat.CSV in results assert results[ExportFormat.CSV].exists() # Verify CSV content - with open(results[ExportFormat.CSV], "r") as f: + with open(results[ExportFormat.CSV]) as f: lines = f.readlines() assert len(lines) == 4 # Header + 3 rows @@ -146,7 +141,7 @@ class TestBackupExporter: # Should only have 1 trade (AAPL on Jan 2) import json - with open(results[ExportFormat.JSON], "r") as f: + with open(results[ExportFormat.JSON]) as f: data = json.load(f) assert data["record_count"] == 1 assert data["trades"][0]["stock_code"] == "AAPL" @@ -407,9 +402,7 @@ class TestBackupExporterAdditional: assert ExportFormat.JSON in results assert ExportFormat.CSV in results - def test_export_all_logs_error_on_failure( - self, temp_db: Path, tmp_path: Path - ) -> None: + def 
test_export_all_logs_error_on_failure(self, temp_db: Path, tmp_path: Path) -> None: """export_all must log an error and continue when one format fails.""" exporter = BackupExporter(str(temp_db)) # Patch _export_format to raise on JSON, succeed on CSV @@ -430,9 +423,7 @@ class TestBackupExporterAdditional: assert ExportFormat.JSON not in results assert ExportFormat.CSV in results - def test_export_csv_empty_trades_no_compress( - self, empty_db: Path, tmp_path: Path - ) -> None: + def test_export_csv_empty_trades_no_compress(self, empty_db: Path, tmp_path: Path) -> None: """CSV export with no trades and compress=False must write header row only.""" exporter = BackupExporter(str(empty_db)) results = exporter.export_all( @@ -446,9 +437,7 @@ class TestBackupExporterAdditional: content = out.read_text() assert "timestamp" in content - def test_export_csv_empty_trades_compressed( - self, empty_db: Path, tmp_path: Path - ) -> None: + def test_export_csv_empty_trades_compressed(self, empty_db: Path, tmp_path: Path) -> None: """CSV export with no trades and compress=True must write gzipped header.""" import gzip @@ -465,9 +454,7 @@ class TestBackupExporterAdditional: content = f.read() assert "timestamp" in content - def test_export_csv_with_data_compressed( - self, temp_db: Path, tmp_path: Path - ) -> None: + def test_export_csv_with_data_compressed(self, temp_db: Path, tmp_path: Path) -> None: """CSV export with data and compress=True must write gzipped rows.""" import gzip @@ -492,6 +479,7 @@ class TestBackupExporterAdditional: with patch.dict(sys.modules, {"pyarrow": None, "pyarrow.parquet": None}): try: import pyarrow # noqa: F401 + pytest.skip("pyarrow is installed; cannot test ImportError path") except ImportError: pass @@ -557,9 +545,7 @@ class TestCloudStorage: importlib.reload(m) m.CloudStorage(s3_config) - def test_upload_file_success( - self, mock_boto3_module, s3_config, tmp_path: Path - ) -> None: + def test_upload_file_success(self, mock_boto3_module, 
s3_config, tmp_path: Path) -> None: """upload_file must call client.upload_file and return the object key.""" from src.backup.cloud_storage import CloudStorage @@ -572,9 +558,7 @@ class TestCloudStorage: assert key == "backups/backup.json.gz" storage.client.upload_file.assert_called_once() - def test_upload_file_default_key( - self, mock_boto3_module, s3_config, tmp_path: Path - ) -> None: + def test_upload_file_default_key(self, mock_boto3_module, s3_config, tmp_path: Path) -> None: """upload_file without object_key must use the filename as key.""" from src.backup.cloud_storage import CloudStorage @@ -586,9 +570,7 @@ class TestCloudStorage: assert key == "myfile.gz" - def test_upload_file_not_found( - self, mock_boto3_module, s3_config, tmp_path: Path - ) -> None: + def test_upload_file_not_found(self, mock_boto3_module, s3_config, tmp_path: Path) -> None: """upload_file must raise FileNotFoundError for missing files.""" from src.backup.cloud_storage import CloudStorage @@ -611,9 +593,7 @@ class TestCloudStorage: with pytest.raises(RuntimeError, match="network error"): storage.upload_file(test_file) - def test_download_file_success( - self, mock_boto3_module, s3_config, tmp_path: Path - ) -> None: + def test_download_file_success(self, mock_boto3_module, s3_config, tmp_path: Path) -> None: """download_file must call client.download_file and return local path.""" from src.backup.cloud_storage import CloudStorage @@ -637,11 +617,8 @@ class TestCloudStorage: with pytest.raises(RuntimeError, match="timeout"): storage.download_file("key", tmp_path / "dest.gz") - def test_list_files_returns_objects( - self, mock_boto3_module, s3_config - ) -> None: + def test_list_files_returns_objects(self, mock_boto3_module, s3_config) -> None: """list_files must return parsed file metadata from S3 response.""" - from datetime import timezone from src.backup.cloud_storage import CloudStorage @@ -651,7 +628,7 @@ class TestCloudStorage: { "Key": "backups/a.gz", "Size": 1024, - 
"LastModified": datetime(2026, 1, 1, tzinfo=timezone.utc), + "LastModified": datetime(2026, 1, 1, tzinfo=UTC), "ETag": '"abc123"', } ] @@ -662,9 +639,7 @@ class TestCloudStorage: assert files[0]["key"] == "backups/a.gz" assert files[0]["size_bytes"] == 1024 - def test_list_files_empty_bucket( - self, mock_boto3_module, s3_config - ) -> None: + def test_list_files_empty_bucket(self, mock_boto3_module, s3_config) -> None: """list_files must return empty list when bucket has no objects.""" from src.backup.cloud_storage import CloudStorage @@ -674,9 +649,7 @@ class TestCloudStorage: files = storage.list_files() assert files == [] - def test_list_files_propagates_error( - self, mock_boto3_module, s3_config - ) -> None: + def test_list_files_propagates_error(self, mock_boto3_module, s3_config) -> None: """list_files must re-raise exceptions from the boto3 client.""" from src.backup.cloud_storage import CloudStorage @@ -686,9 +659,7 @@ class TestCloudStorage: with pytest.raises(RuntimeError): storage.list_files() - def test_delete_file_success( - self, mock_boto3_module, s3_config - ) -> None: + def test_delete_file_success(self, mock_boto3_module, s3_config) -> None: """delete_file must call client.delete_object with the correct key.""" from src.backup.cloud_storage import CloudStorage @@ -698,9 +669,7 @@ class TestCloudStorage: Bucket="test-bucket", Key="backups/old.gz" ) - def test_delete_file_propagates_error( - self, mock_boto3_module, s3_config - ) -> None: + def test_delete_file_propagates_error(self, mock_boto3_module, s3_config) -> None: """delete_file must re-raise exceptions from the boto3 client.""" from src.backup.cloud_storage import CloudStorage @@ -710,11 +679,8 @@ class TestCloudStorage: with pytest.raises(RuntimeError): storage.delete_file("backups/old.gz") - def test_get_storage_stats_success( - self, mock_boto3_module, s3_config - ) -> None: + def test_get_storage_stats_success(self, mock_boto3_module, s3_config) -> None: """get_storage_stats must 
aggregate file sizes correctly.""" - from datetime import timezone from src.backup.cloud_storage import CloudStorage @@ -724,13 +690,13 @@ class TestCloudStorage: { "Key": "a.gz", "Size": 1024 * 1024, - "LastModified": datetime(2026, 1, 1, tzinfo=timezone.utc), + "LastModified": datetime(2026, 1, 1, tzinfo=UTC), "ETag": '"x"', }, { "Key": "b.gz", "Size": 1024 * 1024, - "LastModified": datetime(2026, 1, 2, tzinfo=timezone.utc), + "LastModified": datetime(2026, 1, 2, tzinfo=UTC), "ETag": '"y"', }, ] @@ -741,9 +707,7 @@ class TestCloudStorage: assert stats["total_size_bytes"] == 2 * 1024 * 1024 assert stats["total_size_mb"] == pytest.approx(2.0) - def test_get_storage_stats_on_error( - self, mock_boto3_module, s3_config - ) -> None: + def test_get_storage_stats_on_error(self, mock_boto3_module, s3_config) -> None: """get_storage_stats must return error dict without raising on failure.""" from src.backup.cloud_storage import CloudStorage @@ -754,9 +718,7 @@ class TestCloudStorage: assert "error" in stats assert stats["total_files"] == 0 - def test_verify_connection_success( - self, mock_boto3_module, s3_config - ) -> None: + def test_verify_connection_success(self, mock_boto3_module, s3_config) -> None: """verify_connection must return True when head_bucket succeeds.""" from src.backup.cloud_storage import CloudStorage @@ -764,9 +726,7 @@ class TestCloudStorage: result = storage.verify_connection() assert result is True - def test_verify_connection_failure( - self, mock_boto3_module, s3_config - ) -> None: + def test_verify_connection_failure(self, mock_boto3_module, s3_config) -> None: """verify_connection must return False when head_bucket raises.""" from src.backup.cloud_storage import CloudStorage @@ -776,9 +736,7 @@ class TestCloudStorage: result = storage.verify_connection() assert result is False - def test_enable_versioning( - self, mock_boto3_module, s3_config - ) -> None: + def test_enable_versioning(self, mock_boto3_module, s3_config) -> None: 
"""enable_versioning must call put_bucket_versioning.""" from src.backup.cloud_storage import CloudStorage @@ -786,9 +744,7 @@ class TestCloudStorage: storage.enable_versioning() storage.client.put_bucket_versioning.assert_called_once() - def test_enable_versioning_propagates_error( - self, mock_boto3_module, s3_config - ) -> None: + def test_enable_versioning_propagates_error(self, mock_boto3_module, s3_config) -> None: """enable_versioning must re-raise exceptions from the boto3 client.""" from src.backup.cloud_storage import CloudStorage diff --git a/tests/test_brain.py b/tests/test_brain.py index c857720..9bf99d8 100644 --- a/tests/test_brain.py +++ b/tests/test_brain.py @@ -323,7 +323,8 @@ class TestPromptOverride: # Verify the custom prompt was sent, not a built prompt mock_generate.assert_called_once() actual_prompt = mock_generate.call_args[1].get( - "contents", mock_generate.call_args[0][1] if len(mock_generate.call_args[0]) > 1 else None + "contents", + mock_generate.call_args[0][1] if len(mock_generate.call_args[0]) > 1 else None, ) assert actual_prompt == custom_prompt # Raw response preserved in rationale without parse_response (#247) @@ -385,7 +386,8 @@ class TestPromptOverride: await client.decide(market_data) actual_prompt = mock_generate.call_args[1].get( - "contents", mock_generate.call_args[0][1] if len(mock_generate.call_args[0]) > 1 else None + "contents", + mock_generate.call_args[0][1] if len(mock_generate.call_args[0]) > 1 else None, ) # The custom prompt must be used, not the compressed prompt assert actual_prompt == custom_prompt @@ -411,7 +413,8 @@ class TestPromptOverride: await client.decide(market_data) actual_prompt = mock_generate.call_args[1].get( - "contents", mock_generate.call_args[0][1] if len(mock_generate.call_args[0]) > 1 else None + "contents", + mock_generate.call_args[0][1] if len(mock_generate.call_args[0]) > 1 else None, ) # Should contain stock code from build_prompt, not be a custom override assert "005930" in 
actual_prompt diff --git a/tests/test_broker.py b/tests/test_broker.py index 5213013..16ad45f 100644 --- a/tests/test_broker.py +++ b/tests/test_broker.py @@ -3,7 +3,7 @@ from __future__ import annotations import asyncio -from unittest.mock import AsyncMock, MagicMock, patch +from unittest.mock import AsyncMock, patch import pytest @@ -99,7 +99,10 @@ class TestTokenManagement: mock_resp_403 = AsyncMock() mock_resp_403.status = 403 mock_resp_403.text = AsyncMock( - return_value='{"error_code":"EGW00133","error_description":"접근토큰 발급 잠시 후 다시 시도하세요(1분당 1회)"}' + return_value=( + '{"error_code":"EGW00133","error_description":' + '"접근토큰 발급 잠시 후 다시 시도하세요(1분당 1회)"}' + ) ) mock_resp_403.__aenter__ = AsyncMock(return_value=mock_resp_403) mock_resp_403.__aexit__ = AsyncMock(return_value=False) @@ -232,9 +235,7 @@ class TestRateLimiter: mock_order_resp.__aenter__ = AsyncMock(return_value=mock_order_resp) mock_order_resp.__aexit__ = AsyncMock(return_value=False) - with patch( - "aiohttp.ClientSession.post", side_effect=[mock_hash_resp, mock_order_resp] - ): + with patch("aiohttp.ClientSession.post", side_effect=[mock_hash_resp, mock_order_resp]): with patch.object( broker._rate_limiter, "acquire", new_callable=AsyncMock ) as mock_acquire: @@ -405,7 +406,7 @@ class TestFetchMarketRankings: # --------------------------------------------------------------------------- -from src.broker.kis_api import kr_tick_unit, kr_round_down # noqa: E402 +from src.broker.kis_api import kr_round_down, kr_tick_unit # noqa: E402 class TestKrTickUnit: @@ -435,13 +436,13 @@ class TestKrTickUnit: @pytest.mark.parametrize( "price, expected_rounded", [ - (188150, 188100), # 100원 단위, 50원 잔여 → 내림 - (188100, 188100), # 이미 정렬됨 - (75050, 75000), # 100원 단위, 50원 잔여 → 내림 - (49950, 49950), # 50원 단위 정렬됨 - (49960, 49950), # 50원 단위, 10원 잔여 → 내림 - (1999, 1999), # 1원 단위 → 그대로 - (5003, 5000), # 10원 단위, 3원 잔여 → 내림 + (188150, 188100), # 100원 단위, 50원 잔여 → 내림 + (188100, 188100), # 이미 정렬됨 + (75050, 75000), # 100원 단위, 50원 잔여 
→ 내림 + (49950, 49950), # 50원 단위 정렬됨 + (49960, 49950), # 50원 단위, 10원 잔여 → 내림 + (1999, 1999), # 1원 단위 → 그대로 + (5003, 5000), # 10원 단위, 3원 잔여 → 내림 ], ) def test_round_down_to_tick(self, price: int, expected_rounded: int) -> None: @@ -538,15 +539,13 @@ class TestSendOrderTickRounding: mock_order.__aenter__ = AsyncMock(return_value=mock_order) mock_order.__aexit__ = AsyncMock(return_value=False) - with patch( - "aiohttp.ClientSession.post", side_effect=[mock_hash, mock_order] - ) as mock_post: + with patch("aiohttp.ClientSession.post", side_effect=[mock_hash, mock_order]) as mock_post: await broker.send_order("005930", "BUY", 1, price=188150) order_call = mock_post.call_args_list[1] body = order_call[1].get("json", {}) assert body["ORD_UNPR"] == "188100" # rounded down - assert body["ORD_DVSN"] == "00" # 지정가 + assert body["ORD_DVSN"] == "00" # 지정가 @pytest.mark.asyncio async def test_limit_order_ord_dvsn_is_00(self, broker: KISBroker) -> None: @@ -563,9 +562,7 @@ class TestSendOrderTickRounding: mock_order.__aenter__ = AsyncMock(return_value=mock_order) mock_order.__aexit__ = AsyncMock(return_value=False) - with patch( - "aiohttp.ClientSession.post", side_effect=[mock_hash, mock_order] - ) as mock_post: + with patch("aiohttp.ClientSession.post", side_effect=[mock_hash, mock_order]) as mock_post: await broker.send_order("005930", "BUY", 1, price=50000) order_call = mock_post.call_args_list[1] @@ -587,9 +584,7 @@ class TestSendOrderTickRounding: mock_order.__aenter__ = AsyncMock(return_value=mock_order) mock_order.__aexit__ = AsyncMock(return_value=False) - with patch( - "aiohttp.ClientSession.post", side_effect=[mock_hash, mock_order] - ) as mock_post: + with patch("aiohttp.ClientSession.post", side_effect=[mock_hash, mock_order]) as mock_post: await broker.send_order("005930", "SELL", 1, price=0) order_call = mock_post.call_args_list[1] @@ -628,9 +623,7 @@ class TestTRIDBranchingDomestic: broker = self._make_broker(settings, "paper") mock_resp = AsyncMock() 
mock_resp.status = 200 - mock_resp.json = AsyncMock( - return_value={"output1": [], "output2": {}} - ) + mock_resp.json = AsyncMock(return_value={"output1": [], "output2": {}}) mock_resp.__aenter__ = AsyncMock(return_value=mock_resp) mock_resp.__aexit__ = AsyncMock(return_value=False) @@ -645,9 +638,7 @@ class TestTRIDBranchingDomestic: broker = self._make_broker(settings, "live") mock_resp = AsyncMock() mock_resp.status = 200 - mock_resp.json = AsyncMock( - return_value={"output1": [], "output2": {}} - ) + mock_resp.json = AsyncMock(return_value={"output1": [], "output2": {}}) mock_resp.__aenter__ = AsyncMock(return_value=mock_resp) mock_resp.__aexit__ = AsyncMock(return_value=False) @@ -672,9 +663,7 @@ class TestTRIDBranchingDomestic: mock_order.__aenter__ = AsyncMock(return_value=mock_order) mock_order.__aexit__ = AsyncMock(return_value=False) - with patch( - "aiohttp.ClientSession.post", side_effect=[mock_hash, mock_order] - ) as mock_post: + with patch("aiohttp.ClientSession.post", side_effect=[mock_hash, mock_order]) as mock_post: await broker.send_order("005930", "BUY", 1) order_headers = mock_post.call_args_list[1][1].get("headers", {}) @@ -695,9 +684,7 @@ class TestTRIDBranchingDomestic: mock_order.__aenter__ = AsyncMock(return_value=mock_order) mock_order.__aexit__ = AsyncMock(return_value=False) - with patch( - "aiohttp.ClientSession.post", side_effect=[mock_hash, mock_order] - ) as mock_post: + with patch("aiohttp.ClientSession.post", side_effect=[mock_hash, mock_order]) as mock_post: await broker.send_order("005930", "BUY", 1) order_headers = mock_post.call_args_list[1][1].get("headers", {}) @@ -718,9 +705,7 @@ class TestTRIDBranchingDomestic: mock_order.__aenter__ = AsyncMock(return_value=mock_order) mock_order.__aexit__ = AsyncMock(return_value=False) - with patch( - "aiohttp.ClientSession.post", side_effect=[mock_hash, mock_order] - ) as mock_post: + with patch("aiohttp.ClientSession.post", side_effect=[mock_hash, mock_order]) as mock_post: await 
broker.send_order("005930", "SELL", 1) order_headers = mock_post.call_args_list[1][1].get("headers", {}) @@ -741,9 +726,7 @@ class TestTRIDBranchingDomestic: mock_order.__aenter__ = AsyncMock(return_value=mock_order) mock_order.__aexit__ = AsyncMock(return_value=False) - with patch( - "aiohttp.ClientSession.post", side_effect=[mock_hash, mock_order] - ) as mock_post: + with patch("aiohttp.ClientSession.post", side_effect=[mock_hash, mock_order]) as mock_post: await broker.send_order("005930", "SELL", 1) order_headers = mock_post.call_args_list[1][1].get("headers", {}) @@ -788,9 +771,7 @@ class TestGetDomesticPendingOrders: mock_get.assert_not_called() @pytest.mark.asyncio - async def test_live_mode_calls_tttc0084r_with_correct_params( - self, settings - ) -> None: + async def test_live_mode_calls_tttc0084r_with_correct_params(self, settings) -> None: """Live mode must call TTTC0084R with INQR_DVSN_1/2 and paging params.""" broker = self._make_broker(settings, "live") pending = [{"odno": "001", "pdno": "005930", "psbl_qty": "10"}] @@ -872,9 +853,7 @@ class TestCancelDomesticOrder: broker = self._make_broker(settings, "live") mock_hash, mock_order = self._make_post_mocks({"rt_cd": "0"}) - with patch( - "aiohttp.ClientSession.post", side_effect=[mock_hash, mock_order] - ) as mock_post: + with patch("aiohttp.ClientSession.post", side_effect=[mock_hash, mock_order]) as mock_post: await broker.cancel_domestic_order("005930", "ORD001", "BRNO01", 5) order_headers = mock_post.call_args_list[1][1].get("headers", {}) @@ -886,9 +865,7 @@ class TestCancelDomesticOrder: broker = self._make_broker(settings, "paper") mock_hash, mock_order = self._make_post_mocks({"rt_cd": "0"}) - with patch( - "aiohttp.ClientSession.post", side_effect=[mock_hash, mock_order] - ) as mock_post: + with patch("aiohttp.ClientSession.post", side_effect=[mock_hash, mock_order]) as mock_post: await broker.cancel_domestic_order("005930", "ORD001", "BRNO01", 5) order_headers = 
mock_post.call_args_list[1][1].get("headers", {}) @@ -900,9 +877,7 @@ class TestCancelDomesticOrder: broker = self._make_broker(settings, "live") mock_hash, mock_order = self._make_post_mocks({"rt_cd": "0"}) - with patch( - "aiohttp.ClientSession.post", side_effect=[mock_hash, mock_order] - ) as mock_post: + with patch("aiohttp.ClientSession.post", side_effect=[mock_hash, mock_order]) as mock_post: await broker.cancel_domestic_order("005930", "ORD001", "BRNO01", 5) body = mock_post.call_args_list[1][1].get("json", {}) @@ -916,9 +891,7 @@ class TestCancelDomesticOrder: broker = self._make_broker(settings, "live") mock_hash, mock_order = self._make_post_mocks({"rt_cd": "0"}) - with patch( - "aiohttp.ClientSession.post", side_effect=[mock_hash, mock_order] - ) as mock_post: + with patch("aiohttp.ClientSession.post", side_effect=[mock_hash, mock_order]) as mock_post: await broker.cancel_domestic_order("005930", "ORD123", "BRN456", 3) body = mock_post.call_args_list[1][1].get("json", {}) @@ -932,9 +905,7 @@ class TestCancelDomesticOrder: broker = self._make_broker(settings, "live") mock_hash, mock_order = self._make_post_mocks({"rt_cd": "0"}) - with patch( - "aiohttp.ClientSession.post", side_effect=[mock_hash, mock_order] - ) as mock_post: + with patch("aiohttp.ClientSession.post", side_effect=[mock_hash, mock_order]) as mock_post: await broker.cancel_domestic_order("005930", "ORD001", "BRNO01", 2) order_headers = mock_post.call_args_list[1][1].get("headers", {}) diff --git a/tests/test_context.py b/tests/test_context.py index a1d1f29..3abc58d 100644 --- a/tests/test_context.py +++ b/tests/test_context.py @@ -77,9 +77,7 @@ class TestContextStore: # Latest by updated_at, which should be the last one set assert latest == "2026-02-02" - def test_delete_old_contexts( - self, store: ContextStore, db_conn: sqlite3.Connection - ) -> None: + def test_delete_old_contexts(self, store: ContextStore, db_conn: sqlite3.Connection) -> None: """Test deleting contexts older than a 
cutoff date.""" # Insert contexts with specific old timestamps # (bypassing set_context which uses current time) @@ -170,9 +168,7 @@ class TestContextAggregator: log_trade(db_conn, "035720", "HOLD", 75, "Wait", quantity=0, price=0, pnl=0) # Manually set timestamps to the target date - db_conn.execute( - f"UPDATE trades SET timestamp = '{date}T10:00:00+00:00'" - ) + db_conn.execute(f"UPDATE trades SET timestamp = '{date}T10:00:00+00:00'") db_conn.commit() # Aggregate @@ -194,18 +190,10 @@ class TestContextAggregator: week = "2026-W06" # Set daily contexts - aggregator.store.set_context( - ContextLayer.L6_DAILY, "2026-02-02", "total_pnl_KR", 100.0 - ) - aggregator.store.set_context( - ContextLayer.L6_DAILY, "2026-02-03", "total_pnl_KR", 200.0 - ) - aggregator.store.set_context( - ContextLayer.L6_DAILY, "2026-02-02", "avg_confidence_KR", 80.0 - ) - aggregator.store.set_context( - ContextLayer.L6_DAILY, "2026-02-03", "avg_confidence_KR", 85.0 - ) + aggregator.store.set_context(ContextLayer.L6_DAILY, "2026-02-02", "total_pnl_KR", 100.0) + aggregator.store.set_context(ContextLayer.L6_DAILY, "2026-02-03", "total_pnl_KR", 200.0) + aggregator.store.set_context(ContextLayer.L6_DAILY, "2026-02-02", "avg_confidence_KR", 80.0) + aggregator.store.set_context(ContextLayer.L6_DAILY, "2026-02-03", "avg_confidence_KR", 85.0) # Aggregate aggregator.aggregate_weekly_from_daily(week) @@ -223,15 +211,9 @@ class TestContextAggregator: month = "2026-02" # Set weekly contexts - aggregator.store.set_context( - ContextLayer.L5_WEEKLY, "2026-W05", "weekly_pnl_KR", 100.0 - ) - aggregator.store.set_context( - ContextLayer.L5_WEEKLY, "2026-W06", "weekly_pnl_KR", 200.0 - ) - aggregator.store.set_context( - ContextLayer.L5_WEEKLY, "2026-W07", "weekly_pnl_KR", 150.0 - ) + aggregator.store.set_context(ContextLayer.L5_WEEKLY, "2026-W05", "weekly_pnl_KR", 100.0) + aggregator.store.set_context(ContextLayer.L5_WEEKLY, "2026-W06", "weekly_pnl_KR", 200.0) + 
aggregator.store.set_context(ContextLayer.L5_WEEKLY, "2026-W07", "weekly_pnl_KR", 150.0) # Aggregate aggregator.aggregate_monthly_from_weekly(month) @@ -316,6 +298,7 @@ class TestContextAggregator: store = aggregator.store assert store.get_context(ContextLayer.L6_DAILY, date, "total_pnl_KR") == 1000.0 from datetime import date as date_cls + trade_date = date_cls.fromisoformat(date) iso_year, iso_week, _ = trade_date.isocalendar() trade_week = f"{iso_year}-W{iso_week:02d}" @@ -324,7 +307,9 @@ class TestContextAggregator: trade_quarter = f"{trade_date.year}-Q{(trade_date.month - 1) // 3 + 1}" trade_year = str(trade_date.year) assert store.get_context(ContextLayer.L4_MONTHLY, trade_month, "monthly_pnl") == 1000.0 - assert store.get_context(ContextLayer.L3_QUARTERLY, trade_quarter, "quarterly_pnl") == 1000.0 + assert ( + store.get_context(ContextLayer.L3_QUARTERLY, trade_quarter, "quarterly_pnl") == 1000.0 + ) assert store.get_context(ContextLayer.L2_ANNUAL, trade_year, "annual_pnl") == 1000.0 @@ -429,9 +414,7 @@ class TestContextSummarizer: # summarize_layer # ------------------------------------------------------------------ - def test_summarize_layer_no_data( - self, summarizer: ContextSummarizer - ) -> None: + def test_summarize_layer_no_data(self, summarizer: ContextSummarizer) -> None: """summarize_layer with no data must return the 'No data' sentinel.""" result = summarizer.summarize_layer(ContextLayer.L6_DAILY) assert result["count"] == 0 @@ -448,15 +431,12 @@ class TestContextSummarizer: result = summarizer.summarize_layer(ContextLayer.L6_DAILY) assert "total_entries" in result - def test_summarize_layer_with_dict_values( - self, summarizer: ContextSummarizer - ) -> None: + def test_summarize_layer_with_dict_values(self, summarizer: ContextSummarizer) -> None: """summarize_layer must handle dict values by extracting numeric subkeys.""" store = summarizer.store # set_context serialises the value as JSON, so passing a dict works store.set_context( - 
ContextLayer.L6_DAILY, "2026-02-01", "metrics", - {"win_rate": 65.0, "label": "good"} + ContextLayer.L6_DAILY, "2026-02-01", "metrics", {"win_rate": 65.0, "label": "good"} ) result = summarizer.summarize_layer(ContextLayer.L6_DAILY) @@ -464,9 +444,7 @@ class TestContextSummarizer: # numeric subkey "win_rate" should appear as "metrics.win_rate" assert "metrics.win_rate" in result - def test_summarize_layer_with_string_values( - self, summarizer: ContextSummarizer - ) -> None: + def test_summarize_layer_with_string_values(self, summarizer: ContextSummarizer) -> None: """summarize_layer must count string values separately.""" store = summarizer.store # set_context stores string values as JSON-encoded strings @@ -480,9 +458,7 @@ class TestContextSummarizer: # rolling_window_summary # ------------------------------------------------------------------ - def test_rolling_window_summary_basic( - self, summarizer: ContextSummarizer - ) -> None: + def test_rolling_window_summary_basic(self, summarizer: ContextSummarizer) -> None: """rolling_window_summary must return the expected structure.""" store = summarizer.store store.set_context(ContextLayer.L6_DAILY, "2026-02-01", "pnl", 500.0) @@ -492,22 +468,16 @@ class TestContextSummarizer: assert "recent_data" in result assert "historical_summary" in result - def test_rolling_window_summary_no_older_data( - self, summarizer: ContextSummarizer - ) -> None: + def test_rolling_window_summary_no_older_data(self, summarizer: ContextSummarizer) -> None: """rolling_window_summary with summarize_older=False skips history.""" - result = summarizer.rolling_window_summary( - ContextLayer.L6_DAILY, summarize_older=False - ) + result = summarizer.rolling_window_summary(ContextLayer.L6_DAILY, summarize_older=False) assert result["historical_summary"] == {} # ------------------------------------------------------------------ # aggregate_to_higher_layer # ------------------------------------------------------------------ - def 
test_aggregate_to_higher_layer_mean( - self, summarizer: ContextSummarizer - ) -> None: + def test_aggregate_to_higher_layer_mean(self, summarizer: ContextSummarizer) -> None: """aggregate_to_higher_layer with 'mean' via dict subkeys returns average.""" store = summarizer.store # Use different outer keys but same inner metric key so get_all_contexts @@ -520,9 +490,7 @@ class TestContextSummarizer: ) assert result == pytest.approx(150.0) - def test_aggregate_to_higher_layer_sum( - self, summarizer: ContextSummarizer - ) -> None: + def test_aggregate_to_higher_layer_sum(self, summarizer: ContextSummarizer) -> None: """aggregate_to_higher_layer with 'sum' must return the total.""" store = summarizer.store store.set_context(ContextLayer.L6_DAILY, "2026-02-01", "day1", {"pnl": 100.0}) @@ -533,9 +501,7 @@ class TestContextSummarizer: ) assert result == pytest.approx(300.0) - def test_aggregate_to_higher_layer_max( - self, summarizer: ContextSummarizer - ) -> None: + def test_aggregate_to_higher_layer_max(self, summarizer: ContextSummarizer) -> None: """aggregate_to_higher_layer with 'max' must return the maximum.""" store = summarizer.store store.set_context(ContextLayer.L6_DAILY, "2026-02-01", "day1", {"pnl": 100.0}) @@ -546,9 +512,7 @@ class TestContextSummarizer: ) assert result == pytest.approx(200.0) - def test_aggregate_to_higher_layer_min( - self, summarizer: ContextSummarizer - ) -> None: + def test_aggregate_to_higher_layer_min(self, summarizer: ContextSummarizer) -> None: """aggregate_to_higher_layer with 'min' must return the minimum.""" store = summarizer.store store.set_context(ContextLayer.L6_DAILY, "2026-02-01", "day1", {"pnl": 100.0}) @@ -559,9 +523,7 @@ class TestContextSummarizer: ) assert result == pytest.approx(100.0) - def test_aggregate_to_higher_layer_no_data( - self, summarizer: ContextSummarizer - ) -> None: + def test_aggregate_to_higher_layer_no_data(self, summarizer: ContextSummarizer) -> None: """aggregate_to_higher_layer with no matching key 
must return None.""" result = summarizer.aggregate_to_higher_layer( ContextLayer.L6_DAILY, ContextLayer.L5_WEEKLY, "nonexistent", "mean" @@ -585,9 +547,7 @@ class TestContextSummarizer: # create_compact_summary + format_summary_for_prompt # ------------------------------------------------------------------ - def test_create_compact_summary( - self, summarizer: ContextSummarizer - ) -> None: + def test_create_compact_summary(self, summarizer: ContextSummarizer) -> None: """create_compact_summary must produce a dict keyed by layer value.""" store = summarizer.store store.set_context(ContextLayer.L6_DAILY, "2026-02-01", "pnl", 100.0) @@ -615,9 +575,7 @@ class TestContextSummarizer: text = summarizer.format_summary_for_prompt(summary) assert text == "" - def test_format_summary_non_dict_value( - self, summarizer: ContextSummarizer - ) -> None: + def test_format_summary_non_dict_value(self, summarizer: ContextSummarizer) -> None: """format_summary_for_prompt must render non-dict values as plain text.""" summary = { "daily": { diff --git a/tests/test_daily_review.py b/tests/test_daily_review.py index 38765e6..e127b84 100644 --- a/tests/test_daily_review.py +++ b/tests/test_daily_review.py @@ -4,6 +4,7 @@ from __future__ import annotations import json import sqlite3 +from datetime import UTC, datetime from types import SimpleNamespace from unittest.mock import AsyncMock, MagicMock @@ -16,8 +17,6 @@ from src.evolution.daily_review import DailyReviewer from src.evolution.scorecard import DailyScorecard from src.logging.decision_logger import DecisionLogger -from datetime import UTC, datetime - TODAY = datetime.now(UTC).strftime("%Y-%m-%d") @@ -53,7 +52,8 @@ def _log_decision( def test_generate_scorecard_market_scoped( - db_conn: sqlite3.Connection, context_store: ContextStore, + db_conn: sqlite3.Connection, + context_store: ContextStore, ) -> None: reviewer = DailyReviewer(db_conn, context_store) logger = DecisionLogger(db_conn) @@ -134,7 +134,8 @@ def 
test_generate_scorecard_market_scoped( def test_generate_scorecard_top_winners_and_losers( - db_conn: sqlite3.Connection, context_store: ContextStore, + db_conn: sqlite3.Connection, + context_store: ContextStore, ) -> None: reviewer = DailyReviewer(db_conn, context_store) logger = DecisionLogger(db_conn) @@ -168,7 +169,8 @@ def test_generate_scorecard_top_winners_and_losers( def test_generate_scorecard_empty_day( - db_conn: sqlite3.Connection, context_store: ContextStore, + db_conn: sqlite3.Connection, + context_store: ContextStore, ) -> None: reviewer = DailyReviewer(db_conn, context_store) scorecard = reviewer.generate_scorecard(TODAY, "KR") @@ -184,7 +186,8 @@ def test_generate_scorecard_empty_day( @pytest.mark.asyncio async def test_generate_lessons_without_gemini_returns_empty( - db_conn: sqlite3.Connection, context_store: ContextStore, + db_conn: sqlite3.Connection, + context_store: ContextStore, ) -> None: reviewer = DailyReviewer(db_conn, context_store, gemini_client=None) lessons = await reviewer.generate_lessons( @@ -206,7 +209,8 @@ async def test_generate_lessons_without_gemini_returns_empty( @pytest.mark.asyncio async def test_generate_lessons_parses_json_array( - db_conn: sqlite3.Connection, context_store: ContextStore, + db_conn: sqlite3.Connection, + context_store: ContextStore, ) -> None: mock_gemini = MagicMock() mock_gemini.decide = AsyncMock( @@ -233,7 +237,8 @@ async def test_generate_lessons_parses_json_array( @pytest.mark.asyncio async def test_generate_lessons_fallback_to_lines( - db_conn: sqlite3.Connection, context_store: ContextStore, + db_conn: sqlite3.Connection, + context_store: ContextStore, ) -> None: mock_gemini = MagicMock() mock_gemini.decide = AsyncMock( @@ -260,7 +265,8 @@ async def test_generate_lessons_fallback_to_lines( @pytest.mark.asyncio async def test_generate_lessons_handles_gemini_error( - db_conn: sqlite3.Connection, context_store: ContextStore, + db_conn: sqlite3.Connection, + context_store: ContextStore, ) -> None: 
mock_gemini = MagicMock() mock_gemini.decide = AsyncMock(side_effect=RuntimeError("boom")) @@ -284,7 +290,8 @@ async def test_generate_lessons_handles_gemini_error( def test_store_scorecard_in_context( - db_conn: sqlite3.Connection, context_store: ContextStore, + db_conn: sqlite3.Connection, + context_store: ContextStore, ) -> None: reviewer = DailyReviewer(db_conn, context_store) scorecard = DailyScorecard( @@ -316,7 +323,8 @@ def test_store_scorecard_in_context( def test_store_scorecard_key_is_market_scoped( - db_conn: sqlite3.Connection, context_store: ContextStore, + db_conn: sqlite3.Connection, + context_store: ContextStore, ) -> None: reviewer = DailyReviewer(db_conn, context_store) kr = DailyScorecard( @@ -357,7 +365,8 @@ def test_store_scorecard_key_is_market_scoped( def test_generate_scorecard_handles_invalid_context_snapshot( - db_conn: sqlite3.Connection, context_store: ContextStore, + db_conn: sqlite3.Connection, + context_store: ContextStore, ) -> None: reviewer = DailyReviewer(db_conn, context_store) db_conn.execute( diff --git a/tests/test_dashboard.py b/tests/test_dashboard.py index 8620c44..106ff54 100644 --- a/tests/test_dashboard.py +++ b/tests/test_dashboard.py @@ -355,6 +355,7 @@ def test_positions_empty_when_no_trades(tmp_path: Path) -> None: def _seed_cb_context(conn: sqlite3.Connection, pnl_pct: float, market: str = "KR") -> None: import json as _json + conn.execute( "INSERT OR REPLACE INTO system_metrics (key, value, updated_at) VALUES (?, ?, ?)", ( diff --git a/tests/test_data_integration.py b/tests/test_data_integration.py index 45b1e2a..ea41199 100644 --- a/tests/test_data_integration.py +++ b/tests/test_data_integration.py @@ -79,7 +79,7 @@ class TestNewsAPI: # Mock the fetch to avoid real API call with patch.object(api, "_fetch_news", new_callable=AsyncMock) as mock_fetch: mock_fetch.return_value = None - result = await api.get_news_sentiment("AAPL") + await api.get_news_sentiment("AAPL") # Should have attempted refetch since cache 
expired mock_fetch.assert_called_once_with("AAPL") @@ -111,9 +111,7 @@ class TestNewsAPI: "source": "Reuters", "time_published": "2026-02-04T10:00:00", "url": "https://example.com/1", - "ticker_sentiment": [ - {"ticker": "AAPL", "ticker_sentiment_score": "0.85"} - ], + "ticker_sentiment": [{"ticker": "AAPL", "ticker_sentiment_score": "0.85"}], "overall_sentiment_score": "0.75", }, { @@ -122,9 +120,7 @@ class TestNewsAPI: "source": "Bloomberg", "time_published": "2026-02-04T09:00:00", "url": "https://example.com/2", - "ticker_sentiment": [ - {"ticker": "AAPL", "ticker_sentiment_score": "-0.3"} - ], + "ticker_sentiment": [{"ticker": "AAPL", "ticker_sentiment_score": "-0.3"}], "overall_sentiment_score": "-0.2", }, ] @@ -661,7 +657,9 @@ class TestGeminiClientWithExternalData: ) # Mock the Gemini API call - with patch.object(client._client.aio.models, "generate_content", new_callable=AsyncMock) as mock_gen: + with patch.object( + client._client.aio.models, "generate_content", new_callable=AsyncMock + ) as mock_gen: mock_response = MagicMock() mock_response.text = '{"action": "BUY", "confidence": 85, "rationale": "Good news"}' mock_gen.return_value = mock_response diff --git a/tests/test_db.py b/tests/test_db.py index fb2feb9..4f4d7a2 100644 --- a/tests/test_db.py +++ b/tests/test_db.py @@ -1,7 +1,7 @@ """Tests for database helper functions.""" -import tempfile import os +import tempfile from src.db import get_latest_buy_trade, get_open_position, init_db, log_trade @@ -204,7 +204,8 @@ def test_mode_migration_adds_column_to_existing_db() -> None: assert "strategy_pnl" in columns assert "fx_pnl" in columns migrated = conn.execute( - "SELECT pnl, strategy_pnl, fx_pnl, session_id FROM trades WHERE stock_code='AAPL' LIMIT 1" + "SELECT pnl, strategy_pnl, fx_pnl, session_id " + "FROM trades WHERE stock_code='AAPL' LIMIT 1" ).fetchone() assert migrated is not None assert migrated[0] == 123.45 @@ -407,9 +408,7 @@ def test_decision_logs_session_id_migration_backfills_unknown() -> 
None: conn = init_db(db_path) columns = {row[1] for row in conn.execute("PRAGMA table_info(decision_logs)").fetchall()} assert "session_id" in columns - row = conn.execute( - "SELECT session_id FROM decision_logs WHERE decision_id='d1'" - ).fetchone() + row = conn.execute("SELECT session_id FROM decision_logs WHERE decision_id='d1'").fetchone() assert row is not None assert row[0] == "UNKNOWN" conn.close() diff --git a/tests/test_decision_logger.py b/tests/test_decision_logger.py index dec3a64..ebb1572 100644 --- a/tests/test_decision_logger.py +++ b/tests/test_decision_logger.py @@ -49,7 +49,10 @@ def test_log_decision_creates_record(logger: DecisionLogger, db_conn: sqlite3.Co # Verify record exists in database cursor = db_conn.execute( - "SELECT decision_id, action, confidence, session_id FROM decision_logs WHERE decision_id = ?", + ( + "SELECT decision_id, action, confidence, session_id " + "FROM decision_logs WHERE decision_id = ?" + ), (decision_id,), ) row = cursor.fetchone() diff --git a/tests/test_evolution.py b/tests/test_evolution.py index d5ad349..cdcd38c 100644 --- a/tests/test_evolution.py +++ b/tests/test_evolution.py @@ -208,7 +208,9 @@ def test_identify_failure_patterns_empty(optimizer: EvolutionOptimizer) -> None: @pytest.mark.asyncio -async def test_generate_strategy_creates_file(optimizer: EvolutionOptimizer, tmp_path: Path) -> None: +async def test_generate_strategy_creates_file( + optimizer: EvolutionOptimizer, tmp_path: Path +) -> None: """Test that generate_strategy creates a strategy file.""" failures = [ { @@ -234,7 +236,9 @@ async def test_generate_strategy_creates_file(optimizer: EvolutionOptimizer, tmp return {"action": "HOLD", "confidence": 50, "rationale": "Waiting"} """ - with patch.object(optimizer._client.aio.models, "generate_content", new=AsyncMock(return_value=mock_response)): + with patch.object( + optimizer._client.aio.models, "generate_content", new=AsyncMock(return_value=mock_response) + ): with 
patch("src.evolution.optimizer.STRATEGIES_DIR", tmp_path): strategy_path = await optimizer.generate_strategy(failures) @@ -247,7 +251,8 @@ async def test_generate_strategy_creates_file(optimizer: EvolutionOptimizer, tmp @pytest.mark.asyncio async def test_generate_strategy_saves_valid_python_code( - optimizer: EvolutionOptimizer, tmp_path: Path, + optimizer: EvolutionOptimizer, + tmp_path: Path, ) -> None: """Test that syntactically valid generated code is saved.""" failures = [{"decision_id": "1", "timestamp": "2024-01-15T09:30:00+00:00"}] @@ -255,12 +260,14 @@ async def test_generate_strategy_saves_valid_python_code( mock_response = Mock() mock_response.text = ( 'price = market_data.get("current_price", 0)\n' - 'if price > 0:\n' + "if price > 0:\n" ' return {"action": "BUY", "confidence": 80, "rationale": "Positive price"}\n' 'return {"action": "HOLD", "confidence": 50, "rationale": "No signal"}\n' ) - with patch.object(optimizer._client.aio.models, "generate_content", new=AsyncMock(return_value=mock_response)): + with patch.object( + optimizer._client.aio.models, "generate_content", new=AsyncMock(return_value=mock_response) + ): with patch("src.evolution.optimizer.STRATEGIES_DIR", tmp_path): strategy_path = await optimizer.generate_strategy(failures) @@ -270,7 +277,9 @@ async def test_generate_strategy_saves_valid_python_code( @pytest.mark.asyncio async def test_generate_strategy_blocks_invalid_python_code( - optimizer: EvolutionOptimizer, tmp_path: Path, caplog: pytest.LogCaptureFixture, + optimizer: EvolutionOptimizer, + tmp_path: Path, + caplog: pytest.LogCaptureFixture, ) -> None: """Test that syntactically invalid generated code is not saved.""" failures = [{"decision_id": "1", "timestamp": "2024-01-15T09:30:00+00:00"}] @@ -281,7 +290,9 @@ async def test_generate_strategy_blocks_invalid_python_code( ' return {"action": "BUY", "confidence": 80, "rationale": "broken"}\n' ) - with patch.object(optimizer._client.aio.models, "generate_content", 
new=AsyncMock(return_value=mock_response)): + with patch.object( + optimizer._client.aio.models, "generate_content", new=AsyncMock(return_value=mock_response) + ): with patch("src.evolution.optimizer.STRATEGIES_DIR", tmp_path): with caplog.at_level("WARNING"): strategy_path = await optimizer.generate_strategy(failures) @@ -310,6 +321,7 @@ def test_get_performance_summary() -> None: """Test getting performance summary from trades table.""" # Create a temporary database with trades import tempfile + with tempfile.NamedTemporaryFile(suffix=".db", delete=False) as tmp: tmp_path = tmp.name @@ -604,7 +616,9 @@ def test_calculate_improvement_trend_declining(performance_tracker: PerformanceT assert trend["pnl_change"] == -250.0 -def test_calculate_improvement_trend_insufficient_data(performance_tracker: PerformanceTracker) -> None: +def test_calculate_improvement_trend_insufficient_data( + performance_tracker: PerformanceTracker, +) -> None: """Test improvement trend with insufficient data.""" metrics = [ StrategyMetrics( @@ -718,7 +732,9 @@ async def test_full_evolution_pipeline(optimizer: EvolutionOptimizer, tmp_path: mock_response = Mock() mock_response.text = 'return {"action": "HOLD", "confidence": 50, "rationale": "Test"}' - with patch.object(optimizer._client.aio.models, "generate_content", new=AsyncMock(return_value=mock_response)): + with patch.object( + optimizer._client.aio.models, "generate_content", new=AsyncMock(return_value=mock_response) + ): with patch("src.evolution.optimizer.STRATEGIES_DIR", tmp_path): with patch("subprocess.run") as mock_run: mock_run.return_value = Mock(returncode=0, stdout="", stderr="") diff --git a/tests/test_logging_config.py b/tests/test_logging_config.py index 526f692..387623e 100644 --- a/tests/test_logging_config.py +++ b/tests/test_logging_config.py @@ -103,9 +103,7 @@ class TestSetupLogging: """setup_logging must attach a JSON handler to the root logger.""" setup_logging(level=logging.DEBUG) root = logging.getLogger() - 
json_handlers = [ - h for h in root.handlers if isinstance(h.formatter, JSONFormatter) - ] + json_handlers = [h for h in root.handlers if isinstance(h.formatter, JSONFormatter)] assert len(json_handlers) == 1 assert root.level == logging.DEBUG diff --git a/tests/test_main.py b/tests/test_main.py index bacedc1..95b2c40 100644 --- a/tests/test_main.py +++ b/tests/test_main.py @@ -4,45 +4,45 @@ from datetime import UTC, date, datetime from unittest.mock import ANY, AsyncMock, MagicMock, patch import pytest -import src.main as main_module +import src.main as main_module from src.config import Settings from src.context.layer import ContextLayer from src.context.scheduler import ScheduleResult -from src.core.order_policy import OrderPolicyRejected +from src.core.order_policy import OrderPolicyRejected, get_session_info from src.core.risk_manager import CircuitBreakerTripped, FatFingerRejected from src.db import init_db, log_trade from src.evolution.scorecard import DailyScorecard from src.logging.decision_logger import DecisionLogger from src.main import ( - KILL_SWITCH, + _RUNTIME_EXIT_PEAKS, + _RUNTIME_EXIT_STATES, _SESSION_RISK_LAST_BY_MARKET, _SESSION_RISK_OVERRIDES_BY_MARKET, _SESSION_RISK_PROFILES_MAP, _STOPLOSS_REENTRY_COOLDOWN_UNTIL, + KILL_SWITCH, + _apply_dashboard_flag, _apply_staged_exit_override_for_hold, _compute_kr_atr_value, - _estimate_pred_down_prob_from_rsi, - _inject_staged_exit_features, - _RUNTIME_EXIT_PEAKS, - _RUNTIME_EXIT_STATES, - _should_force_exit_for_overnight, - _should_block_overseas_buy_for_fx_buffer, - _trigger_emergency_kill_switch, - _apply_dashboard_flag, + _compute_kr_dynamic_stop_loss_pct, _determine_order_quantity, + _estimate_pred_down_prob_from_rsi, _extract_avg_price_from_balance, _extract_held_codes_from_balance, _extract_held_qty_from_balance, _handle_market_close, - _retry_connection, + _inject_staged_exit_features, _resolve_market_setting, _resolve_sell_qty_for_pnl, + _retry_connection, _run_context_scheduler, 
_run_evolution_loop, + _should_block_overseas_buy_for_fx_buffer, + _should_force_exit_for_overnight, _start_dashboard_server, _stoploss_cooldown_minutes, - _compute_kr_dynamic_stop_loss_pct, + _trigger_emergency_kill_switch, handle_domestic_pending_orders, handle_overseas_pending_orders, process_blackout_recovery_orders, @@ -336,10 +336,7 @@ async def test_inject_staged_exit_features_sets_pred_down_prob_and_atr_for_kr() broker = MagicMock() broker.get_daily_prices = AsyncMock( - return_value=[ - {"high": 102.0 + i, "low": 98.0 + i, "close": 100.0 + i} - for i in range(40) - ] + return_value=[{"high": 102.0 + i, "low": 98.0 + i, "close": 100.0 + i} for i in range(40)] ) await _inject_staged_exit_features( @@ -483,9 +480,7 @@ class TestExtractHeldQtyFromBalance: def test_overseas_returns_ord_psbl_qty_first(self) -> None: """ord_psbl_qty (주문가능수량) takes priority over ovrs_cblc_qty.""" - balance = { - "output1": [{"ovrs_pdno": "AAPL", "ord_psbl_qty": "8", "ovrs_cblc_qty": "10"}] - } + balance = {"output1": [{"ovrs_pdno": "AAPL", "ord_psbl_qty": "8", "ovrs_cblc_qty": "10"}]} assert _extract_held_qty_from_balance(balance, "AAPL", is_domestic=False) == 8 def test_overseas_fallback_to_ovrs_cblc_qty_when_ord_psbl_qty_absent(self) -> None: @@ -809,9 +804,7 @@ class TestTradingCycleTelegramIntegration: def mock_criticality_assessor(self) -> MagicMock: """Create mock criticality assessor.""" assessor = MagicMock() - assessor.assess_market_conditions = MagicMock( - return_value=MagicMock(value="NORMAL") - ) + assessor.assess_market_conditions = MagicMock(return_value=MagicMock(value="NORMAL")) assessor.get_timeout = MagicMock(return_value=5.0) return assessor @@ -1199,9 +1192,7 @@ class TestOverseasBalanceParsing: def mock_overseas_broker_with_list(self) -> MagicMock: """Create mock overseas broker returning list format.""" broker = MagicMock() - broker.get_overseas_price = AsyncMock( - return_value={"output": {"last": "150.50"}} - ) + broker.get_overseas_price = 
AsyncMock(return_value={"output": {"last": "150.50"}}) broker.get_overseas_balance = AsyncMock( return_value={ "output2": [ @@ -1221,9 +1212,7 @@ class TestOverseasBalanceParsing: def mock_overseas_broker_with_dict(self) -> MagicMock: """Create mock overseas broker returning dict format.""" broker = MagicMock() - broker.get_overseas_price = AsyncMock( - return_value={"output": {"last": "150.50"}} - ) + broker.get_overseas_price = AsyncMock(return_value={"output": {"last": "150.50"}}) broker.get_overseas_balance = AsyncMock( return_value={ "output2": { @@ -1241,9 +1230,7 @@ class TestOverseasBalanceParsing: def mock_overseas_broker_with_empty(self) -> MagicMock: """Create mock overseas broker returning empty output2.""" broker = MagicMock() - broker.get_overseas_price = AsyncMock( - return_value={"output": {"last": "150.50"}} - ) + broker.get_overseas_price = AsyncMock(return_value={"output": {"last": "150.50"}}) broker.get_overseas_balance = AsyncMock(return_value={"output2": []}) broker.get_overseas_buying_power = AsyncMock( return_value={"output": {"ovrs_ord_psbl_amt": "0.00"}} @@ -1327,9 +1314,7 @@ class TestOverseasBalanceParsing: def mock_criticality_assessor(self) -> MagicMock: """Create mock criticality assessor.""" assessor = MagicMock() - assessor.assess_market_conditions = MagicMock( - return_value=MagicMock(value="NORMAL") - ) + assessor.assess_market_conditions = MagicMock(return_value=MagicMock(value="NORMAL")) assessor.get_timeout = MagicMock(return_value=5.0) return assessor @@ -1492,9 +1477,7 @@ class TestOverseasBalanceParsing: def mock_overseas_broker_with_buy_scenario(self) -> MagicMock: """Create mock overseas broker that returns a valid price for BUY orders.""" broker = MagicMock() - broker.get_overseas_price = AsyncMock( - return_value={"output": {"last": "182.50"}} - ) + broker.get_overseas_price = AsyncMock(return_value={"output": {"last": "182.50"}}) broker.get_overseas_balance = AsyncMock( return_value={ "output2": [ @@ -1615,9 +1598,7 @@ 
class TestOverseasBalanceParsing: overseas_broker.get_overseas_buying_power = AsyncMock( return_value={"output": {"ovrs_ord_psbl_amt": "50000.00"}} ) - overseas_broker.send_overseas_order = AsyncMock( - return_value={"rt_cd": "0", "msg1": "OK"} - ) + overseas_broker.send_overseas_order = AsyncMock(return_value={"rt_cd": "0", "msg1": "OK"}) sell_engine = MagicMock(spec=ScenarioEngine) sell_engine.evaluate = MagicMock(return_value=_make_sell_match("AAPL")) @@ -1709,8 +1690,10 @@ class TestOverseasBalanceParsing: ) overseas_broker.send_overseas_order.assert_called_once() - sent_price = overseas_broker.send_overseas_order.call_args[1].get("price") or \ - overseas_broker.send_overseas_order.call_args[0][4] + sent_price = ( + overseas_broker.send_overseas_order.call_args[1].get("price") + or overseas_broker.send_overseas_order.call_args[0][4] + ) # 50.1234 * 1.002 = 50.2235... rounded to 2 decimals = 50.22 assert sent_price == round(50.1234 * 1.002, 2), ( f"Expected 2-decimal price {round(50.1234 * 1.002, 2)} but got {sent_price} (#252)" @@ -1753,25 +1736,33 @@ class TestOverseasBalanceParsing: engine = MagicMock(spec=ScenarioEngine) engine.evaluate = MagicMock(return_value=_make_buy_match()) - await trading_cycle( - broker=mock_domestic_broker, - overseas_broker=overseas_broker, - scenario_engine=engine, - playbook=mock_playbook, - risk=mock_risk, - db_conn=db_conn, - decision_logger=decision_logger, - context_store=mock_context_store, - criticality_assessor=mock_criticality_assessor, - telegram=mock_telegram, - market=mock_overseas_market, - stock_code="PENNYX", - scan_candidates={}, - ) + with patch( + "src.main._resolve_market_setting", + side_effect=lambda **kwargs: ( + 0.1 if kwargs.get("key") == "US_MIN_PRICE" else kwargs.get("default") + ), + ): + await trading_cycle( + broker=mock_domestic_broker, + overseas_broker=overseas_broker, + scenario_engine=engine, + playbook=mock_playbook, + risk=mock_risk, + db_conn=db_conn, + decision_logger=decision_logger, + 
context_store=mock_context_store, + criticality_assessor=mock_criticality_assessor, + telegram=mock_telegram, + market=mock_overseas_market, + stock_code="PENNYX", + scan_candidates={}, + ) overseas_broker.send_overseas_order.assert_called_once() - sent_price = overseas_broker.send_overseas_order.call_args[1].get("price") or \ - overseas_broker.send_overseas_order.call_args[0][4] + sent_price = ( + overseas_broker.send_overseas_order.call_args[1].get("price") + or overseas_broker.send_overseas_order.call_args[0][4] + ) # 0.5678 * 1.002 = 0.56893... rounded to 4 decimals = 0.5689 assert sent_price == round(0.5678 * 1.002, 4), ( f"Expected 4-decimal price {round(0.5678 * 1.002, 4)} but got {sent_price} (#252)" @@ -1821,7 +1812,10 @@ class TestScenarioEngineIntegration: @pytest.mark.asyncio async def test_scenario_engine_called_with_enriched_market_data( - self, mock_broker: MagicMock, mock_market: MagicMock, mock_telegram: MagicMock, + self, + mock_broker: MagicMock, + mock_market: MagicMock, + mock_telegram: MagicMock, ) -> None: """Test scenario engine receives market_data enriched with scanner metrics.""" from src.analysis.smart_scanner import ScanCandidate @@ -1831,9 +1825,14 @@ class TestScenarioEngineIntegration: playbook = _make_playbook() candidate = ScanCandidate( - stock_code="005930", name="Samsung", price=50000, - volume=1000000, volume_ratio=3.5, rsi=25.0, - signal="oversold", score=85.0, + stock_code="005930", + name="Samsung", + price=50000, + volume=1000000, + volume_ratio=3.5, + rsi=25.0, + signal="oversold", + score=85.0, ) with ( @@ -1877,7 +1876,10 @@ class TestScenarioEngineIntegration: @pytest.mark.asyncio async def test_trading_cycle_sets_l7_context_keys( - self, mock_broker: MagicMock, mock_market: MagicMock, mock_telegram: MagicMock, + self, + mock_broker: MagicMock, + mock_market: MagicMock, + mock_telegram: MagicMock, ) -> None: """Test L7 context is written with market-scoped keys.""" from src.analysis.smart_scanner import ScanCandidate @@ 
-1888,9 +1890,14 @@ class TestScenarioEngineIntegration: context_store = MagicMock(get_latest_timeframe=MagicMock(return_value=None)) candidate = ScanCandidate( - stock_code="005930", name="Samsung", price=50000, - volume=1000000, volume_ratio=3.5, rsi=25.0, - signal="oversold", score=85.0, + stock_code="005930", + name="Samsung", + price=50000, + volume=1000000, + volume_ratio=3.5, + rsi=25.0, + signal="oversold", + score=85.0, ) with patch("src.main.log_trade"): @@ -1940,7 +1947,10 @@ class TestScenarioEngineIntegration: @pytest.mark.asyncio async def test_scan_candidates_market_scoped( - self, mock_broker: MagicMock, mock_market: MagicMock, mock_telegram: MagicMock, + self, + mock_broker: MagicMock, + mock_market: MagicMock, + mock_telegram: MagicMock, ) -> None: """Test scan_candidates uses market-scoped lookup, ignoring other markets.""" from src.analysis.smart_scanner import ScanCandidate @@ -1950,9 +1960,14 @@ class TestScenarioEngineIntegration: # Candidate stored under US market — should NOT be found for KR market us_candidate = ScanCandidate( - stock_code="005930", name="Overlap", price=100, - volume=500000, volume_ratio=5.0, rsi=15.0, - signal="oversold", score=90.0, + stock_code="005930", + name="Overlap", + price=100, + volume=500000, + volume_ratio=5.0, + rsi=15.0, + signal="oversold", + score=90.0, ) with patch("src.main.log_trade"): @@ -1982,7 +1997,10 @@ class TestScenarioEngineIntegration: @pytest.mark.asyncio async def test_scenario_engine_called_without_scanner_data( - self, mock_broker: MagicMock, mock_market: MagicMock, mock_telegram: MagicMock, + self, + mock_broker: MagicMock, + mock_market: MagicMock, + mock_telegram: MagicMock, ) -> None: """Test scenario engine works when stock has no scan candidate.""" engine = MagicMock(spec=ScenarioEngine) @@ -2020,7 +2038,9 @@ class TestScenarioEngineIntegration: @pytest.mark.asyncio async def test_holding_overseas_stock_derives_volume_ratio_from_price_api( - self, mock_broker: MagicMock, 
mock_telegram: MagicMock, + self, + mock_broker: MagicMock, + mock_telegram: MagicMock, ) -> None: """Test overseas holding stocks derive volume_ratio from get_overseas_price high/low.""" engine = MagicMock(spec=ScenarioEngine) @@ -2035,15 +2055,17 @@ class TestScenarioEngineIntegration: os_broker = MagicMock() # price_change_pct=5.0, high=106, low=94 → intraday_range=12% → volume_ratio=max(1,6)=6 - os_broker.get_overseas_price = AsyncMock(return_value={ - "output": {"last": "100.0", "rate": "5.0", "high": "106.0", "low": "94.0"} - }) - os_broker.get_overseas_balance = AsyncMock(return_value={ - "output2": [{"frcr_evlu_tota": "10000", "frcr_buy_amt_smtl": "9000"}] - }) - os_broker.get_overseas_buying_power = AsyncMock(return_value={ - "output": {"ovrs_ord_psbl_amt": "500"} - }) + os_broker.get_overseas_price = AsyncMock( + return_value={ + "output": {"last": "100.0", "rate": "5.0", "high": "106.0", "low": "94.0"} + } + ) + os_broker.get_overseas_balance = AsyncMock( + return_value={"output2": [{"frcr_evlu_tota": "10000", "frcr_buy_amt_smtl": "9000"}]} + ) + os_broker.get_overseas_buying_power = AsyncMock( + return_value={"output": {"ovrs_ord_psbl_amt": "500"}} + ) with patch("src.main.log_trade"): await trading_cycle( @@ -2075,7 +2097,10 @@ class TestScenarioEngineIntegration: @pytest.mark.asyncio async def test_scenario_matched_notification_sent( - self, mock_broker: MagicMock, mock_market: MagicMock, mock_telegram: MagicMock, + self, + mock_broker: MagicMock, + mock_market: MagicMock, + mock_telegram: MagicMock, ) -> None: """Test telegram notification sent when a scenario matches.""" # Create a match with matched_scenario (not None) @@ -2125,7 +2150,10 @@ class TestScenarioEngineIntegration: @pytest.mark.asyncio async def test_no_scenario_matched_notification_on_default_hold( - self, mock_broker: MagicMock, mock_market: MagicMock, mock_telegram: MagicMock, + self, + mock_broker: MagicMock, + mock_market: MagicMock, + mock_telegram: MagicMock, ) -> None: """Test 
no scenario notification when default HOLD is returned.""" engine = MagicMock(spec=ScenarioEngine) @@ -2156,7 +2184,10 @@ class TestScenarioEngineIntegration: @pytest.mark.asyncio async def test_decision_logger_receives_scenario_match_details( - self, mock_broker: MagicMock, mock_market: MagicMock, mock_telegram: MagicMock, + self, + mock_broker: MagicMock, + mock_market: MagicMock, + mock_telegram: MagicMock, ) -> None: """Test decision logger context includes scenario match details.""" match = ScenarioMatch( @@ -2193,13 +2224,16 @@ class TestScenarioEngineIntegration: decision_logger.log_decision.assert_called_once() call_kwargs = decision_logger.log_decision.call_args.kwargs - assert call_kwargs["session_id"] == "KRX_REG" + assert call_kwargs["session_id"] == get_session_info(mock_market).session_id assert "scenario_match" in call_kwargs["context_snapshot"] assert call_kwargs["context_snapshot"]["scenario_match"]["rsi"] == 45.0 @pytest.mark.asyncio async def test_reduce_all_does_not_execute_order( - self, mock_broker: MagicMock, mock_market: MagicMock, mock_telegram: MagicMock, + self, + mock_broker: MagicMock, + mock_market: MagicMock, + mock_telegram: MagicMock, ) -> None: """Test REDUCE_ALL action does not trigger order execution.""" match = ScenarioMatch( @@ -2340,7 +2374,9 @@ async def test_stoploss_reentry_cooldown_blocks_buy_when_active() -> None: broker.get_balance = AsyncMock( return_value={ "output1": [], - "output2": [{"tot_evlu_amt": "100000", "dnca_tot_amt": "50000", "pchs_amt_smtl_amt": "50000"}], + "output2": [ + {"tot_evlu_amt": "100000", "dnca_tot_amt": "50000", "pchs_amt_smtl_amt": "50000"} + ], } ) broker.send_order = AsyncMock(return_value={"msg1": "OK"}) @@ -2359,7 +2395,9 @@ async def test_stoploss_reentry_cooldown_blocks_buy_when_active() -> None: risk=MagicMock(validate_order=MagicMock(), check_circuit_breaker=MagicMock()), db_conn=db_conn, decision_logger=DecisionLogger(db_conn), - 
context_store=MagicMock(get_latest_timeframe=MagicMock(return_value=None), set_context=MagicMock()), + context_store=MagicMock( + get_latest_timeframe=MagicMock(return_value=None), set_context=MagicMock() + ), criticality_assessor=MagicMock( assess_market_conditions=MagicMock(return_value=MagicMock(value="NORMAL")), get_timeout=MagicMock(return_value=5.0), @@ -2389,7 +2427,9 @@ async def test_stoploss_reentry_cooldown_allows_buy_after_expiry() -> None: broker.get_balance = AsyncMock( return_value={ "output1": [], - "output2": [{"tot_evlu_amt": "100000", "dnca_tot_amt": "50000", "pchs_amt_smtl_amt": "50000"}], + "output2": [ + {"tot_evlu_amt": "100000", "dnca_tot_amt": "50000", "pchs_amt_smtl_amt": "50000"} + ], } ) broker.send_order = AsyncMock(return_value={"msg1": "OK"}) @@ -2408,7 +2448,9 @@ async def test_stoploss_reentry_cooldown_allows_buy_after_expiry() -> None: risk=MagicMock(validate_order=MagicMock(), check_circuit_breaker=MagicMock()), db_conn=db_conn, decision_logger=DecisionLogger(db_conn), - context_store=MagicMock(get_latest_timeframe=MagicMock(return_value=None), set_context=MagicMock()), + context_store=MagicMock( + get_latest_timeframe=MagicMock(return_value=None), set_context=MagicMock() + ), criticality_assessor=MagicMock( assess_market_conditions=MagicMock(return_value=MagicMock(value="NORMAL")), get_timeout=MagicMock(return_value=5.0), @@ -3419,6 +3461,7 @@ def test_start_dashboard_server_returns_none_when_uvicorn_missing() -> None: DASHBOARD_ENABLED=True, ) import builtins + real_import = builtins.__import__ def mock_import(name: str, *args: object, **kwargs: object) -> object: @@ -3446,8 +3489,13 @@ class TestBuyCooldown: broker.get_current_price = AsyncMock(return_value=(100.0, 1.0, 0.0)) broker.get_balance = AsyncMock( return_value={ - "output2": [{"tot_evlu_amt": "1000000", "dnca_tot_amt": "500000", - "pchs_amt_smtl_amt": "500000"}] + "output2": [ + { + "tot_evlu_amt": "1000000", + "dnca_tot_amt": "500000", + "pchs_amt_smtl_amt": 
"500000", + } + ] } ) broker.send_order = AsyncMock(return_value={"msg1": "OK"}) @@ -3475,13 +3523,22 @@ class TestBuyCooldown: def mock_overseas_broker(self) -> MagicMock: broker = MagicMock() broker.get_overseas_price = AsyncMock( - return_value={"output": {"last": "1.0", "rate": "0.0", - "high": "1.05", "low": "0.95", "tvol": "1000000"}} + return_value={ + "output": { + "last": "1.0", + "rate": "0.0", + "high": "1.05", + "low": "0.95", + "tvol": "1000000", + } + } + ) + broker.get_overseas_balance = AsyncMock( + return_value={ + "output1": [], + "output2": [{"frcr_evlu_tota": "50000", "frcr_buy_amt_smtl": "0"}], + } ) - broker.get_overseas_balance = AsyncMock(return_value={ - "output1": [], - "output2": [{"frcr_evlu_tota": "50000", "frcr_buy_amt_smtl": "0"}], - }) broker.get_overseas_buying_power = AsyncMock( return_value={"output": {"ovrs_ord_psbl_amt": "50000"}} ) @@ -3501,7 +3558,9 @@ class TestBuyCooldown: @pytest.mark.asyncio async def test_cooldown_set_on_insufficient_balance( - self, mock_broker: MagicMock, mock_overseas_broker: MagicMock, + self, + mock_broker: MagicMock, + mock_overseas_broker: MagicMock, mock_overseas_market: MagicMock, ) -> None: """BUY cooldown entry is created after 주문가능금액 rejection.""" @@ -3509,7 +3568,12 @@ class TestBuyCooldown: engine.evaluate = MagicMock(return_value=self._make_buy_match_overseas("MLECW")) buy_cooldown: dict[str, float] = {} - with patch("src.main.log_trade"): + with patch("src.main.log_trade"), patch( + "src.main._resolve_market_setting", + side_effect=lambda **kwargs: ( + 0.1 if kwargs.get("key") == "US_MIN_PRICE" else kwargs.get("default") + ), + ): await trading_cycle( broker=mock_broker, overseas_broker=mock_overseas_broker, @@ -3540,7 +3604,9 @@ class TestBuyCooldown: @pytest.mark.asyncio async def test_cooldown_skips_buy( - self, mock_broker: MagicMock, mock_overseas_broker: MagicMock, + self, + mock_broker: MagicMock, + mock_overseas_broker: MagicMock, mock_overseas_market: MagicMock, ) -> None: """BUY 
is skipped when cooldown is active for the stock.""" @@ -3548,10 +3614,9 @@ class TestBuyCooldown: engine.evaluate = MagicMock(return_value=self._make_buy_match_overseas("MLECW")) import asyncio + # Set an active cooldown (expires far in the future) - buy_cooldown: dict[str, float] = { - "US_NASDAQ:MLECW": asyncio.get_event_loop().time() + 600 - } + buy_cooldown: dict[str, float] = {"US_NASDAQ:MLECW": asyncio.get_event_loop().time() + 600} with patch("src.main.log_trade"): await trading_cycle( @@ -3584,7 +3649,9 @@ class TestBuyCooldown: @pytest.mark.asyncio async def test_cooldown_not_set_on_other_errors( - self, mock_broker: MagicMock, mock_overseas_market: MagicMock, + self, + mock_broker: MagicMock, + mock_overseas_market: MagicMock, ) -> None: """Cooldown is NOT set for non-balance-related rejections.""" engine = MagicMock(spec=ScenarioEngine) @@ -3592,13 +3659,22 @@ class TestBuyCooldown: # Different rejection reason overseas_broker = MagicMock() overseas_broker.get_overseas_price = AsyncMock( - return_value={"output": {"last": "1.0", "rate": "0.0", - "high": "1.05", "low": "0.95", "tvol": "1000000"}} + return_value={ + "output": { + "last": "1.0", + "rate": "0.0", + "high": "1.05", + "low": "0.95", + "tvol": "1000000", + } + } + ) + overseas_broker.get_overseas_balance = AsyncMock( + return_value={ + "output1": [], + "output2": [{"frcr_evlu_tota": "50000", "frcr_buy_amt_smtl": "0"}], + } ) - overseas_broker.get_overseas_balance = AsyncMock(return_value={ - "output1": [], - "output2": [{"frcr_evlu_tota": "50000", "frcr_buy_amt_smtl": "0"}], - }) overseas_broker.get_overseas_buying_power = AsyncMock( return_value={"output": {"ovrs_ord_psbl_amt": "50000"}} ) @@ -3638,14 +3714,21 @@ class TestBuyCooldown: @pytest.mark.asyncio async def test_no_cooldown_param_still_works( - self, mock_broker: MagicMock, mock_overseas_broker: MagicMock, + self, + mock_broker: MagicMock, + mock_overseas_broker: MagicMock, mock_overseas_market: MagicMock, ) -> None: """trading_cycle 
works normally when buy_cooldown is None (default).""" engine = MagicMock(spec=ScenarioEngine) engine.evaluate = MagicMock(return_value=self._make_buy_match_overseas("MLECW")) - with patch("src.main.log_trade"): + with patch("src.main.log_trade"), patch( + "src.main._resolve_market_setting", + side_effect=lambda **kwargs: ( + 0.1 if kwargs.get("key") == "US_MIN_PRICE" else kwargs.get("default") + ), + ): await trading_cycle( broker=mock_broker, overseas_broker=mock_overseas_broker, @@ -3722,6 +3805,7 @@ class TestMarketOutlookConfidenceThreshold: self, confidence: int, stock_code: str = "005930" ) -> ScenarioMatch: from src.strategy.models import StockScenario + scenario = StockScenario( condition=StockCondition(rsi_below=30), action=ScenarioAction.BUY, @@ -3736,10 +3820,9 @@ class TestMarketOutlookConfidenceThreshold: rationale="Test buy", ) - def _make_playbook_with_outlook( - self, outlook_str: str, market: str = "KR" - ) -> DayPlaybook: + def _make_playbook_with_outlook(self, outlook_str: str, market: str = "KR") -> DayPlaybook: from src.strategy.models import MarketOutlook + outlook_map = { "bearish": MarketOutlook.BEARISH, "bullish": MarketOutlook.BULLISH, @@ -3991,7 +4074,15 @@ async def test_buy_suppressed_when_open_position_exists() -> None: overseas_broker = MagicMock() overseas_broker.get_overseas_price = AsyncMock( - return_value={"output": {"last": "51.0", "rate": "2.0", "high": "52.0", "low": "50.0", "tvol": "1000000"}} + return_value={ + "output": { + "last": "51.0", + "rate": "2.0", + "high": "52.0", + "low": "50.0", + "tvol": "1000000", + } + } ) overseas_broker.get_overseas_balance = AsyncMock( return_value={ @@ -4058,7 +4149,15 @@ async def test_buy_proceeds_when_no_open_position() -> None: overseas_broker = MagicMock() overseas_broker.get_overseas_price = AsyncMock( - return_value={"output": {"last": "100.0", "rate": "1.0", "high": "101.0", "low": "99.0", "tvol": "500000"}} + return_value={ + "output": { + "last": "100.0", + "rate": "1.0", + 
"high": "101.0", + "low": "99.0", + "tvol": "500000", + } + } ) overseas_broker.get_overseas_balance = AsyncMock( return_value={ @@ -4160,9 +4259,7 @@ class TestOverseasBrokerIntegration: ) overseas_broker = MagicMock() - overseas_broker.get_overseas_price = AsyncMock( - return_value={"output": {"last": "182.50"}} - ) + overseas_broker.get_overseas_price = AsyncMock(return_value={"output": {"last": "182.50"}}) # 브로커: 여전히 AAPL 10주 보유 중 (SELL 미체결) overseas_broker.get_overseas_balance = AsyncMock( return_value={ @@ -4236,9 +4333,7 @@ class TestOverseasBrokerIntegration: # DB: 레코드 없음 (신규 포지션) overseas_broker = MagicMock() - overseas_broker.get_overseas_price = AsyncMock( - return_value={"output": {"last": "182.50"}} - ) + overseas_broker.get_overseas_price = AsyncMock(return_value={"output": {"last": "182.50"}}) # 브로커: AAPL 미보유 overseas_broker.get_overseas_balance = AsyncMock( return_value={ @@ -4306,9 +4401,7 @@ class TestOverseasBrokerIntegration: db_conn = init_db(":memory:") overseas_broker = MagicMock() - overseas_broker.get_overseas_price = AsyncMock( - return_value={"output": {"last": "182.50"}} - ) + overseas_broker.get_overseas_price = AsyncMock(return_value={"output": {"last": "182.50"}}) overseas_broker.get_overseas_balance = AsyncMock( return_value={ "output1": [], @@ -4387,6 +4480,7 @@ class TestRetryConnection: @pytest.mark.asyncio async def test_success_on_first_attempt(self) -> None: """Returns the result immediately when the first call succeeds.""" + async def ok() -> str: return "data" @@ -4596,9 +4690,7 @@ class TestDailyCBBaseline: return_value=self._make_domestic_balance(tot_evlu_amt=55000.0) ) # Price data for the stock - broker.get_current_price = AsyncMock( - return_value=(100.0, 1.5, 100.0) - ) + broker.get_current_price = AsyncMock(return_value=(100.0, 1.5, 100.0)) market = MagicMock() market.name = "KR" @@ -4643,8 +4735,10 @@ class TestDailyCBBaseline: async def _passthrough(fn, *a, label: str = "", **kw): # type: ignore[override] return 
await fn(*a, **kw) - with patch("src.main.get_open_markets", return_value=[market]), \ - patch("src.main._retry_connection", new=_passthrough): + with ( + patch("src.main.get_open_markets", return_value=[market]), + patch("src.main._retry_connection", new=_passthrough), + ): result = await run_daily_session( broker=broker, overseas_broker=MagicMock(), @@ -4720,8 +4814,10 @@ class TestDailyCBBaseline: async def _passthrough(fn, *a, label: str = "", **kw): # type: ignore[override] return await fn(*a, **kw) - with patch("src.main.get_open_markets", return_value=[market]), \ - patch("src.main._retry_connection", new=_passthrough): + with ( + patch("src.main.get_open_markets", return_value=[market]), + patch("src.main._retry_connection", new=_passthrough), + ): result = await run_daily_session( broker=broker, overseas_broker=MagicMock(), @@ -4844,8 +4940,10 @@ async def test_run_daily_session_applies_staged_exit_override_on_hold() -> None: async def _passthrough(fn, *a, label: str = "", **kw): # type: ignore[override] return await fn(*a, **kw) - with patch("src.main.get_open_markets", return_value=[market]), \ - patch("src.main._retry_connection", new=_passthrough): + with ( + patch("src.main.get_open_markets", return_value=[market]), + patch("src.main._retry_connection", new=_passthrough), + ): await run_daily_session( broker=broker, overseas_broker=MagicMock(), @@ -5032,17 +5130,14 @@ class TestSyncPositionsFromBroker: db_conn = init_db(":memory:") broker = MagicMock() - broker.get_balance = AsyncMock( - return_value=self._domestic_balance("005930", qty=7) - ) + broker.get_balance = AsyncMock(return_value=self._domestic_balance("005930", qty=7)) overseas_broker = MagicMock() - synced = await sync_positions_from_broker( - broker, overseas_broker, db_conn, settings - ) + synced = await sync_positions_from_broker(broker, overseas_broker, db_conn, settings) assert synced == 1 from src.db import get_open_position + pos = get_open_position(db_conn, "005930", "KR") assert 
pos is not None assert pos["quantity"] == 7 @@ -5066,14 +5161,10 @@ class TestSyncPositionsFromBroker: ) broker = MagicMock() - broker.get_balance = AsyncMock( - return_value=self._domestic_balance("005930", qty=5) - ) + broker.get_balance = AsyncMock(return_value=self._domestic_balance("005930", qty=5)) overseas_broker = MagicMock() - synced = await sync_positions_from_broker( - broker, overseas_broker, db_conn, settings - ) + synced = await sync_positions_from_broker(broker, overseas_broker, db_conn, settings) assert synced == 0 @@ -5089,12 +5180,11 @@ class TestSyncPositionsFromBroker: return_value=self._overseas_balance("AAPL", qty=10) ) - synced = await sync_positions_from_broker( - broker, overseas_broker, db_conn, settings - ) + synced = await sync_positions_from_broker(broker, overseas_broker, db_conn, settings) assert synced == 1 from src.db import get_open_position + pos = get_open_position(db_conn, "AAPL", "US_NASDAQ") assert pos is not None assert pos["quantity"] == 10 @@ -5106,14 +5196,10 @@ class TestSyncPositionsFromBroker: db_conn = init_db(":memory:") broker = MagicMock() - broker.get_balance = AsyncMock( - return_value={"output1": [], "output2": [{}]} - ) + broker.get_balance = AsyncMock(return_value={"output1": [], "output2": [{}]}) overseas_broker = MagicMock() - synced = await sync_positions_from_broker( - broker, overseas_broker, db_conn, settings - ) + synced = await sync_positions_from_broker(broker, overseas_broker, db_conn, settings) assert synced == 0 @@ -5124,14 +5210,10 @@ class TestSyncPositionsFromBroker: db_conn = init_db(":memory:") broker = MagicMock() - broker.get_balance = AsyncMock( - side_effect=ConnectionError("KIS unreachable") - ) + broker.get_balance = AsyncMock(side_effect=ConnectionError("KIS unreachable")) overseas_broker = MagicMock() - synced = await sync_positions_from_broker( - broker, overseas_broker, db_conn, settings - ) + synced = await sync_positions_from_broker(broker, overseas_broker, db_conn, settings) assert 
synced == 0 # Failure treated as no-op @@ -5151,9 +5233,7 @@ class TestSyncPositionsFromBroker: return_value={"output1": [], "output2": [{}]} ) - await sync_positions_from_broker( - broker, overseas_broker, db_conn, settings - ) + await sync_positions_from_broker(broker, overseas_broker, db_conn, settings) # Two distinct exchange codes (NASD, NYSE) → 2 calls assert overseas_broker.get_overseas_balance.call_count == 2 @@ -5166,7 +5246,9 @@ class TestSyncPositionsFromBroker: balance = { "output1": [{"pdno": "005930", "ord_psbl_qty": "5", "pchs_avg_pric": "68000.0"}], - "output2": [{"tot_evlu_amt": "1000000", "dnca_tot_amt": "500000", "pchs_amt_smtl_amt": "500000"}], + "output2": [ + {"tot_evlu_amt": "1000000", "dnca_tot_amt": "500000", "pchs_amt_smtl_amt": "500000"} + ], } broker = MagicMock() broker.get_balance = AsyncMock(return_value=balance) @@ -5175,6 +5257,7 @@ class TestSyncPositionsFromBroker: await sync_positions_from_broker(broker, overseas_broker, db_conn, settings) from src.db import get_open_position + pos = get_open_position(db_conn, "005930", "KR") assert pos is not None assert pos["price"] == 68000.0 @@ -5196,6 +5279,7 @@ class TestSyncPositionsFromBroker: await sync_positions_from_broker(broker, overseas_broker, db_conn, settings) from src.db import get_open_position + pos = get_open_position(db_conn, "AAPL", "US_NASDAQ") assert pos is not None assert pos["price"] == 170.0 @@ -5209,7 +5293,9 @@ class TestSyncPositionsFromBroker: # No pchs_avg_pric in output1 balance = { "output1": [{"pdno": "005930", "ord_psbl_qty": "5"}], - "output2": [{"tot_evlu_amt": "1000000", "dnca_tot_amt": "500000", "pchs_amt_smtl_amt": "500000"}], + "output2": [ + {"tot_evlu_amt": "1000000", "dnca_tot_amt": "500000", "pchs_amt_smtl_amt": "500000"} + ], } broker = MagicMock() broker.get_balance = AsyncMock(return_value=balance) @@ -5218,6 +5304,7 @@ class TestSyncPositionsFromBroker: await sync_positions_from_broker(broker, overseas_broker, db_conn, settings) from src.db 
import get_open_position + pos = get_open_position(db_conn, "005930", "KR") assert pos is not None assert pos["price"] == 0.0 @@ -5345,12 +5432,8 @@ class TestHandleOverseasPendingOrders: "ovrs_excg_cd": "NASD", } overseas_broker = MagicMock() - overseas_broker.get_overseas_pending_orders = AsyncMock( - return_value=[pending_order] - ) - overseas_broker.cancel_overseas_order = AsyncMock( - return_value={"rt_cd": "0", "msg1": "OK"} - ) + overseas_broker.get_overseas_pending_orders = AsyncMock(return_value=[pending_order]) + overseas_broker.cancel_overseas_order = AsyncMock(return_value={"rt_cd": "0", "msg1": "OK"}) sell_resubmit_counts: dict[str, int] = {} buy_cooldown: dict[str, float] = {} @@ -5385,18 +5468,10 @@ class TestHandleOverseasPendingOrders: "ovrs_excg_cd": "NASD", } overseas_broker = MagicMock() - overseas_broker.get_overseas_pending_orders = AsyncMock( - return_value=[pending_order] - ) - overseas_broker.cancel_overseas_order = AsyncMock( - return_value={"rt_cd": "0", "msg1": "OK"} - ) - overseas_broker.get_overseas_price = AsyncMock( - return_value={"output": {"last": "200.0"}} - ) - overseas_broker.send_overseas_order = AsyncMock( - return_value={"rt_cd": "0", "msg1": "OK"} - ) + overseas_broker.get_overseas_pending_orders = AsyncMock(return_value=[pending_order]) + overseas_broker.cancel_overseas_order = AsyncMock(return_value={"rt_cd": "0", "msg1": "OK"}) + overseas_broker.get_overseas_price = AsyncMock(return_value={"output": {"last": "200.0"}}) + overseas_broker.send_overseas_order = AsyncMock(return_value={"rt_cd": "0", "msg1": "OK"}) sell_resubmit_counts: dict[str, int] = {} @@ -5427,9 +5502,7 @@ class TestHandleOverseasPendingOrders: "ovrs_excg_cd": "NASD", } overseas_broker = MagicMock() - overseas_broker.get_overseas_pending_orders = AsyncMock( - return_value=[pending_order] - ) + overseas_broker.get_overseas_pending_orders = AsyncMock(return_value=[pending_order]) overseas_broker.cancel_overseas_order = AsyncMock( return_value={"rt_cd": 
"1", "msg1": "Error"} # failure ) @@ -5458,12 +5531,8 @@ class TestHandleOverseasPendingOrders: "ovrs_excg_cd": "NASD", } overseas_broker = MagicMock() - overseas_broker.get_overseas_pending_orders = AsyncMock( - return_value=[pending_order] - ) - overseas_broker.cancel_overseas_order = AsyncMock( - return_value={"rt_cd": "0", "msg1": "OK"} - ) + overseas_broker.get_overseas_pending_orders = AsyncMock(return_value=[pending_order]) + overseas_broker.cancel_overseas_order = AsyncMock(return_value={"rt_cd": "0", "msg1": "OK"}) overseas_broker.send_overseas_order = AsyncMock() # Already resubmitted once @@ -5536,9 +5605,7 @@ class TestHandleDomesticPendingOrders: } broker = MagicMock() broker.get_domestic_pending_orders = AsyncMock(return_value=[pending_order]) - broker.cancel_domestic_order = AsyncMock( - return_value={"rt_cd": "0", "msg1": "OK"} - ) + broker.cancel_domestic_order = AsyncMock(return_value={"rt_cd": "0", "msg1": "OK"}) sell_resubmit_counts: dict[str, int] = {} buy_cooldown: dict[str, float] = {} @@ -5577,17 +5644,13 @@ class TestHandleDomesticPendingOrders: } broker = MagicMock() broker.get_domestic_pending_orders = AsyncMock(return_value=[pending_order]) - broker.cancel_domestic_order = AsyncMock( - return_value={"rt_cd": "0", "msg1": "OK"} - ) + broker.cancel_domestic_order = AsyncMock(return_value={"rt_cd": "0", "msg1": "OK"}) broker.get_current_price = AsyncMock(return_value=(50000.0, 0.0, 0.0)) broker.send_order = AsyncMock(return_value={"rt_cd": "0"}) sell_resubmit_counts: dict[str, int] = {} - await handle_domestic_pending_orders( - broker, telegram, settings, sell_resubmit_counts - ) + await handle_domestic_pending_orders(broker, telegram, settings, sell_resubmit_counts) broker.cancel_domestic_order.assert_called_once() broker.send_order.assert_called_once() @@ -5621,9 +5684,7 @@ class TestHandleDomesticPendingOrders: sell_resubmit_counts: dict[str, int] = {} - await handle_domestic_pending_orders( - broker, telegram, settings, 
sell_resubmit_counts - ) + await handle_domestic_pending_orders(broker, telegram, settings, sell_resubmit_counts) broker.send_order.assert_not_called() telegram.notify_unfilled_order.assert_not_called() @@ -5643,17 +5704,13 @@ class TestHandleDomesticPendingOrders: } broker = MagicMock() broker.get_domestic_pending_orders = AsyncMock(return_value=[pending_order]) - broker.cancel_domestic_order = AsyncMock( - return_value={"rt_cd": "0", "msg1": "OK"} - ) + broker.cancel_domestic_order = AsyncMock(return_value={"rt_cd": "0", "msg1": "OK"}) broker.send_order = AsyncMock() # Already resubmitted once sell_resubmit_counts: dict[str, int] = {"KR:005930": 1} - await handle_domestic_pending_orders( - broker, telegram, settings, sell_resubmit_counts - ) + await handle_domestic_pending_orders(broker, telegram, settings, sell_resubmit_counts) broker.cancel_domestic_order.assert_called_once() broker.send_order.assert_not_called() @@ -5867,9 +5924,7 @@ class TestOverseasGhostPositionClose: current_price = 1.5 # ord_psbl_qty=5 means the code passes the qty check and a SELL is sent balance_data = { - "output1": [ - {"ovrs_pdno": stock_code, "ord_psbl_qty": "5", "ovrs_cblc_qty": "5"} - ], + "output1": [{"ovrs_pdno": stock_code, "ord_psbl_qty": "5", "ovrs_cblc_qty": "5"}], "output2": [{"tot_evlu_amt": "10000"}], } sell_result = {"rt_cd": "1", "msg1": "모의투자 잔고내역이 없습니다"} @@ -5905,9 +5960,11 @@ class TestOverseasGhostPositionClose: settings.POSITION_SIZING_ENABLED = False settings.PAPER_OVERSEAS_CASH = 0 - with patch("src.main.log_trade") as mock_log_trade, patch( - "src.main.get_open_position", return_value=None - ), patch("src.main.get_latest_buy_trade", return_value=None): + with ( + patch("src.main.log_trade") as mock_log_trade, + patch("src.main.get_open_position", return_value=None), + patch("src.main.get_latest_buy_trade", return_value=None), + ): await trading_cycle( broker=domestic_broker, overseas_broker=overseas_broker, @@ -5976,8 +6033,9 @@ class 
TestOverseasGhostPositionClose: db_conn = MagicMock() - with patch("src.main.log_trade") as mock_log_trade, patch( - "src.main.get_open_position", return_value=None + with ( + patch("src.main.log_trade") as mock_log_trade, + patch("src.main.get_open_position", return_value=None), ): await trading_cycle( broker=domestic_broker, @@ -6168,7 +6226,10 @@ async def test_us_min_price_filter_boundary(price: float, should_block: bool) -> return_value={"output": {"last": str(price), "rate": "0.0"}} ) overseas_broker.get_overseas_balance = AsyncMock( - return_value={"output1": [], "output2": [{"frcr_evlu_tota": "10000", "frcr_buy_amt_smtl": "0"}]} + return_value={ + "output1": [], + "output2": [{"frcr_evlu_tota": "10000", "frcr_buy_amt_smtl": "0"}], + } ) overseas_broker.get_overseas_buying_power = AsyncMock( return_value={"output": {"ovrs_ord_psbl_amt": "10000"}} diff --git a/tests/test_market_schedule.py b/tests/test_market_schedule.py index 49110bc..8723c2f 100644 --- a/tests/test_market_schedule.py +++ b/tests/test_market_schedule.py @@ -173,9 +173,7 @@ class TestGetNextMarketOpen: """Should find next Monday opening when called on weekend.""" # Saturday 2026-02-07 12:00 UTC test_time = datetime(2026, 2, 7, 12, 0, tzinfo=ZoneInfo("UTC")) - market, open_time = get_next_market_open( - enabled_markets=["KR"], now=test_time - ) + market, open_time = get_next_market_open(enabled_markets=["KR"], now=test_time) assert market.code == "KR" # Monday 2026-02-09 09:00 KST expected = datetime(2026, 2, 9, 9, 0, tzinfo=ZoneInfo("Asia/Seoul")) @@ -185,9 +183,7 @@ class TestGetNextMarketOpen: """Should find next day opening when called after market close.""" # Monday 2026-02-02 16:00 KST (after close) test_time = datetime(2026, 2, 2, 16, 0, tzinfo=ZoneInfo("Asia/Seoul")) - market, open_time = get_next_market_open( - enabled_markets=["KR"], now=test_time - ) + market, open_time = get_next_market_open(enabled_markets=["KR"], now=test_time) assert market.code == "KR" # Tuesday 2026-02-03 
09:00 KST expected = datetime(2026, 2, 3, 9, 0, tzinfo=ZoneInfo("Asia/Seoul")) @@ -197,9 +193,7 @@ class TestGetNextMarketOpen: """Should find earliest opening market among multiple.""" # Saturday 2026-02-07 12:00 UTC test_time = datetime(2026, 2, 7, 12, 0, tzinfo=ZoneInfo("UTC")) - market, open_time = get_next_market_open( - enabled_markets=["KR", "US_NASDAQ"], now=test_time - ) + market, open_time = get_next_market_open(enabled_markets=["KR", "US_NASDAQ"], now=test_time) # Monday 2026-02-09: KR opens at 09:00 KST = 00:00 UTC # Monday 2026-02-09: US opens at 09:30 EST = 14:30 UTC # KR opens first @@ -214,9 +208,7 @@ class TestGetNextMarketOpen: def test_get_next_market_open_invalid_market(self) -> None: """Should skip invalid market codes.""" test_time = datetime(2026, 2, 7, 12, 0, tzinfo=ZoneInfo("UTC")) - market, _ = get_next_market_open( - enabled_markets=["INVALID", "KR"], now=test_time - ) + market, _ = get_next_market_open(enabled_markets=["INVALID", "KR"], now=test_time) assert market.code == "KR" def test_get_next_market_open_prefers_extended_session(self) -> None: diff --git a/tests/test_overseas_broker.py b/tests/test_overseas_broker.py index bd74cd9..6ac6f9b 100644 --- a/tests/test_overseas_broker.py +++ b/tests/test_overseas_broker.py @@ -8,7 +8,7 @@ import aiohttp import pytest from src.broker.kis_api import KISBroker -from src.broker.overseas import OverseasBroker, _PRICE_EXCHANGE_MAP, _RANKING_EXCHANGE_MAP +from src.broker.overseas import _PRICE_EXCHANGE_MAP, _RANKING_EXCHANGE_MAP, OverseasBroker from src.config import Settings @@ -85,25 +85,27 @@ class TestConfigDefaults: assert mock_settings.OVERSEAS_RANKING_VOLUME_TR_ID == "HHDFS76270000" def test_fluct_path(self, mock_settings: Settings) -> None: - assert mock_settings.OVERSEAS_RANKING_FLUCT_PATH == "/uapi/overseas-stock/v1/ranking/updown-rate" + assert ( + mock_settings.OVERSEAS_RANKING_FLUCT_PATH + == "/uapi/overseas-stock/v1/ranking/updown-rate" + ) def test_volume_path(self, mock_settings: 
Settings) -> None: - assert mock_settings.OVERSEAS_RANKING_VOLUME_PATH == "/uapi/overseas-stock/v1/ranking/volume-surge" + assert ( + mock_settings.OVERSEAS_RANKING_VOLUME_PATH + == "/uapi/overseas-stock/v1/ranking/volume-surge" + ) class TestFetchOverseasRankings: """Test fetch_overseas_rankings method.""" @pytest.mark.asyncio - async def test_fluctuation_uses_correct_params( - self, overseas_broker: OverseasBroker - ) -> None: + async def test_fluctuation_uses_correct_params(self, overseas_broker: OverseasBroker) -> None: """Fluctuation ranking should use HHDFS76290000, updown-rate path, and correct params.""" mock_resp = AsyncMock() mock_resp.status = 200 - mock_resp.json = AsyncMock( - return_value={"output": [{"symb": "AAPL", "name": "Apple"}]} - ) + mock_resp.json = AsyncMock(return_value={"output": [{"symb": "AAPL", "name": "Apple"}]}) mock_session = MagicMock() mock_session.get = MagicMock(return_value=_make_async_cm(mock_resp)) @@ -132,15 +134,11 @@ class TestFetchOverseasRankings: overseas_broker._broker._auth_headers.assert_called_with("HHDFS76290000") @pytest.mark.asyncio - async def test_volume_uses_correct_params( - self, overseas_broker: OverseasBroker - ) -> None: + async def test_volume_uses_correct_params(self, overseas_broker: OverseasBroker) -> None: """Volume ranking should use HHDFS76270000, volume-surge path, and correct params.""" mock_resp = AsyncMock() mock_resp.status = 200 - mock_resp.json = AsyncMock( - return_value={"output": [{"symb": "TSLA", "name": "Tesla"}]} - ) + mock_resp.json = AsyncMock(return_value={"output": [{"symb": "TSLA", "name": "Tesla"}]}) mock_session = MagicMock() mock_session.get = MagicMock(return_value=_make_async_cm(mock_resp)) @@ -169,9 +167,7 @@ class TestFetchOverseasRankings: overseas_broker._broker._auth_headers.assert_called_with("HHDFS76270000") @pytest.mark.asyncio - async def test_404_returns_empty_list( - self, overseas_broker: OverseasBroker - ) -> None: + async def test_404_returns_empty_list(self, 
overseas_broker: OverseasBroker) -> None: """HTTP 404 should return empty list (fallback) instead of raising.""" mock_resp = AsyncMock() mock_resp.status = 404 @@ -186,9 +182,7 @@ class TestFetchOverseasRankings: assert result == [] @pytest.mark.asyncio - async def test_non_404_error_raises( - self, overseas_broker: OverseasBroker - ) -> None: + async def test_non_404_error_raises(self, overseas_broker: OverseasBroker) -> None: """Non-404 HTTP errors should raise ConnectionError.""" mock_resp = AsyncMock() mock_resp.status = 500 @@ -203,9 +197,7 @@ class TestFetchOverseasRankings: await overseas_broker.fetch_overseas_rankings("NASD") @pytest.mark.asyncio - async def test_empty_response_returns_empty( - self, overseas_broker: OverseasBroker - ) -> None: + async def test_empty_response_returns_empty(self, overseas_broker: OverseasBroker) -> None: """Empty output in response should return empty list.""" mock_resp = AsyncMock() mock_resp.status = 200 @@ -220,18 +212,14 @@ class TestFetchOverseasRankings: assert result == [] @pytest.mark.asyncio - async def test_ranking_disabled_returns_empty( - self, overseas_broker: OverseasBroker - ) -> None: + async def test_ranking_disabled_returns_empty(self, overseas_broker: OverseasBroker) -> None: """When OVERSEAS_RANKING_ENABLED=False, should return empty immediately.""" overseas_broker._broker._settings.OVERSEAS_RANKING_ENABLED = False result = await overseas_broker.fetch_overseas_rankings("NASD") assert result == [] @pytest.mark.asyncio - async def test_limit_truncates_results( - self, overseas_broker: OverseasBroker - ) -> None: + async def test_limit_truncates_results(self, overseas_broker: OverseasBroker) -> None: """Results should be truncated to the specified limit.""" rows = [{"symb": f"SYM{i}"} for i in range(20)] mock_resp = AsyncMock() @@ -247,9 +235,7 @@ class TestFetchOverseasRankings: assert len(result) == 5 @pytest.mark.asyncio - async def test_network_error_raises( - self, overseas_broker: OverseasBroker - ) -> 
None: + async def test_network_error_raises(self, overseas_broker: OverseasBroker) -> None: """Network errors should raise ConnectionError.""" cm = MagicMock() cm.__aenter__ = AsyncMock(side_effect=aiohttp.ClientError("timeout")) @@ -264,9 +250,7 @@ class TestFetchOverseasRankings: await overseas_broker.fetch_overseas_rankings("NASD") @pytest.mark.asyncio - async def test_exchange_code_mapping_applied( - self, overseas_broker: OverseasBroker - ) -> None: + async def test_exchange_code_mapping_applied(self, overseas_broker: OverseasBroker) -> None: """All major exchanges should use mapped codes in API params.""" for original, mapped in [("NASD", "NAS"), ("NYSE", "NYS"), ("AMEX", "AMS")]: mock_resp = AsyncMock() @@ -298,7 +282,9 @@ class TestGetOverseasPrice: mock_session.get = MagicMock(return_value=_make_async_cm(mock_resp)) _setup_broker_mocks(overseas_broker, mock_session) - overseas_broker._broker._auth_headers = AsyncMock(return_value={"authorization": "Bearer t"}) + overseas_broker._broker._auth_headers = AsyncMock( + return_value={"authorization": "Bearer t"} + ) result = await overseas_broker.get_overseas_price("NASD", "AAPL") assert result["output"]["last"] == "150.00" @@ -530,11 +516,14 @@ class TestPriceExchangeMap: def test_price_map_equals_ranking_map(self) -> None: assert _PRICE_EXCHANGE_MAP is _RANKING_EXCHANGE_MAP - @pytest.mark.parametrize("original,expected", [ - ("NASD", "NAS"), - ("NYSE", "NYS"), - ("AMEX", "AMS"), - ]) + @pytest.mark.parametrize( + "original,expected", + [ + ("NASD", "NAS"), + ("NYSE", "NYS"), + ("AMEX", "AMS"), + ], + ) def test_us_exchange_code_mapping(self, original: str, expected: str) -> None: assert _PRICE_EXCHANGE_MAP[original] == expected @@ -574,9 +563,7 @@ class TestOrderRtCdCheck: return OverseasBroker(broker) @pytest.mark.asyncio - async def test_success_rt_cd_returns_data( - self, overseas_broker: OverseasBroker - ) -> None: + async def test_success_rt_cd_returns_data(self, overseas_broker: OverseasBroker) -> None: 
"""rt_cd='0' → order accepted, data returned.""" mock_resp = AsyncMock() mock_resp.status = 200 @@ -590,9 +577,7 @@ class TestOrderRtCdCheck: assert result["rt_cd"] == "0" @pytest.mark.asyncio - async def test_error_rt_cd_returns_data_with_msg( - self, overseas_broker: OverseasBroker - ) -> None: + async def test_error_rt_cd_returns_data_with_msg(self, overseas_broker: OverseasBroker) -> None: """rt_cd != '0' → order rejected, data still returned (caller checks rt_cd).""" mock_resp = AsyncMock() mock_resp.status = 200 @@ -623,6 +608,7 @@ class TestPaperOverseasCash: def test_env_override(self) -> None: import os + os.environ["PAPER_OVERSEAS_CASH"] = "25000" settings = Settings( KIS_APP_KEY="k", @@ -635,6 +621,7 @@ class TestPaperOverseasCash: def test_zero_disables_fallback(self) -> None: import os + os.environ["PAPER_OVERSEAS_CASH"] = "0" settings = Settings( KIS_APP_KEY="k", @@ -822,9 +809,7 @@ class TestGetOverseasPendingOrders: """Tests for get_overseas_pending_orders method.""" @pytest.mark.asyncio - async def test_paper_mode_returns_empty( - self, overseas_broker: OverseasBroker - ) -> None: + async def test_paper_mode_returns_empty(self, overseas_broker: OverseasBroker) -> None: """Paper mode should immediately return [] without any API call.""" # Default mock_settings has MODE="paper" overseas_broker._broker._settings = overseas_broker._broker._settings.model_copy( @@ -855,9 +840,7 @@ class TestGetOverseasPendingOrders: overseas_broker._broker._auth_headers = mock_auth_headers # type: ignore[method-assign] - pending_orders = [ - {"odno": "001", "pdno": "AAPL", "sll_buy_dvsn_cd": "02", "nccs_qty": "5"} - ] + pending_orders = [{"odno": "001", "pdno": "AAPL", "sll_buy_dvsn_cd": "02", "nccs_qty": "5"}] mock_resp = AsyncMock() mock_resp.status = 200 mock_resp.json = AsyncMock(return_value={"output": pending_orders}) @@ -879,9 +862,7 @@ class TestGetOverseasPendingOrders: assert captured_params[0]["OVRS_EXCG_CD"] == "NASD" @pytest.mark.asyncio - async def 
test_live_mode_connection_error( - self, overseas_broker: OverseasBroker - ) -> None: + async def test_live_mode_connection_error(self, overseas_broker: OverseasBroker) -> None: """Network error in live mode should raise ConnectionError.""" overseas_broker._broker._settings = overseas_broker._broker._settings.model_copy( update={"MODE": "live"} @@ -926,55 +907,41 @@ class TestCancelOverseasOrder: return captured_tr_ids, mock_session @pytest.mark.asyncio - async def test_us_live_uses_tttt1004u( - self, overseas_broker: OverseasBroker - ) -> None: + async def test_us_live_uses_tttt1004u(self, overseas_broker: OverseasBroker) -> None: """US exchange in live mode should use TTTT1004U.""" overseas_broker._broker._settings = overseas_broker._broker._settings.model_copy( update={"MODE": "live"} ) - captured, _ = self._setup_cancel_mocks( - overseas_broker, {"rt_cd": "0", "msg1": "OK"} - ) + captured, _ = self._setup_cancel_mocks(overseas_broker, {"rt_cd": "0", "msg1": "OK"}) await overseas_broker.cancel_overseas_order("NASD", "AAPL", "ORD001", 5) assert "TTTT1004U" in captured @pytest.mark.asyncio - async def test_us_paper_uses_vttt1004u( - self, overseas_broker: OverseasBroker - ) -> None: + async def test_us_paper_uses_vttt1004u(self, overseas_broker: OverseasBroker) -> None: """US exchange in paper mode should use VTTT1004U.""" # Default mock_settings has MODE="paper" - captured, _ = self._setup_cancel_mocks( - overseas_broker, {"rt_cd": "0", "msg1": "OK"} - ) + captured, _ = self._setup_cancel_mocks(overseas_broker, {"rt_cd": "0", "msg1": "OK"}) await overseas_broker.cancel_overseas_order("NASD", "AAPL", "ORD001", 5) assert "VTTT1004U" in captured @pytest.mark.asyncio - async def test_hk_live_uses_ttts1003u( - self, overseas_broker: OverseasBroker - ) -> None: + async def test_hk_live_uses_ttts1003u(self, overseas_broker: OverseasBroker) -> None: """SEHK exchange in live mode should use TTTS1003U.""" overseas_broker._broker._settings = 
overseas_broker._broker._settings.model_copy( update={"MODE": "live"} ) - captured, _ = self._setup_cancel_mocks( - overseas_broker, {"rt_cd": "0", "msg1": "OK"} - ) + captured, _ = self._setup_cancel_mocks(overseas_broker, {"rt_cd": "0", "msg1": "OK"}) await overseas_broker.cancel_overseas_order("SEHK", "0700", "ORD002", 10) assert "TTTS1003U" in captured @pytest.mark.asyncio - async def test_cancel_sets_rvse_cncl_dvsn_cd_02( - self, overseas_broker: OverseasBroker - ) -> None: + async def test_cancel_sets_rvse_cncl_dvsn_cd_02(self, overseas_broker: OverseasBroker) -> None: """Cancel body must include RVSE_CNCL_DVSN_CD='02' and OVRS_ORD_UNPR='0'.""" captured_body: list[dict] = [] @@ -1005,9 +972,7 @@ class TestCancelOverseasOrder: assert captured_body[0]["ORGN_ODNO"] == "ORD003" @pytest.mark.asyncio - async def test_cancel_sets_hashkey_header( - self, overseas_broker: OverseasBroker - ) -> None: + async def test_cancel_sets_hashkey_header(self, overseas_broker: OverseasBroker) -> None: """hashkey must be set in the request headers.""" captured_headers: list[dict] = [] overseas_broker._broker._get_hash_key = AsyncMock(return_value="test_hash") # type: ignore[method-assign] diff --git a/tests/test_pre_market_planner.py b/tests/test_pre_market_planner.py index 50e2a3b..b35161c 100644 --- a/tests/test_pre_market_planner.py +++ b/tests/test_pre_market_planner.py @@ -78,9 +78,7 @@ def _gemini_response_json( "rationale": "Near circuit breaker", } ] - return json.dumps( - {"market_outlook": outlook, "global_rules": global_rules, "stocks": stocks} - ) + return json.dumps({"market_outlook": outlook, "global_rules": global_rules, "stocks": stocks}) def _make_planner( @@ -564,8 +562,12 @@ class TestBuildPrompt: def test_prompt_contains_cross_market(self) -> None: planner = _make_planner() cross = CrossMarketContext( - market="US", date="2026-02-07", total_pnl=1.5, - win_rate=60, index_change_pct=0.8, lessons=["Cut losses early"], + market="US", + date="2026-02-07", + 
total_pnl=1.5, + win_rate=60, + index_change_pct=0.8, + lessons=["Cut losses early"], ) prompt = planner._build_prompt("KR", [_candidate()], {}, None, cross) @@ -683,9 +685,7 @@ class TestSmartFallbackPlaybook: ) def test_momentum_candidate_gets_buy_on_volume(self) -> None: - candidates = [ - _candidate(code="CHOW", signal="momentum", volume_ratio=13.64, rsi=100.0) - ] + candidates = [_candidate(code="CHOW", signal="momentum", volume_ratio=13.64, rsi=100.0)] settings = self._make_settings() pb = PreMarketPlanner._smart_fallback_playbook( @@ -707,9 +707,7 @@ class TestSmartFallbackPlaybook: assert sell_sc.condition.price_change_pct_below == -3.0 def test_oversold_candidate_gets_buy_on_rsi(self) -> None: - candidates = [ - _candidate(code="005930", signal="oversold", rsi=22.0, volume_ratio=3.5) - ] + candidates = [_candidate(code="005930", signal="oversold", rsi=22.0, volume_ratio=3.5)] settings = self._make_settings() pb = PreMarketPlanner._smart_fallback_playbook( @@ -776,9 +774,7 @@ class TestSmartFallbackPlaybook: def test_empty_candidates_returns_empty_playbook(self) -> None: settings = self._make_settings() - pb = PreMarketPlanner._smart_fallback_playbook( - date(2026, 2, 17), "US_AMEX", [], settings - ) + pb = PreMarketPlanner._smart_fallback_playbook(date(2026, 2, 17), "US_AMEX", [], settings) assert pb.stock_count == 0 @@ -814,19 +810,14 @@ class TestSmartFallbackPlaybook: planner = _make_planner() planner._gemini.decide = AsyncMock(side_effect=ConnectionError("429 quota exceeded")) # momentum candidate - candidates = [ - _candidate(code="CHOW", signal="momentum", volume_ratio=13.64, rsi=100.0) - ] + candidates = [_candidate(code="CHOW", signal="momentum", volume_ratio=13.64, rsi=100.0)] - pb = await planner.generate_playbook( - "US_AMEX", candidates, today=date(2026, 2, 18) - ) + pb = await planner.generate_playbook("US_AMEX", candidates, today=date(2026, 2, 18)) # Should NOT be all-SELL defensive; should have BUY for momentum assert pb.stock_count == 1 
buy_scenarios = [ - s for s in pb.stock_playbooks[0].scenarios - if s.action == ScenarioAction.BUY + s for s in pb.stock_playbooks[0].scenarios if s.action == ScenarioAction.BUY ] assert len(buy_scenarios) == 1 assert buy_scenarios[0].condition.volume_ratio_above == 2.0 # VOL_MULTIPLIER default diff --git a/tests/test_scenario_engine.py b/tests/test_scenario_engine.py index 4fcea51..4b6bbd5 100644 --- a/tests/test_scenario_engine.py +++ b/tests/test_scenario_engine.py @@ -14,7 +14,7 @@ from src.strategy.models import ( StockPlaybook, StockScenario, ) -from src.strategy.scenario_engine import ScenarioEngine, ScenarioMatch +from src.strategy.scenario_engine import ScenarioEngine @pytest.fixture @@ -162,13 +162,15 @@ class TestEvaluateCondition: def test_mixed_invalid_types_no_exception(self, engine: ScenarioEngine) -> None: """Various invalid types should not raise exceptions.""" cond = StockCondition( - rsi_below=30.0, volume_ratio_above=2.0, - price_above=100, price_change_pct_below=-1.0, + rsi_below=30.0, + volume_ratio_above=2.0, + price_above=100, + price_change_pct_below=-1.0, ) data = { - "rsi": [25], # list + "rsi": [25], # list "volume_ratio": "bad", # non-numeric string - "current_price": {}, # dict + "current_price": {}, # dict "price_change_pct": object(), # arbitrary object } # Should return False (invalid types → None → False), never raise @@ -356,9 +358,7 @@ class TestEvaluate: def test_match_details_populated(self, engine: ScenarioEngine) -> None: pb = _playbook(scenarios=[_scenario(rsi_below=30.0, volume_ratio_above=2.0)]) - result = engine.evaluate( - pb, "005930", {"rsi": 25.0, "volume_ratio": 3.0}, {} - ) + result = engine.evaluate(pb, "005930", {"rsi": 25.0, "volume_ratio": 3.0}, {}) assert result.match_details.get("rsi") == 25.0 assert result.match_details.get("volume_ratio") == 3.0 @@ -381,7 +381,9 @@ class TestEvaluate: ), StockPlaybook( stock_code="MSFT", - scenarios=[_scenario(rsi_above=75.0, action=ScenarioAction.SELL, confidence=80)], + 
scenarios=[ + _scenario(rsi_above=75.0, action=ScenarioAction.SELL, confidence=80) + ], ), ], ) @@ -450,58 +452,42 @@ class TestEvaluate: class TestPositionAwareConditions: """Tests for unrealized_pnl_pct and holding_days condition fields.""" - def test_evaluate_condition_unrealized_pnl_above_matches( - self, engine: ScenarioEngine - ) -> None: + def test_evaluate_condition_unrealized_pnl_above_matches(self, engine: ScenarioEngine) -> None: """unrealized_pnl_pct_above should match when P&L exceeds threshold.""" condition = StockCondition(unrealized_pnl_pct_above=3.0) assert engine.evaluate_condition(condition, {"unrealized_pnl_pct": 5.0}) is True - def test_evaluate_condition_unrealized_pnl_above_no_match( - self, engine: ScenarioEngine - ) -> None: + def test_evaluate_condition_unrealized_pnl_above_no_match(self, engine: ScenarioEngine) -> None: """unrealized_pnl_pct_above should NOT match when P&L is below threshold.""" condition = StockCondition(unrealized_pnl_pct_above=3.0) assert engine.evaluate_condition(condition, {"unrealized_pnl_pct": 2.0}) is False - def test_evaluate_condition_unrealized_pnl_below_matches( - self, engine: ScenarioEngine - ) -> None: + def test_evaluate_condition_unrealized_pnl_below_matches(self, engine: ScenarioEngine) -> None: """unrealized_pnl_pct_below should match when P&L is under threshold.""" condition = StockCondition(unrealized_pnl_pct_below=-2.0) assert engine.evaluate_condition(condition, {"unrealized_pnl_pct": -3.5}) is True - def test_evaluate_condition_unrealized_pnl_below_no_match( - self, engine: ScenarioEngine - ) -> None: + def test_evaluate_condition_unrealized_pnl_below_no_match(self, engine: ScenarioEngine) -> None: """unrealized_pnl_pct_below should NOT match when P&L is above threshold.""" condition = StockCondition(unrealized_pnl_pct_below=-2.0) assert engine.evaluate_condition(condition, {"unrealized_pnl_pct": -1.0}) is False - def test_evaluate_condition_holding_days_above_matches( - self, engine: 
ScenarioEngine - ) -> None: + def test_evaluate_condition_holding_days_above_matches(self, engine: ScenarioEngine) -> None: """holding_days_above should match when position held longer than threshold.""" condition = StockCondition(holding_days_above=5) assert engine.evaluate_condition(condition, {"holding_days": 7}) is True - def test_evaluate_condition_holding_days_above_no_match( - self, engine: ScenarioEngine - ) -> None: + def test_evaluate_condition_holding_days_above_no_match(self, engine: ScenarioEngine) -> None: """holding_days_above should NOT match when position held shorter.""" condition = StockCondition(holding_days_above=5) assert engine.evaluate_condition(condition, {"holding_days": 3}) is False - def test_evaluate_condition_holding_days_below_matches( - self, engine: ScenarioEngine - ) -> None: + def test_evaluate_condition_holding_days_below_matches(self, engine: ScenarioEngine) -> None: """holding_days_below should match when position held fewer days.""" condition = StockCondition(holding_days_below=3) assert engine.evaluate_condition(condition, {"holding_days": 1}) is True - def test_evaluate_condition_holding_days_below_no_match( - self, engine: ScenarioEngine - ) -> None: + def test_evaluate_condition_holding_days_below_no_match(self, engine: ScenarioEngine) -> None: """holding_days_below should NOT match when held more days.""" condition = StockCondition(holding_days_below=3) assert engine.evaluate_condition(condition, {"holding_days": 5}) is False @@ -513,33 +499,33 @@ class TestPositionAwareConditions: holding_days_above=5, ) # Both met → match - assert engine.evaluate_condition( - condition, - {"unrealized_pnl_pct": 4.5, "holding_days": 7}, - ) is True + assert ( + engine.evaluate_condition( + condition, + {"unrealized_pnl_pct": 4.5, "holding_days": 7}, + ) + is True + ) # Only pnl met → no match - assert engine.evaluate_condition( - condition, - {"unrealized_pnl_pct": 4.5, "holding_days": 3}, - ) is False + assert ( + 
engine.evaluate_condition( + condition, + {"unrealized_pnl_pct": 4.5, "holding_days": 3}, + ) + is False + ) - def test_missing_unrealized_pnl_does_not_match( - self, engine: ScenarioEngine - ) -> None: + def test_missing_unrealized_pnl_does_not_match(self, engine: ScenarioEngine) -> None: """Missing unrealized_pnl_pct key should not match the condition.""" condition = StockCondition(unrealized_pnl_pct_above=3.0) assert engine.evaluate_condition(condition, {}) is False - def test_missing_holding_days_does_not_match( - self, engine: ScenarioEngine - ) -> None: + def test_missing_holding_days_does_not_match(self, engine: ScenarioEngine) -> None: """Missing holding_days key should not match the condition.""" condition = StockCondition(holding_days_above=5) assert engine.evaluate_condition(condition, {}) is False - def test_match_details_includes_position_fields( - self, engine: ScenarioEngine - ) -> None: + def test_match_details_includes_position_fields(self, engine: ScenarioEngine) -> None: """match_details should include position fields when condition specifies them.""" pb = _playbook( scenarios=[ diff --git a/tests/test_smart_scanner.py b/tests/test_smart_scanner.py index bb8200f..5fa1c07 100644 --- a/tests/test_smart_scanner.py +++ b/tests/test_smart_scanner.py @@ -2,9 +2,10 @@ from __future__ import annotations -import pytest from unittest.mock import AsyncMock, MagicMock +import pytest + from src.analysis.smart_scanner import ScanCandidate, SmartVolatilityScanner from src.analysis.volatility import VolatilityAnalyzer from src.broker.kis_api import KISBroker @@ -200,9 +201,7 @@ class TestSmartVolatilityScanner: assert len(candidates) <= scanner.top_n @pytest.mark.asyncio - async def test_get_stock_codes( - self, scanner: SmartVolatilityScanner - ) -> None: + async def test_get_stock_codes(self, scanner: SmartVolatilityScanner) -> None: """Test extraction of stock codes from candidates.""" candidates = [ ScanCandidate( diff --git a/tests/test_strategy_models.py 
b/tests/test_strategy_models.py index 9ea40e0..7cee5eb 100644 --- a/tests/test_strategy_models.py +++ b/tests/test_strategy_models.py @@ -19,7 +19,6 @@ from src.strategy.models import ( StockScenario, ) - # --------------------------------------------------------------------------- # StockCondition # --------------------------------------------------------------------------- diff --git a/tests/test_telegram.py b/tests/test_telegram.py index 606b4e7..6af177c 100644 --- a/tests/test_telegram.py +++ b/tests/test_telegram.py @@ -5,7 +5,11 @@ from unittest.mock import AsyncMock, patch import aiohttp import pytest -from src.notifications.telegram_client import NotificationFilter, NotificationPriority, TelegramClient +from src.notifications.telegram_client import ( + NotificationFilter, + NotificationPriority, + TelegramClient, +) class TestTelegramClientInit: @@ -13,9 +17,7 @@ class TestTelegramClientInit: def test_disabled_via_flag(self) -> None: """Client disabled via enabled=False flag.""" - client = TelegramClient( - bot_token="123:abc", chat_id="456", enabled=False - ) + client = TelegramClient(bot_token="123:abc", chat_id="456", enabled=False) assert client._enabled is False def test_disabled_missing_token(self) -> None: @@ -30,9 +32,7 @@ class TestTelegramClientInit: def test_enabled_with_credentials(self) -> None: """Client enabled when credentials provided.""" - client = TelegramClient( - bot_token="123:abc", chat_id="456", enabled=True - ) + client = TelegramClient(bot_token="123:abc", chat_id="456", enabled=True) assert client._enabled is True @@ -42,9 +42,7 @@ class TestNotificationSending: @pytest.mark.asyncio async def test_send_message_success(self) -> None: """send_message returns True on successful send.""" - client = TelegramClient( - bot_token="123:abc", chat_id="456", enabled=True - ) + client = TelegramClient(bot_token="123:abc", chat_id="456", enabled=True) mock_resp = AsyncMock() mock_resp.status = 200 @@ -76,9 +74,7 @@ class 
TestNotificationSending: @pytest.mark.asyncio async def test_send_message_api_error(self) -> None: """send_message returns False on API error.""" - client = TelegramClient( - bot_token="123:abc", chat_id="456", enabled=True - ) + client = TelegramClient(bot_token="123:abc", chat_id="456", enabled=True) mock_resp = AsyncMock() mock_resp.status = 400 @@ -93,9 +89,7 @@ class TestNotificationSending: @pytest.mark.asyncio async def test_send_message_with_markdown(self) -> None: """send_message supports different parse modes.""" - client = TelegramClient( - bot_token="123:abc", chat_id="456", enabled=True - ) + client = TelegramClient(bot_token="123:abc", chat_id="456", enabled=True) mock_resp = AsyncMock() mock_resp.status = 200 @@ -128,9 +122,7 @@ class TestNotificationSending: @pytest.mark.asyncio async def test_trade_execution_format(self) -> None: """Trade notification has correct format.""" - client = TelegramClient( - bot_token="123:abc", chat_id="456", enabled=True - ) + client = TelegramClient(bot_token="123:abc", chat_id="456", enabled=True) mock_resp = AsyncMock() mock_resp.status = 200 @@ -163,9 +155,7 @@ class TestNotificationSending: @pytest.mark.asyncio async def test_playbook_generated_format(self) -> None: """Playbook generated notification has expected fields.""" - client = TelegramClient( - bot_token="123:abc", chat_id="456", enabled=True - ) + client = TelegramClient(bot_token="123:abc", chat_id="456", enabled=True) mock_resp = AsyncMock() mock_resp.status = 200 @@ -190,9 +180,7 @@ class TestNotificationSending: @pytest.mark.asyncio async def test_scenario_matched_format(self) -> None: """Scenario matched notification has expected fields.""" - client = TelegramClient( - bot_token="123:abc", chat_id="456", enabled=True - ) + client = TelegramClient(bot_token="123:abc", chat_id="456", enabled=True) mock_resp = AsyncMock() mock_resp.status = 200 @@ -217,9 +205,7 @@ class TestNotificationSending: @pytest.mark.asyncio async def 
test_playbook_failed_format(self) -> None: """Playbook failed notification has expected fields.""" - client = TelegramClient( - bot_token="123:abc", chat_id="456", enabled=True - ) + client = TelegramClient(bot_token="123:abc", chat_id="456", enabled=True) mock_resp = AsyncMock() mock_resp.status = 200 @@ -240,9 +226,7 @@ class TestNotificationSending: @pytest.mark.asyncio async def test_circuit_breaker_priority(self) -> None: """Circuit breaker uses CRITICAL priority.""" - client = TelegramClient( - bot_token="123:abc", chat_id="456", enabled=True - ) + client = TelegramClient(bot_token="123:abc", chat_id="456", enabled=True) mock_resp = AsyncMock() mock_resp.status = 200 @@ -260,9 +244,7 @@ class TestNotificationSending: @pytest.mark.asyncio async def test_api_error_handling(self) -> None: """API errors logged but don't crash.""" - client = TelegramClient( - bot_token="123:abc", chat_id="456", enabled=True - ) + client = TelegramClient(bot_token="123:abc", chat_id="456", enabled=True) mock_resp = AsyncMock() mock_resp.status = 400 @@ -277,25 +259,19 @@ class TestNotificationSending: @pytest.mark.asyncio async def test_timeout_handling(self) -> None: """Timeouts logged but don't crash.""" - client = TelegramClient( - bot_token="123:abc", chat_id="456", enabled=True - ) + client = TelegramClient(bot_token="123:abc", chat_id="456", enabled=True) with patch( "aiohttp.ClientSession.post", side_effect=aiohttp.ClientError("Connection timeout"), ): # Should not raise exception - await client.notify_error( - error_type="Test Error", error_msg="Test", context="test" - ) + await client.notify_error(error_type="Test Error", error_msg="Test", context="test") @pytest.mark.asyncio async def test_session_management(self) -> None: """Session created and reused correctly.""" - client = TelegramClient( - bot_token="123:abc", chat_id="456", enabled=True - ) + client = TelegramClient(bot_token="123:abc", chat_id="456", enabled=True) # Session should be None initially assert 
client._session is None @@ -324,9 +300,7 @@ class TestRateLimiting: """Rate limiter delays rapid requests.""" import time - client = TelegramClient( - bot_token="123:abc", chat_id="456", enabled=True, rate_limit=2.0 - ) + client = TelegramClient(bot_token="123:abc", chat_id="456", enabled=True, rate_limit=2.0) mock_resp = AsyncMock() mock_resp.status = 200 @@ -353,9 +327,7 @@ class TestMessagePriorities: @pytest.mark.asyncio async def test_low_priority_uses_info_emoji(self) -> None: """LOW priority uses ℹ️ emoji.""" - client = TelegramClient( - bot_token="123:abc", chat_id="456", enabled=True - ) + client = TelegramClient(bot_token="123:abc", chat_id="456", enabled=True) mock_resp = AsyncMock() mock_resp.status = 200 @@ -371,9 +343,7 @@ class TestMessagePriorities: @pytest.mark.asyncio async def test_critical_priority_uses_alarm_emoji(self) -> None: """CRITICAL priority uses 🚨 emoji.""" - client = TelegramClient( - bot_token="123:abc", chat_id="456", enabled=True - ) + client = TelegramClient(bot_token="123:abc", chat_id="456", enabled=True) mock_resp = AsyncMock() mock_resp.status = 200 @@ -389,9 +359,7 @@ class TestMessagePriorities: @pytest.mark.asyncio async def test_playbook_generated_priority(self) -> None: """Playbook generated uses MEDIUM priority emoji.""" - client = TelegramClient( - bot_token="123:abc", chat_id="456", enabled=True - ) + client = TelegramClient(bot_token="123:abc", chat_id="456", enabled=True) mock_resp = AsyncMock() mock_resp.status = 200 @@ -412,9 +380,7 @@ class TestMessagePriorities: @pytest.mark.asyncio async def test_playbook_failed_priority(self) -> None: """Playbook failed uses HIGH priority emoji.""" - client = TelegramClient( - bot_token="123:abc", chat_id="456", enabled=True - ) + client = TelegramClient(bot_token="123:abc", chat_id="456", enabled=True) mock_resp = AsyncMock() mock_resp.status = 200 @@ -433,9 +399,7 @@ class TestMessagePriorities: @pytest.mark.asyncio async def test_scenario_matched_priority(self) -> None: 
"""Scenario matched uses HIGH priority emoji.""" - client = TelegramClient( - bot_token="123:abc", chat_id="456", enabled=True - ) + client = TelegramClient(bot_token="123:abc", chat_id="456", enabled=True) mock_resp = AsyncMock() mock_resp.status = 200 @@ -460,9 +424,7 @@ class TestClientCleanup: @pytest.mark.asyncio async def test_close_closes_session(self) -> None: """close() closes the HTTP session.""" - client = TelegramClient( - bot_token="123:abc", chat_id="456", enabled=True - ) + client = TelegramClient(bot_token="123:abc", chat_id="456", enabled=True) mock_session = AsyncMock() mock_session.closed = False @@ -475,9 +437,7 @@ class TestClientCleanup: @pytest.mark.asyncio async def test_close_handles_no_session(self) -> None: """close() handles None session gracefully.""" - client = TelegramClient( - bot_token="123:abc", chat_id="456", enabled=True - ) + client = TelegramClient(bot_token="123:abc", chat_id="456", enabled=True) # Should not raise exception await client.close() @@ -535,8 +495,12 @@ class TestNotificationFilter: ) with patch("aiohttp.ClientSession.post") as mock_post: await client.notify_trade_execution( - stock_code="005930", market="KR", action="BUY", - quantity=10, price=70000.0, confidence=85.0 + stock_code="005930", + market="KR", + action="BUY", + quantity=10, + price=70000.0, + confidence=85.0, ) mock_post.assert_not_called() @@ -556,8 +520,13 @@ class TestNotificationFilter: async def test_circuit_breaker_always_sends_regardless_of_filter(self) -> None: """notify_circuit_breaker always sends (no filter flag).""" nf = NotificationFilter( - trades=False, market_open_close=False, fat_finger=False, - system_events=False, playbook=False, scenario_match=False, errors=False, + trades=False, + market_open_close=False, + fat_finger=False, + system_events=False, + playbook=False, + scenario_match=False, + errors=False, ) client = TelegramClient( bot_token="123:abc", chat_id="456", enabled=True, notification_filter=nf @@ -617,7 +586,7 @@ class 
TestNotificationFilter: nf = NotificationFilter() assert nf.set_flag("unknown_key", False) is False - def test_as_dict_keys_match_KEYS(self) -> None: + def test_as_dict_keys_match_keys(self) -> None: """as_dict() returns every key defined in KEYS.""" nf = NotificationFilter() d = nf.as_dict() @@ -640,10 +609,17 @@ class TestNotificationFilter: def test_set_notification_all_on(self) -> None: """set_notification('all', True) enables every filter flag.""" client = TelegramClient( - bot_token="123:abc", chat_id="456", enabled=True, + bot_token="123:abc", + chat_id="456", + enabled=True, notification_filter=NotificationFilter( - trades=False, market_open_close=False, scenario_match=False, - fat_finger=False, system_events=False, playbook=False, errors=False, + trades=False, + market_open_close=False, + scenario_match=False, + fat_finger=False, + system_events=False, + playbook=False, + errors=False, ), ) assert client.set_notification("all", True) is True diff --git a/tests/test_telegram_commands.py b/tests/test_telegram_commands.py index a184549..9615022 100644 --- a/tests/test_telegram_commands.py +++ b/tests/test_telegram_commands.py @@ -357,8 +357,7 @@ class TestTradingControlCommands: pause_event.set() await client.send_message( - "▶️ Trading Resumed\n\n" - "Trading operations have been restarted." + "▶️ Trading Resumed\n\nTrading operations have been restarted." ) handler.register_command("resume", mock_resume) @@ -526,9 +525,7 @@ class TestStatusCommands: async def mock_status_error() -> None: """Mock /status handler with error.""" - await client.send_message( - "⚠️ Error\n\nFailed to retrieve trading status." - ) + await client.send_message("⚠️ Error\n\nFailed to retrieve trading status.") handler.register_command("status", mock_status_error) @@ -603,10 +600,7 @@ class TestStatusCommands: async def mock_positions_empty() -> None: """Mock /positions handler with no positions.""" - message = ( - "💼 Account Summary\n\n" - "No balance information available." 
- ) + message = "💼 Account Summary\n\nNo balance information available." await client.send_message(message) handler.register_command("positions", mock_positions_empty) @@ -639,9 +633,7 @@ class TestStatusCommands: async def mock_positions_error() -> None: """Mock /positions handler with error.""" - await client.send_message( - "⚠️ Error\n\nFailed to retrieve positions." - ) + await client.send_message("⚠️ Error\n\nFailed to retrieve positions.") handler.register_command("positions", mock_positions_error) diff --git a/tests/test_validate_governance_assets.py b/tests/test_validate_governance_assets.py index 3a0bc0b..719d801 100644 --- a/tests/test_validate_governance_assets.py +++ b/tests/test_validate_governance_assets.py @@ -70,7 +70,9 @@ def test_load_changed_files_with_range_uses_git_diff(monkeypatch) -> None: assert check is True assert capture_output is True assert text is True - return SimpleNamespace(stdout="docs/ouroboros/85_loss_recovery_action_plan.md\nsrc/main.py\n") + return SimpleNamespace( + stdout="docs/ouroboros/85_loss_recovery_action_plan.md\nsrc/main.py\n" + ) monkeypatch.setattr(module.subprocess, "run", fake_run) changed = module.load_changed_files(["abc...def"], errors) diff --git a/tests/test_volatility.py b/tests/test_volatility.py index 02f0234..25b08b1 100644 --- a/tests/test_volatility.py +++ b/tests/test_volatility.py @@ -80,9 +80,7 @@ class TestVolatilityAnalyzer: # ATR should be roughly the average true range assert 3.0 <= atr <= 6.0 - def test_calculate_atr_insufficient_data( - self, volatility_analyzer: VolatilityAnalyzer - ) -> None: + def test_calculate_atr_insufficient_data(self, volatility_analyzer: VolatilityAnalyzer) -> None: """Test ATR with insufficient data returns 0.""" high_prices = [110.0, 112.0] low_prices = [105.0, 107.0] @@ -120,17 +118,13 @@ class TestVolatilityAnalyzer: surge = volatility_analyzer.calculate_volume_surge(1000.0, 0.0) assert surge == 1.0 - def test_calculate_pv_divergence_bullish( - self, 
volatility_analyzer: VolatilityAnalyzer - ) -> None: + def test_calculate_pv_divergence_bullish(self, volatility_analyzer: VolatilityAnalyzer) -> None: """Test bullish price-volume divergence.""" # Price up + Volume up = bullish divergence = volatility_analyzer.calculate_pv_divergence(5.0, 2.0) assert divergence > 0.0 - def test_calculate_pv_divergence_bearish( - self, volatility_analyzer: VolatilityAnalyzer - ) -> None: + def test_calculate_pv_divergence_bearish(self, volatility_analyzer: VolatilityAnalyzer) -> None: """Test bearish price-volume divergence.""" # Price up + Volume down = bearish divergence divergence = volatility_analyzer.calculate_pv_divergence(5.0, 0.5) @@ -144,9 +138,7 @@ class TestVolatilityAnalyzer: divergence = volatility_analyzer.calculate_pv_divergence(-5.0, 2.0) assert divergence < 0.0 - def test_calculate_momentum_score( - self, volatility_analyzer: VolatilityAnalyzer - ) -> None: + def test_calculate_momentum_score(self, volatility_analyzer: VolatilityAnalyzer) -> None: """Test momentum score calculation.""" score = volatility_analyzer.calculate_momentum_score( price_change_1m=5.0, @@ -500,9 +492,7 @@ class TestMarketScanner: # Should keep all current stocks since they're all in top movers assert set(updated) == set(current_watchlist) - def test_get_updated_watchlist_max_replacements( - self, scanner: MarketScanner - ) -> None: + def test_get_updated_watchlist_max_replacements(self, scanner: MarketScanner) -> None: """Test that max_replacements limit is respected.""" current_watchlist = ["000660", "035420", "005490"] @@ -556,8 +546,6 @@ class TestMarketScanner: active_count = 0 peak_count = 0 - original_scan = scanner.scan_stock - async def tracking_scan(code: str, market: Any) -> VolatilityMetrics: nonlocal active_count, peak_count active_count += 1 From 4c0b55d67c9d80572704cd954f2b25022d60df2d Mon Sep 17 00:00:00 2001 From: agentson Date: Sun, 1 Mar 2026 20:22:13 +0900 Subject: [PATCH 062/109] docs: replace absolute plan links with 
repo-relative paths --- docs/ouroboros/README.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/docs/ouroboros/README.md b/docs/ouroboros/README.md index 6e53e6c..e64062d 100644 --- a/docs/ouroboros/README.md +++ b/docs/ouroboros/README.md @@ -38,5 +38,5 @@ python3 scripts/validate_ouroboros_docs.py ## 원본 계획 문서 -- [v2](/home/agentson/repos/The-Ouroboros/ouroboros_plan_v2.txt) -- [v3](/home/agentson/repos/The-Ouroboros/ouroboros_plan_v3.txt) +- [v2](../../ouroboros_plan_v2.txt) +- [v3](../../ouroboros_plan_v3.txt) From 2c6e9802be81725c2e752f47d1d307154e23155b Mon Sep 17 00:00:00 2001 From: agentson Date: Sun, 1 Mar 2026 20:23:34 +0900 Subject: [PATCH 063/109] docs: sync requirements registry metadata for policy doc changes --- docs/ouroboros/01_requirements_registry.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/ouroboros/01_requirements_registry.md b/docs/ouroboros/01_requirements_registry.md index d01269e..23f8868 100644 --- a/docs/ouroboros/01_requirements_registry.md +++ b/docs/ouroboros/01_requirements_registry.md @@ -3,7 +3,7 @@ Doc-ID: DOC-REQ-001 Version: 1.0.0 Status: active Owner: strategy -Updated: 2026-02-26 +Updated: 2026-03-01 --> # 요구사항 원장 (Single Source of Truth) From 05be1120858d192d7b3baf11f938618c72ff55de Mon Sep 17 00:00:00 2001 From: agentson Date: Sun, 1 Mar 2026 20:25:39 +0900 Subject: [PATCH 064/109] docs: move v2/v3 source plans under docs/ouroboros/source --- docs/ouroboros/01_requirements_registry.md | 3 ++- docs/ouroboros/30_code_level_work_orders.md | 1 + docs/ouroboros/40_acceptance_and_test_plan.md | 1 + docs/ouroboros/README.md | 10 +++++----- .../ouroboros/source/ouroboros_plan_v2.txt | 0 .../ouroboros/source/ouroboros_plan_v3.txt | 0 6 files changed, 9 insertions(+), 6 deletions(-) rename ouroboros_plan_v2.txt => docs/ouroboros/source/ouroboros_plan_v2.txt (100%) rename ouroboros_plan_v3.txt => docs/ouroboros/source/ouroboros_plan_v3.txt (100%) diff --git 
a/docs/ouroboros/01_requirements_registry.md b/docs/ouroboros/01_requirements_registry.md index 23f8868..7248955 100644 --- a/docs/ouroboros/01_requirements_registry.md +++ b/docs/ouroboros/01_requirements_registry.md @@ -1,6 +1,6 @@ # The Ouroboros 실행 문서 허브 -이 폴더는 `ouroboros_plan_v2.txt`, `ouroboros_plan_v3.txt`를 구현 가능한 작업 지시서 수준으로 분해한 문서 허브다. +이 폴더는 `source/ouroboros_plan_v2.txt`, `source/ouroboros_plan_v3.txt`를 구현 가능한 작업 지시서 수준으로 분해한 문서 허브다. ## 읽기 순서 (Routing) @@ -38,5 +38,5 @@ python3 scripts/validate_ouroboros_docs.py ## 원본 계획 문서 -- [v2](../../ouroboros_plan_v2.txt) -- [v3](../../ouroboros_plan_v3.txt) +- [v2](./source/ouroboros_plan_v2.txt) +- [v3](./source/ouroboros_plan_v3.txt) diff --git a/ouroboros_plan_v2.txt b/docs/ouroboros/source/ouroboros_plan_v2.txt similarity index 100% rename from ouroboros_plan_v2.txt rename to docs/ouroboros/source/ouroboros_plan_v2.txt diff --git a/ouroboros_plan_v3.txt b/docs/ouroboros/source/ouroboros_plan_v3.txt similarity index 100% rename from ouroboros_plan_v3.txt rename to docs/ouroboros/source/ouroboros_plan_v3.txt From 940a7e094bed5d787190e34a90068ba3018dde5b Mon Sep 17 00:00:00 2001 From: agentson Date: Sun, 1 Mar 2026 20:35:22 +0900 Subject: [PATCH 065/109] workflow: skip main/master branch guard in --ci mode --- scripts/session_handover_check.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/scripts/session_handover_check.py b/scripts/session_handover_check.py index 7b354be..dfe200b 100755 --- a/scripts/session_handover_check.py +++ b/scripts/session_handover_check.py @@ -134,7 +134,7 @@ def main() -> int: branch = _current_branch() if not branch: errors.append("cannot resolve current git branch") - elif branch in {"main", "master"}: + elif not args.ci and branch in {"main", "master"}: errors.append(f"working branch must not be {branch}") _check_handover_entry( From 8f2c08e2b7e5f89aa528f54cfca955a953281e8c Mon Sep 17 00:00:00 2001 From: agentson Date: Sun, 1 Mar 2026 20:43:06 +0900 Subject: [PATCH 
066/109] test: add ci-mode coverage for session handover gate --- scripts/session_handover_check.py | 10 +-- tests/test_session_handover_check.py | 100 +++++++++++++++++++++++++++ 2 files changed, 106 insertions(+), 4 deletions(-) create mode 100644 tests/test_session_handover_check.py diff --git a/scripts/session_handover_check.py b/scripts/session_handover_check.py index dfe200b..68ae256 100755 --- a/scripts/session_handover_check.py +++ b/scripts/session_handover_check.py @@ -88,6 +88,10 @@ def _check_handover_entry( if token not in latest: errors.append(f"latest handover entry missing token: {token}") + if strict: + if "- next_ticket: #TBD" in latest: + errors.append("latest handover entry must not use placeholder next_ticket (#TBD)") + if strict and not ci_mode: today_utc = datetime.now(UTC).date().isoformat() if today_utc not in latest: @@ -100,8 +104,6 @@ def _check_handover_entry( "latest handover entry must target current branch " f"({branch_token})" ) - if "- next_ticket: #TBD" in latest: - errors.append("latest handover entry must not use placeholder next_ticket (#TBD)") if "merged_to_feature_branch=no" in latest: errors.append( "process gate indicates not merged; implementation must stay blocked " @@ -122,8 +124,8 @@ def main() -> int: "--ci", action="store_true", help=( - "CI mode: keep structural/token checks but skip strict " - "today-date/current-branch matching." + "CI mode: keep structural/token checks and placeholder guard, " + "but skip strict today-date/current-branch/merge-gate checks." 
), ) args = parser.parse_args() diff --git a/tests/test_session_handover_check.py b/tests/test_session_handover_check.py new file mode 100644 index 0000000..8c4aedb --- /dev/null +++ b/tests/test_session_handover_check.py @@ -0,0 +1,100 @@ +from __future__ import annotations + +import importlib.util +from pathlib import Path + + +def _load_module(): + script_path = Path(__file__).resolve().parents[1] / "scripts" / "session_handover_check.py" + spec = importlib.util.spec_from_file_location("session_handover_check", script_path) + assert spec is not None + assert spec.loader is not None + module = importlib.util.module_from_spec(spec) + spec.loader.exec_module(module) + return module + + +def test_ci_mode_skips_date_branch_and_merge_gate(monkeypatch, tmp_path) -> None: + module = _load_module() + handover = tmp_path / "session-handover.md" + handover.write_text( + "\n".join( + [ + "### 2000-01-01 | session=test", + "- branch: feature/other-branch", + "- docs_checked: docs/workflow.md, docs/commands.md, docs/agent-constraints.md", + "- open_issues_reviewed: #1", + "- next_ticket: #123", + "- process_gate_checked: process_ticket=#1 merged_to_feature_branch=no", + ] + ), + encoding="utf-8", + ) + monkeypatch.setattr(module, "HANDOVER_LOG", handover) + + errors: list[str] = [] + module._check_handover_entry( + branch="feature/current-branch", + strict=True, + ci_mode=True, + errors=errors, + ) + assert errors == [] + + +def test_ci_mode_still_blocks_tbd_next_ticket(monkeypatch, tmp_path) -> None: + module = _load_module() + handover = tmp_path / "session-handover.md" + handover.write_text( + "\n".join( + [ + "### 2000-01-01 | session=test", + "- branch: feature/other-branch", + "- docs_checked: docs/workflow.md, docs/commands.md, docs/agent-constraints.md", + "- open_issues_reviewed: #1", + "- next_ticket: #TBD", + "- process_gate_checked: process_ticket=#1 merged_to_feature_branch=no", + ] + ), + encoding="utf-8", + ) + monkeypatch.setattr(module, "HANDOVER_LOG", 
handover) + + errors: list[str] = [] + module._check_handover_entry( + branch="feature/current-branch", + strict=True, + ci_mode=True, + errors=errors, + ) + assert "latest handover entry must not use placeholder next_ticket (#TBD)" in errors + + +def test_non_ci_strict_enforces_date_branch_and_merge_gate(monkeypatch, tmp_path) -> None: + module = _load_module() + handover = tmp_path / "session-handover.md" + handover.write_text( + "\n".join( + [ + "### 2000-01-01 | session=test", + "- branch: feature/other-branch", + "- docs_checked: docs/workflow.md, docs/commands.md, docs/agent-constraints.md", + "- open_issues_reviewed: #1", + "- next_ticket: #123", + "- process_gate_checked: process_ticket=#1 merged_to_feature_branch=no", + ] + ), + encoding="utf-8", + ) + monkeypatch.setattr(module, "HANDOVER_LOG", handover) + + errors: list[str] = [] + module._check_handover_entry( + branch="feature/current-branch", + strict=True, + ci_mode=False, + errors=errors, + ) + assert any("must contain today's UTC date" in e for e in errors) + assert any("must target current branch" in e for e in errors) + assert any("merged_to_feature_branch=no" in e for e in errors) From 999091e003f93ae2a697f536b61b59ee1f24db67 Mon Sep 17 00:00:00 2001 From: agentson Date: Sun, 1 Mar 2026 21:02:51 +0900 Subject: [PATCH 067/109] test: cover non-ci strict #TBD guard in handover check (#358) --- tests/test_session_handover_check.py | 28 ++++++++++++++++++++++++++++ 1 file changed, 28 insertions(+) diff --git a/tests/test_session_handover_check.py b/tests/test_session_handover_check.py index 8c4aedb..7c924c8 100644 --- a/tests/test_session_handover_check.py +++ b/tests/test_session_handover_check.py @@ -98,3 +98,31 @@ def test_non_ci_strict_enforces_date_branch_and_merge_gate(monkeypatch, tmp_path assert any("must contain today's UTC date" in e for e in errors) assert any("must target current branch" in e for e in errors) assert any("merged_to_feature_branch=no" in e for e in errors) + + +def 
test_non_ci_strict_still_blocks_tbd_next_ticket(monkeypatch, tmp_path) -> None: + module = _load_module() + handover = tmp_path / "session-handover.md" + handover.write_text( + "\n".join( + [ + "### 2000-01-01 | session=test", + "- branch: feature/other-branch", + "- docs_checked: docs/workflow.md, docs/commands.md, docs/agent-constraints.md", + "- open_issues_reviewed: #1", + "- next_ticket: #TBD", + "- process_gate_checked: process_ticket=#1 merged_to_feature_branch=yes", + ] + ), + encoding="utf-8", + ) + monkeypatch.setattr(module, "HANDOVER_LOG", handover) + + errors: list[str] = [] + module._check_handover_entry( + branch="feature/current-branch", + strict=True, + ci_mode=False, + errors=errors, + ) + assert "latest handover entry must not use placeholder next_ticket (#TBD)" in errors From 117657d13f23718441f3f401d821695142f8064f Mon Sep 17 00:00:00 2001 From: agentson Date: Sun, 1 Mar 2026 21:11:34 +0900 Subject: [PATCH 068/109] docs: enforce source path policy for ouroboros plan links (#357) --- scripts/validate_ouroboros_docs.py | 43 ++++++++++++++++++++-- tests/test_validate_ouroboros_docs.py | 53 +++++++++++++++++++++++++++ 2 files changed, 93 insertions(+), 3 deletions(-) create mode 100644 tests/test_validate_ouroboros_docs.py diff --git a/scripts/validate_ouroboros_docs.py b/scripts/validate_ouroboros_docs.py index 2cbeb7f..9f1485e 100755 --- a/scripts/validate_ouroboros_docs.py +++ b/scripts/validate_ouroboros_docs.py @@ -19,9 +19,20 @@ META_PATTERN = re.compile( re.MULTILINE, ) ID_PATTERN = re.compile(r"\b(?:REQ|RULE|TASK|TEST|DOC)-[A-Z0-9-]+-\d{3}\b") -DEF_PATTERN = re.compile(r"^-\s+`(?P(?:REQ|RULE|TASK|TEST|DOC)-[A-Z0-9-]+-\d{3})`", re.MULTILINE) +DEF_PATTERN = re.compile( + r"^-\s+`(?P(?:REQ|RULE|TASK|TEST|DOC)-[A-Z0-9-]+-\d{3})`", + re.MULTILINE, +) LINK_PATTERN = re.compile(r"\[[^\]]+\]\((?P[^)]+)\)") -LINE_DEF_PATTERN = re.compile(r"^-\s+`(?P(?:REQ|RULE|TASK|TEST|DOC)-[A-Z0-9-]+-\d{3})`.*$", re.MULTILINE) +LINE_DEF_PATTERN = re.compile( + 
r"^-\s+`(?P(?:REQ|RULE|TASK|TEST|DOC)-[A-Z0-9-]+-\d{3})`.*$", + re.MULTILINE, +) +PLAN_LINK_PATTERN = re.compile(r"ouroboros_plan_v(?P[23])\.txt$") +ALLOWED_PLAN_TARGETS = { + "2": (DOC_DIR / "source" / "ouroboros_plan_v2.txt").resolve(), + "3": (DOC_DIR / "source" / "ouroboros_plan_v3.txt").resolve(), +} def iter_docs() -> list[Path]: @@ -40,11 +51,35 @@ def validate_metadata(path: Path, text: str, errors: list[str], doc_ids: dict[st doc_ids[doc_id] = path +def validate_plan_source_link(path: Path, link: str, errors: list[str]) -> None: + normalized = link.strip() + match = PLAN_LINK_PATTERN.search(normalized) + if not match: + return + + version = match.group("version") + expected_target = ALLOWED_PLAN_TARGETS[version] + if normalized.startswith("/"): + errors.append( + f"{path}: invalid plan link path -> {link} " + f"(use ./source/ouroboros_plan_v{version}.txt)" + ) + return + + resolved_target = (path.parent / normalized).resolve() + if resolved_target != expected_target: + errors.append( + f"{path}: invalid plan link path -> {link} " + f"(must resolve to docs/ouroboros/source/ouroboros_plan_v{version}.txt)" + ) + + def validate_links(path: Path, text: str, errors: list[str]) -> None: for m in LINK_PATTERN.finditer(text): link = m.group("link").strip() if not link or link.startswith("http") or link.startswith("#"): continue + validate_plan_source_link(path, link, errors) if link.startswith("/"): target = Path(link) else: @@ -61,7 +96,9 @@ def collect_ids(path: Path, text: str, defs: dict[str, Path], refs: dict[str, se refs.setdefault(idv, set()).add(path) -def collect_req_traceability(text: str, req_to_task: dict[str, set[str]], req_to_test: dict[str, set[str]]) -> None: +def collect_req_traceability( + text: str, req_to_task: dict[str, set[str]], req_to_test: dict[str, set[str]] +) -> None: for m in LINE_DEF_PATTERN.finditer(text): line = m.group(0) item_id = m.group("id") diff --git a/tests/test_validate_ouroboros_docs.py 
b/tests/test_validate_ouroboros_docs.py new file mode 100644 index 0000000..66c0ec5 --- /dev/null +++ b/tests/test_validate_ouroboros_docs.py @@ -0,0 +1,53 @@ +from __future__ import annotations + +import importlib.util +from pathlib import Path + + +def _load_module(): + script_path = Path(__file__).resolve().parents[1] / "scripts" / "validate_ouroboros_docs.py" + spec = importlib.util.spec_from_file_location("validate_ouroboros_docs", script_path) + assert spec is not None + assert spec.loader is not None + module = importlib.util.module_from_spec(spec) + spec.loader.exec_module(module) + return module + + +def test_validate_plan_source_link_accepts_canonical_source_path() -> None: + module = _load_module() + errors: list[str] = [] + path = Path("docs/ouroboros/README.md").resolve() + + module.validate_plan_source_link(path, "./source/ouroboros_plan_v2.txt", errors) + module.validate_plan_source_link(path, "./source/ouroboros_plan_v3.txt", errors) + + assert errors == [] + + +def test_validate_plan_source_link_rejects_root_relative_path() -> None: + module = _load_module() + errors: list[str] = [] + path = Path("docs/ouroboros/README.md").resolve() + + module.validate_plan_source_link( + path, + "/home/agentson/repos/The-Ouroboros/ouroboros_plan_v2.txt", + errors, + ) + + assert errors + assert "invalid plan link path" in errors[0] + assert "use ./source/ouroboros_plan_v2.txt" in errors[0] + + +def test_validate_plan_source_link_rejects_repo_root_relative_path() -> None: + module = _load_module() + errors: list[str] = [] + path = Path("docs/ouroboros/README.md").resolve() + + module.validate_plan_source_link(path, "../../ouroboros_plan_v2.txt", errors) + + assert errors + assert "invalid plan link path" in errors[0] + assert "must resolve to docs/ouroboros/source/ouroboros_plan_v2.txt" in errors[0] From d1ef79f3855638eb7a547965626be181a0124f72 Mon Sep 17 00:00:00 2001 From: agentson Date: Sun, 1 Mar 2026 21:20:06 +0900 Subject: [PATCH 069/109] docs validator: 
handle plan link fragments and avoid duplicate link errors --- scripts/validate_ouroboros_docs.py | 28 +++++++++++++-------- tests/test_validate_ouroboros_docs.py | 36 ++++++++++++++++++++++++--- 2 files changed, 50 insertions(+), 14 deletions(-) diff --git a/scripts/validate_ouroboros_docs.py b/scripts/validate_ouroboros_docs.py index 9f1485e..902aded 100755 --- a/scripts/validate_ouroboros_docs.py +++ b/scripts/validate_ouroboros_docs.py @@ -51,27 +51,33 @@ def validate_metadata(path: Path, text: str, errors: list[str], doc_ids: dict[st doc_ids[doc_id] = path -def validate_plan_source_link(path: Path, link: str, errors: list[str]) -> None: +def validate_plan_source_link(path: Path, link: str, errors: list[str]) -> bool: normalized = link.strip() - match = PLAN_LINK_PATTERN.search(normalized) + # Ignore in-page anchors and parse the filesystem part for validation. + link_path = normalized.split("#", 1)[0].strip() + if not link_path: + return False + match = PLAN_LINK_PATTERN.search(link_path) if not match: - return + return False version = match.group("version") expected_target = ALLOWED_PLAN_TARGETS[version] - if normalized.startswith("/"): + if link_path.startswith("/"): errors.append( f"{path}: invalid plan link path -> {link} " f"(use ./source/ouroboros_plan_v{version}.txt)" ) - return + return True - resolved_target = (path.parent / normalized).resolve() + resolved_target = (path.parent / link_path).resolve() if resolved_target != expected_target: errors.append( f"{path}: invalid plan link path -> {link} " f"(must resolve to docs/ouroboros/source/ouroboros_plan_v{version}.txt)" ) + return True + return False def validate_links(path: Path, text: str, errors: list[str]) -> None: @@ -79,11 +85,13 @@ def validate_links(path: Path, text: str, errors: list[str]) -> None: link = m.group("link").strip() if not link or link.startswith("http") or link.startswith("#"): continue - validate_plan_source_link(path, link, errors) - if link.startswith("/"): - target = 
Path(link) + if validate_plan_source_link(path, link, errors): + continue + link_path = link.split("#", 1)[0].strip() + if link_path.startswith("/"): + target = Path(link_path) else: - target = (path.parent / link).resolve() + target = (path.parent / link_path).resolve() if not target.exists(): errors.append(f"{path}: broken link -> {link}") diff --git a/tests/test_validate_ouroboros_docs.py b/tests/test_validate_ouroboros_docs.py index 66c0ec5..3ef972b 100644 --- a/tests/test_validate_ouroboros_docs.py +++ b/tests/test_validate_ouroboros_docs.py @@ -19,8 +19,8 @@ def test_validate_plan_source_link_accepts_canonical_source_path() -> None: errors: list[str] = [] path = Path("docs/ouroboros/README.md").resolve() - module.validate_plan_source_link(path, "./source/ouroboros_plan_v2.txt", errors) - module.validate_plan_source_link(path, "./source/ouroboros_plan_v3.txt", errors) + assert module.validate_plan_source_link(path, "./source/ouroboros_plan_v2.txt", errors) is False + assert module.validate_plan_source_link(path, "./source/ouroboros_plan_v3.txt", errors) is False assert errors == [] @@ -30,12 +30,13 @@ def test_validate_plan_source_link_rejects_root_relative_path() -> None: errors: list[str] = [] path = Path("docs/ouroboros/README.md").resolve() - module.validate_plan_source_link( + handled = module.validate_plan_source_link( path, "/home/agentson/repos/The-Ouroboros/ouroboros_plan_v2.txt", errors, ) + assert handled is True assert errors assert "invalid plan link path" in errors[0] assert "use ./source/ouroboros_plan_v2.txt" in errors[0] @@ -46,8 +47,35 @@ def test_validate_plan_source_link_rejects_repo_root_relative_path() -> None: errors: list[str] = [] path = Path("docs/ouroboros/README.md").resolve() - module.validate_plan_source_link(path, "../../ouroboros_plan_v2.txt", errors) + handled = module.validate_plan_source_link(path, "../../ouroboros_plan_v2.txt", errors) + assert handled is True assert errors assert "invalid plan link path" in errors[0] assert 
"must resolve to docs/ouroboros/source/ouroboros_plan_v2.txt" in errors[0] + + +def test_validate_plan_source_link_accepts_fragment_suffix() -> None: + module = _load_module() + errors: list[str] = [] + path = Path("docs/ouroboros/README.md").resolve() + + handled = module.validate_plan_source_link(path, "./source/ouroboros_plan_v2.txt#sec", errors) + + assert handled is False + assert errors == [] + + +def test_validate_links_avoids_duplicate_error_for_invalid_plan_link(tmp_path) -> None: + module = _load_module() + errors: list[str] = [] + doc = tmp_path / "doc.md" + doc.write_text( + "[v2](/home/agentson/repos/The-Ouroboros/ouroboros_plan_v2.txt)\n", + encoding="utf-8", + ) + + module.validate_links(doc, doc.read_text(encoding="utf-8"), errors) + + assert len(errors) == 1 + assert "invalid plan link path" in errors[0] From 6be78d73ff8d1a0f64c9b4479b1d924316eba70a Mon Sep 17 00:00:00 2001 From: agentson Date: Sun, 1 Mar 2026 22:09:48 +0900 Subject: [PATCH 070/109] governance: enforce READ-ONLY approval evidence for protected file changes (#356) --- .gitea/PULL_REQUEST_TEMPLATE.md | 7 +++ scripts/validate_governance_assets.py | 61 +++++++++++++++++++++++- tests/test_validate_governance_assets.py | 60 +++++++++++++++++++++++ 3 files changed, 126 insertions(+), 2 deletions(-) diff --git a/.gitea/PULL_REQUEST_TEMPLATE.md b/.gitea/PULL_REQUEST_TEMPLATE.md index 90edcf4..1fb6990 100644 --- a/.gitea/PULL_REQUEST_TEMPLATE.md +++ b/.gitea/PULL_REQUEST_TEMPLATE.md @@ -47,6 +47,13 @@ - 모니터링 로그 경로: - 이상 징후/이슈 링크: +## READ-ONLY Approval (Required when touching READ-ONLY files) + +- Touched READ-ONLY files: +- Human approval: +- Test suite 1: +- Test suite 2: + ## Approval Gate - [ ] Static Verifier approval comment linked diff --git a/scripts/validate_governance_assets.py b/scripts/validate_governance_assets.py index 5872de1..79bc882 100644 --- a/scripts/validate_governance_assets.py +++ b/scripts/validate_governance_assets.py @@ -3,10 +3,10 @@ from __future__ import 
annotations -import subprocess -import sys import os import re +import subprocess +import sys from pathlib import Path REQUIREMENTS_REGISTRY = "docs/ouroboros/01_requirements_registry.md" @@ -15,6 +15,8 @@ TASK_DEF_LINE = re.compile(r"^-\s+`(?PTASK-[A-Z0-9-]+-\d{3})`(?P. REQ_ID_IN_LINE = re.compile(r"\bREQ-[A-Z0-9-]+-\d{3}\b") TASK_ID_IN_TEXT = re.compile(r"\bTASK-[A-Z0-9-]+-\d{3}\b") TEST_ID_IN_TEXT = re.compile(r"\bTEST-[A-Z0-9-]+-\d{3}\b") +READ_ONLY_FILES = {"src/core/risk_manager.py"} +PLACEHOLDER_VALUES = {"", "tbd", "n/a", "na", "none", "", ""} def must_contain(path: Path, required: list[str], errors: list[str]) -> None: @@ -118,6 +120,55 @@ def validate_pr_traceability(warnings: list[str]) -> None: warnings.append("PR text missing TEST-ID reference") +def _parse_pr_evidence_line(text: str, field: str) -> str | None: + pattern = re.compile(rf"^\s*-\s*{re.escape(field)}:\s*(?P.+?)\s*$", re.MULTILINE) + match = pattern.search(text) + if not match: + return None + return match.group("value").strip() + + +def _is_placeholder(value: str | None) -> bool: + if value is None: + return True + normalized = value.strip().lower() + return normalized in PLACEHOLDER_VALUES + + +def validate_read_only_approval( + changed_files: list[str], errors: list[str], warnings: list[str] +) -> None: + changed_set = set(changed_files) + touched = sorted(path for path in READ_ONLY_FILES if path in changed_set) + if not touched: + return + + body = os.getenv("GOVERNANCE_PR_BODY", "").strip() + if not body: + warnings.append( + "READ-ONLY file changed but PR body is unavailable; approval evidence check skipped" + ) + return + + if "READ-ONLY Approval" not in body: + errors.append("READ-ONLY file changed without 'READ-ONLY Approval' section in PR body") + return + + touched_field = _parse_pr_evidence_line(body, "Touched READ-ONLY files") + human_approval = _parse_pr_evidence_line(body, "Human approval") + test_suite_1 = _parse_pr_evidence_line(body, "Test suite 1") + test_suite_2 = 
_parse_pr_evidence_line(body, "Test suite 2") + + if _is_placeholder(touched_field): + errors.append("READ-ONLY Approval section missing 'Touched READ-ONLY files' evidence") + if _is_placeholder(human_approval): + errors.append("READ-ONLY Approval section missing 'Human approval' evidence") + if _is_placeholder(test_suite_1): + errors.append("READ-ONLY Approval section missing 'Test suite 1' evidence") + if _is_placeholder(test_suite_2): + errors.append("READ-ONLY Approval section missing 'Test suite 2' evidence") + + def main() -> int: errors: list[str] = [] warnings: list[str] = [] @@ -141,6 +192,11 @@ def main() -> int: "gh", "Session Handover Gate", "session_handover_check.py --strict", + "READ-ONLY Approval", + "Touched READ-ONLY files", + "Human approval", + "Test suite 1", + "Test suite 2", ], errors, ) @@ -187,6 +243,7 @@ def main() -> int: validate_registry_sync(changed_files, errors) validate_task_req_mapping(errors) validate_pr_traceability(warnings) + validate_read_only_approval(changed_files, errors, warnings) if errors: print("[FAIL] governance asset validation failed") diff --git a/tests/test_validate_governance_assets.py b/tests/test_validate_governance_assets.py index 719d801..398c677 100644 --- a/tests/test_validate_governance_assets.py +++ b/tests/test_validate_governance_assets.py @@ -116,3 +116,63 @@ def test_validate_pr_traceability_warns_when_req_missing(monkeypatch) -> None: module.validate_pr_traceability(warnings) assert warnings assert "PR text missing REQ-ID reference" in warnings + + +def test_validate_read_only_approval_requires_evidence(monkeypatch) -> None: + module = _load_module() + changed_files = ["src/core/risk_manager.py"] + errors: list[str] = [] + warnings: list[str] = [] + monkeypatch.setenv( + "GOVERNANCE_PR_BODY", + "\n".join( + [ + "## READ-ONLY Approval (Required when touching READ-ONLY files)", + "- Touched READ-ONLY files: src/core/risk_manager.py", + "- Human approval: TBD", + "- Test suite 1: pytest -q", + "- Test 
suite 2: TBD", + ] + ), + ) + + module.validate_read_only_approval(changed_files, errors, warnings) + assert warnings == [] + assert any("Human approval" in err for err in errors) + assert any("Test suite 2" in err for err in errors) + + +def test_validate_read_only_approval_passes_with_complete_evidence(monkeypatch) -> None: + module = _load_module() + changed_files = ["src/core/risk_manager.py"] + errors: list[str] = [] + warnings: list[str] = [] + monkeypatch.setenv( + "GOVERNANCE_PR_BODY", + "\n".join( + [ + "## READ-ONLY Approval (Required when touching READ-ONLY files)", + "- Touched READ-ONLY files: src/core/risk_manager.py", + "- Human approval: https://example.com/review/123", + "- Test suite 1: pytest -q tests/test_risk.py", + "- Test suite 2: pytest -q tests/test_main.py -k risk", + ] + ), + ) + + module.validate_read_only_approval(changed_files, errors, warnings) + assert errors == [] + assert warnings == [] + + +def test_validate_read_only_approval_warns_without_pr_body(monkeypatch) -> None: + module = _load_module() + changed_files = ["src/core/risk_manager.py"] + errors: list[str] = [] + warnings: list[str] = [] + monkeypatch.delenv("GOVERNANCE_PR_BODY", raising=False) + + module.validate_read_only_approval(changed_files, errors, warnings) + assert errors == [] + assert warnings + assert "approval evidence check skipped" in warnings[0] From c431d82c0dec39d871ab8fd63705acc8080c5588 Mon Sep 17 00:00:00 2001 From: agentson Date: Sun, 1 Mar 2026 22:44:02 +0900 Subject: [PATCH 071/109] test: cover no-readonly-change early return in governance validator --- tests/test_validate_governance_assets.py | 11 +++++++++++ 1 file changed, 11 insertions(+) diff --git a/tests/test_validate_governance_assets.py b/tests/test_validate_governance_assets.py index 398c677..3f2ff21 100644 --- a/tests/test_validate_governance_assets.py +++ b/tests/test_validate_governance_assets.py @@ -176,3 +176,14 @@ def test_validate_read_only_approval_warns_without_pr_body(monkeypatch) 
-> None: assert errors == [] assert warnings assert "approval evidence check skipped" in warnings[0] + + +def test_validate_read_only_approval_skips_when_no_readonly_file_changed() -> None: + module = _load_module() + changed_files = ["src/main.py"] + errors: list[str] = [] + warnings: list[str] = [] + + module.validate_read_only_approval(changed_files, errors, warnings) + assert errors == [] + assert warnings == [] From 51fd6b7a72b5882242e0fcd9f15ad06e7d51db22 Mon Sep 17 00:00:00 2001 From: agentson Date: Sun, 1 Mar 2026 23:03:52 +0900 Subject: [PATCH 072/109] docs validator: add validate_docs_sync with unit tests (#363) --- scripts/validate_docs_sync.py | 134 +++++++++++++++++++++++++++++++ tests/test_validate_docs_sync.py | 102 +++++++++++++++++++++++ 2 files changed, 236 insertions(+) create mode 100644 scripts/validate_docs_sync.py create mode 100644 tests/test_validate_docs_sync.py diff --git a/scripts/validate_docs_sync.py b/scripts/validate_docs_sync.py new file mode 100644 index 0000000..87cf519 --- /dev/null +++ b/scripts/validate_docs_sync.py @@ -0,0 +1,134 @@ +#!/usr/bin/env python3 +"""Validate top-level docs synchronization invariants.""" + +from __future__ import annotations + +import re +import sys +from pathlib import Path + +REPO_ROOT = Path(".") +REQUIRED_FILES = { + "README.md": REPO_ROOT / "README.md", + "CLAUDE.md": REPO_ROOT / "CLAUDE.md", + "commands": REPO_ROOT / "docs" / "commands.md", + "testing": REPO_ROOT / "docs" / "testing.md", + "workflow": REPO_ROOT / "docs" / "workflow.md", +} + +LINK_PATTERN = re.compile(r"\[[^\]]+\]\((?P[^)]+)\)") +ENDPOINT_ROW_PATTERN = re.compile( + r"^\|\s*`(?P(?:GET|POST|PUT|PATCH|DELETE)\s+/[^`]*)`\s*\|" +) + + +def _read(path: Path) -> str: + return path.read_text(encoding="utf-8") + + +def validate_required_files_exist(errors: list[str]) -> None: + for name, path in REQUIRED_FILES.items(): + if not path.exists(): + errors.append(f"missing required doc file ({name}): {path}") + + +def 
validate_links_resolve(doc_path: Path, text: str, errors: list[str]) -> None: + for match in LINK_PATTERN.finditer(text): + raw_link = match.group("link").strip() + if not raw_link or raw_link.startswith("#") or raw_link.startswith("http"): + continue + link_path = raw_link.split("#", 1)[0].strip() + if not link_path: + continue + if link_path.startswith("/"): + errors.append(f"{doc_path}: absolute link is forbidden -> {raw_link}") + continue + target = (doc_path.parent / link_path).resolve() + if not target.exists(): + errors.append(f"{doc_path}: broken link -> {raw_link}") + + +def validate_summary_docs_reference_core_docs(errors: list[str]) -> None: + required_links = { + "README.md": ("docs/workflow.md", "docs/commands.md", "docs/testing.md"), + "CLAUDE.md": ("docs/workflow.md", "docs/commands.md"), + } + for file_name, links in required_links.items(): + doc_path = ( + REQUIRED_FILES["README.md"] + if file_name == "README.md" + else REQUIRED_FILES["CLAUDE.md"] + ) + text = _read(doc_path) + for link in links: + if link not in text: + errors.append(f"{doc_path}: missing core doc link reference -> {link}") + + +def collect_command_endpoints(text: str) -> list[str]: + endpoints: list[str] = [] + for line in text.splitlines(): + match = ENDPOINT_ROW_PATTERN.match(line.strip()) + if match: + endpoints.append(match.group("endpoint")) + return endpoints + + +def validate_commands_endpoint_duplicates(errors: list[str]) -> None: + text = _read(REQUIRED_FILES["commands"]) + endpoints = collect_command_endpoints(text) + seen: set[str] = set() + duplicates: set[str] = set() + for endpoint in endpoints: + if endpoint in seen: + duplicates.add(endpoint) + seen.add(endpoint) + for endpoint in sorted(duplicates): + errors.append(f"docs/commands.md: duplicated API endpoint row -> {endpoint}") + + +def validate_testing_doc_has_dynamic_count_guidance(errors: list[str]) -> None: + text = _read(REQUIRED_FILES["testing"]) + if "pytest --collect-only -q" not in text: + errors.append( 
+ "docs/testing.md: missing dynamic test count guidance " + "(pytest --collect-only -q)" + ) + + +def main() -> int: + errors: list[str] = [] + + validate_required_files_exist(errors) + if errors: + print("[FAIL] docs sync validation failed") + for err in errors: + print(f"- {err}") + return 1 + + readme_text = _read(REQUIRED_FILES["README.md"]) + claude_text = _read(REQUIRED_FILES["CLAUDE.md"]) + validate_links_resolve(REQUIRED_FILES["README.md"], readme_text, errors) + validate_links_resolve(REQUIRED_FILES["CLAUDE.md"], claude_text, errors) + validate_links_resolve(REQUIRED_FILES["commands"], _read(REQUIRED_FILES["commands"]), errors) + validate_links_resolve(REQUIRED_FILES["testing"], _read(REQUIRED_FILES["testing"]), errors) + + validate_summary_docs_reference_core_docs(errors) + validate_commands_endpoint_duplicates(errors) + validate_testing_doc_has_dynamic_count_guidance(errors) + + if errors: + print("[FAIL] docs sync validation failed") + for err in errors: + print(f"- {err}") + return 1 + + print("[OK] docs sync validated") + print("[OK] summary docs link to core docs and links resolve") + print("[OK] commands endpoint rows have no duplicates") + print("[OK] testing doc includes dynamic count guidance") + return 0 + + +if __name__ == "__main__": + sys.exit(main()) diff --git a/tests/test_validate_docs_sync.py b/tests/test_validate_docs_sync.py new file mode 100644 index 0000000..793c795 --- /dev/null +++ b/tests/test_validate_docs_sync.py @@ -0,0 +1,102 @@ +from __future__ import annotations + +import importlib.util +from pathlib import Path + + +def _load_module(): + script_path = Path(__file__).resolve().parents[1] / "scripts" / "validate_docs_sync.py" + spec = importlib.util.spec_from_file_location("validate_docs_sync", script_path) + assert spec is not None + assert spec.loader is not None + module = importlib.util.module_from_spec(spec) + spec.loader.exec_module(module) + return module + + +def 
test_collect_command_endpoints_parses_markdown_table_rows() -> None: + module = _load_module() + text = "\n".join( + [ + "| Endpoint | Description |", + "|----------|-------------|", + "| `GET /api/status` | status |", + "| `POST /api/run` | run |", + "| not-a-row | ignored |", + ] + ) + endpoints = module.collect_command_endpoints(text) + assert endpoints == ["GET /api/status", "POST /api/run"] + + +def test_validate_links_resolve_detects_absolute_and_broken_links(tmp_path) -> None: + module = _load_module() + doc = tmp_path / "doc.md" + existing = tmp_path / "ok.md" + existing.write_text("# ok\n", encoding="utf-8") + doc.write_text( + "\n".join( + [ + "[ok](./ok.md)", + "[abs](/tmp/nowhere.md)", + "[broken](./missing.md)", + ] + ), + encoding="utf-8", + ) + errors: list[str] = [] + module.validate_links_resolve(doc, doc.read_text(encoding="utf-8"), errors) + + assert any("absolute link is forbidden" in err for err in errors) + assert any("broken link" in err for err in errors) + + +def test_validate_summary_docs_reference_core_docs(monkeypatch) -> None: + module = _load_module() + errors: list[str] = [] + fake_docs = { + str(module.REQUIRED_FILES["README.md"]): ( + "docs/workflow.md docs/commands.md docs/testing.md" + ), + str(module.REQUIRED_FILES["CLAUDE.md"]): "docs/workflow.md docs/commands.md", + } + + def fake_read(path: Path) -> str: + return fake_docs[str(path)] + + monkeypatch.setattr(module, "_read", fake_read) + module.validate_summary_docs_reference_core_docs(errors) + assert errors == [] + + +def test_validate_commands_endpoint_duplicates_reports_duplicates(monkeypatch) -> None: + module = _load_module() + errors: list[str] = [] + text = "\n".join( + [ + "| `GET /api/status` | status |", + "| `GET /api/status` | duplicate |", + ] + ) + + def fake_read(path: Path) -> str: + assert path == module.REQUIRED_FILES["commands"] + return text + + monkeypatch.setattr(module, "_read", fake_read) + module.validate_commands_endpoint_duplicates(errors) + assert 
errors + assert "duplicated API endpoint row -> GET /api/status" in errors[0] + + +def test_validate_testing_doc_has_dynamic_count_guidance(monkeypatch) -> None: + module = _load_module() + errors: list[str] = [] + + def fake_read(path: Path) -> str: + assert path == module.REQUIRED_FILES["testing"] + return "Use pytest --collect-only -q for dynamic counts." + + monkeypatch.setattr(module, "_read", fake_read) + module.validate_testing_doc_has_dynamic_count_guidance(errors) + assert errors == [] From 6656adc2b7c7e7e399d6704fddf68cc7376e9558 Mon Sep 17 00:00:00 2001 From: agentson Date: Sun, 1 Mar 2026 23:09:20 +0900 Subject: [PATCH 073/109] ci/docs: wire docs sync validator into workflows and tighten tests --- .gitea/workflows/ci.yml | 3 +++ .github/workflows/ci.yml | 3 +++ scripts/validate_docs_sync.py | 13 +++++++------ tests/test_validate_docs_sync.py | 21 +++++++++++++++++++++ 4 files changed, 34 insertions(+), 6 deletions(-) diff --git a/.gitea/workflows/ci.yml b/.gitea/workflows/ci.yml index 9ee06db..d992e70 100644 --- a/.gitea/workflows/ci.yml +++ b/.gitea/workflows/ci.yml @@ -47,6 +47,9 @@ jobs: - name: Validate Ouroboros docs run: python3 scripts/validate_ouroboros_docs.py + - name: Validate docs sync + run: python3 scripts/validate_docs_sync.py + - name: Lint run: ruff check src/ tests/ diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 40f340d..d2e5f1f 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -44,6 +44,9 @@ jobs: - name: Validate Ouroboros docs run: python3 scripts/validate_ouroboros_docs.py + - name: Validate docs sync + run: python3 scripts/validate_docs_sync.py + - name: Lint run: ruff check src/ tests/ diff --git a/scripts/validate_docs_sync.py b/scripts/validate_docs_sync.py index 87cf519..0dc83c2 100644 --- a/scripts/validate_docs_sync.py +++ b/scripts/validate_docs_sync.py @@ -54,11 +54,7 @@ def validate_summary_docs_reference_core_docs(errors: list[str]) -> None: "CLAUDE.md": ("docs/workflow.md", 
"docs/commands.md"), } for file_name, links in required_links.items(): - doc_path = ( - REQUIRED_FILES["README.md"] - if file_name == "README.md" - else REQUIRED_FILES["CLAUDE.md"] - ) + doc_path = REQUIRED_FILES[file_name] text = _read(doc_path) for link in links: if link not in text: @@ -110,8 +106,13 @@ def main() -> int: claude_text = _read(REQUIRED_FILES["CLAUDE.md"]) validate_links_resolve(REQUIRED_FILES["README.md"], readme_text, errors) validate_links_resolve(REQUIRED_FILES["CLAUDE.md"], claude_text, errors) - validate_links_resolve(REQUIRED_FILES["commands"], _read(REQUIRED_FILES["commands"]), errors) + validate_links_resolve( + REQUIRED_FILES["commands"], _read(REQUIRED_FILES["commands"]), errors + ) validate_links_resolve(REQUIRED_FILES["testing"], _read(REQUIRED_FILES["testing"]), errors) + validate_links_resolve( + REQUIRED_FILES["workflow"], _read(REQUIRED_FILES["workflow"]), errors + ) validate_summary_docs_reference_core_docs(errors) validate_commands_endpoint_duplicates(errors) diff --git a/tests/test_validate_docs_sync.py b/tests/test_validate_docs_sync.py index 793c795..5c8309f 100644 --- a/tests/test_validate_docs_sync.py +++ b/tests/test_validate_docs_sync.py @@ -69,6 +69,27 @@ def test_validate_summary_docs_reference_core_docs(monkeypatch) -> None: assert errors == [] +def test_validate_summary_docs_reference_core_docs_reports_missing_links( + monkeypatch, +) -> None: + module = _load_module() + errors: list[str] = [] + fake_docs = { + str(module.REQUIRED_FILES["README.md"]): "docs/workflow.md", + str(module.REQUIRED_FILES["CLAUDE.md"]): "docs/workflow.md", + } + + def fake_read(path: Path) -> str: + return fake_docs[str(path)] + + monkeypatch.setattr(module, "_read", fake_read) + module.validate_summary_docs_reference_core_docs(errors) + + assert any("README.md" in err and "docs/commands.md" in err for err in errors) + assert any("README.md" in err and "docs/testing.md" in err for err in errors) + assert any("CLAUDE.md" in err and 
"docs/commands.md" in err for err in errors) + + def test_validate_commands_endpoint_duplicates_reports_duplicates(monkeypatch) -> None: module = _load_module() errors: list[str] = [] From 8e819e593915329298d337979f948fdbe6b3b9c4 Mon Sep 17 00:00:00 2001 From: agentson Date: Sun, 1 Mar 2026 23:13:51 +0900 Subject: [PATCH 074/109] docs: align template/commands with docs sync gate (#364) --- .gitea/PULL_REQUEST_TEMPLATE.md | 7 +++++++ docs/commands.md | 14 ++++++++++++++ 2 files changed, 21 insertions(+) diff --git a/.gitea/PULL_REQUEST_TEMPLATE.md b/.gitea/PULL_REQUEST_TEMPLATE.md index 1fb6990..995ebf7 100644 --- a/.gitea/PULL_REQUEST_TEMPLATE.md +++ b/.gitea/PULL_REQUEST_TEMPLATE.md @@ -41,6 +41,13 @@ - [ ] `workflow/session-handover.md` 최신 엔트리가 현재 브랜치/당일(UTC) 기준으로 갱신됨 - 최신 handover 엔트리 heading: +## Docs Sync Gate + +- [ ] `python3 scripts/validate_docs_sync.py` 통과 +- [ ] `README.md`/`CLAUDE.md`에 `docs/workflow.md`, `docs/commands.md` 코어 링크가 유지됨 +- [ ] 문서 링크를 절대경로(`/`)로 추가하지 않음 +- [ ] `docs/commands.md` API endpoint 표에 중복 row 없음 + ## Runtime Evidence - 시스템 실제 구동 커맨드: diff --git a/docs/commands.md b/docs/commands.md index a667230..2cc3c83 100644 --- a/docs/commands.md +++ b/docs/commands.md @@ -21,6 +21,20 @@ python3 scripts/session_handover_check.py --strict - 실패 시 `workflow/session-handover.md` 최신 엔트리를 보강한 뒤 재실행한다. +## Docs Sync Validator (Mandatory for docs changes) + +- 문서 변경 PR에서는 아래 명령으로 동기화 검증을 먼저 실행한다. + +```bash +python3 scripts/validate_docs_sync.py +``` + +- 검증 실패 시 메시지 기준으로 즉시 수정한다. 
+ - `absolute link is forbidden`: 문서 링크에 절대경로(`/...`) 사용 + - `broken link`: 상대경로 링크 대상 파일/앵커 누락 + - `missing core doc link reference`: `README.md`/`CLAUDE.md` 핵심 링크 누락 + - `duplicated API endpoint row`: `docs/commands.md` API endpoint 표 중복 행 + ### tea CLI (Gitea Command Line Tool) #### ❌ TTY Error - Interactive Confirmation Fails From 243469cd4092cd73c1d229e895728f49e1fb3006 Mon Sep 17 00:00:00 2001 From: agentson Date: Sun, 1 Mar 2026 23:18:42 +0900 Subject: [PATCH 075/109] docs: address PR #366 review on docs sync gate --- .gitea/PULL_REQUEST_TEMPLATE.md | 7 ++----- docs/commands.md | 1 + 2 files changed, 3 insertions(+), 5 deletions(-) diff --git a/.gitea/PULL_REQUEST_TEMPLATE.md b/.gitea/PULL_REQUEST_TEMPLATE.md index 995ebf7..a27682a 100644 --- a/.gitea/PULL_REQUEST_TEMPLATE.md +++ b/.gitea/PULL_REQUEST_TEMPLATE.md @@ -41,12 +41,9 @@ - [ ] `workflow/session-handover.md` 최신 엔트리가 현재 브랜치/당일(UTC) 기준으로 갱신됨 - 최신 handover 엔트리 heading: -## Docs Sync Gate +## Docs Sync Gate (docs 파일 변경 시 필수) -- [ ] `python3 scripts/validate_docs_sync.py` 통과 -- [ ] `README.md`/`CLAUDE.md`에 `docs/workflow.md`, `docs/commands.md` 코어 링크가 유지됨 -- [ ] 문서 링크를 절대경로(`/`)로 추가하지 않음 -- [ ] `docs/commands.md` API endpoint 표에 중복 row 없음 +- [ ] `python3 scripts/validate_docs_sync.py` 통과 (`docs` 미변경 PR은 N/A 기재) ## Runtime Evidence diff --git a/docs/commands.md b/docs/commands.md index 2cc3c83..abf197d 100644 --- a/docs/commands.md +++ b/docs/commands.md @@ -34,6 +34,7 @@ python3 scripts/validate_docs_sync.py - `broken link`: 상대경로 링크 대상 파일/앵커 누락 - `missing core doc link reference`: `README.md`/`CLAUDE.md` 핵심 링크 누락 - `duplicated API endpoint row`: `docs/commands.md` API endpoint 표 중복 행 + - `missing dynamic test count guidance`: `docs/testing.md`에 `pytest --collect-only -q` 가이드 누락 ### tea CLI (Gitea Command Line Tool) From d2ac0dae539bb5cd00ba9071349e15989b3483aa Mon Sep 17 00:00:00 2001 From: agentson Date: Sun, 1 Mar 2026 23:23:38 +0900 Subject: [PATCH 076/109] docs: add top-level documentation hub index 
(#362) --- CLAUDE.md | 1 + README.md | 1 + docs/README.md | 46 ++++++++++++++++++++++++++++++++++++++++++++++ 3 files changed, 48 insertions(+) create mode 100644 docs/README.md diff --git a/CLAUDE.md b/CLAUDE.md index 9387fa9..f3c1177 100644 --- a/CLAUDE.md +++ b/CLAUDE.md @@ -87,6 +87,7 @@ Smart Scanner runs in both `TRADE_MODE=realtime` and `daily` paths. On API failu ## Documentation +- **[Documentation Hub](docs/README.md)** — Top-level doc routing and reading order - **[Workflow Guide](docs/workflow.md)** — Git workflow policy and agent-based development - **[Command Reference](docs/commands.md)** — Common failures, build commands, troubleshooting - **[Architecture](docs/architecture.md)** — System design, components, data flow diff --git a/README.md b/README.md index 587ac0c..2fade9f 100644 --- a/README.md +++ b/README.md @@ -217,6 +217,7 @@ The-Ouroboros/ ## 문서 +- **[문서 허브](docs/README.md)** — 전체 문서 라우팅, 우선순위, 읽기 순서 - **[아키텍처](docs/architecture.md)** — 시스템 설계, 컴포넌트, 데이터 흐름 - **[테스트](docs/testing.md)** — 테스트 구조, 커버리지, 작성 가이드 - **[명령어](docs/commands.md)** — CLI, Dashboard, Telegram 명령어 diff --git a/docs/README.md b/docs/README.md new file mode 100644 index 0000000..a71f6a0 --- /dev/null +++ b/docs/README.md @@ -0,0 +1,46 @@ +# Documentation Hub + +이 문서는 저장소 전체 문서의 상위 라우팅 허브입니다. +세부 문서로 바로 들어가기 전에 아래 우선순위와 읽기 순서를 기준으로 이동하세요. + +## Priority (SSOT) + +1. 실행/협업 규칙 SSOT: [workflow.md](./workflow.md) +2. 명령/장애 대응 SSOT: [commands.md](./commands.md) +3. 테스트/검증 SSOT: [testing.md](./testing.md) +4. 에이전트 제약 SSOT: [agents.md](./agents.md) +5. 요구사항 추적 SSOT: [requirements-log.md](./requirements-log.md) +6. Ouroboros 실행 문서 허브: [ouroboros/README.md](./ouroboros/README.md) + +## Recommended Reading Order + +1. [workflow.md](./workflow.md) +2. [commands.md](./commands.md) +3. [testing.md](./testing.md) +4. [agents.md](./agents.md) +5. [architecture.md](./architecture.md) +6. [context-tree.md](./context-tree.md) +7. [disaster_recovery.md](./disaster_recovery.md) +8. 
[live-trading-checklist.md](./live-trading-checklist.md) +9. [ouroboros/README.md](./ouroboros/README.md) + +## Document Map + +- Core + - [workflow.md](./workflow.md): 브랜치/PR/리뷰/세션 handover 정책 + - [commands.md](./commands.md): 실행 커맨드, 실패 사례, 트러블슈팅 + - [testing.md](./testing.md): 테스트 구조, 작성 규칙, 검증 명령 + - [agents.md](./agents.md): 에이전트 작업 제약과 금지 행위 +- Design and Operations + - [architecture.md](./architecture.md): 시스템 구조와 컴포넌트 책임 + - [context-tree.md](./context-tree.md): L1-L7 컨텍스트 계층 설계 + - [disaster_recovery.md](./disaster_recovery.md): 백업/복구 절차 + - [live-trading-checklist.md](./live-trading-checklist.md): 실전 전환 체크리스트 +- Governance and Planning + - [requirements-log.md](./requirements-log.md): 요구사항/피드백 히스토리 + - [ouroboros/README.md](./ouroboros/README.md): v2/v3 실행 문서 라우팅 + +## Change Rule + +- 문서 신규/이동/대규모 개편 시 이 파일의 링크와 분류를 함께 갱신합니다. +- 링크는 상대경로만 사용합니다. From a36e85b708116ecbe4e0c9e4b92d13f2c34e2892 Mon Sep 17 00:00:00 2001 From: agentson Date: Sun, 1 Mar 2026 23:26:27 +0900 Subject: [PATCH 077/109] docs: include agent constraints and skills in docs hub --- docs/README.md | 2 ++ 1 file changed, 2 insertions(+) diff --git a/docs/README.md b/docs/README.md index a71f6a0..f50a6b9 100644 --- a/docs/README.md +++ b/docs/README.md @@ -31,6 +31,8 @@ - [commands.md](./commands.md): 실행 커맨드, 실패 사례, 트러블슈팅 - [testing.md](./testing.md): 테스트 구조, 작성 규칙, 검증 명령 - [agents.md](./agents.md): 에이전트 작업 제약과 금지 행위 + - [agent-constraints.md](./agent-constraints.md): 영속 제약/운영 불변식(agents.md 보완) + - [skills.md](./skills.md): 설치/사용 가능한 스킬 목록과 활용 가이드 - Design and Operations - [architecture.md](./architecture.md): 시스템 구조와 컴포넌트 책임 - [context-tree.md](./context-tree.md): L1-L7 컨텍스트 계층 설계 From 746f873650b9bff46bb0c19bce093ebb428578d9 Mon Sep 17 00:00:00 2001 From: agentson Date: Mon, 2 Mar 2026 01:21:13 +0900 Subject: [PATCH 078/109] process: add newline-safe tea comment helper and governance guard (#372) --- docs/commands.md | 21 ++++++++++++ docs/workflow.md | 16 +++++++++ scripts/tea_comment.sh 
| 49 +++++++++++++++++++++++++++ scripts/validate_governance_assets.py | 3 ++ 4 files changed, 89 insertions(+) create mode 100755 scripts/tea_comment.sh diff --git a/docs/commands.md b/docs/commands.md index abf197d..5e85f94 100644 --- a/docs/commands.md +++ b/docs/commands.md @@ -38,6 +38,27 @@ python3 scripts/validate_docs_sync.py ### tea CLI (Gitea Command Line Tool) +#### ❌ Comment Newline Escaping (`\n` rendered literally) +```bash +YES="" ~/bin/tea comment 374 "line1\nline2" +# Web UI shows "\n" as text instead of line breaks +``` +**💡 Reason:** Inline string escaping is interpreted literally before comment submission. + +**✅ Solution:** Use file-based helper to preserve multiline text +```bash +cat > /tmp/comment.md <<'EOF' +line1 +line2 +EOF + +scripts/tea_comment.sh 374 /tmp/comment.md +``` + +**📝 Notes:** +- `scripts/tea_comment.sh` accepts stdin with `-` as body source. +- The helper fails fast when body looks like escaped-newline text only. + #### ❌ TTY Error - Interactive Confirmation Fails ```bash ~/bin/tea issues create --repo X --title "Y" --description "Z" diff --git a/docs/workflow.md b/docs/workflow.md index 288fe70..ecbcdcd 100644 --- a/docs/workflow.md +++ b/docs/workflow.md @@ -70,6 +70,22 @@ Gitea 이슈/PR/코멘트 작업 전에 모든 에이전트는 아래를 먼저 Issue/PR 본문 작성 시 줄바꿈(`\n`)이 문자열 그대로 저장되는 문제가 반복될 수 있다. 원인은 `-d "...\n..."` 형태에서 쉘/CLI가 이스케이프를 실제 개행으로 해석하지 않기 때문이다. +코멘트도 동일한 문제가 자주 발생하므로, 코멘트는 파일 기반 래퍼를 표준으로 사용한다. + +```bash +# 권장: 파일/STDIN 기반 코멘트 등록 (줄바꿈 보존) +cat > /tmp/review.md <<'EOF' +리뷰 반영 완료했습니다. + +- 항목 1 +- 항목 2 +EOF + +scripts/tea_comment.sh 374 /tmp/review.md +# 또는 +cat /tmp/review.md | scripts/tea_comment.sh 374 - +``` + 권장 패턴: ```bash diff --git a/scripts/tea_comment.sh b/scripts/tea_comment.sh new file mode 100755 index 0000000..1d4ca04 --- /dev/null +++ b/scripts/tea_comment.sh @@ -0,0 +1,49 @@ +#!/usr/bin/env bash +# Safe helper for posting multiline Gitea comments without escaped-newline artifacts. 
+ +set -euo pipefail + +if [ "${1:-}" = "-h" ] || [ "${1:-}" = "--help" ] || [ "$#" -lt 2 ]; then + cat <<'EOF' +Usage: + scripts/tea_comment.sh [repo] + +Examples: + scripts/tea_comment.sh 374 /tmp/comment.md + cat /tmp/comment.md | scripts/tea_comment.sh 374 - jihoson/The-Ouroboros + +Notes: + - Use file/stdin input to preserve real newlines. + - Passing inline strings with "\n" is intentionally avoided by this helper. +EOF + exit 1 +fi + +INDEX="$1" +BODY_SOURCE="$2" +REPO="${3:-jihoson/The-Ouroboros}" + +if [ "$BODY_SOURCE" = "-" ]; then + BODY="$(cat)" +else + if [ ! -f "$BODY_SOURCE" ]; then + echo "[FAIL] body file not found: $BODY_SOURCE" >&2 + exit 1 + fi + BODY="$(cat "$BODY_SOURCE")" +fi + +if [ -z "$BODY" ]; then + echo "[FAIL] empty comment body" >&2 + exit 1 +fi + +# Guard against the common escaped-newline mistake. +if [[ "$BODY" == *"\\n"* ]] && [[ "$BODY" != *$'\n'* ]]; then + echo "[FAIL] body appears to contain escaped newlines (\\n) instead of real line breaks" >&2 + echo "Use a multiline file/heredoc and pass that file to scripts/tea_comment.sh" >&2 + exit 1 +fi + +YES="" ~/bin/tea comment "$INDEX" --repo "$REPO" "$BODY" + diff --git a/scripts/validate_governance_assets.py b/scripts/validate_governance_assets.py index 79bc882..012138a 100644 --- a/scripts/validate_governance_assets.py +++ b/scripts/validate_governance_assets.py @@ -215,6 +215,7 @@ def main() -> int: [ "Session Handover Gate (Mandatory)", "session_handover_check.py --strict", + "scripts/tea_comment.sh", ], errors, ) @@ -223,6 +224,8 @@ def main() -> int: [ "Session Handover Preflight (Mandatory)", "session_handover_check.py --strict", + "Comment Newline Escaping", + "scripts/tea_comment.sh", ], errors, ) From 3712a7a30b748d228903cfea29e7bf227eab7883 Mon Sep 17 00:00:00 2001 From: agentson Date: Mon, 2 Mar 2026 01:30:02 +0900 Subject: [PATCH 079/109] test: cover governance newline-helper required tokens --- tests/test_validate_governance_assets.py | 54 ++++++++++++++++++++++++ 1 
file changed, 54 insertions(+) diff --git a/tests/test_validate_governance_assets.py b/tests/test_validate_governance_assets.py index 3f2ff21..5bb023e 100644 --- a/tests/test_validate_governance_assets.py +++ b/tests/test_validate_governance_assets.py @@ -187,3 +187,57 @@ def test_validate_read_only_approval_skips_when_no_readonly_file_changed() -> No module.validate_read_only_approval(changed_files, errors, warnings) assert errors == [] assert warnings == [] + + +def test_must_contain_enforces_workflow_newline_helper_tokens(tmp_path) -> None: + module = _load_module() + workflow_doc = tmp_path / "workflow.md" + workflow_doc.write_text( + "\n".join( + [ + "Session Handover Gate (Mandatory)", + "python3 scripts/session_handover_check.py --strict", + "scripts/tea_comment.sh", + ] + ), + encoding="utf-8", + ) + errors: list[str] = [] + module.must_contain( + workflow_doc, + [ + "Session Handover Gate (Mandatory)", + "session_handover_check.py --strict", + "scripts/tea_comment.sh", + ], + errors, + ) + assert errors == [] + + +def test_must_contain_enforces_commands_newline_section_tokens(tmp_path) -> None: + module = _load_module() + commands_doc = tmp_path / "commands.md" + commands_doc.write_text( + "\n".join( + [ + "Session Handover Preflight (Mandatory)", + "python3 scripts/session_handover_check.py --strict", + "Comment Newline Escaping", + "scripts/tea_comment.sh", + ] + ), + encoding="utf-8", + ) + errors: list[str] = [] + module.must_contain( + commands_doc, + [ + "Session Handover Preflight (Mandatory)", + "session_handover_check.py --strict", + "Comment Newline Escaping", + "scripts/tea_comment.sh", + ], + errors, + ) + assert errors == [] From d469002be7a0eaf1b47776c2f4799a933ba2d3a5 Mon Sep 17 00:00:00 2001 From: agentson Date: Mon, 2 Mar 2026 01:33:39 +0900 Subject: [PATCH 080/109] test: add unhappy-path coverage for newline guard tokens --- tests/test_validate_governance_assets.py | 43 ++++++++++++++++++++++++ 1 file changed, 43 insertions(+) diff --git 
a/tests/test_validate_governance_assets.py b/tests/test_validate_governance_assets.py index 5bb023e..f5a425d 100644 --- a/tests/test_validate_governance_assets.py +++ b/tests/test_validate_governance_assets.py @@ -215,6 +215,27 @@ def test_must_contain_enforces_workflow_newline_helper_tokens(tmp_path) -> None: assert errors == [] +def test_must_contain_fails_when_workflow_missing_newline_helper_token(tmp_path) -> None: + module = _load_module() + workflow_doc = tmp_path / "workflow.md" + workflow_doc.write_text( + "\n".join( + [ + "Session Handover Gate (Mandatory)", + "python3 scripts/session_handover_check.py --strict", + ] + ), + encoding="utf-8", + ) + errors: list[str] = [] + module.must_contain( + workflow_doc, + ["scripts/tea_comment.sh"], + errors, + ) + assert any("scripts/tea_comment.sh" in err for err in errors) + + def test_must_contain_enforces_commands_newline_section_tokens(tmp_path) -> None: module = _load_module() commands_doc = tmp_path / "commands.md" @@ -241,3 +262,25 @@ def test_must_contain_enforces_commands_newline_section_tokens(tmp_path) -> None errors, ) assert errors == [] + + +def test_must_contain_fails_when_commands_missing_newline_section_token(tmp_path) -> None: + module = _load_module() + commands_doc = tmp_path / "commands.md" + commands_doc.write_text( + "\n".join( + [ + "Session Handover Preflight (Mandatory)", + "python3 scripts/session_handover_check.py --strict", + "scripts/tea_comment.sh", + ] + ), + encoding="utf-8", + ) + errors: list[str] = [] + module.must_contain( + commands_doc, + ["Comment Newline Escaping"], + errors, + ) + assert any("Comment Newline Escaping" in err for err in errors) From 53a6ef2968c1d360d1487aa16e51a78b7c001f36 Mon Sep 17 00:00:00 2001 From: agentson Date: Mon, 2 Mar 2026 01:41:06 +0900 Subject: [PATCH 081/109] governance: enforce fail-fast ops traceability and task-test pairing (#372) --- docs/ouroboros/01_requirements_registry.md | 4 +- docs/ouroboros/30_code_level_work_orders.md | 32 +++++----- 
scripts/validate_governance_assets.py | 53 +++++++++++++--- tests/test_validate_governance_assets.py | 69 ++++++++++++++++++--- 4 files changed, 124 insertions(+), 34 deletions(-) diff --git a/docs/ouroboros/01_requirements_registry.md b/docs/ouroboros/01_requirements_registry.md index 7248955..56cb062 100644 --- a/docs/ouroboros/01_requirements_registry.md +++ b/docs/ouroboros/01_requirements_registry.md @@ -1,9 +1,9 @@ # 요구사항 원장 (Single Source of Truth) diff --git a/docs/ouroboros/30_code_level_work_orders.md b/docs/ouroboros/30_code_level_work_orders.md index 5a75a02..66eb5c9 100644 --- a/docs/ouroboros/30_code_level_work_orders.md +++ b/docs/ouroboros/30_code_level_work_orders.md @@ -16,42 +16,42 @@ Updated: 2026-02-26 ## 구현 단위 A: 상태기계/청산 -- `TASK-CODE-001` (`REQ-V2-001`,`REQ-V2-002`,`REQ-V2-003`): `src/strategy/`에 상태기계 모듈 추가 -- `TASK-CODE-002` (`REQ-V2-004`): ATR/BE/Hard Stop 결합 청산 함수 추가 -- `TASK-CODE-003` (`REQ-V2-008`): Kill Switch 오케스트레이터를 `src/core/kill_switch.py`에 추가 +- `TASK-CODE-001` (`REQ-V2-001`,`REQ-V2-002`,`REQ-V2-003`,`TEST-CODE-001`,`TEST-CODE-002`): `src/strategy/`에 상태기계 모듈 추가 +- `TASK-CODE-002` (`REQ-V2-004`,`TEST-ACC-011`): ATR/BE/Hard Stop 결합 청산 함수 추가 +- `TASK-CODE-003` (`REQ-V2-008`,`TEST-ACC-002`): Kill Switch 오케스트레이터를 `src/core/kill_switch.py`에 추가 - `TEST-CODE-001`: 갭 점프 시 최고상태 승격 테스트 - `TEST-CODE-002`: EXIT 우선순위 테스트 ## 구현 단위 B: 라벨링/검증 -- `TASK-CODE-004` (`REQ-V2-005`): Triple Barrier 라벨러 모듈 추가(`src/analysis/` 또는 `src/strategy/`) -- `TASK-CODE-005` (`REQ-V2-006`): Walk-forward + Purge/Embargo 분할 유틸 추가 -- `TASK-CODE-006` (`REQ-V2-007`): 백테스트 실행기에서 비용/슬리피지 옵션 필수화 +- `TASK-CODE-004` (`REQ-V2-005`,`TEST-CODE-003`,`TEST-ACC-012`): Triple Barrier 라벨러 모듈 추가(`src/analysis/` 또는 `src/strategy/`) +- `TASK-CODE-005` (`REQ-V2-006`,`TEST-CODE-004`,`TEST-ACC-013`): Walk-forward + Purge/Embargo 분할 유틸 추가 +- `TASK-CODE-006` (`REQ-V2-007`,`TEST-ACC-014`): 백테스트 실행기에서 비용/슬리피지 옵션 필수화 - `TEST-CODE-003`: 라벨 선터치 우선 테스트 - `TEST-CODE-004`: 누수 차단 테스트 ## 구현 단위 C: 세션/주문 
정책 -- `TASK-CODE-007` (`REQ-V3-001`,`REQ-V3-002`): 세션 분류/전환 훅을 `src/markets/schedule.py` 연동 -- `TASK-CODE-008` (`REQ-V3-003`,`REQ-V3-004`): 블랙아웃 큐 처리기를 `src/broker/`에 추가 -- `TASK-CODE-009` (`REQ-V3-005`): 세션별 주문 타입 검증기 추가 +- `TASK-CODE-007` (`REQ-V3-001`,`REQ-V3-002`,`TEST-ACC-015`,`TEST-ACC-016`): 세션 분류/전환 훅을 `src/markets/schedule.py` 연동 +- `TASK-CODE-008` (`REQ-V3-003`,`REQ-V3-004`,`TEST-CODE-005`,`TEST-ACC-017`): 블랙아웃 큐 처리기를 `src/broker/`에 추가 +- `TASK-CODE-009` (`REQ-V3-005`,`TEST-CODE-006`,`TEST-ACC-004`): 세션별 주문 타입 검증기 추가 - `TEST-CODE-005`: 블랙아웃 신규주문 차단 테스트 - `TEST-CODE-006`: 저유동 세션 시장가 거부 테스트 ## 구현 단위 D: 체결/환율/오버나잇 -- `TASK-CODE-010` (`REQ-V3-006`): 불리한 체결가 모델을 백테스트 체결기로 구현 -- `TASK-CODE-011` (`REQ-V3-007`): FX PnL 분리 회계 테이블/컬럼 추가 -- `TASK-CODE-012` (`REQ-V3-008`): 오버나잇 예외와 Kill Switch 충돌 해소 로직 구현 +- `TASK-CODE-010` (`REQ-V3-006`,`TEST-CODE-007`,`TEST-ACC-005`): 불리한 체결가 모델을 백테스트 체결기로 구현 +- `TASK-CODE-011` (`REQ-V3-007`,`TEST-CODE-008`,`TEST-ACC-006`): FX PnL 분리 회계 테이블/컬럼 추가 +- `TASK-CODE-012` (`REQ-V3-008`,`TEST-ACC-018`): 오버나잇 예외와 Kill Switch 충돌 해소 로직 구현 - `TEST-CODE-007`: 불리한 체결가 모델 테스트 - `TEST-CODE-008`: FX 버퍼 위반 시 신규진입 제한 테스트 ## 구현 단위 E: 운영/문서 거버넌스 -- `TASK-OPS-001` (`REQ-OPS-001`): 시간 필드/로그 스키마의 타임존 표기 강제 규칙 구현 -- `TASK-OPS-002` (`REQ-OPS-002`): 정책 수치 변경 시 `01_requirements_registry.md` 선수정 CI 체크 추가 -- `TASK-OPS-003` (`REQ-OPS-003`): `TASK-*` 없는 `REQ-*` 또는 `TEST-*` 없는 `REQ-*`를 차단하는 문서 검증 게이트 유지 -- `TASK-OPS-004` (`REQ-OPS-004`): v2/v3 원본 계획 문서 위치를 `docs/ouroboros/source/`로 표준화하고 링크 일관성 검증 +- `TASK-OPS-001` (`REQ-OPS-001`,`TEST-ACC-007`): 시간 필드/로그 스키마의 타임존(KST/UTC) 표기 강제 규칙 구현 +- `TASK-OPS-002` (`REQ-OPS-002`,`TEST-ACC-008`): 정책 수치 변경 시 `01_requirements_registry.md` 선수정 CI 체크 추가 +- `TASK-OPS-003` (`REQ-OPS-003`,`TEST-ACC-009`): `TASK-*` 없는 `REQ-*` 또는 `TEST-*` 없는 `REQ-*`를 차단하는 문서 검증 게이트 유지 +- `TASK-OPS-004` (`REQ-OPS-004`,`TEST-ACC-019`): v2/v3 원본 계획 문서 위치를 `docs/ouroboros/source/`로 표준화하고 링크 일관성 검증 ## 커밋 규칙 diff --git a/scripts/validate_governance_assets.py 
b/scripts/validate_governance_assets.py index 012138a..bfce37c 100644 --- a/scripts/validate_governance_assets.py +++ b/scripts/validate_governance_assets.py @@ -17,6 +17,7 @@ TASK_ID_IN_TEXT = re.compile(r"\bTASK-[A-Z0-9-]+-\d{3}\b") TEST_ID_IN_TEXT = re.compile(r"\bTEST-[A-Z0-9-]+-\d{3}\b") READ_ONLY_FILES = {"src/core/risk_manager.py"} PLACEHOLDER_VALUES = {"", "tbd", "n/a", "na", "none", "", ""} +TIMEZONE_TOKEN_PATTERN = re.compile(r"\b(?:KST|UTC)\b") def must_contain(path: Path, required: list[str], errors: list[str]) -> None: @@ -105,7 +106,43 @@ def validate_task_req_mapping(errors: list[str], *, task_doc: Path | None = None errors.append(f"{path}: no TASK definitions found") -def validate_pr_traceability(warnings: list[str]) -> None: +def validate_task_test_pairing(errors: list[str], *, task_doc: Path | None = None) -> None: + """Fail when TASK definitions are not linked to at least one TEST id.""" + path = task_doc or Path(TASK_WORK_ORDERS_DOC) + if not path.exists(): + errors.append(f"missing file: {path}") + return + + text = path.read_text(encoding="utf-8") + found_task = False + for line in text.splitlines(): + m = TASK_DEF_LINE.match(line.strip()) + if not m: + continue + found_task = True + if not TEST_ID_IN_TEXT.search(m.group("body")): + errors.append(f"{path}: TASK without TEST mapping -> {m.group('task_id')}") + if not found_task: + errors.append(f"{path}: no TASK definitions found") + + +def validate_timezone_policy_tokens(errors: list[str]) -> None: + """Fail-fast check for REQ-OPS-001 governance tokens.""" + required_docs = [ + Path("docs/ouroboros/01_requirements_registry.md"), + Path("docs/ouroboros/30_code_level_work_orders.md"), + Path("docs/workflow.md"), + ] + for path in required_docs: + if not path.exists(): + errors.append(f"missing file: {path}") + continue + text = path.read_text(encoding="utf-8") + if not TIMEZONE_TOKEN_PATTERN.search(text): + errors.append(f"{path}: missing timezone policy token (KST/UTC)") + + +def 
validate_pr_traceability(errors: list[str]) -> None: title = os.getenv("GOVERNANCE_PR_TITLE", "").strip() body = os.getenv("GOVERNANCE_PR_BODY", "").strip() if not title and not body: @@ -113,11 +150,11 @@ def validate_pr_traceability(warnings: list[str]) -> None: text = f"{title}\n{body}" if not REQ_ID_IN_LINE.search(text): - warnings.append("PR text missing REQ-ID reference") + errors.append("PR text missing REQ-ID reference") if not TASK_ID_IN_TEXT.search(text): - warnings.append("PR text missing TASK-ID reference") + errors.append("PR text missing TASK-ID reference") if not TEST_ID_IN_TEXT.search(text): - warnings.append("PR text missing TEST-ID reference") + errors.append("PR text missing TEST-ID reference") def _parse_pr_evidence_line(text: str, field: str) -> str | None: @@ -145,8 +182,8 @@ def validate_read_only_approval( body = os.getenv("GOVERNANCE_PR_BODY", "").strip() if not body: - warnings.append( - "READ-ONLY file changed but PR body is unavailable; approval evidence check skipped" + errors.append( + "READ-ONLY file changed but PR body is unavailable; approval evidence is required" ) return @@ -245,7 +282,9 @@ def main() -> int: validate_registry_sync(changed_files, errors) validate_task_req_mapping(errors) - validate_pr_traceability(warnings) + validate_task_test_pairing(errors) + validate_timezone_policy_tokens(errors) + validate_pr_traceability(errors) validate_read_only_approval(changed_files, errors, warnings) if errors: diff --git a/tests/test_validate_governance_assets.py b/tests/test_validate_governance_assets.py index f5a425d..30312f5 100644 --- a/tests/test_validate_governance_assets.py +++ b/tests/test_validate_governance_assets.py @@ -108,14 +108,14 @@ def test_validate_task_req_mapping_passes_when_req_present(tmp_path) -> None: assert errors == [] -def test_validate_pr_traceability_warns_when_req_missing(monkeypatch) -> None: +def test_validate_pr_traceability_fails_when_req_missing(monkeypatch) -> None: module = _load_module() 
monkeypatch.setenv("GOVERNANCE_PR_TITLE", "feat: update policy checker") monkeypatch.setenv("GOVERNANCE_PR_BODY", "Refs: TASK-OPS-001 TEST-ACC-007") - warnings: list[str] = [] - module.validate_pr_traceability(warnings) - assert warnings - assert "PR text missing REQ-ID reference" in warnings + errors: list[str] = [] + module.validate_pr_traceability(errors) + assert errors + assert "PR text missing REQ-ID reference" in errors def test_validate_read_only_approval_requires_evidence(monkeypatch) -> None: @@ -165,7 +165,7 @@ def test_validate_read_only_approval_passes_with_complete_evidence(monkeypatch) assert warnings == [] -def test_validate_read_only_approval_warns_without_pr_body(monkeypatch) -> None: +def test_validate_read_only_approval_fails_without_pr_body(monkeypatch) -> None: module = _load_module() changed_files = ["src/core/risk_manager.py"] errors: list[str] = [] @@ -173,9 +173,9 @@ def test_validate_read_only_approval_warns_without_pr_body(monkeypatch) -> None: monkeypatch.delenv("GOVERNANCE_PR_BODY", raising=False) module.validate_read_only_approval(changed_files, errors, warnings) - assert errors == [] - assert warnings - assert "approval evidence check skipped" in warnings[0] + assert warnings == [] + assert errors + assert "approval evidence is required" in errors[0] def test_validate_read_only_approval_skips_when_no_readonly_file_changed() -> None: @@ -284,3 +284,54 @@ def test_must_contain_fails_when_commands_missing_newline_section_token(tmp_path errors, ) assert any("Comment Newline Escaping" in err for err in errors) + + +def test_validate_task_test_pairing_reports_missing_test_reference(tmp_path) -> None: + module = _load_module() + doc = tmp_path / "work_orders.md" + doc.write_text( + "- `TASK-OPS-999` (`REQ-OPS-001`): enforce timezone labels only\n", + encoding="utf-8", + ) + errors: list[str] = [] + module.validate_task_test_pairing(errors, task_doc=doc) + assert errors + assert "TASK without TEST mapping" in errors[0] + + +def 
test_validate_task_test_pairing_passes_when_test_present(tmp_path) -> None: + module = _load_module() + doc = tmp_path / "work_orders.md" + doc.write_text( + "- `TASK-OPS-999` (`REQ-OPS-001`,`TEST-ACC-007`): enforce timezone labels\n", + encoding="utf-8", + ) + errors: list[str] = [] + module.validate_task_test_pairing(errors, task_doc=doc) + assert errors == [] + + +def test_validate_timezone_policy_tokens_requires_kst_or_utc(tmp_path, monkeypatch) -> None: + module = _load_module() + docs = tmp_path / "docs" + ouroboros = docs / "ouroboros" + docs.mkdir(parents=True) + ouroboros.mkdir(parents=True) + monkeypatch.chdir(tmp_path) + + (ouroboros / "01_requirements_registry.md").write_text("REQ-OPS-001\nUTC\n", encoding="utf-8") + (ouroboros / "30_code_level_work_orders.md").write_text( + "TASK-OPS-001 (`REQ-OPS-001`,`TEST-ACC-007`)\nKST\n", + encoding="utf-8", + ) + (docs / "workflow.md").write_text("timezone policy: KST and UTC\n", encoding="utf-8") + + errors: list[str] = [] + module.validate_timezone_policy_tokens(errors) + assert errors == [] + + (docs / "workflow.md").write_text("timezone policy missing labels\n", encoding="utf-8") + errors = [] + module.validate_timezone_policy_tokens(errors) + assert errors + assert any("missing timezone policy token" in err for err in errors) From c849e60199ae671ee25af4fdddd4f77959262ff5 Mon Sep 17 00:00:00 2001 From: agentson Date: Mon, 2 Mar 2026 01:51:28 +0900 Subject: [PATCH 082/109] ci: retrigger after PR body update From 1d404975ea9cc35a64287a0b7607fdfdbbb292eb Mon Sep 17 00:00:00 2001 From: agentson Date: Mon, 2 Mar 2026 01:54:27 +0900 Subject: [PATCH 083/109] docs: resync implementation audit status with actual code gaps (#373) --- docs/ouroboros/01_requirements_registry.md | 2 +- docs/ouroboros/80_implementation_audit.md | 56 +++++++++++++--------- 2 files changed, 34 insertions(+), 24 deletions(-) diff --git a/docs/ouroboros/01_requirements_registry.md b/docs/ouroboros/01_requirements_registry.md index 
56cb062..0a49bc7 100644 --- a/docs/ouroboros/01_requirements_registry.md +++ b/docs/ouroboros/01_requirements_registry.md @@ -1,6 +1,6 @@ # v2/v3 구현 감사 및 수익률 분석 보고서 작성일: 2026-02-28 -최종 업데이트: 2026-03-01 (Phase 2 완료 + Phase 3 부분 완료 반영) +최종 업데이트: 2026-03-02 (#373 상태표 정합화 반영) 대상 기간: 2026-02-25 ~ 2026-02-28 (실거래) 분석 브랜치: `feature/v3-session-policy-stream` @@ -17,45 +17,54 @@ Updated: 2026-03-01 ## 1. 계획 대비 구현 감사 -### 1.1 v2 구현 상태: 100% 완료 +### 1.1 완료 판정 기준 (Definition of Done) + +아래 3가지를 모두 만족할 때만 `✅ 완료`로 표기한다. + +1. 코드 경로 존재: 요구사항을 수행하는 실행 경로가 코드에 존재한다. +2. 효과 검증 통과: 요구사항 효과를 검증하는 테스트/런타임 증적이 존재한다. +3. 추적성 일치: 요구사항 상태와 열린 갭 이슈가 모순되지 않는다. + +### 1.2 v2 구현 상태: 부분 완료 (핵심 갭 잔존) | REQ-ID | 요구사항 | 구현 파일 | 상태 | |--------|----------|-----------|------| | REQ-V2-001 | 4-상태 매도 상태기계 (HOLDING→BE_LOCK→ARMED→EXITED) | `src/strategy/position_state_machine.py` | ✅ 완료 | | REQ-V2-002 | 즉시 최상위 상태 승격 (갭 대응) | `position_state_machine.py:51-70` | ✅ 완료 | | REQ-V2-003 | EXITED 우선 평가 | `position_state_machine.py:38-48` | ✅ 완료 | -| REQ-V2-004 | 4중 청산 로직 (Hard/BE/ATR Trailing/Model) | `src/strategy/exit_rules.py` | ✅ 완료 | +| REQ-V2-004 | 4중 청산 로직 (Hard/BE/ATR Trailing/Model) | `src/strategy/exit_rules.py` | ⚠️ 부분 (`#369`) | | REQ-V2-005 | Triple Barrier 라벨링 | `src/analysis/triple_barrier.py` | ✅ 완료 | | REQ-V2-006 | Walk-Forward + Purge/Embargo 검증 | `src/analysis/walk_forward_split.py` | ✅ 완료 | -| REQ-V2-007 | 비용/슬리피지/체결실패 모델 필수 | `src/analysis/backtest_cost_guard.py` | ✅ 완료 | -| REQ-V2-008 | Kill Switch 실행 순서 (Block→Cancel→Refresh→Reduce→Snapshot) | `src/core/kill_switch.py` | ✅ 완료 | +| REQ-V2-007 | 비용/슬리피지/체결실패 모델 필수 | `src/analysis/backtest_cost_guard.py` | ⚠️ 부분 (`#368`) | +| REQ-V2-008 | Kill Switch 실행 순서 (Block→Cancel→Refresh→Reduce→Snapshot) | `src/core/kill_switch.py` | ⚠️ 부분 (`#377`) | -### 1.2 v3 구현 상태: ~85% 완료 (2026-03-01 기준) +### 1.3 v3 구현 상태: 부분 완료 (2026-03-02 기준) | REQ-ID | 요구사항 | 상태 | 비고 | |--------|----------|------|------| -| REQ-V3-001 | 모든 신호/주문/로그에 session_id 포함 | ✅ 완료 | #326 
머지 — `log_decision()` 파라미터 추가, `log_trade()` 명시적 전달 | -| REQ-V3-002 | 세션 전환 훅 + 리스크 파라미터 재로딩 | ⚠️ 부분 | #327 머지 — 재로딩 메커니즘 구현, 세션 훅 테스트 미작성 | +| REQ-V3-001 | 모든 신호/주문/로그에 session_id 포함 | ⚠️ 부분 | 큐 intent에 `session_id` 누락 (`#375`) | +| REQ-V3-002 | 세션 전환 훅 + 리스크 파라미터 재로딩 | ⚠️ 부분 | 구현 존재, 세션 경계 E2E 회귀 보강 필요 (`#376`) | | REQ-V3-003 | 블랙아웃 윈도우 정책 | ✅ 완료 | `src/core/blackout_manager.py` | -| REQ-V3-004 | 블랙아웃 큐 + 복구 시 재검증 | ✅ 완료 | #324(DB 기록) + #328(가격/세션 재검증) 머지 | +| REQ-V3-004 | 블랙아웃 큐 + 복구 시 재검증 | ⚠️ 부분 | 큐 포화 시 intent 유실 경로 존재 (`#371`), 재검증 강화를 `#328`에서 추적 | | REQ-V3-005 | 저유동 세션 시장가 금지 | ✅ 완료 | `src/core/order_policy.py` | | REQ-V3-006 | 보수적 백테스트 체결 (불리 방향) | ✅ 완료 | `src/analysis/backtest_execution_model.py` | -| REQ-V3-007 | FX 손익 분리 (전략 PnL vs 환율 PnL) | ⚠️ 코드 완료 / 운영 미반영 | `src/db.py` 스키마·함수 완료, 운영 데이터 `fx_pnl` 전부 0 | +| REQ-V3-007 | FX 손익 분리 (전략 PnL vs 환율 PnL) | ⚠️ 부분 | 스키마 존재, 런타임 분리 계산/전달 미적용 (`#370`) | | REQ-V3-008 | 오버나잇 예외 vs Kill Switch 우선순위 | ✅ 완료 | `src/main.py` — `_should_force_exit_for_overnight()`, `_apply_staged_exit_override_for_hold()` | -### 1.3 운영 거버넌스: ~60% 완료 (2026-03-01 재평가) +### 1.4 운영 거버넌스: 부분 완료 (2026-03-02 재평가) | REQ-ID | 요구사항 | 상태 | 비고 | |--------|----------|------|------| -| REQ-OPS-001 | 타임존 명시 (KST/UTC) | ⚠️ 부분 | DB 기록은 UTC, 세션은 KST. 일부 로그에서 타임존 미표기 | -| REQ-OPS-002 | 정책 변경 시 레지스트리 업데이트 강제 | ⚠️ 기본 구현 완료 | `scripts/validate_governance_assets.py` CI 연동 완료; 규칙 고도화 잔여 | -| REQ-OPS-003 | TASK-REQ 매핑 강제 | ⚠️ 기본 구현 완료 | `scripts/validate_ouroboros_docs.py` CI 연동 완료; PR 강제 검증 강화 잔여 | +| REQ-OPS-001 | 타임존 명시 (KST/UTC) | ⚠️ 부분 | 문서 토큰 fail-fast 추가, 필드 수준 검증은 `#372` 잔여 | +| REQ-OPS-002 | 정책 변경 시 레지스트리 업데이트 강제 | ⚠️ 부분 | 파일 단위 강제는 구현, 정책 수치 단위 정밀 검증은 `#372` 잔여 | +| REQ-OPS-003 | TASK-REQ 매핑 강제 | ⚠️ 부분 | TASK-REQ/TASK-TEST 강제는 구현, 우회 케이스 추가 점검은 `#372` 잔여 | +| REQ-OPS-004 | source 경로 표준화 검증 | ✅ 완료 | `scripts/validate_ouroboros_docs.py`의 canonical source path 검증 | --- ## 2. 구현 갭 상세 -> **2026-03-01 업데이트**: GAP-1~5 모두 해소되었거나 이슈 머지로 부분 해소됨. 
+> **2026-03-02 업데이트**: 기존 해소 표기를 재검증했고, 열려 있는 갭 이슈 기준으로 상태를 재분류함. ### GAP-1: DecisionLogger에 session_id 미포함 → ✅ 해소 (#326) @@ -80,12 +89,13 @@ Updated: 2026-03-01 - **잔여 갭**: 세션 경계 실시간 전환 E2E 통합 테스트 보강 필요 (`test_main.py`에 설정 오버라이드/폴백 단위 테스트는 존재) - **요구사항**: REQ-V3-002 -### GAP-4: 블랙아웃 복구 DB 기록 + 재검증 → ✅ 해소 (#324, #328) +### GAP-4: 블랙아웃 복구 DB 기록 + 재검증 → ⚠️ 부분 해소 (#324, #328, #371) - **위치**: `src/core/blackout_manager.py`, `src/main.py` -- **해소 내용**: - - #324 머지 — 복구 주문 실행 후 `log_trade()` 호출, rationale에 `[blackout-recovery]` prefix - - #328 머지 — 가격 유효성 검증 (진입가 대비 급변 시 드롭), 세션 변경 시 새 파라미터로 재검증 +- **현 상태**: + - #324 추적 범위(DB 기록)는 구현 경로가 존재 + - #328 범위(가격/세션 재검증 강화)는 추적 이슈 오픈 상태 + - #371: 큐 포화 시 intent 유실 경로가 남아 있어 `REQ-V3-004`를 완료로 보기 어려움 - **요구사항**: REQ-V3-004 ### GAP-5: 시간장벽이 봉 개수 고정 → ✅ 해소 (#329) @@ -97,10 +107,10 @@ Updated: 2026-03-01 - `max_holding_bars` deprecated 경고 유지 (하위 호환) - **요구사항**: REQ-V2-005 / v3 확장 -### GAP-6 (신규): FX PnL 운영 미활성 (LOW — 코드 완료) +### GAP-6 (신규): FX PnL 분리 미완료 (MEDIUM — 부분 구현) - **위치**: `src/db.py` (`fx_pnl`, `strategy_pnl` 컬럼 존재) -- **문제**: 스키마와 함수는 완료되었으나 운영 데이터에서 `fx_pnl` 전부 0 +- **문제**: 스키마와 함수는 존재하지만 런타임 경로에서 `strategy_pnl`/`fx_pnl` 분리 계산 전달이 누락됨 (`#370`) - **영향**: USD 거래에서 환율 손익과 전략 손익이 분리되지 않아 성과 분석 부정확 - **요구사항**: REQ-V3-007 From c27decb6b1a31fcee01bc440129a5fdd54b45923 Mon Sep 17 00:00:00 2001 From: agentson Date: Mon, 2 Mar 2026 02:10:08 +0900 Subject: [PATCH 084/109] backtest: reflect cost/execution effects in fold scoring (#368) --- src/analysis/backtest_cost_guard.py | 11 +++ src/analysis/backtest_pipeline.py | 104 ++++++++++++++++++-- tests/test_backtest_cost_guard.py | 32 ++++++ tests/test_backtest_pipeline_integration.py | 45 +++++++++ 4 files changed, 186 insertions(+), 6 deletions(-) diff --git a/src/analysis/backtest_cost_guard.py b/src/analysis/backtest_cost_guard.py index 97e1cd3..ae0d729 100644 --- a/src/analysis/backtest_cost_guard.py +++ b/src/analysis/backtest_cost_guard.py @@ -11,6 +11,7 @@ class 
BacktestCostModel: commission_bps: float | None = None slippage_bps_by_session: dict[str, float] | None = None failure_rate_by_session: dict[str, float] | None = None + partial_fill_rate_by_session: dict[str, float] | None = None unfavorable_fill_required: bool = True @@ -31,6 +32,7 @@ def validate_backtest_cost_model( slippage = model.slippage_bps_by_session or {} failure = model.failure_rate_by_session or {} + partial_fill = model.partial_fill_rate_by_session or {} missing_slippage = [s for s in required_sessions if s not in slippage] if missing_slippage: @@ -43,6 +45,12 @@ def validate_backtest_cost_model( raise ValueError( f"missing failure_rate_by_session for sessions: {', '.join(missing_failure)}" ) + missing_partial_fill = [s for s in required_sessions if s not in partial_fill] + if missing_partial_fill: + raise ValueError( + "missing partial_fill_rate_by_session for sessions: " + f"{', '.join(missing_partial_fill)}" + ) for sess, bps in slippage.items(): if not math.isfinite(bps) or bps < 0: @@ -50,3 +58,6 @@ def validate_backtest_cost_model( for sess, rate in failure.items(): if not math.isfinite(rate) or rate < 0 or rate > 1: raise ValueError(f"failure rate must be within [0,1] for session={sess}") + for sess, rate in partial_fill.items(): + if not math.isfinite(rate) or rate < 0 or rate > 1: + raise ValueError(f"partial fill rate must be within [0,1] for session={sess}") diff --git a/src/analysis/backtest_pipeline.py b/src/analysis/backtest_pipeline.py index 985e0e0..ce27f1f 100644 --- a/src/analysis/backtest_pipeline.py +++ b/src/analysis/backtest_pipeline.py @@ -13,6 +13,11 @@ from statistics import mean from typing import Literal, cast from src.analysis.backtest_cost_guard import BacktestCostModel, validate_backtest_cost_model +from src.analysis.backtest_execution_model import ( + BacktestExecutionModel, + ExecutionAssumptions, + ExecutionRequest, +) from src.analysis.triple_barrier import TripleBarrierSpec, label_with_triple_barrier from 
src.analysis.walk_forward_split import WalkForwardFold, generate_walk_forward_splits @@ -40,6 +45,7 @@ class WalkForwardConfig: class BaselineScore: name: Literal["B0", "B1", "M1"] accuracy: float + cost_adjusted_accuracy: float @dataclass(frozen=True) @@ -115,6 +121,8 @@ def run_v2_backtest_pipeline( ).label ordered_labels = [labels_by_bar_index[idx] for idx in normalized_entries] + ordered_sessions = [bars[idx].session_id for idx in normalized_entries] + ordered_prices = [bars[idx].close for idx in normalized_entries] folds = generate_walk_forward_splits( n_samples=len(normalized_entries), train_size=walk_forward.train_size, @@ -129,8 +137,13 @@ def run_v2_backtest_pipeline( for fold_idx, fold in enumerate(folds): train_labels = [ordered_labels[i] for i in fold.train_indices] test_labels = [ordered_labels[i] for i in fold.test_indices] + test_sessions = [ordered_sessions[i] for i in fold.test_indices] + test_prices = [ordered_prices[i] for i in fold.test_indices] if not test_labels: continue + execution_model = _build_execution_model(cost_model=cost_model, fold_seed=fold_idx) + b0_pred = _baseline_b0_pred(train_labels) + m1_pred = _m1_pred(train_labels) fold_results.append( BacktestFoldResult( fold_index=fold_idx, @@ -139,11 +152,41 @@ def run_v2_backtest_pipeline( train_label_distribution=_label_dist(train_labels), test_label_distribution=_label_dist(test_labels), baseline_scores=[ - BaselineScore(name="B0", accuracy=_baseline_b0(train_labels, test_labels)), - BaselineScore(name="B1", accuracy=_score_constant(1, test_labels)), + BaselineScore( + name="B0", + accuracy=_score_constant(b0_pred, test_labels), + cost_adjusted_accuracy=_score_with_execution( + prediction=b0_pred, + actual=test_labels, + sessions=test_sessions, + reference_prices=test_prices, + execution_model=execution_model, + commission_bps=float(cost_model.commission_bps or 0.0), + ), + ), + BaselineScore( + name="B1", + accuracy=_score_constant(1, test_labels), + 
cost_adjusted_accuracy=_score_with_execution( + prediction=1, + actual=test_labels, + sessions=test_sessions, + reference_prices=test_prices, + execution_model=execution_model, + commission_bps=float(cost_model.commission_bps or 0.0), + ), + ), BaselineScore( name="M1", - accuracy=_score_constant(_m1_pred(train_labels), test_labels), + accuracy=_score_constant(m1_pred, test_labels), + cost_adjusted_accuracy=_score_with_execution( + prediction=m1_pred, + actual=test_labels, + sessions=test_sessions, + reference_prices=test_prices, + execution_model=execution_model, + commission_bps=float(cost_model.commission_bps or 0.0), + ), ), ], ) @@ -176,12 +219,15 @@ def _score_constant(pred: int, actual: Sequence[int]) -> float: def _baseline_b0(train_labels: Sequence[int], test_labels: Sequence[int]) -> float: + return _score_constant(_baseline_b0_pred(train_labels), test_labels) + + +def _baseline_b0_pred(train_labels: Sequence[int]) -> int: if not train_labels: - return _score_constant(0, test_labels) + return 0 # Majority-class baseline from training fold. 
choices = (-1, 0, 1) - pred = max(choices, key=lambda c: train_labels.count(c)) - return _score_constant(pred, test_labels) + return max(choices, key=lambda c: train_labels.count(c)) def _m1_pred(train_labels: Sequence[int]) -> int: @@ -190,6 +236,52 @@ def _m1_pred(train_labels: Sequence[int]) -> int: return train_labels[-1] +def _build_execution_model(*, cost_model: BacktestCostModel, fold_seed: int) -> BacktestExecutionModel: + return BacktestExecutionModel( + ExecutionAssumptions( + slippage_bps_by_session=dict(cost_model.slippage_bps_by_session or {}), + failure_rate_by_session=dict(cost_model.failure_rate_by_session or {}), + partial_fill_rate_by_session=dict(cost_model.partial_fill_rate_by_session or {}), + seed=fold_seed, + ) + ) + + +def _score_with_execution( + *, + prediction: int, + actual: Sequence[int], + sessions: Sequence[str], + reference_prices: Sequence[float], + execution_model: BacktestExecutionModel, + commission_bps: float, +) -> float: + if not actual: + return 0.0 + contributions: list[float] = [] + for label, session_id, reference_price in zip(actual, sessions, reference_prices, strict=True): + if prediction == 0: + contributions.append(1.0 if label == 0 else 0.0) + continue + side = "BUY" if prediction > 0 else "SELL" + execution = execution_model.simulate( + ExecutionRequest( + side=side, + session_id=session_id, + qty=100, + reference_price=reference_price, + ) + ) + if execution.status == "REJECTED": + contributions.append(0.0) + continue + fill_ratio = execution.filled_qty / 100.0 + cost_penalty = min(0.99, (commission_bps + execution.slippage_bps) / 10000.0) + correctness = 1.0 if prediction == label else 0.0 + contributions.append(correctness * fill_ratio * (1.0 - cost_penalty)) + return mean(contributions) + + def _build_run_id(*, n_entries: int, n_folds: int, sessions: Sequence[str]) -> str: sess_key = "_".join(sessions) return f"v2p-e{n_entries}-f{n_folds}-s{sess_key}" diff --git a/tests/test_backtest_cost_guard.py 
b/tests/test_backtest_cost_guard.py index 6c73a30..bc315f6 100644 --- a/tests/test_backtest_cost_guard.py +++ b/tests/test_backtest_cost_guard.py @@ -10,6 +10,7 @@ def test_valid_backtest_cost_model_passes() -> None: commission_bps=5.0, slippage_bps_by_session={"KRX_REG": 10.0, "US_PRE": 50.0}, failure_rate_by_session={"KRX_REG": 0.01, "US_PRE": 0.08}, + partial_fill_rate_by_session={"KRX_REG": 0.1, "US_PRE": 0.2}, unfavorable_fill_required=True, ) validate_backtest_cost_model(model=model, required_sessions=["KRX_REG", "US_PRE"]) @@ -20,6 +21,7 @@ def test_missing_required_slippage_session_raises() -> None: commission_bps=5.0, slippage_bps_by_session={"KRX_REG": 10.0}, failure_rate_by_session={"KRX_REG": 0.01, "US_PRE": 0.08}, + partial_fill_rate_by_session={"KRX_REG": 0.1, "US_PRE": 0.2}, unfavorable_fill_required=True, ) with pytest.raises(ValueError, match="missing slippage_bps_by_session.*US_PRE"): @@ -31,6 +33,7 @@ def test_missing_required_failure_rate_session_raises() -> None: commission_bps=5.0, slippage_bps_by_session={"KRX_REG": 10.0, "US_PRE": 50.0}, failure_rate_by_session={"KRX_REG": 0.01}, + partial_fill_rate_by_session={"KRX_REG": 0.1, "US_PRE": 0.2}, unfavorable_fill_required=True, ) with pytest.raises(ValueError, match="missing failure_rate_by_session.*US_PRE"): @@ -42,6 +45,7 @@ def test_invalid_failure_rate_range_raises() -> None: commission_bps=5.0, slippage_bps_by_session={"KRX_REG": 10.0}, failure_rate_by_session={"KRX_REG": 1.2}, + partial_fill_rate_by_session={"KRX_REG": 0.2}, unfavorable_fill_required=True, ) with pytest.raises(ValueError, match="failure rate must be within"): @@ -53,6 +57,7 @@ def test_unfavorable_fill_requirement_cannot_be_disabled() -> None: commission_bps=5.0, slippage_bps_by_session={"KRX_REG": 10.0}, failure_rate_by_session={"KRX_REG": 0.02}, + partial_fill_rate_by_session={"KRX_REG": 0.2}, unfavorable_fill_required=False, ) with pytest.raises(ValueError, match="unfavorable_fill_required must be True"): @@ -65,6 +70,7 
@@ def test_non_finite_commission_rejected(bad_commission: float) -> None: commission_bps=bad_commission, slippage_bps_by_session={"KRX_REG": 10.0}, failure_rate_by_session={"KRX_REG": 0.02}, + partial_fill_rate_by_session={"KRX_REG": 0.2}, unfavorable_fill_required=True, ) with pytest.raises(ValueError, match="commission_bps"): @@ -77,7 +83,33 @@ def test_non_finite_slippage_rejected(bad_slippage: float) -> None: commission_bps=5.0, slippage_bps_by_session={"KRX_REG": bad_slippage}, failure_rate_by_session={"KRX_REG": 0.02}, + partial_fill_rate_by_session={"KRX_REG": 0.2}, unfavorable_fill_required=True, ) with pytest.raises(ValueError, match="slippage bps"): validate_backtest_cost_model(model=model, required_sessions=["KRX_REG"]) + + +def test_missing_required_partial_fill_session_raises() -> None: + model = BacktestCostModel( + commission_bps=5.0, + slippage_bps_by_session={"KRX_REG": 10.0, "US_PRE": 50.0}, + failure_rate_by_session={"KRX_REG": 0.01, "US_PRE": 0.08}, + partial_fill_rate_by_session={"KRX_REG": 0.1}, + unfavorable_fill_required=True, + ) + with pytest.raises(ValueError, match="missing partial_fill_rate_by_session.*US_PRE"): + validate_backtest_cost_model(model=model, required_sessions=["KRX_REG", "US_PRE"]) + + +@pytest.mark.parametrize("bad_partial_fill", [float("nan"), float("inf"), float("-inf"), -0.1, 1.1]) +def test_invalid_partial_fill_rate_rejected(bad_partial_fill: float) -> None: + model = BacktestCostModel( + commission_bps=5.0, + slippage_bps_by_session={"KRX_REG": 10.0}, + failure_rate_by_session={"KRX_REG": 0.02}, + partial_fill_rate_by_session={"KRX_REG": bad_partial_fill}, + unfavorable_fill_required=True, + ) + with pytest.raises(ValueError, match="partial fill rate must be within"): + validate_backtest_cost_model(model=model, required_sessions=["KRX_REG"]) diff --git a/tests/test_backtest_pipeline_integration.py b/tests/test_backtest_pipeline_integration.py index c0ad496..d63a540 100644 --- 
a/tests/test_backtest_pipeline_integration.py +++ b/tests/test_backtest_pipeline_integration.py @@ -35,6 +35,7 @@ def _cost_model() -> BacktestCostModel: commission_bps=3.0, slippage_bps_by_session={"KRX_REG": 10.0, "US_PRE": 50.0}, failure_rate_by_session={"KRX_REG": 0.01, "US_PRE": 0.08}, + partial_fill_rate_by_session={"KRX_REG": 0.05, "US_PRE": 0.2}, unfavorable_fill_required=True, ) @@ -71,6 +72,7 @@ def test_pipeline_happy_path_returns_fold_and_artifact_contract() -> None: assert names == {"B0", "B1", "M1"} for score in fold.baseline_scores: assert 0.0 <= score.accuracy <= 1.0 + assert 0.0 <= score.cost_adjusted_accuracy <= 1.0 def test_pipeline_cost_guard_fail_fast() -> None: @@ -78,6 +80,7 @@ def test_pipeline_cost_guard_fail_fast() -> None: commission_bps=3.0, slippage_bps_by_session={"KRX_REG": 10.0}, failure_rate_by_session={"KRX_REG": 0.01}, + partial_fill_rate_by_session={"KRX_REG": 0.05}, unfavorable_fill_required=True, ) try: @@ -166,3 +169,45 @@ def test_pipeline_rejects_minutes_spec_when_timestamp_missing() -> None: assert "BacktestBar.timestamp is required" in str(exc) else: raise AssertionError("expected timestamp validation error") + + +def test_pipeline_fold_scores_reflect_cost_and_execution_effects() -> None: + cfg = dict( + bars=_bars(), + entry_indices=[0, 1, 2, 3, 4, 5, 6, 7], + side=1, + triple_barrier_spec=TripleBarrierSpec( + take_profit_pct=0.02, + stop_loss_pct=0.01, + max_holding_minutes=3, + ), + walk_forward=WalkForwardConfig( + train_size=4, + test_size=2, + step_size=2, + purge_size=1, + embargo_size=1, + min_train_size=3, + ), + ) + optimistic = BacktestCostModel( + commission_bps=0.0, + slippage_bps_by_session={"KRX_REG": 0.0, "US_PRE": 0.0}, + failure_rate_by_session={"KRX_REG": 0.0, "US_PRE": 0.0}, + partial_fill_rate_by_session={"KRX_REG": 0.0, "US_PRE": 0.0}, + unfavorable_fill_required=True, + ) + conservative = BacktestCostModel( + commission_bps=10.0, + slippage_bps_by_session={"KRX_REG": 30.0, "US_PRE": 80.0}, + 
failure_rate_by_session={"KRX_REG": 0.2, "US_PRE": 0.4}, + partial_fill_rate_by_session={"KRX_REG": 0.5, "US_PRE": 0.7}, + unfavorable_fill_required=True, + ) + optimistic_out = run_v2_backtest_pipeline(cost_model=optimistic, **cfg) + conservative_out = run_v2_backtest_pipeline(cost_model=conservative, **cfg) + + assert optimistic_out.folds and conservative_out.folds + optimistic_score = optimistic_out.folds[0].baseline_scores[1].cost_adjusted_accuracy + conservative_score = conservative_out.folds[0].baseline_scores[1].cost_adjusted_accuracy + assert conservative_score < optimistic_score From ed713fdf4015eca17d3a0810bab7c6f859463406 Mon Sep 17 00:00:00 2001 From: agentson Date: Mon, 2 Mar 2026 02:24:01 +0900 Subject: [PATCH 085/109] style: wrap long helper signature in backtest pipeline --- src/analysis/backtest_pipeline.py | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/src/analysis/backtest_pipeline.py b/src/analysis/backtest_pipeline.py index ce27f1f..da39cc5 100644 --- a/src/analysis/backtest_pipeline.py +++ b/src/analysis/backtest_pipeline.py @@ -236,7 +236,11 @@ def _m1_pred(train_labels: Sequence[int]) -> int: return train_labels[-1] -def _build_execution_model(*, cost_model: BacktestCostModel, fold_seed: int) -> BacktestExecutionModel: +def _build_execution_model( + *, + cost_model: BacktestCostModel, + fold_seed: int, +) -> BacktestExecutionModel: return BacktestExecutionModel( ExecutionAssumptions( slippage_bps_by_session=dict(cost_model.slippage_bps_by_session or {}), From d4f37ee392c8d84ecd0bbcc5760d1ddfbdc28548 Mon Sep 17 00:00:00 2001 From: agentson Date: Mon, 2 Mar 2026 02:35:54 +0900 Subject: [PATCH 086/109] trade: apply runtime strategy/fx pnl split on sell paths (#370) --- docs/ouroboros/01_requirements_registry.md | 2 +- docs/ouroboros/80_implementation_audit.md | 7 +- src/db.py | 4 +- src/main.py | 131 ++++++++++++++++++++- tests/test_main.py | 25 ++++ 5 files changed, 161 insertions(+), 8 deletions(-) diff --git 
a/docs/ouroboros/01_requirements_registry.md b/docs/ouroboros/01_requirements_registry.md index 0a49bc7..d04c848 100644 --- a/docs/ouroboros/01_requirements_registry.md +++ b/docs/ouroboros/01_requirements_registry.md @@ -1,6 +1,6 @@