Compare commits
2 Commits
feature/v3
...
feature/is
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
2406a80782 | ||
|
|
2e394cd17c |
@@ -13,6 +13,8 @@ jobs:
|
||||
steps:
|
||||
- name: Checkout
|
||||
uses: actions/checkout@v4
|
||||
with:
|
||||
fetch-depth: 0
|
||||
|
||||
- name: Set up Python
|
||||
uses: actions/setup-python@v5
|
||||
@@ -26,7 +28,18 @@ jobs:
|
||||
run: python3 scripts/session_handover_check.py --strict
|
||||
|
||||
- name: Validate governance assets
|
||||
run: python3 scripts/validate_governance_assets.py
|
||||
run: |
|
||||
RANGE=""
|
||||
if [ "${{ github.event_name }}" = "pull_request" ] && [ -n "${{ github.event.pull_request.base.sha }}" ]; then
|
||||
RANGE="${{ github.event.pull_request.base.sha }}...${{ github.sha }}"
|
||||
elif [ -n "${{ github.event.before }}" ] && [ "${{ github.event.before }}" != "0000000000000000000000000000000000000000" ]; then
|
||||
RANGE="${{ github.event.before }}...${{ github.sha }}"
|
||||
fi
|
||||
if [ -n "$RANGE" ]; then
|
||||
python3 scripts/validate_governance_assets.py "$RANGE"
|
||||
else
|
||||
python3 scripts/validate_governance_assets.py
|
||||
fi
|
||||
|
||||
- name: Validate Ouroboros docs
|
||||
run: python3 scripts/validate_ouroboros_docs.py
|
||||
|
||||
@@ -3,9 +3,12 @@
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
import subprocess
|
||||
import sys
|
||||
from pathlib import Path
|
||||
|
||||
REQUIREMENTS_REGISTRY = "docs/ouroboros/01_requirements_registry.md"
|
||||
|
||||
|
||||
def must_contain(path: Path, required: list[str], errors: list[str]) -> None:
|
||||
if not path.exists():
|
||||
@@ -17,8 +20,64 @@ def must_contain(path: Path, required: list[str], errors: list[str]) -> None:
|
||||
errors.append(f"{path}: missing required token -> {token}")
|
||||
|
||||
|
||||
def normalize_changed_path(path: str) -> str:
    """Canonicalize a changed-file path: trim whitespace, force forward slashes, drop a leading './'."""
    cleaned = path.strip().replace("\\", "/")
    return cleaned[2:] if cleaned.startswith("./") else cleaned
|
||||
|
||||
|
||||
def is_policy_file(path: str) -> bool:
    """Return True for Ouroboros policy docs: markdown under docs/ouroboros/, excluding the registry itself."""
    candidate = normalize_changed_path(path)
    in_policy_tree = candidate.startswith("docs/ouroboros/") and candidate.endswith(".md")
    return in_policy_tree and candidate != REQUIREMENTS_REGISTRY
|
||||
|
||||
|
||||
def load_changed_files(args: list[str], errors: list[str]) -> list[str]:
    """Resolve CLI arguments into a normalized list of changed file paths.

    A single argument containing ".." (e.g. BASE..HEAD or BASE...HEAD) is
    treated as a git revision range and expanded via `git diff --name-only`;
    any other argument list is taken as explicit paths. Problems running git
    are reported through *errors* and yield an empty result.
    """
    if not args:
        return []

    is_range_spec = len(args) == 1 and ".." in args[0]
    if not is_range_spec:
        # Explicit file list: skip blank entries, normalize the rest.
        return [normalize_changed_path(entry) for entry in args if entry.strip()]

    spec = args[0]
    try:
        result = subprocess.run(
            ["git", "diff", "--name-only", spec],
            check=True,
            capture_output=True,
            text=True,
        )
    except (subprocess.CalledProcessError, FileNotFoundError) as exc:
        errors.append(f"failed to load changed files from range '{spec}': {exc}")
        return []

    names = (line for line in result.stdout.splitlines() if line.strip())
    return [normalize_changed_path(name) for name in names]
|
||||
|
||||
|
||||
def validate_registry_sync(changed_files: list[str], errors: list[str]) -> None:
    """Flag policy-doc edits that ship without a matching requirements-registry update.

    No-op when the changed-file list is empty (i.e. no diff information was
    provided); otherwise appends a single error when any policy file changed
    but the registry did not.
    """
    if not changed_files:
        return

    unique_paths = set(changed_files)
    touched_policy = any(is_policy_file(p) for p in unique_paths)
    if touched_policy and REQUIREMENTS_REGISTRY not in unique_paths:
        errors.append(
            "policy file changed without updating docs/ouroboros/01_requirements_registry.md"
        )
|
||||
|
||||
|
||||
def main() -> int:
|
||||
errors: list[str] = []
|
||||
changed_files = load_changed_files(sys.argv[1:], errors)
|
||||
|
||||
pr_template = Path(".gitea/PULL_REQUEST_TEMPLATE.md")
|
||||
issue_template = Path(".gitea/ISSUE_TEMPLATE/runtime_verification.md")
|
||||
@@ -81,6 +140,8 @@ def main() -> int:
|
||||
if not handover_script.exists():
|
||||
errors.append(f"missing file: {handover_script}")
|
||||
|
||||
validate_registry_sync(changed_files, errors)
|
||||
|
||||
if errors:
|
||||
print("[FAIL] governance asset validation failed")
|
||||
for err in errors:
|
||||
|
||||
@@ -5,9 +5,7 @@ Implements first-touch labeling with upper/lower/time barriers.
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
import warnings
|
||||
from dataclasses import dataclass
|
||||
from datetime import datetime, timedelta
|
||||
from typing import Literal, Sequence
|
||||
|
||||
|
||||
@@ -18,18 +16,9 @@ TieBreakMode = Literal["stop_first", "take_first"]
|
||||
class TripleBarrierSpec:
|
||||
take_profit_pct: float
|
||||
stop_loss_pct: float
|
||||
max_holding_bars: int | None = None
|
||||
max_holding_minutes: int | None = None
|
||||
max_holding_bars: int
|
||||
tie_break: TieBreakMode = "stop_first"
|
||||
|
||||
def __post_init__(self) -> None:
|
||||
if self.max_holding_minutes is None and self.max_holding_bars is None:
|
||||
raise ValueError("one of max_holding_minutes or max_holding_bars must be set")
|
||||
if self.max_holding_minutes is not None and self.max_holding_minutes <= 0:
|
||||
raise ValueError("max_holding_minutes must be positive")
|
||||
if self.max_holding_bars is not None and self.max_holding_bars <= 0:
|
||||
raise ValueError("max_holding_bars must be positive")
|
||||
|
||||
|
||||
@dataclass(frozen=True)
|
||||
class TripleBarrierLabel:
|
||||
@@ -46,7 +35,6 @@ def label_with_triple_barrier(
|
||||
highs: Sequence[float],
|
||||
lows: Sequence[float],
|
||||
closes: Sequence[float],
|
||||
timestamps: Sequence[datetime] | None = None,
|
||||
entry_index: int,
|
||||
side: int,
|
||||
spec: TripleBarrierSpec,
|
||||
@@ -65,6 +53,8 @@ def label_with_triple_barrier(
|
||||
raise ValueError("highs, lows, closes lengths must match")
|
||||
if entry_index < 0 or entry_index >= len(closes):
|
||||
raise IndexError("entry_index out of range")
|
||||
if spec.max_holding_bars <= 0:
|
||||
raise ValueError("max_holding_bars must be positive")
|
||||
|
||||
entry_price = float(closes[entry_index])
|
||||
if entry_price <= 0:
|
||||
@@ -78,31 +68,13 @@ def label_with_triple_barrier(
|
||||
upper = entry_price * (1.0 + spec.stop_loss_pct)
|
||||
lower = entry_price * (1.0 - spec.take_profit_pct)
|
||||
|
||||
if spec.max_holding_minutes is not None:
|
||||
if timestamps is None:
|
||||
raise ValueError("timestamps are required when max_holding_minutes is set")
|
||||
if len(timestamps) != len(closes):
|
||||
raise ValueError("timestamps length must match OHLC lengths")
|
||||
expiry_timestamp = timestamps[entry_index] + timedelta(minutes=spec.max_holding_minutes)
|
||||
last_index = entry_index
|
||||
for idx in range(entry_index + 1, len(closes)):
|
||||
if timestamps[idx] > expiry_timestamp:
|
||||
break
|
||||
last_index = idx
|
||||
else:
|
||||
assert spec.max_holding_bars is not None
|
||||
warnings.warn(
|
||||
"TripleBarrierSpec.max_holding_bars is deprecated; use max_holding_minutes with timestamps instead.",
|
||||
DeprecationWarning,
|
||||
stacklevel=2,
|
||||
)
|
||||
last_index = min(len(closes) - 1, entry_index + spec.max_holding_bars)
|
||||
last_index = min(len(closes) - 1, entry_index + spec.max_holding_bars)
|
||||
for idx in range(entry_index + 1, last_index + 1):
|
||||
high_price = float(highs[idx])
|
||||
low_price = float(lows[idx])
|
||||
h = float(highs[idx])
|
||||
l = float(lows[idx])
|
||||
|
||||
up_touch = high_price >= upper
|
||||
down_touch = low_price <= lower
|
||||
up_touch = h >= upper
|
||||
down_touch = l <= lower
|
||||
if not up_touch and not down_touch:
|
||||
continue
|
||||
|
||||
|
||||
75
src/db.py
75
src/db.py
@@ -109,7 +109,6 @@ def init_db(db_path: str) -> sqlite3.Connection:
|
||||
stock_code TEXT NOT NULL,
|
||||
market TEXT NOT NULL,
|
||||
exchange_code TEXT NOT NULL,
|
||||
session_id TEXT DEFAULT 'UNKNOWN',
|
||||
action TEXT NOT NULL,
|
||||
confidence INTEGER NOT NULL,
|
||||
rationale TEXT NOT NULL,
|
||||
@@ -122,27 +121,6 @@ def init_db(db_path: str) -> sqlite3.Connection:
|
||||
)
|
||||
"""
|
||||
)
|
||||
decision_columns = {
|
||||
row[1]
|
||||
for row in conn.execute("PRAGMA table_info(decision_logs)").fetchall()
|
||||
}
|
||||
if "session_id" not in decision_columns:
|
||||
conn.execute("ALTER TABLE decision_logs ADD COLUMN session_id TEXT DEFAULT 'UNKNOWN'")
|
||||
conn.execute(
|
||||
"""
|
||||
UPDATE decision_logs
|
||||
SET session_id = 'UNKNOWN'
|
||||
WHERE session_id IS NULL OR session_id = ''
|
||||
"""
|
||||
)
|
||||
if "outcome_pnl" not in decision_columns:
|
||||
conn.execute("ALTER TABLE decision_logs ADD COLUMN outcome_pnl REAL")
|
||||
if "outcome_accuracy" not in decision_columns:
|
||||
conn.execute("ALTER TABLE decision_logs ADD COLUMN outcome_accuracy INTEGER")
|
||||
if "reviewed" not in decision_columns:
|
||||
conn.execute("ALTER TABLE decision_logs ADD COLUMN reviewed INTEGER DEFAULT 0")
|
||||
if "review_notes" not in decision_columns:
|
||||
conn.execute("ALTER TABLE decision_logs ADD COLUMN review_notes TEXT")
|
||||
|
||||
conn.execute(
|
||||
"""
|
||||
@@ -312,47 +290,22 @@ def _resolve_session_id(*, market: str, session_id: str | None) -> str:
|
||||
|
||||
|
||||
def get_latest_buy_trade(
|
||||
conn: sqlite3.Connection,
|
||||
stock_code: str,
|
||||
market: str,
|
||||
exchange_code: str | None = None,
|
||||
conn: sqlite3.Connection, stock_code: str, market: str
|
||||
) -> dict[str, Any] | None:
|
||||
"""Fetch the most recent BUY trade for a stock and market."""
|
||||
if exchange_code:
|
||||
cursor = conn.execute(
|
||||
"""
|
||||
SELECT decision_id, price, quantity
|
||||
FROM trades
|
||||
WHERE stock_code = ?
|
||||
AND market = ?
|
||||
AND action = 'BUY'
|
||||
AND decision_id IS NOT NULL
|
||||
AND (
|
||||
exchange_code = ?
|
||||
OR exchange_code IS NULL
|
||||
OR exchange_code = ''
|
||||
)
|
||||
ORDER BY
|
||||
CASE WHEN exchange_code = ? THEN 0 ELSE 1 END,
|
||||
timestamp DESC
|
||||
LIMIT 1
|
||||
""",
|
||||
(stock_code, market, exchange_code, exchange_code),
|
||||
)
|
||||
else:
|
||||
cursor = conn.execute(
|
||||
"""
|
||||
SELECT decision_id, price, quantity
|
||||
FROM trades
|
||||
WHERE stock_code = ?
|
||||
AND market = ?
|
||||
AND action = 'BUY'
|
||||
AND decision_id IS NOT NULL
|
||||
ORDER BY timestamp DESC
|
||||
LIMIT 1
|
||||
""",
|
||||
(stock_code, market),
|
||||
)
|
||||
cursor = conn.execute(
|
||||
"""
|
||||
SELECT decision_id, price, quantity
|
||||
FROM trades
|
||||
WHERE stock_code = ?
|
||||
AND market = ?
|
||||
AND action = 'BUY'
|
||||
AND decision_id IS NOT NULL
|
||||
ORDER BY timestamp DESC
|
||||
LIMIT 1
|
||||
""",
|
||||
(stock_code, market),
|
||||
)
|
||||
row = cursor.fetchone()
|
||||
if not row:
|
||||
return None
|
||||
|
||||
@@ -9,7 +9,6 @@ This module:
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
import ast
|
||||
import json
|
||||
import logging
|
||||
import sqlite3
|
||||
@@ -29,24 +28,24 @@ from src.logging.decision_logger import DecisionLogger
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
STRATEGIES_DIR = Path("src/strategies")
|
||||
STRATEGY_TEMPLATE = """\
|
||||
\"\"\"Auto-generated strategy: {name}
|
||||
STRATEGY_TEMPLATE = textwrap.dedent("""\
|
||||
\"\"\"Auto-generated strategy: {name}
|
||||
|
||||
Generated at: {timestamp}
|
||||
Rationale: {rationale}
|
||||
\"\"\"
|
||||
Generated at: {timestamp}
|
||||
Rationale: {rationale}
|
||||
\"\"\"
|
||||
|
||||
from __future__ import annotations
|
||||
from typing import Any
|
||||
from src.strategies.base import BaseStrategy
|
||||
from __future__ import annotations
|
||||
from typing import Any
|
||||
from src.strategies.base import BaseStrategy
|
||||
|
||||
|
||||
class {class_name}(BaseStrategy):
|
||||
\"\"\"Strategy: {name}\"\"\"
|
||||
class {class_name}(BaseStrategy):
|
||||
\"\"\"Strategy: {name}\"\"\"
|
||||
|
||||
def evaluate(self, market_data: dict[str, Any]) -> dict[str, Any]:
|
||||
{body}
|
||||
"""
|
||||
def evaluate(self, market_data: dict[str, Any]) -> dict[str, Any]:
|
||||
{body}
|
||||
""")
|
||||
|
||||
|
||||
class EvolutionOptimizer:
|
||||
@@ -236,8 +235,7 @@ class EvolutionOptimizer:
|
||||
file_path = STRATEGIES_DIR / file_name
|
||||
|
||||
# Indent the body for the class method
|
||||
normalized_body = textwrap.dedent(body).strip()
|
||||
indented_body = textwrap.indent(normalized_body, " ")
|
||||
indented_body = textwrap.indent(body, " ")
|
||||
|
||||
# Generate rationale from patterns
|
||||
rationale = f"Auto-evolved from {len(failures)} failures. "
|
||||
@@ -249,16 +247,9 @@ class EvolutionOptimizer:
|
||||
timestamp=datetime.now(UTC).isoformat(),
|
||||
rationale=rationale,
|
||||
class_name=class_name,
|
||||
body=indented_body.rstrip(),
|
||||
body=indented_body.strip(),
|
||||
)
|
||||
|
||||
try:
|
||||
parsed = ast.parse(content, filename=str(file_path))
|
||||
compile(parsed, filename=str(file_path), mode="exec")
|
||||
except SyntaxError as exc:
|
||||
logger.warning("Generated strategy failed syntax validation: %s", exc)
|
||||
return None
|
||||
|
||||
file_path.write_text(content)
|
||||
logger.info("Generated strategy file: %s", file_path)
|
||||
return file_path
|
||||
|
||||
@@ -19,7 +19,6 @@ class DecisionLog:
|
||||
stock_code: str
|
||||
market: str
|
||||
exchange_code: str
|
||||
session_id: str
|
||||
action: str
|
||||
confidence: int
|
||||
rationale: str
|
||||
@@ -48,7 +47,6 @@ class DecisionLogger:
|
||||
rationale: str,
|
||||
context_snapshot: dict[str, Any],
|
||||
input_data: dict[str, Any],
|
||||
session_id: str | None = None,
|
||||
) -> str:
|
||||
"""Log a trading decision with full context.
|
||||
|
||||
@@ -61,22 +59,20 @@ class DecisionLogger:
|
||||
rationale: Reasoning for the decision
|
||||
context_snapshot: L1-L7 context snapshot at decision time
|
||||
input_data: Market data inputs (price, volume, orderbook, etc.)
|
||||
session_id: Runtime session identifier
|
||||
|
||||
Returns:
|
||||
decision_id: Unique identifier for this decision
|
||||
"""
|
||||
decision_id = str(uuid.uuid4())
|
||||
timestamp = datetime.now(UTC).isoformat()
|
||||
resolved_session = session_id or "UNKNOWN"
|
||||
|
||||
self.conn.execute(
|
||||
"""
|
||||
INSERT INTO decision_logs (
|
||||
decision_id, timestamp, stock_code, market, exchange_code,
|
||||
session_id, action, confidence, rationale, context_snapshot, input_data
|
||||
action, confidence, rationale, context_snapshot, input_data
|
||||
)
|
||||
VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)
|
||||
VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?)
|
||||
""",
|
||||
(
|
||||
decision_id,
|
||||
@@ -84,7 +80,6 @@ class DecisionLogger:
|
||||
stock_code,
|
||||
market,
|
||||
exchange_code,
|
||||
resolved_session,
|
||||
action,
|
||||
confidence,
|
||||
rationale,
|
||||
@@ -111,7 +106,7 @@ class DecisionLogger:
|
||||
query = """
|
||||
SELECT
|
||||
decision_id, timestamp, stock_code, market, exchange_code,
|
||||
session_id, action, confidence, rationale, context_snapshot, input_data,
|
||||
action, confidence, rationale, context_snapshot, input_data,
|
||||
outcome_pnl, outcome_accuracy, reviewed, review_notes
|
||||
FROM decision_logs
|
||||
WHERE reviewed = 0 AND confidence >= ?
|
||||
@@ -173,7 +168,7 @@ class DecisionLogger:
|
||||
"""
|
||||
SELECT
|
||||
decision_id, timestamp, stock_code, market, exchange_code,
|
||||
session_id, action, confidence, rationale, context_snapshot, input_data,
|
||||
action, confidence, rationale, context_snapshot, input_data,
|
||||
outcome_pnl, outcome_accuracy, reviewed, review_notes
|
||||
FROM decision_logs
|
||||
WHERE decision_id = ?
|
||||
@@ -201,7 +196,7 @@ class DecisionLogger:
|
||||
"""
|
||||
SELECT
|
||||
decision_id, timestamp, stock_code, market, exchange_code,
|
||||
session_id, action, confidence, rationale, context_snapshot, input_data,
|
||||
action, confidence, rationale, context_snapshot, input_data,
|
||||
outcome_pnl, outcome_accuracy, reviewed, review_notes
|
||||
FROM decision_logs
|
||||
WHERE confidence >= ?
|
||||
@@ -228,14 +223,13 @@ class DecisionLogger:
|
||||
stock_code=row[2],
|
||||
market=row[3],
|
||||
exchange_code=row[4],
|
||||
session_id=row[5] or "UNKNOWN",
|
||||
action=row[6],
|
||||
confidence=row[7],
|
||||
rationale=row[8],
|
||||
context_snapshot=json.loads(row[9]),
|
||||
input_data=json.loads(row[10]),
|
||||
outcome_pnl=row[11],
|
||||
outcome_accuracy=row[12],
|
||||
reviewed=bool(row[13]),
|
||||
review_notes=row[14],
|
||||
action=row[5],
|
||||
confidence=row[6],
|
||||
rationale=row[7],
|
||||
context_snapshot=json.loads(row[8]),
|
||||
input_data=json.loads(row[9]),
|
||||
outcome_pnl=row[10],
|
||||
outcome_accuracy=row[11],
|
||||
reviewed=bool(row[12]),
|
||||
review_notes=row[13],
|
||||
)
|
||||
|
||||
22
src/main.py
22
src/main.py
@@ -217,7 +217,6 @@ async def sync_positions_from_broker(
|
||||
price=avg_price,
|
||||
market=log_market,
|
||||
exchange_code=market.exchange_code,
|
||||
session_id=get_session_info(market).session_id,
|
||||
mode=settings.MODE,
|
||||
)
|
||||
logger.info(
|
||||
@@ -1369,12 +1368,10 @@ async def trading_cycle(
|
||||
"pnl_pct": pnl_pct,
|
||||
}
|
||||
|
||||
runtime_session_id = get_session_info(market).session_id
|
||||
decision_id = decision_logger.log_decision(
|
||||
stock_code=stock_code,
|
||||
market=market.code,
|
||||
exchange_code=market.exchange_code,
|
||||
session_id=runtime_session_id,
|
||||
action=decision.action,
|
||||
confidence=decision.confidence,
|
||||
rationale=decision.rationale,
|
||||
@@ -1639,7 +1636,6 @@ async def trading_cycle(
|
||||
pnl=0.0,
|
||||
market=market.code,
|
||||
exchange_code=market.exchange_code,
|
||||
session_id=runtime_session_id,
|
||||
mode=settings.MODE if settings else "paper",
|
||||
)
|
||||
logger.info("Order result: %s", result.get("msg1", "OK"))
|
||||
@@ -1659,12 +1655,7 @@ async def trading_cycle(
|
||||
logger.warning("Telegram notification failed: %s", exc)
|
||||
|
||||
if decision.action == "SELL" and order_succeeded:
|
||||
buy_trade = get_latest_buy_trade(
|
||||
db_conn,
|
||||
stock_code,
|
||||
market.code,
|
||||
exchange_code=market.exchange_code,
|
||||
)
|
||||
buy_trade = get_latest_buy_trade(db_conn, stock_code, market.code)
|
||||
if buy_trade and buy_trade.get("price") is not None:
|
||||
buy_price = float(buy_trade["price"])
|
||||
buy_qty = int(buy_trade.get("quantity") or 1)
|
||||
@@ -1699,7 +1690,6 @@ async def trading_cycle(
|
||||
pnl=trade_pnl,
|
||||
market=market.code,
|
||||
exchange_code=market.exchange_code,
|
||||
session_id=runtime_session_id,
|
||||
selection_context=selection_context,
|
||||
decision_id=decision_id,
|
||||
mode=settings.MODE if settings else "paper",
|
||||
@@ -2507,12 +2497,10 @@ async def run_daily_session(
|
||||
"pnl_pct": pnl_pct,
|
||||
}
|
||||
|
||||
runtime_session_id = get_session_info(market).session_id
|
||||
decision_id = decision_logger.log_decision(
|
||||
stock_code=stock_code,
|
||||
market=market.code,
|
||||
exchange_code=market.exchange_code,
|
||||
session_id=runtime_session_id,
|
||||
action=decision.action,
|
||||
confidence=decision.confidence,
|
||||
rationale=decision.rationale,
|
||||
@@ -2764,12 +2752,7 @@ async def run_daily_session(
|
||||
continue
|
||||
|
||||
if decision.action == "SELL" and order_succeeded:
|
||||
buy_trade = get_latest_buy_trade(
|
||||
db_conn,
|
||||
stock_code,
|
||||
market.code,
|
||||
exchange_code=market.exchange_code,
|
||||
)
|
||||
buy_trade = get_latest_buy_trade(db_conn, stock_code, market.code)
|
||||
if buy_trade and buy_trade.get("price") is not None:
|
||||
buy_price = float(buy_trade["price"])
|
||||
buy_qty = int(buy_trade.get("quantity") or 1)
|
||||
@@ -2794,7 +2777,6 @@ async def run_daily_session(
|
||||
pnl=trade_pnl,
|
||||
market=market.code,
|
||||
exchange_code=market.exchange_code,
|
||||
session_id=runtime_session_id,
|
||||
decision_id=decision_id,
|
||||
mode=settings.MODE,
|
||||
)
|
||||
|
||||
@@ -3,7 +3,7 @@
|
||||
import tempfile
|
||||
import os
|
||||
|
||||
from src.db import get_latest_buy_trade, get_open_position, init_db, log_trade
|
||||
from src.db import get_open_position, init_db, log_trade
|
||||
|
||||
|
||||
def test_get_open_position_returns_latest_buy() -> None:
|
||||
@@ -329,89 +329,3 @@ def test_log_trade_unknown_market_falls_back_to_unknown_session() -> None:
|
||||
row = conn.execute("SELECT session_id FROM trades ORDER BY id DESC LIMIT 1").fetchone()
|
||||
assert row is not None
|
||||
assert row[0] == "UNKNOWN"
|
||||
|
||||
|
||||
def test_get_latest_buy_trade_prefers_exchange_code_match() -> None:
|
||||
conn = init_db(":memory:")
|
||||
log_trade(
|
||||
conn=conn,
|
||||
stock_code="AAPL",
|
||||
action="BUY",
|
||||
confidence=80,
|
||||
rationale="legacy",
|
||||
quantity=10,
|
||||
price=120.0,
|
||||
market="US_NASDAQ",
|
||||
exchange_code="",
|
||||
decision_id="legacy-buy",
|
||||
)
|
||||
log_trade(
|
||||
conn=conn,
|
||||
stock_code="AAPL",
|
||||
action="BUY",
|
||||
confidence=85,
|
||||
rationale="matched",
|
||||
quantity=5,
|
||||
price=125.0,
|
||||
market="US_NASDAQ",
|
||||
exchange_code="NASD",
|
||||
decision_id="matched-buy",
|
||||
)
|
||||
matched = get_latest_buy_trade(
|
||||
conn,
|
||||
stock_code="AAPL",
|
||||
market="US_NASDAQ",
|
||||
exchange_code="NASD",
|
||||
)
|
||||
assert matched is not None
|
||||
assert matched["decision_id"] == "matched-buy"
|
||||
|
||||
|
||||
def test_decision_logs_session_id_migration_backfills_unknown() -> None:
|
||||
import sqlite3
|
||||
|
||||
with tempfile.NamedTemporaryFile(suffix=".db", delete=False) as f:
|
||||
db_path = f.name
|
||||
try:
|
||||
old_conn = sqlite3.connect(db_path)
|
||||
old_conn.execute(
|
||||
"""
|
||||
CREATE TABLE decision_logs (
|
||||
decision_id TEXT PRIMARY KEY,
|
||||
timestamp TEXT NOT NULL,
|
||||
stock_code TEXT NOT NULL,
|
||||
market TEXT NOT NULL,
|
||||
exchange_code TEXT NOT NULL,
|
||||
action TEXT NOT NULL,
|
||||
confidence INTEGER NOT NULL,
|
||||
rationale TEXT NOT NULL,
|
||||
context_snapshot TEXT NOT NULL,
|
||||
input_data TEXT NOT NULL
|
||||
)
|
||||
"""
|
||||
)
|
||||
old_conn.execute(
|
||||
"""
|
||||
INSERT INTO decision_logs (
|
||||
decision_id, timestamp, stock_code, market, exchange_code,
|
||||
action, confidence, rationale, context_snapshot, input_data
|
||||
) VALUES (
|
||||
'd1', '2026-01-01T00:00:00+00:00', 'AAPL', 'US_NASDAQ', 'NASD',
|
||||
'BUY', 80, 'legacy row', '{}', '{}'
|
||||
)
|
||||
"""
|
||||
)
|
||||
old_conn.commit()
|
||||
old_conn.close()
|
||||
|
||||
conn = init_db(db_path)
|
||||
columns = {row[1] for row in conn.execute("PRAGMA table_info(decision_logs)").fetchall()}
|
||||
assert "session_id" in columns
|
||||
row = conn.execute(
|
||||
"SELECT session_id FROM decision_logs WHERE decision_id='d1'"
|
||||
).fetchone()
|
||||
assert row is not None
|
||||
assert row[0] == "UNKNOWN"
|
||||
conn.close()
|
||||
finally:
|
||||
os.unlink(db_path)
|
||||
|
||||
@@ -49,7 +49,7 @@ def test_log_decision_creates_record(logger: DecisionLogger, db_conn: sqlite3.Co
|
||||
|
||||
# Verify record exists in database
|
||||
cursor = db_conn.execute(
|
||||
"SELECT decision_id, action, confidence, session_id FROM decision_logs WHERE decision_id = ?",
|
||||
"SELECT decision_id, action, confidence FROM decision_logs WHERE decision_id = ?",
|
||||
(decision_id,),
|
||||
)
|
||||
row = cursor.fetchone()
|
||||
@@ -57,7 +57,6 @@ def test_log_decision_creates_record(logger: DecisionLogger, db_conn: sqlite3.Co
|
||||
assert row[0] == decision_id
|
||||
assert row[1] == "BUY"
|
||||
assert row[2] == 85
|
||||
assert row[3] == "UNKNOWN"
|
||||
|
||||
|
||||
def test_log_decision_stores_context_snapshot(logger: DecisionLogger) -> None:
|
||||
@@ -85,24 +84,6 @@ def test_log_decision_stores_context_snapshot(logger: DecisionLogger) -> None:
|
||||
assert decision is not None
|
||||
assert decision.context_snapshot == context_snapshot
|
||||
assert decision.input_data == input_data
|
||||
assert decision.session_id == "UNKNOWN"
|
||||
|
||||
|
||||
def test_log_decision_stores_explicit_session_id(logger: DecisionLogger) -> None:
|
||||
decision_id = logger.log_decision(
|
||||
stock_code="AAPL",
|
||||
market="US_NASDAQ",
|
||||
exchange_code="NASD",
|
||||
action="BUY",
|
||||
confidence=88,
|
||||
rationale="session check",
|
||||
context_snapshot={},
|
||||
input_data={},
|
||||
session_id="US_PRE",
|
||||
)
|
||||
decision = logger.get_decision_by_id(decision_id)
|
||||
assert decision is not None
|
||||
assert decision.session_id == "US_PRE"
|
||||
|
||||
|
||||
def test_get_unreviewed_decisions(logger: DecisionLogger) -> None:
|
||||
@@ -297,7 +278,6 @@ def test_decision_log_dataclass() -> None:
|
||||
stock_code="005930",
|
||||
market="KR",
|
||||
exchange_code="KRX",
|
||||
session_id="KRX_REG",
|
||||
action="BUY",
|
||||
confidence=85,
|
||||
rationale="Test",
|
||||
@@ -306,7 +286,6 @@ def test_decision_log_dataclass() -> None:
|
||||
)
|
||||
|
||||
assert log.decision_id == "test-uuid"
|
||||
assert log.session_id == "KRX_REG"
|
||||
assert log.action == "BUY"
|
||||
assert log.confidence == 85
|
||||
assert log.reviewed is False
|
||||
|
||||
@@ -245,52 +245,6 @@ async def test_generate_strategy_creates_file(optimizer: EvolutionOptimizer, tmp
|
||||
assert "def evaluate" in strategy_path.read_text()
|
||||
|
||||
|
||||
@pytest.mark.asyncio
|
||||
async def test_generate_strategy_saves_valid_python_code(
|
||||
optimizer: EvolutionOptimizer, tmp_path: Path,
|
||||
) -> None:
|
||||
"""Test that syntactically valid generated code is saved."""
|
||||
failures = [{"decision_id": "1", "timestamp": "2024-01-15T09:30:00+00:00"}]
|
||||
|
||||
mock_response = Mock()
|
||||
mock_response.text = (
|
||||
'price = market_data.get("current_price", 0)\n'
|
||||
'if price > 0:\n'
|
||||
' return {"action": "BUY", "confidence": 80, "rationale": "Positive price"}\n'
|
||||
'return {"action": "HOLD", "confidence": 50, "rationale": "No signal"}\n'
|
||||
)
|
||||
|
||||
with patch.object(optimizer._client.aio.models, "generate_content", new=AsyncMock(return_value=mock_response)):
|
||||
with patch("src.evolution.optimizer.STRATEGIES_DIR", tmp_path):
|
||||
strategy_path = await optimizer.generate_strategy(failures)
|
||||
|
||||
assert strategy_path is not None
|
||||
assert strategy_path.exists()
|
||||
|
||||
|
||||
@pytest.mark.asyncio
|
||||
async def test_generate_strategy_blocks_invalid_python_code(
|
||||
optimizer: EvolutionOptimizer, tmp_path: Path, caplog: pytest.LogCaptureFixture,
|
||||
) -> None:
|
||||
"""Test that syntactically invalid generated code is not saved."""
|
||||
failures = [{"decision_id": "1", "timestamp": "2024-01-15T09:30:00+00:00"}]
|
||||
|
||||
mock_response = Mock()
|
||||
mock_response.text = (
|
||||
'if market_data.get("current_price", 0) > 0\n'
|
||||
' return {"action": "BUY", "confidence": 80, "rationale": "broken"}\n'
|
||||
)
|
||||
|
||||
with patch.object(optimizer._client.aio.models, "generate_content", new=AsyncMock(return_value=mock_response)):
|
||||
with patch("src.evolution.optimizer.STRATEGIES_DIR", tmp_path):
|
||||
with caplog.at_level("WARNING"):
|
||||
strategy_path = await optimizer.generate_strategy(failures)
|
||||
|
||||
assert strategy_path is None
|
||||
assert list(tmp_path.glob("*.py")) == []
|
||||
assert "failed syntax validation" in caplog.text
|
||||
|
||||
|
||||
@pytest.mark.asyncio
|
||||
async def test_generate_strategy_handles_api_error(optimizer: EvolutionOptimizer) -> None:
|
||||
"""Test that generate_strategy handles Gemini API errors gracefully."""
|
||||
|
||||
@@ -1,9 +1,5 @@
|
||||
from __future__ import annotations
|
||||
|
||||
from datetime import UTC, datetime, timedelta
|
||||
|
||||
import pytest
|
||||
|
||||
from src.analysis.triple_barrier import TripleBarrierSpec, label_with_triple_barrier
|
||||
|
||||
|
||||
@@ -133,52 +129,3 @@ def test_short_tie_break_modes() -> None:
|
||||
)
|
||||
assert out_take.label == 1
|
||||
assert out_take.touched == "take_profit"
|
||||
|
||||
|
||||
def test_minutes_time_barrier_consistent_across_sampling() -> None:
|
||||
base = datetime(2026, 2, 28, 9, 0, tzinfo=UTC)
|
||||
highs = [100.0, 100.5, 100.6, 100.4]
|
||||
lows = [100.0, 99.6, 99.4, 99.5]
|
||||
closes = [100.0, 100.1, 100.0, 100.0]
|
||||
spec = TripleBarrierSpec(
|
||||
take_profit_pct=0.02,
|
||||
stop_loss_pct=0.02,
|
||||
max_holding_minutes=5,
|
||||
)
|
||||
|
||||
out_1m = label_with_triple_barrier(
|
||||
highs=highs,
|
||||
lows=lows,
|
||||
closes=closes,
|
||||
timestamps=[base + timedelta(minutes=i) for i in range(4)],
|
||||
entry_index=0,
|
||||
side=1,
|
||||
spec=spec,
|
||||
)
|
||||
out_5m = label_with_triple_barrier(
|
||||
highs=highs,
|
||||
lows=lows,
|
||||
closes=closes,
|
||||
timestamps=[base + timedelta(minutes=5 * i) for i in range(4)],
|
||||
entry_index=0,
|
||||
side=1,
|
||||
spec=spec,
|
||||
)
|
||||
assert out_1m.touch_bar == 3
|
||||
assert out_5m.touch_bar == 1
|
||||
|
||||
|
||||
def test_bars_mode_emits_deprecation_warning() -> None:
|
||||
highs = [100, 101, 103]
|
||||
lows = [100, 99.6, 100]
|
||||
closes = [100, 100, 102]
|
||||
spec = TripleBarrierSpec(take_profit_pct=0.02, stop_loss_pct=0.01, max_holding_bars=3)
|
||||
with pytest.deprecated_call(match="max_holding_bars is deprecated"):
|
||||
label_with_triple_barrier(
|
||||
highs=highs,
|
||||
lows=lows,
|
||||
closes=closes,
|
||||
entry_index=0,
|
||||
side=1,
|
||||
spec=spec,
|
||||
)
|
||||
|
||||
81
tests/test_validate_governance_assets.py
Normal file
81
tests/test_validate_governance_assets.py
Normal file
@@ -0,0 +1,81 @@
|
||||
from __future__ import annotations
|
||||
|
||||
import importlib.util
|
||||
from pathlib import Path
|
||||
from types import SimpleNamespace
|
||||
|
||||
|
||||
def _load_module():
    """Import scripts/validate_governance_assets.py as a throwaway module object."""
    repo_root = Path(__file__).resolve().parents[1]
    script_path = repo_root / "scripts" / "validate_governance_assets.py"
    spec = importlib.util.spec_from_file_location("validate_governance_assets", script_path)
    assert spec is not None
    assert spec.loader is not None
    loaded = importlib.util.module_from_spec(spec)
    spec.loader.exec_module(loaded)
    return loaded
|
||||
|
||||
|
||||
def test_is_policy_file_detects_ouroboros_policy_docs() -> None:
    """Only markdown under docs/ouroboros/ — minus the registry — counts as policy."""
    mod = _load_module()
    cases = {
        "docs/ouroboros/85_loss_recovery_action_plan.md": True,
        "docs/ouroboros/01_requirements_registry.md": False,
        "docs/workflow.md": False,
        "docs/ouroboros/notes.txt": False,
    }
    for candidate, expected in cases.items():
        assert bool(mod.is_policy_file(candidate)) == expected
|
||||
|
||||
|
||||
def test_validate_registry_sync_requires_registry_update_when_policy_changes() -> None:
    """A policy-only change must be reported as an error."""
    mod = _load_module()
    collected: list[str] = []
    changed = ["docs/ouroboros/85_loss_recovery_action_plan.md"]
    mod.validate_registry_sync(changed, collected)
    assert collected
    assert "policy file changed without updating" in collected[0]
|
||||
|
||||
|
||||
def test_validate_registry_sync_passes_when_registry_included() -> None:
    """A policy change accompanied by a registry update produces no errors."""
    mod = _load_module()
    collected: list[str] = []
    changed = [
        "docs/ouroboros/85_loss_recovery_action_plan.md",
        "docs/ouroboros/01_requirements_registry.md",
    ]
    mod.validate_registry_sync(changed, collected)
    assert collected == []
|
||||
|
||||
|
||||
def test_load_changed_files_supports_explicit_paths() -> None:
    """Explicit path arguments are trimmed, normalized, and blank entries dropped."""
    mod = _load_module()
    collected: list[str] = []
    raw_args = ["./docs/ouroboros/85_loss_recovery_action_plan.md", " src/main.py "]
    result = mod.load_changed_files(raw_args, collected)
    assert collected == []
    expected = [
        "docs/ouroboros/85_loss_recovery_action_plan.md",
        "src/main.py",
    ]
    assert result == expected
|
||||
|
||||
|
||||
def test_load_changed_files_with_range_uses_git_diff(monkeypatch) -> None:
    """A single range-style argument is expanded via `git diff --name-only`."""
    mod = _load_module()
    collected: list[str] = []

    def fake_run(cmd, check, capture_output, text):  # noqa: ANN001
        # The stub verifies the exact git invocation before faking its output.
        assert cmd[:3] == ["git", "diff", "--name-only"]
        assert check is True
        assert capture_output is True
        assert text is True
        fake_stdout = "docs/ouroboros/85_loss_recovery_action_plan.md\nsrc/main.py\n"
        return SimpleNamespace(stdout=fake_stdout)

    monkeypatch.setattr(mod.subprocess, "run", fake_run)
    result = mod.load_changed_files(["abc...def"], collected)
    assert collected == []
    assert result == [
        "docs/ouroboros/85_loss_recovery_action_plan.md",
        "src/main.py",
    ]
|
||||
Reference in New Issue
Block a user