diff --git a/pyproject.toml b/pyproject.toml
index 7eb6e83..4cef7f1 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -9,6 +9,7 @@ dependencies = [
"pydantic-settings>=2.1,<3",
"google-genai>=1.0,<2",
"scipy>=1.11,<2",
+ "fastapi>=0.110,<1",
]
[project.optional-dependencies]
diff --git a/src/dashboard/__init__.py b/src/dashboard/__init__.py
new file mode 100644
index 0000000..5bba3aa
--- /dev/null
+++ b/src/dashboard/__init__.py
@@ -0,0 +1,5 @@
+"""FastAPI dashboard package for observability APIs."""
+
+from src.dashboard.app import create_dashboard_app
+
+__all__ = ["create_dashboard_app"]
diff --git a/src/dashboard/app.py b/src/dashboard/app.py
new file mode 100644
index 0000000..205f035
--- /dev/null
+++ b/src/dashboard/app.py
@@ -0,0 +1,349 @@
+"""FastAPI application for observability dashboard endpoints."""
+
+from __future__ import annotations
+
+import contextlib
+import json
+import sqlite3
+from datetime import UTC, datetime
+from pathlib import Path
+from typing import Any
+
+from fastapi import FastAPI, HTTPException, Query
+from fastapi.responses import FileResponse
+
+
+def create_dashboard_app(db_path: str) -> FastAPI:
+    """Create dashboard FastAPI app bound to a SQLite database path.
+
+    Every endpoint opens its own short-lived connection against ``db_path``
+    via ``_connect``; no database handle is shared between requests.
+    """
+    app = FastAPI(title="The Ouroboros Dashboard", version="1.0.0")
+    # Expose the bound path on app.state so tests/operators can inspect it.
+    app.state.db_path = db_path
+
+    @app.get("/")
+    def index() -> FileResponse:
+        """Serve the static landing page shipped next to this module."""
+        index_path = Path(__file__).parent / "static" / "index.html"
+        return FileResponse(index_path)
+
+    @app.get("/api/status")
+    def get_status() -> dict[str, Any]:
+        """Summarise today's trades, decisions and playbook state per market."""
+        # "Today" is computed in UTC; assumes writers store UTC ISO
+        # timestamps so SQLite DATE() comparisons line up -- TODO confirm.
+        today = datetime.now(UTC).date().isoformat()
+        with _connect(db_path) as conn:
+            markets = ["KR", "US"]
+            market_status: dict[str, Any] = {}
+            total_trades = 0
+            total_pnl = 0.0
+            total_decisions = 0
+            for market in markets:
+                # Aggregate queries always return one row; the `if row`
+                # guards below are purely defensive.
+                trade_row = conn.execute(
+                    """
+                    SELECT COUNT(*) AS c, COALESCE(SUM(pnl), 0.0) AS p
+                    FROM trades
+                    WHERE DATE(timestamp) = ? AND market = ?
+                    """,
+                    (today, market),
+                ).fetchone()
+                decision_row = conn.execute(
+                    """
+                    SELECT COUNT(*) AS c
+                    FROM decision_logs
+                    WHERE DATE(timestamp) = ? AND market = ?
+                    """,
+                    (today, market),
+                ).fetchone()
+                playbook_row = conn.execute(
+                    """
+                    SELECT status
+                    FROM playbooks
+                    WHERE date = ? AND market = ?
+                    LIMIT 1
+                    """,
+                    (today, market),
+                ).fetchone()
+                market_status[market] = {
+                    "trade_count": int(trade_row["c"] if trade_row else 0),
+                    "total_pnl": float(trade_row["p"] if trade_row else 0.0),
+                    "decision_count": int(decision_row["c"] if decision_row else 0),
+                    # None when no playbook exists for today's date yet.
+                    "playbook_status": playbook_row["status"] if playbook_row else None,
+                }
+                total_trades += market_status[market]["trade_count"]
+                total_pnl += market_status[market]["total_pnl"]
+                total_decisions += market_status[market]["decision_count"]
+
+        return {
+            "date": today,
+            "markets": market_status,
+            "totals": {
+                "trade_count": total_trades,
+                "total_pnl": round(total_pnl, 2),
+                "decision_count": total_decisions,
+            },
+        }
+
+    @app.get("/api/playbook/{date_str}")
+    def get_playbook(date_str: str, market: str = Query("KR")) -> dict[str, Any]:
+        """Return the stored playbook for *date_str*/*market*, or 404."""
+        with _connect(db_path) as conn:
+            row = conn.execute(
+                """
+                SELECT date, market, status, playbook_json, generated_at,
+                       token_count, scenario_count, match_count
+                FROM playbooks
+                WHERE date = ? AND market = ?
+                """,
+                (date_str, market),
+            ).fetchone()
+        if row is None:
+            raise HTTPException(status_code=404, detail="playbook not found")
+        return {
+            "date": row["date"],
+            "market": row["market"],
+            "status": row["status"],
+            # Stored as a JSON string; decode so clients get structured data.
+            "playbook": json.loads(row["playbook_json"]),
+            "generated_at": row["generated_at"],
+            "token_count": row["token_count"],
+            "scenario_count": row["scenario_count"],
+            "match_count": row["match_count"],
+        }
+
+    @app.get("/api/scorecard/{date_str}")
+    def get_scorecard(date_str: str, market: str = Query("KR")) -> dict[str, Any]:
+        """Return the L6_DAILY scorecard context entry for the date/market."""
+        # Scorecards are stored in contexts under key "scorecard_<MARKET>".
+        key = f"scorecard_{market}"
+        with _connect(db_path) as conn:
+            row = conn.execute(
+                """
+                SELECT value
+                FROM contexts
+                WHERE layer = 'L6_DAILY' AND timeframe = ? AND key = ?
+                """,
+                (date_str, key),
+            ).fetchone()
+        if row is None:
+            raise HTTPException(status_code=404, detail="scorecard not found")
+        return {"date": date_str, "market": market, "scorecard": json.loads(row["value"])}
+
+    @app.get("/api/performance")
+    def get_performance(market: str = Query("all")) -> dict[str, Any]:
+        """Aggregate trade performance, combined across markets or for one."""
+        with _connect(db_path) as conn:
+            if market == "all":
+                # One aggregate row per market, plus a combined rollup.
+                by_market_rows = conn.execute(
+                    """
+                    SELECT market,
+                           COUNT(*) AS total_trades,
+                           SUM(CASE WHEN pnl > 0 THEN 1 ELSE 0 END) AS wins,
+                           SUM(CASE WHEN pnl < 0 THEN 1 ELSE 0 END) AS losses,
+                           COALESCE(SUM(pnl), 0.0) AS total_pnl,
+                           COALESCE(AVG(confidence), 0.0) AS avg_confidence
+                    FROM trades
+                    GROUP BY market
+                    ORDER BY market
+                    """
+                ).fetchall()
+                combined = _performance_from_rows(by_market_rows)
+                return {
+                    "market": "all",
+                    "combined": combined,
+                    "by_market": [
+                        _row_to_performance(row)
+                        for row in by_market_rows
+                    ],
+                }
+
+            row = conn.execute(
+                """
+                SELECT market,
+                       COUNT(*) AS total_trades,
+                       SUM(CASE WHEN pnl > 0 THEN 1 ELSE 0 END) AS wins,
+                       SUM(CASE WHEN pnl < 0 THEN 1 ELSE 0 END) AS losses,
+                       COALESCE(SUM(pnl), 0.0) AS total_pnl,
+                       COALESCE(AVG(confidence), 0.0) AS avg_confidence
+                FROM trades
+                WHERE market = ?
+                GROUP BY market
+                """,
+                (market,),
+            ).fetchone()
+        # GROUP BY produces no row at all when the market has no trades.
+        if row is None:
+            return {"market": market, "metrics": _empty_performance(market)}
+        return {"market": market, "metrics": _row_to_performance(row)}
+
+    @app.get("/api/context/{layer}")
+    def get_context_layer(
+        layer: str,
+        timeframe: str | None = Query(default=None),
+        limit: int = Query(default=100, ge=1, le=1000),
+    ) -> dict[str, Any]:
+        """List context entries for a layer, optionally filtered by timeframe."""
+        with _connect(db_path) as conn:
+            # Unfiltered listings show most-recent first; timeframe-scoped
+            # queries sort by key instead.
+            if timeframe is None:
+                rows = conn.execute(
+                    """
+                    SELECT timeframe, key, value, updated_at
+                    FROM contexts
+                    WHERE layer = ?
+                    ORDER BY updated_at DESC
+                    LIMIT ?
+                    """,
+                    (layer, limit),
+                ).fetchall()
+            else:
+                rows = conn.execute(
+                    """
+                    SELECT timeframe, key, value, updated_at
+                    FROM contexts
+                    WHERE layer = ? AND timeframe = ?
+                    ORDER BY key
+                    LIMIT ?
+                    """,
+                    (layer, timeframe, limit),
+                ).fetchall()
+
+        entries = [
+            {
+                "timeframe": row["timeframe"],
+                "key": row["key"],
+                "value": json.loads(row["value"]),
+                "updated_at": row["updated_at"],
+            }
+            for row in rows
+        ]
+        return {
+            "layer": layer,
+            "timeframe": timeframe,
+            "count": len(entries),
+            "entries": entries,
+        }
+
+    @app.get("/api/decisions")
+    def get_decisions(
+        market: str = Query("KR"),
+        limit: int = Query(default=50, ge=1, le=500),
+    ) -> dict[str, Any]:
+        """Return the most recent decision logs for a market, newest first."""
+        with _connect(db_path) as conn:
+            rows = conn.execute(
+                """
+                SELECT decision_id, timestamp, stock_code, market, exchange_code,
+                       action, confidence, rationale, context_snapshot, input_data,
+                       outcome_pnl, outcome_accuracy
+                FROM decision_logs
+                WHERE market = ?
+                ORDER BY timestamp DESC
+                LIMIT ?
+                """,
+                (market, limit),
+            ).fetchall()
+        decisions = []
+        for row in rows:
+            decisions.append(
+                {
+                    "decision_id": row["decision_id"],
+                    "timestamp": row["timestamp"],
+                    "stock_code": row["stock_code"],
+                    "market": row["market"],
+                    "exchange_code": row["exchange_code"],
+                    "action": row["action"],
+                    "confidence": row["confidence"],
+                    "rationale": row["rationale"],
+                    # JSON-encoded columns; assumes they are never NULL --
+                    # TODO confirm against the decision_logs schema.
+                    "context_snapshot": json.loads(row["context_snapshot"]),
+                    "input_data": json.loads(row["input_data"]),
+                    "outcome_pnl": row["outcome_pnl"],
+                    "outcome_accuracy": row["outcome_accuracy"],
+                }
+            )
+        return {"market": market, "count": len(decisions), "decisions": decisions}
+
+    @app.get("/api/scenarios/active")
+    def get_active_scenarios(
+        market: str = Query("US"),
+        date_str: str | None = Query(default=None),
+        limit: int = Query(default=50, ge=1, le=500),
+    ) -> dict[str, Any]:
+        """Return a day's decisions whose snapshot recorded a scenario match."""
+        # Default to "today" in UTC when no explicit date is requested.
+        if date_str is None:
+            date_str = datetime.now(UTC).date().isoformat()
+
+        with _connect(db_path) as conn:
+            rows = conn.execute(
+                """
+                SELECT timestamp, stock_code, action, confidence, rationale, context_snapshot
+                FROM decision_logs
+                WHERE market = ? AND DATE(timestamp) = ?
+                ORDER BY timestamp DESC
+                LIMIT ?
+                """,
+                (market, date_str, limit),
+            ).fetchall()
+        matches: list[dict[str, Any]] = []
+        for row in rows:
+            snapshot = json.loads(row["context_snapshot"])
+            scenario_match = snapshot.get("scenario_match", {})
+            # Keep only decisions carrying a non-empty scenario_match mapping.
+            if not isinstance(scenario_match, dict) or not scenario_match:
+                continue
+            matches.append(
+                {
+                    "timestamp": row["timestamp"],
+                    "stock_code": row["stock_code"],
+                    "action": row["action"],
+                    "confidence": row["confidence"],
+                    "rationale": row["rationale"],
+                    "scenario_match": scenario_match,
+                }
+            )
+        return {"market": market, "date": date_str, "count": len(matches), "matches": matches}
+
+    return app
+
+
+def _connect(db_path: str) -> sqlite3.Connection:
+ conn = sqlite3.connect(db_path)
+ conn.row_factory = sqlite3.Row
+ return conn
+
+
+def _row_to_performance(row: sqlite3.Row) -> dict[str, Any]:
+ wins = int(row["wins"] or 0)
+ losses = int(row["losses"] or 0)
+ total = int(row["total_trades"] or 0)
+ win_rate = round((wins / (wins + losses) * 100), 2) if (wins + losses) > 0 else 0.0
+ return {
+ "market": row["market"],
+ "total_trades": total,
+ "wins": wins,
+ "losses": losses,
+ "win_rate": win_rate,
+ "total_pnl": round(float(row["total_pnl"] or 0.0), 2),
+ "avg_confidence": round(float(row["avg_confidence"] or 0.0), 2),
+ }
+
+
+def _performance_from_rows(rows: list[sqlite3.Row]) -> dict[str, Any]:
+ total_trades = 0
+ wins = 0
+ losses = 0
+ total_pnl = 0.0
+ confidence_weighted = 0.0
+ for row in rows:
+ market_total = int(row["total_trades"] or 0)
+ market_conf = float(row["avg_confidence"] or 0.0)
+ total_trades += market_total
+ wins += int(row["wins"] or 0)
+ losses += int(row["losses"] or 0)
+ total_pnl += float(row["total_pnl"] or 0.0)
+ confidence_weighted += market_total * market_conf
+ win_rate = round((wins / (wins + losses) * 100), 2) if (wins + losses) > 0 else 0.0
+ avg_confidence = round(confidence_weighted / total_trades, 2) if total_trades > 0 else 0.0
+ return {
+ "market": "all",
+ "total_trades": total_trades,
+ "wins": wins,
+ "losses": losses,
+ "win_rate": win_rate,
+ "total_pnl": round(total_pnl, 2),
+ "avg_confidence": avg_confidence,
+ }
+
+
+def _empty_performance(market: str) -> dict[str, Any]:
+ return {
+ "market": market,
+ "total_trades": 0,
+ "wins": 0,
+ "losses": 0,
+ "win_rate": 0.0,
+ "total_pnl": 0.0,
+ "avg_confidence": 0.0,
+ }
diff --git a/src/dashboard/static/index.html b/src/dashboard/static/index.html
new file mode 100644
index 0000000..b46f0c8
--- /dev/null
+++ b/src/dashboard/static/index.html
@@ -0,0 +1,20 @@
+<!DOCTYPE html>
+<html lang="en">
+<head>
+  <meta charset="utf-8">
+  <title>The Ouroboros Dashboard</title>
+</head>
+<body>
+  <h1>The Ouroboros Dashboard API</h1>
+  <p>Use the following endpoints:</p>
+  <ul>
+    <li><code>/api/status</code></li>
+    <li><code>/api/playbook/{date}?market=KR</code></li>
+    <li><code>/api/scorecard/{date}?market=KR</code></li>
+    <li><code>/api/performance?market=all</code></li>
+    <li><code>/api/context/{layer}</code></li>
+    <li><code>/api/decisions?market=KR</code></li>
+    <li><code>/api/scenarios/active?market=US</code></li>
+  </ul>
+</body>
+</html>
diff --git a/src/main.py b/src/main.py
index 5a7f711..f0e8f17 100644
--- a/src/main.py
+++ b/src/main.py
@@ -832,13 +832,16 @@ async def _run_evolution_loop(
logger.info("Evolution loop skipped on %s (no actionable failures)", market_date)
return
- await telegram.send_message(
- "Evolution Update\n"
- f"Date: {market_date}\n"
- f"PR: {pr_info.get('title', 'N/A')}\n"
- f"Branch: {pr_info.get('branch', 'N/A')}\n"
- f"Status: {pr_info.get('status', 'N/A')}"
- )
+ try:
+ await telegram.send_message(
+ "Evolution Update\n"
+ f"Date: {market_date}\n"
+ f"PR: {pr_info.get('title', 'N/A')}\n"
+ f"Branch: {pr_info.get('branch', 'N/A')}\n"
+ f"Status: {pr_info.get('status', 'N/A')}"
+ )
+ except Exception as exc:
+ logger.warning("Evolution notification failed on %s: %s", market_date, exc)
async def run(settings: Settings) -> None:
diff --git a/tests/test_dashboard.py b/tests/test_dashboard.py
new file mode 100644
index 0000000..caaf3fb
--- /dev/null
+++ b/tests/test_dashboard.py
@@ -0,0 +1,270 @@
+"""Tests for FastAPI dashboard endpoints."""
+
+from __future__ import annotations
+
+import json
+import sqlite3
+from pathlib import Path
+
+import pytest
+
+pytest.importorskip("fastapi")
+from fastapi.testclient import TestClient
+
+from src.dashboard.app import create_dashboard_app
+from src.db import init_db
+
+
+def _seed_db(conn: sqlite3.Connection) -> None:
+    """Seed deterministic fixture rows covering every dashboard endpoint.
+
+    Inserts one KR playbook, one daily scorecard context, one realtime
+    context entry, one KR and one US decision, and one trade per decision,
+    all dated 2026-02-14.
+    """
+    # One "ready" KR playbook for the fixture date.
+    conn.execute(
+        """
+        INSERT INTO playbooks (
+            date, market, status, playbook_json, generated_at,
+            token_count, scenario_count, match_count
+        ) VALUES (?, ?, ?, ?, ?, ?, ?, ?)
+        """,
+        (
+            "2026-02-14",
+            "KR",
+            "ready",
+            json.dumps({"market": "KR", "stock_playbooks": []}),
+            "2026-02-14T08:30:00+00:00",
+            123,
+            2,
+            1,
+        ),
+    )
+    # Daily scorecard context entry (read by /api/scorecard).
+    conn.execute(
+        """
+        INSERT INTO contexts (layer, timeframe, key, value, created_at, updated_at)
+        VALUES (?, ?, ?, ?, ?, ?)
+        """,
+        (
+            "L6_DAILY",
+            "2026-02-14",
+            "scorecard_KR",
+            json.dumps({"market": "KR", "total_pnl": 1.5, "win_rate": 60.0}),
+            "2026-02-14T15:30:00+00:00",
+            "2026-02-14T15:30:00+00:00",
+        ),
+    )
+    # Realtime context entry (read by /api/context/{layer}).
+    conn.execute(
+        """
+        INSERT INTO contexts (layer, timeframe, key, value, created_at, updated_at)
+        VALUES (?, ?, ?, ?, ?, ?)
+        """,
+        (
+            "L7_REALTIME",
+            "2026-02-14T10:00:00+00:00",
+            "volatility_KR_005930",
+            json.dumps({"momentum_score": 70.0}),
+            "2026-02-14T10:00:00+00:00",
+            "2026-02-14T10:00:00+00:00",
+        ),
+    )
+    # KR decision with a non-empty scenario_match (kept by /api/scenarios/active).
+    conn.execute(
+        """
+        INSERT INTO decision_logs (
+            decision_id, timestamp, stock_code, market, exchange_code,
+            action, confidence, rationale, context_snapshot, input_data
+        ) VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?)
+        """,
+        (
+            "d-kr-1",
+            "2026-02-14T09:10:00+00:00",
+            "005930",
+            "KR",
+            "KRX",
+            "BUY",
+            85,
+            "signal matched",
+            json.dumps({"scenario_match": {"rsi": 28.0}}),
+            json.dumps({"current_price": 70000}),
+        ),
+    )
+    # US decision with an EMPTY scenario_match (filtered out by the endpoint).
+    conn.execute(
+        """
+        INSERT INTO decision_logs (
+            decision_id, timestamp, stock_code, market, exchange_code,
+            action, confidence, rationale, context_snapshot, input_data
+        ) VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?)
+        """,
+        (
+            "d-us-1",
+            "2026-02-14T21:10:00+00:00",
+            "AAPL",
+            "US",
+            "NASDAQ",
+            "SELL",
+            80,
+            "no match",
+            json.dumps({"scenario_match": {}}),
+            json.dumps({"current_price": 200}),
+        ),
+    )
+    # One winning KR trade (pnl > 0) linked to the KR decision.
+    conn.execute(
+        """
+        INSERT INTO trades (
+            timestamp, stock_code, action, confidence, rationale,
+            quantity, price, pnl, market, exchange_code, selection_context, decision_id
+        ) VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)
+        """,
+        (
+            "2026-02-14T09:11:00+00:00",
+            "005930",
+            "BUY",
+            85,
+            "buy",
+            1,
+            70000,
+            2.0,
+            "KR",
+            "KRX",
+            None,
+            "d-kr-1",
+        ),
+    )
+    # One losing US trade (pnl < 0) linked to the US decision.
+    conn.execute(
+        """
+        INSERT INTO trades (
+            timestamp, stock_code, action, confidence, rationale,
+            quantity, price, pnl, market, exchange_code, selection_context, decision_id
+        ) VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)
+        """,
+        (
+            "2026-02-14T21:11:00+00:00",
+            "AAPL",
+            "SELL",
+            80,
+            "sell",
+            1,
+            200,
+            -1.0,
+            "US",
+            "NASDAQ",
+            None,
+            "d-us-1",
+        ),
+    )
+    conn.commit()
+
+
+def _client(tmp_path: Path) -> TestClient:
+    """Build a TestClient over a freshly initialised and seeded database."""
+    db_path = tmp_path / "dashboard_test.db"
+    conn = init_db(str(db_path))
+    _seed_db(conn)
+    # Close the seeding connection; the app opens its own per request.
+    conn.close()
+    app = create_dashboard_app(str(db_path))
+    return TestClient(app)
+
+
+def test_index_serves_html(tmp_path: Path) -> None:
+    """Root path serves the bundled static landing page."""
+    client = _client(tmp_path)
+    resp = client.get("/")
+    assert resp.status_code == 200
+    assert "The Ouroboros Dashboard API" in resp.text
+
+
+def test_status_endpoint(tmp_path: Path) -> None:
+    """Status payload carries per-market sections and combined totals."""
+    client = _client(tmp_path)
+    resp = client.get("/api/status")
+    assert resp.status_code == 200
+    body = resp.json()
+    assert "KR" in body["markets"]
+    assert "US" in body["markets"]
+    assert "totals" in body
+
+
+def test_playbook_found(tmp_path: Path) -> None:
+    """Seeded playbook is returned for its date/market pair."""
+    client = _client(tmp_path)
+    resp = client.get("/api/playbook/2026-02-14?market=KR")
+    assert resp.status_code == 200
+    assert resp.json()["market"] == "KR"
+
+
+def test_playbook_not_found(tmp_path: Path) -> None:
+    """Unknown playbook date yields a 404."""
+    client = _client(tmp_path)
+    resp = client.get("/api/playbook/2026-02-15?market=KR")
+    assert resp.status_code == 404
+
+
+def test_scorecard_found(tmp_path: Path) -> None:
+    """Scorecard context JSON is decoded into the response body."""
+    client = _client(tmp_path)
+    resp = client.get("/api/scorecard/2026-02-14?market=KR")
+    assert resp.status_code == 200
+    assert resp.json()["scorecard"]["total_pnl"] == 1.5
+
+
+def test_scorecard_not_found(tmp_path: Path) -> None:
+    """Unknown scorecard date yields a 404."""
+    client = _client(tmp_path)
+    resp = client.get("/api/scorecard/2026-02-15?market=KR")
+    assert resp.status_code == 404
+
+
+def test_performance_all(tmp_path: Path) -> None:
+    """market=all returns the combined rollup plus one entry per market."""
+    client = _client(tmp_path)
+    resp = client.get("/api/performance?market=all")
+    assert resp.status_code == 200
+    body = resp.json()
+    assert body["market"] == "all"
+    assert body["combined"]["total_trades"] == 2
+    assert len(body["by_market"]) == 2
+
+
+def test_performance_market_filter(tmp_path: Path) -> None:
+    """A specific market only counts its own trades."""
+    client = _client(tmp_path)
+    resp = client.get("/api/performance?market=KR")
+    assert resp.status_code == 200
+    body = resp.json()
+    assert body["market"] == "KR"
+    assert body["metrics"]["total_trades"] == 1
+
+
+def test_performance_empty_market(tmp_path: Path) -> None:
+    """A market with no trades returns zeroed metrics, not an error."""
+    client = _client(tmp_path)
+    resp = client.get("/api/performance?market=JP")
+    assert resp.status_code == 200
+    assert resp.json()["metrics"]["total_trades"] == 0
+
+
+def test_context_layer_all(tmp_path: Path) -> None:
+    """Layer listing without a timeframe returns all entries for the layer."""
+    client = _client(tmp_path)
+    resp = client.get("/api/context/L7_REALTIME")
+    assert resp.status_code == 200
+    body = resp.json()
+    assert body["layer"] == "L7_REALTIME"
+    assert body["count"] == 1
+
+
+def test_context_layer_timeframe_filter(tmp_path: Path) -> None:
+    """Timeframe filter narrows context entries to the requested window."""
+    client = _client(tmp_path)
+    resp = client.get("/api/context/L6_DAILY?timeframe=2026-02-14")
+    assert resp.status_code == 200
+    body = resp.json()
+    assert body["count"] == 1
+    assert body["entries"][0]["key"] == "scorecard_KR"
+
+
+def test_decisions_endpoint(tmp_path: Path) -> None:
+    """Decision listing is scoped to the requested market."""
+    client = _client(tmp_path)
+    resp = client.get("/api/decisions?market=KR")
+    assert resp.status_code == 200
+    body = resp.json()
+    assert body["count"] == 1
+    assert body["decisions"][0]["decision_id"] == "d-kr-1"
+
+
+def test_scenarios_active_filters_non_matched(tmp_path: Path) -> None:
+    """Only decisions with a non-empty scenario_match are returned."""
+    client = _client(tmp_path)
+    resp = client.get("/api/scenarios/active?market=KR&date_str=2026-02-14")
+    assert resp.status_code == 200
+    body = resp.json()
+    assert body["count"] == 1
+    assert body["matches"][0]["stock_code"] == "005930"
+
+
+def test_scenarios_active_empty_when_no_matches(tmp_path: Path) -> None:
+    """The seeded US decision has an empty scenario_match, so none return."""
+    client = _client(tmp_path)
+    resp = client.get("/api/scenarios/active?market=US&date_str=2026-02-14")
+    assert resp.status_code == 200
+    assert resp.json()["count"] == 0
diff --git a/tests/test_main.py b/tests/test_main.py
index 378077a..9afc36f 100644
--- a/tests/test_main.py
+++ b/tests/test_main.py
@@ -1304,6 +1304,82 @@ async def test_handle_market_close_without_lessons_stores_once() -> None:
assert reviewer.store_scorecard_in_context.call_count == 1
+@pytest.mark.asyncio
+async def test_handle_market_close_triggers_evolution_for_us() -> None:
+    """US market close should invoke the evolution optimizer once."""
+    telegram = MagicMock()
+    telegram.notify_market_close = AsyncMock()
+    telegram.send_message = AsyncMock()
+
+    context_aggregator = MagicMock()
+    reviewer = MagicMock()
+    reviewer.generate_scorecard.return_value = DailyScorecard(
+        date="2026-02-14",
+        market="US",
+        total_decisions=2,
+        buys=1,
+        sells=1,
+        holds=0,
+        total_pnl=3.0,
+        win_rate=50.0,
+        avg_confidence=80.0,
+        scenario_match_rate=100.0,
+    )
+    reviewer.generate_lessons = AsyncMock(return_value=[])
+
+    evolution_optimizer = MagicMock()
+    evolution_optimizer.evolve = AsyncMock(return_value=None)
+
+    await _handle_market_close(
+        market_code="US",
+        market_name="United States",
+        market_timezone=UTC,
+        telegram=telegram,
+        context_aggregator=context_aggregator,
+        daily_reviewer=reviewer,
+        evolution_optimizer=evolution_optimizer,
+    )
+
+    evolution_optimizer.evolve.assert_called_once()
+
+
+@pytest.mark.asyncio
+async def test_handle_market_close_skips_evolution_for_kr() -> None:
+    """KR market close must not trigger the evolution optimizer."""
+    telegram = MagicMock()
+    telegram.notify_market_close = AsyncMock()
+    telegram.send_message = AsyncMock()
+
+    context_aggregator = MagicMock()
+    reviewer = MagicMock()
+    reviewer.generate_scorecard.return_value = DailyScorecard(
+        date="2026-02-14",
+        market="KR",
+        total_decisions=1,
+        buys=1,
+        sells=0,
+        holds=0,
+        total_pnl=1.0,
+        win_rate=100.0,
+        avg_confidence=90.0,
+        scenario_match_rate=100.0,
+    )
+    reviewer.generate_lessons = AsyncMock(return_value=[])
+
+    evolution_optimizer = MagicMock()
+    evolution_optimizer.evolve = AsyncMock(return_value=None)
+
+    await _handle_market_close(
+        market_code="KR",
+        market_name="Korea",
+        market_timezone=UTC,
+        telegram=telegram,
+        context_aggregator=context_aggregator,
+        daily_reviewer=reviewer,
+        evolution_optimizer=evolution_optimizer,
+    )
+
+    evolution_optimizer.evolve.assert_not_called()
+
+
def test_run_context_scheduler_invokes_scheduler() -> None:
"""Scheduler helper should call run_if_due with provided datetime."""
scheduler = MagicMock()
@@ -1354,3 +1430,27 @@ async def test_run_evolution_loop_notifies_when_pr_generated() -> None:
optimizer.evolve.assert_called_once()
telegram.send_message.assert_called_once()
+
+
+@pytest.mark.asyncio
+async def test_run_evolution_loop_notification_error_is_ignored() -> None:
+    """A failing Telegram send must not propagate out of the evolution loop."""
+    optimizer = MagicMock()
+    optimizer.evolve = AsyncMock(
+        return_value={
+            "title": "[Evolution] New strategy: v20260214_050000",
+            "branch": "evolution/v20260214_050000",
+            "status": "ready_for_review",
+        }
+    )
+    telegram = MagicMock()
+    # Simulate a notification outage; the loop should swallow and log it.
+    telegram.send_message = AsyncMock(side_effect=RuntimeError("telegram down"))
+
+    await _run_evolution_loop(
+        evolution_optimizer=optimizer,
+        telegram=telegram,
+        market_code="US",
+        market_date="2026-02-14",
+    )
+
+    optimizer.evolve.assert_called_once()
+    telegram.send_message.assert_called_once()