Compare commits
100 commits (`10b6e34d44...feature/is`)
CLAUDE.md (66 changed lines)
@@ -15,6 +15,9 @@ pytest -v --cov=src
|
||||
|
||||
# Run (paper trading)
|
||||
python -m src.main --mode=paper
|
||||
|
||||
# Run with dashboard
|
||||
python -m src.main --mode=paper --dashboard
|
||||
```
|
||||
|
||||
## Telegram Notifications (Optional)
|
||||
@@ -43,8 +46,45 @@ Get real-time alerts for trades, circuit breakers, and system events via Telegra
|
||||
- ℹ️ Market open/close notifications
|
||||
- 📝 System startup/shutdown status
|
||||
|
||||
### Interactive Commands
|
||||
|
||||
With `TELEGRAM_COMMANDS_ENABLED=true` (default), the bot supports 9 bidirectional commands: `/help`, `/status`, `/positions`, `/report`, `/scenarios`, `/review`, `/dashboard`, `/stop`, `/resume`.
|
||||
|
||||
**Fail-safe**: Notifications never crash the trading system. Missing credentials or API errors are logged but trading continues normally.
|
||||
|
||||
## Smart Volatility Scanner (Optional)
|
||||
|
||||
Python-first filtering pipeline that reduces Gemini API calls by pre-filtering stocks using technical indicators.
|
||||
|
||||
### How It Works
|
||||
|
||||
1. **Fetch Rankings** — KIS API volume surge rankings (top 30 stocks)
|
||||
2. **Python Filter** — RSI + volume ratio calculations (no AI)
|
||||
- Volume > 200% of previous day
|
||||
- RSI(14) < 30 (oversold) OR RSI(14) > 70 (momentum)
|
||||
3. **AI Judgment** — Only qualified candidates (1-3 stocks) sent to Gemini
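
A minimal sketch of the Python filter step above, assuming each candidate already carries today's volume, the previous day's volume, and RSI(14); the field names (`prev_volume`, `rsi_14`) are illustrative, not the actual `SmartVolatilityScanner` interface:

```python
# Sketch only: assumes each candidate dict carries today's volume, the
# previous day's volume, and a pre-computed RSI(14). Not the real scanner API.
RSI_OVERSOLD_THRESHOLD = 30
RSI_MOMENTUM_THRESHOLD = 70
VOL_MULTIPLIER = 2.0          # 2.0 means 200% of the previous day's volume
SCANNER_TOP_N = 3

def prefilter(candidates: list[dict]) -> list[dict]:
    """Return up to SCANNER_TOP_N stocks worth sending to Gemini."""
    qualified = []
    for c in candidates:
        volume_ratio = c["volume"] / max(c["prev_volume"], 1)
        oversold = c["rsi_14"] < RSI_OVERSOLD_THRESHOLD
        momentum = c["rsi_14"] > RSI_MOMENTUM_THRESHOLD
        if volume_ratio >= VOL_MULTIPLIER and (oversold or momentum):
            qualified.append({**c, "volume_ratio": volume_ratio})
    # Strongest volume surge first; only the top N reach the AI judgment step.
    qualified.sort(key=lambda c: c["volume_ratio"], reverse=True)
    return qualified[:SCANNER_TOP_N]
```
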
### Configuration
|
||||
|
||||
Add to `.env` (optional, has sensible defaults):
|
||||
```bash
|
||||
RSI_OVERSOLD_THRESHOLD=30 # 0-50, default 30
|
||||
RSI_MOMENTUM_THRESHOLD=70 # 50-100, default 70
|
||||
VOL_MULTIPLIER=2.0 # Volume threshold (2.0 = 200%)
|
||||
SCANNER_TOP_N=3 # Max candidates per scan
|
||||
```
|
||||
|
||||
### Benefits
|
||||
|
||||
- **Reduces API costs** — Process 1-3 stocks instead of 20-30
|
||||
- **Python-based filtering** — Fast technical analysis before AI
|
||||
- **Evolution-ready** — Selection context logged for strategy optimization
|
||||
- **Fault-tolerant** — Falls back to static watchlist on API failure
|
||||
|
||||
### Realtime Mode Only
|
||||
|
||||
Smart Scanner runs in `TRADE_MODE=realtime` only. Daily mode uses static watchlists for batch efficiency.
|
||||
|
||||
## Documentation
|
||||
|
||||
- **[Workflow Guide](docs/workflow.md)** — Git workflow policy and agent-based development
|
||||
@@ -53,6 +93,7 @@ Get real-time alerts for trades, circuit breakers, and system events via Telegra
|
||||
- **[Context Tree](docs/context-tree.md)** — L1-L7 hierarchical memory system
|
||||
- **[Testing](docs/testing.md)** — Test structure, coverage requirements, writing tests
|
||||
- **[Agent Policies](docs/agents.md)** — Prime directives, constraints, prohibited actions
|
||||
- **[Requirements Log](docs/requirements-log.md)** — User requirements and feedback tracking
|
||||
|
||||
## Core Principles
|
||||
|
||||
@@ -61,21 +102,37 @@ Get real-time alerts for trades, circuit breakers, and system events via Telegra
|
||||
3. **Issue-Driven Development** — All work goes through Gitea issues → feature branches → PRs
|
||||
4. **Agent Specialization** — Use dedicated agents for design, coding, testing, docs, review
|
||||
|
||||
## Requirements Management
|
||||
|
||||
User requirements and feedback are tracked in [docs/requirements-log.md](docs/requirements-log.md):
|
||||
|
||||
- New requirements are added chronologically with dates
|
||||
- Code changes should reference related requirements
|
||||
- Helps maintain project evolution aligned with user needs
|
||||
- Preserves context across conversations and development cycles
|
||||
|
||||
## Project Structure
|
||||
|
||||
```
|
||||
src/
|
||||
├── analysis/ # Technical analysis (RSI, volatility, smart scanner)
|
||||
├── backup/ # Disaster recovery (scheduler, cloud storage, health)
|
||||
├── brain/ # Gemini AI decision engine (prompt optimizer, context selector)
|
||||
├── broker/ # KIS API client (domestic + overseas)
|
||||
├── brain/ # Gemini AI decision engine
|
||||
├── context/ # L1-L7 hierarchical memory system
|
||||
├── core/ # Risk manager (READ-ONLY)
|
||||
├── evolution/ # Self-improvement optimizer
|
||||
├── dashboard/ # FastAPI read-only monitoring (8 API endpoints)
|
||||
├── data/ # External data integration (news, market data, calendar)
|
||||
├── evolution/ # Self-improvement (optimizer, daily review, scorecard)
|
||||
├── logging/ # Decision logger (audit trail)
|
||||
├── markets/ # Market schedules and timezone handling
|
||||
├── notifications/ # Telegram real-time alerts
|
||||
├── notifications/ # Telegram alerts + bidirectional commands (9 commands)
|
||||
├── strategy/ # Pre-market planner, scenario engine, playbook store
|
||||
├── db.py # SQLite trade logging
|
||||
├── main.py # Trading loop orchestrator
|
||||
└── config.py # Settings (from .env)
|
||||
|
||||
tests/ # 273 tests across 13 files
|
||||
tests/ # 551 tests across 25 files
|
||||
docs/ # Extended documentation
|
||||
```
|
||||
|
||||
@@ -87,6 +144,7 @@ ruff check src/ tests/ # Lint
|
||||
mypy src/ --strict # Type check
|
||||
|
||||
python -m src.main --mode=paper # Paper trading
|
||||
python -m src.main --mode=paper --dashboard # With dashboard
|
||||
python -m src.main --mode=live # Live trading (⚠️ real money)
|
||||
|
||||
# Gitea workflow (requires tea CLI)
|
||||
|
||||
README.md (160 changed lines)
@@ -10,28 +10,41 @@ KIS(한국투자증권) API로 매매하고, Google Gemini로 판단하며, 자
|
||||
│ (매매 실행) │ │ (거래 루프) │ │ (의사결정) │
|
||||
└─────────────┘ └──────┬──────┘ └─────────────┘
|
||||
│
|
||||
┌──────┴──────┐
|
||||
│Risk Manager │
|
||||
│ (안전장치) │
|
||||
└──────┬──────┘
|
||||
│
|
||||
┌──────┴──────┐
|
||||
│ Evolution │
|
||||
│ (전략 진화) │
|
||||
└─────────────┘
|
||||
┌────────────┼────────────┐
|
||||
│ │ │
|
||||
┌──────┴──────┐ ┌──┴───┐ ┌──────┴──────┐
|
||||
│Risk Manager │ │ DB │ │ Telegram │
|
||||
│ (안전장치) │ │ │ │ (알림+명령) │
|
||||
└──────┬──────┘ └──────┘ └─────────────┘
|
||||
│
|
||||
┌────────┼────────┐
|
||||
│ │ │
|
||||
┌────┴────┐┌──┴──┐┌────┴─────┐
|
||||
│Strategy ││Ctx ││Evolution │
|
||||
│(플레이북)││(메모리)││ (진화) │
|
||||
└─────────┘└─────┘└──────────┘
|
||||
```
|
||||
|
||||
**v2 핵심**: "Plan Once, Execute Locally" — 장 시작 전 AI가 시나리오 플레이북을 1회 생성하고, 거래 시간에는 로컬 시나리오 매칭만 수행하여 API 비용과 지연 시간을 대폭 절감.
|
||||
|
||||
## 핵심 모듈
|
||||
|
||||
| 모듈 | 파일 | 설명 |
|
||||
| 모듈 | 위치 | 설명 |
|
||||
|------|------|------|
|
||||
| 설정 | `src/config.py` | Pydantic 기반 환경변수 로딩 및 타입 검증 |
|
||||
| 브로커 | `src/broker/kis_api.py` | KIS API 비동기 래퍼 (토큰 갱신, 레이트 리미터, 해시키) |
|
||||
| 두뇌 | `src/brain/gemini_client.py` | Gemini 프롬프트 구성 및 JSON 응답 파싱 |
|
||||
| 방패 | `src/core/risk_manager.py` | 서킷 브레이커 + 팻 핑거 체크 |
|
||||
| 알림 | `src/notifications/telegram_client.py` | 텔레그램 실시간 거래 알림 (선택사항) |
|
||||
| 진화 | `src/evolution/optimizer.py` | 실패 패턴 분석 → 새 전략 생성 → 테스트 → PR |
|
||||
| DB | `src/db.py` | SQLite 거래 로그 기록 |
|
||||
| 설정 | `src/config.py` | Pydantic 기반 환경변수 로딩 및 타입 검증 (35+ 변수) |
|
||||
| 브로커 | `src/broker/` | KIS API 비동기 래퍼 (국내 + 해외 9개 시장) |
|
||||
| 두뇌 | `src/brain/` | Gemini 프롬프트 구성, JSON 파싱, 토큰 최적화 |
|
||||
| 방패 | `src/core/risk_manager.py` | 서킷 브레이커 + 팻 핑거 체크 (READ-ONLY) |
|
||||
| 전략 | `src/strategy/` | Pre-Market Planner, Scenario Engine, Playbook Store |
|
||||
| 컨텍스트 | `src/context/` | L1-L7 계층형 메모리 시스템 |
|
||||
| 분석 | `src/analysis/` | RSI, ATR, Smart Volatility Scanner |
|
||||
| 알림 | `src/notifications/` | 텔레그램 양방향 (알림 + 9개 명령어) |
|
||||
| 대시보드 | `src/dashboard/` | FastAPI 읽기 전용 모니터링 (8개 API) |
|
||||
| 진화 | `src/evolution/` | 전략 진화 + Daily Review + Scorecard |
|
||||
| 의사결정 로그 | `src/logging/` | 전체 거래 결정 감사 추적 |
|
||||
| 데이터 | `src/data/` | 뉴스, 시장 데이터, 경제 캘린더 연동 |
|
||||
| 백업 | `src/backup/` | 자동 백업, S3 클라우드, 무결성 검증 |
|
||||
| DB | `src/db.py` | SQLite 거래 로그 (5개 테이블) |
|
||||
|
||||
## 안전장치
|
||||
|
||||
@@ -42,6 +55,7 @@ KIS(한국투자증권) API로 매매하고, Google Gemini로 판단하며, 자
|
||||
| 신뢰도 임계값 | Gemini 신뢰도 80 미만이면 강제 HOLD |
|
||||
| 레이트 리미터 | Leaky Bucket 알고리즘으로 API 호출 제한 |
|
||||
| 토큰 자동 갱신 | 만료 1분 전 자동으로 Access Token 재발급 |
|
||||
| 손절 모니터링 | 플레이북 시나리오 기반 실시간 포지션 보호 |
|
||||
|
||||
## 빠른 시작
|
||||
|
||||
@@ -67,7 +81,11 @@ pytest -v --cov=src --cov-report=term-missing
|
||||
### 4. 실행 (모의투자)
|
||||
|
||||
```bash
|
||||
# 기본 실행
|
||||
python -m src.main --mode=paper
|
||||
|
||||
# 대시보드 활성화
|
||||
python -m src.main --mode=paper --dashboard
|
||||
```
|
||||
|
||||
### 5. Docker 실행
|
||||
@@ -76,7 +94,20 @@ python -m src.main --mode=paper
|
||||
docker compose up -d ouroboros
|
||||
```
|
||||
|
||||
## 텔레그램 알림 (선택사항)
|
||||
## 지원 시장
|
||||
|
||||
| 국가 | 거래소 | 코드 |
|
||||
|------|--------|------|
|
||||
| 🇰🇷 한국 | KRX | KR |
|
||||
| 🇺🇸 미국 | NASDAQ, NYSE, AMEX | US_NASDAQ, US_NYSE, US_AMEX |
|
||||
| 🇯🇵 일본 | TSE | JP |
|
||||
| 🇭🇰 홍콩 | SEHK | HK |
|
||||
| 🇨🇳 중국 | 상하이, 선전 | CN_SHA, CN_SZA |
|
||||
| 🇻🇳 베트남 | 하노이, 호치민 | VN_HNX, VN_HSX |
|
||||
|
||||
`ENABLED_MARKETS` 환경변수로 활성 시장 선택 (기본: `KR,US`).
|
||||
|
||||
## 텔레그램 (선택사항)
|
||||
|
||||
거래 실행, 서킷 브레이커 발동, 시스템 상태 등을 텔레그램으로 실시간 알림 받을 수 있습니다.
|
||||
|
||||
@@ -102,25 +133,51 @@ docker compose up -d ouroboros
|
||||
- ℹ️ 장 시작/종료 알림
|
||||
- 📝 시스템 시작/종료 상태
|
||||
|
||||
**안전장치**: 알림 실패해도 거래는 계속 진행됩니다. 텔레그램 API 오류나 설정 누락이 있어도 거래 시스템은 정상 작동합니다.
|
||||
### 양방향 명령어
|
||||
|
||||
`TELEGRAM_COMMANDS_ENABLED=true` (기본값) 설정 시 9개 대화형 명령어 지원:
|
||||
|
||||
| 명령어 | 설명 |
|
||||
|--------|------|
|
||||
| `/help` | 사용 가능한 명령어 목록 |
|
||||
| `/status` | 거래 상태 (모드, 시장, P&L) |
|
||||
| `/positions` | 계좌 요약 (잔고, 현금, P&L) |
|
||||
| `/report` | 일일 요약 (거래 수, P&L, 승률) |
|
||||
| `/scenarios` | 오늘의 플레이북 시나리오 |
|
||||
| `/review` | 최근 스코어카드 (L6_DAILY) |
|
||||
| `/dashboard` | 대시보드 URL 표시 |
|
||||
| `/stop` | 거래 일시 정지 |
|
||||
| `/resume` | 거래 재개 |
|
||||
|
||||
**안전장치**: 알림 실패해도 거래는 계속 진행됩니다.
|
||||
|
||||
## 테스트
|
||||
|
||||
35개 테스트가 TDD 방식으로 구현 전에 먼저 작성되었습니다.
|
||||
551개 테스트가 25개 파일에 걸쳐 구현되어 있습니다. 최소 커버리지 80%.
|
||||
|
||||
```
|
||||
tests/test_risk.py — 서킷 브레이커, 팻 핑거, 통합 검증 (11개)
|
||||
tests/test_broker.py — 토큰 관리, 타임아웃, HTTP 에러, 해시키 (6개)
|
||||
tests/test_brain.py — JSON 파싱, 신뢰도 임계값, 비정상 응답 처리 (15개)
|
||||
tests/test_scenario_engine.py — 시나리오 매칭 (44개)
|
||||
tests/test_data_integration.py — 외부 데이터 연동 (38개)
|
||||
tests/test_pre_market_planner.py — 플레이북 생성 (37개)
|
||||
tests/test_main.py — 거래 루프 통합 (37개)
|
||||
tests/test_token_efficiency.py — 토큰 최적화 (34개)
|
||||
tests/test_strategy_models.py — 전략 모델 검증 (33개)
|
||||
tests/test_telegram_commands.py — 텔레그램 명령어 (31개)
|
||||
tests/test_latency_control.py — 지연시간 제어 (30개)
|
||||
tests/test_telegram.py — 텔레그램 알림 (25개)
|
||||
... 외 16개 파일
|
||||
```
|
||||
|
||||
**상세**: [docs/testing.md](docs/testing.md)
|
||||
|
||||
## 기술 스택
|
||||
|
||||
- **언어**: Python 3.11+ (asyncio 기반)
|
||||
- **브로커**: KIS Open API (REST)
|
||||
- **브로커**: KIS Open API (REST, 국내+해외)
|
||||
- **AI**: Google Gemini Pro
|
||||
- **DB**: SQLite
|
||||
- **검증**: pytest + coverage
|
||||
- **DB**: SQLite (5개 테이블: trades, contexts, decision_logs, playbooks, context_metadata)
|
||||
- **대시보드**: FastAPI + uvicorn
|
||||
- **검증**: pytest + coverage (551 tests)
|
||||
- **CI/CD**: GitHub Actions
|
||||
- **배포**: Docker + Docker Compose
|
||||
|
||||
@@ -128,27 +185,50 @@ tests/test_brain.py — JSON 파싱, 신뢰도 임계값, 비정상 응답 처
|
||||
|
||||
```
|
||||
The-Ouroboros/
|
||||
├── .github/workflows/ci.yml # CI 파이프라인
|
||||
├── docs/
|
||||
│ ├── agents.md # AI 에이전트 페르소나 정의
|
||||
│ └── skills.md # 사용 가능한 도구 목록
|
||||
│ ├── architecture.md # 시스템 아키텍처
|
||||
│ ├── testing.md # 테스트 가이드
|
||||
│ ├── commands.md # 명령어 레퍼런스
|
||||
│ ├── context-tree.md # L1-L7 메모리 시스템
|
||||
│ ├── workflow.md # Git 워크플로우
|
||||
│ ├── agents.md # 에이전트 정책
|
||||
│ ├── skills.md # 도구 목록
|
||||
│ ├── disaster_recovery.md # 백업/복구
|
||||
│ └── requirements-log.md # 요구사항 기록
|
||||
├── src/
|
||||
│ ├── config.py # Pydantic 설정
|
||||
│ ├── logging_config.py # JSON 구조화 로깅
|
||||
│ ├── db.py # SQLite 거래 기록
|
||||
│ ├── main.py # 비동기 거래 루프
|
||||
│ ├── broker/kis_api.py # KIS API 클라이언트
|
||||
│ ├── brain/gemini_client.py # Gemini 의사결정 엔진
|
||||
│ ├── core/risk_manager.py # 리스크 관리
|
||||
│ ├── notifications/telegram_client.py # 텔레그램 알림
|
||||
│ ├── evolution/optimizer.py # 전략 진화 엔진
|
||||
│ └── strategies/base.py # 전략 베이스 클래스
|
||||
├── tests/ # TDD 테스트 스위트
|
||||
│ ├── analysis/ # 기술적 분석 (RSI, ATR, Smart Scanner)
|
||||
│ ├── backup/ # 백업 (스케줄러, S3, 무결성 검증)
|
||||
│ ├── brain/ # Gemini 의사결정 (프롬프트 최적화, 컨텍스트 선택)
|
||||
│ ├── broker/ # KIS API (국내 + 해외)
|
||||
│ ├── context/ # L1-L7 계층 메모리
|
||||
│ ├── core/ # 리스크 관리 (READ-ONLY)
|
||||
│ ├── dashboard/ # FastAPI 모니터링 대시보드
|
||||
│ ├── data/ # 외부 데이터 연동
|
||||
│ ├── evolution/ # 전략 진화 + Daily Review
|
||||
│ ├── logging/ # 의사결정 감사 추적
|
||||
│ ├── markets/ # 시장 스케줄 + 타임존
|
||||
│ ├── notifications/ # 텔레그램 알림 + 명령어
|
||||
│ ├── strategy/ # 플레이북 (Planner, Scenario Engine)
|
||||
│ ├── config.py # Pydantic 설정
|
||||
│ ├── db.py # SQLite 데이터베이스
|
||||
│ └── main.py # 비동기 거래 루프
|
||||
├── tests/ # 551개 테스트 (25개 파일)
|
||||
├── Dockerfile # 멀티스테이지 빌드
|
||||
├── docker-compose.yml # 서비스 오케스트레이션
|
||||
└── pyproject.toml # 의존성 및 도구 설정
|
||||
```
|
||||
|
||||
## 문서
|
||||
|
||||
- **[아키텍처](docs/architecture.md)** — 시스템 설계, 컴포넌트, 데이터 흐름
|
||||
- **[테스트](docs/testing.md)** — 테스트 구조, 커버리지, 작성 가이드
|
||||
- **[명령어](docs/commands.md)** — CLI, Dashboard, Telegram 명령어
|
||||
- **[컨텍스트 트리](docs/context-tree.md)** — L1-L7 계층 메모리
|
||||
- **[워크플로우](docs/workflow.md)** — Git 워크플로우 정책
|
||||
- **[에이전트 정책](docs/agents.md)** — 안전 제약, 금지 행위
|
||||
- **[백업/복구](docs/disaster_recovery.md)** — 재해 복구 절차
|
||||
- **[요구사항](docs/requirements-log.md)** — 사용자 요구사항 추적
|
||||
|
||||
## 라이선스
|
||||
|
||||
이 프로젝트의 라이선스는 [LICENSE](LICENSE) 파일을 참조하세요.
|
||||
|
||||
docs/agent-constraints.md (new file, 45 lines)
@@ -0,0 +1,45 @@
|
||||
# Agent Constraints
|
||||
|
||||
This document records **persistent behavioral constraints** for agents working on this repository.
|
||||
It is distinct from `docs/requirements-log.md`, which records **project/product requirements**.
|
||||
|
||||
## Scope
|
||||
|
||||
- Applies to all AI agents and automation that modify this repo.
|
||||
- Supplements (does not replace) `docs/agents.md` and `docs/workflow.md`.
|
||||
|
||||
## Persistent Rules
|
||||
|
||||
1. **Workflow enforcement**
|
||||
- Follow `docs/workflow.md` for all changes.
|
||||
- Create a Gitea issue before any code or documentation change.
|
||||
- Work on a feature branch `feature/issue-{N}-{short-description}` and open a PR.
|
||||
- Never commit directly to `main`.
|
||||
|
||||
2. **Document-first routing**
|
||||
- When performing work, consult relevant `docs/` files *before* making changes.
|
||||
- Route decisions to the documented policy whenever applicable.
|
||||
- If guidance conflicts, prefer the stricter/safety-first rule and note it in the PR.
|
||||
|
||||
3. **Docs with code**
|
||||
- Any code change must be accompanied by relevant documentation updates.
|
||||
- If no doc update is needed, state the reason explicitly in the PR.
|
||||
|
||||
4. **Session-persistent user constraints**
|
||||
- If the user requests that a behavior should persist across sessions, record it here
|
||||
(or in a dedicated policy doc) and reference it when working.
|
||||
- Keep entries short and concrete, with dates.
|
||||
|
||||
## Change Control
|
||||
|
||||
- Changes to this file follow the same workflow as code changes.
|
||||
- Keep the history chronological and minimize rewording of existing entries.
|
||||
|
||||
## History
|
||||
|
||||
### 2026-02-08
|
||||
|
||||
- Always enforce Gitea workflow: issue -> feature branch -> PR before changes.
|
||||
- When work requires guidance, consult the relevant `docs/` policies first.
|
||||
- Any code change must be accompanied by relevant documentation updates.
|
||||
- Persist user constraints across sessions by recording them in this document.
|
||||
@@ -2,7 +2,44 @@
|
||||
|
||||
## Overview
|
||||
|
||||
Self-evolving AI trading agent for global stock markets via KIS (Korea Investment & Securities) API. The main loop in `src/main.py` orchestrates four components in a 60-second cycle per stock across multiple markets.
|
||||
Self-evolving AI trading agent for global stock markets via KIS (Korea Investment & Securities) API. The main loop in `src/main.py` orchestrates components across multiple markets with two trading modes: daily (batch API calls) or realtime (per-stock decisions).
|
||||
|
||||
**v2 Proactive Playbook Architecture**: The system uses a "plan once, execute locally" approach. Pre-market, the AI generates a playbook of scenarios (one Gemini API call per market per day). During trading hours, a local scenario engine matches live market data against these pre-computed scenarios — no additional AI calls needed. This dramatically reduces API costs and latency.
|
||||
|
||||
## Trading Modes
|
||||
|
||||
The system supports two trading frequency modes controlled by the `TRADE_MODE` environment variable:
|
||||
|
||||
### Daily Mode (default)
|
||||
|
||||
Optimized for Gemini Free tier API limits (20 calls/day):
|
||||
|
||||
- **Batch decisions**: 1 API call per market per session
|
||||
- **Fixed schedule**: 4 sessions per day at 6-hour intervals (configurable)
|
||||
- **API efficiency**: Processes all stocks in a market simultaneously
|
||||
- **Use case**: Free tier users, cost-conscious deployments
|
||||
- **Configuration**:
|
||||
```bash
|
||||
TRADE_MODE=daily
|
||||
DAILY_SESSIONS=4 # Sessions per day (1-10)
|
||||
SESSION_INTERVAL_HOURS=6 # Hours between sessions (1-24)
|
||||
```
|
||||
|
||||
**Example**: With 2 markets (US, KR) and 4 sessions/day = 8 API calls/day (within 20 call limit)
|
||||
|
||||
### Realtime Mode
|
||||
|
||||
High-frequency trading with individual stock analysis:
|
||||
|
||||
- **Per-stock decisions**: 1 API call per stock per cycle
|
||||
- **60-second interval**: Continuous monitoring
|
||||
- **Use case**: Production deployments with Gemini paid tier
|
||||
- **Configuration**:
|
||||
```bash
|
||||
TRADE_MODE=realtime
|
||||
```
|
||||
|
||||
**Note**: Realtime mode requires Gemini API subscription due to high call volume.
|
||||
|
||||
## Core Components
|
||||
|
||||
@@ -11,9 +48,11 @@ Self-evolving AI trading agent for global stock markets via KIS (Korea Investmen
|
||||
**KISBroker** (`kis_api.py`) — Async KIS API client for domestic Korean market
|
||||
|
||||
- Automatic OAuth token refresh (valid for 24 hours)
|
||||
- Leaky-bucket rate limiter (10 requests per second)
|
||||
- Leaky-bucket rate limiter (configurable RPS, default 2.0)
|
||||
- POST body hash-key signing for order authentication
|
||||
- Custom SSL context with disabled hostname verification for VTS (virtual trading) endpoint due to known certificate mismatch
|
||||
- `fetch_market_rankings()` — Fetch volume surge rankings from KIS API
|
||||
- `get_daily_prices()` — Fetch OHLCV history for technical analysis
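
A minimal async pacer in the spirit of the leaky-bucket limiter described above; this is an illustrative sketch, not the broker's actual limiter:

```python
import asyncio
import time

class LeakyBucketLimiter:
    """Illustrative pacer: spaces requests at 1/rps seconds (default 2.0 RPS)."""

    def __init__(self, rps: float = 2.0) -> None:
        self._interval = 1.0 / rps
        self._next_allowed = 0.0
        self._lock = asyncio.Lock()

    async def acquire(self) -> None:
        async with self._lock:
            now = time.monotonic()
            wait = self._next_allowed - now
            if wait > 0:
                await asyncio.sleep(wait)
            self._next_allowed = max(now, self._next_allowed) + self._interval
```
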
**OverseasBroker** (`overseas.py`) — KIS overseas stock API wrapper
|
||||
|
||||
@@ -28,10 +67,47 @@ Self-evolving AI trading agent for global stock markets via KIS (Korea Investmen
|
||||
- `is_market_open()` checks weekends, trading hours, lunch breaks
|
||||
- `get_open_markets()` returns currently active markets
|
||||
- `get_next_market_open()` finds next market to open and when
|
||||
- 10 global markets defined (KR, US_NASDAQ, US_NYSE, US_AMEX, JP, HK, CN_SHA, CN_SZA, VN_HNX, VN_HSX)
|
||||
|
||||
### 2. Brain (`src/brain/gemini_client.py`)
|
||||
**Overseas Ranking API Methods** (added in v0.10.x):
|
||||
- `fetch_overseas_rankings()` — Fetch overseas ranking universe (fluctuation / volume)
|
||||
- Ranking endpoint paths and TR_IDs are configurable via environment variables
|
||||
|
||||
**GeminiClient** — AI decision engine powered by Google Gemini
|
||||
### 2. Analysis (`src/analysis/`)
|
||||
|
||||
**VolatilityAnalyzer** (`volatility.py`) — Technical indicator calculations
|
||||
|
||||
- ATR (Average True Range) for volatility measurement
|
||||
- RSI (Relative Strength Index) using Wilder's smoothing method
|
||||
- Price change percentages across multiple timeframes
|
||||
- Volume surge ratios and price-volume divergence
|
||||
- Momentum scoring (0-100 scale)
|
||||
- Breakout/breakdown pattern detection
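
For reference, a self-contained sketch of RSI with Wilder's smoothing as described above; it assumes a plain list of closing prices (oldest first) and is not the actual `VolatilityAnalyzer` code:

```python
def wilder_rsi(closes: list[float], period: int = 14) -> float:
    """RSI with Wilder's smoothing; `closes` is oldest-first. Sketch only."""
    if len(closes) < period + 1:
        raise ValueError("need at least period + 1 closing prices")
    gains, losses = [], []
    for prev, curr in zip(closes, closes[1:]):
        change = curr - prev
        gains.append(max(change, 0.0))
        losses.append(max(-change, 0.0))
    # Seed with simple averages, then apply Wilder's recursive smoothing.
    avg_gain = sum(gains[:period]) / period
    avg_loss = sum(losses[:period]) / period
    for gain, loss in zip(gains[period:], losses[period:]):
        avg_gain = (avg_gain * (period - 1) + gain) / period
        avg_loss = (avg_loss * (period - 1) + loss) / period
    if avg_loss == 0:
        return 100.0
    return 100.0 - 100.0 / (1.0 + avg_gain / avg_loss)
```
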
**SmartVolatilityScanner** (`smart_scanner.py`) — Python-first filtering pipeline
|
||||
|
||||
- **Domestic (KR)**:
|
||||
- **Step 1**: Fetch domestic fluctuation ranking as primary universe
|
||||
- **Step 2**: Fetch domestic volume ranking for liquidity bonus
|
||||
- **Step 3**: Compute volatility-first score (max of daily change% and intraday range%)
|
||||
- **Step 4**: Apply liquidity bonus and return top N candidates
|
||||
- **Overseas (US/JP/HK/CN/VN)**:
|
||||
- **Step 1**: Fetch overseas ranking universe (fluctuation rank + volume rank bonus)
|
||||
- **Step 2**: Compute volatility-first score (max of daily change% and intraday range%)
|
||||
- **Step 3**: Apply liquidity bonus from volume ranking
|
||||
- **Step 4**: Return top N candidates (default 3)
|
||||
- **Fallback (overseas only)**: If ranking API is unavailable, uses dynamic universe
|
||||
from runtime active symbols + recent traded symbols + current holdings (no static watchlist)
|
||||
- **Realtime mode only**: Daily mode uses batch processing for API efficiency
|
||||
|
||||
**Benefits:**
|
||||
- Reduces Gemini API calls from 20-30 stocks to 1-3 qualified candidates
|
||||
- Fast Python-based filtering before expensive AI judgment
|
||||
- Logs selection context (RSI-compatible proxy, volume_ratio, signal, score) for Evolution system
|
||||
|
||||
### 3. Brain (`src/brain/`)
|
||||
|
||||
**GeminiClient** (`gemini_client.py`) — AI decision engine powered by Google Gemini
|
||||
|
||||
- Constructs structured prompts from market data
|
||||
- Parses JSON responses into `TradeDecision` objects (`action`, `confidence`, `rationale`)
|
||||
@@ -39,11 +115,20 @@ Self-evolving AI trading agent for global stock markets via KIS (Korea Investmen
|
||||
- Falls back to safe HOLD on any parse/API error
|
||||
- Handles markdown-wrapped JSON, malformed responses, invalid actions
|
||||
|
||||
### 3. Risk Manager (`src/core/risk_manager.py`)
|
||||
**PromptOptimizer** (`prompt_optimizer.py`) — Token efficiency optimization
|
||||
|
||||
- Reduces prompt size while preserving decision quality
|
||||
- Caches optimized prompts
|
||||
|
||||
**ContextSelector** (`context_selector.py`) — Relevant context selection for prompts
|
||||
|
||||
- Selects appropriate context layers for current market conditions
|
||||
|
||||
### 4. Risk Manager (`src/core/risk_manager.py`)
|
||||
|
||||
**RiskManager** — Safety circuit breaker and order validation
|
||||
|
||||
⚠️ **READ-ONLY by policy** (see [`docs/agents.md`](./agents.md))
|
||||
> **READ-ONLY by policy** (see [`docs/agents.md`](./agents.md))
|
||||
|
||||
- **Circuit Breaker**: Halts all trading via `SystemExit` when daily P&L drops below -3.0%
|
||||
- Threshold may only be made stricter, never relaxed
|
||||
@@ -51,7 +136,79 @@ Self-evolving AI trading agent for global stock markets via KIS (Korea Investmen
|
||||
- **Fat-Finger Protection**: Rejects orders exceeding 30% of available cash
|
||||
- Must always be enforced, cannot be disabled
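
A condensed illustration of the two checks (simplified; the real logic lives in `src/core/risk_manager.py` and may differ in exception types and structure):

```python
MAX_LOSS_PCT = 3.0      # circuit-breaker threshold on daily P&L
MAX_ORDER_PCT = 30.0    # fat-finger limit as a share of available cash

def validate_order(daily_pnl_pct: float, order_amount: float, available_cash: float) -> None:
    # Circuit breaker: halt all trading once the daily loss reaches -3.0%.
    if daily_pnl_pct <= -MAX_LOSS_PCT:
        raise SystemExit(f"circuit breaker tripped at {daily_pnl_pct:.2f}%")
    # Fat-finger protection: reject any single order above 30% of cash.
    if order_amount > available_cash * (MAX_ORDER_PCT / 100.0):
        raise ValueError("order exceeds fat-finger limit")
```
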
### 4. Notifications (`src/notifications/telegram_client.py`)
|
||||
### 5. Strategy (`src/strategy/`)
|
||||
|
||||
**Pre-Market Planner** (`pre_market_planner.py`) — AI playbook generation
|
||||
|
||||
- Runs before market open (configurable `PRE_MARKET_MINUTES`, default 30)
|
||||
- Generates scenario-based playbooks via single Gemini API call per market
|
||||
- Handles timeout (`PLANNER_TIMEOUT_SECONDS`, default 60) with defensive playbook fallback
|
||||
- Persists playbooks to database for audit trail
|
||||
|
||||
**Scenario Engine** (`scenario_engine.py`) — Local scenario matching
|
||||
|
||||
- Matches live market data against pre-computed playbook scenarios
|
||||
- No AI calls during trading hours — pure Python matching logic
|
||||
- Returns matched scenarios with confidence scores
|
||||
- Configurable `MAX_SCENARIOS_PER_STOCK` (default 5)
|
||||
- Periodic rescan at `RESCAN_INTERVAL_SECONDS` (default 300)
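
A hypothetical sketch of the local matching idea; the `Scenario` fields shown here are assumptions, not the project's Pydantic models in `src/strategy/models.py`:

```python
from dataclasses import dataclass

@dataclass
class Scenario:
    stock_code: str
    trigger_above: float | None   # fire when price rises to or above this level
    trigger_below: float | None   # fire when price falls to or below this level
    action: str                   # "BUY" | "SELL" | "HOLD"

def match_scenarios(scenarios: list[Scenario], prices: dict[str, float]) -> list[Scenario]:
    """Pure-Python matching against live prices; no AI call during trading hours."""
    matched = []
    for s in scenarios:
        price = prices.get(s.stock_code)
        if price is None:
            continue
        if s.trigger_above is not None and price >= s.trigger_above:
            matched.append(s)
        elif s.trigger_below is not None and price <= s.trigger_below:
            matched.append(s)
    return matched
```
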
**Playbook Store** (`playbook_store.py`) — Playbook persistence
|
||||
|
||||
- SQLite-backed storage for daily playbooks
|
||||
- Date and market-based retrieval
|
||||
- Status tracking (generated, active, expired)
|
||||
|
||||
**Models** (`models.py`) — Pydantic data models
|
||||
|
||||
- Scenario, Playbook, MatchResult, and related type definitions
|
||||
|
||||
### 6. Context System (`src/context/`)
|
||||
|
||||
**Context Store** (`store.py`) — L1-L7 hierarchical memory
|
||||
|
||||
- 7-layer context system (see [docs/context-tree.md](./context-tree.md)):
|
||||
- L1: Tick-level (real-time price)
|
||||
- L2: Intraday (session summary)
|
||||
- L3: Daily (end-of-day)
|
||||
- L4: Weekly (trend analysis)
|
||||
- L5: Monthly (strategy review)
|
||||
- L6: Daily Review (scorecard)
|
||||
- L7: Evolution (long-term learning)
|
||||
- Key-value storage with timeframe tagging
|
||||
- SQLite persistence in `contexts` table
|
||||
|
||||
**Context Scheduler** (`scheduler.py`) — Periodic aggregation
|
||||
|
||||
- Scheduled summarization from lower to higher layers
|
||||
- Configurable aggregation intervals
|
||||
|
||||
**Context Summarizer** (`summarizer.py`) — Layer summarization
|
||||
|
||||
- Aggregates lower-layer data into higher-layer summaries
|
||||
|
||||
### 7. Dashboard (`src/dashboard/`)
|
||||
|
||||
**FastAPI App** (`app.py`) — Read-only monitoring dashboard
|
||||
|
||||
- Runs as daemon thread when enabled (`--dashboard` CLI flag or `DASHBOARD_ENABLED=true`)
|
||||
- Configurable host/port (`DASHBOARD_HOST`, `DASHBOARD_PORT`, default `127.0.0.1:8080`)
|
||||
- Serves static HTML frontend
|
||||
|
||||
**8 API Endpoints:**
|
||||
|
||||
| Endpoint | Method | Description |
|
||||
|----------|--------|-------------|
|
||||
| `/` | GET | Static HTML dashboard |
|
||||
| `/api/status` | GET | Daily trading status by market |
|
||||
| `/api/playbook/{date}` | GET | Playbook for specific date and market |
|
||||
| `/api/scorecard/{date}` | GET | Daily scorecard from L6_DAILY context |
|
||||
| `/api/performance` | GET | Trading performance metrics (by market + combined) |
|
||||
| `/api/context/{layer}` | GET | Query context by layer (L1-L7) |
|
||||
| `/api/decisions` | GET | Decision log entries with outcomes |
|
||||
| `/api/scenarios/active` | GET | Today's matched scenarios |
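
An illustrative read-only handler in the style of the endpoints above, querying the `playbooks` table from the Database Schema section; the handler body is an assumption, not the actual `src/dashboard/app.py` code:

```python
import json
import sqlite3

from fastapi import FastAPI, HTTPException

app = FastAPI(title="Ouroboros Dashboard (read-only)")
DB_PATH = "data/trades.db"

@app.get("/api/playbook/{date}")
async def get_playbook(date: str, market: str = "KR") -> dict:
    # Read-only query against the playbooks table (see Database Schema below).
    with sqlite3.connect(DB_PATH) as conn:
        row = conn.execute(
            "SELECT playbook_json FROM playbooks WHERE date = ? AND market = ?",
            (date, market),
        ).fetchone()
    if row is None:
        raise HTTPException(status_code=404, detail="no playbook for that date/market")
    return {"date": date, "market": market, "playbook": json.loads(row[0])}
```
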
### 8. Notifications (`src/notifications/telegram_client.py`)
|
||||
|
||||
**TelegramClient** — Real-time event notifications via Telegram Bot API
|
||||
|
||||
@@ -59,7 +216,13 @@ Self-evolving AI trading agent for global stock markets via KIS (Korea Investmen
|
||||
- Non-blocking: failures are logged but never crash trading
|
||||
- Rate-limited: 1 message/second default to respect Telegram API limits
|
||||
- Auto-disabled when credentials missing
|
||||
- Gracefully handles API errors, network timeouts, invalid tokens
|
||||
|
||||
**TelegramCommandHandler** — Bidirectional command interface
|
||||
|
||||
- Long polling from Telegram API (configurable `TELEGRAM_POLLING_INTERVAL`)
|
||||
- 9 interactive commands: `/help`, `/status`, `/positions`, `/report`, `/scenarios`, `/review`, `/dashboard`, `/stop`, `/resume`
|
||||
- Authorization filtering by `TELEGRAM_CHAT_ID`
|
||||
- Enable/disable via `TELEGRAM_COMMANDS_ENABLED` (default: true)
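
A sketch of the fail-safe send pattern described above (errors are logged, never raised into the trading loop); `httpx` is used here for brevity and the project's actual HTTP client may differ:

```python
import logging

import httpx

log = logging.getLogger("notifications")

async def send_telegram(token: str, chat_id: str, text: str) -> None:
    if not token or not chat_id:
        return  # auto-disabled when credentials are missing
    try:
        async with httpx.AsyncClient(timeout=5.0) as client:
            await client.post(
                f"https://api.telegram.org/bot{token}/sendMessage",
                json={"chat_id": chat_id, "text": text},
            )
    except Exception:  # notification failures must never crash trading
        log.exception("telegram notification failed; trading continues")
```
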
**Notification Types:**
|
||||
- Trade execution (BUY/SELL with confidence)
|
||||
@@ -67,12 +230,12 @@ Self-evolving AI trading agent for global stock markets via KIS (Korea Investmen
|
||||
- Fat-finger protection triggers (order rejection)
|
||||
- Market open/close events
|
||||
- System startup/shutdown status
|
||||
- Playbook generation results
|
||||
- Stop-loss monitoring alerts
|
||||
|
||||
**Setup:** See [src/notifications/README.md](../src/notifications/README.md) for bot creation and configuration.
|
||||
### 9. Evolution (`src/evolution/`)
|
||||
|
||||
### 5. Evolution (`src/evolution/optimizer.py`)
|
||||
|
||||
**StrategyOptimizer** — Self-improvement loop
|
||||
**StrategyOptimizer** (`optimizer.py`) — Self-improvement loop
|
||||
|
||||
- Analyzes high-confidence losing trades from SQLite
|
||||
- Asks Gemini to generate new `BaseStrategy` subclasses
|
||||
@@ -80,79 +243,198 @@ Self-evolving AI trading agent for global stock markets via KIS (Korea Investmen
|
||||
- Simulates PR creation for human review
|
||||
- Only activates strategies that pass all tests
|
||||
|
||||
**DailyReview** (`daily_review.py`) — End-of-day review
|
||||
|
||||
- Generates comprehensive trade performance summary
|
||||
- Stores results in L6_DAILY context layer
|
||||
- Tracks win rate, P&L, confidence accuracy
|
||||
|
||||
**DailyScorecard** (`scorecard.py`) — Performance scoring
|
||||
|
||||
- Calculates daily metrics (trades, P&L, win rate, avg confidence)
|
||||
- Enables trend tracking across days
|
||||
|
||||
**Stop-Loss Monitoring** — Real-time position protection
|
||||
|
||||
- Monitors positions against stop-loss levels from playbook scenarios
|
||||
- Sends Telegram alerts when thresholds approached or breached
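
A tiny illustration of the threshold check, with assumed parameter names:

```python
def stop_loss_breached(current_price: float, entry_price: float, stop_loss_pct: float) -> bool:
    """True when the position's drawdown has reached the scenario's stop level."""
    drawdown_pct = (current_price - entry_price) / entry_price * 100.0
    return drawdown_pct <= -stop_loss_pct
```
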
### 10. Decision Logger (`src/logging/decision_logger.py`)
|
||||
|
||||
**DecisionLogger** — Comprehensive audit trail
|
||||
|
||||
- Logs every trading decision with full context snapshot
|
||||
- Captures input data, rationale, confidence, and outcomes
|
||||
- Supports outcome tracking (P&L, accuracy) for post-analysis
|
||||
- Stored in `decision_logs` table with indexed queries
|
||||
- Review workflow support (reviewed flag, review notes)
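
A minimal sketch of writing one audit row into the `decision_logs` table shown in the Database Schema section; this is not the real `DecisionLogger` API:

```python
import json
import sqlite3
import uuid
from datetime import datetime, timezone

def log_decision(db_path: str, stock_code: str, action: str,
                 confidence: int, rationale: str, input_data: dict) -> str:
    """Insert one audit-trail row and return its decision_id."""
    decision_id = str(uuid.uuid4())
    with sqlite3.connect(db_path) as conn:
        conn.execute(
            "INSERT INTO decision_logs "
            "(decision_id, timestamp, stock_code, action, confidence, rationale, input_data) "
            "VALUES (?, ?, ?, ?, ?, ?, ?)",
            (decision_id, datetime.now(timezone.utc).isoformat(),
             stock_code, action, confidence, rationale, json.dumps(input_data)),
        )
    return decision_id
```
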
### 11. Data Integration (`src/data/`)
|
||||
|
||||
**External Data Sources** (optional):
|
||||
|
||||
- `news_api.py` — News sentiment data
|
||||
- `market_data.py` — Extended market data
|
||||
- `economic_calendar.py` — Economic event calendar
|
||||
|
||||
### 12. Backup (`src/backup/`)
|
||||
|
||||
**Disaster Recovery** (see [docs/disaster_recovery.md](./disaster_recovery.md)):
|
||||
|
||||
- `scheduler.py` — Automated backup scheduling
|
||||
- `exporter.py` — Data export to various formats
|
||||
- `cloud_storage.py` — S3-compatible cloud backup
|
||||
- `health_monitor.py` — Backup integrity verification
|
||||
|
||||
## Data Flow
|
||||
|
||||
### Playbook Mode (Daily — Primary v2 Flow)
|
||||
|
||||
```
|
||||
┌─────────────────────────────────────────────────────────────┐
|
||||
│ Main Loop (60s cycle per stock, per market) │
|
||||
│ Pre-Market Phase (before market open) │
|
||||
└─────────────────────────────────────────────────────────────┘
|
||||
│
|
||||
▼
|
||||
┌──────────────────────────────────┐
|
||||
│ Market Schedule Check │
|
||||
│ - Get open markets │
|
||||
│ - Filter by enabled markets │
|
||||
│ - Wait if all closed │
|
||||
└──────────────────┬────────────────┘
|
||||
│ Pre-Market Planner │
|
||||
│ - 1 Gemini API call per market │
|
||||
│ - Generate scenario playbook │
|
||||
│ - Store in playbooks table │
|
||||
└──────────────────┬───────────────┘
|
||||
│
|
||||
▼
|
||||
┌─────────────────────────────────────────────────────────────┐
|
||||
│ Trading Hours (market open → close) │
|
||||
└─────────────────────────────────────────────────────────────┘
|
||||
│
|
||||
▼
|
||||
┌──────────────────────────────────┐
|
||||
│ Market Schedule Check │
|
||||
│ - Get open markets │
|
||||
│ - Filter by enabled markets │
|
||||
└──────────────────┬───────────────┘
|
||||
│
|
||||
▼
|
||||
┌──────────────────────────────────┐
|
||||
│ Scenario Engine (local) │
|
||||
│ - Match live data vs playbook │
|
||||
│ - No AI calls needed │
|
||||
│ - Return matched scenarios │
|
||||
└──────────────────┬───────────────┘
|
||||
│
|
||||
▼
|
||||
┌──────────────────────────────────┐
|
||||
│ Risk Manager: Validate Order │
|
||||
│ - Check circuit breaker │
|
||||
│ - Check fat-finger limit │
|
||||
└──────────────────┬───────────────┘
|
||||
│
|
||||
▼
|
||||
┌──────────────────────────────────┐
|
||||
│ Broker: Execute Order │
|
||||
│ - Domestic: send_order() │
|
||||
│ - Overseas: send_overseas_order()│
|
||||
└──────────────────┬───────────────┘
|
||||
│
|
||||
▼
|
||||
┌──────────────────────────────────┐
|
||||
│ Decision Logger + DB │
|
||||
│ - Full audit trail │
|
||||
│ - Context snapshot │
|
||||
│ - Telegram notification │
|
||||
└──────────────────┬───────────────┘
|
||||
│
|
||||
▼
|
||||
┌─────────────────────────────────────────────────────────────┐
|
||||
│ Post-Market Phase │
|
||||
└─────────────────────────────────────────────────────────────┘
|
||||
│
|
||||
▼
|
||||
┌──────────────────────────────────┐
|
||||
│ Daily Review + Scorecard │
|
||||
│ - Performance summary │
|
||||
│ - Store in L6_DAILY context │
|
||||
│ - Evolution learning │
|
||||
└──────────────────────────────────┘
|
||||
```
|
||||
|
||||
### Realtime Mode (with Smart Scanner)
|
||||
|
||||
```
|
||||
┌─────────────────────────────────────────────────────────────┐
|
||||
│ Main Loop (60s cycle per market) │
|
||||
└─────────────────────────────────────────────────────────────┘
|
||||
│
|
||||
▼
|
||||
┌──────────────────────────────────┐
|
||||
│ Market Schedule Check │
|
||||
│ - Get open markets │
|
||||
│ - Filter by enabled markets │
|
||||
│ - Wait if all closed │
|
||||
└──────────────────┬───────────────┘
|
||||
│
|
||||
▼
|
||||
┌──────────────────────────────────┐
|
||||
│ Smart Scanner (Python-first) │
|
||||
│ - Domestic: fluctuation rank │
|
||||
│ + volume rank bonus │
|
||||
│ + volatility-first scoring │
|
||||
│ - Overseas: ranking universe │
|
||||
│ + volatility-first scoring │
|
||||
│ - Fallback: dynamic universe │
|
||||
│ - Return top 3 qualified stocks │
|
||||
└──────────────────┬───────────────┘
|
||||
│
|
||||
▼
|
||||
┌──────────────────────────────────┐
|
||||
│ For Each Qualified Candidate │
|
||||
└──────────────────┬───────────────┘
|
||||
│
|
||||
▼
|
||||
┌──────────────────────────────────┐
|
||||
│ Broker: Fetch Market Data │
|
||||
│ - Domestic: orderbook + balance │
|
||||
│ - Overseas: price + balance │
|
||||
└──────────────────┬────────────────┘
|
||||
└──────────────────┬───────────────┘
|
||||
│
|
||||
▼
|
||||
┌──────────────────────────────────┐
|
||||
│ Calculate P&L │
|
||||
│ pnl_pct = (eval - cost) / cost │
|
||||
└──────────────────┬────────────────┘
|
||||
│ Brain: Get Decision (AI) │
|
||||
│ - Build prompt with market data │
|
||||
│ - Call Gemini API │
|
||||
│ - Parse JSON response │
|
||||
│ - Return TradeDecision │
|
||||
└──────────────────┬───────────────┘
|
||||
│
|
||||
▼
|
||||
┌──────────────────────────────────┐
|
||||
│ Brain: Get Decision │
|
||||
│ - Build prompt with market data │
|
||||
│ - Call Gemini API │
|
||||
│ - Parse JSON response │
|
||||
│ - Return TradeDecision │
|
||||
└──────────────────┬────────────────┘
|
||||
│ Risk Manager: Validate Order │
|
||||
│ - Check circuit breaker │
|
||||
│ - Check fat-finger limit │
|
||||
└──────────────────┬───────────────┘
|
||||
│
|
||||
▼
|
||||
┌──────────────────────────────────┐
|
||||
│ Risk Manager: Validate Order │
|
||||
│ - Check circuit breaker │
|
||||
│ - Check fat-finger limit │
|
||||
│ - Raise if validation fails │
|
||||
└──────────────────┬────────────────┘
|
||||
│ Broker: Execute Order │
|
||||
│ - Domestic: send_order() │
|
||||
│ - Overseas: send_overseas_order()│
|
||||
└──────────────────┬───────────────┘
|
||||
│
|
||||
▼
|
||||
┌──────────────────────────────────┐
|
||||
│ Broker: Execute Order │
|
||||
│ - Domestic: send_order() │
|
||||
│ - Overseas: send_overseas_order() │
|
||||
└──────────────────┬────────────────┘
|
||||
│
|
||||
▼
|
||||
┌──────────────────────────────────┐
|
||||
│ Notifications: Send Alert │
|
||||
│ - Trade execution notification │
|
||||
│ - Non-blocking (errors logged) │
|
||||
│ - Rate-limited to 1/sec │
|
||||
└──────────────────┬────────────────┘
|
||||
│
|
||||
▼
|
||||
┌──────────────────────────────────┐
|
||||
│ Database: Log Trade │
|
||||
│ - SQLite (data/trades.db) │
|
||||
│ - Track: action, confidence, │
|
||||
│ rationale, market, exchange │
|
||||
└───────────────────────────────────┘
|
||||
│ Decision Logger + Notifications │
|
||||
│ - Log trade to SQLite │
|
||||
│ - selection_context (JSON) │
|
||||
│ - Telegram notification │
|
||||
└──────────────────────────────────┘
|
||||
```
|
||||
|
||||
## Database Schema
|
||||
|
||||
**SQLite** (`src/db.py`)
|
||||
**SQLite** (`src/db.py`) — Database: `data/trades.db`
|
||||
|
||||
### trades
|
||||
```sql
|
||||
CREATE TABLE trades (
|
||||
id INTEGER PRIMARY KEY AUTOINCREMENT,
|
||||
@@ -164,12 +446,73 @@ CREATE TABLE trades (
|
||||
quantity INTEGER,
|
||||
price REAL,
|
||||
pnl REAL DEFAULT 0.0,
|
||||
market TEXT DEFAULT 'KR', -- KR | US_NASDAQ | JP | etc.
|
||||
exchange_code TEXT DEFAULT 'KRX' -- KRX | NASD | NYSE | etc.
|
||||
market TEXT DEFAULT 'KR',
|
||||
exchange_code TEXT DEFAULT 'KRX',
|
||||
selection_context TEXT, -- JSON: {rsi, volume_ratio, signal, score}
|
||||
decision_id TEXT -- Links to decision_logs
|
||||
);
|
||||
```
|
||||
|
||||
Auto-migration: Adds `market` and `exchange_code` columns if missing for backward compatibility.
|
||||
### contexts
|
||||
```sql
|
||||
CREATE TABLE contexts (
|
||||
id INTEGER PRIMARY KEY AUTOINCREMENT,
|
||||
layer TEXT NOT NULL, -- L1 through L7
|
||||
timeframe TEXT,
|
||||
key TEXT NOT NULL,
|
||||
value TEXT NOT NULL, -- JSON data
|
||||
created_at TEXT NOT NULL,
|
||||
updated_at TEXT NOT NULL
|
||||
);
|
||||
-- Indices: idx_contexts_layer, idx_contexts_timeframe, idx_contexts_updated
|
||||
```
|
||||
|
||||
### decision_logs
|
||||
```sql
|
||||
CREATE TABLE decision_logs (
|
||||
decision_id TEXT PRIMARY KEY,
|
||||
timestamp TEXT NOT NULL,
|
||||
stock_code TEXT,
|
||||
market TEXT,
|
||||
exchange_code TEXT,
|
||||
action TEXT,
|
||||
confidence INTEGER,
|
||||
rationale TEXT,
|
||||
context_snapshot TEXT, -- JSON: full context at decision time
|
||||
input_data TEXT, -- JSON: market data used
|
||||
outcome_pnl REAL,
|
||||
outcome_accuracy REAL,
|
||||
reviewed INTEGER DEFAULT 0,
|
||||
review_notes TEXT
|
||||
);
|
||||
-- Indices: idx_decision_logs_timestamp, idx_decision_logs_reviewed, idx_decision_logs_confidence
|
||||
```
|
||||
|
||||
### playbooks
|
||||
```sql
|
||||
CREATE TABLE playbooks (
|
||||
id INTEGER PRIMARY KEY AUTOINCREMENT,
|
||||
date TEXT NOT NULL,
|
||||
market TEXT NOT NULL,
|
||||
status TEXT DEFAULT 'generated',
|
||||
playbook_json TEXT NOT NULL, -- Full playbook with scenarios
|
||||
generated_at TEXT NOT NULL,
|
||||
token_count INTEGER,
|
||||
scenario_count INTEGER,
|
||||
match_count INTEGER DEFAULT 0
|
||||
);
|
||||
-- Indices: idx_playbooks_date, idx_playbooks_market
|
||||
```
|
||||
|
||||
### context_metadata
|
||||
```sql
|
||||
CREATE TABLE context_metadata (
|
||||
layer TEXT PRIMARY KEY,
|
||||
description TEXT,
|
||||
retention_days INTEGER,
|
||||
aggregation_source TEXT
|
||||
);
|
||||
```
|
||||
|
||||
## Configuration
|
||||
|
||||
@@ -184,18 +527,81 @@ KIS_APP_SECRET=your_app_secret
|
||||
KIS_ACCOUNT_NO=XXXXXXXX-XX
|
||||
GEMINI_API_KEY=your_gemini_key
|
||||
|
||||
# Optional
|
||||
# Optional — Trading Mode
|
||||
MODE=paper # paper | live
|
||||
TRADE_MODE=daily # daily | realtime
|
||||
DAILY_SESSIONS=4 # Sessions per day (daily mode only)
|
||||
SESSION_INTERVAL_HOURS=6 # Hours between sessions (daily mode only)
|
||||
|
||||
# Optional — Database
|
||||
DB_PATH=data/trades.db
|
||||
|
||||
# Optional — Risk
|
||||
CONFIDENCE_THRESHOLD=80
|
||||
MAX_LOSS_PCT=3.0
|
||||
MAX_ORDER_PCT=30.0
|
||||
ENABLED_MARKETS=KR,US_NASDAQ # Comma-separated market codes
|
||||
|
||||
# Telegram Notifications (optional)
|
||||
# Optional — Markets
|
||||
ENABLED_MARKETS=KR,US # Comma-separated market codes
|
||||
RATE_LIMIT_RPS=2.0 # KIS API requests per second
|
||||
|
||||
# Optional — Pre-Market Planner (v2)
|
||||
PRE_MARKET_MINUTES=30 # Minutes before market open to generate playbook
|
||||
MAX_SCENARIOS_PER_STOCK=5 # Max scenarios per stock in playbook
|
||||
PLANNER_TIMEOUT_SECONDS=60 # Timeout for playbook generation
|
||||
DEFENSIVE_PLAYBOOK_ON_FAILURE=true # Fallback on AI failure
|
||||
RESCAN_INTERVAL_SECONDS=300 # Scenario rescan interval during trading
|
||||
|
||||
# Optional — Smart Scanner (realtime mode only)
|
||||
RSI_OVERSOLD_THRESHOLD=30 # 0-50, oversold threshold
|
||||
RSI_MOMENTUM_THRESHOLD=70 # 50-100, momentum threshold
|
||||
VOL_MULTIPLIER=2.0 # Minimum volume ratio (2.0 = 200%)
|
||||
SCANNER_TOP_N=3 # Max qualified candidates per scan
|
||||
|
||||
# Optional — Dashboard
|
||||
DASHBOARD_ENABLED=false # Enable FastAPI dashboard
|
||||
DASHBOARD_HOST=127.0.0.1 # Dashboard bind address
|
||||
DASHBOARD_PORT=8080 # Dashboard port (1-65535)
|
||||
|
||||
# Optional — Telegram
|
||||
TELEGRAM_BOT_TOKEN=1234567890:ABCdefGHIjklMNOpqrsTUVwxyz
|
||||
TELEGRAM_CHAT_ID=123456789
|
||||
TELEGRAM_ENABLED=true
|
||||
TELEGRAM_COMMANDS_ENABLED=true # Enable bidirectional commands
|
||||
TELEGRAM_POLLING_INTERVAL=1.0 # Command polling interval (seconds)
|
||||
|
||||
# Optional — Backup
|
||||
BACKUP_ENABLED=false
|
||||
BACKUP_DIR=data/backups
|
||||
S3_ENDPOINT_URL=...
|
||||
S3_ACCESS_KEY=...
|
||||
S3_SECRET_KEY=...
|
||||
S3_BUCKET_NAME=...
|
||||
S3_REGION=...
|
||||
|
||||
# Optional — External Data
|
||||
NEWS_API_KEY=...
|
||||
NEWS_API_PROVIDER=...
|
||||
MARKET_DATA_API_KEY=...
|
||||
|
||||
# Position Sizing (optional)
|
||||
POSITION_SIZING_ENABLED=true
|
||||
POSITION_BASE_ALLOCATION_PCT=5.0
|
||||
POSITION_MIN_ALLOCATION_PCT=1.0
|
||||
POSITION_MAX_ALLOCATION_PCT=10.0
|
||||
POSITION_VOLATILITY_TARGET_SCORE=50.0
|
||||
|
||||
# Legacy/compat scanner thresholds (kept for backward compatibility)
|
||||
RSI_OVERSOLD_THRESHOLD=30
|
||||
RSI_MOMENTUM_THRESHOLD=70
|
||||
VOL_MULTIPLIER=2.0
|
||||
|
||||
# Overseas Ranking API (optional override; account-dependent)
|
||||
OVERSEAS_RANKING_ENABLED=true
|
||||
OVERSEAS_RANKING_FLUCT_TR_ID=HHDFS76200100
|
||||
OVERSEAS_RANKING_VOLUME_TR_ID=HHDFS76200200
|
||||
OVERSEAS_RANKING_FLUCT_PATH=/uapi/overseas-price/v1/quotations/inquire-updown-rank
|
||||
OVERSEAS_RANKING_VOLUME_PATH=/uapi/overseas-price/v1/quotations/inquire-volume-rank
|
||||
```
|
||||
|
||||
Tests use in-memory SQLite (`DB_PATH=":memory:"`) and dummy credentials via `tests/conftest.py`.
|
||||
@@ -229,4 +635,9 @@ Tests use in-memory SQLite (`DB_PATH=":memory:"`) and dummy credentials via `tes
|
||||
- Invalid token → log error, trading unaffected
|
||||
- Rate limit exceeded → queued via rate limiter
|
||||
|
||||
**Guarantee**: Notification failures never interrupt trading operations.
|
||||
### Playbook Generation Failure
|
||||
- Timeout → fall back to defensive playbook (`DEFENSIVE_PLAYBOOK_ON_FAILURE`)
|
||||
- API error → use previous day's playbook if available
|
||||
- No playbook → skip pre-market phase, fall back to direct AI calls
|
||||
|
||||
**Guarantee**: Notification and dashboard failures never interrupt trading operations.
|
||||
|
||||
@@ -119,7 +119,7 @@ No decorator needed for async tests.
|
||||
# Install all dependencies (production + dev)
|
||||
pip install -e ".[dev]"
|
||||
|
||||
# Run full test suite with coverage
|
||||
# Run full test suite with coverage (551 tests across 25 files)
|
||||
pytest -v --cov=src --cov-report=term-missing
|
||||
|
||||
# Run a single test file
|
||||
@@ -137,11 +137,61 @@ mypy src/ --strict
|
||||
# Run the trading agent
|
||||
python -m src.main --mode=paper
|
||||
|
||||
# Run with dashboard enabled
|
||||
python -m src.main --mode=paper --dashboard
|
||||
|
||||
# Docker
|
||||
docker compose up -d ouroboros # Run agent
|
||||
docker compose --profile test up test # Run tests in container
|
||||
```
|
||||
|
||||
## Dashboard
|
||||
|
||||
The FastAPI dashboard provides read-only monitoring of the trading system.
|
||||
|
||||
### Starting the Dashboard
|
||||
|
||||
```bash
|
||||
# Via CLI flag
|
||||
python -m src.main --mode=paper --dashboard
|
||||
|
||||
# Via environment variable
|
||||
DASHBOARD_ENABLED=true python -m src.main --mode=paper
|
||||
```
|
||||
|
||||
Dashboard runs as a daemon thread on `DASHBOARD_HOST:DASHBOARD_PORT` (default: `127.0.0.1:8080`).
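
A sketch of how such a daemon thread can be started with uvicorn; the import string `src.dashboard.app:app` is an assumption:

```python
import threading

import uvicorn

def start_dashboard(host: str = "127.0.0.1", port: int = 8080) -> threading.Thread:
    config = uvicorn.Config("src.dashboard.app:app", host=host, port=port, log_level="info")
    server = uvicorn.Server(config)
    # Daemon thread: the dashboard dies together with the trading process.
    thread = threading.Thread(target=server.run, daemon=True)
    thread.start()
    return thread
```
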
### API Endpoints
|
||||
|
||||
| Endpoint | Description |
|
||||
|----------|-------------|
|
||||
| `GET /` | HTML dashboard UI |
|
||||
| `GET /api/status` | Daily trading status by market |
|
||||
| `GET /api/playbook/{date}` | Playbook for specific date (query: `market`) |
|
||||
| `GET /api/scorecard/{date}` | Daily scorecard from L6_DAILY context |
|
||||
| `GET /api/performance` | Performance metrics by market and combined |
|
||||
| `GET /api/context/{layer}` | Context data by layer L1-L7 (query: `timeframe`) |
|
||||
| `GET /api/decisions` | Decision log entries (query: `limit`, `market`) |
|
||||
| `GET /api/scenarios/active` | Today's matched scenarios |
|
||||
|
||||
## Telegram Commands
|
||||
|
||||
When `TELEGRAM_COMMANDS_ENABLED=true` (default), the bot accepts these interactive commands:
|
||||
|
||||
| Command | Description |
|
||||
|---------|-------------|
|
||||
| `/help` | List available commands |
|
||||
| `/status` | Show trading status (mode, markets, P&L) |
|
||||
| `/positions` | Display account summary (balance, cash, P&L) |
|
||||
| `/report` | Daily summary metrics (trades, P&L, win rate) |
|
||||
| `/scenarios` | Show today's playbook scenarios |
|
||||
| `/review` | Display recent scorecards (L6_DAILY layer) |
|
||||
| `/dashboard` | Show dashboard URL if enabled |
|
||||
| `/stop` | Pause trading |
|
||||
| `/resume` | Resume trading |
|
||||
|
||||
Commands are only processed from the authorized `TELEGRAM_CHAT_ID`.
|
||||
|
||||
## Environment Setup
|
||||
|
||||
```bash
|
||||
|
||||
docs/requirements-log.md (new file, 186 lines)
@@ -0,0 +1,186 @@
|
||||
# Requirements Log
|
||||
|
||||
프로젝트 진화를 위한 사용자 요구사항 기록.
|
||||
|
||||
이 문서는 시간순으로 사용자와의 대화에서 나온 요구사항과 피드백을 기록합니다.
|
||||
새로운 요구사항이 있으면 날짜와 함께 추가하세요.
|
||||
|
||||
---
|
||||
|
||||
## 2026-02-05
|
||||
|
||||
### API 효율화
|
||||
- Gemini API는 귀중한 자원. 종목별 개별 호출 대신 배치 호출 필요
|
||||
- Free tier 한도(20 calls/day) 고려하여 일일 몇 차례 거래 모드로 전환
|
||||
- 배치 API 호출로 여러 종목을 한 번에 분석
|
||||
|
||||
### 거래 모드
|
||||
- **Daily Mode**: 하루 4회 거래 세션 (6시간 간격) - Free tier 호환
|
||||
- **Realtime Mode**: 60초 간격 실시간 거래 - 유료 구독 필요
|
||||
- `TRADE_MODE` 환경변수로 모드 선택
|
||||
|
||||
### 진화 시스템
|
||||
- 사용자 대화 내용을 문서로 기록하여 향후에도 의도 반영
|
||||
- 프롬프트 품질 검증은 별도 이슈로 다룰 예정
|
||||
|
||||
### 문서화
|
||||
- 시스템 구조, 기능별 설명 등 코드 문서화 항상 신경쓸 것
|
||||
- 새로운 기능 추가 시 관련 문서 업데이트 필수
|
||||
|
||||
---
|
||||
|
||||
## 2026-02-06
|
||||
|
||||
### Smart Volatility Scanner (Python-First, AI-Last 파이프라인)
|
||||
|
||||
**배경:**
|
||||
- 정적 종목 리스트를 순회하는 방식은 비효율적
|
||||
- KIS API 거래량 순위를 통해 시장 주도주를 자동 탐지해야 함
|
||||
- Gemini API 호출 전에 Python 기반 기술적 분석으로 필터링 필요
|
||||
|
||||
**요구사항:**
|
||||
1. KIS API 거래량 순위 API 통합 (`fetch_market_rankings`)
|
||||
2. 일별 가격 히스토리 API 추가 (`get_daily_prices`)
|
||||
3. RSI(14) 계산 기능 구현 (Wilder's smoothing method)
|
||||
4. 필터 조건:
|
||||
- 거래량 > 전일 대비 200% (VOL_MULTIPLIER)
|
||||
- RSI < 30 (과매도) OR RSI > 70 (모멘텀)
|
||||
5. 상위 1-3개 적격 종목만 Gemini에 전달
|
||||
6. 종목 선정 배경(RSI, volume_ratio, signal, score) 데이터베이스 기록
|
||||
|
||||
**구현 결과:**
|
||||
- `src/analysis/smart_scanner.py`: SmartVolatilityScanner 클래스
|
||||
- `src/analysis/volatility.py`: calculate_rsi() 메서드 추가
|
||||
- `src/broker/kis_api.py`: 2개 신규 API 메서드
|
||||
- `src/db.py`: selection_context 컬럼 추가
|
||||
- 설정 가능한 임계값: RSI_OVERSOLD_THRESHOLD, RSI_MOMENTUM_THRESHOLD, VOL_MULTIPLIER, SCANNER_TOP_N
|
||||
|
||||
**효과:**
|
||||
- Gemini API 호출 20-30개 → 1-3개로 감소
|
||||
- Python 기반 빠른 필터링 → 비용 절감
|
||||
- 선정 기준 추적 → Evolution 시스템 최적화 가능
|
||||
- API 장애 시 정적 watchlist로 자동 전환
|
||||
|
||||
**참고:** Realtime 모드 전용. Daily 모드는 배치 효율성을 위해 정적 watchlist 사용.
|
||||
|
||||
**이슈/PR:** #76, #77
|
||||
|
||||
---
|
||||
|
||||
## 2026-02-10
|
||||
|
||||
### 코드 리뷰 시 플랜-구현 일치 검증 규칙
|
||||
|
||||
**배경:**
|
||||
- 코드 리뷰 시 플랜(EnterPlanMode에서 승인된 계획)과 실제 구현이 일치하는지 확인하는 절차가 없었음
|
||||
- 플랜과 다른 구현이 리뷰 없이 통과될 위험
|
||||
|
||||
**요구사항:**
|
||||
1. 모든 PR 리뷰에서 플랜-구현 일치 여부를 필수 체크
|
||||
2. 플랜에 없는 변경은 정당한 사유 필요
|
||||
3. 플랜 항목이 누락되면 PR 설명에 사유 기록
|
||||
4. 스코프가 플랜과 일치하는지 확인
|
||||
|
||||
**구현 결과:**
|
||||
- `docs/workflow.md`에 Code Review Checklist 섹션 추가
|
||||
- Plan Consistency (필수), Safety & Constraints, Quality, Workflow 4개 카테고리
|
||||
|
||||
**이슈/PR:** #114
|
||||
|
||||
---
|
||||
|
||||
## 2026-02-16
|
||||
|
||||
### 문서 v2 동기화 (전체 문서 현행화)
|
||||
|
||||
**배경:**
|
||||
- v2 기능 구현 완료 후 문서가 실제 코드 상태와 크게 괴리
|
||||
- 문서에는 54 tests / 4 files로 기록되었으나 실제로는 551 tests / 25 files
|
||||
- v2 핵심 기능(Playbook, Scenario Engine, Dashboard, Telegram Commands, Daily Review, Context System, Backup) 문서화 누락
|
||||
|
||||
**요구사항:**
|
||||
1. `docs/testing.md` — 551 tests / 25 files 반영, 전체 테스트 파일 설명
|
||||
2. `docs/architecture.md` — v2 컴포넌트(Strategy, Context, Dashboard, Decision Logger 등) 추가, Playbook Mode 데이터 플로우, DB 스키마 5개 테이블, v2 환경변수
|
||||
3. `docs/commands.md` — Dashboard 실행 명령어, Telegram 명령어 9종 레퍼런스
|
||||
4. `CLAUDE.md` — Project Structure 트리 확장, 테스트 수 업데이트, `--dashboard` 플래그
|
||||
5. `docs/skills.md` — DB 파일명 `trades.db`로 통일, Dashboard 명령어 추가
|
||||
6. 기존에 유효한 트러블슈팅, 코드 예제 등은 유지
|
||||
|
||||
**구현 결과:**
|
||||
- 6개 문서 파일 업데이트
|
||||
- 이전 시도(2개 커밋)는 기존 내용을 과도하게 삭제하여 폐기, main 기준으로 재작업
|
||||
|
||||
**이슈/PR:** #131, PR #134
|
||||
|
||||
### 해외 스캐너 개선: 랭킹 연동 + 변동성 우선 선별
|
||||
|
||||
**배경:**
|
||||
- `run_overnight` 실운영에서 미국장 동안 거래가 0건 지속
|
||||
- 원인: 해외 시장에서도 국내 랭킹/일봉 API 경로를 사용하던 구조적 불일치
|
||||
|
||||
**요구사항:**
|
||||
1. 해외 시장도 랭킹 API 기반 유니버스 탐색 지원
|
||||
2. 단순 상승률/거래대금 상위가 아니라, **변동성이 큰 종목**을 우선 선별
|
||||
3. 고정 티커 fallback 금지
|
||||
|
||||
**구현 결과:**
|
||||
- `src/broker/overseas.py`
|
||||
- `fetch_overseas_rankings()` 추가 (fluctuation / volume)
|
||||
- 해외 랭킹 API 경로/TR_ID를 설정값으로 오버라이드 가능하게 구현
|
||||
- `src/analysis/smart_scanner.py`
|
||||
- market-aware 스캔(국내/해외 분리)
|
||||
- 해외: 랭킹 API 유니버스 + 변동성 우선 점수(일변동률 vs 장중 고저폭)
|
||||
- 거래대금/거래량 랭킹은 유동성 보정 점수로 활용
|
||||
- 랭킹 실패 시에는 동적 유니버스(active/recent/holdings)만 사용
|
||||
- `src/config.py`
|
||||
- `OVERSEAS_RANKING_*` 설정 추가
|
||||
|
||||
**효과:**
|
||||
- 해외 시장에서 스캐너 후보 0개로 정지되는 상황 완화
|
||||
- 종목 선정 기준이 단순 상승률 중심에서 변동성 중심으로 개선
|
||||
- 고정 티커 없이도 시장 주도 변동 종목 탐지 가능
|
||||
|
||||
### 국내 스캐너/주문수량 정렬: 변동성 우선 + 리스크 타기팅
|
||||
|
||||
**배경:**
|
||||
- 해외만 변동성 우선으로 동작하고, 국내는 RSI/거래량 필터 중심으로 동작해 시장 간 전략 일관성이 낮았음
|
||||
- 매수 수량이 고정 1주라서 변동성 구간별 익스포저 관리가 어려웠음
|
||||
|
||||
**요구사항:**
|
||||
1. 국내 스캐너도 변동성 우선 선별로 해외와 통일
|
||||
2. 고변동 종목일수록 포지션 크기를 줄이는 수량 산식 적용
|
||||
|
||||
**구현 결과:**
|
||||
- `src/analysis/smart_scanner.py`
|
||||
- 국내: `fluctuation ranking + volume ranking bonus` 기반 점수화로 전환
|
||||
- 점수는 `max(abs(change_rate), intraday_range_pct)` 중심으로 계산
|
||||
- 국내 랭킹 응답 스키마 키(`price`, `change_rate`, `volume`) 파싱 보강
|
||||
- `src/main.py`
|
||||
- `_determine_order_quantity()` 추가
|
||||
- BUY 시 변동성 점수 기반 동적 수량 산정 적용
|
||||
- `trading_cycle`, `run_daily_session` 경로 모두 동일 수량 로직 사용
|
||||
- `src/config.py`
|
||||
- `POSITION_SIZING_*` 설정 추가
|
||||
|
||||
**효과:**
|
||||
- 국내/해외 스캐너 기준이 변동성 중심으로 일관화
|
||||
- 고변동 구간에서 자동 익스포저 축소, 저변동 구간에서 과소진입 완화
|
||||
|
||||
## 2026-02-18
|
||||
|
||||
### KIS 해외 랭킹 API 404 에러 수정
|
||||
|
||||
**배경:**
|
||||
- KIS 해외주식 랭킹 API(`fetch_overseas_rankings`)가 모든 거래소에서 HTTP 404를 반환
|
||||
- Smart Scanner가 해외 시장 후보 종목을 찾지 못해 거래가 전혀 실행되지 않음
|
||||
|
||||
**근본 원인:**
|
||||
- TR_ID, API 경로, 거래소 코드가 모두 KIS 공식 문서와 불일치
|
||||
|
||||
**구현 결과:**
|
||||
- `src/config.py`: TR_ID/Path 기본값을 KIS 공식 스펙으로 수정
|
||||
- `src/broker/overseas.py`: 랭킹 API 전용 거래소 코드 매핑 추가 (NASD→NAS, NYSE→NYS, AMEX→AMS), 올바른 API 파라미터 사용
|
||||
- `tests/test_overseas_broker.py`: 19개 단위 테스트 추가
|
||||
|
||||
**효과:**
|
||||
- 해외 시장 랭킹 스캔이 정상 동작하여 Smart Scanner가 후보 종목 탐지 가능
|
||||
@@ -34,6 +34,12 @@ python -m src.main --mode=paper
|
||||
```
|
||||
Runs the agent in paper-trading mode (no real orders).
|
||||
|
||||
### Start Trading Agent with Dashboard
|
||||
```bash
|
||||
python -m src.main --mode=paper --dashboard
|
||||
```
|
||||
Runs the agent with FastAPI dashboard on `127.0.0.1:8080` (configurable via `DASHBOARD_HOST`/`DASHBOARD_PORT`).
|
||||
|
||||
### Start Trading Agent (Production)
|
||||
```bash
|
||||
docker compose up -d ouroboros
|
||||
@@ -59,7 +65,7 @@ Analyze the last 30 days of trade logs and generate performance metrics.
|
||||
python -m src.evolution.optimizer --evolve
|
||||
```
|
||||
Triggers the evolution engine to:
|
||||
1. Analyze `trade_logs.db` for failing patterns
|
||||
1. Analyze `trades.db` for failing patterns
|
||||
2. Ask Gemini to generate a new strategy
|
||||
3. Run tests on the new strategy
|
||||
4. Create a PR if tests pass
|
||||
@@ -91,12 +97,12 @@ curl http://localhost:8080/health
|
||||
|
||||
### View Trade Logs
|
||||
```bash
|
||||
sqlite3 data/trade_logs.db "SELECT * FROM trades ORDER BY timestamp DESC LIMIT 20;"
|
||||
sqlite3 data/trades.db "SELECT * FROM trades ORDER BY timestamp DESC LIMIT 20;"
|
||||
```
|
||||
|
||||
### Export Trade History
|
||||
```bash
|
||||
sqlite3 -header -csv data/trade_logs.db "SELECT * FROM trades;" > trades_export.csv
|
||||
sqlite3 -header -csv data/trades.db "SELECT * FROM trades;" > trades_export.csv
|
||||
```
|
||||
|
||||
## Safety Checklist (Pre-Deploy)
|
||||
|
||||
docs/testing.md (206 changed lines)
@@ -2,51 +2,29 @@
|
||||
|
||||
## Test Structure
|
||||
|
||||
**54 tests** across four files. `asyncio_mode = "auto"` in pyproject.toml — async tests need no special decorator.
|
||||
**551 tests** across **25 files**. `asyncio_mode = "auto"` in pyproject.toml — async tests need no special decorator.
|
||||
|
||||
The `settings` fixture in `conftest.py` provides safe defaults with test credentials and in-memory DB.
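
A hypothetical shape for that fixture; the constructor arguments mirror the documented environment variables, but the actual `Settings` model may differ:

```python
import pytest

from src.config import Settings  # assumed import path

@pytest.fixture
def settings() -> Settings:
    # Dummy credentials plus an in-memory DB so tests never touch real services.
    return Settings(
        KIS_APP_KEY="test-key",
        KIS_APP_SECRET="test-secret",
        KIS_ACCOUNT_NO="00000000-00",
        GEMINI_API_KEY="test-gemini-key",
        DB_PATH=":memory:",
    )
```
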
### Test Files
|
||||
|
||||
#### `tests/test_risk.py` (11 tests)
|
||||
- Circuit breaker boundaries
|
||||
- Fat-finger edge cases
|
||||
#### Core Components
|
||||
|
||||
##### `tests/test_risk.py` (14 tests)
|
||||
- Circuit breaker boundaries and exact threshold triggers
|
||||
- Fat-finger edge cases and percentage validation
|
||||
- P&L calculation edge cases
|
||||
- Order validation logic
|
||||
|
||||
**Example:**
|
||||
```python
|
||||
def test_circuit_breaker_exact_threshold(risk_manager):
|
||||
"""Circuit breaker should trip at exactly -3.0%."""
|
||||
with pytest.raises(CircuitBreakerTripped):
|
||||
risk_manager.validate_order(
|
||||
current_pnl_pct=-3.0,
|
||||
order_amount=1000,
|
||||
total_cash=10000
|
||||
)
|
||||
```
|
||||
|
||||
#### `tests/test_broker.py` (6 tests)
|
||||
##### `tests/test_broker.py` (11 tests)
|
||||
- OAuth token lifecycle
|
||||
- Rate limiting enforcement
|
||||
- Hash key generation
|
||||
- Network error handling
|
||||
- SSL context configuration
|
||||
|
||||
**Example:**
|
||||
```python
|
||||
async def test_rate_limiter(broker):
|
||||
"""Rate limiter should delay requests to stay under 10 RPS."""
|
||||
start = time.monotonic()
|
||||
for _ in range(15): # 15 requests
|
||||
await broker._rate_limiter.acquire()
|
||||
elapsed = time.monotonic() - start
|
||||
assert elapsed >= 1.0 # Should take at least 1 second
|
||||
```
|
||||
|
||||
#### `tests/test_brain.py` (18 tests)
|
||||
- Valid JSON parsing
|
||||
- Markdown-wrapped JSON handling
|
||||
##### `tests/test_brain.py` (24 tests)
|
||||
- Valid JSON parsing and markdown-wrapped JSON handling
|
||||
- Malformed JSON fallback
|
||||
- Missing fields handling
|
||||
- Invalid action validation
|
||||
@@ -54,33 +32,143 @@ async def test_rate_limiter(broker):
|
||||
- Empty response handling
|
||||
- Prompt construction for different markets
|
||||
|
||||
**Example:**
|
||||
```python
|
||||
async def test_confidence_below_threshold_forces_hold(brain):
|
||||
"""Decisions below confidence threshold should force HOLD."""
|
||||
decision = brain.parse_response('{"action":"BUY","confidence":70,"rationale":"test"}')
|
||||
assert decision.action == "HOLD"
|
||||
assert decision.confidence == 70
|
||||
```
|
||||
|
||||
#### `tests/test_market_schedule.py` (19 tests)
|
||||
##### `tests/test_market_schedule.py` (24 tests)
|
||||
- Market open/close logic
|
||||
- Timezone handling (UTC, Asia/Seoul, America/New_York, etc.)
|
||||
- DST (Daylight Saving Time) transitions
|
||||
- Weekend handling
|
||||
- Lunch break logic
|
||||
- Weekend handling and lunch break logic
|
||||
- Multiple market filtering
|
||||
- Next market open calculation
|
||||
|
||||
**Example:**
|
||||
```python
|
||||
def test_is_market_open_during_trading_hours():
|
||||
"""Market should be open during regular trading hours."""
|
||||
# KRX: 9:00-15:30 KST, no lunch break
|
||||
market = MARKETS["KR"]
|
||||
trading_time = datetime(2026, 2, 3, 10, 0, tzinfo=ZoneInfo("Asia/Seoul")) # Monday 10:00
|
||||
assert is_market_open(market, trading_time) is True
|
||||
```
|
||||
##### `tests/test_db.py` (3 tests)
|
||||
- Database initialization and table creation
|
||||
- Trade logging with all fields (market, exchange_code, decision_id)
|
||||
- Query and retrieval operations
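
**Example** (illustrative sketch — the `trade_logger` fixture and its `log_trade`/`get_trades` methods are assumptions, not the exact API):

```python
def test_trade_logging_round_trip(trade_logger):
    """Logged trades should come back with market, exchange_code and decision_id intact."""
    trade_logger.log_trade(
        stock_code="AAPL",
        action="BUY",
        quantity=10,
        price=185.5,
        market="US",
        exchange_code="NASD",
        decision_id="d-001",
    )
    latest = trade_logger.get_trades(limit=1)[0]
    assert latest["decision_id"] == "d-001"
```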
|
||||
|
||||
##### `tests/test_main.py` (37 tests)
|
||||
- Trading loop orchestration
|
||||
- Market iteration and stock processing
|
||||
- Dashboard integration (`--dashboard` flag)
|
||||
- Telegram command handler wiring
|
||||
- Error handling and graceful shutdown
|
||||
|
||||
#### Strategy & Playbook (v2)
|
||||
|
||||
##### `tests/test_pre_market_planner.py` (37 tests)
|
||||
- Pre-market playbook generation
|
||||
- Gemini API integration for scenario creation
|
||||
- Timeout handling and defensive playbook fallback
|
||||
- Multi-market playbook generation
|
||||
|
||||
##### `tests/test_scenario_engine.py` (44 tests)
|
||||
- Scenario matching against live market data
|
||||
- Confidence scoring and threshold filtering
|
||||
- Multiple scenario type handling
|
||||
- Edge cases (no match, partial match, expired scenarios)
|
||||
|
||||
##### `tests/test_playbook_store.py` (23 tests)
|
||||
- Playbook persistence to SQLite
|
||||
- Date-based retrieval and market filtering
|
||||
- Playbook status management (generated, active, expired)
|
||||
- JSON serialization/deserialization
|
||||
|
||||
##### `tests/test_strategy_models.py` (33 tests)
|
||||
- Pydantic model validation for scenarios, playbooks, decisions
|
||||
- Field constraints and default values
|
||||
- Serialization round-trips
|
||||
|
||||
#### Analysis & Scanning
|
||||
|
||||
##### `tests/test_volatility.py` (24 tests)
|
||||
- ATR and RSI calculation accuracy
|
||||
- Volume surge ratio computation
|
||||
- Momentum scoring
|
||||
- Breakout/breakdown pattern detection
|
||||
- Market scanner watchlist management
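
**Example** (illustrative; mirrors the documented short-history fallback of `calculate_rsi` — the `analyzer` fixture name is an assumption):

```python
def test_rsi_neutral_on_short_history(analyzer):
    """With fewer than period + 1 closes, RSI should fall back to a neutral 50.0."""
    assert analyzer.calculate_rsi([100.0, 101.0, 102.0], period=14) == 50.0
```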
|
||||
|
||||
##### `tests/test_smart_scanner.py` (13 tests)
|
||||
- Python-first filtering pipeline
|
||||
- RSI and volume ratio filter logic
|
||||
- Candidate scoring and ranking
|
||||
- Fallback to static watchlist
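
**Example** (illustrative sketch, not one of the actual 13 tests; `ScanCandidate` fields follow the dataclass in `src/analysis/smart_scanner.py`):

```python
def test_get_stock_codes_preserves_candidate_order():
    """Watchlist codes should follow the candidates' ranked order."""
    candidates = [
        ScanCandidate("005930", "Samsung Electronics", 70000.0, 1_000_000, 2.5, 28.0, "oversold", 91.0),
        ScanCandidate("AAPL", "Apple", 185.5, 500_000, 2.1, 72.0, "momentum", 88.0),
    ]
    scanner = SmartVolatilityScanner.__new__(SmartVolatilityScanner)  # bypass __init__; no broker state needed
    assert scanner.get_stock_codes(candidates) == ["005930", "AAPL"]
```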
|
||||
|
||||
#### Context & Memory
|
||||
|
||||
##### `tests/test_context.py` (18 tests)
|
||||
- L1-L7 layer storage and retrieval
|
||||
- Context key-value CRUD operations
|
||||
- Timeframe-based queries
|
||||
- Layer metadata management
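
**Example** (illustrative sketch — `set_context` matches the call shape used by the scanner, but `get_context` and the fixture name are assumptions):

```python
def test_l7_realtime_round_trip(context_store):
    """A value written to the L7 realtime layer should be retrievable by key."""
    context_store.set_context(
        ContextLayer.L7_REALTIME,
        "1m",
        "volatility_KR_005930",
        {"price": 70000, "atr": 950.0},
    )
    value = context_store.get_context(ContextLayer.L7_REALTIME, "1m", "volatility_KR_005930")
    assert value["price"] == 70000
```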
|
||||
|
||||
##### `tests/test_context_scheduler.py` (5 tests)
|
||||
- Periodic context aggregation scheduling
|
||||
- Layer summarization triggers
|
||||
|
||||
#### Evolution & Review
|
||||
|
||||
##### `tests/test_evolution.py` (24 tests)
|
||||
- Strategy optimization loop
|
||||
- High-confidence losing trade analysis
|
||||
- Generated strategy validation
|
||||
|
||||
##### `tests/test_daily_review.py` (10 tests)
|
||||
- End-of-day review generation
|
||||
- Trade performance summarization
|
||||
- Context layer (L6_DAILY) integration
|
||||
|
||||
##### `tests/test_scorecard.py` (3 tests)
|
||||
- Daily scorecard metrics calculation
|
||||
- Win rate, P&L, confidence tracking
|
||||
|
||||
#### Notifications & Commands
|
||||
|
||||
##### `tests/test_telegram.py` (25 tests)
|
||||
- Message sending and formatting
|
||||
- Rate limiting (leaky bucket)
|
||||
- Error handling (network timeout, invalid token)
|
||||
- Auto-disable on missing credentials
|
||||
- Notification types (trade, circuit breaker, fat-finger, market events)
|
||||
|
||||
##### `tests/test_telegram_commands.py` (31 tests)
|
||||
- 9 command handlers (/help, /status, /positions, /report, /scenarios, /review, /dashboard, /stop, /resume)
|
||||
- Long polling and command dispatch
|
||||
- Authorization filtering by chat_id
|
||||
- Command response formatting
|
||||
|
||||
#### Dashboard
|
||||
|
||||
##### `tests/test_dashboard.py` (14 tests)
|
||||
- FastAPI endpoint responses (8 API routes)
|
||||
- Status, playbook, scorecard, performance, context, decisions, scenarios
|
||||
- Query parameter handling (market, date, limit)
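
**Example** (illustrative; the `/health` route is documented above, but the app's import path is an assumption):

```python
from fastapi.testclient import TestClient

from src.dashboard.app import app  # assumed module path


def test_health_endpoint_returns_ok():
    client = TestClient(app)
    response = client.get("/health")
    assert response.status_code == 200
```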
|
||||
|
||||
#### Performance & Quality
|
||||
|
||||
##### `tests/test_token_efficiency.py` (34 tests)
|
||||
- Gemini token usage optimization
|
||||
- Prompt size reduction verification
|
||||
- Cache effectiveness
|
||||
|
||||
##### `tests/test_latency_control.py` (30 tests)
|
||||
- API call latency measurement
|
||||
- Rate limiter timing accuracy
|
||||
- Async operation overhead
|
||||
|
||||
##### `tests/test_decision_logger.py` (9 tests)
|
||||
- Decision audit trail completeness
|
||||
- Context snapshot capture
|
||||
- Outcome tracking (P&L, accuracy)
|
||||
|
||||
##### `tests/test_data_integration.py` (38 tests)
|
||||
- External data source integration
|
||||
- News API, market data, economic calendar
|
||||
- Error handling for API failures
|
||||
|
||||
##### `tests/test_backup.py` (23 tests)
|
||||
- Backup scheduler and execution
|
||||
- Cloud storage (S3) upload
|
||||
- Health monitoring
|
||||
- Data export functionality
|
||||
|
||||
## Coverage Requirements
|
||||
|
||||
@@ -91,20 +179,6 @@ Check coverage:
|
||||
pytest -v --cov=src --cov-report=term-missing
|
||||
```
|
||||
|
||||
Expected output:
|
||||
```
|
||||
Name Stmts Miss Cover Missing
|
||||
-----------------------------------------------------------
|
||||
src/brain/gemini_client.py 85 5 94% 165-169
|
||||
src/broker/kis_api.py 120 12 90% ...
|
||||
src/core/risk_manager.py 35 2 94% ...
|
||||
src/db.py 25 1 96% ...
|
||||
src/main.py 150 80 47% (excluded from CI)
|
||||
src/markets/schedule.py 95 3 97% ...
|
||||
-----------------------------------------------------------
|
||||
TOTAL 510 103 80%
|
||||
```
|
||||
|
||||
**Note:** `main.py` has lower coverage as it contains the main loop which is tested via integration/manual testing.
|
||||
|
||||
## Test Configuration
|
||||
|
||||
@@ -6,6 +6,7 @@
|
||||
|
||||
1. **Create Gitea Issue First** — All features, bug fixes, and policy changes require a Gitea issue before any code is written
|
||||
2. **Create Feature Branch** — Branch from `main` using format `feature/issue-{N}-{short-description}`
|
||||
- After creating the branch, run `git pull origin main` and rebase to ensure the branch is up to date
|
||||
3. **Implement Changes** — Write code, tests, and documentation on the feature branch
|
||||
4. **Create Pull Request** — Submit PR to `main` branch referencing the issue number
|
||||
5. **Review & Merge** — After approval, merge via PR (squash or merge commit)
|
||||
@@ -73,3 +74,37 @@ task_tool(
|
||||
```
|
||||
|
||||
Use `run_in_background=True` for independent tasks that don't block subsequent work.
|
||||
|
||||
## Code Review Checklist
|
||||
|
||||
**CRITICAL: Every PR review MUST verify plan-implementation consistency.**
|
||||
|
||||
Before approving any PR, the reviewer (human or agent) must check ALL of the following:
|
||||
|
||||
### 1. Plan Consistency (MANDATORY)
|
||||
|
||||
- [ ] **Implementation matches the approved plan** — Compare the actual code changes against the plan created during `EnterPlanMode`. Every item in the plan must be addressed.
|
||||
- [ ] **No unplanned changes** — If the implementation includes changes not in the plan, they must be explicitly justified.
|
||||
- [ ] **No plan items omitted** — If any planned item was skipped, the reason must be documented in the PR description.
|
||||
- [ ] **Scope matches** — The PR does not exceed or fall short of the planned scope.
|
||||
|
||||
### 2. Safety & Constraints
|
||||
|
||||
- [ ] `src/core/risk_manager.py` is unchanged (READ-ONLY)
|
||||
- [ ] Circuit breaker threshold not weakened (only stricter allowed)
|
||||
- [ ] Fat-finger protection (30% max order) still enforced
|
||||
- [ ] Confidence < 80 still forces HOLD
|
||||
- [ ] No hardcoded API keys or secrets
|
||||
|
||||
### 3. Quality
|
||||
|
||||
- [ ] All new/modified code has corresponding tests
|
||||
- [ ] Test coverage >= 80%
|
||||
- [ ] `ruff check src/ tests/` passes (no lint errors)
|
||||
- [ ] No `assert` statements removed from tests
|
||||
|
||||
### 4. Workflow
|
||||
|
||||
- [ ] PR references the Gitea issue number
|
||||
- [ ] Feature branch follows naming convention (`feature/issue-N-description`)
|
||||
- [ ] Commit messages are clear and descriptive
|
||||
|
||||
@@ -9,6 +9,8 @@ dependencies = [
|
||||
"pydantic-settings>=2.1,<3",
|
||||
"google-genai>=1.0,<2",
|
||||
"scipy>=1.11,<2",
|
||||
"fastapi>=0.110,<1",
|
||||
"uvicorn>=0.29,<1",
|
||||
]
|
||||
|
||||
[project.optional-dependencies]
|
||||
|
||||
54 scripts/morning_report.sh Executable file
@@ -0,0 +1,54 @@
|
||||
#!/usr/bin/env bash
|
||||
# Morning summary for overnight run logs.
|
||||
|
||||
set -euo pipefail
|
||||
|
||||
LOG_DIR="${LOG_DIR:-data/overnight}"
|
||||
|
||||
if [ ! -d "$LOG_DIR" ]; then
|
||||
echo "로그 디렉터리가 없습니다: $LOG_DIR"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
latest_run="$(ls -1t "$LOG_DIR"/run_*.log 2>/dev/null | head -n 1 || true)"
|
||||
latest_watchdog="$(ls -1t "$LOG_DIR"/watchdog_*.log 2>/dev/null | head -n 1 || true)"
|
||||
|
||||
if [ -z "$latest_run" ]; then
|
||||
echo "run 로그가 없습니다: $LOG_DIR/run_*.log"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
echo "Overnight report"
|
||||
echo "- run log: $latest_run"
|
||||
if [ -n "$latest_watchdog" ]; then
|
||||
echo "- watchdog log: $latest_watchdog"
|
||||
fi
|
||||
|
||||
start_line="$(head -n 1 "$latest_run" || true)"
|
||||
end_line="$(tail -n 1 "$latest_run" || true)"
|
||||
|
||||
info_count="$(rg -c '"level": "INFO"' "$latest_run" || true)"
|
||||
warn_count="$(rg -c '"level": "WARNING"' "$latest_run" || true)"
|
||||
error_count="$(rg -c '"level": "ERROR"' "$latest_run" || true)"
|
||||
critical_count="$(rg -c '"level": "CRITICAL"' "$latest_run" || true)"
|
||||
traceback_count="$(rg -c 'Traceback' "$latest_run" || true)"
|
||||
|
||||
echo "- start: ${start_line:-N/A}"
|
||||
echo "- end: ${end_line:-N/A}"
|
||||
echo "- INFO: ${info_count:-0}"
|
||||
echo "- WARNING: ${warn_count:-0}"
|
||||
echo "- ERROR: ${error_count:-0}"
|
||||
echo "- CRITICAL: ${critical_count:-0}"
|
||||
echo "- Traceback: ${traceback_count:-0}"
|
||||
|
||||
if [ -n "$latest_watchdog" ]; then
|
||||
watchdog_errors="$(rg -c '\[ERROR\]' "$latest_watchdog" || true)"
|
||||
echo "- watchdog ERROR: ${watchdog_errors:-0}"
|
||||
echo ""
|
||||
echo "최근 watchdog 로그:"
|
||||
tail -n 5 "$latest_watchdog" || true
|
||||
fi
|
||||
|
||||
echo ""
|
||||
echo "최근 앱 로그:"
|
||||
tail -n 20 "$latest_run" || true
|
||||
87 scripts/run_overnight.sh Executable file
@@ -0,0 +1,87 @@
|
||||
#!/usr/bin/env bash
|
||||
# Start The Ouroboros overnight with logs and watchdog.
|
||||
|
||||
set -euo pipefail
|
||||
|
||||
LOG_DIR="${LOG_DIR:-data/overnight}"
|
||||
CHECK_INTERVAL="${CHECK_INTERVAL:-30}"
|
||||
TMUX_AUTO="${TMUX_AUTO:-true}"
|
||||
TMUX_ATTACH="${TMUX_ATTACH:-true}"
|
||||
TMUX_SESSION_PREFIX="${TMUX_SESSION_PREFIX:-ouroboros_overnight}"
|
||||
|
||||
if [ -z "${APP_CMD:-}" ]; then
|
||||
if [ -x ".venv/bin/python" ]; then
|
||||
PYTHON_BIN=".venv/bin/python"
|
||||
elif command -v python3 >/dev/null 2>&1; then
|
||||
PYTHON_BIN="python3"
|
||||
elif command -v python >/dev/null 2>&1; then
|
||||
PYTHON_BIN="python"
|
||||
else
|
||||
echo ".venv/bin/python 또는 python3/python 실행 파일을 찾을 수 없습니다."
|
||||
exit 1
|
||||
fi
|
||||
|
||||
dashboard_port="${DASHBOARD_PORT:-8080}"
|
||||
|
||||
APP_CMD="DASHBOARD_PORT=$dashboard_port $PYTHON_BIN -m src.main --mode=paper --dashboard"
|
||||
fi
|
||||
|
||||
mkdir -p "$LOG_DIR"
|
||||
|
||||
timestamp="$(date +"%Y%m%d_%H%M%S")"
|
||||
RUN_LOG="$LOG_DIR/run_${timestamp}.log"
|
||||
WATCHDOG_LOG="$LOG_DIR/watchdog_${timestamp}.log"
|
||||
PID_FILE="$LOG_DIR/app.pid"
|
||||
WATCHDOG_PID_FILE="$LOG_DIR/watchdog.pid"
|
||||
|
||||
if [ -f "$PID_FILE" ]; then
|
||||
old_pid="$(cat "$PID_FILE" || true)"
|
||||
if [ -n "$old_pid" ] && kill -0 "$old_pid" 2>/dev/null; then
|
||||
echo "앱이 이미 실행 중입니다. pid=$old_pid"
|
||||
exit 1
|
||||
fi
|
||||
fi
|
||||
|
||||
echo "[$(date -u +"%Y-%m-%dT%H:%M:%SZ")] starting: $APP_CMD" | tee -a "$RUN_LOG"
|
||||
nohup bash -lc "$APP_CMD" >>"$RUN_LOG" 2>&1 &
|
||||
app_pid=$!
|
||||
echo "$app_pid" > "$PID_FILE"
|
||||
|
||||
echo "[$(date -u +"%Y-%m-%dT%H:%M:%SZ")] app pid=$app_pid" | tee -a "$RUN_LOG"
|
||||
|
||||
nohup env PID_FILE="$PID_FILE" LOG_FILE="$WATCHDOG_LOG" CHECK_INTERVAL="$CHECK_INTERVAL" \
|
||||
bash scripts/watchdog.sh >/dev/null 2>&1 &
|
||||
watchdog_pid=$!
|
||||
echo "$watchdog_pid" > "$WATCHDOG_PID_FILE"
|
||||
|
||||
cat <<EOF
|
||||
시작 완료
|
||||
- app pid: $app_pid
|
||||
- watchdog pid: $watchdog_pid
|
||||
- app log: $RUN_LOG
|
||||
- watchdog log: $WATCHDOG_LOG
|
||||
|
||||
실시간 확인:
|
||||
tail -f "$RUN_LOG"
|
||||
tail -f "$WATCHDOG_LOG"
|
||||
EOF
|
||||
|
||||
if [ "$TMUX_AUTO" = "true" ]; then
|
||||
if ! command -v tmux >/dev/null 2>&1; then
|
||||
echo "tmux를 찾지 못해 자동 세션 생성은 건너뜁니다."
|
||||
exit 0
|
||||
fi
|
||||
|
||||
session_name="${TMUX_SESSION_PREFIX}_${timestamp}"
|
||||
window_name="overnight"
|
||||
tmux new-session -d -s "$session_name" -n "$window_name" "tail -f '$RUN_LOG'"
|
||||
tmux split-window -t "${session_name}:${window_name}" -v "tail -f '$WATCHDOG_LOG'"
|
||||
tmux select-layout -t "${session_name}:${window_name}" even-vertical
|
||||
|
||||
echo "tmux session 생성: $session_name"
|
||||
echo "수동 접속: tmux attach -t $session_name"
|
||||
|
||||
if [ -z "${TMUX:-}" ] && [ "$TMUX_ATTACH" = "true" ]; then
|
||||
tmux attach -t "$session_name"
|
||||
fi
|
||||
fi
|
||||
76 scripts/stop_overnight.sh Executable file
@@ -0,0 +1,76 @@
|
||||
#!/usr/bin/env bash
|
||||
# Stop The Ouroboros overnight app/watchdog/tmux session.
|
||||
|
||||
set -euo pipefail
|
||||
|
||||
LOG_DIR="${LOG_DIR:-data/overnight}"
|
||||
PID_FILE="$LOG_DIR/app.pid"
|
||||
WATCHDOG_PID_FILE="$LOG_DIR/watchdog.pid"
|
||||
TMUX_SESSION_PREFIX="${TMUX_SESSION_PREFIX:-ouroboros_overnight}"
|
||||
KILL_TIMEOUT="${KILL_TIMEOUT:-5}"
|
||||
|
||||
stop_pid() {
|
||||
local name="$1"
|
||||
local pid="$2"
|
||||
|
||||
if [ -z "$pid" ]; then
|
||||
echo "$name PID가 비어 있습니다."
|
||||
return 1
|
||||
fi
|
||||
|
||||
if ! kill -0 "$pid" 2>/dev/null; then
|
||||
echo "$name 프로세스가 이미 종료됨 (pid=$pid)"
|
||||
return 0
|
||||
fi
|
||||
|
||||
kill "$pid" 2>/dev/null || true
|
||||
for _ in $(seq 1 "$KILL_TIMEOUT"); do
|
||||
if ! kill -0 "$pid" 2>/dev/null; then
|
||||
echo "$name 종료됨 (pid=$pid)"
|
||||
return 0
|
||||
fi
|
||||
sleep 1
|
||||
done
|
||||
|
||||
kill -9 "$pid" 2>/dev/null || true
|
||||
if ! kill -0 "$pid" 2>/dev/null; then
|
||||
echo "$name 강제 종료됨 (pid=$pid)"
|
||||
return 0
|
||||
fi
|
||||
|
||||
echo "$name 종료 실패 (pid=$pid)"
|
||||
return 1
|
||||
}
|
||||
|
||||
status=0
|
||||
|
||||
if [ -f "$WATCHDOG_PID_FILE" ]; then
|
||||
watchdog_pid="$(cat "$WATCHDOG_PID_FILE" || true)"
|
||||
stop_pid "watchdog" "$watchdog_pid" || status=1
|
||||
rm -f "$WATCHDOG_PID_FILE"
|
||||
else
|
||||
echo "watchdog pid 파일 없음: $WATCHDOG_PID_FILE"
|
||||
fi
|
||||
|
||||
if [ -f "$PID_FILE" ]; then
|
||||
app_pid="$(cat "$PID_FILE" || true)"
|
||||
stop_pid "app" "$app_pid" || status=1
|
||||
rm -f "$PID_FILE"
|
||||
else
|
||||
echo "app pid 파일 없음: $PID_FILE"
|
||||
fi
|
||||
|
||||
if command -v tmux >/dev/null 2>&1; then
|
||||
sessions="$(tmux ls 2>/dev/null | awk -F: -v p="$TMUX_SESSION_PREFIX" '$1 ~ "^" p "_" {print $1}')"
|
||||
if [ -n "$sessions" ]; then
|
||||
while IFS= read -r s; do
|
||||
[ -z "$s" ] && continue
|
||||
tmux kill-session -t "$s" 2>/dev/null || true
|
||||
echo "tmux 세션 종료: $s"
|
||||
done <<< "$sessions"
|
||||
else
|
||||
echo "종료할 tmux 세션 없음 (prefix=${TMUX_SESSION_PREFIX}_)"
|
||||
fi
|
||||
fi
|
||||
|
||||
exit "$status"
|
||||
42 scripts/watchdog.sh Executable file
@@ -0,0 +1,42 @@
|
||||
#!/usr/bin/env bash
|
||||
# Simple watchdog for The Ouroboros process.
|
||||
|
||||
set -euo pipefail
|
||||
|
||||
PID_FILE="${PID_FILE:-data/overnight/app.pid}"
|
||||
LOG_FILE="${LOG_FILE:-data/overnight/watchdog.log}"
|
||||
CHECK_INTERVAL="${CHECK_INTERVAL:-30}"
|
||||
STATUS_EVERY="${STATUS_EVERY:-10}"
|
||||
|
||||
mkdir -p "$(dirname "$LOG_FILE")"
|
||||
|
||||
log() {
|
||||
printf '%s %s\n' "$(date -u +"%Y-%m-%dT%H:%M:%SZ")" "$1" | tee -a "$LOG_FILE"
|
||||
}
|
||||
|
||||
if [ ! -f "$PID_FILE" ]; then
|
||||
log "[ERROR] pid file not found: $PID_FILE"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
PID="$(cat "$PID_FILE")"
|
||||
if [ -z "$PID" ]; then
|
||||
log "[ERROR] pid file is empty: $PID_FILE"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
log "[INFO] watchdog started (pid=$PID, interval=${CHECK_INTERVAL}s)"
|
||||
|
||||
count=0
|
||||
while true; do
|
||||
if kill -0 "$PID" 2>/dev/null; then
|
||||
count=$((count + 1))
|
||||
if [ $((count % STATUS_EVERY)) -eq 0 ]; then
|
||||
log "[INFO] process alive (pid=$PID)"
|
||||
fi
|
||||
else
|
||||
log "[ERROR] process stopped (pid=$PID)"
|
||||
exit 1
|
||||
fi
|
||||
sleep "$CHECK_INTERVAL"
|
||||
done
|
||||
@@ -3,6 +3,7 @@
|
||||
from __future__ import annotations
|
||||
|
||||
from src.analysis.scanner import MarketScanner
|
||||
from src.analysis.smart_scanner import ScanCandidate, SmartVolatilityScanner
|
||||
from src.analysis.volatility import VolatilityAnalyzer
|
||||
|
||||
__all__ = ["VolatilityAnalyzer", "MarketScanner"]
|
||||
__all__ = ["VolatilityAnalyzer", "MarketScanner", "SmartVolatilityScanner", "ScanCandidate"]
|
||||
|
||||
@@ -42,6 +42,7 @@ class MarketScanner:
|
||||
volatility_analyzer: VolatilityAnalyzer,
|
||||
context_store: ContextStore,
|
||||
top_n: int = 5,
|
||||
max_concurrent_scans: int = 1,
|
||||
) -> None:
|
||||
"""Initialize the market scanner.
|
||||
|
||||
@@ -51,12 +52,14 @@ class MarketScanner:
|
||||
volatility_analyzer: Volatility analyzer instance
|
||||
context_store: Context store for L7 real-time data
|
||||
top_n: Number of top movers to return per market (default 5)
|
||||
max_concurrent_scans: Max concurrent stock scans (default 1, fully serialized)
|
||||
"""
|
||||
self.broker = broker
|
||||
self.overseas_broker = overseas_broker
|
||||
self.analyzer = volatility_analyzer
|
||||
self.context_store = context_store
|
||||
self.top_n = top_n
|
||||
self._scan_semaphore = asyncio.Semaphore(max_concurrent_scans)
|
||||
|
||||
async def scan_stock(
|
||||
self,
|
||||
@@ -76,10 +79,6 @@ class MarketScanner:
|
||||
if market.is_domestic:
|
||||
orderbook = await self.broker.get_orderbook(stock_code)
|
||||
else:
|
||||
# Rate limiting: Add 200ms delay for overseas API calls
|
||||
# to prevent hitting KIS API rate limit (EGW00201)
|
||||
await asyncio.sleep(0.2)
|
||||
|
||||
# For overseas, we need to adapt the price data structure
|
||||
price_data = await self.overseas_broker.get_overseas_price(
|
||||
market.exchange_code, stock_code
|
||||
@@ -109,7 +108,7 @@ class MarketScanner:
|
||||
self.context_store.set_context(
|
||||
ContextLayer.L7_REALTIME,
|
||||
timeframe,
|
||||
f"{market.code}_{stock_code}_volatility",
|
||||
f"volatility_{market.code}_{stock_code}",
|
||||
{
|
||||
"price": metrics.current_price,
|
||||
"atr": metrics.atr,
|
||||
@@ -143,8 +142,12 @@ class MarketScanner:
|
||||
|
||||
logger.info("Scanning %s market (%d stocks)", market.name, len(stock_codes))
|
||||
|
||||
# Scan all stocks concurrently (with rate limiting handled by broker)
|
||||
tasks = [self.scan_stock(code, market) for code in stock_codes]
|
||||
# Scan stocks with bounded concurrency to prevent API rate limit burst
|
||||
async def _bounded_scan(code: str) -> VolatilityMetrics | None:
|
||||
async with self._scan_semaphore:
|
||||
return await self.scan_stock(code, market)
|
||||
|
||||
tasks = [_bounded_scan(code) for code in stock_codes]
|
||||
results = await asyncio.gather(*tasks)
|
||||
|
||||
# Filter out failures and sort by momentum score
|
||||
@@ -176,7 +179,7 @@ class MarketScanner:
|
||||
self.context_store.set_context(
|
||||
ContextLayer.L7_REALTIME,
|
||||
timeframe,
|
||||
f"{market.code}_scan_result",
|
||||
f"scan_result_{market.code}",
|
||||
{
|
||||
"total_scanned": len(valid_metrics),
|
||||
"top_movers": [m.stock_code for m in top_movers],
|
||||
|
||||
449 src/analysis/smart_scanner.py Normal file
@@ -0,0 +1,449 @@
|
||||
"""Smart Volatility Scanner with volatility-first market ranking logic."""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
import logging
|
||||
from dataclasses import dataclass
|
||||
from typing import Any
|
||||
|
||||
from src.analysis.volatility import VolatilityAnalyzer
|
||||
from src.broker.kis_api import KISBroker
|
||||
from src.broker.overseas import OverseasBroker
|
||||
from src.config import Settings
|
||||
from src.markets.schedule import MarketInfo
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
@dataclass
|
||||
class ScanCandidate:
|
||||
"""A qualified candidate from the smart scanner."""
|
||||
|
||||
stock_code: str
|
||||
name: str
|
||||
price: float
|
||||
volume: float
|
||||
volume_ratio: float # Current volume / previous day volume
|
||||
rsi: float
|
||||
signal: str # "oversold" or "momentum"
|
||||
score: float # Composite score for ranking
|
||||
|
||||
|
||||
class SmartVolatilityScanner:
|
||||
"""Scans market rankings and applies volatility-first filters.
|
||||
|
||||
Flow:
|
||||
1. Fetch fluctuation rankings as primary universe
|
||||
2. Fetch volume rankings for liquidity bonus
|
||||
3. Score by volatility first, liquidity second
|
||||
4. Return top N qualified candidates
|
||||
"""
|
||||
|
||||
def __init__(
|
||||
self,
|
||||
broker: KISBroker,
|
||||
overseas_broker: OverseasBroker | None,
|
||||
volatility_analyzer: VolatilityAnalyzer,
|
||||
settings: Settings,
|
||||
) -> None:
|
||||
"""Initialize the smart scanner.
|
||||
|
||||
Args:
|
||||
broker: KIS broker for API calls
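overseas_broker: Overseas broker for non-domestic markets (may be None)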
|
||||
volatility_analyzer: Analyzer for RSI calculation
|
||||
settings: Application settings
|
||||
"""
|
||||
self.broker = broker
|
||||
self.overseas_broker = overseas_broker
|
||||
self.analyzer = volatility_analyzer
|
||||
self.settings = settings
|
||||
|
||||
# Extract scanner settings
|
||||
self.rsi_oversold = settings.RSI_OVERSOLD_THRESHOLD
|
||||
self.rsi_momentum = settings.RSI_MOMENTUM_THRESHOLD
|
||||
self.vol_multiplier = settings.VOL_MULTIPLIER
|
||||
self.top_n = settings.SCANNER_TOP_N
|
||||
|
||||
async def scan(
|
||||
self,
|
||||
market: MarketInfo | None = None,
|
||||
fallback_stocks: list[str] | None = None,
|
||||
) -> list[ScanCandidate]:
|
||||
"""Execute smart scan and return qualified candidates.
|
||||
|
||||
Args:
|
||||
market: Target market info (domestic vs overseas behavior)
|
||||
fallback_stocks: Stock codes to use if ranking API fails
|
||||
|
||||
Returns:
|
||||
List of ScanCandidate, sorted by score, up to top_n items
|
||||
"""
|
||||
if market and not market.is_domestic:
|
||||
return await self._scan_overseas(market, fallback_stocks)
|
||||
|
||||
return await self._scan_domestic(fallback_stocks)
|
||||
|
||||
async def _scan_domestic(
|
||||
self,
|
||||
fallback_stocks: list[str] | None = None,
|
||||
) -> list[ScanCandidate]:
|
||||
"""Scan domestic market using volatility-first ranking + liquidity bonus."""
|
||||
# 1) Primary universe from fluctuation ranking.
|
||||
try:
|
||||
fluct_rows = await self.broker.fetch_market_rankings(
|
||||
ranking_type="fluctuation",
|
||||
limit=50,
|
||||
)
|
||||
except ConnectionError as exc:
|
||||
logger.warning("Domestic fluctuation ranking failed: %s", exc)
|
||||
fluct_rows = []
|
||||
|
||||
# 2) Liquidity bonus from volume ranking.
|
||||
try:
|
||||
volume_rows = await self.broker.fetch_market_rankings(
|
||||
ranking_type="volume",
|
||||
limit=50,
|
||||
)
|
||||
except ConnectionError as exc:
|
||||
logger.warning("Domestic volume ranking failed: %s", exc)
|
||||
volume_rows = []
|
||||
|
||||
if not fluct_rows and fallback_stocks:
|
||||
logger.info(
|
||||
"Domestic ranking unavailable; using fallback symbols (%d)",
|
||||
len(fallback_stocks),
|
||||
)
|
||||
fluct_rows = [
|
||||
{
|
||||
"stock_code": code,
|
||||
"name": code,
|
||||
"price": 0.0,
|
||||
"volume": 0.0,
|
||||
"change_rate": 0.0,
|
||||
"volume_increase_rate": 0.0,
|
||||
}
|
||||
for code in fallback_stocks
|
||||
]
|
||||
|
||||
if not fluct_rows:
|
||||
return []
|
||||
|
||||
volume_rank_bonus: dict[str, float] = {}
|
||||
for idx, row in enumerate(volume_rows):
|
||||
code = _extract_stock_code(row)
|
||||
if not code:
|
||||
continue
|
||||
volume_rank_bonus[code] = max(0.0, 15.0 - idx * 0.3)
|
||||
|
||||
candidates: list[ScanCandidate] = []
|
||||
for stock in fluct_rows:
|
||||
stock_code = _extract_stock_code(stock)
|
||||
if not stock_code:
|
||||
continue
|
||||
|
||||
try:
|
||||
price = _extract_last_price(stock)
|
||||
change_rate = _extract_change_rate_pct(stock)
|
||||
volume = _extract_volume(stock)
|
||||
|
||||
intraday_range_pct = 0.0
|
||||
volume_ratio = _safe_float(stock.get("volume_increase_rate"), 0.0) / 100.0 + 1.0
|
||||
|
||||
# Use daily chart to refine range/volume when available.
|
||||
daily_prices = await self.broker.get_daily_prices(stock_code, days=2)
|
||||
if daily_prices:
|
||||
latest = daily_prices[-1]
|
||||
latest_close = _safe_float(latest.get("close"), default=price)
|
||||
if price <= 0:
|
||||
price = latest_close
|
||||
latest_high = _safe_float(latest.get("high"))
|
||||
latest_low = _safe_float(latest.get("low"))
|
||||
if latest_close > 0 and latest_high > 0 and latest_low > 0 and latest_high >= latest_low:
|
||||
intraday_range_pct = (latest_high - latest_low) / latest_close * 100.0
|
||||
if volume <= 0:
|
||||
volume = _safe_float(latest.get("volume"))
|
||||
if len(daily_prices) >= 2:
|
||||
prev_day_volume = _safe_float(daily_prices[-2].get("volume"))
|
||||
if prev_day_volume > 0:
|
||||
volume_ratio = max(volume_ratio, volume / prev_day_volume)
|
||||
|
||||
volatility_pct = max(abs(change_rate), intraday_range_pct)
|
||||
if price <= 0 or volatility_pct < 0.8:
|
||||
continue
|
||||
|
||||
volatility_score = min(volatility_pct / 10.0, 1.0) * 85.0
|
||||
liquidity_score = volume_rank_bonus.get(stock_code, 0.0)
|
||||
score = min(100.0, volatility_score + liquidity_score)
|
||||
signal = "momentum" if change_rate >= 0 else "oversold"
|
||||
implied_rsi = max(0.0, min(100.0, 50.0 + (change_rate * 4.0)))
|
||||
|
||||
candidates.append(
|
||||
ScanCandidate(
|
||||
stock_code=stock_code,
|
||||
name=stock.get("name", stock_code),
|
||||
price=price,
|
||||
volume=volume,
|
||||
volume_ratio=max(1.0, volume_ratio, volatility_pct / 2.0),
|
||||
rsi=implied_rsi,
|
||||
signal=signal,
|
||||
score=score,
|
||||
)
|
||||
)
|
||||
|
||||
except ConnectionError as exc:
|
||||
logger.warning("Failed to analyze %s: %s", stock_code, exc)
|
||||
continue
|
||||
except Exception as exc:
|
||||
logger.error("Unexpected error analyzing %s: %s", stock_code, exc)
|
||||
continue
|
||||
|
||||
logger.info("Domestic ranking scan found %d candidates", len(candidates))
|
||||
candidates.sort(key=lambda c: c.score, reverse=True)
|
||||
return candidates[: self.top_n]
|
||||
|
||||
async def _scan_overseas(
|
||||
self,
|
||||
market: MarketInfo,
|
||||
fallback_stocks: list[str] | None = None,
|
||||
) -> list[ScanCandidate]:
|
||||
"""Scan overseas symbols using ranking API first, then fallback universe."""
|
||||
if self.overseas_broker is None:
|
||||
logger.warning(
|
||||
"Overseas scanner unavailable for %s: overseas broker not configured",
|
||||
market.name,
|
||||
)
|
||||
return []
|
||||
|
||||
candidates = await self._scan_overseas_from_rankings(market)
|
||||
if not candidates:
|
||||
candidates = await self._scan_overseas_from_symbols(market, fallback_stocks)
|
||||
|
||||
candidates.sort(key=lambda c: c.score, reverse=True)
|
||||
return candidates[: self.top_n]
|
||||
|
||||
async def _scan_overseas_from_rankings(
|
||||
self,
|
||||
market: MarketInfo,
|
||||
) -> list[ScanCandidate]:
|
||||
"""Build overseas candidates from ranking APIs using volatility-first scoring."""
|
||||
assert self.overseas_broker is not None
|
||||
try:
|
||||
fluct_rows = await self.overseas_broker.fetch_overseas_rankings(
|
||||
exchange_code=market.exchange_code,
|
||||
ranking_type="fluctuation",
|
||||
limit=50,
|
||||
)
|
||||
except Exception as exc:
|
||||
logger.warning(
|
||||
"Overseas fluctuation ranking failed for %s: %s", market.code, exc
|
||||
)
|
||||
fluct_rows = []
|
||||
|
||||
if not fluct_rows:
|
||||
return []
|
||||
|
||||
volume_rank_bonus: dict[str, float] = {}
|
||||
try:
|
||||
volume_rows = await self.overseas_broker.fetch_overseas_rankings(
|
||||
exchange_code=market.exchange_code,
|
||||
ranking_type="volume",
|
||||
limit=50,
|
||||
)
|
||||
except Exception as exc:
|
||||
logger.warning(
|
||||
"Overseas volume ranking failed for %s: %s", market.code, exc
|
||||
)
|
||||
volume_rows = []
|
||||
|
||||
for idx, row in enumerate(volume_rows):
|
||||
code = _extract_stock_code(row)
|
||||
if not code:
|
||||
continue
|
||||
# Top-ranked by traded value/volume gets higher liquidity bonus.
|
||||
volume_rank_bonus[code] = max(0.0, 15.0 - idx * 0.3)
|
||||
|
||||
candidates: list[ScanCandidate] = []
|
||||
for row in fluct_rows:
|
||||
stock_code = _extract_stock_code(row)
|
||||
if not stock_code:
|
||||
continue
|
||||
|
||||
price = _extract_last_price(row)
|
||||
change_rate = _extract_change_rate_pct(row)
|
||||
volume = _extract_volume(row)
|
||||
intraday_range_pct = _extract_intraday_range_pct(row, price)
|
||||
volatility_pct = max(abs(change_rate), intraday_range_pct)
|
||||
|
||||
# Volatility-first filter (not simple gainers/value ranking).
|
||||
if price <= 0 or volatility_pct < 0.8:
|
||||
continue
|
||||
|
||||
volatility_score = min(volatility_pct / 10.0, 1.0) * 85.0
|
||||
liquidity_score = volume_rank_bonus.get(stock_code, 0.0)
|
||||
score = min(100.0, volatility_score + liquidity_score)
|
||||
signal = "momentum" if change_rate >= 0 else "oversold"
|
||||
implied_rsi = max(0.0, min(100.0, 50.0 + (change_rate * 4.0)))
|
||||
candidates.append(
|
||||
ScanCandidate(
|
||||
stock_code=stock_code,
|
||||
name=str(row.get("name") or row.get("ovrs_item_name") or stock_code),
|
||||
price=price,
|
||||
volume=volume,
|
||||
volume_ratio=max(1.0, volatility_pct / 2.0),
|
||||
rsi=implied_rsi,
|
||||
signal=signal,
|
||||
score=score,
|
||||
)
|
||||
)
|
||||
|
||||
if candidates:
|
||||
logger.info(
|
||||
"Overseas ranking scan found %d candidates for %s",
|
||||
len(candidates),
|
||||
market.name,
|
||||
)
|
||||
return candidates
|
||||
|
||||
async def _scan_overseas_from_symbols(
|
||||
self,
|
||||
market: MarketInfo,
|
||||
symbols: list[str] | None,
|
||||
) -> list[ScanCandidate]:
|
||||
"""Fallback overseas scan from dynamic symbol universe."""
|
||||
assert self.overseas_broker is not None
|
||||
if not symbols:
|
||||
logger.info("Overseas scanner: no symbol universe for %s", market.name)
|
||||
return []
|
||||
|
||||
logger.info(
|
||||
"Overseas scanner: scanning %d fallback symbols for %s",
|
||||
len(symbols),
|
||||
market.name,
|
||||
)
|
||||
candidates: list[ScanCandidate] = []
|
||||
for stock_code in symbols:
|
||||
try:
|
||||
price_data = await self.overseas_broker.get_overseas_price(
|
||||
market.exchange_code, stock_code
|
||||
)
|
||||
output = price_data.get("output", {})
|
||||
price = _extract_last_price(output)
|
||||
change_rate = _extract_change_rate_pct(output)
|
||||
volume = _extract_volume(output)
|
||||
intraday_range_pct = _extract_intraday_range_pct(output, price)
|
||||
volatility_pct = max(abs(change_rate), intraday_range_pct)
|
||||
|
||||
if price <= 0 or volatility_pct < 0.8:
|
||||
continue
|
||||
|
||||
score = min(volatility_pct / 10.0, 1.0) * 100.0
|
||||
signal = "momentum" if change_rate >= 0 else "oversold"
|
||||
implied_rsi = max(0.0, min(100.0, 50.0 + (change_rate * 4.0)))
|
||||
candidates.append(
|
||||
ScanCandidate(
|
||||
stock_code=stock_code,
|
||||
name=stock_code,
|
||||
price=price,
|
||||
volume=volume,
|
||||
volume_ratio=max(1.0, volatility_pct / 2.0),
|
||||
rsi=implied_rsi,
|
||||
signal=signal,
|
||||
score=score,
|
||||
)
|
||||
)
|
||||
except ConnectionError as exc:
|
||||
logger.warning("Failed to analyze overseas %s: %s", stock_code, exc)
|
||||
except Exception as exc:
|
||||
logger.error("Unexpected error analyzing overseas %s: %s", stock_code, exc)
|
||||
logger.info(
|
||||
"Overseas symbol fallback scan found %d candidates for %s",
|
||||
len(candidates),
|
||||
market.name,
|
||||
)
|
||||
return candidates
|
||||
|
||||
def get_stock_codes(self, candidates: list[ScanCandidate]) -> list[str]:
|
||||
"""Extract stock codes from candidates for watchlist update.
|
||||
|
||||
Args:
|
||||
candidates: List of scan candidates
|
||||
|
||||
Returns:
|
||||
List of stock codes
|
||||
"""
|
||||
return [c.stock_code for c in candidates]
|
||||
|
||||
|
||||
def _safe_float(value: Any, default: float = 0.0) -> float:
|
||||
"""Convert arbitrary values to float safely."""
|
||||
if value in (None, ""):
|
||||
return default
|
||||
try:
|
||||
return float(value)
|
||||
except (TypeError, ValueError):
|
||||
return default
|
||||
|
||||
|
||||
def _extract_stock_code(row: dict[str, Any]) -> str:
|
||||
"""Extract normalized stock code from various API schemas."""
|
||||
return (
|
||||
str(
|
||||
row.get("symb")
|
||||
or row.get("ovrs_pdno")
|
||||
or row.get("stock_code")
|
||||
or row.get("pdno")
|
||||
or ""
|
||||
)
|
||||
.strip()
|
||||
.upper()
|
||||
)
|
||||
|
||||
|
||||
def _extract_last_price(row: dict[str, Any]) -> float:
|
||||
"""Extract last/close-like price from API schema variants."""
|
||||
return _safe_float(
|
||||
row.get("last")
|
||||
or row.get("ovrs_nmix_prpr")
|
||||
or row.get("stck_prpr")
|
||||
or row.get("price")
|
||||
or row.get("close")
|
||||
)
|
||||
|
||||
|
||||
def _extract_change_rate_pct(row: dict[str, Any]) -> float:
|
||||
"""Extract daily change rate (%) from API schema variants."""
|
||||
return _safe_float(
|
||||
row.get("rate")
|
||||
or row.get("change_rate")
|
||||
or row.get("prdy_ctrt")
|
||||
or row.get("evlu_pfls_rt")
|
||||
or row.get("chg_rt")
|
||||
)
|
||||
|
||||
|
||||
def _extract_volume(row: dict[str, Any]) -> float:
|
||||
"""Extract volume/traded-amount proxy from schema variants."""
|
||||
return _safe_float(
|
||||
row.get("tvol") or row.get("acml_vol") or row.get("vol") or row.get("volume")
|
||||
)
|
||||
|
||||
|
||||
def _extract_intraday_range_pct(row: dict[str, Any], price: float) -> float:
|
||||
"""Estimate intraday range percentage from high/low fields."""
|
||||
if price <= 0:
|
||||
return 0.0
|
||||
high = _safe_float(
|
||||
row.get("high")
|
||||
or row.get("ovrs_hgpr")
|
||||
or row.get("stck_hgpr")
|
||||
or row.get("day_hgpr")
|
||||
)
|
||||
low = _safe_float(
|
||||
row.get("low")
|
||||
or row.get("ovrs_lwpr")
|
||||
or row.get("stck_lwpr")
|
||||
or row.get("day_lwpr")
|
||||
)
|
||||
if high <= 0 or low <= 0 or high < low:
|
||||
return 0.0
|
||||
return (high - low) / price * 100.0
|
||||
@@ -124,6 +124,54 @@ class VolatilityAnalyzer:
|
||||
return 1.0
|
||||
return current_volume / avg_volume
|
||||
|
||||
def calculate_rsi(
|
||||
self,
|
||||
close_prices: list[float],
|
||||
period: int = 14,
|
||||
) -> float:
|
||||
"""Calculate Relative Strength Index (RSI) using Wilder's smoothing.
|
||||
|
||||
Args:
|
||||
close_prices: List of closing prices (oldest to newest, minimum period+1 values)
|
||||
period: RSI period (default 14)
|
||||
|
||||
Returns:
|
||||
RSI value between 0 and 100, or 50.0 (neutral) if insufficient data
|
||||
|
||||
Examples:
|
||||
>>> analyzer = VolatilityAnalyzer()
|
||||
>>> prices = [100 - i * 0.5 for i in range(20)] # Downtrend
|
||||
>>> rsi = analyzer.calculate_rsi(prices)
|
||||
>>> assert rsi < 50 # Oversold territory
|
||||
"""
|
||||
if len(close_prices) < period + 1:
|
||||
return 50.0 # Neutral RSI if insufficient data
|
||||
|
||||
# Calculate price changes
|
||||
changes = [close_prices[i] - close_prices[i - 1] for i in range(1, len(close_prices))]
|
||||
|
||||
# Separate gains and losses
|
||||
gains = [max(0.0, change) for change in changes]
|
||||
losses = [max(0.0, -change) for change in changes]
|
||||
|
||||
# Calculate initial average gain/loss (simple average for first period)
|
||||
avg_gain = sum(gains[:period]) / period
|
||||
avg_loss = sum(losses[:period]) / period
|
||||
|
||||
# Apply Wilder's smoothing for remaining periods
|
||||
for i in range(period, len(changes)):
|
||||
avg_gain = (avg_gain * (period - 1) + gains[i]) / period
|
||||
avg_loss = (avg_loss * (period - 1) + losses[i]) / period
|
||||
|
||||
# Calculate RS and RSI
|
||||
if avg_loss == 0:
|
||||
return 100.0 # All gains, maximum RSI
|
||||
|
||||
rs = avg_gain / avg_loss
|
||||
rsi = 100 - (100 / (1 + rs))
|
||||
|
||||
return rsi
|
||||
|
||||
def calculate_pv_divergence(
|
||||
self,
|
||||
price_change: float,
|
||||
|
||||
@@ -525,3 +525,233 @@ class GeminiClient:
|
||||
DecisionCache instance or None if caching disabled
|
||||
"""
|
||||
return self._cache
|
||||
|
||||
# ------------------------------------------------------------------
|
||||
# Batch Decision Making (for daily trading mode)
|
||||
# ------------------------------------------------------------------
|
||||
|
||||
async def decide_batch(
|
||||
self, stocks_data: list[dict[str, Any]]
|
||||
) -> dict[str, TradeDecision]:
|
||||
"""Make decisions for multiple stocks in a single API call.
|
||||
|
||||
This is designed for daily trading mode to minimize API usage
|
||||
when working with Gemini Free tier (20 calls/day limit).
|
||||
|
||||
Args:
|
||||
stocks_data: List of market data dictionaries, each with:
|
||||
- stock_code: Stock ticker
|
||||
- current_price: Current price
|
||||
- market_name: Market name (optional)
|
||||
- foreigner_net: Foreigner net buy/sell (optional)
|
||||
|
||||
Returns:
|
||||
Dictionary mapping stock_code to TradeDecision
|
||||
|
||||
Example:
|
||||
>>> stocks_data = [
|
||||
... {"stock_code": "AAPL", "current_price": 185.5},
|
||||
... {"stock_code": "MSFT", "current_price": 420.0},
|
||||
... ]
|
||||
>>> decisions = await client.decide_batch(stocks_data)
|
||||
>>> decisions["AAPL"].action
|
||||
'BUY'
|
||||
"""
|
||||
if not stocks_data:
|
||||
return {}
|
||||
|
||||
# Build compressed batch prompt
|
||||
market_name = stocks_data[0].get("market_name", "stock market")
|
||||
|
||||
# Format stock data as compact JSON array
|
||||
compact_stocks = []
|
||||
for stock in stocks_data:
|
||||
compact = {
|
||||
"code": stock["stock_code"],
|
||||
"price": stock["current_price"],
|
||||
}
|
||||
if stock.get("foreigner_net", 0) != 0:
|
||||
compact["frgn"] = stock["foreigner_net"]
|
||||
compact_stocks.append(compact)
|
||||
|
||||
data_str = json.dumps(compact_stocks, ensure_ascii=False)
|
||||
|
||||
prompt = (
|
||||
f"You are a professional {market_name} trading analyst.\n"
|
||||
"Analyze the following stocks and decide whether to BUY, SELL, or HOLD each one.\n\n"
|
||||
f"Stock Data: {data_str}\n\n"
|
||||
"You MUST respond with ONLY a valid JSON array in this format:\n"
|
||||
'[{"code": "AAPL", "action": "BUY", "confidence": 85, "rationale": "..."},\n'
|
||||
' {"code": "MSFT", "action": "HOLD", "confidence": 50, "rationale": "..."}, ...]\n\n'
|
||||
"Rules:\n"
|
||||
"- Return one decision object per stock\n"
|
||||
"- action must be exactly: BUY, SELL, or HOLD\n"
|
||||
"- confidence must be 0-100\n"
|
||||
"- rationale should be concise (1-2 sentences)\n"
|
||||
"- Do NOT wrap JSON in markdown code blocks\n"
|
||||
)
|
||||
|
||||
# Estimate tokens
|
||||
token_count = self._optimizer.estimate_tokens(prompt)
|
||||
self._total_tokens_used += token_count
|
||||
|
||||
logger.info(
|
||||
"Requesting batch decision for %d stocks from Gemini",
|
||||
len(stocks_data),
|
||||
extra={"estimated_tokens": token_count},
|
||||
)
|
||||
|
||||
try:
|
||||
response = await self._client.aio.models.generate_content(
|
||||
model=self._model_name,
|
||||
contents=prompt,
|
||||
)
|
||||
raw = response.text
|
||||
except Exception as exc:
|
||||
logger.error("Gemini API error in batch decision: %s", exc)
|
||||
# Return HOLD for all stocks on API error
|
||||
return {
|
||||
stock["stock_code"]: TradeDecision(
|
||||
action="HOLD",
|
||||
confidence=0,
|
||||
rationale=f"API error: {exc}",
|
||||
token_count=token_count,
|
||||
cached=False,
|
||||
)
|
||||
for stock in stocks_data
|
||||
}
|
||||
|
||||
# Parse batch response
|
||||
return self._parse_batch_response(raw, stocks_data, token_count)
|
||||
|
||||
def _parse_batch_response(
|
||||
self, raw: str, stocks_data: list[dict[str, Any]], token_count: int
|
||||
) -> dict[str, TradeDecision]:
|
||||
"""Parse batch response into a dictionary of decisions.
|
||||
|
||||
Args:
|
||||
raw: Raw response from Gemini
|
||||
stocks_data: Original stock data list
|
||||
token_count: Token count for the request
|
||||
|
||||
Returns:
|
||||
Dictionary mapping stock_code to TradeDecision
|
||||
"""
|
||||
if not raw or not raw.strip():
|
||||
logger.warning("Empty batch response from Gemini — defaulting all to HOLD")
|
||||
return {
|
||||
stock["stock_code"]: TradeDecision(
|
||||
action="HOLD",
|
||||
confidence=0,
|
||||
rationale="Empty response",
|
||||
token_count=0,
|
||||
cached=False,
|
||||
)
|
||||
for stock in stocks_data
|
||||
}
|
||||
|
||||
# Strip markdown code fences if present
|
||||
cleaned = raw.strip()
|
||||
match = re.search(r"```(?:json)?\s*\n?(.*?)\n?```", cleaned, re.DOTALL)
|
||||
if match:
|
||||
cleaned = match.group(1).strip()
|
||||
|
||||
try:
|
||||
data = json.loads(cleaned)
|
||||
except json.JSONDecodeError:
|
||||
logger.warning("Malformed JSON in batch response — defaulting all to HOLD")
|
||||
return {
|
||||
stock["stock_code"]: TradeDecision(
|
||||
action="HOLD",
|
||||
confidence=0,
|
||||
rationale="Malformed JSON response",
|
||||
token_count=0,
|
||||
cached=False,
|
||||
)
|
||||
for stock in stocks_data
|
||||
}
|
||||
|
||||
if not isinstance(data, list):
|
||||
logger.warning("Batch response is not a JSON array — defaulting all to HOLD")
|
||||
return {
|
||||
stock["stock_code"]: TradeDecision(
|
||||
action="HOLD",
|
||||
confidence=0,
|
||||
rationale="Invalid response format",
|
||||
token_count=0,
|
||||
cached=False,
|
||||
)
|
||||
for stock in stocks_data
|
||||
}
|
||||
|
||||
# Build decision map
|
||||
decisions: dict[str, TradeDecision] = {}
|
||||
stock_codes = {stock["stock_code"] for stock in stocks_data}
|
||||
|
||||
for item in data:
|
||||
if not isinstance(item, dict):
|
||||
continue
|
||||
|
||||
code = item.get("code")
|
||||
if not code or code not in stock_codes:
|
||||
continue
|
||||
|
||||
# Validate required fields
|
||||
if not all(k in item for k in ("action", "confidence", "rationale")):
|
||||
logger.warning("Missing fields for %s — using HOLD", code)
|
||||
decisions[code] = TradeDecision(
|
||||
action="HOLD",
|
||||
confidence=0,
|
||||
rationale="Missing required fields",
|
||||
token_count=0,
|
||||
cached=False,
|
||||
)
|
||||
continue
|
||||
|
||||
action = str(item["action"]).upper()
|
||||
if action not in VALID_ACTIONS:
|
||||
logger.warning("Invalid action '%s' for %s — forcing HOLD", action, code)
|
||||
action = "HOLD"
|
||||
|
||||
confidence = int(item["confidence"])
|
||||
rationale = str(item["rationale"])
|
||||
|
||||
# Enforce confidence threshold
|
||||
if confidence < self._confidence_threshold:
|
||||
logger.info(
|
||||
"Confidence %d < threshold %d for %s — forcing HOLD",
|
||||
confidence,
|
||||
self._confidence_threshold,
|
||||
code,
|
||||
)
|
||||
action = "HOLD"
|
||||
|
||||
decisions[code] = TradeDecision(
|
||||
action=action,
|
||||
confidence=confidence,
|
||||
rationale=rationale,
|
||||
token_count=token_count // len(stocks_data), # Split token cost
|
||||
cached=False,
|
||||
)
|
||||
self._total_decisions += 1
|
||||
|
||||
# Fill in missing stocks with HOLD
|
||||
for stock in stocks_data:
|
||||
code = stock["stock_code"]
|
||||
if code not in decisions:
|
||||
logger.warning("No decision for %s in batch response — using HOLD", code)
|
||||
decisions[code] = TradeDecision(
|
||||
action="HOLD",
|
||||
confidence=0,
|
||||
rationale="Not found in batch response",
|
||||
token_count=0,
|
||||
cached=False,
|
||||
)
|
||||
|
||||
logger.info(
|
||||
"Batch decision completed for %d stocks",
|
||||
len(decisions),
|
||||
extra={"tokens": token_count},
|
||||
)
|
||||
|
||||
return decisions
|
||||
|
||||
@@ -104,12 +104,14 @@ class KISBroker:
|
||||
time_since_last_attempt = now - self._last_refresh_attempt
|
||||
if time_since_last_attempt < self._refresh_cooldown:
|
||||
remaining = self._refresh_cooldown - time_since_last_attempt
|
||||
error_msg = (
|
||||
f"Token refresh on cooldown. "
|
||||
f"Retry in {remaining:.1f}s (KIS allows 1/minute)"
|
||||
# Do not fail fast here. If token is unavailable, upstream calls
|
||||
# will all fail for up to a minute and scanning returns no trades.
|
||||
logger.warning(
|
||||
"Token refresh on cooldown. Waiting %.1fs before retry (KIS allows 1/minute)",
|
||||
remaining,
|
||||
)
|
||||
logger.warning(error_msg)
|
||||
raise ConnectionError(error_msg)
|
||||
await asyncio.sleep(remaining)
|
||||
now = asyncio.get_event_loop().time()
|
||||
|
||||
logger.info("Refreshing KIS access token")
|
||||
self._last_refresh_attempt = now
|
||||
@@ -138,6 +140,7 @@ class KISBroker:
|
||||
|
||||
async def _get_hash_key(self, body: dict[str, Any]) -> str:
|
||||
"""Request a hash key from KIS for POST request body signing."""
|
||||
await self._rate_limiter.acquire()
|
||||
session = self._get_session()
|
||||
url = f"{self._base_url}/uapi/hashkey"
|
||||
headers = {
|
||||
@@ -279,3 +282,153 @@ class KISBroker:
|
||||
return data
|
||||
except (TimeoutError, aiohttp.ClientError) as exc:
|
||||
raise ConnectionError(f"Network error sending order: {exc}") from exc
|
||||
|
||||
async def fetch_market_rankings(
|
||||
self,
|
||||
ranking_type: str = "volume",
|
||||
limit: int = 30,
|
||||
) -> list[dict[str, Any]]:
|
||||
"""Fetch market rankings from KIS API.
|
||||
|
||||
Args:
|
||||
ranking_type: Type of ranking ("volume" or "fluctuation")
|
||||
limit: Maximum number of results to return
|
||||
|
||||
Returns:
|
||||
List of stock data dicts with keys: stock_code, name, price, volume,
|
||||
change_rate, volume_increase_rate
|
||||
|
||||
Raises:
|
||||
ConnectionError: If API request fails
|
||||
"""
|
||||
await self._rate_limiter.acquire()
|
||||
session = self._get_session()
|
||||
|
||||
# TR_ID depends on the ranking type (volume vs. fluctuation)
|
||||
tr_id = "FHPST01710000" if ranking_type == "volume" else "FHPST01710100"
|
||||
headers = await self._auth_headers(tr_id)
|
||||
|
||||
params = {
|
||||
"FID_COND_MRKT_DIV_CODE": "J", # Stock/ETF/ETN
|
||||
"FID_COND_SCR_DIV_CODE": "20001", # Volume surge
|
||||
"FID_INPUT_ISCD": "0000", # All stocks
|
||||
"FID_DIV_CLS_CODE": "0", # All types
|
||||
"FID_BLNG_CLS_CODE": "0",
|
||||
"FID_TRGT_CLS_CODE": "111111111",
|
||||
"FID_TRGT_EXLS_CLS_CODE": "000000",
|
||||
"FID_INPUT_PRICE_1": "0",
|
||||
"FID_INPUT_PRICE_2": "0",
|
||||
"FID_VOL_CNT": "0",
|
||||
"FID_INPUT_DATE_1": "",
|
||||
}
|
||||
|
||||
url = f"{self._base_url}/uapi/domestic-stock/v1/quotations/volume-rank"
|
||||
|
||||
try:
|
||||
async with session.get(url, headers=headers, params=params) as resp:
|
||||
if resp.status != 200:
|
||||
text = await resp.text()
|
||||
raise ConnectionError(
|
||||
f"fetch_market_rankings failed ({resp.status}): {text}"
|
||||
)
|
||||
data = await resp.json()
|
||||
|
||||
# Parse response - output is a list of ranked stocks
|
||||
def _safe_float(value: str | float | None, default: float = 0.0) -> float:
|
||||
if value is None or value == "":
|
||||
return default
|
||||
try:
|
||||
return float(value)
|
||||
except (ValueError, TypeError):
|
||||
return default
|
||||
|
||||
rankings = []
|
||||
for item in data.get("output", [])[:limit]:
|
||||
rankings.append({
|
||||
"stock_code": item.get("mksc_shrn_iscd", ""),
|
||||
"name": item.get("hts_kor_isnm", ""),
|
||||
"price": _safe_float(item.get("stck_prpr", "0")),
|
||||
"volume": _safe_float(item.get("acml_vol", "0")),
|
||||
"change_rate": _safe_float(item.get("prdy_ctrt", "0")),
|
||||
"volume_increase_rate": _safe_float(item.get("vol_inrt", "0")),
|
||||
})
|
||||
return rankings
|
||||
|
||||
except (TimeoutError, aiohttp.ClientError) as exc:
|
||||
raise ConnectionError(f"Network error fetching rankings: {exc}") from exc
|
||||
|
||||
async def get_daily_prices(
|
||||
self,
|
||||
stock_code: str,
|
||||
days: int = 20,
|
||||
) -> list[dict[str, Any]]:
|
||||
"""Fetch daily OHLCV price history for a stock.
|
||||
|
||||
Args:
|
||||
stock_code: 6-digit stock code
|
||||
days: Number of trading days to fetch (default 20 for RSI calculation)
|
||||
|
||||
Returns:
|
||||
List of daily price dicts with keys: date, open, high, low, close, volume
|
||||
Sorted oldest to newest
|
||||
|
||||
Raises:
|
||||
ConnectionError: If API request fails
|
||||
"""
|
||||
await self._rate_limiter.acquire()
|
||||
session = self._get_session()
|
||||
|
||||
headers = await self._auth_headers("FHKST03010100")
|
||||
|
||||
# Calculate date range (today and N days ago)
|
||||
from datetime import datetime, timedelta
|
||||
end_date = datetime.now().strftime("%Y%m%d")
|
||||
start_date = (datetime.now() - timedelta(days=days + 10)).strftime("%Y%m%d")
|
||||
|
||||
params = {
|
||||
"FID_COND_MRKT_DIV_CODE": "J",
|
||||
"FID_INPUT_ISCD": stock_code,
|
||||
"FID_INPUT_DATE_1": start_date,
|
||||
"FID_INPUT_DATE_2": end_date,
|
||||
"FID_PERIOD_DIV_CODE": "D", # Daily
|
||||
"FID_ORG_ADJ_PRC": "0", # Adjusted price
|
||||
}
|
||||
|
||||
url = f"{self._base_url}/uapi/domestic-stock/v1/quotations/inquire-daily-itemchartprice"
|
||||
|
||||
try:
|
||||
async with session.get(url, headers=headers, params=params) as resp:
|
||||
if resp.status != 200:
|
||||
text = await resp.text()
|
||||
raise ConnectionError(
|
||||
f"get_daily_prices failed ({resp.status}): {text}"
|
||||
)
|
||||
data = await resp.json()
|
||||
|
||||
# Parse response
|
||||
def _safe_float(value: str | float | None, default: float = 0.0) -> float:
|
||||
if value is None or value == "":
|
||||
return default
|
||||
try:
|
||||
return float(value)
|
||||
except (ValueError, TypeError):
|
||||
return default
|
||||
|
||||
prices = []
|
||||
for item in data.get("output2", []):
|
||||
prices.append({
|
||||
"date": item.get("stck_bsop_date", ""),
|
||||
"open": _safe_float(item.get("stck_oprc", "0")),
|
||||
"high": _safe_float(item.get("stck_hgpr", "0")),
|
||||
"low": _safe_float(item.get("stck_lwpr", "0")),
|
||||
"close": _safe_float(item.get("stck_clpr", "0")),
|
||||
"volume": _safe_float(item.get("acml_vol", "0")),
|
||||
})
|
||||
|
||||
# Sort oldest to newest (KIS returns newest first)
|
||||
prices.reverse()
|
||||
|
||||
return prices[:days] # Return only requested number of days
|
||||
|
||||
except (TimeoutError, aiohttp.ClientError) as exc:
|
||||
raise ConnectionError(f"Network error fetching daily prices: {exc}") from exc
|
||||
|
||||
@@ -12,6 +12,20 @@ from src.broker.kis_api import KISBroker
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
# Ranking API uses different exchange codes than order/quote APIs.
|
||||
_RANKING_EXCHANGE_MAP: dict[str, str] = {
|
||||
"NASD": "NAS",
|
||||
"NYSE": "NYS",
|
||||
"AMEX": "AMS",
|
||||
"SEHK": "HKS",
|
||||
"SHAA": "SHS",
|
||||
"SZAA": "SZS",
|
||||
"HSX": "HSX",
|
||||
"HNX": "HNX",
|
||||
"TSE": "TSE",
|
||||
}
|
||||
|
||||
|
||||
class OverseasBroker:
|
||||
"""KIS Overseas Stock API wrapper that reuses KISBroker infrastructure."""
|
||||
|
||||
@@ -64,6 +78,81 @@ class OverseasBroker:
|
||||
f"Network error fetching overseas price: {exc}"
|
||||
) from exc
|
||||
|
||||
async def fetch_overseas_rankings(
|
||||
self,
|
||||
exchange_code: str,
|
||||
ranking_type: str = "fluctuation",
|
||||
limit: int = 30,
|
||||
) -> list[dict[str, Any]]:
|
||||
"""Fetch overseas rankings (price change or volume surge).
|
||||
|
||||
Ranking API specs may differ by account/product. Endpoint paths and
|
||||
TR_IDs are configurable via settings and can be overridden in .env.
|
||||
"""
|
||||
if not self._broker._settings.OVERSEAS_RANKING_ENABLED:
|
||||
return []
|
||||
|
||||
await self._broker._rate_limiter.acquire()
|
||||
session = self._broker._get_session()
|
||||
|
||||
ranking_excd = _RANKING_EXCHANGE_MAP.get(exchange_code, exchange_code)
|
||||
|
||||
if ranking_type == "volume":
|
||||
tr_id = self._broker._settings.OVERSEAS_RANKING_VOLUME_TR_ID
|
||||
path = self._broker._settings.OVERSEAS_RANKING_VOLUME_PATH
|
||||
params: dict[str, str] = {
|
||||
"AUTH": "",
|
||||
"EXCD": ranking_excd,
|
||||
"MIXN": "0",
|
||||
"VOL_RANG": "0",
|
||||
}
|
||||
else:
|
||||
tr_id = self._broker._settings.OVERSEAS_RANKING_FLUCT_TR_ID
|
||||
path = self._broker._settings.OVERSEAS_RANKING_FLUCT_PATH
|
||||
params = {
|
||||
"AUTH": "",
|
||||
"EXCD": ranking_excd,
|
||||
"NDAY": "0",
|
||||
"GUBN": "1",
|
||||
"VOL_RANG": "0",
|
||||
}
|
||||
|
||||
headers = await self._broker._auth_headers(tr_id)
|
||||
url = f"{self._broker._base_url}{path}"
|
||||
|
||||
try:
|
||||
async with session.get(url, headers=headers, params=params) as resp:
|
||||
if resp.status != 200:
|
||||
text = await resp.text()
|
||||
if resp.status == 404:
|
||||
logger.warning(
|
||||
"Overseas ranking endpoint unavailable (404) for %s/%s; "
|
||||
"using symbol fallback scan",
|
||||
exchange_code,
|
||||
ranking_type,
|
||||
)
|
||||
return []
|
||||
raise ConnectionError(
|
||||
f"fetch_overseas_rankings failed ({resp.status}): {text}"
|
||||
)
|
||||
|
||||
data = await resp.json()
|
||||
rows = self._extract_ranking_rows(data)
|
||||
if rows:
|
||||
return rows[:limit]
|
||||
|
||||
logger.debug(
|
||||
"Overseas ranking returned empty for %s/%s (keys=%s)",
|
||||
exchange_code,
|
||||
ranking_type,
|
||||
list(data.keys()),
|
||||
)
|
||||
return []
|
||||
except (TimeoutError, aiohttp.ClientError) as exc:
|
||||
raise ConnectionError(
|
||||
f"Network error fetching overseas rankings: {exc}"
|
||||
) from exc
|
||||
|
||||
async def get_overseas_balance(self, exchange_code: str) -> dict[str, Any]:
|
||||
"""
|
||||
Fetch overseas account balance.
|
||||
@@ -198,3 +287,11 @@ class OverseasBroker:
|
||||
"HSX": "VND",
|
||||
}
|
||||
return currency_map.get(exchange_code, "USD")
|
||||
|
||||
def _extract_ranking_rows(self, data: dict[str, Any]) -> list[dict[str, Any]]:
|
||||
"""Extract list rows from ranking response across schema variants."""
|
||||
candidates = [data.get("output"), data.get("output1"), data.get("output2")]
|
||||
for value in candidates:
|
||||
if isinstance(value, list):
|
||||
return [row for row in value if isinstance(row, dict)]
|
||||
return []
|
||||
|
||||
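For orientation, a minimal usage sketch of the ranking call added above, assuming the wrapper is built around an existing `KISBroker`; the `OverseasBroker` constructor and module path are not shown in this diff and are assumptions:

```python
from src.broker.kis_api import KISBroker
from src.broker.overseas import OverseasBroker  # module path assumed


async def scan_nasdaq_volume_surge(broker: KISBroker) -> list[dict]:
    overseas = OverseasBroker(broker)  # constructor assumed to wrap an existing KISBroker
    # Returns an empty list when OVERSEAS_RANKING_ENABLED is false or the endpoint 404s,
    # so callers can fall back to a symbol-based scan.
    return await overseas.fetch_overseas_rankings("NASD", ranking_type="volume", limit=30)
```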
@@ -33,18 +33,42 @@ class Settings(BaseSettings):
|
||||
FAT_FINGER_PCT: float = Field(default=30.0, gt=0.0, le=100.0)
|
||||
CONFIDENCE_THRESHOLD: int = Field(default=80, ge=0, le=100)
|
||||
|
||||
# Smart Scanner Configuration
|
||||
RSI_OVERSOLD_THRESHOLD: int = Field(default=30, ge=0, le=50)
|
||||
RSI_MOMENTUM_THRESHOLD: int = Field(default=70, ge=50, le=100)
|
||||
VOL_MULTIPLIER: float = Field(default=2.0, gt=1.0, le=10.0)
|
||||
SCANNER_TOP_N: int = Field(default=3, ge=1, le=10)
|
||||
POSITION_SIZING_ENABLED: bool = True
|
||||
POSITION_BASE_ALLOCATION_PCT: float = Field(default=5.0, gt=0.0, le=30.0)
|
||||
POSITION_MIN_ALLOCATION_PCT: float = Field(default=1.0, gt=0.0, le=20.0)
|
||||
POSITION_MAX_ALLOCATION_PCT: float = Field(default=10.0, gt=0.0, le=50.0)
|
||||
POSITION_VOLATILITY_TARGET_SCORE: float = Field(default=50.0, gt=0.0, le=100.0)
|
||||
|
||||
# Database
|
||||
DB_PATH: str = "data/trade_logs.db"
|
||||
|
||||
# Rate Limiting (requests per second for KIS API)
|
||||
# Reduced to 5.0 to avoid EGW00201 "초당 거래건수 초과" errors
|
||||
RATE_LIMIT_RPS: float = 5.0
|
||||
# Conservative limit to avoid EGW00201 "초당 거래건수 초과" (per-second request limit exceeded) errors.
|
||||
# KIS API real limit is ~2 RPS; 2.0 provides maximum safety.
|
||||
RATE_LIMIT_RPS: float = 2.0
|
||||
|
||||
# Trading mode
|
||||
MODE: str = Field(default="paper", pattern="^(paper|live)$")
|
||||
|
||||
# Trading frequency mode (daily = batch API calls, realtime = per-stock calls)
|
||||
TRADE_MODE: str = Field(default="daily", pattern="^(daily|realtime)$")
|
||||
DAILY_SESSIONS: int = Field(default=4, ge=1, le=10)
|
||||
SESSION_INTERVAL_HOURS: int = Field(default=6, ge=1, le=24)
|
||||
|
||||
# Pre-Market Planner
|
||||
PRE_MARKET_MINUTES: int = Field(default=30, ge=10, le=120)
|
||||
MAX_SCENARIOS_PER_STOCK: int = Field(default=5, ge=1, le=10)
|
||||
PLANNER_TIMEOUT_SECONDS: int = Field(default=60, ge=10, le=300)
|
||||
DEFENSIVE_PLAYBOOK_ON_FAILURE: bool = True
|
||||
RESCAN_INTERVAL_SECONDS: int = Field(default=300, ge=60, le=900)
|
||||
|
||||
# Market selection (comma-separated market codes)
|
||||
ENABLED_MARKETS: str = "KR"
|
||||
ENABLED_MARKETS: str = "KR,US"
|
||||
|
||||
# Backup and Disaster Recovery (optional)
|
||||
BACKUP_ENABLED: bool = True
|
||||
@@ -60,6 +84,27 @@ class Settings(BaseSettings):
|
||||
TELEGRAM_CHAT_ID: str | None = None
|
||||
TELEGRAM_ENABLED: bool = True
|
||||
|
||||
# Telegram Commands (optional)
|
||||
TELEGRAM_COMMANDS_ENABLED: bool = True
|
||||
TELEGRAM_POLLING_INTERVAL: float = 1.0 # seconds
|
||||
|
||||
# Overseas ranking API (KIS endpoint/TR_ID may vary by account/product)
|
||||
# Override these from .env if your account uses different specs.
|
||||
OVERSEAS_RANKING_ENABLED: bool = True
|
||||
OVERSEAS_RANKING_FLUCT_TR_ID: str = "HHDFS76290000"
|
||||
OVERSEAS_RANKING_VOLUME_TR_ID: str = "HHDFS76270000"
|
||||
OVERSEAS_RANKING_FLUCT_PATH: str = (
|
||||
"/uapi/overseas-stock/v1/ranking/updown-rate"
|
||||
)
|
||||
OVERSEAS_RANKING_VOLUME_PATH: str = (
|
||||
"/uapi/overseas-stock/v1/ranking/volume-surge"
|
||||
)
|
||||
|
||||
# Dashboard (optional)
|
||||
DASHBOARD_ENABLED: bool = False
|
||||
DASHBOARD_HOST: str = "127.0.0.1"
|
||||
DASHBOARD_PORT: int = Field(default=8080, ge=1, le=65535)
|
||||
|
||||
model_config = {"env_file": ".env", "env_file_encoding": "utf-8"}
|
||||
|
||||
@property
|
||||
@@ -73,4 +118,7 @@ class Settings(BaseSettings):
|
||||
@property
|
||||
def enabled_market_list(self) -> list[str]:
|
||||
"""Parse ENABLED_MARKETS into list of market codes."""
|
||||
return [m.strip() for m in self.ENABLED_MARKETS.split(",") if m.strip()]
|
||||
from src.markets.schedule import expand_market_codes
|
||||
|
||||
raw = [m.strip() for m in self.ENABLED_MARKETS.split(",") if m.strip()]
|
||||
return expand_market_codes(raw)
|
||||
|
||||
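The overseas ranking settings introduced earlier in this Settings diff can be overridden from `.env` when an account uses different KIS specs; a minimal sketch using the defaults shown above:

```bash
# Override only if your KIS account/product uses different ranking specs.
OVERSEAS_RANKING_ENABLED=true
OVERSEAS_RANKING_FLUCT_TR_ID=HHDFS76290000
OVERSEAS_RANKING_VOLUME_TR_ID=HHDFS76270000
OVERSEAS_RANKING_FLUCT_PATH=/uapi/overseas-stock/v1/ranking/updown-rate
OVERSEAS_RANKING_VOLUME_PATH=/uapi/overseas-stock/v1/ranking/volume-surge
```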
@@ -5,6 +5,7 @@ The context tree implements Pillar 2: hierarchical memory management across
|
||||
"""
|
||||
|
||||
from src.context.layer import ContextLayer
|
||||
from src.context.scheduler import ContextScheduler
|
||||
from src.context.store import ContextStore
|
||||
|
||||
__all__ = ["ContextLayer", "ContextStore"]
|
||||
__all__ = ["ContextLayer", "ContextScheduler", "ContextStore"]
|
||||
|
||||
@@ -18,52 +18,83 @@ class ContextAggregator:
|
||||
self.conn = conn
|
||||
self.store = ContextStore(conn)
|
||||
|
||||
def aggregate_daily_from_trades(self, date: str | None = None) -> None:
|
||||
def aggregate_daily_from_trades(
|
||||
self, date: str | None = None, market: str | None = None
|
||||
) -> None:
|
||||
"""Aggregate L6 (daily) context from trades table.
|
||||
|
||||
Args:
|
||||
date: Date in YYYY-MM-DD format. If None, uses today.
|
||||
market: Market code filter (e.g., "KR", "US"). If None, aggregates all markets.
|
||||
"""
|
||||
if date is None:
|
||||
date = datetime.now(UTC).date().isoformat()
|
||||
|
||||
# Calculate daily metrics from trades
|
||||
cursor = self.conn.execute(
|
||||
"""
|
||||
SELECT
|
||||
COUNT(*) as trade_count,
|
||||
SUM(CASE WHEN action = 'BUY' THEN 1 ELSE 0 END) as buys,
|
||||
SUM(CASE WHEN action = 'SELL' THEN 1 ELSE 0 END) as sells,
|
||||
SUM(CASE WHEN action = 'HOLD' THEN 1 ELSE 0 END) as holds,
|
||||
AVG(confidence) as avg_confidence,
|
||||
SUM(pnl) as total_pnl,
|
||||
COUNT(DISTINCT stock_code) as unique_stocks,
|
||||
SUM(CASE WHEN pnl > 0 THEN 1 ELSE 0 END) as wins,
|
||||
SUM(CASE WHEN pnl < 0 THEN 1 ELSE 0 END) as losses
|
||||
FROM trades
|
||||
WHERE DATE(timestamp) = ?
|
||||
""",
|
||||
(date,),
|
||||
)
|
||||
row = cursor.fetchone()
|
||||
|
||||
if row and row[0] > 0: # At least one trade
|
||||
trade_count, buys, sells, holds, avg_conf, total_pnl, stocks, wins, losses = row
|
||||
|
||||
# Store daily metrics in L6
|
||||
self.store.set_context(ContextLayer.L6_DAILY, date, "trade_count", trade_count)
|
||||
self.store.set_context(ContextLayer.L6_DAILY, date, "buys", buys)
|
||||
self.store.set_context(ContextLayer.L6_DAILY, date, "sells", sells)
|
||||
self.store.set_context(ContextLayer.L6_DAILY, date, "holds", holds)
|
||||
self.store.set_context(
|
||||
ContextLayer.L6_DAILY, date, "avg_confidence", round(avg_conf, 2)
|
||||
if market is None:
|
||||
cursor = self.conn.execute(
|
||||
"""
|
||||
SELECT DISTINCT market
|
||||
FROM trades
|
||||
WHERE DATE(timestamp) = ?
|
||||
""",
|
||||
(date,),
|
||||
)
|
||||
self.store.set_context(
|
||||
ContextLayer.L6_DAILY, date, "total_pnl", round(total_pnl, 2)
|
||||
markets = [row[0] for row in cursor.fetchall() if row[0]]
|
||||
else:
|
||||
markets = [market]
|
||||
|
||||
for market_code in markets:
|
||||
# Calculate daily metrics from trades for the market
|
||||
cursor = self.conn.execute(
|
||||
"""
|
||||
SELECT
|
||||
COUNT(*) as trade_count,
|
||||
SUM(CASE WHEN action = 'BUY' THEN 1 ELSE 0 END) as buys,
|
||||
SUM(CASE WHEN action = 'SELL' THEN 1 ELSE 0 END) as sells,
|
||||
SUM(CASE WHEN action = 'HOLD' THEN 1 ELSE 0 END) as holds,
|
||||
AVG(confidence) as avg_confidence,
|
||||
SUM(pnl) as total_pnl,
|
||||
COUNT(DISTINCT stock_code) as unique_stocks,
|
||||
SUM(CASE WHEN pnl > 0 THEN 1 ELSE 0 END) as wins,
|
||||
SUM(CASE WHEN pnl < 0 THEN 1 ELSE 0 END) as losses
|
||||
FROM trades
|
||||
WHERE DATE(timestamp) = ? AND market = ?
|
||||
""",
|
||||
(date, market_code),
|
||||
)
|
||||
self.store.set_context(ContextLayer.L6_DAILY, date, "unique_stocks", stocks)
|
||||
win_rate = round(wins / max(wins + losses, 1) * 100, 2)
|
||||
self.store.set_context(ContextLayer.L6_DAILY, date, "win_rate", win_rate)
|
||||
row = cursor.fetchone()
|
||||
|
||||
if row and row[0] > 0: # At least one trade
|
||||
trade_count, buys, sells, holds, avg_conf, total_pnl, stocks, wins, losses = row
|
||||
|
||||
key_suffix = f"_{market_code}"
|
||||
|
||||
# Store daily metrics in L6 with market suffix
|
||||
self.store.set_context(
|
||||
ContextLayer.L6_DAILY, date, f"trade_count{key_suffix}", trade_count
|
||||
)
|
||||
self.store.set_context(ContextLayer.L6_DAILY, date, f"buys{key_suffix}", buys)
|
||||
self.store.set_context(ContextLayer.L6_DAILY, date, f"sells{key_suffix}", sells)
|
||||
self.store.set_context(ContextLayer.L6_DAILY, date, f"holds{key_suffix}", holds)
|
||||
self.store.set_context(
|
||||
ContextLayer.L6_DAILY,
|
||||
date,
|
||||
f"avg_confidence{key_suffix}",
|
||||
round(avg_conf, 2),
|
||||
)
|
||||
self.store.set_context(
|
||||
ContextLayer.L6_DAILY,
|
||||
date,
|
||||
f"total_pnl{key_suffix}",
|
||||
round(total_pnl, 2),
|
||||
)
|
||||
self.store.set_context(
|
||||
ContextLayer.L6_DAILY, date, f"unique_stocks{key_suffix}", stocks
|
||||
)
|
||||
win_rate = round(wins / max(wins + losses, 1) * 100, 2)
|
||||
self.store.set_context(
|
||||
ContextLayer.L6_DAILY, date, f"win_rate{key_suffix}", win_rate
|
||||
)
|
||||
|
||||
def aggregate_weekly_from_daily(self, week: str | None = None) -> None:
|
||||
"""Aggregate L5 (weekly) context from L6 (daily).
|
||||
@@ -92,14 +123,25 @@ class ContextAggregator:
|
||||
daily_data[row[0]].append(json.loads(row[1]))
|
||||
|
||||
if daily_data:
|
||||
# Sum all PnL values
|
||||
# Sum all PnL values (market-specific if suffixed)
|
||||
if "total_pnl" in daily_data:
|
||||
total_pnl = sum(daily_data["total_pnl"])
|
||||
self.store.set_context(
|
||||
ContextLayer.L5_WEEKLY, week, "weekly_pnl", round(total_pnl, 2)
|
||||
)
|
||||
|
||||
# Average all confidence values
|
||||
for key, values in daily_data.items():
|
||||
if key.startswith("total_pnl_"):
|
||||
market_code = key.split("total_pnl_", 1)[1]
|
||||
total_pnl = sum(values)
|
||||
self.store.set_context(
|
||||
ContextLayer.L5_WEEKLY,
|
||||
week,
|
||||
f"weekly_pnl_{market_code}",
|
||||
round(total_pnl, 2),
|
||||
)
|
||||
|
||||
# Average all confidence values (market-specific if suffixed)
|
||||
if "avg_confidence" in daily_data:
|
||||
conf_values = daily_data["avg_confidence"]
|
||||
avg_conf = sum(conf_values) / len(conf_values)
|
||||
@@ -107,6 +149,17 @@ class ContextAggregator:
|
||||
ContextLayer.L5_WEEKLY, week, "avg_confidence", round(avg_conf, 2)
|
||||
)
|
||||
|
||||
for key, values in daily_data.items():
|
||||
if key.startswith("avg_confidence_"):
|
||||
market_code = key.split("avg_confidence_", 1)[1]
|
||||
avg_conf = sum(values) / len(values)
|
||||
self.store.set_context(
|
||||
ContextLayer.L5_WEEKLY,
|
||||
week,
|
||||
f"avg_confidence_{market_code}",
|
||||
round(avg_conf, 2),
|
||||
)
|
||||
|
||||
def aggregate_monthly_from_weekly(self, month: str | None = None) -> None:
|
||||
"""Aggregate L4 (monthly) context from L5 (weekly).
|
||||
|
||||
@@ -135,8 +188,16 @@ class ContextAggregator:
|
||||
|
||||
if weekly_data:
|
||||
# Sum all weekly PnL values
|
||||
total_pnl_values: list[float] = []
|
||||
if "weekly_pnl" in weekly_data:
|
||||
total_pnl = sum(weekly_data["weekly_pnl"])
|
||||
total_pnl_values.extend(weekly_data["weekly_pnl"])
|
||||
|
||||
for key, values in weekly_data.items():
|
||||
if key.startswith("weekly_pnl_"):
|
||||
total_pnl_values.extend(values)
|
||||
|
||||
if total_pnl_values:
|
||||
total_pnl = sum(total_pnl_values)
|
||||
self.store.set_context(
|
||||
ContextLayer.L4_MONTHLY, month, "monthly_pnl", round(total_pnl, 2)
|
||||
)
|
||||
@@ -230,21 +291,44 @@ class ContextAggregator:
|
||||
)
|
||||
|
||||
def run_all_aggregations(self) -> None:
|
||||
"""Run all aggregations from L7 to L1 (bottom-up)."""
|
||||
"""Run all aggregations from L7 to L1 (bottom-up).
|
||||
|
||||
All timeframes are derived from the latest trade timestamp so that
|
||||
re-aggregating past data produces consistent results across layers.
|
||||
"""
|
||||
cursor = self.conn.execute("SELECT MAX(timestamp) FROM trades")
|
||||
row = cursor.fetchone()
|
||||
if not row or row[0] is None:
|
||||
return
|
||||
|
||||
ts_raw = row[0]
|
||||
if ts_raw.endswith("Z"):
|
||||
ts_raw = ts_raw.replace("Z", "+00:00")
|
||||
latest_ts = datetime.fromisoformat(ts_raw)
|
||||
trade_date = latest_ts.date()
|
||||
date_str = trade_date.isoformat()
|
||||
|
||||
iso_year, iso_week, _ = trade_date.isocalendar()
|
||||
week_str = f"{iso_year}-W{iso_week:02d}"
|
||||
month_str = f"{trade_date.year}-{trade_date.month:02d}"
|
||||
quarter = (trade_date.month - 1) // 3 + 1
|
||||
quarter_str = f"{trade_date.year}-Q{quarter}"
|
||||
year_str = str(trade_date.year)
|
||||
|
||||
# L7 (trades) → L6 (daily)
|
||||
self.aggregate_daily_from_trades()
|
||||
self.aggregate_daily_from_trades(date_str)
|
||||
|
||||
# L6 (daily) → L5 (weekly)
|
||||
self.aggregate_weekly_from_daily()
|
||||
self.aggregate_weekly_from_daily(week_str)
|
||||
|
||||
# L5 (weekly) → L4 (monthly)
|
||||
self.aggregate_monthly_from_weekly()
|
||||
self.aggregate_monthly_from_weekly(month_str)
|
||||
|
||||
# L4 (monthly) → L3 (quarterly)
|
||||
self.aggregate_quarterly_from_monthly()
|
||||
self.aggregate_quarterly_from_monthly(quarter_str)
|
||||
|
||||
# L3 (quarterly) → L2 (annual)
|
||||
self.aggregate_annual_from_quarterly()
|
||||
self.aggregate_annual_from_quarterly(year_str)
|
||||
|
||||
# L2 (annual) → L1 (legacy)
|
||||
self.aggregate_legacy_from_annual()
|
||||
|
||||
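A minimal sketch of driving these aggregations after a session, assuming the `init_db` helper from `src/db.py` later in this change set (the import path and date value are illustrative):

```python
from src.context.aggregator import ContextAggregator
from src.db import init_db  # import path assumed

conn = init_db("data/trade_logs.db")
aggregator = ContextAggregator(conn)

# Roll up one market's trades into the daily (L6) layer for a specific date...
aggregator.aggregate_daily_from_trades("2025-01-15", market="US")

# ...or derive every timeframe from the latest trade timestamp in one pass.
aggregator.run_all_aggregations()
```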
135 src/context/scheduler.py (new file)
@@ -0,0 +1,135 @@
|
||||
"""Context aggregation scheduler for periodic rollups and cleanup."""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
import sqlite3
|
||||
from calendar import monthrange
|
||||
from dataclasses import dataclass
|
||||
from datetime import UTC, datetime
|
||||
|
||||
from src.context.aggregator import ContextAggregator
|
||||
from src.context.store import ContextStore
|
||||
|
||||
|
||||
@dataclass(frozen=True)
|
||||
class ScheduleResult:
|
||||
"""Represents which scheduled tasks ran."""
|
||||
|
||||
weekly: bool = False
|
||||
monthly: bool = False
|
||||
quarterly: bool = False
|
||||
annual: bool = False
|
||||
legacy: bool = False
|
||||
cleanup: bool = False
|
||||
|
||||
|
||||
class ContextScheduler:
|
||||
"""Run periodic context aggregations and cleanup when due."""
|
||||
|
||||
def __init__(
|
||||
self,
|
||||
conn: sqlite3.Connection | None = None,
|
||||
aggregator: ContextAggregator | None = None,
|
||||
store: ContextStore | None = None,
|
||||
) -> None:
|
||||
if aggregator is None:
|
||||
if conn is None:
|
||||
raise ValueError("conn is required when aggregator is not provided")
|
||||
aggregator = ContextAggregator(conn)
|
||||
self.aggregator = aggregator
|
||||
|
||||
if store is None:
|
||||
store = getattr(aggregator, "store", None)
|
||||
if store is None:
|
||||
if conn is None:
|
||||
raise ValueError("conn is required when store is not provided")
|
||||
store = ContextStore(conn)
|
||||
self.store = store
|
||||
|
||||
self._last_run: dict[str, str] = {}
|
||||
|
||||
def run_if_due(self, now: datetime | None = None) -> ScheduleResult:
|
||||
"""Run scheduled aggregations if their schedule is due.
|
||||
|
||||
Args:
|
||||
now: Current datetime (UTC). If None, uses current time.
|
||||
|
||||
Returns:
|
||||
ScheduleResult indicating which tasks ran.
|
||||
"""
|
||||
if now is None:
|
||||
now = datetime.now(UTC)
|
||||
|
||||
today = now.date().isoformat()
|
||||
result = ScheduleResult()
|
||||
|
||||
if self._should_run("cleanup", today):
|
||||
self.store.cleanup_expired_contexts()
|
||||
result = self._with(result, cleanup=True)
|
||||
|
||||
if self._is_sunday(now) and self._should_run("weekly", today):
|
||||
week = now.strftime("%Y-W%V")
|
||||
self.aggregator.aggregate_weekly_from_daily(week)
|
||||
result = self._with(result, weekly=True)
|
||||
|
||||
if self._is_last_day_of_month(now) and self._should_run("monthly", today):
|
||||
month = now.strftime("%Y-%m")
|
||||
self.aggregator.aggregate_monthly_from_weekly(month)
|
||||
result = self._with(result, monthly=True)
|
||||
|
||||
if self._is_last_day_of_quarter(now) and self._should_run("quarterly", today):
|
||||
quarter = self._current_quarter(now)
|
||||
self.aggregator.aggregate_quarterly_from_monthly(quarter)
|
||||
result = self._with(result, quarterly=True)
|
||||
|
||||
if self._is_last_day_of_year(now) and self._should_run("annual", today):
|
||||
year = str(now.year)
|
||||
self.aggregator.aggregate_annual_from_quarterly(year)
|
||||
result = self._with(result, annual=True)
|
||||
|
||||
# Legacy rollup runs after annual aggregation.
|
||||
self.aggregator.aggregate_legacy_from_annual()
|
||||
result = self._with(result, legacy=True)
|
||||
|
||||
return result
|
||||
|
||||
def _should_run(self, key: str, date_str: str) -> bool:
|
||||
if self._last_run.get(key) == date_str:
|
||||
return False
|
||||
self._last_run[key] = date_str
|
||||
return True
|
||||
|
||||
@staticmethod
|
||||
def _is_sunday(now: datetime) -> bool:
|
||||
return now.weekday() == 6
|
||||
|
||||
@staticmethod
|
||||
def _is_last_day_of_month(now: datetime) -> bool:
|
||||
last_day = monthrange(now.year, now.month)[1]
|
||||
return now.day == last_day
|
||||
|
||||
@classmethod
|
||||
def _is_last_day_of_quarter(cls, now: datetime) -> bool:
|
||||
if now.month not in (3, 6, 9, 12):
|
||||
return False
|
||||
return cls._is_last_day_of_month(now)
|
||||
|
||||
@staticmethod
|
||||
def _is_last_day_of_year(now: datetime) -> bool:
|
||||
return now.month == 12 and now.day == 31
|
||||
|
||||
@staticmethod
|
||||
def _current_quarter(now: datetime) -> str:
|
||||
quarter = (now.month - 1) // 3 + 1
|
||||
return f"{now.year}-Q{quarter}"
|
||||
|
||||
@staticmethod
|
||||
def _with(result: ScheduleResult, **kwargs: bool) -> ScheduleResult:
|
||||
return ScheduleResult(
|
||||
weekly=kwargs.get("weekly", result.weekly),
|
||||
monthly=kwargs.get("monthly", result.monthly),
|
||||
quarterly=kwargs.get("quarterly", result.quarterly),
|
||||
annual=kwargs.get("annual", result.annual),
|
||||
legacy=kwargs.get("legacy", result.legacy),
|
||||
cleanup=kwargs.get("cleanup", result.cleanup),
|
||||
)
|
||||
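A minimal sketch of wiring the scheduler into a periodic loop; the hourly cadence and connection setup are assumptions, and rollups stay idempotent per day because `_should_run` records the last run date:

```python
import asyncio
from datetime import UTC, datetime

from src.context.scheduler import ContextScheduler
from src.db import init_db  # import path assumed


async def periodic_context_rollups(db_path: str) -> None:
    scheduler = ContextScheduler(conn=init_db(db_path))
    while True:
        result = scheduler.run_if_due(datetime.now(UTC))
        # ScheduleResult flags report which rollups actually ran on this pass.
        if result.weekly or result.monthly or result.cleanup:
            print("context rollups ran:", result)
        await asyncio.sleep(3600)  # hourly check interval is an assumption
```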
5 src/dashboard/__init__.py (new file)
@@ -0,0 +1,5 @@
|
||||
"""FastAPI dashboard package for observability APIs."""
|
||||
|
||||
from src.dashboard.app import create_dashboard_app
|
||||
|
||||
__all__ = ["create_dashboard_app"]
|
||||
361 src/dashboard/app.py (new file)
@@ -0,0 +1,361 @@
|
||||
"""FastAPI application for observability dashboard endpoints."""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
import json
|
||||
import sqlite3
|
||||
from datetime import UTC, datetime
|
||||
from pathlib import Path
|
||||
from typing import Any
|
||||
|
||||
from fastapi import FastAPI, HTTPException, Query
|
||||
from fastapi.responses import FileResponse
|
||||
|
||||
|
||||
def create_dashboard_app(db_path: str) -> FastAPI:
|
||||
"""Create dashboard FastAPI app bound to a SQLite database path."""
|
||||
app = FastAPI(title="The Ouroboros Dashboard", version="1.0.0")
|
||||
app.state.db_path = db_path
|
||||
|
||||
@app.get("/")
|
||||
def index() -> FileResponse:
|
||||
index_path = Path(__file__).parent / "static" / "index.html"
|
||||
return FileResponse(index_path)
|
||||
|
||||
@app.get("/api/status")
|
||||
def get_status() -> dict[str, Any]:
|
||||
today = datetime.now(UTC).date().isoformat()
|
||||
with _connect(db_path) as conn:
|
||||
market_rows = conn.execute(
|
||||
"""
|
||||
SELECT DISTINCT market FROM (
|
||||
SELECT market FROM trades WHERE DATE(timestamp) = ?
|
||||
UNION
|
||||
SELECT market FROM decision_logs WHERE DATE(timestamp) = ?
|
||||
UNION
|
||||
SELECT market FROM playbooks WHERE date = ?
|
||||
) ORDER BY market
|
||||
""",
|
||||
(today, today, today),
|
||||
).fetchall()
|
||||
markets = [row[0] for row in market_rows] if market_rows else []
|
||||
market_status: dict[str, Any] = {}
|
||||
total_trades = 0
|
||||
total_pnl = 0.0
|
||||
total_decisions = 0
|
||||
for market in markets:
|
||||
trade_row = conn.execute(
|
||||
"""
|
||||
SELECT COUNT(*) AS c, COALESCE(SUM(pnl), 0.0) AS p
|
||||
FROM trades
|
||||
WHERE DATE(timestamp) = ? AND market = ?
|
||||
""",
|
||||
(today, market),
|
||||
).fetchone()
|
||||
decision_row = conn.execute(
|
||||
"""
|
||||
SELECT COUNT(*) AS c
|
||||
FROM decision_logs
|
||||
WHERE DATE(timestamp) = ? AND market = ?
|
||||
""",
|
||||
(today, market),
|
||||
).fetchone()
|
||||
playbook_row = conn.execute(
|
||||
"""
|
||||
SELECT status
|
||||
FROM playbooks
|
||||
WHERE date = ? AND market = ?
|
||||
LIMIT 1
|
||||
""",
|
||||
(today, market),
|
||||
).fetchone()
|
||||
market_status[market] = {
|
||||
"trade_count": int(trade_row["c"] if trade_row else 0),
|
||||
"total_pnl": float(trade_row["p"] if trade_row else 0.0),
|
||||
"decision_count": int(decision_row["c"] if decision_row else 0),
|
||||
"playbook_status": playbook_row["status"] if playbook_row else None,
|
||||
}
|
||||
total_trades += market_status[market]["trade_count"]
|
||||
total_pnl += market_status[market]["total_pnl"]
|
||||
total_decisions += market_status[market]["decision_count"]
|
||||
|
||||
return {
|
||||
"date": today,
|
||||
"markets": market_status,
|
||||
"totals": {
|
||||
"trade_count": total_trades,
|
||||
"total_pnl": round(total_pnl, 2),
|
||||
"decision_count": total_decisions,
|
||||
},
|
||||
}
|
||||
|
||||
@app.get("/api/playbook/{date_str}")
|
||||
def get_playbook(date_str: str, market: str = Query("KR")) -> dict[str, Any]:
|
||||
with _connect(db_path) as conn:
|
||||
row = conn.execute(
|
||||
"""
|
||||
SELECT date, market, status, playbook_json, generated_at,
|
||||
token_count, scenario_count, match_count
|
||||
FROM playbooks
|
||||
WHERE date = ? AND market = ?
|
||||
""",
|
||||
(date_str, market),
|
||||
).fetchone()
|
||||
if row is None:
|
||||
raise HTTPException(status_code=404, detail="playbook not found")
|
||||
return {
|
||||
"date": row["date"],
|
||||
"market": row["market"],
|
||||
"status": row["status"],
|
||||
"playbook": json.loads(row["playbook_json"]),
|
||||
"generated_at": row["generated_at"],
|
||||
"token_count": row["token_count"],
|
||||
"scenario_count": row["scenario_count"],
|
||||
"match_count": row["match_count"],
|
||||
}
|
||||
|
||||
@app.get("/api/scorecard/{date_str}")
|
||||
def get_scorecard(date_str: str, market: str = Query("KR")) -> dict[str, Any]:
|
||||
key = f"scorecard_{market}"
|
||||
with _connect(db_path) as conn:
|
||||
row = conn.execute(
|
||||
"""
|
||||
SELECT value
|
||||
FROM contexts
|
||||
WHERE layer = 'L6_DAILY' AND timeframe = ? AND key = ?
|
||||
""",
|
||||
(date_str, key),
|
||||
).fetchone()
|
||||
if row is None:
|
||||
raise HTTPException(status_code=404, detail="scorecard not found")
|
||||
return {"date": date_str, "market": market, "scorecard": json.loads(row["value"])}
|
||||
|
||||
@app.get("/api/performance")
|
||||
def get_performance(market: str = Query("all")) -> dict[str, Any]:
|
||||
with _connect(db_path) as conn:
|
||||
if market == "all":
|
||||
by_market_rows = conn.execute(
|
||||
"""
|
||||
SELECT market,
|
||||
COUNT(*) AS total_trades,
|
||||
SUM(CASE WHEN pnl > 0 THEN 1 ELSE 0 END) AS wins,
|
||||
SUM(CASE WHEN pnl < 0 THEN 1 ELSE 0 END) AS losses,
|
||||
COALESCE(SUM(pnl), 0.0) AS total_pnl,
|
||||
COALESCE(AVG(confidence), 0.0) AS avg_confidence
|
||||
FROM trades
|
||||
GROUP BY market
|
||||
ORDER BY market
|
||||
"""
|
||||
).fetchall()
|
||||
combined = _performance_from_rows(by_market_rows)
|
||||
return {
|
||||
"market": "all",
|
||||
"combined": combined,
|
||||
"by_market": [
|
||||
_row_to_performance(row)
|
||||
for row in by_market_rows
|
||||
],
|
||||
}
|
||||
|
||||
row = conn.execute(
|
||||
"""
|
||||
SELECT market,
|
||||
COUNT(*) AS total_trades,
|
||||
SUM(CASE WHEN pnl > 0 THEN 1 ELSE 0 END) AS wins,
|
||||
SUM(CASE WHEN pnl < 0 THEN 1 ELSE 0 END) AS losses,
|
||||
COALESCE(SUM(pnl), 0.0) AS total_pnl,
|
||||
COALESCE(AVG(confidence), 0.0) AS avg_confidence
|
||||
FROM trades
|
||||
WHERE market = ?
|
||||
GROUP BY market
|
||||
""",
|
||||
(market,),
|
||||
).fetchone()
|
||||
if row is None:
|
||||
return {"market": market, "metrics": _empty_performance(market)}
|
||||
return {"market": market, "metrics": _row_to_performance(row)}
|
||||
|
||||
@app.get("/api/context/{layer}")
|
||||
def get_context_layer(
|
||||
layer: str,
|
||||
timeframe: str | None = Query(default=None),
|
||||
limit: int = Query(default=100, ge=1, le=1000),
|
||||
) -> dict[str, Any]:
|
||||
with _connect(db_path) as conn:
|
||||
if timeframe is None:
|
||||
rows = conn.execute(
|
||||
"""
|
||||
SELECT timeframe, key, value, updated_at
|
||||
FROM contexts
|
||||
WHERE layer = ?
|
||||
ORDER BY updated_at DESC
|
||||
LIMIT ?
|
||||
""",
|
||||
(layer, limit),
|
||||
).fetchall()
|
||||
else:
|
||||
rows = conn.execute(
|
||||
"""
|
||||
SELECT timeframe, key, value, updated_at
|
||||
FROM contexts
|
||||
WHERE layer = ? AND timeframe = ?
|
||||
ORDER BY key
|
||||
LIMIT ?
|
||||
""",
|
||||
(layer, timeframe, limit),
|
||||
).fetchall()
|
||||
|
||||
entries = [
|
||||
{
|
||||
"timeframe": row["timeframe"],
|
||||
"key": row["key"],
|
||||
"value": json.loads(row["value"]),
|
||||
"updated_at": row["updated_at"],
|
||||
}
|
||||
for row in rows
|
||||
]
|
||||
return {
|
||||
"layer": layer,
|
||||
"timeframe": timeframe,
|
||||
"count": len(entries),
|
||||
"entries": entries,
|
||||
}
|
||||
|
||||
@app.get("/api/decisions")
|
||||
def get_decisions(
|
||||
market: str = Query("KR"),
|
||||
limit: int = Query(default=50, ge=1, le=500),
|
||||
) -> dict[str, Any]:
|
||||
with _connect(db_path) as conn:
|
||||
rows = conn.execute(
|
||||
"""
|
||||
SELECT decision_id, timestamp, stock_code, market, exchange_code,
|
||||
action, confidence, rationale, context_snapshot, input_data,
|
||||
outcome_pnl, outcome_accuracy
|
||||
FROM decision_logs
|
||||
WHERE market = ?
|
||||
ORDER BY timestamp DESC
|
||||
LIMIT ?
|
||||
""",
|
||||
(market, limit),
|
||||
).fetchall()
|
||||
decisions = []
|
||||
for row in rows:
|
||||
decisions.append(
|
||||
{
|
||||
"decision_id": row["decision_id"],
|
||||
"timestamp": row["timestamp"],
|
||||
"stock_code": row["stock_code"],
|
||||
"market": row["market"],
|
||||
"exchange_code": row["exchange_code"],
|
||||
"action": row["action"],
|
||||
"confidence": row["confidence"],
|
||||
"rationale": row["rationale"],
|
||||
"context_snapshot": json.loads(row["context_snapshot"]),
|
||||
"input_data": json.loads(row["input_data"]),
|
||||
"outcome_pnl": row["outcome_pnl"],
|
||||
"outcome_accuracy": row["outcome_accuracy"],
|
||||
}
|
||||
)
|
||||
return {"market": market, "count": len(decisions), "decisions": decisions}
|
||||
|
||||
@app.get("/api/scenarios/active")
|
||||
def get_active_scenarios(
|
||||
market: str = Query("US"),
|
||||
date_str: str | None = Query(default=None),
|
||||
limit: int = Query(default=50, ge=1, le=500),
|
||||
) -> dict[str, Any]:
|
||||
if date_str is None:
|
||||
date_str = datetime.now(UTC).date().isoformat()
|
||||
|
||||
with _connect(db_path) as conn:
|
||||
rows = conn.execute(
|
||||
"""
|
||||
SELECT timestamp, stock_code, action, confidence, rationale, context_snapshot
|
||||
FROM decision_logs
|
||||
WHERE market = ? AND DATE(timestamp) = ?
|
||||
ORDER BY timestamp DESC
|
||||
LIMIT ?
|
||||
""",
|
||||
(market, date_str, limit),
|
||||
).fetchall()
|
||||
matches: list[dict[str, Any]] = []
|
||||
for row in rows:
|
||||
snapshot = json.loads(row["context_snapshot"])
|
||||
scenario_match = snapshot.get("scenario_match", {})
|
||||
if not isinstance(scenario_match, dict) or not scenario_match:
|
||||
continue
|
||||
matches.append(
|
||||
{
|
||||
"timestamp": row["timestamp"],
|
||||
"stock_code": row["stock_code"],
|
||||
"action": row["action"],
|
||||
"confidence": row["confidence"],
|
||||
"rationale": row["rationale"],
|
||||
"scenario_match": scenario_match,
|
||||
}
|
||||
)
|
||||
return {"market": market, "date": date_str, "count": len(matches), "matches": matches}
|
||||
|
||||
return app
|
||||
|
||||
|
||||
def _connect(db_path: str) -> sqlite3.Connection:
|
||||
conn = sqlite3.connect(db_path)
|
||||
conn.row_factory = sqlite3.Row
|
||||
return conn
|
||||
|
||||
|
||||
def _row_to_performance(row: sqlite3.Row) -> dict[str, Any]:
|
||||
wins = int(row["wins"] or 0)
|
||||
losses = int(row["losses"] or 0)
|
||||
total = int(row["total_trades"] or 0)
|
||||
win_rate = round((wins / (wins + losses) * 100), 2) if (wins + losses) > 0 else 0.0
|
||||
return {
|
||||
"market": row["market"],
|
||||
"total_trades": total,
|
||||
"wins": wins,
|
||||
"losses": losses,
|
||||
"win_rate": win_rate,
|
||||
"total_pnl": round(float(row["total_pnl"] or 0.0), 2),
|
||||
"avg_confidence": round(float(row["avg_confidence"] or 0.0), 2),
|
||||
}
|
||||
|
||||
|
||||
def _performance_from_rows(rows: list[sqlite3.Row]) -> dict[str, Any]:
|
||||
total_trades = 0
|
||||
wins = 0
|
||||
losses = 0
|
||||
total_pnl = 0.0
|
||||
confidence_weighted = 0.0
|
||||
for row in rows:
|
||||
market_total = int(row["total_trades"] or 0)
|
||||
market_conf = float(row["avg_confidence"] or 0.0)
|
||||
total_trades += market_total
|
||||
wins += int(row["wins"] or 0)
|
||||
losses += int(row["losses"] or 0)
|
||||
total_pnl += float(row["total_pnl"] or 0.0)
|
||||
confidence_weighted += market_total * market_conf
|
||||
win_rate = round((wins / (wins + losses) * 100), 2) if (wins + losses) > 0 else 0.0
|
||||
avg_confidence = round(confidence_weighted / total_trades, 2) if total_trades > 0 else 0.0
|
||||
return {
|
||||
"market": "all",
|
||||
"total_trades": total_trades,
|
||||
"wins": wins,
|
||||
"losses": losses,
|
||||
"win_rate": win_rate,
|
||||
"total_pnl": round(total_pnl, 2),
|
||||
"avg_confidence": avg_confidence,
|
||||
}
|
||||
|
||||
|
||||
def _empty_performance(market: str) -> dict[str, Any]:
|
||||
return {
|
||||
"market": market,
|
||||
"total_trades": 0,
|
||||
"wins": 0,
|
||||
"losses": 0,
|
||||
"win_rate": 0.0,
|
||||
"total_pnl": 0.0,
|
||||
"avg_confidence": 0.0,
|
||||
}
|
||||
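A minimal sketch of exercising these endpoints in-process with FastAPI's test client; the database path is the project default, and the schema is assumed to have been created by `init_db`:

```python
from fastapi.testclient import TestClient

from src.dashboard.app import create_dashboard_app

client = TestClient(create_dashboard_app("data/trade_logs.db"))

status = client.get("/api/status").json()                        # per-market daily totals
performance = client.get("/api/performance?market=all").json()   # combined + by-market metrics
print(status["totals"], performance["combined"])
```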
61 src/dashboard/static/index.html (new file)
@@ -0,0 +1,61 @@
|
||||
<!doctype html>
|
||||
<html lang="en">
|
||||
<head>
|
||||
<meta charset="UTF-8" />
|
||||
<meta name="viewport" content="width=device-width, initial-scale=1.0" />
|
||||
<title>The Ouroboros Dashboard</title>
|
||||
<style>
|
||||
:root {
|
||||
--bg: #0b1724;
|
||||
--panel: #12263a;
|
||||
--fg: #e6eef7;
|
||||
--muted: #9fb3c8;
|
||||
--accent: #3cb371;
|
||||
}
|
||||
body {
|
||||
margin: 0;
|
||||
font-family: ui-monospace, SFMono-Regular, Menlo, monospace;
|
||||
background: radial-gradient(circle at top left, #173b58, var(--bg));
|
||||
color: var(--fg);
|
||||
}
|
||||
.wrap {
|
||||
max-width: 900px;
|
||||
margin: 48px auto;
|
||||
padding: 0 16px;
|
||||
}
|
||||
.card {
|
||||
background: color-mix(in oklab, var(--panel), black 12%);
|
||||
border: 1px solid #28455f;
|
||||
border-radius: 12px;
|
||||
padding: 20px;
|
||||
}
|
||||
h1 {
|
||||
margin-top: 0;
|
||||
}
|
||||
code {
|
||||
color: var(--accent);
|
||||
}
|
||||
li {
|
||||
margin: 6px 0;
|
||||
color: var(--muted);
|
||||
}
|
||||
</style>
|
||||
</head>
|
||||
<body>
|
||||
<div class="wrap">
|
||||
<div class="card">
|
||||
<h1>The Ouroboros Dashboard API</h1>
|
||||
<p>Use the following endpoints:</p>
|
||||
<ul>
|
||||
<li><code>/api/status</code></li>
|
||||
<li><code>/api/playbook/{date}?market=KR</code></li>
|
||||
<li><code>/api/scorecard/{date}?market=KR</code></li>
|
||||
<li><code>/api/performance?market=all</code></li>
|
||||
<li><code>/api/context/{layer}</code></li>
|
||||
<li><code>/api/decisions?market=KR</code></li>
|
||||
<li><code>/api/scenarios/active?market=US</code></li>
|
||||
</ul>
|
||||
</div>
|
||||
</div>
|
||||
</body>
|
||||
</html>
|
||||
119 src/db.py
@@ -2,9 +2,11 @@
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
import json
|
||||
import sqlite3
|
||||
from datetime import UTC, datetime
|
||||
from pathlib import Path
|
||||
from typing import Any
|
||||
|
||||
|
||||
def init_db(db_path: str) -> sqlite3.Connection:
|
||||
@@ -25,7 +27,8 @@ def init_db(db_path: str) -> sqlite3.Connection:
|
||||
price REAL,
|
||||
pnl REAL DEFAULT 0.0,
|
||||
market TEXT DEFAULT 'KR',
|
||||
exchange_code TEXT DEFAULT 'KRX'
|
||||
exchange_code TEXT DEFAULT 'KRX',
|
||||
decision_id TEXT
|
||||
)
|
||||
"""
|
||||
)
|
||||
@@ -38,6 +41,10 @@ def init_db(db_path: str) -> sqlite3.Connection:
|
||||
conn.execute("ALTER TABLE trades ADD COLUMN market TEXT DEFAULT 'KR'")
|
||||
if "exchange_code" not in columns:
|
||||
conn.execute("ALTER TABLE trades ADD COLUMN exchange_code TEXT DEFAULT 'KRX'")
|
||||
if "selection_context" not in columns:
|
||||
conn.execute("ALTER TABLE trades ADD COLUMN selection_context TEXT")
|
||||
if "decision_id" not in columns:
|
||||
conn.execute("ALTER TABLE trades ADD COLUMN decision_id TEXT")
|
||||
|
||||
# Context tree tables for multi-layered memory management
|
||||
conn.execute(
|
||||
@@ -88,6 +95,27 @@ def init_db(db_path: str) -> sqlite3.Connection:
|
||||
"""
|
||||
)
|
||||
|
||||
# Playbook storage for pre-market strategy persistence
|
||||
conn.execute(
|
||||
"""
|
||||
CREATE TABLE IF NOT EXISTS playbooks (
|
||||
id INTEGER PRIMARY KEY AUTOINCREMENT,
|
||||
date TEXT NOT NULL,
|
||||
market TEXT NOT NULL,
|
||||
status TEXT NOT NULL DEFAULT 'pending',
|
||||
playbook_json TEXT NOT NULL,
|
||||
generated_at TEXT NOT NULL,
|
||||
token_count INTEGER DEFAULT 0,
|
||||
scenario_count INTEGER DEFAULT 0,
|
||||
match_count INTEGER DEFAULT 0,
|
||||
UNIQUE(date, market)
|
||||
)
|
||||
"""
|
||||
)
|
||||
|
||||
conn.execute("CREATE INDEX IF NOT EXISTS idx_playbooks_date ON playbooks(date)")
|
||||
conn.execute("CREATE INDEX IF NOT EXISTS idx_playbooks_market ON playbooks(market)")
|
||||
|
||||
# Create indices for efficient context queries
|
||||
conn.execute("CREATE INDEX IF NOT EXISTS idx_contexts_layer ON contexts(layer)")
|
||||
conn.execute("CREATE INDEX IF NOT EXISTS idx_contexts_timeframe ON contexts(timeframe)")
|
||||
@@ -118,15 +146,34 @@ def log_trade(
|
||||
pnl: float = 0.0,
|
||||
market: str = "KR",
|
||||
exchange_code: str = "KRX",
|
||||
selection_context: dict[str, Any] | None = None,
|
||||
decision_id: str | None = None,
|
||||
) -> None:
|
||||
"""Insert a trade record into the database."""
|
||||
"""Insert a trade record into the database.
|
||||
|
||||
Args:
|
||||
conn: Database connection
|
||||
stock_code: Stock code
|
||||
action: Trade action (BUY/SELL/HOLD)
|
||||
confidence: Confidence level (0-100)
|
||||
rationale: AI decision rationale
|
||||
quantity: Number of shares
|
||||
price: Trade price
|
||||
pnl: Profit/loss
|
||||
market: Market code
|
||||
exchange_code: Exchange code
|
||||
selection_context: Scanner selection data (RSI, volume_ratio, signal, score)
decision_id: Identifier of the originating decision log entry, if any
|
||||
"""
|
||||
# Serialize selection context to JSON
|
||||
context_json = json.dumps(selection_context) if selection_context else None
|
||||
|
||||
conn.execute(
|
||||
"""
|
||||
INSERT INTO trades (
|
||||
timestamp, stock_code, action, confidence, rationale,
|
||||
quantity, price, pnl, market, exchange_code
|
||||
quantity, price, pnl, market, exchange_code, selection_context, decision_id
|
||||
)
|
||||
VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?)
|
||||
VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)
|
||||
""",
|
||||
(
|
||||
datetime.now(UTC).isoformat(),
|
||||
@@ -139,6 +186,70 @@ def log_trade(
|
||||
pnl,
|
||||
market,
|
||||
exchange_code,
|
||||
context_json,
|
||||
decision_id,
|
||||
),
|
||||
)
|
||||
conn.commit()
|
||||
|
||||
|
||||
def get_latest_buy_trade(
|
||||
conn: sqlite3.Connection, stock_code: str, market: str
|
||||
) -> dict[str, Any] | None:
|
||||
"""Fetch the most recent BUY trade for a stock and market."""
|
||||
cursor = conn.execute(
|
||||
"""
|
||||
SELECT decision_id, price, quantity
|
||||
FROM trades
|
||||
WHERE stock_code = ?
|
||||
AND market = ?
|
||||
AND action = 'BUY'
|
||||
AND decision_id IS NOT NULL
|
||||
ORDER BY timestamp DESC
|
||||
LIMIT 1
|
||||
""",
|
||||
(stock_code, market),
|
||||
)
|
||||
row = cursor.fetchone()
|
||||
if not row:
|
||||
return None
|
||||
return {"decision_id": row[0], "price": row[1], "quantity": row[2]}
|
||||
|
||||
|
||||
def get_open_position(
|
||||
conn: sqlite3.Connection, stock_code: str, market: str
|
||||
) -> dict[str, Any] | None:
|
||||
"""Return open position if latest trade is BUY, else None."""
|
||||
cursor = conn.execute(
|
||||
"""
|
||||
SELECT action, decision_id, price, quantity
|
||||
FROM trades
|
||||
WHERE stock_code = ?
|
||||
AND market = ?
|
||||
ORDER BY timestamp DESC
|
||||
LIMIT 1
|
||||
""",
|
||||
(stock_code, market),
|
||||
)
|
||||
row = cursor.fetchone()
|
||||
if not row or row[0] != "BUY":
|
||||
return None
|
||||
return {"decision_id": row[1], "price": row[2], "quantity": row[3]}
|
||||
|
||||
|
||||
def get_recent_symbols(
|
||||
conn: sqlite3.Connection, market: str, limit: int = 30
|
||||
) -> list[str]:
|
||||
"""Return recent unique symbols for a market, newest first."""
|
||||
cursor = conn.execute(
|
||||
"""
|
||||
SELECT stock_code, MAX(timestamp) AS last_ts
|
||||
FROM trades
|
||||
WHERE market = ?
|
||||
GROUP BY stock_code
|
||||
ORDER BY last_ts DESC
|
||||
LIMIT ?
|
||||
""",
|
||||
(market, limit),
|
||||
)
|
||||
return [row[0] for row in cursor.fetchall() if row and row[0]]
|
||||
|
||||
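A minimal sketch of the round trip these helpers support: log a BUY with its scanner selection context, then look up the open position before deciding to sell (the import path and all values are illustrative):

```python
from src.db import get_open_position, init_db, log_trade  # import path assumed

conn = init_db("data/trade_logs.db")

log_trade(
    conn,
    stock_code="AAPL",
    action="BUY",
    confidence=85,
    rationale="RSI oversold with 2.4x volume",
    quantity=10,
    price=175.0,
    market="US",
    exchange_code="NASD",
    selection_context={"rsi": 28.5, "volume_ratio": 2.4, "signal": "oversold"},
    decision_id="decision-123",
)

position = get_open_position(conn, "AAPL", "US")
if position is not None:
    print(position["quantity"], position["price"], position["decision_id"])
```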
@@ -1,12 +1,14 @@
|
||||
"""Evolution engine for self-improving trading strategies."""
|
||||
|
||||
from src.evolution.ab_test import ABTester, ABTestResult, StrategyPerformance
|
||||
from src.evolution.daily_review import DailyReviewer
|
||||
from src.evolution.optimizer import EvolutionOptimizer
|
||||
from src.evolution.performance_tracker import (
|
||||
PerformanceDashboard,
|
||||
PerformanceTracker,
|
||||
StrategyMetrics,
|
||||
)
|
||||
from src.evolution.scorecard import DailyScorecard
|
||||
|
||||
__all__ = [
|
||||
"EvolutionOptimizer",
|
||||
@@ -16,4 +18,6 @@ __all__ = [
|
||||
"PerformanceTracker",
|
||||
"PerformanceDashboard",
|
||||
"StrategyMetrics",
|
||||
"DailyScorecard",
|
||||
"DailyReviewer",
|
||||
]
|
||||
|
||||
196 src/evolution/daily_review.py (new file)
@@ -0,0 +1,196 @@
|
||||
"""Daily review generator for market-scoped end-of-day scorecards."""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
import json
|
||||
import logging
|
||||
import re
|
||||
import sqlite3
|
||||
from dataclasses import asdict
|
||||
|
||||
from src.brain.gemini_client import GeminiClient
|
||||
from src.context.layer import ContextLayer
|
||||
from src.context.store import ContextStore
|
||||
from src.evolution.scorecard import DailyScorecard
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
class DailyReviewer:
|
||||
"""Builds daily scorecards and optional AI-generated lessons."""
|
||||
|
||||
def __init__(
|
||||
self,
|
||||
conn: sqlite3.Connection,
|
||||
context_store: ContextStore,
|
||||
gemini_client: GeminiClient | None = None,
|
||||
) -> None:
|
||||
self._conn = conn
|
||||
self._context_store = context_store
|
||||
self._gemini = gemini_client
|
||||
|
||||
def generate_scorecard(self, date: str, market: str) -> DailyScorecard:
|
||||
"""Generate a market-scoped scorecard from decision logs and trades."""
|
||||
decision_rows = self._conn.execute(
|
||||
"""
|
||||
SELECT action, confidence, context_snapshot
|
||||
FROM decision_logs
|
||||
WHERE DATE(timestamp) = ? AND market = ?
|
||||
""",
|
||||
(date, market),
|
||||
).fetchall()
|
||||
|
||||
total_decisions = len(decision_rows)
|
||||
buys = sum(1 for row in decision_rows if row[0] == "BUY")
|
||||
sells = sum(1 for row in decision_rows if row[0] == "SELL")
|
||||
holds = sum(1 for row in decision_rows if row[0] == "HOLD")
|
||||
avg_confidence = (
|
||||
round(sum(int(row[1]) for row in decision_rows) / total_decisions, 2)
|
||||
if total_decisions > 0
|
||||
else 0.0
|
||||
)
|
||||
|
||||
matched = 0
|
||||
for row in decision_rows:
|
||||
try:
|
||||
snapshot = json.loads(row[2]) if row[2] else {}
|
||||
except json.JSONDecodeError:
|
||||
snapshot = {}
|
||||
scenario_match = snapshot.get("scenario_match", {})
|
||||
if isinstance(scenario_match, dict) and scenario_match:
|
||||
matched += 1
|
||||
scenario_match_rate = (
|
||||
round((matched / total_decisions) * 100, 2)
|
||||
if total_decisions
|
||||
else 0.0
|
||||
)
|
||||
|
||||
trade_stats = self._conn.execute(
|
||||
"""
|
||||
SELECT
|
||||
COALESCE(SUM(pnl), 0.0),
|
||||
SUM(CASE WHEN pnl > 0 THEN 1 ELSE 0 END),
|
||||
SUM(CASE WHEN pnl < 0 THEN 1 ELSE 0 END)
|
||||
FROM trades
|
||||
WHERE DATE(timestamp) = ? AND market = ?
|
||||
""",
|
||||
(date, market),
|
||||
).fetchone()
|
||||
total_pnl = round(float(trade_stats[0] or 0.0), 2) if trade_stats else 0.0
|
||||
wins = int(trade_stats[1] or 0) if trade_stats else 0
|
||||
losses = int(trade_stats[2] or 0) if trade_stats else 0
|
||||
win_rate = round((wins / (wins + losses)) * 100, 2) if (wins + losses) > 0 else 0.0
|
||||
|
||||
top_winners = [
|
||||
row[0]
|
||||
for row in self._conn.execute(
|
||||
"""
|
||||
SELECT stock_code, SUM(pnl) AS stock_pnl
|
||||
FROM trades
|
||||
WHERE DATE(timestamp) = ? AND market = ?
|
||||
GROUP BY stock_code
|
||||
HAVING stock_pnl > 0
|
||||
ORDER BY stock_pnl DESC
|
||||
LIMIT 3
|
||||
""",
|
||||
(date, market),
|
||||
).fetchall()
|
||||
]
|
||||
|
||||
top_losers = [
|
||||
row[0]
|
||||
for row in self._conn.execute(
|
||||
"""
|
||||
SELECT stock_code, SUM(pnl) AS stock_pnl
|
||||
FROM trades
|
||||
WHERE DATE(timestamp) = ? AND market = ?
|
||||
GROUP BY stock_code
|
||||
HAVING stock_pnl < 0
|
||||
ORDER BY stock_pnl ASC
|
||||
LIMIT 3
|
||||
""",
|
||||
(date, market),
|
||||
).fetchall()
|
||||
]
|
||||
|
||||
return DailyScorecard(
|
||||
date=date,
|
||||
market=market,
|
||||
total_decisions=total_decisions,
|
||||
buys=buys,
|
||||
sells=sells,
|
||||
holds=holds,
|
||||
total_pnl=total_pnl,
|
||||
win_rate=win_rate,
|
||||
avg_confidence=avg_confidence,
|
||||
scenario_match_rate=scenario_match_rate,
|
||||
top_winners=top_winners,
|
||||
top_losers=top_losers,
|
||||
lessons=[],
|
||||
cross_market_note="",
|
||||
)
|
||||
|
||||
async def generate_lessons(self, scorecard: DailyScorecard) -> list[str]:
|
||||
"""Generate concise lessons from scorecard metrics using Gemini."""
|
||||
if self._gemini is None:
|
||||
return []
|
||||
|
||||
prompt = (
|
||||
"You are a trading performance reviewer.\n"
|
||||
"Return ONLY a JSON array of 1-3 short lessons in English.\n"
|
||||
f"Market: {scorecard.market}\n"
|
||||
f"Date: {scorecard.date}\n"
|
||||
f"Total decisions: {scorecard.total_decisions}\n"
|
||||
f"Buys/Sells/Holds: {scorecard.buys}/{scorecard.sells}/{scorecard.holds}\n"
|
||||
f"Total PnL: {scorecard.total_pnl}\n"
|
||||
f"Win rate: {scorecard.win_rate}%\n"
|
||||
f"Average confidence: {scorecard.avg_confidence}\n"
|
||||
f"Scenario match rate: {scorecard.scenario_match_rate}%\n"
|
||||
f"Top winners: {', '.join(scorecard.top_winners) or 'N/A'}\n"
|
||||
f"Top losers: {', '.join(scorecard.top_losers) or 'N/A'}\n"
|
||||
)
|
||||
|
||||
try:
|
||||
decision = await self._gemini.decide(
|
||||
{
|
||||
"stock_code": "REVIEW",
|
||||
"market_name": scorecard.market,
|
||||
"current_price": 0,
|
||||
"prompt_override": prompt,
|
||||
}
|
||||
)
|
||||
return self._parse_lessons(decision.rationale)
|
||||
except Exception as exc:
|
||||
logger.warning("Failed to generate daily lessons: %s", exc)
|
||||
return []
|
||||
|
||||
def store_scorecard_in_context(self, scorecard: DailyScorecard) -> None:
|
||||
"""Store scorecard in L6 using market-scoped key."""
|
||||
self._context_store.set_context(
|
||||
ContextLayer.L6_DAILY,
|
||||
scorecard.date,
|
||||
f"scorecard_{scorecard.market}",
|
||||
asdict(scorecard),
|
||||
)
|
||||
|
||||
def _parse_lessons(self, raw_text: str) -> list[str]:
|
||||
"""Parse lessons from JSON array response or fallback text."""
|
||||
raw_text = raw_text.strip()
|
||||
try:
|
||||
parsed = json.loads(raw_text)
|
||||
if isinstance(parsed, list):
|
||||
return [str(item).strip() for item in parsed if str(item).strip()][:3]
|
||||
except json.JSONDecodeError:
|
||||
pass
|
||||
|
||||
match = re.search(r"\[.*\]", raw_text, re.DOTALL)
|
||||
if match:
|
||||
try:
|
||||
parsed = json.loads(match.group(0))
|
||||
if isinstance(parsed, list):
|
||||
return [str(item).strip() for item in parsed if str(item).strip()][:3]
|
||||
except json.JSONDecodeError:
|
||||
pass
|
||||
|
||||
lines = [line.strip("-* \t") for line in raw_text.splitlines() if line.strip()]
|
||||
return lines[:3]
|
||||
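A minimal sketch of the end-of-day flow this reviewer enables; connection setup is assumed, and lessons stay empty when no Gemini client is supplied:

```python
from src.context.store import ContextStore
from src.db import init_db  # import path assumed
from src.evolution.daily_review import DailyReviewer

conn = init_db("data/trade_logs.db")
reviewer = DailyReviewer(conn, ContextStore(conn))  # no Gemini client: generate_lessons() would return []

scorecard = reviewer.generate_scorecard("2025-01-15", market="US")
reviewer.store_scorecard_in_context(scorecard)  # persisted in L6 under the key "scorecard_US"
print(scorecard.win_rate, scorecard.scenario_match_rate)
```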
25 src/evolution/scorecard.py (new file)
@@ -0,0 +1,25 @@
|
||||
"""Daily scorecard model for end-of-day performance review."""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
from dataclasses import dataclass, field
|
||||
|
||||
|
||||
@dataclass
|
||||
class DailyScorecard:
|
||||
"""Structured daily performance snapshot for a single market."""
|
||||
|
||||
date: str
|
||||
market: str
|
||||
total_decisions: int
|
||||
buys: int
|
||||
sells: int
|
||||
holds: int
|
||||
total_pnl: float
|
||||
win_rate: float
|
||||
avg_confidence: float
|
||||
scenario_match_rate: float
|
||||
top_winners: list[str] = field(default_factory=list)
|
||||
top_losers: list[str] = field(default_factory=list)
|
||||
lessons: list[str] = field(default_factory=list)
|
||||
cross_market_note: str = ""
|
||||
1618 src/main.py
File diff suppressed because it is too large
@@ -123,6 +123,23 @@ MARKETS: dict[str, MarketInfo] = {
|
||||
),
|
||||
}
|
||||
|
||||
MARKET_SHORTHAND: dict[str, list[str]] = {
|
||||
"US": ["US_NASDAQ", "US_NYSE", "US_AMEX"],
|
||||
"CN": ["CN_SHA", "CN_SZA"],
|
||||
"VN": ["VN_HAN", "VN_HCM"],
|
||||
}
|
||||
|
||||
|
||||
def expand_market_codes(codes: list[str]) -> list[str]:
|
||||
"""Expand shorthand market codes into concrete exchange market codes."""
|
||||
expanded: list[str] = []
|
||||
for code in codes:
|
||||
if code in MARKET_SHORTHAND:
|
||||
expanded.extend(MARKET_SHORTHAND[code])
|
||||
else:
|
||||
expanded.append(code)
|
||||
return expanded
|
||||
|
||||
|
||||
def is_market_open(market: MarketInfo, now: datetime | None = None) -> bool:
|
||||
"""
|
||||
|
||||
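For example, under `MARKET_SHORTHAND` above, a mixed list of shorthand and concrete codes expands as follows:

```python
from src.markets.schedule import expand_market_codes

print(expand_market_codes(["KR", "US"]))
# ['KR', 'US_NASDAQ', 'US_NYSE', 'US_AMEX']  -- 'KR' has no shorthand entry, so it passes through
```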
@@ -200,14 +200,151 @@ telegram = TelegramClient(
|
||||
)
|
||||
```
|
||||
|
||||
## Bidirectional Commands
|
||||
|
||||
Control your trading bot remotely via Telegram commands. The bot not only sends notifications but also accepts commands for real-time control.
|
||||
|
||||
### Available Commands
|
||||
|
||||
| Command | Description |
|
||||
|---------|-------------|
|
||||
| `/start` | Welcome message with quick start guide |
|
||||
| `/help` | List all available commands |
|
||||
| `/status` | Current trading status (mode, markets, P&L, circuit breaker) |
|
||||
| `/positions` | View current holdings grouped by market |
|
||||
| `/stop` | Pause all trading operations |
|
||||
| `/resume` | Resume trading operations |
|
||||
|
||||
### Command Examples
|
||||
|
||||
**Check Trading Status**
|
||||
```
|
||||
You: /status
|
||||
|
||||
Bot:
|
||||
📊 Trading Status
|
||||
|
||||
Mode: PAPER
|
||||
Markets: Korea, United States
|
||||
Trading: Active
|
||||
|
||||
Current P&L: +2.50%
|
||||
Circuit Breaker: -3.0%
|
||||
```
|
||||
|
||||
**View Holdings**
|
||||
```
|
||||
You: /positions
|
||||
|
||||
Bot:
|
||||
💼 Current Holdings
|
||||
|
||||
🇰🇷 Korea
|
||||
• 005930: 10 shares @ 70,000
|
||||
• 035420: 5 shares @ 200,000
|
||||
|
||||
🇺🇸 Overseas
|
||||
• AAPL: 15 shares @ 175
|
||||
• TSLA: 8 shares @ 245
|
||||
|
||||
Cash: ₩5,000,000
|
||||
```
|
||||
|
||||
**Pause Trading**
|
||||
```
|
||||
You: /stop
|
||||
|
||||
Bot:
|
||||
⏸️ Trading Paused
|
||||
|
||||
All trading operations have been suspended.
|
||||
Use /resume to restart trading.
|
||||
```
|
||||
|
||||
**Resume Trading**
|
||||
```
|
||||
You: /resume
|
||||
|
||||
Bot:
|
||||
▶️ Trading Resumed
|
||||
|
||||
Trading operations have been restarted.
|
||||
```
|
||||
|
||||
### Security
|
||||
|
||||
**Chat ID Verification**
|
||||
- Commands are only accepted from the configured `TELEGRAM_CHAT_ID`
|
||||
- Unauthorized users receive no response
|
||||
- Command attempts from wrong chat IDs are logged
|
||||
|
||||
**Authorization Required**
|
||||
- Only the bot owner (chat ID in `.env`) can control trading
|
||||
- No way for unauthorized users to discover or use commands
|
||||
- All command executions are logged for audit
|
||||
|
||||
### Configuration
|
||||
|
||||
Add to your `.env` file:
|
||||
|
||||
```bash
|
||||
# Commands are enabled by default
|
||||
TELEGRAM_COMMANDS_ENABLED=true
|
||||
|
||||
# Polling interval (seconds) - how often to check for commands
|
||||
TELEGRAM_POLLING_INTERVAL=1.0
|
||||
```
|
||||
|
||||
To disable commands but keep notifications:
|
||||
```bash
|
||||
TELEGRAM_COMMANDS_ENABLED=false
|
||||
```
|
||||
|
||||
### How It Works
|
||||
|
||||
1. **Long Polling**: Bot checks Telegram API every second for new messages
|
||||
2. **Command Parsing**: Messages starting with `/` are parsed as commands
|
||||
3. **Authentication**: Chat ID is verified before executing any command
|
||||
4. **Execution**: Command handler is called with current bot state
|
||||
5. **Response**: Result is sent back via Telegram (see the registration sketch after this list)
|
||||
|
||||
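A minimal sketch of registering a custom command with the handler described below; the module path, `TelegramClient` constructor arguments, and the `/ping` command are illustrative assumptions:

```python
import asyncio

from src.notifications.telegram_client import TelegramClient, TelegramCommandHandler  # module path assumed


async def main() -> None:
    client = TelegramClient(bot_token="...", chat_id="...")  # constructor arguments assumed
    handler = TelegramCommandHandler(client, polling_interval=1.0)

    async def handle_ping() -> None:
        await client.send_message("pong")  # send_message() is added in this change set

    handler.register_command("ping", handle_ping)
    await handler.start_polling()
    try:
        await asyncio.sleep(3600)  # keep polling for an hour (illustrative)
    finally:
        await handler.stop_polling()


asyncio.run(main())
```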
### Error Handling
|
||||
|
||||
- Command parsing errors → "Unknown command" response
|
||||
- API failures → Graceful degradation, error logged
|
||||
- Invalid state → Appropriate message (e.g., "Trading is already paused")
|
||||
- Trading loop isolation → Command errors never crash trading
|
||||
|
||||
### Troubleshooting Commands
|
||||
|
||||
**Commands not responding**
|
||||
1. Check `TELEGRAM_COMMANDS_ENABLED=true` in `.env`
|
||||
2. Verify you started a conversation with the bot using `/start`
|
||||
3. Check logs for command handler errors
|
||||
4. Confirm chat ID matches `.env` configuration
|
||||
|
||||
**Wrong chat ID**
|
||||
- Commands from unauthorized chats are silently ignored
|
||||
- Check logs for "unauthorized chat_id" warnings
|
||||
|
||||
**Delayed responses**
|
||||
- Polling interval is 1 second by default
|
||||
- Network latency may add delay
|
||||
- Check `TELEGRAM_POLLING_INTERVAL` setting
|
||||
|
||||
## API Reference
|
||||
|
||||
See `telegram_client.py` for full API documentation.
|
||||
|
||||
Key classes and methods:
|
||||
### Notification Methods
|
||||
- `notify_trade_execution()` - Trade alerts
|
||||
- `notify_circuit_breaker()` - Emergency stops
|
||||
- `notify_fat_finger()` - Order rejections
|
||||
- `notify_market_open/close()` - Session tracking
|
||||
- `notify_system_start/shutdown()` - Lifecycle events
|
||||
- `notify_error()` - Error alerts
|
||||
|
||||
### Command Handler
|
||||
- `TelegramCommandHandler` - Bidirectional command processing
|
||||
- `register_command()` - Register custom command handlers
|
||||
- `start_polling()` / `stop_polling()` - Lifecycle management
|
||||
|
||||
@@ -3,6 +3,7 @@
|
||||
import asyncio
|
||||
import logging
|
||||
import time
|
||||
from collections.abc import Awaitable, Callable
|
||||
from dataclasses import dataclass
|
||||
from enum import Enum
|
||||
|
||||
@@ -117,26 +118,28 @@ class TelegramClient:
|
||||
if self._session is not None and not self._session.closed:
|
||||
await self._session.close()
|
||||
|
||||
async def _send_notification(self, msg: NotificationMessage) -> None:
|
||||
async def send_message(self, text: str, parse_mode: str = "HTML") -> bool:
|
||||
"""
|
||||
Send notification to Telegram with graceful degradation.
|
||||
Send a generic text message to Telegram.
|
||||
|
||||
Args:
|
||||
msg: Notification message to send
|
||||
text: Message text to send
|
||||
parse_mode: Parse mode for formatting (HTML or Markdown)
|
||||
|
||||
Returns:
|
||||
True if message was sent successfully, False otherwise
|
||||
"""
|
||||
if not self._enabled:
|
||||
return
|
||||
return False
|
||||
|
||||
try:
|
||||
await self._rate_limiter.acquire()
|
||||
|
||||
formatted_message = f"{msg.priority.emoji} {msg.message}"
|
||||
url = f"{self.API_BASE.format(token=self._bot_token)}/sendMessage"
|
||||
|
||||
payload = {
|
||||
"chat_id": self._chat_id,
|
||||
"text": formatted_message,
|
||||
"parse_mode": "HTML",
|
||||
"text": text,
|
||||
"parse_mode": parse_mode,
|
||||
}
|
||||
|
||||
session = self._get_session()
|
||||
@@ -146,15 +149,29 @@ class TelegramClient:
|
||||
logger.error(
|
||||
"Telegram API error (status=%d): %s", resp.status, error_text
|
||||
)
|
||||
else:
|
||||
logger.debug("Telegram notification sent: %s", msg.message[:50])
|
||||
return False
|
||||
logger.debug("Telegram message sent: %s", text[:50])
|
||||
return True
|
||||
|
||||
except asyncio.TimeoutError:
|
||||
logger.error("Telegram notification timeout")
|
||||
logger.error("Telegram message timeout")
|
||||
return False
|
||||
except aiohttp.ClientError as exc:
|
||||
logger.error("Telegram notification failed: %s", exc)
|
||||
logger.error("Telegram message failed: %s", exc)
|
||||
return False
|
||||
except Exception as exc:
|
||||
logger.error("Unexpected error sending notification: %s", exc)
|
||||
logger.error("Unexpected error sending message: %s", exc)
|
||||
return False
|
||||
|
||||
async def _send_notification(self, msg: NotificationMessage) -> None:
|
||||
"""
|
||||
Send notification to Telegram with graceful degradation.
|
||||
|
||||
Args:
|
||||
msg: Notification message to send
|
||||
"""
|
||||
formatted_message = f"{msg.priority.emoji} {msg.message}"
|
||||
await self.send_message(formatted_message)
|
||||
|
||||
async def notify_trade_execution(
|
||||
self,
|
||||
@@ -287,6 +304,77 @@ class TelegramClient:
|
||||
NotificationMessage(priority=NotificationPriority.MEDIUM, message=message)
|
||||
)
|
||||
|
||||
async def notify_playbook_generated(
|
||||
self,
|
||||
market: str,
|
||||
stock_count: int,
|
||||
scenario_count: int,
|
||||
token_count: int,
|
||||
) -> None:
|
||||
"""
|
||||
Notify that a daily playbook was generated.
|
||||
|
||||
Args:
|
||||
market: Market code (e.g., "KR", "US")
|
||||
stock_count: Number of stocks in the playbook
|
||||
scenario_count: Total number of scenarios
|
||||
token_count: Gemini token usage for the playbook
|
||||
"""
|
||||
message = (
|
||||
f"<b>Playbook Generated</b>\n"
|
||||
f"Market: {market}\n"
|
||||
f"Stocks: {stock_count}\n"
|
||||
f"Scenarios: {scenario_count}\n"
|
||||
f"Tokens: {token_count}"
|
||||
)
|
||||
await self._send_notification(
|
||||
NotificationMessage(priority=NotificationPriority.MEDIUM, message=message)
|
||||
)
|
||||
|
||||
async def notify_scenario_matched(
|
||||
self,
|
||||
stock_code: str,
|
||||
action: str,
|
||||
condition_summary: str,
|
||||
confidence: float,
|
||||
) -> None:
|
||||
"""
|
||||
Notify that a scenario matched for a stock.
|
||||
|
||||
Args:
|
||||
stock_code: Stock ticker symbol
|
||||
action: Scenario action (BUY/SELL/HOLD/REDUCE_ALL)
|
||||
condition_summary: Short summary of the matched condition
|
||||
confidence: Scenario confidence (0-100)
|
||||
"""
|
||||
message = (
|
||||
f"<b>Scenario Matched</b>\n"
|
||||
f"Symbol: <code>{stock_code}</code>\n"
|
||||
f"Action: {action}\n"
|
||||
f"Condition: {condition_summary}\n"
|
||||
f"Confidence: {confidence:.0f}%"
|
||||
)
|
||||
await self._send_notification(
|
||||
NotificationMessage(priority=NotificationPriority.HIGH, message=message)
|
||||
)
|
||||
|
||||
async def notify_playbook_failed(self, market: str, reason: str) -> None:
|
||||
"""
|
||||
Notify that playbook generation failed.
|
||||
|
||||
Args:
|
||||
market: Market code (e.g., "KR", "US")
|
||||
reason: Failure reason summary
|
||||
"""
|
||||
message = (
|
||||
f"<b>Playbook Failed</b>\n"
|
||||
f"Market: {market}\n"
|
||||
f"Reason: {reason[:200]}"
|
||||
)
|
||||
await self._send_notification(
|
||||
NotificationMessage(priority=NotificationPriority.HIGH, message=message)
|
||||
)
|
||||
|
||||
async def notify_system_shutdown(self, reason: str) -> None:
|
||||
"""
|
||||
Notify system shutdown.
|
||||
@@ -323,3 +411,172 @@ class TelegramClient:
|
||||
await self._send_notification(
|
||||
NotificationMessage(priority=NotificationPriority.HIGH, message=message)
|
||||
)
|
||||
|
||||
|
||||
class TelegramCommandHandler:
|
||||
"""Handles incoming Telegram commands via long polling."""
|
||||
|
||||
def __init__(
|
||||
self, client: TelegramClient, polling_interval: float = 1.0
|
||||
) -> None:
|
||||
"""
|
||||
Initialize command handler.
|
||||
|
||||
Args:
|
||||
client: TelegramClient instance for sending responses
|
||||
polling_interval: Polling interval in seconds
|
||||
"""
|
||||
self._client = client
|
||||
self._polling_interval = polling_interval
|
||||
self._commands: dict[str, Callable[[], Awaitable[None]]] = {}
|
||||
self._last_update_id = 0
|
||||
self._polling_task: asyncio.Task[None] | None = None
|
||||
self._running = False
|
||||
|
||||
def register_command(
|
||||
self, command: str, handler: Callable[[], Awaitable[None]]
|
||||
) -> None:
|
||||
"""
|
||||
Register a command handler.
|
||||
|
||||
Args:
|
||||
command: Command name (without leading slash, e.g., "start")
|
||||
handler: Async function to handle the command
|
||||
"""
|
||||
self._commands[command] = handler
|
||||
logger.debug("Registered command handler: /%s", command)
|
||||
|
||||
async def start_polling(self) -> None:
|
||||
"""Start long polling for commands."""
|
||||
if self._running:
|
||||
logger.warning("Command handler already running")
|
||||
return
|
||||
|
||||
if not self._client._enabled:
|
||||
logger.info("Command handler disabled (TelegramClient disabled)")
|
||||
return
|
||||
|
||||
self._running = True
|
||||
self._polling_task = asyncio.create_task(self._poll_loop())
|
||||
logger.info("Started Telegram command polling")
|
||||
|
||||
async def stop_polling(self) -> None:
|
||||
"""Stop polling and cancel pending tasks."""
|
||||
if not self._running:
|
||||
return
|
||||
|
||||
self._running = False
|
||||
if self._polling_task:
|
||||
self._polling_task.cancel()
|
||||
try:
|
||||
await self._polling_task
|
||||
except asyncio.CancelledError:
|
||||
pass
|
||||
logger.info("Stopped Telegram command polling")
|
||||
|
||||
async def _poll_loop(self) -> None:
|
||||
"""Main polling loop that fetches updates."""
|
||||
while self._running:
|
||||
try:
|
||||
updates = await self._get_updates()
|
||||
for update in updates:
|
||||
await self._handle_update(update)
|
||||
except asyncio.CancelledError:
|
||||
break
|
||||
except Exception as exc:
|
||||
logger.error("Error in polling loop: %s", exc)
|
||||
|
||||
await asyncio.sleep(self._polling_interval)
|
||||
|
||||
async def _get_updates(self) -> list[dict]:
|
||||
"""
|
||||
Fetch updates from Telegram API.
|
||||
|
||||
Returns:
|
||||
List of update objects
|
||||
"""
|
||||
try:
|
||||
url = f"{self._client.API_BASE.format(token=self._client._bot_token)}/getUpdates"
|
||||
payload = {
|
||||
"offset": self._last_update_id + 1,
|
||||
"timeout": int(self._polling_interval),
|
||||
"allowed_updates": ["message"],
|
||||
}
|
||||
|
||||
session = self._client._get_session()
|
||||
async with session.post(url, json=payload) as resp:
|
||||
if resp.status != 200:
|
||||
error_text = await resp.text()
|
||||
logger.error(
|
||||
"getUpdates API error (status=%d): %s", resp.status, error_text
|
||||
)
|
||||
return []
|
||||
|
||||
data = await resp.json()
|
||||
if not data.get("ok"):
|
||||
logger.error("getUpdates returned ok=false: %s", data)
|
||||
return []
|
||||
|
||||
updates = data.get("result", [])
|
||||
if updates:
|
||||
self._last_update_id = updates[-1]["update_id"]
|
||||
|
||||
return updates
|
||||
|
||||
except asyncio.TimeoutError:
|
||||
logger.debug("getUpdates timeout (normal)")
|
||||
return []
|
||||
except aiohttp.ClientError as exc:
|
||||
logger.error("getUpdates failed: %s", exc)
|
||||
return []
|
||||
except Exception as exc:
|
||||
logger.error("Unexpected error in _get_updates: %s", exc)
|
||||
return []
|
||||
|
||||
async def _handle_update(self, update: dict) -> None:
|
||||
"""
|
||||
Parse and handle a single update.
|
||||
|
||||
Args:
|
||||
update: Update object from Telegram API
|
||||
"""
|
||||
try:
|
||||
message = update.get("message")
|
||||
if not message:
|
||||
return
|
||||
|
||||
# Verify chat_id matches configured chat
|
||||
chat_id = str(message.get("chat", {}).get("id", ""))
|
||||
if chat_id != self._client._chat_id:
|
||||
logger.warning(
|
||||
"Ignoring command from unauthorized chat_id: %s", chat_id
|
||||
)
|
||||
return
|
||||
|
||||
# Extract command text
|
||||
text = message.get("text", "").strip()
|
||||
if not text.startswith("/"):
|
||||
return
|
||||
|
||||
# Parse command (remove leading slash and extract command name)
|
||||
command_parts = text[1:].split()
|
||||
if not command_parts:
|
||||
return
|
||||
|
||||
# Remove @botname suffix if present (for group chats)
|
||||
command_name = command_parts[0].split("@")[0]
|
||||
|
||||
# Execute handler
|
||||
handler = self._commands.get(command_name)
|
||||
if handler:
|
||||
logger.info("Executing command: /%s", command_name)
|
||||
await handler()
|
||||
else:
|
||||
logger.debug("Unknown command: /%s", command_name)
|
||||
await self._client.send_message(
|
||||
f"Unknown command: /{command_name}\nUse /help to see available commands."
|
||||
)
|
||||
|
||||
except Exception as exc:
|
||||
logger.error("Error handling update: %s", exc)
|
||||
# Don't crash the polling loop on handler errors
|
||||
|
||||
0
src/strategy/__init__.py
Normal file
164
src/strategy/models.py
Normal file
@@ -0,0 +1,164 @@
|
||||
"""Pydantic models for pre-market scenario planning.
|
||||
|
||||
Defines the data contracts for the proactive strategy system:
|
||||
- AI generates DayPlaybook before market open (structured JSON scenarios)
|
||||
- Local ScenarioEngine matches conditions during market hours (no API calls)
|
||||
"""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
from datetime import UTC, date, datetime
|
||||
from enum import Enum
|
||||
|
||||
from pydantic import BaseModel, Field, field_validator
|
||||
|
||||
|
||||
class ScenarioAction(str, Enum):
|
||||
"""Actions that can be taken by scenarios."""
|
||||
|
||||
BUY = "BUY"
|
||||
SELL = "SELL"
|
||||
HOLD = "HOLD"
|
||||
REDUCE_ALL = "REDUCE_ALL"
|
||||
|
||||
|
||||
class MarketOutlook(str, Enum):
|
||||
"""AI's assessment of market direction."""
|
||||
|
||||
BULLISH = "bullish"
|
||||
NEUTRAL_TO_BULLISH = "neutral_to_bullish"
|
||||
NEUTRAL = "neutral"
|
||||
NEUTRAL_TO_BEARISH = "neutral_to_bearish"
|
||||
BEARISH = "bearish"
|
||||
|
||||
|
||||
class PlaybookStatus(str, Enum):
|
||||
"""Lifecycle status of a playbook."""
|
||||
|
||||
PENDING = "pending"
|
||||
READY = "ready"
|
||||
FAILED = "failed"
|
||||
EXPIRED = "expired"
|
||||
|
||||
|
||||
class StockCondition(BaseModel):
|
||||
"""Condition fields for scenario matching (all optional, AND-combined).
|
||||
|
||||
The ScenarioEngine evaluates all non-None fields as AND conditions.
|
||||
A condition matches only if ALL specified fields are satisfied.
|
||||
"""
|
||||
|
||||
rsi_below: float | None = None
|
||||
rsi_above: float | None = None
|
||||
volume_ratio_above: float | None = None
|
||||
volume_ratio_below: float | None = None
|
||||
price_above: float | None = None
|
||||
price_below: float | None = None
|
||||
price_change_pct_above: float | None = None
|
||||
price_change_pct_below: float | None = None
|
||||
|
||||
def has_any_condition(self) -> bool:
|
||||
"""Check if at least one condition field is set."""
|
||||
return any(
|
||||
v is not None
|
||||
for v in (
|
||||
self.rsi_below,
|
||||
self.rsi_above,
|
||||
self.volume_ratio_above,
|
||||
self.volume_ratio_below,
|
||||
self.price_above,
|
||||
self.price_below,
|
||||
self.price_change_pct_above,
|
||||
self.price_change_pct_below,
|
||||
)
|
||||
)
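# Illustrative example: unset fields are ignored and the set fields are AND-combined,
# so this condition matches only when RSI(14) < 30 AND volume ratio > 2.0.
#
#     cond = StockCondition(rsi_below=30.0, volume_ratio_above=2.0)
#     cond.has_any_condition()  # True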
|
||||
|
||||
|
||||
class StockScenario(BaseModel):
|
||||
"""A single condition-action rule for one stock."""
|
||||
|
||||
condition: StockCondition
|
||||
action: ScenarioAction
|
||||
confidence: int = Field(ge=0, le=100)
|
||||
allocation_pct: float = Field(ge=0, le=100, default=10.0)
|
||||
stop_loss_pct: float = Field(le=0, default=-2.0)
|
||||
take_profit_pct: float = Field(ge=0, default=3.0)
|
||||
rationale: str = ""
|
||||
|
||||
|
||||
class StockPlaybook(BaseModel):
|
||||
"""All scenarios for a single stock (ordered by priority)."""
|
||||
|
||||
stock_code: str
|
||||
stock_name: str = ""
|
||||
scenarios: list[StockScenario] = Field(min_length=1)
|
||||
|
||||
|
||||
class GlobalRule(BaseModel):
|
||||
"""Portfolio-level rule (checked before stock-level scenarios)."""
|
||||
|
||||
condition: str # e.g. "portfolio_pnl_pct < -2.0"
|
||||
action: ScenarioAction
|
||||
rationale: str = ""
|
||||
|
||||
|
||||
class CrossMarketContext(BaseModel):
|
||||
"""Summary of another market's state for cross-market awareness."""
|
||||
|
||||
market: str # e.g. "US" or "KR"
|
||||
date: str
|
||||
total_pnl: float = 0.0
|
||||
win_rate: float = 0.0
|
||||
index_change_pct: float = 0.0 # e.g. KOSPI or S&P500 change
|
||||
key_events: list[str] = Field(default_factory=list)
|
||||
lessons: list[str] = Field(default_factory=list)
|
||||
|
||||
|
||||
class DayPlaybook(BaseModel):
|
||||
"""Complete playbook for a single trading day in a single market.
|
||||
|
||||
Generated by PreMarketPlanner (1 Gemini call per market per day).
|
||||
Consumed by ScenarioEngine during market hours (0 API calls).
|
||||
"""
|
||||
|
||||
date: date
|
||||
market: str # "KR" or "US"
|
||||
market_outlook: MarketOutlook = MarketOutlook.NEUTRAL
|
||||
generated_at: str = "" # ISO timestamp
|
||||
gemini_model: str = ""
|
||||
token_count: int = 0
|
||||
global_rules: list[GlobalRule] = Field(default_factory=list)
|
||||
stock_playbooks: list[StockPlaybook] = Field(default_factory=list)
|
||||
default_action: ScenarioAction = ScenarioAction.HOLD
|
||||
context_summary: dict = Field(default_factory=dict)
|
||||
cross_market: CrossMarketContext | None = None
|
||||
|
||||
@field_validator("stock_playbooks")
|
||||
@classmethod
|
||||
def validate_unique_stocks(cls, v: list[StockPlaybook]) -> list[StockPlaybook]:
|
||||
codes = [pb.stock_code for pb in v]
|
||||
if len(codes) != len(set(codes)):
|
||||
raise ValueError("Duplicate stock codes in playbook")
|
||||
return v
|
||||
|
||||
def get_stock_playbook(self, stock_code: str) -> StockPlaybook | None:
|
||||
"""Find the playbook for a specific stock."""
|
||||
for pb in self.stock_playbooks:
|
||||
if pb.stock_code == stock_code:
|
||||
return pb
|
||||
return None
|
||||
|
||||
@property
|
||||
def scenario_count(self) -> int:
|
||||
"""Total number of scenarios across all stocks."""
|
||||
return sum(len(pb.scenarios) for pb in self.stock_playbooks)
|
||||
|
||||
@property
|
||||
def stock_count(self) -> int:
|
||||
"""Number of stocks with scenarios."""
|
||||
return len(self.stock_playbooks)
|
||||
|
||||
def model_post_init(self, __context: object) -> None:
|
||||
"""Set generated_at if not provided."""
|
||||
if not self.generated_at:
|
||||
self.generated_at = datetime.now(UTC).isoformat()
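# Illustrative sketch (values are hypothetical): a minimal one-stock playbook.
#
#     pb = DayPlaybook(
#         date=date(2026, 2, 4),
#         market="KR",
#         stock_playbooks=[
#             StockPlaybook(
#                 stock_code="005930",
#                 scenarios=[
#                     StockScenario(
#                         condition=StockCondition(rsi_below=30.0),
#                         action=ScenarioAction.BUY,
#                         confidence=85,
#                     )
#                 ],
#             )
#         ],
#     )
#     pb.stock_count      # 1
#     pb.scenario_count   # 1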
|
||||
184
src/strategy/playbook_store.py
Normal file
@@ -0,0 +1,184 @@
|
||||
"""Playbook persistence layer — CRUD for DayPlaybook in SQLite.
|
||||
|
||||
Stores and retrieves market-specific daily playbooks with JSON serialization.
|
||||
Designed for the pre-market strategy system (one playbook per market per day).
|
||||
"""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
import json
|
||||
import logging
|
||||
import sqlite3
|
||||
from datetime import date
|
||||
|
||||
from src.strategy.models import DayPlaybook, PlaybookStatus
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
class PlaybookStore:
|
||||
"""CRUD operations for DayPlaybook persistence."""
|
||||
|
||||
def __init__(self, conn: sqlite3.Connection) -> None:
|
||||
self._conn = conn
|
||||
|
||||
def save(self, playbook: DayPlaybook) -> int:
|
||||
"""Save or replace a playbook for a given date+market.
|
||||
|
||||
Uses INSERT OR REPLACE to enforce UNIQUE(date, market).
|
||||
|
||||
Returns:
|
||||
The row id of the inserted/replaced record.
|
||||
"""
|
||||
playbook_json = playbook.model_dump_json()
|
||||
cursor = self._conn.execute(
|
||||
"""
|
||||
INSERT OR REPLACE INTO playbooks
|
||||
(date, market, status, playbook_json, generated_at,
|
||||
token_count, scenario_count, match_count)
|
||||
VALUES (?, ?, ?, ?, ?, ?, ?, ?)
|
||||
""",
|
||||
(
|
||||
playbook.date.isoformat(),
|
||||
playbook.market,
|
||||
PlaybookStatus.READY.value,
|
||||
playbook_json,
|
||||
playbook.generated_at,
|
||||
playbook.token_count,
|
||||
playbook.scenario_count,
|
||||
0,
|
||||
),
|
||||
)
|
||||
self._conn.commit()
|
||||
row_id = cursor.lastrowid or 0
|
||||
logger.info(
|
||||
"Saved playbook for %s/%s (%d stocks, %d scenarios)",
|
||||
playbook.date, playbook.market,
|
||||
playbook.stock_count, playbook.scenario_count,
|
||||
)
|
||||
return row_id
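# Assumed backing schema (created elsewhere; this sketch only mirrors the columns and
# the UNIQUE(date, market) constraint implied by the INSERT OR REPLACE above):
#
#     CREATE TABLE IF NOT EXISTS playbooks (
#         id INTEGER PRIMARY KEY AUTOINCREMENT,
#         date TEXT NOT NULL,
#         market TEXT NOT NULL,
#         status TEXT NOT NULL,
#         playbook_json TEXT NOT NULL,
#         generated_at TEXT,
#         token_count INTEGER DEFAULT 0,
#         scenario_count INTEGER DEFAULT 0,
#         match_count INTEGER DEFAULT 0,
#         UNIQUE(date, market)
#     );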
|
||||
|
||||
def load(self, target_date: date, market: str) -> DayPlaybook | None:
|
||||
"""Load a playbook for a specific date and market.
|
||||
|
||||
Returns:
|
||||
DayPlaybook if found, None otherwise.
|
||||
"""
|
||||
row = self._conn.execute(
|
||||
"SELECT playbook_json FROM playbooks WHERE date = ? AND market = ?",
|
||||
(target_date.isoformat(), market),
|
||||
).fetchone()
|
||||
if row is None:
|
||||
return None
|
||||
return DayPlaybook.model_validate_json(row[0])
|
||||
|
||||
def get_status(self, target_date: date, market: str) -> PlaybookStatus | None:
|
||||
"""Get the status of a playbook without deserializing the full JSON."""
|
||||
row = self._conn.execute(
|
||||
"SELECT status FROM playbooks WHERE date = ? AND market = ?",
|
||||
(target_date.isoformat(), market),
|
||||
).fetchone()
|
||||
if row is None:
|
||||
return None
|
||||
return PlaybookStatus(row[0])
|
||||
|
||||
def update_status(self, target_date: date, market: str, status: PlaybookStatus) -> bool:
|
||||
"""Update the status of a playbook.
|
||||
|
||||
Returns:
|
||||
True if a row was updated, False if not found.
|
||||
"""
|
||||
cursor = self._conn.execute(
|
||||
"UPDATE playbooks SET status = ? WHERE date = ? AND market = ?",
|
||||
(status.value, target_date.isoformat(), market),
|
||||
)
|
||||
self._conn.commit()
|
||||
return cursor.rowcount > 0
|
||||
|
||||
def increment_match_count(self, target_date: date, market: str) -> bool:
|
||||
"""Increment the match_count for tracking scenario hits during the day.
|
||||
|
||||
Returns:
|
||||
True if a row was updated, False if not found.
|
||||
"""
|
||||
cursor = self._conn.execute(
|
||||
"UPDATE playbooks SET match_count = match_count + 1 WHERE date = ? AND market = ?",
|
||||
(target_date.isoformat(), market),
|
||||
)
|
||||
self._conn.commit()
|
||||
return cursor.rowcount > 0
|
||||
|
||||
def get_stats(self, target_date: date, market: str) -> dict | None:
|
||||
"""Get playbook stats without full deserialization.
|
||||
|
||||
Returns:
|
||||
Dict with status, token_count, scenario_count, match_count, or None.
|
||||
"""
|
||||
row = self._conn.execute(
|
||||
"""
|
||||
SELECT status, token_count, scenario_count, match_count, generated_at
|
||||
FROM playbooks WHERE date = ? AND market = ?
|
||||
""",
|
||||
(target_date.isoformat(), market),
|
||||
).fetchone()
|
||||
if row is None:
|
||||
return None
|
||||
return {
|
||||
"status": row[0],
|
||||
"token_count": row[1],
|
||||
"scenario_count": row[2],
|
||||
"match_count": row[3],
|
||||
"generated_at": row[4],
|
||||
}
|
||||
|
||||
def list_recent(self, market: str | None = None, limit: int = 7) -> list[dict]:
|
||||
"""List recent playbooks with summary info.
|
||||
|
||||
Args:
|
||||
market: Filter by market code. None for all markets.
|
||||
limit: Max number of results.
|
||||
|
||||
Returns:
|
||||
List of dicts with date, market, status, scenario_count, match_count.
|
||||
"""
|
||||
if market is not None:
|
||||
rows = self._conn.execute(
|
||||
"""
|
||||
SELECT date, market, status, scenario_count, match_count
|
||||
FROM playbooks WHERE market = ?
|
||||
ORDER BY date DESC LIMIT ?
|
||||
""",
|
||||
(market, limit),
|
||||
).fetchall()
|
||||
else:
|
||||
rows = self._conn.execute(
|
||||
"""
|
||||
SELECT date, market, status, scenario_count, match_count
|
||||
FROM playbooks
|
||||
ORDER BY date DESC LIMIT ?
|
||||
""",
|
||||
(limit,),
|
||||
).fetchall()
|
||||
return [
|
||||
{
|
||||
"date": row[0],
|
||||
"market": row[1],
|
||||
"status": row[2],
|
||||
"scenario_count": row[3],
|
||||
"match_count": row[4],
|
||||
}
|
||||
for row in rows
|
||||
]
|
||||
|
||||
def delete(self, target_date: date, market: str) -> bool:
|
||||
"""Delete a playbook.
|
||||
|
||||
Returns:
|
||||
True if a row was deleted, False if not found.
|
||||
"""
|
||||
cursor = self._conn.execute(
|
||||
"DELETE FROM playbooks WHERE date = ? AND market = ?",
|
||||
(target_date.isoformat(), market),
|
||||
)
|
||||
self._conn.commit()
|
||||
return cursor.rowcount > 0
|
||||
472
src/strategy/pre_market_planner.py
Normal file
@@ -0,0 +1,472 @@
|
||||
"""Pre-market planner — generates DayPlaybook via Gemini before market open.
|
||||
|
||||
One Gemini API call per market per day. Candidates come from SmartVolatilityScanner.
|
||||
On failure, returns a defensive playbook (all HOLD, no trades).
|
||||
"""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
import json
|
||||
import logging
|
||||
from datetime import date, timedelta
|
||||
from typing import Any
|
||||
|
||||
from src.analysis.smart_scanner import ScanCandidate
|
||||
from src.brain.context_selector import ContextSelector, DecisionType
|
||||
from src.brain.gemini_client import GeminiClient
|
||||
from src.config import Settings
|
||||
from src.context.store import ContextLayer, ContextStore
|
||||
from src.strategy.models import (
|
||||
CrossMarketContext,
|
||||
DayPlaybook,
|
||||
GlobalRule,
|
||||
MarketOutlook,
|
||||
ScenarioAction,
|
||||
StockCondition,
|
||||
StockPlaybook,
|
||||
StockScenario,
|
||||
)
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
# Mapping from string to MarketOutlook enum
|
||||
_OUTLOOK_MAP: dict[str, MarketOutlook] = {
|
||||
"bullish": MarketOutlook.BULLISH,
|
||||
"neutral_to_bullish": MarketOutlook.NEUTRAL_TO_BULLISH,
|
||||
"neutral": MarketOutlook.NEUTRAL,
|
||||
"neutral_to_bearish": MarketOutlook.NEUTRAL_TO_BEARISH,
|
||||
"bearish": MarketOutlook.BEARISH,
|
||||
}
|
||||
|
||||
_ACTION_MAP: dict[str, ScenarioAction] = {
|
||||
"BUY": ScenarioAction.BUY,
|
||||
"SELL": ScenarioAction.SELL,
|
||||
"HOLD": ScenarioAction.HOLD,
|
||||
"REDUCE_ALL": ScenarioAction.REDUCE_ALL,
|
||||
}
|
||||
|
||||
|
||||
class PreMarketPlanner:
|
||||
"""Generates a DayPlaybook by calling Gemini once before market open.
|
||||
|
||||
Flow:
|
||||
1. Collect strategic context (L5-L7) + cross-market context
|
||||
2. Build a structured prompt with scan candidates
|
||||
3. Call Gemini for JSON scenario generation
|
||||
4. Parse and validate response into DayPlaybook
|
||||
5. On failure → defensive playbook (HOLD everything)
|
||||
"""
|
||||
|
||||
def __init__(
|
||||
self,
|
||||
gemini_client: GeminiClient,
|
||||
context_store: ContextStore,
|
||||
context_selector: ContextSelector,
|
||||
settings: Settings,
|
||||
) -> None:
|
||||
self._gemini = gemini_client
|
||||
self._context_store = context_store
|
||||
self._context_selector = context_selector
|
||||
self._settings = settings
|
||||
|
||||
async def generate_playbook(
|
||||
self,
|
||||
market: str,
|
||||
candidates: list[ScanCandidate],
|
||||
today: date | None = None,
|
||||
) -> DayPlaybook:
|
||||
"""Generate a DayPlaybook for a market using Gemini.
|
||||
|
||||
Args:
|
||||
market: Market code ("KR" or "US")
|
||||
candidates: Stock candidates from SmartVolatilityScanner
|
||||
today: Override date (defaults to date.today()). Use market-local date.
|
||||
|
||||
Returns:
|
||||
DayPlaybook with scenarios. Empty/defensive if no candidates or failure.
|
||||
"""
|
||||
if today is None:
|
||||
today = date.today()
|
||||
|
||||
if not candidates:
|
||||
logger.info("No candidates for %s — returning empty playbook", market)
|
||||
return self._empty_playbook(today, market)
|
||||
|
||||
try:
|
||||
# 1. Gather context
|
||||
context_data = self._gather_context()
|
||||
self_market_scorecard = self.build_self_market_scorecard(market, today)
|
||||
cross_market = self.build_cross_market_context(market, today)
|
||||
|
||||
# 2. Build prompt
|
||||
prompt = self._build_prompt(
|
||||
market,
|
||||
candidates,
|
||||
context_data,
|
||||
self_market_scorecard,
|
||||
cross_market,
|
||||
)
|
||||
|
||||
# 3. Call Gemini
|
||||
market_data = {
|
||||
"stock_code": "PLANNER",
|
||||
"current_price": 0,
|
||||
"prompt_override": prompt,
|
||||
}
|
||||
decision = await self._gemini.decide(market_data)
|
||||
|
||||
# 4. Parse response
|
||||
playbook = self._parse_response(
|
||||
decision.rationale, today, market, candidates, cross_market
|
||||
)
|
||||
playbook_with_tokens = playbook.model_copy(
|
||||
update={"token_count": decision.token_count}
|
||||
)
|
||||
logger.info(
|
||||
"Generated playbook for %s: %d stocks, %d scenarios, %d tokens",
|
||||
market,
|
||||
playbook_with_tokens.stock_count,
|
||||
playbook_with_tokens.scenario_count,
|
||||
playbook_with_tokens.token_count,
|
||||
)
|
||||
return playbook_with_tokens
|
||||
|
||||
except Exception:
|
||||
logger.exception("Playbook generation failed for %s", market)
|
||||
if self._settings.DEFENSIVE_PLAYBOOK_ON_FAILURE:
|
||||
return self._defensive_playbook(today, market, candidates)
|
||||
return self._empty_playbook(today, market)
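# Illustrative usage sketch (wiring and candidate values are hypothetical):
#
#     planner = PreMarketPlanner(gemini_client, context_store, context_selector, settings)
#     playbook = await planner.generate_playbook(
#         market="KR",
#         candidates=scanner_candidates,  # list[ScanCandidate] from SmartVolatilityScanner
#     )
#     playbook_store.save(playbook)       # persisted for the ScenarioEngine to consume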
|
||||
|
||||
def build_cross_market_context(
|
||||
self, target_market: str, today: date | None = None,
|
||||
) -> CrossMarketContext | None:
|
||||
"""Build cross-market context from the other market's L6 data.
|
||||
|
||||
KR planner → reads US scorecard from previous night.
|
||||
US planner → reads KR scorecard from today.
|
||||
|
||||
Args:
|
||||
target_market: The market being planned ("KR" or "US")
|
||||
today: Override date (defaults to date.today()). Use market-local date.
|
||||
"""
|
||||
other_market = "US" if target_market == "KR" else "KR"
|
||||
if today is None:
|
||||
today = date.today()
|
||||
timeframe_date = today - timedelta(days=1) if target_market == "KR" else today
|
||||
timeframe = timeframe_date.isoformat()
|
||||
|
||||
scorecard_key = f"scorecard_{other_market}"
|
||||
scorecard_data = self._context_store.get_context(
|
||||
ContextLayer.L6_DAILY, timeframe, scorecard_key
|
||||
)
|
||||
|
||||
if scorecard_data is None:
|
||||
logger.debug("No cross-market scorecard found for %s", other_market)
|
||||
return None
|
||||
|
||||
if isinstance(scorecard_data, str):
|
||||
try:
|
||||
scorecard_data = json.loads(scorecard_data)
|
||||
except (json.JSONDecodeError, TypeError):
|
||||
return None
|
||||
|
||||
if not isinstance(scorecard_data, dict):
|
||||
return None
|
||||
|
||||
return CrossMarketContext(
|
||||
market=other_market,
|
||||
date=timeframe,
|
||||
total_pnl=float(scorecard_data.get("total_pnl", 0.0)),
|
||||
win_rate=float(scorecard_data.get("win_rate", 0.0)),
|
||||
index_change_pct=float(scorecard_data.get("index_change_pct", 0.0)),
|
||||
key_events=scorecard_data.get("key_events", []),
|
||||
lessons=scorecard_data.get("lessons", []),
|
||||
)
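# Illustrative example (values are hypothetical): the L6 scorecard entry this method
# expects under key "scorecard_US" or "scorecard_KR", stored as a dict or JSON string:
#
#     {
#         "total_pnl": 1.8,
#         "win_rate": 66.7,
#         "index_change_pct": 0.4,
#         "key_events": ["Fed minutes released"],
#         "lessons": ["Avoid chasing gaps at the open"]
#     }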
|
||||
|
||||
def build_self_market_scorecard(
|
||||
self, market: str, today: date | None = None,
|
||||
) -> dict[str, Any] | None:
|
||||
"""Build previous-day scorecard for the same market."""
|
||||
if today is None:
|
||||
today = date.today()
|
||||
timeframe = (today - timedelta(days=1)).isoformat()
|
||||
scorecard_key = f"scorecard_{market}"
|
||||
scorecard_data = self._context_store.get_context(
|
||||
ContextLayer.L6_DAILY, timeframe, scorecard_key
|
||||
)
|
||||
|
||||
if scorecard_data is None:
|
||||
return None
|
||||
|
||||
if isinstance(scorecard_data, str):
|
||||
try:
|
||||
scorecard_data = json.loads(scorecard_data)
|
||||
except (json.JSONDecodeError, TypeError):
|
||||
return None
|
||||
|
||||
if not isinstance(scorecard_data, dict):
|
||||
return None
|
||||
|
||||
return {
|
||||
"date": timeframe,
|
||||
"total_pnl": float(scorecard_data.get("total_pnl", 0.0)),
|
||||
"win_rate": float(scorecard_data.get("win_rate", 0.0)),
|
||||
"lessons": scorecard_data.get("lessons", []),
|
||||
}
|
||||
|
||||
def _gather_context(self) -> dict[str, Any]:
|
||||
"""Gather strategic context using ContextSelector."""
|
||||
layers = self._context_selector.select_layers(
|
||||
decision_type=DecisionType.STRATEGIC,
|
||||
include_realtime=True,
|
||||
)
|
||||
return self._context_selector.get_context_data(layers, max_items_per_layer=10)
|
||||
|
||||
def _build_prompt(
|
||||
self,
|
||||
market: str,
|
||||
candidates: list[ScanCandidate],
|
||||
context_data: dict[str, Any],
|
||||
self_market_scorecard: dict[str, Any] | None,
|
||||
cross_market: CrossMarketContext | None,
|
||||
) -> str:
|
||||
"""Build a structured prompt for Gemini to generate scenario JSON."""
|
||||
max_scenarios = self._settings.MAX_SCENARIOS_PER_STOCK
|
||||
|
||||
candidates_text = "\n".join(
|
||||
f" - {c.stock_code} ({c.name}): price={c.price}, "
|
||||
f"RSI={c.rsi:.1f}, volume_ratio={c.volume_ratio:.1f}, "
|
||||
f"signal={c.signal}, score={c.score:.1f}"
|
||||
for c in candidates
|
||||
)
|
||||
|
||||
cross_market_text = ""
|
||||
if cross_market:
|
||||
cross_market_text = (
|
||||
f"\n## Other Market ({cross_market.market}) Summary\n"
|
||||
f"- P&L: {cross_market.total_pnl:+.2f}%\n"
|
||||
f"- Win Rate: {cross_market.win_rate:.0f}%\n"
|
||||
f"- Index Change: {cross_market.index_change_pct:+.2f}%\n"
|
||||
)
|
||||
if cross_market.lessons:
|
||||
cross_market_text += f"- Lessons: {'; '.join(cross_market.lessons[:3])}\n"
|
||||
|
||||
self_market_text = ""
|
||||
if self_market_scorecard:
|
||||
self_market_text = (
|
||||
f"\n## My Market Previous Day ({market})\n"
|
||||
f"- Date: {self_market_scorecard['date']}\n"
|
||||
f"- P&L: {self_market_scorecard['total_pnl']:+.2f}%\n"
|
||||
f"- Win Rate: {self_market_scorecard['win_rate']:.0f}%\n"
|
||||
)
|
||||
lessons = self_market_scorecard.get("lessons", [])
|
||||
if lessons:
|
||||
self_market_text += f"- Lessons: {'; '.join(lessons[:3])}\n"
|
||||
|
||||
context_text = ""
|
||||
if context_data:
|
||||
context_text = "\n## Strategic Context\n"
|
||||
for layer_name, layer_data in context_data.items():
|
||||
if layer_data:
|
||||
context_text += f"### {layer_name}\n"
|
||||
for key, value in list(layer_data.items())[:5]:
|
||||
context_text += f" - {key}: {value}\n"
|
||||
|
||||
return (
|
||||
f"You are a pre-market trading strategist for the {market} market.\n"
|
||||
f"Generate structured trading scenarios for today.\n\n"
|
||||
f"## Candidates (from volatility scanner)\n{candidates_text}\n"
|
||||
f"{self_market_text}"
|
||||
f"{cross_market_text}"
|
||||
f"{context_text}\n"
|
||||
f"## Instructions\n"
|
||||
f"Return a JSON object with this exact structure:\n"
|
||||
f'{{\n'
|
||||
f' "market_outlook": "bullish|neutral_to_bullish|neutral'
|
||||
f'|neutral_to_bearish|bearish",\n'
|
||||
f' "global_rules": [\n'
|
||||
f' {{"condition": "portfolio_pnl_pct < -2.0",'
|
||||
f' "action": "REDUCE_ALL", "rationale": "..."}}\n'
|
||||
f' ],\n'
|
||||
f' "stocks": [\n'
|
||||
f' {{\n'
|
||||
f' "stock_code": "...",\n'
|
||||
f' "scenarios": [\n'
|
||||
f' {{\n'
|
||||
f' "condition": {{"rsi_below": 30, "volume_ratio_above": 2.0}},\n'
|
||||
f' "action": "BUY|SELL|HOLD",\n'
|
||||
f' "confidence": 85,\n'
|
||||
f' "allocation_pct": 10.0,\n'
|
||||
f' "stop_loss_pct": -2.0,\n'
|
||||
f' "take_profit_pct": 3.0,\n'
|
||||
f' "rationale": "..."\n'
|
||||
f' }}\n'
|
||||
f' ]\n'
|
||||
f' }}\n'
|
||||
f' ]\n'
|
||||
f'}}\n\n'
|
||||
f"Rules:\n"
|
||||
f"- Max {max_scenarios} scenarios per stock\n"
|
||||
f"- Only use stocks from the candidates list\n"
|
||||
f"- Confidence 0-100 (80+ for actionable trades)\n"
|
||||
f"- stop_loss_pct must be <= 0, take_profit_pct must be >= 0\n"
|
||||
f"- Return ONLY the JSON, no markdown fences or explanation\n"
|
||||
)
|
||||
|
||||
def _parse_response(
|
||||
self,
|
||||
response_text: str,
|
||||
today: date,
|
||||
market: str,
|
||||
candidates: list[ScanCandidate],
|
||||
cross_market: CrossMarketContext | None,
|
||||
) -> DayPlaybook:
|
||||
"""Parse Gemini's JSON response into a validated DayPlaybook."""
|
||||
cleaned = self._extract_json(response_text)
|
||||
data = json.loads(cleaned)
|
||||
|
||||
valid_codes = {c.stock_code for c in candidates}
|
||||
|
||||
# Parse market outlook
|
||||
outlook_str = data.get("market_outlook", "neutral")
|
||||
market_outlook = _OUTLOOK_MAP.get(outlook_str, MarketOutlook.NEUTRAL)
|
||||
|
||||
# Parse global rules
|
||||
global_rules = []
|
||||
for rule_data in data.get("global_rules", []):
|
||||
action_str = rule_data.get("action", "HOLD")
|
||||
action = _ACTION_MAP.get(action_str, ScenarioAction.HOLD)
|
||||
global_rules.append(
|
||||
GlobalRule(
|
||||
condition=rule_data.get("condition", ""),
|
||||
action=action,
|
||||
rationale=rule_data.get("rationale", ""),
|
||||
)
|
||||
)
|
||||
|
||||
# Parse stock playbooks
|
||||
stock_playbooks = []
|
||||
max_scenarios = self._settings.MAX_SCENARIOS_PER_STOCK
|
||||
for stock_data in data.get("stocks", []):
|
||||
code = stock_data.get("stock_code", "")
|
||||
if code not in valid_codes:
|
||||
logger.warning("Gemini returned unknown stock %s — skipping", code)
|
||||
continue
|
||||
|
||||
scenarios = []
|
||||
for sc_data in stock_data.get("scenarios", [])[:max_scenarios]:
|
||||
scenario = self._parse_scenario(sc_data)
|
||||
if scenario:
|
||||
scenarios.append(scenario)
|
||||
|
||||
if scenarios:
|
||||
stock_playbooks.append(
|
||||
StockPlaybook(
|
||||
stock_code=code,
|
||||
scenarios=scenarios,
|
||||
)
|
||||
)
|
||||
|
||||
return DayPlaybook(
|
||||
date=today,
|
||||
market=market,
|
||||
market_outlook=market_outlook,
|
||||
global_rules=global_rules,
|
||||
stock_playbooks=stock_playbooks,
|
||||
cross_market=cross_market,
|
||||
)
|
||||
|
||||
def _parse_scenario(self, sc_data: dict) -> StockScenario | None:
|
||||
"""Parse a single scenario from JSON data. Returns None if invalid."""
|
||||
try:
|
||||
cond_data = sc_data.get("condition", {})
|
||||
condition = StockCondition(
|
||||
rsi_below=cond_data.get("rsi_below"),
|
||||
rsi_above=cond_data.get("rsi_above"),
|
||||
volume_ratio_above=cond_data.get("volume_ratio_above"),
|
||||
volume_ratio_below=cond_data.get("volume_ratio_below"),
|
||||
price_above=cond_data.get("price_above"),
|
||||
price_below=cond_data.get("price_below"),
|
||||
price_change_pct_above=cond_data.get("price_change_pct_above"),
|
||||
price_change_pct_below=cond_data.get("price_change_pct_below"),
|
||||
)
|
||||
|
||||
if not condition.has_any_condition():
|
||||
logger.warning("Scenario has no conditions — skipping")
|
||||
return None
|
||||
|
||||
action_str = sc_data.get("action", "HOLD")
|
||||
action = _ACTION_MAP.get(action_str, ScenarioAction.HOLD)
|
||||
|
||||
return StockScenario(
|
||||
condition=condition,
|
||||
action=action,
|
||||
confidence=int(sc_data.get("confidence", 50)),
|
||||
allocation_pct=float(sc_data.get("allocation_pct", 10.0)),
|
||||
stop_loss_pct=float(sc_data.get("stop_loss_pct", -2.0)),
|
||||
take_profit_pct=float(sc_data.get("take_profit_pct", 3.0)),
|
||||
rationale=sc_data.get("rationale", ""),
|
||||
)
|
||||
except (ValueError, TypeError) as e:
|
||||
logger.warning("Failed to parse scenario: %s", e)
|
||||
return None
|
||||
|
||||
@staticmethod
|
||||
def _extract_json(text: str) -> str:
|
||||
"""Extract JSON from response, stripping markdown fences if present."""
|
||||
stripped = text.strip()
|
||||
if stripped.startswith("```"):
|
||||
# Remove first line (```json or ```) and last line (```)
|
||||
lines = stripped.split("\n")
|
||||
lines = lines[1:] # Remove opening fence
|
||||
if lines and lines[-1].strip() == "```":
|
||||
lines = lines[:-1]
|
||||
stripped = "\n".join(lines)
|
||||
return stripped.strip()
|
||||
|
||||
@staticmethod
|
||||
def _empty_playbook(today: date, market: str) -> DayPlaybook:
|
||||
"""Return an empty playbook (no stocks, no scenarios)."""
|
||||
return DayPlaybook(
|
||||
date=today,
|
||||
market=market,
|
||||
market_outlook=MarketOutlook.NEUTRAL,
|
||||
stock_playbooks=[],
|
||||
)
|
||||
|
||||
@staticmethod
|
||||
def _defensive_playbook(
|
||||
today: date,
|
||||
market: str,
|
||||
candidates: list[ScanCandidate],
|
||||
) -> DayPlaybook:
|
||||
"""Return a defensive playbook — HOLD everything with stop-loss ready."""
|
||||
stock_playbooks = [
|
||||
StockPlaybook(
|
||||
stock_code=c.stock_code,
|
||||
scenarios=[
|
||||
StockScenario(
|
||||
condition=StockCondition(price_change_pct_below=-3.0),
|
||||
action=ScenarioAction.SELL,
|
||||
confidence=90,
|
||||
stop_loss_pct=-3.0,
|
||||
rationale="Defensive stop-loss (planner failure)",
|
||||
),
|
||||
],
|
||||
)
|
||||
for c in candidates
|
||||
]
|
||||
return DayPlaybook(
|
||||
date=today,
|
||||
market=market,
|
||||
market_outlook=MarketOutlook.NEUTRAL_TO_BEARISH,
|
||||
default_action=ScenarioAction.HOLD,
|
||||
stock_playbooks=stock_playbooks,
|
||||
global_rules=[
|
||||
GlobalRule(
|
||||
condition="portfolio_pnl_pct < -2.0",
|
||||
action=ScenarioAction.REDUCE_ALL,
|
||||
rationale="Defensive: reduce on loss threshold",
|
||||
),
|
||||
],
|
||||
)
|
||||
270
src/strategy/scenario_engine.py
Normal file
@@ -0,0 +1,270 @@
|
||||
"""Local scenario engine for playbook execution.
|
||||
|
||||
Matches real-time market conditions against pre-defined scenarios
|
||||
without any API calls. Designed for sub-100ms execution.
|
||||
"""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
import logging
|
||||
from dataclasses import dataclass, field
|
||||
from typing import Any
|
||||
|
||||
from src.strategy.models import (
|
||||
DayPlaybook,
|
||||
GlobalRule,
|
||||
ScenarioAction,
|
||||
StockCondition,
|
||||
StockScenario,
|
||||
)
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
@dataclass
|
||||
class ScenarioMatch:
|
||||
"""Result of matching market conditions against scenarios."""
|
||||
|
||||
stock_code: str
|
||||
matched_scenario: StockScenario | None
|
||||
action: ScenarioAction
|
||||
confidence: int
|
||||
rationale: str
|
||||
global_rule_triggered: GlobalRule | None = None
|
||||
match_details: dict[str, Any] = field(default_factory=dict)
|
||||
|
||||
|
||||
class ScenarioEngine:
|
||||
"""Evaluates playbook scenarios against real-time market data.
|
||||
|
||||
No API calls — pure Python condition matching.
|
||||
|
||||
Expected market_data keys: "rsi", "volume_ratio", "current_price", "price_change_pct".
|
||||
Callers must normalize data source keys to match this contract.
|
||||
"""
|
||||
|
||||
def __init__(self) -> None:
|
||||
self._warned_keys: set[str] = set()
|
||||
|
||||
@staticmethod
|
||||
def _safe_float(value: Any) -> float | None:
|
||||
"""Safely cast a value to float. Returns None on failure."""
|
||||
if value is None:
|
||||
return None
|
||||
try:
|
||||
return float(value)
|
||||
except (ValueError, TypeError):
|
||||
return None
|
||||
|
||||
def _warn_missing_key(self, key: str) -> None:
|
||||
"""Log a missing-key warning once per key per engine instance."""
|
||||
if key not in self._warned_keys:
|
||||
self._warned_keys.add(key)
|
||||
logger.warning("Condition requires '%s' but key missing from market_data", key)
|
||||
|
||||
def evaluate(
|
||||
self,
|
||||
playbook: DayPlaybook,
|
||||
stock_code: str,
|
||||
market_data: dict[str, Any],
|
||||
portfolio_data: dict[str, Any],
|
||||
) -> ScenarioMatch:
|
||||
"""Match market conditions to scenarios and return a decision.
|
||||
|
||||
Algorithm:
|
||||
1. Check global rules first (portfolio-level circuit breakers)
|
||||
2. Find the StockPlaybook for the given stock_code
|
||||
3. Iterate scenarios in order (first match wins)
|
||||
4. If no match, return playbook.default_action (HOLD)
|
||||
|
||||
Args:
|
||||
playbook: Today's DayPlaybook for this market
|
||||
stock_code: Stock ticker to evaluate
|
||||
market_data: Real-time market data (price, rsi, volume_ratio, etc.)
|
||||
portfolio_data: Portfolio state (pnl_pct, total_cash, etc.)
|
||||
|
||||
Returns:
|
||||
ScenarioMatch with the decision
|
||||
"""
|
||||
# 1. Check global rules
|
||||
triggered_rule = self.check_global_rules(playbook, portfolio_data)
|
||||
if triggered_rule is not None:
|
||||
logger.info(
|
||||
"Global rule triggered for %s: %s -> %s",
|
||||
stock_code,
|
||||
triggered_rule.condition,
|
||||
triggered_rule.action.value,
|
||||
)
|
||||
return ScenarioMatch(
|
||||
stock_code=stock_code,
|
||||
matched_scenario=None,
|
||||
action=triggered_rule.action,
|
||||
confidence=100,
|
||||
rationale=f"Global rule: {triggered_rule.rationale or triggered_rule.condition}",
|
||||
global_rule_triggered=triggered_rule,
|
||||
)
|
||||
|
||||
# 2. Find stock playbook
|
||||
stock_pb = playbook.get_stock_playbook(stock_code)
|
||||
if stock_pb is None:
|
||||
logger.debug("No playbook for %s — defaulting to %s", stock_code, playbook.default_action)
|
||||
return ScenarioMatch(
|
||||
stock_code=stock_code,
|
||||
matched_scenario=None,
|
||||
action=playbook.default_action,
|
||||
confidence=0,
|
||||
rationale=f"No scenarios defined for {stock_code}",
|
||||
)
|
||||
|
||||
# 3. Iterate scenarios (first match wins)
|
||||
for scenario in stock_pb.scenarios:
|
||||
if self.evaluate_condition(scenario.condition, market_data):
|
||||
logger.info(
|
||||
"Scenario matched for %s: %s (confidence=%d)",
|
||||
stock_code,
|
||||
scenario.action.value,
|
||||
scenario.confidence,
|
||||
)
|
||||
return ScenarioMatch(
|
||||
stock_code=stock_code,
|
||||
matched_scenario=scenario,
|
||||
action=scenario.action,
|
||||
confidence=scenario.confidence,
|
||||
rationale=scenario.rationale,
|
||||
match_details=self._build_match_details(scenario.condition, market_data),
|
||||
)
|
||||
|
||||
# 4. No match — default action
|
||||
logger.debug("No scenario matched for %s — defaulting to %s", stock_code, playbook.default_action)
|
||||
return ScenarioMatch(
|
||||
stock_code=stock_code,
|
||||
matched_scenario=None,
|
||||
action=playbook.default_action,
|
||||
confidence=0,
|
||||
rationale="No scenario conditions met — holding position",
|
||||
)
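# Illustrative usage (input values are hypothetical): the caller normalizes its data
# source keys to the contract above, then acts on the returned ScenarioMatch.
#
#     match = engine.evaluate(
#         playbook,
#         stock_code="005930",
#         market_data={"rsi": 27.4, "volume_ratio": 2.3,
#                      "current_price": 69800, "price_change_pct": -1.2},
#         portfolio_data={"portfolio_pnl_pct": -0.5},
#     )
#     if match.action is ScenarioAction.BUY and match.confidence >= 80:
#         ...  # size the order from match.matched_scenario.allocation_pct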
|
||||
|
||||
def check_global_rules(
|
||||
self,
|
||||
playbook: DayPlaybook,
|
||||
portfolio_data: dict[str, Any],
|
||||
) -> GlobalRule | None:
|
||||
"""Check portfolio-level rules. Returns first triggered rule or None."""
|
||||
for rule in playbook.global_rules:
|
||||
if self._evaluate_global_condition(rule.condition, portfolio_data):
|
||||
return rule
|
||||
return None
|
||||
|
||||
def evaluate_condition(
|
||||
self,
|
||||
condition: StockCondition,
|
||||
market_data: dict[str, Any],
|
||||
) -> bool:
|
||||
"""Evaluate all non-None fields in condition as AND.
|
||||
|
||||
Returns True only if ALL specified conditions are met.
|
||||
Empty condition (no fields set) returns False for safety.
|
||||
"""
|
||||
if not condition.has_any_condition():
|
||||
return False
|
||||
|
||||
checks: list[bool] = []
|
||||
|
||||
rsi = self._safe_float(market_data.get("rsi"))
|
||||
if condition.rsi_below is not None or condition.rsi_above is not None:
|
||||
if "rsi" not in market_data:
|
||||
self._warn_missing_key("rsi")
|
||||
if condition.rsi_below is not None:
|
||||
checks.append(rsi is not None and rsi < condition.rsi_below)
|
||||
if condition.rsi_above is not None:
|
||||
checks.append(rsi is not None and rsi > condition.rsi_above)
|
||||
|
||||
volume_ratio = self._safe_float(market_data.get("volume_ratio"))
|
||||
if condition.volume_ratio_above is not None or condition.volume_ratio_below is not None:
|
||||
if "volume_ratio" not in market_data:
|
||||
self._warn_missing_key("volume_ratio")
|
||||
if condition.volume_ratio_above is not None:
|
||||
checks.append(volume_ratio is not None and volume_ratio > condition.volume_ratio_above)
|
||||
if condition.volume_ratio_below is not None:
|
||||
checks.append(volume_ratio is not None and volume_ratio < condition.volume_ratio_below)
|
||||
|
||||
price = self._safe_float(market_data.get("current_price"))
|
||||
if condition.price_above is not None or condition.price_below is not None:
|
||||
if "current_price" not in market_data:
|
||||
self._warn_missing_key("current_price")
|
||||
if condition.price_above is not None:
|
||||
checks.append(price is not None and price > condition.price_above)
|
||||
if condition.price_below is not None:
|
||||
checks.append(price is not None and price < condition.price_below)
|
||||
|
||||
price_change_pct = self._safe_float(market_data.get("price_change_pct"))
|
||||
if condition.price_change_pct_above is not None or condition.price_change_pct_below is not None:
|
||||
if "price_change_pct" not in market_data:
|
||||
self._warn_missing_key("price_change_pct")
|
||||
if condition.price_change_pct_above is not None:
|
||||
checks.append(price_change_pct is not None and price_change_pct > condition.price_change_pct_above)
|
||||
if condition.price_change_pct_below is not None:
|
||||
checks.append(price_change_pct is not None and price_change_pct < condition.price_change_pct_below)
|
||||
|
||||
return len(checks) > 0 and all(checks)
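# Illustrative example: for condition = StockCondition(rsi_below=30, volume_ratio_above=2.0)
#   {"rsi": 25, "volume_ratio": 2.5} -> True  (both AND-ed checks pass)
#   {"rsi": 25}                      -> False (volume_ratio is missing, so its check
#                                      fails and a one-time warning is logged)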
|
||||
|
||||
def _evaluate_global_condition(
|
||||
self,
|
||||
condition_str: str,
|
||||
portfolio_data: dict[str, Any],
|
||||
) -> bool:
|
||||
"""Evaluate a simple global condition string against portfolio data.
|
||||
|
||||
Supports: "field < value", "field > value", "field <= value", "field >= value"
|
||||
"""
|
||||
parts = condition_str.strip().split()
|
||||
if len(parts) != 3:
|
||||
logger.warning("Invalid global condition format: %s", condition_str)
|
||||
return False
|
||||
|
||||
field_name, operator, value_str = parts
|
||||
try:
|
||||
threshold = float(value_str)
|
||||
except ValueError:
|
||||
logger.warning("Invalid threshold in condition: %s", condition_str)
|
||||
return False
|
||||
|
||||
actual = portfolio_data.get(field_name)
|
||||
if actual is None:
|
||||
return False
|
||||
|
||||
try:
|
||||
actual_val = float(actual)
|
||||
except (ValueError, TypeError):
|
||||
return False
|
||||
|
||||
if operator == "<":
|
||||
return actual_val < threshold
|
||||
elif operator == ">":
|
||||
return actual_val > threshold
|
||||
elif operator == "<=":
|
||||
return actual_val <= threshold
|
||||
elif operator == ">=":
|
||||
return actual_val >= threshold
|
||||
else:
|
||||
logger.warning("Unknown operator in condition: %s", operator)
|
||||
return False
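# Illustrative example: the rule string "portfolio_pnl_pct < -2.0" splits into
# ("portfolio_pnl_pct", "<", -2.0); with portfolio_data {"portfolio_pnl_pct": -2.5}
# this returns True, which triggers the rule's REDUCE_ALL action upstream.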
|
||||
|
||||
def _build_match_details(
|
||||
self,
|
||||
condition: StockCondition,
|
||||
market_data: dict[str, Any],
|
||||
) -> dict[str, Any]:
|
||||
"""Build a summary of which conditions matched and their normalized values."""
|
||||
details: dict[str, Any] = {}
|
||||
|
||||
if condition.rsi_below is not None or condition.rsi_above is not None:
|
||||
details["rsi"] = self._safe_float(market_data.get("rsi"))
|
||||
if condition.volume_ratio_above is not None or condition.volume_ratio_below is not None:
|
||||
details["volume_ratio"] = self._safe_float(market_data.get("volume_ratio"))
|
||||
if condition.price_above is not None or condition.price_below is not None:
|
||||
details["current_price"] = self._safe_float(market_data.get("current_price"))
|
||||
if condition.price_change_pct_above is not None or condition.price_change_pct_below is not None:
|
||||
details["price_change_pct"] = self._safe_float(market_data.get("price_change_pct"))
|
||||
|
||||
return details
|
||||
@@ -152,3 +152,121 @@ class TestPromptConstruction:
|
||||
assert "JSON" in prompt
|
||||
assert "action" in prompt
|
||||
assert "confidence" in prompt
|
||||
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# Batch Decision Making
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
|
||||
class TestBatchDecisionParsing:
|
||||
"""Batch response parser must handle JSON arrays correctly."""
|
||||
|
||||
def test_parse_valid_batch_response(self, settings):
|
||||
client = GeminiClient(settings)
|
||||
stocks_data = [
|
||||
{"stock_code": "AAPL", "current_price": 185.5},
|
||||
{"stock_code": "MSFT", "current_price": 420.0},
|
||||
]
|
||||
raw = """[
|
||||
{"code": "AAPL", "action": "BUY", "confidence": 85, "rationale": "Strong momentum"},
|
||||
{"code": "MSFT", "action": "HOLD", "confidence": 50, "rationale": "Wait for earnings"}
|
||||
]"""
|
||||
|
||||
decisions = client._parse_batch_response(raw, stocks_data, token_count=100)
|
||||
|
||||
assert len(decisions) == 2
|
||||
assert decisions["AAPL"].action == "BUY"
|
||||
assert decisions["AAPL"].confidence == 85
|
||||
assert decisions["MSFT"].action == "HOLD"
|
||||
assert decisions["MSFT"].confidence == 50
|
||||
|
||||
def test_parse_batch_with_markdown_wrapper(self, settings):
|
||||
client = GeminiClient(settings)
|
||||
stocks_data = [{"stock_code": "AAPL", "current_price": 185.5}]
|
||||
raw = """```json
|
||||
[{"code": "AAPL", "action": "BUY", "confidence": 90, "rationale": "Good"}]
|
||||
```"""
|
||||
|
||||
decisions = client._parse_batch_response(raw, stocks_data, token_count=100)
|
||||
|
||||
assert decisions["AAPL"].action == "BUY"
|
||||
assert decisions["AAPL"].confidence == 90
|
||||
|
||||
def test_parse_batch_empty_response_returns_hold_for_all(self, settings):
|
||||
client = GeminiClient(settings)
|
||||
stocks_data = [
|
||||
{"stock_code": "AAPL", "current_price": 185.5},
|
||||
{"stock_code": "MSFT", "current_price": 420.0},
|
||||
]
|
||||
|
||||
decisions = client._parse_batch_response("", stocks_data, token_count=100)
|
||||
|
||||
assert len(decisions) == 2
|
||||
assert decisions["AAPL"].action == "HOLD"
|
||||
assert decisions["AAPL"].confidence == 0
|
||||
assert decisions["MSFT"].action == "HOLD"
|
||||
|
||||
def test_parse_batch_malformed_json_returns_hold_for_all(self, settings):
|
||||
client = GeminiClient(settings)
|
||||
stocks_data = [{"stock_code": "AAPL", "current_price": 185.5}]
|
||||
raw = "This is not JSON"
|
||||
|
||||
decisions = client._parse_batch_response(raw, stocks_data, token_count=100)
|
||||
|
||||
assert decisions["AAPL"].action == "HOLD"
|
||||
assert decisions["AAPL"].confidence == 0
|
||||
|
||||
def test_parse_batch_not_array_returns_hold_for_all(self, settings):
|
||||
client = GeminiClient(settings)
|
||||
stocks_data = [{"stock_code": "AAPL", "current_price": 185.5}]
|
||||
raw = '{"code": "AAPL", "action": "BUY", "confidence": 90, "rationale": "Good"}'
|
||||
|
||||
decisions = client._parse_batch_response(raw, stocks_data, token_count=100)
|
||||
|
||||
assert decisions["AAPL"].action == "HOLD"
|
||||
assert decisions["AAPL"].confidence == 0
|
||||
|
||||
def test_parse_batch_missing_stock_gets_hold(self, settings):
|
||||
client = GeminiClient(settings)
|
||||
stocks_data = [
|
||||
{"stock_code": "AAPL", "current_price": 185.5},
|
||||
{"stock_code": "MSFT", "current_price": 420.0},
|
||||
]
|
||||
# Response only has AAPL, MSFT is missing
|
||||
raw = '[{"code": "AAPL", "action": "BUY", "confidence": 85, "rationale": "Good"}]'
|
||||
|
||||
decisions = client._parse_batch_response(raw, stocks_data, token_count=100)
|
||||
|
||||
assert decisions["AAPL"].action == "BUY"
|
||||
assert decisions["MSFT"].action == "HOLD"
|
||||
assert decisions["MSFT"].confidence == 0
|
||||
|
||||
def test_parse_batch_invalid_action_becomes_hold(self, settings):
|
||||
client = GeminiClient(settings)
|
||||
stocks_data = [{"stock_code": "AAPL", "current_price": 185.5}]
|
||||
raw = '[{"code": "AAPL", "action": "YOLO", "confidence": 90, "rationale": "Moon"}]'
|
||||
|
||||
decisions = client._parse_batch_response(raw, stocks_data, token_count=100)
|
||||
|
||||
assert decisions["AAPL"].action == "HOLD"
|
||||
|
||||
def test_parse_batch_low_confidence_becomes_hold(self, settings):
|
||||
client = GeminiClient(settings)
|
||||
stocks_data = [{"stock_code": "AAPL", "current_price": 185.5}]
|
||||
raw = '[{"code": "AAPL", "action": "BUY", "confidence": 65, "rationale": "Weak"}]'
|
||||
|
||||
decisions = client._parse_batch_response(raw, stocks_data, token_count=100)
|
||||
|
||||
assert decisions["AAPL"].action == "HOLD"
|
||||
assert decisions["AAPL"].confidence == 65
|
||||
|
||||
def test_parse_batch_missing_fields_gets_hold(self, settings):
|
||||
client = GeminiClient(settings)
|
||||
stocks_data = [{"stock_code": "AAPL", "current_price": 185.5}]
|
||||
raw = '[{"code": "AAPL", "action": "BUY"}]' # Missing confidence and rationale
|
||||
|
||||
decisions = client._parse_batch_response(raw, stocks_data, token_count=100)
|
||||
|
||||
assert decisions["AAPL"].action == "HOLD"
|
||||
assert decisions["AAPL"].confidence == 0
|
||||
|
||||
@@ -90,12 +90,12 @@ class TestTokenManagement:
|
||||
await broker.close()
|
||||
|
||||
@pytest.mark.asyncio
|
||||
async def test_token_refresh_cooldown_prevents_rapid_retries(self, settings):
|
||||
"""Token refresh should enforce cooldown after failure (issue #54)."""
|
||||
async def test_token_refresh_cooldown_waits_then_retries(self, settings):
|
||||
"""Token refresh should wait out cooldown then retry (issue #54)."""
|
||||
broker = KISBroker(settings)
|
||||
broker._refresh_cooldown = 2.0 # Short cooldown for testing
|
||||
broker._refresh_cooldown = 0.1 # Short cooldown for testing
|
||||
|
||||
# First refresh attempt fails with 403 (EGW00133)
|
||||
# All attempts fail with 403 (EGW00133)
|
||||
mock_resp_403 = AsyncMock()
|
||||
mock_resp_403.status = 403
|
||||
mock_resp_403.text = AsyncMock(
|
||||
@@ -109,8 +109,8 @@ class TestTokenManagement:
|
||||
with pytest.raises(ConnectionError, match="Token refresh failed"):
|
||||
await broker._ensure_token()
|
||||
|
||||
# Second attempt within cooldown should fail with cooldown error
|
||||
with pytest.raises(ConnectionError, match="Token refresh on cooldown"):
|
||||
# Second attempt within cooldown should wait then retry (and still get 403)
|
||||
with pytest.raises(ConnectionError, match="Token refresh failed"):
|
||||
await broker._ensure_token()
|
||||
|
||||
await broker.close()
|
||||
@@ -211,6 +211,38 @@ class TestRateLimiter:
|
||||
await broker._rate_limiter.acquire()
|
||||
await broker.close()
|
||||
|
||||
@pytest.mark.asyncio
|
||||
async def test_send_order_acquires_rate_limiter_twice(self, settings):
|
||||
"""send_order must acquire rate limiter for both hash key and order call."""
|
||||
broker = KISBroker(settings)
|
||||
broker._access_token = "tok"
|
||||
broker._token_expires_at = asyncio.get_event_loop().time() + 3600
|
||||
|
||||
# Mock hash key response
|
||||
mock_hash_resp = AsyncMock()
|
||||
mock_hash_resp.status = 200
|
||||
mock_hash_resp.json = AsyncMock(return_value={"HASH": "abc123"})
|
||||
mock_hash_resp.__aenter__ = AsyncMock(return_value=mock_hash_resp)
|
||||
mock_hash_resp.__aexit__ = AsyncMock(return_value=False)
|
||||
|
||||
# Mock order response
|
||||
mock_order_resp = AsyncMock()
|
||||
mock_order_resp.status = 200
|
||||
mock_order_resp.json = AsyncMock(return_value={"rt_cd": "0"})
|
||||
mock_order_resp.__aenter__ = AsyncMock(return_value=mock_order_resp)
|
||||
mock_order_resp.__aexit__ = AsyncMock(return_value=False)
|
||||
|
||||
with patch(
|
||||
"aiohttp.ClientSession.post", side_effect=[mock_hash_resp, mock_order_resp]
|
||||
):
|
||||
with patch.object(
|
||||
broker._rate_limiter, "acquire", new_callable=AsyncMock
|
||||
) as mock_acquire:
|
||||
await broker.send_order("005930", "BUY", 1, 50000)
|
||||
assert mock_acquire.call_count == 2
|
||||
|
||||
await broker.close()
|
||||
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# Hash Key Generation
|
||||
@@ -240,3 +272,27 @@ class TestHashKey:
|
||||
assert len(hash_key) > 0
|
||||
|
||||
await broker.close()
|
||||
|
||||
@pytest.mark.asyncio
|
||||
async def test_hash_key_acquires_rate_limiter(self, settings):
|
||||
"""_get_hash_key must go through the rate limiter to prevent burst."""
|
||||
broker = KISBroker(settings)
|
||||
broker._access_token = "tok"
|
||||
broker._token_expires_at = asyncio.get_event_loop().time() + 3600
|
||||
|
||||
body = {"CANO": "12345678", "ACNT_PRDT_CD": "01"}
|
||||
|
||||
mock_resp = AsyncMock()
|
||||
mock_resp.status = 200
|
||||
mock_resp.json = AsyncMock(return_value={"HASH": "abc123hash"})
|
||||
mock_resp.__aenter__ = AsyncMock(return_value=mock_resp)
|
||||
mock_resp.__aexit__ = AsyncMock(return_value=False)
|
||||
|
||||
with patch("aiohttp.ClientSession.post", return_value=mock_resp):
|
||||
with patch.object(
|
||||
broker._rate_limiter, "acquire", new_callable=AsyncMock
|
||||
) as mock_acquire:
|
||||
await broker._get_hash_key(body)
|
||||
mock_acquire.assert_called_once()
|
||||
|
||||
await broker.close()
|
||||
|
||||
@@ -161,7 +161,7 @@ class TestContextAggregator:
|
||||
self, aggregator: ContextAggregator, db_conn: sqlite3.Connection
|
||||
) -> None:
|
||||
"""Test aggregating daily metrics from trades."""
|
||||
date = "2026-02-04"
|
||||
date = datetime.now(UTC).date().isoformat()
|
||||
|
||||
# Create sample trades
|
||||
log_trade(db_conn, "005930", "BUY", 85, "Good signal", quantity=10, price=70000, pnl=500)
|
||||
@@ -175,36 +175,44 @@ class TestContextAggregator:
|
||||
db_conn.commit()
|
||||
|
||||
# Aggregate
|
||||
aggregator.aggregate_daily_from_trades(date)
|
||||
aggregator.aggregate_daily_from_trades(date, market="KR")
|
||||
|
||||
# Verify L6 contexts
|
||||
store = aggregator.store
|
||||
assert store.get_context(ContextLayer.L6_DAILY, date, "trade_count") == 3
|
||||
assert store.get_context(ContextLayer.L6_DAILY, date, "buys") == 1
|
||||
assert store.get_context(ContextLayer.L6_DAILY, date, "sells") == 1
|
||||
assert store.get_context(ContextLayer.L6_DAILY, date, "holds") == 1
|
||||
assert store.get_context(ContextLayer.L6_DAILY, date, "total_pnl") == 2000.0
|
||||
assert store.get_context(ContextLayer.L6_DAILY, date, "unique_stocks") == 3
|
||||
assert store.get_context(ContextLayer.L6_DAILY, date, "trade_count_KR") == 3
|
||||
assert store.get_context(ContextLayer.L6_DAILY, date, "buys_KR") == 1
|
||||
assert store.get_context(ContextLayer.L6_DAILY, date, "sells_KR") == 1
|
||||
assert store.get_context(ContextLayer.L6_DAILY, date, "holds_KR") == 1
|
||||
assert store.get_context(ContextLayer.L6_DAILY, date, "total_pnl_KR") == 2000.0
|
||||
assert store.get_context(ContextLayer.L6_DAILY, date, "unique_stocks_KR") == 3
|
||||
# 2 wins, 0 losses
|
||||
assert store.get_context(ContextLayer.L6_DAILY, date, "win_rate") == 100.0
|
||||
assert store.get_context(ContextLayer.L6_DAILY, date, "win_rate_KR") == 100.0
|
||||
|
||||
def test_aggregate_weekly_from_daily(self, aggregator: ContextAggregator) -> None:
|
||||
"""Test aggregating weekly metrics from daily."""
|
||||
week = "2026-W06"
|
||||
|
||||
# Set daily contexts
|
||||
aggregator.store.set_context(ContextLayer.L6_DAILY, "2026-02-02", "total_pnl", 100.0)
|
||||
aggregator.store.set_context(ContextLayer.L6_DAILY, "2026-02-03", "total_pnl", 200.0)
|
||||
aggregator.store.set_context(ContextLayer.L6_DAILY, "2026-02-02", "avg_confidence", 80.0)
|
||||
aggregator.store.set_context(ContextLayer.L6_DAILY, "2026-02-03", "avg_confidence", 85.0)
|
||||
aggregator.store.set_context(
|
||||
ContextLayer.L6_DAILY, "2026-02-02", "total_pnl_KR", 100.0
|
||||
)
|
||||
aggregator.store.set_context(
|
||||
ContextLayer.L6_DAILY, "2026-02-03", "total_pnl_KR", 200.0
|
||||
)
|
||||
aggregator.store.set_context(
|
||||
ContextLayer.L6_DAILY, "2026-02-02", "avg_confidence_KR", 80.0
|
||||
)
|
||||
aggregator.store.set_context(
|
||||
ContextLayer.L6_DAILY, "2026-02-03", "avg_confidence_KR", 85.0
|
||||
)
|
||||
|
||||
# Aggregate
|
||||
aggregator.aggregate_weekly_from_daily(week)
|
||||
|
||||
# Verify L5 contexts
|
||||
store = aggregator.store
|
||||
weekly_pnl = store.get_context(ContextLayer.L5_WEEKLY, week, "weekly_pnl")
|
||||
avg_conf = store.get_context(ContextLayer.L5_WEEKLY, week, "avg_confidence")
|
||||
weekly_pnl = store.get_context(ContextLayer.L5_WEEKLY, week, "weekly_pnl_KR")
|
||||
avg_conf = store.get_context(ContextLayer.L5_WEEKLY, week, "avg_confidence_KR")
|
||||
|
||||
assert weekly_pnl == 300.0
|
||||
assert avg_conf == 82.5
|
||||
@@ -214,9 +222,15 @@ class TestContextAggregator:
|
||||
month = "2026-02"
|
||||
|
||||
# Set weekly contexts
|
||||
aggregator.store.set_context(ContextLayer.L5_WEEKLY, "2026-W05", "weekly_pnl", 100.0)
|
||||
aggregator.store.set_context(ContextLayer.L5_WEEKLY, "2026-W06", "weekly_pnl", 200.0)
|
||||
aggregator.store.set_context(ContextLayer.L5_WEEKLY, "2026-W07", "weekly_pnl", 150.0)
|
||||
aggregator.store.set_context(
|
||||
ContextLayer.L5_WEEKLY, "2026-W05", "weekly_pnl_KR", 100.0
|
||||
)
|
||||
aggregator.store.set_context(
|
||||
ContextLayer.L5_WEEKLY, "2026-W06", "weekly_pnl_KR", 200.0
|
||||
)
|
||||
aggregator.store.set_context(
|
||||
ContextLayer.L5_WEEKLY, "2026-W07", "weekly_pnl_KR", 150.0
|
||||
)
|
||||
|
||||
# Aggregate
|
||||
aggregator.aggregate_monthly_from_weekly(month)
|
||||
@@ -285,7 +299,7 @@ class TestContextAggregator:
|
||||
self, aggregator: ContextAggregator, db_conn: sqlite3.Connection
|
||||
) -> None:
|
||||
"""Test running all aggregations from L7 to L1."""
|
||||
date = "2026-02-04"
|
||||
date = datetime.now(UTC).date().isoformat()
|
||||
|
||||
# Create sample trades
|
||||
log_trade(db_conn, "005930", "BUY", 85, "Good signal", quantity=10, price=70000, pnl=1000)
|
||||
@@ -299,10 +313,18 @@ class TestContextAggregator:
|
||||
|
||||
# Verify data exists in each layer
|
||||
store = aggregator.store
|
||||
assert store.get_context(ContextLayer.L6_DAILY, date, "total_pnl") == 1000.0
|
||||
current_week = datetime.now(UTC).strftime("%Y-W%V")
|
||||
assert store.get_context(ContextLayer.L5_WEEKLY, current_week, "weekly_pnl") is not None
|
||||
# Further layers depend on time alignment, just verify no crashes
|
||||
assert store.get_context(ContextLayer.L6_DAILY, date, "total_pnl_KR") == 1000.0
|
||||
from datetime import date as date_cls
|
||||
trade_date = date_cls.fromisoformat(date)
|
||||
iso_year, iso_week, _ = trade_date.isocalendar()
|
||||
trade_week = f"{iso_year}-W{iso_week:02d}"
|
||||
assert store.get_context(ContextLayer.L5_WEEKLY, trade_week, "weekly_pnl_KR") is not None
|
||||
trade_month = f"{trade_date.year}-{trade_date.month:02d}"
|
||||
trade_quarter = f"{trade_date.year}-Q{(trade_date.month - 1) // 3 + 1}"
|
||||
trade_year = str(trade_date.year)
|
||||
assert store.get_context(ContextLayer.L4_MONTHLY, trade_month, "monthly_pnl") == 1000.0
|
||||
assert store.get_context(ContextLayer.L3_QUARTERLY, trade_quarter, "quarterly_pnl") == 1000.0
|
||||
assert store.get_context(ContextLayer.L2_ANNUAL, trade_year, "annual_pnl") == 1000.0
|
||||
|
||||
|
||||
class TestLayerMetadata:
|
||||
|
||||
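The ContextAggregator assertions above derive the L5-L2 timeframe labels (ISO week, month, quarter, year) inline from the trade date. Below is a minimal standalone sketch of those key formats; only the label shapes (YYYY-Www, YYYY-MM, YYYY-Qn, YYYY) come from the assertions above, and wrapping them in a helper function is purely illustrative.

```python
# Illustrative helper; only the key formats are taken from the test assertions above.
from datetime import date


def timeframe_keys(d: date) -> dict[str, str]:
    iso_year, iso_week, _ = d.isocalendar()
    return {
        "week": f"{iso_year}-W{iso_week:02d}",
        "month": f"{d.year}-{d.month:02d}",
        "quarter": f"{d.year}-Q{(d.month - 1) // 3 + 1}",
        "year": str(d.year),
    }


# 2026-02-04 falls in ISO week 6 of 2026
assert timeframe_keys(date(2026, 2, 4)) == {
    "week": "2026-W06",
    "month": "2026-02",
    "quarter": "2026-Q1",
    "year": "2026",
}
```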
tests/test_context_scheduler.py (new file, 104 lines)
@@ -0,0 +1,104 @@
|
||||
"""Tests for ContextScheduler."""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
from dataclasses import dataclass
|
||||
from datetime import UTC, datetime
|
||||
|
||||
from src.context.scheduler import ContextScheduler
|
||||
|
||||
|
||||
@dataclass
|
||||
class StubAggregator:
|
||||
"""Stub aggregator that records calls."""
|
||||
|
||||
weekly_calls: list[str]
|
||||
monthly_calls: list[str]
|
||||
quarterly_calls: list[str]
|
||||
annual_calls: list[str]
|
||||
legacy_calls: int
|
||||
|
||||
def aggregate_weekly_from_daily(self, week: str) -> None:
|
||||
self.weekly_calls.append(week)
|
||||
|
||||
def aggregate_monthly_from_weekly(self, month: str) -> None:
|
||||
self.monthly_calls.append(month)
|
||||
|
||||
def aggregate_quarterly_from_monthly(self, quarter: str) -> None:
|
||||
self.quarterly_calls.append(quarter)
|
||||
|
||||
def aggregate_annual_from_quarterly(self, year: str) -> None:
|
||||
self.annual_calls.append(year)
|
||||
|
||||
def aggregate_legacy_from_annual(self) -> None:
|
||||
self.legacy_calls += 1
|
||||
|
||||
|
||||
@dataclass
|
||||
class StubStore:
|
||||
"""Stub store that records cleanup calls."""
|
||||
|
||||
cleanup_calls: int = 0
|
||||
|
||||
def cleanup_expired_contexts(self) -> None:
|
||||
self.cleanup_calls += 1
|
||||
|
||||
|
||||
def make_scheduler() -> tuple[ContextScheduler, StubAggregator, StubStore]:
|
||||
aggregator = StubAggregator([], [], [], [], 0)
|
||||
store = StubStore()
|
||||
scheduler = ContextScheduler(aggregator=aggregator, store=store)
|
||||
return scheduler, aggregator, store
|
||||
|
||||
|
||||
def test_run_if_due_weekly() -> None:
|
||||
scheduler, aggregator, store = make_scheduler()
|
||||
now = datetime(2026, 2, 8, 10, 0, tzinfo=UTC) # Sunday
|
||||
|
||||
result = scheduler.run_if_due(now)
|
||||
|
||||
assert result.weekly is True
|
||||
assert aggregator.weekly_calls == ["2026-W06"]
|
||||
assert store.cleanup_calls == 1
|
||||
|
||||
|
||||
def test_run_if_due_monthly() -> None:
|
||||
scheduler, aggregator, _store = make_scheduler()
|
||||
now = datetime(2026, 2, 28, 12, 0, tzinfo=UTC) # Last day of month
|
||||
|
||||
result = scheduler.run_if_due(now)
|
||||
|
||||
assert result.monthly is True
|
||||
assert aggregator.monthly_calls == ["2026-02"]
|
||||
|
||||
|
||||
def test_run_if_due_quarterly() -> None:
|
||||
scheduler, aggregator, _store = make_scheduler()
|
||||
now = datetime(2026, 3, 31, 12, 0, tzinfo=UTC) # Last day of Q1
|
||||
|
||||
result = scheduler.run_if_due(now)
|
||||
|
||||
assert result.quarterly is True
|
||||
assert aggregator.quarterly_calls == ["2026-Q1"]
|
||||
|
||||
|
||||
def test_run_if_due_annual_and_legacy() -> None:
|
||||
scheduler, aggregator, _store = make_scheduler()
|
||||
now = datetime(2026, 12, 31, 12, 0, tzinfo=UTC)
|
||||
|
||||
result = scheduler.run_if_due(now)
|
||||
|
||||
assert result.annual is True
|
||||
assert result.legacy is True
|
||||
assert aggregator.annual_calls == ["2026"]
|
||||
assert aggregator.legacy_calls == 1
|
||||
|
||||
|
||||
def test_cleanup_runs_once_per_day() -> None:
|
||||
scheduler, _aggregator, store = make_scheduler()
|
||||
now = datetime(2026, 2, 9, 9, 0, tzinfo=UTC)
|
||||
|
||||
scheduler.run_if_due(now)
|
||||
scheduler.run_if_due(now)
|
||||
|
||||
assert store.cleanup_calls == 1
|
||||
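The scheduler tests above pin down when each rollup fires (Sunday for weekly, last day of the month for monthly, quarter-end for quarterly, December 31 for annual, with the legacy rollup following the annual one) and which timeframe label is passed to the aggregator. A hedged sketch of that trigger logic follows; the real ContextScheduler may implement it differently, and the helper name and return shape are illustrative.

```python
# Hedged sketch of the due-date logic implied by the tests above; not the actual
# ContextScheduler implementation.
import calendar
from datetime import UTC, datetime


def due_labels(now: datetime) -> dict[str, str]:
    labels: dict[str, str] = {}
    d = now.date()
    if d.isoweekday() == 7:  # Sunday closes the ISO week
        iso_year, iso_week, _ = d.isocalendar()
        labels["weekly"] = f"{iso_year}-W{iso_week:02d}"
    if d.day == calendar.monthrange(d.year, d.month)[1]:  # last day of the month
        labels["monthly"] = f"{d.year}-{d.month:02d}"
        if d.month in (3, 6, 9, 12):  # also the last day of a quarter
            labels["quarterly"] = f"{d.year}-Q{d.month // 3}"
        if d.month == 12:  # December 31: annual (and legacy) rollup
            labels["annual"] = str(d.year)
    return labels


assert due_labels(datetime(2026, 2, 8, 10, 0, tzinfo=UTC)) == {"weekly": "2026-W06"}
assert due_labels(datetime(2026, 3, 31, 12, 0, tzinfo=UTC))["quarterly"] == "2026-Q1"
```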
tests/test_daily_review.py (new file, 387 lines)
@@ -0,0 +1,387 @@
|
||||
"""Tests for DailyReviewer."""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
import json
|
||||
import sqlite3
|
||||
from types import SimpleNamespace
|
||||
from unittest.mock import AsyncMock, MagicMock
|
||||
|
||||
import pytest
|
||||
|
||||
from src.context.layer import ContextLayer
|
||||
from src.context.store import ContextStore
|
||||
from src.db import init_db, log_trade
|
||||
from src.evolution.daily_review import DailyReviewer
|
||||
from src.evolution.scorecard import DailyScorecard
|
||||
from src.logging.decision_logger import DecisionLogger
|
||||
|
||||
from datetime import UTC, datetime
|
||||
|
||||
TODAY = datetime.now(UTC).strftime("%Y-%m-%d")
|
||||
|
||||
|
||||
@pytest.fixture
|
||||
def db_conn() -> sqlite3.Connection:
|
||||
return init_db(":memory:")
|
||||
|
||||
|
||||
@pytest.fixture
|
||||
def context_store(db_conn: sqlite3.Connection) -> ContextStore:
|
||||
return ContextStore(db_conn)
|
||||
|
||||
|
||||
def _log_decision(
|
||||
logger: DecisionLogger,
|
||||
*,
|
||||
stock_code: str,
|
||||
market: str,
|
||||
action: str,
|
||||
confidence: int,
|
||||
scenario_match: dict[str, float] | None = None,
|
||||
) -> str:
|
||||
return logger.log_decision(
|
||||
stock_code=stock_code,
|
||||
market=market,
|
||||
exchange_code="KRX" if market == "KR" else "NASDAQ",
|
||||
action=action,
|
||||
confidence=confidence,
|
||||
rationale="test",
|
||||
context_snapshot={"scenario_match": scenario_match or {}},
|
||||
input_data={"stock_code": stock_code},
|
||||
)
|
||||
|
||||
|
||||
def test_generate_scorecard_market_scoped(
|
||||
db_conn: sqlite3.Connection, context_store: ContextStore,
|
||||
) -> None:
|
||||
reviewer = DailyReviewer(db_conn, context_store)
|
||||
logger = DecisionLogger(db_conn)
|
||||
|
||||
buy_id = _log_decision(
|
||||
logger,
|
||||
stock_code="005930",
|
||||
market="KR",
|
||||
action="BUY",
|
||||
confidence=90,
|
||||
scenario_match={"rsi": 29.0},
|
||||
)
|
||||
_log_decision(
|
||||
logger,
|
||||
stock_code="000660",
|
||||
market="KR",
|
||||
action="HOLD",
|
||||
confidence=60,
|
||||
)
|
||||
_log_decision(
|
||||
logger,
|
||||
stock_code="AAPL",
|
||||
market="US",
|
||||
action="SELL",
|
||||
confidence=80,
|
||||
scenario_match={"volume_ratio": 2.1},
|
||||
)
|
||||
|
||||
log_trade(
|
||||
db_conn,
|
||||
"005930",
|
||||
"BUY",
|
||||
90,
|
||||
"buy",
|
||||
quantity=1,
|
||||
price=100.0,
|
||||
pnl=10.0,
|
||||
market="KR",
|
||||
exchange_code="KRX",
|
||||
decision_id=buy_id,
|
||||
)
|
||||
log_trade(
|
||||
db_conn,
|
||||
"000660",
|
||||
"HOLD",
|
||||
60,
|
||||
"hold",
|
||||
quantity=0,
|
||||
price=0.0,
|
||||
pnl=0.0,
|
||||
market="KR",
|
||||
exchange_code="KRX",
|
||||
)
|
||||
log_trade(
|
||||
db_conn,
|
||||
"AAPL",
|
||||
"SELL",
|
||||
80,
|
||||
"sell",
|
||||
quantity=1,
|
||||
price=200.0,
|
||||
pnl=-5.0,
|
||||
market="US",
|
||||
exchange_code="NASDAQ",
|
||||
)
|
||||
|
||||
scorecard = reviewer.generate_scorecard(TODAY, "KR")
|
||||
|
||||
assert scorecard.market == "KR"
|
||||
assert scorecard.total_decisions == 2
|
||||
assert scorecard.buys == 1
|
||||
assert scorecard.sells == 0
|
||||
assert scorecard.holds == 1
|
||||
assert scorecard.total_pnl == 10.0
|
||||
assert scorecard.win_rate == 100.0
|
||||
assert scorecard.avg_confidence == 75.0
|
||||
assert scorecard.scenario_match_rate == 50.0
|
||||
|
||||
|
||||
def test_generate_scorecard_top_winners_and_losers(
|
||||
db_conn: sqlite3.Connection, context_store: ContextStore,
|
||||
) -> None:
|
||||
reviewer = DailyReviewer(db_conn, context_store)
|
||||
logger = DecisionLogger(db_conn)
|
||||
|
||||
for code, pnl in [("005930", 30.0), ("000660", 10.0), ("035420", -15.0), ("051910", -5.0)]:
|
||||
decision_id = _log_decision(
|
||||
logger,
|
||||
stock_code=code,
|
||||
market="KR",
|
||||
action="BUY" if pnl >= 0 else "SELL",
|
||||
confidence=80,
|
||||
scenario_match={"rsi": 30.0},
|
||||
)
|
||||
log_trade(
|
||||
db_conn,
|
||||
code,
|
||||
"BUY" if pnl >= 0 else "SELL",
|
||||
80,
|
||||
"test",
|
||||
quantity=1,
|
||||
price=100.0,
|
||||
pnl=pnl,
|
||||
market="KR",
|
||||
exchange_code="KRX",
|
||||
decision_id=decision_id,
|
||||
)
|
||||
|
||||
scorecard = reviewer.generate_scorecard(TODAY, "KR")
|
||||
assert scorecard.top_winners == ["005930", "000660"]
|
||||
assert scorecard.top_losers == ["035420", "051910"]
|
||||
|
||||
|
||||
def test_generate_scorecard_empty_day(
|
||||
db_conn: sqlite3.Connection, context_store: ContextStore,
|
||||
) -> None:
|
||||
reviewer = DailyReviewer(db_conn, context_store)
|
||||
scorecard = reviewer.generate_scorecard(TODAY, "KR")
|
||||
|
||||
assert scorecard.total_decisions == 0
|
||||
assert scorecard.total_pnl == 0.0
|
||||
assert scorecard.win_rate == 0.0
|
||||
assert scorecard.avg_confidence == 0.0
|
||||
assert scorecard.scenario_match_rate == 0.0
|
||||
assert scorecard.top_winners == []
|
||||
assert scorecard.top_losers == []
|
||||
|
||||
|
||||
@pytest.mark.asyncio
|
||||
async def test_generate_lessons_without_gemini_returns_empty(
|
||||
db_conn: sqlite3.Connection, context_store: ContextStore,
|
||||
) -> None:
|
||||
reviewer = DailyReviewer(db_conn, context_store, gemini_client=None)
|
||||
lessons = await reviewer.generate_lessons(
|
||||
DailyScorecard(
|
||||
date="2026-02-14",
|
||||
market="KR",
|
||||
total_decisions=1,
|
||||
buys=1,
|
||||
sells=0,
|
||||
holds=0,
|
||||
total_pnl=5.0,
|
||||
win_rate=100.0,
|
||||
avg_confidence=90.0,
|
||||
scenario_match_rate=100.0,
|
||||
)
|
||||
)
|
||||
assert lessons == []
|
||||
|
||||
|
||||
@pytest.mark.asyncio
|
||||
async def test_generate_lessons_parses_json_array(
|
||||
db_conn: sqlite3.Connection, context_store: ContextStore,
|
||||
) -> None:
|
||||
mock_gemini = MagicMock()
|
||||
mock_gemini.decide = AsyncMock(
|
||||
return_value=SimpleNamespace(rationale='["Cut losers earlier", "Reduce midday churn"]')
|
||||
)
|
||||
reviewer = DailyReviewer(db_conn, context_store, gemini_client=mock_gemini)
|
||||
|
||||
lessons = await reviewer.generate_lessons(
|
||||
DailyScorecard(
|
||||
date="2026-02-14",
|
||||
market="KR",
|
||||
total_decisions=3,
|
||||
buys=1,
|
||||
sells=1,
|
||||
holds=1,
|
||||
total_pnl=-2.5,
|
||||
win_rate=50.0,
|
||||
avg_confidence=70.0,
|
||||
scenario_match_rate=66.7,
|
||||
)
|
||||
)
|
||||
assert lessons == ["Cut losers earlier", "Reduce midday churn"]
|
||||
|
||||
|
||||
@pytest.mark.asyncio
|
||||
async def test_generate_lessons_fallback_to_lines(
|
||||
db_conn: sqlite3.Connection, context_store: ContextStore,
|
||||
) -> None:
|
||||
mock_gemini = MagicMock()
|
||||
mock_gemini.decide = AsyncMock(
|
||||
return_value=SimpleNamespace(rationale="- Keep risk tighter\n- Increase selectivity")
|
||||
)
|
||||
reviewer = DailyReviewer(db_conn, context_store, gemini_client=mock_gemini)
|
||||
|
||||
lessons = await reviewer.generate_lessons(
|
||||
DailyScorecard(
|
||||
date="2026-02-14",
|
||||
market="US",
|
||||
total_decisions=2,
|
||||
buys=1,
|
||||
sells=1,
|
||||
holds=0,
|
||||
total_pnl=1.0,
|
||||
win_rate=50.0,
|
||||
avg_confidence=75.0,
|
||||
scenario_match_rate=100.0,
|
||||
)
|
||||
)
|
||||
assert lessons == ["Keep risk tighter", "Increase selectivity"]
|
||||
|
||||
|
||||
@pytest.mark.asyncio
|
||||
async def test_generate_lessons_handles_gemini_error(
|
||||
db_conn: sqlite3.Connection, context_store: ContextStore,
|
||||
) -> None:
|
||||
mock_gemini = MagicMock()
|
||||
mock_gemini.decide = AsyncMock(side_effect=RuntimeError("boom"))
|
||||
reviewer = DailyReviewer(db_conn, context_store, gemini_client=mock_gemini)
|
||||
|
||||
lessons = await reviewer.generate_lessons(
|
||||
DailyScorecard(
|
||||
date="2026-02-14",
|
||||
market="US",
|
||||
total_decisions=0,
|
||||
buys=0,
|
||||
sells=0,
|
||||
holds=0,
|
||||
total_pnl=0.0,
|
||||
win_rate=0.0,
|
||||
avg_confidence=0.0,
|
||||
scenario_match_rate=0.0,
|
||||
)
|
||||
)
|
||||
assert lessons == []
|
||||
|
||||
|
||||
def test_store_scorecard_in_context(
|
||||
db_conn: sqlite3.Connection, context_store: ContextStore,
|
||||
) -> None:
|
||||
reviewer = DailyReviewer(db_conn, context_store)
|
||||
scorecard = DailyScorecard(
|
||||
date="2026-02-14",
|
||||
market="KR",
|
||||
total_decisions=5,
|
||||
buys=2,
|
||||
sells=1,
|
||||
holds=2,
|
||||
total_pnl=15.0,
|
||||
win_rate=66.67,
|
||||
avg_confidence=82.0,
|
||||
scenario_match_rate=80.0,
|
||||
lessons=["Keep position sizing stable"],
|
||||
cross_market_note="US risk-off",
|
||||
)
|
||||
|
||||
reviewer.store_scorecard_in_context(scorecard)
|
||||
|
||||
stored = context_store.get_context(
|
||||
ContextLayer.L6_DAILY,
|
||||
"2026-02-14",
|
||||
"scorecard_KR",
|
||||
)
|
||||
assert stored is not None
|
||||
assert stored["market"] == "KR"
|
||||
assert stored["total_pnl"] == 15.0
|
||||
assert stored["lessons"] == ["Keep position sizing stable"]
|
||||
|
||||
|
||||
def test_store_scorecard_key_is_market_scoped(
|
||||
db_conn: sqlite3.Connection, context_store: ContextStore,
|
||||
) -> None:
|
||||
reviewer = DailyReviewer(db_conn, context_store)
|
||||
kr = DailyScorecard(
|
||||
date="2026-02-14",
|
||||
market="KR",
|
||||
total_decisions=1,
|
||||
buys=1,
|
||||
sells=0,
|
||||
holds=0,
|
||||
total_pnl=1.0,
|
||||
win_rate=100.0,
|
||||
avg_confidence=90.0,
|
||||
scenario_match_rate=100.0,
|
||||
)
|
||||
us = DailyScorecard(
|
||||
date="2026-02-14",
|
||||
market="US",
|
||||
total_decisions=1,
|
||||
buys=0,
|
||||
sells=1,
|
||||
holds=0,
|
||||
total_pnl=-1.0,
|
||||
win_rate=0.0,
|
||||
avg_confidence=70.0,
|
||||
scenario_match_rate=100.0,
|
||||
)
|
||||
|
||||
reviewer.store_scorecard_in_context(kr)
|
||||
reviewer.store_scorecard_in_context(us)
|
||||
|
||||
kr_ctx = context_store.get_context(ContextLayer.L6_DAILY, "2026-02-14", "scorecard_KR")
|
||||
us_ctx = context_store.get_context(ContextLayer.L6_DAILY, "2026-02-14", "scorecard_US")
|
||||
|
||||
assert kr_ctx["market"] == "KR"
|
||||
assert us_ctx["market"] == "US"
|
||||
assert kr_ctx["total_pnl"] == 1.0
|
||||
assert us_ctx["total_pnl"] == -1.0
|
||||
|
||||
|
||||
def test_generate_scorecard_handles_invalid_context_snapshot(
|
||||
db_conn: sqlite3.Connection, context_store: ContextStore,
|
||||
) -> None:
|
||||
reviewer = DailyReviewer(db_conn, context_store)
|
||||
db_conn.execute(
|
||||
"""
|
||||
INSERT INTO decision_logs (
|
||||
decision_id, timestamp, stock_code, market, exchange_code,
|
||||
action, confidence, rationale, context_snapshot, input_data
|
||||
) VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?)
|
||||
""",
|
||||
(
|
||||
"d1",
|
||||
"2026-02-14T09:00:00+00:00",
|
||||
"005930",
|
||||
"KR",
|
||||
"KRX",
|
||||
"HOLD",
|
||||
50,
|
||||
"test",
|
||||
"{invalid_json",
|
||||
json.dumps({}),
|
||||
),
|
||||
)
|
||||
db_conn.commit()
|
||||
|
||||
scorecard = reviewer.generate_scorecard("2026-02-14", "KR")
|
||||
assert scorecard.total_decisions == 1
|
||||
assert scorecard.scenario_match_rate == 0.0
|
||||
tests/test_dashboard.py (new file, 298 lines)
@@ -0,0 +1,298 @@
|
||||
"""Tests for dashboard endpoint handlers."""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
import json
|
||||
import sqlite3
|
||||
from collections.abc import Callable
|
||||
from datetime import UTC, datetime
|
||||
from pathlib import Path
|
||||
from typing import Any
|
||||
|
||||
import pytest
|
||||
from fastapi import HTTPException
|
||||
from fastapi.responses import FileResponse
|
||||
|
||||
from src.dashboard.app import create_dashboard_app
|
||||
from src.db import init_db
|
||||
|
||||
|
||||
def _seed_db(conn: sqlite3.Connection) -> None:
|
||||
today = datetime.now(UTC).date().isoformat()
|
||||
|
||||
conn.execute(
|
||||
"""
|
||||
INSERT INTO playbooks (
|
||||
date, market, status, playbook_json, generated_at,
|
||||
token_count, scenario_count, match_count
|
||||
) VALUES (?, ?, ?, ?, ?, ?, ?, ?)
|
||||
""",
|
||||
(
|
||||
"2026-02-14",
|
||||
"KR",
|
||||
"ready",
|
||||
json.dumps({"market": "KR", "stock_playbooks": []}),
|
||||
"2026-02-14T08:30:00+00:00",
|
||||
123,
|
||||
2,
|
||||
1,
|
||||
),
|
||||
)
|
||||
conn.execute(
|
||||
"""
|
||||
INSERT INTO playbooks (
|
||||
date, market, status, playbook_json, generated_at,
|
||||
token_count, scenario_count, match_count
|
||||
) VALUES (?, ?, ?, ?, ?, ?, ?, ?)
|
||||
""",
|
||||
(
|
||||
today,
|
||||
"US_NASDAQ",
|
||||
"ready",
|
||||
json.dumps({"market": "US_NASDAQ", "stock_playbooks": []}),
|
||||
f"{today}T08:30:00+00:00",
|
||||
100,
|
||||
1,
|
||||
0,
|
||||
),
|
||||
)
|
||||
conn.execute(
|
||||
"""
|
||||
INSERT INTO contexts (layer, timeframe, key, value, created_at, updated_at)
|
||||
VALUES (?, ?, ?, ?, ?, ?)
|
||||
""",
|
||||
(
|
||||
"L6_DAILY",
|
||||
"2026-02-14",
|
||||
"scorecard_KR",
|
||||
json.dumps({"market": "KR", "total_pnl": 1.5, "win_rate": 60.0}),
|
||||
"2026-02-14T15:30:00+00:00",
|
||||
"2026-02-14T15:30:00+00:00",
|
||||
),
|
||||
)
|
||||
conn.execute(
|
||||
"""
|
||||
INSERT INTO contexts (layer, timeframe, key, value, created_at, updated_at)
|
||||
VALUES (?, ?, ?, ?, ?, ?)
|
||||
""",
|
||||
(
|
||||
"L7_REALTIME",
|
||||
"2026-02-14T10:00:00+00:00",
|
||||
"volatility_KR_005930",
|
||||
json.dumps({"momentum_score": 70.0}),
|
||||
"2026-02-14T10:00:00+00:00",
|
||||
"2026-02-14T10:00:00+00:00",
|
||||
),
|
||||
)
|
||||
conn.execute(
|
||||
"""
|
||||
INSERT INTO decision_logs (
|
||||
decision_id, timestamp, stock_code, market, exchange_code,
|
||||
action, confidence, rationale, context_snapshot, input_data
|
||||
) VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?)
|
||||
""",
|
||||
(
|
||||
"d-kr-1",
|
||||
f"{today}T09:10:00+00:00",
|
||||
"005930",
|
||||
"KR",
|
||||
"KRX",
|
||||
"BUY",
|
||||
85,
|
||||
"signal matched",
|
||||
json.dumps({"scenario_match": {"rsi": 28.0}}),
|
||||
json.dumps({"current_price": 70000}),
|
||||
),
|
||||
)
|
||||
conn.execute(
|
||||
"""
|
||||
INSERT INTO decision_logs (
|
||||
decision_id, timestamp, stock_code, market, exchange_code,
|
||||
action, confidence, rationale, context_snapshot, input_data
|
||||
) VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?)
|
||||
""",
|
||||
(
|
||||
"d-us-1",
|
||||
f"{today}T21:10:00+00:00",
|
||||
"AAPL",
|
||||
"US_NASDAQ",
|
||||
"NASDAQ",
|
||||
"SELL",
|
||||
80,
|
||||
"no match",
|
||||
json.dumps({"scenario_match": {}}),
|
||||
json.dumps({"current_price": 200}),
|
||||
),
|
||||
)
|
||||
conn.execute(
|
||||
"""
|
||||
INSERT INTO trades (
|
||||
timestamp, stock_code, action, confidence, rationale,
|
||||
quantity, price, pnl, market, exchange_code, selection_context, decision_id
|
||||
) VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)
|
||||
""",
|
||||
(
|
||||
f"{today}T09:11:00+00:00",
|
||||
"005930",
|
||||
"BUY",
|
||||
85,
|
||||
"buy",
|
||||
1,
|
||||
70000,
|
||||
2.0,
|
||||
"KR",
|
||||
"KRX",
|
||||
None,
|
||||
"d-kr-1",
|
||||
),
|
||||
)
|
||||
conn.execute(
|
||||
"""
|
||||
INSERT INTO trades (
|
||||
timestamp, stock_code, action, confidence, rationale,
|
||||
quantity, price, pnl, market, exchange_code, selection_context, decision_id
|
||||
) VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)
|
||||
""",
|
||||
(
|
||||
f"{today}T21:11:00+00:00",
|
||||
"AAPL",
|
||||
"SELL",
|
||||
80,
|
||||
"sell",
|
||||
1,
|
||||
200,
|
||||
-1.0,
|
||||
"US_NASDAQ",
|
||||
"NASDAQ",
|
||||
None,
|
||||
"d-us-1",
|
||||
),
|
||||
)
|
||||
conn.commit()
|
||||
|
||||
|
||||
def _app(tmp_path: Path) -> Any:
|
||||
db_path = tmp_path / "dashboard_test.db"
|
||||
conn = init_db(str(db_path))
|
||||
_seed_db(conn)
|
||||
conn.close()
|
||||
return create_dashboard_app(str(db_path))
|
||||
|
||||
|
||||
def _endpoint(app: Any, path: str) -> Callable[..., Any]:
|
||||
for route in app.routes:
|
||||
if getattr(route, "path", None) == path:
|
||||
return route.endpoint
|
||||
raise AssertionError(f"route not found: {path}")
|
||||
|
||||
|
||||
def test_index_serves_html(tmp_path: Path) -> None:
|
||||
app = _app(tmp_path)
|
||||
index = _endpoint(app, "/")
|
||||
resp = index()
|
||||
assert isinstance(resp, FileResponse)
|
||||
assert "index.html" in str(resp.path)
|
||||
|
||||
|
||||
def test_status_endpoint(tmp_path: Path) -> None:
|
||||
app = _app(tmp_path)
|
||||
get_status = _endpoint(app, "/api/status")
|
||||
body = get_status()
|
||||
assert "KR" in body["markets"]
|
||||
assert "US_NASDAQ" in body["markets"]
|
||||
assert "totals" in body
|
||||
|
||||
|
||||
def test_playbook_found(tmp_path: Path) -> None:
|
||||
app = _app(tmp_path)
|
||||
get_playbook = _endpoint(app, "/api/playbook/{date_str}")
|
||||
body = get_playbook("2026-02-14", market="KR")
|
||||
assert body["market"] == "KR"
|
||||
|
||||
|
||||
def test_playbook_not_found(tmp_path: Path) -> None:
|
||||
app = _app(tmp_path)
|
||||
get_playbook = _endpoint(app, "/api/playbook/{date_str}")
|
||||
with pytest.raises(HTTPException, match="playbook not found"):
|
||||
get_playbook("2026-02-15", market="KR")
|
||||
|
||||
|
||||
def test_scorecard_found(tmp_path: Path) -> None:
|
||||
app = _app(tmp_path)
|
||||
get_scorecard = _endpoint(app, "/api/scorecard/{date_str}")
|
||||
body = get_scorecard("2026-02-14", market="KR")
|
||||
assert body["scorecard"]["total_pnl"] == 1.5
|
||||
|
||||
|
||||
def test_scorecard_not_found(tmp_path: Path) -> None:
|
||||
app = _app(tmp_path)
|
||||
get_scorecard = _endpoint(app, "/api/scorecard/{date_str}")
|
||||
with pytest.raises(HTTPException, match="scorecard not found"):
|
||||
get_scorecard("2026-02-15", market="KR")
|
||||
|
||||
|
||||
def test_performance_all(tmp_path: Path) -> None:
|
||||
app = _app(tmp_path)
|
||||
get_performance = _endpoint(app, "/api/performance")
|
||||
body = get_performance(market="all")
|
||||
assert body["market"] == "all"
|
||||
assert body["combined"]["total_trades"] == 2
|
||||
assert len(body["by_market"]) == 2
|
||||
|
||||
|
||||
def test_performance_market_filter(tmp_path: Path) -> None:
|
||||
app = _app(tmp_path)
|
||||
get_performance = _endpoint(app, "/api/performance")
|
||||
body = get_performance(market="KR")
|
||||
assert body["market"] == "KR"
|
||||
assert body["metrics"]["total_trades"] == 1
|
||||
|
||||
|
||||
def test_performance_empty_market(tmp_path: Path) -> None:
|
||||
app = _app(tmp_path)
|
||||
get_performance = _endpoint(app, "/api/performance")
|
||||
body = get_performance(market="JP")
|
||||
assert body["metrics"]["total_trades"] == 0
|
||||
|
||||
|
||||
def test_context_layer_all(tmp_path: Path) -> None:
|
||||
app = _app(tmp_path)
|
||||
get_context_layer = _endpoint(app, "/api/context/{layer}")
|
||||
body = get_context_layer("L7_REALTIME", timeframe=None, limit=100)
|
||||
assert body["layer"] == "L7_REALTIME"
|
||||
assert body["count"] == 1
|
||||
|
||||
|
||||
def test_context_layer_timeframe_filter(tmp_path: Path) -> None:
|
||||
app = _app(tmp_path)
|
||||
get_context_layer = _endpoint(app, "/api/context/{layer}")
|
||||
body = get_context_layer("L6_DAILY", timeframe="2026-02-14", limit=100)
|
||||
assert body["count"] == 1
|
||||
assert body["entries"][0]["key"] == "scorecard_KR"
|
||||
|
||||
|
||||
def test_decisions_endpoint(tmp_path: Path) -> None:
|
||||
app = _app(tmp_path)
|
||||
get_decisions = _endpoint(app, "/api/decisions")
|
||||
body = get_decisions(market="KR", limit=50)
|
||||
assert body["count"] == 1
|
||||
assert body["decisions"][0]["decision_id"] == "d-kr-1"
|
||||
|
||||
|
||||
def test_scenarios_active_filters_non_matched(tmp_path: Path) -> None:
|
||||
app = _app(tmp_path)
|
||||
get_active_scenarios = _endpoint(app, "/api/scenarios/active")
|
||||
body = get_active_scenarios(
|
||||
market="KR",
|
||||
date_str=datetime.now(UTC).date().isoformat(),
|
||||
limit=50,
|
||||
)
|
||||
assert body["count"] == 1
|
||||
assert body["matches"][0]["stock_code"] == "005930"
|
||||
|
||||
|
||||
def test_scenarios_active_empty_when_no_matches(tmp_path: Path) -> None:
|
||||
app = _app(tmp_path)
|
||||
get_active_scenarios = _endpoint(app, "/api/scenarios/active")
|
||||
body = get_active_scenarios(market="US", date_str="2026-02-14", limit=50)
|
||||
assert body["count"] == 0
|
||||
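The dashboard tests above call FastAPI route handlers directly through the `_endpoint()` lookup rather than going through an HTTP TestClient, which keeps the tests synchronous and dependency-light. A self-contained sketch of that pattern follows; the toy `/api/ping` route is illustrative and not part of the dashboard app.

```python
# Standalone sketch of the _endpoint() lookup pattern: resolve a route's handler
# from app.routes and call it as a plain function, skipping the HTTP layer.
from fastapi import FastAPI

app = FastAPI()


@app.get("/api/ping")
def ping() -> dict[str, bool]:
    return {"ok": True}


def endpoint(path: str):
    for route in app.routes:
        if getattr(route, "path", None) == path:
            return route.endpoint
    raise AssertionError(f"route not found: {path}")


assert endpoint("/api/ping")() == {"ok": True}
```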
tests/test_db.py (new file, 60 lines)
@@ -0,0 +1,60 @@
|
||||
"""Tests for database helper functions."""
|
||||
|
||||
from src.db import get_open_position, init_db, log_trade
|
||||
|
||||
|
||||
def test_get_open_position_returns_latest_buy() -> None:
|
||||
conn = init_db(":memory:")
|
||||
log_trade(
|
||||
conn=conn,
|
||||
stock_code="005930",
|
||||
action="BUY",
|
||||
confidence=90,
|
||||
rationale="entry",
|
||||
quantity=2,
|
||||
price=70000.0,
|
||||
market="KR",
|
||||
exchange_code="KRX",
|
||||
decision_id="d-buy-1",
|
||||
)
|
||||
|
||||
position = get_open_position(conn, "005930", "KR")
|
||||
assert position is not None
|
||||
assert position["decision_id"] == "d-buy-1"
|
||||
assert position["price"] == 70000.0
|
||||
assert position["quantity"] == 2
|
||||
|
||||
|
||||
def test_get_open_position_returns_none_when_latest_is_sell() -> None:
|
||||
conn = init_db(":memory:")
|
||||
log_trade(
|
||||
conn=conn,
|
||||
stock_code="005930",
|
||||
action="BUY",
|
||||
confidence=90,
|
||||
rationale="entry",
|
||||
quantity=1,
|
||||
price=70000.0,
|
||||
market="KR",
|
||||
exchange_code="KRX",
|
||||
decision_id="d-buy-1",
|
||||
)
|
||||
log_trade(
|
||||
conn=conn,
|
||||
stock_code="005930",
|
||||
action="SELL",
|
||||
confidence=95,
|
||||
rationale="exit",
|
||||
quantity=1,
|
||||
price=71000.0,
|
||||
market="KR",
|
||||
exchange_code="KRX",
|
||||
decision_id="d-sell-1",
|
||||
)
|
||||
|
||||
assert get_open_position(conn, "005930", "KR") is None
|
||||
|
||||
|
||||
def test_get_open_position_returns_none_when_no_trades() -> None:
|
||||
conn = init_db(":memory:")
|
||||
assert get_open_position(conn, "AAPL", "US_NASDAQ") is None
|
||||
tests/test_main.py (1046 lines)
File diff suppressed because it is too large
@@ -7,6 +7,7 @@ import pytest
|
||||
|
||||
from src.markets.schedule import (
|
||||
MARKETS,
|
||||
expand_market_codes,
|
||||
get_next_market_open,
|
||||
get_open_markets,
|
||||
is_market_open,
|
||||
@@ -199,3 +200,28 @@ class TestGetNextMarketOpen:
|
||||
enabled_markets=["INVALID", "KR"], now=test_time
|
||||
)
|
||||
assert market.code == "KR"
|
||||
|
||||
|
||||
class TestExpandMarketCodes:
|
||||
"""Test shorthand market expansion."""
|
||||
|
||||
def test_expand_us_shorthand(self) -> None:
|
||||
assert expand_market_codes(["US"]) == ["US_NASDAQ", "US_NYSE", "US_AMEX"]
|
||||
|
||||
def test_expand_cn_shorthand(self) -> None:
|
||||
assert expand_market_codes(["CN"]) == ["CN_SHA", "CN_SZA"]
|
||||
|
||||
def test_expand_vn_shorthand(self) -> None:
|
||||
assert expand_market_codes(["VN"]) == ["VN_HAN", "VN_HCM"]
|
||||
|
||||
def test_expand_mixed_codes(self) -> None:
|
||||
assert expand_market_codes(["KR", "US", "JP"]) == [
|
||||
"KR",
|
||||
"US_NASDAQ",
|
||||
"US_NYSE",
|
||||
"US_AMEX",
|
||||
"JP",
|
||||
]
|
||||
|
||||
def test_expand_preserves_unknown_code(self) -> None:
|
||||
assert expand_market_codes(["KR", "UNKNOWN"]) == ["KR", "UNKNOWN"]
|
||||
|
||||
tests/test_overseas_broker.py (new file, 521 lines)
@@ -0,0 +1,521 @@
|
||||
"""Tests for OverseasBroker — rankings, price, balance, order, and helpers."""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
from unittest.mock import AsyncMock, MagicMock
|
||||
|
||||
import aiohttp
|
||||
import pytest
|
||||
|
||||
from src.broker.kis_api import KISBroker
|
||||
from src.broker.overseas import OverseasBroker, _RANKING_EXCHANGE_MAP
|
||||
from src.config import Settings
|
||||
|
||||
|
||||
def _make_async_cm(mock_resp: AsyncMock) -> MagicMock:
|
||||
"""Create an async context manager that returns mock_resp on __aenter__."""
|
||||
cm = MagicMock()
|
||||
cm.__aenter__ = AsyncMock(return_value=mock_resp)
|
||||
cm.__aexit__ = AsyncMock(return_value=False)
|
||||
return cm
|
||||
|
||||
|
||||
@pytest.fixture
|
||||
def mock_settings() -> Settings:
|
||||
"""Provide mock settings with correct default TR_IDs/paths."""
|
||||
return Settings(
|
||||
KIS_APP_KEY="test_key",
|
||||
KIS_APP_SECRET="test_secret",
|
||||
KIS_ACCOUNT_NO="12345678-01",
|
||||
GEMINI_API_KEY="test_gemini_key",
|
||||
)
|
||||
|
||||
|
||||
@pytest.fixture
|
||||
def mock_broker(mock_settings: Settings) -> KISBroker:
|
||||
"""Provide a mock KIS broker."""
|
||||
broker = KISBroker(mock_settings)
|
||||
broker.get_orderbook = AsyncMock() # type: ignore[method-assign]
|
||||
return broker
|
||||
|
||||
|
||||
@pytest.fixture
|
||||
def overseas_broker(mock_broker: KISBroker) -> OverseasBroker:
|
||||
"""Provide an OverseasBroker wrapping a mock KISBroker."""
|
||||
return OverseasBroker(mock_broker)
|
||||
|
||||
|
||||
def _setup_broker_mocks(overseas_broker: OverseasBroker, mock_session: MagicMock) -> None:
|
||||
"""Wire up common broker mocks."""
|
||||
overseas_broker._broker._rate_limiter.acquire = AsyncMock()
|
||||
overseas_broker._broker._get_session = MagicMock(return_value=mock_session)
|
||||
overseas_broker._broker._auth_headers = AsyncMock(return_value={})
|
||||
|
||||
|
||||
class TestRankingExchangeMap:
|
||||
"""Test exchange code mapping for ranking API."""
|
||||
|
||||
def test_nasd_maps_to_nas(self) -> None:
|
||||
assert _RANKING_EXCHANGE_MAP["NASD"] == "NAS"
|
||||
|
||||
def test_nyse_maps_to_nys(self) -> None:
|
||||
assert _RANKING_EXCHANGE_MAP["NYSE"] == "NYS"
|
||||
|
||||
def test_amex_maps_to_ams(self) -> None:
|
||||
assert _RANKING_EXCHANGE_MAP["AMEX"] == "AMS"
|
||||
|
||||
def test_sehk_maps_to_hks(self) -> None:
|
||||
assert _RANKING_EXCHANGE_MAP["SEHK"] == "HKS"
|
||||
|
||||
def test_unmapped_exchange_passes_through(self) -> None:
|
||||
assert _RANKING_EXCHANGE_MAP.get("UNKNOWN", "UNKNOWN") == "UNKNOWN"
|
||||
|
||||
def test_tse_unchanged(self) -> None:
|
||||
assert _RANKING_EXCHANGE_MAP["TSE"] == "TSE"
|
||||
|
||||
|
||||
class TestConfigDefaults:
|
||||
"""Test that config defaults match KIS official API specs."""
|
||||
|
||||
def test_fluct_tr_id(self, mock_settings: Settings) -> None:
|
||||
assert mock_settings.OVERSEAS_RANKING_FLUCT_TR_ID == "HHDFS76290000"
|
||||
|
||||
def test_volume_tr_id(self, mock_settings: Settings) -> None:
|
||||
assert mock_settings.OVERSEAS_RANKING_VOLUME_TR_ID == "HHDFS76270000"
|
||||
|
||||
def test_fluct_path(self, mock_settings: Settings) -> None:
|
||||
assert mock_settings.OVERSEAS_RANKING_FLUCT_PATH == "/uapi/overseas-stock/v1/ranking/updown-rate"
|
||||
|
||||
def test_volume_path(self, mock_settings: Settings) -> None:
|
||||
assert mock_settings.OVERSEAS_RANKING_VOLUME_PATH == "/uapi/overseas-stock/v1/ranking/volume-surge"
|
||||
|
||||
|
||||
class TestFetchOverseasRankings:
|
||||
"""Test fetch_overseas_rankings method."""
|
||||
|
||||
@pytest.mark.asyncio
|
||||
async def test_fluctuation_uses_correct_params(
|
||||
self, overseas_broker: OverseasBroker
|
||||
) -> None:
|
||||
"""Fluctuation ranking should use HHDFS76290000, updown-rate path, and correct params."""
|
||||
mock_resp = AsyncMock()
|
||||
mock_resp.status = 200
|
||||
mock_resp.json = AsyncMock(
|
||||
return_value={"output": [{"symb": "AAPL", "name": "Apple"}]}
|
||||
)
|
||||
|
||||
mock_session = MagicMock()
|
||||
mock_session.get = MagicMock(return_value=_make_async_cm(mock_resp))
|
||||
|
||||
_setup_broker_mocks(overseas_broker, mock_session)
|
||||
overseas_broker._broker._auth_headers = AsyncMock(
|
||||
return_value={"authorization": "Bearer test"}
|
||||
)
|
||||
|
||||
result = await overseas_broker.fetch_overseas_rankings("NASD", "fluctuation")
|
||||
|
||||
assert len(result) == 1
|
||||
assert result[0]["symb"] == "AAPL"
|
||||
|
||||
call_args = mock_session.get.call_args
|
||||
url = call_args[0][0]
|
||||
params = call_args[1]["params"]
|
||||
|
||||
assert "/uapi/overseas-stock/v1/ranking/updown-rate" in url
|
||||
assert params["EXCD"] == "NAS"
|
||||
assert params["NDAY"] == "0"
|
||||
assert params["GUBN"] == "1"
|
||||
assert params["VOL_RANG"] == "0"
|
||||
|
||||
overseas_broker._broker._auth_headers.assert_called_with("HHDFS76290000")
|
||||
|
||||
@pytest.mark.asyncio
|
||||
async def test_volume_uses_correct_params(
|
||||
self, overseas_broker: OverseasBroker
|
||||
) -> None:
|
||||
"""Volume ranking should use HHDFS76270000, volume-surge path, and correct params."""
|
||||
mock_resp = AsyncMock()
|
||||
mock_resp.status = 200
|
||||
mock_resp.json = AsyncMock(
|
||||
return_value={"output": [{"symb": "TSLA", "name": "Tesla"}]}
|
||||
)
|
||||
|
||||
mock_session = MagicMock()
|
||||
mock_session.get = MagicMock(return_value=_make_async_cm(mock_resp))
|
||||
|
||||
_setup_broker_mocks(overseas_broker, mock_session)
|
||||
overseas_broker._broker._auth_headers = AsyncMock(
|
||||
return_value={"authorization": "Bearer test"}
|
||||
)
|
||||
|
||||
result = await overseas_broker.fetch_overseas_rankings("NYSE", "volume")
|
||||
|
||||
assert len(result) == 1
|
||||
|
||||
call_args = mock_session.get.call_args
|
||||
url = call_args[0][0]
|
||||
params = call_args[1]["params"]
|
||||
|
||||
assert "/uapi/overseas-stock/v1/ranking/volume-surge" in url
|
||||
assert params["EXCD"] == "NYS"
|
||||
assert params["MIXN"] == "0"
|
||||
assert params["VOL_RANG"] == "0"
|
||||
assert "NDAY" not in params
|
||||
assert "GUBN" not in params
|
||||
|
||||
overseas_broker._broker._auth_headers.assert_called_with("HHDFS76270000")
|
||||
|
||||
@pytest.mark.asyncio
|
||||
async def test_404_returns_empty_list(
|
||||
self, overseas_broker: OverseasBroker
|
||||
) -> None:
|
||||
"""HTTP 404 should return empty list (fallback) instead of raising."""
|
||||
mock_resp = AsyncMock()
|
||||
mock_resp.status = 404
|
||||
mock_resp.text = AsyncMock(return_value="Not Found")
|
||||
|
||||
mock_session = MagicMock()
|
||||
mock_session.get = MagicMock(return_value=_make_async_cm(mock_resp))
|
||||
|
||||
_setup_broker_mocks(overseas_broker, mock_session)
|
||||
|
||||
result = await overseas_broker.fetch_overseas_rankings("AMEX", "fluctuation")
|
||||
assert result == []
|
||||
|
||||
@pytest.mark.asyncio
|
||||
async def test_non_404_error_raises(
|
||||
self, overseas_broker: OverseasBroker
|
||||
) -> None:
|
||||
"""Non-404 HTTP errors should raise ConnectionError."""
|
||||
mock_resp = AsyncMock()
|
||||
mock_resp.status = 500
|
||||
mock_resp.text = AsyncMock(return_value="Internal Server Error")
|
||||
|
||||
mock_session = MagicMock()
|
||||
mock_session.get = MagicMock(return_value=_make_async_cm(mock_resp))
|
||||
|
||||
_setup_broker_mocks(overseas_broker, mock_session)
|
||||
|
||||
with pytest.raises(ConnectionError, match="500"):
|
||||
await overseas_broker.fetch_overseas_rankings("NASD")
|
||||
|
||||
@pytest.mark.asyncio
|
||||
async def test_empty_response_returns_empty(
|
||||
self, overseas_broker: OverseasBroker
|
||||
) -> None:
|
||||
"""Empty output in response should return empty list."""
|
||||
mock_resp = AsyncMock()
|
||||
mock_resp.status = 200
|
||||
mock_resp.json = AsyncMock(return_value={"output": []})
|
||||
|
||||
mock_session = MagicMock()
|
||||
mock_session.get = MagicMock(return_value=_make_async_cm(mock_resp))
|
||||
|
||||
_setup_broker_mocks(overseas_broker, mock_session)
|
||||
|
||||
result = await overseas_broker.fetch_overseas_rankings("NASD")
|
||||
assert result == []
|
||||
|
||||
@pytest.mark.asyncio
|
||||
async def test_ranking_disabled_returns_empty(
|
||||
self, overseas_broker: OverseasBroker
|
||||
) -> None:
|
||||
"""When OVERSEAS_RANKING_ENABLED=False, should return empty immediately."""
|
||||
overseas_broker._broker._settings.OVERSEAS_RANKING_ENABLED = False
|
||||
result = await overseas_broker.fetch_overseas_rankings("NASD")
|
||||
assert result == []
|
||||
|
||||
@pytest.mark.asyncio
|
||||
async def test_limit_truncates_results(
|
||||
self, overseas_broker: OverseasBroker
|
||||
) -> None:
|
||||
"""Results should be truncated to the specified limit."""
|
||||
rows = [{"symb": f"SYM{i}"} for i in range(20)]
|
||||
mock_resp = AsyncMock()
|
||||
mock_resp.status = 200
|
||||
mock_resp.json = AsyncMock(return_value={"output": rows})
|
||||
|
||||
mock_session = MagicMock()
|
||||
mock_session.get = MagicMock(return_value=_make_async_cm(mock_resp))
|
||||
|
||||
_setup_broker_mocks(overseas_broker, mock_session)
|
||||
|
||||
result = await overseas_broker.fetch_overseas_rankings("NASD", limit=5)
|
||||
assert len(result) == 5
|
||||
|
||||
@pytest.mark.asyncio
|
||||
async def test_network_error_raises(
|
||||
self, overseas_broker: OverseasBroker
|
||||
) -> None:
|
||||
"""Network errors should raise ConnectionError."""
|
||||
cm = MagicMock()
|
||||
cm.__aenter__ = AsyncMock(side_effect=aiohttp.ClientError("timeout"))
|
||||
cm.__aexit__ = AsyncMock(return_value=False)
|
||||
|
||||
mock_session = MagicMock()
|
||||
mock_session.get = MagicMock(return_value=cm)
|
||||
|
||||
_setup_broker_mocks(overseas_broker, mock_session)
|
||||
|
||||
with pytest.raises(ConnectionError, match="Network error"):
|
||||
await overseas_broker.fetch_overseas_rankings("NASD")
|
||||
|
||||
@pytest.mark.asyncio
|
||||
async def test_exchange_code_mapping_applied(
|
||||
self, overseas_broker: OverseasBroker
|
||||
) -> None:
|
||||
"""All major exchanges should use mapped codes in API params."""
|
||||
for original, mapped in [("NASD", "NAS"), ("NYSE", "NYS"), ("AMEX", "AMS")]:
|
||||
mock_resp = AsyncMock()
|
||||
mock_resp.status = 200
|
||||
mock_resp.json = AsyncMock(return_value={"output": [{"symb": "X"}]})
|
||||
|
||||
mock_session = MagicMock()
|
||||
mock_session.get = MagicMock(return_value=_make_async_cm(mock_resp))
|
||||
|
||||
_setup_broker_mocks(overseas_broker, mock_session)
|
||||
|
||||
await overseas_broker.fetch_overseas_rankings(original)
|
||||
|
||||
call_params = mock_session.get.call_args[1]["params"]
|
||||
assert call_params["EXCD"] == mapped, f"{original} should map to {mapped}"
|
||||
|
||||
|
||||
class TestGetOverseasPrice:
|
||||
"""Test get_overseas_price method."""
|
||||
|
||||
@pytest.mark.asyncio
|
||||
async def test_success(self, overseas_broker: OverseasBroker) -> None:
|
||||
"""Successful price fetch returns JSON data."""
|
||||
mock_resp = AsyncMock()
|
||||
mock_resp.status = 200
|
||||
mock_resp.json = AsyncMock(return_value={"output": {"last": "150.00"}})
|
||||
|
||||
mock_session = MagicMock()
|
||||
mock_session.get = MagicMock(return_value=_make_async_cm(mock_resp))
|
||||
|
||||
_setup_broker_mocks(overseas_broker, mock_session)
|
||||
overseas_broker._broker._auth_headers = AsyncMock(return_value={"authorization": "Bearer t"})
|
||||
|
||||
result = await overseas_broker.get_overseas_price("NASD", "AAPL")
|
||||
assert result["output"]["last"] == "150.00"
|
||||
|
||||
call_args = mock_session.get.call_args
|
||||
params = call_args[1]["params"]
|
||||
assert params["EXCD"] == "NASD"
|
||||
assert params["SYMB"] == "AAPL"
|
||||
|
||||
@pytest.mark.asyncio
|
||||
async def test_http_error_raises(self, overseas_broker: OverseasBroker) -> None:
|
||||
"""Non-200 response should raise ConnectionError."""
|
||||
mock_resp = AsyncMock()
|
||||
mock_resp.status = 400
|
||||
mock_resp.text = AsyncMock(return_value="Bad Request")
|
||||
|
||||
mock_session = MagicMock()
|
||||
mock_session.get = MagicMock(return_value=_make_async_cm(mock_resp))
|
||||
|
||||
_setup_broker_mocks(overseas_broker, mock_session)
|
||||
|
||||
with pytest.raises(ConnectionError, match="get_overseas_price failed"):
|
||||
await overseas_broker.get_overseas_price("NASD", "AAPL")
|
||||
|
||||
@pytest.mark.asyncio
|
||||
async def test_network_error_raises(self, overseas_broker: OverseasBroker) -> None:
|
||||
"""Network error should raise ConnectionError."""
|
||||
cm = MagicMock()
|
||||
cm.__aenter__ = AsyncMock(side_effect=aiohttp.ClientError("conn refused"))
|
||||
cm.__aexit__ = AsyncMock(return_value=False)
|
||||
|
||||
mock_session = MagicMock()
|
||||
mock_session.get = MagicMock(return_value=cm)
|
||||
|
||||
_setup_broker_mocks(overseas_broker, mock_session)
|
||||
|
||||
with pytest.raises(ConnectionError, match="Network error"):
|
||||
await overseas_broker.get_overseas_price("NASD", "AAPL")
|
||||
|
||||
|
||||
class TestGetOverseasBalance:
|
||||
"""Test get_overseas_balance method."""
|
||||
|
||||
@pytest.mark.asyncio
|
||||
async def test_success(self, overseas_broker: OverseasBroker) -> None:
|
||||
"""Successful balance fetch returns JSON data."""
|
||||
mock_resp = AsyncMock()
|
||||
mock_resp.status = 200
|
||||
mock_resp.json = AsyncMock(return_value={"output1": [{"pdno": "AAPL"}]})
|
||||
|
||||
mock_session = MagicMock()
|
||||
mock_session.get = MagicMock(return_value=_make_async_cm(mock_resp))
|
||||
|
||||
_setup_broker_mocks(overseas_broker, mock_session)
|
||||
|
||||
result = await overseas_broker.get_overseas_balance("NASD")
|
||||
assert result["output1"][0]["pdno"] == "AAPL"
|
||||
|
||||
@pytest.mark.asyncio
|
||||
async def test_http_error_raises(self, overseas_broker: OverseasBroker) -> None:
|
||||
"""Non-200 should raise ConnectionError."""
|
||||
mock_resp = AsyncMock()
|
||||
mock_resp.status = 500
|
||||
mock_resp.text = AsyncMock(return_value="Server Error")
|
||||
|
||||
mock_session = MagicMock()
|
||||
mock_session.get = MagicMock(return_value=_make_async_cm(mock_resp))
|
||||
|
||||
_setup_broker_mocks(overseas_broker, mock_session)
|
||||
|
||||
with pytest.raises(ConnectionError, match="get_overseas_balance failed"):
|
||||
await overseas_broker.get_overseas_balance("NASD")
|
||||
|
||||
@pytest.mark.asyncio
|
||||
async def test_network_error_raises(self, overseas_broker: OverseasBroker) -> None:
|
||||
"""Network error should raise ConnectionError."""
|
||||
cm = MagicMock()
|
||||
cm.__aenter__ = AsyncMock(side_effect=TimeoutError("timeout"))
|
||||
cm.__aexit__ = AsyncMock(return_value=False)
|
||||
|
||||
mock_session = MagicMock()
|
||||
mock_session.get = MagicMock(return_value=cm)
|
||||
|
||||
_setup_broker_mocks(overseas_broker, mock_session)
|
||||
|
||||
with pytest.raises(ConnectionError, match="Network error"):
|
||||
await overseas_broker.get_overseas_balance("NYSE")
|
||||
|
||||
|
||||
class TestSendOverseasOrder:
|
||||
"""Test send_overseas_order method."""
|
||||
|
||||
@pytest.mark.asyncio
|
||||
async def test_buy_market_order(self, overseas_broker: OverseasBroker) -> None:
|
||||
"""Market buy order should use VTTT1002U and ORD_DVSN=01."""
|
||||
mock_resp = AsyncMock()
|
||||
mock_resp.status = 200
|
||||
mock_resp.json = AsyncMock(return_value={"rt_cd": "0"})
|
||||
|
||||
mock_session = MagicMock()
|
||||
mock_session.post = MagicMock(return_value=_make_async_cm(mock_resp))
|
||||
|
||||
_setup_broker_mocks(overseas_broker, mock_session)
|
||||
overseas_broker._broker._get_hash_key = AsyncMock(return_value="hashval")
|
||||
|
||||
result = await overseas_broker.send_overseas_order("NASD", "AAPL", "BUY", 10)
|
||||
assert result["rt_cd"] == "0"
|
||||
|
||||
# Verify BUY TR_ID
|
||||
overseas_broker._broker._auth_headers.assert_called_with("VTTT1002U")
|
||||
|
||||
call_args = mock_session.post.call_args
|
||||
body = call_args[1]["json"]
|
||||
assert body["ORD_DVSN"] == "01" # market order
|
||||
assert body["OVRS_ORD_UNPR"] == "0"
|
||||
|
||||
@pytest.mark.asyncio
|
||||
async def test_sell_limit_order(self, overseas_broker: OverseasBroker) -> None:
|
||||
"""Limit sell order should use VTTT1006U and ORD_DVSN=00."""
|
||||
mock_resp = AsyncMock()
|
||||
mock_resp.status = 200
|
||||
mock_resp.json = AsyncMock(return_value={"rt_cd": "0"})
|
||||
|
||||
mock_session = MagicMock()
|
||||
mock_session.post = MagicMock(return_value=_make_async_cm(mock_resp))
|
||||
|
||||
_setup_broker_mocks(overseas_broker, mock_session)
|
||||
overseas_broker._broker._get_hash_key = AsyncMock(return_value="hashval")
|
||||
|
||||
result = await overseas_broker.send_overseas_order("NYSE", "MSFT", "SELL", 5, price=350.0)
|
||||
assert result["rt_cd"] == "0"
|
||||
|
||||
overseas_broker._broker._auth_headers.assert_called_with("VTTT1006U")
|
||||
|
||||
call_args = mock_session.post.call_args
|
||||
body = call_args[1]["json"]
|
||||
assert body["ORD_DVSN"] == "00" # limit order
|
||||
assert body["OVRS_ORD_UNPR"] == "350.0"
|
||||
|
||||
@pytest.mark.asyncio
|
||||
async def test_order_http_error_raises(self, overseas_broker: OverseasBroker) -> None:
|
||||
"""Non-200 should raise ConnectionError."""
|
||||
mock_resp = AsyncMock()
|
||||
mock_resp.status = 400
|
||||
mock_resp.text = AsyncMock(return_value="Bad Request")
|
||||
|
||||
mock_session = MagicMock()
|
||||
mock_session.post = MagicMock(return_value=_make_async_cm(mock_resp))
|
||||
|
||||
_setup_broker_mocks(overseas_broker, mock_session)
|
||||
overseas_broker._broker._get_hash_key = AsyncMock(return_value="hashval")
|
||||
|
||||
with pytest.raises(ConnectionError, match="send_overseas_order failed"):
|
||||
await overseas_broker.send_overseas_order("NASD", "AAPL", "BUY", 1)
|
||||
|
||||
@pytest.mark.asyncio
|
||||
async def test_order_network_error_raises(self, overseas_broker: OverseasBroker) -> None:
|
||||
"""Network error should raise ConnectionError."""
|
||||
cm = MagicMock()
|
||||
cm.__aenter__ = AsyncMock(side_effect=aiohttp.ClientError("conn reset"))
|
||||
cm.__aexit__ = AsyncMock(return_value=False)
|
||||
|
||||
mock_session = MagicMock()
|
||||
mock_session.post = MagicMock(return_value=cm)
|
||||
|
||||
_setup_broker_mocks(overseas_broker, mock_session)
|
||||
overseas_broker._broker._get_hash_key = AsyncMock(return_value="hashval")
|
||||
|
||||
with pytest.raises(ConnectionError, match="Network error"):
|
||||
await overseas_broker.send_overseas_order("NASD", "TSLA", "SELL", 2)
|
||||
|
||||
|
||||
class TestGetCurrencyCode:
|
||||
"""Test _get_currency_code mapping."""
|
||||
|
||||
def test_us_exchanges(self, overseas_broker: OverseasBroker) -> None:
|
||||
assert overseas_broker._get_currency_code("NASD") == "USD"
|
||||
assert overseas_broker._get_currency_code("NYSE") == "USD"
|
||||
assert overseas_broker._get_currency_code("AMEX") == "USD"
|
||||
|
||||
def test_japan(self, overseas_broker: OverseasBroker) -> None:
|
||||
assert overseas_broker._get_currency_code("TSE") == "JPY"
|
||||
|
||||
def test_hong_kong(self, overseas_broker: OverseasBroker) -> None:
|
||||
assert overseas_broker._get_currency_code("SEHK") == "HKD"
|
||||
|
||||
def test_china(self, overseas_broker: OverseasBroker) -> None:
|
||||
assert overseas_broker._get_currency_code("SHAA") == "CNY"
|
||||
assert overseas_broker._get_currency_code("SZAA") == "CNY"
|
||||
|
||||
def test_vietnam(self, overseas_broker: OverseasBroker) -> None:
|
||||
assert overseas_broker._get_currency_code("HNX") == "VND"
|
||||
assert overseas_broker._get_currency_code("HSX") == "VND"
|
||||
|
||||
def test_unknown_defaults_usd(self, overseas_broker: OverseasBroker) -> None:
|
||||
assert overseas_broker._get_currency_code("UNKNOWN") == "USD"
|
||||
|
||||
|
||||
class TestExtractRankingRows:
|
||||
"""Test _extract_ranking_rows helper."""
|
||||
|
||||
def test_output_key(self, overseas_broker: OverseasBroker) -> None:
|
||||
data = {"output": [{"a": 1}, {"b": 2}]}
|
||||
assert overseas_broker._extract_ranking_rows(data) == [{"a": 1}, {"b": 2}]
|
||||
|
||||
def test_output1_key(self, overseas_broker: OverseasBroker) -> None:
|
||||
data = {"output1": [{"c": 3}]}
|
||||
assert overseas_broker._extract_ranking_rows(data) == [{"c": 3}]
|
||||
|
||||
def test_output2_key(self, overseas_broker: OverseasBroker) -> None:
|
||||
data = {"output2": [{"d": 4}]}
|
||||
assert overseas_broker._extract_ranking_rows(data) == [{"d": 4}]
|
||||
|
||||
def test_no_list_returns_empty(self, overseas_broker: OverseasBroker) -> None:
|
||||
data = {"output": "not a list"}
|
||||
assert overseas_broker._extract_ranking_rows(data) == []
|
||||
|
||||
def test_empty_data(self, overseas_broker: OverseasBroker) -> None:
|
||||
assert overseas_broker._extract_ranking_rows({}) == []
|
||||
|
||||
def test_filters_non_dict_rows(self, overseas_broker: OverseasBroker) -> None:
|
||||
data = {"output": [{"a": 1}, "invalid", {"b": 2}]}
|
||||
assert overseas_broker._extract_ranking_rows(data) == [{"a": 1}, {"b": 2}]
|
||||
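The `_make_async_cm()` helper at the top of tests/test_overseas_broker.py handles the one awkward part of mocking aiohttp: `session.get(...)` returns an async context manager, not an awaitable response, so a plain AsyncMock is not enough. A standalone sketch of the same pattern is below; the `fetch_price` coroutine and the URL are illustrative, not part of the codebase.

```python
# Standalone illustration of the async-context-manager mocking pattern used above.
import asyncio
from unittest.mock import AsyncMock, MagicMock


async def fetch_price(session) -> dict:
    # Mirrors how production code typically consumes aiohttp sessions.
    async with session.get("https://example.invalid/price") as resp:
        return await resp.json()


def make_async_cm(mock_resp: AsyncMock) -> MagicMock:
    # session.get() must return an object usable with "async with".
    cm = MagicMock()
    cm.__aenter__ = AsyncMock(return_value=mock_resp)
    cm.__aexit__ = AsyncMock(return_value=False)
    return cm


def test_fetch_price() -> None:
    resp = AsyncMock()
    resp.status = 200
    resp.json = AsyncMock(return_value={"last": "150.00"})

    session = MagicMock()
    session.get = MagicMock(return_value=make_async_cm(resp))

    assert asyncio.run(fetch_price(session)) == {"last": "150.00"}


test_fetch_price()
```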
tests/test_playbook_store.py (new file, 289 lines)
@@ -0,0 +1,289 @@
|
||||
"""Tests for playbook persistence (PlaybookStore + DB schema)."""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
from datetime import date
|
||||
|
||||
import pytest
|
||||
|
||||
from src.db import init_db
|
||||
from src.strategy.models import (
|
||||
DayPlaybook,
|
||||
GlobalRule,
|
||||
MarketOutlook,
|
||||
PlaybookStatus,
|
||||
ScenarioAction,
|
||||
StockCondition,
|
||||
StockPlaybook,
|
||||
StockScenario,
|
||||
)
|
||||
from src.strategy.playbook_store import PlaybookStore
|
||||
|
||||
|
||||
@pytest.fixture
|
||||
def conn():
|
||||
"""Create an in-memory DB with schema."""
|
||||
connection = init_db(":memory:")
|
||||
yield connection
|
||||
connection.close()
|
||||
|
||||
|
||||
@pytest.fixture
|
||||
def store(conn) -> PlaybookStore:
|
||||
return PlaybookStore(conn)
|
||||
|
||||
|
||||
def _make_playbook(
|
||||
target_date: date = date(2026, 2, 8),
|
||||
market: str = "KR",
|
||||
outlook: MarketOutlook = MarketOutlook.NEUTRAL,
|
||||
stock_codes: list[str] | None = None,
|
||||
) -> DayPlaybook:
|
||||
"""Create a test playbook with sensible defaults."""
|
||||
if stock_codes is None:
|
||||
stock_codes = ["005930"]
|
||||
return DayPlaybook(
|
||||
date=target_date,
|
||||
market=market,
|
||||
market_outlook=outlook,
|
||||
token_count=150,
|
||||
stock_playbooks=[
|
||||
StockPlaybook(
|
||||
stock_code=code,
|
||||
scenarios=[
|
||||
StockScenario(
|
||||
condition=StockCondition(rsi_below=30.0),
|
||||
action=ScenarioAction.BUY,
|
||||
confidence=85,
|
||||
rationale=f"Oversold bounce for {code}",
|
||||
),
|
||||
],
|
||||
)
|
||||
for code in stock_codes
|
||||
],
|
||||
global_rules=[
|
||||
GlobalRule(
|
||||
condition="portfolio_pnl_pct < -2.0",
|
||||
action=ScenarioAction.REDUCE_ALL,
|
||||
rationale="Near circuit breaker",
|
||||
),
|
||||
],
|
||||
)
|
||||
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# Schema
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
|
||||
class TestSchema:
|
||||
def test_playbooks_table_exists(self, conn) -> None:
|
||||
row = conn.execute(
|
||||
"SELECT name FROM sqlite_master WHERE type='table' AND name='playbooks'"
|
||||
).fetchone()
|
||||
assert row is not None
|
||||
|
||||
def test_unique_constraint(self, store: PlaybookStore) -> None:
|
||||
pb = _make_playbook()
|
||||
store.save(pb)
|
||||
# Saving again for same date+market should replace, not error
|
||||
pb2 = _make_playbook(stock_codes=["005930", "000660"])
|
||||
store.save(pb2)
|
||||
loaded = store.load(date(2026, 2, 8), "KR")
|
||||
assert loaded is not None
|
||||
assert loaded.stock_count == 2
|
||||
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# Save / Load
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
|
||||
class TestSaveLoad:
|
||||
def test_save_and_load(self, store: PlaybookStore) -> None:
|
||||
pb = _make_playbook()
|
||||
row_id = store.save(pb)
|
||||
        assert row_id > 0

        loaded = store.load(date(2026, 2, 8), "KR")
        assert loaded is not None
        assert loaded.date == date(2026, 2, 8)
        assert loaded.market == "KR"
        assert loaded.stock_count == 1
        assert loaded.scenario_count == 1

    def test_load_not_found(self, store: PlaybookStore) -> None:
        result = store.load(date(2026, 1, 1), "KR")
        assert result is None

    def test_save_preserves_all_fields(self, store: PlaybookStore) -> None:
        pb = _make_playbook(
            outlook=MarketOutlook.BULLISH,
            stock_codes=["005930", "AAPL"],
        )
        store.save(pb)
        loaded = store.load(date(2026, 2, 8), "KR")
        assert loaded is not None
        assert loaded.market_outlook == MarketOutlook.BULLISH
        assert loaded.stock_count == 2
        assert loaded.global_rules[0].action == ScenarioAction.REDUCE_ALL
        assert loaded.token_count == 150

    def test_save_different_markets(self, store: PlaybookStore) -> None:
        kr = _make_playbook(market="KR")
        us = _make_playbook(market="US", stock_codes=["AAPL"])
        store.save(kr)
        store.save(us)

        kr_loaded = store.load(date(2026, 2, 8), "KR")
        us_loaded = store.load(date(2026, 2, 8), "US")
        assert kr_loaded is not None
        assert us_loaded is not None
        assert kr_loaded.market == "KR"
        assert us_loaded.market == "US"
        assert kr_loaded.stock_playbooks[0].stock_code == "005930"
        assert us_loaded.stock_playbooks[0].stock_code == "AAPL"

    def test_save_different_dates(self, store: PlaybookStore) -> None:
        d1 = _make_playbook(target_date=date(2026, 2, 7))
        d2 = _make_playbook(target_date=date(2026, 2, 8))
        store.save(d1)
        store.save(d2)

        assert store.load(date(2026, 2, 7), "KR") is not None
        assert store.load(date(2026, 2, 8), "KR") is not None

    def test_replace_updates_data(self, store: PlaybookStore) -> None:
        pb1 = _make_playbook(outlook=MarketOutlook.BEARISH)
        store.save(pb1)

        pb2 = _make_playbook(outlook=MarketOutlook.BULLISH)
        store.save(pb2)

        loaded = store.load(date(2026, 2, 8), "KR")
        assert loaded is not None
        assert loaded.market_outlook == MarketOutlook.BULLISH


# ---------------------------------------------------------------------------
# Status
# ---------------------------------------------------------------------------


class TestStatus:
    def test_get_status(self, store: PlaybookStore) -> None:
        store.save(_make_playbook())
        status = store.get_status(date(2026, 2, 8), "KR")
        assert status == PlaybookStatus.READY

    def test_get_status_not_found(self, store: PlaybookStore) -> None:
        assert store.get_status(date(2026, 1, 1), "KR") is None

    def test_update_status(self, store: PlaybookStore) -> None:
        store.save(_make_playbook())
        updated = store.update_status(date(2026, 2, 8), "KR", PlaybookStatus.EXPIRED)
        assert updated is True

        status = store.get_status(date(2026, 2, 8), "KR")
        assert status == PlaybookStatus.EXPIRED

    def test_update_status_not_found(self, store: PlaybookStore) -> None:
        updated = store.update_status(date(2026, 1, 1), "KR", PlaybookStatus.FAILED)
        assert updated is False


# ---------------------------------------------------------------------------
# Match count
# ---------------------------------------------------------------------------


class TestMatchCount:
    def test_increment_match_count(self, store: PlaybookStore) -> None:
        store.save(_make_playbook())
        store.increment_match_count(date(2026, 2, 8), "KR")
        store.increment_match_count(date(2026, 2, 8), "KR")

        stats = store.get_stats(date(2026, 2, 8), "KR")
        assert stats is not None
        assert stats["match_count"] == 2

    def test_increment_not_found(self, store: PlaybookStore) -> None:
        result = store.increment_match_count(date(2026, 1, 1), "KR")
        assert result is False


# ---------------------------------------------------------------------------
# Stats
# ---------------------------------------------------------------------------


class TestStats:
    def test_get_stats(self, store: PlaybookStore) -> None:
        store.save(_make_playbook())
        stats = store.get_stats(date(2026, 2, 8), "KR")
        assert stats is not None
        assert stats["status"] == "ready"
        assert stats["token_count"] == 150
        assert stats["scenario_count"] == 1
        assert stats["match_count"] == 0
        assert stats["generated_at"] != ""

    def test_get_stats_not_found(self, store: PlaybookStore) -> None:
        assert store.get_stats(date(2026, 1, 1), "KR") is None


# ---------------------------------------------------------------------------
# List recent
# ---------------------------------------------------------------------------


class TestListRecent:
    def test_list_recent(self, store: PlaybookStore) -> None:
        for day in range(5, 10):
            store.save(_make_playbook(target_date=date(2026, 2, day)))
        results = store.list_recent(market="KR", limit=3)
        assert len(results) == 3
        # Most recent first
        assert results[0]["date"] == "2026-02-09"
        assert results[2]["date"] == "2026-02-07"

    def test_list_recent_all_markets(self, store: PlaybookStore) -> None:
        store.save(_make_playbook(market="KR"))
        store.save(_make_playbook(market="US", stock_codes=["AAPL"]))
        results = store.list_recent(market=None, limit=10)
        assert len(results) == 2

    def test_list_recent_empty(self, store: PlaybookStore) -> None:
        results = store.list_recent(market="KR")
        assert results == []

    def test_list_recent_filter_by_market(self, store: PlaybookStore) -> None:
        store.save(_make_playbook(market="KR"))
        store.save(_make_playbook(market="US", stock_codes=["AAPL"]))
        kr_only = store.list_recent(market="KR")
        assert len(kr_only) == 1
        assert kr_only[0]["market"] == "KR"


# ---------------------------------------------------------------------------
# Delete
# ---------------------------------------------------------------------------


class TestDelete:
    def test_delete(self, store: PlaybookStore) -> None:
        store.save(_make_playbook())
        deleted = store.delete(date(2026, 2, 8), "KR")
        assert deleted is True
        assert store.load(date(2026, 2, 8), "KR") is None

    def test_delete_not_found(self, store: PlaybookStore) -> None:
        deleted = store.delete(date(2026, 1, 1), "KR")
        assert deleted is False

    def test_delete_one_market_keeps_other(self, store: PlaybookStore) -> None:
        store.save(_make_playbook(market="KR"))
        store.save(_make_playbook(market="US", stock_codes=["AAPL"]))
        store.delete(date(2026, 2, 8), "KR")
        assert store.load(date(2026, 2, 8), "KR") is None
        assert store.load(date(2026, 2, 8), "US") is not None
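The save/load/delete tests above only constrain the PlaybookStore contract: a playbook is keyed by (date, market), a second save for the same key replaces the first, and delete removes exactly one (date, market) pair while leaving the other market's row intact. The sketch below shows a SQLite layout that would satisfy that contract; the table name, column names, and the JSON payload column are assumptions for illustration, not the repository's actual schema.

```python
# Minimal sketch (assumed schema, not the project's actual PlaybookStore).
# The tests above only require that (date, market) uniquely identifies a
# playbook and that a second save() replaces the first.
import json
import sqlite3

SCHEMA = """
CREATE TABLE IF NOT EXISTS playbooks (
    date TEXT NOT NULL,
    market TEXT NOT NULL,
    status TEXT NOT NULL DEFAULT 'ready',
    match_count INTEGER NOT NULL DEFAULT 0,
    payload TEXT NOT NULL,  -- serialized DayPlaybook
    UNIQUE (date, market)
)
"""


def save(conn: sqlite3.Connection, day: str, market: str, payload: dict) -> int:
    """Insert or replace the playbook for (day, market); returns a row id."""
    cur = conn.execute(
        "INSERT INTO playbooks (date, market, payload) VALUES (?, ?, ?) "
        "ON CONFLICT (date, market) DO UPDATE SET payload = excluded.payload",
        (day, market, json.dumps(payload)),
    )
    return cur.lastrowid or 0


conn = sqlite3.connect(":memory:")
conn.execute(SCHEMA)
row_id = save(conn, "2026-02-08", "KR", {"market_outlook": "neutral"})
assert row_id > 0
```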
659 tests/test_pre_market_planner.py (Normal file)
@@ -0,0 +1,659 @@
|
||||
"""Tests for PreMarketPlanner — Gemini prompt builder + response parser."""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
import json
|
||||
from datetime import date
|
||||
from unittest.mock import AsyncMock, MagicMock
|
||||
|
||||
import pytest
|
||||
|
||||
from src.analysis.smart_scanner import ScanCandidate
|
||||
from src.brain.context_selector import DecisionType
|
||||
from src.brain.gemini_client import TradeDecision
|
||||
from src.config import Settings
|
||||
from src.context.store import ContextLayer
|
||||
from src.strategy.models import (
|
||||
CrossMarketContext,
|
||||
DayPlaybook,
|
||||
MarketOutlook,
|
||||
ScenarioAction,
|
||||
)
|
||||
from src.strategy.pre_market_planner import PreMarketPlanner
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# Fixtures
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
|
||||
def _candidate(
|
||||
code: str = "005930",
|
||||
name: str = "Samsung",
|
||||
price: float = 71000,
|
||||
rsi: float = 28.5,
|
||||
volume_ratio: float = 3.2,
|
||||
signal: str = "oversold",
|
||||
score: float = 82.0,
|
||||
) -> ScanCandidate:
|
||||
return ScanCandidate(
|
||||
stock_code=code,
|
||||
name=name,
|
||||
price=price,
|
||||
volume=1_500_000,
|
||||
volume_ratio=volume_ratio,
|
||||
rsi=rsi,
|
||||
signal=signal,
|
||||
score=score,
|
||||
)
|
||||
|
||||
|
||||
def _gemini_response_json(
|
||||
outlook: str = "neutral_to_bullish",
|
||||
stocks: list[dict] | None = None,
|
||||
global_rules: list[dict] | None = None,
|
||||
) -> str:
|
||||
"""Build a valid Gemini JSON response."""
|
||||
if stocks is None:
|
||||
stocks = [
|
||||
{
|
||||
"stock_code": "005930",
|
||||
"scenarios": [
|
||||
{
|
||||
"condition": {"rsi_below": 30, "volume_ratio_above": 2.5},
|
||||
"action": "BUY",
|
||||
"confidence": 85,
|
||||
"allocation_pct": 15.0,
|
||||
"stop_loss_pct": -2.0,
|
||||
"take_profit_pct": 4.0,
|
||||
"rationale": "Oversold bounce with high volume",
|
||||
}
|
||||
],
|
||||
}
|
||||
]
|
||||
if global_rules is None:
|
||||
global_rules = [
|
||||
{
|
||||
"condition": "portfolio_pnl_pct < -2.0",
|
||||
"action": "REDUCE_ALL",
|
||||
"rationale": "Near circuit breaker",
|
||||
}
|
||||
]
|
||||
return json.dumps(
|
||||
{"market_outlook": outlook, "global_rules": global_rules, "stocks": stocks}
|
||||
)
|
||||
|
||||
|
||||
def _make_planner(
|
||||
gemini_response: str = "",
|
||||
token_count: int = 200,
|
||||
context_data: dict | None = None,
|
||||
scorecard_data: dict | None = None,
|
||||
scorecard_map: dict[tuple[str, str, str], dict | None] | None = None,
|
||||
) -> PreMarketPlanner:
|
||||
"""Create a PreMarketPlanner with mocked dependencies."""
|
||||
if not gemini_response:
|
||||
gemini_response = _gemini_response_json()
|
||||
|
||||
# Mock GeminiClient
|
||||
gemini = AsyncMock()
|
||||
gemini.decide = AsyncMock(
|
||||
return_value=TradeDecision(
|
||||
action="HOLD",
|
||||
confidence=0,
|
||||
rationale=gemini_response,
|
||||
token_count=token_count,
|
||||
)
|
||||
)
|
||||
|
||||
# Mock ContextStore
|
||||
store = MagicMock()
|
||||
if scorecard_map is not None:
|
||||
store.get_context = MagicMock(
|
||||
side_effect=lambda layer, timeframe, key: scorecard_map.get(
|
||||
(layer.value if hasattr(layer, "value") else layer, timeframe, key)
|
||||
)
|
||||
)
|
||||
else:
|
||||
store.get_context = MagicMock(return_value=scorecard_data)
|
||||
|
||||
# Mock ContextSelector
|
||||
selector = MagicMock()
|
||||
selector.select_layers = MagicMock(
|
||||
return_value=[ContextLayer.L7_REALTIME, ContextLayer.L6_DAILY]
|
||||
)
|
||||
selector.get_context_data = MagicMock(return_value=context_data or {})
|
||||
|
||||
settings = Settings(
|
||||
KIS_APP_KEY="test",
|
||||
KIS_APP_SECRET="test",
|
||||
KIS_ACCOUNT_NO="12345678-01",
|
||||
GEMINI_API_KEY="test",
|
||||
)
|
||||
|
||||
return PreMarketPlanner(gemini, store, selector, settings)
|
||||
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# generate_playbook
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
|
||||
class TestGeneratePlaybook:
|
||||
@pytest.mark.asyncio
|
||||
async def test_basic_generation(self) -> None:
|
||||
planner = _make_planner()
|
||||
candidates = [_candidate()]
|
||||
|
||||
pb = await planner.generate_playbook("KR", candidates, today=date(2026, 2, 8))
|
||||
|
||||
assert isinstance(pb, DayPlaybook)
|
||||
assert pb.market == "KR"
|
||||
assert pb.stock_count == 1
|
||||
assert pb.scenario_count == 1
|
||||
assert pb.market_outlook == MarketOutlook.NEUTRAL_TO_BULLISH
|
||||
assert pb.token_count == 200
|
||||
|
||||
@pytest.mark.asyncio
|
||||
async def test_empty_candidates_returns_empty_playbook(self) -> None:
|
||||
planner = _make_planner()
|
||||
|
||||
pb = await planner.generate_playbook("KR", [], today=date(2026, 2, 8))
|
||||
|
||||
assert pb.stock_count == 0
|
||||
assert pb.scenario_count == 0
|
||||
assert pb.market_outlook == MarketOutlook.NEUTRAL
|
||||
|
||||
@pytest.mark.asyncio
|
||||
async def test_gemini_failure_returns_defensive(self) -> None:
|
||||
planner = _make_planner()
|
||||
planner._gemini.decide = AsyncMock(side_effect=RuntimeError("API timeout"))
|
||||
candidates = [_candidate()]
|
||||
|
||||
pb = await planner.generate_playbook("KR", candidates, today=date(2026, 2, 8))
|
||||
|
||||
assert pb.default_action == ScenarioAction.HOLD
|
||||
assert pb.market_outlook == MarketOutlook.NEUTRAL_TO_BEARISH
|
||||
assert pb.stock_count == 1
|
||||
# Defensive playbook has stop-loss scenarios
|
||||
assert pb.stock_playbooks[0].scenarios[0].action == ScenarioAction.SELL
|
||||
|
||||
@pytest.mark.asyncio
|
||||
async def test_gemini_failure_empty_when_defensive_disabled(self) -> None:
|
||||
planner = _make_planner()
|
||||
planner._settings.DEFENSIVE_PLAYBOOK_ON_FAILURE = False
|
||||
planner._gemini.decide = AsyncMock(side_effect=RuntimeError("fail"))
|
||||
candidates = [_candidate()]
|
||||
|
||||
pb = await planner.generate_playbook("KR", candidates, today=date(2026, 2, 8))
|
||||
|
||||
assert pb.stock_count == 0
|
||||
|
||||
@pytest.mark.asyncio
|
||||
async def test_multiple_candidates(self) -> None:
|
||||
stocks = [
|
||||
{
|
||||
"stock_code": "005930",
|
||||
"scenarios": [
|
||||
{
|
||||
"condition": {"rsi_below": 30},
|
||||
"action": "BUY",
|
||||
"confidence": 85,
|
||||
"rationale": "Oversold",
|
||||
}
|
||||
],
|
||||
},
|
||||
{
|
||||
"stock_code": "AAPL",
|
||||
"scenarios": [
|
||||
{
|
||||
"condition": {"rsi_above": 75},
|
||||
"action": "SELL",
|
||||
"confidence": 80,
|
||||
"rationale": "Overbought",
|
||||
}
|
||||
],
|
||||
},
|
||||
]
|
||||
planner = _make_planner(gemini_response=_gemini_response_json(stocks=stocks))
|
||||
candidates = [_candidate(), _candidate(code="AAPL", name="Apple")]
|
||||
|
||||
pb = await planner.generate_playbook("US", candidates, today=date(2026, 2, 8))
|
||||
|
||||
assert pb.stock_count == 2
|
||||
codes = [sp.stock_code for sp in pb.stock_playbooks]
|
||||
assert "005930" in codes
|
||||
assert "AAPL" in codes
|
||||
|
||||
@pytest.mark.asyncio
|
||||
async def test_unknown_stock_in_response_skipped(self) -> None:
|
||||
stocks = [
|
||||
{
|
||||
"stock_code": "005930",
|
||||
"scenarios": [
|
||||
{
|
||||
"condition": {"rsi_below": 30},
|
||||
"action": "BUY",
|
||||
"confidence": 85,
|
||||
"rationale": "ok",
|
||||
}
|
||||
],
|
||||
},
|
||||
{
|
||||
"stock_code": "UNKNOWN",
|
||||
"scenarios": [
|
||||
{
|
||||
"condition": {"rsi_below": 20},
|
||||
"action": "BUY",
|
||||
"confidence": 90,
|
||||
"rationale": "bad",
|
||||
}
|
||||
],
|
||||
},
|
||||
]
|
||||
planner = _make_planner(gemini_response=_gemini_response_json(stocks=stocks))
|
||||
candidates = [_candidate()] # Only 005930
|
||||
|
||||
pb = await planner.generate_playbook("KR", candidates, today=date(2026, 2, 8))
|
||||
|
||||
assert pb.stock_count == 1
|
||||
assert pb.stock_playbooks[0].stock_code == "005930"
|
||||
|
||||
@pytest.mark.asyncio
|
||||
async def test_global_rules_parsed(self) -> None:
|
||||
planner = _make_planner()
|
||||
candidates = [_candidate()]
|
||||
|
||||
pb = await planner.generate_playbook("KR", candidates, today=date(2026, 2, 8))
|
||||
|
||||
assert len(pb.global_rules) == 1
|
||||
assert pb.global_rules[0].action == ScenarioAction.REDUCE_ALL
|
||||
|
||||
@pytest.mark.asyncio
|
||||
async def test_token_count_from_decision(self) -> None:
|
||||
planner = _make_planner(token_count=450)
|
||||
candidates = [_candidate()]
|
||||
|
||||
pb = await planner.generate_playbook("KR", candidates, today=date(2026, 2, 8))
|
||||
|
||||
assert pb.token_count == 450
|
||||
|
||||
@pytest.mark.asyncio
|
||||
async def test_generate_playbook_uses_strategic_context_selector(self) -> None:
|
||||
planner = _make_planner()
|
||||
candidates = [_candidate()]
|
||||
|
||||
await planner.generate_playbook("KR", candidates, today=date(2026, 2, 8))
|
||||
|
||||
planner._context_selector.select_layers.assert_called_once_with(
|
||||
decision_type=DecisionType.STRATEGIC,
|
||||
include_realtime=True,
|
||||
)
|
||||
planner._context_selector.get_context_data.assert_called_once()
|
||||
|
||||
@pytest.mark.asyncio
|
||||
async def test_generate_playbook_injects_self_and_cross_scorecards(self) -> None:
|
||||
scorecard_map = {
|
||||
(ContextLayer.L6_DAILY.value, "2026-02-07", "scorecard_KR"): {
|
||||
"total_pnl": -1.0,
|
||||
"win_rate": 40,
|
||||
"lessons": ["Tighten entries"],
|
||||
},
|
||||
(ContextLayer.L6_DAILY.value, "2026-02-07", "scorecard_US"): {
|
||||
"total_pnl": 1.5,
|
||||
"win_rate": 62,
|
||||
"index_change_pct": 0.9,
|
||||
"lessons": ["Follow momentum"],
|
||||
},
|
||||
}
|
||||
planner = _make_planner(scorecard_map=scorecard_map)
|
||||
|
||||
await planner.generate_playbook("KR", [_candidate()], today=date(2026, 2, 8))
|
||||
|
||||
call_market_data = planner._gemini.decide.call_args.args[0]
|
||||
prompt = call_market_data["prompt_override"]
|
||||
assert "My Market Previous Day (KR)" in prompt
|
||||
assert "Other Market (US)" in prompt
|
||||
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# _parse_response
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
|
||||
class TestParseResponse:
|
||||
def test_parse_full_response(self) -> None:
|
||||
planner = _make_planner()
|
||||
response = _gemini_response_json(outlook="bearish")
|
||||
candidates = [_candidate()]
|
||||
|
||||
pb = planner._parse_response(response, date(2026, 2, 8), "KR", candidates, None)
|
||||
|
||||
assert pb.market_outlook == MarketOutlook.BEARISH
|
||||
assert pb.stock_count == 1
|
||||
assert pb.stock_playbooks[0].scenarios[0].confidence == 85
|
||||
|
||||
def test_parse_with_markdown_fences(self) -> None:
|
||||
planner = _make_planner()
|
||||
response = f"```json\n{_gemini_response_json()}\n```"
|
||||
candidates = [_candidate()]
|
||||
|
||||
pb = planner._parse_response(response, date(2026, 2, 8), "KR", candidates, None)
|
||||
|
||||
assert pb.stock_count == 1
|
||||
|
||||
def test_parse_unknown_outlook_defaults_neutral(self) -> None:
|
||||
planner = _make_planner()
|
||||
response = _gemini_response_json(outlook="super_bullish")
|
||||
candidates = [_candidate()]
|
||||
|
||||
pb = planner._parse_response(response, date(2026, 2, 8), "KR", candidates, None)
|
||||
|
||||
assert pb.market_outlook == MarketOutlook.NEUTRAL
|
||||
|
||||
def test_parse_scenario_with_all_condition_fields(self) -> None:
|
||||
planner = _make_planner()
|
||||
stocks = [
|
||||
{
|
||||
"stock_code": "005930",
|
||||
"scenarios": [
|
||||
{
|
||||
"condition": {
|
||||
"rsi_below": 25,
|
||||
"volume_ratio_above": 3.0,
|
||||
"price_change_pct_below": -2.0,
|
||||
},
|
||||
"action": "BUY",
|
||||
"confidence": 92,
|
||||
"allocation_pct": 20.0,
|
||||
"stop_loss_pct": -3.0,
|
||||
"take_profit_pct": 5.0,
|
||||
"rationale": "Multi-condition entry",
|
||||
}
|
||||
],
|
||||
}
|
||||
]
|
||||
response = _gemini_response_json(stocks=stocks)
|
||||
candidates = [_candidate()]
|
||||
|
||||
pb = planner._parse_response(response, date(2026, 2, 8), "KR", candidates, None)
|
||||
|
||||
sc = pb.stock_playbooks[0].scenarios[0]
|
||||
assert sc.condition.rsi_below == 25
|
||||
assert sc.condition.volume_ratio_above == 3.0
|
||||
assert sc.condition.price_change_pct_below == -2.0
|
||||
assert sc.allocation_pct == 20.0
|
||||
assert sc.stop_loss_pct == -3.0
|
||||
assert sc.take_profit_pct == 5.0
|
||||
|
||||
def test_parse_empty_condition_scenario_skipped(self) -> None:
|
||||
planner = _make_planner()
|
||||
stocks = [
|
||||
{
|
||||
"stock_code": "005930",
|
||||
"scenarios": [
|
||||
{
|
||||
"condition": {},
|
||||
"action": "BUY",
|
||||
"confidence": 85,
|
||||
"rationale": "No conditions",
|
||||
},
|
||||
{
|
||||
"condition": {"rsi_below": 30},
|
||||
"action": "BUY",
|
||||
"confidence": 80,
|
||||
"rationale": "Valid",
|
||||
},
|
||||
],
|
||||
}
|
||||
]
|
||||
response = _gemini_response_json(stocks=stocks)
|
||||
candidates = [_candidate()]
|
||||
|
||||
pb = planner._parse_response(response, date(2026, 2, 8), "KR", candidates, None)
|
||||
|
||||
# Empty condition scenario skipped, valid one kept
|
||||
assert pb.stock_count == 1
|
||||
assert pb.stock_playbooks[0].scenarios[0].confidence == 80
|
||||
|
||||
def test_parse_max_scenarios_enforced(self) -> None:
|
||||
planner = _make_planner()
|
||||
# Settings default MAX_SCENARIOS_PER_STOCK = 5
|
||||
scenarios = [
|
||||
{
|
||||
"condition": {"rsi_below": 20 + i},
|
||||
"action": "BUY",
|
||||
"confidence": 80 + i,
|
||||
"rationale": f"Scenario {i}",
|
||||
}
|
||||
for i in range(8) # 8 scenarios, should be capped to 5
|
||||
]
|
||||
stocks = [{"stock_code": "005930", "scenarios": scenarios}]
|
||||
response = _gemini_response_json(stocks=stocks)
|
||||
candidates = [_candidate()]
|
||||
|
||||
pb = planner._parse_response(response, date(2026, 2, 8), "KR", candidates, None)
|
||||
|
||||
assert len(pb.stock_playbooks[0].scenarios) == 5
|
||||
|
||||
def test_parse_invalid_json_raises(self) -> None:
|
||||
planner = _make_planner()
|
||||
candidates = [_candidate()]
|
||||
|
||||
with pytest.raises(json.JSONDecodeError):
|
||||
planner._parse_response("not json at all", date(2026, 2, 8), "KR", candidates, None)
|
||||
|
||||
def test_parse_cross_market_preserved(self) -> None:
|
||||
planner = _make_planner()
|
||||
response = _gemini_response_json()
|
||||
candidates = [_candidate()]
|
||||
cross = CrossMarketContext(market="US", date="2026-02-07", total_pnl=1.5, win_rate=60)
|
||||
|
||||
pb = planner._parse_response(response, date(2026, 2, 8), "KR", candidates, cross)
|
||||
|
||||
assert pb.cross_market is not None
|
||||
assert pb.cross_market.market == "US"
|
||||
assert pb.cross_market.total_pnl == 1.5
|
||||
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# build_cross_market_context
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
|
||||
class TestBuildCrossMarketContext:
|
||||
def test_kr_reads_us_scorecard(self) -> None:
|
||||
scorecard = {
|
||||
"total_pnl": 2.5,
|
||||
"win_rate": 65,
|
||||
"index_change_pct": 0.8,
|
||||
"lessons": ["Stay patient"],
|
||||
}
|
||||
planner = _make_planner(scorecard_data=scorecard)
|
||||
|
||||
ctx = planner.build_cross_market_context("KR", today=date(2026, 2, 8))
|
||||
|
||||
assert ctx is not None
|
||||
assert ctx.market == "US"
|
||||
assert ctx.total_pnl == 2.5
|
||||
assert ctx.win_rate == 65
|
||||
assert "Stay patient" in ctx.lessons
|
||||
|
||||
# Verify it queried scorecard_US
|
||||
planner._context_store.get_context.assert_called_once_with(
|
||||
ContextLayer.L6_DAILY, "2026-02-07", "scorecard_US"
|
||||
)
|
||||
assert ctx.date == "2026-02-07"
|
||||
|
||||
def test_us_reads_kr_scorecard(self) -> None:
|
||||
scorecard = {"total_pnl": -1.0, "win_rate": 40, "index_change_pct": -0.5}
|
||||
planner = _make_planner(scorecard_data=scorecard)
|
||||
|
||||
ctx = planner.build_cross_market_context("US", today=date(2026, 2, 8))
|
||||
|
||||
assert ctx is not None
|
||||
assert ctx.market == "KR"
|
||||
assert ctx.total_pnl == -1.0
|
||||
|
||||
planner._context_store.get_context.assert_called_once_with(
|
||||
ContextLayer.L6_DAILY, "2026-02-08", "scorecard_KR"
|
||||
)
|
||||
|
||||
def test_no_scorecard_returns_none(self) -> None:
|
||||
planner = _make_planner(scorecard_data=None)
|
||||
|
||||
ctx = planner.build_cross_market_context("KR", today=date(2026, 2, 8))
|
||||
|
||||
assert ctx is None
|
||||
|
||||
def test_invalid_scorecard_returns_none(self) -> None:
|
||||
planner = _make_planner(scorecard_data="not a dict and not json")
|
||||
|
||||
ctx = planner.build_cross_market_context("KR", today=date(2026, 2, 8))
|
||||
|
||||
assert ctx is None
|
||||
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# build_self_market_scorecard
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
|
||||
class TestBuildSelfMarketScorecard:
|
||||
def test_reads_previous_day_scorecard(self) -> None:
|
||||
scorecard = {"total_pnl": -1.2, "win_rate": 45, "lessons": ["Reduce overtrading"]}
|
||||
planner = _make_planner(scorecard_data=scorecard)
|
||||
|
||||
data = planner.build_self_market_scorecard("KR", today=date(2026, 2, 8))
|
||||
|
||||
assert data is not None
|
||||
assert data["date"] == "2026-02-07"
|
||||
assert data["total_pnl"] == -1.2
|
||||
assert data["win_rate"] == 45
|
||||
assert "Reduce overtrading" in data["lessons"]
|
||||
planner._context_store.get_context.assert_called_once_with(
|
||||
ContextLayer.L6_DAILY, "2026-02-07", "scorecard_KR"
|
||||
)
|
||||
|
||||
def test_missing_scorecard_returns_none(self) -> None:
|
||||
planner = _make_planner(scorecard_data=None)
|
||||
assert planner.build_self_market_scorecard("US", today=date(2026, 2, 8)) is None
|
||||
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# _build_prompt
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
|
||||
class TestBuildPrompt:
|
||||
def test_prompt_contains_candidates(self) -> None:
|
||||
planner = _make_planner()
|
||||
candidates = [_candidate(code="005930", name="Samsung")]
|
||||
|
||||
prompt = planner._build_prompt("KR", candidates, {}, None, None)
|
||||
|
||||
assert "005930" in prompt
|
||||
assert "Samsung" in prompt
|
||||
assert "RSI=" in prompt
|
||||
assert "volume_ratio=" in prompt
|
||||
|
||||
def test_prompt_contains_cross_market(self) -> None:
|
||||
planner = _make_planner()
|
||||
cross = CrossMarketContext(
|
||||
market="US", date="2026-02-07", total_pnl=1.5,
|
||||
win_rate=60, index_change_pct=0.8, lessons=["Cut losses early"],
|
||||
)
|
||||
|
||||
prompt = planner._build_prompt("KR", [_candidate()], {}, None, cross)
|
||||
|
||||
assert "Other Market (US)" in prompt
|
||||
assert "+1.50%" in prompt
|
||||
assert "Cut losses early" in prompt
|
||||
|
||||
def test_prompt_contains_context_data(self) -> None:
|
||||
planner = _make_planner()
|
||||
context = {"L6_DAILY": {"win_rate": 0.65, "total_pnl": 2.5}}
|
||||
|
||||
prompt = planner._build_prompt("KR", [_candidate()], context, None, None)
|
||||
|
||||
assert "Strategic Context" in prompt
|
||||
assert "L6_DAILY" in prompt
|
||||
assert "win_rate" in prompt
|
||||
|
||||
def test_prompt_contains_max_scenarios(self) -> None:
|
||||
planner = _make_planner()
|
||||
prompt = planner._build_prompt("KR", [_candidate()], {}, None, None)
|
||||
|
||||
assert f"Max {planner._settings.MAX_SCENARIOS_PER_STOCK} scenarios" in prompt
|
||||
|
||||
def test_prompt_market_name(self) -> None:
|
||||
planner = _make_planner()
|
||||
prompt = planner._build_prompt("US", [_candidate()], {}, None, None)
|
||||
assert "US market" in prompt
|
||||
|
||||
def test_prompt_contains_self_market_scorecard(self) -> None:
|
||||
planner = _make_planner()
|
||||
self_scorecard = {
|
||||
"date": "2026-02-07",
|
||||
"total_pnl": -0.8,
|
||||
"win_rate": 45.0,
|
||||
"lessons": ["Avoid midday entries"],
|
||||
}
|
||||
prompt = planner._build_prompt("KR", [_candidate()], {}, self_scorecard, None)
|
||||
|
||||
assert "My Market Previous Day (KR)" in prompt
|
||||
assert "2026-02-07" in prompt
|
||||
assert "-0.80%" in prompt
|
||||
assert "Avoid midday entries" in prompt
|
||||
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# _extract_json
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
|
||||
class TestExtractJson:
|
||||
def test_plain_json(self) -> None:
|
||||
assert PreMarketPlanner._extract_json('{"a": 1}') == '{"a": 1}'
|
||||
|
||||
def test_with_json_fence(self) -> None:
|
||||
text = '```json\n{"a": 1}\n```'
|
||||
assert PreMarketPlanner._extract_json(text) == '{"a": 1}'
|
||||
|
||||
def test_with_plain_fence(self) -> None:
|
||||
text = '```\n{"a": 1}\n```'
|
||||
assert PreMarketPlanner._extract_json(text) == '{"a": 1}'
|
||||
|
||||
def test_with_whitespace(self) -> None:
|
||||
text = ' \n {"a": 1} \n '
|
||||
assert PreMarketPlanner._extract_json(text) == '{"a": 1}'
|
||||
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# Defensive playbook
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
|
||||
class TestDefensivePlaybook:
|
||||
def test_defensive_has_stop_loss(self) -> None:
|
||||
candidates = [_candidate(code="005930"), _candidate(code="AAPL")]
|
||||
pb = PreMarketPlanner._defensive_playbook(date(2026, 2, 8), "KR", candidates)
|
||||
|
||||
assert pb.default_action == ScenarioAction.HOLD
|
||||
assert pb.market_outlook == MarketOutlook.NEUTRAL_TO_BEARISH
|
||||
assert pb.stock_count == 2
|
||||
for sp in pb.stock_playbooks:
|
||||
assert sp.scenarios[0].action == ScenarioAction.SELL
|
||||
assert sp.scenarios[0].stop_loss_pct == -3.0
|
||||
|
||||
def test_defensive_has_global_rule(self) -> None:
|
||||
pb = PreMarketPlanner._defensive_playbook(date(2026, 2, 8), "KR", [_candidate()])
|
||||
|
||||
assert len(pb.global_rules) == 1
|
||||
assert pb.global_rules[0].action == ScenarioAction.REDUCE_ALL
|
||||
|
||||
def test_empty_playbook(self) -> None:
|
||||
pb = PreMarketPlanner._empty_playbook(date(2026, 2, 8), "US")
|
||||
|
||||
assert pb.stock_count == 0
|
||||
assert pb.market == "US"
|
||||
assert pb.market_outlook == MarketOutlook.NEUTRAL
|
||||
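TestExtractJson above fixes the observable behaviour of PreMarketPlanner._extract_json: surrounding whitespace and Markdown code fences (```json or a bare ```) are stripped before the reply is parsed as JSON. A minimal sketch consistent with those four cases follows; the project's helper may differ internally, and the function name here is only illustrative.

```python
# Sketch of a fence-stripping helper consistent with TestExtractJson above;
# not necessarily how PreMarketPlanner._extract_json is implemented.
def extract_json(text: str) -> str:
    """Strip surrounding whitespace and Markdown code fences from a model reply."""
    cleaned = text.strip()
    if cleaned.startswith("```"):
        # Drop the opening fence line ("```" or "```json") and any trailing "```".
        first_newline = cleaned.find("\n")
        cleaned = cleaned[first_newline + 1 :] if first_newline != -1 else cleaned[3:]
        if cleaned.rstrip().endswith("```"):
            cleaned = cleaned.rstrip()[:-3]
    return cleaned.strip()


assert extract_json('```json\n{"a": 1}\n```') == '{"a": 1}'
assert extract_json('  \n {"a": 1} \n ') == '{"a": 1}'
```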
442 tests/test_scenario_engine.py (Normal file)
@@ -0,0 +1,442 @@
|
||||
"""Tests for the local scenario engine."""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
from datetime import date
|
||||
|
||||
import pytest
|
||||
|
||||
from src.strategy.models import (
|
||||
DayPlaybook,
|
||||
GlobalRule,
|
||||
ScenarioAction,
|
||||
StockCondition,
|
||||
StockPlaybook,
|
||||
StockScenario,
|
||||
)
|
||||
from src.strategy.scenario_engine import ScenarioEngine, ScenarioMatch
|
||||
|
||||
|
||||
@pytest.fixture
|
||||
def engine() -> ScenarioEngine:
|
||||
return ScenarioEngine()
|
||||
|
||||
|
||||
def _scenario(
|
||||
rsi_below: float | None = None,
|
||||
rsi_above: float | None = None,
|
||||
volume_ratio_above: float | None = None,
|
||||
action: ScenarioAction = ScenarioAction.BUY,
|
||||
confidence: int = 85,
|
||||
**kwargs,
|
||||
) -> StockScenario:
|
||||
return StockScenario(
|
||||
condition=StockCondition(
|
||||
rsi_below=rsi_below,
|
||||
rsi_above=rsi_above,
|
||||
volume_ratio_above=volume_ratio_above,
|
||||
**kwargs,
|
||||
),
|
||||
action=action,
|
||||
confidence=confidence,
|
||||
rationale=f"Test scenario: {action.value}",
|
||||
)
|
||||
|
||||
|
||||
def _playbook(
|
||||
stock_code: str = "005930",
|
||||
scenarios: list[StockScenario] | None = None,
|
||||
global_rules: list[GlobalRule] | None = None,
|
||||
default_action: ScenarioAction = ScenarioAction.HOLD,
|
||||
) -> DayPlaybook:
|
||||
if scenarios is None:
|
||||
scenarios = [_scenario(rsi_below=30.0)]
|
||||
return DayPlaybook(
|
||||
date=date(2026, 2, 7),
|
||||
market="KR",
|
||||
stock_playbooks=[StockPlaybook(stock_code=stock_code, scenarios=scenarios)],
|
||||
global_rules=global_rules or [],
|
||||
default_action=default_action,
|
||||
)
|
||||
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# evaluate_condition
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
|
||||
class TestEvaluateCondition:
|
||||
def test_rsi_below_match(self, engine: ScenarioEngine) -> None:
|
||||
cond = StockCondition(rsi_below=30.0)
|
||||
assert engine.evaluate_condition(cond, {"rsi": 25.0})
|
||||
|
||||
def test_rsi_below_no_match(self, engine: ScenarioEngine) -> None:
|
||||
cond = StockCondition(rsi_below=30.0)
|
||||
assert not engine.evaluate_condition(cond, {"rsi": 35.0})
|
||||
|
||||
def test_rsi_above_match(self, engine: ScenarioEngine) -> None:
|
||||
cond = StockCondition(rsi_above=70.0)
|
||||
assert engine.evaluate_condition(cond, {"rsi": 75.0})
|
||||
|
||||
def test_rsi_above_no_match(self, engine: ScenarioEngine) -> None:
|
||||
cond = StockCondition(rsi_above=70.0)
|
||||
assert not engine.evaluate_condition(cond, {"rsi": 65.0})
|
||||
|
||||
def test_volume_ratio_above_match(self, engine: ScenarioEngine) -> None:
|
||||
cond = StockCondition(volume_ratio_above=3.0)
|
||||
assert engine.evaluate_condition(cond, {"volume_ratio": 4.5})
|
||||
|
||||
def test_volume_ratio_below_match(self, engine: ScenarioEngine) -> None:
|
||||
cond = StockCondition(volume_ratio_below=1.0)
|
||||
assert engine.evaluate_condition(cond, {"volume_ratio": 0.5})
|
||||
|
||||
def test_price_above_match(self, engine: ScenarioEngine) -> None:
|
||||
cond = StockCondition(price_above=50000)
|
||||
assert engine.evaluate_condition(cond, {"current_price": 55000})
|
||||
|
||||
def test_price_below_match(self, engine: ScenarioEngine) -> None:
|
||||
cond = StockCondition(price_below=50000)
|
||||
assert engine.evaluate_condition(cond, {"current_price": 45000})
|
||||
|
||||
def test_price_change_pct_above_match(self, engine: ScenarioEngine) -> None:
|
||||
cond = StockCondition(price_change_pct_above=2.0)
|
||||
assert engine.evaluate_condition(cond, {"price_change_pct": 3.5})
|
||||
|
||||
def test_price_change_pct_below_match(self, engine: ScenarioEngine) -> None:
|
||||
cond = StockCondition(price_change_pct_below=-3.0)
|
||||
assert engine.evaluate_condition(cond, {"price_change_pct": -4.0})
|
||||
|
||||
def test_multiple_conditions_and_logic(self, engine: ScenarioEngine) -> None:
|
||||
cond = StockCondition(rsi_below=30.0, volume_ratio_above=3.0)
|
||||
# Both met
|
||||
assert engine.evaluate_condition(cond, {"rsi": 25.0, "volume_ratio": 4.0})
|
||||
# Only RSI met
|
||||
assert not engine.evaluate_condition(cond, {"rsi": 25.0, "volume_ratio": 2.0})
|
||||
# Only volume met
|
||||
assert not engine.evaluate_condition(cond, {"rsi": 35.0, "volume_ratio": 4.0})
|
||||
# Neither met
|
||||
assert not engine.evaluate_condition(cond, {"rsi": 35.0, "volume_ratio": 2.0})
|
||||
|
||||
def test_empty_condition_returns_false(self, engine: ScenarioEngine) -> None:
|
||||
cond = StockCondition()
|
||||
assert not engine.evaluate_condition(cond, {"rsi": 25.0})
|
||||
|
||||
def test_missing_data_returns_false(self, engine: ScenarioEngine) -> None:
|
||||
cond = StockCondition(rsi_below=30.0)
|
||||
assert not engine.evaluate_condition(cond, {})
|
||||
|
||||
def test_none_data_returns_false(self, engine: ScenarioEngine) -> None:
|
||||
cond = StockCondition(rsi_below=30.0)
|
||||
assert not engine.evaluate_condition(cond, {"rsi": None})
|
||||
|
||||
def test_boundary_value_not_matched(self, engine: ScenarioEngine) -> None:
|
||||
"""rsi_below=30 should NOT match rsi=30 (strict less than)."""
|
||||
cond = StockCondition(rsi_below=30.0)
|
||||
assert not engine.evaluate_condition(cond, {"rsi": 30.0})
|
||||
|
||||
def test_boundary_value_above_not_matched(self, engine: ScenarioEngine) -> None:
|
||||
"""rsi_above=70 should NOT match rsi=70 (strict greater than)."""
|
||||
cond = StockCondition(rsi_above=70.0)
|
||||
assert not engine.evaluate_condition(cond, {"rsi": 70.0})
|
||||
|
||||
def test_string_value_no_exception(self, engine: ScenarioEngine) -> None:
|
||||
"""String numeric value should not raise TypeError."""
|
||||
cond = StockCondition(rsi_below=30.0)
|
||||
# "25" can be cast to float → should match
|
||||
assert engine.evaluate_condition(cond, {"rsi": "25"})
|
||||
# "35" → should not match
|
||||
assert not engine.evaluate_condition(cond, {"rsi": "35"})
|
||||
|
||||
def test_percent_string_returns_false(self, engine: ScenarioEngine) -> None:
|
||||
"""Percent string like '30%' cannot be cast to float → False, no exception."""
|
||||
cond = StockCondition(rsi_below=30.0)
|
||||
assert not engine.evaluate_condition(cond, {"rsi": "30%"})
|
||||
|
||||
def test_decimal_value_no_exception(self, engine: ScenarioEngine) -> None:
|
||||
"""Decimal values should be safely handled."""
|
||||
from decimal import Decimal
|
||||
|
||||
cond = StockCondition(rsi_below=30.0)
|
||||
assert engine.evaluate_condition(cond, {"rsi": Decimal("25.0")})
|
||||
|
||||
def test_mixed_invalid_types_no_exception(self, engine: ScenarioEngine) -> None:
|
||||
"""Various invalid types should not raise exceptions."""
|
||||
cond = StockCondition(
|
||||
rsi_below=30.0, volume_ratio_above=2.0,
|
||||
price_above=100, price_change_pct_below=-1.0,
|
||||
)
|
||||
data = {
|
||||
"rsi": [25], # list
|
||||
"volume_ratio": "bad", # non-numeric string
|
||||
"current_price": {}, # dict
|
||||
"price_change_pct": object(), # arbitrary object
|
||||
}
|
||||
# Should return False (invalid types → None → False), never raise
|
||||
assert not engine.evaluate_condition(cond, data)
|
||||
|
||||
def test_missing_key_logs_warning_once(self, caplog) -> None:
|
||||
"""Missing key warning should fire only once per key per engine instance."""
|
||||
import logging
|
||||
|
||||
eng = ScenarioEngine()
|
||||
cond = StockCondition(rsi_below=30.0)
|
||||
with caplog.at_level(logging.WARNING):
|
||||
eng.evaluate_condition(cond, {})
|
||||
eng.evaluate_condition(cond, {})
|
||||
eng.evaluate_condition(cond, {})
|
||||
# Warning should appear exactly once despite 3 calls
|
||||
assert caplog.text.count("'rsi' but key missing") == 1
|
||||
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# check_global_rules
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
|
||||
class TestCheckGlobalRules:
|
||||
def test_no_rules(self, engine: ScenarioEngine) -> None:
|
||||
pb = _playbook(global_rules=[])
|
||||
result = engine.check_global_rules(pb, {"portfolio_pnl_pct": -1.0})
|
||||
assert result is None
|
||||
|
||||
def test_rule_triggered(self, engine: ScenarioEngine) -> None:
|
||||
pb = _playbook(
|
||||
global_rules=[
|
||||
GlobalRule(
|
||||
condition="portfolio_pnl_pct < -2.0",
|
||||
action=ScenarioAction.REDUCE_ALL,
|
||||
rationale="Near circuit breaker",
|
||||
),
|
||||
]
|
||||
)
|
||||
result = engine.check_global_rules(pb, {"portfolio_pnl_pct": -2.5})
|
||||
assert result is not None
|
||||
assert result.action == ScenarioAction.REDUCE_ALL
|
||||
|
||||
def test_rule_not_triggered(self, engine: ScenarioEngine) -> None:
|
||||
pb = _playbook(
|
||||
global_rules=[
|
||||
GlobalRule(
|
||||
condition="portfolio_pnl_pct < -2.0",
|
||||
action=ScenarioAction.REDUCE_ALL,
|
||||
),
|
||||
]
|
||||
)
|
||||
result = engine.check_global_rules(pb, {"portfolio_pnl_pct": -1.0})
|
||||
assert result is None
|
||||
|
||||
def test_first_rule_wins(self, engine: ScenarioEngine) -> None:
|
||||
pb = _playbook(
|
||||
global_rules=[
|
||||
GlobalRule(condition="portfolio_pnl_pct < -2.0", action=ScenarioAction.REDUCE_ALL),
|
||||
GlobalRule(condition="portfolio_pnl_pct < -1.0", action=ScenarioAction.HOLD),
|
||||
]
|
||||
)
|
||||
result = engine.check_global_rules(pb, {"portfolio_pnl_pct": -2.5})
|
||||
assert result is not None
|
||||
assert result.action == ScenarioAction.REDUCE_ALL
|
||||
|
||||
def test_greater_than_operator(self, engine: ScenarioEngine) -> None:
|
||||
pb = _playbook(
|
||||
global_rules=[
|
||||
GlobalRule(condition="volatility_index > 30", action=ScenarioAction.HOLD),
|
||||
]
|
||||
)
|
||||
result = engine.check_global_rules(pb, {"volatility_index": 35})
|
||||
assert result is not None
|
||||
|
||||
def test_missing_field_not_triggered(self, engine: ScenarioEngine) -> None:
|
||||
pb = _playbook(
|
||||
global_rules=[
|
||||
GlobalRule(condition="unknown_field < -2.0", action=ScenarioAction.REDUCE_ALL),
|
||||
]
|
||||
)
|
||||
result = engine.check_global_rules(pb, {"portfolio_pnl_pct": -5.0})
|
||||
assert result is None
|
||||
|
||||
def test_invalid_condition_format(self, engine: ScenarioEngine) -> None:
|
||||
pb = _playbook(
|
||||
global_rules=[
|
||||
GlobalRule(condition="bad format", action=ScenarioAction.HOLD),
|
||||
]
|
||||
)
|
||||
result = engine.check_global_rules(pb, {})
|
||||
assert result is None
|
||||
|
||||
def test_le_operator(self, engine: ScenarioEngine) -> None:
|
||||
pb = _playbook(
|
||||
global_rules=[
|
||||
GlobalRule(condition="portfolio_pnl_pct <= -2.0", action=ScenarioAction.REDUCE_ALL),
|
||||
]
|
||||
)
|
||||
assert engine.check_global_rules(pb, {"portfolio_pnl_pct": -2.0}) is not None
|
||||
assert engine.check_global_rules(pb, {"portfolio_pnl_pct": -1.9}) is None
|
||||
|
||||
def test_ge_operator(self, engine: ScenarioEngine) -> None:
|
||||
pb = _playbook(
|
||||
global_rules=[
|
||||
GlobalRule(condition="volatility >= 80.0", action=ScenarioAction.HOLD),
|
||||
]
|
||||
)
|
||||
assert engine.check_global_rules(pb, {"volatility": 80.0}) is not None
|
||||
assert engine.check_global_rules(pb, {"volatility": 79.9}) is None
|
||||
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# evaluate (full pipeline)
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
|
||||
class TestEvaluate:
|
||||
def test_scenario_match(self, engine: ScenarioEngine) -> None:
|
||||
pb = _playbook(scenarios=[_scenario(rsi_below=30.0)])
|
||||
result = engine.evaluate(pb, "005930", {"rsi": 25.0}, {})
|
||||
assert result.action == ScenarioAction.BUY
|
||||
assert result.confidence == 85
|
||||
assert result.matched_scenario is not None
|
||||
|
||||
def test_no_scenario_match_returns_default(self, engine: ScenarioEngine) -> None:
|
||||
pb = _playbook(scenarios=[_scenario(rsi_below=30.0)])
|
||||
result = engine.evaluate(pb, "005930", {"rsi": 50.0}, {})
|
||||
assert result.action == ScenarioAction.HOLD
|
||||
assert result.confidence == 0
|
||||
assert result.matched_scenario is None
|
||||
|
||||
def test_stock_not_in_playbook(self, engine: ScenarioEngine) -> None:
|
||||
pb = _playbook(stock_code="005930")
|
||||
result = engine.evaluate(pb, "AAPL", {"rsi": 25.0}, {})
|
||||
assert result.action == ScenarioAction.HOLD
|
||||
assert result.confidence == 0
|
||||
|
||||
def test_global_rule_takes_priority(self, engine: ScenarioEngine) -> None:
|
||||
pb = _playbook(
|
||||
scenarios=[_scenario(rsi_below=30.0)],
|
||||
global_rules=[
|
||||
GlobalRule(
|
||||
condition="portfolio_pnl_pct < -2.0",
|
||||
action=ScenarioAction.REDUCE_ALL,
|
||||
rationale="Loss limit",
|
||||
),
|
||||
],
|
||||
)
|
||||
result = engine.evaluate(
|
||||
pb,
|
||||
"005930",
|
||||
{"rsi": 25.0}, # Would match scenario
|
||||
{"portfolio_pnl_pct": -2.5}, # But global rule triggers first
|
||||
)
|
||||
assert result.action == ScenarioAction.REDUCE_ALL
|
||||
assert result.global_rule_triggered is not None
|
||||
assert result.matched_scenario is None
|
||||
|
||||
def test_first_scenario_wins(self, engine: ScenarioEngine) -> None:
|
||||
pb = _playbook(
|
||||
scenarios=[
|
||||
_scenario(rsi_below=30.0, action=ScenarioAction.BUY, confidence=90),
|
||||
_scenario(rsi_below=25.0, action=ScenarioAction.BUY, confidence=95),
|
||||
]
|
||||
)
|
||||
result = engine.evaluate(pb, "005930", {"rsi": 20.0}, {})
|
||||
# Both match, but first wins
|
||||
assert result.confidence == 90
|
||||
|
||||
def test_sell_scenario(self, engine: ScenarioEngine) -> None:
|
||||
pb = _playbook(
|
||||
scenarios=[
|
||||
_scenario(rsi_above=75.0, action=ScenarioAction.SELL, confidence=80),
|
||||
]
|
||||
)
|
||||
result = engine.evaluate(pb, "005930", {"rsi": 80.0}, {})
|
||||
assert result.action == ScenarioAction.SELL
|
||||
|
||||
def test_empty_playbook(self, engine: ScenarioEngine) -> None:
|
||||
pb = DayPlaybook(date=date(2026, 2, 7), market="KR", stock_playbooks=[])
|
||||
result = engine.evaluate(pb, "005930", {"rsi": 25.0}, {})
|
||||
assert result.action == ScenarioAction.HOLD
|
||||
|
||||
def test_match_details_populated(self, engine: ScenarioEngine) -> None:
|
||||
pb = _playbook(scenarios=[_scenario(rsi_below=30.0, volume_ratio_above=2.0)])
|
||||
result = engine.evaluate(
|
||||
pb, "005930", {"rsi": 25.0, "volume_ratio": 3.0}, {}
|
||||
)
|
||||
assert result.match_details.get("rsi") == 25.0
|
||||
assert result.match_details.get("volume_ratio") == 3.0
|
||||
|
||||
def test_custom_default_action(self, engine: ScenarioEngine) -> None:
|
||||
pb = _playbook(
|
||||
scenarios=[_scenario(rsi_below=10.0)], # Very unlikely to match
|
||||
default_action=ScenarioAction.SELL,
|
||||
)
|
||||
result = engine.evaluate(pb, "005930", {"rsi": 50.0}, {})
|
||||
assert result.action == ScenarioAction.SELL
|
||||
|
||||
def test_multiple_stocks_in_playbook(self, engine: ScenarioEngine) -> None:
|
||||
pb = DayPlaybook(
|
||||
date=date(2026, 2, 7),
|
||||
market="US",
|
||||
stock_playbooks=[
|
||||
StockPlaybook(
|
||||
stock_code="AAPL",
|
||||
scenarios=[_scenario(rsi_below=25.0, confidence=90)],
|
||||
),
|
||||
StockPlaybook(
|
||||
stock_code="MSFT",
|
||||
scenarios=[_scenario(rsi_above=75.0, action=ScenarioAction.SELL, confidence=80)],
|
||||
),
|
||||
],
|
||||
)
|
||||
aapl = engine.evaluate(pb, "AAPL", {"rsi": 20.0}, {})
|
||||
assert aapl.action == ScenarioAction.BUY
|
||||
assert aapl.confidence == 90
|
||||
|
||||
msft = engine.evaluate(pb, "MSFT", {"rsi": 80.0}, {})
|
||||
assert msft.action == ScenarioAction.SELL
|
||||
|
||||
def test_complex_multi_condition(self, engine: ScenarioEngine) -> None:
|
||||
pb = _playbook(
|
||||
scenarios=[
|
||||
_scenario(
|
||||
rsi_below=30.0,
|
||||
volume_ratio_above=3.0,
|
||||
price_change_pct_below=-2.0,
|
||||
confidence=95,
|
||||
),
|
||||
]
|
||||
)
|
||||
# All conditions met
|
||||
result = engine.evaluate(
|
||||
pb,
|
||||
"005930",
|
||||
{"rsi": 22.0, "volume_ratio": 4.0, "price_change_pct": -3.0},
|
||||
{},
|
||||
)
|
||||
assert result.action == ScenarioAction.BUY
|
||||
assert result.confidence == 95
|
||||
|
||||
# One condition not met
|
||||
result2 = engine.evaluate(
|
||||
pb,
|
||||
"005930",
|
||||
{"rsi": 22.0, "volume_ratio": 4.0, "price_change_pct": -1.0},
|
||||
{},
|
||||
)
|
||||
assert result2.action == ScenarioAction.HOLD
|
||||
|
||||
def test_scenario_match_returns_rationale(self, engine: ScenarioEngine) -> None:
|
||||
pb = _playbook(scenarios=[_scenario(rsi_below=30.0)])
|
||||
result = engine.evaluate(pb, "005930", {"rsi": 25.0}, {})
|
||||
assert result.rationale != ""
|
||||
|
||||
def test_result_stock_code(self, engine: ScenarioEngine) -> None:
|
||||
pb = _playbook()
|
||||
result = engine.evaluate(pb, "005930", {"rsi": 25.0}, {})
|
||||
assert result.stock_code == "005930"
|
||||
|
||||
def test_match_details_normalized(self, engine: ScenarioEngine) -> None:
|
||||
"""match_details should contain _safe_float normalized values, not raw."""
|
||||
pb = _playbook(scenarios=[_scenario(rsi_below=30.0)])
|
||||
# Pass string value — should be normalized to float in match_details
|
||||
result = engine.evaluate(pb, "005930", {"rsi": "25.0"}, {})
|
||||
assert result.action == ScenarioAction.BUY
|
||||
assert result.match_details["rsi"] == 25.0
|
||||
assert isinstance(result.match_details["rsi"], float)
|
||||
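The TestEvaluateCondition and TestCheckGlobalRules cases above pin down three properties of the engine: every threshold that is set must hold together (AND logic), the comparisons are strict, and values that cannot be coerced to float make the condition fail instead of raising. The sketch below reproduces that behaviour on a trimmed field set; it takes a plain dict rather than the project's StockCondition model and is illustrative only.

```python
# Illustrative condition check matching the tested behaviour: AND across set
# thresholds, strict comparisons, bad values coerce to None and fail quietly.
from __future__ import annotations

from typing import Any


def _safe_float(value: Any) -> float | None:
    try:
        return float(value)
    except (TypeError, ValueError):
        return None


def evaluate_condition(cond: dict[str, float], data: dict[str, Any]) -> bool:
    # Trimmed field set; the real StockCondition exposes more thresholds.
    checks = {
        "rsi_below": ("rsi", lambda v, t: v < t),
        "rsi_above": ("rsi", lambda v, t: v > t),
        "volume_ratio_above": ("volume_ratio", lambda v, t: v > t),
        "price_change_pct_below": ("price_change_pct", lambda v, t: v < t),
    }
    matched_any = False
    for field, (key, op) in checks.items():
        threshold = cond.get(field)
        if threshold is None:
            continue
        value = _safe_float(data.get(key))
        if value is None or not op(value, threshold):
            return False  # missing or invalid data, or threshold not met
        matched_any = True
    return matched_any  # an empty condition never matches


assert evaluate_condition({"rsi_below": 30.0}, {"rsi": "25"})
assert not evaluate_condition({"rsi_below": 30.0}, {"rsi": "30%"})
```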
81 tests/test_scorecard.py (Normal file)
@@ -0,0 +1,81 @@
"""Tests for DailyScorecard model."""

from __future__ import annotations

from src.evolution.scorecard import DailyScorecard


def test_scorecard_initialization() -> None:
    scorecard = DailyScorecard(
        date="2026-02-08",
        market="KR",
        total_decisions=10,
        buys=3,
        sells=2,
        holds=5,
        total_pnl=1234.5,
        win_rate=60.0,
        avg_confidence=78.5,
        scenario_match_rate=70.0,
        top_winners=["005930", "000660"],
        top_losers=["035420"],
        lessons=["Avoid chasing breakouts"],
        cross_market_note="US volatility spillover",
    )

    assert scorecard.market == "KR"
    assert scorecard.total_decisions == 10
    assert scorecard.total_pnl == 1234.5
    assert scorecard.top_winners == ["005930", "000660"]
    assert scorecard.lessons == ["Avoid chasing breakouts"]
    assert scorecard.cross_market_note == "US volatility spillover"


def test_scorecard_defaults() -> None:
    scorecard = DailyScorecard(
        date="2026-02-08",
        market="US",
        total_decisions=0,
        buys=0,
        sells=0,
        holds=0,
        total_pnl=0.0,
        win_rate=0.0,
        avg_confidence=0.0,
        scenario_match_rate=0.0,
    )

    assert scorecard.top_winners == []
    assert scorecard.top_losers == []
    assert scorecard.lessons == []
    assert scorecard.cross_market_note == ""


def test_scorecard_list_isolation() -> None:
    a = DailyScorecard(
        date="2026-02-08",
        market="KR",
        total_decisions=1,
        buys=1,
        sells=0,
        holds=0,
        total_pnl=10.0,
        win_rate=100.0,
        avg_confidence=90.0,
        scenario_match_rate=100.0,
    )
    b = DailyScorecard(
        date="2026-02-08",
        market="US",
        total_decisions=1,
        buys=0,
        sells=1,
        holds=0,
        total_pnl=-5.0,
        win_rate=0.0,
        avg_confidence=60.0,
        scenario_match_rate=50.0,
    )

    a.top_winners.append("005930")
    assert b.top_winners == []
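test_scorecard_list_isolation passes only if every DailyScorecard instance owns its own list objects. The sketch below shows the usual way to declare that with a dataclass default_factory; the field set is trimmed and assumed, not the project's full model.

```python
# Sketch of per-instance list defaults; the real DailyScorecard has more fields.
from dataclasses import dataclass, field


@dataclass
class ScorecardSketch:
    date: str
    market: str
    total_pnl: float = 0.0
    top_winners: list[str] = field(default_factory=list)  # fresh list per instance
    lessons: list[str] = field(default_factory=list)
    cross_market_note: str = ""


a = ScorecardSketch(date="2026-02-08", market="KR")
b = ScorecardSketch(date="2026-02-08", market="US")
a.top_winners.append("005930")
assert b.top_winners == []  # the mutable default is not shared across instances
```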
403 tests/test_smart_scanner.py (Normal file)
@@ -0,0 +1,403 @@
|
||||
"""Tests for SmartVolatilityScanner."""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
import pytest
|
||||
from unittest.mock import AsyncMock, MagicMock
|
||||
|
||||
from src.analysis.smart_scanner import ScanCandidate, SmartVolatilityScanner
|
||||
from src.analysis.volatility import VolatilityAnalyzer
|
||||
from src.broker.kis_api import KISBroker
|
||||
from src.broker.overseas import OverseasBroker
|
||||
from src.config import Settings
|
||||
|
||||
|
||||
@pytest.fixture
|
||||
def mock_settings() -> Settings:
|
||||
"""Create test settings."""
|
||||
return Settings(
|
||||
KIS_APP_KEY="test",
|
||||
KIS_APP_SECRET="test",
|
||||
KIS_ACCOUNT_NO="12345678-01",
|
||||
GEMINI_API_KEY="test",
|
||||
RSI_OVERSOLD_THRESHOLD=30,
|
||||
RSI_MOMENTUM_THRESHOLD=70,
|
||||
VOL_MULTIPLIER=2.0,
|
||||
SCANNER_TOP_N=3,
|
||||
DB_PATH=":memory:",
|
||||
)
|
||||
|
||||
|
||||
@pytest.fixture
|
||||
def mock_broker(mock_settings: Settings) -> MagicMock:
|
||||
"""Create mock broker."""
|
||||
broker = MagicMock(spec=KISBroker)
|
||||
broker._settings = mock_settings
|
||||
broker.fetch_market_rankings = AsyncMock()
|
||||
broker.get_daily_prices = AsyncMock()
|
||||
return broker
|
||||
|
||||
|
||||
@pytest.fixture
|
||||
def scanner(mock_broker: MagicMock, mock_settings: Settings) -> SmartVolatilityScanner:
|
||||
"""Create smart scanner instance."""
|
||||
analyzer = VolatilityAnalyzer()
|
||||
return SmartVolatilityScanner(
|
||||
broker=mock_broker,
|
||||
overseas_broker=None,
|
||||
volatility_analyzer=analyzer,
|
||||
settings=mock_settings,
|
||||
)
|
||||
|
||||
|
||||
@pytest.fixture
|
||||
def mock_overseas_broker() -> MagicMock:
|
||||
"""Create mock overseas broker."""
|
||||
broker = MagicMock(spec=OverseasBroker)
|
||||
broker.get_overseas_price = AsyncMock()
|
||||
broker.fetch_overseas_rankings = AsyncMock(return_value=[])
|
||||
return broker
|
||||
|
||||
|
||||
class TestSmartVolatilityScanner:
|
||||
"""Test suite for SmartVolatilityScanner."""
|
||||
|
||||
@pytest.mark.asyncio
|
||||
async def test_scan_domestic_prefers_volatility_with_liquidity_bonus(
|
||||
self, scanner: SmartVolatilityScanner, mock_broker: MagicMock
|
||||
) -> None:
|
||||
"""Domestic scan should score by volatility first and volume rank second."""
|
||||
fluctuation_rows = [
|
||||
{
|
||||
"stock_code": "005930",
|
||||
"name": "Samsung",
|
||||
"price": 70000,
|
||||
"volume": 5000000,
|
||||
"change_rate": -5.0,
|
||||
"volume_increase_rate": 250,
|
||||
},
|
||||
{
|
||||
"stock_code": "035420",
|
||||
"name": "NAVER",
|
||||
"price": 250000,
|
||||
"volume": 3000000,
|
||||
"change_rate": 3.0,
|
||||
"volume_increase_rate": 200,
|
||||
},
|
||||
]
|
||||
volume_rows = [
|
||||
{"stock_code": "035420", "name": "NAVER", "price": 250000, "volume": 3000000},
|
||||
{"stock_code": "005930", "name": "Samsung", "price": 70000, "volume": 5000000},
|
||||
]
|
||||
mock_broker.fetch_market_rankings.side_effect = [fluctuation_rows, volume_rows]
|
||||
mock_broker.get_daily_prices.return_value = [
|
||||
{"open": 1, "high": 1, "low": 1, "close": 1, "volume": 1000000},
|
||||
{"open": 1, "high": 1, "low": 1, "close": 1, "volume": 1000000},
|
||||
]
|
||||
|
||||
candidates = await scanner.scan()
|
||||
|
||||
assert len(candidates) >= 1
|
||||
# Samsung has higher absolute move, so it should lead despite lower volume rank bonus.
|
||||
assert candidates[0].stock_code == "005930"
|
||||
assert candidates[0].signal == "oversold"
|
||||
|
||||
@pytest.mark.asyncio
|
||||
async def test_scan_domestic_finds_momentum_candidate(
|
||||
self, scanner: SmartVolatilityScanner, mock_broker: MagicMock
|
||||
) -> None:
|
||||
"""Positive change should be represented as momentum signal."""
|
||||
fluctuation_rows = [
|
||||
{
|
||||
"stock_code": "035420",
|
||||
"name": "NAVER",
|
||||
"price": 250000,
|
||||
"volume": 3000000,
|
||||
"change_rate": 5.0,
|
||||
"volume_increase_rate": 300,
|
||||
},
|
||||
]
|
||||
mock_broker.fetch_market_rankings.side_effect = [fluctuation_rows, fluctuation_rows]
|
||||
mock_broker.get_daily_prices.return_value = [
|
||||
{"open": 1, "high": 1, "low": 1, "close": 1, "volume": 1000000},
|
||||
{"open": 1, "high": 1, "low": 1, "close": 1, "volume": 1000000},
|
||||
]
|
||||
|
||||
candidates = await scanner.scan()
|
||||
|
||||
assert [c.stock_code for c in candidates] == ["035420"]
|
||||
assert candidates[0].signal == "momentum"
|
||||
|
||||
@pytest.mark.asyncio
|
||||
async def test_scan_domestic_filters_low_volatility(
|
||||
self, scanner: SmartVolatilityScanner, mock_broker: MagicMock
|
||||
) -> None:
|
||||
"""Domestic scan should drop symbols below volatility threshold."""
|
||||
fluctuation_rows = [
|
||||
{
|
||||
"stock_code": "000660",
|
||||
"name": "SK Hynix",
|
||||
"price": 150000,
|
||||
"volume": 500000,
|
||||
"change_rate": 0.2,
|
||||
"volume_increase_rate": 50,
|
||||
},
|
||||
]
|
||||
mock_broker.fetch_market_rankings.side_effect = [fluctuation_rows, fluctuation_rows]
|
||||
mock_broker.get_daily_prices.return_value = [
|
||||
{"open": 1, "high": 150100, "low": 149900, "close": 150000, "volume": 1000000},
|
||||
{"open": 1, "high": 150100, "low": 149900, "close": 150000, "volume": 1000000},
|
||||
]
|
||||
|
||||
candidates = await scanner.scan()
|
||||
|
||||
assert len(candidates) == 0
|
||||
|
||||
@pytest.mark.asyncio
|
||||
async def test_scan_uses_fallback_on_api_error(
|
||||
self, scanner: SmartVolatilityScanner, mock_broker: MagicMock
|
||||
) -> None:
|
||||
"""Domestic scan should remain operational using fallback symbols."""
|
||||
mock_broker.fetch_market_rankings.side_effect = [
|
||||
ConnectionError("API unavailable"),
|
||||
ConnectionError("API unavailable"),
|
||||
]
|
||||
mock_broker.get_daily_prices.return_value = [
|
||||
{"open": 1, "high": 103, "low": 97, "close": 100, "volume": 1000000},
|
||||
{"open": 1, "high": 103, "low": 97, "close": 100, "volume": 800000},
|
||||
]
|
||||
|
||||
candidates = await scanner.scan(fallback_stocks=["005930", "000660"])
|
||||
|
||||
assert isinstance(candidates, list)
|
||||
assert len(candidates) >= 1
|
||||
|
||||
@pytest.mark.asyncio
|
||||
async def test_scan_returns_top_n_only(
|
||||
self, scanner: SmartVolatilityScanner, mock_broker: MagicMock
|
||||
) -> None:
|
||||
"""Test that scan returns at most top_n candidates."""
|
||||
fluctuation_rows = [
|
||||
{
|
||||
"stock_code": f"00{i}000",
|
||||
"name": f"Stock{i}",
|
||||
"price": 10000 * i,
|
||||
"volume": 5000000,
|
||||
"change_rate": -10,
|
||||
"volume_increase_rate": 500,
|
||||
}
|
||||
for i in range(1, 10)
|
||||
]
|
||||
mock_broker.fetch_market_rankings.side_effect = [fluctuation_rows, fluctuation_rows]
|
||||
mock_broker.get_daily_prices.return_value = [
|
||||
{"open": 1, "high": 105, "low": 95, "close": 100, "volume": 1000000},
|
||||
{"open": 1, "high": 105, "low": 95, "close": 100, "volume": 900000},
|
||||
]
|
||||
|
||||
candidates = await scanner.scan()
|
||||
|
||||
# Should respect top_n limit (3)
|
||||
assert len(candidates) <= scanner.top_n
|
||||
|
||||
@pytest.mark.asyncio
|
||||
async def test_get_stock_codes(
|
||||
self, scanner: SmartVolatilityScanner
|
||||
) -> None:
|
||||
"""Test extraction of stock codes from candidates."""
|
||||
candidates = [
|
||||
ScanCandidate(
|
||||
stock_code="005930",
|
||||
name="Samsung",
|
||||
price=70000,
|
||||
volume=5000000,
|
||||
volume_ratio=2.5,
|
||||
rsi=28,
|
||||
signal="oversold",
|
||||
score=85.0,
|
||||
),
|
||||
ScanCandidate(
|
||||
stock_code="035420",
|
||||
name="NAVER",
|
||||
price=250000,
|
||||
volume=3000000,
|
||||
volume_ratio=3.0,
|
||||
rsi=75,
|
||||
signal="momentum",
|
||||
score=88.0,
|
||||
),
|
||||
]
|
||||
|
||||
codes = scanner.get_stock_codes(candidates)
|
||||
|
||||
assert codes == ["005930", "035420"]
|
||||
|
||||
@pytest.mark.asyncio
|
||||
async def test_scan_overseas_uses_dynamic_symbols(
|
||||
self, mock_broker: MagicMock, mock_overseas_broker: MagicMock, mock_settings: Settings
|
||||
) -> None:
|
||||
"""Overseas scan should use provided dynamic universe symbols."""
|
||||
analyzer = VolatilityAnalyzer()
|
||||
scanner = SmartVolatilityScanner(
|
||||
broker=mock_broker,
|
||||
overseas_broker=mock_overseas_broker,
|
||||
volatility_analyzer=analyzer,
|
||||
settings=mock_settings,
|
||||
)
|
||||
|
||||
market = MagicMock()
|
||||
market.name = "NASDAQ"
|
||||
market.code = "US_NASDAQ"
|
||||
market.exchange_code = "NASD"
|
||||
market.is_domestic = False
|
||||
|
||||
mock_overseas_broker.get_overseas_price.side_effect = [
|
||||
{"output": {"last": "210.5", "rate": "1.6", "tvol": "1500000"}},
|
||||
{"output": {"last": "330.1", "rate": "0.2", "tvol": "900000"}},
|
||||
]
|
||||
|
||||
candidates = await scanner.scan(
|
||||
market=market,
|
||||
fallback_stocks=["AAPL", "MSFT"],
|
||||
)
|
||||
|
||||
assert [c.stock_code for c in candidates] == ["AAPL"]
|
||||
assert candidates[0].signal == "momentum"
|
||||
assert candidates[0].price == 210.5
|
||||
|
||||
@pytest.mark.asyncio
|
||||
async def test_scan_overseas_uses_ranking_api_first(
|
||||
self, mock_broker: MagicMock, mock_overseas_broker: MagicMock, mock_settings: Settings
|
||||
) -> None:
|
||||
"""Overseas scan should prioritize ranking API when available."""
|
||||
analyzer = VolatilityAnalyzer()
|
||||
scanner = SmartVolatilityScanner(
|
||||
broker=mock_broker,
|
||||
overseas_broker=mock_overseas_broker,
|
||||
volatility_analyzer=analyzer,
|
||||
settings=mock_settings,
|
||||
)
|
||||
market = MagicMock()
|
||||
market.name = "NASDAQ"
|
||||
market.code = "US_NASDAQ"
|
||||
market.exchange_code = "NASD"
|
||||
market.is_domestic = False
|
||||
|
||||
mock_overseas_broker.fetch_overseas_rankings.return_value = [
|
||||
{"symb": "NVDA", "last": "780.2", "rate": "2.4", "tvol": "1200000"},
|
||||
{"symb": "MSFT", "last": "420.0", "rate": "0.3", "tvol": "900000"},
|
||||
]
|
||||
|
||||
candidates = await scanner.scan(market=market, fallback_stocks=["AAPL", "TSLA"])
|
||||
|
||||
assert mock_overseas_broker.fetch_overseas_rankings.call_count >= 1
|
||||
mock_overseas_broker.get_overseas_price.assert_not_called()
|
||||
assert [c.stock_code for c in candidates] == ["NVDA"]
|
||||
|
||||
@pytest.mark.asyncio
|
||||
async def test_scan_overseas_without_symbols_returns_empty(
|
||||
self, mock_broker: MagicMock, mock_overseas_broker: MagicMock, mock_settings: Settings
|
||||
) -> None:
|
||||
"""Overseas scan should return empty list when no symbol universe exists."""
|
||||
analyzer = VolatilityAnalyzer()
|
||||
scanner = SmartVolatilityScanner(
|
||||
broker=mock_broker,
|
||||
overseas_broker=mock_overseas_broker,
|
||||
volatility_analyzer=analyzer,
|
||||
settings=mock_settings,
|
||||
)
|
||||
market = MagicMock()
|
||||
market.name = "NASDAQ"
|
||||
market.code = "US_NASDAQ"
|
||||
market.exchange_code = "NASD"
|
||||
market.is_domestic = False
|
||||
|
||||
candidates = await scanner.scan(market=market, fallback_stocks=[])
|
||||
|
||||
assert candidates == []
|
||||
|
||||
@pytest.mark.asyncio
|
||||
async def test_scan_overseas_picks_high_intraday_range_even_with_low_change(
|
||||
self, mock_broker: MagicMock, mock_overseas_broker: MagicMock, mock_settings: Settings
|
||||
) -> None:
|
||||
"""Volatility selection should consider intraday range, not only change rate."""
|
||||
analyzer = VolatilityAnalyzer()
|
||||
scanner = SmartVolatilityScanner(
|
||||
broker=mock_broker,
|
||||
overseas_broker=mock_overseas_broker,
|
||||
volatility_analyzer=analyzer,
|
||||
settings=mock_settings,
|
||||
)
|
||||
market = MagicMock()
|
||||
market.name = "NASDAQ"
|
||||
market.code = "US_NASDAQ"
|
||||
market.exchange_code = "NASD"
|
||||
market.is_domestic = False
|
||||
|
||||
# change rate is tiny, but high-low range is large (15%).
|
||||
mock_overseas_broker.fetch_overseas_rankings.return_value = [
|
||||
{
|
||||
"symb": "ABCD",
|
||||
"last": "100",
|
||||
"rate": "0.2",
|
||||
"high": "110",
|
||||
"low": "95",
|
||||
"tvol": "800000",
|
||||
}
|
||||
]
|
||||
|
||||
candidates = await scanner.scan(market=market, fallback_stocks=[])
|
||||
|
||||
assert [c.stock_code for c in candidates] == ["ABCD"]
|
||||
|
||||
|
||||
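The three overseas-scan tests above assume a selection rule that weighs intraday range alongside the daily change rate, which is why a 0.2% mover with a 15% high-low spread still qualifies. A minimal sketch of that kind of scoring, using the same field names as the mocked ranking rows ("rate", "high", "low", "tvol"); the real SmartVolatilityScanner may weight or threshold these differently.

```python
# Hypothetical sketch of the overseas volatility ranking exercised above.
# Field names mirror the mocked ranking rows; the real scanner may combine
# change rate and intraday range with different weights or cutoffs.
from dataclasses import dataclass


@dataclass
class OverseasRow:
    symbol: str
    last: float
    change_rate: float  # percent, e.g. 0.2
    high: float
    low: float
    volume: float


def volatility_score(row: OverseasRow) -> float:
    """Score by absolute change rate plus intraday high-low range."""
    intraday_range_pct = (row.high - row.low) / row.low * 100 if row.low > 0 else 0.0
    # Equal weighting is an assumption; it only needs to let a wide-range,
    # low-change stock outrank a flat one, as the last test expects.
    return abs(row.change_rate) + intraday_range_pct


def pick_candidates(rows: list[OverseasRow], top_n: int = 3, min_score: float = 1.0) -> list[str]:
    ranked = sorted(rows, key=volatility_score, reverse=True)
    return [r.symbol for r in ranked if volatility_score(r) >= min_score][:top_n]


if __name__ == "__main__":
    rows = [OverseasRow("ABCD", 100.0, 0.2, 110.0, 95.0, 800_000)]
    print(pick_candidates(rows))  # ['ABCD'] despite the tiny change rate
```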
class TestRSICalculation:
|
||||
"""Test RSI calculation in VolatilityAnalyzer."""
|
||||
|
||||
def test_rsi_oversold(self) -> None:
|
||||
"""Test RSI calculation for downtrending prices."""
|
||||
analyzer = VolatilityAnalyzer()
|
||||
|
||||
# Steadily declining prices
|
||||
prices = [100 - i * 0.5 for i in range(20)]
|
||||
rsi = analyzer.calculate_rsi(prices, period=14)
|
||||
|
||||
assert rsi < 50  # Below neutral; true oversold is RSI < 30
|
||||
|
||||
def test_rsi_overbought(self) -> None:
|
||||
"""Test RSI calculation for uptrending prices."""
|
||||
analyzer = VolatilityAnalyzer()
|
||||
|
||||
# Steadily rising prices
|
||||
prices = [100 + i * 0.5 for i in range(20)]
|
||||
rsi = analyzer.calculate_rsi(prices, period=14)
|
||||
|
||||
assert rsi > 50  # Above neutral; true overbought is RSI > 70
|
||||
|
||||
def test_rsi_neutral(self) -> None:
|
||||
"""Test RSI calculation for flat prices."""
|
||||
analyzer = VolatilityAnalyzer()
|
||||
|
||||
# Flat prices with small oscillation
|
||||
prices = [100 + (i % 2) * 0.1 for i in range(20)]
|
||||
rsi = analyzer.calculate_rsi(prices, period=14)
|
||||
|
||||
assert 40 < rsi < 60 # Should be near neutral
|
||||
|
||||
def test_rsi_insufficient_data(self) -> None:
|
||||
"""Test RSI returns neutral when insufficient data."""
|
||||
analyzer = VolatilityAnalyzer()
|
||||
|
||||
prices = [100, 101, 102] # Only 3 prices, need 15+
|
||||
rsi = analyzer.calculate_rsi(prices, period=14)
|
||||
|
||||
assert rsi == 50.0 # Default neutral
|
||||
|
||||
def test_rsi_all_gains(self) -> None:
|
||||
"""Test RSI returns 100 when all gains (no losses)."""
|
||||
analyzer = VolatilityAnalyzer()
|
||||
|
||||
# Monotonic increase
|
||||
prices = [100 + i for i in range(20)]
|
||||
rsi = analyzer.calculate_rsi(prices, period=14)
|
||||
|
||||
assert rsi == 100.0 # Maximum RSI
|
||||
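The five RSI tests pin down three behaviours: fewer than period + 1 prices falls back to a neutral 50, an all-gains window returns 100, and mixed data lands in between. A simple-average RSI sketch that satisfies them; the production VolatilityAnalyzer may use Wilder smoothing, which these tests cannot distinguish.

```python
# Minimal RSI sketch consistent with the tests above (neutral 50 fallback,
# 100 on all-gains). Illustrative only; the real calculate_rsi may use
# Wilder smoothing instead of a plain average.
def calculate_rsi(prices: list[float], period: int = 14) -> float:
    if len(prices) < period + 1:
        return 50.0  # not enough data: report neutral

    gains = 0.0
    losses = 0.0
    # Pair each of the last `period` closes with its predecessor.
    for prev, curr in zip(prices[-period - 1:-1], prices[-period:]):
        delta = curr - prev
        if delta >= 0:
            gains += delta
        else:
            losses -= delta

    if losses == 0:
        return 100.0  # only gains in the window
    if gains == 0:
        return 0.0    # only losses in the window

    rs = (gains / period) / (losses / period)
    return 100.0 - 100.0 / (1.0 + rs)


if __name__ == "__main__":
    print(calculate_rsi([100 + i for i in range(20)]))  # 100.0
    print(calculate_rsi([100, 101, 102]))                # 50.0 (insufficient data)
```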
366 tests/test_strategy_models.py (Normal file)
@@ -0,0 +1,366 @@
|
||||
"""Tests for strategy/playbook Pydantic models."""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
from datetime import date
|
||||
|
||||
import pytest
|
||||
from pydantic import ValidationError
|
||||
|
||||
from src.strategy.models import (
|
||||
CrossMarketContext,
|
||||
DayPlaybook,
|
||||
GlobalRule,
|
||||
MarketOutlook,
|
||||
PlaybookStatus,
|
||||
ScenarioAction,
|
||||
StockCondition,
|
||||
StockPlaybook,
|
||||
StockScenario,
|
||||
)
|
||||
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# StockCondition
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
|
||||
class TestStockCondition:
|
||||
def test_empty_condition(self) -> None:
|
||||
cond = StockCondition()
|
||||
assert not cond.has_any_condition()
|
||||
|
||||
def test_single_field(self) -> None:
|
||||
cond = StockCondition(rsi_below=30.0)
|
||||
assert cond.has_any_condition()
|
||||
|
||||
def test_multiple_fields(self) -> None:
|
||||
cond = StockCondition(rsi_below=25.0, volume_ratio_above=3.0)
|
||||
assert cond.has_any_condition()
|
||||
|
||||
def test_all_fields(self) -> None:
|
||||
cond = StockCondition(
|
||||
rsi_below=30,
|
||||
rsi_above=10,
|
||||
volume_ratio_above=2.0,
|
||||
volume_ratio_below=10.0,
|
||||
price_above=1000,
|
||||
price_below=50000,
|
||||
price_change_pct_above=-5.0,
|
||||
price_change_pct_below=5.0,
|
||||
)
|
||||
assert cond.has_any_condition()
|
||||
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# StockScenario
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
|
||||
class TestStockScenario:
|
||||
def test_valid_scenario(self) -> None:
|
||||
s = StockScenario(
|
||||
condition=StockCondition(rsi_below=25.0),
|
||||
action=ScenarioAction.BUY,
|
||||
confidence=85,
|
||||
allocation_pct=15.0,
|
||||
stop_loss_pct=-2.0,
|
||||
take_profit_pct=3.0,
|
||||
rationale="Oversold bounce expected",
|
||||
)
|
||||
assert s.action == ScenarioAction.BUY
|
||||
assert s.confidence == 85
|
||||
|
||||
def test_confidence_too_high(self) -> None:
|
||||
with pytest.raises(ValidationError):
|
||||
StockScenario(
|
||||
condition=StockCondition(),
|
||||
action=ScenarioAction.BUY,
|
||||
confidence=101,
|
||||
)
|
||||
|
||||
def test_confidence_too_low(self) -> None:
|
||||
with pytest.raises(ValidationError):
|
||||
StockScenario(
|
||||
condition=StockCondition(),
|
||||
action=ScenarioAction.BUY,
|
||||
confidence=-1,
|
||||
)
|
||||
|
||||
def test_allocation_too_high(self) -> None:
|
||||
with pytest.raises(ValidationError):
|
||||
StockScenario(
|
||||
condition=StockCondition(),
|
||||
action=ScenarioAction.BUY,
|
||||
confidence=80,
|
||||
allocation_pct=101.0,
|
||||
)
|
||||
|
||||
def test_stop_loss_must_be_negative(self) -> None:
|
||||
with pytest.raises(ValidationError):
|
||||
StockScenario(
|
||||
condition=StockCondition(),
|
||||
action=ScenarioAction.BUY,
|
||||
confidence=80,
|
||||
stop_loss_pct=1.0,
|
||||
)
|
||||
|
||||
def test_take_profit_must_be_positive(self) -> None:
|
||||
with pytest.raises(ValidationError):
|
||||
StockScenario(
|
||||
condition=StockCondition(),
|
||||
action=ScenarioAction.BUY,
|
||||
confidence=80,
|
||||
take_profit_pct=-1.0,
|
||||
)
|
||||
|
||||
def test_defaults(self) -> None:
|
||||
s = StockScenario(
|
||||
condition=StockCondition(),
|
||||
action=ScenarioAction.HOLD,
|
||||
confidence=50,
|
||||
)
|
||||
assert s.allocation_pct == 10.0
|
||||
assert s.stop_loss_pct == -2.0
|
||||
assert s.take_profit_pct == 3.0
|
||||
assert s.rationale == ""
|
||||
|
||||
|
||||
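The constraints exercised above map naturally onto Pydantic field bounds: confidence clamped to 0-100, allocation capped at 100, stop loss strictly negative, take profit strictly positive, plus the listed defaults. A trimmed sketch under those assumptions; the real src.strategy.models may declare the bounds with validators instead, and StockCondition carries more fields than shown here.

```python
# Hedged sketch of the StockScenario constraints the tests above exercise.
# Field names come from the tests; the actual model may differ in detail.
from enum import Enum
from typing import Optional

from pydantic import BaseModel, Field


class ScenarioAction(str, Enum):
    BUY = "BUY"
    SELL = "SELL"
    HOLD = "HOLD"
    REDUCE_ALL = "REDUCE_ALL"


class StockCondition(BaseModel):
    # Trimmed: the real model also has price_* and *_below counterparts.
    rsi_below: Optional[float] = None
    rsi_above: Optional[float] = None
    volume_ratio_above: Optional[float] = None

    def has_any_condition(self) -> bool:
        return any(v is not None for v in self.model_dump().values())


class StockScenario(BaseModel):
    condition: StockCondition
    action: ScenarioAction
    confidence: int = Field(ge=0, le=100)                # 101 and -1 both rejected
    allocation_pct: float = Field(default=10.0, gt=0, le=100)
    stop_loss_pct: float = Field(default=-2.0, lt=0)     # must stay negative
    take_profit_pct: float = Field(default=3.0, gt=0)    # must stay positive
    rationale: str = ""
```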
# ---------------------------------------------------------------------------
|
||||
# StockPlaybook
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
|
||||
class TestStockPlaybook:
|
||||
def test_valid_playbook(self) -> None:
|
||||
pb = StockPlaybook(
|
||||
stock_code="005930",
|
||||
stock_name="Samsung Electronics",
|
||||
scenarios=[
|
||||
StockScenario(
|
||||
condition=StockCondition(rsi_below=25.0),
|
||||
action=ScenarioAction.BUY,
|
||||
confidence=85,
|
||||
),
|
||||
],
|
||||
)
|
||||
assert pb.stock_code == "005930"
|
||||
assert len(pb.scenarios) == 1
|
||||
|
||||
def test_empty_scenarios_rejected(self) -> None:
|
||||
with pytest.raises(ValidationError):
|
||||
StockPlaybook(
|
||||
stock_code="005930",
|
||||
scenarios=[],
|
||||
)
|
||||
|
||||
def test_multiple_scenarios(self) -> None:
|
||||
pb = StockPlaybook(
|
||||
stock_code="AAPL",
|
||||
scenarios=[
|
||||
StockScenario(
|
||||
condition=StockCondition(rsi_below=25.0),
|
||||
action=ScenarioAction.BUY,
|
||||
confidence=85,
|
||||
),
|
||||
StockScenario(
|
||||
condition=StockCondition(rsi_above=75.0),
|
||||
action=ScenarioAction.SELL,
|
||||
confidence=80,
|
||||
),
|
||||
],
|
||||
)
|
||||
assert len(pb.scenarios) == 2
|
||||
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# GlobalRule
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
|
||||
class TestGlobalRule:
|
||||
def test_valid_rule(self) -> None:
|
||||
rule = GlobalRule(
|
||||
condition="portfolio_pnl_pct < -2.0",
|
||||
action=ScenarioAction.REDUCE_ALL,
|
||||
rationale="Risk limit approaching",
|
||||
)
|
||||
assert rule.action == ScenarioAction.REDUCE_ALL
|
||||
|
||||
def test_hold_rule(self) -> None:
|
||||
rule = GlobalRule(
|
||||
condition="volatility_index > 30",
|
||||
action=ScenarioAction.HOLD,
|
||||
)
|
||||
assert rule.rationale == ""
|
||||
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# CrossMarketContext
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
|
||||
class TestCrossMarketContext:
|
||||
def test_valid_context(self) -> None:
|
||||
ctx = CrossMarketContext(
|
||||
market="US",
|
||||
date="2026-02-07",
|
||||
total_pnl=-1.5,
|
||||
win_rate=40.0,
|
||||
index_change_pct=-2.3,
|
||||
key_events=["Fed rate decision"],
|
||||
lessons=["Avoid tech sector on rate hike days"],
|
||||
)
|
||||
assert ctx.market == "US"
|
||||
assert len(ctx.key_events) == 1
|
||||
|
||||
def test_defaults(self) -> None:
|
||||
ctx = CrossMarketContext(market="KR", date="2026-02-07")
|
||||
assert ctx.total_pnl == 0.0
|
||||
assert ctx.key_events == []
|
||||
assert ctx.lessons == []
|
||||
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# DayPlaybook
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
|
||||
def _make_scenario(rsi_below: float = 25.0) -> StockScenario:
|
||||
return StockScenario(
|
||||
condition=StockCondition(rsi_below=rsi_below),
|
||||
action=ScenarioAction.BUY,
|
||||
confidence=85,
|
||||
)
|
||||
|
||||
|
||||
def _make_playbook(**kwargs) -> DayPlaybook:
|
||||
defaults = {
|
||||
"date": date(2026, 2, 7),
|
||||
"market": "KR",
|
||||
"stock_playbooks": [
|
||||
StockPlaybook(stock_code="005930", scenarios=[_make_scenario()]),
|
||||
],
|
||||
}
|
||||
defaults.update(kwargs)
|
||||
return DayPlaybook(**defaults)
|
||||
|
||||
|
||||
class TestDayPlaybook:
|
||||
def test_valid_playbook(self) -> None:
|
||||
pb = _make_playbook()
|
||||
assert pb.market == "KR"
|
||||
assert pb.date == date(2026, 2, 7)
|
||||
assert pb.default_action == ScenarioAction.HOLD
|
||||
assert pb.scenario_count == 1
|
||||
assert pb.stock_count == 1
|
||||
|
||||
def test_generated_at_auto_set(self) -> None:
|
||||
pb = _make_playbook()
|
||||
assert pb.generated_at != ""
|
||||
|
||||
def test_explicit_generated_at(self) -> None:
|
||||
pb = _make_playbook(generated_at="2026-02-07T08:30:00")
|
||||
assert pb.generated_at == "2026-02-07T08:30:00"
|
||||
|
||||
def test_duplicate_stocks_rejected(self) -> None:
|
||||
with pytest.raises(ValidationError):
|
||||
DayPlaybook(
|
||||
date=date(2026, 2, 7),
|
||||
market="KR",
|
||||
stock_playbooks=[
|
||||
StockPlaybook(stock_code="005930", scenarios=[_make_scenario()]),
|
||||
StockPlaybook(stock_code="005930", scenarios=[_make_scenario(30)]),
|
||||
],
|
||||
)
|
||||
|
||||
def test_empty_stock_playbooks_allowed(self) -> None:
|
||||
pb = DayPlaybook(
|
||||
date=date(2026, 2, 7),
|
||||
market="KR",
|
||||
stock_playbooks=[],
|
||||
)
|
||||
assert pb.stock_count == 0
|
||||
assert pb.scenario_count == 0
|
||||
|
||||
def test_get_stock_playbook_found(self) -> None:
|
||||
pb = _make_playbook()
|
||||
result = pb.get_stock_playbook("005930")
|
||||
assert result is not None
|
||||
assert result.stock_code == "005930"
|
||||
|
||||
def test_get_stock_playbook_not_found(self) -> None:
|
||||
pb = _make_playbook()
|
||||
result = pb.get_stock_playbook("AAPL")
|
||||
assert result is None
|
||||
|
||||
def test_with_global_rules(self) -> None:
|
||||
pb = _make_playbook(
|
||||
global_rules=[
|
||||
GlobalRule(
|
||||
condition="portfolio_pnl_pct < -2.0",
|
||||
action=ScenarioAction.REDUCE_ALL,
|
||||
),
|
||||
],
|
||||
)
|
||||
assert len(pb.global_rules) == 1
|
||||
|
||||
def test_with_cross_market_context(self) -> None:
|
||||
ctx = CrossMarketContext(market="US", date="2026-02-07", total_pnl=-1.5)
|
||||
pb = _make_playbook(cross_market=ctx)
|
||||
assert pb.cross_market is not None
|
||||
assert pb.cross_market.market == "US"
|
||||
|
||||
def test_market_outlook(self) -> None:
|
||||
pb = _make_playbook(market_outlook=MarketOutlook.BEARISH)
|
||||
assert pb.market_outlook == MarketOutlook.BEARISH
|
||||
|
||||
def test_multiple_stocks_multiple_scenarios(self) -> None:
|
||||
pb = DayPlaybook(
|
||||
date=date(2026, 2, 7),
|
||||
market="US",
|
||||
stock_playbooks=[
|
||||
StockPlaybook(
|
||||
stock_code="AAPL",
|
||||
scenarios=[_make_scenario(), _make_scenario(30)],
|
||||
),
|
||||
StockPlaybook(
|
||||
stock_code="MSFT",
|
||||
scenarios=[_make_scenario()],
|
||||
),
|
||||
],
|
||||
)
|
||||
assert pb.stock_count == 2
|
||||
assert pb.scenario_count == 3
|
||||
|
||||
def test_serialization_roundtrip(self) -> None:
|
||||
pb = _make_playbook(
|
||||
market_outlook=MarketOutlook.BULLISH,
|
||||
cross_market=CrossMarketContext(market="US", date="2026-02-07"),
|
||||
)
|
||||
json_str = pb.model_dump_json()
|
||||
restored = DayPlaybook.model_validate_json(json_str)
|
||||
assert restored.market == pb.market
|
||||
assert restored.date == pb.date
|
||||
assert restored.scenario_count == pb.scenario_count
|
||||
assert restored.cross_market is not None
|
||||
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# Enums
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
|
||||
class TestEnums:
|
||||
def test_scenario_action_values(self) -> None:
|
||||
assert ScenarioAction.BUY.value == "BUY"
|
||||
assert ScenarioAction.SELL.value == "SELL"
|
||||
assert ScenarioAction.HOLD.value == "HOLD"
|
||||
assert ScenarioAction.REDUCE_ALL.value == "REDUCE_ALL"
|
||||
|
||||
def test_market_outlook_values(self) -> None:
|
||||
assert len(MarketOutlook) == 5
|
||||
|
||||
def test_playbook_status_values(self) -> None:
|
||||
assert PlaybookStatus.READY.value == "ready"
|
||||
assert PlaybookStatus.EXPIRED.value == "expired"
|
||||
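Taken together, the DayPlaybook tests imply a field validator that rejects duplicate stock codes, counts derived from the nested playbooks, and a generated_at default filled at construction time. A hedged sketch of that shape; the actual model in src.strategy.models may differ.

```python
# Sketch of the DayPlaybook behaviours tested above: duplicate stock codes
# rejected, counts computed from nested playbooks, generated_at auto-set.
# Illustration only, not the verified model definition.
from datetime import date, datetime

from pydantic import BaseModel, Field, field_validator


class StockPlaybook(BaseModel):
    stock_code: str
    scenarios: list[dict] = Field(min_length=1)  # simplified; real model nests StockScenario


class DayPlaybookSketch(BaseModel):
    date: date
    market: str
    stock_playbooks: list[StockPlaybook] = Field(default_factory=list)  # empty list allowed
    generated_at: str = Field(default_factory=lambda: datetime.now().isoformat())

    @field_validator("stock_playbooks")
    @classmethod
    def no_duplicate_stocks(cls, playbooks: list[StockPlaybook]) -> list[StockPlaybook]:
        codes = [pb.stock_code for pb in playbooks]
        if len(codes) != len(set(codes)):
            raise ValueError("duplicate stock codes in playbook")
        return playbooks

    @property
    def stock_count(self) -> int:
        return len(self.stock_playbooks)

    @property
    def scenario_count(self) -> int:
        return sum(len(pb.scenarios) for pb in self.stock_playbooks)
```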
@@ -39,6 +39,76 @@ class TestTelegramClientInit:
|
||||
class TestNotificationSending:
|
||||
"""Test notification sending behavior."""
|
||||
|
||||
@pytest.mark.asyncio
|
||||
async def test_send_message_success(self) -> None:
|
||||
"""send_message returns True on successful send."""
|
||||
client = TelegramClient(
|
||||
bot_token="123:abc", chat_id="456", enabled=True
|
||||
)
|
||||
|
||||
mock_resp = AsyncMock()
|
||||
mock_resp.status = 200
|
||||
mock_resp.__aenter__ = AsyncMock(return_value=mock_resp)
|
||||
mock_resp.__aexit__ = AsyncMock(return_value=False)
|
||||
|
||||
with patch("aiohttp.ClientSession.post", return_value=mock_resp) as mock_post:
|
||||
result = await client.send_message("Test message")
|
||||
|
||||
assert result is True
|
||||
assert mock_post.call_count == 1
|
||||
|
||||
payload = mock_post.call_args.kwargs["json"]
|
||||
assert payload["chat_id"] == "456"
|
||||
assert payload["text"] == "Test message"
|
||||
assert payload["parse_mode"] == "HTML"
|
||||
|
||||
@pytest.mark.asyncio
|
||||
async def test_send_message_disabled_client(self) -> None:
|
||||
"""send_message returns False when client disabled."""
|
||||
client = TelegramClient(enabled=False)
|
||||
|
||||
with patch("aiohttp.ClientSession.post") as mock_post:
|
||||
result = await client.send_message("Test message")
|
||||
|
||||
assert result is False
|
||||
mock_post.assert_not_called()
|
||||
|
||||
@pytest.mark.asyncio
|
||||
async def test_send_message_api_error(self) -> None:
|
||||
"""send_message returns False on API error."""
|
||||
client = TelegramClient(
|
||||
bot_token="123:abc", chat_id="456", enabled=True
|
||||
)
|
||||
|
||||
mock_resp = AsyncMock()
|
||||
mock_resp.status = 400
|
||||
mock_resp.text = AsyncMock(return_value="Bad Request")
|
||||
mock_resp.__aenter__ = AsyncMock(return_value=mock_resp)
|
||||
mock_resp.__aexit__ = AsyncMock(return_value=False)
|
||||
|
||||
with patch("aiohttp.ClientSession.post", return_value=mock_resp):
|
||||
result = await client.send_message("Test message")
|
||||
assert result is False
|
||||
|
||||
@pytest.mark.asyncio
|
||||
async def test_send_message_with_markdown(self) -> None:
|
||||
"""send_message supports different parse modes."""
|
||||
client = TelegramClient(
|
||||
bot_token="123:abc", chat_id="456", enabled=True
|
||||
)
|
||||
|
||||
mock_resp = AsyncMock()
|
||||
mock_resp.status = 200
|
||||
mock_resp.__aenter__ = AsyncMock(return_value=mock_resp)
|
||||
mock_resp.__aexit__ = AsyncMock(return_value=False)
|
||||
|
||||
with patch("aiohttp.ClientSession.post", return_value=mock_resp) as mock_post:
|
||||
result = await client.send_message("*bold*", parse_mode="Markdown")
|
||||
|
||||
assert result is True
|
||||
payload = mock_post.call_args.kwargs["json"]
|
||||
assert payload["parse_mode"] == "Markdown"
|
||||
|
||||
@pytest.mark.asyncio
|
||||
async def test_no_send_when_disabled(self) -> None:
|
||||
"""Notifications not sent when client disabled."""
|
||||
@@ -90,6 +160,83 @@ class TestNotificationSending:
|
||||
assert "250.50" in payload["text"]
|
||||
assert "92%" in payload["text"]
|
||||
|
||||
@pytest.mark.asyncio
|
||||
async def test_playbook_generated_format(self) -> None:
|
||||
"""Playbook generated notification has expected fields."""
|
||||
client = TelegramClient(
|
||||
bot_token="123:abc", chat_id="456", enabled=True
|
||||
)
|
||||
|
||||
mock_resp = AsyncMock()
|
||||
mock_resp.status = 200
|
||||
mock_resp.__aenter__ = AsyncMock(return_value=mock_resp)
|
||||
mock_resp.__aexit__ = AsyncMock(return_value=False)
|
||||
|
||||
with patch("aiohttp.ClientSession.post", return_value=mock_resp) as mock_post:
|
||||
await client.notify_playbook_generated(
|
||||
market="KR",
|
||||
stock_count=4,
|
||||
scenario_count=12,
|
||||
token_count=980,
|
||||
)
|
||||
|
||||
payload = mock_post.call_args.kwargs["json"]
|
||||
assert "Playbook Generated" in payload["text"]
|
||||
assert "Market: KR" in payload["text"]
|
||||
assert "Stocks: 4" in payload["text"]
|
||||
assert "Scenarios: 12" in payload["text"]
|
||||
assert "Tokens: 980" in payload["text"]
|
||||
|
||||
@pytest.mark.asyncio
|
||||
async def test_scenario_matched_format(self) -> None:
|
||||
"""Scenario matched notification has expected fields."""
|
||||
client = TelegramClient(
|
||||
bot_token="123:abc", chat_id="456", enabled=True
|
||||
)
|
||||
|
||||
mock_resp = AsyncMock()
|
||||
mock_resp.status = 200
|
||||
mock_resp.__aenter__ = AsyncMock(return_value=mock_resp)
|
||||
mock_resp.__aexit__ = AsyncMock(return_value=False)
|
||||
|
||||
with patch("aiohttp.ClientSession.post", return_value=mock_resp) as mock_post:
|
||||
await client.notify_scenario_matched(
|
||||
stock_code="AAPL",
|
||||
action="BUY",
|
||||
condition_summary="RSI < 30, volume_ratio > 2.0",
|
||||
confidence=88.2,
|
||||
)
|
||||
|
||||
payload = mock_post.call_args.kwargs["json"]
|
||||
assert "Scenario Matched" in payload["text"]
|
||||
assert "AAPL" in payload["text"]
|
||||
assert "Action: BUY" in payload["text"]
|
||||
assert "RSI < 30" in payload["text"]
|
||||
assert "88%" in payload["text"]
|
||||
|
||||
@pytest.mark.asyncio
|
||||
async def test_playbook_failed_format(self) -> None:
|
||||
"""Playbook failed notification has expected fields."""
|
||||
client = TelegramClient(
|
||||
bot_token="123:abc", chat_id="456", enabled=True
|
||||
)
|
||||
|
||||
mock_resp = AsyncMock()
|
||||
mock_resp.status = 200
|
||||
mock_resp.__aenter__ = AsyncMock(return_value=mock_resp)
|
||||
mock_resp.__aexit__ = AsyncMock(return_value=False)
|
||||
|
||||
with patch("aiohttp.ClientSession.post", return_value=mock_resp) as mock_post:
|
||||
await client.notify_playbook_failed(
|
||||
market="US",
|
||||
reason="Gemini timeout",
|
||||
)
|
||||
|
||||
payload = mock_post.call_args.kwargs["json"]
|
||||
assert "Playbook Failed" in payload["text"]
|
||||
assert "Market: US" in payload["text"]
|
||||
assert "Gemini timeout" in payload["text"]
|
||||
|
||||
@pytest.mark.asyncio
|
||||
async def test_circuit_breaker_priority(self) -> None:
|
||||
"""Circuit breaker uses CRITICAL priority."""
|
||||
@@ -239,6 +386,73 @@ class TestMessagePriorities:
|
||||
payload = mock_post.call_args.kwargs["json"]
|
||||
assert NotificationPriority.CRITICAL.emoji in payload["text"]
|
||||
|
||||
@pytest.mark.asyncio
|
||||
async def test_playbook_generated_priority(self) -> None:
|
||||
"""Playbook generated uses MEDIUM priority emoji."""
|
||||
client = TelegramClient(
|
||||
bot_token="123:abc", chat_id="456", enabled=True
|
||||
)
|
||||
|
||||
mock_resp = AsyncMock()
|
||||
mock_resp.status = 200
|
||||
mock_resp.__aenter__ = AsyncMock(return_value=mock_resp)
|
||||
mock_resp.__aexit__ = AsyncMock(return_value=False)
|
||||
|
||||
with patch("aiohttp.ClientSession.post", return_value=mock_resp) as mock_post:
|
||||
await client.notify_playbook_generated(
|
||||
market="KR",
|
||||
stock_count=2,
|
||||
scenario_count=4,
|
||||
token_count=123,
|
||||
)
|
||||
|
||||
payload = mock_post.call_args.kwargs["json"]
|
||||
assert NotificationPriority.MEDIUM.emoji in payload["text"]
|
||||
|
||||
@pytest.mark.asyncio
|
||||
async def test_playbook_failed_priority(self) -> None:
|
||||
"""Playbook failed uses HIGH priority emoji."""
|
||||
client = TelegramClient(
|
||||
bot_token="123:abc", chat_id="456", enabled=True
|
||||
)
|
||||
|
||||
mock_resp = AsyncMock()
|
||||
mock_resp.status = 200
|
||||
mock_resp.__aenter__ = AsyncMock(return_value=mock_resp)
|
||||
mock_resp.__aexit__ = AsyncMock(return_value=False)
|
||||
|
||||
with patch("aiohttp.ClientSession.post", return_value=mock_resp) as mock_post:
|
||||
await client.notify_playbook_failed(
|
||||
market="KR",
|
||||
reason="Invalid JSON",
|
||||
)
|
||||
|
||||
payload = mock_post.call_args.kwargs["json"]
|
||||
assert NotificationPriority.HIGH.emoji in payload["text"]
|
||||
|
||||
@pytest.mark.asyncio
|
||||
async def test_scenario_matched_priority(self) -> None:
|
||||
"""Scenario matched uses HIGH priority emoji."""
|
||||
client = TelegramClient(
|
||||
bot_token="123:abc", chat_id="456", enabled=True
|
||||
)
|
||||
|
||||
mock_resp = AsyncMock()
|
||||
mock_resp.status = 200
|
||||
mock_resp.__aenter__ = AsyncMock(return_value=mock_resp)
|
||||
mock_resp.__aexit__ = AsyncMock(return_value=False)
|
||||
|
||||
with patch("aiohttp.ClientSession.post", return_value=mock_resp) as mock_post:
|
||||
await client.notify_scenario_matched(
|
||||
stock_code="AAPL",
|
||||
action="BUY",
|
||||
condition_summary="RSI < 30",
|
||||
confidence=80.0,
|
||||
)
|
||||
|
||||
payload = mock_post.call_args.kwargs["json"]
|
||||
assert NotificationPriority.HIGH.emoji in payload["text"]
|
||||
|
||||
|
||||
class TestClientCleanup:
|
||||
"""Test client cleanup behavior."""
|
||||
|
||||
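Every notification test above funnels through one send_message call: the JSON payload carries chat_id, text, and parse_mode, True comes back only on HTTP 200, and a disabled client or API error yields False without raising. A minimal aiohttp sketch of that contract; how the real TelegramClient manages its session is an assumption.

```python
# Minimal sketch of the send_message contract the tests above rely on.
# Session handling and attribute names are assumptions, not the verified
# TelegramClient internals.
import aiohttp


class TelegramClientSketch:
    def __init__(self, bot_token: str = "", chat_id: str = "", enabled: bool = False) -> None:
        self._bot_token = bot_token
        self._chat_id = chat_id
        self._enabled = enabled and bool(bot_token) and bool(chat_id)

    async def send_message(self, text: str, parse_mode: str = "HTML") -> bool:
        if not self._enabled:
            return False  # disabled client: no request is made
        url = f"https://api.telegram.org/bot{self._bot_token}/sendMessage"
        payload = {"chat_id": self._chat_id, "text": text, "parse_mode": parse_mode}
        try:
            async with aiohttp.ClientSession() as session:
                async with session.post(url, json=payload) as resp:
                    return resp.status == 200
        except aiohttp.ClientError:
            return False  # fail-safe: notifications never raise into trading code
```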
877 tests/test_telegram_commands.py (Normal file)
@@ -0,0 +1,877 @@
|
||||
"""Tests for Telegram command handler."""
|
||||
|
||||
from unittest.mock import AsyncMock, patch
|
||||
|
||||
import pytest
|
||||
|
||||
from src.notifications.telegram_client import TelegramClient, TelegramCommandHandler
|
||||
|
||||
|
||||
class TestCommandHandlerInit:
|
||||
"""Test command handler initialization."""
|
||||
|
||||
def test_init_with_client(self) -> None:
|
||||
"""Handler initializes with TelegramClient."""
|
||||
client = TelegramClient(bot_token="123:abc", chat_id="456", enabled=True)
|
||||
handler = TelegramCommandHandler(client)
|
||||
|
||||
assert handler._client is client
|
||||
assert handler._polling_interval == 1.0
|
||||
assert handler._commands == {}
|
||||
assert handler._running is False
|
||||
|
||||
def test_custom_polling_interval(self) -> None:
|
||||
"""Handler accepts custom polling interval."""
|
||||
client = TelegramClient(bot_token="123:abc", chat_id="456", enabled=True)
|
||||
handler = TelegramCommandHandler(client, polling_interval=2.5)
|
||||
|
||||
assert handler._polling_interval == 2.5
|
||||
|
||||
|
||||
class TestCommandRegistration:
|
||||
"""Test command registration."""
|
||||
|
||||
@pytest.mark.asyncio
|
||||
async def test_register_command(self) -> None:
|
||||
"""Commands can be registered."""
|
||||
client = TelegramClient(bot_token="123:abc", chat_id="456", enabled=True)
|
||||
handler = TelegramCommandHandler(client)
|
||||
|
||||
async def test_handler() -> None:
|
||||
pass
|
||||
|
||||
handler.register_command("test", test_handler)
|
||||
|
||||
assert "test" in handler._commands
|
||||
assert handler._commands["test"] is test_handler
|
||||
|
||||
@pytest.mark.asyncio
|
||||
async def test_register_multiple_commands(self) -> None:
|
||||
"""Multiple commands can be registered."""
|
||||
client = TelegramClient(bot_token="123:abc", chat_id="456", enabled=True)
|
||||
handler = TelegramCommandHandler(client)
|
||||
|
||||
async def handler1() -> None:
|
||||
pass
|
||||
|
||||
async def handler2() -> None:
|
||||
pass
|
||||
|
||||
handler.register_command("start", handler1)
|
||||
handler.register_command("help", handler2)
|
||||
|
||||
assert len(handler._commands) == 2
|
||||
assert handler._commands["start"] is handler1
|
||||
assert handler._commands["help"] is handler2
|
||||
|
||||
|
||||
class TestPollingLifecycle:
|
||||
"""Test polling start/stop."""
|
||||
|
||||
@pytest.mark.asyncio
|
||||
async def test_start_polling(self) -> None:
|
||||
"""Polling can be started."""
|
||||
client = TelegramClient(bot_token="123:abc", chat_id="456", enabled=True)
|
||||
handler = TelegramCommandHandler(client)
|
||||
|
||||
with patch.object(handler, "_poll_loop", new_callable=AsyncMock):
|
||||
await handler.start_polling()
|
||||
|
||||
assert handler._running is True
|
||||
assert handler._polling_task is not None
|
||||
|
||||
await handler.stop_polling()
|
||||
|
||||
@pytest.mark.asyncio
|
||||
async def test_start_polling_disabled_client(self) -> None:
|
||||
"""Polling not started when client disabled."""
|
||||
client = TelegramClient(enabled=False)
|
||||
handler = TelegramCommandHandler(client)
|
||||
|
||||
await handler.start_polling()
|
||||
|
||||
assert handler._running is False
|
||||
assert handler._polling_task is None
|
||||
|
||||
@pytest.mark.asyncio
|
||||
async def test_stop_polling(self) -> None:
|
||||
"""Polling can be stopped."""
|
||||
client = TelegramClient(bot_token="123:abc", chat_id="456", enabled=True)
|
||||
handler = TelegramCommandHandler(client)
|
||||
|
||||
with patch.object(handler, "_poll_loop", new_callable=AsyncMock):
|
||||
await handler.start_polling()
|
||||
await handler.stop_polling()
|
||||
|
||||
assert handler._running is False
|
||||
|
||||
@pytest.mark.asyncio
|
||||
async def test_double_start_ignored(self) -> None:
|
||||
"""Starting already running handler is ignored."""
|
||||
client = TelegramClient(bot_token="123:abc", chat_id="456", enabled=True)
|
||||
handler = TelegramCommandHandler(client)
|
||||
|
||||
with patch.object(handler, "_poll_loop", new_callable=AsyncMock):
|
||||
await handler.start_polling()
|
||||
task1 = handler._polling_task
|
||||
|
||||
await handler.start_polling() # Second start
|
||||
task2 = handler._polling_task
|
||||
|
||||
# Should be the same task
|
||||
assert task1 is task2
|
||||
|
||||
await handler.stop_polling()
|
||||
|
||||
|
||||
class TestUpdateHandling:
|
||||
"""Test update parsing and handling."""
|
||||
|
||||
@pytest.mark.asyncio
|
||||
async def test_handle_valid_command(self) -> None:
|
||||
"""Valid commands are executed."""
|
||||
client = TelegramClient(bot_token="123:abc", chat_id="456", enabled=True)
|
||||
handler = TelegramCommandHandler(client)
|
||||
|
||||
executed = False
|
||||
|
||||
async def test_command() -> None:
|
||||
nonlocal executed
|
||||
executed = True
|
||||
|
||||
handler.register_command("test", test_command)
|
||||
|
||||
update = {
|
||||
"update_id": 1,
|
||||
"message": {
|
||||
"chat": {"id": 456},
|
||||
"text": "/test",
|
||||
},
|
||||
}
|
||||
|
||||
await handler._handle_update(update)
|
||||
assert executed is True
|
||||
|
||||
@pytest.mark.asyncio
|
||||
async def test_handle_unknown_command(self) -> None:
|
||||
"""Unknown commands send help message."""
|
||||
client = TelegramClient(bot_token="123:abc", chat_id="456", enabled=True)
|
||||
handler = TelegramCommandHandler(client)
|
||||
|
||||
mock_resp = AsyncMock()
|
||||
mock_resp.status = 200
|
||||
mock_resp.__aenter__ = AsyncMock(return_value=mock_resp)
|
||||
mock_resp.__aexit__ = AsyncMock(return_value=False)
|
||||
|
||||
with patch("aiohttp.ClientSession.post", return_value=mock_resp) as mock_post:
|
||||
update = {
|
||||
"update_id": 1,
|
||||
"message": {
|
||||
"chat": {"id": 456},
|
||||
"text": "/unknown",
|
||||
},
|
||||
}
|
||||
|
||||
await handler._handle_update(update)
|
||||
|
||||
# Should send error message
|
||||
assert mock_post.call_count == 1
|
||||
payload = mock_post.call_args.kwargs["json"]
|
||||
assert "Unknown command" in payload["text"]
|
||||
assert "/unknown" in payload["text"]
|
||||
|
||||
@pytest.mark.asyncio
|
||||
async def test_ignore_unauthorized_chat(self) -> None:
|
||||
"""Commands from unauthorized chats are ignored."""
|
||||
client = TelegramClient(bot_token="123:abc", chat_id="456", enabled=True)
|
||||
handler = TelegramCommandHandler(client)
|
||||
|
||||
executed = False
|
||||
|
||||
async def test_command() -> None:
|
||||
nonlocal executed
|
||||
executed = True
|
||||
|
||||
handler.register_command("test", test_command)
|
||||
|
||||
update = {
|
||||
"update_id": 1,
|
||||
"message": {
|
||||
"chat": {"id": 999}, # Wrong chat_id
|
||||
"text": "/test",
|
||||
},
|
||||
}
|
||||
|
||||
await handler._handle_update(update)
|
||||
assert executed is False
|
||||
|
||||
@pytest.mark.asyncio
|
||||
async def test_ignore_non_command_text(self) -> None:
|
||||
"""Non-command text is ignored."""
|
||||
client = TelegramClient(bot_token="123:abc", chat_id="456", enabled=True)
|
||||
handler = TelegramCommandHandler(client)
|
||||
|
||||
executed = False
|
||||
|
||||
async def test_command() -> None:
|
||||
nonlocal executed
|
||||
executed = True
|
||||
|
||||
handler.register_command("test", test_command)
|
||||
|
||||
update = {
|
||||
"update_id": 1,
|
||||
"message": {
|
||||
"chat": {"id": 456},
|
||||
"text": "Hello, not a command",
|
||||
},
|
||||
}
|
||||
|
||||
await handler._handle_update(update)
|
||||
assert executed is False
|
||||
|
||||
@pytest.mark.asyncio
|
||||
async def test_handle_command_with_botname(self) -> None:
|
||||
"""Commands with @botname suffix are handled correctly."""
|
||||
client = TelegramClient(bot_token="123:abc", chat_id="456", enabled=True)
|
||||
handler = TelegramCommandHandler(client)
|
||||
|
||||
executed = False
|
||||
|
||||
async def test_command() -> None:
|
||||
nonlocal executed
|
||||
executed = True
|
||||
|
||||
handler.register_command("start", test_command)
|
||||
|
||||
update = {
|
||||
"update_id": 1,
|
||||
"message": {
|
||||
"chat": {"id": 456},
|
||||
"text": "/start@mybot",
|
||||
},
|
||||
}
|
||||
|
||||
await handler._handle_update(update)
|
||||
assert executed is True
|
||||
|
||||
@pytest.mark.asyncio
|
||||
async def test_handle_update_error_isolation(self) -> None:
|
||||
"""Errors in handlers don't crash the system."""
|
||||
client = TelegramClient(bot_token="123:abc", chat_id="456", enabled=True)
|
||||
handler = TelegramCommandHandler(client)
|
||||
|
||||
async def failing_command() -> None:
|
||||
raise ValueError("Test error")
|
||||
|
||||
handler.register_command("fail", failing_command)
|
||||
|
||||
update = {
|
||||
"update_id": 1,
|
||||
"message": {
|
||||
"chat": {"id": 456},
|
||||
"text": "/fail",
|
||||
},
|
||||
}
|
||||
|
||||
# Should not raise exception
|
||||
await handler._handle_update(update)
|
||||
|
||||
|
||||
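The update-handling tests encode a small set of dispatch rules: only the configured chat id is honoured, text must begin with "/", an optional "@botname" suffix is stripped, unknown commands get a reply, and handler exceptions are swallowed. A sketch of a dispatcher that follows those rules; names mirror the tests rather than the verified handler internals.

```python
# Hedged sketch of the dispatch rules the tests above encode. Names follow
# the tests; the real TelegramCommandHandler may differ internally.
import logging
from typing import Any, Awaitable, Callable

logger = logging.getLogger(__name__)

CommandFn = Callable[[], Awaitable[None]]


class CommandDispatcherSketch:
    def __init__(self, client: Any, chat_id: str) -> None:
        self._client = client
        self._chat_id = chat_id
        self._commands: dict[str, CommandFn] = {}

    def register_command(self, name: str, handler: CommandFn) -> None:
        self._commands[name] = handler

    async def _handle_update(self, update: dict) -> None:
        message = update.get("message", {})
        # Compare as strings: Telegram sends an int chat id, config stores a str.
        if str(message.get("chat", {}).get("id")) != self._chat_id:
            return  # ignore unauthorized chats
        text = message.get("text", "")
        if not text.startswith("/"):
            return  # plain text, not a command
        command = text[1:].split(" ", 1)[0].split("@", 1)[0]  # "/start@mybot" -> "start"
        handler = self._commands.get(command)
        if handler is None:
            await self._client.send_message(f"Unknown command: /{command}")
            return
        try:
            await handler()
        except Exception:  # noqa: BLE001 - never let a handler crash the poll loop
            logger.exception("command /%s failed", command)
```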
class TestTradingControlCommands:
|
||||
"""Test trading control commands."""
|
||||
|
||||
@pytest.mark.asyncio
|
||||
async def test_stop_command_pauses_trading(self) -> None:
|
||||
"""Stop command clears pause event."""
|
||||
client = TelegramClient(bot_token="123:abc", chat_id="456", enabled=True)
|
||||
handler = TelegramCommandHandler(client)
|
||||
|
||||
# Create mock pause event
|
||||
import asyncio
|
||||
|
||||
pause_event = asyncio.Event()
|
||||
pause_event.set() # Initially active
|
||||
|
||||
mock_resp = AsyncMock()
|
||||
mock_resp.status = 200
|
||||
mock_resp.__aenter__ = AsyncMock(return_value=mock_resp)
|
||||
mock_resp.__aexit__ = AsyncMock(return_value=False)
|
||||
|
||||
async def mock_stop() -> None:
|
||||
"""Mock /stop handler."""
|
||||
if not pause_event.is_set():
|
||||
await client.send_message("⏸️ Trading is already paused")
|
||||
return
|
||||
|
||||
pause_event.clear()
|
||||
await client.send_message(
|
||||
"<b>⏸️ Trading Paused</b>\n\n"
|
||||
"All trading operations have been suspended.\n"
|
||||
"Use /resume to restart trading."
|
||||
)
|
||||
|
||||
handler.register_command("stop", mock_stop)
|
||||
|
||||
with patch("aiohttp.ClientSession.post", return_value=mock_resp) as mock_post:
|
||||
update = {
|
||||
"update_id": 1,
|
||||
"message": {
|
||||
"chat": {"id": 456},
|
||||
"text": "/stop",
|
||||
},
|
||||
}
|
||||
|
||||
await handler._handle_update(update)
|
||||
|
||||
# Verify pause event was cleared
|
||||
assert not pause_event.is_set()
|
||||
|
||||
# Verify message was sent
|
||||
assert mock_post.call_count == 1
|
||||
payload = mock_post.call_args.kwargs["json"]
|
||||
assert "Trading Paused" in payload["text"]
|
||||
|
||||
@pytest.mark.asyncio
|
||||
async def test_resume_command_resumes_trading(self) -> None:
|
||||
"""Resume command sets pause event."""
|
||||
client = TelegramClient(bot_token="123:abc", chat_id="456", enabled=True)
|
||||
handler = TelegramCommandHandler(client)
|
||||
|
||||
# Create mock pause event (initially paused)
|
||||
import asyncio
|
||||
|
||||
pause_event = asyncio.Event()
|
||||
pause_event.clear() # Initially paused
|
||||
|
||||
mock_resp = AsyncMock()
|
||||
mock_resp.status = 200
|
||||
mock_resp.__aenter__ = AsyncMock(return_value=mock_resp)
|
||||
mock_resp.__aexit__ = AsyncMock(return_value=False)
|
||||
|
||||
async def mock_resume() -> None:
|
||||
"""Mock /resume handler."""
|
||||
if pause_event.is_set():
|
||||
await client.send_message("▶️ Trading is already active")
|
||||
return
|
||||
|
||||
pause_event.set()
|
||||
await client.send_message(
|
||||
"<b>▶️ Trading Resumed</b>\n\n"
|
||||
"Trading operations have been restarted."
|
||||
)
|
||||
|
||||
handler.register_command("resume", mock_resume)
|
||||
|
||||
with patch("aiohttp.ClientSession.post", return_value=mock_resp) as mock_post:
|
||||
update = {
|
||||
"update_id": 1,
|
||||
"message": {
|
||||
"chat": {"id": 456},
|
||||
"text": "/resume",
|
||||
},
|
||||
}
|
||||
|
||||
await handler._handle_update(update)
|
||||
|
||||
# Verify pause event was set
|
||||
assert pause_event.is_set()
|
||||
|
||||
# Verify message was sent
|
||||
assert mock_post.call_count == 1
|
||||
payload = mock_post.call_args.kwargs["json"]
|
||||
assert "Trading Resumed" in payload["text"]
|
||||
|
||||
@pytest.mark.asyncio
|
||||
async def test_stop_when_already_paused(self) -> None:
|
||||
"""Stop command when already paused sends appropriate message."""
|
||||
client = TelegramClient(bot_token="123:abc", chat_id="456", enabled=True)
|
||||
handler = TelegramCommandHandler(client)
|
||||
|
||||
# Create mock pause event (already paused)
|
||||
import asyncio
|
||||
|
||||
pause_event = asyncio.Event()
|
||||
pause_event.clear()
|
||||
|
||||
mock_resp = AsyncMock()
|
||||
mock_resp.status = 200
|
||||
mock_resp.__aenter__ = AsyncMock(return_value=mock_resp)
|
||||
mock_resp.__aexit__ = AsyncMock(return_value=False)
|
||||
|
||||
async def mock_stop() -> None:
|
||||
"""Mock /stop handler."""
|
||||
if not pause_event.is_set():
|
||||
await client.send_message("⏸️ Trading is already paused")
|
||||
return
|
||||
|
||||
pause_event.clear()
|
||||
|
||||
handler.register_command("stop", mock_stop)
|
||||
|
||||
with patch("aiohttp.ClientSession.post", return_value=mock_resp) as mock_post:
|
||||
update = {
|
||||
"update_id": 1,
|
||||
"message": {
|
||||
"chat": {"id": 456},
|
||||
"text": "/stop",
|
||||
},
|
||||
}
|
||||
|
||||
await handler._handle_update(update)
|
||||
|
||||
# Verify message was sent
|
||||
payload = mock_post.call_args.kwargs["json"]
|
||||
assert "already paused" in payload["text"]
|
||||
|
||||
@pytest.mark.asyncio
|
||||
async def test_resume_when_already_active(self) -> None:
|
||||
"""Resume command when already active sends appropriate message."""
|
||||
client = TelegramClient(bot_token="123:abc", chat_id="456", enabled=True)
|
||||
handler = TelegramCommandHandler(client)
|
||||
|
||||
# Create mock pause event (already active)
|
||||
import asyncio
|
||||
|
||||
pause_event = asyncio.Event()
|
||||
pause_event.set()
|
||||
|
||||
mock_resp = AsyncMock()
|
||||
mock_resp.status = 200
|
||||
mock_resp.__aenter__ = AsyncMock(return_value=mock_resp)
|
||||
mock_resp.__aexit__ = AsyncMock(return_value=False)
|
||||
|
||||
async def mock_resume() -> None:
|
||||
"""Mock /resume handler."""
|
||||
if pause_event.is_set():
|
||||
await client.send_message("▶️ Trading is already active")
|
||||
return
|
||||
|
||||
pause_event.set()
|
||||
|
||||
handler.register_command("resume", mock_resume)
|
||||
|
||||
with patch("aiohttp.ClientSession.post", return_value=mock_resp) as mock_post:
|
||||
update = {
|
||||
"update_id": 1,
|
||||
"message": {
|
||||
"chat": {"id": 456},
|
||||
"text": "/resume",
|
||||
},
|
||||
}
|
||||
|
||||
await handler._handle_update(update)
|
||||
|
||||
# Verify message was sent
|
||||
payload = mock_post.call_args.kwargs["json"]
|
||||
assert "already active" in payload["text"]
|
||||
|
||||
|
||||
class TestStatusCommands:
|
||||
"""Test status query commands."""
|
||||
|
||||
@pytest.mark.asyncio
|
||||
async def test_status_command_shows_trading_info(self) -> None:
|
||||
"""Status command displays mode, markets, and P&L."""
|
||||
client = TelegramClient(bot_token="123:abc", chat_id="456", enabled=True)
|
||||
handler = TelegramCommandHandler(client)
|
||||
|
||||
mock_resp = AsyncMock()
|
||||
mock_resp.status = 200
|
||||
mock_resp.__aenter__ = AsyncMock(return_value=mock_resp)
|
||||
mock_resp.__aexit__ = AsyncMock(return_value=False)
|
||||
|
||||
async def mock_status() -> None:
|
||||
"""Mock /status handler."""
|
||||
message = (
|
||||
"<b>📊 Trading Status</b>\n\n"
|
||||
"<b>Mode:</b> PAPER\n"
|
||||
"<b>Markets:</b> Korea, United States\n"
|
||||
"<b>Trading:</b> Active\n\n"
|
||||
"<b>Current P&L:</b> +2.50%\n"
|
||||
"<b>Circuit Breaker:</b> -3.0%"
|
||||
)
|
||||
await client.send_message(message)
|
||||
|
||||
handler.register_command("status", mock_status)
|
||||
|
||||
with patch("aiohttp.ClientSession.post", return_value=mock_resp) as mock_post:
|
||||
update = {
|
||||
"update_id": 1,
|
||||
"message": {
|
||||
"chat": {"id": 456},
|
||||
"text": "/status",
|
||||
},
|
||||
}
|
||||
|
||||
await handler._handle_update(update)
|
||||
|
||||
# Verify message was sent
|
||||
assert mock_post.call_count == 1
|
||||
payload = mock_post.call_args.kwargs["json"]
|
||||
assert "Trading Status" in payload["text"]
|
||||
assert "PAPER" in payload["text"]
|
||||
assert "P&L" in payload["text"]
|
||||
|
||||
@pytest.mark.asyncio
|
||||
async def test_status_command_error_handling(self) -> None:
|
||||
"""Status command handles errors gracefully."""
|
||||
client = TelegramClient(bot_token="123:abc", chat_id="456", enabled=True)
|
||||
handler = TelegramCommandHandler(client)
|
||||
|
||||
mock_resp = AsyncMock()
|
||||
mock_resp.status = 200
|
||||
mock_resp.__aenter__ = AsyncMock(return_value=mock_resp)
|
||||
mock_resp.__aexit__ = AsyncMock(return_value=False)
|
||||
|
||||
async def mock_status_error() -> None:
|
||||
"""Mock /status handler with error."""
|
||||
await client.send_message(
|
||||
"<b>⚠️ Error</b>\n\nFailed to retrieve trading status."
|
||||
)
|
||||
|
||||
handler.register_command("status", mock_status_error)
|
||||
|
||||
with patch("aiohttp.ClientSession.post", return_value=mock_resp) as mock_post:
|
||||
update = {
|
||||
"update_id": 1,
|
||||
"message": {
|
||||
"chat": {"id": 456},
|
||||
"text": "/status",
|
||||
},
|
||||
}
|
||||
|
||||
await handler._handle_update(update)
|
||||
|
||||
# Should send error message
|
||||
payload = mock_post.call_args.kwargs["json"]
|
||||
assert "Error" in payload["text"]
|
||||
|
||||
@pytest.mark.asyncio
|
||||
async def test_positions_command_shows_holdings(self) -> None:
|
||||
"""Positions command displays account summary."""
|
||||
client = TelegramClient(bot_token="123:abc", chat_id="456", enabled=True)
|
||||
handler = TelegramCommandHandler(client)
|
||||
|
||||
mock_resp = AsyncMock()
|
||||
mock_resp.status = 200
|
||||
mock_resp.__aenter__ = AsyncMock(return_value=mock_resp)
|
||||
mock_resp.__aexit__ = AsyncMock(return_value=False)
|
||||
|
||||
async def mock_positions() -> None:
|
||||
"""Mock /positions handler."""
|
||||
message = (
|
||||
"<b>💼 Account Summary</b>\n\n"
|
||||
"<b>Total Evaluation:</b> ₩10,500,000\n"
|
||||
"<b>Available Cash:</b> ₩5,000,000\n"
|
||||
"<b>Purchase Total:</b> ₩10,000,000\n"
|
||||
"<b>P&L:</b> +5.00%\n\n"
|
||||
"<i>Note: Individual position details require API enhancement</i>"
|
||||
)
|
||||
await client.send_message(message)
|
||||
|
||||
handler.register_command("positions", mock_positions)
|
||||
|
||||
with patch("aiohttp.ClientSession.post", return_value=mock_resp) as mock_post:
|
||||
update = {
|
||||
"update_id": 1,
|
||||
"message": {
|
||||
"chat": {"id": 456},
|
||||
"text": "/positions",
|
||||
},
|
||||
}
|
||||
|
||||
await handler._handle_update(update)
|
||||
|
||||
# Verify message was sent
|
||||
assert mock_post.call_count == 1
|
||||
payload = mock_post.call_args.kwargs["json"]
|
||||
assert "Account Summary" in payload["text"]
|
||||
assert "Total Evaluation" in payload["text"]
|
||||
assert "P&L" in payload["text"]
|
||||
|
||||
@pytest.mark.asyncio
|
||||
async def test_positions_command_empty_holdings(self) -> None:
|
||||
"""Positions command handles empty portfolio."""
|
||||
client = TelegramClient(bot_token="123:abc", chat_id="456", enabled=True)
|
||||
handler = TelegramCommandHandler(client)
|
||||
|
||||
mock_resp = AsyncMock()
|
||||
mock_resp.status = 200
|
||||
mock_resp.__aenter__ = AsyncMock(return_value=mock_resp)
|
||||
mock_resp.__aexit__ = AsyncMock(return_value=False)
|
||||
|
||||
async def mock_positions_empty() -> None:
|
||||
"""Mock /positions handler with no positions."""
|
||||
message = (
|
||||
"<b>💼 Account Summary</b>\n\n"
|
||||
"No balance information available."
|
||||
)
|
||||
await client.send_message(message)
|
||||
|
||||
handler.register_command("positions", mock_positions_empty)
|
||||
|
||||
with patch("aiohttp.ClientSession.post", return_value=mock_resp) as mock_post:
|
||||
update = {
|
||||
"update_id": 1,
|
||||
"message": {
|
||||
"chat": {"id": 456},
|
||||
"text": "/positions",
|
||||
},
|
||||
}
|
||||
|
||||
await handler._handle_update(update)
|
||||
|
||||
# Verify message was sent
|
||||
payload = mock_post.call_args.kwargs["json"]
|
||||
assert "No balance information available" in payload["text"]
|
||||
|
||||
@pytest.mark.asyncio
|
||||
async def test_positions_command_error_handling(self) -> None:
|
||||
"""Positions command handles errors gracefully."""
|
||||
client = TelegramClient(bot_token="123:abc", chat_id="456", enabled=True)
|
||||
handler = TelegramCommandHandler(client)
|
||||
|
||||
mock_resp = AsyncMock()
|
||||
mock_resp.status = 200
|
||||
mock_resp.__aenter__ = AsyncMock(return_value=mock_resp)
|
||||
mock_resp.__aexit__ = AsyncMock(return_value=False)
|
||||
|
||||
async def mock_positions_error() -> None:
|
||||
"""Mock /positions handler with error."""
|
||||
await client.send_message(
|
||||
"<b>⚠️ Error</b>\n\nFailed to retrieve positions."
|
||||
)
|
||||
|
||||
handler.register_command("positions", mock_positions_error)
|
||||
|
||||
with patch("aiohttp.ClientSession.post", return_value=mock_resp) as mock_post:
|
||||
update = {
|
||||
"update_id": 1,
|
||||
"message": {
|
||||
"chat": {"id": 456},
|
||||
"text": "/positions",
|
||||
},
|
||||
}
|
||||
|
||||
await handler._handle_update(update)
|
||||
|
||||
# Should send error message
|
||||
payload = mock_post.call_args.kwargs["json"]
|
||||
assert "Error" in payload["text"]
|
||||
|
||||
|
||||
class TestBasicCommands:
|
||||
"""Test basic command implementations."""
|
||||
|
||||
@pytest.mark.asyncio
|
||||
async def test_help_command_content(self) -> None:
|
||||
"""Help command lists all available commands."""
|
||||
client = TelegramClient(bot_token="123:abc", chat_id="456", enabled=True)
|
||||
handler = TelegramCommandHandler(client)
|
||||
|
||||
mock_resp = AsyncMock()
|
||||
mock_resp.status = 200
|
||||
mock_resp.__aenter__ = AsyncMock(return_value=mock_resp)
|
||||
mock_resp.__aexit__ = AsyncMock(return_value=False)
|
||||
|
||||
async def mock_help() -> None:
|
||||
"""Mock /help handler."""
|
||||
message = (
|
||||
"<b>📖 Available Commands</b>\n\n"
|
||||
"/help - Show available commands\n"
|
||||
"/status - Trading status (mode, markets, P&L)\n"
|
||||
"/positions - Current holdings\n"
|
||||
"/report - Daily summary report\n"
|
||||
"/scenarios - Today's playbook scenarios\n"
|
||||
"/review - Recent scorecards\n"
|
||||
"/dashboard - Dashboard URL/status\n"
|
||||
"/stop - Pause trading\n"
|
||||
"/resume - Resume trading"
|
||||
)
|
||||
await client.send_message(message)
|
||||
|
||||
handler.register_command("help", mock_help)
|
||||
|
||||
with patch("aiohttp.ClientSession.post", return_value=mock_resp) as mock_post:
|
||||
update = {
|
||||
"update_id": 1,
|
||||
"message": {
|
||||
"chat": {"id": 456},
|
||||
"text": "/help",
|
||||
},
|
||||
}
|
||||
|
||||
await handler._handle_update(update)
|
||||
|
||||
# Verify message was sent
|
||||
assert mock_post.call_count == 1
|
||||
payload = mock_post.call_args.kwargs["json"]
|
||||
assert "Available Commands" in payload["text"]
|
||||
assert "/help" in payload["text"]
|
||||
assert "/status" in payload["text"]
|
||||
assert "/positions" in payload["text"]
|
||||
assert "/report" in payload["text"]
|
||||
assert "/scenarios" in payload["text"]
|
||||
assert "/review" in payload["text"]
|
||||
assert "/dashboard" in payload["text"]
|
||||
assert "/stop" in payload["text"]
|
||||
assert "/resume" in payload["text"]
|
||||
|
||||
|
||||
class TestExtendedCommands:
|
||||
"""Test additional bot commands."""
|
||||
|
||||
@pytest.mark.asyncio
|
||||
async def test_report_command(self) -> None:
|
||||
client = TelegramClient(bot_token="123:abc", chat_id="456", enabled=True)
|
||||
handler = TelegramCommandHandler(client)
|
||||
|
||||
mock_resp = AsyncMock()
|
||||
mock_resp.status = 200
|
||||
mock_resp.__aenter__ = AsyncMock(return_value=mock_resp)
|
||||
mock_resp.__aexit__ = AsyncMock(return_value=False)
|
||||
|
||||
async def mock_report() -> None:
|
||||
await client.send_message("<b>📈 Daily Report</b>\n\nTrades: 1")
|
||||
|
||||
handler.register_command("report", mock_report)
|
||||
|
||||
with patch("aiohttp.ClientSession.post", return_value=mock_resp) as mock_post:
|
||||
await handler._handle_update(
|
||||
{"update_id": 1, "message": {"chat": {"id": 456}, "text": "/report"}}
|
||||
)
|
||||
payload = mock_post.call_args.kwargs["json"]
|
||||
assert "Daily Report" in payload["text"]
|
||||
|
||||
@pytest.mark.asyncio
|
||||
async def test_scenarios_command(self) -> None:
|
||||
client = TelegramClient(bot_token="123:abc", chat_id="456", enabled=True)
|
||||
handler = TelegramCommandHandler(client)
|
||||
|
||||
mock_resp = AsyncMock()
|
||||
mock_resp.status = 200
|
||||
mock_resp.__aenter__ = AsyncMock(return_value=mock_resp)
|
||||
mock_resp.__aexit__ = AsyncMock(return_value=False)
|
||||
|
||||
async def mock_scenarios() -> None:
|
||||
await client.send_message("<b>🧠 Today's Scenarios</b>\n\n- AAPL: BUY (85)")
|
||||
|
||||
handler.register_command("scenarios", mock_scenarios)
|
||||
|
||||
with patch("aiohttp.ClientSession.post", return_value=mock_resp) as mock_post:
|
||||
await handler._handle_update(
|
||||
{"update_id": 1, "message": {"chat": {"id": 456}, "text": "/scenarios"}}
|
||||
)
|
||||
payload = mock_post.call_args.kwargs["json"]
|
||||
assert "Today's Scenarios" in payload["text"]
|
||||
|
||||
@pytest.mark.asyncio
|
||||
async def test_review_command(self) -> None:
|
||||
client = TelegramClient(bot_token="123:abc", chat_id="456", enabled=True)
|
||||
handler = TelegramCommandHandler(client)
|
||||
|
||||
mock_resp = AsyncMock()
|
||||
mock_resp.status = 200
|
||||
mock_resp.__aenter__ = AsyncMock(return_value=mock_resp)
|
||||
mock_resp.__aexit__ = AsyncMock(return_value=False)
|
||||
|
||||
async def mock_review() -> None:
|
||||
await client.send_message("<b>📝 Recent Reviews</b>\n\n- 2026-02-14 KR")
|
||||
|
||||
handler.register_command("review", mock_review)
|
||||
|
||||
with patch("aiohttp.ClientSession.post", return_value=mock_resp) as mock_post:
|
||||
await handler._handle_update(
|
||||
{"update_id": 1, "message": {"chat": {"id": 456}, "text": "/review"}}
|
||||
)
|
||||
payload = mock_post.call_args.kwargs["json"]
|
||||
assert "Recent Reviews" in payload["text"]
|
||||
|
||||
@pytest.mark.asyncio
|
||||
async def test_dashboard_command(self) -> None:
|
||||
client = TelegramClient(bot_token="123:abc", chat_id="456", enabled=True)
|
||||
handler = TelegramCommandHandler(client)
|
||||
|
||||
mock_resp = AsyncMock()
|
||||
mock_resp.status = 200
|
||||
mock_resp.__aenter__ = AsyncMock(return_value=mock_resp)
|
||||
mock_resp.__aexit__ = AsyncMock(return_value=False)
|
||||
|
||||
async def mock_dashboard() -> None:
|
||||
await client.send_message("<b>🖥️ Dashboard</b>\n\nURL: http://127.0.0.1:8080")
|
||||
|
||||
handler.register_command("dashboard", mock_dashboard)
|
||||
|
||||
with patch("aiohttp.ClientSession.post", return_value=mock_resp) as mock_post:
|
||||
await handler._handle_update(
|
||||
{"update_id": 1, "message": {"chat": {"id": 456}, "text": "/dashboard"}}
|
||||
)
|
||||
payload = mock_post.call_args.kwargs["json"]
|
||||
assert "Dashboard" in payload["text"]
|
||||
|
||||
|
||||
class TestGetUpdates:
|
||||
"""Test getUpdates API interaction."""
|
||||
|
||||
@pytest.mark.asyncio
|
||||
async def test_get_updates_success(self) -> None:
|
||||
"""getUpdates fetches and parses updates."""
|
||||
client = TelegramClient(bot_token="123:abc", chat_id="456", enabled=True)
|
||||
handler = TelegramCommandHandler(client)
|
||||
|
||||
mock_resp = AsyncMock()
|
||||
mock_resp.status = 200
|
||||
mock_resp.json = AsyncMock(
|
||||
return_value={
|
||||
"ok": True,
|
||||
"result": [
|
||||
{"update_id": 1, "message": {"text": "/test"}},
|
||||
{"update_id": 2, "message": {"text": "/help"}},
|
||||
],
|
||||
}
|
||||
)
|
||||
mock_resp.__aenter__ = AsyncMock(return_value=mock_resp)
|
||||
mock_resp.__aexit__ = AsyncMock(return_value=False)
|
||||
|
||||
with patch("aiohttp.ClientSession.post", return_value=mock_resp):
|
||||
updates = await handler._get_updates()
|
||||
|
||||
assert len(updates) == 2
|
||||
assert updates[0]["update_id"] == 1
|
||||
assert updates[1]["update_id"] == 2
|
||||
assert handler._last_update_id == 2
|
||||
|
||||
@pytest.mark.asyncio
|
||||
async def test_get_updates_api_error(self) -> None:
|
||||
"""getUpdates handles API errors gracefully."""
|
||||
client = TelegramClient(bot_token="123:abc", chat_id="456", enabled=True)
|
||||
handler = TelegramCommandHandler(client)
|
||||
|
||||
mock_resp = AsyncMock()
|
||||
mock_resp.status = 400
|
||||
mock_resp.text = AsyncMock(return_value="Bad Request")
|
||||
mock_resp.__aenter__ = AsyncMock(return_value=mock_resp)
|
||||
mock_resp.__aexit__ = AsyncMock(return_value=False)
|
||||
|
||||
with patch("aiohttp.ClientSession.post", return_value=mock_resp):
|
||||
updates = await handler._get_updates()
|
||||
|
||||
assert updates == []
|
||||
|
||||
@pytest.mark.asyncio
|
||||
async def test_get_updates_empty_result(self) -> None:
|
||||
"""getUpdates handles empty results."""
|
||||
client = TelegramClient(bot_token="123:abc", chat_id="456", enabled=True)
|
||||
handler = TelegramCommandHandler(client)
|
||||
|
||||
mock_resp = AsyncMock()
|
||||
mock_resp.status = 200
|
||||
mock_resp.json = AsyncMock(return_value={"ok": True, "result": []})
|
||||
mock_resp.__aenter__ = AsyncMock(return_value=mock_resp)
|
||||
mock_resp.__aexit__ = AsyncMock(return_value=False)
|
||||
|
||||
with patch("aiohttp.ClientSession.post", return_value=mock_resp):
|
||||
updates = await handler._get_updates()
|
||||
|
||||
assert updates == []
|
||||
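The getUpdates tests check two things: _last_update_id tracks the highest update id seen, and any API failure degrades to an empty list. A sketch against Telegram's getUpdates method; offset and timeout are standard Bot API parameters, but the exact request the real handler builds is assumed.

```python
# Sketch of the getUpdates polling the tests above describe: track the highest
# update_id seen, return [] on any API error. Request shape is assumed.
import aiohttp


class UpdatePollerSketch:
    def __init__(self, bot_token: str) -> None:
        self._bot_token = bot_token
        self._last_update_id = 0

    async def _get_updates(self) -> list[dict]:
        url = f"https://api.telegram.org/bot{self._bot_token}/getUpdates"
        params: dict[str, int] = {"timeout": 25}
        if self._last_update_id:
            params["offset"] = self._last_update_id + 1  # acknowledge everything seen so far
        try:
            async with aiohttp.ClientSession() as session:
                async with session.post(url, json=params) as resp:
                    if resp.status != 200:
                        return []
                    data = await resp.json()
        except aiohttp.ClientError:
            return []
        updates = data.get("result", []) if data.get("ok") else []
        for update in updates:
            self._last_update_id = max(self._last_update_id, update.get("update_id", 0))
        return updates
```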
@@ -2,6 +2,7 @@
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
import asyncio
|
||||
import sqlite3
|
||||
from typing import Any
|
||||
from unittest.mock import AsyncMock
|
||||
@@ -411,7 +412,7 @@ class TestMarketScanner:
|
||||
scan_result = context_store.get_context(
|
||||
ContextLayer.L7_REALTIME,
|
||||
latest_timeframe,
|
||||
"KR_scan_result",
|
||||
"scan_result_KR",
|
||||
)
|
||||
assert scan_result is not None
|
||||
assert scan_result["total_scanned"] == 3
|
||||
@@ -531,3 +532,45 @@ class TestMarketScanner:
|
||||
new_additions = [code for code in updated if code not in current_watchlist]
|
||||
assert len(new_additions) <= 1
|
||||
assert len(updated) == len(current_watchlist)
|
||||
|
||||
@pytest.mark.asyncio
|
||||
async def test_scan_market_respects_concurrency_limit(
|
||||
self,
|
||||
mock_broker: KISBroker,
|
||||
mock_overseas_broker: OverseasBroker,
|
||||
volatility_analyzer: VolatilityAnalyzer,
|
||||
context_store: ContextStore,
|
||||
) -> None:
|
||||
"""scan_market should limit concurrent scans to max_concurrent_scans."""
|
||||
max_concurrent = 2
|
||||
scanner = MarketScanner(
|
||||
broker=mock_broker,
|
||||
overseas_broker=mock_overseas_broker,
|
||||
volatility_analyzer=volatility_analyzer,
|
||||
context_store=context_store,
|
||||
top_n=5,
|
||||
max_concurrent_scans=max_concurrent,
|
||||
)
|
||||
|
||||
# Track peak concurrency
|
||||
active_count = 0
|
||||
peak_count = 0
|
||||
|
||||
original_scan = scanner.scan_stock
|
||||
|
||||
async def tracking_scan(code: str, market: Any) -> VolatilityMetrics:
|
||||
nonlocal active_count, peak_count
|
||||
active_count += 1
|
||||
peak_count = max(peak_count, active_count)
|
||||
await asyncio.sleep(0.05) # Simulate API call duration
|
||||
active_count -= 1
|
||||
return VolatilityMetrics(code, 50000, 500, 1.0, 1.0, 1.0, 1.0, 10.0, 50.0)
|
||||
|
||||
scanner.scan_stock = tracking_scan # type: ignore[method-assign]
|
||||
|
||||
market = MARKETS["KR"]
|
||||
stock_codes = ["001", "002", "003", "004", "005", "006"]
|
||||
|
||||
await scanner.scan_market(market, stock_codes)
|
||||
|
||||
assert peak_count <= max_concurrent
|
||||
|
||||
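The final test asserts that scan_market never runs more than max_concurrent_scans scans at once across a longer symbol list. The usual way to satisfy that is an asyncio.Semaphore around each per-stock task, sketched below; the real MarketScanner may structure its gather differently.

```python
# Minimal sketch of the concurrency cap the last test asserts: a semaphore
# around each per-stock scan so at most max_concurrent_scans run in parallel.
import asyncio
from typing import Awaitable, Callable


async def scan_market_sketch(
    stock_codes: list[str],
    scan_stock: Callable[[str], Awaitable[dict]],
    max_concurrent_scans: int = 2,
) -> list[dict]:
    semaphore = asyncio.Semaphore(max_concurrent_scans)

    async def bounded_scan(code: str) -> dict:
        async with semaphore:  # only max_concurrent_scans tasks get past this point
            return await scan_stock(code)

    return await asyncio.gather(*(bounded_scan(code) for code in stock_codes))
```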