Compare commits

200 commits: feature/is ... d60fd8947b
Commits (newest first; only SHA1 hashes were captured — the author and date columns were empty in the capture):

d60fd8947b, 694d73b212, b2b02b6f57, 2dbe98615d, 34cf081c96, 7bc4e88335,
386e039ff6, 13ba9e8081, 5b52f593a8, 2798558bf3, 2331d80915, 7d72669cb8,
74a4784b7a, dc70311aed, e56819e9e2, cfd5351b58, b206c23fc9, 4d9f3e2cfc,
a93a5c616b, 9f64c9944a, bb391d502c, b0100fde10, 0a4e69d40c, 25401ac132,
1381b140ab, 356d085ab0, 54d6cc3d7c, 3ffad58d57, df6baee7f1, c31a6a569d,
990f9696ab, 9bf72c63ec, 1399fa4d09, f63fb53289, 5050a4cf84, 4987b6393a,
8faf974522, d524159ad0, c7c740f446, 1333c65455, 9db7f903f8, 4660310ee4,
c383a411ff, 7b3ba27ef7, 6ff887c047, 219eef6388, 9d7ca12275, ccb00ee77d,
b1b728f62e, df12be1305, 6a6d3bd631, 7aa5fedc12, 3e777a5ab8, 6f93258983,
82167c5b8a, f87c4dc2f0, 8af5f564c3, 06e4fc5597, b697b6d515, 42db5b3cc1,
f252a84d65, adc5211fd2, 67e0e8df41, ffdb99c6c7, ce5ea5abde, 5ae302b083,
d31a61cd0b, 1c7a17320c, f58d42fdb0, 0b20251de0, bffe6e9288, 0146d1bf8a,
497564e75c, 988a56c07c, c9f1345e3c, 8c492eae3a, 271c592a46, a063bd9d10,
847456e0af, a3a9fd1f24, f34117bc81, 17e012cd04, a030dcc0dc, d1698dee33,
8a8ba3b0cb, 6b74e4cc77, 1a1fe7e637, 2e27000760, 5a41f86112, ff9c4d6082,
25ad4776c9, 9339824e22, e6eae6c6e0, bb6bd0392e, a66181b7a7, da585ee547,
c737d5009a, f7d33e69d1, 7d99d8ec4a, 0727f28f77, ac4fb00644, 4fc4a57036,
641f3e8811, ebd0a0297c, 02a72e0f7e, 478a659ac2, 16b9b6832d, 48b87a79f6,
ad79082dcc, 11dff9d3e5, 3c5f1752e6, d6a389e0b7, cd36d53a47, 1242794fc4,
b45d136894, ce82121f04, 0e2987e66d, cdd5a218a7, f3491e94e4, 342511a6ed,
2d5912dc08, 40ea41cf3c, af5bfbac24, 7e9a573390, 7dbc48260c, 4b883a4fc4,
98071a8ee3, f2ad270e8b, 04c73a1a06, 4da22b10eb, c920b257b6, 9927bfa13e,
aceba86186, b961c53a92, 76a7ee7cdb, 77577f3f4d, 17112b864a, 28bcc7acd7,
39b9f179f4, bd2b3241b2, 561faaaafa, a33d6a145f, 7e6c912214, d6edbc0fa2,
c7640a30d7, 60a22d6cd4, b1f48d859e, 03f8d220a4, 305120f599, faa23b3f1b,
5844ec5ad3, ff5ff736d8, 4a59d7e66d, 8dd625bfd1, b50977aa76, fbcd016e1a,
ce5773ba45, 7834b89f10, e0d6c9f81d, 2e550f8b58, c76e2dfed5, 24fa22e77b,
cd1579058c, 45b48fa7cd, 3952a5337b, ccc97ebaa9, 3a54db8948, 96e2ad4f1f,
c5a8982122, f7289606fc, 0c5c90201f, b484f0daff, 1288181e39, b625f41621,
77d3ba967c, aeed881d85, d0bbdb5dc1, 44339c52d7, 22ffdafacc, c49765e951,
64000b9967, 733e6b36e9, 0659cc0aca, 748b9b848e, 6a1ad230ee, 90bbc78867,
1ef5dcb2b3, d105a3ff5e, 0424c78f6c, 3fdb7a29d4, 31b4d0bf1e, e2275a23b1,
7522bb7e66, 63fa6841a2, ece3c5597b, 63f4e49d88, e0a6b307a2, 75320eb587,
afb31b7f4b, a429a9f4da
.env.example (64 changes)
@@ -1,36 +1,82 @@
# ============================================================
# The Ouroboros — Environment Configuration
# ============================================================
# Copy this file to .env and fill in your values.
# Lines starting with # are comments.

# ============================================================
# Korea Investment Securities API
# ============================================================
KIS_APP_KEY=your_app_key_here
KIS_APP_SECRET=your_app_secret_here
KIS_ACCOUNT_NO=12345678-01
KIS_BASE_URL=https://openapivts.koreainvestment.com:9443

# Paper trading (VTS): https://openapivts.koreainvestment.com:29443
# Live trading: https://openapi.koreainvestment.com:9443
KIS_BASE_URL=https://openapivts.koreainvestment.com:29443

# ============================================================
# Trading Mode
# ============================================================
# paper = simulated trading (safe for testing), live = real trading (real money)
MODE=paper

# daily = batch per session, realtime = per-stock continuous scan
TRADE_MODE=daily

# Comma-separated market codes: KR, US, JP, HK, CN, VN
ENABLED_MARKETS=KR,US

# Simulated USD cash for paper (VTS) overseas trading.
# VTS overseas balance API often returns 0; this value is used as fallback.
# Set to 0 to disable fallback (not used in live mode).
PAPER_OVERSEAS_CASH=50000.0

# ============================================================
# Google Gemini
# ============================================================
GEMINI_API_KEY=your_gemini_api_key_here
GEMINI_MODEL=gemini-pro
# Recommended: gemini-2.0-flash-exp or gemini-1.5-pro
GEMINI_MODEL=gemini-2.0-flash-exp

# ============================================================
# Risk Management
# ============================================================
CIRCUIT_BREAKER_PCT=-3.0
FAT_FINGER_PCT=30.0
CONFIDENCE_THRESHOLD=80

# ============================================================
# Database
# ============================================================
DB_PATH=data/trade_logs.db

# Rate Limiting (requests per second for KIS API)
# Reduced to 5.0 to avoid "초당 거래건수 초과" errors (EGW00201)
RATE_LIMIT_RPS=5.0
# ============================================================
# Rate Limiting
# ============================================================
# KIS API real limit is ~2 RPS. Keep at 2.0 for maximum safety.
# Increasing this risks EGW00201 "초당 거래건수 초과" errors.
RATE_LIMIT_RPS=2.0

# Trading Mode (paper / live)
MODE=paper

# External Data APIs (optional — for enhanced decision-making)
# ============================================================
# External Data APIs (optional)
# ============================================================
# NEWS_API_KEY=your_news_api_key_here
# NEWS_API_PROVIDER=alphavantage
# MARKET_DATA_API_KEY=your_market_data_key_here

# ============================================================
# Telegram Notifications (optional)
# ============================================================
# Get bot token from @BotFather on Telegram
# Get chat ID from @userinfobot or your chat
# TELEGRAM_BOT_TOKEN=1234567890:ABCdefGHIjklMNOpqrsTUVwxyz
# TELEGRAM_CHAT_ID=123456789
# TELEGRAM_ENABLED=true

# ============================================================
# Dashboard (optional)
# ============================================================
# DASHBOARD_ENABLED=false
# DASHBOARD_HOST=127.0.0.1
# DASHBOARD_PORT=8080

CLAUDE.md (25 changes)
@@ -15,6 +15,9 @@ pytest -v --cov=src
# Run (paper trading)
python -m src.main --mode=paper

# Run with dashboard
python -m src.main --mode=paper --dashboard
```

## Telegram Notifications (Optional)

@@ -43,6 +46,10 @@ Get real-time alerts for trades, circuit breakers, and system events via Telegram
- ℹ️ Market open/close notifications
- 📝 System startup/shutdown status

### Interactive Commands

With `TELEGRAM_COMMANDS_ENABLED=true` (default), the bot supports 9 bidirectional commands: `/help`, `/status`, `/positions`, `/report`, `/scenarios`, `/review`, `/dashboard`, `/stop`, `/resume`.

**Fail-safe**: Notifications never crash the trading system. Missing credentials or API errors are logged but trading continues normally.

## Smart Volatility Scanner (Optional)

@@ -87,6 +94,7 @@ Smart Scanner runs in `TRADE_MODE=realtime` only. Daily mode uses static watchlists
- **[Testing](docs/testing.md)** — Test structure, coverage requirements, writing tests
- **[Agent Policies](docs/agents.md)** — Prime directives, constraints, prohibited actions
- **[Requirements Log](docs/requirements-log.md)** — User requirements and feedback tracking
- **[Live Trading Checklist](docs/live-trading-checklist.md)** — Paper→live transition checklist

## Core Principles

@@ -109,17 +117,23 @@ User requirements and feedback are tracked in [docs/requirements-log.md](docs/requirements-log.md)
```
src/
├── analysis/       # Technical analysis (RSI, volatility, smart scanner)
├── backup/         # Disaster recovery (scheduler, cloud storage, health)
├── brain/          # Gemini AI decision engine (prompt optimizer, context selector)
├── broker/         # KIS API client (domestic + overseas)
├── brain/          # Gemini AI decision engine
├── context/        # L1-L7 hierarchical memory system
├── core/           # Risk manager (READ-ONLY)
├── evolution/      # Self-improvement optimizer
├── dashboard/      # FastAPI read-only monitoring (8 API endpoints)
├── data/           # External data integration (news, market data, calendar)
├── evolution/      # Self-improvement (optimizer, daily review, scorecard)
├── logging/        # Decision logger (audit trail)
├── markets/        # Market schedules and timezone handling
├── notifications/  # Telegram real-time alerts
├── notifications/  # Telegram alerts + bidirectional commands (9 commands)
├── strategy/       # Pre-market planner, scenario engine, playbook store
├── db.py           # SQLite trade logging
├── main.py         # Trading loop orchestrator
└── config.py       # Settings (from .env)

tests/              # 343 tests across 14 files
tests/              # 551 tests across 25 files
docs/               # Extended documentation
```

@@ -131,6 +145,7 @@ ruff check src/ tests/ # Lint
mypy src/ --strict # Type check

python -m src.main --mode=paper             # Paper trading
python -m src.main --mode=paper --dashboard # With dashboard
python -m src.main --mode=live              # Live trading (⚠️ real money)

# Gitea workflow (requires tea CLI)
@@ -156,7 +171,7 @@ Markets auto-detected based on timezone and enabled in `ENABLED_MARKETS` env var
- `src/core/risk_manager.py` is **READ-ONLY** — changes require human approval
- Circuit breaker at -3.0% P&L — may only be made **stricter**
- Fat-finger protection: max 30% of cash per order — always enforced
- Confidence < 80 → force HOLD — cannot be weakened
- Confidence thresholds (per market_outlook, may not be lowered): BEARISH ≥ 90, NEUTRAL/default ≥ 80, BULLISH ≥ 75
- All code changes → corresponding tests → coverage ≥ 80%

## Contributing

README.md (160 changes)
@@ -10,28 +10,41 @@ Trades via the KIS (Korea Investment & Securities) API, decides with Google Gemini, and self-
│ (execution) │ │ (trade loop) │ │ (decisions) │
└─────────────┘ └──────┬──────┘ └─────────────┘
                       │
                ┌──────┴──────┐
                │Risk Manager │
                │(safeguards) │
                └──────┬──────┘
                       │
                ┌──────┴──────┐
                │ Evolution   │
                │ (evolution) │
                └─────────────┘
          ┌────────────┼────────────┐
          │            │            │
   ┌──────┴──────┐  ┌──┴───┐ ┌──────┴──────┐
   │Risk Manager │  │  DB  │ │  Telegram   │
   │(safeguards) │  │      │ │(alerts+cmds)│
   └──────┬──────┘  └──────┘ └─────────────┘
          │
     ┌────────┼────────┐
     │        │        │
┌────┴────┐┌──┴──┐┌────┴─────┐
│Strategy ││Ctx  ││Evolution │
│(playbook││(mem)││(evolve)  │
└─────────┘└─────┘└──────────┘
```

**v2 core**: "Plan Once, Execute Locally" — before the market opens, the AI generates a scenario playbook once; during trading hours only local scenario matching runs, sharply cutting API cost and latency.

## Core Modules

| Module | File | Description |
| Module | Location | Description |
|------|------|------|
| Config | `src/config.py` | Pydantic-based env-var loading and type validation |
| Broker | `src/broker/kis_api.py` | Async KIS API wrapper (token refresh, rate limiter, hash key) |
| Brain | `src/brain/gemini_client.py` | Gemini prompt construction and JSON response parsing |
| Shield | `src/core/risk_manager.py` | Circuit breaker + fat-finger checks |
| Alerts | `src/notifications/telegram_client.py` | Real-time Telegram trade alerts (optional) |
| Evolution | `src/evolution/optimizer.py` | Analyze failure patterns → generate new strategies → test → PR |
| DB | `src/db.py` | SQLite trade logging |
| Config | `src/config.py` | Pydantic-based env-var loading and type validation (35+ variables) |
| Broker | `src/broker/` | Async KIS API wrapper (domestic + 9 overseas markets) |
| Brain | `src/brain/` | Gemini prompt construction, JSON parsing, token optimization |
| Shield | `src/core/risk_manager.py` | Circuit breaker + fat-finger checks (READ-ONLY) |
| Strategy | `src/strategy/` | Pre-Market Planner, Scenario Engine, Playbook Store |
| Context | `src/context/` | L1-L7 hierarchical memory system |
| Analysis | `src/analysis/` | RSI, ATR, Smart Volatility Scanner |
| Alerts | `src/notifications/` | Bidirectional Telegram (alerts + 9 commands) |
| Dashboard | `src/dashboard/` | FastAPI read-only monitoring (8 APIs) |
| Evolution | `src/evolution/` | Strategy evolution + Daily Review + Scorecard |
| Decision log | `src/logging/` | Full audit trail of trade decisions |
| Data | `src/data/` | News, market data, economic calendar integration |
| Backup | `src/backup/` | Automated backups, S3 cloud, integrity verification |
| DB | `src/db.py` | SQLite trade logging (5 tables) |

## Safeguards

@@ -42,6 +55,7 @@ Trades via the KIS (Korea Investment & Securities) API, decides with Google Gemini, and self-
| Confidence threshold | Force HOLD when Gemini confidence is below 80 |
| Rate limiter | Leaky-bucket throttling of API calls |
| Auto token refresh | Reissue the access token 1 minute before expiry |
| Stop-loss monitoring | Real-time position protection based on playbook scenarios |

## Quick Start

@@ -67,7 +81,11 @@ pytest -v --cov=src --cov-report=term-missing
### 4. Run (paper trading)

```bash
# Basic run
python -m src.main --mode=paper

# Enable the dashboard
python -m src.main --mode=paper --dashboard
```

### 5. Run with Docker

@@ -76,7 +94,20 @@ python -m src.main --mode=paper
docker compose up -d ouroboros
```

## Telegram Notifications (optional)
## Supported Markets

| Country | Exchange | Code |
|------|--------|------|
| 🇰🇷 Korea | KRX | KR |
| 🇺🇸 United States | NASDAQ, NYSE, AMEX | US_NASDAQ, US_NYSE, US_AMEX |
| 🇯🇵 Japan | TSE | JP |
| 🇭🇰 Hong Kong | SEHK | HK |
| 🇨🇳 China | Shanghai, Shenzhen | CN_SHA, CN_SZA |
| 🇻🇳 Vietnam | Hanoi, Ho Chi Minh | VN_HNX, VN_HSX |

Select active markets with the `ENABLED_MARKETS` environment variable (default: `KR,US`).

## Telegram (optional)

Receive real-time Telegram alerts for trade executions, circuit-breaker trips, system status, and more.

@@ -102,25 +133,51 @@ docker compose up -d ouroboros
- ℹ️ Market open/close alerts
- 📝 System startup/shutdown status

**Fail-safe**: Trading continues even if alerts fail. Telegram API errors or missing settings do not affect the trading system.
### Bidirectional Commands

With `TELEGRAM_COMMANDS_ENABLED=true` (default), 9 interactive commands are supported:

| Command | Description |
|--------|------|
| `/help` | List available commands |
| `/status` | Trading status (mode, markets, P&L) |
| `/positions` | Account summary (balance, cash, P&L) |
| `/report` | Daily summary (trade count, P&L, win rate) |
| `/scenarios` | Today's playbook scenarios |
| `/review` | Recent scorecards (L6_DAILY) |
| `/dashboard` | Show the dashboard URL |
| `/stop` | Pause trading |
| `/resume` | Resume trading |

**Fail-safe**: Trading continues even if alerts fail.

## Tests

35 tests were written TDD-style before implementation.
551 tests are implemented across 25 files. Minimum coverage: 80%.

```
tests/test_risk.py — circuit breaker, fat finger, integration checks (11)
tests/test_broker.py — token management, timeouts, HTTP errors, hash key (6)
tests/test_brain.py — JSON parsing, confidence threshold, malformed response handling (15)
tests/test_scenario_engine.py — scenario matching (44)
tests/test_data_integration.py — external data integration (38)
tests/test_pre_market_planner.py — playbook generation (37)
tests/test_main.py — trading-loop integration (37)
tests/test_token_efficiency.py — token optimization (34)
tests/test_strategy_models.py — strategy model validation (33)
tests/test_telegram_commands.py — Telegram commands (31)
tests/test_latency_control.py — latency control (30)
tests/test_telegram.py — Telegram alerts (25)
... plus 16 more files
```

**Details**: [docs/testing.md](docs/testing.md)

## Tech Stack

- **Language**: Python 3.11+ (asyncio-based)
- **Broker**: KIS Open API (REST)
- **Broker**: KIS Open API (REST, domestic + overseas)
- **AI**: Google Gemini Pro
- **DB**: SQLite
- **Validation**: pytest + coverage
- **DB**: SQLite (5 tables: trades, contexts, decision_logs, playbooks, context_metadata)
- **Dashboard**: FastAPI + uvicorn
- **Validation**: pytest + coverage (551 tests)
- **CI/CD**: GitHub Actions
- **Deployment**: Docker + Docker Compose

@@ -128,27 +185,50 @@ tests/test_brain.py — JSON parsing, confidence threshold, malformed response handling

```
The-Ouroboros/
├── .github/workflows/ci.yml             # CI pipeline
├── docs/
│   ├── agents.md                        # AI agent persona definitions
│   └── skills.md                        # list of available tools
│   ├── architecture.md                  # system architecture
│   ├── testing.md                       # testing guide
│   ├── commands.md                      # command reference
│   ├── context-tree.md                  # L1-L7 memory system
│   ├── workflow.md                      # Git workflow
│   ├── agents.md                        # agent policies
│   ├── skills.md                        # tool list
│   ├── disaster_recovery.md             # backup/recovery
│   └── requirements-log.md              # requirements log
├── src/
│   ├── config.py                        # Pydantic settings
│   ├── logging_config.py                # structured JSON logging
│   ├── db.py                            # SQLite trade records
│   ├── main.py                          # async trading loop
│   ├── broker/kis_api.py                # KIS API client
│   ├── brain/gemini_client.py           # Gemini decision engine
│   ├── core/risk_manager.py             # risk management
│   ├── notifications/telegram_client.py # Telegram alerts
│   ├── evolution/optimizer.py           # strategy evolution engine
│   └── strategies/base.py               # strategy base class
├── tests/                               # TDD test suite
│   ├── analysis/                        # technical analysis (RSI, ATR, Smart Scanner)
│   ├── backup/                          # backup (scheduler, S3, integrity checks)
│   ├── brain/                           # Gemini decisions (prompt optimization, context selection)
│   ├── broker/                          # KIS API (domestic + overseas)
│   ├── context/                         # L1-L7 layered memory
│   ├── core/                            # risk management (READ-ONLY)
│   ├── dashboard/                       # FastAPI monitoring dashboard
│   ├── data/                            # external data integration
│   ├── evolution/                       # strategy evolution + Daily Review
│   ├── logging/                         # decision audit trail
│   ├── markets/                         # market schedules + timezones
│   ├── notifications/                   # Telegram alerts + commands
│   ├── strategy/                        # playbooks (Planner, Scenario Engine)
│   ├── config.py                        # Pydantic settings
│   ├── db.py                            # SQLite database
│   └── main.py                          # async trading loop
├── tests/                               # 551 tests (25 files)
├── Dockerfile                           # multi-stage build
├── docker-compose.yml                   # service orchestration
└── pyproject.toml                       # dependencies and tool config
```

## Documentation

- **[Architecture](docs/architecture.md)** — system design, components, data flow
- **[Testing](docs/testing.md)** — test structure, coverage, writing guide
- **[Commands](docs/commands.md)** — CLI, dashboard, and Telegram commands
- **[Context Tree](docs/context-tree.md)** — L1-L7 layered memory
- **[Workflow](docs/workflow.md)** — Git workflow policy
- **[Agent Policies](docs/agents.md)** — safety constraints, prohibited actions
- **[Backup/Recovery](docs/disaster_recovery.md)** — disaster recovery procedures
- **[Requirements](docs/requirements-log.md)** — user requirement tracking

## License

See the [LICENSE](LICENSE) file for this project's license.

@@ -2,7 +2,9 @@

## Overview

Self-evolving AI trading agent for global stock markets via KIS (Korea Investment & Securities) API. The main loop in `src/main.py` orchestrates four components across multiple markets with two trading modes: daily (batch API calls) or realtime (per-stock decisions).
Self-evolving AI trading agent for global stock markets via KIS (Korea Investment & Securities) API. The main loop in `src/main.py` orchestrates components across multiple markets with two trading modes: daily (batch API calls) or realtime (per-stock decisions).

**v2 Proactive Playbook Architecture**: The system uses a "plan once, execute locally" approach. Pre-market, the AI generates a playbook of scenarios (one Gemini API call per market per day). During trading hours, a local scenario engine matches live market data against these pre-computed scenarios — no additional AI calls needed. This dramatically reduces API costs and latency.

## Trading Modes

@@ -46,9 +48,11 @@ High-frequency trading with individual stock analysis:
**KISBroker** (`kis_api.py`) — Async KIS API client for domestic Korean market

- Automatic OAuth token refresh (valid for 24 hours)
- Leaky-bucket rate limiter (10 requests per second)
- Leaky-bucket rate limiter (configurable RPS, default 2.0; see the sketch below)
- POST body hash-key signing for order authentication
- Custom SSL context with disabled hostname verification for VTS (virtual trading) endpoint due to known certificate mismatch
- `fetch_market_rankings()` — Fetch volume surge rankings from KIS API
- `get_daily_prices()` — Fetch OHLCV history for technical analysis
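The leaky-bucket limiter is easy to picture in code. A minimal asyncio sketch, assuming a `rate` in requests per second matching `RATE_LIMIT_RPS`; the class name and internals are illustrative, not the repository's actual implementation:

```python
import asyncio
import time


class LeakyBucketLimiter:
    """Allow at most `rate` acquisitions per second (illustrative sketch)."""

    def __init__(self, rate: float) -> None:
        self._interval = 1.0 / rate          # seconds between permitted requests
        self._next_slot = time.monotonic()   # next free departure time
        self._lock = asyncio.Lock()

    async def acquire(self) -> None:
        async with self._lock:
            now = time.monotonic()
            # Reserve the next free slot, then advance it for the next caller.
            self._next_slot = max(self._next_slot, now)
            wait = self._next_slot - now
            self._next_slot += self._interval
        if wait > 0:
            await asyncio.sleep(wait)


async def main() -> None:
    limiter = LeakyBucketLimiter(rate=2.0)   # RATE_LIMIT_RPS=2.0
    for i in range(5):
        await limiter.acquire()              # requests drain at a steady 0.5s spacing
        print(f"request {i} at {time.monotonic():.2f}")


asyncio.run(main())
```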
**OverseasBroker** (`overseas.py`) — KIS overseas stock API wrapper

@@ -63,10 +67,11 @@ High-frequency trading with individual stock analysis:
- `is_market_open()` checks weekends, trading hours, lunch breaks
- `get_open_markets()` returns currently active markets
- `get_next_market_open()` finds next market to open and when
- 10 global markets defined (KR, US_NASDAQ, US_NYSE, US_AMEX, JP, HK, CN_SHA, CN_SZA, VN_HNX, VN_HSX)

**New API Methods** (added in v0.9.0):
- `fetch_market_rankings()` — Fetch volume surge rankings from KIS API
- `get_daily_prices()` — Fetch OHLCV history for technical analysis
**Overseas Ranking API Methods** (added in v0.10.x):
- `fetch_overseas_rankings()` — Fetch overseas ranking universe (fluctuation / volume)
- Ranking endpoint paths and TR_IDs are configurable via environment variables

### 2. Analysis (`src/analysis/`)

@@ -81,24 +86,28 @@ High-frequency trading with individual stock analysis:

**SmartVolatilityScanner** (`smart_scanner.py`) — Python-first filtering pipeline

- **Step 1**: Fetch volume rankings from KIS API (top 30 stocks)
- **Step 2**: Calculate RSI and volume ratio for each stock
- **Step 3**: Apply filters:
  - Volume ratio >= `VOL_MULTIPLIER` (default 2.0x previous day)
  - RSI < `RSI_OVERSOLD_THRESHOLD` (30) OR RSI > `RSI_MOMENTUM_THRESHOLD` (70)
- **Step 4**: Score candidates by RSI extremity (60%) + volume surge (40%)
- **Step 5**: Return top N candidates (default 3) for AI analysis
- **Fallback**: Uses static watchlist if ranking API unavailable
- **Domestic (KR)**:
  - **Step 1**: Fetch domestic fluctuation ranking as primary universe
  - **Step 2**: Fetch domestic volume ranking for liquidity bonus
  - **Step 3**: Compute volatility-first score (max of daily change% and intraday range%; see the sketch after this list)
  - **Step 4**: Apply liquidity bonus and return top N candidates
- **Overseas (US/JP/HK/CN/VN)**:
  - **Step 1**: Fetch overseas ranking universe (fluctuation rank + volume rank bonus)
  - **Step 2**: Compute volatility-first score (max of daily change% and intraday range%)
  - **Step 3**: Apply liquidity bonus from volume ranking
  - **Step 4**: Return top N candidates (default 3)
- **Fallback (overseas only)**: If ranking API is unavailable, uses a dynamic universe
  from runtime active symbols + recent traded symbols + current holdings (no static watchlist)
- **Realtime mode only**: Daily mode uses batch processing for API efficiency
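The volatility-first score reduces to a max over two percentage moves plus a liquidity bonus. A minimal sketch of that arithmetic; the field names and the bonus size are assumptions, not the scanner's real schema:

```python
from dataclasses import dataclass


@dataclass
class Quote:
    # Field names are illustrative; the real scanner's schema may differ.
    prev_close: float
    last: float
    day_high: float
    day_low: float
    in_volume_rank: bool  # appears in the volume ranking


def volatility_first_score(q: Quote, liquidity_bonus: float = 5.0) -> float:
    """max(|daily change %|, intraday range %), plus a bonus for liquid names."""
    daily_change_pct = abs(q.last - q.prev_close) / q.prev_close * 100
    intraday_range_pct = (q.day_high - q.day_low) / q.prev_close * 100
    score = max(daily_change_pct, intraday_range_pct)
    if q.in_volume_rank:
        score += liquidity_bonus
    return score


# A +4% mover with a 7% intraday range scores on the range, not the close.
q = Quote(prev_close=100.0, last=104.0, day_high=105.0, day_low=98.0,
          in_volume_rank=True)
print(volatility_first_score(q))  # 7.0 + 5.0 = 12.0
```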
**Benefits:**
- Reduces Gemini API calls from 20-30 stocks to 1-3 qualified candidates
- Fast Python-based filtering before expensive AI judgment
- Logs selection context (RSI, volume_ratio, signal, score) for Evolution system
- Logs selection context (RSI-compatible proxy, volume_ratio, signal, score) for Evolution system

### 3. Brain (`src/brain/gemini_client.py`)
### 3. Brain (`src/brain/`)

**GeminiClient** — AI decision engine powered by Google Gemini
**GeminiClient** (`gemini_client.py`) — AI decision engine powered by Google Gemini

- Constructs structured prompts from market data
- Parses JSON responses into `TradeDecision` objects (`action`, `confidence`, `rationale`)
@@ -106,11 +115,20 @@ High-frequency trading with individual stock analysis:
- Falls back to safe HOLD on any parse/API error
- Handles markdown-wrapped JSON, malformed responses, invalid actions

**PromptOptimizer** (`prompt_optimizer.py`) — Token efficiency optimization

- Reduces prompt size while preserving decision quality
- Caches optimized prompts

**ContextSelector** (`context_selector.py`) — Relevant context selection for prompts

- Selects appropriate context layers for current market conditions

### 4. Risk Manager (`src/core/risk_manager.py`)

**RiskManager** — Safety circuit breaker and order validation

⚠️ **READ-ONLY by policy** (see [`docs/agents.md`](./agents.md))
> **READ-ONLY by policy** (see [`docs/agents.md`](./agents.md))

- **Circuit Breaker**: Halts all trading via `SystemExit` when daily P&L drops below -3.0%
  - Threshold may only be made stricter, never relaxed
@@ -118,7 +136,79 @@ High-frequency trading with individual stock analysis:
- **Fat-Finger Protection**: Rejects orders exceeding 30% of available cash
  - Must always be enforced, cannot be disabled
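Both checks compose into a small validator. A sketch of the documented rules (the -3.0% breaker and the 30% fat-finger cap); the function shape is hypothetical, and the real logic lives in the READ-ONLY `src/core/risk_manager.py`:

```python
class FatFingerError(Exception):
    """Order rejected: exceeds the per-order share of available cash."""


def validate_order(daily_pnl_pct: float, order_value: float, cash: float,
                   max_loss_pct: float = 3.0, max_order_pct: float = 30.0) -> None:
    # Circuit breaker: halt everything once daily drawdown passes -3.0%.
    if daily_pnl_pct <= -max_loss_pct:
        raise SystemExit(f"circuit breaker: daily P&L {daily_pnl_pct:.2f}%")
    # Fat-finger guard: reject any single order above 30% of available cash.
    if order_value > cash * max_order_pct / 100.0:
        raise FatFingerError(
            f"order {order_value:.0f} exceeds {max_order_pct}% of cash {cash:.0f}")


validate_order(daily_pnl_pct=-1.2, order_value=250_000, cash=1_000_000)  # passes
```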
### 5. Notifications (`src/notifications/telegram_client.py`)
### 5. Strategy (`src/strategy/`)

**Pre-Market Planner** (`pre_market_planner.py`) — AI playbook generation

- Runs before market open (configurable `PRE_MARKET_MINUTES`, default 30)
- Generates scenario-based playbooks via single Gemini API call per market
- Handles timeout (`PLANNER_TIMEOUT_SECONDS`, default 60) with defensive playbook fallback
- Persists playbooks to database for audit trail

**Scenario Engine** (`scenario_engine.py`) — Local scenario matching

- Matches live market data against pre-computed playbook scenarios
- No AI calls during trading hours — pure Python matching logic (see the sketch below)
- Returns matched scenarios with confidence scores
- Configurable `MAX_SCENARIOS_PER_STOCK` (default 5)
- Periodic rescan at `RESCAN_INTERVAL_SECONDS` (default 300)
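Local matching comes down to comparing live fields against pre-computed trigger conditions. A minimal sketch assuming a deliberately simple scenario shape (a price band plus a volume floor); the real playbook scenarios carry richer conditions:

```python
from dataclasses import dataclass


@dataclass
class Scenario:
    # Hypothetical condition shape; real scenarios carry more fields.
    name: str
    price_low: float
    price_high: float
    min_volume_ratio: float
    action: str  # "BUY" | "SELL" | "HOLD"


def match_scenarios(price: float, volume_ratio: float,
                    scenarios: list[Scenario]) -> list[Scenario]:
    """Pure-Python matching: no AI call, just condition checks per scenario."""
    return [s for s in scenarios
            if s.price_low <= price <= s.price_high
            and volume_ratio >= s.min_volume_ratio]


playbook = [
    Scenario("breakout", 105.0, 120.0, 2.0, "BUY"),
    Scenario("capitulation", 80.0, 90.0, 3.0, "SELL"),
]
matched = match_scenarios(price=107.5, volume_ratio=2.4, scenarios=playbook)
print([s.name for s in matched])  # ['breakout']
```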
**Playbook Store** (`playbook_store.py`) — Playbook persistence

- SQLite-backed storage for daily playbooks
- Date and market-based retrieval
- Status tracking (generated, active, expired)

**Models** (`models.py`) — Pydantic data models

- Scenario, Playbook, MatchResult, and related type definitions

### 6. Context System (`src/context/`)

**Context Store** (`store.py`) — L1-L7 hierarchical memory

- 7-layer context system (see [docs/context-tree.md](./context-tree.md)):
  - L1: Tick-level (real-time price)
  - L2: Intraday (session summary)
  - L3: Daily (end-of-day)
  - L4: Weekly (trend analysis)
  - L5: Monthly (strategy review)
  - L6: Daily Review (scorecard)
  - L7: Evolution (long-term learning)
- Key-value storage with timeframe tagging
- SQLite persistence in `contexts` table

**Context Scheduler** (`scheduler.py`) — Periodic aggregation

- Scheduled summarization from lower to higher layers
- Configurable aggregation intervals

**Context Summarizer** (`summarizer.py`) — Layer summarization

- Aggregates lower-layer data into higher-layer summaries

### 7. Dashboard (`src/dashboard/`)

**FastAPI App** (`app.py`) — Read-only monitoring dashboard

- Runs as daemon thread when enabled (`--dashboard` CLI flag or `DASHBOARD_ENABLED=true`)
- Configurable host/port (`DASHBOARD_HOST`, `DASHBOARD_PORT`, default `127.0.0.1:8080`)
- Serves static HTML frontend

**8 API Endpoints:**

| Endpoint | Method | Description |
|----------|--------|-------------|
| `/` | GET | Static HTML dashboard |
| `/api/status` | GET | Daily trading status by market |
| `/api/playbook/{date}` | GET | Playbook for specific date and market |
| `/api/scorecard/{date}` | GET | Daily scorecard from L6_DAILY context |
| `/api/performance` | GET | Trading performance metrics (by market + combined) |
| `/api/context/{layer}` | GET | Query context by layer (L1-L7) |
| `/api/decisions` | GET | Decision log entries with outcomes |
| `/api/scenarios/active` | GET | Today's matched scenarios |
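Because the endpoints are plain read-only GETs, any HTTP client works. A stdlib sketch against the default bind address, assuming the dashboard is running locally:

```python
import json
from urllib.request import urlopen

BASE = "http://127.0.0.1:8080"  # DASHBOARD_HOST:DASHBOARD_PORT defaults

# Poll today's status and any matched scenarios.
for path in ("/api/status", "/api/scenarios/active"):
    with urlopen(BASE + path, timeout=5) as resp:
        print(path, json.load(resp))
```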
### 8. Notifications (`src/notifications/telegram_client.py`)

**TelegramClient** — Real-time event notifications via Telegram Bot API

@@ -126,7 +216,13 @@ High-frequency trading with individual stock analysis:
- Non-blocking: failures are logged but never crash trading
- Rate-limited: 1 message/second default to respect Telegram API limits
- Auto-disabled when credentials missing
- Gracefully handles API errors, network timeouts, invalid tokens

**TelegramCommandHandler** — Bidirectional command interface

- Long polling from Telegram API (configurable `TELEGRAM_POLLING_INTERVAL`)
- 9 interactive commands: `/help`, `/status`, `/positions`, `/report`, `/scenarios`, `/review`, `/dashboard`, `/stop`, `/resume`
- Authorization filtering by `TELEGRAM_CHAT_ID`
- Enable/disable via `TELEGRAM_COMMANDS_ENABLED` (default: true)
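Long polling against the Bot API is a loop over `getUpdates` with an advancing offset. A minimal sketch using only documented Bot API calls; the token and chat ID are the placeholder values from the configuration examples, and the real handler dispatches to the nine commands above:

```python
import json
import time
from urllib.request import urlopen

TOKEN = "1234567890:ABCdefGHIjklMNOpqrsTUVwxyz"  # placeholder
AUTHORIZED_CHAT_ID = 123456789                   # TELEGRAM_CHAT_ID
API = f"https://api.telegram.org/bot{TOKEN}"

offset = 0
while True:
    # Long poll: Telegram holds the request open for up to `timeout` seconds.
    with urlopen(f"{API}/getUpdates?offset={offset}&timeout=30") as resp:
        updates = json.load(resp)["result"]
    for u in updates:
        offset = u["update_id"] + 1  # acknowledge so it is not redelivered
        msg = u.get("message") or {}
        if msg.get("chat", {}).get("id") != AUTHORIZED_CHAT_ID:
            continue  # authorization filter
        text = msg.get("text", "")
        if text.startswith("/"):
            print("dispatch command:", text)  # e.g. route /status, /stop, ...
    time.sleep(1.0)  # TELEGRAM_POLLING_INTERVAL
```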
**Notification Types:**
- Trade execution (BUY/SELL with confidence)
@@ -134,12 +230,12 @@ High-frequency trading with individual stock analysis:
- Fat-finger protection triggers (order rejection)
- Market open/close events
- System startup/shutdown status
- Playbook generation results
- Stop-loss monitoring alerts

**Setup:** See [src/notifications/README.md](../src/notifications/README.md) for bot creation and configuration.
### 9. Evolution (`src/evolution/`)

### 6. Evolution (`src/evolution/optimizer.py`)

**StrategyOptimizer** — Self-improvement loop
**StrategyOptimizer** (`optimizer.py`) — Self-improvement loop

- Analyzes high-confidence losing trades from SQLite
- Asks Gemini to generate new `BaseStrategy` subclasses
@@ -147,99 +243,198 @@ High-frequency trading with individual stock analysis:
- Simulates PR creation for human review
- Only activates strategies that pass all tests

**DailyReview** (`daily_review.py`) — End-of-day review

- Generates comprehensive trade performance summary
- Stores results in L6_DAILY context layer
- Tracks win rate, P&L, confidence accuracy

**DailyScorecard** (`scorecard.py`) — Performance scoring

- Calculates daily metrics (trades, P&L, win rate, avg confidence)
- Enables trend tracking across days

**Stop-Loss Monitoring** — Real-time position protection

- Monitors positions against stop-loss levels from playbook scenarios
- Sends Telegram alerts when thresholds approached or breached

### 10. Decision Logger (`src/logging/decision_logger.py`)

**DecisionLogger** — Comprehensive audit trail

- Logs every trading decision with full context snapshot
- Captures input data, rationale, confidence, and outcomes
- Supports outcome tracking (P&L, accuracy) for post-analysis
- Stored in `decision_logs` table with indexed queries
- Review workflow support (reviewed flag, review notes)

### 11. Data Integration (`src/data/`)

**External Data Sources** (optional):

- `news_api.py` — News sentiment data
- `market_data.py` — Extended market data
- `economic_calendar.py` — Economic event calendar

### 12. Backup (`src/backup/`)

**Disaster Recovery** (see [docs/disaster_recovery.md](./disaster_recovery.md)):

- `scheduler.py` — Automated backup scheduling
- `exporter.py` — Data export to various formats
- `cloud_storage.py` — S3-compatible cloud backup
- `health_monitor.py` — Backup integrity verification

## Data Flow

### Playbook Mode (Daily — Primary v2 Flow)

```
┌─────────────────────────────────────────────────────────────┐
│ Pre-Market Phase (before market open)                       │
└─────────────────────────────────────────────────────────────┘
                   │
                   ▼
┌──────────────────────────────────┐
│ Pre-Market Planner               │
│ - 1 Gemini API call per market   │
│ - Generate scenario playbook     │
│ - Store in playbooks table       │
└──────────────────┬───────────────┘
                   │
                   ▼
┌─────────────────────────────────────────────────────────────┐
│ Trading Hours (market open → close)                         │
└─────────────────────────────────────────────────────────────┘
                   │
                   ▼
┌──────────────────────────────────┐
│ Market Schedule Check            │
│ - Get open markets               │
│ - Filter by enabled markets      │
└──────────────────┬───────────────┘
                   │
                   ▼
┌──────────────────────────────────┐
│ Scenario Engine (local)          │
│ - Match live data vs playbook    │
│ - No AI calls needed             │
│ - Return matched scenarios       │
└──────────────────┬───────────────┘
                   │
                   ▼
┌──────────────────────────────────┐
│ Risk Manager: Validate Order     │
│ - Check circuit breaker          │
│ - Check fat-finger limit         │
└──────────────────┬───────────────┘
                   │
                   ▼
┌──────────────────────────────────┐
│ Broker: Execute Order            │
│ - Domestic: send_order()         │
│ - Overseas: send_overseas_order()│
└──────────────────┬───────────────┘
                   │
                   ▼
┌──────────────────────────────────┐
│ Decision Logger + DB             │
│ - Full audit trail               │
│ - Context snapshot               │
│ - Telegram notification          │
└──────────────────┬───────────────┘
                   │
                   ▼
┌─────────────────────────────────────────────────────────────┐
│ Post-Market Phase                                           │
└─────────────────────────────────────────────────────────────┘
                   │
                   ▼
┌──────────────────────────────────┐
│ Daily Review + Scorecard         │
│ - Performance summary            │
│ - Store in L6_DAILY context      │
│ - Evolution learning             │
└──────────────────────────────────┘
```

### Realtime Mode (with Smart Scanner)

```
┌─────────────────────────────────────────────────────────────┐
│ Main Loop (60s cycle per market)                            │
│ Main Loop (60s cycle per market)                            │
└─────────────────────────────────────────────────────────────┘
                   │
                   ▼
┌──────────────────────────────────┐
│ Market Schedule Check            │
│ - Get open markets               │
│ - Filter by enabled markets      │
│ - Wait if all closed             │
└──────────────────┬────────────────┘
│ Market Schedule Check            │
│ - Get open markets               │
│ - Filter by enabled markets      │
│ - Wait if all closed             │
└──────────────────┬───────────────┘
                   │
                   ▼
┌──────────────────────────────────┐
│ Smart Scanner (Python-first)     │
│ - Fetch volume rankings (KIS)    │
│ - Get 20d price history per stock│
│ - Calculate RSI(14) + vol ratio  │
│ - Filter: vol>2x AND RSI extreme │
│ - Domestic: fluctuation rank     │
│   + volume rank bonus            │
│   + volatility-first scoring     │
│ - Overseas: ranking universe     │
│   + volatility-first scoring     │
│ - Fallback: dynamic universe     │
│ - Return top 3 qualified stocks  │
└──────────────────┬────────────────┘
└──────────────────┬───────────────┘
                   │
                   ▼
┌──────────────────────────────────┐
│ For Each Qualified Candidate     │
└──────────────────┬────────────────┘
│ For Each Qualified Candidate     │
└──────────────────┬───────────────┘
                   │
                   ▼
┌──────────────────────────────────┐
│ Broker: Fetch Market Data        │
│ - Domestic: orderbook + balance  │
│ - Overseas: price + balance      │
└──────────────────┬────────────────┘
└──────────────────┬───────────────┘
                   │
                   ▼
┌──────────────────────────────────┐
│ Calculate P&L                    │
│ pnl_pct = (eval - cost) / cost   │
└──────────────────┬────────────────┘
│ Brain: Get Decision (AI)         │
│ - Build prompt with market data  │
│ - Call Gemini API                │
│ - Parse JSON response            │
│ - Return TradeDecision           │
└──────────────────┬───────────────┘
                   │
                   ▼
┌──────────────────────────────────┐
│ Brain: Get Decision (AI)         │
│ - Build prompt with market data  │
│ - Call Gemini API                │
│ - Parse JSON response            │
│ - Return TradeDecision           │
└──────────────────┬────────────────┘
│ Risk Manager: Validate Order     │
│ - Check circuit breaker          │
│ - Check fat-finger limit         │
└──────────────────┬───────────────┘
                   │
                   ▼
┌──────────────────────────────────┐
│ Risk Manager: Validate Order     │
│ - Check circuit breaker          │
│ - Check fat-finger limit         │
│ - Raise if validation fails      │
└──────────────────┬────────────────┘
│ Broker: Execute Order            │
│ - Domestic: send_order()         │
│ - Overseas: send_overseas_order()│
└──────────────────┬───────────────┘
                   │
                   ▼
┌──────────────────────────────────┐
│ Broker: Execute Order            │
│ - Domestic: send_order()         │
│ - Overseas: send_overseas_order()│
└──────────────────┬────────────────┘
                   │
                   ▼
┌──────────────────────────────────┐
│ Notifications: Send Alert        │
│ - Trade execution notification   │
│ - Non-blocking (errors logged)   │
│ - Rate-limited to 1/sec          │
└──────────────────┬────────────────┘
                   │
                   ▼
┌──────────────────────────────────┐
│ Database: Log Trade              │
│ - SQLite (data/trades.db)        │
│ - Track: action, confidence,     │
│   rationale, market, exchange    │
│ - NEW: selection_context (JSON)  │
│   - RSI, volume_ratio, signal    │
│   - For Evolution optimization   │
└──────────────────────────────────┘
│ Decision Logger + Notifications  │
│ - Log trade to SQLite            │
│ - selection_context (JSON)       │
│ - Telegram notification          │
└──────────────────────────────────┘
```

## Database Schema

**SQLite** (`src/db.py`)
**SQLite** (`src/db.py`) — Database: `data/trades.db`

### trades
```sql
CREATE TABLE trades (
    id INTEGER PRIMARY KEY AUTOINCREMENT,
@@ -251,25 +446,73 @@ CREATE TABLE trades (
    quantity INTEGER,
    price REAL,
    pnl REAL DEFAULT 0.0,
    market TEXT DEFAULT 'KR',          -- KR | US_NASDAQ | JP | etc.
    exchange_code TEXT DEFAULT 'KRX',  -- KRX | NASD | NYSE | etc.
    selection_context TEXT             -- JSON: {rsi, volume_ratio, signal, score}
    market TEXT DEFAULT 'KR',
    exchange_code TEXT DEFAULT 'KRX',
    selection_context TEXT,            -- JSON: {rsi, volume_ratio, signal, score}
    decision_id TEXT                   -- Links to decision_logs
);
```

**Selection Context** (new in v0.9.0): Stores scanner selection criteria as JSON:
```json
{
  "rsi": 28.5,
  "volume_ratio": 2.7,
  "signal": "oversold",
  "score": 85.2
}
```

### contexts
```sql
CREATE TABLE contexts (
    id INTEGER PRIMARY KEY AUTOINCREMENT,
    layer TEXT NOT NULL,        -- L1 through L7
    timeframe TEXT,
    key TEXT NOT NULL,
    value TEXT NOT NULL,        -- JSON data
    created_at TEXT NOT NULL,
    updated_at TEXT NOT NULL
);
-- Indices: idx_contexts_layer, idx_contexts_timeframe, idx_contexts_updated
```

Enables Evolution system to analyze correlation between selection criteria and trade outcomes.
### decision_logs
```sql
CREATE TABLE decision_logs (
    decision_id TEXT PRIMARY KEY,
    timestamp TEXT NOT NULL,
    stock_code TEXT,
    market TEXT,
    exchange_code TEXT,
    action TEXT,
    confidence INTEGER,
    rationale TEXT,
    context_snapshot TEXT,      -- JSON: full context at decision time
    input_data TEXT,            -- JSON: market data used
    outcome_pnl REAL,
    outcome_accuracy REAL,
    reviewed INTEGER DEFAULT 0,
    review_notes TEXT
);
-- Indices: idx_decision_logs_timestamp, idx_decision_logs_reviewed, idx_decision_logs_confidence
```

Auto-migration: Adds `market`, `exchange_code`, and `selection_context` columns if missing for backward compatibility.
### playbooks
```sql
CREATE TABLE playbooks (
    id INTEGER PRIMARY KEY AUTOINCREMENT,
    date TEXT NOT NULL,
    market TEXT NOT NULL,
    status TEXT DEFAULT 'generated',
    playbook_json TEXT NOT NULL,  -- Full playbook with scenarios
    generated_at TEXT NOT NULL,
    token_count INTEGER,
    scenario_count INTEGER,
    match_count INTEGER DEFAULT 0
);
-- Indices: idx_playbooks_date, idx_playbooks_market
```

### context_metadata
```sql
CREATE TABLE context_metadata (
    layer TEXT PRIMARY KEY,
    description TEXT,
    retention_days INTEGER,
    aggregation_source TEXT
);
```

## Configuration

@@ -284,29 +527,81 @@ KIS_APP_SECRET=your_app_secret
KIS_ACCOUNT_NO=XXXXXXXX-XX
GEMINI_API_KEY=your_gemini_key

# Optional
# Optional — Trading Mode
MODE=paper                       # paper | live
DB_PATH=data/trades.db
CONFIDENCE_THRESHOLD=80
MAX_LOSS_PCT=3.0
MAX_ORDER_PCT=30.0
ENABLED_MARKETS=KR,US_NASDAQ     # Comma-separated market codes

# Trading Mode (API efficiency)
TRADE_MODE=daily                 # daily | realtime
DAILY_SESSIONS=4                 # Sessions per day (daily mode only)
SESSION_INTERVAL_HOURS=6         # Hours between sessions (daily mode only)

# Telegram Notifications (optional)
# Optional — Database
DB_PATH=data/trades.db

# Optional — Risk
CONFIDENCE_THRESHOLD=80
MAX_LOSS_PCT=3.0
MAX_ORDER_PCT=30.0

# Optional — Markets
ENABLED_MARKETS=KR,US            # Comma-separated market codes
RATE_LIMIT_RPS=2.0               # KIS API requests per second

# Optional — Pre-Market Planner (v2)
PRE_MARKET_MINUTES=30            # Minutes before market open to generate playbook
MAX_SCENARIOS_PER_STOCK=5        # Max scenarios per stock in playbook
PLANNER_TIMEOUT_SECONDS=60       # Timeout for playbook generation
DEFENSIVE_PLAYBOOK_ON_FAILURE=true  # Fallback on AI failure
RESCAN_INTERVAL_SECONDS=300      # Scenario rescan interval during trading

# Optional — Smart Scanner (realtime mode only)
RSI_OVERSOLD_THRESHOLD=30        # 0-50, oversold threshold
RSI_MOMENTUM_THRESHOLD=70        # 50-100, momentum threshold
VOL_MULTIPLIER=2.0               # Minimum volume ratio (2.0 = 200%)
SCANNER_TOP_N=3                  # Max qualified candidates per scan

# Optional — Dashboard
DASHBOARD_ENABLED=false          # Enable FastAPI dashboard
DASHBOARD_HOST=127.0.0.1         # Dashboard bind address
DASHBOARD_PORT=8080              # Dashboard port (1-65535)

# Optional — Telegram
TELEGRAM_BOT_TOKEN=1234567890:ABCdefGHIjklMNOpqrsTUVwxyz
TELEGRAM_CHAT_ID=123456789
TELEGRAM_ENABLED=true
TELEGRAM_COMMANDS_ENABLED=true   # Enable bidirectional commands
TELEGRAM_POLLING_INTERVAL=1.0    # Command polling interval (seconds)

# Smart Scanner (optional, realtime mode only)
RSI_OVERSOLD_THRESHOLD=30        # 0-50, oversold threshold
RSI_MOMENTUM_THRESHOLD=70        # 50-100, momentum threshold
VOL_MULTIPLIER=2.0               # Minimum volume ratio (2.0 = 200%)
SCANNER_TOP_N=3                  # Max qualified candidates per scan
# Optional — Backup
BACKUP_ENABLED=false
BACKUP_DIR=data/backups
S3_ENDPOINT_URL=...
S3_ACCESS_KEY=...
S3_SECRET_KEY=...
S3_BUCKET_NAME=...
S3_REGION=...

# Optional — External Data
NEWS_API_KEY=...
NEWS_API_PROVIDER=...
MARKET_DATA_API_KEY=...

# Position Sizing (optional)
POSITION_SIZING_ENABLED=true
POSITION_BASE_ALLOCATION_PCT=5.0
POSITION_MIN_ALLOCATION_PCT=1.0
POSITION_MAX_ALLOCATION_PCT=10.0
POSITION_VOLATILITY_TARGET_SCORE=50.0

# Legacy/compat scanner thresholds (kept for backward compatibility)
RSI_OVERSOLD_THRESHOLD=30
RSI_MOMENTUM_THRESHOLD=70
VOL_MULTIPLIER=2.0

# Overseas Ranking API (optional override; account-dependent)
OVERSEAS_RANKING_ENABLED=true
OVERSEAS_RANKING_FLUCT_TR_ID=HHDFS76200100
OVERSEAS_RANKING_VOLUME_TR_ID=HHDFS76200200
OVERSEAS_RANKING_FLUCT_PATH=/uapi/overseas-price/v1/quotations/inquire-updown-rank
OVERSEAS_RANKING_VOLUME_PATH=/uapi/overseas-price/v1/quotations/inquire-volume-rank
```

Tests use in-memory SQLite (`DB_PATH=":memory:"`) and dummy credentials via `tests/conftest.py`.
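A stripped-down stand-in for the Pydantic-based `src/config.py` illustrates the loading pattern. This sketch uses only the stdlib and covers a small subset of the variables above; the real Settings class validates far more:

```python
import os
from dataclasses import dataclass, field


def _env(name: str, default: str) -> str:
    return os.getenv(name, default)


@dataclass(frozen=True)
class Settings:
    # Values are read from the environment when Settings() is constructed.
    mode: str = field(default_factory=lambda: _env("MODE", "paper"))
    trade_mode: str = field(default_factory=lambda: _env("TRADE_MODE", "daily"))
    db_path: str = field(default_factory=lambda: _env("DB_PATH", "data/trades.db"))
    rate_limit_rps: float = field(
        default_factory=lambda: float(_env("RATE_LIMIT_RPS", "2.0")))
    confidence_threshold: int = field(
        default_factory=lambda: int(_env("CONFIDENCE_THRESHOLD", "80")))
    enabled_markets: tuple[str, ...] = field(default_factory=lambda: tuple(
        m.strip() for m in _env("ENABLED_MARKETS", "KR,US").split(",")))

    def __post_init__(self) -> None:
        # Fail fast on an invalid mode, mirroring type/value validation.
        if self.mode not in ("paper", "live"):
            raise ValueError(f"MODE must be paper|live, got {self.mode!r}")


settings = Settings()
print(settings.mode, settings.enabled_markets)
```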
@@ -340,4 +635,9 @@ Tests use in-memory SQLite (`DB_PATH=":memory:"`) and dummy credentials via `tests/conftest.py`.
- Invalid token → log error, trading unaffected
- Rate limit exceeded → queued via rate limiter

**Guarantee**: Notification failures never interrupt trading operations.
### Playbook Generation Failure
- Timeout → fall back to defensive playbook (`DEFENSIVE_PLAYBOOK_ON_FAILURE`)
- API error → use previous day's playbook if available
- No playbook → skip pre-market phase, fall back to direct AI calls

**Guarantee**: Notification and dashboard failures never interrupt trading operations.

@@ -119,7 +119,7 @@ No decorator needed for async tests.
# Install all dependencies (production + dev)
pip install -e ".[dev]"

# Run full test suite with coverage
# Run full test suite with coverage (551 tests across 25 files)
pytest -v --cov=src --cov-report=term-missing

# Run a single test file
@@ -137,11 +137,82 @@ mypy src/ --strict
# Run the trading agent
python -m src.main --mode=paper

# Run with dashboard enabled
python -m src.main --mode=paper --dashboard

# Docker
docker compose up -d ouroboros        # Run agent
docker compose --profile test up test # Run tests in container
```

## Dashboard

The FastAPI dashboard provides read-only monitoring of the trading system.

### Starting the Dashboard

```bash
# Via CLI flag
python -m src.main --mode=paper --dashboard

# Via environment variable
DASHBOARD_ENABLED=true python -m src.main --mode=paper
```

Dashboard runs as a daemon thread on `DASHBOARD_HOST:DASHBOARD_PORT` (default: `127.0.0.1:8080`).

### API Endpoints

| Endpoint | Description |
|----------|-------------|
| `GET /` | HTML dashboard UI |
| `GET /api/status` | Daily trading status by market |
| `GET /api/playbook/{date}` | Playbook for specific date (query: `market`) |
| `GET /api/scorecard/{date}` | Daily scorecard from L6_DAILY context |
| `GET /api/performance` | Performance metrics by market and combined |
| `GET /api/context/{layer}` | Context data by layer L1-L7 (query: `timeframe`) |
| `GET /api/decisions` | Decision log entries (query: `limit`, `market`) |
| `GET /api/scenarios/active` | Today's matched scenarios |

## Telegram Commands

When `TELEGRAM_COMMANDS_ENABLED=true` (default), the bot accepts these interactive commands:

| Command | Description |
|---------|-------------|
| `/help` | List available commands |
| `/status` | Show trading status (mode, markets, P&L) |
| `/positions` | Display account summary (balance, cash, P&L) |
| `/report` | Daily summary metrics (trades, P&L, win rate) |
| `/scenarios` | Show today's playbook scenarios |
| `/review` | Display recent scorecards (L6_DAILY layer) |
| `/dashboard` | Show dashboard URL if enabled |
| `/stop` | Pause trading |
| `/resume` | Resume trading |

Commands are only processed from the authorized `TELEGRAM_CHAT_ID`.

## KIS API TR_ID Reference

**Always check the official documentation first when adding or changing a TR_ID.**

Official document: `docs/한국투자증권_오픈API_전체문서_20260221_030000.xlsx`

> ⚠️ TR_IDs from unofficial sources (community blogs, GitHub examples, etc.) may be outdated or wrong.
> The incorrect `VTTT1006U` (US sell) actually lingered in the code for a long time (Issue #189).

### Key TR_IDs

| Type | Paper TR_ID | Live TR_ID | Sheet |
|------|---------------|---------------|--------|
| Overseas stock buy (US) | `VTTT1002U` | `TTTT1002U` | 해외주식 주문 |
| Overseas stock sell (US) | `VTTT1001U` | `TTTT1006U` | 해외주식 주문 |

When a new TR_ID is needed:
1. Find the sheet for the transaction type in the xlsx file above.
2. Distinguish the paper (`VTTT`) and live (`TTTT`) columns and use the exact value.
3. Leave a source comment in the code: `# Source: 한국투자증권_오픈API_전체문서 — '<sheet name>' sheet`

## Environment Setup

||||
131
docs/live-trading-checklist.md
Normal file
131
docs/live-trading-checklist.md
Normal file
@@ -0,0 +1,131 @@
# Live Transition Checklist

Before switching from paper trading to live trading, verify every item below **in order**.

---

## 1. Prerequisites

### 1-1. KIS OpenAPI live account
- [ ] Korea Investment & Securities account opened (regular brokerage account)
- [ ] Live OpenAPI access requested (KIS website → Open API → service application)
- [ ] Live APP_KEY / APP_SECRET issued
- [ ] KIS_ACCOUNT_NO format verified: `XXXXXXXX-XX` (8 digits-2 digits)

### 1-2. Risk parameter review
- [ ] `CIRCUIT_BREAKER_PCT` checked: default -3.0% (tightening recommended)
- [ ] `FAT_FINGER_PCT` checked: default 30.0% (max single order as % of balance)
- [ ] `CONFIDENCE_THRESHOLD` checked: BEARISH ≥ 90, NEUTRAL ≥ 80, BULLISH ≥ 75
- [ ] Initial capital decided and an overseas trading limit set

### 1-3. System requirements
- [ ] Coverage stays at 80% or above: `pytest --cov=src`
- [ ] Type check passes: `mypy src/ --strict`
- [ ] Lint passes: `ruff check src/ tests/`

---

## 2. Environment Configuration

### 2-1. Edit the `.env` file

```bash
# 1. Switch to the live KIS URL (paper: openapivts, port 29443)
KIS_BASE_URL=https://openapi.koreainvestment.com:9443

# 2. Replace with the live APP_KEY / APP_SECRET
KIS_APP_KEY=<LIVE_APP_KEY>
KIS_APP_SECRET=<LIVE_APP_SECRET>
KIS_ACCOUNT_NO=<LIVE_ACCOUNT_NO>

# 3. Set the mode to live
MODE=live

# 4. Disable PAPER_OVERSEAS_CASH (ignored in live mode, but set it to 0 explicitly)
PAPER_OVERSEAS_CASH=0
```

> ⚠️ Mind the `KIS_BASE_URL` port:
> - **Paper (VTS)**: `https://openapivts.koreainvestment.com:29443`
> - **Live**: `https://openapi.koreainvestment.com:9443`

### 2-2. Verify automatic TR_ID branching

The TR_IDs below are **selected automatically** in code based on the `MODE` value.
No extra configuration is needed, but consult the table if problems arise.

| Type | Paper TR_ID | Live TR_ID |
|------|-----------|-----------|
| Domestic balance inquiry | `VTTC8434R` | `TTTC8434R` |
| Domestic cash buy | `VTTC0012U` | `TTTC0012U` |
| Domestic cash sell | `VTTC0011U` | `TTTC0011U` |
| Overseas balance inquiry | `VTTS3012R` | `TTTS3012R` |
| Overseas buy | `VTTT1002U` | `TTTT1002U` |
| Overseas sell | `VTTT1001U` | `TTTT1006U` |

> **Source**: `docs/한국투자증권_오픈API_전체문서_20260221_030000.xlsx` (per the official documentation)
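The automatic branching amounts to a lookup keyed on `MODE`. A sketch built from the table above; the dictionary keys and function name are hypothetical:

```python
# (paper TR_ID, live TR_ID) per operation, taken from the table above.
TR_IDS: dict[str, tuple[str, str]] = {
    "domestic_balance": ("VTTC8434R", "TTTC8434R"),
    "domestic_buy":     ("VTTC0012U", "TTTC0012U"),
    "domestic_sell":    ("VTTC0011U", "TTTC0011U"),
    "overseas_balance": ("VTTS3012R", "TTTS3012R"),
    "overseas_buy":     ("VTTT1002U", "TTTT1002U"),
    "overseas_sell":    ("VTTT1001U", "TTTT1006U"),
}
# Source: 한국투자증권_오픈API_전체문서 — the relevant sheet (e.g., '해외주식 주문')


def tr_id(operation: str, mode: str) -> str:
    """Return the TR_ID for an operation, branching on MODE (paper vs live)."""
    paper, live = TR_IDS[operation]
    return live if mode == "live" else paper


assert tr_id("overseas_sell", "paper") == "VTTT1001U"
assert tr_id("overseas_sell", "live") == "TTTT1006U"
```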
---

## 3. Final Checks

### 3-1. Pre-launch checks
- [ ] DB backed up: `data/trade_logs.db` → `data/backups/`
- [ ] Telegram alerting verified (alerts matter even more in live trading)
- [ ] First trade made with a small amount, then TR_IDs and account behavior confirmed

### 3-2. Run commands

```bash
# Run in live mode
python -m src.main --mode=live

# Run with the dashboard (monitor in a separate terminal)
python -m src.main --mode=live --dashboard
```

### 3-3. Immediately after going live
- [ ] `MODE=live` appears in the logs
- [ ] First balance inquiry succeeds (no ConnectionError)
- [ ] Telegram alert received ("System started")
- [ ] After the first order, fills confirmed in the KIS app

---

## 4. Emergency Stop

### Immediate stop
```bash
# Ctrl+C in the terminal (triggers graceful shutdown)
# Or the Telegram bot command:
/stop
```

### When the circuit breaker trips
- When the CB trips, trading halts automatically and a Telegram alert is sent
- CB threshold: `CIRCUIT_BREAKER_PCT` (default -3.0%)
- **The threshold may only be tightened** (adjusted in the stricter direction only)

---

## 5. Rollback Procedure

If problems occur after going live:

```bash
# 1. Immediately restore MODE=paper in .env
# 2. Restart
python -m src.main --mode=paper

# 3. Check recent trades in the DB
sqlite3 data/trade_logs.db "SELECT * FROM trades ORDER BY id DESC LIMIT 20;"
```

---

## Related Documents

- [System architecture](architecture.md)
- [Workflow guide](workflow.md)
- [Disaster recovery](disaster_recovery.md)
- [Agent constraints](agents.md)

docs/ouroboros/00_validation_system.md (new file, 56 lines)
@@ -0,0 +1,56 @@
|
||||
<!--
|
||||
Doc-ID: DOC-VAL-001
|
||||
Version: 1.0.0
|
||||
Status: active
|
||||
Owner: strategy
|
||||
Updated: 2026-02-26
|
||||
-->
|
||||
|
||||
# 문서 검증 시스템
|
||||
|
||||
본 문서는 문서 간 허위 내용, 수치 충돌, 구현 불가능 지시를 사전에 제거하기 위한 검증 규칙이다.
|
||||
|
||||
## 검증 목표
|
||||
|
||||
- 단일 진실원장 기준으로 모든 지시서의 수치/규칙 정합성 보장
|
||||
- 설계 문장과 코드 작업 지시 간 추적성 보장
|
||||
- 테스트 미정의 상태에서 구현 착수 금지
|
||||
|
||||
## 불일치 유형 정의
|
||||
|
||||
- `RULE-DOC-001`: 정의되지 않은 요구사항 ID 사용
|
||||
- `RULE-DOC-002`: 동일 요구사항 ID에 상충되는 값(예: 슬리피지 수치) 기술
|
||||
- `RULE-DOC-003`: 시간대 미표기 또는 KST/UTC 혼용 지시
|
||||
- `RULE-DOC-004`: 주문 정책과 리스크 정책 충돌(예: 저유동 세션 시장가 허용)
|
||||
- `RULE-DOC-005`: 구현 태스크에 테스트 ID 미연결
|
||||
- `RULE-DOC-006`: 문서 라우팅 링크 깨짐
|
||||
|
||||
## 검증 파이프라인
|
||||
|
||||
1. 정적 검사 (자동)
|
||||
- 대상: `docs/ouroboros/*.md`
|
||||
- 검사: 메타데이터, 링크 유효성, ID 정의/참조 일치, REQ-추적성 매핑
|
||||
- 도구: `scripts/validate_ouroboros_docs.py`
|
||||
|
||||
2. 추적성 검사 (자동 + 수동)
|
||||
- 자동: `REQ-*`가 최소 1개 `TASK-*`와 1개 `TEST-*`에 연결되었는지 확인
|
||||
- 수동: 정책 충돌 후보를 PR 체크리스트로 검토
|
||||
|
||||
3. 도메인 무결성 검사 (수동)
|
||||
- KIS 점검시간 회피, 주문 유형 강제, Kill Switch 순서, 환율 정책이 동시에 존재하는지 점검
|
||||
- 백테스트 체결가가 보수 가정인지 점검

## Change Control Rules

- Adding or modifying any `REQ-*` must start with a change to `01_requirements_registry.md`
- Modifying any `TASK-*` must update the corresponding test in `40_acceptance_and_test_plan.md` at the same time
- On conflict, precedence is: `requirements_registry > phase execution > code work order`

Applied ruleset:
- `RULE-DOC-001` `RULE-DOC-002` `RULE-DOC-003` `RULE-DOC-004` `RULE-DOC-005` `RULE-DOC-006`

## PR Gate

- `python3 scripts/validate_ouroboros_docs.py` passes
- Every new/changed `REQ-*` is linked to a test criterion (`TEST-*`)
- No contradiction with the original plans (v2/v3)
docs/ouroboros/01_requirements_registry.md (new file, 39 lines)
@@ -0,0 +1,39 @@
<!--
Doc-ID: DOC-REQ-001
Version: 1.0.0
Status: active
Owner: strategy
Updated: 2026-02-26
-->

# Requirements Registry (Single Source of Truth)

The IDs in this document are the only set of requirements referenced across all planning, implementation, and test documents.
## v2 Core Requirements

- `REQ-V2-001`: The state machine must have exactly four states: `HOLDING`, `BE_LOCK`, `ARMED`, `EXITED`.
- `REQ-V2-002`: On every tick/bar evaluation, the position must be promoted immediately to the highest state its conditions allow.
- `REQ-V2-003`: The `EXITED` condition must be evaluated before all other states.
- `REQ-V2-004`: Exit logic must include Hard Stop, BE Lock, ATR Trailing, and a model-probability auxiliary trigger.
- `REQ-V2-005`: Labeling must use the Triple Barrier (Upper/Lower/Time) method.
- `REQ-V2-006`: Validation enforces Walk-forward + Purge/Embargo.
- `REQ-V2-007`: A backtest that does not reflect costs/slippage/fill failures is inadmissible.
- `REQ-V2-008`: The Kill Switch order is: block new orders -> cancel open orders -> re-query -> reduce risk -> snapshot.

## v3 Core Requirements

- `REQ-V3-001`: Every signal/order/log must include a `session_id`.
- `REQ-V3-002`: Risk parameters must be reloaded on session transitions.
- `REQ-V3-003`: New orders are forbidden during broker blackout windows.
- `REQ-V3-004`: Signals raised during a blackout are queued and re-validated after recovery.
- `REQ-V3-005`: Market orders are forbidden in low-liquidity sessions (`NXT_AFTER`, `US_PRE`, `US_DAY`, `US_AFTER`).
- `REQ-V3-006`: Backtest fills assume adverse-direction execution by default.
- `REQ-V3-007`: US operation must include separate FX P&L tracking and a currency buffer policy.
- `REQ-V3-008`: Close/overnight rules must integrate with the Kill Switch without conflict.

## Common Operational Requirements

- `REQ-OPS-001`: A timezone (KST/UTC) must be explicit on every time field.
- `REQ-OPS-002`: Numeric policies in documents are changed only in this registry.
- `REQ-OPS-003`: Every implementation task must be accompanied by a test task.
docs/ouroboros/10_phase_v2_execution.md (new file, 63 lines)
@@ -0,0 +1,63 @@
<!--
Doc-ID: DOC-PHASE-V2-001
Version: 1.0.0
Status: active
Owner: strategy
Updated: 2026-02-26
-->

# v2 Execution Work Order (Design -> Code)

Referenced requirements: `REQ-V2-001` `REQ-V2-002` `REQ-V2-003` `REQ-V2-004` `REQ-V2-005` `REQ-V2-006` `REQ-V2-007` `REQ-V2-008` `REQ-OPS-001` `REQ-OPS-002` `REQ-OPS-003`

## Step 1: Finalize the Domain Model

- `TASK-V2-001`: design the state machine enum, transition events, and transition-reason schema
- `TASK-V2-002`: define the `position_state` snapshot structure (current state, peak, stops, last_reason)
- `TASK-V2-003`: define the exit-decision input DTO (price, ATR, pred_prob, liquidity_signal)

Completion criteria:
- States and transition reasons are reproducible from logs/DB
- `REQ-V2-001`~`003` are enforced at the type level in code

## Step 2: Implement the Exit Engine

- `TASK-V2-004`: implement the priority-based transition function (`evaluate_exit_first` -> `promote_state`)
- `TASK-V2-005`: implement the combined Hard Stop / BE Lock / ATR Trailing logic
- `TASK-V2-006`: combine the model-probability signal as an auxiliary trigger (no standalone exits)

Completion criteria:
- On a gap, when multiple conditions fire at once the position jumps to the highest state in a single transition (see the sketch below)
- `REQ-V2-004` satisfied
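
A minimal sketch of the promotion semantics; the state names come from `REQ-V2-001`, while the thresholds and signature are assumptions borrowed from the v2 plan's `be_arm_pct`/`arm_pct` parameters:

```python
from enum import IntEnum

class PositionState(IntEnum):
    # Ordered so a higher value is a "higher" state (REQ-V2-001)
    HOLDING = 0
    BE_LOCK = 1
    ARMED = 2
    EXITED = 3

def promote_state(state: PositionState, pnl_pct: float, exit_hit: bool,
                  be_arm_pct: float = 1.2, arm_pct: float = 2.8) -> PositionState:
    """Jump straight to the highest state the conditions allow (REQ-V2-002)."""
    if exit_hit:                      # EXITED evaluated before all else (REQ-V2-003)
        return PositionState.EXITED
    if pnl_pct >= arm_pct:
        target = PositionState.ARMED
    elif pnl_pct >= be_arm_pct:
        target = PositionState.BE_LOCK
    else:
        target = PositionState.HOLDING
    return max(state, target)         # never demote; a gap can skip BE_LOCK
```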

## Step 3: Labeling / Training Data Pipeline

- `TASK-V2-007`: implement the Triple Barrier labeler (first-touched barrier wins)
- `TASK-V2-008`: implement a utility that verifies feature and label windows are disjoint
- `TASK-V2-009`: record label-generation logs (entry time, touched barrier, expiry barrier)

Completion criteria:
- Logs evidencing that look-ahead is blocked
- `REQ-V2-005` satisfied (a first-touch sketch follows)
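
A minimal first-touch labeler sketch; this is illustrative, not the repo's actual labeler:

```python
def triple_barrier_label(prices: list[float], entry: int, upper_pct: float,
                         lower_pct: float, max_bars: int) -> int:
    """First-touch Triple Barrier label: +1 upper, -1 lower, 0 time barrier.

    Uses only bars strictly after `entry`, so the feature window is never
    peeked into (look-ahead blocked by construction).
    """
    entry_price = prices[entry]
    upper = entry_price * (1 + upper_pct / 100)
    lower = entry_price * (1 - lower_pct / 100)
    for price in prices[entry + 1 : entry + 1 + max_bars]:
        if price >= upper:
            return 1    # take-profit barrier touched first
        if price <= lower:
            return -1   # stop-loss barrier touched first
    return 0            # time barrier expired
```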

## Step 4: Validation Framework

- `TASK-V2-010`: implement the Walk-forward splitter with Purge/Embargo
- `TASK-V2-011`: implement the baseline (`B0`,`B1`,`M1`) comparison report format
- `TASK-V2-012`: make the cost/slippage/fill-failure backtest options mandatory

Completion criteria:
- `REQ-V2-006`, `REQ-V2-007` satisfied (splitter sketch below)
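
A minimal splitter sketch, assuming bar-indexed samples; the fold geometry and parameter handling are illustrative, not the repo's implementation:

```python
def walk_forward_splits(n_samples: int, n_folds: int, purge: int, embargo: int):
    """Yield (train_idx, test_idx) pairs for expanding walk-forward folds.

    `purge` drops the bars immediately before each test window from its
    training set (their labels can overlap the test window); `embargo`
    drops the bars immediately after every earlier test window from
    later training sets.
    """
    fold_size = n_samples // (n_folds + 1)
    embargoed: set[int] = set()
    for fold in range(1, n_folds + 1):
        test_start = fold * fold_size
        test_end = min(test_start + fold_size, n_samples)
        train_idx = [i for i in range(max(test_start - purge, 0))
                     if i not in embargoed]
        test_idx = list(range(test_start, test_end))
        yield train_idx, test_idx
        embargoed.update(range(test_end, min(test_end + embargo, n_samples)))
```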

## Step 5: Kill Switch Integration

- `TASK-V2-013`: implement the Kill Switch sequential-execution orchestrator (`src/core/risk_manager.py` must not be modified)
- `TASK-V2-014`: implement the order-block flag / open-order cancel / re-query retry logic
- `TASK-V2-015`: implement the snapshot / alert / recovery-entry procedure

Completion criteria:
- Order matches `REQ-V2-008` (orchestrator sketch below)
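
A minimal orchestrator sketch of the fixed five-step order; the `broker`/`risk`/`notifier` interfaces are assumptions, not the repo's actual objects:

```python
import logging

log = logging.getLogger("kill_switch")

async def execute_kill_switch(broker, risk, notifier) -> None:
    """Run the five Kill Switch steps in the fixed REQ-V2-008 order."""
    broker.block_new_orders = True                  # 1. block new orders
    for order in await broker.get_open_orders():    # 2. cancel open orders
        await broker.cancel_order(order)
    remaining = await broker.get_open_orders()      # 3. re-query, retry failures
    for order in remaining:
        await broker.cancel_order(order)
    await risk.reduce_positions()                   # 4. recompute and reduce risk
    snapshot = risk.snapshot()                      # 5. snapshot + alert
    log.critical("kill switch engaged: %s", snapshot)
    await notifier.send(f"Kill switch engaged: {snapshot}")
```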

Routing:
- Code-level details: [30_code_level_work_orders.md](./30_code_level_work_orders.md)
- Test details: [40_acceptance_and_test_plan.md](./40_acceptance_and_test_plan.md)
docs/ouroboros/20_phase_v3_execution.md (new file, 60 lines)
@@ -0,0 +1,60 @@
<!--
Doc-ID: DOC-PHASE-V3-001
Version: 1.0.0
Status: active
Owner: strategy
Updated: 2026-02-26
-->

# v3 Execution Work Order (Session Expansion)

Referenced requirements: `REQ-V3-001` `REQ-V3-002` `REQ-V3-003` `REQ-V3-004` `REQ-V3-005` `REQ-V3-006` `REQ-V3-007` `REQ-V3-008` `REQ-OPS-001` `REQ-OPS-002` `REQ-OPS-003`

## Step 1: Session Engine

- `TASK-V3-001`: implement the `session_id` classifier (KR/US extended sessions)
- `TASK-V3-002`: implement risk-parameter reloading in the session-transition hook
- `TASK-V3-003`: make the `session_id` field mandatory in log/DB schemas

Completion criteria:
- `REQ-V3-001`, `REQ-V3-002` satisfied

## Step 2: Blackout / Recovery Control

- `TASK-V3-004`: implement the blackout-window policy loader (config-driven)
- `TASK-V3-005`: implement new-order blocking plus intent-queue enqueueing during blackouts
- `TASK-V3-006`: after recovery, synchronize (balances/open orders/fills), then re-validate the queue

Completion criteria:
- `REQ-V3-003`, `REQ-V3-004` satisfied (queue sketch below)
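
A minimal sketch of the block-and-queue behavior; `OrderIntent`, `BlackoutGate`, and the callbacks are illustrative assumptions standing in for the real policy loader and re-validation step:

```python
from collections import deque
from dataclasses import dataclass

@dataclass
class OrderIntent:
    symbol: str
    side: str
    qty: int

class BlackoutGate:
    """Block new orders during a blackout and queue the intents instead."""

    def __init__(self) -> None:
        self.queue: deque = deque()

    def submit(self, intent: OrderIntent, is_blackout: bool, send) -> None:
        if is_blackout:
            self.queue.append(intent)   # REQ-V3-004: queue, never call the API
        else:
            send(intent)

    def recover(self, still_valid, send) -> None:
        """After sync, execute only intents that pass re-validation."""
        while self.queue:
            intent = self.queue.popleft()
            if still_valid(intent):
                send(intent)
```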

## Step 3: Hardened Order Policy

- `TASK-V3-007`: implement the per-session order-type matrix
- `TASK-V3-008`: hard-block market orders in low-liquidity sessions
- `TASK-V3-009`: implement re-quote interval/count limits and order-withdrawal conditions

Completion criteria:
- `REQ-V3-005` satisfied (matrix sketch below)
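
A minimal sketch of such a matrix; the session names come from `REQ-V3-005`, while the mapping values and function are assumptions:

```python
# Hypothetical per-session order-type matrix enforcing REQ-V3-005.
ALLOWED_ORDER_TYPES = {
    "KR_REGULAR": {"market", "limit"},
    "NXT_AFTER": {"limit"},   # low-liquidity: no market orders
    "US_PRE": {"limit"},
    "US_DAY": {"limit"},
    "US_AFTER": {"limit"},
}

def validate_order_type(session_id: str, order_type: str) -> None:
    # Unknown sessions default to the strictest policy (limit only).
    allowed = ALLOWED_ORDER_TYPES.get(session_id, {"limit"})
    if order_type not in allowed:
        raise ValueError(f"{order_type} orders are not allowed in {session_id}")
```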

## Step 4: Refined Cost / Fill Model

- `TASK-V3-010`: feed per-session slippage/cost tables into the engine
- `TASK-V3-011`: implement an adverse-fill (opposite-quote direction) fill simulator
- `TASK-V3-012`: model per-scenario fill failures and partial fills

Completion criteria:
- `REQ-V3-006` satisfied (adverse-fill sketch below)
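
A minimal adverse-fill sketch under assumed quote-based fills and percentage slippage; not the repo's actual fill engine:

```python
def adverse_fill_price(side: str, bid: float, ask: float,
                       slippage_pct: float) -> float:
    """Conservative backtest fill (REQ-V3-006): buys cross the spread to
    the ask plus slippage, sells hit the bid minus slippage."""
    if side == "BUY":
        return ask * (1 + slippage_pct / 100)
    return bid * (1 - slippage_pct / 100)
```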

## Step 5: FX / Overnight / Kill Switch Integration

- `TASK-V3-013`: implement separate accounting for strategy PnL and FX PnL
- `TASK-V3-014`: restrict new entries when the USD/KRW buffer rule is violated
- `TASK-V3-015`: integrate overnight exceptions with Kill Switch priority

Completion criteria:
- `REQ-V3-007`, `REQ-V3-008` satisfied

Routing:
- Code-level details: [30_code_level_work_orders.md](./30_code_level_work_orders.md)
- Test details: [40_acceptance_and_test_plan.md](./40_acceptance_and_test_plan.md)
docs/ouroboros/30_code_level_work_orders.md (new file, 59 lines)
@@ -0,0 +1,59 @@
<!--
Doc-ID: DOC-CODE-001
Version: 1.0.0
Status: active
Owner: strategy
Updated: 2026-02-26
-->

# Code-Level Work Orders

This document is the file-level implementation work order. Every task must carry a requirement ID and a test ID.

Constraints:
- Treat `src/core/risk_manager.py` as READ-ONLY; do not modify it.
- Add the Kill Switch as a separate module (e.g. `src/core/kill_switch.py`) and wire it into the top-level execution loop.

## Implementation Unit A: State Machine / Exit

- `TASK-CODE-001` (`REQ-V2-001`,`REQ-V2-002`,`REQ-V2-003`): add the state machine module under `src/strategy/`
- `TASK-CODE-002` (`REQ-V2-004`): add the combined ATR/BE/Hard Stop exit function
- `TASK-CODE-003` (`REQ-V2-008`): add the Kill Switch orchestrator in `src/core/kill_switch.py`
- `TEST-CODE-001`: test promotion to the highest state on a gap jump
- `TEST-CODE-002`: test EXIT priority

## Implementation Unit B: Labeling / Validation

- `TASK-CODE-004` (`REQ-V2-005`): add the Triple Barrier labeler module (`src/analysis/` or `src/strategy/`)
- `TASK-CODE-005` (`REQ-V2-006`): add the Walk-forward + Purge/Embargo split utility
- `TASK-CODE-006` (`REQ-V2-007`): make cost/slippage options mandatory in the backtest runner
- `TEST-CODE-003`: test first-touch label priority
- `TEST-CODE-004`: test leakage blocking

## Implementation Unit C: Session / Order Policy

- `TASK-CODE-007` (`REQ-V3-001`,`REQ-V3-002`): wire the session classifier/transition hook into `src/markets/schedule.py`
- `TASK-CODE-008` (`REQ-V3-003`,`REQ-V3-004`): add the blackout queue handler under `src/broker/`
- `TASK-CODE-009` (`REQ-V3-005`): add the per-session order-type validator
- `TEST-CODE-005`: test new-order blocking during blackouts
- `TEST-CODE-006`: test market-order rejection in low-liquidity sessions

## Implementation Unit D: Fills / FX / Overnight

- `TASK-CODE-010` (`REQ-V3-006`): implement the adverse-fill model in the backtest fill engine
- `TASK-CODE-011` (`REQ-V3-007`): add FX PnL separation tables/columns
- `TASK-CODE-012` (`REQ-V3-008`): implement conflict resolution between overnight exceptions and the Kill Switch
- `TEST-CODE-007`: test the adverse-fill model
- `TEST-CODE-008`: test new-entry restriction on FX buffer violation

## Implementation Unit E: Operations / Document Governance

- `TASK-OPS-001` (`REQ-OPS-001`): enforce timezone annotation in time fields and log schemas
- `TASK-OPS-002` (`REQ-OPS-002`): add a CI check that `01_requirements_registry.md` is changed first on policy-number changes
- `TASK-OPS-003` (`REQ-OPS-003`): keep the document validation gate that blocks any `REQ-*` lacking a `TASK-*` or a `TEST-*`

## Commit Rules

- Include the `TASK-*` in the commit message
- Include the `REQ-*`/`TEST-*` mapping table in the PR body
- Link at least one test per changed file
docs/ouroboros/40_acceptance_and_test_plan.md (new file, 63 lines)
@@ -0,0 +1,63 @@
<!--
Doc-ID: DOC-TEST-001
Version: 1.0.0
Status: active
Owner: strategy
Updated: 2026-02-26
-->

# Acceptance Criteria and Test Plan

## Acceptance Criteria

- `TEST-ACC-000` (`REQ-V2-001`): only the four states (`HOLDING`,`BE_LOCK`,`ARMED`,`EXITED`) are allowed in the state enum.
- `TEST-ACC-001` (`REQ-V2-002`): state transitions run as priority promotion, not sequential if-else.
- `TEST-ACC-010` (`REQ-V2-003`): the `EXITED` condition is evaluated before any other state.
- `TEST-ACC-011` (`REQ-V2-004`): the exit decision includes all four elements: Hard Stop / BE Lock / ATR / model assist.
- `TEST-ACC-012` (`REQ-V2-005`): Triple Barrier labels are decided by the first-touch rule.
- `TEST-ACC-013` (`REQ-V2-006`): train/validation splits apply Walk-forward + Purge/Embargo.
- `TEST-ACC-014` (`REQ-V2-007`): backtests refuse to run with cost/slippage/fill-failure options disabled.
- `TEST-ACC-002` (`REQ-V2-008`): the Kill Switch never violates its fixed execution order.
- `TEST-ACC-015` (`REQ-V3-001`): every order/log record stores a `session_id`.
- `TEST-ACC-016` (`REQ-V3-002`): risk parameters are reloaded on session-transition events.
- `TEST-ACC-003` (`REQ-V3-003`): no new-order API calls happen during a blackout.
- `TEST-ACC-017` (`REQ-V3-004`): the blackout queue executes only orders that pass post-recovery re-validation.
- `TEST-ACC-004` (`REQ-V3-005`): market orders in low-liquidity sessions are always rejected.
- `TEST-ACC-005` (`REQ-V3-006`): backtest fills yield more conservative P&L than naive close-price fills.
- `TEST-ACC-006` (`REQ-V3-007`): strategy P&L and FX P&L are aggregated separately.
- `TEST-ACC-018` (`REQ-V3-008`): Kill Switch priority holds even in the overnight-exception state.
- `TEST-ACC-007` (`REQ-OPS-001`): time fields fail validation when the timezone (KST/UTC) is missing.
- `TEST-ACC-008` (`REQ-OPS-002`): a policy-number change not reflected in the registry fails validation.
- `TEST-ACC-009` (`REQ-OPS-003`): any `REQ-*` without a `TASK-*`/`TEST-*` mapping fails validation.

## Test Tiers

1. Unit tests
   - State transitions, order-type validation, queue recovery logic, fill-price model

2. Integration tests
   - Session transition -> order policy -> risk engine wiring
   - Blackout start/end event wiring

3. Regression tests
   - Run the full existing `tests/` suite
   - Compare new feature flags ON vs OFF

4. Runtime / monitoring verification (mandatory)
   - After development completes, actually run the system and observe the critical paths
   - Required observations: order-blocking policy, Kill Switch behavior, alert/exception logs, session-transition logs
   - Attach evidence (run command / summarized logs) as a Runtime Verifier comment

## Run Commands

```bash
pytest -q
python3 scripts/validate_ouroboros_docs.py
```

## Failure Handling Rules

- On document-validation failure, implementation PRs must not be merged
- On a missing test mapping after a `REQ-*` change, merging is blocked
- On regression failure, isolate the offending module and re-verify
- Without runtime/monitoring evidence, verification approval is denied
docs/ouroboros/50_scenario_matrix_and_issue_taxonomy.md (new file, 68 lines)
@@ -0,0 +1,68 @@
<!--
Doc-ID: DOC-PM-001
Version: 1.0.0
Status: active
Owner: strategy
Updated: 2026-02-26
-->

# Live Scenario Matrix + Issue Taxonomy

Goal: map Happy Path / Failure Path / Ops Incident scenarios, in a form directly usable in operations, onto the traceable ID scheme (`REQ-*`, `TASK-*`, `TEST-*`).

## 1) Scenario Matrix

| Scenario ID | Type | Trigger | Expected System Behavior | Primary IDs (REQ/TASK/TEST) | Ticket Priority |
|---|---|---|---|---|---|
| `SCN-HAPPY-001` | Happy Path | Entry signal in the KR regular session, no blackout | Store `session_id` on order/logs, then send a policy-conformant order | `REQ-V3-001`, `TASK-V3-001`, `TASK-V3-003`, `TEST-ACC-015` | P1 |
| `SCN-HAPPY-002` | Happy Path | BE/ATR/Hard Stop conditions hit in sequence on an open position | State promotes immediately to the higher stage; `EXITED` evaluated first | `REQ-V2-002`, `REQ-V2-003`, `TASK-V2-004`, `TEST-ACC-001`, `TEST-ACC-010` | P0 |
| `SCN-HAPPY-003` | Happy Path | Session transition (KR->US) event | Risk parameters auto-reload; new session policy applies immediately | `REQ-V3-002`, `TASK-V3-002`, `TEST-ACC-016` | P0 |
| `SCN-HAPPY-004` | Happy Path | Backtest run requested | Refuse to run if cost/slippage/fill-failure options are missing; run when present | `REQ-V2-007`, `TASK-V2-012`, `TEST-ACC-014` | P1 |
| `SCN-FAIL-001` | Failure Path | New-order signal during a blackout | Block the order + queue the intent; no direct API call | `REQ-V3-003`, `REQ-V3-004`, `TASK-V3-005`, `TEST-ACC-003`, `TEST-ACC-017` | P0 |
| `SCN-FAIL-002` | Failure Path | Market order requested in a low-liquidity session | Hard-reject the market order; substitute a limit order or cancel | `REQ-V3-005`, `TASK-V3-007`, `TASK-V3-008`, `TEST-ACC-004` | P0 |
| `SCN-FAIL-003` | Failure Path | Kill Switch trigger (loss/connection/risk limit) | Enforce the order: block new orders -> cancel open orders -> re-query -> reduce risk -> snapshot | `REQ-V2-008`, `TASK-V2-013`, `TEST-ACC-002` | P0 |
| `SCN-FAIL-004` | Failure Path | US entry signal while the FX buffer is insufficient | Keep strategy/FX P&L separated; restrict new entries | `REQ-V3-007`, `TASK-V3-013`, `TASK-V3-014`, `TEST-ACC-006` | P1 |
| `SCN-OPS-001` | Ops Incident | Right after broker maintenance/blackout ends | Sync balances/open orders/fills, then execute only queue entries passing re-validation | `REQ-V3-004`, `TASK-V3-006`, `TEST-ACC-017` | P0 |
| `SCN-OPS-002` | Ops Incident | Policy number changed in code only, registry untouched | Fail document validation; block PR merge | `REQ-OPS-002`, `TASK-OPS-002`, `TEST-ACC-008` | P0 |
| `SCN-OPS-003` | Ops Incident | Logs/schedule data arrive without a timezone | Fail validation on records missing KST/UTC | `REQ-OPS-001`, `TASK-OPS-001`, `TEST-ACC-007` | P1 |
| `SCN-OPS-004` | Ops Incident | New REQ added with TASK/TEST missing | Traceability gate fails; implementation PR merge blocked | `REQ-OPS-003`, `TASK-OPS-003`, `TEST-ACC-009` | P0 |
| `SCN-OPS-005` | Ops Incident | Post-deploy runtime anomaly detected (order error / state-transition error / policy violation) | Runtime Verifier files an issue immediately; Dev fixes, re-observation decides closure | `REQ-V2-008`, `REQ-V3-003`, `REQ-V3-005`, `TEST-ACC-002`, `TEST-ACC-003`, `TEST-ACC-004` | P0 |

## 2) Issue Taxonomy

| Taxonomy | Definition | Typical Symptoms | Default Owner | Mapping Baseline |
|---|---|---|---|---|
| `EXEC-STATE` | State machine / exit-priority violation | Broken EXIT priority, state regression, gap-handling failure | Strategy | `REQ-V2-001`~`REQ-V2-004`, `TASK-V2-004`~`TASK-V2-006`, `TEST-ACC-000`,`001`,`010`,`011` |
| `EXEC-POLICY` | Session/order policy violation | Orders sent during blackout, market orders allowed in low liquidity | Broker/Execution | `REQ-V3-003`~`REQ-V3-005`, `TASK-V3-004`~`TASK-V3-009`, `TEST-ACC-003`,`004`,`017` |
| `BACKTEST-MODEL` | Backtest realism / validation-integrity violation | Runs with cost options off, over-optimistic fill prices | Research | `REQ-V2-006`,`REQ-V2-007`,`REQ-V3-006`, `TASK-V2-010`~`012`, `TASK-V3-010`~`012`, `TEST-ACC-013`,`014`,`005` |
| `RISK-EMERGENCY` | Kill Switch / emergency risk-response failure | Order-sequence violations, missing blocks, missing recovery steps | Risk | `REQ-V2-008`,`REQ-V3-008`, `TASK-V2-013`~`015`, `TASK-V3-015`, `TEST-ACC-002`,`018` |
| `FX-ACCOUNTING` | FX / currency buffer policy violation | Strategy and FX P&L mixed in aggregates, buffer not applied | Risk + Data | `REQ-V3-007`, `TASK-V3-013`,`014`, `TEST-ACC-006` |
| `OPS-GOVERNANCE` | Document/traceability/timezone governance violation | Registry untouched, TEST missing, timezone unannotated | PM + QA | `REQ-OPS-001`~`003`, `TASK-OPS-001`~`003`, `TEST-ACC-007`~`009` |
| `RUNTIME-VERIFY` | Live-behavior monitoring verification | Post-deploy anomalies, intermittent errors, regressions tests missed | Runtime Verifier + TPM | Related `REQ/TASK/TEST` plus runtime log evidence required |

## 3) Ticket Creation Rules (Implementable)

1. Every issue title includes `taxonomy + scenario_id`.
   e.g. `[EXEC-POLICY][SCN-FAIL-001] blackout order blocking missed`
2. Required body fields: reproduction steps, expected result, actual result, impact scope, rollback/mitigation.
3. The body names at least one `REQ-*`, one `TASK-*`, and one `TEST-*`.
4. Priority criteria:
   - P0: live-order risk, Kill Switch, blackout/market-order policy, traceability-gate failure
   - P1: potential P&L distortion (fills/FX/timezone), increased operational risk
   - P2: report/observability quality issues (no trading-safety impact)
5. `RUNTIME-VERIFY` issues filed by the Runtime Verifier must not be closed before Main Agent confirmation.

## 4) Recommended Initial Tickets (Starting Backlog)

- `TKT-P0-001`: `[EXEC-POLICY][SCN-FAIL-001]` e2e check of blackout blocking + queueing + post-recovery re-validation (`REQ-V3-003`,`REQ-V3-004`)
- `TKT-P0-002`: `[RISK-EMERGENCY][SCN-FAIL-003]` automate verification of the enforced Kill Switch order (`REQ-V2-008`)
- `TKT-P0-003`: `[OPS-GOVERNANCE][SCN-OPS-004]` continuously check the PR-blocking gate for missing REQ/TASK/TEST (`REQ-OPS-003`)
- `TKT-P1-001`: `[FX-ACCOUNTING][SCN-FAIL-004]` strengthen regression cases for entry restriction on FX buffer violation (`REQ-V3-007`)
- `TKT-P1-002`: `[BACKTEST-MODEL][SCN-HAPPY-004]` clarify the UX for rejecting backtests without cost/slippage settings (`REQ-V2-007`)
- `TKT-P0-004`: `[RUNTIME-VERIFY][SCN-OPS-005]` automate post-deploy runtime anomaly detection/reproduction/closure decisions

## 5) Operational Checkpoints

- Sprint planning starts only when 100% of `P0` scenario tests pass.
- Deployment approval prioritizes the `TEST-ACC-*` checks tied to `SCN-FAIL-*` and `SCN-OPS-*`.
- Policy-change PRs always modify the registry (`01_requirements_registry.md`) first.
docs/ouroboros/50_tpm_control_protocol.md (new file, 201 lines)
@@ -0,0 +1,201 @@
<!--
Doc-ID: DOC-TPM-001
Version: 1.0.0
Status: active
Owner: tpm
Updated: 2026-02-26
-->

# TPM Control Protocol (Main <-> PM <-> TPM <-> Dev <-> Verifier <-> Runtime Verifier)

Purpose:
- Force PM scenarios to be decomposed into implementable units, and development/verification to close on the same ID scheme (`REQ-*`, `TASK-*`, `TEST-*`).
- Each phase may advance only by passing its Entry/Exit gates.
- Main Agent approval/sign-off is mandatory at every major decision point.

## Team Roles

- Main Agent: final aggregation, prioritization, approval-gate owner
- PM Agent: scenario/requirement/ticket management
- TPM Agent: controls feasibility and completion rate between PM, Dev, and verification; owns ticket registration and implementation prioritization
- Dev Agent: implements; requests replanning when blocked
- Verifier Agent: verifies document/code/test artifacts
- Runtime Verifier Agent: monitors live behavior, files anomaly issues, rules on issue closure after fixes

Main Agent ideation duties:
- Accumulate new implementation ideas in a dedicated document while work is in progress.
- Location: [70_main_agent_ideation.md](./70_main_agent_ideation.md)
- Each entry must include an `IDEA-*` identifier, background, expected benefit, risks, and candidate follow-up tickets.

## Main Decision Checkpoints (Mandatory)

- DCP-01 scope lock: Main Agent approval required before Phase 0 closes
- DCP-02 requirements lock: Main Agent approval required before Phase 1 closes
- DCP-03 implementation start: Main Agent approval required before Phase 2 closes
- DCP-04 release approval: Main Agent final approval required after Phase 4 closes

## Phase Control Gates

### Phase 0: Scenario Intake and Scope Lock

Entry criteria:
- PM scenario submitted with user value, failure modes, and priority
- Impact scope (modules/sessions/KR-US markets) stated

Exit criteria:
- Scenario mapped 1:1 or 1:N onto `REQ-*` candidates
- Vague wording ("improve", "optimize") replaced with measurable conditions
- Out-of-scope items stated

Control checks:
- PM/TPM agreement reached
- Main Agent approval (DCP-01)
- Deliverables: scenario card, initial mapping memo

### Phase 1: Requirement Registry Gate

Entry criteria:
- Phase 0 deliverables approved
- Requirement documents to change identified

Exit criteria:
- `REQ-*` definitions/changes reflected in [01_requirements_registry.md](./01_requirements_registry.md)
- Every `REQ-*` is linkable to at least one `TASK-*` and one `TEST-*`
- Time/policy numbers fixed in the registry as the single source (`REQ-OPS-001`,`REQ-OPS-002`)

Control checks:
- `python3 scripts/validate_ouroboros_docs.py` passes
- Main Agent approval (DCP-02)
- Deliverables: updated requirements registry

### Phase 2: Design and Work-Order Gate

Entry criteria:
- Requirements registry updated
- Impacted modules analyzed (state machine, order policy, backtest, sessions)

Exit criteria:
- Work broken down in [10_phase_v2_execution.md](./10_phase_v2_execution.md), [20_phase_v3_execution.md](./20_phase_v3_execution.md), [30_code_level_work_orders.md](./30_code_level_work_orders.md)
- Every task has an implementation location, constraints, and completion criteria
- Risky tasks (Kill Switch, blackout, session transition) include separate rollback procedures

Control checks:
- TPM reviews for missing `REQ -> TASK` links
- Main Agent approval (DCP-03)
- Deliverables: approved work-order set

### Phase 3: Implementation Gate

Entry criteria:
- Approved `TASK-*` split into branch-sized work units
- A per-scope test plan linked in the PR body

Exit criteria:
- Code changes traceable to their `TASK-*`
- Constraints respected (no direct edits to `src/core/risk_manager.py`, etc.)
- At least one new or extended test per new piece of logic

Control checks:
- `REQ-*`/`TASK-*`/`TEST-*` mapping confirmed in the PR template
- Deliverables: reviewable PR

### Phase 4: Verification and Acceptance Gate

Entry criteria:
- Implementation PR in ready state
- Test cases/fixtures prepared

Exit criteria:
- All relevant `TEST-ACC-*` in [40_acceptance_and_test_plan.md](./40_acceptance_and_test_plan.md) pass
- Regression suite passes (`pytest -q`)
- Document validation passes (`python3 scripts/validate_ouroboros_docs.py`)

Control checks:
- Verifier attaches test evidence (logs/reports/run commands)
- Runtime Verifier approves the staging/production monitoring plan
- Deliverables: acceptance approval record

### Phase 5: Release and Post-Release Control

Entry criteria:
- Phase 4 approved
- Operational checklist ready (session transitions, blackouts, Kill Switch)

Exit criteria:
- No critical alerts during the initial post-deploy observation window
- New scenarios/regressions feed back into the next cycle's Phase 0
- Requirement/test document versions synchronized

Control checks:
- PM/TPM/Dev three-way closure confirmation
- Runtime Verifier reports operational monitoring issue states (new/in progress/resolved)
- Main Agent final approval (DCP-04)
- Deliverables: release notes + follow-up action list

## Replan Protocol (Dev -> TPM)

- Triggers:
  - Implementation infeasible (technical or external API constraints)
  - Development resources far above estimate (effort/staffing/dependency spikes)
- Procedure:
  1) Dev Agent files a `REPLAN-REQUEST` (impacted REQ/TASK, cause, alternatives, extra effort)
  2) TPM Agent performs first-pass review (scope reduction / phase split / requirement adjustment)
  3) After Verifier/PM input, Main Agent approval finalizes the replan
- Rules:
  - No replan executes without Main Agent approval
  - A confirmed replan must update the `REQ/TASK/TEST` documents at the same time

TPM ticket operating rules:
- TPM registers agreed changes as issues and assigns priority (`P0/P1/P2`).
- PR bodies must reflect the TPM-assigned priority and scope verbatim.
- Priority changes happen only via TPM proposal + Main Agent approval.
- PM/TPM/Dev/Reviewer/Verifier/Runtime Verifier leave PR comments at every major decision point to keep the rationale traceable.

Branch operating rules:
- TPM assigns each ticket the `ticket temp branch -> program feature branch` PR path.
- Ticket merges always target the program feature branch; `main` is used only at final integration.

## Runtime Verification Protocol

- The Runtime Verifier monitors real behavior (staging/production) after tests pass.
- On any anomalous behavior, file an issue immediately:
  - Title rule: `[RUNTIME-VERIFY][SCN-*] ...`
  - Required body: reproduction conditions, observed logs, impact scope, temporary mitigation, related `REQ/TASK/TEST`
- Issue closure rules:
  - Dev fix complete + Verifier re-verification passed + Runtime Verifier re-observation normal
  - Final closure approver is the Main Agent
- Mandatory completion steps:
  - Actually run the system (staging / local live mode)
  - Execute the monitoring checklist (key alerts / order path / exception logs)
  - Work is not considered complete unless results are attached as evidence in ticket/PR comments

## Server Reflection Rule

- `ticket temp branch -> program feature branch` merges may proceed automatically or manually after verification approval.
- `program feature branch -> main` merges are allowed only on explicit user approval.
- On a main merge, the Main Agent records the approval rationale in a PR comment.

## Acceptance Matrix (PM Scenario -> Dev Tasks -> Verifier Checks)

| PM Scenario | Requirement Coverage | Dev Tasks (Primary) | Verifier Checks (Must Pass) |
|---|---|---|---|
| Exit-first handling needed on gap crashes/spikes | `REQ-V2-001`,`REQ-V2-002`,`REQ-V2-003` | `TASK-V2-004`,`TASK-CODE-001` | `TEST-ACC-000`,`TEST-ACC-001`,`TEST-ACC-010`,`TEST-CODE-001`,`TEST-CODE-002` |
| Hard Stop + BE Lock + ATR + model assist unified in one engine | `REQ-V2-004` | `TASK-V2-005`,`TASK-V2-006`,`TASK-CODE-002` | `TEST-ACC-011` |
| Training data generated with no label leakage | `REQ-V2-005` | `TASK-V2-007`,`TASK-CODE-004` | `TEST-ACC-012`,`TEST-CODE-003` |
| Validation framework enforced as a leakage-proof time-series structure | `REQ-V2-006` | `TASK-V2-010`,`TASK-CODE-005` | `TEST-ACC-013`,`TEST-CODE-004` |
| Prevent over-optimistic backtests (force costs/slippage/failures) | `REQ-V2-007` | `TASK-V2-012`,`TASK-CODE-006` | `TEST-ACC-014` |
| Fixed Kill Switch execution order on failure | `REQ-V2-008` | `TASK-V2-013`,`TASK-V2-014`,`TASK-V2-015`,`TASK-CODE-003` | `TEST-ACC-002`,`TEST-ACC-018` |
| Consistent per-session risk/log tracking | `REQ-V3-001`,`REQ-V3-002` | `TASK-V3-001`,`TASK-V3-002`,`TASK-V3-003`,`TASK-CODE-007` | `TEST-ACC-015`,`TEST-ACC-016` |
| Block orders during blackout + re-validate after recovery | `REQ-V3-003`,`REQ-V3-004` | `TASK-V3-004`,`TASK-V3-005`,`TASK-V3-006`,`TASK-CODE-008` | `TEST-ACC-003`,`TEST-ACC-017`,`TEST-CODE-005` |
| Market orders forbidden in low-liquidity sessions | `REQ-V3-005` | `TASK-V3-007`,`TASK-V3-008`,`TASK-CODE-009` | `TEST-ACC-004`,`TEST-CODE-006` |
| Conservative fill model as the backtest default | `REQ-V3-006` | `TASK-V3-010`,`TASK-V3-011`,`TASK-V3-012`,`TASK-CODE-010` | `TEST-ACC-005`,`TEST-CODE-007` |
| Strategy/FX P&L separation + currency buffer control | `REQ-V3-007` | `TASK-V3-013`,`TASK-V3-014`,`TASK-CODE-011` | `TEST-ACC-006`,`TEST-CODE-008` |
| No conflict between overnight rules and the Kill Switch | `REQ-V3-008` | `TASK-V3-015`,`TASK-CODE-012` | `TEST-ACC-018` |
| Timezone/policy-change/traceability document governance | `REQ-OPS-001`,`REQ-OPS-002`,`REQ-OPS-003` | `TASK-OPS-001`,`TASK-OPS-002`,`TASK-OPS-003` | `TEST-ACC-007`,`TEST-ACC-008`,`TEST-ACC-009` |

## Operating Discipline (TPM Enforcement Rules)

- No PM scenario starts implementation without a `REQ-*`.
- No `REQ-*` is approved without a `TASK-*` and a `TEST-*`.
- The Verifier cannot approve on "code review passed" alone; `TEST-ACC-*` evidence is mandatory.
- The release approver must hold the release when the Phase 4 checklist is unmet.
docs/ouroboros/60_repo_enforcement_checklist.md (new file, 103 lines)
@@ -0,0 +1,103 @@
<!--
Doc-ID: DOC-OPS-002
Version: 1.0.0
Status: active
Owner: tpm
Updated: 2026-02-26
-->

# Repository Enforcement Checklist

Goal: enforce "strict verification operations" through repository settings, not just documents.

## 1) `main` Branch Protection (Required)

Settings:
- No direct pushes
- No force pushes
- No branch deletion
- Merges only via PR

Verification:
- Confirm that a direct `git push origin main` is rejected

## 2) Required Status Checks (Required)

Required CI checks:
- `validate_ouroboros_docs` (command: `python3 scripts/validate_ouroboros_docs.py`)
- `test` (command: `pytest -q`)

Settings baseline:
- Merging is blocked unless both checks are `success`
- Skipped/neutral check states are not accepted

## 3) Required Reviewer Rules (Recommended -> Required)

Role-based approval:
- One Verifier approval required
- One TPM or PM approval required
- Runtime Verifier approval required for runtime-impacting changes (flagged in the PR body)

Settings baseline:
- Minimum approvals: 2
- No author self-approval
- Re-review required when new commits are pushed

## 4) Workflow Gates

Pre-merge checklist:
- Linked issue exists (`Closes #N`)
- PR body contains the `REQ-*`, `TASK-*`, `TEST-*` mapping table
- No changes to `src/core/risk_manager.py`
- Main Agent sign-off recorded for the relevant decision checkpoint (DCP-01~04)
- Agent PR comments exist for major decisions (review findings / agreed fixes / verification approval)
- Ticket PR base is the program feature branch, not `main`

Automated checks:
- Document validation script passes
- Tests pass
- On dev completion, a runtime/monitoring evidence comment exists

## 5) Audit Trail

Evidence to retain:
- CI run log links
- Validation failure/recovery records
- Merge approval comments (Verifier/TPM)

Quarterly review:
- Branch-protection rule drift
- Renamed or missing required CI checks

## 6) Rollout Order (Operational Procedure)

1. Enable branch protection
2. Wire up the required CI checks
3. Apply the reviewer rules
4. Test the rejection scenario with a sample PR
5. Test the normal merge scenario

## 7) Actions on Failure

- Branch protection found disabled: halt releases immediately
- Required-CI bypass found: audit admin privileges and file an audit issue
- Review rules found disabled: restore the rules, then enforce a re-merge policy
- Attempted closure of an unresolved runtime issue: reopen immediately + halt releases

## 8) Dev Replan Operating Rules

- After a Dev `REPLAN-REQUEST`, no scope/schedule change without TPM review
- A `REPLAN-REQUEST` stays in "proposed" state until Main Agent approval
- An approved replan is valid only with simultaneous `REQ/TASK/TEST` document updates

## 9) Server Reflection Rules

- Ticket PRs (`feature/issue-* -> feature/{stream}`) may merge after verification approval.
- Final integration PRs (`feature/{stream} -> main`) must not run `tea pulls merge` before explicit user approval.
- An approval-rationale comment is mandatory on main merges.

## 10) Final `main` Merge Conditions

- All tickets merged into the program feature branch
- Runtime Verifier runtime/monitoring verification complete
- The `feature -> main` PR merges only after the user's final approval comment
docs/ouroboros/70_main_agent_ideation.md (new file, 48 lines)
@@ -0,0 +1,48 @@
<!--
Doc-ID: DOC-IDEA-001
Version: 1.0.0
Status: active
Owner: main-agent
Updated: 2026-02-26
-->

# Main Agent Ideation Backlog

Purpose:
- Temporarily store new implementation ideas that surface during development, before they enter the plan.
- This document is a review queue for deciding, after user review, whether an idea joins the next plan.

Operating rules:
- Each idea uses an `IDEA-*` identifier.
- Required fields: background, expected benefit, risks, candidate follow-up tickets.
- Status is one of `proposed`, `under-review`, `accepted`, `rejected`.

## Idea List

- `IDEA-001` (status: proposed)
  - Title: split the Kill-Switch global state from one process-wide flag into per-market/per-session state
  - Background: the current global block flag can limit per-market control
  - Expected benefit: when running KR/US in parallel, a failure in one market no longer needlessly blocks orders in the other
  - Risks: higher state-synchronization complexity, more test cases needed
  - Candidate ticket: `TKT-P1-KS-SCOPE-SPLIT`

- `IDEA-002` (status: proposed)
  - Title: pin the Exit Engine input contract (ATR/peak/model_prob/liquidity) as a standard DTO in the data pipeline
  - Background: some ATR/model-probability inputs are currently fallback-based, weakening operational consistency
  - Expected benefit: stronger backtest-live input isomorphism, easier regression analysis
  - Risks: more integration work for the existing scanner/scenario engine
  - Candidate ticket: `TKT-P1-EXIT-CONTRACT`

- `IDEA-003` (status: proposed)
  - Title: Runtime Verifier automatic issue generator (log pattern -> issue template automation)
  - Background: runtime anomaly reports are mostly hand-written, risking omissions
  - Expected benefit: shorter lead time from detection to issue filing, standardized evidence
  - Risks: possible flood of false-positive issues; filtering rules needed
  - Candidate ticket: `TKT-P1-RUNTIME-AUTO-ISSUE`

- `IDEA-004` (status: proposed)
  - Title: automatic PR-comment workflow audit (block missing reviewer -> dev-discussion -> verification-approval chains)
  - Background: the current process is strong but compliance checking is manual
  - Expected benefit: prevents skipped steps, automates the audit trail
  - Risks: CLI/API integration maintenance cost
  - Candidate ticket: `TKT-P0-WORKFLOW-GUARD`
docs/ouroboros/README.md (new file, 40 lines)
@@ -0,0 +1,40 @@
<!--
Doc-ID: DOC-ROOT-001
Version: 1.0.0
Status: active
Owner: strategy
Updated: 2026-02-26
-->

# The Ouroboros Execution Document Hub

This folder decomposes `ouroboros_plan_v2.txt` and `ouroboros_plan_v3.txt` into implementable work orders.

## Reading Order (Routing)

1. Validation system first: [00_validation_system.md](./00_validation_system.md)
2. Single source of truth (requirements): [01_requirements_registry.md](./01_requirements_registry.md)
3. v2 execution work order: [10_phase_v2_execution.md](./10_phase_v2_execution.md)
4. v3 execution work order: [20_phase_v3_execution.md](./20_phase_v3_execution.md)
5. Code-level work orders: [30_code_level_work_orders.md](./30_code_level_work_orders.md)
6. Acceptance criteria / test plan: [40_acceptance_and_test_plan.md](./40_acceptance_and_test_plan.md)
7. PM scenarios / issue taxonomy: [50_scenario_matrix_and_issue_taxonomy.md](./50_scenario_matrix_and_issue_taxonomy.md)
8. TPM control protocol / acceptance matrix: [50_tpm_control_protocol.md](./50_tpm_control_protocol.md)
9. Repository enforcement checklist: [60_repo_enforcement_checklist.md](./60_repo_enforcement_checklist.md)
10. Main Agent ideation backlog: [70_main_agent_ideation.md](./70_main_agent_ideation.md)

## Operating Rules

- Plan changes always start with the ID definitions in `01_requirements_registry.md`.
- Implementation documents reference only registry IDs and never mint their own numbers/policies.
- The document quality ruleset (`RULE-DOC-001` `RULE-DOC-002` `RULE-DOC-003` `RULE-DOC-004` `RULE-DOC-005` `RULE-DOC-006`) is applied per [00_validation_system.md](./00_validation_system.md).
- The following validation must pass before any document merge.

```bash
python3 scripts/validate_ouroboros_docs.py
```

## Source Plan Documents

- [v2](/home/agentson/repos/The-Ouroboros/ouroboros_plan_v2.txt)
- [v3](/home/agentson/repos/The-Ouroboros/ouroboros_plan_v3.txt)
@@ -7,6 +7,32 @@

---

## 2026-02-21

### Bug Found While Checking Trade Status (#187)

- A trade-status check request revealed that SELL orders (stop-loss/take-profit) were not executing at all, blocked by the Fat Finger check
- **#187 (Critical)**: Fat Finger false positive on SELL orders: the `order_amount/total_cash > 30%` rule also applied to SELLs, so large positions could not be sold
- JELD stop-loss -6.20% → blocked; RXT take-profit +46.13% → blocked
- Fix: SELLs now call only `check_circuit_breaker` and no longer call `validate_order` (which includes the Fat Finger check); a sketch of the asymmetric gate follows
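
A minimal sketch of the resulting asymmetric gate; the method names mirror the description above, while the wrapper function and its signature are assumptions:

```python
# Hypothetical sketch of the post-#187 gate, not the repo's exact API.
def pre_order_risk_check(risk_manager, side: str, order_amount: float,
                         total_cash: float, current_pnl_pct: float) -> None:
    # The circuit breaker applies to every order, BUY or SELL.
    risk_manager.check_circuit_breaker(current_pnl_pct)
    if side == "BUY":
        # Fat Finger (order_amount/total_cash > 30%) guards BUYs only,
        # so large positions can still be liquidated.
        risk_manager.validate_order(order_amount=order_amount,
                                    total_cash=total_cash)
```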

---

## 2026-02-20

### Continuous Monitoring and Improvement Discovery (Issues #178~#182)

- Requested to run with the dashboard, monitor intermittent problems, and derive improvements automatically
- Issues found while monitoring:
  - **#178**: uvicorn not installed → dashboard dead plus a misleading startup log → uvicorn installed
  - **#179 (Critical)**: after an insufficient-balance order failure, infinite retries every cycle (MLECW repeated for 20+ minutes)
  - **#180**: Telegram 409 conflicts when running multiple instances
  - **#181**: implied_rsi formula saturation (change_rate ≥ 12.5% → RSI = 100)
  - **#182 (Critical)**: held symbols were caught by the SmartScanner volatility filter, so no SELL signals were generated → zero SELL fills, balance drained
- Requirement: automate monitoring and produce periodic improvement reports

---

## 2026-02-05

### API Efficiency

@@ -86,3 +112,246 @@
- Four categories: Plan Consistency (required), Safety & Constraints, Quality, Workflow

**Issue/PR:** #114

---

## 2026-02-16

### Documentation v2 Sync (Full Refresh)

**Background:**
- After the v2 features landed, the docs had drifted far from the actual code
- Docs recorded 54 tests / 4 files; reality was 551 tests / 25 files
- Core v2 features (Playbook, Scenario Engine, Dashboard, Telegram Commands, Daily Review, Context System, Backup) were undocumented

**Requirements:**
1. `docs/testing.md`: reflect 551 tests / 25 files, describe every test file
2. `docs/architecture.md`: add v2 components (Strategy, Context, Dashboard, Decision Logger, etc.), Playbook Mode data flow, the 5-table DB schema, v2 environment variables
3. `docs/commands.md`: dashboard run commands, reference for the 9 Telegram commands
4. `CLAUDE.md`: expand the Project Structure tree, update test counts, `--dashboard` flag
5. `docs/skills.md`: unify the DB filename as `trades.db`, add dashboard commands
6. Keep still-valid troubleshooting notes, code examples, etc.

**Outcome:**
- 6 documentation files updated
- An earlier attempt (2 commits) deleted too much existing content and was discarded; redone from main

**Issue/PR:** #131, PR #134

### Overseas Scanner Improvements: Ranking Integration + Volatility-First Selection

**Background:**
- In live `run_overnight` operation, zero trades occurred throughout the US session
- Cause: a structural mismatch, the overseas market still used the domestic ranking/daily-candle API paths

**Requirements:**
1. Support ranking-API-based universe discovery for overseas markets too
2. Prefer **high-volatility symbols**, not simply top gainers or top turnover
3. No fixed-ticker fallback

**Outcome:**
- `src/broker/overseas.py`
  - Added `fetch_overseas_rankings()` (fluctuation / volume)
  - Overseas ranking API path/TR_ID overridable via settings
- `src/analysis/smart_scanner.py`
  - Market-aware scanning (domestic/overseas split)
  - Overseas: ranking-API universe + volatility-first score (daily change rate vs intraday high-low range)
  - Turnover/volume rankings used as liquidity-adjustment scores
  - On ranking failure, only the dynamic universe (active/recent/holdings) is used
- `src/config.py`
  - Added `OVERSEAS_RANKING_*` settings

**Effects:**
- Fewer stalls with zero overseas scanner candidates
- Selection criteria shifted from simple gains to volatility
- Market-driven movers detectable without fixed tickers

### Domestic Scanner / Order-Quantity Alignment: Volatility-First + Risk Targeting

**Background:**
- Only overseas ran volatility-first; domestic still centered on RSI/volume filters, so cross-market strategy consistency was low
- Buy quantity was fixed at 1 share, making exposure management across volatility regimes difficult

**Requirements:**
1. Unify the domestic scanner with overseas on volatility-first selection
2. Apply a sizing formula that shrinks positions for higher-volatility symbols (see the sketch after this section)

**Outcome:**
- `src/analysis/smart_scanner.py`
  - Domestic: switched to `fluctuation ranking + volume ranking bonus` scoring
  - Score centers on `max(abs(change_rate), intraday_range_pct)`
  - Hardened parsing of the domestic ranking response schema keys (`price`, `change_rate`, `volume`)
- `src/main.py`
  - Added `_determine_order_quantity()`
  - BUYs size dynamically from the volatility score
  - Both `trading_cycle` and `run_daily_session` paths use the same sizing logic
- `src/config.py`
  - Added `POSITION_SIZING_*` settings

**Effects:**
- Domestic/overseas scanner criteria unified around volatility
- Exposure shrinks automatically in high-volatility regimes; under-entry eased in low-volatility regimes
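
A minimal inverse-volatility sizing sketch in the spirit of `_determine_order_quantity()`; the budget and cap parameters are illustrative, not the actual `POSITION_SIZING_*` defaults:

```python
# Illustrative sizing only; the real function and its parameters live in
# src/main.py and src/config.py.
def determine_order_quantity(price: float, total_cash: float,
                             volatility_score: float,
                             risk_budget_pct: float = 2.0,
                             max_position_pct: float = 10.0) -> int:
    """Risk a fixed % of cash per trade; higher volatility -> fewer shares."""
    if price <= 0 or volatility_score <= 0:
        return 0
    risk_budget = total_cash * risk_budget_pct / 100
    per_share_risk = price * volatility_score / 100  # score read as an expected move %
    qty = int(risk_budget / per_share_risk)
    max_qty = int(total_cash * max_position_pct / 100 / price)  # exposure cap
    return max(min(qty, max_qty), 0)
```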

## 2026-02-18

### KIS Overseas Ranking API 404 Fix

**Background:**
- The KIS overseas-stock ranking API (`fetch_overseas_rankings`) returned HTTP 404 on every exchange
- The Smart Scanner found no overseas candidates, so no trades executed at all

**Root cause:**
- The TR_ID, API path, and exchange codes all disagreed with the official KIS documentation

**Outcome:**
- `src/config.py`: TR_ID/path defaults corrected to the official KIS spec
- `src/broker/overseas.py`: added the ranking-API-specific exchange-code mapping (NASD→NAS, NYSE→NYS, AMEX→AMS), used the correct API parameters
- `tests/test_overseas_broker.py`: 19 unit tests added

**Effects:**
- Overseas ranking scans work, so the Smart Scanner can detect candidates (a mapping sketch follows)
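
A minimal sketch of the described mapping; the dict mirrors `_RANKING_EXCHANGE_MAP` but is illustrative:

```python
# Illustrative version of the ranking-API exchange-code mapping above.
RANKING_EXCHANGE_MAP = {
    "NASD": "NAS",  # NASDAQ
    "NYSE": "NYS",  # New York Stock Exchange
    "AMEX": "AMS",  # NYSE American
}

def to_ranking_exchange(code: str) -> str:
    """Translate an order-API exchange code into the ranking-API variant."""
    return RANKING_EXCHANGE_MAP.get(code, code)
```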

### Gemini prompt_override Not Applied: Bug Fix

**Background:**
- Running `run_overnight`, playbook generation failed in every market (`JSONDecodeError`)
- Fell back to the defensive playbook, so every symbol was HOLD

**Root cause:**
- `pre_market_planner.py` put the playbook-specific prompt in `market_data["prompt_override"]` and called `gemini.decide()`
- `decide()` in `gemini_client.py` never checked the `prompt_override` key and always built the generic trade-decision prompt
- Gemini returned a generic trade decision instead of playbook JSON, so parsing failed

**Outcome:**
- `src/brain/gemini_client.py`: `decide()` now prefers `prompt_override` when present
- `tests/test_brain.py`: 3 tests added (override passed through, optimization bypass, unchanged behavior when unset)

**Issue/PR:** #143

### Root-Cause Analysis and Fix for Zero US-Session Trades (Autonomous Session)

**Background:**
- User request: when the US market opens, run the program and find and fix, without fail, why it has never traded
- Ran the program from before the US open (9:30 AM EST) and analyzed live logs

**Root cause #1: Defensive Playbook has no BUY conditions**

- Gemini free tier (20 RPD) exhausted → `generate_playbook()` failed → `_defensive_playbook()` fallback
- The defensive playbook has only the `price_change_pct_below: -3.0 → SELL` condition, no BUY conditions
- ScenarioEngine always returned HOLD → zero trades

**Fix #1 (PR #146, Issue #145):**
- `src/strategy/pre_market_planner.py`: added the `_smart_fallback_playbook()` method
- Generates BUY conditions from scanner signals: `momentum → volume_ratio_above`, `oversold → rsi_below`
- Keeps the existing defensive stop-loss SELL condition
- On Gemini failure, switch from defensive → smart fallback
- 10 tests added

**Root cause #2: price-API exchange-code mismatch + VTS balance API error**

Actual logs:
```
Scenario matched for MRNX: BUY (confidence=80) ✓
Decision for EWUS (NYSE American): BUY (confidence=80) ✓
Skip BUY APLZ (NYSE American): no affordable quantity (cash=0.00, price=0.00) ✗
```

- `get_overseas_price()` sent `NASD`/`NYSE`/`AMEX` → the API expects `NAS`/`NYS`/`AMS` → empty response → `price=0`
- `VTTS3012R` balance API: "ERROR : INPUT INVALID_CHECK_ACNO" → `total_cash=0`
- Result: `_determine_order_quantity()` returned 0 → orders skipped

**Fix #2 (PR #148, Issue #147):**
- `src/broker/overseas.py`: added `_PRICE_EXCHANGE_MAP = _RANKING_EXCHANGE_MAP`, applied the mapping to the price API
- `src/config.py`: `PAPER_OVERSEAS_CASH: float = Field(default=50000.0)`, a simulated balance for paper mode
- `src/main.py`: fall back to PAPER_OVERSEAS_CASH when the balance is 0, and to candidate.price when the price is 0
- 8 tests added

**Effects:**
- The pipeline from a BUY decision to an actual order submission works end to end
- Paper mode can simulate trades regardless of the KIS VTS overseas balance API error

**Issue/PR:** #145, #146, #147, #148

### Overseas Market-Order Rejection Fix (Fix #3, Found in Sequence)

**Background:**
- After fix #147, orders started going out → KIS VTS rejected them: "지정가만 가능한 상품입니다" (this product accepts limit orders only)

**Root cause:**
- Both `trading_cycle()` and `run_daily_session()` hardcoded `send_overseas_order(price=0.0)`
- `price=0` → `ORD_DVSN="01"` (market order) sent → KIS VTS rejected it
- Fix #147 already computed `current_price` correctly but never used it when ordering

**Outcome:**
- `src/main.py`: in both spots, `price=0.0` → `price=current_price` / `price=stock_data["current_price"]`
- `tests/test_main.py`: regression test `test_overseas_buy_order_uses_limit_price` added

**Final confirmation log** (paper buy order completed):
```
Order result: 모의투자 매수주문이 완료 되었습니다. ✓
```

**Issue/PR:** #149, #150

---

## 2026-02-23

### Domestic Limit-Order Switch and Open-Order Handling (#232)

**Background:**
- Overseas switched to limit orders in #211, but domestic still used `price=0` (market orders)
- KRX carries the same unfilled-order risk once limit orders are used
- Implement the limit-order switch together with open-order handling

**Implementation:**

1. `src/broker/kis_api.py`
   - `get_domestic_pending_orders()`: paper returns `[]` immediately; live does a `TTTC0084R` GET
   - `cancel_domestic_order()`: live `TTTC0013U` / paper `VTTC0013U`, hashkey required

2. `src/main.py`
   - Added the `kr_round_down` import
   - Domestic orders in `trading_cycle` and `run_daily_session` switch from `price=0` to limit prices:
     BUY +0.2% / SELL -0.2%, rounded to the KRX tick with `kr_round_down` (see the sketch after this list)
   - `handle_domestic_pending_orders`: BUY → cancel + cooldown; SELL → cancel + reorder (-0.4%, at most once)
   - Added the domestic pending-order check in both daily and realtime modes

3. 14 tests added:
   - `TestGetDomesticPendingOrders` (3), `TestCancelDomesticOrder` (5)
   - `TestHandleDomesticPendingOrders` (4), `TestDomesticLimitOrderPrice` (2)
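
A minimal sketch of the limit-price rule; the real `kr_round_down` handles KRX's price-band-dependent tick sizes, so the stand-in below is a simplification:

```python
# Simplified stand-in; the actual kr_round_down picks the tick from the
# KRX price band rather than taking it as a parameter.
def kr_round_down(price: float, tick: float = 10.0) -> float:
    """Round down to the KRX tick size."""
    return int(price / tick) * tick

def domestic_limit_price(side: str, current_price: float) -> float:
    """BUY +0.2% / SELL -0.2%, then snap down to the tick grid."""
    offset = 1.002 if side == "BUY" else 0.998
    return kr_round_down(current_price * offset)
```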

**Issue/PR:** #232, PR #233

---

## 2026-02-24

### Overseas Balance Ghost-Position Fix: Stop the Repeated "모의투자 잔고내역이 없습니다" Error (#235)

**Background:**
- In paper trading, expired/halted symbols such as MLECW, KNRX, NBY, and SNSE raised the
  `모의투자 잔고내역이 없습니다` (no paper-trading balance records) error every cycle

**Root cause:**
1. `ovrs_cblc_qty` (total overseas balance quantity) vs `ord_psbl_qty` (orderable, i.e. actually sellable, quantity)
   - Old code preferred `ovrs_cblc_qty` → an expired warrant showed `ovrs_cblc_qty=289456` while its actual `ord_psbl_qty=0`
   - startup sync / build_overseas_symbol_universe recorded these symbols as positions
2. On SELL failure the DB position never closed, so the next cycle retried (infinite loop)

**Implementation:**

1. `src/main.py`: `_extract_held_codes_from_balance`, `_extract_held_qty_from_balance`
   - Changed the overseas balance field priority: `ord_psbl_qty` → `ovrs_cblc_qty` → `hldg_qty` (fallback kept); a sketch follows this list
   - Per the official KIS docs (VTTS3012R), `ord_psbl_qty` is the actually sellable quantity

2. `src/main.py`: ghost-close handling in `trading_cycle`
   - When an overseas SELL fails with `잔고내역이 없습니다`, close the DB position with a `[ghost-close]` SELL
   - Prevents infinite retries on exchange-code mismatches and similar edge cases

3. 7 tests added:
   - `TestExtractHeldQtyFromBalance` (3): ord_psbl_qty priority, return 0 when 0, fallback
   - `TestExtractHeldCodesFromBalance` (2): exclude symbols with ord_psbl_qty=0, fallback
   - `TestOverseasGhostPositionClose` (2): ghost-close log confirmed, ordinary errors ignored
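
A minimal sketch of the field-priority chain; the real helpers live in `src/main.py`, this stand-in only illustrates the fallback order:

```python
# Illustrative only; mirrors the described ord_psbl_qty-first priority.
def extract_held_qty(row: dict) -> int:
    """Prefer ord_psbl_qty (actually sellable) over raw balance fields."""
    for field in ("ord_psbl_qty", "ovrs_cblc_qty", "hldg_qty"):
        value = row.get(field)
        if value is not None and str(value).strip() != "":
            return int(float(value))   # a present "0" correctly yields 0
    return 0
```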

**Issue/PR:** #235, PR #236
@@ -34,6 +34,12 @@ python -m src.main --mode=paper
```
Runs the agent in paper-trading mode (no real orders).

### Start Trading Agent with Dashboard
```bash
python -m src.main --mode=paper --dashboard
```
Runs the agent with FastAPI dashboard on `127.0.0.1:8080` (configurable via `DASHBOARD_HOST`/`DASHBOARD_PORT`).

### Start Trading Agent (Production)
```bash
docker compose up -d ouroboros
@@ -59,7 +65,7 @@ Analyze the last 30 days of trade logs and generate performance metrics.
python -m src.evolution.optimizer --evolve
```
Triggers the evolution engine to:
1. Analyze `trade_logs.db` for failing patterns
1. Analyze `trades.db` for failing patterns
2. Ask Gemini to generate a new strategy
3. Run tests on the new strategy
4. Create a PR if tests pass
@@ -91,12 +97,12 @@ curl http://localhost:8080/health

### View Trade Logs
```bash
sqlite3 data/trade_logs.db "SELECT * FROM trades ORDER BY timestamp DESC LIMIT 20;"
sqlite3 data/trades.db "SELECT * FROM trades ORDER BY timestamp DESC LIMIT 20;"
```

### Export Trade History
```bash
sqlite3 -header -csv data/trade_logs.db "SELECT * FROM trades;" > trades_export.csv
sqlite3 -header -csv data/trades.db "SELECT * FROM trades;" > trades_export.csv
```

## Safety Checklist (Pre-Deploy)
docs/testing.md (206 lines)
@@ -2,51 +2,29 @@

## Test Structure

**54 tests** across four files. `asyncio_mode = "auto"` in pyproject.toml — async tests need no special decorator.
**551 tests** across **25 files**. `asyncio_mode = "auto"` in pyproject.toml — async tests need no special decorator.

The `settings` fixture in `conftest.py` provides safe defaults with test credentials and in-memory DB.

### Test Files

#### `tests/test_risk.py` (11 tests)
- Circuit breaker boundaries
- Fat-finger edge cases
#### Core Components

##### `tests/test_risk.py` (14 tests)
- Circuit breaker boundaries and exact threshold triggers
- Fat-finger edge cases and percentage validation
- P&L calculation edge cases
- Order validation logic

**Example:**
```python
def test_circuit_breaker_exact_threshold(risk_manager):
    """Circuit breaker should trip at exactly -3.0%."""
    with pytest.raises(CircuitBreakerTripped):
        risk_manager.validate_order(
            current_pnl_pct=-3.0,
            order_amount=1000,
            total_cash=10000
        )
```

#### `tests/test_broker.py` (6 tests)
##### `tests/test_broker.py` (11 tests)
- OAuth token lifecycle
- Rate limiting enforcement
- Hash key generation
- Network error handling
- SSL context configuration

**Example:**
```python
async def test_rate_limiter(broker):
    """Rate limiter should delay requests to stay under 10 RPS."""
    start = time.monotonic()
    for _ in range(15):  # 15 requests
        await broker._rate_limiter.acquire()
    elapsed = time.monotonic() - start
    assert elapsed >= 1.0  # Should take at least 1 second
```

#### `tests/test_brain.py` (18 tests)
- Valid JSON parsing
- Markdown-wrapped JSON handling
##### `tests/test_brain.py` (24 tests)
- Valid JSON parsing and markdown-wrapped JSON handling
- Malformed JSON fallback
- Missing fields handling
- Invalid action validation
@@ -54,33 +32,143 @@ async def test_rate_limiter(broker):
- Empty response handling
- Prompt construction for different markets

**Example:**
```python
async def test_confidence_below_threshold_forces_hold(brain):
    """Decisions below confidence threshold should force HOLD."""
    decision = brain.parse_response('{"action":"BUY","confidence":70,"rationale":"test"}')
    assert decision.action == "HOLD"
    assert decision.confidence == 70
```

#### `tests/test_market_schedule.py` (19 tests)
##### `tests/test_market_schedule.py` (24 tests)
- Market open/close logic
- Timezone handling (UTC, Asia/Seoul, America/New_York, etc.)
- DST (Daylight Saving Time) transitions
- Weekend handling
- Lunch break logic
- Weekend handling and lunch break logic
- Multiple market filtering
- Next market open calculation

**Example:**
```python
def test_is_market_open_during_trading_hours():
    """Market should be open during regular trading hours."""
    # KRX: 9:00-15:30 KST, no lunch break
    market = MARKETS["KR"]
    trading_time = datetime(2026, 2, 3, 10, 0, tzinfo=ZoneInfo("Asia/Seoul"))  # Monday 10:00
    assert is_market_open(market, trading_time) is True
```

##### `tests/test_db.py` (3 tests)
- Database initialization and table creation
- Trade logging with all fields (market, exchange_code, decision_id)
- Query and retrieval operations

##### `tests/test_main.py` (37 tests)
- Trading loop orchestration
- Market iteration and stock processing
- Dashboard integration (`--dashboard` flag)
- Telegram command handler wiring
- Error handling and graceful shutdown

#### Strategy & Playbook (v2)

##### `tests/test_pre_market_planner.py` (37 tests)
- Pre-market playbook generation
- Gemini API integration for scenario creation
- Timeout handling and defensive playbook fallback
- Multi-market playbook generation

##### `tests/test_scenario_engine.py` (44 tests)
- Scenario matching against live market data
- Confidence scoring and threshold filtering
- Multiple scenario type handling
- Edge cases (no match, partial match, expired scenarios)

##### `tests/test_playbook_store.py` (23 tests)
- Playbook persistence to SQLite
- Date-based retrieval and market filtering
- Playbook status management (generated, active, expired)
- JSON serialization/deserialization

##### `tests/test_strategy_models.py` (33 tests)
- Pydantic model validation for scenarios, playbooks, decisions
- Field constraints and default values
- Serialization round-trips

#### Analysis & Scanning

##### `tests/test_volatility.py` (24 tests)
- ATR and RSI calculation accuracy
- Volume surge ratio computation
- Momentum scoring
- Breakout/breakdown pattern detection
- Market scanner watchlist management

##### `tests/test_smart_scanner.py` (13 tests)
- Python-first filtering pipeline
- RSI and volume ratio filter logic
- Candidate scoring and ranking
- Fallback to static watchlist

#### Context & Memory

##### `tests/test_context.py` (18 tests)
- L1-L7 layer storage and retrieval
- Context key-value CRUD operations
- Timeframe-based queries
- Layer metadata management

##### `tests/test_context_scheduler.py` (5 tests)
- Periodic context aggregation scheduling
- Layer summarization triggers

#### Evolution & Review

##### `tests/test_evolution.py` (24 tests)
- Strategy optimization loop
- High-confidence losing trade analysis
- Generated strategy validation

##### `tests/test_daily_review.py` (10 tests)
- End-of-day review generation
- Trade performance summarization
- Context layer (L6_DAILY) integration

##### `tests/test_scorecard.py` (3 tests)
- Daily scorecard metrics calculation
- Win rate, P&L, confidence tracking

#### Notifications & Commands

##### `tests/test_telegram.py` (25 tests)
- Message sending and formatting
- Rate limiting (leaky bucket)
- Error handling (network timeout, invalid token)
- Auto-disable on missing credentials
- Notification types (trade, circuit breaker, fat-finger, market events)

##### `tests/test_telegram_commands.py` (31 tests)
- 9 command handlers (/help, /status, /positions, /report, /scenarios, /review, /dashboard, /stop, /resume)
- Long polling and command dispatch
- Authorization filtering by chat_id
- Command response formatting

#### Dashboard

##### `tests/test_dashboard.py` (14 tests)
- FastAPI endpoint responses (8 API routes)
- Status, playbook, scorecard, performance, context, decisions, scenarios
- Query parameter handling (market, date, limit)

#### Performance & Quality

##### `tests/test_token_efficiency.py` (34 tests)
- Gemini token usage optimization
- Prompt size reduction verification
- Cache effectiveness

##### `tests/test_latency_control.py` (30 tests)
- API call latency measurement
- Rate limiter timing accuracy
- Async operation overhead

##### `tests/test_decision_logger.py` (9 tests)
- Decision audit trail completeness
- Context snapshot capture
- Outcome tracking (P&L, accuracy)

##### `tests/test_data_integration.py` (38 tests)
- External data source integration
- News API, market data, economic calendar
- Error handling for API failures

##### `tests/test_backup.py` (23 tests)
- Backup scheduler and execution
- Cloud storage (S3) upload
- Health monitoring
- Data export functionality

## Coverage Requirements

@@ -91,20 +179,6 @@
```
pytest -v --cov=src --cov-report=term-missing
```

Expected output:
```
Name                          Stmts   Miss  Cover   Missing
-----------------------------------------------------------
src/brain/gemini_client.py       85      5    94%   165-169
src/broker/kis_api.py           120     12    90%   ...
src/core/risk_manager.py         35      2    94%   ...
src/db.py                        25      1    96%   ...
src/main.py                     150     80    47%   (excluded from CI)
src/markets/schedule.py          95      3    97%   ...
-----------------------------------------------------------
TOTAL                           510    103    80%
```

**Note:** `main.py` has lower coverage as it contains the main loop which is tested via integration/manual testing.

## Test Configuration

@@ -5,14 +5,76 @@
**CRITICAL: All code changes MUST follow this workflow. Direct pushes to `main` are ABSOLUTELY PROHIBITED.**

1. **Create Gitea Issue First** — All features, bug fixes, and policy changes require a Gitea issue before any code is written
2. **Create Feature Branch** — Branch from `main` using format `feature/issue-{N}-{short-description}`
   - After creating the branch, run `git pull origin main` and rebase to ensure the branch is up to date
3. **Implement Changes** — Write code, tests, and documentation on the feature branch
4. **Create Pull Request** — Submit PR to `main` branch referencing the issue number
5. **Review & Merge** — After approval, merge via PR (squash or merge commit)
2. **Create Program Feature Branch** — Branch from `main` for the whole development stream
   - Format: `feature/{epic-or-stream-name}`
3. **Create Ticket Temp Branch** — Branch from the program feature branch per ticket
   - Format: `feature/issue-{N}-{short-description}`
4. **Implement Per Ticket** — Write code, tests, and documentation on the ticket temp branch
5. **Create Pull Request to Program Feature Branch** — `feature/issue-N-* -> feature/{stream}`
6. **Review/Verify and Merge into Program Feature Branch** — user approval not required
7. **Final Integration PR to main** — Only after all ticket stages complete and explicit user approval

**Never commit directly to `main`.** This policy applies to all changes, no exceptions.

## Branch Strategy (Mandatory)

- Team operation default branch is the **program feature branch**, not `main`.
- Ticket-level development happens only on **ticket temp branches** cut from the program feature branch.
- Ticket PR merges into program feature branch are allowed after verifier approval.
- Until final user sign-off, `main` merge is prohibited.
- Every agent actively leaves a PR comment at each major decision (review findings, fix direction, verification approval) so the decision trail is preserved.

## Gitea CLI Formatting Troubleshooting

When writing issue/PR bodies, newlines (`\n`) may repeatedly get stored as literal characters. The cause is that in the `-d "...\n..."` form, the shell/CLI does not interpret the escapes as real newlines.

Recommended pattern:

```bash
ISSUE_BODY=$(cat <<'EOF'
## Summary
- change 1
- change 2

## Why
- background 1
- background 2

## Scope
- in scope
- out of scope
EOF
)

tea issues create \
  -t "docs: title" \
  -d "$ISSUE_BODY"
```

The same applies to PRs:

```bash
PR_BODY=$(cat <<'EOF'
## Summary
- ...

## Validation
- python3 scripts/validate_ouroboros_docs.py
EOF
)

tea pr create \
  --base main \
  --head feature/issue-N-something \
  --title "docs: ... (#N)" \
  --description "$PR_BODY"
```

Anti-patterns:

- `-d "line1\nline2"` (the literal `\n` may show up in the web UI)
- Putting backticks/parentheses inline in the body and executing immediately without proper quoting

## Agent Workflow

**Modern AI development leverages specialized agents for concurrent, efficient task execution.**
ouroboros_plan_v2.txt (new file, 165 lines)
@@ -0,0 +1,165 @@
[The Ouroboros] Operations/Strategy Plan v2
Date: 2026-02-26
Status: Pre-implementation design draft (strategy/validation focused)

==================================================
0) Purpose
==================================================
Move beyond the fixed take-profit (+3%) logic to a production-ready exit framework that satisfies the following.
- Protect gains (prevent profit-to-loss reversals)
- Volatility-adaptive exits
- Combine the prediction model's probability signal as a secondary input
- A validation framework that puts overfitting prevention first

==================================================
1) Core Design Principles
==================================================
1. Evaluate prediction performance and strategy performance separately
   - Prediction: PR-AUC, Brier, calibration
   - Strategy: Net PnL, Sharpe, MDD, Profit Factor, Turnover

2. Enforce time-series validation discipline
   - Walk-forward splits
   - Purge/embargo applied
   - Random splits prohibited

3. Execution realism first
   - Backtest results that do not model trading costs/slippage/failed fills must not be adopted

==================================================
2) Exit State Machine
==================================================
States:
- HOLDING
- BE_LOCK
- ARMED
- EXITED

Definitions:
- HOLDING: normal holding state
- BE_LOCK: once a profit threshold is reached, raise the stop to break-even (or cost-adjusted break-even)
- ARMED: trailing (peak-tracking) exit armed
- EXITED: position closed

Transition rules (conceptual):
- HOLDING -> BE_LOCK: unrealized_pnl_pct >= be_arm_pct
- BE_LOCK -> ARMED: unrealized_pnl_pct >= arm_pct
- ARMED -> EXITED: any one of the following
  1) hard stop reached
  2) trailing stop reached (drawdown from peak)
  3) model downturn probability plus weakening liquidity

State transition implementation rules (mandatory; see the sketch below):
- On every tick/bar evaluation, promote immediately to the highest state the current conditions allow
- No transitions may be missed due to sequential if-else (e.g. a gap satisfying BE_LOCK and ARMED at once)
- EXITED conditions are evaluated before every other state
- Transition logs record the previous/next state, the transition reason, and the reference price/return
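A minimal Python sketch of the promotion rule above, assuming the plan's `be_arm_pct`/`arm_pct` thresholds; the function shape and the `exit_hit` flag are illustrative, not the implementation:

```python
from enum import IntEnum


class ExitState(IntEnum):
    HOLDING = 0
    BE_LOCK = 1
    ARMED = 2
    EXITED = 3


def promote(state: ExitState, unrealized_pnl_pct: float, *,
            be_arm_pct: float, arm_pct: float, exit_hit: bool) -> ExitState:
    """Jump straight to the highest state current conditions allow."""
    if exit_hit:  # EXITED is evaluated before every other state
        return ExitState.EXITED
    if state is ExitState.EXITED:
        return state
    target = state
    if unrealized_pnl_pct >= be_arm_pct:
        target = max(target, ExitState.BE_LOCK)
    if unrealized_pnl_pct >= arm_pct:
        # A gap can satisfy both thresholds in one bar; max() skips no step.
        target = max(target, ExitState.ARMED)
    return ExitState(target)
```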

==================================================
3) Exit Logic Components (4 Layers of Protection)
==================================================
A. Hard Stop
- Absolute floor protecting the account/position
- Always active

B. Dynamic Stop (Break-even Lock)
- On entering BE_LOCK, raise the stop to break-even or better
- Blocks the structural risk of a winning position reversing into a loss

C. ATR-based Trailing Stop
- Volatility-adaptive instead of a fixed trail_pct
- Example: ExitPrice = PeakPrice - (k * ATR), sketched below
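A small sketch of the trailing rule in 3C, assuming a Wilder-smoothed ATR over `atr_period` bars; `k` corresponds to `atr_multiplier_k` in section 8, and the helper names are illustrative:

```python
def wilder_atr(highs: list[float], lows: list[float], closes: list[float],
               period: int = 14) -> float:
    """Wilder-smoothed Average True Range; needs at least period + 1 bars."""
    true_ranges = [
        max(highs[i] - lows[i],
            abs(highs[i] - closes[i - 1]),
            abs(lows[i] - closes[i - 1]))
        for i in range(1, len(closes))
    ]
    atr = sum(true_ranges[:period]) / period
    for tr in true_ranges[period:]:  # Wilder smoothing over the remainder
        atr = (atr * (period - 1) + tr) / period
    return atr


def trailing_exit_price(peak_price: float, atr: float, k: float) -> float:
    """ExitPrice = PeakPrice - (k * ATR), per section 3C."""
    return peak_price - k * atr
```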

D. Model Probability Signal
- Weight the exit more heavily when the downturn probability (pred_prob) exceeds a threshold
- Used as a secondary trigger beside the trailing/risk rules, never as a standalone trigger

==================================================
4) Labeling Scheme (Triple Barrier)
==================================================
Goal:
Reduce the bias of fixed H-window labels and reflect the path dependence of financial time series.

Label definition:
- Upper barrier (take profit)
- Lower barrier (stop loss)
- Time barrier (expiry)

Rules:
- The label is fixed by whichever barrier is touched first
- Labels are generated using only data after the entry point
- Feature windows and label windows are strictly separated to prevent look-ahead bias

==================================================
5) Validation Framework
==================================================
5.1 Split scheme
- Fold-based walk-forward
- Purge/embargo to block leakage between adjacent samples

5.2 Baseline structure
- B0: existing fixed stop-loss/take-profit
- B1: trailing only, no model
- M1: trailing + model probability combined

5.3 Adoption criteria
- M1 shows a consistent out-of-sample (OOS) edge over B0/B1
- Judged on the distribution across folds, not on a single window's performance

==================================================
6) Execution Architecture Principles
==================================================
1. Low-latency execution path
   - Real-time exit decisions are handled by a lightweight engine (rules/GBDT)
   - The LLM assists with regime detection, position sizing, and higher-level decisions

2. Fill realism
   - Slippage penalties tiered by session liquidity
   - Unfilled/requote/resubmit scenarios reflected in the backtest

==================================================
7) Operational Risk Management
==================================================
Promotion stages:
- Offline backtest -> Paper shadow -> Small-capital live

Kill switch triggers:
- Deteriorating rolling Sharpe
- MDD limit exceeded
- Spike in fill-failure rate/slippage

Kill switch execution order (atomic; see the sketch below):
1) Turn ON the flag that blocks all new orders
2) Request cancellation of all open orders
3) Re-query cancellation results (retry failures)
4) Recompute position risk, then decide on forced reduction/liquidation
5) Save a state/log snapshot and send an operations alert
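A minimal async sketch of the five steps above; every `broker` method named here (`fetch_open_orders`, `cancel`, `recompute_position_risk`, `reduce_all`, `snapshot_state`, `send_alert`) is an assumed interface, not an existing API:

```python
async def kill_switch(broker) -> None:
    """Run the atomic kill-switch sequence from section 7."""
    broker.block_new_orders = True                  # 1) block all new orders first
    for order in await broker.fetch_open_orders():  # 2) cancel everything outstanding
        await broker.cancel(order)
    for order in await broker.fetch_open_orders():  # 3) re-query and retry failures
        await broker.cancel(order)
    risk = await broker.recompute_position_risk()   # 4) decide on forced reduction
    if risk.requires_liquidation:
        await broker.reduce_all()
    await broker.snapshot_state()                   # 5) snapshot state, then alert
    await broker.send_alert("kill switch engaged")
```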

Principle:
- Even if the model fails, the system must be able to degrade immediately to a conservative hard-stop-only mode

==================================================
8) Fixed Parameters (Initial Draft)
==================================================
(Proposed for 15-minute-bar short swing trading)
- KR: be_arm_pct=1.2, arm_pct=2.8, atr_period=14, atr_multiplier_k=2.2,
  time_barrier_bars=26, p_thresh=0.62
- US: be_arm_pct=1.0, arm_pct=2.4, atr_period=14, atr_multiplier_k=2.0,
  time_barrier_bars=32, p_thresh=0.60

Sensitivity ranges (initial sweep):
- be_arm_pct: KR 0.9~1.8 / US 0.7~1.5
- arm_pct: KR 2.2~3.8 / US 1.8~3.2
- atr_multiplier_k: KR 1.8~2.8 / US 1.6~2.4
- time_barrier_bars: KR 20~36 / US 24~48
- p_thresh: 0.55~0.70

==================================================
9) Pre-implementation Checklist
==================================================
- Prevent nested leakage when tuning parameters
- Confirm fees/taxes/slippage are all reflected
- Confirm consistent session/timezone/DST handling
- Ensure reproducibility of model versions/config hashes/experiment logs

End.
185 ouroboros_plan_v3.txt (Normal file)
@@ -0,0 +1,185 @@
[The Ouroboros] Operational Expansion v3
Date: 2026-02-26
Status: Extension of v2 / design draft for global session operation including overnight and pre-market

==================================================
0) Purpose
==================================================
To serve the "24-hour uninterrupted asset growth" vision, expand the trading-session scope from the
KR regular session to the NXT and extended US sessions. The core is these three things.
- Session-aware decision making
- Session-specific risk/cost differentiation
- A realistic redefinition of the time barrier

==================================================
1) Session Model (Session-aware Engine)
==================================================
KR sessions:
- NXT_PRE : 08:00 ~ 08:50 (KST)
- KRX_REG : 09:00 ~ 15:30 (KST)
- NXT_AFTER : 15:30 ~ 20:00 (KST)

US sessions (operated from a KST viewpoint):
- US_DAY : 10:00 ~ 18:00
- US_PRE : 18:00 ~ 23:30
- US_REG : 23:30 ~ 06:00
- US_AFTER : 06:00 ~ 07:00

Principles:
- Every feature/signal/order/log record carries an explicit session_id
- On session transitions, refresh state and reload the risk parameters (a resolver sketch follows)
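A minimal sketch of a KR-side session resolver for the table above; the session IDs mirror this section, while the function shape is an assumption (US_REG wraps past midnight in KST and would need separate handling):

```python
from datetime import time

# (start, end, session_id) in KST; half-open intervals so 15:30 maps to NXT_AFTER.
_KR_SESSIONS = [
    (time(8, 0), time(8, 50), "NXT_PRE"),
    (time(9, 0), time(15, 30), "KRX_REG"),
    (time(15, 30), time(20, 0), "NXT_AFTER"),
]


def kr_session_id(now_kst: time) -> str | None:
    """Return the KR session_id for a KST wall-clock time, or None when closed."""
    for start, end, session_id in _KR_SESSIONS:
        if start <= now_kst < end:
            return session_id
    return None
```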

==================================================
2) Calendar/Holiday/DST Sources of Truth
==================================================
KR:
- Default: pykrx or FinanceDataReader (KRX-based)
- Exceptions: long holidays/ad-hoc closures/NXT-specific operations supplemented from KIS notices

US:
- pandas_market_calendars (NYSE-based)
- 2026 DST:
  - starts: 2026-03-08
  - ends: 2026-11-01

Consistency rules:
- On schedule conflicts, "exchange calendar > local estimate"
- Market state (open/close/half-day) gets a final verification before entering the order engine

KIS maintenance-window avoidance policy (mandatory):
- Broker maintenance/outage blackout windows are managed separately in operations config
- During a blackout, sending new orders is prohibited; cancels/amendments are also restricted by policy
- Signals keep flowing, but order intents are queued and re-validated after recovery before execution
- Immediately after recovery, synchronize balances/open orders/fills first, then restart the order engine

==================================================
3) Redefining the Time Barrier
==================================================
v2's fixed time_barrier_bars is extended in v3 to:
- max_holding_minutes (per-market default expiry)
- Bar counts computed dynamically from session length/interval (see the sketch below)

Defaults:
- KR: max_holding_minutes = 2160 (about 3 trading days, NXT included)
- US: max_holding_minutes = 4320 (about 72 hours)

Operational notes:
- Apply expiry on "position holding time" rather than a fixed intraday close-out
- Priorities must be made explicit where this conflicts with session-close forced-liquidation rules
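One way the dynamic bar count could be derived; the scaling convention (count only minutes the market is open) is an assumption, since section 3 leaves the exact formula open:

```python
def time_barrier_bars(max_holding_minutes: int, bar_minutes: int,
                      session_minutes_per_day: int) -> int:
    """Convert a holding-time expiry into a bar count.

    Calendar minutes are scaled by the session's share of the day so that
    only tradable minutes count toward the barrier.
    """
    open_fraction = session_minutes_per_day / (24 * 60)
    tradable_minutes = max_holding_minutes * open_fraction
    return max(1, round(tradable_minutes / bar_minutes))
```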

==================================================
4) Per-session Cost/Slippage Model (Conservative)
==================================================
KRX_REG:
- Slippage: 2~3 ticks (about 0.05%)
- Fees + taxes: 0.20% ~ 0.23%

NXT_AFTER:
- Slippage: 5~8 ticks (about 0.15%)
- Fees + taxes: 0.20% ~ 0.23%

US_REG:
- Slippage: 2~3 ticks (about 0.03%)
- Fees + other costs: 0.07% ~ 0.15%

US_PRE / US_DAY:
- Slippage: 10+ ticks (about 0.3% ~ 0.5%)
- Fees + other costs: 0.07% ~ 0.15%

Principles:
- Backtest fills use conservative per-session assumptions
- Low-liquidity sessions should switch automatically to a conservative mode (raise p_thresh, raise atr_k)
- Backtest fills default to the "unfavorable direction" assumption (plain close-price fills are prohibited)

Per-session order-type enforcement (mandatory):
- KRX_REG / US_REG: limit orders first; market orders allowed only in limited cases
- NXT_AFTER / US_PRE / US_DAY / US_AFTER: market orders prohibited
- Low-liquidity sessions allow only best-bid/ask limit orders or IOC/FOK (with price-protection bounds)
- On order failure, cap the requote interval/attempt count and withdraw the order when the cap is exceeded

==================================================
5) Position/Balance Consolidation Rules (KIS-specific)
==================================================
Problem:
- KRX/NXT balance queries may be split per venue or reflected with a delay

Rules:
- Identify instruments by one stock code (or ISIN) and manage a single consolidated position
- However, orderable quantity follows the per-venue API response as the final authority
- Confirm sellable quantity with a re-query immediately before ordering

==================================================
6) Close-out Forced Liquidation / Overnight Exception Rules
==================================================
Base principle:
- Review REDUCE_ALL for every position 10 minutes before session close

Overnight exception allowed (all must hold):
1) ARMED state (e.g. +2.8% or better)
2) Model downturn probability < 0.30
3) Portfolio cash ratio >= 50%

Gap-risk controls:
- At the next open, recompute the hard stop against the opening price
- When a condition is violated, immediate liquidation takes priority

Kill switch integration:
- When MDD/failure-rate thresholds are exceeded, enforce the order "cancel all open orders -> block new orders -> reduce risk"

==================================================
7) Data Storage/Capacity Policy
==================================================
Core tables (planned):
- feature_snapshots
- position_states
- model_predictions

Storage rules:
- Deduplicate on feature_hash
- Force a snapshot on session_id change even when the price barely moved
- Monthly DB rotation recommended (e.g. trading_YYYY_MM.db)

==================================================
8) FX/Settlement Risk Policy (Mandatory for US)
==================================================
Principles:
- Track FX P&L on USD exposure separately from strategy P&L
- When using the KRW-order service, reflect provisional-rate fill/next-day settlement risk in the cash rules

Operational rules:
- Fix the FX conversion-timing policy (pre-convert vs on-demand) and record it in the logs
- Set separate minimum USD and KRW buffers to reduce the risk of insufficient orderable cash
- During sharp FX moves, cut position limits or restrict new entries

==================================================
9) v3 Experiment Matrix (Top 3)
==================================================
EXP-KR-01:
- Market: KR
- Focus: NXT overnight specialization
- Proposal: extend the time barrier (e.g. the equivalent of 48 bars), raise p_thresh (0.65)

EXP-US-01:
- Market: US
- Focus: quasi-continuous 21h operation
- Proposal: extend the time barrier (e.g. the equivalent of 80 bars), raise atr_k (2.5)

EXP-HYB-01:
- Market: Global
- Focus: linking KR daytime with US overnight
- Proposal: regime-based automatic asset-allocation adjustment

==================================================
10) Final Checks Before Coding Starts
==================================================
1) Official calendar sources/priorities per session
2) Per-session slippage/cost table values
3) Per-market max_holding_minutes
4) Threshold values for the close-out forced-liquidation exceptions
5) Blackout (maintenance/outage) windows and order-queue handling rules
6) Allowed order types per session (including the scope of market orders)
7) FX/settlement policy and currency buffer thresholds

Move to Step 1 implementation (code) once every item is confirmed.

End.
pyproject.toml
@@ -9,6 +9,8 @@ dependencies = [
    "pydantic-settings>=2.1,<3",
    "google-genai>=1.0,<2",
    "scipy>=1.11,<2",
    "fastapi>=0.110,<1",
    "uvicorn>=0.29,<1",
]

[project.optional-dependencies]
54 scripts/morning_report.sh (Executable file)
@@ -0,0 +1,54 @@
#!/usr/bin/env bash
# Morning summary for overnight run logs.

set -euo pipefail

LOG_DIR="${LOG_DIR:-data/overnight}"

if [ ! -d "$LOG_DIR" ]; then
  echo "Log directory does not exist: $LOG_DIR"
  exit 1
fi

latest_run="$(ls -1t "$LOG_DIR"/run_*.log 2>/dev/null | head -n 1 || true)"
latest_watchdog="$(ls -1t "$LOG_DIR"/watchdog_*.log 2>/dev/null | head -n 1 || true)"

if [ -z "$latest_run" ]; then
  echo "No run logs found: $LOG_DIR/run_*.log"
  exit 1
fi

echo "Overnight report"
echo "- run log: $latest_run"
if [ -n "$latest_watchdog" ]; then
  echo "- watchdog log: $latest_watchdog"
fi

start_line="$(head -n 1 "$latest_run" || true)"
end_line="$(tail -n 1 "$latest_run" || true)"

# rg -c exits non-zero when nothing matches, so `|| true` keeps set -e from aborting.
info_count="$(rg -c '"level": "INFO"' "$latest_run" || true)"
warn_count="$(rg -c '"level": "WARNING"' "$latest_run" || true)"
error_count="$(rg -c '"level": "ERROR"' "$latest_run" || true)"
critical_count="$(rg -c '"level": "CRITICAL"' "$latest_run" || true)"
traceback_count="$(rg -c 'Traceback' "$latest_run" || true)"

echo "- start: ${start_line:-N/A}"
echo "- end: ${end_line:-N/A}"
echo "- INFO: ${info_count:-0}"
echo "- WARNING: ${warn_count:-0}"
echo "- ERROR: ${error_count:-0}"
echo "- CRITICAL: ${critical_count:-0}"
echo "- Traceback: ${traceback_count:-0}"

if [ -n "$latest_watchdog" ]; then
  watchdog_errors="$(rg -c '\[ERROR\]' "$latest_watchdog" || true)"
  echo "- watchdog ERROR: ${watchdog_errors:-0}"
  echo ""
  echo "Recent watchdog log:"
  tail -n 5 "$latest_watchdog" || true
fi

echo ""
echo "Recent app log:"
tail -n 20 "$latest_run" || true
87 scripts/run_overnight.sh (Executable file)
@@ -0,0 +1,87 @@
#!/usr/bin/env bash
# Start The Ouroboros overnight with logs and watchdog.

set -euo pipefail

LOG_DIR="${LOG_DIR:-data/overnight}"
CHECK_INTERVAL="${CHECK_INTERVAL:-30}"
TMUX_AUTO="${TMUX_AUTO:-true}"
TMUX_ATTACH="${TMUX_ATTACH:-true}"
TMUX_SESSION_PREFIX="${TMUX_SESSION_PREFIX:-ouroboros_overnight}"

if [ -z "${APP_CMD:-}" ]; then
  if [ -x ".venv/bin/python" ]; then
    PYTHON_BIN=".venv/bin/python"
  elif command -v python3 >/dev/null 2>&1; then
    PYTHON_BIN="python3"
  elif command -v python >/dev/null 2>&1; then
    PYTHON_BIN="python"
  else
    echo "Could not find .venv/bin/python or a python3/python executable."
    exit 1
  fi

  dashboard_port="${DASHBOARD_PORT:-8080}"

  APP_CMD="DASHBOARD_PORT=$dashboard_port $PYTHON_BIN -m src.main --mode=live --dashboard"
fi

mkdir -p "$LOG_DIR"

timestamp="$(date +"%Y%m%d_%H%M%S")"
RUN_LOG="$LOG_DIR/run_${timestamp}.log"
WATCHDOG_LOG="$LOG_DIR/watchdog_${timestamp}.log"
PID_FILE="$LOG_DIR/app.pid"
WATCHDOG_PID_FILE="$LOG_DIR/watchdog.pid"

if [ -f "$PID_FILE" ]; then
  old_pid="$(cat "$PID_FILE" || true)"
  if [ -n "$old_pid" ] && kill -0 "$old_pid" 2>/dev/null; then
    echo "The app is already running. pid=$old_pid"
    exit 1
  fi
fi

echo "[$(date -u +"%Y-%m-%dT%H:%M:%SZ")] starting: $APP_CMD" | tee -a "$RUN_LOG"
nohup bash -lc "$APP_CMD" >>"$RUN_LOG" 2>&1 &
app_pid=$!
echo "$app_pid" > "$PID_FILE"

echo "[$(date -u +"%Y-%m-%dT%H:%M:%SZ")] app pid=$app_pid" | tee -a "$RUN_LOG"

nohup env PID_FILE="$PID_FILE" LOG_FILE="$WATCHDOG_LOG" CHECK_INTERVAL="$CHECK_INTERVAL" \
  bash scripts/watchdog.sh >/dev/null 2>&1 &
watchdog_pid=$!
echo "$watchdog_pid" > "$WATCHDOG_PID_FILE"

cat <<EOF
Startup complete
- app pid: $app_pid
- watchdog pid: $watchdog_pid
- app log: $RUN_LOG
- watchdog log: $WATCHDOG_LOG

Follow live:
  tail -f "$RUN_LOG"
  tail -f "$WATCHDOG_LOG"
EOF

if [ "$TMUX_AUTO" = "true" ]; then
  if ! command -v tmux >/dev/null 2>&1; then
    echo "tmux not found; skipping automatic session creation."
    exit 0
  fi

  session_name="${TMUX_SESSION_PREFIX}_${timestamp}"
  window_name="overnight"
  tmux new-session -d -s "$session_name" -n "$window_name" "tail -f '$RUN_LOG'"
  tmux split-window -t "${session_name}:${window_name}" -v "tail -f '$WATCHDOG_LOG'"
  tmux select-layout -t "${session_name}:${window_name}" even-vertical

  echo "tmux session created: $session_name"
  echo "Attach manually: tmux attach -t $session_name"

  if [ -z "${TMUX:-}" ] && [ "$TMUX_ATTACH" = "true" ]; then
    tmux attach -t "$session_name"
  fi
fi
76 scripts/stop_overnight.sh (Executable file)
@@ -0,0 +1,76 @@
#!/usr/bin/env bash
# Stop The Ouroboros overnight app/watchdog/tmux session.

set -euo pipefail

LOG_DIR="${LOG_DIR:-data/overnight}"
PID_FILE="$LOG_DIR/app.pid"
WATCHDOG_PID_FILE="$LOG_DIR/watchdog.pid"
TMUX_SESSION_PREFIX="${TMUX_SESSION_PREFIX:-ouroboros_overnight}"
KILL_TIMEOUT="${KILL_TIMEOUT:-5}"

stop_pid() {
  local name="$1"
  local pid="$2"

  if [ -z "$pid" ]; then
    echo "$name PID is empty."
    return 1
  fi

  if ! kill -0 "$pid" 2>/dev/null; then
    echo "$name process already stopped (pid=$pid)"
    return 0
  fi

  kill "$pid" 2>/dev/null || true
  for _ in $(seq 1 "$KILL_TIMEOUT"); do
    if ! kill -0 "$pid" 2>/dev/null; then
      echo "$name stopped (pid=$pid)"
      return 0
    fi
    sleep 1
  done

  kill -9 "$pid" 2>/dev/null || true
  if ! kill -0 "$pid" 2>/dev/null; then
    echo "$name force-killed (pid=$pid)"
    return 0
  fi

  echo "failed to stop $name (pid=$pid)"
  return 1
}

status=0

if [ -f "$WATCHDOG_PID_FILE" ]; then
  watchdog_pid="$(cat "$WATCHDOG_PID_FILE" || true)"
  stop_pid "watchdog" "$watchdog_pid" || status=1
  rm -f "$WATCHDOG_PID_FILE"
else
  echo "No watchdog pid file: $WATCHDOG_PID_FILE"
fi

if [ -f "$PID_FILE" ]; then
  app_pid="$(cat "$PID_FILE" || true)"
  stop_pid "app" "$app_pid" || status=1
  rm -f "$PID_FILE"
else
  echo "No app pid file: $PID_FILE"
fi

if command -v tmux >/dev/null 2>&1; then
  sessions="$(tmux ls 2>/dev/null | awk -F: -v p="$TMUX_SESSION_PREFIX" '$1 ~ "^" p "_" {print $1}')"
  if [ -n "$sessions" ]; then
    while IFS= read -r s; do
      [ -z "$s" ] && continue
      tmux kill-session -t "$s" 2>/dev/null || true
      echo "tmux session killed: $s"
    done <<< "$sessions"
  else
    echo "No tmux sessions to kill (prefix=${TMUX_SESSION_PREFIX}_)"
  fi
fi

exit "$status"
140 scripts/validate_ouroboros_docs.py (Executable file)
@@ -0,0 +1,140 @@
#!/usr/bin/env python3
"""Validate Ouroboros planning docs for metadata, links, and ID consistency."""

from __future__ import annotations

import re
import sys
from pathlib import Path

DOC_DIR = Path("docs/ouroboros")
META_PATTERN = re.compile(
    r"<!--\n"
    r"Doc-ID: (?P<doc_id>[^\n]+)\n"
    r"Version: (?P<version>[^\n]+)\n"
    r"Status: (?P<status>[^\n]+)\n"
    r"Owner: (?P<owner>[^\n]+)\n"
    r"Updated: (?P<updated>\d{4}-\d{2}-\d{2})\n"
    r"-->",
    re.MULTILINE,
)
ID_PATTERN = re.compile(r"\b(?:REQ|RULE|TASK|TEST|DOC)-[A-Z0-9-]+-\d{3}\b")
DEF_PATTERN = re.compile(r"^-\s+`(?P<id>(?:REQ|RULE|TASK|TEST|DOC)-[A-Z0-9-]+-\d{3})`", re.MULTILINE)
LINK_PATTERN = re.compile(r"\[[^\]]+\]\((?P<link>[^)]+)\)")
LINE_DEF_PATTERN = re.compile(r"^-\s+`(?P<id>(?:REQ|RULE|TASK|TEST|DOC)-[A-Z0-9-]+-\d{3})`.*$", re.MULTILINE)


def iter_docs() -> list[Path]:
    return sorted([p for p in DOC_DIR.glob("*.md") if p.is_file()])


def validate_metadata(path: Path, text: str, errors: list[str], doc_ids: dict[str, Path]) -> None:
    match = META_PATTERN.search(text)
    if not match:
        errors.append(f"{path}: missing or malformed metadata block")
        return
    doc_id = match.group("doc_id").strip()
    if doc_id in doc_ids:
        errors.append(f"{path}: duplicate Doc-ID {doc_id} (already in {doc_ids[doc_id]})")
    else:
        doc_ids[doc_id] = path


def validate_links(path: Path, text: str, errors: list[str]) -> None:
    for m in LINK_PATTERN.finditer(text):
        link = m.group("link").strip()
        if not link or link.startswith("http") or link.startswith("#"):
            continue
        if link.startswith("/"):
            target = Path(link)
        else:
            target = (path.parent / link).resolve()
        if not target.exists():
            errors.append(f"{path}: broken link -> {link}")


def collect_ids(path: Path, text: str, defs: dict[str, Path], refs: dict[str, set[Path]]) -> None:
    for m in DEF_PATTERN.finditer(text):
        defs[m.group("id")] = path
    for m in ID_PATTERN.finditer(text):
        idv = m.group(0)
        refs.setdefault(idv, set()).add(path)


def collect_req_traceability(text: str, req_to_task: dict[str, set[str]], req_to_test: dict[str, set[str]]) -> None:
    for m in LINE_DEF_PATTERN.finditer(text):
        line = m.group(0)
        item_id = m.group("id")
        req_ids = [rid for rid in ID_PATTERN.findall(line) if rid.startswith("REQ-")]
        if item_id.startswith("TASK-"):
            for req_id in req_ids:
                req_to_task.setdefault(req_id, set()).add(item_id)
        if item_id.startswith("TEST-"):
            for req_id in req_ids:
                req_to_test.setdefault(req_id, set()).add(item_id)


def main() -> int:
    if not DOC_DIR.exists():
        print(f"ERROR: missing directory {DOC_DIR}")
        return 1

    docs = iter_docs()
    if not docs:
        print(f"ERROR: no markdown docs found in {DOC_DIR}")
        return 1

    errors: list[str] = []
    doc_ids: dict[str, Path] = {}
    defs: dict[str, Path] = {}
    refs: dict[str, set[Path]] = {}
    req_to_task: dict[str, set[str]] = {}
    req_to_test: dict[str, set[str]] = {}

    for path in docs:
        text = path.read_text(encoding="utf-8")
        validate_metadata(path, text, errors, doc_ids)
        validate_links(path, text, errors)
        collect_ids(path, text, defs, refs)
        collect_req_traceability(text, req_to_task, req_to_test)

    for idv, where_used in sorted(refs.items()):
        if idv.startswith("DOC-"):
            continue
        if idv not in defs:
            files = ", ".join(str(p) for p in sorted(where_used))
            errors.append(f"undefined ID {idv}, used in: {files}")

    for idv in sorted(defs):
        if not idv.startswith("REQ-"):
            continue
        if idv not in req_to_task:
            errors.append(f"REQ without TASK mapping: {idv}")
        if idv not in req_to_test:
            errors.append(f"REQ without TEST mapping: {idv}")

    warnings: list[str] = []
    for idv, where_def in sorted(defs.items()):
        if len(refs.get(idv, set())) <= 1 and (idv.startswith("REQ-") or idv.startswith("RULE-")):
            warnings.append(f"orphan ID {idv} defined in {where_def} (not referenced elsewhere)")

    if errors:
        print("[FAIL] Ouroboros docs validation failed")
        for err in errors:
            print(f"- {err}")
        return 1

    print(f"[OK] validated {len(docs)} docs in {DOC_DIR}")
    print(f"[OK] unique Doc-ID: {len(doc_ids)}")
    print(f"[OK] definitions: {len(defs)}, references: {len(refs)}")
    print(f"[OK] req->task mappings: {len(req_to_task)}")
    print(f"[OK] req->test mappings: {len(req_to_test)}")
    if warnings:
        print(f"[WARN] orphan IDs: {len(warnings)}")
        for w in warnings:
            print(f"- {w}")
    return 0


if __name__ == "__main__":
    sys.exit(main())
42 scripts/watchdog.sh (Executable file)
@@ -0,0 +1,42 @@
#!/usr/bin/env bash
# Simple watchdog for The Ouroboros process.

set -euo pipefail

PID_FILE="${PID_FILE:-data/overnight/app.pid}"
LOG_FILE="${LOG_FILE:-data/overnight/watchdog.log}"
CHECK_INTERVAL="${CHECK_INTERVAL:-30}"
STATUS_EVERY="${STATUS_EVERY:-10}"

mkdir -p "$(dirname "$LOG_FILE")"

log() {
  printf '%s %s\n' "$(date -u +"%Y-%m-%dT%H:%M:%SZ")" "$1" | tee -a "$LOG_FILE"
}

if [ ! -f "$PID_FILE" ]; then
  log "[ERROR] pid file not found: $PID_FILE"
  exit 1
fi

PID="$(cat "$PID_FILE")"
if [ -z "$PID" ]; then
  log "[ERROR] pid file is empty: $PID_FILE"
  exit 1
fi

log "[INFO] watchdog started (pid=$PID, interval=${CHECK_INTERVAL}s)"

count=0
while true; do
  if kill -0 "$PID" 2>/dev/null; then
    count=$((count + 1))
    if [ $((count % STATUS_EVERY)) -eq 0 ]; then
      log "[INFO] process alive (pid=$PID)"
    fi
  else
    log "[ERROR] process stopped (pid=$PID)"
    exit 1
  fi
  sleep "$CHECK_INTERVAL"
done
52 src/analysis/backtest_cost_guard.py (Normal file)
@@ -0,0 +1,52 @@
"""Backtest cost/slippage/failure validation guard."""

from __future__ import annotations

from dataclasses import dataclass
import math


@dataclass(frozen=True)
class BacktestCostModel:
    commission_bps: float | None = None
    slippage_bps_by_session: dict[str, float] | None = None
    failure_rate_by_session: dict[str, float] | None = None
    unfavorable_fill_required: bool = True


def validate_backtest_cost_model(
    *,
    model: BacktestCostModel,
    required_sessions: list[str],
) -> None:
    """Raise ValueError when required cost assumptions are missing/invalid."""
    if (
        model.commission_bps is None
        or not math.isfinite(model.commission_bps)
        or model.commission_bps < 0
    ):
        raise ValueError("commission_bps must be provided and >= 0")
    if not model.unfavorable_fill_required:
        raise ValueError("unfavorable_fill_required must be True")

    slippage = model.slippage_bps_by_session or {}
    failure = model.failure_rate_by_session or {}

    missing_slippage = [s for s in required_sessions if s not in slippage]
    if missing_slippage:
        raise ValueError(
            f"missing slippage_bps_by_session for sessions: {', '.join(missing_slippage)}"
        )

    missing_failure = [s for s in required_sessions if s not in failure]
    if missing_failure:
        raise ValueError(
            f"missing failure_rate_by_session for sessions: {', '.join(missing_failure)}"
        )

    for sess, bps in slippage.items():
        if not math.isfinite(bps) or bps < 0:
            raise ValueError(f"slippage bps must be >= 0 for session={sess}")
    for sess, rate in failure.items():
        if not math.isfinite(rate) or rate < 0 or rate > 1:
            raise ValueError(f"failure rate must be within [0,1] for session={sess}")
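A quick illustration of wiring this guard into a backtest entry point; the session names follow plan v3 section 4, and the numeric assumptions are examples only:

```python
model = BacktestCostModel(
    commission_bps=23.0,  # roughly the 0.23% KR fees+taxes bound from plan v3
    slippage_bps_by_session={"KRX_REG": 5.0, "NXT_AFTER": 15.0},
    failure_rate_by_session={"KRX_REG": 0.01, "NXT_AFTER": 0.05},
)
# Raises ValueError if any required session lacks a cost assumption.
validate_backtest_cost_model(model=model, required_sessions=["KRX_REG", "NXT_AFTER"])
```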
103 src/analysis/backtest_execution_model.py (Normal file)
@@ -0,0 +1,103 @@
"""Conservative backtest execution model."""

from __future__ import annotations

from dataclasses import dataclass
import math
from random import Random
from typing import Literal

OrderSide = Literal["BUY", "SELL"]


@dataclass(frozen=True)
class ExecutionRequest:
    side: OrderSide
    session_id: str
    qty: int
    reference_price: float


@dataclass(frozen=True)
class ExecutionAssumptions:
    slippage_bps_by_session: dict[str, float]
    failure_rate_by_session: dict[str, float]
    partial_fill_rate_by_session: dict[str, float]
    partial_fill_min_ratio: float = 0.3
    partial_fill_max_ratio: float = 0.8
    seed: int = 0


@dataclass(frozen=True)
class ExecutionResult:
    status: Literal["FILLED", "PARTIAL", "REJECTED"]
    filled_qty: int
    avg_price: float
    slippage_bps: float
    reason: str


class BacktestExecutionModel:
    """Execution simulator with conservative unfavorable fill assumptions."""

    def __init__(self, assumptions: ExecutionAssumptions) -> None:
        self.assumptions = assumptions
        self._rng = Random(assumptions.seed)
        if assumptions.partial_fill_min_ratio <= 0 or assumptions.partial_fill_max_ratio > 1:
            raise ValueError("partial fill ratios must be within (0,1]")
        if assumptions.partial_fill_min_ratio > assumptions.partial_fill_max_ratio:
            raise ValueError("partial_fill_min_ratio must be <= partial_fill_max_ratio")
        for sess, bps in assumptions.slippage_bps_by_session.items():
            if not math.isfinite(bps) or bps < 0:
                raise ValueError(f"slippage_bps must be finite and >= 0 for session={sess}")
        for sess, rate in assumptions.failure_rate_by_session.items():
            if not math.isfinite(rate) or rate < 0 or rate > 1:
                raise ValueError(f"failure_rate must be in [0,1] for session={sess}")
        for sess, rate in assumptions.partial_fill_rate_by_session.items():
            if not math.isfinite(rate) or rate < 0 or rate > 1:
                raise ValueError(f"partial_fill_rate must be in [0,1] for session={sess}")

    def simulate(self, request: ExecutionRequest) -> ExecutionResult:
        if request.qty <= 0:
            raise ValueError("qty must be positive")
        if request.reference_price <= 0:
            raise ValueError("reference_price must be positive")

        slippage_bps = self.assumptions.slippage_bps_by_session.get(request.session_id, 0.0)
        failure_rate = self.assumptions.failure_rate_by_session.get(request.session_id, 0.0)
        partial_rate = self.assumptions.partial_fill_rate_by_session.get(request.session_id, 0.0)

        if self._rng.random() < failure_rate:
            return ExecutionResult(
                status="REJECTED",
                filled_qty=0,
                avg_price=0.0,
                slippage_bps=slippage_bps,
                reason="execution_failure",
            )

        slip_mult = 1.0 + (slippage_bps / 10000.0 if request.side == "BUY" else -slippage_bps / 10000.0)
        exec_price = request.reference_price * slip_mult

        if self._rng.random() < partial_rate:
            ratio = self._rng.uniform(
                self.assumptions.partial_fill_min_ratio,
                self.assumptions.partial_fill_max_ratio,
            )
            filled = max(1, min(request.qty - 1, int(request.qty * ratio)))
            return ExecutionResult(
                status="PARTIAL",
                filled_qty=filled,
                avg_price=exec_price,
                slippage_bps=slippage_bps,
                reason="partial_fill",
            )

        return ExecutionResult(
            status="FILLED",
            filled_qty=request.qty,
            avg_price=exec_price,
            slippage_bps=slippage_bps,
            reason="filled",
        )
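A short usage sketch of the simulator above; the bps/rate numbers are illustrative, not the calibrated values from plan v3:

```python
assumptions = ExecutionAssumptions(
    slippage_bps_by_session={"KRX_REG": 5.0},
    failure_rate_by_session={"KRX_REG": 0.02},
    partial_fill_rate_by_session={"KRX_REG": 0.10},
    seed=42,
)
model = BacktestExecutionModel(assumptions)
result = model.simulate(
    ExecutionRequest(side="BUY", session_id="KRX_REG", qty=100, reference_price=10_000.0)
)
# Fills (full or partial) land at the unfavorable BUY price:
# 10,000 * (1 + 5 / 10_000) = 10,005.0
print(result.status, result.filled_qty, result.avg_price)
```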
@@ -1,8 +1,4 @@
"""Smart Volatility Scanner with RSI and volume filters.

Fetches market rankings from KIS API and applies technical filters
to identify high-probability trading candidates.
"""
"""Smart Volatility Scanner with volatility-first market ranking logic."""

from __future__ import annotations

@@ -12,7 +8,9 @@ from typing import Any

from src.analysis.volatility import VolatilityAnalyzer
from src.broker.kis_api import KISBroker
from src.broker.overseas import OverseasBroker
from src.config import Settings
from src.markets.schedule import MarketInfo

logger = logging.getLogger(__name__)

@@ -32,19 +30,19 @@ class ScanCandidate:


class SmartVolatilityScanner:
    """Scans market rankings and applies RSI/volume filters.
    """Scans market rankings and applies volatility-first filters.

    Flow:
    1. Fetch volume rankings from KIS API
    2. For each ranked stock, fetch daily prices
    3. Calculate RSI and volume ratio
    4. Apply filters: volume > VOL_MULTIPLIER AND (RSI < 30 OR RSI > 70)
    5. Return top N qualified candidates
    1. Fetch fluctuation rankings as primary universe
    2. Fetch volume rankings for liquidity bonus
    3. Score by volatility first, liquidity second
    4. Return top N qualified candidates
    """

    def __init__(
        self,
        broker: KISBroker,
        overseas_broker: OverseasBroker | None,
        volatility_analyzer: VolatilityAnalyzer,
        settings: Settings,
    ) -> None:

@@ -56,6 +54,7 @@ class SmartVolatilityScanner:
            settings: Application settings
        """
        self.broker = broker
        self.overseas_broker = overseas_broker
        self.analyzer = volatility_analyzer
        self.settings = settings

@@ -67,107 +66,129 @@ class SmartVolatilityScanner:

    async def scan(
        self,
        market: MarketInfo | None = None,
        fallback_stocks: list[str] | None = None,
    ) -> list[ScanCandidate]:
        """Execute smart scan and return qualified candidates.

        Args:
            market: Target market info (domestic vs overseas behavior)
            fallback_stocks: Stock codes to use if ranking API fails

        Returns:
            List of ScanCandidate, sorted by score, up to top_n items
        """
        # Step 1: Fetch rankings
        if market and not market.is_domestic:
            return await self._scan_overseas(market, fallback_stocks)

        return await self._scan_domestic(fallback_stocks)

    async def _scan_domestic(
        self,
        fallback_stocks: list[str] | None = None,
    ) -> list[ScanCandidate]:
        """Scan domestic market using volatility-first ranking + liquidity bonus."""
        # 1) Primary universe from fluctuation ranking.
        try:
            rankings = await self.broker.fetch_market_rankings(
                ranking_type="volume",
                limit=30,  # Fetch more than needed for filtering
            fluct_rows = await self.broker.fetch_market_rankings(
                ranking_type="fluctuation",
                limit=50,
            )
            logger.info("Fetched %d stocks from volume rankings", len(rankings))
        except ConnectionError as exc:
            logger.warning("Ranking API failed, using fallback: %s", exc)
            if fallback_stocks:
                # Create minimal ranking data for fallback
                rankings = [
                    {
                        "stock_code": code,
                        "name": code,
                        "price": 0,
                        "volume": 0,
                        "change_rate": 0,
                        "volume_increase_rate": 0,
                    }
                    for code in fallback_stocks
                ]
            else:
                return []
            logger.warning("Domestic fluctuation ranking failed: %s", exc)
            fluct_rows = []

        # 2) Liquidity bonus from volume ranking.
        try:
            volume_rows = await self.broker.fetch_market_rankings(
                ranking_type="volume",
                limit=50,
            )
        except ConnectionError as exc:
            logger.warning("Domestic volume ranking failed: %s", exc)
            volume_rows = []

        if not fluct_rows and fallback_stocks:
            logger.info(
                "Domestic ranking unavailable; using fallback symbols (%d)",
                len(fallback_stocks),
            )
            fluct_rows = [
                {
                    "stock_code": code,
                    "name": code,
                    "price": 0.0,
                    "volume": 0.0,
                    "change_rate": 0.0,
                    "volume_increase_rate": 0.0,
                }
                for code in fallback_stocks
            ]

        if not fluct_rows:
            return []

        volume_rank_bonus: dict[str, float] = {}
        for idx, row in enumerate(volume_rows):
            code = _extract_stock_code(row)
            if not code:
                continue
            volume_rank_bonus[code] = max(0.0, 15.0 - idx * 0.3)

        # Step 2: Analyze each stock
        candidates: list[ScanCandidate] = []

        for stock in rankings:
            stock_code = stock["stock_code"]
        for stock in fluct_rows:
            stock_code = _extract_stock_code(stock)
            if not stock_code:
                continue

            try:
                # Fetch daily prices for RSI calculation
                daily_prices = await self.broker.get_daily_prices(stock_code, days=20)
                price = _extract_last_price(stock)
                change_rate = _extract_change_rate_pct(stock)
                volume = _extract_volume(stock)

                if len(daily_prices) < 15:  # Need at least 14+1 for RSI
                    logger.debug("Insufficient price history for %s", stock_code)
                intraday_range_pct = 0.0
                volume_ratio = _safe_float(stock.get("volume_increase_rate"), 0.0) / 100.0 + 1.0

                # Use daily chart to refine range/volume when available.
                daily_prices = await self.broker.get_daily_prices(stock_code, days=2)
                if daily_prices:
                    latest = daily_prices[-1]
                    latest_close = _safe_float(latest.get("close"), default=price)
                    if price <= 0:
                        price = latest_close
                    latest_high = _safe_float(latest.get("high"))
                    latest_low = _safe_float(latest.get("low"))
                    if latest_close > 0 and latest_high > 0 and latest_low > 0 and latest_high >= latest_low:
                        intraday_range_pct = (latest_high - latest_low) / latest_close * 100.0
                    if volume <= 0:
                        volume = _safe_float(latest.get("volume"))
                    if len(daily_prices) >= 2:
                        prev_day_volume = _safe_float(daily_prices[-2].get("volume"))
                        if prev_day_volume > 0:
                            volume_ratio = max(volume_ratio, volume / prev_day_volume)

                volatility_pct = max(abs(change_rate), intraday_range_pct)
                if price <= 0 or volatility_pct < 0.8:
                    continue

                # Calculate RSI
                close_prices = [p["close"] for p in daily_prices]
                rsi = self.analyzer.calculate_rsi(close_prices, period=14)
                volatility_score = min(volatility_pct / 10.0, 1.0) * 85.0
                liquidity_score = volume_rank_bonus.get(stock_code, 0.0)
                score = min(100.0, volatility_score + liquidity_score)
                signal = "momentum" if change_rate >= 0 else "oversold"
                implied_rsi = max(0.0, min(100.0, 50.0 + (change_rate * 2.0)))

                # Calculate volume ratio (today vs previous day avg)
                if len(daily_prices) >= 2:
                    prev_day_volume = daily_prices[-2]["volume"]
                    current_volume = stock.get("volume", 0) or daily_prices[-1]["volume"]
                    volume_ratio = (
                        current_volume / prev_day_volume if prev_day_volume > 0 else 1.0
                    )
                else:
                    volume_ratio = stock.get("volume_increase_rate", 0) / 100 + 1  # Fallback

                # Apply filters
                volume_qualified = volume_ratio >= self.vol_multiplier
                rsi_oversold = rsi < self.rsi_oversold
                rsi_momentum = rsi > self.rsi_momentum

                if volume_qualified and (rsi_oversold or rsi_momentum):
                    signal = "oversold" if rsi_oversold else "momentum"

                    # Calculate composite score
                    # Higher score for: extreme RSI + high volume
                    rsi_extremity = abs(rsi - 50) / 50  # 0-1 scale
                    volume_score = min(volume_ratio / 5, 1.0)  # Cap at 5x
                    score = (rsi_extremity * 0.6 + volume_score * 0.4) * 100

                    candidates.append(
                        ScanCandidate(
                            stock_code=stock_code,
                            name=stock.get("name", stock_code),
                            price=stock.get("price", daily_prices[-1]["close"]),
                            volume=current_volume,
                            volume_ratio=volume_ratio,
                            rsi=rsi,
                            signal=signal,
                            score=score,
                        )
                    )

                    logger.info(
                        "Qualified: %s (%s) RSI=%.1f vol=%.1fx signal=%s score=%.1f",
                        stock_code,
                        stock.get("name", ""),
                        rsi,
                        volume_ratio,
                        signal,
                        score,
                candidates.append(
                    ScanCandidate(
                        stock_code=stock_code,
                        name=stock.get("name", stock_code),
                        price=price,
                        volume=volume,
                        volume_ratio=max(1.0, volume_ratio, volatility_pct / 2.0),
                        rsi=implied_rsi,
                        signal=signal,
                        score=score,
                    )
                )

            except ConnectionError as exc:
                logger.warning("Failed to analyze %s: %s", stock_code, exc)
@@ -176,10 +197,171 @@ class SmartVolatilityScanner:
                logger.error("Unexpected error analyzing %s: %s", stock_code, exc)
                continue

        # Sort by score and return top N
        logger.info("Domestic ranking scan found %d candidates", len(candidates))
        candidates.sort(key=lambda c: c.score, reverse=True)
        return candidates[: self.top_n]

    async def _scan_overseas(
        self,
        market: MarketInfo,
        fallback_stocks: list[str] | None = None,
    ) -> list[ScanCandidate]:
        """Scan overseas symbols using ranking API first, then fallback universe."""
        if self.overseas_broker is None:
            logger.warning(
                "Overseas scanner unavailable for %s: overseas broker not configured",
                market.name,
            )
            return []

        candidates = await self._scan_overseas_from_rankings(market)
        if not candidates:
            candidates = await self._scan_overseas_from_symbols(market, fallback_stocks)

        candidates.sort(key=lambda c: c.score, reverse=True)
        return candidates[: self.top_n]

    async def _scan_overseas_from_rankings(
        self,
        market: MarketInfo,
    ) -> list[ScanCandidate]:
        """Build overseas candidates from ranking APIs using volatility-first scoring."""
        assert self.overseas_broker is not None
        try:
            fluct_rows = await self.overseas_broker.fetch_overseas_rankings(
                exchange_code=market.exchange_code,
                ranking_type="fluctuation",
                limit=50,
            )
        except Exception as exc:
            logger.warning(
                "Overseas fluctuation ranking failed for %s: %s", market.code, exc
            )
            fluct_rows = []

        if not fluct_rows:
            return []

        volume_rank_bonus: dict[str, float] = {}
        try:
            volume_rows = await self.overseas_broker.fetch_overseas_rankings(
                exchange_code=market.exchange_code,
                ranking_type="volume",
                limit=50,
            )
        except Exception as exc:
            logger.warning(
                "Overseas volume ranking failed for %s: %s", market.code, exc
            )
            volume_rows = []

        for idx, row in enumerate(volume_rows):
            code = _extract_stock_code(row)
            if not code:
                continue
            # Top-ranked by traded value/volume gets higher liquidity bonus.
            volume_rank_bonus[code] = max(0.0, 15.0 - idx * 0.3)

        candidates: list[ScanCandidate] = []
        for row in fluct_rows:
            stock_code = _extract_stock_code(row)
            if not stock_code:
                continue

            price = _extract_last_price(row)
            change_rate = _extract_change_rate_pct(row)
            volume = _extract_volume(row)
            intraday_range_pct = _extract_intraday_range_pct(row, price)
            volatility_pct = max(abs(change_rate), intraday_range_pct)

            # Volatility-first filter (not simple gainers/value ranking).
            if price <= 0 or volatility_pct < 0.8:
                continue

            volatility_score = min(volatility_pct / 10.0, 1.0) * 85.0
            liquidity_score = volume_rank_bonus.get(stock_code, 0.0)
            score = min(100.0, volatility_score + liquidity_score)
            signal = "momentum" if change_rate >= 0 else "oversold"
            implied_rsi = max(0.0, min(100.0, 50.0 + (change_rate * 2.0)))
            candidates.append(
                ScanCandidate(
                    stock_code=stock_code,
                    name=str(row.get("name") or row.get("ovrs_item_name") or stock_code),
                    price=price,
                    volume=volume,
                    volume_ratio=max(1.0, volatility_pct / 2.0),
                    rsi=implied_rsi,
                    signal=signal,
                    score=score,
                )
            )

        if candidates:
            logger.info(
                "Overseas ranking scan found %d candidates for %s",
                len(candidates),
                market.name,
            )
        return candidates

    async def _scan_overseas_from_symbols(
        self,
        market: MarketInfo,
        symbols: list[str] | None,
    ) -> list[ScanCandidate]:
        """Fallback overseas scan from dynamic symbol universe."""
        assert self.overseas_broker is not None
        if not symbols:
            logger.info("Overseas scanner: no symbol universe for %s", market.name)
            return []

        logger.info(
            "Overseas scanner: scanning %d fallback symbols for %s",
            len(symbols),
            market.name,
        )
        candidates: list[ScanCandidate] = []
        for stock_code in symbols:
            try:
                price_data = await self.overseas_broker.get_overseas_price(
                    market.exchange_code, stock_code
                )
                output = price_data.get("output", {})
                price = _extract_last_price(output)
                change_rate = _extract_change_rate_pct(output)
                volume = _extract_volume(output)
                intraday_range_pct = _extract_intraday_range_pct(output, price)
                volatility_pct = max(abs(change_rate), intraday_range_pct)

                if price <= 0 or volatility_pct < 0.8:
                    continue

                score = min(volatility_pct / 10.0, 1.0) * 100.0
                signal = "momentum" if change_rate >= 0 else "oversold"
                implied_rsi = max(0.0, min(100.0, 50.0 + (change_rate * 2.0)))
                candidates.append(
                    ScanCandidate(
                        stock_code=stock_code,
                        name=stock_code,
                        price=price,
                        volume=volume,
                        volume_ratio=max(1.0, volatility_pct / 2.0),
                        rsi=implied_rsi,
                        signal=signal,
                        score=score,
                    )
                )
            except ConnectionError as exc:
                logger.warning("Failed to analyze overseas %s: %s", stock_code, exc)
            except Exception as exc:
                logger.error("Unexpected error analyzing overseas %s: %s", stock_code, exc)
        logger.info(
            "Overseas symbol fallback scan found %d candidates for %s",
            len(candidates),
            market.name,
        )
        return candidates

    def get_stock_codes(self, candidates: list[ScanCandidate]) -> list[str]:
        """Extract stock codes from candidates for watchlist update.

@@ -190,3 +372,78 @@ class SmartVolatilityScanner:
            List of stock codes
        """
        return [c.stock_code for c in candidates]


def _safe_float(value: Any, default: float = 0.0) -> float:
    """Convert arbitrary values to float safely."""
    if value in (None, ""):
        return default
    try:
        return float(value)
    except (TypeError, ValueError):
        return default


def _extract_stock_code(row: dict[str, Any]) -> str:
    """Extract normalized stock code from various API schemas."""
    return (
        str(
            row.get("symb")
            or row.get("ovrs_pdno")
            or row.get("stock_code")
            or row.get("pdno")
            or ""
        )
        .strip()
        .upper()
    )


def _extract_last_price(row: dict[str, Any]) -> float:
    """Extract last/close-like price from API schema variants."""
    return _safe_float(
        row.get("last")
        or row.get("ovrs_nmix_prpr")
        or row.get("stck_prpr")
        or row.get("price")
        or row.get("close")
    )


def _extract_change_rate_pct(row: dict[str, Any]) -> float:
    """Extract daily change rate (%) from API schema variants."""
    return _safe_float(
        row.get("rate")
        or row.get("change_rate")
        or row.get("prdy_ctrt")
        or row.get("evlu_pfls_rt")
        or row.get("chg_rt")
    )


def _extract_volume(row: dict[str, Any]) -> float:
    """Extract volume/traded-amount proxy from schema variants."""
    return _safe_float(
        row.get("tvol") or row.get("acml_vol") or row.get("vol") or row.get("volume")
    )


def _extract_intraday_range_pct(row: dict[str, Any], price: float) -> float:
    """Estimate intraday range percentage from high/low fields."""
    if price <= 0:
        return 0.0
    high = _safe_float(
        row.get("high")
        or row.get("ovrs_hgpr")
        or row.get("stck_hgpr")
        or row.get("day_hgpr")
    )
    low = _safe_float(
        row.get("low")
        or row.get("ovrs_lwpr")
        or row.get("stck_lwpr")
        or row.get("day_lwpr")
    )
    if high <= 0 or low <= 0 or high < low:
        return 0.0
    return (high - low) / price * 100.0
111 src/analysis/triple_barrier.py (Normal file)
@@ -0,0 +1,111 @@
"""Triple barrier labeler utilities.

Implements first-touch labeling with upper/lower/time barriers.
"""

from __future__ import annotations

from dataclasses import dataclass
from typing import Literal, Sequence

TieBreakMode = Literal["stop_first", "take_first"]


@dataclass(frozen=True)
class TripleBarrierSpec:
    take_profit_pct: float
    stop_loss_pct: float
    max_holding_bars: int
    tie_break: TieBreakMode = "stop_first"


@dataclass(frozen=True)
class TripleBarrierLabel:
    label: int  # +1 take-profit first, -1 stop-loss first, 0 timeout
    touched: Literal["take_profit", "stop_loss", "time"]
    touch_bar: int
    entry_price: float
    upper_barrier: float
    lower_barrier: float


def label_with_triple_barrier(
    *,
    highs: Sequence[float],
    lows: Sequence[float],
    closes: Sequence[float],
    entry_index: int,
    side: int,
    spec: TripleBarrierSpec,
) -> TripleBarrierLabel:
    """Label one entry using triple-barrier first-touch rule.

    Args:
        highs/lows/closes: OHLC components with identical length.
        entry_index: Entry bar index in the sequences.
        side: +1 for long, -1 for short.
        spec: Barrier specification.
    """
    if side not in {1, -1}:
        raise ValueError("side must be +1 or -1")
    if len(highs) != len(lows) or len(highs) != len(closes):
        raise ValueError("highs, lows, closes lengths must match")
    if entry_index < 0 or entry_index >= len(closes):
        raise IndexError("entry_index out of range")
    if spec.max_holding_bars <= 0:
        raise ValueError("max_holding_bars must be positive")

    entry_price = float(closes[entry_index])
    if entry_price <= 0:
        raise ValueError("entry price must be positive")

    if side == 1:
        upper = entry_price * (1.0 + spec.take_profit_pct)
        lower = entry_price * (1.0 - spec.stop_loss_pct)
    else:
        # For short side, favorable move is down.
        upper = entry_price * (1.0 + spec.stop_loss_pct)
        lower = entry_price * (1.0 - spec.take_profit_pct)

    last_index = min(len(closes) - 1, entry_index + spec.max_holding_bars)
    for idx in range(entry_index + 1, last_index + 1):
        h = float(highs[idx])
        l = float(lows[idx])

        up_touch = h >= upper
        down_touch = l <= lower
        if not up_touch and not down_touch:
            continue

        if up_touch and down_touch:
            if spec.tie_break == "stop_first":
                touched = "stop_loss"
                label = -1
            else:
                touched = "take_profit"
                label = 1
        elif up_touch:
            touched = "take_profit" if side == 1 else "stop_loss"
            label = 1 if side == 1 else -1
        else:
            touched = "stop_loss" if side == 1 else "take_profit"
            label = -1 if side == 1 else 1

        return TripleBarrierLabel(
            label=label,
            touched=touched,
            touch_bar=idx,
            entry_price=entry_price,
            upper_barrier=upper,
            lower_barrier=lower,
        )

    return TripleBarrierLabel(
        label=0,
        touched="time",
        touch_bar=last_index,
        entry_price=entry_price,
        upper_barrier=upper,
        lower_barrier=lower,
    )
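A short first-touch example for the labeler above; the 2.8%/1.2% barriers echo the KR arm/break-even percentages proposed in plan v2 section 8, and the price series is made up:

```python
spec = TripleBarrierSpec(take_profit_pct=0.028, stop_loss_pct=0.012, max_holding_bars=26)
result = label_with_triple_barrier(
    highs=[100.0, 101.0, 103.0, 104.0],
    lows=[99.0, 100.0, 101.0, 102.0],
    closes=[100.0, 101.0, 102.0, 103.0],
    entry_index=0,
    side=1,
    spec=spec,
)
# The upper barrier 102.8 is first touched by the bar-2 high of 103.0.
assert result.label == 1 and result.touched == "take_profit" and result.touch_bar == 2
```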
74 src/analysis/walk_forward_split.py (Normal file)
@@ -0,0 +1,74 @@
"""Walk-forward splitter with purge/embargo controls."""

from __future__ import annotations

from dataclasses import dataclass


@dataclass(frozen=True)
class WalkForwardFold:
    train_indices: list[int]
    test_indices: list[int]

    @property
    def train_size(self) -> int:
        return len(self.train_indices)

    @property
    def test_size(self) -> int:
        return len(self.test_indices)


def generate_walk_forward_splits(
    *,
    n_samples: int,
    train_size: int,
    test_size: int,
    step_size: int | None = None,
    purge_size: int = 0,
    embargo_size: int = 0,
    min_train_size: int = 1,
) -> list[WalkForwardFold]:
    """Generate chronological folds with purge/embargo leakage controls."""
    if n_samples <= 0:
        raise ValueError("n_samples must be positive")
    if train_size <= 0 or test_size <= 0:
        raise ValueError("train_size and test_size must be positive")
    if purge_size < 0 or embargo_size < 0:
        raise ValueError("purge_size and embargo_size must be >= 0")
    if min_train_size <= 0:
        raise ValueError("min_train_size must be positive")

    step = step_size if step_size is not None else test_size
    if step <= 0:
        raise ValueError("step_size must be positive")

    folds: list[WalkForwardFold] = []
    prev_test_end: int | None = None
    test_start = train_size + purge_size

    while test_start + test_size <= n_samples:
        test_end = test_start + test_size - 1
        train_end = test_start - purge_size - 1
        if train_end < 0:
            break

        train_start = max(0, train_end - train_size + 1)
        train_indices = list(range(train_start, train_end + 1))

        if prev_test_end is not None and embargo_size > 0:
            emb_from = prev_test_end + 1
            emb_to = prev_test_end + embargo_size
            train_indices = [i for i in train_indices if i < emb_from or i > emb_to]

        if len(train_indices) >= min_train_size:
            folds.append(
                WalkForwardFold(
                    train_indices=train_indices,
                    test_indices=list(range(test_start, test_end + 1)),
                )
            )
        prev_test_end = test_end
        test_start += step

    return folds
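A small usage sketch of the splitter above; the sizes are arbitrary and chosen so the embargo filter is visible (it only bites when the step is large enough for a later train window to reach past the previous test window):

```python
folds = generate_walk_forward_splits(
    n_samples=20, train_size=8, test_size=4,
    step_size=7, purge_size=1, embargo_size=2,
)
# Fold 0: train 0..7, test 9..12 (index 8 purged).
# Fold 1: train 7..12 (indices 13..14 embargoed after the previous test), test 16..19.
for fold in folds:
    print(fold.train_indices, fold.test_indices)
```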
@@ -346,8 +346,10 @@ class GeminiClient:
        # Validate required fields
        if not all(k in data for k in ("action", "confidence", "rationale")):
            logger.warning("Missing fields in Gemini response — defaulting to HOLD")
            # Preserve raw text in rationale so prompt_override callers (e.g. pre_market_planner)
            # can extract their own JSON format from decision.rationale (#245)
            return TradeDecision(
                action="HOLD", confidence=0, rationale="Missing required fields"
                action="HOLD", confidence=0, rationale=raw
            )

        action = str(data["action"]).upper()

@@ -410,8 +412,10 @@ class GeminiClient:
                cached=True,
            )

        # Build optimized prompt
        if self._enable_optimization:
        # Build prompt (prompt_override takes priority for callers like pre_market_planner)
        if "prompt_override" in market_data:
            prompt = market_data["prompt_override"]
        elif self._enable_optimization:
            prompt = self._optimizer.build_compressed_prompt(market_data)
        else:
            prompt = await self.build_prompt(market_data, news_sentiment)

@@ -437,6 +441,18 @@ class GeminiClient:
                action="HOLD", confidence=0, rationale=f"API error: {exc}", token_count=token_count
            )

        # prompt_override callers (e.g. pre_market_planner) expect raw text back,
        # not a parsed TradeDecision. Skip parse_response to avoid spurious
        # "Missing fields" warnings and return the raw response directly. (#247)
        if "prompt_override" in market_data:
            logger.info(
                "Gemini raw response received (prompt_override, tokens=%d)", token_count
            )
            # Not a trade decision — don't inflate _total_decisions metrics
            return TradeDecision(
                action="HOLD", confidence=0, rationale=raw, token_count=token_count
            )

        decision = self.parse_response(raw)
        self._total_decisions += 1

@@ -179,8 +179,8 @@ class PromptOptimizer:
            # Minimal instructions
            prompt = (
                f"{market_name} trader. Analyze:\n{data_str}\n\n"
                'Return JSON: {"act":"BUY"|"SELL"|"HOLD","conf":<0-100>,"reason":"<text>"}\n'
                "Rules: act=BUY/SELL/HOLD, conf=0-100, reason=concise. No markdown."
                'Return JSON: {"action":"BUY"|"SELL"|"HOLD","confidence":<0-100>,"rationale":"<text>"}\n'
                "Rules: action=BUY/SELL/HOLD, confidence=0-100, rationale=concise. No markdown."
            )
        else:
            # Data only (for cached contexts where instructions are known)
@@ -8,7 +8,7 @@ from __future__ import annotations
import asyncio
import logging
import ssl
from typing import Any
from typing import Any, cast

import aiohttp

@@ -20,6 +20,39 @@ _KIS_VTS_HOST = "openapivts.koreainvestment.com"
logger = logging.getLogger(__name__)


def kr_tick_unit(price: float) -> int:
    """Return KRX tick size for the given price level.

    KRX price tick rules (domestic stocks):
        price < 2,000 → 1 KRW
        2,000 ≤ price < 5,000 → 5 KRW
        5,000 ≤ price < 20,000 → 10 KRW
        20,000 ≤ price < 50,000 → 50 KRW
        50,000 ≤ price < 200,000 → 100 KRW
        200,000 ≤ price < 500,000 → 500 KRW
        500,000 ≤ price → 1,000 KRW
    """
    if price < 2_000:
        return 1
    if price < 5_000:
        return 5
    if price < 20_000:
        return 10
    if price < 50_000:
        return 50
    if price < 200_000:
        return 100
    if price < 500_000:
        return 500
    return 1_000


def kr_round_down(price: float) -> int:
    """Round *down* price to the nearest KRX tick unit."""
    tick = kr_tick_unit(price)
    return int(price // tick * tick)
|
||||
|
||||
|
||||
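A quick sanity check of the tick rules above (not part of the diff; values follow the KRX table in the docstring):

    assert kr_tick_unit(1_999) == 1
    assert kr_tick_unit(2_000) == 5
    assert kr_tick_unit(49_999) == 50
    assert kr_round_down(51_234) == 51_200  # 100 KRW tick band
    assert kr_round_down(7_777) == 7_770    # 10 KRW tick band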
 class LeakyBucket:
     """Simple leaky-bucket rate limiter for async code."""

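The limiter's body lies outside this hunk, so the shape below is an assumption rather than the project's code: a minimal async leaky bucket that spaces acquisitions at a fixed rate.

    import asyncio
    import time

    class LeakyBucketSketch:
        """Allow at most `rate` acquisitions per second, smoothing bursts."""

        def __init__(self, rate: float) -> None:
            self._interval = 1.0 / rate   # seconds between permitted calls
            self._next_free = 0.0
            self._lock = asyncio.Lock()

        async def acquire(self) -> None:
            async with self._lock:
                now = time.monotonic()
                wait = self._next_free - now
                self._next_free = max(now, self._next_free) + self._interval
            if wait > 0:
                await asyncio.sleep(wait)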
@@ -104,12 +137,14 @@ class KISBroker:
         time_since_last_attempt = now - self._last_refresh_attempt
         if time_since_last_attempt < self._refresh_cooldown:
             remaining = self._refresh_cooldown - time_since_last_attempt
-            error_msg = (
-                f"Token refresh on cooldown. "
-                f"Retry in {remaining:.1f}s (KIS allows 1/minute)"
+            # Do not fail fast here. If token is unavailable, upstream calls
+            # will all fail for up to a minute and scanning returns no trades.
+            logger.warning(
+                "Token refresh on cooldown. Waiting %.1fs before retry (KIS allows 1/minute)",
+                remaining,
             )
-            logger.warning(error_msg)
-            raise ConnectionError(error_msg)
+            await asyncio.sleep(remaining)
+            now = asyncio.get_event_loop().time()

         logger.info("Refreshing KIS access token")
         self._last_refresh_attempt = now
@@ -196,12 +231,64 @@ class KISBroker:
         except (TimeoutError, aiohttp.ClientError) as exc:
             raise ConnectionError(f"Network error fetching orderbook: {exc}") from exc

+    async def get_current_price(
+        self, stock_code: str
+    ) -> tuple[float, float, float]:
+        """Fetch current price data for a domestic stock.
+
+        Uses the ``inquire-price`` API (FHKST01010100), which works in both
+        real and VTS environments and returns the actual last-traded price.
+
+        Returns:
+            (current_price, prdy_ctrt, frgn_ntby_qty)
+            - current_price: Last traded price in KRW.
+            - prdy_ctrt: Day change rate (%).
+            - frgn_ntby_qty: Foreigner net buy quantity.
+        """
+        await self._rate_limiter.acquire()
+        session = self._get_session()
+
+        headers = await self._auth_headers("FHKST01010100")
+        params = {
+            "FID_COND_MRKT_DIV_CODE": "J",
+            "FID_INPUT_ISCD": stock_code,
+        }
+        url = f"{self._base_url}/uapi/domestic-stock/v1/quotations/inquire-price"
+
+        def _f(val: str | None) -> float:
+            try:
+                return float(val or "0")
+            except ValueError:
+                return 0.0
+
+        try:
+            async with session.get(url, headers=headers, params=params) as resp:
+                if resp.status != 200:
+                    text = await resp.text()
+                    raise ConnectionError(
+                        f"get_current_price failed ({resp.status}): {text}"
+                    )
+                data = await resp.json()
+                out = data.get("output", {})
+                return (
+                    _f(out.get("stck_prpr")),
+                    _f(out.get("prdy_ctrt")),
+                    _f(out.get("frgn_ntby_qty")),
+                )
+        except (TimeoutError, aiohttp.ClientError) as exc:
+            raise ConnectionError(
+                f"Network error fetching current price: {exc}"
+            ) from exc
+
     async def get_balance(self) -> dict[str, Any]:
         """Fetch current account balance and holdings."""
         await self._rate_limiter.acquire()
         session = self._get_session()

-        headers = await self._auth_headers("VTTC8434R")  # paper-trading balance inquiry
+        # TR_ID: live TTTC8434R, paper VTTC8434R
+        # Source: 한국투자증권 오픈API 전체문서 (20260221) — '국내주식 잔고조회' sheet
+        tr_id = "TTTC8434R" if self._settings.MODE == "live" else "VTTC8434R"
+        headers = await self._auth_headers(tr_id)
         params = {
             "CANO": self._account_no,
             "ACNT_PRDT_CD": self._product_cd,
@@ -246,14 +333,30 @@ class KISBroker:
         await self._rate_limiter.acquire()
         session = self._get_session()

-        tr_id = "VTTC0802U" if order_type == "BUY" else "VTTC0801U"
+        # TR_ID: live BUY=TTTC0012U SELL=TTTC0011U, paper BUY=VTTC0012U SELL=VTTC0011U
+        # Source: 한국투자증권 오픈API 전체문서 (20260221) — '주식주문(현금)' sheet
+        # NOTE: TTTC0802U/VTTC0802U are margin-buy TR_IDs (40%-margin accounts only) — never use them for cash orders
+        if self._settings.MODE == "live":
+            tr_id = "TTTC0012U" if order_type == "BUY" else "TTTC0011U"
+        else:
+            tr_id = "VTTC0012U" if order_type == "BUY" else "VTTC0011U"
+
+        # KRX requires limit orders to be rounded down to the tick unit.
+        # ORD_DVSN: "00" = limit, "01" = market
+        if price > 0:
+            ord_dvsn = "00"  # limit order
+            ord_price = kr_round_down(price)
+        else:
+            ord_dvsn = "01"  # market order
+            ord_price = 0

         body = {
             "CANO": self._account_no,
             "ACNT_PRDT_CD": self._product_cd,
             "PDNO": stock_code,
-            "ORD_DVSN": "01" if price > 0 else "06",  # 01=limit, 06=market
+            "ORD_DVSN": ord_dvsn,
             "ORD_QTY": str(quantity),
-            "ORD_UNPR": str(price),
+            "ORD_UNPR": str(ord_price),
         }

         hash_key = await self._get_hash_key(body)
@@ -302,26 +405,46 @@ class KISBroker:
         await self._rate_limiter.acquire()
         session = self._get_session()

-        # TR_ID for volume ranking
-        tr_id = "FHPST01710000" if ranking_type == "volume" else "FHPST01710100"
+        if ranking_type == "volume":
+            # Volume ranking: FHPST01710000 / /quotations/volume-rank
+            tr_id = "FHPST01710000"
+            url = f"{self._base_url}/uapi/domestic-stock/v1/quotations/volume-rank"
+            params: dict[str, str] = {
+                "FID_COND_MRKT_DIV_CODE": "J",
+                "FID_COND_SCR_DIV_CODE": "20171",
+                "FID_INPUT_ISCD": "0000",
+                "FID_DIV_CLS_CODE": "0",
+                "FID_BLNG_CLS_CODE": "0",
+                "FID_TRGT_CLS_CODE": "111111111",
+                "FID_TRGT_EXLS_CLS_CODE": "0000000000",
+                "FID_INPUT_PRICE_1": "0",
+                "FID_INPUT_PRICE_2": "0",
+                "FID_VOL_CNT": "0",
+                "FID_INPUT_DATE_1": "",
+            }
+        else:
+            # Fluctuation-rate ranking: FHPST01700000 / /ranking/fluctuation (lowercase params)
+            tr_id = "FHPST01700000"
+            url = f"{self._base_url}/uapi/domestic-stock/v1/ranking/fluctuation"
+            params = {
+                "fid_cond_mrkt_div_code": "J",
+                "fid_cond_scr_div_code": "20170",
+                "fid_input_iscd": "0000",
+                "fid_rank_sort_cls_code": "0",
+                "fid_input_cnt_1": str(limit),
+                "fid_prc_cls_code": "0",
+                "fid_input_price_1": "0",
+                "fid_input_price_2": "0",
+                "fid_vol_cnt": "0",
+                "fid_trgt_cls_code": "0",
+                "fid_trgt_exls_cls_code": "0",
+                "fid_div_cls_code": "0",
+                "fid_rsfl_rate1": "0",
+                "fid_rsfl_rate2": "0",
+            }

         headers = await self._auth_headers(tr_id)

-        params = {
-            "FID_COND_MRKT_DIV_CODE": "J",  # Stock/ETF/ETN
-            "FID_COND_SCR_DIV_CODE": "20001",  # Volume surge
-            "FID_INPUT_ISCD": "0000",  # All stocks
-            "FID_DIV_CLS_CODE": "0",  # All types
-            "FID_BLNG_CLS_CODE": "0",
-            "FID_TRGT_CLS_CODE": "111111111",
-            "FID_TRGT_EXLS_CLS_CODE": "000000",
-            "FID_INPUT_PRICE_1": "0",
-            "FID_INPUT_PRICE_2": "0",
-            "FID_VOL_CNT": "0",
-            "FID_INPUT_DATE_1": "",
-        }
-
-        url = f"{self._base_url}/uapi/domestic-stock/v1/quotations/volume-rank"
-
         try:
             async with session.get(url, headers=headers, params=params) as resp:
                 if resp.status != 200:
@@ -343,7 +466,7 @@ class KISBroker:
             rankings = []
             for item in data.get("output", [])[:limit]:
                 rankings.append({
-                    "stock_code": item.get("mksc_shrn_iscd", ""),
+                    "stock_code": item.get("stck_shrn_iscd") or item.get("mksc_shrn_iscd", ""),
                     "name": item.get("hts_kor_isnm", ""),
                     "price": _safe_float(item.get("stck_prpr", "0")),
                     "volume": _safe_float(item.get("acml_vol", "0")),
@@ -355,6 +478,112 @@ class KISBroker:
         except (TimeoutError, aiohttp.ClientError) as exc:
             raise ConnectionError(f"Network error fetching rankings: {exc}") from exc

+    async def get_domestic_pending_orders(self) -> list[dict[str, Any]]:
+        """Fetch unfilled (pending) domestic limit orders.
+
+        The KIS pending-orders API (TTTC0084R) is unsupported in paper (VTS)
+        mode, so this method returns an empty list immediately when MODE is
+        not "live".
+
+        Returns:
+            List of pending order dicts from the KIS ``output`` field.
+            Each dict includes keys such as ``odno``, ``orgn_odno``,
+            ``ord_gno_brno``, ``psbl_qty``, ``sll_buy_dvsn_cd``, ``pdno``.
+        """
+        if self._settings.MODE != "live":
+            logger.debug(
+                "get_domestic_pending_orders: paper mode — TTTC0084R unsupported, returning []"
+            )
+            return []
+
+        await self._rate_limiter.acquire()
+        session = self._get_session()
+
+        # TR_ID: live TTTC0084R (unsupported in paper mode)
+        # Source: 한국투자증권 오픈API 전체문서 (20260221) — '주식 미체결조회' sheet
+        headers = await self._auth_headers("TTTC0084R")
+        params = {
+            "CANO": self._account_no,
+            "ACNT_PRDT_CD": self._product_cd,
+            "INQR_DVSN_1": "0",
+            "INQR_DVSN_2": "0",
+            "CTX_AREA_FK100": "",
+            "CTX_AREA_NK100": "",
+        }
+        url = f"{self._base_url}/uapi/domestic-stock/v1/trading/inquire-psbl-rvsecncl"
+
+        try:
+            async with session.get(url, headers=headers, params=params) as resp:
+                if resp.status != 200:
+                    text = await resp.text()
+                    raise ConnectionError(
+                        f"get_domestic_pending_orders failed ({resp.status}): {text}"
+                    )
+                data = await resp.json()
+                return data.get("output", []) or []
+        except (TimeoutError, aiohttp.ClientError) as exc:
+            raise ConnectionError(
+                f"Network error fetching domestic pending orders: {exc}"
+            ) from exc
+
+    async def cancel_domestic_order(
+        self,
+        stock_code: str,
+        orgn_odno: str,
+        krx_fwdg_ord_orgno: str,
+        qty: int,
+    ) -> dict[str, Any]:
+        """Cancel an unfilled domestic limit order.
+
+        Args:
+            stock_code: 6-digit domestic stock code (``pdno``).
+            orgn_odno: Original order number from pending-orders response
+                (``orgn_odno`` field).
+            krx_fwdg_ord_orgno: KRX forwarding order branch number from
+                pending-orders response (``ord_gno_brno`` field).
+            qty: Quantity to cancel (use ``psbl_qty`` from pending order).
+
+        Returns:
+            Raw KIS API response dict (check ``rt_cd == "0"`` for success).
+        """
+        await self._rate_limiter.acquire()
+        session = self._get_session()
+
+        # TR_ID: live TTTC0013U, paper VTTC0013U
+        # Source: 한국투자증권 오픈API 전체문서 (20260221) — '주식주문(정정취소)' sheet
+        tr_id = "TTTC0013U" if self._settings.MODE == "live" else "VTTC0013U"
+
+        body = {
+            "CANO": self._account_no,
+            "ACNT_PRDT_CD": self._product_cd,
+            "KRX_FWDG_ORD_ORGNO": krx_fwdg_ord_orgno,
+            "ORGN_ODNO": orgn_odno,
+            "ORD_DVSN": "00",
+            "ORD_QTY": str(qty),
+            "ORD_UNPR": "0",
+            "RVSE_CNCL_DVSN_CD": "02",
+            "QTY_ALL_ORD_YN": "Y",
+        }
+
+        hash_key = await self._get_hash_key(body)
+        headers = await self._auth_headers(tr_id)
+        headers["hashkey"] = hash_key
+
+        url = f"{self._base_url}/uapi/domestic-stock/v1/trading/order-rvsecncl"
+
+        try:
+            async with session.post(url, headers=headers, json=body) as resp:
+                if resp.status != 200:
+                    text = await resp.text()
+                    raise ConnectionError(
+                        f"cancel_domestic_order failed ({resp.status}): {text}"
+                    )
+                return cast(dict[str, Any], await resp.json())
+        except (TimeoutError, aiohttp.ClientError) as exc:
+            raise ConnectionError(
+                f"Network error cancelling domestic order: {exc}"
+            ) from exc
+
     async def get_daily_prices(
         self,
         stock_code: str,
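The two methods above compose into a cancel-all flow. A sketch under the field names documented in the docstrings (the wrapper function itself is hypothetical):

    async def cancel_all_domestic_pending(broker: KISBroker) -> None:
        for order in await broker.get_domestic_pending_orders():
            result = await broker.cancel_domestic_order(
                stock_code=order["pdno"],
                orgn_odno=order["orgn_odno"],
                krx_fwdg_ord_orgno=order["ord_gno_brno"],
                qty=int(order["psbl_qty"]),
            )
            if result.get("rt_cd") != "0":
                logger.warning("cancel failed: %s", result.get("msg1", ""))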
@@ -12,6 +12,38 @@ from src.broker.kis_api import KISBroker

 logger = logging.getLogger(__name__)


+# Ranking API uses different exchange codes than order/quote APIs.
+_RANKING_EXCHANGE_MAP: dict[str, str] = {
+    "NASD": "NAS",
+    "NYSE": "NYS",
+    "AMEX": "AMS",
+    "SEHK": "HKS",
+    "SHAA": "SHS",
+    "SZAA": "SZS",
+    "HSX": "HSX",
+    "HNX": "HNX",
+    "TSE": "TSE",
+}
+
+# Price inquiry API (HHDFS00000300) uses the same short exchange codes as rankings.
+# NASD → NAS, NYSE → NYS, AMEX → AMS (confirmed: AMEX returns empty, AMS returns price).
+_PRICE_EXCHANGE_MAP: dict[str, str] = _RANKING_EXCHANGE_MAP
+
+# Cancel order TR_IDs per exchange code — (live_tr_id, paper_tr_id).
+# Source: 한국투자증권 오픈API 전체문서 (20260221) — '해외주식 주문취소' sheet
+_CANCEL_TR_ID_MAP: dict[str, tuple[str, str]] = {
+    "NASD": ("TTTT1004U", "VTTT1004U"),
+    "NYSE": ("TTTT1004U", "VTTT1004U"),
+    "AMEX": ("TTTT1004U", "VTTT1004U"),
+    "SEHK": ("TTTS1003U", "VTTS1003U"),
+    "TSE": ("TTTS0309U", "VTTS0309U"),
+    "SHAA": ("TTTS0302U", "VTTS0302U"),
+    "SZAA": ("TTTS0306U", "VTTS0306U"),
+    "HNX": ("TTTS0312U", "VTTS0312U"),
+    "HSX": ("TTTS0312U", "VTTS0312U"),
+}
+
+
 class OverseasBroker:
     """KIS Overseas Stock API wrapper that reuses KISBroker infrastructure."""

@@ -44,9 +76,11 @@ class OverseasBroker:
         session = self._broker._get_session()

         headers = await self._broker._auth_headers("HHDFS00000300")
+        # Map internal exchange codes to the short form expected by the price API.
+        price_excd = _PRICE_EXCHANGE_MAP.get(exchange_code, exchange_code)
         params = {
             "AUTH": "",
-            "EXCD": exchange_code,
+            "EXCD": price_excd,
             "SYMB": stock_code,
         }
         url = f"{self._broker._base_url}/uapi/overseas-price/v1/quotations/price"
@@ -64,6 +98,83 @@ class OverseasBroker:
                 f"Network error fetching overseas price: {exc}"
             ) from exc

+    async def fetch_overseas_rankings(
+        self,
+        exchange_code: str,
+        ranking_type: str = "fluctuation",
+        limit: int = 30,
+    ) -> list[dict[str, Any]]:
+        """Fetch overseas rankings (price change or volume surge).
+
+        Ranking API specs may differ by account/product. Endpoint paths and
+        TR_IDs are configurable via settings and can be overridden in .env.
+        """
+        if not self._broker._settings.OVERSEAS_RANKING_ENABLED:
+            return []
+
+        await self._broker._rate_limiter.acquire()
+        session = self._broker._get_session()
+
+        ranking_excd = _RANKING_EXCHANGE_MAP.get(exchange_code, exchange_code)
+
+        if ranking_type == "volume":
+            tr_id = self._broker._settings.OVERSEAS_RANKING_VOLUME_TR_ID
+            path = self._broker._settings.OVERSEAS_RANKING_VOLUME_PATH
+            params: dict[str, str] = {
+                "KEYB": "",  # NEXT KEY BUFF — required, leave blank
+                "AUTH": "",
+                "EXCD": ranking_excd,
+                "MIXN": "0",
+                "VOL_RANG": "0",
+            }
+        else:
+            tr_id = self._broker._settings.OVERSEAS_RANKING_FLUCT_TR_ID
+            path = self._broker._settings.OVERSEAS_RANKING_FLUCT_PATH
+            params = {
+                "KEYB": "",  # NEXT KEY BUFF — required, leave blank
+                "AUTH": "",
+                "EXCD": ranking_excd,
+                "NDAY": "0",
+                "GUBN": "1",  # 0=decliners, 1=gainers — the volatility scanner prefers surging names
+                "VOL_RANG": "0",
+            }
+
+        headers = await self._broker._auth_headers(tr_id)
+        url = f"{self._broker._base_url}{path}"
+
+        try:
+            async with session.get(url, headers=headers, params=params) as resp:
+                if resp.status != 200:
+                    text = await resp.text()
+                    if resp.status == 404:
+                        logger.warning(
+                            "Overseas ranking endpoint unavailable (404) for %s/%s; "
+                            "using symbol fallback scan",
+                            exchange_code,
+                            ranking_type,
+                        )
+                        return []
+                    raise ConnectionError(
+                        f"fetch_overseas_rankings failed ({resp.status}): {text}"
+                    )
+
+                data = await resp.json()
+                rows = self._extract_ranking_rows(data)
+                if rows:
+                    return rows[:limit]
+
+                logger.debug(
+                    "Overseas ranking returned empty for %s/%s (keys=%s)",
+                    exchange_code,
+                    ranking_type,
+                    list(data.keys()),
+                )
+                return []
+        except (TimeoutError, aiohttp.ClientError) as exc:
+            raise ConnectionError(
+                f"Network error fetching overseas rankings: {exc}"
+            ) from exc
+
     async def get_overseas_balance(self, exchange_code: str) -> dict[str, Any]:
         """
         Fetch overseas account balance.
@@ -80,8 +191,12 @@ class OverseasBroker:
         await self._broker._rate_limiter.acquire()
         session = self._broker._get_session()

-        # Virtual trading TR_ID for overseas balance inquiry
-        headers = await self._broker._auth_headers("VTTS3012R")
+        # TR_ID: live TTTS3012R, paper VTTS3012R
+        # Source: 한국투자증권 오픈API 전체문서 (20260221) — '해외주식 잔고조회' sheet
+        balance_tr_id = (
+            "TTTS3012R" if self._broker._settings.MODE == "live" else "VTTS3012R"
+        )
+        headers = await self._broker._auth_headers(balance_tr_id)
         params = {
             "CANO": self._broker._account_no,
             "ACNT_PRDT_CD": self._broker._product_cd,
@@ -107,6 +222,59 @@ class OverseasBroker:
                 f"Network error fetching overseas balance: {exc}"
             ) from exc

+    async def get_overseas_buying_power(
+        self,
+        exchange_code: str,
+        stock_code: str,
+        price: float,
+    ) -> dict[str, Any]:
+        """
+        Fetch overseas buying power for a specific stock and price.
+
+        Args:
+            exchange_code: Exchange code (e.g., "NASD", "NYSE")
+            stock_code: Stock ticker symbol
+            price: Current stock price (used for quantity calculation)
+
+        Returns:
+            API response; key field: output.ord_psbl_frcr_amt (orderable foreign-currency amount)
+
+        Raises:
+            ConnectionError: On network or API errors
+        """
+        await self._broker._rate_limiter.acquire()
+        session = self._broker._get_session()
+
+        # TR_ID: live TTTS3007R, paper VTTS3007R
+        # Source: 한국투자증권 오픈API 전체문서 (20260221) — '해외주식 매수가능금액조회' sheet
+        ps_tr_id = (
+            "TTTS3007R" if self._broker._settings.MODE == "live" else "VTTS3007R"
+        )
+        headers = await self._broker._auth_headers(ps_tr_id)
+        params = {
+            "CANO": self._broker._account_no,
+            "ACNT_PRDT_CD": self._broker._product_cd,
+            "OVRS_EXCG_CD": exchange_code,
+            "OVRS_ORD_UNPR": f"{price:.2f}",
+            "ITEM_CD": stock_code,
+        }
+        url = (
+            f"{self._broker._base_url}/uapi/overseas-stock/v1/trading/inquire-psamount"
+        )
+
+        try:
+            async with session.get(url, headers=headers, params=params) as resp:
+                if resp.status != 200:
+                    text = await resp.text()
+                    raise ConnectionError(
+                        f"get_overseas_buying_power failed ({resp.status}): {text}"
+                    )
+                return await resp.json()
+        except (TimeoutError, aiohttp.ClientError) as exc:
+            raise ConnectionError(
+                f"Network error fetching overseas buying power: {exc}"
+            ) from exc
+
     async def send_overseas_order(
         self,
         exchange_code: str,
@@ -134,8 +302,12 @@ class OverseasBroker:
         await self._broker._rate_limiter.acquire()
         session = self._broker._get_session()

-        # Virtual trading TR_IDs for overseas orders
-        tr_id = "VTTT1002U" if order_type == "BUY" else "VTTT1006U"
+        # TR_ID: live BUY=TTTT1002U SELL=TTTT1006U, paper BUY=VTTT1002U SELL=VTTT1001U
+        # Source: 한국투자증권 오픈API 전체문서 (20260221) — '해외주식 주문' sheet
+        if self._broker._settings.MODE == "live":
+            tr_id = "TTTT1002U" if order_type == "BUY" else "TTTT1006U"
+        else:
+            tr_id = "VTTT1002U" if order_type == "BUY" else "VTTT1001U"

         body = {
             "CANO": self._broker._account_no,
@@ -162,20 +334,158 @@ class OverseasBroker:
                         f"send_overseas_order failed ({resp.status}): {text}"
                     )
                 data = await resp.json()
-                logger.info(
-                    "Overseas order submitted",
-                    extra={
-                        "exchange": exchange_code,
-                        "stock_code": stock_code,
-                        "action": order_type,
-                    },
-                )
+                rt_cd = data.get("rt_cd", "")
+                msg1 = data.get("msg1", "")
+                if rt_cd == "0":
+                    logger.info(
+                        "Overseas order submitted",
+                        extra={
+                            "exchange": exchange_code,
+                            "stock_code": stock_code,
+                            "action": order_type,
+                        },
+                    )
+                else:
+                    logger.warning(
+                        "Overseas order rejected (rt_cd=%s): %s [%s %s %s qty=%d]",
+                        rt_cd,
+                        msg1,
+                        order_type,
+                        stock_code,
+                        exchange_code,
+                        quantity,
+                    )
                 return data
         except (TimeoutError, aiohttp.ClientError) as exc:
             raise ConnectionError(
                 f"Network error sending overseas order: {exc}"
             ) from exc

+    async def get_overseas_pending_orders(
+        self, exchange_code: str
+    ) -> list[dict[str, Any]]:
+        """Fetch unfilled (pending) overseas orders for a given exchange.
+
+        Args:
+            exchange_code: Exchange code (e.g., "NASD", "SEHK").
+                For US markets, NASD returns all US pending orders (NASD/NYSE/AMEX).
+
+        Returns:
+            List of pending order dicts with fields: odno, pdno, sll_buy_dvsn_cd,
+            ft_ord_qty, nccs_qty, ft_ord_unpr3, ovrs_excg_cd.
+            Always returns [] in paper mode (TTTS3018R is live-only).
+
+        Raises:
+            ConnectionError: On network or API errors (live mode only).
+        """
+        if self._broker._settings.MODE != "live":
+            logger.debug(
+                "Pending orders API (TTTS3018R) not supported in paper mode; returning []"
+            )
+            return []
+
+        await self._broker._rate_limiter.acquire()
+        session = self._broker._get_session()
+
+        # TTTS3018R: overseas pending-order inquiry (live only)
+        # Source: 한국투자증권 오픈API 전체문서 (20260221) — '해외주식 미체결조회' sheet
+        headers = await self._broker._auth_headers("TTTS3018R")
+        params = {
+            "CANO": self._broker._account_no,
+            "ACNT_PRDT_CD": self._broker._product_cd,
+            "OVRS_EXCG_CD": exchange_code,
+            "SORT_SQN": "DS",
+            "CTX_AREA_FK200": "",
+            "CTX_AREA_NK200": "",
+        }
+        url = (
+            f"{self._broker._base_url}/uapi/overseas-stock/v1/trading/inquire-nccs"
+        )
+
+        try:
+            async with session.get(url, headers=headers, params=params) as resp:
+                if resp.status != 200:
+                    text = await resp.text()
+                    raise ConnectionError(
+                        f"get_overseas_pending_orders failed ({resp.status}): {text}"
+                    )
+                data = await resp.json()
+                output = data.get("output", [])
+                if isinstance(output, list):
+                    return output
+                return []
+        except (TimeoutError, aiohttp.ClientError) as exc:
+            raise ConnectionError(
+                f"Network error fetching pending orders: {exc}"
+            ) from exc
+
+    async def cancel_overseas_order(
+        self,
+        exchange_code: str,
+        stock_code: str,
+        odno: str,
+        qty: int,
+    ) -> dict[str, Any]:
+        """Cancel an overseas limit order.
+
+        Args:
+            exchange_code: Exchange code (e.g., "NASD", "SEHK").
+            stock_code: Stock ticker symbol.
+            odno: Original order number to cancel.
+            qty: Unfilled quantity to cancel.
+
+        Returns:
+            API response dict containing rt_cd and msg1.
+
+        Raises:
+            ValueError: If exchange_code has no cancel TR_ID mapping.
+            ConnectionError: On network or API errors.
+        """
+        tr_ids = _CANCEL_TR_ID_MAP.get(exchange_code)
+        if tr_ids is None:
+            raise ValueError(f"No cancel TR_ID mapping for exchange: {exchange_code}")
+        live_tr_id, paper_tr_id = tr_ids
+        tr_id = live_tr_id if self._broker._settings.MODE == "live" else paper_tr_id
+
+        await self._broker._rate_limiter.acquire()
+        session = self._broker._get_session()
+
+        # RVSE_CNCL_DVSN_CD="02" means cancel (not revision).
+        # OVRS_ORD_UNPR must be "0" for cancellations.
+        # Source: 한국투자증권 오픈API 전체문서 (20260221) — '해외주식 정정취소주문' sheet
+        body = {
+            "CANO": self._broker._account_no,
+            "ACNT_PRDT_CD": self._broker._product_cd,
+            "OVRS_EXCG_CD": exchange_code,
+            "PDNO": stock_code,
+            "ORGN_ODNO": odno,
+            "RVSE_CNCL_DVSN_CD": "02",
+            "ORD_QTY": str(qty),
+            "OVRS_ORD_UNPR": "0",
+            "ORD_SVR_DVSN_CD": "0",
+        }
+
+        hash_key = await self._broker._get_hash_key(body)
+        headers = await self._broker._auth_headers(tr_id)
+        headers["hashkey"] = hash_key
+
+        url = (
+            f"{self._broker._base_url}/uapi/overseas-stock/v1/trading/order-rvsecncl"
+        )
+
+        try:
+            async with session.post(url, headers=headers, json=body) as resp:
+                if resp.status != 200:
+                    text = await resp.text()
+                    raise ConnectionError(
+                        f"cancel_overseas_order failed ({resp.status}): {text}"
+                    )
+                return await resp.json()
+        except (TimeoutError, aiohttp.ClientError) as exc:
+            raise ConnectionError(
+                f"Network error cancelling overseas order: {exc}"
+            ) from exc
+
     def _get_currency_code(self, exchange_code: str) -> str:
         """
         Map exchange code to currency code.
@@ -198,3 +508,11 @@ class OverseasBroker:
             "HSX": "VND",
         }
         return currency_map.get(exchange_code, "USD")
+
+    def _extract_ranking_rows(self, data: dict[str, Any]) -> list[dict[str, Any]]:
+        """Extract list rows from ranking response across schema variants."""
+        candidates = [data.get("output"), data.get("output1"), data.get("output2")]
+        for value in candidates:
+            if isinstance(value, list):
+                return [row for row in value if isinstance(row, dict)]
+        return []
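For illustration, the schema variants this helper absorbs (payloads invented; `broker` is an OverseasBroker instance):

    assert broker._extract_ranking_rows({"output2": [{"symb": "AAPL"}, "junk"]}) == [{"symb": "AAPL"}]  # non-dict rows dropped
    assert broker._extract_ranking_rows({"rt_cd": "0"}) == []  # no list key at all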
@@ -13,11 +13,11 @@ class Settings(BaseSettings):
     KIS_APP_KEY: str
     KIS_APP_SECRET: str
     KIS_ACCOUNT_NO: str  # format: "XXXXXXXX-XX"
-    KIS_BASE_URL: str = "https://openapivts.koreainvestment.com:9443"
+    KIS_BASE_URL: str = "https://openapivts.koreainvestment.com:29443"

     # Google Gemini
     GEMINI_API_KEY: str
-    GEMINI_MODEL: str = "gemini-pro"
+    GEMINI_MODEL: str = "gemini-2.0-flash"

     # External Data APIs (optional — for data-driven decisions)
     NEWS_API_KEY: str | None = None
@@ -38,6 +38,11 @@ class Settings(BaseSettings):
     RSI_MOMENTUM_THRESHOLD: int = Field(default=70, ge=50, le=100)
     VOL_MULTIPLIER: float = Field(default=2.0, gt=1.0, le=10.0)
     SCANNER_TOP_N: int = Field(default=3, ge=1, le=10)
+    POSITION_SIZING_ENABLED: bool = True
+    POSITION_BASE_ALLOCATION_PCT: float = Field(default=5.0, gt=0.0, le=30.0)
+    POSITION_MIN_ALLOCATION_PCT: float = Field(default=1.0, gt=0.0, le=20.0)
+    POSITION_MAX_ALLOCATION_PCT: float = Field(default=10.0, gt=0.0, le=50.0)
+    POSITION_VOLATILITY_TARGET_SCORE: float = Field(default=50.0, gt=0.0, le=100.0)

     # Database
     DB_PATH: str = "data/trade_logs.db"
@@ -50,10 +55,19 @@ class Settings(BaseSettings):
     # Trading mode
     MODE: str = Field(default="paper", pattern="^(paper|live)$")

+    # Simulated USD cash for VTS (paper) overseas trading.
+    # KIS VTS overseas balance API returns errors for most accounts.
+    # This value is used as a fallback when the balance API returns 0 in paper mode.
+    PAPER_OVERSEAS_CASH: float = Field(default=50000.0, ge=0.0)
+    USD_BUFFER_MIN: float = Field(default=1000.0, ge=0.0)
+
     # Trading frequency mode (daily = batch API calls, realtime = per-stock calls)
     TRADE_MODE: str = Field(default="daily", pattern="^(daily|realtime)$")
     DAILY_SESSIONS: int = Field(default=4, ge=1, le=10)
     SESSION_INTERVAL_HOURS: int = Field(default=6, ge=1, le=24)
+    ORDER_BLACKOUT_ENABLED: bool = True
+    ORDER_BLACKOUT_WINDOWS_KST: str = "23:30-00:10"
+    ORDER_BLACKOUT_QUEUE_MAX: int = Field(default=500, ge=10, le=5000)

     # Pre-Market Planner
     PRE_MARKET_MINUTES: int = Field(default=30, ge=10, le=120)
@@ -83,6 +97,33 @@ class Settings(BaseSettings):
     TELEGRAM_COMMANDS_ENABLED: bool = True
     TELEGRAM_POLLING_INTERVAL: float = 1.0  # seconds

+    # Telegram notification type filters (granular control)
+    # circuit_breaker is always sent regardless — safety-critical
+    TELEGRAM_NOTIFY_TRADES: bool = True  # BUY/SELL execution alerts
+    TELEGRAM_NOTIFY_MARKET_OPEN_CLOSE: bool = True  # Market open/close alerts
+    TELEGRAM_NOTIFY_FAT_FINGER: bool = True  # Fat-finger rejection alerts
+    TELEGRAM_NOTIFY_SYSTEM_EVENTS: bool = True  # System start/shutdown alerts
+    TELEGRAM_NOTIFY_PLAYBOOK: bool = True  # Playbook generated/failed alerts
+    TELEGRAM_NOTIFY_SCENARIO_MATCH: bool = True  # Scenario matched alerts (most frequent)
+    TELEGRAM_NOTIFY_ERRORS: bool = True  # Error alerts
+
+    # Overseas ranking API (KIS endpoint/TR_ID may vary by account/product)
+    # Override these from .env if your account uses different specs.
+    OVERSEAS_RANKING_ENABLED: bool = True
+    OVERSEAS_RANKING_FLUCT_TR_ID: str = "HHDFS76290000"
+    OVERSEAS_RANKING_VOLUME_TR_ID: str = "HHDFS76270000"
+    OVERSEAS_RANKING_FLUCT_PATH: str = (
+        "/uapi/overseas-stock/v1/ranking/updown-rate"
+    )
+    OVERSEAS_RANKING_VOLUME_PATH: str = (
+        "/uapi/overseas-stock/v1/ranking/volume-surge"
+    )
+
+    # Dashboard (optional)
+    DASHBOARD_ENABLED: bool = False
+    DASHBOARD_HOST: str = "127.0.0.1"
+    DASHBOARD_PORT: int = Field(default=8080, ge=1, le=65535)
+
     model_config = {"env_file": ".env", "env_file_encoding": "utf-8"}

     @property
@@ -96,4 +137,7 @@ class Settings(BaseSettings):
     @property
     def enabled_market_list(self) -> list[str]:
         """Parse ENABLED_MARKETS into list of market codes."""
-        return [m.strip() for m in self.ENABLED_MARKETS.split(",") if m.strip()]
+        from src.markets.schedule import expand_market_codes
+
+        raw = [m.strip() for m in self.ENABLED_MARKETS.split(",") if m.strip()]
+        return expand_market_codes(raw)
src/core/blackout_manager.py (new file, 105 lines)
@@ -0,0 +1,105 @@
"""Blackout policy and queued order-intent manager."""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
from collections import deque
|
||||
from dataclasses import dataclass
|
||||
from datetime import UTC, datetime, time
|
||||
from zoneinfo import ZoneInfo
|
||||
|
||||
|
||||
@dataclass(frozen=True)
|
||||
class BlackoutWindow:
|
||||
start: time
|
||||
end: time
|
||||
|
||||
def contains(self, kst_time: time) -> bool:
|
||||
if self.start <= self.end:
|
||||
return self.start <= kst_time < self.end
|
||||
return kst_time >= self.start or kst_time < self.end
|
||||
|
||||
|
||||
@dataclass
|
||||
class QueuedOrderIntent:
|
||||
market_code: str
|
||||
exchange_code: str
|
||||
stock_code: str
|
||||
order_type: str
|
||||
quantity: int
|
||||
price: float
|
||||
source: str
|
||||
queued_at: datetime
|
||||
attempts: int = 0
|
||||
|
||||
|
||||
def parse_blackout_windows_kst(raw: str) -> list[BlackoutWindow]:
|
||||
"""Parse comma-separated KST windows like '23:30-00:10,11:20-11:30'."""
|
||||
windows: list[BlackoutWindow] = []
|
||||
for token in raw.split(","):
|
||||
span = token.strip()
|
||||
if not span or "-" not in span:
|
||||
continue
|
||||
start_raw, end_raw = [part.strip() for part in span.split("-", 1)]
|
||||
try:
|
||||
start_h, start_m = [int(v) for v in start_raw.split(":", 1)]
|
||||
end_h, end_m = [int(v) for v in end_raw.split(":", 1)]
|
||||
except (ValueError, TypeError):
|
||||
continue
|
||||
if not (0 <= start_h <= 23 and 0 <= end_h <= 23):
|
||||
continue
|
||||
if not (0 <= start_m <= 59 and 0 <= end_m <= 59):
|
||||
continue
|
||||
windows.append(BlackoutWindow(start=time(start_h, start_m), end=time(end_h, end_m)))
|
||||
return windows
|
||||
|
||||
|
||||
class BlackoutOrderManager:
|
||||
"""Tracks blackout mode and queues order intents until recovery."""
|
||||
|
||||
def __init__(
|
||||
self,
|
||||
*,
|
||||
enabled: bool,
|
||||
windows: list[BlackoutWindow],
|
||||
max_queue_size: int = 500,
|
||||
) -> None:
|
||||
self.enabled = enabled
|
||||
self._windows = windows
|
||||
self._queue: deque[QueuedOrderIntent] = deque()
|
||||
self._was_blackout = False
|
||||
self._max_queue_size = max_queue_size
|
||||
|
||||
@property
|
||||
def pending_count(self) -> int:
|
||||
return len(self._queue)
|
||||
|
||||
def in_blackout(self, now: datetime | None = None) -> bool:
|
||||
if not self.enabled or not self._windows:
|
||||
return False
|
||||
now = now or datetime.now(UTC)
|
||||
kst_now = now.astimezone(ZoneInfo("Asia/Seoul")).timetz().replace(tzinfo=None)
|
||||
return any(window.contains(kst_now) for window in self._windows)
|
||||
|
||||
def enqueue(self, intent: QueuedOrderIntent) -> bool:
|
||||
if len(self._queue) >= self._max_queue_size:
|
||||
return False
|
||||
self._queue.append(intent)
|
||||
return True
|
||||
|
||||
def pop_recovery_batch(self, now: datetime | None = None) -> list[QueuedOrderIntent]:
|
||||
in_blackout_now = self.in_blackout(now)
|
||||
batch: list[QueuedOrderIntent] = []
|
||||
if not in_blackout_now and self._queue:
|
||||
while self._queue:
|
||||
batch.append(self._queue.popleft())
|
||||
self._was_blackout = in_blackout_now
|
||||
return batch
|
||||
|
||||
def requeue(self, intent: QueuedOrderIntent) -> None:
|
||||
if len(self._queue) < self._max_queue_size:
|
||||
self._queue.append(intent)
|
||||
|
||||
def clear(self) -> int:
|
||||
count = len(self._queue)
|
||||
self._queue.clear()
|
||||
return count
|
||||
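A short usage sketch of the new module, not part of the diff (assumes `from datetime import time, datetime, UTC`; the "KRX" exchange code and "scanner" source tag are illustrative). Note the first window crosses midnight, so start > end takes the wraparound branch of contains():

    windows = parse_blackout_windows_kst("23:30-00:10,11:20-11:30")
    assert len(windows) == 2
    assert windows[0].contains(time(23, 45)) and windows[0].contains(time(0, 5))
    assert not windows[0].contains(time(0, 10))  # end is exclusive

    manager = BlackoutOrderManager(enabled=True, windows=windows, max_queue_size=500)
    if manager.in_blackout():
        manager.enqueue(QueuedOrderIntent(
            market_code="KR", exchange_code="KRX", stock_code="005930",
            order_type="BUY", quantity=1, price=71_000.0,
            source="scanner", queued_at=datetime.now(UTC),
        ))
    else:
        for intent in manager.pop_recovery_batch():
            ...  # replay through the broker; call requeue() on transient failure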
src/core/kill_switch.py (new file, 71 lines)
@@ -0,0 +1,71 @@
"""Kill switch orchestration for emergency risk actions.
|
||||
|
||||
Order is fixed:
|
||||
1) block new orders
|
||||
2) cancel pending orders
|
||||
3) refresh order state
|
||||
4) reduce risk
|
||||
5) snapshot and notify
|
||||
"""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
import inspect
|
||||
from dataclasses import dataclass, field
|
||||
from typing import Any, Awaitable, Callable
|
||||
|
||||
StepCallable = Callable[[], Any | Awaitable[Any]]
|
||||
|
||||
|
||||
@dataclass
|
||||
class KillSwitchReport:
|
||||
reason: str
|
||||
steps: list[str] = field(default_factory=list)
|
||||
errors: list[str] = field(default_factory=list)
|
||||
|
||||
|
||||
class KillSwitchOrchestrator:
|
||||
def __init__(self) -> None:
|
||||
self.new_orders_blocked = False
|
||||
|
||||
async def _run_step(
|
||||
self,
|
||||
report: KillSwitchReport,
|
||||
name: str,
|
||||
fn: StepCallable | None,
|
||||
) -> None:
|
||||
report.steps.append(name)
|
||||
if fn is None:
|
||||
return
|
||||
try:
|
||||
result = fn()
|
||||
if inspect.isawaitable(result):
|
||||
await result
|
||||
except Exception as exc: # pragma: no cover - intentionally resilient
|
||||
report.errors.append(f"{name}: {exc}")
|
||||
|
||||
async def trigger(
|
||||
self,
|
||||
*,
|
||||
reason: str,
|
||||
cancel_pending_orders: StepCallable | None = None,
|
||||
refresh_order_state: StepCallable | None = None,
|
||||
reduce_risk: StepCallable | None = None,
|
||||
snapshot_state: StepCallable | None = None,
|
||||
notify: StepCallable | None = None,
|
||||
) -> KillSwitchReport:
|
||||
report = KillSwitchReport(reason=reason)
|
||||
|
||||
self.new_orders_blocked = True
|
||||
report.steps.append("block_new_orders")
|
||||
|
||||
await self._run_step(report, "cancel_pending_orders", cancel_pending_orders)
|
||||
await self._run_step(report, "refresh_order_state", refresh_order_state)
|
||||
await self._run_step(report, "reduce_risk", reduce_risk)
|
||||
await self._run_step(report, "snapshot_state", snapshot_state)
|
||||
await self._run_step(report, "notify", notify)
|
||||
|
||||
return report
|
||||
|
||||
def clear_block(self) -> None:
|
||||
self.new_orders_blocked = False
|
||||
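Each step callable may be sync or return an awaitable; _run_step awaits the result when needed and records failures without aborting later steps. A trigger sketch inside an async context (`broker` and `notifier` are assumed objects, not from the diff):

    switch = KillSwitchOrchestrator()
    report = await switch.trigger(
        reason="daily loss limit breached",
        cancel_pending_orders=lambda: cancel_all_domestic_pending(broker),  # coroutine; awaited by _run_step
        notify=lambda: notifier.send("kill switch tripped"),
    )
    assert switch.new_orders_blocked
    assert report.steps[0] == "block_new_orders"  # blocking always happens first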
src/core/order_policy.py (new file, 93 lines)
@@ -0,0 +1,93 @@
"""Session-aware order policy guards.
|
||||
|
||||
Default policy:
|
||||
- Low-liquidity sessions must reject market orders (price <= 0).
|
||||
"""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
from dataclasses import dataclass
|
||||
from datetime import UTC, datetime, time
|
||||
from zoneinfo import ZoneInfo
|
||||
|
||||
from src.markets.schedule import MarketInfo
|
||||
|
||||
_LOW_LIQUIDITY_SESSIONS = {"NXT_AFTER", "US_PRE", "US_DAY", "US_AFTER"}
|
||||
|
||||
|
||||
class OrderPolicyRejected(Exception):
|
||||
"""Raised when an order violates session policy."""
|
||||
|
||||
def __init__(self, message: str, *, session_id: str, market_code: str) -> None:
|
||||
super().__init__(message)
|
||||
self.session_id = session_id
|
||||
self.market_code = market_code
|
||||
|
||||
|
||||
@dataclass(frozen=True)
|
||||
class SessionInfo:
|
||||
session_id: str
|
||||
is_low_liquidity: bool
|
||||
|
||||
|
||||
def classify_session_id(market: MarketInfo, now: datetime | None = None) -> str:
|
||||
"""Classify current session by KST schedule used in v3 docs."""
|
||||
now = now or datetime.now(UTC)
|
||||
# v3 session tables are explicitly defined in KST perspective.
|
||||
kst_time = now.astimezone(ZoneInfo("Asia/Seoul")).timetz().replace(tzinfo=None)
|
||||
|
||||
if market.code == "KR":
|
||||
if time(8, 0) <= kst_time < time(8, 50):
|
||||
return "NXT_PRE"
|
||||
if time(9, 0) <= kst_time < time(15, 30):
|
||||
return "KRX_REG"
|
||||
if time(15, 30) <= kst_time < time(20, 0):
|
||||
return "NXT_AFTER"
|
||||
return "KR_OFF"
|
||||
|
||||
if market.code.startswith("US"):
|
||||
if time(10, 0) <= kst_time < time(18, 0):
|
||||
return "US_DAY"
|
||||
if time(18, 0) <= kst_time < time(23, 30):
|
||||
return "US_PRE"
|
||||
if time(23, 30) <= kst_time or kst_time < time(6, 0):
|
||||
return "US_REG"
|
||||
if time(6, 0) <= kst_time < time(7, 0):
|
||||
return "US_AFTER"
|
||||
return "US_OFF"
|
||||
|
||||
return "GENERIC_REG"
|
||||
|
||||
|
||||
def get_session_info(market: MarketInfo, now: datetime | None = None) -> SessionInfo:
|
||||
session_id = classify_session_id(market, now)
|
||||
return SessionInfo(session_id=session_id, is_low_liquidity=session_id in _LOW_LIQUIDITY_SESSIONS)
|
||||
|
||||
|
||||
def validate_order_policy(
|
||||
*,
|
||||
market: MarketInfo,
|
||||
order_type: str,
|
||||
price: float,
|
||||
now: datetime | None = None,
|
||||
) -> SessionInfo:
|
||||
"""Validate order against session policy and return resolved session info."""
|
||||
info = get_session_info(market, now)
|
||||
|
||||
is_market_order = price <= 0
|
||||
if info.is_low_liquidity and is_market_order:
|
||||
raise OrderPolicyRejected(
|
||||
f"Market order is forbidden in low-liquidity session ({info.session_id})",
|
||||
session_id=info.session_id,
|
||||
market_code=market.code,
|
||||
)
|
||||
|
||||
# Guard against accidental unsupported actions.
|
||||
if order_type not in {"BUY", "SELL"}:
|
||||
raise OrderPolicyRejected(
|
||||
f"Unsupported order_type={order_type}",
|
||||
session_id=info.session_id,
|
||||
market_code=market.code,
|
||||
)
|
||||
|
||||
return info
|
||||
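A rejection sketch under the rules above (assumes a MarketInfo with code "KR"; the timestamp is arbitrary). At 16:00 KST the KR market classifies as NXT_AFTER, a low-liquidity session, so a market order (price <= 0) is refused:

    from datetime import datetime
    from zoneinfo import ZoneInfo

    try:
        validate_order_policy(
            market=market,  # assumed MarketInfo(code="KR", ...)
            order_type="BUY",
            price=0.0,  # price <= 0 is treated as a market order
            now=datetime(2025, 3, 3, 16, 0, tzinfo=ZoneInfo("Asia/Seoul")),
        )
    except OrderPolicyRejected as exc:
        print(exc.session_id, exc.market_code)  # NXT_AFTER KR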
src/dashboard/__init__.py (new file, 5 lines)
@@ -0,0 +1,5 @@
"""FastAPI dashboard package for observability APIs."""
|
||||
|
||||
from src.dashboard.app import create_dashboard_app
|
||||
|
||||
__all__ = ["create_dashboard_app"]
|
||||
src/dashboard/app.py (new file, 498 lines)
@@ -0,0 +1,498 @@
"""FastAPI application for observability dashboard endpoints."""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
import json
|
||||
import os
|
||||
import sqlite3
|
||||
from datetime import UTC, datetime, timezone
|
||||
from pathlib import Path
|
||||
from typing import Any
|
||||
|
||||
from fastapi import FastAPI, HTTPException, Query
|
||||
from fastapi.responses import FileResponse
|
||||
|
||||
|
||||
def create_dashboard_app(db_path: str, mode: str = "paper") -> FastAPI:
|
||||
"""Create dashboard FastAPI app bound to a SQLite database path."""
|
||||
app = FastAPI(title="The Ouroboros Dashboard", version="1.0.0")
|
||||
app.state.db_path = db_path
|
||||
app.state.mode = mode
|
||||
|
||||
@app.get("/")
|
||||
def index() -> FileResponse:
|
||||
index_path = Path(__file__).parent / "static" / "index.html"
|
||||
return FileResponse(index_path)
|
||||
|
||||
@app.get("/api/status")
|
||||
def get_status() -> dict[str, Any]:
|
||||
today = datetime.now(UTC).date().isoformat()
|
||||
with _connect(db_path) as conn:
|
||||
market_rows = conn.execute(
|
||||
"""
|
||||
SELECT DISTINCT market FROM (
|
||||
SELECT market FROM trades WHERE DATE(timestamp) = ?
|
||||
UNION
|
||||
SELECT market FROM decision_logs WHERE DATE(timestamp) = ?
|
||||
UNION
|
||||
SELECT market FROM playbooks WHERE date = ?
|
||||
) ORDER BY market
|
||||
""",
|
||||
(today, today, today),
|
||||
).fetchall()
|
||||
markets = [row[0] for row in market_rows] if market_rows else []
|
||||
market_status: dict[str, Any] = {}
|
||||
total_trades = 0
|
||||
total_pnl = 0.0
|
||||
total_decisions = 0
|
||||
for market in markets:
|
||||
trade_row = conn.execute(
|
||||
"""
|
||||
SELECT COUNT(*) AS c, COALESCE(SUM(pnl), 0.0) AS p
|
||||
FROM trades
|
||||
WHERE DATE(timestamp) = ? AND market = ?
|
||||
""",
|
||||
(today, market),
|
||||
).fetchone()
|
||||
decision_row = conn.execute(
|
||||
"""
|
||||
SELECT COUNT(*) AS c
|
||||
FROM decision_logs
|
||||
WHERE DATE(timestamp) = ? AND market = ?
|
||||
""",
|
||||
(today, market),
|
||||
).fetchone()
|
||||
playbook_row = conn.execute(
|
||||
"""
|
||||
SELECT status
|
||||
FROM playbooks
|
||||
WHERE date = ? AND market = ?
|
||||
LIMIT 1
|
||||
""",
|
||||
(today, market),
|
||||
).fetchone()
|
||||
market_status[market] = {
|
||||
"trade_count": int(trade_row["c"] if trade_row else 0),
|
||||
"total_pnl": float(trade_row["p"] if trade_row else 0.0),
|
||||
"decision_count": int(decision_row["c"] if decision_row else 0),
|
||||
"playbook_status": playbook_row["status"] if playbook_row else None,
|
||||
}
|
||||
total_trades += market_status[market]["trade_count"]
|
||||
total_pnl += market_status[market]["total_pnl"]
|
||||
total_decisions += market_status[market]["decision_count"]
|
||||
|
||||
cb_threshold = float(os.getenv("CIRCUIT_BREAKER_PCT", "-3.0"))
|
||||
pnl_pct_rows = conn.execute(
|
||||
"""
|
||||
SELECT key, value
|
||||
FROM system_metrics
|
||||
WHERE key LIKE 'portfolio_pnl_pct_%'
|
||||
ORDER BY updated_at DESC
|
||||
LIMIT 20
|
||||
"""
|
||||
).fetchall()
|
||||
current_pnl_pct: float | None = None
|
||||
if pnl_pct_rows:
|
||||
values = [
|
||||
json.loads(row["value"]).get("pnl_pct")
|
||||
for row in pnl_pct_rows
|
||||
if json.loads(row["value"]).get("pnl_pct") is not None
|
||||
]
|
||||
if values:
|
||||
current_pnl_pct = round(min(values), 4)
|
||||
|
||||
if current_pnl_pct is None:
|
||||
cb_status = "unknown"
|
||||
elif current_pnl_pct <= cb_threshold:
|
||||
cb_status = "tripped"
|
||||
elif current_pnl_pct <= cb_threshold + 1.0:
|
||||
cb_status = "warning"
|
||||
else:
|
||||
cb_status = "ok"
|
||||
|
||||
return {
|
||||
"date": today,
|
||||
"mode": mode,
|
||||
"markets": market_status,
|
||||
"totals": {
|
||||
"trade_count": total_trades,
|
||||
"total_pnl": round(total_pnl, 2),
|
||||
"decision_count": total_decisions,
|
||||
},
|
||||
"circuit_breaker": {
|
||||
"threshold_pct": cb_threshold,
|
||||
"current_pnl_pct": current_pnl_pct,
|
||||
"status": cb_status,
|
||||
},
|
||||
}
|
||||
|
||||
@app.get("/api/playbook/{date_str}")
|
||||
def get_playbook(date_str: str, market: str = Query("KR")) -> dict[str, Any]:
|
||||
with _connect(db_path) as conn:
|
||||
row = conn.execute(
|
||||
"""
|
||||
SELECT date, market, status, playbook_json, generated_at,
|
||||
token_count, scenario_count, match_count
|
||||
FROM playbooks
|
||||
WHERE date = ? AND market = ?
|
||||
""",
|
||||
(date_str, market),
|
||||
).fetchone()
|
||||
if row is None:
|
||||
raise HTTPException(status_code=404, detail="playbook not found")
|
||||
return {
|
||||
"date": row["date"],
|
||||
"market": row["market"],
|
||||
"status": row["status"],
|
||||
"playbook": json.loads(row["playbook_json"]),
|
||||
"generated_at": row["generated_at"],
|
||||
"token_count": row["token_count"],
|
||||
"scenario_count": row["scenario_count"],
|
||||
"match_count": row["match_count"],
|
||||
}
|
||||
|
||||
@app.get("/api/scorecard/{date_str}")
|
||||
def get_scorecard(date_str: str, market: str = Query("KR")) -> dict[str, Any]:
|
||||
key = f"scorecard_{market}"
|
||||
with _connect(db_path) as conn:
|
||||
row = conn.execute(
|
||||
"""
|
||||
SELECT value
|
||||
FROM contexts
|
||||
WHERE layer = 'L6_DAILY' AND timeframe = ? AND key = ?
|
||||
""",
|
||||
(date_str, key),
|
||||
).fetchone()
|
||||
if row is None:
|
||||
raise HTTPException(status_code=404, detail="scorecard not found")
|
||||
return {"date": date_str, "market": market, "scorecard": json.loads(row["value"])}
|
||||
|
||||
@app.get("/api/performance")
|
||||
def get_performance(market: str = Query("all")) -> dict[str, Any]:
|
||||
with _connect(db_path) as conn:
|
||||
if market == "all":
|
||||
by_market_rows = conn.execute(
|
||||
"""
|
||||
SELECT market,
|
||||
COUNT(*) AS total_trades,
|
||||
SUM(CASE WHEN pnl > 0 THEN 1 ELSE 0 END) AS wins,
|
||||
SUM(CASE WHEN pnl < 0 THEN 1 ELSE 0 END) AS losses,
|
||||
COALESCE(SUM(pnl), 0.0) AS total_pnl,
|
||||
COALESCE(AVG(confidence), 0.0) AS avg_confidence
|
||||
FROM trades
|
||||
GROUP BY market
|
||||
ORDER BY market
|
||||
"""
|
||||
).fetchall()
|
||||
combined = _performance_from_rows(by_market_rows)
|
||||
return {
|
||||
"market": "all",
|
||||
"combined": combined,
|
||||
"by_market": [
|
||||
_row_to_performance(row)
|
||||
for row in by_market_rows
|
||||
],
|
||||
}
|
||||
|
||||
row = conn.execute(
|
||||
"""
|
||||
SELECT market,
|
||||
COUNT(*) AS total_trades,
|
||||
SUM(CASE WHEN pnl > 0 THEN 1 ELSE 0 END) AS wins,
|
||||
SUM(CASE WHEN pnl < 0 THEN 1 ELSE 0 END) AS losses,
|
||||
COALESCE(SUM(pnl), 0.0) AS total_pnl,
|
||||
COALESCE(AVG(confidence), 0.0) AS avg_confidence
|
||||
FROM trades
|
||||
WHERE market = ?
|
||||
GROUP BY market
|
||||
""",
|
||||
(market,),
|
||||
).fetchone()
|
||||
if row is None:
|
||||
return {"market": market, "metrics": _empty_performance(market)}
|
||||
return {"market": market, "metrics": _row_to_performance(row)}
|
||||
|
||||
@app.get("/api/context/{layer}")
|
||||
def get_context_layer(
|
||||
layer: str,
|
||||
timeframe: str | None = Query(default=None),
|
||||
limit: int = Query(default=100, ge=1, le=1000),
|
||||
) -> dict[str, Any]:
|
||||
with _connect(db_path) as conn:
|
||||
if timeframe is None:
|
||||
rows = conn.execute(
|
||||
"""
|
||||
SELECT timeframe, key, value, updated_at
|
||||
FROM contexts
|
||||
WHERE layer = ?
|
||||
ORDER BY updated_at DESC
|
||||
LIMIT ?
|
||||
""",
|
||||
(layer, limit),
|
||||
).fetchall()
|
||||
else:
|
||||
rows = conn.execute(
|
||||
"""
|
||||
SELECT timeframe, key, value, updated_at
|
||||
FROM contexts
|
||||
WHERE layer = ? AND timeframe = ?
|
||||
ORDER BY key
|
||||
LIMIT ?
|
||||
""",
|
||||
(layer, timeframe, limit),
|
||||
).fetchall()
|
||||
|
||||
entries = [
|
||||
{
|
||||
"timeframe": row["timeframe"],
|
||||
"key": row["key"],
|
||||
"value": json.loads(row["value"]),
|
||||
"updated_at": row["updated_at"],
|
||||
}
|
||||
for row in rows
|
||||
]
|
||||
return {
|
||||
"layer": layer,
|
||||
"timeframe": timeframe,
|
||||
"count": len(entries),
|
||||
"entries": entries,
|
||||
}
|
||||
|
||||
@app.get("/api/decisions")
|
||||
def get_decisions(
|
||||
market: str = Query("KR"),
|
||||
limit: int = Query(default=50, ge=1, le=500),
|
||||
) -> dict[str, Any]:
|
||||
with _connect(db_path) as conn:
|
||||
rows = conn.execute(
|
||||
"""
|
||||
SELECT decision_id, timestamp, stock_code, market, exchange_code,
|
||||
action, confidence, rationale, context_snapshot, input_data,
|
||||
outcome_pnl, outcome_accuracy
|
||||
FROM decision_logs
|
||||
WHERE market = ?
|
||||
ORDER BY timestamp DESC
|
||||
LIMIT ?
|
||||
""",
|
||||
(market, limit),
|
||||
).fetchall()
|
||||
decisions = []
|
||||
for row in rows:
|
||||
decisions.append(
|
||||
{
|
||||
"decision_id": row["decision_id"],
|
||||
"timestamp": row["timestamp"],
|
||||
"stock_code": row["stock_code"],
|
||||
"market": row["market"],
|
||||
"exchange_code": row["exchange_code"],
|
||||
"action": row["action"],
|
||||
"confidence": row["confidence"],
|
||||
"rationale": row["rationale"],
|
||||
"context_snapshot": json.loads(row["context_snapshot"]),
|
||||
"input_data": json.loads(row["input_data"]),
|
||||
"outcome_pnl": row["outcome_pnl"],
|
||||
"outcome_accuracy": row["outcome_accuracy"],
|
||||
}
|
||||
)
|
||||
return {"market": market, "count": len(decisions), "decisions": decisions}
|
||||
|
||||
@app.get("/api/pnl/history")
|
||||
def get_pnl_history(
|
||||
days: int = Query(default=30, ge=1, le=365),
|
||||
market: str = Query("all"),
|
||||
) -> dict[str, Any]:
|
||||
"""Return daily P&L history for charting."""
|
||||
with _connect(db_path) as conn:
|
||||
if market == "all":
|
||||
rows = conn.execute(
|
||||
"""
|
||||
SELECT DATE(timestamp) AS date,
|
||||
SUM(pnl) AS daily_pnl,
|
||||
COUNT(*) AS trade_count
|
||||
FROM trades
|
||||
WHERE pnl IS NOT NULL
|
||||
AND DATE(timestamp) >= DATE('now', ?)
|
||||
GROUP BY DATE(timestamp)
|
||||
ORDER BY DATE(timestamp)
|
||||
""",
|
||||
(f"-{days} days",),
|
||||
).fetchall()
|
||||
else:
|
||||
rows = conn.execute(
|
||||
"""
|
||||
SELECT DATE(timestamp) AS date,
|
||||
SUM(pnl) AS daily_pnl,
|
||||
COUNT(*) AS trade_count
|
||||
FROM trades
|
||||
WHERE pnl IS NOT NULL
|
||||
AND market = ?
|
||||
AND DATE(timestamp) >= DATE('now', ?)
|
||||
GROUP BY DATE(timestamp)
|
||||
ORDER BY DATE(timestamp)
|
||||
""",
|
||||
(market, f"-{days} days"),
|
||||
).fetchall()
|
||||
return {
|
||||
"days": days,
|
||||
"market": market,
|
||||
"labels": [row["date"] for row in rows],
|
||||
"pnl": [round(float(row["daily_pnl"]), 2) for row in rows],
|
||||
"trades": [int(row["trade_count"]) for row in rows],
|
||||
}
|
||||
|
||||
@app.get("/api/scenarios/active")
|
||||
def get_active_scenarios(
|
||||
market: str = Query("US"),
|
||||
date_str: str | None = Query(default=None),
|
||||
limit: int = Query(default=50, ge=1, le=500),
|
||||
) -> dict[str, Any]:
|
||||
if date_str is None:
|
||||
date_str = datetime.now(UTC).date().isoformat()
|
||||
|
||||
with _connect(db_path) as conn:
|
||||
rows = conn.execute(
|
||||
"""
|
||||
SELECT timestamp, stock_code, action, confidence, rationale, context_snapshot
|
||||
FROM decision_logs
|
||||
WHERE market = ? AND DATE(timestamp) = ?
|
||||
ORDER BY timestamp DESC
|
||||
LIMIT ?
|
||||
""",
|
||||
(market, date_str, limit),
|
||||
).fetchall()
|
||||
matches: list[dict[str, Any]] = []
|
||||
for row in rows:
|
||||
snapshot = json.loads(row["context_snapshot"])
|
||||
scenario_match = snapshot.get("scenario_match", {})
|
||||
if not isinstance(scenario_match, dict) or not scenario_match:
|
||||
continue
|
||||
matches.append(
|
||||
{
|
||||
"timestamp": row["timestamp"],
|
||||
"stock_code": row["stock_code"],
|
||||
"action": row["action"],
|
||||
"confidence": row["confidence"],
|
||||
"rationale": row["rationale"],
|
||||
"scenario_match": scenario_match,
|
||||
}
|
||||
)
|
||||
return {"market": market, "date": date_str, "count": len(matches), "matches": matches}
|
||||
|
||||
@app.get("/api/positions")
|
||||
def get_positions() -> dict[str, Any]:
|
||||
"""Return all currently open positions (last trade per symbol is BUY)."""
|
||||
with _connect(db_path) as conn:
|
||||
rows = conn.execute(
|
||||
"""
|
||||
SELECT stock_code, market, exchange_code,
|
||||
price AS entry_price, quantity, timestamp AS entry_time,
|
||||
decision_id
|
||||
FROM (
|
||||
SELECT stock_code, market, exchange_code, price, quantity,
|
||||
timestamp, decision_id, action,
|
||||
ROW_NUMBER() OVER (
|
||||
PARTITION BY stock_code, market
|
||||
ORDER BY timestamp DESC
|
||||
) AS rn
|
||||
FROM trades
|
||||
)
|
||||
WHERE rn = 1 AND action = 'BUY'
|
||||
ORDER BY entry_time DESC
|
||||
"""
|
||||
).fetchall()
|
||||
|
||||
now = datetime.now(timezone.utc)
|
||||
positions = []
|
||||
for row in rows:
|
||||
entry_time_str = row["entry_time"]
|
||||
try:
|
||||
entry_dt = datetime.fromisoformat(entry_time_str.replace("Z", "+00:00"))
|
||||
held_seconds = int((now - entry_dt).total_seconds())
|
||||
held_hours = held_seconds // 3600
|
||||
held_minutes = (held_seconds % 3600) // 60
|
||||
if held_hours >= 1:
|
||||
held_display = f"{held_hours}h {held_minutes}m"
|
||||
else:
|
||||
held_display = f"{held_minutes}m"
|
||||
except (ValueError, TypeError):
|
||||
held_display = "--"
|
||||
|
||||
positions.append(
|
||||
{
|
||||
"stock_code": row["stock_code"],
|
||||
"market": row["market"],
|
||||
"exchange_code": row["exchange_code"],
|
||||
"entry_price": row["entry_price"],
|
||||
"quantity": row["quantity"],
|
||||
"entry_time": entry_time_str,
|
||||
"held": held_display,
|
||||
"decision_id": row["decision_id"],
|
||||
}
|
||||
)
|
||||
|
||||
return {"count": len(positions), "positions": positions}
|
||||
|
||||
return app
|
||||
|
||||
|
||||
def _connect(db_path: str) -> sqlite3.Connection:
|
||||
conn = sqlite3.connect(db_path)
|
||||
conn.row_factory = sqlite3.Row
|
||||
conn.execute("PRAGMA journal_mode=WAL")
|
||||
conn.execute("PRAGMA busy_timeout=8000")
|
||||
return conn
|
||||
|
||||
|
||||
def _row_to_performance(row: sqlite3.Row) -> dict[str, Any]:
|
||||
wins = int(row["wins"] or 0)
|
||||
losses = int(row["losses"] or 0)
|
||||
total = int(row["total_trades"] or 0)
|
||||
win_rate = round((wins / (wins + losses) * 100), 2) if (wins + losses) > 0 else 0.0
|
||||
return {
|
||||
"market": row["market"],
|
||||
"total_trades": total,
|
||||
"wins": wins,
|
||||
"losses": losses,
|
||||
"win_rate": win_rate,
|
||||
"total_pnl": round(float(row["total_pnl"] or 0.0), 2),
|
||||
"avg_confidence": round(float(row["avg_confidence"] or 0.0), 2),
|
||||
}
|
||||
|
||||
|
||||
def _performance_from_rows(rows: list[sqlite3.Row]) -> dict[str, Any]:
|
||||
total_trades = 0
|
||||
wins = 0
|
||||
losses = 0
|
||||
total_pnl = 0.0
|
||||
confidence_weighted = 0.0
|
||||
for row in rows:
|
||||
market_total = int(row["total_trades"] or 0)
|
||||
market_conf = float(row["avg_confidence"] or 0.0)
|
||||
total_trades += market_total
|
||||
wins += int(row["wins"] or 0)
|
||||
losses += int(row["losses"] or 0)
|
||||
total_pnl += float(row["total_pnl"] or 0.0)
|
||||
confidence_weighted += market_total * market_conf
|
||||
win_rate = round((wins / (wins + losses) * 100), 2) if (wins + losses) > 0 else 0.0
|
||||
avg_confidence = round(confidence_weighted / total_trades, 2) if total_trades > 0 else 0.0
|
||||
return {
|
||||
"market": "all",
|
||||
"total_trades": total_trades,
|
||||
"wins": wins,
|
||||
"losses": losses,
|
||||
"win_rate": win_rate,
|
||||
"total_pnl": round(total_pnl, 2),
|
||||
"avg_confidence": avg_confidence,
|
||||
}
|
||||
|
||||
|
||||
def _empty_performance(market: str) -> dict[str, Any]:
|
||||
return {
|
||||
"market": market,
|
||||
"total_trades": 0,
|
||||
"wins": 0,
|
||||
"losses": 0,
|
||||
"win_rate": 0.0,
|
||||
"total_pnl": 0.0,
|
||||
"avg_confidence": 0.0,
|
||||
}
|
||||
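The factory returns a plain FastAPI app, so any ASGI server can serve it. A minimal launch sketch (uvicorn is an assumption; the diff does not show how the project actually mounts the app):

    import uvicorn

    app = create_dashboard_app("data/trade_logs.db", mode="paper")
    uvicorn.run(app, host="127.0.0.1", port=8080)  # mirrors the DASHBOARD_* defaults in Settings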
src/dashboard/static/index.html (new file, 798 lines; truncated here)
@@ -0,0 +1,798 @@
<!doctype html>
<html lang="ko">
<head>
  <meta charset="UTF-8" />
  <meta name="viewport" content="width=device-width, initial-scale=1.0" />
  <title>The Ouroboros Dashboard</title>
  <script src="https://cdn.jsdelivr.net/npm/chart.js@4.4.0/dist/chart.umd.min.js"></script>
  <style>
    :root {
      --bg: #0b1724;
      --panel: #12263a;
      --fg: #e6eef7;
      --muted: #9fb3c8;
      --accent: #3cb371;
      --red: #e05555;
      --warn: #e8a040;
      --border: #28455f;
    }
    * { box-sizing: border-box; margin: 0; padding: 0; }
    body {
      font-family: ui-monospace, SFMono-Regular, Menlo, monospace;
      background: radial-gradient(circle at top left, #173b58, var(--bg));
      color: var(--fg);
      min-height: 100vh;
      font-size: 13px;
    }
    .wrap { max-width: 1100px; margin: 0 auto; padding: 20px 16px; }

    /* Header */
    header {
      display: flex;
      align-items: center;
      justify-content: space-between;
      margin-bottom: 20px;
      padding-bottom: 12px;
      border-bottom: 1px solid var(--border);
    }
    header h1 { font-size: 18px; color: var(--accent); letter-spacing: 0.5px; }
    .header-right { display: flex; align-items: center; gap: 12px; color: var(--muted); font-size: 12px; }
    .refresh-btn {
      background: none; border: 1px solid var(--border); color: var(--muted);
      padding: 4px 10px; border-radius: 6px; cursor: pointer; font-family: inherit;
      font-size: 12px; transition: border-color 0.2s;
    }
    .refresh-btn:hover { border-color: var(--accent); color: var(--accent); }
    .mode-badge {
      padding: 3px 10px; border-radius: 5px; font-size: 12px; font-weight: 700;
      letter-spacing: 0.5px;
    }
    .mode-badge.live {
      background: rgba(224, 85, 85, 0.15); color: var(--red);
      border: 1px solid rgba(224, 85, 85, 0.4);
      animation: pulse-warn 2s ease-in-out infinite;
    }
    .mode-badge.paper {
      background: rgba(232, 160, 64, 0.15); color: var(--warn);
      border: 1px solid rgba(232, 160, 64, 0.4);
    }

    /* CB Gauge */
    .cb-gauge-wrap {
      display: flex; align-items: center; gap: 8px;
      font-size: 11px; color: var(--muted);
    }
    .cb-dot {
      width: 8px; height: 8px; border-radius: 50%; flex-shrink: 0;
    }
    .cb-dot.ok { background: var(--accent); }
    .cb-dot.warning { background: var(--warn); animation: pulse-warn 1.2s ease-in-out infinite; }
    .cb-dot.tripped { background: var(--red); animation: pulse-warn 0.6s ease-in-out infinite; }
    .cb-dot.unknown { background: var(--border); }
    @keyframes pulse-warn {
      0%, 100% { opacity: 1; }
      50% { opacity: 0.35; }
    }
    .cb-bar-wrap { width: 64px; height: 5px; background: rgba(255,255,255,0.08); border-radius: 3px; overflow: hidden; }
    .cb-bar-fill { height: 100%; border-radius: 3px; transition: width 0.4s, background 0.4s; }

    /* Summary cards */
    .cards { display: grid; grid-template-columns: repeat(4, 1fr); gap: 12px; margin-bottom: 20px; }
    @media (max-width: 700px) { .cards { grid-template-columns: repeat(2, 1fr); } }
    .card {
      background: var(--panel);
      border: 1px solid var(--border);
      border-radius: 10px;
      padding: 16px;
    }
    .card-label { color: var(--muted); font-size: 11px; margin-bottom: 6px; text-transform: uppercase; letter-spacing: 0.5px; }
    .card-value { font-size: 22px; font-weight: 700; }
    .card-sub { color: var(--muted); font-size: 11px; margin-top: 4px; }
    .positive { color: var(--accent); }
    .negative { color: var(--red); }
    .neutral { color: var(--fg); }

    /* Chart panel */
    .chart-panel {
      background: var(--panel);
      border: 1px solid var(--border);
      border-radius: 10px;
      padding: 16px;
      margin-bottom: 20px;
    }
    .panel-header {
      display: flex;
      align-items: center;
      justify-content: space-between;
      margin-bottom: 16px;
    }
    .panel-title { font-size: 13px; color: var(--muted); font-weight: 600; }
    .chart-container { position: relative; height: 180px; }
    .chart-error { color: var(--muted); text-align: center; padding: 40px 0; font-size: 12px; }
|
||||
|
||||
/* Days selector */
|
||||
.days-selector { display: flex; gap: 4px; }
|
||||
.day-btn {
|
||||
background: none; border: 1px solid var(--border); color: var(--muted);
|
||||
padding: 3px 8px; border-radius: 4px; cursor: pointer; font-family: inherit; font-size: 11px;
|
||||
}
|
||||
.day-btn.active { border-color: var(--accent); color: var(--accent); background: rgba(60, 179, 113, 0.08); }
|
||||
|
||||
/* Decisions panel */
|
||||
.decisions-panel {
|
||||
background: var(--panel);
|
||||
border: 1px solid var(--border);
|
||||
border-radius: 10px;
|
||||
padding: 16px;
|
||||
}
|
||||
.market-tabs { display: flex; gap: 6px; flex-wrap: wrap; }
|
||||
.tab-btn {
|
||||
background: none; border: 1px solid var(--border); color: var(--muted);
|
||||
padding: 4px 10px; border-radius: 6px; cursor: pointer; font-family: inherit; font-size: 11px;
|
||||
}
|
||||
.tab-btn.active { border-color: var(--accent); color: var(--accent); background: rgba(60, 179, 113, 0.08); }
|
||||
.decisions-table { width: 100%; border-collapse: collapse; margin-top: 14px; }
|
||||
.decisions-table th {
|
||||
text-align: left; color: var(--muted); font-size: 11px; font-weight: 600;
|
||||
padding: 6px 8px; border-bottom: 1px solid var(--border); white-space: nowrap;
|
||||
}
|
||||
.decisions-table td {
|
||||
padding: 8px 8px; border-bottom: 1px solid rgba(40, 69, 95, 0.5);
|
||||
vertical-align: middle; white-space: nowrap;
|
||||
}
|
||||
.decisions-table tr:last-child td { border-bottom: none; }
|
||||
.decisions-table tr:hover td { background: rgba(255,255,255,0.02); }
|
||||
.badge {
|
||||
display: inline-block; padding: 2px 7px; border-radius: 4px;
|
||||
font-size: 11px; font-weight: 700; letter-spacing: 0.5px;
|
||||
}
|
||||
.badge-buy { background: rgba(60, 179, 113, 0.15); color: var(--accent); }
|
||||
.badge-sell { background: rgba(224, 85, 85, 0.15); color: var(--red); }
|
||||
.badge-hold { background: rgba(159, 179, 200, 0.12); color: var(--muted); }
|
||||
.conf-bar-wrap { display: flex; align-items: center; gap: 6px; min-width: 90px; }
|
||||
.conf-bar { flex: 1; height: 6px; background: rgba(255,255,255,0.08); border-radius: 3px; overflow: hidden; }
|
||||
.conf-fill { height: 100%; border-radius: 3px; background: var(--accent); transition: width 0.3s; }
|
||||
.conf-val { color: var(--muted); font-size: 11px; min-width: 26px; text-align: right; }
|
||||
.rationale-cell { max-width: 200px; overflow: hidden; text-overflow: ellipsis; color: var(--muted); }
|
||||
.empty-row td { text-align: center; color: var(--muted); padding: 24px; }
|
||||
|
||||
/* Positions panel */
|
||||
.positions-panel {
|
||||
background: var(--panel);
|
||||
border: 1px solid var(--border);
|
||||
border-radius: 10px;
|
||||
padding: 16px;
|
||||
margin-bottom: 20px;
|
||||
}
|
||||
.positions-table { width: 100%; border-collapse: collapse; margin-top: 14px; }
|
||||
.positions-table th {
|
||||
text-align: left; color: var(--muted); font-size: 11px; font-weight: 600;
|
||||
padding: 6px 8px; border-bottom: 1px solid var(--border); white-space: nowrap;
|
||||
}
|
||||
.positions-table td {
|
||||
padding: 8px 8px; border-bottom: 1px solid rgba(40, 69, 95, 0.5);
|
||||
vertical-align: middle; white-space: nowrap;
|
||||
}
|
||||
.positions-table tr:last-child td { border-bottom: none; }
|
||||
.positions-table tr:hover td { background: rgba(255,255,255,0.02); }
|
||||
.pos-empty { color: var(--muted); text-align: center; padding: 20px 0; font-size: 12px; }
|
||||
.pos-count {
|
||||
display: inline-block; background: rgba(60, 179, 113, 0.12);
|
||||
color: var(--accent); font-size: 11px; font-weight: 700;
|
||||
padding: 2px 8px; border-radius: 10px; margin-left: 8px;
|
||||
}
|
||||
|
||||
/* Spinner */
|
||||
.spinner { display: inline-block; width: 12px; height: 12px; border: 2px solid var(--border); border-top-color: var(--accent); border-radius: 50%; animation: spin 0.8s linear infinite; }
|
||||
@keyframes spin { to { transform: rotate(360deg); } }
|
||||
|
||||
/* Generic panel */
|
||||
.panel {
|
||||
background: var(--panel);
|
||||
border: 1px solid var(--border);
|
||||
border-radius: 10px;
|
||||
padding: 16px;
|
||||
margin-top: 20px;
|
||||
}
|
||||
|
||||
/* Playbook panel - details/summary accordion */
|
||||
.playbook-panel details { border: 1px solid var(--border); border-radius: 4px; margin-bottom: 6px; }
|
||||
.playbook-panel summary { padding: 8px 12px; cursor: pointer; font-weight: 600; background: var(--bg); color: var(--fg); }
|
||||
.playbook-panel summary:hover { color: var(--accent); }
|
||||
.playbook-panel pre { margin: 0; padding: 12px; background: var(--bg); overflow-x: auto;
|
||||
font-size: 11px; color: #a0c4ff; white-space: pre-wrap; }
|
||||
|
||||
/* Scorecard KPI card grid */
|
||||
.scorecard-grid { display: grid; grid-template-columns: repeat(auto-fill, minmax(140px, 1fr)); gap: 10px; }
|
||||
.kpi-card { background: var(--bg); border: 1px solid var(--border); border-radius: 6px; padding: 12px; text-align: center; }
|
||||
.kpi-card .kpi-label { font-size: 11px; color: var(--muted); margin-bottom: 4px; }
|
||||
.kpi-card .kpi-value { font-size: 20px; font-weight: 700; color: var(--fg); }
|
||||
|
||||
/* Scenarios table */
|
||||
.scenarios-table { width: 100%; border-collapse: collapse; font-size: 13px; }
|
||||
.scenarios-table th { background: var(--bg); padding: 8px; text-align: left; border-bottom: 1px solid var(--border);
|
||||
color: var(--muted); font-size: 11px; font-weight: 600; white-space: nowrap; }
|
||||
.scenarios-table td { padding: 7px 8px; border-bottom: 1px solid rgba(40,69,95,0.5); }
|
||||
.scenarios-table tr:hover td { background: rgba(255,255,255,0.02); }
|
||||
|
||||
/* Context table */
|
||||
.context-table { width: 100%; border-collapse: collapse; font-size: 12px; }
|
||||
.context-table th { background: var(--bg); padding: 8px; text-align: left; border-bottom: 1px solid var(--border);
|
||||
color: var(--muted); font-size: 11px; font-weight: 600; white-space: nowrap; }
|
||||
.context-table td { padding: 6px 8px; border-bottom: 1px solid rgba(40,69,95,0.5); vertical-align: top; }
|
||||
.context-value { max-height: 60px; overflow-y: auto; color: #a0c4ff; word-break: break-all; }
|
||||
|
||||
/* Common panel select controls */
|
||||
.panel-controls { display: flex; gap: 8px; align-items: center; flex-wrap: wrap; }
|
||||
.panel-controls select, .panel-controls input[type="number"] {
|
||||
background: var(--bg); color: var(--fg); border: 1px solid var(--border);
|
||||
border-radius: 4px; padding: 4px 8px; font-size: 13px; font-family: inherit;
|
||||
}
|
||||
.panel-date { color: var(--muted); font-size: 12px; }
|
||||
.empty-msg { color: var(--muted); text-align: center; padding: 20px 0; font-size: 12px; }
|
||||
</style>
|
||||
</head>
|
||||
<body>
|
||||
<div class="wrap">
|
||||
<!-- Header -->
|
||||
<header>
|
||||
<h1>🐍 The Ouroboros</h1>
|
||||
<div class="header-right">
|
||||
<span class="mode-badge" id="mode-badge">--</span>
|
||||
<div class="cb-gauge-wrap" id="cb-gauge" title="Circuit Breaker">
|
||||
<span class="cb-dot unknown" id="cb-dot"></span>
|
||||
<span id="cb-label">CB --</span>
|
||||
<div class="cb-bar-wrap">
|
||||
<div class="cb-bar-fill" id="cb-bar" style="width:0%;background:var(--accent)"></div>
|
||||
</div>
|
||||
</div>
|
||||
<span id="last-updated">--</span>
|
||||
<button class="refresh-btn" onclick="refreshAll()">↺ 새로고침</button>
|
||||
</div>
|
||||
</header>
|
||||
|
||||
<!-- Summary cards -->
|
||||
<div class="cards">
|
||||
<div class="card">
|
||||
<div class="card-label">오늘 거래</div>
|
||||
<div class="card-value neutral" id="card-trades">--</div>
|
||||
<div class="card-sub" id="card-trades-sub">거래 건수</div>
|
||||
</div>
|
||||
<div class="card">
|
||||
<div class="card-label">오늘 P&L</div>
|
||||
<div class="card-value" id="card-pnl">--</div>
|
||||
<div class="card-sub" id="card-pnl-sub">실현 손익</div>
|
||||
</div>
|
||||
<div class="card">
|
||||
<div class="card-label">승률</div>
|
||||
<div class="card-value neutral" id="card-winrate">--</div>
|
||||
<div class="card-sub">전체 누적</div>
|
||||
</div>
|
||||
<div class="card">
|
||||
<div class="card-label">누적 거래</div>
|
||||
<div class="card-value neutral" id="card-total">--</div>
|
||||
<div class="card-sub">전체 기간</div>
|
||||
</div>
|
||||
</div>
|
||||
|
||||
<!-- Open Positions -->
|
||||
<div class="positions-panel">
|
||||
<div class="panel-header">
|
||||
<span class="panel-title">
|
||||
현재 보유 포지션
|
||||
<span class="pos-count" id="positions-count">0</span>
|
||||
</span>
|
||||
</div>
|
||||
<table class="positions-table">
|
||||
<thead>
|
||||
<tr>
|
||||
<th>종목</th>
|
||||
<th>시장</th>
|
||||
<th>수량</th>
|
||||
<th>진입가</th>
|
||||
<th>보유 시간</th>
|
||||
</tr>
|
||||
</thead>
|
||||
<tbody id="positions-body">
|
||||
<tr><td colspan="5" class="pos-empty"><span class="spinner"></span></td></tr>
|
||||
</tbody>
|
||||
</table>
|
||||
</div>
|
||||
|
||||
<!-- P&L Chart -->
|
||||
<div class="chart-panel">
|
||||
<div class="panel-header">
|
||||
<span class="panel-title">P&L 추이</span>
|
||||
<div class="days-selector">
|
||||
<button class="day-btn active" data-days="7" onclick="selectDays(this)">7일</button>
|
||||
<button class="day-btn" data-days="30" onclick="selectDays(this)">30일</button>
|
||||
<button class="day-btn" data-days="90" onclick="selectDays(this)">90일</button>
|
||||
</div>
|
||||
</div>
|
||||
<div class="chart-container">
|
||||
<canvas id="pnl-chart"></canvas>
|
||||
<div class="chart-error" id="chart-error" style="display:none">데이터 없음</div>
|
||||
</div>
|
||||
</div>
|
||||
|
||||
<!-- Decisions log -->
|
||||
<div class="decisions-panel">
|
||||
<div class="panel-header">
|
||||
<span class="panel-title">최근 결정 로그</span>
|
||||
<div class="market-tabs" id="market-tabs">
|
||||
<button class="tab-btn active" data-market="KR" onclick="selectMarket(this)">KR</button>
|
||||
<button class="tab-btn" data-market="US_NASDAQ" onclick="selectMarket(this)">US_NASDAQ</button>
|
||||
<button class="tab-btn" data-market="US_NYSE" onclick="selectMarket(this)">US_NYSE</button>
|
||||
<button class="tab-btn" data-market="JP" onclick="selectMarket(this)">JP</button>
|
||||
<button class="tab-btn" data-market="HK" onclick="selectMarket(this)">HK</button>
|
||||
</div>
|
||||
</div>
|
||||
<table class="decisions-table">
|
||||
<thead>
|
||||
<tr>
|
||||
<th>시각</th>
|
||||
<th>종목</th>
|
||||
<th>액션</th>
|
||||
<th>신뢰도</th>
|
||||
<th>사유</th>
|
||||
</tr>
|
||||
</thead>
|
||||
<tbody id="decisions-body">
|
||||
<tr class="empty-row"><td colspan="5"><span class="spinner"></span></td></tr>
|
||||
</tbody>
|
||||
</table>
|
||||
</div>
|
||||
|
||||
<!-- playbook panel -->
|
||||
<div class="panel playbook-panel">
|
||||
<div class="panel-header">
|
||||
<span class="panel-title">📋 프리마켓 플레이북</span>
|
||||
<div class="panel-controls">
|
||||
<select id="pb-market-select" onchange="fetchPlaybook()">
|
||||
<option value="KR">KR</option>
|
||||
<option value="US_NASDAQ">US_NASDAQ</option>
|
||||
<option value="US_NYSE">US_NYSE</option>
|
||||
</select>
|
||||
<span id="pb-date" class="panel-date"></span>
|
||||
</div>
|
||||
</div>
|
||||
<div id="playbook-content"><p class="empty-msg">데이터 없음</p></div>
|
||||
</div>
|
||||
|
||||
<!-- scorecard panel -->
|
||||
<div class="panel">
|
||||
<div class="panel-header">
|
||||
<span class="panel-title">📊 일간 스코어카드</span>
|
||||
<div class="panel-controls">
|
||||
<select id="sc-market-select" onchange="fetchScorecard()">
|
||||
<option value="KR">KR</option>
|
||||
<option value="US_NASDAQ">US_NASDAQ</option>
|
||||
</select>
|
||||
<span id="sc-date" class="panel-date"></span>
|
||||
</div>
|
||||
</div>
|
||||
<div id="scorecard-grid" class="scorecard-grid"><p class="empty-msg">데이터 없음</p></div>
|
||||
</div>
|
||||
|
||||
<!-- scenarios panel -->
|
||||
<div class="panel">
|
||||
<div class="panel-header">
|
||||
<span class="panel-title">🎯 활성 시나리오 매칭</span>
|
||||
<div class="panel-controls">
|
||||
<select id="scen-market-select" onchange="fetchScenarios()">
|
||||
<option value="KR">KR</option>
|
||||
<option value="US_NASDAQ">US_NASDAQ</option>
|
||||
</select>
|
||||
</div>
|
||||
</div>
|
||||
<div id="scenarios-content"><p class="empty-msg">데이터 없음</p></div>
|
||||
</div>
|
||||
|
||||
<!-- context layer panel -->
|
||||
<div class="panel">
|
||||
<div class="panel-header">
|
||||
<span class="panel-title">🧠 컨텍스트 트리</span>
|
||||
<div class="panel-controls">
|
||||
<select id="ctx-layer-select" onchange="fetchContext()">
|
||||
<option value="L7_REALTIME">L7_REALTIME</option>
|
||||
<option value="L6_DAILY">L6_DAILY</option>
|
||||
<option value="L5_WEEKLY">L5_WEEKLY</option>
|
||||
<option value="L4_MONTHLY">L4_MONTHLY</option>
|
||||
<option value="L3_QUARTERLY">L3_QUARTERLY</option>
|
||||
<option value="L2_YEARLY">L2_YEARLY</option>
|
||||
<option value="L1_LIFETIME">L1_LIFETIME</option>
|
||||
</select>
|
||||
<input id="ctx-limit" type="number" value="20" min="1" max="200"
|
||||
style="width:60px;" onchange="fetchContext()">
|
||||
</div>
|
||||
</div>
|
||||
<div id="context-content"><p class="empty-msg">데이터 없음</p></div>
|
||||
</div>
|
||||
</div>
|
||||
|
||||
<script>
|
||||
let pnlChart = null;
|
||||
let currentDays = 7;
|
||||
let currentMarket = 'KR';
|
||||
|
||||
function fmt(dt) {
|
||||
try {
|
||||
const d = new Date(dt);
|
||||
return d.toLocaleTimeString('ko-KR', { hour: '2-digit', minute: '2-digit', hour12: false });
|
||||
} catch { return dt || '--'; }
|
||||
}
|
||||
|
||||
function fmtPnl(v) {
|
||||
if (v === null || v === undefined) return '--';
|
||||
const n = parseFloat(v);
|
||||
const cls = n > 0 ? 'positive' : n < 0 ? 'negative' : 'neutral';
|
||||
const sign = n > 0 ? '+' : '';
|
||||
return `<span class="${cls}">${sign}${n.toFixed(2)}</span>`;
|
||||
}
|
||||
|
||||
function badge(action) {
|
||||
const a = (action || '').toUpperCase();
|
||||
const cls = a === 'BUY' ? 'badge-buy' : a === 'SELL' ? 'badge-sell' : 'badge-hold';
|
||||
return `<span class="badge ${cls}">${a}</span>`;
|
||||
}
|
||||
|
||||
function confBar(conf) {
|
||||
const pct = Math.min(Math.max(conf || 0, 0), 100);
|
||||
return `<div class="conf-bar-wrap">
|
||||
<div class="conf-bar"><div class="conf-fill" style="width:${pct}%"></div></div>
|
||||
<span class="conf-val">${pct}</span>
|
||||
</div>`;
|
||||
}
|
||||
|
||||
function fmtPrice(v, market) {
|
||||
if (v === null || v === undefined) return '--';
|
||||
const n = parseFloat(v);
|
||||
const sym = market === 'KR' ? '₩' : market === 'JP' ? '¥' : market === 'HK' ? 'HK$' : '$';
|
||||
return sym + n.toLocaleString('en-US', { minimumFractionDigits: 0, maximumFractionDigits: 4 });
|
||||
}
|
||||
|
||||
async function fetchPositions() {
|
||||
const tbody = document.getElementById('positions-body');
|
||||
const countEl = document.getElementById('positions-count');
|
||||
try {
|
||||
const r = await fetch('/api/positions');
|
||||
if (!r.ok) throw new Error('fetch failed');
|
||||
const d = await r.json();
|
||||
countEl.textContent = d.count ?? 0;
|
||||
if (!d.positions || d.positions.length === 0) {
|
||||
tbody.innerHTML = '<tr><td colspan="5" class="pos-empty">현재 보유 중인 포지션 없음</td></tr>';
|
||||
return;
|
||||
}
|
||||
tbody.innerHTML = d.positions.map(p => `
|
||||
<tr>
|
||||
<td><strong>${p.stock_code || '--'}</strong></td>
|
||||
<td><span style="color:var(--muted);font-size:11px">${p.market || '--'}</span></td>
|
||||
<td>${p.quantity ?? '--'}</td>
|
||||
<td>${fmtPrice(p.entry_price, p.market)}</td>
|
||||
<td style="color:var(--muted);font-size:11px">${p.held || '--'}</td>
|
||||
</tr>
|
||||
`).join('');
|
||||
} catch {
|
||||
tbody.innerHTML = '<tr><td colspan="5" class="pos-empty">데이터 로드 실패</td></tr>';
|
||||
}
|
||||
}
|
||||
|
||||
function renderCbGauge(cb) {
|
||||
if (!cb) return;
|
||||
const dot = document.getElementById('cb-dot');
|
||||
const label = document.getElementById('cb-label');
|
||||
const bar = document.getElementById('cb-bar');
|
||||
|
||||
const status = cb.status || 'unknown';
|
||||
const threshold = cb.threshold_pct ?? -3.0;
|
||||
const current = cb.current_pnl_pct;
|
||||
|
||||
// dot color
|
||||
dot.className = `cb-dot ${status}`;
|
||||
|
||||
// label
|
||||
if (current !== null && current !== undefined) {
|
||||
const sign = current > 0 ? '+' : '';
|
||||
label.textContent = `CB ${sign}${current.toFixed(2)}%`;
|
||||
} else {
|
||||
label.textContent = 'CB --';
|
||||
}
|
||||
|
||||
// bar: fill = how much of the threshold has been consumed (0%=safe, 100%=tripped)
|
||||
const colorMap = { ok: 'var(--accent)', warning: 'var(--warn)', tripped: 'var(--red)', unknown: 'var(--border)' };
|
||||
bar.style.background = colorMap[status] || 'var(--border)';
|
||||
if (current !== null && current !== undefined && threshold < 0) {
|
||||
const fillPct = Math.min(Math.max((current / threshold) * 100, 0), 100);
|
||||
bar.style.width = `${fillPct}%`;
|
||||
} else {
|
||||
bar.style.width = '0%';
|
||||
}
|
||||
}
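// Illustrative shape of the circuit_breaker payload consumed by renderCbGauge above
// (field names inferred from this renderer, not confirmed against the backend):
//   { status: "warning", threshold_pct: -3.0, current_pnl_pct: -1.8 }
// With threshold -3.0 and current -1.8 the bar fills (-1.8 / -3.0) * 100 = 60%
// of the way to a trip; positive P&L clamps to 0%.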
|
||||
|
||||
async function fetchStatus() {
|
||||
try {
|
||||
const r = await fetch('/api/status');
|
||||
if (!r.ok) return;
|
||||
const d = await r.json();
|
||||
const t = d.totals || {};
|
||||
document.getElementById('card-trades').textContent = t.trade_count ?? '--';
|
||||
const pnlEl = document.getElementById('card-pnl');
|
||||
const pnlV = t.total_pnl;
|
||||
if (pnlV !== undefined) {
|
||||
const n = parseFloat(pnlV);
|
||||
const sign = n > 0 ? '+' : '';
|
||||
pnlEl.textContent = `${sign}${n.toFixed(2)}`;
|
||||
pnlEl.className = `card-value ${n > 0 ? 'positive' : n < 0 ? 'negative' : 'neutral'}`;
|
||||
}
|
||||
document.getElementById('card-pnl-sub').textContent = `결정 ${t.decision_count ?? 0}건`;
|
||||
renderCbGauge(d.circuit_breaker);
|
||||
renderModeBadge(d.mode);
|
||||
} catch {}
|
||||
}
|
||||
|
||||
function renderModeBadge(mode) {
|
||||
const el = document.getElementById('mode-badge');
|
||||
if (!el) return;
|
||||
if (mode === 'live') {
|
||||
el.textContent = '🔴 실전투자';
|
||||
el.className = 'mode-badge live';
|
||||
} else {
|
||||
el.textContent = '🟡 모의투자';
|
||||
el.className = 'mode-badge paper';
|
||||
}
|
||||
}
|
||||
|
||||
async function fetchPerformance() {
|
||||
try {
|
||||
const r = await fetch('/api/performance?market=all');
|
||||
if (!r.ok) return;
|
||||
const d = await r.json();
|
||||
const c = d.combined || {};
|
||||
document.getElementById('card-winrate').textContent = c.win_rate !== undefined ? `${c.win_rate}%` : '--';
|
||||
document.getElementById('card-total').textContent = c.total_trades ?? '--';
|
||||
} catch {}
|
||||
}
|
||||
|
||||
async function fetchPnlHistory(days) {
|
||||
try {
|
||||
const r = await fetch(`/api/pnl/history?days=${days}`);
|
||||
if (!r.ok) throw new Error('fetch failed');
|
||||
const d = await r.json();
|
||||
renderChart(d);
|
||||
} catch {
|
||||
document.getElementById('chart-error').style.display = 'block';
|
||||
}
|
||||
}
|
||||
|
||||
function renderChart(data) {
|
||||
const errEl = document.getElementById('chart-error');
|
||||
if (!data.labels || data.labels.length === 0) {
|
||||
errEl.style.display = 'block';
|
||||
return;
|
||||
}
|
||||
errEl.style.display = 'none';
|
||||
|
||||
const colors = data.pnl.map(v => v >= 0 ? 'rgba(60,179,113,0.75)' : 'rgba(224,85,85,0.75)');
|
||||
const borderColors = data.pnl.map(v => v >= 0 ? '#3cb371' : '#e05555');
|
||||
|
||||
if (pnlChart) { pnlChart.destroy(); pnlChart = null; }
|
||||
const ctx = document.getElementById('pnl-chart').getContext('2d');
|
||||
pnlChart = new Chart(ctx, {
|
||||
type: 'bar',
|
||||
data: {
|
||||
labels: data.labels,
|
||||
datasets: [{
|
||||
label: 'Daily P&L',
|
||||
data: data.pnl,
|
||||
backgroundColor: colors,
|
||||
borderColor: borderColors,
|
||||
borderWidth: 1,
|
||||
borderRadius: 3,
|
||||
}]
|
||||
},
|
||||
options: {
|
||||
responsive: true,
|
||||
maintainAspectRatio: false,
|
||||
plugins: {
|
||||
legend: { display: false },
|
||||
tooltip: {
|
||||
callbacks: {
|
||||
label: ctx => {
|
||||
const v = ctx.parsed.y;
|
||||
const sign = v >= 0 ? '+' : '';
|
||||
const trades = data.trades[ctx.dataIndex];
|
||||
return [`P&L: ${sign}${v.toFixed(2)}`, `거래: ${trades}건`];
|
||||
}
|
||||
}
|
||||
}
|
||||
},
|
||||
scales: {
|
||||
x: {
|
||||
ticks: { color: '#9fb3c8', font: { size: 10 }, maxRotation: 0 },
|
||||
grid: { color: 'rgba(40,69,95,0.4)' }
|
||||
},
|
||||
y: {
|
||||
ticks: { color: '#9fb3c8', font: { size: 10 } },
|
||||
grid: { color: 'rgba(40,69,95,0.4)' }
|
||||
}
|
||||
}
|
||||
}
|
||||
});
|
||||
}
|
||||
|
||||
async function fetchDecisions(market) {
|
||||
const tbody = document.getElementById('decisions-body');
|
||||
tbody.innerHTML = '<tr class="empty-row"><td colspan="5"><span class="spinner"></span></td></tr>';
|
||||
try {
|
||||
const r = await fetch(`/api/decisions?market=${market}&limit=50`);
|
||||
if (!r.ok) throw new Error('fetch failed');
|
||||
const d = await r.json();
|
||||
if (!d.decisions || d.decisions.length === 0) {
|
||||
tbody.innerHTML = '<tr class="empty-row"><td colspan="5">결정 로그 없음</td></tr>';
|
||||
return;
|
||||
}
|
||||
tbody.innerHTML = d.decisions.map(dec => `
|
||||
<tr>
|
||||
<td>${fmt(dec.timestamp)}</td>
|
||||
<td>${dec.stock_code || '--'}</td>
|
||||
<td>${badge(dec.action)}</td>
|
||||
<td>${confBar(dec.confidence)}</td>
|
||||
<td class="rationale-cell" title="${(dec.rationale || '').replace(/"/g, '&quot;')}">${dec.rationale || '--'}</td>
|
||||
</tr>
|
||||
`).join('');
|
||||
} catch {
|
||||
tbody.innerHTML = '<tr class="empty-row"><td colspan="5">데이터 로드 실패</td></tr>';
|
||||
}
|
||||
}
|
||||
|
||||
function selectDays(btn) {
|
||||
document.querySelectorAll('.day-btn').forEach(b => b.classList.remove('active'));
|
||||
btn.classList.add('active');
|
||||
currentDays = parseInt(btn.dataset.days, 10);
|
||||
fetchPnlHistory(currentDays);
|
||||
}
|
||||
|
||||
function selectMarket(btn) {
|
||||
document.querySelectorAll('.tab-btn').forEach(b => b.classList.remove('active'));
|
||||
btn.classList.add('active');
|
||||
currentMarket = btn.dataset.market;
|
||||
fetchDecisions(currentMarket);
|
||||
}
|
||||
|
||||
function todayStr() {
|
||||
return new Date().toISOString().slice(0, 10);
|
||||
}
|
||||
|
||||
function esc(s) {
|
||||
return String(s ?? '').replace(/&/g, '&amp;').replace(/</g, '&lt;').replace(/>/g, '&gt;').replace(/"/g, '&quot;');
|
||||
}
|
||||
|
||||
async function fetchJSON(url) {
|
||||
const r = await fetch(url);
|
||||
if (!r.ok) throw new Error(`HTTP ${r.status}`);
|
||||
return r.json();
|
||||
}
|
||||
|
||||
async function fetchPlaybook() {
|
||||
const market = document.getElementById('pb-market-select').value;
|
||||
const date = todayStr();
|
||||
document.getElementById('pb-date').textContent = date;
|
||||
const el = document.getElementById('playbook-content');
|
||||
try {
|
||||
const data = await fetchJSON(`/api/playbook/${date}?market=${market}`);
|
||||
const stocks = data.stock_playbooks ?? [];
|
||||
if (stocks.length === 0) {
|
||||
el.innerHTML = '<p class="empty-msg">오늘 플레이북 없음</p>';
|
||||
return;
|
||||
}
|
||||
el.innerHTML = stocks.map(sp =>
|
||||
`<details><summary>${esc(sp.stock_code ?? '?')} — ${esc(sp.signal ?? '')}</summary>` +
|
||||
`<pre>${esc(JSON.stringify(sp, null, 2))}</pre></details>`
|
||||
).join('');
|
||||
} catch {
|
||||
el.innerHTML = '<p class="empty-msg">플레이북 없음 (오늘 미생성 또는 API 오류)</p>';
|
||||
}
|
||||
}
|
||||
|
||||
async function fetchScorecard() {
|
||||
const market = document.getElementById('sc-market-select').value;
|
||||
const date = todayStr();
|
||||
document.getElementById('sc-date').textContent = date;
|
||||
const el = document.getElementById('scorecard-grid');
|
||||
try {
|
||||
const data = await fetchJSON(`/api/scorecard/${date}?market=${market}`);
|
||||
const sc = data.scorecard ?? {};
|
||||
const entries = Object.entries(sc);
|
||||
if (entries.length === 0) {
|
||||
el.innerHTML = '<p class="empty-msg">스코어카드 없음</p>';
|
||||
return;
|
||||
}
|
||||
el.className = 'scorecard-grid';
|
||||
el.innerHTML = entries.map(([k, v]) => `
|
||||
<div class="kpi-card">
|
||||
<div class="kpi-label">${esc(k)}</div>
|
||||
<div class="kpi-value">${typeof v === 'number' ? v.toFixed(2) : esc(String(v))}</div>
|
||||
</div>`).join('');
|
||||
} catch {
|
||||
el.innerHTML = '<p class="empty-msg">스코어카드 없음 (오늘 미생성 또는 API 오류)</p>';
|
||||
}
|
||||
}
|
||||
|
||||
async function fetchScenarios() {
|
||||
const market = document.getElementById('scen-market-select').value;
|
||||
const date = todayStr();
|
||||
const el = document.getElementById('scenarios-content');
|
||||
try {
|
||||
const data = await fetchJSON(`/api/scenarios/active?market=${market}&date_str=${date}&limit=50`);
|
||||
const matches = data.matches ?? [];
|
||||
if (matches.length === 0) {
|
||||
el.innerHTML = '<p class="empty-msg">활성 시나리오 없음</p>';
|
||||
return;
|
||||
}
|
||||
el.innerHTML = `<table class="scenarios-table">
|
||||
<thead><tr><th>종목</th><th>신호</th><th>신뢰도</th><th>매칭 조건</th></tr></thead>
|
||||
<tbody>${matches.map(m => `
|
||||
<tr>
|
||||
<td>${esc(m.stock_code)}</td>
|
||||
<td>${esc(m.signal ?? '-')}</td>
|
||||
<td>${esc(m.confidence ?? '-')}</td>
|
||||
<td><code style="font-size:11px">${esc(JSON.stringify(m.scenario_match ?? {}))}</code></td>
|
||||
</tr>`).join('')}
|
||||
</tbody></table>`;
|
||||
} catch {
|
||||
el.innerHTML = '<p class="empty-msg">데이터 없음</p>';
|
||||
}
|
||||
}
|
||||
|
||||
async function fetchContext() {
|
||||
const layer = document.getElementById('ctx-layer-select').value;
|
||||
const limit = Math.min(Math.max(parseInt(document.getElementById('ctx-limit').value, 10) || 20, 1), 200);
|
||||
const el = document.getElementById('context-content');
|
||||
try {
|
||||
const data = await fetchJSON(`/api/context/${layer}?limit=${limit}`);
|
||||
const entries = data.entries ?? [];
|
||||
if (entries.length === 0) {
|
||||
el.innerHTML = '<p class="empty-msg">컨텍스트 없음</p>';
|
||||
return;
|
||||
}
|
||||
el.innerHTML = `<table class="context-table">
|
||||
<thead><tr><th>timeframe</th><th>key</th><th>value</th><th>updated</th></tr></thead>
|
||||
<tbody>${entries.map(e => `
|
||||
<tr>
|
||||
<td>${esc(e.timeframe)}</td>
|
||||
<td>${esc(e.key)}</td>
|
||||
<td><div class="context-value">${esc(JSON.stringify(e.value ?? e.raw_value))}</div></td>
|
||||
<td style="font-size:11px;color:var(--muted)">${esc((e.updated_at ?? '').slice(0, 16))}</td>
|
||||
</tr>`).join('')}
|
||||
</tbody></table>`;
|
||||
} catch {
|
||||
el.innerHTML = '<p class="empty-msg">데이터 없음</p>';
|
||||
}
|
||||
}
|
||||
|
||||
async function refreshAll() {
|
||||
document.getElementById('last-updated').textContent = '업데이트 중...';
|
||||
await Promise.all([
|
||||
fetchStatus(),
|
||||
fetchPerformance(),
|
||||
fetchPositions(),
|
||||
fetchPnlHistory(currentDays),
|
||||
fetchDecisions(currentMarket),
|
||||
fetchPlaybook(),
|
||||
fetchScorecard(),
|
||||
fetchScenarios(),
|
||||
fetchContext(),
|
||||
]);
|
||||
const now = new Date();
|
||||
const timeStr = now.toLocaleTimeString('ko-KR', { hour: '2-digit', minute: '2-digit', second: '2-digit', hour12: false });
|
||||
document.getElementById('last-updated').textContent = `마지막 업데이트: ${timeStr}`;
|
||||
}
|
||||
|
||||
// Initial load
|
||||
refreshAll();
|
||||
|
||||
// Auto-refresh every 30 seconds
|
||||
setInterval(refreshAll, 30000);
|
||||
</script>
|
||||
</body>
|
||||
</html>

146 src/db.py
@@ -14,6 +14,11 @@ def init_db(db_path: str) -> sqlite3.Connection:
|
||||
if db_path != ":memory:":
|
||||
Path(db_path).parent.mkdir(parents=True, exist_ok=True)
|
||||
conn = sqlite3.connect(db_path)
|
||||
# Enable WAL mode for concurrent read/write (dashboard + trading loop).
|
||||
# WAL does not apply to in-memory databases.
|
||||
if db_path != ":memory:":
|
||||
conn.execute("PRAGMA journal_mode=WAL")
|
||||
conn.execute("PRAGMA busy_timeout=5000")
|
||||
conn.execute(
|
||||
"""
|
||||
CREATE TABLE IF NOT EXISTS trades (
|
||||
@@ -26,14 +31,19 @@ def init_db(db_path: str) -> sqlite3.Connection:
|
||||
quantity INTEGER,
|
||||
price REAL,
|
||||
pnl REAL DEFAULT 0.0,
|
||||
strategy_pnl REAL DEFAULT 0.0,
|
||||
fx_pnl REAL DEFAULT 0.0,
|
||||
market TEXT DEFAULT 'KR',
|
||||
exchange_code TEXT DEFAULT 'KRX',
|
||||
decision_id TEXT
|
||||
session_id TEXT DEFAULT 'UNKNOWN',
|
||||
selection_context TEXT,
|
||||
decision_id TEXT,
|
||||
mode TEXT DEFAULT 'paper'
|
||||
)
|
||||
"""
|
||||
)
|
||||
|
||||
# Migration: Add market and exchange_code columns if they don't exist
|
||||
# Migration: Add columns if they don't exist (backward-compatible schema upgrades)
|
||||
cursor = conn.execute("PRAGMA table_info(trades)")
|
||||
columns = {row[1] for row in cursor.fetchall()}
|
||||
|
||||
@@ -45,6 +55,34 @@ def init_db(db_path: str) -> sqlite3.Connection:
|
||||
conn.execute("ALTER TABLE trades ADD COLUMN selection_context TEXT")
|
||||
if "decision_id" not in columns:
|
||||
conn.execute("ALTER TABLE trades ADD COLUMN decision_id TEXT")
|
||||
if "mode" not in columns:
|
||||
conn.execute("ALTER TABLE trades ADD COLUMN mode TEXT DEFAULT 'paper'")
|
||||
session_id_added = False
|
||||
if "session_id" not in columns:
|
||||
conn.execute("ALTER TABLE trades ADD COLUMN session_id TEXT DEFAULT 'UNKNOWN'")
|
||||
session_id_added = True
|
||||
if "strategy_pnl" not in columns:
|
||||
conn.execute("ALTER TABLE trades ADD COLUMN strategy_pnl REAL DEFAULT 0.0")
|
||||
if "fx_pnl" not in columns:
|
||||
conn.execute("ALTER TABLE trades ADD COLUMN fx_pnl REAL DEFAULT 0.0")
|
||||
# Backfill legacy rows where only pnl existed before split accounting columns.
|
||||
conn.execute(
|
||||
"""
|
||||
UPDATE trades
|
||||
SET strategy_pnl = pnl, fx_pnl = 0.0
|
||||
WHERE pnl != 0.0
|
||||
AND strategy_pnl = 0.0
|
||||
AND fx_pnl = 0.0
|
||||
"""
|
||||
)
|
||||
if session_id_added:
|
||||
conn.execute(
|
||||
"""
|
||||
UPDATE trades
|
||||
SET session_id = 'UNKNOWN'
|
||||
WHERE session_id IS NULL OR session_id = ''
|
||||
"""
|
||||
)
|
||||
|
||||
# Context tree tables for multi-layered memory management
|
||||
conn.execute(
|
||||
@@ -131,6 +169,25 @@ def init_db(db_path: str) -> sqlite3.Connection:
|
||||
conn.execute(
|
||||
"CREATE INDEX IF NOT EXISTS idx_decision_logs_confidence ON decision_logs(confidence)"
|
||||
)
|
||||
|
||||
# Index for open-position queries (partition by stock_code, market, ordered by timestamp)
|
||||
conn.execute(
|
||||
"CREATE INDEX IF NOT EXISTS idx_trades_stock_market_ts"
|
||||
" ON trades (stock_code, market, timestamp DESC)"
|
||||
)
|
||||
|
||||
# Lightweight key-value store for trading system runtime metrics (dashboard use only)
|
||||
# Intentionally separate from the AI context tree to preserve separation of concerns.
|
||||
conn.execute(
|
||||
"""
|
||||
CREATE TABLE IF NOT EXISTS system_metrics (
|
||||
key TEXT PRIMARY KEY,
|
||||
value TEXT NOT NULL,
|
||||
updated_at TEXT NOT NULL
|
||||
)
|
||||
"""
|
||||
)
|
||||
|
||||
conn.commit()
|
||||
return conn
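
A minimal sketch of how the system_metrics store above could be written to (the helper name and upsert pattern are assumptions; only the table schema comes from this diff, and json/datetime/sqlite3 are already used by this module):

def set_metric(conn: sqlite3.Connection, key: str, value: object) -> None:
    # One row per key: INSERT OR REPLACE leans on the PRIMARY KEY constraint.
    conn.execute(
        "INSERT OR REPLACE INTO system_metrics (key, value, updated_at) VALUES (?, ?, ?)",
        (key, json.dumps(value), datetime.now(UTC).isoformat()),
    )
    conn.commit()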
|
||||
|
||||
@@ -144,10 +201,14 @@ def log_trade(
|
||||
quantity: int = 0,
|
||||
price: float = 0.0,
|
||||
pnl: float = 0.0,
|
||||
strategy_pnl: float | None = None,
|
||||
fx_pnl: float | None = None,
|
||||
market: str = "KR",
|
||||
exchange_code: str = "KRX",
|
||||
session_id: str | None = None,
|
||||
selection_context: dict[str, Any] | None = None,
|
||||
decision_id: str | None = None,
|
||||
mode: str = "paper",
|
||||
) -> None:
|
||||
"""Insert a trade record into the database.
|
||||
|
||||
@@ -159,21 +220,37 @@ def log_trade(
|
||||
rationale: AI decision rationale
|
||||
quantity: Number of shares
|
||||
price: Trade price
|
||||
pnl: Profit/loss
|
||||
pnl: Total profit/loss (backward compatibility)
|
||||
strategy_pnl: Strategy PnL component
|
||||
fx_pnl: FX PnL component
|
||||
market: Market code
|
||||
exchange_code: Exchange code
|
||||
session_id: Session identifier (if omitted, auto-derived from market)
|
||||
selection_context: Scanner selection data (RSI, volume_ratio, signal, score)
|
||||
decision_id: Unique decision identifier for audit linking
|
||||
mode: Trading mode ('paper' or 'live') for data separation
|
||||
"""
|
||||
# Serialize selection context to JSON
|
||||
context_json = json.dumps(selection_context) if selection_context else None
|
||||
resolved_session_id = _resolve_session_id(market=market, session_id=session_id)
|
||||
if strategy_pnl is None and fx_pnl is None:
|
||||
strategy_pnl = pnl
|
||||
fx_pnl = 0.0
|
||||
elif strategy_pnl is None:
|
||||
strategy_pnl = pnl - float(fx_pnl or 0.0) if pnl != 0.0 else 0.0
|
||||
elif fx_pnl is None:
|
||||
fx_pnl = pnl - float(strategy_pnl) if pnl != 0.0 else 0.0
|
||||
if pnl == 0.0 and (strategy_pnl or fx_pnl):
|
||||
pnl = float(strategy_pnl) + float(fx_pnl)
|
||||
|
||||
conn.execute(
|
||||
"""
|
||||
INSERT INTO trades (
|
||||
timestamp, stock_code, action, confidence, rationale,
|
||||
quantity, price, pnl, market, exchange_code, selection_context, decision_id
|
||||
quantity, price, pnl, strategy_pnl, fx_pnl,
|
||||
market, exchange_code, session_id, selection_context, decision_id, mode
|
||||
)
|
||||
VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)
|
||||
VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)
|
||||
""",
|
||||
(
|
||||
datetime.now(UTC).isoformat(),
|
||||
@@ -184,15 +261,34 @@ def log_trade(
|
||||
quantity,
|
||||
price,
|
||||
pnl,
|
||||
strategy_pnl,
|
||||
fx_pnl,
|
||||
market,
|
||||
exchange_code,
|
||||
resolved_session_id,
|
||||
context_json,
|
||||
decision_id,
|
||||
mode,
|
||||
),
|
||||
)
|
||||
conn.commit()
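
Two illustrative calls showing the PnL back-fill rules above (tickers, prices, and exchange codes are invented, and the positional parameters ahead of quantity are assumed from the INSERT column order):

# Only the total is known: strategy_pnl becomes 120.0 and fx_pnl 0.0.
log_trade(conn, "AAPL", "SELL", 80, "take profit", quantity=10, price=190.0,
          pnl=120.0, market="US_NASDAQ", exchange_code="NASD")

# Only the components are known: pnl (default 0.0) is recomputed as their sum, 28800.0.
log_trade(conn, "7203", "SELL", 75, "trailing stop", quantity=100, price=2500.0,
          strategy_pnl=30000.0, fx_pnl=-1200.0, market="JP", exchange_code="TSE")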
|
||||
|
||||
|
||||
def _resolve_session_id(*, market: str, session_id: str | None) -> str:
|
||||
if session_id:
|
||||
return session_id
|
||||
try:
|
||||
from src.core.order_policy import classify_session_id
|
||||
from src.markets.schedule import MARKETS
|
||||
|
||||
market_info = MARKETS.get(market)
|
||||
if market_info is not None:
|
||||
return classify_session_id(market_info)
|
||||
except Exception:
|
||||
pass
|
||||
return "UNKNOWN"
|
||||
|
||||
|
||||
def get_latest_buy_trade(
|
||||
conn: sqlite3.Connection, stock_code: str, market: str
|
||||
) -> dict[str, Any] | None:
|
||||
@@ -214,3 +310,43 @@ def get_latest_buy_trade(
|
||||
if not row:
|
||||
return None
|
||||
return {"decision_id": row[0], "price": row[1], "quantity": row[2]}
|
||||
|
||||
|
||||
def get_open_position(
|
||||
conn: sqlite3.Connection, stock_code: str, market: str
|
||||
) -> dict[str, Any] | None:
|
||||
"""Return open position if latest trade is BUY, else None."""
|
||||
cursor = conn.execute(
|
||||
"""
|
||||
SELECT action, decision_id, price, quantity, timestamp
|
||||
FROM trades
|
||||
WHERE stock_code = ?
|
||||
AND market = ?
|
||||
AND action IN ('BUY', 'SELL')
|
||||
ORDER BY timestamp DESC
|
||||
LIMIT 1
|
||||
""",
|
||||
(stock_code, market),
|
||||
)
|
||||
row = cursor.fetchone()
|
||||
if not row or row[0] != "BUY":
|
||||
return None
|
||||
return {"decision_id": row[1], "price": row[2], "quantity": row[3], "timestamp": row[4]}
|
||||
|
||||
|
||||
def get_recent_symbols(
|
||||
conn: sqlite3.Connection, market: str, limit: int = 30
|
||||
) -> list[str]:
|
||||
"""Return recent unique symbols for a market, newest first."""
|
||||
cursor = conn.execute(
|
||||
"""
|
||||
SELECT stock_code, MAX(timestamp) AS last_ts
|
||||
FROM trades
|
||||
WHERE market = ?
|
||||
GROUP BY stock_code
|
||||
ORDER BY last_ts DESC
|
||||
LIMIT ?
|
||||
""",
|
||||
(market, limit),
|
||||
)
|
||||
return [row[0] for row in cursor.fetchall() if row and row[0]]
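
A hypothetical read path combining the two helpers above (the database path is invented):

conn = init_db("data/ouroboros.db")
for symbol in get_recent_symbols(conn, market="KR", limit=10):
    position = get_open_position(conn, symbol, "KR")
    if position is not None:
        # The latest trade was a BUY with no closing SELL, so it is still held.
        print(symbol, position["quantity"], "shares @", position["price"])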
|
||||
|
||||
2538 src/main.py
File diff suppressed because it is too large

src/markets/schedule.py
@@ -123,6 +123,23 @@ MARKETS: dict[str, MarketInfo] = {
|
||||
),
|
||||
}
|
||||
|
||||
MARKET_SHORTHAND: dict[str, list[str]] = {
|
||||
"US": ["US_NASDAQ", "US_NYSE", "US_AMEX"],
|
||||
"CN": ["CN_SHA", "CN_SZA"],
|
||||
"VN": ["VN_HAN", "VN_HCM"],
|
||||
}
|
||||
|
||||
|
||||
def expand_market_codes(codes: list[str]) -> list[str]:
|
||||
"""Expand shorthand market codes into concrete exchange market codes."""
|
||||
expanded: list[str] = []
|
||||
for code in codes:
|
||||
if code in MARKET_SHORTHAND:
|
||||
expanded.extend(MARKET_SHORTHAND[code])
|
||||
else:
|
||||
expanded.append(code)
|
||||
return expanded
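
For example, shorthand codes expand while concrete codes pass through untouched:

expand_market_codes(["US", "KR"])
# -> ["US_NASDAQ", "US_NYSE", "US_AMEX", "KR"]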
|
||||
|
||||
|
||||
def is_market_open(market: MarketInfo, now: datetime | None = None) -> bool:
|
||||
"""
|
||||
|
||||
@@ -4,8 +4,9 @@ import asyncio
|
||||
import logging
|
||||
import time
|
||||
from collections.abc import Awaitable, Callable
|
||||
from dataclasses import dataclass
|
||||
from dataclasses import dataclass, fields
|
||||
from enum import Enum
|
||||
from typing import ClassVar
|
||||
|
||||
import aiohttp
|
||||
|
||||
@@ -58,6 +59,45 @@ class LeakyBucket:
|
||||
self._tokens -= 1.0
|
||||
|
||||
|
||||
@dataclass
class NotificationFilter:
    """Granular on/off flags for each notification type.

    circuit_breaker is intentionally omitted — it is always sent regardless.
    """

    # Maps user-facing command keys to dataclass field names
    KEYS: ClassVar[dict[str, str]] = {
        "trades": "trades",
        "market": "market_open_close",
        "fatfinger": "fat_finger",
        "system": "system_events",
        "playbook": "playbook",
        "scenario": "scenario_match",
        "errors": "errors",
    }

    trades: bool = True
    market_open_close: bool = True
    fat_finger: bool = True
    system_events: bool = True
    playbook: bool = True
    scenario_match: bool = True
    errors: bool = True

    def set_flag(self, key: str, value: bool) -> bool:
        """Set a filter flag by user-facing key. Returns False if key is unknown."""
        field = self.KEYS.get(key.lower())
        if field is None:
            return False
        setattr(self, field, value)
        return True

    def as_dict(self) -> dict[str, bool]:
        """Return {user_key: current_value} for display."""
        return {k: getattr(self, field) for k, field in self.KEYS.items()}
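
A small standalone sketch of the filter (keys and defaults exactly as defined above):

f = NotificationFilter()
f.set_flag("scenario", False)   # user-facing key mapped to field scenario_match
f.set_flag("bogus", True)       # unknown key: returns False, state unchanged
assert f.as_dict()["scenario"] is False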
|
||||
|
||||
|
||||
@dataclass
|
||||
class NotificationMessage:
|
||||
"""Internal notification message structure."""
|
||||
@@ -79,6 +119,7 @@ class TelegramClient:
|
||||
chat_id: str | None = None,
|
||||
enabled: bool = True,
|
||||
rate_limit: float = DEFAULT_RATE,
|
||||
notification_filter: NotificationFilter | None = None,
|
||||
) -> None:
|
||||
"""
|
||||
Initialize Telegram client.
|
||||
@@ -88,12 +129,14 @@ class TelegramClient:
|
||||
chat_id: Target chat ID (user or group)
|
||||
enabled: Enable/disable notifications globally
|
||||
rate_limit: Maximum messages per second
|
||||
notification_filter: Granular per-type on/off flags
|
||||
"""
|
||||
self._bot_token = bot_token
|
||||
self._chat_id = chat_id
|
||||
self._enabled = enabled
|
||||
self._rate_limiter = LeakyBucket(rate=rate_limit)
|
||||
self._session: aiohttp.ClientSession | None = None
|
||||
self._filter = notification_filter if notification_filter is not None else NotificationFilter()
|
||||
|
||||
if not enabled:
|
||||
logger.info("Telegram notifications disabled via configuration")
|
||||
@@ -118,6 +161,26 @@ class TelegramClient:
|
||||
if self._session is not None and not self._session.closed:
|
||||
await self._session.close()
|
||||
|
||||
def set_notification(self, key: str, value: bool) -> bool:
|
||||
"""Toggle a notification type by user-facing key at runtime.
|
||||
|
||||
Args:
|
||||
key: User-facing key (e.g. "scenario", "market", "all")
|
||||
value: True to enable, False to disable
|
||||
|
||||
Returns:
|
||||
True if key was valid, False if unknown.
|
||||
"""
|
||||
if key == "all":
|
||||
for k in NotificationFilter.KEYS:
|
||||
self._filter.set_flag(k, value)
|
||||
return True
|
||||
return self._filter.set_flag(key, value)
|
||||
|
||||
def filter_status(self) -> dict[str, bool]:
|
||||
"""Return current per-type filter state keyed by user-facing names."""
|
||||
return self._filter.as_dict()
|
||||
|
||||
async def send_message(self, text: str, parse_mode: str = "HTML") -> bool:
|
||||
"""
|
||||
Send a generic text message to Telegram.
|
||||
@@ -193,6 +256,8 @@ class TelegramClient:
|
||||
price: Execution price
|
||||
confidence: AI confidence level (0-100)
|
||||
"""
|
||||
if not self._filter.trades:
|
||||
return
|
||||
emoji = "🟢" if action == "BUY" else "🔴"
|
||||
message = (
|
||||
f"<b>{emoji} {action}</b>\n"
|
||||
@@ -212,6 +277,8 @@ class TelegramClient:
|
||||
Args:
|
||||
market_name: Name of the market (e.g., "Korea", "United States")
|
||||
"""
|
||||
if not self._filter.market_open_close:
|
||||
return
|
||||
message = f"<b>Market Open</b>\n{market_name} trading session started"
|
||||
await self._send_notification(
|
||||
NotificationMessage(priority=NotificationPriority.LOW, message=message)
|
||||
@@ -225,6 +292,8 @@ class TelegramClient:
|
||||
market_name: Name of the market
|
||||
pnl_pct: Final P&L percentage for the session
|
||||
"""
|
||||
if not self._filter.market_open_close:
|
||||
return
|
||||
pnl_sign = "+" if pnl_pct >= 0 else ""
|
||||
pnl_emoji = "📈" if pnl_pct >= 0 else "📉"
|
||||
message = (
|
||||
@@ -271,6 +340,8 @@ class TelegramClient:
|
||||
total_cash: Total available cash
|
||||
max_pct: Maximum allowed percentage
|
||||
"""
|
||||
if not self._filter.fat_finger:
|
||||
return
|
||||
attempted_pct = (order_amount / total_cash) * 100 if total_cash > 0 else 0
|
||||
message = (
|
||||
f"<b>Fat-Finger Protection</b>\n"
|
||||
@@ -293,6 +364,8 @@ class TelegramClient:
|
||||
mode: Trading mode ("paper" or "live")
|
||||
enabled_markets: List of enabled market codes
|
||||
"""
|
||||
if not self._filter.system_events:
|
||||
return
|
||||
mode_emoji = "📝" if mode == "paper" else "💰"
|
||||
markets_str = ", ".join(enabled_markets)
|
||||
message = (
|
||||
@@ -320,6 +393,8 @@ class TelegramClient:
|
||||
scenario_count: Total number of scenarios
|
||||
token_count: Gemini token usage for the playbook
|
||||
"""
|
||||
if not self._filter.playbook:
|
||||
return
|
||||
message = (
|
||||
f"<b>Playbook Generated</b>\n"
|
||||
f"Market: {market}\n"
|
||||
@@ -347,6 +422,8 @@ class TelegramClient:
|
||||
condition_summary: Short summary of the matched condition
|
||||
confidence: Scenario confidence (0-100)
|
||||
"""
|
||||
if not self._filter.scenario_match:
|
||||
return
|
||||
message = (
|
||||
f"<b>Scenario Matched</b>\n"
|
||||
f"Symbol: <code>{stock_code}</code>\n"
|
||||
@@ -366,6 +443,8 @@ class TelegramClient:
|
||||
market: Market code (e.g., "KR", "US")
|
||||
reason: Failure reason summary
|
||||
"""
|
||||
if not self._filter.playbook:
|
||||
return
|
||||
message = (
|
||||
f"<b>Playbook Failed</b>\n"
|
||||
f"Market: {market}\n"
|
||||
@@ -382,6 +461,8 @@ class TelegramClient:
|
||||
Args:
|
||||
reason: Reason for shutdown (e.g., "Normal shutdown", "Circuit breaker")
|
||||
"""
|
||||
if not self._filter.system_events:
|
||||
return
|
||||
message = f"<b>System Shutdown</b>\n{reason}"
|
||||
priority = (
|
||||
NotificationPriority.CRITICAL
|
||||
@@ -392,6 +473,48 @@ class TelegramClient:
|
||||
NotificationMessage(priority=priority, message=message)
|
||||
)
|
||||
|
||||
async def notify_unfilled_order(
|
||||
self,
|
||||
stock_code: str,
|
||||
market: str,
|
||||
action: str,
|
||||
quantity: int,
|
||||
outcome: str,
|
||||
new_price: float | None = None,
|
||||
) -> None:
|
||||
"""Notify about an unfilled overseas order that was cancelled or resubmitted.
|
||||
|
||||
Args:
|
||||
stock_code: Stock ticker symbol.
|
||||
market: Exchange/market code (e.g., "NASD", "SEHK").
|
||||
action: "BUY" or "SELL".
|
||||
quantity: Unfilled quantity.
|
||||
outcome: "cancelled" or "resubmitted".
|
||||
new_price: New order price if resubmitted (None if only cancelled).
|
||||
"""
|
||||
if not self._filter.trades:
|
||||
return
|
||||
# SELL resubmit is high priority — position liquidation at risk.
|
||||
# BUY cancel is medium priority — only cash is freed.
|
||||
priority = (
|
||||
NotificationPriority.HIGH
|
||||
if action == "SELL"
|
||||
else NotificationPriority.MEDIUM
|
||||
)
|
||||
outcome_emoji = "🔄" if outcome == "resubmitted" else "❌"
|
||||
outcome_label = "재주문" if outcome == "resubmitted" else "취소됨"
|
||||
action_emoji = "🔴" if action == "SELL" else "🟢"
|
||||
lines = [
|
||||
f"<b>{outcome_emoji} 미체결 주문 {outcome_label}</b>",
|
||||
f"Symbol: <code>{stock_code}</code> ({market})",
|
||||
f"Action: {action_emoji} {action}",
|
||||
f"Quantity: {quantity:,} shares",
|
||||
]
|
||||
if new_price is not None:
|
||||
lines.append(f"New Price: {new_price:.4f}")
|
||||
message = "\n".join(lines)
|
||||
await self._send_notification(NotificationMessage(priority=priority, message=message))
|
||||
|
||||
async def notify_error(
|
||||
self, error_type: str, error_msg: str, context: str
|
||||
) -> None:
|
||||
@@ -403,6 +526,8 @@ class TelegramClient:
|
||||
error_msg: Error message
|
||||
context: Error context (e.g., stock code, market)
|
||||
"""
|
||||
if not self._filter.errors:
|
||||
return
|
||||
message = (
|
||||
f"<b>Error: {error_type}</b>\n"
|
||||
f"Context: {context}\n"
|
||||
@@ -429,6 +554,7 @@ class TelegramCommandHandler:
|
||||
self._client = client
|
||||
self._polling_interval = polling_interval
|
||||
self._commands: dict[str, Callable[[], Awaitable[None]]] = {}
|
||||
self._commands_with_args: dict[str, Callable[[list[str]], Awaitable[None]]] = {}
|
||||
self._last_update_id = 0
|
||||
self._polling_task: asyncio.Task[None] | None = None
|
||||
self._running = False
|
||||
@@ -437,7 +563,7 @@ class TelegramCommandHandler:
|
||||
self, command: str, handler: Callable[[], Awaitable[None]]
|
||||
) -> None:
|
||||
"""
|
||||
Register a command handler.
|
||||
Register a command handler (no arguments).
|
||||
|
||||
Args:
|
||||
command: Command name (without leading slash, e.g., "start")
|
||||
@@ -446,6 +572,19 @@ class TelegramCommandHandler:
|
||||
self._commands[command] = handler
|
||||
logger.debug("Registered command handler: /%s", command)
|
||||
|
||||
def register_command_with_args(
|
||||
self, command: str, handler: Callable[[list[str]], Awaitable[None]]
|
||||
) -> None:
|
||||
"""
|
||||
Register a command handler that receives trailing arguments.
|
||||
|
||||
Args:
|
||||
command: Command name (without leading slash, e.g., "notify")
|
||||
handler: Async function receiving list of argument tokens
|
||||
"""
|
||||
self._commands_with_args[command] = handler
|
||||
logger.debug("Registered command handler (with args): /%s", command)
|
||||
|
||||
async def start_polling(self) -> None:
|
||||
"""Start long polling for commands."""
|
||||
if self._running:
|
||||
@@ -507,9 +646,19 @@ class TelegramCommandHandler:
|
||||
async with session.post(url, json=payload) as resp:
|
||||
if resp.status != 200:
|
||||
error_text = await resp.text()
|
||||
logger.error(
|
||||
"getUpdates API error (status=%d): %s", resp.status, error_text
|
||||
)
|
||||
if resp.status == 409:
|
||||
# Another bot instance is already polling — stop this poller entirely.
|
||||
# Retrying would keep conflicting with the other instance.
|
||||
self._running = False
|
||||
logger.warning(
|
||||
"Telegram conflict (409): another instance is already polling. "
|
||||
"Disabling Telegram commands for this process. "
|
||||
"Ensure only one instance of The Ouroboros is running at a time.",
|
||||
)
|
||||
else:
|
||||
logger.error(
|
||||
"getUpdates API error (status=%d): %s", resp.status, error_text
|
||||
)
|
||||
return []
|
||||
|
||||
data = await resp.json()
|
||||
@@ -566,11 +715,14 @@ class TelegramCommandHandler:
|
||||
# Remove @botname suffix if present (for group chats)
|
||||
command_name = command_parts[0].split("@")[0]
|
||||
|
||||
# Execute handler
|
||||
handler = self._commands.get(command_name)
|
||||
if handler:
|
||||
# Execute handler (args-aware handlers take priority)
|
||||
args_handler = self._commands_with_args.get(command_name)
|
||||
if args_handler:
|
||||
logger.info("Executing command: /%s %s", command_name, command_parts[1:])
|
||||
await args_handler(command_parts[1:])
|
||||
elif command_name in self._commands:
|
||||
logger.info("Executing command: /%s", command_name)
|
||||
await handler()
|
||||
await self._commands[command_name]()
|
||||
else:
|
||||
logger.debug("Unknown command: /%s", command_name)
|
||||
await self._client.send_message(
|
||||
|
||||
104 src/strategy/exit_rules.py (Normal file)
@@ -0,0 +1,104 @@
"""Composite exit rules: hard stop, break-even lock, ATR trailing, model assist."""

from __future__ import annotations

from dataclasses import dataclass

from src.strategy.position_state_machine import PositionState, StateTransitionInput, promote_state


@dataclass(frozen=True)
class ExitRuleConfig:
    hard_stop_pct: float = -2.0
    be_arm_pct: float = 1.2
    arm_pct: float = 3.0
    atr_multiplier_k: float = 2.2
    model_prob_threshold: float = 0.62


@dataclass(frozen=True)
class ExitRuleInput:
    current_price: float
    entry_price: float
    peak_price: float
    atr_value: float = 0.0
    pred_down_prob: float = 0.0
    liquidity_weak: bool = False


@dataclass(frozen=True)
class ExitEvaluation:
    state: PositionState
    should_exit: bool
    reason: str
    unrealized_pnl_pct: float
    trailing_stop_price: float | None


def evaluate_exit(
    *,
    current_state: PositionState,
    config: ExitRuleConfig,
    inp: ExitRuleInput,
) -> ExitEvaluation:
    """Evaluate composite exit logic and return updated state."""
    if inp.entry_price <= 0 or inp.current_price <= 0:
        return ExitEvaluation(
            state=current_state,
            should_exit=False,
            reason="invalid_price",
            unrealized_pnl_pct=0.0,
            trailing_stop_price=None,
        )

    unrealized = (inp.current_price - inp.entry_price) / inp.entry_price * 100.0
    hard_stop_hit = unrealized <= config.hard_stop_pct
    take_profit_hit = unrealized >= config.arm_pct

    trailing_stop_price: float | None = None
    trailing_stop_hit = False
    if inp.atr_value > 0 and inp.peak_price > 0:
        trailing_stop_price = inp.peak_price - (config.atr_multiplier_k * inp.atr_value)
        trailing_stop_hit = inp.current_price <= trailing_stop_price

    be_lock_threat = current_state in (PositionState.BE_LOCK, PositionState.ARMED) and (
        inp.current_price <= inp.entry_price
    )
    model_exit_signal = inp.pred_down_prob >= config.model_prob_threshold and inp.liquidity_weak

    next_state = promote_state(
        current=current_state,
        inp=StateTransitionInput(
            unrealized_pnl_pct=unrealized,
            be_arm_pct=config.be_arm_pct,
            arm_pct=config.arm_pct,
            hard_stop_hit=hard_stop_hit,
            trailing_stop_hit=trailing_stop_hit,
            model_exit_signal=model_exit_signal,
            be_lock_threat=be_lock_threat,
        ),
    )

    if hard_stop_hit:
        reason = "hard_stop"
    elif trailing_stop_hit:
        reason = "atr_trailing_stop"
    elif be_lock_threat:
        reason = "be_lock_threat"
    elif model_exit_signal:
        reason = "model_liquidity_exit"
    elif take_profit_hit:
        # Backward-compatible immediate profit-taking path.
        reason = "arm_take_profit"
    else:
        reason = "hold"

    should_exit = next_state == PositionState.EXITED or take_profit_hit

    return ExitEvaluation(
        state=next_state,
        should_exit=should_exit,
        reason=reason,
        unrealized_pnl_pct=unrealized,
        trailing_stop_price=trailing_stop_price,
    )
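
A hypothetical single-bar evaluation (all prices invented) showing promotion to ARMED once the +3% arm threshold is reached:

from src.strategy.position_state_machine import PositionState

result = evaluate_exit(
    current_state=PositionState.HOLDING,
    config=ExitRuleConfig(),
    inp=ExitRuleInput(current_price=103.5, entry_price=100.0, peak_price=104.0, atr_value=1.0),
)
# unrealized = +3.5% >= arm_pct 3.0, so the state is promoted to ARMED and
# take_profit_hit is True: should_exit is True with reason "arm_take_profit".
# The ATR trail sits at 104.0 - 2.2 * 1.0 = 101.8, not yet touched at 103.5.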
@@ -46,6 +46,18 @@ class StockCondition(BaseModel):

    The ScenarioEngine evaluates all non-None fields as AND conditions.
    A condition matches only if ALL specified fields are satisfied.

    Technical indicator fields:
        rsi_below / rsi_above — RSI threshold
        volume_ratio_above / volume_ratio_below — volume vs previous day
        price_above / price_below — absolute price level
        price_change_pct_above / price_change_pct_below — intraday % change

    Position-aware fields (require market_data enrichment from open position):
        unrealized_pnl_pct_above — matches if unrealized P&L > threshold (e.g. 3.0 → +3%)
        unrealized_pnl_pct_below — matches if unrealized P&L < threshold (e.g. -2.0 → -2%)
        holding_days_above — matches if position held for more than N days
        holding_days_below — matches if position held for fewer than N days
    """

    rsi_below: float | None = None
@@ -56,6 +68,10 @@ class StockCondition(BaseModel):
    price_below: float | None = None
    price_change_pct_above: float | None = None
    price_change_pct_below: float | None = None
    unrealized_pnl_pct_above: float | None = None
    unrealized_pnl_pct_below: float | None = None
    holding_days_above: int | None = None
    holding_days_below: int | None = None

    def has_any_condition(self) -> bool:
        """Check if at least one condition field is set."""
@@ -70,6 +86,10 @@ class StockCondition(BaseModel):
            self.price_below,
            self.price_change_pct_above,
            self.price_change_pct_below,
            self.unrealized_pnl_pct_above,
            self.unrealized_pnl_pct_below,
            self.holding_days_above,
            self.holding_days_below,
        )
    )
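Note: the position-aware fields reuse the optional-field pattern of the technical indicators, so one condition can mix both kinds and still evaluate as a single AND. A small sketch (field names from the diff, values illustrative):

    # Illustrative: exit a held position once P&L exceeds +3% after 5+ days.
    cond = StockCondition(
        unrealized_pnl_pct_above=3.0,
        holding_days_above=5,
    )
    assert cond.has_any_condition()  # at least one field is set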
src/strategy/position_state_machine.py (Normal file, 70 lines)
@@ -0,0 +1,70 @@
"""Position state machine for staged exit control.

State progression is monotonic (promotion-only) except terminal EXITED.
"""

from __future__ import annotations

from dataclasses import dataclass
from enum import Enum


class PositionState(str, Enum):
    HOLDING = "HOLDING"
    BE_LOCK = "BE_LOCK"
    ARMED = "ARMED"
    EXITED = "EXITED"


_STATE_RANK: dict[PositionState, int] = {
    PositionState.HOLDING: 0,
    PositionState.BE_LOCK: 1,
    PositionState.ARMED: 2,
    PositionState.EXITED: 3,
}


@dataclass(frozen=True)
class StateTransitionInput:
    unrealized_pnl_pct: float
    be_arm_pct: float
    arm_pct: float
    hard_stop_hit: bool = False
    trailing_stop_hit: bool = False
    model_exit_signal: bool = False
    be_lock_threat: bool = False


def evaluate_exit_first(inp: StateTransitionInput) -> bool:
    """Return True when terminal exit conditions are met.

    EXITED must be evaluated before any promotion.
    """
    return (
        inp.hard_stop_hit
        or inp.trailing_stop_hit
        or inp.model_exit_signal
        or inp.be_lock_threat
    )


def promote_state(current: PositionState, inp: StateTransitionInput) -> PositionState:
    """Promote to highest admissible state for current tick/bar.

    Rules:
    - EXITED has highest precedence and is terminal.
    - Promotions are monotonic (no downgrade).
    """
    if current == PositionState.EXITED:
        return PositionState.EXITED

    if evaluate_exit_first(inp):
        return PositionState.EXITED

    target = PositionState.HOLDING
    if inp.unrealized_pnl_pct >= inp.arm_pct:
        target = PositionState.ARMED
    elif inp.unrealized_pnl_pct >= inp.be_arm_pct:
        target = PositionState.BE_LOCK

    return target if _STATE_RANK[target] > _STATE_RANK[current] else current
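Note: because promotion is rank-based, a later P&L dip cannot demote a position; only the exit signals can end it. A minimal usage sketch with hypothetical thresholds (be_arm_pct=1.0, arm_pct=3.0); the types come from the new file above:

    up = StateTransitionInput(unrealized_pnl_pct=1.5, be_arm_pct=1.0, arm_pct=3.0)
    s1 = promote_state(PositionState.HOLDING, up)   # BE_LOCK: 1.5 >= be_arm_pct
    dip = StateTransitionInput(unrealized_pnl_pct=0.2, be_arm_pct=1.0, arm_pct=3.0)
    s2 = promote_state(s1, dip)                     # stays BE_LOCK: no downgrade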
@@ -1,7 +1,8 @@
"""Pre-market planner — generates DayPlaybook via Gemini before market open.

One Gemini API call per market per day. Candidates come from SmartVolatilityScanner.
On failure, returns a defensive playbook (all HOLD, no trades).
On failure, returns a smart rule-based fallback playbook that uses scanner signals
(momentum/oversold) to generate BUY conditions, avoiding the all-HOLD problem.
"""

from __future__ import annotations
@@ -74,6 +75,7 @@ class PreMarketPlanner:
        market: str,
        candidates: list[ScanCandidate],
        today: date | None = None,
        current_holdings: list[dict] | None = None,
    ) -> DayPlaybook:
        """Generate a DayPlaybook for a market using Gemini.

@@ -81,6 +83,10 @@ class PreMarketPlanner:
            market: Market code ("KR" or "US")
            candidates: Stock candidates from SmartVolatilityScanner
            today: Override date (defaults to date.today()). Use market-local date.
            current_holdings: Currently held positions with entry_price and unrealized_pnl_pct.
                Each dict: {"stock_code": str, "name": str, "qty": int,
                "entry_price": float, "unrealized_pnl_pct": float,
                "holding_days": int}

        Returns:
            DayPlaybook with scenarios. Empty/defensive if no candidates or failure.
@@ -105,6 +111,7 @@ class PreMarketPlanner:
            context_data,
            self_market_scorecard,
            cross_market,
            current_holdings=current_holdings,
        )

        # 3. Call Gemini
@@ -117,7 +124,8 @@

        # 4. Parse response
        playbook = self._parse_response(
            decision.rationale, today, market, candidates, cross_market
            decision.rationale, today, market, candidates, cross_market,
            current_holdings=current_holdings,
        )
        playbook_with_tokens = playbook.model_copy(
            update={"token_count": decision.token_count}
@@ -134,7 +142,7 @@
        except Exception:
            logger.exception("Playbook generation failed for %s", market)
            if self._settings.DEFENSIVE_PLAYBOOK_ON_FAILURE:
                return self._defensive_playbook(today, market, candidates)
                return self._smart_fallback_playbook(today, market, candidates, self._settings)
            return self._empty_playbook(today, market)

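Note: current_holdings is a plain list of dicts rather than a model, so callers can assemble it from any broker snapshot. An illustrative payload matching the documented keys (data invented for illustration):

    current_holdings = [
        {
            "stock_code": "005930",
            "name": "Samsung Electronics",
            "qty": 10,
            "entry_price": 72000.0,
            "unrealized_pnl_pct": 2.4,
            "holding_days": 3,
        },
    ]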
    def build_cross_market_context(
@@ -229,6 +237,7 @@ class PreMarketPlanner:
        context_data: dict[str, Any],
        self_market_scorecard: dict[str, Any] | None,
        cross_market: CrossMarketContext | None,
        current_holdings: list[dict] | None = None,
    ) -> str:
        """Build a structured prompt for Gemini to generate scenario JSON."""
        max_scenarios = self._settings.MAX_SCENARIOS_PER_STOCK
@@ -240,6 +249,26 @@ class PreMarketPlanner:
            for c in candidates
        )

        holdings_text = ""
        if current_holdings:
            lines = []
            for h in current_holdings:
                code = h.get("stock_code", "")
                name = h.get("name", "")
                qty = h.get("qty", 0)
                entry_price = h.get("entry_price", 0.0)
                pnl_pct = h.get("unrealized_pnl_pct", 0.0)
                holding_days = h.get("holding_days", 0)
                lines.append(
                    f" - {code} ({name}): {qty}주 @ {entry_price:,.0f}, "
                    f"미실현손익 {pnl_pct:+.2f}%, 보유 {holding_days}일"
                )
            holdings_text = (
                "\n## Current Holdings (보유 중 — SELL/HOLD 전략 고려 필요)\n"
                + "\n".join(lines)
                + "\n"
            )

        cross_market_text = ""
        if cross_market:
            cross_market_text = (
@@ -272,10 +301,20 @@ class PreMarketPlanner:
            for key, value in list(layer_data.items())[:5]:
                context_text += f" - {key}: {value}\n"

        holdings_instruction = ""
        if current_holdings:
            holding_codes = [h.get("stock_code", "") for h in current_holdings]
            holdings_instruction = (
                f"- Also include SELL/HOLD scenarios for held stocks: "
                f"{', '.join(holding_codes)} "
                f"(even if not in candidates list)\n"
            )

        return (
            f"You are a pre-market trading strategist for the {market} market.\n"
            f"Generate structured trading scenarios for today.\n\n"
            f"## Candidates (from volatility scanner)\n{candidates_text}\n"
            f"{holdings_text}"
            f"{self_market_text}"
            f"{cross_market_text}"
            f"{context_text}\n"
@@ -293,7 +332,8 @@
            f'      "stock_code": "...",\n'
            f'      "scenarios": [\n'
            f'        {{\n'
            f'          "condition": {{"rsi_below": 30, "volume_ratio_above": 2.0}},\n'
            f'          "condition": {{"rsi_below": 30, "volume_ratio_above": 2.0,'
            f' "unrealized_pnl_pct_above": 3.0, "holding_days_above": 5}},\n'
            f'          "action": "BUY|SELL|HOLD",\n'
            f'          "confidence": 85,\n'
            f'          "allocation_pct": 10.0,\n'
@@ -307,7 +347,8 @@
            f'}}\n\n'
            f"Rules:\n"
            f"- Max {max_scenarios} scenarios per stock\n"
            f"- Only use stocks from the candidates list\n"
            f"- Candidates list is the primary source for BUY candidates\n"
            f"{holdings_instruction}"
            f"- Confidence 0-100 (80+ for actionable trades)\n"
            f"- stop_loss_pct must be <= 0, take_profit_pct must be >= 0\n"
            f"- Return ONLY the JSON, no markdown fences or explanation\n"
@@ -320,12 +361,19 @@
        market: str,
        candidates: list[ScanCandidate],
        cross_market: CrossMarketContext | None,
        current_holdings: list[dict] | None = None,
    ) -> DayPlaybook:
        """Parse Gemini's JSON response into a validated DayPlaybook."""
        cleaned = self._extract_json(response_text)
        data = json.loads(cleaned)

        valid_codes = {c.stock_code for c in candidates}
        # Holdings are also valid — AI may generate SELL/HOLD scenarios for them
        if current_holdings:
            for h in current_holdings:
                code = h.get("stock_code", "")
                if code:
                    valid_codes.add(code)

        # Parse market outlook
        outlook_str = data.get("market_outlook", "neutral")
@@ -389,6 +437,10 @@ class PreMarketPlanner:
            price_below=cond_data.get("price_below"),
            price_change_pct_above=cond_data.get("price_change_pct_above"),
            price_change_pct_below=cond_data.get("price_change_pct_below"),
            unrealized_pnl_pct_above=cond_data.get("unrealized_pnl_pct_above"),
            unrealized_pnl_pct_below=cond_data.get("unrealized_pnl_pct_below"),
            holding_days_above=cond_data.get("holding_days_above"),
            holding_days_below=cond_data.get("holding_days_below"),
        )

        if not condition.has_any_condition():
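Note: for reference, a Gemini response that satisfies the prompt schema above, including the new position-aware condition keys, would parse into roughly this structure (illustrative, not from the diff):

    # What json.loads(cleaned) might yield for a single held stock.
    data = {
        "market_outlook": "neutral",
        "stocks": [
            {
                "stock_code": "005930",
                "scenarios": [
                    {
                        "condition": {"unrealized_pnl_pct_above": 3.0,
                                      "holding_days_above": 5},
                        "action": "SELL",
                        "confidence": 85,
                        "allocation_pct": 10.0,
                    }
                ],
            }
        ],
    }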
@@ -470,3 +522,99 @@
                ),
            ],
        )

    @staticmethod
    def _smart_fallback_playbook(
        today: date,
        market: str,
        candidates: list[ScanCandidate],
        settings: Settings,
    ) -> DayPlaybook:
        """Rule-based fallback playbook when Gemini is unavailable.

        Uses scanner signals (RSI, volume_ratio) to generate meaningful BUY
        conditions instead of the all-SELL defensive playbook. Candidates are
        already pre-qualified by SmartVolatilityScanner, so we trust their
        signals and build actionable scenarios from them.

        Scenario logic per candidate:
        - momentum signal: BUY when volume_ratio exceeds scanner threshold
        - oversold signal: BUY when RSI is below oversold threshold
        - always: SELL stop-loss at -3.0% as guard
        """
        stock_playbooks = []
        for c in candidates:
            scenarios: list[StockScenario] = []

            if c.signal == "momentum":
                scenarios.append(
                    StockScenario(
                        condition=StockCondition(
                            volume_ratio_above=settings.VOL_MULTIPLIER,
                        ),
                        action=ScenarioAction.BUY,
                        confidence=80,
                        allocation_pct=10.0,
                        stop_loss_pct=-3.0,
                        take_profit_pct=5.0,
                        rationale=(
                            f"Rule-based BUY: momentum signal, "
                            f"volume={c.volume_ratio:.1f}x (fallback planner)"
                        ),
                    )
                )
            elif c.signal == "oversold":
                scenarios.append(
                    StockScenario(
                        condition=StockCondition(
                            rsi_below=settings.RSI_OVERSOLD_THRESHOLD,
                        ),
                        action=ScenarioAction.BUY,
                        confidence=80,
                        allocation_pct=10.0,
                        stop_loss_pct=-3.0,
                        take_profit_pct=5.0,
                        rationale=(
                            f"Rule-based BUY: oversold signal, "
                            f"RSI={c.rsi:.0f} (fallback planner)"
                        ),
                    )
                )

            # Always add stop-loss guard
            scenarios.append(
                StockScenario(
                    condition=StockCondition(price_change_pct_below=-3.0),
                    action=ScenarioAction.SELL,
                    confidence=90,
                    stop_loss_pct=-3.0,
                    rationale="Rule-based stop-loss (fallback planner)",
                )
            )

            stock_playbooks.append(
                StockPlaybook(
                    stock_code=c.stock_code,
                    scenarios=scenarios,
                )
            )

        logger.info(
            "Smart fallback playbook for %s: %d stocks with rule-based BUY/SELL conditions",
            market,
            len(stock_playbooks),
        )
        return DayPlaybook(
            date=today,
            market=market,
            market_outlook=MarketOutlook.NEUTRAL,
            default_action=ScenarioAction.HOLD,
            stock_playbooks=stock_playbooks,
            global_rules=[
                GlobalRule(
                    condition="portfolio_pnl_pct < -2.0",
                    action=ScenarioAction.REDUCE_ALL,
                    rationale="Defensive: reduce on loss threshold",
                ),
            ],
        )
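Note: the fallback branches purely on ScanCandidate.signal, so each candidate yields at most one BUY scenario plus the unconditional SELL guard. A compact restatement of that count logic (a sketch, not the shipped code):

    def fallback_scenario_count(signal: str) -> int:
        # "momentum" and "oversold" each add one BUY; the guard is always added.
        buy = 1 if signal in ("momentum", "oversold") else 0
        return buy + 1

    assert fallback_scenario_count("momentum") == 2
    assert fallback_scenario_count("other") == 1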
@@ -206,6 +206,37 @@ class ScenarioEngine:
        if condition.price_change_pct_below is not None:
            checks.append(price_change_pct is not None and price_change_pct < condition.price_change_pct_below)

        # Position-aware conditions
        unrealized_pnl_pct = self._safe_float(market_data.get("unrealized_pnl_pct"))
        if condition.unrealized_pnl_pct_above is not None or condition.unrealized_pnl_pct_below is not None:
            if "unrealized_pnl_pct" not in market_data:
                self._warn_missing_key("unrealized_pnl_pct")
        if condition.unrealized_pnl_pct_above is not None:
            checks.append(
                unrealized_pnl_pct is not None
                and unrealized_pnl_pct > condition.unrealized_pnl_pct_above
            )
        if condition.unrealized_pnl_pct_below is not None:
            checks.append(
                unrealized_pnl_pct is not None
                and unrealized_pnl_pct < condition.unrealized_pnl_pct_below
            )

        holding_days = self._safe_float(market_data.get("holding_days"))
        if condition.holding_days_above is not None or condition.holding_days_below is not None:
            if "holding_days" not in market_data:
                self._warn_missing_key("holding_days")
        if condition.holding_days_above is not None:
            checks.append(
                holding_days is not None
                and holding_days > condition.holding_days_above
            )
        if condition.holding_days_below is not None:
            checks.append(
                holding_days is not None
                and holding_days < condition.holding_days_below
            )

        return len(checks) > 0 and all(checks)

    def _evaluate_global_condition(
@@ -266,5 +297,9 @@ class ScenarioEngine:
            details["current_price"] = self._safe_float(market_data.get("current_price"))
        if condition.price_change_pct_above is not None or condition.price_change_pct_below is not None:
            details["price_change_pct"] = self._safe_float(market_data.get("price_change_pct"))
        if condition.unrealized_pnl_pct_above is not None or condition.unrealized_pnl_pct_below is not None:
            details["unrealized_pnl_pct"] = self._safe_float(market_data.get("unrealized_pnl_pct"))
        if condition.holding_days_above is not None or condition.holding_days_below is not None:
            details["holding_days"] = self._safe_float(market_data.get("holding_days"))

        return details
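Note: the engine reads the position fields straight out of market_data, so enrichment is just two extra keys added before evaluation. Illustrative values showing the AND semantics:

    # With StockCondition(unrealized_pnl_pct_above=3.0, holding_days_above=5),
    # this market_data matches because 3.2 > 3.0 and 6 > 5.
    market_data = {
        "current_price": 74300.0,
        "unrealized_pnl_pct": 3.2,
        "holding_days": 6,
    }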
tests/test_backtest_cost_guard.py (Normal file, 83 lines)
@@ -0,0 +1,83 @@
from __future__ import annotations

import pytest

from src.analysis.backtest_cost_guard import BacktestCostModel, validate_backtest_cost_model


def test_valid_backtest_cost_model_passes() -> None:
    model = BacktestCostModel(
        commission_bps=5.0,
        slippage_bps_by_session={"KRX_REG": 10.0, "US_PRE": 50.0},
        failure_rate_by_session={"KRX_REG": 0.01, "US_PRE": 0.08},
        unfavorable_fill_required=True,
    )
    validate_backtest_cost_model(model=model, required_sessions=["KRX_REG", "US_PRE"])


def test_missing_required_slippage_session_raises() -> None:
    model = BacktestCostModel(
        commission_bps=5.0,
        slippage_bps_by_session={"KRX_REG": 10.0},
        failure_rate_by_session={"KRX_REG": 0.01, "US_PRE": 0.08},
        unfavorable_fill_required=True,
    )
    with pytest.raises(ValueError, match="missing slippage_bps_by_session.*US_PRE"):
        validate_backtest_cost_model(model=model, required_sessions=["KRX_REG", "US_PRE"])


def test_missing_required_failure_rate_session_raises() -> None:
    model = BacktestCostModel(
        commission_bps=5.0,
        slippage_bps_by_session={"KRX_REG": 10.0, "US_PRE": 50.0},
        failure_rate_by_session={"KRX_REG": 0.01},
        unfavorable_fill_required=True,
    )
    with pytest.raises(ValueError, match="missing failure_rate_by_session.*US_PRE"):
        validate_backtest_cost_model(model=model, required_sessions=["KRX_REG", "US_PRE"])


def test_invalid_failure_rate_range_raises() -> None:
    model = BacktestCostModel(
        commission_bps=5.0,
        slippage_bps_by_session={"KRX_REG": 10.0},
        failure_rate_by_session={"KRX_REG": 1.2},
        unfavorable_fill_required=True,
    )
    with pytest.raises(ValueError, match="failure rate must be within"):
        validate_backtest_cost_model(model=model, required_sessions=["KRX_REG"])


def test_unfavorable_fill_requirement_cannot_be_disabled() -> None:
    model = BacktestCostModel(
        commission_bps=5.0,
        slippage_bps_by_session={"KRX_REG": 10.0},
        failure_rate_by_session={"KRX_REG": 0.02},
        unfavorable_fill_required=False,
    )
    with pytest.raises(ValueError, match="unfavorable_fill_required must be True"):
        validate_backtest_cost_model(model=model, required_sessions=["KRX_REG"])


@pytest.mark.parametrize("bad_commission", [float("nan"), float("inf"), float("-inf")])
def test_non_finite_commission_rejected(bad_commission: float) -> None:
    model = BacktestCostModel(
        commission_bps=bad_commission,
        slippage_bps_by_session={"KRX_REG": 10.0},
        failure_rate_by_session={"KRX_REG": 0.02},
        unfavorable_fill_required=True,
    )
    with pytest.raises(ValueError, match="commission_bps"):
        validate_backtest_cost_model(model=model, required_sessions=["KRX_REG"])


@pytest.mark.parametrize("bad_slippage", [float("nan"), float("inf"), float("-inf")])
def test_non_finite_slippage_rejected(bad_slippage: float) -> None:
    model = BacktestCostModel(
        commission_bps=5.0,
        slippage_bps_by_session={"KRX_REG": bad_slippage},
        failure_rate_by_session={"KRX_REG": 0.02},
        unfavorable_fill_required=True,
    )
    with pytest.raises(ValueError, match="slippage bps"):
        validate_backtest_cost_model(model=model, required_sessions=["KRX_REG"])
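Note: all cost parameters are in basis points, where 1 bp = 0.01%. A quick conversion consistent with the fixtures above (numbers illustrative):

    # 5 bps commission on a 1,000,000 KRW notional:
    commission_bps = 5.0
    notional = 1_000_000
    cost = notional * commission_bps / 10_000  # 500.0 KRW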
tests/test_backtest_execution_model.py (Normal file, 108 lines)
@@ -0,0 +1,108 @@
from __future__ import annotations

import pytest

from src.analysis.backtest_execution_model import (
    BacktestExecutionModel,
    ExecutionAssumptions,
    ExecutionRequest,
)


def test_buy_uses_unfavorable_slippage_direction() -> None:
    model = BacktestExecutionModel(
        ExecutionAssumptions(
            slippage_bps_by_session={"US_PRE": 50.0},
            failure_rate_by_session={"US_PRE": 0.0},
            partial_fill_rate_by_session={"US_PRE": 0.0},
            seed=1,
        )
    )
    out = model.simulate(
        ExecutionRequest(side="BUY", session_id="US_PRE", qty=10, reference_price=100.0)
    )
    assert out.status == "FILLED"
    assert out.avg_price == pytest.approx(100.5)


def test_sell_uses_unfavorable_slippage_direction() -> None:
    model = BacktestExecutionModel(
        ExecutionAssumptions(
            slippage_bps_by_session={"US_PRE": 50.0},
            failure_rate_by_session={"US_PRE": 0.0},
            partial_fill_rate_by_session={"US_PRE": 0.0},
            seed=1,
        )
    )
    out = model.simulate(
        ExecutionRequest(side="SELL", session_id="US_PRE", qty=10, reference_price=100.0)
    )
    assert out.status == "FILLED"
    assert out.avg_price == pytest.approx(99.5)


def test_failure_rate_can_reject_order() -> None:
    model = BacktestExecutionModel(
        ExecutionAssumptions(
            slippage_bps_by_session={"KRX_REG": 10.0},
            failure_rate_by_session={"KRX_REG": 1.0},
            partial_fill_rate_by_session={"KRX_REG": 0.0},
            seed=42,
        )
    )
    out = model.simulate(
        ExecutionRequest(side="BUY", session_id="KRX_REG", qty=10, reference_price=100.0)
    )
    assert out.status == "REJECTED"
    assert out.filled_qty == 0


def test_partial_fill_applies_when_rate_is_one() -> None:
    model = BacktestExecutionModel(
        ExecutionAssumptions(
            slippage_bps_by_session={"KRX_REG": 0.0},
            failure_rate_by_session={"KRX_REG": 0.0},
            partial_fill_rate_by_session={"KRX_REG": 1.0},
            partial_fill_min_ratio=0.4,
            partial_fill_max_ratio=0.4,
            seed=0,
        )
    )
    out = model.simulate(
        ExecutionRequest(side="BUY", session_id="KRX_REG", qty=10, reference_price=100.0)
    )
    assert out.status == "PARTIAL"
    assert out.filled_qty == 4
    assert out.avg_price == 100.0


@pytest.mark.parametrize("bad_slip", [-1.0, float("nan"), float("inf")])
def test_invalid_slippage_is_rejected(bad_slip: float) -> None:
    with pytest.raises(ValueError, match="slippage_bps"):
        BacktestExecutionModel(
            ExecutionAssumptions(
                slippage_bps_by_session={"US_PRE": bad_slip},
                failure_rate_by_session={"US_PRE": 0.0},
                partial_fill_rate_by_session={"US_PRE": 0.0},
            )
        )


@pytest.mark.parametrize("bad_rate", [-0.1, 1.1, float("nan")])
def test_invalid_failure_or_partial_rates_are_rejected(bad_rate: float) -> None:
    with pytest.raises(ValueError, match="failure_rate"):
        BacktestExecutionModel(
            ExecutionAssumptions(
                slippage_bps_by_session={"US_PRE": 10.0},
                failure_rate_by_session={"US_PRE": bad_rate},
                partial_fill_rate_by_session={"US_PRE": 0.0},
            )
        )
    with pytest.raises(ValueError, match="partial_fill_rate"):
        BacktestExecutionModel(
            ExecutionAssumptions(
                slippage_bps_by_session={"US_PRE": 10.0},
                failure_rate_by_session={"US_PRE": 0.0},
                partial_fill_rate_by_session={"US_PRE": bad_rate},
            )
        )
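Note: the expected fills follow directly from the bps math, with slippage always applied against the trader. With 50 bps on a 100.0 reference, as in the tests above:

    ref, slip_bps = 100.0, 50.0
    buy_fill = ref * (1 + slip_bps / 10_000)   # 100.5 (BUY pays up)
    sell_fill = ref * (1 - slip_bps / 10_000)  # 99.5 (SELL receives less)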
@@ -3,9 +3,11 @@
from __future__ import annotations

import sqlite3
import sys
import tempfile
from datetime import UTC, datetime, timedelta
from pathlib import Path
from unittest.mock import MagicMock, patch

import pytest

@@ -363,3 +365,435 @@ class TestHealthMonitor:
        assert "timestamp" in report
        assert "checks" in report
        assert len(report["checks"]) == 3


# ---------------------------------------------------------------------------
# BackupExporter — additional coverage for previously uncovered branches
# ---------------------------------------------------------------------------


@pytest.fixture
def empty_db(tmp_path: Path) -> Path:
    """Create a temporary database with NO trade records."""
    db_path = tmp_path / "empty_trades.db"
    conn = sqlite3.connect(str(db_path))
    conn.execute(
        """CREATE TABLE trades (
            id INTEGER PRIMARY KEY AUTOINCREMENT,
            timestamp TEXT NOT NULL,
            stock_code TEXT NOT NULL,
            action TEXT NOT NULL,
            quantity INTEGER NOT NULL,
            price REAL NOT NULL,
            confidence INTEGER NOT NULL,
            rationale TEXT,
            pnl REAL DEFAULT 0.0
        )"""
    )
    conn.commit()
    conn.close()
    return db_path


class TestBackupExporterAdditional:
    """Cover branches missed in the original TestBackupExporter suite."""

    def test_export_all_default_formats(self, temp_db: Path, tmp_path: Path) -> None:
        """export_all with formats=None must default to JSON+CSV+Parquet path."""
        exporter = BackupExporter(str(temp_db))
        # formats=None triggers the default list assignment (line 62)
        results = exporter.export_all(tmp_path / "out", formats=None, compress=False)
        # JSON and CSV must always succeed; Parquet needs pyarrow
        assert ExportFormat.JSON in results
        assert ExportFormat.CSV in results

    def test_export_all_logs_error_on_failure(
        self, temp_db: Path, tmp_path: Path
    ) -> None:
        """export_all must log an error and continue when one format fails."""
        exporter = BackupExporter(str(temp_db))
        # Patch _export_format to raise on JSON, succeed on CSV
        original = exporter._export_format

        def failing_export(fmt, *args, **kwargs):  # type: ignore[no-untyped-def]
            if fmt == ExportFormat.JSON:
                raise RuntimeError("simulated failure")
            return original(fmt, *args, **kwargs)

        exporter._export_format = failing_export  # type: ignore[method-assign]
        results = exporter.export_all(
            tmp_path / "out",
            formats=[ExportFormat.JSON, ExportFormat.CSV],
            compress=False,
        )
        # JSON failed → not in results; CSV succeeded → in results
        assert ExportFormat.JSON not in results
        assert ExportFormat.CSV in results

    def test_export_csv_empty_trades_no_compress(
        self, empty_db: Path, tmp_path: Path
    ) -> None:
        """CSV export with no trades and compress=False must write header row only."""
        exporter = BackupExporter(str(empty_db))
        results = exporter.export_all(
            tmp_path / "out",
            formats=[ExportFormat.CSV],
            compress=False,
        )
        assert ExportFormat.CSV in results
        out = results[ExportFormat.CSV]
        assert out.exists()
        content = out.read_text()
        assert "timestamp" in content

    def test_export_csv_empty_trades_compressed(
        self, empty_db: Path, tmp_path: Path
    ) -> None:
        """CSV export with no trades and compress=True must write gzipped header."""
        import gzip

        exporter = BackupExporter(str(empty_db))
        results = exporter.export_all(
            tmp_path / "out",
            formats=[ExportFormat.CSV],
            compress=True,
        )
        assert ExportFormat.CSV in results
        out = results[ExportFormat.CSV]
        assert out.suffix == ".gz"
        with gzip.open(out, "rt", encoding="utf-8") as f:
            content = f.read()
        assert "timestamp" in content

    def test_export_csv_with_data_compressed(
        self, temp_db: Path, tmp_path: Path
    ) -> None:
        """CSV export with data and compress=True must write gzipped rows."""
        import gzip

        exporter = BackupExporter(str(temp_db))
        results = exporter.export_all(
            tmp_path / "out",
            formats=[ExportFormat.CSV],
            compress=True,
        )
        assert ExportFormat.CSV in results
        out = results[ExportFormat.CSV]
        with gzip.open(out, "rt", encoding="utf-8") as f:
            lines = f.readlines()
        # Header + 3 data rows
        assert len(lines) == 4

    def test_export_parquet_raises_import_error_without_pyarrow(
        self, temp_db: Path, tmp_path: Path
    ) -> None:
        """Parquet export must raise ImportError when pyarrow is not installed."""
        exporter = BackupExporter(str(temp_db))
        with patch.dict(sys.modules, {"pyarrow": None, "pyarrow.parquet": None}):
            try:
                import pyarrow  # noqa: F401

                pytest.skip("pyarrow is installed; cannot test ImportError path")
            except ImportError:
                pass
            results = exporter.export_all(
                tmp_path / "out",
                formats=[ExportFormat.PARQUET],
                compress=False,
            )
            # Parquet export fails gracefully; result dict should not contain it
            assert ExportFormat.PARQUET not in results

# ---------------------------------------------------------------------------
# CloudStorage — mocked boto3 tests
# ---------------------------------------------------------------------------


@pytest.fixture
def mock_boto3_module():
    """Inject a fake boto3 into sys.modules for the duration of the test."""
    mock = MagicMock()
    with patch.dict(sys.modules, {"boto3": mock}):
        yield mock


@pytest.fixture
def s3_config():
    """Minimal S3Config for tests."""
    from src.backup.cloud_storage import S3Config

    return S3Config(
        endpoint_url="http://localhost:9000",
        access_key="minioadmin",
        secret_key="minioadmin",
        bucket_name="test-bucket",
        region="us-east-1",
    )


class TestCloudStorage:
    """Test CloudStorage using mocked boto3."""

    def test_init_creates_s3_client(self, mock_boto3_module, s3_config) -> None:
        """CloudStorage.__init__ must call boto3.client with the correct args."""
        from src.backup.cloud_storage import CloudStorage

        storage = CloudStorage(s3_config)
        mock_boto3_module.client.assert_called_once()
        call_kwargs = mock_boto3_module.client.call_args[1]
        assert call_kwargs["aws_access_key_id"] == "minioadmin"
        assert call_kwargs["aws_secret_access_key"] == "minioadmin"
        assert storage.config == s3_config

    def test_init_raises_if_boto3_missing(self, s3_config) -> None:
        """CloudStorage.__init__ must raise ImportError when boto3 is absent."""
        with patch.dict(sys.modules, {"boto3": None}):  # type: ignore[dict-item]
            with pytest.raises((ImportError, TypeError)):
                # Re-import to trigger the try/except inside __init__
                import importlib

                import src.backup.cloud_storage as m

                importlib.reload(m)
                m.CloudStorage(s3_config)

    def test_upload_file_success(
        self, mock_boto3_module, s3_config, tmp_path: Path
    ) -> None:
        """upload_file must call client.upload_file and return the object key."""
        from src.backup.cloud_storage import CloudStorage

        test_file = tmp_path / "backup.json.gz"
        test_file.write_bytes(b"data")

        storage = CloudStorage(s3_config)
        key = storage.upload_file(test_file, object_key="backups/backup.json.gz")

        assert key == "backups/backup.json.gz"
        storage.client.upload_file.assert_called_once()

    def test_upload_file_default_key(
        self, mock_boto3_module, s3_config, tmp_path: Path
    ) -> None:
        """upload_file without object_key must use the filename as key."""
        from src.backup.cloud_storage import CloudStorage

        test_file = tmp_path / "myfile.gz"
        test_file.write_bytes(b"data")

        storage = CloudStorage(s3_config)
        key = storage.upload_file(test_file)

        assert key == "myfile.gz"

    def test_upload_file_not_found(
        self, mock_boto3_module, s3_config, tmp_path: Path
    ) -> None:
        """upload_file must raise FileNotFoundError for missing files."""
        from src.backup.cloud_storage import CloudStorage

        storage = CloudStorage(s3_config)
        with pytest.raises(FileNotFoundError):
            storage.upload_file(tmp_path / "nonexistent.gz")

    def test_upload_file_propagates_client_error(
        self, mock_boto3_module, s3_config, tmp_path: Path
    ) -> None:
        """upload_file must re-raise exceptions from the boto3 client."""
        from src.backup.cloud_storage import CloudStorage

        test_file = tmp_path / "backup.gz"
        test_file.write_bytes(b"data")

        storage = CloudStorage(s3_config)
        storage.client.upload_file.side_effect = RuntimeError("network error")

        with pytest.raises(RuntimeError, match="network error"):
            storage.upload_file(test_file)

    def test_download_file_success(
        self, mock_boto3_module, s3_config, tmp_path: Path
    ) -> None:
        """download_file must call client.download_file and return local path."""
        from src.backup.cloud_storage import CloudStorage

        storage = CloudStorage(s3_config)
        dest = tmp_path / "downloads" / "backup.gz"

        result = storage.download_file("backups/backup.gz", dest)

        assert result == dest
        storage.client.download_file.assert_called_once()

    def test_download_file_propagates_error(
        self, mock_boto3_module, s3_config, tmp_path: Path
    ) -> None:
        """download_file must re-raise exceptions from the boto3 client."""
        from src.backup.cloud_storage import CloudStorage

        storage = CloudStorage(s3_config)
        storage.client.download_file.side_effect = RuntimeError("timeout")

        with pytest.raises(RuntimeError, match="timeout"):
            storage.download_file("key", tmp_path / "dest.gz")

    def test_list_files_returns_objects(
        self, mock_boto3_module, s3_config
    ) -> None:
        """list_files must return parsed file metadata from S3 response."""
        from datetime import timezone

        from src.backup.cloud_storage import CloudStorage

        storage = CloudStorage(s3_config)
        storage.client.list_objects_v2.return_value = {
            "Contents": [
                {
                    "Key": "backups/a.gz",
                    "Size": 1024,
                    "LastModified": datetime(2026, 1, 1, tzinfo=timezone.utc),
                    "ETag": '"abc123"',
                }
            ]
        }

        files = storage.list_files(prefix="backups/")
        assert len(files) == 1
        assert files[0]["key"] == "backups/a.gz"
        assert files[0]["size_bytes"] == 1024

    def test_list_files_empty_bucket(
        self, mock_boto3_module, s3_config
    ) -> None:
        """list_files must return empty list when bucket has no objects."""
        from src.backup.cloud_storage import CloudStorage

        storage = CloudStorage(s3_config)
        storage.client.list_objects_v2.return_value = {}

        files = storage.list_files()
        assert files == []

    def test_list_files_propagates_error(
        self, mock_boto3_module, s3_config
    ) -> None:
        """list_files must re-raise exceptions from the boto3 client."""
        from src.backup.cloud_storage import CloudStorage

        storage = CloudStorage(s3_config)
        storage.client.list_objects_v2.side_effect = RuntimeError("auth error")

        with pytest.raises(RuntimeError):
            storage.list_files()

    def test_delete_file_success(
        self, mock_boto3_module, s3_config
    ) -> None:
        """delete_file must call client.delete_object with the correct key."""
        from src.backup.cloud_storage import CloudStorage

        storage = CloudStorage(s3_config)
        storage.delete_file("backups/old.gz")
        storage.client.delete_object.assert_called_once_with(
            Bucket="test-bucket", Key="backups/old.gz"
        )

    def test_delete_file_propagates_error(
        self, mock_boto3_module, s3_config
    ) -> None:
        """delete_file must re-raise exceptions from the boto3 client."""
        from src.backup.cloud_storage import CloudStorage

        storage = CloudStorage(s3_config)
        storage.client.delete_object.side_effect = RuntimeError("permission denied")

        with pytest.raises(RuntimeError):
            storage.delete_file("backups/old.gz")

    def test_get_storage_stats_success(
        self, mock_boto3_module, s3_config
    ) -> None:
        """get_storage_stats must aggregate file sizes correctly."""
        from datetime import timezone

        from src.backup.cloud_storage import CloudStorage

        storage = CloudStorage(s3_config)
        storage.client.list_objects_v2.return_value = {
            "Contents": [
                {
                    "Key": "a.gz",
                    "Size": 1024 * 1024,
                    "LastModified": datetime(2026, 1, 1, tzinfo=timezone.utc),
                    "ETag": '"x"',
                },
                {
                    "Key": "b.gz",
                    "Size": 1024 * 1024,
                    "LastModified": datetime(2026, 1, 2, tzinfo=timezone.utc),
                    "ETag": '"y"',
                },
            ]
        }

        stats = storage.get_storage_stats()
        assert stats["total_files"] == 2
        assert stats["total_size_bytes"] == 2 * 1024 * 1024
        assert stats["total_size_mb"] == pytest.approx(2.0)

    def test_get_storage_stats_on_error(
        self, mock_boto3_module, s3_config
    ) -> None:
        """get_storage_stats must return error dict without raising on failure."""
        from src.backup.cloud_storage import CloudStorage

        storage = CloudStorage(s3_config)
        storage.client.list_objects_v2.side_effect = RuntimeError("no connection")

        stats = storage.get_storage_stats()
        assert "error" in stats
        assert stats["total_files"] == 0

    def test_verify_connection_success(
        self, mock_boto3_module, s3_config
    ) -> None:
        """verify_connection must return True when head_bucket succeeds."""
        from src.backup.cloud_storage import CloudStorage

        storage = CloudStorage(s3_config)
        result = storage.verify_connection()
        assert result is True

    def test_verify_connection_failure(
        self, mock_boto3_module, s3_config
    ) -> None:
        """verify_connection must return False when head_bucket raises."""
        from src.backup.cloud_storage import CloudStorage

        storage = CloudStorage(s3_config)
        storage.client.head_bucket.side_effect = RuntimeError("no such bucket")

        result = storage.verify_connection()
        assert result is False

    def test_enable_versioning(
        self, mock_boto3_module, s3_config
    ) -> None:
        """enable_versioning must call put_bucket_versioning."""
        from src.backup.cloud_storage import CloudStorage

        storage = CloudStorage(s3_config)
        storage.enable_versioning()
        storage.client.put_bucket_versioning.assert_called_once()

    def test_enable_versioning_propagates_error(
        self, mock_boto3_module, s3_config
    ) -> None:
        """enable_versioning must re-raise exceptions from the boto3 client."""
        from src.backup.cloud_storage import CloudStorage

        storage = CloudStorage(s3_config)
        storage.client.put_bucket_versioning.side_effect = RuntimeError("denied")

        with pytest.raises(RuntimeError):
            storage.enable_versioning()

tests/test_blackout_manager.py (Normal file, 81 lines)
@@ -0,0 +1,81 @@
from __future__ import annotations

from datetime import UTC, datetime

from src.core.blackout_manager import (
    BlackoutOrderManager,
    QueuedOrderIntent,
    parse_blackout_windows_kst,
)


def test_parse_blackout_windows_kst() -> None:
    windows = parse_blackout_windows_kst("23:30-00:10,11:20-11:30,invalid")
    assert len(windows) == 2


def test_blackout_manager_handles_cross_midnight_window() -> None:
    manager = BlackoutOrderManager(
        enabled=True,
        windows=parse_blackout_windows_kst("23:30-00:10"),
        max_queue_size=10,
    )
    # 2026-01-01 23:40 KST = 2026-01-01 14:40 UTC
    assert manager.in_blackout(datetime(2026, 1, 1, 14, 40, tzinfo=UTC))
    # 2026-01-02 00:20 KST = 2026-01-01 15:20 UTC
    assert not manager.in_blackout(datetime(2026, 1, 1, 15, 20, tzinfo=UTC))


def test_recovery_batch_only_after_blackout_exit() -> None:
    manager = BlackoutOrderManager(
        enabled=True,
        windows=parse_blackout_windows_kst("23:30-00:10"),
        max_queue_size=10,
    )
    intent = QueuedOrderIntent(
        market_code="KR",
        exchange_code="KRX",
        stock_code="005930",
        order_type="BUY",
        quantity=1,
        price=100.0,
        source="test",
        queued_at=datetime.now(UTC),
    )
    assert manager.enqueue(intent)

    # Inside blackout: no pop yet
    inside_blackout = datetime(2026, 1, 1, 14, 40, tzinfo=UTC)
    assert manager.pop_recovery_batch(inside_blackout) == []

    # Outside blackout: pop full batch once
    outside_blackout = datetime(2026, 1, 1, 15, 20, tzinfo=UTC)
    batch = manager.pop_recovery_batch(outside_blackout)
    assert len(batch) == 1
    assert manager.pending_count == 0


def test_requeued_intent_is_processed_next_non_blackout_cycle() -> None:
    manager = BlackoutOrderManager(
        enabled=True,
        windows=parse_blackout_windows_kst("23:30-00:10"),
        max_queue_size=10,
    )
    intent = QueuedOrderIntent(
        market_code="KR",
        exchange_code="KRX",
        stock_code="005930",
        order_type="BUY",
        quantity=1,
        price=100.0,
        source="test",
        queued_at=datetime.now(UTC),
    )
    manager.enqueue(intent)
    outside_blackout = datetime(2026, 1, 1, 15, 20, tzinfo=UTC)
    first_batch = manager.pop_recovery_batch(outside_blackout)
    assert len(first_batch) == 1

    manager.requeue(first_batch[0])
    second_batch = manager.pop_recovery_batch(outside_blackout)
    assert len(second_batch) == 1
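Note: the UTC timestamps in these tests encode the KST window boundaries (KST is UTC+9, so 23:40 KST on Jan 1 equals 14:40 UTC the same day). A standard-library sanity check:

    from datetime import UTC, datetime
    from zoneinfo import ZoneInfo

    kst = datetime(2026, 1, 1, 14, 40, tzinfo=UTC).astimezone(ZoneInfo("Asia/Seoul"))
    assert (kst.hour, kst.minute) == (23, 40)  # inside the 23:30-00:10 window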
@@ -2,6 +2,10 @@

from __future__ import annotations

from unittest.mock import AsyncMock, MagicMock, patch

import pytest

from src.brain.gemini_client import GeminiClient

# ---------------------------------------------------------------------------
@@ -89,9 +93,21 @@ class TestMalformedJsonHandling:

    def test_json_with_missing_fields_returns_hold(self, settings):
        client = GeminiClient(settings)
        decision = client.parse_response('{"action": "BUY"}')
        raw = '{"action": "BUY"}'
        decision = client.parse_response(raw)
        assert decision.action == "HOLD"
        assert decision.confidence == 0
        # rationale preserves raw so prompt_override callers (e.g. pre_market_planner)
        # can extract non-TradeDecision JSON from decision.rationale (#245)
        assert decision.rationale == raw

    def test_non_trade_decision_json_preserves_raw_in_rationale(self, settings):
        """Playbook JSON (no action/confidence/rationale) must be preserved for planner."""
        client = GeminiClient(settings)
        playbook_json = '{"market_outlook": "neutral", "stocks": []}'
        decision = client.parse_response(playbook_json)
        assert decision.action == "HOLD"
        assert decision.rationale == playbook_json

    def test_json_with_invalid_action_returns_hold(self, settings):
        client = GeminiClient(settings)
@@ -270,3 +286,132 @@ class TestBatchDecisionParsing:

        assert decisions["AAPL"].action == "HOLD"
        assert decisions["AAPL"].confidence == 0


# ---------------------------------------------------------------------------
# Prompt Override (used by pre_market_planner)
# ---------------------------------------------------------------------------


class TestPromptOverride:
    """decide() must use prompt_override when present in market_data."""

    @pytest.mark.asyncio
    async def test_prompt_override_is_sent_to_gemini(self, settings):
        """When prompt_override is in market_data, it should be used as the prompt."""
        client = GeminiClient(settings)

        custom_prompt = "You are a playbook generator. Return JSON with scenarios."
        playbook_json = '{"market_outlook": "neutral", "stocks": []}'

        mock_response = MagicMock()
        mock_response.text = playbook_json

        with patch.object(
            client._client.aio.models,
            "generate_content",
            new_callable=AsyncMock,
            return_value=mock_response,
        ) as mock_generate:
            market_data = {
                "stock_code": "PLANNER",
                "current_price": 0,
                "prompt_override": custom_prompt,
            }
            decision = await client.decide(market_data)

        # Verify the custom prompt was sent, not a built prompt
        mock_generate.assert_called_once()
        actual_prompt = mock_generate.call_args[1].get(
            "contents", mock_generate.call_args[0][1] if len(mock_generate.call_args[0]) > 1 else None
        )
        assert actual_prompt == custom_prompt
        # Raw response preserved in rationale without parse_response (#247)
        assert decision.rationale == playbook_json

    @pytest.mark.asyncio
    async def test_prompt_override_skips_parse_response(self, settings):
        """prompt_override bypasses parse_response — no Missing fields warning, raw preserved."""
        client = GeminiClient(settings)
        client._enable_optimization = True

        custom_prompt = "Custom playbook prompt"
        playbook_json = '{"market_outlook": "bullish", "stocks": [{"stock_code": "AAPL"}]}'

        mock_response = MagicMock()
        mock_response.text = playbook_json

        with patch.object(
            client._client.aio.models,
            "generate_content",
            new_callable=AsyncMock,
            return_value=mock_response,
        ):
            with patch.object(client, "parse_response") as mock_parse:
                market_data = {
                    "stock_code": "PLANNER",
                    "current_price": 0,
                    "prompt_override": custom_prompt,
                }
                decision = await client.decide(market_data)

                # parse_response must NOT be called for prompt_override
                mock_parse.assert_not_called()
                # Raw playbook JSON preserved in rationale
                assert decision.rationale == playbook_json

    @pytest.mark.asyncio
    async def test_prompt_override_takes_priority_over_optimization(self, settings):
        """prompt_override must win over enable_optimization=True."""
        client = GeminiClient(settings)
        client._enable_optimization = True

        custom_prompt = "Explicit playbook prompt"

        mock_response = MagicMock()
        mock_response.text = '{"market_outlook": "neutral", "stocks": []}'

        with patch.object(
            client._client.aio.models,
            "generate_content",
            new_callable=AsyncMock,
            return_value=mock_response,
        ) as mock_generate:
            market_data = {
                "stock_code": "PLANNER",
                "current_price": 0,
                "prompt_override": custom_prompt,
            }
            await client.decide(market_data)

        actual_prompt = mock_generate.call_args[1].get(
            "contents", mock_generate.call_args[0][1] if len(mock_generate.call_args[0]) > 1 else None
        )
        # The custom prompt must be used, not the compressed prompt
        assert actual_prompt == custom_prompt

    @pytest.mark.asyncio
    async def test_without_prompt_override_uses_build_prompt(self, settings):
        """Without prompt_override, decide() should use build_prompt as before."""
        client = GeminiClient(settings)

        mock_response = MagicMock()
        mock_response.text = '{"action": "HOLD", "confidence": 50, "rationale": "ok"}'

        with patch.object(
            client._client.aio.models,
            "generate_content",
            new_callable=AsyncMock,
            return_value=mock_response,
        ) as mock_generate:
            market_data = {
                "stock_code": "005930",
                "current_price": 72000,
            }
            await client.decide(market_data)

        actual_prompt = mock_generate.call_args[1].get(
            "contents", mock_generate.call_args[0][1] if len(mock_generate.call_args[0]) > 1 else None
        )
        # Should contain stock code from build_prompt, not be a custom override
        assert "005930" in actual_prompt

@@ -3,7 +3,7 @@
from __future__ import annotations

import asyncio
from unittest.mock import AsyncMock, patch
from unittest.mock import AsyncMock, MagicMock, patch

import pytest

@@ -90,12 +90,12 @@ class TestTokenManagement:
        await broker.close()

    @pytest.mark.asyncio
    async def test_token_refresh_cooldown_prevents_rapid_retries(self, settings):
        """Token refresh should enforce cooldown after failure (issue #54)."""
    async def test_token_refresh_cooldown_waits_then_retries(self, settings):
        """Token refresh should wait out cooldown then retry (issue #54)."""
        broker = KISBroker(settings)
        broker._refresh_cooldown = 2.0  # Short cooldown for testing
        broker._refresh_cooldown = 0.1  # Short cooldown for testing

        # First refresh attempt fails with 403 (EGW00133)
        # All attempts fail with 403 (EGW00133)
        mock_resp_403 = AsyncMock()
        mock_resp_403.status = 403
        mock_resp_403.text = AsyncMock(
@@ -109,8 +109,8 @@ class TestTokenManagement:
        with pytest.raises(ConnectionError, match="Token refresh failed"):
            await broker._ensure_token()

        # Second attempt within cooldown should fail with cooldown error
        with pytest.raises(ConnectionError, match="Token refresh on cooldown"):
        # Second attempt within cooldown should wait then retry (and still get 403)
        with pytest.raises(ConnectionError, match="Token refresh failed"):
            await broker._ensure_token()

        await broker.close()
@@ -296,3 +296,647 @@ class TestHashKey:
            mock_acquire.assert_called_once()

        await broker.close()


# ---------------------------------------------------------------------------
# fetch_market_rankings — TR_ID, path, params (issue #155)
# ---------------------------------------------------------------------------


def _make_ranking_mock(items: list[dict]) -> AsyncMock:
    """Build a mock HTTP response returning ranking items."""
    mock_resp = AsyncMock()
    mock_resp.status = 200
    mock_resp.json = AsyncMock(return_value={"output": items})
    mock_resp.__aenter__ = AsyncMock(return_value=mock_resp)
    mock_resp.__aexit__ = AsyncMock(return_value=False)
    return mock_resp


class TestFetchMarketRankings:
    """Verify correct TR_ID, API path, and params per ranking_type (issue #155)."""

    @pytest.fixture
    def broker(self, settings) -> KISBroker:
        b = KISBroker(settings)
        b._access_token = "tok"
        b._token_expires_at = float("inf")
        b._rate_limiter.acquire = AsyncMock()
        return b

    @pytest.mark.asyncio
    async def test_volume_uses_correct_tr_id_and_path(self, broker: KISBroker) -> None:
        mock_resp = _make_ranking_mock([])
        with patch("aiohttp.ClientSession.get", return_value=mock_resp) as mock_get:
            await broker.fetch_market_rankings(ranking_type="volume")

        call_kwargs = mock_get.call_args
        url = call_kwargs[0][0] if call_kwargs[0] else call_kwargs[1].get("url", "")
        headers = call_kwargs[1].get("headers", {})
        params = call_kwargs[1].get("params", {})

        assert "volume-rank" in url
        assert headers.get("tr_id") == "FHPST01710000"
        assert params.get("FID_COND_SCR_DIV_CODE") == "20171"
        assert params.get("FID_TRGT_EXLS_CLS_CODE") == "0000000000"

    @pytest.mark.asyncio
    async def test_fluctuation_uses_correct_tr_id_and_path(self, broker: KISBroker) -> None:
        mock_resp = _make_ranking_mock([])
        with patch("aiohttp.ClientSession.get", return_value=mock_resp) as mock_get:
            await broker.fetch_market_rankings(ranking_type="fluctuation")

        call_kwargs = mock_get.call_args
        url = call_kwargs[0][0] if call_kwargs[0] else call_kwargs[1].get("url", "")
        headers = call_kwargs[1].get("headers", {})
        params = call_kwargs[1].get("params", {})

        assert "ranking/fluctuation" in url
        assert headers.get("tr_id") == "FHPST01700000"
        assert params.get("fid_cond_scr_div_code") == "20170"
        # The production API rejects the 4-digit "0000"; it must be the 1-digit "0" (#240)
        assert params.get("fid_rank_sort_cls_code") == "0"

    @pytest.mark.asyncio
    async def test_volume_returns_parsed_rows(self, broker: KISBroker) -> None:
        items = [
            {
                "mksc_shrn_iscd": "005930",
                "hts_kor_isnm": "삼성전자",
                "stck_prpr": "75000",
                "acml_vol": "10000000",
                "prdy_ctrt": "2.5",
                "vol_inrt": "150",
            }
        ]
        mock_resp = _make_ranking_mock(items)
        with patch("aiohttp.ClientSession.get", return_value=mock_resp):
            result = await broker.fetch_market_rankings(ranking_type="volume")

        assert len(result) == 1
        assert result[0]["stock_code"] == "005930"
        assert result[0]["price"] == 75000.0
        assert result[0]["change_rate"] == 2.5

    @pytest.mark.asyncio
    async def test_fluctuation_parses_stck_shrn_iscd(self, broker: KISBroker) -> None:
        """The production API returns stck_shrn_iscd instead of mksc_shrn_iscd (#240)."""
        items = [
            {
                "stck_shrn_iscd": "015260",
                "hts_kor_isnm": "에이엔피",
                "stck_prpr": "794",
                "acml_vol": "4896196",
                "prdy_ctrt": "29.74",
                "vol_inrt": "0",
            }
        ]
        mock_resp = _make_ranking_mock(items)
        with patch("aiohttp.ClientSession.get", return_value=mock_resp):
            result = await broker.fetch_market_rankings(ranking_type="fluctuation")

        assert len(result) == 1
        assert result[0]["stock_code"] == "015260"
        assert result[0]["change_rate"] == 29.74


# ---------------------------------------------------------------------------
# KRX tick unit / round-down helpers (issue #157)
# ---------------------------------------------------------------------------


from src.broker.kis_api import kr_tick_unit, kr_round_down  # noqa: E402


class TestKrTickUnit:
    """kr_tick_unit and kr_round_down must implement KRX price tick rules."""

    @pytest.mark.parametrize(
        "price, expected_tick",
        [
            (1999, 1),
            (2000, 5),
            (4999, 5),
            (5000, 10),
            (19999, 10),
            (20000, 50),
            (49999, 50),
            (50000, 100),
            (199999, 100),
            (200000, 500),
            (499999, 500),
            (500000, 1000),
            (1000000, 1000),
        ],
    )
    def test_tick_unit_boundaries(self, price: int, expected_tick: int) -> None:
        assert kr_tick_unit(price) == expected_tick

    @pytest.mark.parametrize(
        "price, expected_rounded",
        [
            (188150, 188100),  # 100-won tick, 50-won remainder → round down
            (188100, 188100),  # already tick-aligned
            (75050, 75000),    # 100-won tick, 50-won remainder → round down
            (49950, 49950),    # aligned to 50-won tick
            (49960, 49950),    # 50-won tick, 10-won remainder → round down
            (1999, 1999),      # 1-won tick → unchanged
            (5003, 5000),      # 10-won tick, 3-won remainder → round down
        ],
    )
    def test_round_down_to_tick(self, price: int, expected_rounded: int) -> None:
        assert kr_round_down(price) == expected_rounded

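Note: the boundary table above pins down the KRX tick schedule completely, so the helpers can be sketched directly from it; the shipped src.broker.kis_api implementation may differ in structure:

    def kr_tick_unit(price: int) -> int:
        # KRX tick bands as encoded by the boundary tests above.
        if price < 2_000:
            return 1
        if price < 5_000:
            return 5
        if price < 20_000:
            return 10
        if price < 50_000:
            return 50
        if price < 200_000:
            return 100
        if price < 500_000:
            return 500
        return 1_000

    def kr_round_down(price: int) -> int:
        # Snap to the nearest lower tick-aligned price.
        return price - (price % kr_tick_unit(price))

Every parametrized case above passes against this sketch, e.g. kr_round_down(188150) == 188100.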
||||
# ---------------------------------------------------------------------------
|
||||
# get_current_price (issue #157)
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
|
||||
class TestGetCurrentPrice:
|
||||
"""get_current_price must use inquire-price API and return (price, change, foreigner)."""
|
||||
|
||||
@pytest.fixture
|
||||
def broker(self, settings) -> KISBroker:
|
||||
b = KISBroker(settings)
|
||||
b._access_token = "tok"
|
||||
b._token_expires_at = float("inf")
|
||||
b._rate_limiter.acquire = AsyncMock()
|
||||
return b
|
||||
|
||||
@pytest.mark.asyncio
|
||||
async def test_returns_correct_fields(self, broker: KISBroker) -> None:
|
||||
mock_resp = AsyncMock()
|
||||
mock_resp.status = 200
|
||||
mock_resp.json = AsyncMock(
|
||||
return_value={
|
||||
"rt_cd": "0",
|
||||
"output": {
|
||||
"stck_prpr": "188600",
|
||||
"prdy_ctrt": "3.97",
|
||||
"frgn_ntby_qty": "12345",
|
||||
},
|
||||
}
|
||||
)
|
||||
mock_resp.__aenter__ = AsyncMock(return_value=mock_resp)
|
||||
mock_resp.__aexit__ = AsyncMock(return_value=False)
|
||||
|
||||
with patch("aiohttp.ClientSession.get", return_value=mock_resp) as mock_get:
|
||||
price, change_pct, foreigner = await broker.get_current_price("005930")
|
||||
|
||||
assert price == 188600.0
|
||||
assert change_pct == 3.97
|
||||
assert foreigner == 12345.0
|
||||
|
||||
call_kwargs = mock_get.call_args
|
||||
url = call_kwargs[0][0] if call_kwargs[0] else call_kwargs[1].get("url", "")
|
||||
headers = call_kwargs[1].get("headers", {})
|
||||
assert "inquire-price" in url
|
||||
assert headers.get("tr_id") == "FHKST01010100"
|
||||
|
||||
@pytest.mark.asyncio
|
||||
async def test_http_error_raises_connection_error(self, broker: KISBroker) -> None:
|
||||
mock_resp = AsyncMock()
|
||||
mock_resp.status = 500
|
||||
mock_resp.text = AsyncMock(return_value="Internal Server Error")
|
||||
mock_resp.__aenter__ = AsyncMock(return_value=mock_resp)
|
||||
mock_resp.__aexit__ = AsyncMock(return_value=False)
|
||||
|
||||
with patch("aiohttp.ClientSession.get", return_value=mock_resp):
|
||||
with pytest.raises(ConnectionError, match="get_current_price failed"):
|
||||
await broker.get_current_price("005930")
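
A note on the mock pattern repeated throughout these tests: aiohttp's session.get()/session.post() return async context managers, so each test wires __aenter__/__aexit__ onto an AsyncMock by hand. A small factory in this style (hypothetical; the diff itself only adds the narrower _make_post_mocks helper further down) would collapse the repetition:

def make_json_response(status: int, payload: dict) -> AsyncMock:
    # Mimic the async-context-manager protocol of an aiohttp response.
    resp = AsyncMock()
    resp.status = status
    resp.json = AsyncMock(return_value=payload)
    resp.__aenter__ = AsyncMock(return_value=resp)
    resp.__aexit__ = AsyncMock(return_value=False)
    return resp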


# ---------------------------------------------------------------------------
# send_order tick rounding and ORD_DVSN (issue #157)
# ---------------------------------------------------------------------------


class TestSendOrderTickRounding:
    """send_order must apply KRX tick rounding and correct ORD_DVSN codes."""

    @pytest.fixture
    def broker(self, settings) -> KISBroker:
        b = KISBroker(settings)
        b._access_token = "tok"
        b._token_expires_at = float("inf")
        b._rate_limiter.acquire = AsyncMock()
        return b

    @pytest.mark.asyncio
    async def test_limit_order_rounds_down_to_tick(self, broker: KISBroker) -> None:
        """Price 188150 (not on 100-won tick) must be rounded to 188100."""
        mock_hash = AsyncMock()
        mock_hash.status = 200
        mock_hash.json = AsyncMock(return_value={"HASH": "h"})
        mock_hash.__aenter__ = AsyncMock(return_value=mock_hash)
        mock_hash.__aexit__ = AsyncMock(return_value=False)

        mock_order = AsyncMock()
        mock_order.status = 200
        mock_order.json = AsyncMock(return_value={"rt_cd": "0"})
        mock_order.__aenter__ = AsyncMock(return_value=mock_order)
        mock_order.__aexit__ = AsyncMock(return_value=False)

        with patch(
            "aiohttp.ClientSession.post", side_effect=[mock_hash, mock_order]
        ) as mock_post:
            await broker.send_order("005930", "BUY", 1, price=188150)

        order_call = mock_post.call_args_list[1]
        body = order_call[1].get("json", {})
        assert body["ORD_UNPR"] == "188100"  # rounded down
        assert body["ORD_DVSN"] == "00"  # limit order

    @pytest.mark.asyncio
    async def test_limit_order_ord_dvsn_is_00(self, broker: KISBroker) -> None:
        """send_order with price>0 must use ORD_DVSN='00' (limit order)."""
        mock_hash = AsyncMock()
        mock_hash.status = 200
        mock_hash.json = AsyncMock(return_value={"HASH": "h"})
        mock_hash.__aenter__ = AsyncMock(return_value=mock_hash)
        mock_hash.__aexit__ = AsyncMock(return_value=False)

        mock_order = AsyncMock()
        mock_order.status = 200
        mock_order.json = AsyncMock(return_value={"rt_cd": "0"})
        mock_order.__aenter__ = AsyncMock(return_value=mock_order)
        mock_order.__aexit__ = AsyncMock(return_value=False)

        with patch(
            "aiohttp.ClientSession.post", side_effect=[mock_hash, mock_order]
        ) as mock_post:
            await broker.send_order("005930", "BUY", 1, price=50000)

        order_call = mock_post.call_args_list[1]
        body = order_call[1].get("json", {})
        assert body["ORD_DVSN"] == "00"

    @pytest.mark.asyncio
    async def test_market_order_ord_dvsn_is_01(self, broker: KISBroker) -> None:
        """send_order with price=0 must use ORD_DVSN='01' (market order)."""
        mock_hash = AsyncMock()
        mock_hash.status = 200
        mock_hash.json = AsyncMock(return_value={"HASH": "h"})
        mock_hash.__aenter__ = AsyncMock(return_value=mock_hash)
        mock_hash.__aexit__ = AsyncMock(return_value=False)

        mock_order = AsyncMock()
        mock_order.status = 200
        mock_order.json = AsyncMock(return_value={"rt_cd": "0"})
        mock_order.__aenter__ = AsyncMock(return_value=mock_order)
        mock_order.__aexit__ = AsyncMock(return_value=False)

        with patch(
            "aiohttp.ClientSession.post", side_effect=[mock_hash, mock_order]
        ) as mock_post:
            await broker.send_order("005930", "SELL", 1, price=0)

        order_call = mock_post.call_args_list[1]
        body = order_call[1].get("json", {})
        assert body["ORD_DVSN"] == "01"


# ---------------------------------------------------------------------------
# TR_ID live/paper branching (issues #201, #202, #203)
# ---------------------------------------------------------------------------


class TestTRIDBranchingDomestic:
    """get_balance and send_order must use correct TR_ID for live vs paper mode."""

    def _make_broker(self, settings, mode: str) -> KISBroker:
        from src.config import Settings

        s = Settings(
            KIS_APP_KEY=settings.KIS_APP_KEY,
            KIS_APP_SECRET=settings.KIS_APP_SECRET,
            KIS_ACCOUNT_NO=settings.KIS_ACCOUNT_NO,
            GEMINI_API_KEY=settings.GEMINI_API_KEY,
            DB_PATH=":memory:",
            ENABLED_MARKETS="KR",
            MODE=mode,
        )
        b = KISBroker(s)
        b._access_token = "tok"
        b._token_expires_at = float("inf")
        b._rate_limiter.acquire = AsyncMock()
        return b

    @pytest.mark.asyncio
    async def test_get_balance_paper_uses_vttc8434r(self, settings) -> None:
        broker = self._make_broker(settings, "paper")
        mock_resp = AsyncMock()
        mock_resp.status = 200
        mock_resp.json = AsyncMock(
            return_value={"output1": [], "output2": {}}
        )
        mock_resp.__aenter__ = AsyncMock(return_value=mock_resp)
        mock_resp.__aexit__ = AsyncMock(return_value=False)

        with patch("aiohttp.ClientSession.get", return_value=mock_resp) as mock_get:
            await broker.get_balance()

        headers = mock_get.call_args[1].get("headers", {})
        assert headers["tr_id"] == "VTTC8434R"

    @pytest.mark.asyncio
    async def test_get_balance_live_uses_tttc8434r(self, settings) -> None:
        broker = self._make_broker(settings, "live")
        mock_resp = AsyncMock()
        mock_resp.status = 200
        mock_resp.json = AsyncMock(
            return_value={"output1": [], "output2": {}}
        )
        mock_resp.__aenter__ = AsyncMock(return_value=mock_resp)
        mock_resp.__aexit__ = AsyncMock(return_value=False)

        with patch("aiohttp.ClientSession.get", return_value=mock_resp) as mock_get:
            await broker.get_balance()

        headers = mock_get.call_args[1].get("headers", {})
        assert headers["tr_id"] == "TTTC8434R"

    @pytest.mark.asyncio
    async def test_send_order_buy_paper_uses_vttc0012u(self, settings) -> None:
        broker = self._make_broker(settings, "paper")
        mock_hash = AsyncMock()
        mock_hash.status = 200
        mock_hash.json = AsyncMock(return_value={"HASH": "h"})
        mock_hash.__aenter__ = AsyncMock(return_value=mock_hash)
        mock_hash.__aexit__ = AsyncMock(return_value=False)

        mock_order = AsyncMock()
        mock_order.status = 200
        mock_order.json = AsyncMock(return_value={"rt_cd": "0"})
        mock_order.__aenter__ = AsyncMock(return_value=mock_order)
        mock_order.__aexit__ = AsyncMock(return_value=False)

        with patch(
            "aiohttp.ClientSession.post", side_effect=[mock_hash, mock_order]
        ) as mock_post:
            await broker.send_order("005930", "BUY", 1)

        order_headers = mock_post.call_args_list[1][1].get("headers", {})
        assert order_headers["tr_id"] == "VTTC0012U"

    @pytest.mark.asyncio
    async def test_send_order_buy_live_uses_tttc0012u(self, settings) -> None:
        broker = self._make_broker(settings, "live")
        mock_hash = AsyncMock()
        mock_hash.status = 200
        mock_hash.json = AsyncMock(return_value={"HASH": "h"})
        mock_hash.__aenter__ = AsyncMock(return_value=mock_hash)
        mock_hash.__aexit__ = AsyncMock(return_value=False)

        mock_order = AsyncMock()
        mock_order.status = 200
        mock_order.json = AsyncMock(return_value={"rt_cd": "0"})
        mock_order.__aenter__ = AsyncMock(return_value=mock_order)
        mock_order.__aexit__ = AsyncMock(return_value=False)

        with patch(
            "aiohttp.ClientSession.post", side_effect=[mock_hash, mock_order]
        ) as mock_post:
            await broker.send_order("005930", "BUY", 1)

        order_headers = mock_post.call_args_list[1][1].get("headers", {})
        assert order_headers["tr_id"] == "TTTC0012U"

    @pytest.mark.asyncio
    async def test_send_order_sell_paper_uses_vttc0011u(self, settings) -> None:
        broker = self._make_broker(settings, "paper")
        mock_hash = AsyncMock()
        mock_hash.status = 200
        mock_hash.json = AsyncMock(return_value={"HASH": "h"})
        mock_hash.__aenter__ = AsyncMock(return_value=mock_hash)
        mock_hash.__aexit__ = AsyncMock(return_value=False)

        mock_order = AsyncMock()
        mock_order.status = 200
        mock_order.json = AsyncMock(return_value={"rt_cd": "0"})
        mock_order.__aenter__ = AsyncMock(return_value=mock_order)
        mock_order.__aexit__ = AsyncMock(return_value=False)

        with patch(
            "aiohttp.ClientSession.post", side_effect=[mock_hash, mock_order]
        ) as mock_post:
            await broker.send_order("005930", "SELL", 1)

        order_headers = mock_post.call_args_list[1][1].get("headers", {})
        assert order_headers["tr_id"] == "VTTC0011U"

    @pytest.mark.asyncio
    async def test_send_order_sell_live_uses_tttc0011u(self, settings) -> None:
        broker = self._make_broker(settings, "live")
        mock_hash = AsyncMock()
        mock_hash.status = 200
        mock_hash.json = AsyncMock(return_value={"HASH": "h"})
        mock_hash.__aenter__ = AsyncMock(return_value=mock_hash)
        mock_hash.__aexit__ = AsyncMock(return_value=False)

        mock_order = AsyncMock()
        mock_order.status = 200
        mock_order.json = AsyncMock(return_value={"rt_cd": "0"})
        mock_order.__aenter__ = AsyncMock(return_value=mock_order)
        mock_order.__aexit__ = AsyncMock(return_value=False)

        with patch(
            "aiohttp.ClientSession.post", side_effect=[mock_hash, mock_order]
        ) as mock_post:
            await broker.send_order("005930", "SELL", 1)

        order_headers = mock_post.call_args_list[1][1].get("headers", {})
        assert order_headers["tr_id"] == "TTTC0011U"
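
The eight tests above pin down a regular TR_ID pattern: live IDs start with "T" and the paper-trading variants replace that first letter with "V". One plausible helper capturing it (a sketch, not necessarily how KISBroker implements the branching):

_LIVE_TR_IDS = {
    "balance": "TTTC8434R",
    "order_buy": "TTTC0012U",
    "order_sell": "TTTC0011U",
    "order_cancel": "TTTC0013U",  # exercised by TestCancelDomesticOrder below
}


def resolve_tr_id(kind: str, mode: str) -> str:
    # Paper mode swaps the leading "T" for "V", e.g. TTTC8434R -> VTTC8434R.
    live_id = _LIVE_TR_IDS[kind]
    return live_id if mode == "live" else "V" + live_id[1:]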


# ---------------------------------------------------------------------------
# Domestic Pending Orders (get_domestic_pending_orders)
# ---------------------------------------------------------------------------


class TestGetDomesticPendingOrders:
    """get_domestic_pending_orders must return [] in paper mode and call TTTC0084R in live."""

    def _make_broker(self, settings, mode: str) -> KISBroker:
        from src.config import Settings

        s = Settings(
            KIS_APP_KEY=settings.KIS_APP_KEY,
            KIS_APP_SECRET=settings.KIS_APP_SECRET,
            KIS_ACCOUNT_NO=settings.KIS_ACCOUNT_NO,
            GEMINI_API_KEY=settings.GEMINI_API_KEY,
            DB_PATH=":memory:",
            ENABLED_MARKETS="KR",
            MODE=mode,
        )
        b = KISBroker(s)
        b._access_token = "tok"
        b._token_expires_at = float("inf")
        b._rate_limiter.acquire = AsyncMock()
        return b

    @pytest.mark.asyncio
    async def test_paper_mode_returns_empty(self, settings) -> None:
        """Paper mode must return [] immediately without any API call."""
        broker = self._make_broker(settings, "paper")

        with patch("aiohttp.ClientSession.get") as mock_get:
            result = await broker.get_domestic_pending_orders()

        assert result == []
        mock_get.assert_not_called()

    @pytest.mark.asyncio
    async def test_live_mode_calls_tttc0084r_with_correct_params(
        self, settings
    ) -> None:
        """Live mode must call TTTC0084R with INQR_DVSN_1/2 and paging params."""
        broker = self._make_broker(settings, "live")
        pending = [{"odno": "001", "pdno": "005930", "psbl_qty": "10"}]
        mock_resp = AsyncMock()
        mock_resp.status = 200
        mock_resp.json = AsyncMock(return_value={"output": pending})
        mock_resp.__aenter__ = AsyncMock(return_value=mock_resp)
        mock_resp.__aexit__ = AsyncMock(return_value=False)

        with patch("aiohttp.ClientSession.get", return_value=mock_resp) as mock_get:
            result = await broker.get_domestic_pending_orders()

        assert result == pending
        headers = mock_get.call_args[1].get("headers", {})
        assert headers["tr_id"] == "TTTC0084R"
        params = mock_get.call_args[1].get("params", {})
        assert params["INQR_DVSN_1"] == "0"
        assert params["INQR_DVSN_2"] == "0"

    @pytest.mark.asyncio
    async def test_live_mode_connection_error(self, settings) -> None:
        """Network error must raise ConnectionError."""
        import aiohttp as _aiohttp

        broker = self._make_broker(settings, "live")

        with patch(
            "aiohttp.ClientSession.get",
            side_effect=_aiohttp.ClientError("timeout"),
        ):
            with pytest.raises(ConnectionError):
                await broker.get_domestic_pending_orders()


# ---------------------------------------------------------------------------
# Domestic Order Cancellation (cancel_domestic_order)
# ---------------------------------------------------------------------------


class TestCancelDomesticOrder:
    """cancel_domestic_order must use correct TR_ID and build body correctly."""

    def _make_broker(self, settings, mode: str) -> KISBroker:
        from src.config import Settings

        s = Settings(
            KIS_APP_KEY=settings.KIS_APP_KEY,
            KIS_APP_SECRET=settings.KIS_APP_SECRET,
            KIS_ACCOUNT_NO=settings.KIS_ACCOUNT_NO,
            GEMINI_API_KEY=settings.GEMINI_API_KEY,
            DB_PATH=":memory:",
            ENABLED_MARKETS="KR",
            MODE=mode,
        )
        b = KISBroker(s)
        b._access_token = "tok"
        b._token_expires_at = float("inf")
        b._rate_limiter.acquire = AsyncMock()
        return b

    def _make_post_mocks(self, order_payload: dict) -> tuple:
        mock_hash = AsyncMock()
        mock_hash.status = 200
        mock_hash.json = AsyncMock(return_value={"HASH": "h"})
        mock_hash.__aenter__ = AsyncMock(return_value=mock_hash)
        mock_hash.__aexit__ = AsyncMock(return_value=False)

        mock_order = AsyncMock()
        mock_order.status = 200
        mock_order.json = AsyncMock(return_value=order_payload)
        mock_order.__aenter__ = AsyncMock(return_value=mock_order)
        mock_order.__aexit__ = AsyncMock(return_value=False)

        return mock_hash, mock_order

    @pytest.mark.asyncio
    async def test_live_uses_tttc0013u(self, settings) -> None:
        """Live mode must use TR_ID TTTC0013U."""
        broker = self._make_broker(settings, "live")
        mock_hash, mock_order = self._make_post_mocks({"rt_cd": "0"})

        with patch(
            "aiohttp.ClientSession.post", side_effect=[mock_hash, mock_order]
        ) as mock_post:
            await broker.cancel_domestic_order("005930", "ORD001", "BRNO01", 5)

        order_headers = mock_post.call_args_list[1][1].get("headers", {})
        assert order_headers["tr_id"] == "TTTC0013U"

    @pytest.mark.asyncio
    async def test_paper_uses_vttc0013u(self, settings) -> None:
        """Paper mode must use TR_ID VTTC0013U."""
        broker = self._make_broker(settings, "paper")
        mock_hash, mock_order = self._make_post_mocks({"rt_cd": "0"})

        with patch(
            "aiohttp.ClientSession.post", side_effect=[mock_hash, mock_order]
        ) as mock_post:
            await broker.cancel_domestic_order("005930", "ORD001", "BRNO01", 5)

        order_headers = mock_post.call_args_list[1][1].get("headers", {})
        assert order_headers["tr_id"] == "VTTC0013U"

    @pytest.mark.asyncio
    async def test_cancel_sets_rvse_cncl_dvsn_cd_02(self, settings) -> None:
        """Body must have RVSE_CNCL_DVSN_CD='02' (cancel) and QTY_ALL_ORD_YN='Y'."""
        broker = self._make_broker(settings, "live")
        mock_hash, mock_order = self._make_post_mocks({"rt_cd": "0"})

        with patch(
            "aiohttp.ClientSession.post", side_effect=[mock_hash, mock_order]
        ) as mock_post:
            await broker.cancel_domestic_order("005930", "ORD001", "BRNO01", 5)

        body = mock_post.call_args_list[1][1].get("json", {})
        assert body["RVSE_CNCL_DVSN_CD"] == "02"
        assert body["QTY_ALL_ORD_YN"] == "Y"
        assert body["ORD_UNPR"] == "0"

    @pytest.mark.asyncio
    async def test_cancel_sets_krx_fwdg_ord_orgno_in_body(self, settings) -> None:
        """Body must include KRX_FWDG_ORD_ORGNO and ORGN_ODNO from arguments."""
        broker = self._make_broker(settings, "live")
        mock_hash, mock_order = self._make_post_mocks({"rt_cd": "0"})

        with patch(
            "aiohttp.ClientSession.post", side_effect=[mock_hash, mock_order]
        ) as mock_post:
            await broker.cancel_domestic_order("005930", "ORD123", "BRN456", 3)

        body = mock_post.call_args_list[1][1].get("json", {})
        assert body["KRX_FWDG_ORD_ORGNO"] == "BRN456"
        assert body["ORGN_ODNO"] == "ORD123"
        assert body["ORD_QTY"] == "3"

    @pytest.mark.asyncio
    async def test_cancel_sets_hashkey_header(self, settings) -> None:
        """Request must include hashkey header (same pattern as send_order)."""
        broker = self._make_broker(settings, "live")
        mock_hash, mock_order = self._make_post_mocks({"rt_cd": "0"})

        with patch(
            "aiohttp.ClientSession.post", side_effect=[mock_hash, mock_order]
        ) as mock_post:
            await broker.cancel_domestic_order("005930", "ORD001", "BRNO01", 2)

        order_headers = mock_post.call_args_list[1][1].get("headers", {})
        assert "hashkey" in order_headers
        assert order_headers["hashkey"] == "h"

@@ -10,6 +10,7 @@ import pytest
from src.context.aggregator import ContextAggregator
from src.context.layer import LAYER_CONFIG, ContextLayer
from src.context.store import ContextStore
+from src.context.summarizer import ContextSummarizer
from src.db import init_db, log_trade


@@ -370,3 +371,259 @@ class TestLayerMetadata:

        # L1 aggregates from L2
        assert LAYER_CONFIG[ContextLayer.L1_LEGACY].aggregation_source == ContextLayer.L2_ANNUAL


# ---------------------------------------------------------------------------
# ContextSummarizer tests
# ---------------------------------------------------------------------------


@pytest.fixture
def summarizer(db_conn: sqlite3.Connection) -> ContextSummarizer:
    """Provide a ContextSummarizer backed by an in-memory store."""
    return ContextSummarizer(ContextStore(db_conn))


class TestContextSummarizer:
    """Test suite for ContextSummarizer."""

    # ------------------------------------------------------------------
    # summarize_numeric_values
    # ------------------------------------------------------------------

    def test_summarize_empty_values(self, summarizer: ContextSummarizer) -> None:
        """Empty list must return SummaryStats with count=0 and no other fields."""
        stats = summarizer.summarize_numeric_values([])
        assert stats.count == 0
        assert stats.mean is None
        assert stats.min is None
        assert stats.max is None

    def test_summarize_single_value(self, summarizer: ContextSummarizer) -> None:
        """Single-element list must return correct stats with std=0 and trend=flat."""
        stats = summarizer.summarize_numeric_values([42.0])
        assert stats.count == 1
        assert stats.mean == 42.0
        assert stats.std == 0.0
        assert stats.trend == "flat"

    def test_summarize_upward_trend(self, summarizer: ContextSummarizer) -> None:
        """Increasing values must produce trend='up'."""
        values = [1.0, 2.0, 3.0, 10.0, 20.0, 30.0]
        stats = summarizer.summarize_numeric_values(values)
        assert stats.trend == "up"

    def test_summarize_downward_trend(self, summarizer: ContextSummarizer) -> None:
        """Decreasing values must produce trend='down'."""
        values = [30.0, 20.0, 10.0, 3.0, 2.0, 1.0]
        stats = summarizer.summarize_numeric_values(values)
        assert stats.trend == "down"

    def test_summarize_flat_trend(self, summarizer: ContextSummarizer) -> None:
        """Stable values must produce trend='flat'."""
        values = [100.0, 100.1, 99.9, 100.0, 100.2, 99.8]
        stats = summarizer.summarize_numeric_values(values)
        assert stats.trend == "flat"
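
The three trend tests constrain the classifier without fixing its algorithm. One rule consistent with all of them is a half-versus-half mean comparison; the 5% threshold below is an illustrative assumption, not necessarily the value ContextSummarizer uses:

def classify_trend(values: list[float], threshold: float = 0.05) -> str:
    if len(values) < 2:
        return "flat"  # a single sample carries no direction
    mid = len(values) // 2
    first = sum(values[:mid]) / mid
    second = sum(values[mid:]) / (len(values) - mid)
    if first == 0:
        return "flat"
    relative_change = (second - first) / abs(first)
    if relative_change > threshold:
        return "up"
    if relative_change < -threshold:
        return "down"
    return "flat"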

    # ------------------------------------------------------------------
    # summarize_layer
    # ------------------------------------------------------------------

    def test_summarize_layer_no_data(
        self, summarizer: ContextSummarizer
    ) -> None:
        """summarize_layer with no data must return the 'No data' sentinel."""
        result = summarizer.summarize_layer(ContextLayer.L6_DAILY)
        assert result["count"] == 0
        assert "No data" in result["summary"]

    def test_summarize_layer_numeric(
        self, summarizer: ContextSummarizer, db_conn: sqlite3.Connection
    ) -> None:
        """summarize_layer must collect numeric values and produce stats."""
        store = summarizer.store
        store.set_context(ContextLayer.L6_DAILY, "2026-02-01", "total_pnl", 100.0)
        store.set_context(ContextLayer.L6_DAILY, "2026-02-02", "total_pnl", 200.0)

        result = summarizer.summarize_layer(ContextLayer.L6_DAILY)
        assert "total_entries" in result

    def test_summarize_layer_with_dict_values(
        self, summarizer: ContextSummarizer
    ) -> None:
        """summarize_layer must handle dict values by extracting numeric subkeys."""
        store = summarizer.store
        # set_context serialises the value as JSON, so passing a dict works
        store.set_context(
            ContextLayer.L6_DAILY, "2026-02-01", "metrics",
            {"win_rate": 65.0, "label": "good"}
        )

        result = summarizer.summarize_layer(ContextLayer.L6_DAILY)
        assert "total_entries" in result
        # numeric subkey "win_rate" should appear as "metrics.win_rate"
        assert "metrics.win_rate" in result

    def test_summarize_layer_with_string_values(
        self, summarizer: ContextSummarizer
    ) -> None:
        """summarize_layer must count string values separately."""
        store = summarizer.store
        # set_context stores string values as JSON-encoded strings
        store.set_context(ContextLayer.L6_DAILY, "2026-02-01", "outlook", "BULLISH")

        result = summarizer.summarize_layer(ContextLayer.L6_DAILY)
        # String fields contribute a `<key>_count` entry
        assert "outlook_count" in result

    # ------------------------------------------------------------------
    # rolling_window_summary
    # ------------------------------------------------------------------

    def test_rolling_window_summary_basic(
        self, summarizer: ContextSummarizer
    ) -> None:
        """rolling_window_summary must return the expected structure."""
        store = summarizer.store
        store.set_context(ContextLayer.L6_DAILY, "2026-02-01", "pnl", 500.0)

        result = summarizer.rolling_window_summary(ContextLayer.L6_DAILY)
        assert "window_days" in result
        assert "recent_data" in result
        assert "historical_summary" in result

    def test_rolling_window_summary_no_older_data(
        self, summarizer: ContextSummarizer
    ) -> None:
        """rolling_window_summary with summarize_older=False skips history."""
        result = summarizer.rolling_window_summary(
            ContextLayer.L6_DAILY, summarize_older=False
        )
        assert result["historical_summary"] == {}

    # ------------------------------------------------------------------
    # aggregate_to_higher_layer
    # ------------------------------------------------------------------

    def test_aggregate_to_higher_layer_mean(
        self, summarizer: ContextSummarizer
    ) -> None:
        """aggregate_to_higher_layer with 'mean' via dict subkeys returns average."""
        store = summarizer.store
        # Use different outer keys but same inner metric key so get_all_contexts
        # returns multiple rows with the target subkey.
        store.set_context(ContextLayer.L6_DAILY, "2026-02-01", "day1", {"pnl": 100.0})
        store.set_context(ContextLayer.L6_DAILY, "2026-02-01", "day2", {"pnl": 200.0})

        result = summarizer.aggregate_to_higher_layer(
            ContextLayer.L6_DAILY, ContextLayer.L5_WEEKLY, "pnl", "mean"
        )
        assert result == pytest.approx(150.0)

    def test_aggregate_to_higher_layer_sum(
        self, summarizer: ContextSummarizer
    ) -> None:
        """aggregate_to_higher_layer with 'sum' must return the total."""
        store = summarizer.store
        store.set_context(ContextLayer.L6_DAILY, "2026-02-01", "day1", {"pnl": 100.0})
        store.set_context(ContextLayer.L6_DAILY, "2026-02-01", "day2", {"pnl": 200.0})

        result = summarizer.aggregate_to_higher_layer(
            ContextLayer.L6_DAILY, ContextLayer.L5_WEEKLY, "pnl", "sum"
        )
        assert result == pytest.approx(300.0)

    def test_aggregate_to_higher_layer_max(
        self, summarizer: ContextSummarizer
    ) -> None:
        """aggregate_to_higher_layer with 'max' must return the maximum."""
        store = summarizer.store
        store.set_context(ContextLayer.L6_DAILY, "2026-02-01", "day1", {"pnl": 100.0})
        store.set_context(ContextLayer.L6_DAILY, "2026-02-01", "day2", {"pnl": 200.0})

        result = summarizer.aggregate_to_higher_layer(
            ContextLayer.L6_DAILY, ContextLayer.L5_WEEKLY, "pnl", "max"
        )
        assert result == pytest.approx(200.0)

    def test_aggregate_to_higher_layer_min(
        self, summarizer: ContextSummarizer
    ) -> None:
        """aggregate_to_higher_layer with 'min' must return the minimum."""
        store = summarizer.store
        store.set_context(ContextLayer.L6_DAILY, "2026-02-01", "day1", {"pnl": 100.0})
        store.set_context(ContextLayer.L6_DAILY, "2026-02-01", "day2", {"pnl": 200.0})

        result = summarizer.aggregate_to_higher_layer(
            ContextLayer.L6_DAILY, ContextLayer.L5_WEEKLY, "pnl", "min"
        )
        assert result == pytest.approx(100.0)

    def test_aggregate_to_higher_layer_no_data(
        self, summarizer: ContextSummarizer
    ) -> None:
        """aggregate_to_higher_layer with no matching key must return None."""
        result = summarizer.aggregate_to_higher_layer(
            ContextLayer.L6_DAILY, ContextLayer.L5_WEEKLY, "nonexistent", "mean"
        )
        assert result is None

    def test_aggregate_to_higher_layer_unknown_func_defaults_to_mean(
        self, summarizer: ContextSummarizer
    ) -> None:
        """Unknown aggregation function must fall back to mean."""
        store = summarizer.store
        store.set_context(ContextLayer.L6_DAILY, "2026-02-01", "day1", {"pnl": 100.0})
        store.set_context(ContextLayer.L6_DAILY, "2026-02-01", "day2", {"pnl": 200.0})

        result = summarizer.aggregate_to_higher_layer(
            ContextLayer.L6_DAILY, ContextLayer.L5_WEEKLY, "pnl", "unknown_func"
        )
        assert result == pytest.approx(150.0)
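
The aggregation tests above imply a simple dispatch over the collected subkey values, with unknown function names falling back to mean. A sketch of that core (an assumed shape; the real method also walks the store to collect the values):

import statistics


def _aggregate(values: list[float], func: str) -> float | None:
    if not values:
        return None  # covers the "no matching key" case
    dispatch = {"sum": sum, "max": max, "min": min}
    # Anything not in the table, including "unknown_func", falls back to mean.
    return dispatch.get(func, statistics.fmean)(values)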

    # ------------------------------------------------------------------
    # create_compact_summary + format_summary_for_prompt
    # ------------------------------------------------------------------

    def test_create_compact_summary(
        self, summarizer: ContextSummarizer
    ) -> None:
        """create_compact_summary must produce a dict keyed by layer value."""
        store = summarizer.store
        store.set_context(ContextLayer.L6_DAILY, "2026-02-01", "pnl", 100.0)

        result = summarizer.create_compact_summary([ContextLayer.L6_DAILY])
        assert ContextLayer.L6_DAILY.value in result

    def test_format_summary_for_prompt_with_numeric_metrics(
        self, summarizer: ContextSummarizer
    ) -> None:
        """format_summary_for_prompt must render avg/trend fields."""
        store = summarizer.store
        store.set_context(ContextLayer.L6_DAILY, "2026-02-01", "pnl", 100.0)
        store.set_context(ContextLayer.L6_DAILY, "2026-02-02", "pnl", 200.0)

        compact = summarizer.create_compact_summary([ContextLayer.L6_DAILY])
        text = summarizer.format_summary_for_prompt(compact)
        assert isinstance(text, str)

    def test_format_summary_for_prompt_skips_empty_layers(
        self, summarizer: ContextSummarizer
    ) -> None:
        """format_summary_for_prompt must skip layers with no metrics."""
        summary = {ContextLayer.L6_DAILY.value: {}}
        text = summarizer.format_summary_for_prompt(summary)
        assert text == ""

    def test_format_summary_non_dict_value(
        self, summarizer: ContextSummarizer
    ) -> None:
        """format_summary_for_prompt must render non-dict values as plain text."""
        summary = {
            "daily": {
                "plain_count": 42,
            }
        }
        text = summarizer.format_summary_for_prompt(summary)
        assert "plain_count" in text
        assert "42" in text

@@ -16,6 +16,10 @@ from src.evolution.daily_review import DailyReviewer
from src.evolution.scorecard import DailyScorecard
from src.logging.decision_logger import DecisionLogger

+from datetime import UTC, datetime

+TODAY = datetime.now(UTC).strftime("%Y-%m-%d")


@pytest.fixture
def db_conn() -> sqlite3.Connection:
@@ -116,7 +120,7 @@ def test_generate_scorecard_market_scoped(
        exchange_code="NASDAQ",
    )

-    scorecard = reviewer.generate_scorecard("2026-02-14", "KR")
+    scorecard = reviewer.generate_scorecard(TODAY, "KR")

    assert scorecard.market == "KR"
    assert scorecard.total_decisions == 2
@@ -158,7 +162,7 @@ def test_generate_scorecard_top_winners_and_losers(
        decision_id=decision_id,
    )

-    scorecard = reviewer.generate_scorecard("2026-02-14", "KR")
+    scorecard = reviewer.generate_scorecard(TODAY, "KR")
    assert scorecard.top_winners == ["005930", "000660"]
    assert scorecard.top_losers == ["035420", "051910"]

@@ -167,7 +171,7 @@ def test_generate_scorecard_empty_day(
    db_conn: sqlite3.Connection, context_store: ContextStore,
) -> None:
    reviewer = DailyReviewer(db_conn, context_store)
-    scorecard = reviewer.generate_scorecard("2026-02-14", "KR")
+    scorecard = reviewer.generate_scorecard(TODAY, "KR")

    assert scorecard.total_decisions == 0
    assert scorecard.total_pnl == 0.0

451 tests/test_dashboard.py Normal file
@@ -0,0 +1,451 @@
"""Tests for dashboard endpoint handlers."""

from __future__ import annotations

import json
import sqlite3
from collections.abc import Callable
from datetime import UTC, datetime
from pathlib import Path
from typing import Any

import pytest
from fastapi import HTTPException
from fastapi.responses import FileResponse

from src.dashboard.app import create_dashboard_app
from src.db import init_db


def _seed_db(conn: sqlite3.Connection) -> None:
    today = datetime.now(UTC).date().isoformat()

    conn.execute(
        """
        INSERT INTO playbooks (
            date, market, status, playbook_json, generated_at,
            token_count, scenario_count, match_count
        ) VALUES (?, ?, ?, ?, ?, ?, ?, ?)
        """,
        (
            "2026-02-14",
            "KR",
            "ready",
            json.dumps({"market": "KR", "stock_playbooks": []}),
            "2026-02-14T08:30:00+00:00",
            123,
            2,
            1,
        ),
    )
    conn.execute(
        """
        INSERT INTO playbooks (
            date, market, status, playbook_json, generated_at,
            token_count, scenario_count, match_count
        ) VALUES (?, ?, ?, ?, ?, ?, ?, ?)
        """,
        (
            today,
            "US_NASDAQ",
            "ready",
            json.dumps({"market": "US_NASDAQ", "stock_playbooks": []}),
            f"{today}T08:30:00+00:00",
            100,
            1,
            0,
        ),
    )
    conn.execute(
        """
        INSERT INTO contexts (layer, timeframe, key, value, created_at, updated_at)
        VALUES (?, ?, ?, ?, ?, ?)
        """,
        (
            "L6_DAILY",
            "2026-02-14",
            "scorecard_KR",
            json.dumps({"market": "KR", "total_pnl": 1.5, "win_rate": 60.0}),
            "2026-02-14T15:30:00+00:00",
            "2026-02-14T15:30:00+00:00",
        ),
    )
    conn.execute(
        """
        INSERT INTO contexts (layer, timeframe, key, value, created_at, updated_at)
        VALUES (?, ?, ?, ?, ?, ?)
        """,
        (
            "L7_REALTIME",
            "2026-02-14T10:00:00+00:00",
            "volatility_KR_005930",
            json.dumps({"momentum_score": 70.0}),
            "2026-02-14T10:00:00+00:00",
            "2026-02-14T10:00:00+00:00",
        ),
    )
    conn.execute(
        """
        INSERT INTO decision_logs (
            decision_id, timestamp, stock_code, market, exchange_code,
            action, confidence, rationale, context_snapshot, input_data
        ) VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?)
        """,
        (
            "d-kr-1",
            f"{today}T09:10:00+00:00",
            "005930",
            "KR",
            "KRX",
            "BUY",
            85,
            "signal matched",
            json.dumps({"scenario_match": {"rsi": 28.0}}),
            json.dumps({"current_price": 70000}),
        ),
    )
    conn.execute(
        """
        INSERT INTO decision_logs (
            decision_id, timestamp, stock_code, market, exchange_code,
            action, confidence, rationale, context_snapshot, input_data
        ) VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?)
        """,
        (
            "d-us-1",
            f"{today}T21:10:00+00:00",
            "AAPL",
            "US_NASDAQ",
            "NASDAQ",
            "SELL",
            80,
            "no match",
            json.dumps({"scenario_match": {}}),
            json.dumps({"current_price": 200}),
        ),
    )
    conn.execute(
        """
        INSERT INTO trades (
            timestamp, stock_code, action, confidence, rationale,
            quantity, price, pnl, market, exchange_code, selection_context, decision_id
        ) VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)
        """,
        (
            f"{today}T09:11:00+00:00",
            "005930",
            "BUY",
            85,
            "buy",
            1,
            70000,
            2.0,
            "KR",
            "KRX",
            None,
            "d-kr-1",
        ),
    )
    conn.execute(
        """
        INSERT INTO trades (
            timestamp, stock_code, action, confidence, rationale,
            quantity, price, pnl, market, exchange_code, selection_context, decision_id
        ) VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)
        """,
        (
            f"{today}T21:11:00+00:00",
            "AAPL",
            "SELL",
            80,
            "sell",
            1,
            200,
            -1.0,
            "US_NASDAQ",
            "NASDAQ",
            None,
            "d-us-1",
        ),
    )
    conn.commit()


def _app(tmp_path: Path) -> Any:
    db_path = tmp_path / "dashboard_test.db"
    conn = init_db(str(db_path))
    _seed_db(conn)
    conn.close()
    return create_dashboard_app(str(db_path))


def _endpoint(app: Any, path: str) -> Callable[..., Any]:
    for route in app.routes:
        if getattr(route, "path", None) == path:
            return route.endpoint
    raise AssertionError(f"route not found: {path}")


def test_index_serves_html(tmp_path: Path) -> None:
    app = _app(tmp_path)
    index = _endpoint(app, "/")
    resp = index()
    assert isinstance(resp, FileResponse)
    assert "index.html" in str(resp.path)


def test_status_endpoint(tmp_path: Path) -> None:
    app = _app(tmp_path)
    get_status = _endpoint(app, "/api/status")
    body = get_status()
    assert "KR" in body["markets"]
    assert "US_NASDAQ" in body["markets"]
    assert "totals" in body


def test_playbook_found(tmp_path: Path) -> None:
    app = _app(tmp_path)
    get_playbook = _endpoint(app, "/api/playbook/{date_str}")
    body = get_playbook("2026-02-14", market="KR")
    assert body["market"] == "KR"


def test_playbook_not_found(tmp_path: Path) -> None:
    app = _app(tmp_path)
    get_playbook = _endpoint(app, "/api/playbook/{date_str}")
    with pytest.raises(HTTPException, match="playbook not found"):
        get_playbook("2026-02-15", market="KR")


def test_scorecard_found(tmp_path: Path) -> None:
    app = _app(tmp_path)
    get_scorecard = _endpoint(app, "/api/scorecard/{date_str}")
    body = get_scorecard("2026-02-14", market="KR")
    assert body["scorecard"]["total_pnl"] == 1.5


def test_scorecard_not_found(tmp_path: Path) -> None:
    app = _app(tmp_path)
    get_scorecard = _endpoint(app, "/api/scorecard/{date_str}")
    with pytest.raises(HTTPException, match="scorecard not found"):
        get_scorecard("2026-02-15", market="KR")


def test_performance_all(tmp_path: Path) -> None:
    app = _app(tmp_path)
    get_performance = _endpoint(app, "/api/performance")
    body = get_performance(market="all")
    assert body["market"] == "all"
    assert body["combined"]["total_trades"] == 2
    assert len(body["by_market"]) == 2


def test_performance_market_filter(tmp_path: Path) -> None:
    app = _app(tmp_path)
    get_performance = _endpoint(app, "/api/performance")
    body = get_performance(market="KR")
    assert body["market"] == "KR"
    assert body["metrics"]["total_trades"] == 1


def test_performance_empty_market(tmp_path: Path) -> None:
    app = _app(tmp_path)
    get_performance = _endpoint(app, "/api/performance")
    body = get_performance(market="JP")
    assert body["metrics"]["total_trades"] == 0


def test_context_layer_all(tmp_path: Path) -> None:
    app = _app(tmp_path)
    get_context_layer = _endpoint(app, "/api/context/{layer}")
    body = get_context_layer("L7_REALTIME", timeframe=None, limit=100)
    assert body["layer"] == "L7_REALTIME"
    assert body["count"] == 1


def test_context_layer_timeframe_filter(tmp_path: Path) -> None:
    app = _app(tmp_path)
    get_context_layer = _endpoint(app, "/api/context/{layer}")
    body = get_context_layer("L6_DAILY", timeframe="2026-02-14", limit=100)
    assert body["count"] == 1
    assert body["entries"][0]["key"] == "scorecard_KR"


def test_decisions_endpoint(tmp_path: Path) -> None:
    app = _app(tmp_path)
    get_decisions = _endpoint(app, "/api/decisions")
    body = get_decisions(market="KR", limit=50)
    assert body["count"] == 1
    assert body["decisions"][0]["decision_id"] == "d-kr-1"


def test_scenarios_active_filters_non_matched(tmp_path: Path) -> None:
    app = _app(tmp_path)
    get_active_scenarios = _endpoint(app, "/api/scenarios/active")
    body = get_active_scenarios(
        market="KR",
        date_str=datetime.now(UTC).date().isoformat(),
        limit=50,
    )
    assert body["count"] == 1
    assert body["matches"][0]["stock_code"] == "005930"


def test_scenarios_active_empty_when_no_matches(tmp_path: Path) -> None:
    app = _app(tmp_path)
    get_active_scenarios = _endpoint(app, "/api/scenarios/active")
    body = get_active_scenarios(market="US", date_str="2026-02-14", limit=50)
    assert body["count"] == 0


def test_pnl_history_all_markets(tmp_path: Path) -> None:
    app = _app(tmp_path)
    get_pnl_history = _endpoint(app, "/api/pnl/history")
    body = get_pnl_history(days=30, market="all")
    assert body["market"] == "all"
    assert isinstance(body["labels"], list)
    assert isinstance(body["pnl"], list)
    assert len(body["labels"]) == len(body["pnl"])


def test_pnl_history_market_filter(tmp_path: Path) -> None:
    app = _app(tmp_path)
    get_pnl_history = _endpoint(app, "/api/pnl/history")
    body = get_pnl_history(days=30, market="KR")
    assert body["market"] == "KR"
    # KR has 1 trade with pnl=2.0
    assert len(body["labels"]) >= 1
    assert body["pnl"][0] == 2.0


def test_positions_returns_open_buy(tmp_path: Path) -> None:
    """A stock whose most recent trade is a BUY must be returned as an open position."""
    app = _app(tmp_path)
    get_positions = _endpoint(app, "/api/positions")
    body = get_positions()
    # _seed_db: 005930's last trade is a BUY (open); AAPL's is a SELL
    assert body["count"] == 1
    pos = body["positions"][0]
    assert pos["stock_code"] == "005930"
    assert pos["market"] == "KR"
    assert pos["quantity"] == 1
    assert pos["entry_price"] == 70000


def test_positions_excludes_closed_sell(tmp_path: Path) -> None:
    """A stock whose most recent trade is a SELL must not appear in positions."""
    app = _app(tmp_path)
    get_positions = _endpoint(app, "/api/positions")
    body = get_positions()
    codes = [p["stock_code"] for p in body["positions"]]
    assert "AAPL" not in codes


def test_positions_empty_when_no_trades(tmp_path: Path) -> None:
    """With no trade history, an empty positions list must be returned."""
    db_path = tmp_path / "empty.db"
    conn = init_db(str(db_path))
    conn.close()
    app = create_dashboard_app(str(db_path))
    get_positions = _endpoint(app, "/api/positions")
    body = get_positions()
    assert body["count"] == 0
    assert body["positions"] == []


def _seed_cb_context(conn: sqlite3.Connection, pnl_pct: float, market: str = "KR") -> None:
    import json as _json
    conn.execute(
        "INSERT OR REPLACE INTO system_metrics (key, value, updated_at) VALUES (?, ?, ?)",
        (
            f"portfolio_pnl_pct_{market}",
            _json.dumps({"pnl_pct": pnl_pct}),
            "2026-02-22T10:00:00+00:00",
        ),
    )
    conn.commit()


def test_status_circuit_breaker_ok(tmp_path: Path) -> None:
    """status=ok must be returned while pnl_pct is above -2.0%."""
    db_path = tmp_path / "cb_ok.db"
    conn = init_db(str(db_path))
    _seed_cb_context(conn, -1.0)
    conn.close()
    app = create_dashboard_app(str(db_path))
    get_status = _endpoint(app, "/api/status")
    body = get_status()
    cb = body["circuit_breaker"]
    assert cb["status"] == "ok"
    assert cb["current_pnl_pct"] == -1.0
    assert cb["threshold_pct"] == -3.0


def test_status_circuit_breaker_warning(tmp_path: Path) -> None:
    """status=warning must be returned once pnl_pct is at or below -2.0%."""
    db_path = tmp_path / "cb_warn.db"
    conn = init_db(str(db_path))
    _seed_cb_context(conn, -2.5)
    conn.close()
    app = create_dashboard_app(str(db_path))
    get_status = _endpoint(app, "/api/status")
    body = get_status()
    assert body["circuit_breaker"]["status"] == "warning"


def test_status_circuit_breaker_tripped(tmp_path: Path) -> None:
    """status=tripped must be returned once pnl_pct is at or below the threshold (-3.0%)."""
    db_path = tmp_path / "cb_tripped.db"
    conn = init_db(str(db_path))
    _seed_cb_context(conn, -3.5)
    conn.close()
    app = create_dashboard_app(str(db_path))
    get_status = _endpoint(app, "/api/status")
    body = get_status()
    assert body["circuit_breaker"]["status"] == "tripped"


def test_status_circuit_breaker_unknown_when_no_data(tmp_path: Path) -> None:
    """status=unknown must be returned when no pnl_pct data has been recorded."""
    app = _app(tmp_path)  # _seed_db writes no portfolio_pnl_pct
    get_status = _endpoint(app, "/api/status")
    body = get_status()
    cb = body["circuit_breaker"]
    assert cb["status"] == "unknown"
    assert cb["current_pnl_pct"] is None


def test_status_mode_paper(tmp_path: Path) -> None:
    """Creating the app with mode=paper must include mode=paper in the status response."""
    db_path = tmp_path / "dashboard_test.db"
    conn = init_db(str(db_path))
    _seed_db(conn)
    conn.close()
    app = create_dashboard_app(str(db_path), mode="paper")
    get_status = _endpoint(app, "/api/status")
    body = get_status()
    assert body["mode"] == "paper"


def test_status_mode_live(tmp_path: Path) -> None:
    """Creating the app with mode=live must include mode=live in the status response."""
    db_path = tmp_path / "dashboard_test.db"
    conn = init_db(str(db_path))
    _seed_db(conn)
    conn.close()
    app = create_dashboard_app(str(db_path), mode="live")
    get_status = _endpoint(app, "/api/status")
    body = get_status()
    assert body["mode"] == "live"


def test_status_mode_default_paper(tmp_path: Path) -> None:
    """When the mode parameter is not passed, the default must be paper."""
    db_path = tmp_path / "dashboard_test.db"
    conn = init_db(str(db_path))
    _seed_db(conn)
    conn.close()
    app = create_dashboard_app(str(db_path))
    get_status = _endpoint(app, "/api/status")
    body = get_status()
    assert body["mode"] == "paper"

331 tests/test_db.py Normal file
@@ -0,0 +1,331 @@
"""Tests for database helper functions."""

import tempfile
import os

from src.db import get_open_position, init_db, log_trade


def test_get_open_position_returns_latest_buy() -> None:
    conn = init_db(":memory:")
    log_trade(
        conn=conn,
        stock_code="005930",
        action="BUY",
        confidence=90,
        rationale="entry",
        quantity=2,
        price=70000.0,
        market="KR",
        exchange_code="KRX",
        decision_id="d-buy-1",
    )

    position = get_open_position(conn, "005930", "KR")
    assert position is not None
    assert position["decision_id"] == "d-buy-1"
    assert position["price"] == 70000.0
    assert position["quantity"] == 2


def test_get_open_position_returns_none_when_latest_is_sell() -> None:
    conn = init_db(":memory:")
    log_trade(
        conn=conn,
        stock_code="005930",
        action="BUY",
        confidence=90,
        rationale="entry",
        quantity=1,
        price=70000.0,
        market="KR",
        exchange_code="KRX",
        decision_id="d-buy-1",
    )
    log_trade(
        conn=conn,
        stock_code="005930",
        action="SELL",
        confidence=95,
        rationale="exit",
        quantity=1,
        price=71000.0,
        market="KR",
        exchange_code="KRX",
        decision_id="d-sell-1",
    )

    assert get_open_position(conn, "005930", "KR") is None


def test_get_open_position_returns_none_when_no_trades() -> None:
    conn = init_db(":memory:")
    assert get_open_position(conn, "AAPL", "US_NASDAQ") is None


# ---------------------------------------------------------------------------
# WAL mode tests (issue #210)
# ---------------------------------------------------------------------------


def test_wal_mode_applied_to_file_db() -> None:
    """File-based DB must use WAL journal mode for dashboard concurrent reads."""
    with tempfile.NamedTemporaryFile(suffix=".db", delete=False) as f:
        db_path = f.name
    try:
        conn = init_db(db_path)
        cursor = conn.execute("PRAGMA journal_mode")
        mode = cursor.fetchone()[0]
        assert mode == "wal", f"Expected WAL mode, got {mode}"
        conn.close()
    finally:
        os.unlink(db_path)
        # Clean up WAL auxiliary files if they exist
        for ext in ("-wal", "-shm"):
            path = db_path + ext
            if os.path.exists(path):
                os.unlink(path)


def test_wal_mode_not_applied_to_memory_db() -> None:
    """:memory: DB must not apply WAL (SQLite does not support WAL for in-memory)."""
    conn = init_db(":memory:")
    cursor = conn.execute("PRAGMA journal_mode")
    mode = cursor.fetchone()[0]
    # In-memory DBs default to 'memory' journal mode
    assert mode != "wal", "WAL should not be set on in-memory database"
    conn.close()
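
The pair of WAL tests implies that init_db switches the journal mode only for file-backed databases, since SQLite does not support WAL for in-memory connections. A sketch of that guard (an assumption about shape; the real init_db does much more than this):

import sqlite3


def _enable_wal_if_file_backed(conn: sqlite3.Connection, db_path: str) -> None:
    if db_path != ":memory:":
        # The pragma returns the resulting mode; "wal" on success.
        conn.execute("PRAGMA journal_mode=WAL")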


# ---------------------------------------------------------------------------
# mode column tests (issue #212)
# ---------------------------------------------------------------------------


def test_log_trade_stores_mode_paper() -> None:
    """log_trade must persist mode='paper' in the trades table."""
    conn = init_db(":memory:")
    log_trade(
        conn=conn,
        stock_code="005930",
        action="BUY",
        confidence=85,
        rationale="test",
        mode="paper",
    )
    row = conn.execute("SELECT mode FROM trades ORDER BY id DESC LIMIT 1").fetchone()
    assert row is not None
    assert row[0] == "paper"


def test_log_trade_stores_mode_live() -> None:
    """log_trade must persist mode='live' in the trades table."""
    conn = init_db(":memory:")
    log_trade(
        conn=conn,
        stock_code="005930",
        action="BUY",
        confidence=85,
        rationale="test",
        mode="live",
    )
    row = conn.execute("SELECT mode FROM trades ORDER BY id DESC LIMIT 1").fetchone()
    assert row is not None
    assert row[0] == "live"


def test_log_trade_default_mode_is_paper() -> None:
    """log_trade without explicit mode must default to 'paper'."""
    conn = init_db(":memory:")
    log_trade(
        conn=conn,
        stock_code="005930",
        action="HOLD",
        confidence=50,
        rationale="test",
    )
    row = conn.execute("SELECT mode FROM trades ORDER BY id DESC LIMIT 1").fetchone()
    assert row is not None
    assert row[0] == "paper"


def test_mode_column_exists_in_schema() -> None:
    """trades table must have a mode column after init_db."""
    conn = init_db(":memory:")
    cursor = conn.execute("PRAGMA table_info(trades)")
    columns = {row[1] for row in cursor.fetchall()}
    assert "mode" in columns
    assert "session_id" in columns
    assert "strategy_pnl" in columns
    assert "fx_pnl" in columns


def test_mode_migration_adds_column_to_existing_db() -> None:
    """init_db must add mode column to existing DBs that lack it (migration)."""
    import sqlite3

    with tempfile.NamedTemporaryFile(suffix=".db", delete=False) as f:
        db_path = f.name
    try:
        # Create DB without mode column (simulate old schema)
        old_conn = sqlite3.connect(db_path)
        old_conn.execute(
            """CREATE TABLE trades (
                id INTEGER PRIMARY KEY AUTOINCREMENT,
                timestamp TEXT NOT NULL,
                stock_code TEXT NOT NULL,
                action TEXT NOT NULL,
                confidence INTEGER NOT NULL,
                rationale TEXT,
                quantity INTEGER,
                price REAL,
                pnl REAL DEFAULT 0.0,
                market TEXT DEFAULT 'KR',
                exchange_code TEXT DEFAULT 'KRX',
                decision_id TEXT
            )"""
        )
        old_conn.execute(
            """
            INSERT INTO trades (
                timestamp, stock_code, action, confidence, rationale, quantity, price, pnl
            ) VALUES ('2026-01-01T00:00:00+00:00', 'AAPL', 'SELL', 90, 'legacy', 1, 100.0, 123.45)
            """
        )
        old_conn.commit()
        old_conn.close()

        # Run init_db: it should add the mode column via migration
        conn = init_db(db_path)
        cursor = conn.execute("PRAGMA table_info(trades)")
        columns = {row[1] for row in cursor.fetchall()}
        assert "mode" in columns
        assert "session_id" in columns
        assert "strategy_pnl" in columns
        assert "fx_pnl" in columns
        migrated = conn.execute(
            "SELECT pnl, strategy_pnl, fx_pnl, session_id FROM trades WHERE stock_code='AAPL' LIMIT 1"
        ).fetchone()
        assert migrated is not None
        assert migrated[0] == 123.45
        assert migrated[1] == 123.45
        assert migrated[2] == 0.0
        assert migrated[3] == "UNKNOWN"
        conn.close()
    finally:
        os.unlink(db_path)
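
The migration test above exercises an additive ALTER TABLE pattern: detect missing columns via PRAGMA table_info, add them, then backfill strategy_pnl from the legacy pnl column. A sketch of the shape (an assumption; the shipped init_db migration may differ in details such as defaults):

import sqlite3


def _migrate_trades_schema(conn: sqlite3.Connection) -> None:
    cols = {row[1] for row in conn.execute("PRAGMA table_info(trades)")}
    if "mode" not in cols:
        conn.execute("ALTER TABLE trades ADD COLUMN mode TEXT DEFAULT 'paper'")
    if "session_id" not in cols:
        conn.execute("ALTER TABLE trades ADD COLUMN session_id TEXT DEFAULT 'UNKNOWN'")
    if "strategy_pnl" not in cols:
        conn.execute("ALTER TABLE trades ADD COLUMN strategy_pnl REAL")
        conn.execute("UPDATE trades SET strategy_pnl = pnl")  # backfill legacy rows
    if "fx_pnl" not in cols:
        conn.execute("ALTER TABLE trades ADD COLUMN fx_pnl REAL DEFAULT 0.0")
    conn.commit()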


def test_log_trade_stores_strategy_and_fx_pnl_separately() -> None:
    conn = init_db(":memory:")
    log_trade(
        conn=conn,
        stock_code="AAPL",
        action="SELL",
        confidence=90,
        rationale="fx split",
        pnl=120.0,
        strategy_pnl=100.0,
        fx_pnl=20.0,
        market="US_NASDAQ",
        exchange_code="NASD",
    )
    row = conn.execute(
        "SELECT pnl, strategy_pnl, fx_pnl FROM trades ORDER BY id DESC LIMIT 1"
    ).fetchone()
    assert row is not None
    assert row[0] == 120.0
    assert row[1] == 100.0
    assert row[2] == 20.0


def test_log_trade_backward_compat_sets_strategy_pnl_from_pnl() -> None:
    conn = init_db(":memory:")
    log_trade(
        conn=conn,
        stock_code="005930",
        action="SELL",
        confidence=80,
        rationale="legacy",
        pnl=50.0,
        market="KR",
        exchange_code="KRX",
    )
    row = conn.execute(
        "SELECT pnl, strategy_pnl, fx_pnl FROM trades ORDER BY id DESC LIMIT 1"
    ).fetchone()
    assert row is not None
    assert row[0] == 50.0
    assert row[1] == 50.0
    assert row[2] == 0.0


def test_log_trade_partial_fx_input_does_not_infer_negative_strategy_pnl() -> None:
    conn = init_db(":memory:")
    log_trade(
        conn=conn,
        stock_code="AAPL",
        action="SELL",
        confidence=70,
        rationale="fx only",
        pnl=0.0,
        fx_pnl=10.0,
        market="US_NASDAQ",
        exchange_code="NASD",
    )
    row = conn.execute(
        "SELECT pnl, strategy_pnl, fx_pnl FROM trades ORDER BY id DESC LIMIT 1"
    ).fetchone()
    assert row is not None
    assert row[0] == 10.0
    assert row[1] == 0.0
    assert row[2] == 10.0
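
The three pnl tests pin down a splitting rule: if neither component is supplied, all of pnl is treated as strategy pnl; otherwise missing components default to 0 and the stored total is the sum of the parts, which is why a partial fx-only input never yields a negative strategy_pnl. One normalisation consistent with all three tests (a sketch; log_trade itself takes many more parameters):

def _split_pnl(
    pnl: float,
    strategy_pnl: float | None,
    fx_pnl: float | None,
) -> tuple[float, float, float]:
    # Returns (total, strategy, fx) as they would be persisted.
    if strategy_pnl is None and fx_pnl is None:
        return pnl, pnl, 0.0
    strategy = strategy_pnl if strategy_pnl is not None else 0.0
    fx = fx_pnl if fx_pnl is not None else 0.0
    return strategy + fx, strategy, fx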


def test_log_trade_persists_explicit_session_id() -> None:
    conn = init_db(":memory:")
    log_trade(
        conn=conn,
        stock_code="AAPL",
        action="BUY",
        confidence=70,
        rationale="session test",
        market="US_NASDAQ",
        exchange_code="NASD",
        session_id="US_PRE",
    )
    row = conn.execute("SELECT session_id FROM trades ORDER BY id DESC LIMIT 1").fetchone()
    assert row is not None
    assert row[0] == "US_PRE"


def test_log_trade_auto_derives_session_id_when_not_provided() -> None:
    conn = init_db(":memory:")
    log_trade(
        conn=conn,
        stock_code="005930",
        action="BUY",
        confidence=70,
        rationale="auto session",
        market="KR",
        exchange_code="KRX",
    )
    row = conn.execute("SELECT session_id FROM trades ORDER BY id DESC LIMIT 1").fetchone()
    assert row is not None
    assert row[0] != "UNKNOWN"


def test_log_trade_unknown_market_falls_back_to_unknown_session() -> None:
    conn = init_db(":memory:")
    log_trade(
        conn=conn,
        stock_code="X",
        action="BUY",
        confidence=70,
        rationale="unknown market",
        market="MARS",
        exchange_code="MARS",
    )
    row = conn.execute("SELECT session_id FROM trades ORDER BY id DESC LIMIT 1").fetchone()
    assert row is not None
    assert row[0] == "UNKNOWN"
|
||||
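The three P&L tests above pin down one splitting rule. A minimal sketch, assuming a hypothetical helper `_split_pnl` (the real logic lives inside `log_trade` in src, which is not shown here):

# Illustrative only: one rule consistent with the three P&L tests above.
def _split_pnl(
    pnl: float, strategy_pnl: float | None = None, fx_pnl: float | None = None
) -> tuple[float, float, float]:
    fx = fx_pnl if fx_pnl is not None else 0.0
    if strategy_pnl is None:
        # Backward compat: with no fx input, all of pnl is strategy P&L.
        # Partial fx input: never infer a negative strategy P&L.
        strategy = pnl if fx_pnl is None else max(pnl - fx, 0.0)
    else:
        strategy = strategy_pnl
    return strategy + fx, strategy, fx

assert _split_pnl(120.0, 100.0, 20.0) == (120.0, 100.0, 20.0)
assert _split_pnl(50.0) == (50.0, 50.0, 0.0)
assert _split_pnl(0.0, fx_pnl=10.0) == (10.0, 0.0, 10.0)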
55 tests/test_kill_switch.py Normal file
@@ -0,0 +1,55 @@
import pytest

from src.core.kill_switch import KillSwitchOrchestrator


@pytest.mark.asyncio
async def test_kill_switch_executes_steps_in_order() -> None:
    ks = KillSwitchOrchestrator()
    calls: list[str] = []

    async def _cancel() -> None:
        calls.append("cancel")

    def _refresh() -> None:
        calls.append("refresh")

    def _reduce() -> None:
        calls.append("reduce")

    def _snapshot() -> None:
        calls.append("snapshot")

    def _notify() -> None:
        calls.append("notify")

    report = await ks.trigger(
        reason="test",
        cancel_pending_orders=_cancel,
        refresh_order_state=_refresh,
        reduce_risk=_reduce,
        snapshot_state=_snapshot,
        notify=_notify,
    )

    assert report.steps == [
        "block_new_orders",
        "cancel_pending_orders",
        "refresh_order_state",
        "reduce_risk",
        "snapshot_state",
        "notify",
    ]
    assert calls == ["cancel", "refresh", "reduce", "snapshot", "notify"]
    assert report.errors == []


@pytest.mark.asyncio
async def test_kill_switch_collects_step_errors() -> None:
    ks = KillSwitchOrchestrator()

    def _boom() -> None:
        raise RuntimeError("boom")

    report = await ks.trigger(reason="test", cancel_pending_orders=_boom)
    assert any(err.startswith("cancel_pending_orders:") for err in report.errors)
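A minimal orchestrator sketch that would satisfy the two tests above; the real src.core.kill_switch implementation may differ in shape and step handling:

# Illustrative only: assumed report/step structure, inferred from the tests.
import inspect
from dataclasses import dataclass, field
from typing import Any, Callable

@dataclass
class KillSwitchReport:
    reason: str
    steps: list[str] = field(default_factory=list)
    errors: list[str] = field(default_factory=list)

class KillSwitchOrchestrator:
    _ORDER = ("cancel_pending_orders", "refresh_order_state",
              "reduce_risk", "snapshot_state", "notify")

    async def trigger(self, reason: str, **callbacks: Callable[[], Any]) -> KillSwitchReport:
        report = KillSwitchReport(reason=reason)
        report.steps.append("block_new_orders")  # first step needs no callback
        for name in self._ORDER:
            fn = callbacks.get(name)
            if fn is None:
                continue
            report.steps.append(name)
            try:
                result = fn()
                if inspect.isawaitable(result):
                    await result  # sync and async callbacks both supported
            except Exception as exc:  # collect errors; never abort the sequence
                report.errors.append(f"{name}: {exc}")
        return report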
117 tests/test_logging_config.py Normal file
@@ -0,0 +1,117 @@
"""Tests for JSON structured logging configuration."""

from __future__ import annotations

import json
import logging
import sys

from src.logging_config import JSONFormatter, setup_logging


class TestJSONFormatter:
    """Test JSONFormatter output."""

    def test_basic_log_record(self) -> None:
        """JSONFormatter must emit valid JSON with required fields."""
        formatter = JSONFormatter()
        record = logging.LogRecord(
            name="test.logger",
            level=logging.INFO,
            pathname="",
            lineno=0,
            msg="Hello %s",
            args=("world",),
            exc_info=None,
        )
        output = formatter.format(record)
        data = json.loads(output)
        assert data["level"] == "INFO"
        assert data["logger"] == "test.logger"
        assert data["message"] == "Hello world"
        assert "timestamp" in data

    def test_includes_exception_info(self) -> None:
        """JSONFormatter must include exception info when present."""
        formatter = JSONFormatter()
        try:
            raise ValueError("test error")
        except ValueError:
            exc_info = sys.exc_info()
        record = logging.LogRecord(
            name="test",
            level=logging.ERROR,
            pathname="",
            lineno=0,
            msg="oops",
            args=(),
            exc_info=exc_info,
        )
        output = formatter.format(record)
        data = json.loads(output)
        assert "exception" in data
        assert "ValueError" in data["exception"]

    def test_extra_trading_fields_included(self) -> None:
        """Extra trading fields attached to the record must appear in JSON."""
        formatter = JSONFormatter()
        record = logging.LogRecord(
            name="test",
            level=logging.INFO,
            pathname="",
            lineno=0,
            msg="trade",
            args=(),
            exc_info=None,
        )
        record.stock_code = "005930"  # type: ignore[attr-defined]
        record.action = "BUY"  # type: ignore[attr-defined]
        record.confidence = 85  # type: ignore[attr-defined]
        record.pnl_pct = -1.5  # type: ignore[attr-defined]
        record.order_amount = 1_000_000  # type: ignore[attr-defined]
        output = formatter.format(record)
        data = json.loads(output)
        assert data["stock_code"] == "005930"
        assert data["action"] == "BUY"
        assert data["confidence"] == 85
        assert data["pnl_pct"] == -1.5
        assert data["order_amount"] == 1_000_000

    def test_none_extra_fields_excluded(self) -> None:
        """Extra fields that are None must not appear in JSON output."""
        formatter = JSONFormatter()
        record = logging.LogRecord(
            name="test",
            level=logging.INFO,
            pathname="",
            lineno=0,
            msg="no extras",
            args=(),
            exc_info=None,
        )
        output = formatter.format(record)
        data = json.loads(output)
        assert "stock_code" not in data
        assert "action" not in data
        assert "confidence" not in data


class TestSetupLogging:
    """Test setup_logging function."""

    def test_configures_root_logger(self) -> None:
        """setup_logging must attach a JSON handler to the root logger."""
        setup_logging(level=logging.DEBUG)
        root = logging.getLogger()
        json_handlers = [
            h for h in root.handlers if isinstance(h.formatter, JSONFormatter)
        ]
        assert len(json_handlers) == 1
        assert root.level == logging.DEBUG

    def test_avoids_duplicate_handlers(self) -> None:
        """Calling setup_logging twice must not add duplicate handlers."""
        setup_logging()
        setup_logging()
        root = logging.getLogger()
        assert len(root.handlers) == 1
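A formatter shape consistent with the assertions above; a sketch only, since src.logging_config is the source of truth and its field list may be longer:

# Illustrative only: field names assumed from the tests.
import json
import logging
from datetime import datetime, timezone

TRADING_FIELDS = ("stock_code", "action", "confidence", "pnl_pct", "order_amount")

class JSONFormatter(logging.Formatter):
    def format(self, record: logging.LogRecord) -> str:
        data = {
            "timestamp": datetime.now(timezone.utc).isoformat(),
            "level": record.levelname,
            "logger": record.name,
            "message": record.getMessage(),
        }
        if record.exc_info:
            data["exception"] = self.formatException(record.exc_info)
        for key in TRADING_FIELDS:
            value = getattr(record, key, None)
            if value is not None:  # None extras stay out of the payload
                data[key] = value
        return json.dumps(data)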
4330 tests/test_main.py (file diff suppressed because it is too large)
@@ -7,6 +7,7 @@ import pytest

from src.markets.schedule import (
    MARKETS,
    expand_market_codes,
    get_next_market_open,
    get_open_markets,
    is_market_open,
@@ -199,3 +200,28 @@ class TestGetNextMarketOpen:
            enabled_markets=["INVALID", "KR"], now=test_time
        )
        assert market.code == "KR"


class TestExpandMarketCodes:
    """Test shorthand market expansion."""

    def test_expand_us_shorthand(self) -> None:
        assert expand_market_codes(["US"]) == ["US_NASDAQ", "US_NYSE", "US_AMEX"]

    def test_expand_cn_shorthand(self) -> None:
        assert expand_market_codes(["CN"]) == ["CN_SHA", "CN_SZA"]

    def test_expand_vn_shorthand(self) -> None:
        assert expand_market_codes(["VN"]) == ["VN_HAN", "VN_HCM"]

    def test_expand_mixed_codes(self) -> None:
        assert expand_market_codes(["KR", "US", "JP"]) == [
            "KR",
            "US_NASDAQ",
            "US_NYSE",
            "US_AMEX",
            "JP",
        ]

    def test_expand_preserves_unknown_code(self) -> None:
        assert expand_market_codes(["KR", "UNKNOWN"]) == ["KR", "UNKNOWN"]
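The expansion rule these tests pin down, as a self-contained sketch; the shorthand mapping is inferred from the expected values, not read from src:

# Illustrative only: assumed shorthand table.
_SHORTHAND = {
    "US": ["US_NASDAQ", "US_NYSE", "US_AMEX"],
    "CN": ["CN_SHA", "CN_SZA"],
    "VN": ["VN_HAN", "VN_HCM"],
}

def expand_market_codes(codes: list[str]) -> list[str]:
    expanded: list[str] = []
    for code in codes:
        # Unknown codes pass through unchanged, preserving order.
        expanded.extend(_SHORTHAND.get(code, [code]))
    return expanded

assert expand_market_codes(["KR", "US", "JP"]) == [
    "KR", "US_NASDAQ", "US_NYSE", "US_AMEX", "JP",
]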
40 tests/test_order_policy.py Normal file
@@ -0,0 +1,40 @@
from datetime import UTC, datetime

import pytest

from src.core.order_policy import OrderPolicyRejected, classify_session_id, validate_order_policy
from src.markets.schedule import MARKETS


def test_classify_kr_nxt_after() -> None:
    # 2026-02-26 16:00 KST == 07:00 UTC
    now = datetime(2026, 2, 26, 7, 0, tzinfo=UTC)
    assert classify_session_id(MARKETS["KR"], now) == "NXT_AFTER"


def test_classify_us_pre() -> None:
    # 2026-02-26 19:00 KST == 10:00 UTC
    now = datetime(2026, 2, 26, 10, 0, tzinfo=UTC)
    assert classify_session_id(MARKETS["US_NASDAQ"], now) == "US_PRE"


def test_reject_market_order_in_low_liquidity_session() -> None:
    now = datetime(2026, 2, 26, 10, 0, tzinfo=UTC)  # 19:00 KST -> US_PRE
    with pytest.raises(OrderPolicyRejected):
        validate_order_policy(
            market=MARKETS["US_NASDAQ"],
            order_type="BUY",
            price=0.0,
            now=now,
        )


def test_allow_limit_order_in_low_liquidity_session() -> None:
    now = datetime(2026, 2, 26, 10, 0, tzinfo=UTC)  # 19:00 KST -> US_PRE
    info = validate_order_policy(
        market=MARKETS["US_NASDAQ"],
        order_type="BUY",
        price=100.0,
        now=now,
    )
    assert info.session_id == "US_PRE"
1036 tests/test_overseas_broker.py Normal file (file diff suppressed because it is too large)
@@ -164,18 +164,23 @@ class TestGeneratePlaybook:
        assert pb.market_outlook == MarketOutlook.NEUTRAL

    @pytest.mark.asyncio
    async def test_gemini_failure_returns_defensive(self) -> None:
    async def test_gemini_failure_returns_smart_fallback(self) -> None:
        planner = _make_planner()
        planner._gemini.decide = AsyncMock(side_effect=RuntimeError("API timeout"))
        # oversold candidate (signal="oversold", rsi=28.5)
        candidates = [_candidate()]

        pb = await planner.generate_playbook("KR", candidates, today=date(2026, 2, 8))

        assert pb.default_action == ScenarioAction.HOLD
        assert pb.market_outlook == MarketOutlook.NEUTRAL_TO_BEARISH
        # Smart fallback uses NEUTRAL outlook (not NEUTRAL_TO_BEARISH)
        assert pb.market_outlook == MarketOutlook.NEUTRAL
        assert pb.stock_count == 1
        # Defensive playbook has stop-loss scenarios
        assert pb.stock_playbooks[0].scenarios[0].action == ScenarioAction.SELL
        # Oversold candidate → first scenario is BUY, second is SELL stop-loss
        scenarios = pb.stock_playbooks[0].scenarios
        assert scenarios[0].action == ScenarioAction.BUY
        assert scenarios[0].condition.rsi_below == 30
        assert scenarios[1].action == ScenarioAction.SELL

    @pytest.mark.asyncio
    async def test_gemini_failure_empty_when_defensive_disabled(self) -> None:
@@ -657,3 +662,339 @@ class TestDefensivePlaybook:
        assert pb.stock_count == 0
        assert pb.market == "US"
        assert pb.market_outlook == MarketOutlook.NEUTRAL


# ---------------------------------------------------------------------------
# Smart fallback playbook
# ---------------------------------------------------------------------------


class TestSmartFallbackPlaybook:
    """Tests for _smart_fallback_playbook — rule-based BUY/SELL on Gemini failure."""

    def _make_settings(self) -> Settings:
        return Settings(
            KIS_APP_KEY="test",
            KIS_APP_SECRET="test",
            KIS_ACCOUNT_NO="12345678-01",
            GEMINI_API_KEY="test",
            RSI_OVERSOLD_THRESHOLD=30,
            VOL_MULTIPLIER=2.0,
        )

    def test_momentum_candidate_gets_buy_on_volume(self) -> None:
        candidates = [
            _candidate(code="CHOW", signal="momentum", volume_ratio=13.64, rsi=100.0)
        ]
        settings = self._make_settings()

        pb = PreMarketPlanner._smart_fallback_playbook(
            date(2026, 2, 17), "US_AMEX", candidates, settings
        )

        assert pb.stock_count == 1
        sp = pb.stock_playbooks[0]
        assert sp.stock_code == "CHOW"
        # First scenario: BUY with volume_ratio_above
        buy_sc = sp.scenarios[0]
        assert buy_sc.action == ScenarioAction.BUY
        assert buy_sc.condition.volume_ratio_above == 2.0
        assert buy_sc.condition.rsi_below is None
        assert buy_sc.confidence == 80
        # Second scenario: stop-loss SELL
        sell_sc = sp.scenarios[1]
        assert sell_sc.action == ScenarioAction.SELL
        assert sell_sc.condition.price_change_pct_below == -3.0

    def test_oversold_candidate_gets_buy_on_rsi(self) -> None:
        candidates = [
            _candidate(code="005930", signal="oversold", rsi=22.0, volume_ratio=3.5)
        ]
        settings = self._make_settings()

        pb = PreMarketPlanner._smart_fallback_playbook(
            date(2026, 2, 17), "KR", candidates, settings
        )

        sp = pb.stock_playbooks[0]
        buy_sc = sp.scenarios[0]
        assert buy_sc.action == ScenarioAction.BUY
        assert buy_sc.condition.rsi_below == 30
        assert buy_sc.condition.volume_ratio_above is None

    def test_all_candidates_have_stop_loss_sell(self) -> None:
        candidates = [
            _candidate(code="AAA", signal="momentum", volume_ratio=5.0),
            _candidate(code="BBB", signal="oversold", rsi=25.0),
        ]
        settings = self._make_settings()

        pb = PreMarketPlanner._smart_fallback_playbook(
            date(2026, 2, 17), "US_NASDAQ", candidates, settings
        )

        assert pb.stock_count == 2
        for sp in pb.stock_playbooks:
            sell_scenarios = [s for s in sp.scenarios if s.action == ScenarioAction.SELL]
            assert len(sell_scenarios) == 1
            assert sell_scenarios[0].condition.price_change_pct_below == -3.0

    def test_market_outlook_is_neutral(self) -> None:
        candidates = [_candidate(signal="momentum", volume_ratio=5.0)]
        settings = self._make_settings()

        pb = PreMarketPlanner._smart_fallback_playbook(
            date(2026, 2, 17), "US_AMEX", candidates, settings
        )

        assert pb.market_outlook == MarketOutlook.NEUTRAL

    def test_default_action_is_hold(self) -> None:
        candidates = [_candidate(signal="momentum", volume_ratio=5.0)]
        settings = self._make_settings()

        pb = PreMarketPlanner._smart_fallback_playbook(
            date(2026, 2, 17), "US_AMEX", candidates, settings
        )

        assert pb.default_action == ScenarioAction.HOLD

    def test_has_global_reduce_all_rule(self) -> None:
        candidates = [_candidate(signal="momentum", volume_ratio=5.0)]
        settings = self._make_settings()

        pb = PreMarketPlanner._smart_fallback_playbook(
            date(2026, 2, 17), "US_AMEX", candidates, settings
        )

        assert len(pb.global_rules) == 1
        rule = pb.global_rules[0]
        assert rule.action == ScenarioAction.REDUCE_ALL
        assert "portfolio_pnl_pct" in rule.condition

    def test_empty_candidates_returns_empty_playbook(self) -> None:
        settings = self._make_settings()

        pb = PreMarketPlanner._smart_fallback_playbook(
            date(2026, 2, 17), "US_AMEX", [], settings
        )

        assert pb.stock_count == 0

    def test_vol_multiplier_applied_from_settings(self) -> None:
        """VOL_MULTIPLIER=3.0 should set volume_ratio_above=3.0 for momentum."""
        candidates = [_candidate(signal="momentum", volume_ratio=5.0)]
        settings = self._make_settings()
        settings = settings.model_copy(update={"VOL_MULTIPLIER": 3.0})

        pb = PreMarketPlanner._smart_fallback_playbook(
            date(2026, 2, 17), "US_AMEX", candidates, settings
        )

        buy_sc = pb.stock_playbooks[0].scenarios[0]
        assert buy_sc.condition.volume_ratio_above == 3.0

    def test_rsi_oversold_threshold_applied_from_settings(self) -> None:
        """RSI_OVERSOLD_THRESHOLD=25 should set rsi_below=25 for oversold."""
        candidates = [_candidate(signal="oversold", rsi=22.0)]
        settings = self._make_settings()
        settings = settings.model_copy(update={"RSI_OVERSOLD_THRESHOLD": 25})

        pb = PreMarketPlanner._smart_fallback_playbook(
            date(2026, 2, 17), "KR", candidates, settings
        )

        buy_sc = pb.stock_playbooks[0].scenarios[0]
        assert buy_sc.condition.rsi_below == 25

    @pytest.mark.asyncio
    async def test_generate_playbook_uses_smart_fallback_on_gemini_error(self) -> None:
        """generate_playbook() should use smart fallback (not defensive) on API failure."""
        planner = _make_planner()
        planner._gemini.decide = AsyncMock(side_effect=ConnectionError("429 quota exceeded"))
        # momentum candidate
        candidates = [
            _candidate(code="CHOW", signal="momentum", volume_ratio=13.64, rsi=100.0)
        ]

        pb = await planner.generate_playbook(
            "US_AMEX", candidates, today=date(2026, 2, 18)
        )

        # Should NOT be all-SELL defensive; should have BUY for momentum
        assert pb.stock_count == 1
        buy_scenarios = [
            s for s in pb.stock_playbooks[0].scenarios
            if s.action == ScenarioAction.BUY
        ]
        assert len(buy_scenarios) == 1
        assert buy_scenarios[0].condition.volume_ratio_above == 2.0  # VOL_MULTIPLIER default


# ---------------------------------------------------------------------------
# Holdings in prompt (#170)
# ---------------------------------------------------------------------------


class TestHoldingsInPrompt:
    """Tests for current_holdings parameter in generate_playbook / _build_prompt."""

    def _make_holdings(self) -> list[dict]:
        return [
            {
                "stock_code": "005930",
                "name": "Samsung",
                "qty": 10,
                "entry_price": 71000.0,
                "unrealized_pnl_pct": 2.3,
                "holding_days": 3,
            }
        ]

    def test_build_prompt_includes_holdings_section(self) -> None:
        """Prompt should contain a Current Holdings section when holdings are given."""
        planner = _make_planner()
        candidates = [_candidate()]
        holdings = self._make_holdings()

        prompt = planner._build_prompt(
            "KR",
            candidates,
            context_data={},
            self_market_scorecard=None,
            cross_market=None,
            current_holdings=holdings,
        )

        assert "## Current Holdings" in prompt
        assert "005930" in prompt
        assert "+2.30%" in prompt
        assert "보유 3일" in prompt

    def test_build_prompt_no_holdings_omits_section(self) -> None:
        """Prompt should NOT contain a Current Holdings section when holdings=None."""
        planner = _make_planner()
        candidates = [_candidate()]

        prompt = planner._build_prompt(
            "KR",
            candidates,
            context_data={},
            self_market_scorecard=None,
            cross_market=None,
            current_holdings=None,
        )

        assert "## Current Holdings" not in prompt

    def test_build_prompt_empty_holdings_omits_section(self) -> None:
        """Empty list should also omit the holdings section."""
        planner = _make_planner()
        candidates = [_candidate()]

        prompt = planner._build_prompt(
            "KR",
            candidates,
            context_data={},
            self_market_scorecard=None,
            cross_market=None,
            current_holdings=[],
        )

        assert "## Current Holdings" not in prompt

    def test_build_prompt_holdings_instruction_included(self) -> None:
        """Prompt should include instruction to generate scenarios for held stocks."""
        planner = _make_planner()
        candidates = [_candidate()]
        holdings = self._make_holdings()

        prompt = planner._build_prompt(
            "KR",
            candidates,
            context_data={},
            self_market_scorecard=None,
            cross_market=None,
            current_holdings=holdings,
        )

        assert "005930" in prompt
        assert "SELL/HOLD" in prompt

    @pytest.mark.asyncio
    async def test_generate_playbook_passes_holdings_to_prompt(self) -> None:
        """generate_playbook should pass current_holdings through to the prompt."""
        planner = _make_planner()
        candidates = [_candidate()]
        holdings = self._make_holdings()

        # Capture the actual prompt sent to Gemini
        captured_prompts: list[str] = []
        original_decide = planner._gemini.decide

        async def capture_and_call(data: dict) -> TradeDecision:
            captured_prompts.append(data.get("prompt_override", ""))
            return await original_decide(data)

        planner._gemini.decide = capture_and_call  # type: ignore[method-assign]

        await planner.generate_playbook(
            "KR", candidates, today=date(2026, 2, 8), current_holdings=holdings
        )

        assert len(captured_prompts) == 1
        assert "## Current Holdings" in captured_prompts[0]
        assert "005930" in captured_prompts[0]

    @pytest.mark.asyncio
    async def test_holdings_stock_allowed_in_parse_response(self) -> None:
        """Holdings stocks not in candidates list should be accepted in the response."""
        holding_code = "000660"  # Not in candidates
        stocks = [
            {
                "stock_code": "005930",  # candidate
                "scenarios": [
                    {
                        "condition": {"rsi_below": 30},
                        "action": "BUY",
                        "confidence": 85,
                        "rationale": "oversold",
                    }
                ],
            },
            {
                "stock_code": holding_code,  # holding only
                "scenarios": [
                    {
                        "condition": {"price_change_pct_below": -2.0},
                        "action": "SELL",
                        "confidence": 90,
                        "rationale": "stop-loss",
                    }
                ],
            },
        ]
        planner = _make_planner(gemini_response=_gemini_response_json(stocks=stocks))
        candidates = [_candidate()]  # only 005930
        holdings = [
            {
                "stock_code": holding_code,
                "name": "SK Hynix",
                "qty": 5,
                "entry_price": 180000.0,
                "unrealized_pnl_pct": -1.5,
                "holding_days": 7,
            }
        ]

        pb = await planner.generate_playbook(
            "KR",
            candidates,
            today=date(2026, 2, 8),
            current_holdings=holdings,
        )

        codes = [sp.stock_code for sp in pb.stock_playbooks]
        assert "005930" in codes
        assert holding_code in codes

@@ -440,3 +440,135 @@ class TestEvaluate:
        assert result.action == ScenarioAction.BUY
        assert result.match_details["rsi"] == 25.0
        assert isinstance(result.match_details["rsi"], float)


# ---------------------------------------------------------------------------
# Position-aware condition tests (#171)
# ---------------------------------------------------------------------------


class TestPositionAwareConditions:
    """Tests for unrealized_pnl_pct and holding_days condition fields."""

    def test_evaluate_condition_unrealized_pnl_above_matches(
        self, engine: ScenarioEngine
    ) -> None:
        """unrealized_pnl_pct_above should match when P&L exceeds threshold."""
        condition = StockCondition(unrealized_pnl_pct_above=3.0)
        assert engine.evaluate_condition(condition, {"unrealized_pnl_pct": 5.0}) is True

    def test_evaluate_condition_unrealized_pnl_above_no_match(
        self, engine: ScenarioEngine
    ) -> None:
        """unrealized_pnl_pct_above should NOT match when P&L is below threshold."""
        condition = StockCondition(unrealized_pnl_pct_above=3.0)
        assert engine.evaluate_condition(condition, {"unrealized_pnl_pct": 2.0}) is False

    def test_evaluate_condition_unrealized_pnl_below_matches(
        self, engine: ScenarioEngine
    ) -> None:
        """unrealized_pnl_pct_below should match when P&L is under threshold."""
        condition = StockCondition(unrealized_pnl_pct_below=-2.0)
        assert engine.evaluate_condition(condition, {"unrealized_pnl_pct": -3.5}) is True

    def test_evaluate_condition_unrealized_pnl_below_no_match(
        self, engine: ScenarioEngine
    ) -> None:
        """unrealized_pnl_pct_below should NOT match when P&L is above threshold."""
        condition = StockCondition(unrealized_pnl_pct_below=-2.0)
        assert engine.evaluate_condition(condition, {"unrealized_pnl_pct": -1.0}) is False

    def test_evaluate_condition_holding_days_above_matches(
        self, engine: ScenarioEngine
    ) -> None:
        """holding_days_above should match when position held longer than threshold."""
        condition = StockCondition(holding_days_above=5)
        assert engine.evaluate_condition(condition, {"holding_days": 7}) is True

    def test_evaluate_condition_holding_days_above_no_match(
        self, engine: ScenarioEngine
    ) -> None:
        """holding_days_above should NOT match when position held shorter."""
        condition = StockCondition(holding_days_above=5)
        assert engine.evaluate_condition(condition, {"holding_days": 3}) is False

    def test_evaluate_condition_holding_days_below_matches(
        self, engine: ScenarioEngine
    ) -> None:
        """holding_days_below should match when position held fewer days."""
        condition = StockCondition(holding_days_below=3)
        assert engine.evaluate_condition(condition, {"holding_days": 1}) is True

    def test_evaluate_condition_holding_days_below_no_match(
        self, engine: ScenarioEngine
    ) -> None:
        """holding_days_below should NOT match when held more days."""
        condition = StockCondition(holding_days_below=3)
        assert engine.evaluate_condition(condition, {"holding_days": 5}) is False

    def test_combined_pnl_and_holding_days(self, engine: ScenarioEngine) -> None:
        """Combined position-aware conditions should AND-evaluate correctly."""
        condition = StockCondition(
            unrealized_pnl_pct_above=3.0,
            holding_days_above=5,
        )
        # Both met → match
        assert engine.evaluate_condition(
            condition,
            {"unrealized_pnl_pct": 4.5, "holding_days": 7},
        ) is True
        # Only pnl met → no match
        assert engine.evaluate_condition(
            condition,
            {"unrealized_pnl_pct": 4.5, "holding_days": 3},
        ) is False

    def test_missing_unrealized_pnl_does_not_match(
        self, engine: ScenarioEngine
    ) -> None:
        """Missing unrealized_pnl_pct key should not match the condition."""
        condition = StockCondition(unrealized_pnl_pct_above=3.0)
        assert engine.evaluate_condition(condition, {}) is False

    def test_missing_holding_days_does_not_match(
        self, engine: ScenarioEngine
    ) -> None:
        """Missing holding_days key should not match the condition."""
        condition = StockCondition(holding_days_above=5)
        assert engine.evaluate_condition(condition, {}) is False

    def test_match_details_includes_position_fields(
        self, engine: ScenarioEngine
    ) -> None:
        """match_details should include position fields when condition specifies them."""
        pb = _playbook(
            scenarios=[
                StockScenario(
                    condition=StockCondition(unrealized_pnl_pct_above=3.0),
                    action=ScenarioAction.SELL,
                    confidence=90,
                    rationale="Take profit",
                )
            ]
        )
        result = engine.evaluate(
            pb,
            "005930",
            {"unrealized_pnl_pct": 5.0},
            {},
        )
        assert result.action == ScenarioAction.SELL
        assert "unrealized_pnl_pct" in result.match_details
        assert result.match_details["unrealized_pnl_pct"] == 5.0

    def test_position_conditions_parse_from_planner(self) -> None:
        """StockCondition should accept and store new fields from JSON parsing."""
        condition = StockCondition(
            unrealized_pnl_pct_above=3.0,
            unrealized_pnl_pct_below=None,
            holding_days_above=5,
            holding_days_below=None,
        )
        assert condition.unrealized_pnl_pct_above == 3.0
        assert condition.holding_days_above == 5
        assert condition.has_any_condition() is True
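The AND semantics these tests pin down, reduced to the two position-aware fields; a self-contained sketch only, since the real StockCondition/ScenarioEngine live in src and carry more fields:

# Illustrative only: stand-in condition type and evaluation rule.
from dataclasses import dataclass

@dataclass
class PositionCondition:
    unrealized_pnl_pct_above: float | None = None
    unrealized_pnl_pct_below: float | None = None
    holding_days_above: int | None = None
    holding_days_below: int | None = None

def evaluate_position_condition(cond: PositionCondition, data: dict) -> bool:
    checks = [
        ("unrealized_pnl_pct", cond.unrealized_pnl_pct_above, lambda v, t: v > t),
        ("unrealized_pnl_pct", cond.unrealized_pnl_pct_below, lambda v, t: v < t),
        ("holding_days", cond.holding_days_above, lambda v, t: v > t),
        ("holding_days", cond.holding_days_below, lambda v, t: v < t),
    ]
    for key, threshold, op in checks:
        if threshold is None:
            continue  # unset fields don't constrain the match
        value = data.get(key)
        if value is None or not op(value, threshold):
            return False  # missing keys never match
    return True  # all specified fields AND together

assert evaluate_position_condition(
    PositionCondition(unrealized_pnl_pct_above=3.0, holding_days_above=5),
    {"unrealized_pnl_pct": 4.5, "holding_days": 3},
) is False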
@@ -8,6 +8,7 @@ from unittest.mock import AsyncMock, MagicMock
from src.analysis.smart_scanner import ScanCandidate, SmartVolatilityScanner
from src.analysis.volatility import VolatilityAnalyzer
from src.broker.kis_api import KISBroker
from src.broker.overseas import OverseasBroker
from src.config import Settings


@@ -43,61 +44,70 @@ def scanner(mock_broker: MagicMock, mock_settings: Settings) -> SmartVolatilityS
    analyzer = VolatilityAnalyzer()
    return SmartVolatilityScanner(
        broker=mock_broker,
        overseas_broker=None,
        volatility_analyzer=analyzer,
        settings=mock_settings,
    )


@pytest.fixture
def mock_overseas_broker() -> MagicMock:
    """Create mock overseas broker."""
    broker = MagicMock(spec=OverseasBroker)
    broker.get_overseas_price = AsyncMock()
    broker.fetch_overseas_rankings = AsyncMock(return_value=[])
    return broker


class TestSmartVolatilityScanner:
    """Test suite for SmartVolatilityScanner."""

    @pytest.mark.asyncio
    async def test_scan_finds_oversold_candidates(
    async def test_scan_domestic_prefers_volatility_with_liquidity_bonus(
        self, scanner: SmartVolatilityScanner, mock_broker: MagicMock
    ) -> None:
        """Test that scanner identifies oversold stocks with high volume."""
        # Mock rankings
        mock_broker.fetch_market_rankings.return_value = [
        """Domestic scan should score by volatility first and volume rank second."""
        fluctuation_rows = [
            {
                "stock_code": "005930",
                "name": "Samsung",
                "price": 70000,
                "volume": 5000000,
                "change_rate": -3.5,
                "change_rate": -5.0,
                "volume_increase_rate": 250,
            },
            {
                "stock_code": "035420",
                "name": "NAVER",
                "price": 250000,
                "volume": 3000000,
                "change_rate": 3.0,
                "volume_increase_rate": 200,
            },
        ]
        volume_rows = [
            {"stock_code": "035420", "name": "NAVER", "price": 250000, "volume": 3000000},
            {"stock_code": "005930", "name": "Samsung", "price": 70000, "volume": 5000000},
        ]
        mock_broker.fetch_market_rankings.side_effect = [fluctuation_rows, volume_rows]
        mock_broker.get_daily_prices.return_value = [
            {"open": 1, "high": 1, "low": 1, "close": 1, "volume": 1000000},
            {"open": 1, "high": 1, "low": 1, "close": 1, "volume": 1000000},
        ]

        # Mock daily prices - trending down (oversold)
        prices = []
        for i in range(20):
            prices.append({
                "date": f"2026020{i:02d}",
                "open": 75000 - i * 200,
                "high": 75500 - i * 200,
                "low": 74500 - i * 200,
                "close": 75000 - i * 250,  # Steady decline
                "volume": 2000000,
            })
        mock_broker.get_daily_prices.return_value = prices

        candidates = await scanner.scan()

        # Should find at least one candidate (depending on exact RSI calculation)
        mock_broker.fetch_market_rankings.assert_called_once()
        mock_broker.get_daily_prices.assert_called_once_with("005930", days=20)

        # If qualified, should have oversold signal
        if candidates:
            assert candidates[0].signal in ["oversold", "momentum"]
            assert candidates[0].volume_ratio >= scanner.vol_multiplier
        assert len(candidates) >= 1
        # Samsung has higher absolute move, so it should lead despite lower volume rank bonus.
        assert candidates[0].stock_code == "005930"
        assert candidates[0].signal == "oversold"

    @pytest.mark.asyncio
    async def test_scan_finds_momentum_candidates(
    async def test_scan_domestic_finds_momentum_candidate(
        self, scanner: SmartVolatilityScanner, mock_broker: MagicMock
    ) -> None:
        """Test that scanner identifies momentum stocks with high volume."""
        mock_broker.fetch_market_rankings.return_value = [
        """Positive change should be represented as momentum signal."""
        fluctuation_rows = [
            {
                "stock_code": "035420",
                "name": "NAVER",
@@ -107,124 +117,67 @@ class TestSmartVolatilityScanner:
                "volume_increase_rate": 300,
            },
        ]

        # Mock daily prices - trending up (momentum)
        prices = []
        for i in range(20):
            prices.append({
                "date": f"2026020{i:02d}",
                "open": 230000 + i * 500,
                "high": 231000 + i * 500,
                "low": 229000 + i * 500,
                "close": 230500 + i * 500,  # Steady rise
                "volume": 1000000,
            })
        mock_broker.get_daily_prices.return_value = prices
        mock_broker.fetch_market_rankings.side_effect = [fluctuation_rows, fluctuation_rows]
        mock_broker.get_daily_prices.return_value = [
            {"open": 1, "high": 1, "low": 1, "close": 1, "volume": 1000000},
            {"open": 1, "high": 1, "low": 1, "close": 1, "volume": 1000000},
        ]

        candidates = await scanner.scan()

        mock_broker.fetch_market_rankings.assert_called_once()
        assert [c.stock_code for c in candidates] == ["035420"]
        assert candidates[0].signal == "momentum"

    @pytest.mark.asyncio
    async def test_scan_filters_low_volume(
    async def test_scan_domestic_filters_low_volatility(
        self, scanner: SmartVolatilityScanner, mock_broker: MagicMock
    ) -> None:
        """Test that stocks with low volume ratio are filtered out."""
        mock_broker.fetch_market_rankings.return_value = [
        """Domestic scan should drop symbols below volatility threshold."""
        fluctuation_rows = [
            {
                "stock_code": "000660",
                "name": "SK Hynix",
                "price": 150000,
                "volume": 500000,
                "change_rate": -5.0,
                "volume_increase_rate": 50,  # Only 50% increase (< 200%)
                "change_rate": 0.2,
                "volume_increase_rate": 50,
            },
        ]

        # Low volume
        prices = []
        for i in range(20):
            prices.append({
                "date": f"2026020{i:02d}",
                "open": 150000 - i * 100,
                "high": 151000 - i * 100,
                "low": 149000 - i * 100,
                "close": 150000 - i * 150,  # Declining (would be oversold)
                "volume": 1000000,  # Current 500k < 2x prev day 1M
            })
        mock_broker.get_daily_prices.return_value = prices
        mock_broker.fetch_market_rankings.side_effect = [fluctuation_rows, fluctuation_rows]
        mock_broker.get_daily_prices.return_value = [
            {"open": 1, "high": 150100, "low": 149900, "close": 150000, "volume": 1000000},
            {"open": 1, "high": 150100, "low": 149900, "close": 150000, "volume": 1000000},
        ]

        candidates = await scanner.scan()

        # Should be filtered out due to low volume ratio
        assert len(candidates) == 0

    @pytest.mark.asyncio
    async def test_scan_filters_neutral_rsi(
        self, scanner: SmartVolatilityScanner, mock_broker: MagicMock
    ) -> None:
        """Test that stocks with neutral RSI are filtered out."""
        mock_broker.fetch_market_rankings.return_value = [
            {
                "stock_code": "051910",
                "name": "LG Chem",
                "price": 500000,
                "volume": 3000000,
                "change_rate": 0.5,
                "volume_increase_rate": 300,  # High volume
            },
        ]

        # Flat prices (neutral RSI ~50)
        prices = []
        for i in range(20):
            prices.append({
                "date": f"2026020{i:02d}",
                "open": 500000 + (i % 2) * 100,  # Small oscillation
                "high": 500500,
                "low": 499500,
                "close": 500000 + (i % 2) * 50,
                "volume": 1000000,
            })
        mock_broker.get_daily_prices.return_value = prices

        candidates = await scanner.scan()

        # Should be filtered out (RSI ~50, not < 30 or > 70)
        assert len(candidates) == 0

    @pytest.mark.asyncio
    async def test_scan_uses_fallback_on_api_error(
        self, scanner: SmartVolatilityScanner, mock_broker: MagicMock
    ) -> None:
        """Test fallback to static list when ranking API fails."""
        mock_broker.fetch_market_rankings.side_effect = ConnectionError("API unavailable")

        # Fallback stocks should still be analyzed
        prices = []
        for i in range(20):
            prices.append({
                "date": f"2026020{i:02d}",
                "open": 50000 - i * 50,
                "high": 51000 - i * 50,
                "low": 49000 - i * 50,
                "close": 50000 - i * 75,  # Declining
                "volume": 1000000,
            })
        mock_broker.get_daily_prices.return_value = prices
        """Domestic scan should remain operational using fallback symbols."""
        mock_broker.fetch_market_rankings.side_effect = [
            ConnectionError("API unavailable"),
            ConnectionError("API unavailable"),
        ]
        mock_broker.get_daily_prices.return_value = [
            {"open": 1, "high": 103, "low": 97, "close": 100, "volume": 1000000},
            {"open": 1, "high": 103, "low": 97, "close": 100, "volume": 800000},
        ]

        candidates = await scanner.scan(fallback_stocks=["005930", "000660"])

        # Should not crash
        assert isinstance(candidates, list)
        assert len(candidates) >= 1

    @pytest.mark.asyncio
    async def test_scan_returns_top_n_only(
        self, scanner: SmartVolatilityScanner, mock_broker: MagicMock
    ) -> None:
        """Test that scan returns at most top_n candidates."""
        # Return many stocks
        mock_broker.fetch_market_rankings.return_value = [
        fluctuation_rows = [
            {
                "stock_code": f"00{i}000",
                "name": f"Stock{i}",
@@ -235,62 +188,17 @@ class TestSmartVolatilityScanner:
            }
            for i in range(1, 10)
        ]

        # All oversold with high volume
        def make_prices(code: str) -> list[dict]:
            prices = []
            for i in range(20):
                prices.append({
                    "date": f"2026020{i:02d}",
                    "open": 10000 - i * 100,
                    "high": 10500 - i * 100,
                    "low": 9500 - i * 100,
                    "close": 10000 - i * 150,
                    "volume": 1000000,
                })
            return prices

        mock_broker.get_daily_prices.side_effect = make_prices
        mock_broker.fetch_market_rankings.side_effect = [fluctuation_rows, fluctuation_rows]
        mock_broker.get_daily_prices.return_value = [
            {"open": 1, "high": 105, "low": 95, "close": 100, "volume": 1000000},
            {"open": 1, "high": 105, "low": 95, "close": 100, "volume": 900000},
        ]

        candidates = await scanner.scan()

        # Should respect top_n limit (3)
        assert len(candidates) <= scanner.top_n

    @pytest.mark.asyncio
    async def test_scan_skips_insufficient_price_history(
        self, scanner: SmartVolatilityScanner, mock_broker: MagicMock
    ) -> None:
        """Test that stocks with insufficient history are skipped."""
        mock_broker.fetch_market_rankings.return_value = [
            {
                "stock_code": "005930",
                "name": "Samsung",
                "price": 70000,
                "volume": 5000000,
                "change_rate": -5.0,
                "volume_increase_rate": 300,
            },
        ]

        # Only 5 days of data (need 15+ for RSI)
        mock_broker.get_daily_prices.return_value = [
            {
                "date": f"2026020{i:02d}",
                "open": 70000,
                "high": 71000,
                "low": 69000,
                "close": 70000,
                "volume": 2000000,
            }
            for i in range(5)
        ]

        candidates = await scanner.scan()

        # Should skip due to insufficient data
        assert len(candidates) == 0

    @pytest.mark.asyncio
    async def test_get_stock_codes(
        self, scanner: SmartVolatilityScanner
@@ -323,6 +231,160 @@ class TestSmartVolatilityScanner:

        assert codes == ["005930", "035420"]

    @pytest.mark.asyncio
    async def test_scan_overseas_uses_dynamic_symbols(
        self, mock_broker: MagicMock, mock_overseas_broker: MagicMock, mock_settings: Settings
    ) -> None:
        """Overseas scan should use provided dynamic universe symbols."""
        analyzer = VolatilityAnalyzer()
        scanner = SmartVolatilityScanner(
            broker=mock_broker,
            overseas_broker=mock_overseas_broker,
            volatility_analyzer=analyzer,
            settings=mock_settings,
        )

        market = MagicMock()
        market.name = "NASDAQ"
        market.code = "US_NASDAQ"
        market.exchange_code = "NASD"
        market.is_domestic = False

        mock_overseas_broker.get_overseas_price.side_effect = [
            {"output": {"last": "210.5", "rate": "1.6", "tvol": "1500000"}},
            {"output": {"last": "330.1", "rate": "0.2", "tvol": "900000"}},
        ]

        candidates = await scanner.scan(
            market=market,
            fallback_stocks=["AAPL", "MSFT"],
        )

        assert [c.stock_code for c in candidates] == ["AAPL"]
        assert candidates[0].signal == "momentum"
        assert candidates[0].price == 210.5

    @pytest.mark.asyncio
    async def test_scan_overseas_uses_ranking_api_first(
        self, mock_broker: MagicMock, mock_overseas_broker: MagicMock, mock_settings: Settings
    ) -> None:
        """Overseas scan should prioritize ranking API when available."""
        analyzer = VolatilityAnalyzer()
        scanner = SmartVolatilityScanner(
            broker=mock_broker,
            overseas_broker=mock_overseas_broker,
            volatility_analyzer=analyzer,
            settings=mock_settings,
        )
        market = MagicMock()
        market.name = "NASDAQ"
        market.code = "US_NASDAQ"
        market.exchange_code = "NASD"
        market.is_domestic = False

        mock_overseas_broker.fetch_overseas_rankings.return_value = [
            {"symb": "NVDA", "last": "780.2", "rate": "2.4", "tvol": "1200000"},
            {"symb": "MSFT", "last": "420.0", "rate": "0.3", "tvol": "900000"},
        ]

        candidates = await scanner.scan(market=market, fallback_stocks=["AAPL", "TSLA"])

        assert mock_overseas_broker.fetch_overseas_rankings.call_count >= 1
        mock_overseas_broker.get_overseas_price.assert_not_called()
        assert [c.stock_code for c in candidates] == ["NVDA"]

    @pytest.mark.asyncio
    async def test_scan_overseas_without_symbols_returns_empty(
        self, mock_broker: MagicMock, mock_overseas_broker: MagicMock, mock_settings: Settings
    ) -> None:
        """Overseas scan should return empty list when no symbol universe exists."""
        analyzer = VolatilityAnalyzer()
        scanner = SmartVolatilityScanner(
            broker=mock_broker,
            overseas_broker=mock_overseas_broker,
            volatility_analyzer=analyzer,
            settings=mock_settings,
        )
        market = MagicMock()
        market.name = "NASDAQ"
        market.code = "US_NASDAQ"
        market.exchange_code = "NASD"
        market.is_domestic = False

        candidates = await scanner.scan(market=market, fallback_stocks=[])

        assert candidates == []

    @pytest.mark.asyncio
    async def test_scan_overseas_picks_high_intraday_range_even_with_low_change(
        self, mock_broker: MagicMock, mock_overseas_broker: MagicMock, mock_settings: Settings
    ) -> None:
        """Volatility selection should consider intraday range, not only change rate."""
        analyzer = VolatilityAnalyzer()
        scanner = SmartVolatilityScanner(
            broker=mock_broker,
            overseas_broker=mock_overseas_broker,
            volatility_analyzer=analyzer,
            settings=mock_settings,
        )
        market = MagicMock()
        market.name = "NASDAQ"
        market.code = "US_NASDAQ"
        market.exchange_code = "NASD"
        market.is_domestic = False

        # change rate is tiny, but high-low range is large (15%).
        mock_overseas_broker.fetch_overseas_rankings.return_value = [
            {
                "symb": "ABCD",
                "last": "100",
                "rate": "0.2",
                "high": "110",
                "low": "95",
                "tvol": "800000",
            }
        ]

        candidates = await scanner.scan(market=market, fallback_stocks=[])

        assert [c.stock_code for c in candidates] == ["ABCD"]


class TestImpliedRSIFormula:
    """Test the implied_rsi formula in SmartVolatilityScanner (issue #181)."""

    def test_neutral_change_gives_neutral_rsi(self) -> None:
        """0% change → implied_rsi = 50 (neutral)."""
        # formula: 50 + (change_rate * 2.0)
        rsi = max(0.0, min(100.0, 50.0 + (0.0 * 2.0)))
        assert rsi == 50.0

    def test_10pct_change_gives_rsi_70(self) -> None:
        """10% upward change → implied_rsi = 70 (momentum signal)."""
        rsi = max(0.0, min(100.0, 50.0 + (10.0 * 2.0)))
        assert rsi == 70.0

    def test_minus_10pct_gives_rsi_30(self) -> None:
        """-10% change → implied_rsi = 30 (oversold signal)."""
        rsi = max(0.0, min(100.0, 50.0 + (-10.0 * 2.0)))
        assert rsi == 30.0

    def test_saturation_at_25pct(self) -> None:
        """Saturation occurs at >=25% change (not 12.5% as with the old coefficient 4.0)."""
        rsi_12pct = max(0.0, min(100.0, 50.0 + (12.5 * 2.0)))
        rsi_25pct = max(0.0, min(100.0, 50.0 + (25.0 * 2.0)))
        rsi_30pct = max(0.0, min(100.0, 50.0 + (30.0 * 2.0)))
        # At 12.5% change: RSI = 75 (not 100, unlike old formula)
        assert rsi_12pct == 75.0
        # At 25%+ saturation
        assert rsi_25pct == 100.0
        assert rsi_30pct == 100.0  # Capped

    def test_negative_saturation(self) -> None:
        """Saturation at -25% gives RSI = 0."""
        rsi = max(0.0, min(100.0, 50.0 + (-25.0 * 2.0)))
        assert rsi == 0.0

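The formula under test, factored into a helper for clarity; a sketch only, since the production code inlines this expression in SmartVolatilityScanner:

# Illustrative only: the clamped linear mapping the tests above exercise.
def implied_rsi(change_rate: float) -> float:
    """Map a daily % change onto a pseudo-RSI: 50 plus 2 points per percent, clamped."""
    return max(0.0, min(100.0, 50.0 + change_rate * 2.0))

assert implied_rsi(0.0) == 50.0    # neutral
assert implied_rsi(-10.0) == 30.0  # oversold boundary
assert implied_rsi(25.0) == 100.0  # saturates at +/-25%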
class TestRSICalculation:
    """Test RSI calculation in VolatilityAnalyzer."""

32 tests/test_strategies_base.py Normal file
@@ -0,0 +1,32 @@
"""Tests for BaseStrategy abstract class."""

from __future__ import annotations

from typing import Any

import pytest

from src.strategies.base import BaseStrategy


class ConcreteStrategy(BaseStrategy):
    """Minimal concrete strategy for testing."""

    def evaluate(self, market_data: dict[str, Any]) -> dict[str, Any]:
        return {"action": "HOLD", "confidence": 50, "rationale": "test"}


def test_base_strategy_cannot_be_instantiated() -> None:
    """BaseStrategy cannot be instantiated directly (it's abstract)."""
    with pytest.raises(TypeError):
        BaseStrategy()  # type: ignore[abstract]


def test_concrete_strategy_evaluate_returns_decision() -> None:
    """Concrete subclass must implement evaluate and return a dict."""
    strategy = ConcreteStrategy()
    result = strategy.evaluate({"close": [100.0, 101.0]})
    assert isinstance(result, dict)
    assert result["action"] == "HOLD"
    assert result["confidence"] == 50
    assert "rationale" in result
38 tests/test_strategy_exit_rules.py Normal file
@@ -0,0 +1,38 @@
from src.strategy.exit_rules import ExitRuleConfig, ExitRuleInput, evaluate_exit
from src.strategy.position_state_machine import PositionState


def test_hard_stop_exit() -> None:
    out = evaluate_exit(
        current_state=PositionState.HOLDING,
        config=ExitRuleConfig(hard_stop_pct=-2.0, arm_pct=3.0),
        inp=ExitRuleInput(current_price=97.0, entry_price=100.0, peak_price=100.0),
    )
    assert out.should_exit is True
    assert out.reason == "hard_stop"


def test_take_profit_exit_for_backward_compatibility() -> None:
    out = evaluate_exit(
        current_state=PositionState.HOLDING,
        config=ExitRuleConfig(hard_stop_pct=-2.0, arm_pct=3.0),
        inp=ExitRuleInput(current_price=104.0, entry_price=100.0, peak_price=104.0),
    )
    assert out.should_exit is True
    assert out.reason == "arm_take_profit"


def test_model_assist_exit_signal() -> None:
    out = evaluate_exit(
        current_state=PositionState.ARMED,
        config=ExitRuleConfig(model_prob_threshold=0.62, arm_pct=10.0),
        inp=ExitRuleInput(
            current_price=101.0,
            entry_price=100.0,
            peak_price=105.0,
            pred_down_prob=0.8,
            liquidity_weak=True,
        ),
    )
    assert out.should_exit is True
    assert out.reason == "model_liquidity_exit"
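The decision order implied by the three tests, as a self-contained sketch; the stand-in dataclasses and their field defaults are assumptions, and the real evaluate_exit in src.strategy.exit_rules is authoritative:

# Illustrative only.
from dataclasses import dataclass

@dataclass
class _Config:  # stand-in for ExitRuleConfig (fields assumed)
    hard_stop_pct: float | None = None
    arm_pct: float | None = None
    model_prob_threshold: float | None = None

@dataclass
class _Input:  # stand-in for ExitRuleInput (fields assumed)
    current_price: float
    entry_price: float
    peak_price: float
    pred_down_prob: float | None = None
    liquidity_weak: bool = False

def evaluate_exit_sketch(config: _Config, inp: _Input) -> tuple[bool, str]:
    pnl_pct = (inp.current_price / inp.entry_price - 1.0) * 100.0
    if config.hard_stop_pct is not None and pnl_pct <= config.hard_stop_pct:
        return True, "hard_stop"  # -3% breaches a -2% stop
    if config.arm_pct is not None and pnl_pct >= config.arm_pct:
        return True, "arm_take_profit"  # backward-compatible take profit
    if (
        config.model_prob_threshold is not None
        and inp.pred_down_prob is not None
        and inp.pred_down_prob >= config.model_prob_threshold
        and inp.liquidity_weak
    ):
        return True, "model_liquidity_exit"  # model signal plus weak liquidity
    return False, ""

assert evaluate_exit_sketch(
    _Config(hard_stop_pct=-2.0, arm_pct=3.0), _Input(97.0, 100.0, 100.0)
) == (True, "hard_stop")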
30 tests/test_strategy_state_machine.py Normal file
@@ -0,0 +1,30 @@
from src.strategy.position_state_machine import (
    PositionState,
    StateTransitionInput,
    promote_state,
)


def test_gap_jump_promotes_to_armed_directly() -> None:
    state = promote_state(
        PositionState.HOLDING,
        StateTransitionInput(
            unrealized_pnl_pct=4.0,
            be_arm_pct=1.2,
            arm_pct=2.8,
        ),
    )
    assert state == PositionState.ARMED


def test_exited_has_priority_over_promotion() -> None:
    state = promote_state(
        PositionState.HOLDING,
        StateTransitionInput(
            unrealized_pnl_pct=5.0,
            be_arm_pct=1.2,
            arm_pct=2.8,
            hard_stop_hit=True,
        ),
    )
    assert state == PositionState.EXITED
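The promotion rules the two tests imply, as a minimal sketch; the BE_ARMED intermediate state is assumed from the be_arm_pct parameter name and is not confirmed by these tests:

# Illustrative only.
from enum import Enum, auto

class _State(Enum):  # stand-in for PositionState; BE_ARMED is assumed
    HOLDING = auto()
    BE_ARMED = auto()
    ARMED = auto()
    EXITED = auto()

def promote_state_sketch(current: _State, pnl_pct: float, be_arm_pct: float,
                         arm_pct: float, hard_stop_hit: bool = False) -> _State:
    if hard_stop_hit:
        return _State.EXITED  # exit always outranks any promotion
    if pnl_pct >= arm_pct:
        return _State.ARMED  # a gap jump may skip BE_ARMED entirely
    if pnl_pct >= be_arm_pct:
        return _State.BE_ARMED
    return current

assert promote_state_sketch(_State.HOLDING, 4.0, 1.2, 2.8) is _State.ARMED
assert promote_state_sketch(_State.HOLDING, 5.0, 1.2, 2.8, hard_stop_hit=True) is _State.EXITED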
@@ -5,7 +5,7 @@ from unittest.mock import AsyncMock, patch
|
||||
import aiohttp
|
||||
import pytest
|
||||
|
||||
from src.notifications.telegram_client import NotificationPriority, TelegramClient
|
||||
from src.notifications.telegram_client import NotificationFilter, NotificationPriority, TelegramClient
|
||||
|
||||
|
||||
class TestTelegramClientInit:
|
||||
@@ -481,3 +481,187 @@ class TestClientCleanup:
|
||||
|
||||
# Should not raise exception
|
||||
await client.close()
|
||||
|
||||
|
||||
class TestNotificationFilter:
|
||||
"""Test granular notification filter behavior."""
|
||||
|
||||
def test_default_filter_allows_all(self) -> None:
|
||||
"""Default NotificationFilter has all flags enabled."""
|
||||
f = NotificationFilter()
|
||||
assert f.trades is True
|
||||
assert f.market_open_close is True
|
||||
assert f.fat_finger is True
|
||||
assert f.system_events is True
|
||||
assert f.playbook is True
|
||||
assert f.scenario_match is True
|
||||
assert f.errors is True
|
||||
|
||||
def test_client_uses_default_filter_when_none_given(self) -> None:
|
||||
"""TelegramClient creates a default NotificationFilter when none provided."""
|
||||
client = TelegramClient(bot_token="123:abc", chat_id="456", enabled=True)
|
||||
assert isinstance(client._filter, NotificationFilter)
|
||||
assert client._filter.scenario_match is True
|
||||
|
||||
def test_client_stores_provided_filter(self) -> None:
|
||||
"""TelegramClient stores a custom NotificationFilter."""
|
||||
nf = NotificationFilter(scenario_match=False, trades=False)
|
||||
client = TelegramClient(
|
||||
bot_token="123:abc", chat_id="456", enabled=True, notification_filter=nf
|
||||
)
|
||||
assert client._filter.scenario_match is False
|
||||
assert client._filter.trades is False
|
||||
assert client._filter.market_open_close is True # default still True
|
||||
|
||||
@pytest.mark.asyncio
|
||||
async def test_scenario_match_filtered_does_not_send(self) -> None:
|
||||
"""notify_scenario_matched skips send when scenario_match=False."""
|
||||
nf = NotificationFilter(scenario_match=False)
|
||||
client = TelegramClient(
|
||||
bot_token="123:abc", chat_id="456", enabled=True, notification_filter=nf
|
||||
)
|
||||
with patch("aiohttp.ClientSession.post") as mock_post:
|
||||
await client.notify_scenario_matched(
|
||||
stock_code="005930", action="BUY", condition_summary="rsi<30", confidence=85.0
|
||||
)
|
||||
mock_post.assert_not_called()
|
||||
|
||||
@pytest.mark.asyncio
|
||||
async def test_trades_filtered_does_not_send(self) -> None:
|
||||
"""notify_trade_execution skips send when trades=False."""
|
||||
nf = NotificationFilter(trades=False)
|
||||
client = TelegramClient(
|
||||
bot_token="123:abc", chat_id="456", enabled=True, notification_filter=nf
|
||||
)
|
||||
with patch("aiohttp.ClientSession.post") as mock_post:
|
||||
await client.notify_trade_execution(
|
||||
stock_code="005930", market="KR", action="BUY",
|
||||
quantity=10, price=70000.0, confidence=85.0
|
||||
)
|
||||
mock_post.assert_not_called()
|
||||
|
||||
@pytest.mark.asyncio
|
||||
async def test_market_open_close_filtered_does_not_send(self) -> None:
|
||||
"""notify_market_open/close skip send when market_open_close=False."""
|
||||
nf = NotificationFilter(market_open_close=False)
|
||||
client = TelegramClient(
|
||||
bot_token="123:abc", chat_id="456", enabled=True, notification_filter=nf
|
||||
)
|
||||
with patch("aiohttp.ClientSession.post") as mock_post:
|
||||
await client.notify_market_open("Korea")
|
||||
await client.notify_market_close("Korea", pnl_pct=1.5)
|
||||
mock_post.assert_not_called()
|
||||
|
||||
@pytest.mark.asyncio
|
||||
async def test_circuit_breaker_always_sends_regardless_of_filter(self) -> None:
|
||||
"""notify_circuit_breaker always sends (no filter flag)."""
|
||||
nf = NotificationFilter(
|
||||
trades=False, market_open_close=False, fat_finger=False,
|
||||
system_events=False, playbook=False, scenario_match=False, errors=False,
|
||||
)
|
||||
client = TelegramClient(
|
||||
bot_token="123:abc", chat_id="456", enabled=True, notification_filter=nf
|
||||
)
|
||||
mock_resp = AsyncMock()
|
||||
        mock_resp.status = 200
        mock_resp.__aenter__ = AsyncMock(return_value=mock_resp)
        mock_resp.__aexit__ = AsyncMock(return_value=False)

        with patch("aiohttp.ClientSession.post", return_value=mock_resp) as mock_post:
            await client.notify_circuit_breaker(pnl_pct=-3.5, threshold=-3.0)
            assert mock_post.call_count == 1

    @pytest.mark.asyncio
    async def test_errors_filtered_does_not_send(self) -> None:
        """notify_error skips send when errors=False."""
        nf = NotificationFilter(errors=False)
        client = TelegramClient(
            bot_token="123:abc", chat_id="456", enabled=True, notification_filter=nf
        )
        with patch("aiohttp.ClientSession.post") as mock_post:
            await client.notify_error("TestError", "something went wrong", "KR")
            mock_post.assert_not_called()

    @pytest.mark.asyncio
    async def test_playbook_filtered_does_not_send(self) -> None:
        """notify_playbook_generated/failed skip send when playbook=False."""
        nf = NotificationFilter(playbook=False)
        client = TelegramClient(
            bot_token="123:abc", chat_id="456", enabled=True, notification_filter=nf
        )
        with patch("aiohttp.ClientSession.post") as mock_post:
            await client.notify_playbook_generated("KR", 3, 10, 1200)
            await client.notify_playbook_failed("KR", "timeout")
            mock_post.assert_not_called()

    @pytest.mark.asyncio
    async def test_system_events_filtered_does_not_send(self) -> None:
        """notify_system_start/shutdown skip send when system_events=False."""
        nf = NotificationFilter(system_events=False)
        client = TelegramClient(
            bot_token="123:abc", chat_id="456", enabled=True, notification_filter=nf
        )
        with patch("aiohttp.ClientSession.post") as mock_post:
            await client.notify_system_start("paper", ["KR"])
            await client.notify_system_shutdown("Normal shutdown")
            mock_post.assert_not_called()

    def test_set_flag_valid_key(self) -> None:
        """set_flag returns True and updates field for a known key."""
        nf = NotificationFilter()
        assert nf.set_flag("scenario", False) is True
        assert nf.scenario_match is False

    def test_set_flag_invalid_key(self) -> None:
        """set_flag returns False for an unknown key."""
        nf = NotificationFilter()
        assert nf.set_flag("unknown_key", False) is False

    def test_as_dict_keys_match_KEYS(self) -> None:
        """as_dict() returns every key defined in KEYS."""
        nf = NotificationFilter()
        d = nf.as_dict()
        assert set(d.keys()) == set(NotificationFilter.KEYS.keys())

    def test_set_notification_valid_key(self) -> None:
        """TelegramClient.set_notification toggles filter at runtime."""
        client = TelegramClient(bot_token="123:abc", chat_id="456", enabled=True)
        assert client._filter.scenario_match is True
        assert client.set_notification("scenario", False) is True
        assert client._filter.scenario_match is False

    def test_set_notification_all_off(self) -> None:
        """set_notification('all', False) disables every filter flag."""
        client = TelegramClient(bot_token="123:abc", chat_id="456", enabled=True)
        assert client.set_notification("all", False) is True
        for v in client.filter_status().values():
            assert v is False

    def test_set_notification_all_on(self) -> None:
        """set_notification('all', True) enables every filter flag."""
        client = TelegramClient(
            bot_token="123:abc", chat_id="456", enabled=True,
            notification_filter=NotificationFilter(
                trades=False, market_open_close=False, scenario_match=False,
                fat_finger=False, system_events=False, playbook=False, errors=False,
            ),
        )
        assert client.set_notification("all", True) is True
        for v in client.filter_status().values():
            assert v is True

    def test_set_notification_unknown_key(self) -> None:
        """set_notification returns False for an unknown key."""
        client = TelegramClient(bot_token="123:abc", chat_id="456", enabled=True)
        assert client.set_notification("unknown", False) is False

    def test_filter_status_reflects_current_state(self) -> None:
        """filter_status() matches the current NotificationFilter state."""
        nf = NotificationFilter(trades=False, scenario_match=False)
        client = TelegramClient(
            bot_token="123:abc", chat_id="456", enabled=True, notification_filter=nf
        )
        status = client.filter_status()
        assert status["trades"] is False
        assert status["scenario"] is False
        assert status["market"] is True
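
These tests fix the `NotificationFilter` surface: boolean constructor flags, a `KEYS` map from short command keys to field names, `set_flag`/`as_dict`, and a `TelegramClient.set_notification` that aliases `scenario` to `scenario_match` and `market` to `market_open_close` and fans `"all"` out over every key. For orientation, a minimal sketch that would satisfy them — the `system` and `fat_finger` alias spellings are assumptions, not the real module:

```python
# Hypothetical sketch reconstructed from the tests above, not the PR's code.
from dataclasses import dataclass


@dataclass
class NotificationFilter:
    trades: bool = True
    market_open_close: bool = True
    scenario_match: bool = True
    fat_finger: bool = True
    system_events: bool = True
    playbook: bool = True
    errors: bool = True

    # Command-facing key -> field name. No type annotation, so the dataclass
    # treats this as a plain class attribute rather than a field.
    KEYS = {
        "trades": "trades",
        "market": "market_open_close",
        "scenario": "scenario_match",
        "fat_finger": "fat_finger",  # alias spelling assumed
        "system": "system_events",   # alias spelling assumed
        "playbook": "playbook",
        "errors": "errors",
    }

    def set_flag(self, key: str, value: bool) -> bool:
        field_name = self.KEYS.get(key)
        if field_name is None:
            return False
        setattr(self, field_name, value)
        return True

    def as_dict(self) -> dict[str, bool]:
        return {key: getattr(self, field) for key, field in self.KEYS.items()}
```

`set_notification("all", value)` then presumably just loops `set_flag` over `KEYS`, which is consistent with the all-off/all-on tests above.
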
@@ -682,6 +682,10 @@ class TestBasicCommands:
            "/help - Show available commands\n"
            "/status - Trading status (mode, markets, P&L)\n"
            "/positions - Current holdings\n"
            "/report - Daily summary report\n"
            "/scenarios - Today's playbook scenarios\n"
            "/review - Recent scorecards\n"
            "/dashboard - Dashboard URL/status\n"
            "/stop - Pause trading\n"
            "/resume - Resume trading"
        )
@@ -707,10 +711,106 @@
        assert "/help" in payload["text"]
        assert "/status" in payload["text"]
        assert "/positions" in payload["text"]
        assert "/report" in payload["text"]
        assert "/scenarios" in payload["text"]
        assert "/review" in payload["text"]
        assert "/dashboard" in payload["text"]
        assert "/stop" in payload["text"]
        assert "/resume" in payload["text"]

class TestExtendedCommands:
    """Test additional bot commands."""

    @pytest.mark.asyncio
    async def test_report_command(self) -> None:
        client = TelegramClient(bot_token="123:abc", chat_id="456", enabled=True)
        handler = TelegramCommandHandler(client)

        mock_resp = AsyncMock()
        mock_resp.status = 200
        mock_resp.__aenter__ = AsyncMock(return_value=mock_resp)
        mock_resp.__aexit__ = AsyncMock(return_value=False)

        async def mock_report() -> None:
            await client.send_message("<b>📈 Daily Report</b>\n\nTrades: 1")

        handler.register_command("report", mock_report)

        with patch("aiohttp.ClientSession.post", return_value=mock_resp) as mock_post:
            await handler._handle_update(
                {"update_id": 1, "message": {"chat": {"id": 456}, "text": "/report"}}
            )
            payload = mock_post.call_args.kwargs["json"]
            assert "Daily Report" in payload["text"]

    @pytest.mark.asyncio
    async def test_scenarios_command(self) -> None:
        client = TelegramClient(bot_token="123:abc", chat_id="456", enabled=True)
        handler = TelegramCommandHandler(client)

        mock_resp = AsyncMock()
        mock_resp.status = 200
        mock_resp.__aenter__ = AsyncMock(return_value=mock_resp)
        mock_resp.__aexit__ = AsyncMock(return_value=False)

        async def mock_scenarios() -> None:
            await client.send_message("<b>🧠 Today's Scenarios</b>\n\n- AAPL: BUY (85)")

        handler.register_command("scenarios", mock_scenarios)

        with patch("aiohttp.ClientSession.post", return_value=mock_resp) as mock_post:
            await handler._handle_update(
                {"update_id": 1, "message": {"chat": {"id": 456}, "text": "/scenarios"}}
            )
            payload = mock_post.call_args.kwargs["json"]
            assert "Today's Scenarios" in payload["text"]

    @pytest.mark.asyncio
    async def test_review_command(self) -> None:
        client = TelegramClient(bot_token="123:abc", chat_id="456", enabled=True)
        handler = TelegramCommandHandler(client)

        mock_resp = AsyncMock()
        mock_resp.status = 200
        mock_resp.__aenter__ = AsyncMock(return_value=mock_resp)
        mock_resp.__aexit__ = AsyncMock(return_value=False)

        async def mock_review() -> None:
            await client.send_message("<b>📝 Recent Reviews</b>\n\n- 2026-02-14 KR")

        handler.register_command("review", mock_review)

        with patch("aiohttp.ClientSession.post", return_value=mock_resp) as mock_post:
            await handler._handle_update(
                {"update_id": 1, "message": {"chat": {"id": 456}, "text": "/review"}}
            )
            payload = mock_post.call_args.kwargs["json"]
            assert "Recent Reviews" in payload["text"]

    @pytest.mark.asyncio
    async def test_dashboard_command(self) -> None:
        client = TelegramClient(bot_token="123:abc", chat_id="456", enabled=True)
        handler = TelegramCommandHandler(client)

        mock_resp = AsyncMock()
        mock_resp.status = 200
        mock_resp.__aenter__ = AsyncMock(return_value=mock_resp)
        mock_resp.__aexit__ = AsyncMock(return_value=False)

        async def mock_dashboard() -> None:
            await client.send_message("<b>🖥️ Dashboard</b>\n\nURL: http://127.0.0.1:8080")

        handler.register_command("dashboard", mock_dashboard)

        with patch("aiohttp.ClientSession.post", return_value=mock_resp) as mock_post:
            await handler._handle_update(
                {"update_id": 1, "message": {"chat": {"id": 456}, "text": "/dashboard"}}
            )
            payload = mock_post.call_args.kwargs["json"]
            assert "Dashboard" in payload["text"]

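The four command tests above repeat the same 200-OK response-mock plumbing. A small factory would keep that in one place — an editor's refactoring suggestion, not code from the PR:

```python
from unittest.mock import AsyncMock


def make_ok_response(status: int = 200) -> AsyncMock:
    """Build an aiohttp-style async-context response mock, as the tests above do inline."""
    resp = AsyncMock()
    resp.status = status
    resp.__aenter__ = AsyncMock(return_value=resp)
    resp.__aexit__ = AsyncMock(return_value=False)
    return resp
```
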
class TestGetUpdates:
    """Test getUpdates API interaction."""

@@ -775,3 +875,139 @@
        updates = await handler._get_updates()

        assert updates == []

    @pytest.mark.asyncio
    async def test_get_updates_409_stops_polling(self) -> None:
        """409 Conflict response stops the poller (_running = False) and returns empty list."""
        client = TelegramClient(bot_token="123:abc", chat_id="456", enabled=True)
        handler = TelegramCommandHandler(client)
        handler._running = True  # simulate active poller

        mock_resp = AsyncMock()
        mock_resp.status = 409
        mock_resp.text = AsyncMock(
            return_value='{"ok":false,"error_code":409,"description":"Conflict"}'
        )
        mock_resp.__aenter__ = AsyncMock(return_value=mock_resp)
        mock_resp.__aexit__ = AsyncMock(return_value=False)

        with patch("aiohttp.ClientSession.post", return_value=mock_resp):
            updates = await handler._get_updates()

        assert updates == []
        assert handler._running is False  # poller stopped

    @pytest.mark.asyncio
    async def test_poll_loop_exits_after_409(self) -> None:
        """_poll_loop exits naturally after _running is set to False by a 409 response."""
        import asyncio as _asyncio

        client = TelegramClient(bot_token="123:abc", chat_id="456", enabled=True)
        handler = TelegramCommandHandler(client)

        call_count = 0

        async def mock_get_updates_409() -> list[dict]:
            nonlocal call_count
            call_count += 1
            # Simulate 409 stopping the poller
            handler._running = False
            return []

        handler._get_updates = mock_get_updates_409  # type: ignore[method-assign]

        handler._running = True
        task = _asyncio.create_task(handler._poll_loop())
        await _asyncio.wait_for(task, timeout=2.0)

        # _get_updates called exactly once, then loop exited
        assert call_count == 1
        assert handler._running is False

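The two 409 tests pin down the poller's conflict handling: a 409 from getUpdates means another consumer (a webhook or a second poller) owns the stream, so `_get_updates` flips `_running` off and returns `[]`, and `_poll_loop` then falls out of its loop. A self-contained sketch of that contract — only `_running`, `_get_updates`, and `_poll_loop` come from the tests; the class name, URL field, and dispatch step are assumptions:

```python
import asyncio

import aiohttp


class PollerSketch:
    """Hypothetical reduction of TelegramCommandHandler's polling contract."""

    def __init__(self, updates_url: str) -> None:
        self._updates_url = updates_url  # assumed; the real handler builds this from the bot token
        self._running = False

    async def _get_updates(self) -> list[dict]:
        async with aiohttp.ClientSession() as session:
            async with session.post(self._updates_url, json={"timeout": 30}) as resp:
                if resp.status == 409:
                    # Another getUpdates consumer owns the stream; retrying
                    # would fail forever, so stop this poller for good.
                    self._running = False
                    return []
                if resp.status != 200:
                    return []
                data = await resp.json()
                return data.get("result", [])

    async def _poll_loop(self) -> None:
        while self._running:
            for update in await self._get_updates():
                ...  # dispatch to registered command handlers
            await asyncio.sleep(0)  # the real loop would back off between polls
```
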
class TestCommandWithArgs:
    """Test register_command_with_args and argument dispatch."""

    def test_register_command_with_args_stored(self) -> None:
        """register_command_with_args stores handler in _commands_with_args."""
        client = TelegramClient(bot_token="123:abc", chat_id="456", enabled=True)
        handler = TelegramCommandHandler(client)

        async def my_handler(args: list[str]) -> None:
            pass

        handler.register_command_with_args("notify", my_handler)
        assert "notify" in handler._commands_with_args
        assert handler._commands_with_args["notify"] is my_handler

    @pytest.mark.asyncio
    async def test_args_handler_receives_arguments(self) -> None:
        """Args handler is called with the trailing tokens."""
        client = TelegramClient(bot_token="123:abc", chat_id="456", enabled=True)
        handler = TelegramCommandHandler(client)

        received: list[list[str]] = []

        async def capture(args: list[str]) -> None:
            received.append(args)

        handler.register_command_with_args("notify", capture)

        update = {
            "message": {
                "chat": {"id": "456"},
                "text": "/notify scenario off",
            }
        }
        await handler._handle_update(update)
        assert received == [["scenario", "off"]]

    @pytest.mark.asyncio
    async def test_args_handler_takes_priority_over_no_args_handler(self) -> None:
        """When both handlers exist for same command, args handler wins."""
        client = TelegramClient(bot_token="123:abc", chat_id="456", enabled=True)
        handler = TelegramCommandHandler(client)

        no_args_called = []
        args_called = []

        async def no_args_handler() -> None:
            no_args_called.append(True)

        async def args_handler(args: list[str]) -> None:
            args_called.append(args)

        handler.register_command("notify", no_args_handler)
        handler.register_command_with_args("notify", args_handler)

        update = {
            "message": {
                "chat": {"id": "456"},
                "text": "/notify all off",
            }
        }
        await handler._handle_update(update)
        assert args_called == [["all", "off"]]
        assert no_args_called == []

    @pytest.mark.asyncio
    async def test_args_handler_with_no_trailing_args(self) -> None:
        """/notify with no args still dispatches to args handler with empty list."""
        client = TelegramClient(bot_token="123:abc", chat_id="456", enabled=True)
        handler = TelegramCommandHandler(client)

        received: list[list[str]] = []

        async def capture(args: list[str]) -> None:
            received.append(args)

        handler.register_command_with_args("notify", capture)

        update = {
            "message": {
                "chat": {"id": "456"},
                "text": "/notify",
            }
        }
        await handler._handle_update(update)
        assert received == [[]]

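Taken together, the TestCommandWithArgs cases fix the dispatch rule: tokenize the command text, prefer the args-aware registry (even when no tokens follow), and fall back to the no-args one. A minimal standalone sketch of that rule — the function and parameter names are illustrative, and the real `_handle_update` also performs chat-id checks omitted here:

```python
from typing import Awaitable, Callable

NoArgsHandler = Callable[[], Awaitable[None]]
ArgsHandler = Callable[[list[str]], Awaitable[None]]


async def dispatch(
    update: dict,
    commands: dict[str, NoArgsHandler],
    commands_with_args: dict[str, ArgsHandler],
) -> None:
    """Route '/cmd arg1 arg2' the way the tests above expect."""
    text = update.get("message", {}).get("text", "")
    if not text.startswith("/"):
        return
    tokens = text.lstrip("/").split()
    if not tokens:
        return
    command, *args = tokens
    if command in commands_with_args:
        # The args-aware handler wins even with no trailing tokens ("/notify" -> []).
        await commands_with_args[command](args)
    elif command in commands:
        await commands[command]()
```
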
@@ -124,6 +124,10 @@ class TestPromptOptimizer:
        assert len(prompt) < 300
        assert "005930" in prompt
        assert "75000" in prompt
        # Keys must match parse_response expectations (#242)
        assert '"action"' in prompt
        assert '"confidence"' in prompt
        assert '"rationale"' in prompt

    def test_build_compressed_prompt_no_instructions(self):
        """Test compressed prompt without instructions."""

tests/test_triple_barrier.py (Normal file, 131 lines)
@@ -0,0 +1,131 @@
from __future__ import annotations

from src.analysis.triple_barrier import TripleBarrierSpec, label_with_triple_barrier


def test_long_take_profit_first() -> None:
    highs = [100, 101, 103]
    lows = [100, 99.6, 100]
    closes = [100, 100, 102]
    spec = TripleBarrierSpec(take_profit_pct=0.02, stop_loss_pct=0.01, max_holding_bars=3)
    out = label_with_triple_barrier(
        highs=highs,
        lows=lows,
        closes=closes,
        entry_index=0,
        side=1,
        spec=spec,
    )
    assert out.label == 1
    assert out.touched == "take_profit"
    assert out.touch_bar == 2


def test_long_stop_loss_first() -> None:
    highs = [100, 100.5, 101]
    lows = [100, 98.8, 99]
    closes = [100, 99.5, 100]
    spec = TripleBarrierSpec(take_profit_pct=0.02, stop_loss_pct=0.01, max_holding_bars=3)
    out = label_with_triple_barrier(
        highs=highs,
        lows=lows,
        closes=closes,
        entry_index=0,
        side=1,
        spec=spec,
    )
    assert out.label == -1
    assert out.touched == "stop_loss"
    assert out.touch_bar == 1


def test_time_barrier_timeout() -> None:
    highs = [100, 100.8, 100.7]
    lows = [100, 99.3, 99.4]
    closes = [100, 100, 100]
    spec = TripleBarrierSpec(take_profit_pct=0.02, stop_loss_pct=0.02, max_holding_bars=2)
    out = label_with_triple_barrier(
        highs=highs,
        lows=lows,
        closes=closes,
        entry_index=0,
        side=1,
        spec=spec,
    )
    assert out.label == 0
    assert out.touched == "time"
    assert out.touch_bar == 2


def test_tie_break_stop_first_default() -> None:
    highs = [100, 102.1]
    lows = [100, 98.9]
    closes = [100, 100]
    spec = TripleBarrierSpec(take_profit_pct=0.02, stop_loss_pct=0.01, max_holding_bars=1)
    out = label_with_triple_barrier(
        highs=highs,
        lows=lows,
        closes=closes,
        entry_index=0,
        side=1,
        spec=spec,
    )
    assert out.label == -1
    assert out.touched == "stop_loss"


def test_short_side_inverts_barrier_semantics() -> None:
    highs = [100, 100.5, 101.2]
    lows = [100, 97.8, 98.0]
    closes = [100, 99, 99]
    spec = TripleBarrierSpec(take_profit_pct=0.02, stop_loss_pct=0.01, max_holding_bars=3)
    out = label_with_triple_barrier(
        highs=highs,
        lows=lows,
        closes=closes,
        entry_index=0,
        side=-1,
        spec=spec,
    )
    assert out.label == 1
    assert out.touched == "take_profit"


def test_short_tie_break_modes() -> None:
    highs = [100, 101.1]
    lows = [100, 97.9]
    closes = [100, 100]

    stop_first = TripleBarrierSpec(
        take_profit_pct=0.02,
        stop_loss_pct=0.01,
        max_holding_bars=1,
        tie_break="stop_first",
    )
    out_stop = label_with_triple_barrier(
        highs=highs,
        lows=lows,
        closes=closes,
        entry_index=0,
        side=-1,
        spec=stop_first,
    )
    assert out_stop.label == -1
    assert out_stop.touched == "stop_loss"

    take_first = TripleBarrierSpec(
        take_profit_pct=0.02,
        stop_loss_pct=0.01,
        max_holding_bars=1,
        tie_break="take_first",
    )
    out_take = label_with_triple_barrier(
        highs=highs,
        lows=lows,
        closes=closes,
        entry_index=0,
        side=-1,
        spec=take_first,
    )
    assert out_take.label == 1
    assert out_take.touched == "take_profit"
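
Together these six tests pin the labeling contract down tightly: barrier levels scale off the entry close, scanning starts one bar after entry, shorts mirror the levels, the time barrier yields label 0, and a same-bar double touch defers to `tie_break` (default `stop_first`, the conservative choice). A reference sketch consistent with every assertion above — the result-type name and field layout are assumptions about `src.analysis.triple_barrier`, not its actual code:

```python
from __future__ import annotations

from dataclasses import dataclass
from typing import Literal, Sequence


@dataclass(frozen=True)
class TripleBarrierSpec:
    take_profit_pct: float
    stop_loss_pct: float
    max_holding_bars: int
    tie_break: Literal["stop_first", "take_first"] = "stop_first"


@dataclass(frozen=True)
class BarrierLabel:  # hypothetical name for the returned record
    label: int      # 1 = take-profit, -1 = stop-loss, 0 = time barrier
    touched: str    # "take_profit" | "stop_loss" | "time"
    touch_bar: int  # bar index at which the barrier was touched


def label_with_triple_barrier(
    *,
    highs: Sequence[float],
    lows: Sequence[float],
    closes: Sequence[float],
    entry_index: int,
    side: int,
    spec: TripleBarrierSpec,
) -> BarrierLabel:
    entry = closes[entry_index]
    if side == 1:  # long: profit above entry, stop below
        tp_level = entry * (1 + spec.take_profit_pct)
        sl_level = entry * (1 - spec.stop_loss_pct)
    else:          # short: profit below entry, stop above
        tp_level = entry * (1 - spec.take_profit_pct)
        sl_level = entry * (1 + spec.stop_loss_pct)

    last_bar = min(entry_index + spec.max_holding_bars, len(closes) - 1)
    for bar in range(entry_index + 1, last_bar + 1):
        tp_hit = highs[bar] >= tp_level if side == 1 else lows[bar] <= tp_level
        sl_hit = lows[bar] <= sl_level if side == 1 else highs[bar] >= sl_level
        if tp_hit and sl_hit:
            # Both barriers touched inside one bar: intrabar order is unknowable
            # from OHLC alone, so the spec's tie_break decides.
            if spec.tie_break == "stop_first":
                return BarrierLabel(label=-1, touched="stop_loss", touch_bar=bar)
            return BarrierLabel(label=1, touched="take_profit", touch_bar=bar)
        if sl_hit:
            return BarrierLabel(label=-1, touched="stop_loss", touch_bar=bar)
        if tp_hit:
            return BarrierLabel(label=1, touched="take_profit", touch_bar=bar)
    return BarrierLabel(label=0, touched="time", touch_bar=last_bar)
```
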
tests/test_walk_forward_split.py (Normal file, 92 lines)
@@ -0,0 +1,92 @@
from __future__ import annotations

import pytest

from src.analysis.walk_forward_split import generate_walk_forward_splits


def test_generates_sequential_folds() -> None:
    folds = generate_walk_forward_splits(
        n_samples=30,
        train_size=10,
        test_size=5,
    )
    assert len(folds) == 4
    assert folds[0].train_indices == list(range(0, 10))
    assert folds[0].test_indices == list(range(10, 15))
    assert folds[1].train_indices == list(range(5, 15))
    assert folds[1].test_indices == list(range(15, 20))


def test_purge_removes_boundary_samples_before_test() -> None:
    folds = generate_walk_forward_splits(
        n_samples=25,
        train_size=8,
        test_size=4,
        purge_size=2,
    )
    first = folds[0]
    # test starts at 10, purge=2 => train end must be 7
    assert first.train_indices == list(range(0, 8))
    assert first.test_indices == list(range(10, 14))


def test_embargo_excludes_post_test_samples_from_next_train() -> None:
    folds = generate_walk_forward_splits(
        n_samples=45,
        train_size=15,
        test_size=5,
        step_size=10,
        embargo_size=3,
    )
    assert len(folds) >= 2
    # Fold1 test: 15..19, next fold train window: 10..24.
    # embargo_size=3 should remove 20,21,22 from fold2 train.
    second_train = folds[1].train_indices
    assert 20 not in second_train
    assert 21 not in second_train
    assert 22 not in second_train
    assert 23 in second_train


def test_respects_min_train_size_and_returns_empty_when_impossible() -> None:
    folds = generate_walk_forward_splits(
        n_samples=15,
        train_size=5,
        test_size=5,
        min_train_size=6,
    )
    assert folds == []


def test_embargo_uses_last_accepted_fold_when_intermediate_fold_skips() -> None:
    folds = generate_walk_forward_splits(
        n_samples=30,
        train_size=5,
        test_size=3,
        step_size=5,
        embargo_size=1,
        min_train_size=5,
    )
    # 1st fold accepted, 2nd skipped by min_train_size, subsequent folds still generated.
    assert len(folds) == 3
    assert folds[0].test_indices == [5, 6, 7]
    assert folds[1].test_indices == [15, 16, 17]
    assert folds[2].test_indices == [25, 26, 27]


@pytest.mark.parametrize(
    ("n_samples", "train_size", "test_size"),
    [
        (0, 10, 2),
        (10, 0, 2),
        (10, 5, 0),
    ],
)
def test_invalid_args_raise(n_samples: int, train_size: int, test_size: int) -> None:
    with pytest.raises(ValueError):
        generate_walk_forward_splits(
            n_samples=n_samples,
            train_size=train_size,
            test_size=test_size,
        )
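
The splitter semantics these tests encode: the train window slides by `step_size` (defaulting to `test_size`), `purge_size` opens a gap between train end and test start, `embargo_size` blocks the bars immediately after the last *accepted* fold's test window, and undersized folds are skipped rather than aborting the walk. A reference sketch consistent with the assertions above — the `Fold` container name and the keyword-only signature are assumptions, not the real module:

```python
# Hypothetical reconstruction of generate_walk_forward_splits from its tests.
from __future__ import annotations

from dataclasses import dataclass


@dataclass
class Fold:  # assumed container name
    train_indices: list[int]
    test_indices: list[int]


def generate_walk_forward_splits(
    *,
    n_samples: int,
    train_size: int,
    test_size: int,
    step_size: int | None = None,
    purge_size: int = 0,
    embargo_size: int = 0,
    min_train_size: int = 1,
) -> list[Fold]:
    if n_samples <= 0 or train_size <= 0 or test_size <= 0:
        raise ValueError("n_samples, train_size and test_size must be positive")
    step = step_size if step_size is not None else test_size
    folds: list[Fold] = []
    embargo_blocked: set[int] = set()  # indices blocked by the last *accepted* fold
    start = 0
    while start + train_size + purge_size + test_size <= n_samples:
        test_start = start + train_size + purge_size  # purge shifts the test window
        train = [i for i in range(start, start + train_size) if i not in embargo_blocked]
        if len(train) >= min_train_size:
            test = list(range(test_start, test_start + test_size))
            folds.append(Fold(train_indices=train, test_indices=test))
            # Embargo the bars immediately after this fold's test window.
            embargo_blocked = set(range(test[-1] + 1, test[-1] + 1 + embargo_size))
        start += step
    return folds
```
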
workflow/issue-271-runlog.md (Normal file, 37 lines)
@@ -0,0 +1,37 @@
# Issue #271 Workflow Run Log

## 2026-02-26

### Step 1: Gitea issue creation
- Attempt 1: Succeeded, but formatting degraded
  - Command style: `tea issues create -t ... -d "...\n..."`
  - Symptom: Issue body rendered literal `\n` text in web UI instead of line breaks
- Root cause
  - `tea` does not provide `--description-file`
  - Shell-escaped `\n` inside double quotes is passed as backslash+n text
- Resolution
  - Build body with heredoc and pass as variable (`-d "$ISSUE_BODY"`)

### Step 2: PR description creation
- Attempt 1: Succeeded, but same newline rendering risk detected
- Resolution
  - Same heredoc variable pattern applied for PR body (`--description "$PR_BODY"`)

### Preventive Action
- `docs/workflow.md` updated with "Gitea CLI Formatting Troubleshooting" section
- Standard command templates added for issues and PRs

### Reusable Safe Template
```bash
ISSUE_BODY=$(cat <<'EOF'
## Summary
- item A
- item B

## Scope
- docs only
EOF
)

tea issues create -t "title" -d "$ISSUE_BODY"
```
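
The same heredoc pattern applies to PR bodies, per Step 2. The `--description "$PR_BODY"` flag is the one noted above; the `tea pulls create` subcommand name is an assumption and may differ across tea versions:

```bash
PR_BODY=$(cat <<'EOF'
## Changes
- item A

## Testing
- item B
EOF
)

tea pulls create --title "title" --description "$PR_BODY"
```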